# Generated by Django 4.0 on 2022-01-25 21:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0006_ticketimage_external_url_alter_warehousereply_files'),
]
operations = [
migrations.RemoveField(
model_name='warehousereply',
name='files',
),
migrations.AlterField(
model_name='ticketimage',
name='ticket',
field=models.ManyToManyField(to='main.Tickets'),
),
migrations.CreateModel(
name='ReplyImage',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.FileField(blank=True, max_length=255, upload_to='replyfiles/%Y/%m/%d')),
('reply', models.ManyToManyField(to='main.WarehouseReply')),
],
),
]
|
class ProgressBarStyle(Enum,IComparable,IFormattable,IConvertible):
"""
Specifies the style that a System.Windows.Forms.ProgressBar uses to indicate the progress of an operation.
enum ProgressBarStyle,values: Blocks (0),Continuous (1),Marquee (2)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Blocks=None
Continuous=None
Marquee=None
value__=None
|
# -*- coding: utf-8 -*-
import config as cfg
import pyodbc
"""
Fill in closest Police Stations within 10 miles
If there is no closest PS, then ClosestPSDistance = MeanPSDistance = 10.5 miles
Latitude/Longitude distance coefficients:
--Miles 3958.75
--Kilometers 6367.45
--Feet 20890584
--Meters 6367450
"""
def calculate_distance(bankID, lat, lng):
query = """
DECLARE @latitude float, @longitude float
SELECT @latitude = ?, @longitude = ?
SELECT
[StationID]
,[Name]
, [Address1]
, [City]
, [State]
, Distance
FROM
(
SELECT
[StationID]
,[Name]
, [Address1]
, [City]
, [State]
, ( 3959 * acos( cos( radians(@latitude) ) * cos( radians( Lat ) ) * cos( radians( Lng )
- radians(@longitude) ) + sin( radians(@latitude) ) * sin( radians( Lat ) ) ) ) AS Distance
FROM PoliceStation
) as x
WHERE Distance <= 10
ORDER BY Distance;
"""
cnxn = pyodbc.connect( 'DRIVER={ODBC Driver 13 for SQL Server};SERVER=' + cfg.mssql['server'] + ';DATABASE='
+ cfg.mssql['database'] + ';UID=' + cfg.mssql['username'] + ';PWD=' + cfg.mssql['password'] )
cursor = cnxn.cursor()
params = [lat, lng]
rows = cursor.execute(query, params)
#Calculate
psCount = 0
totdist = 0
closestStationID = None
for row in rows:
totdist = totdist + float(row.Distance)
if(psCount == 0):
print(bankID, row.StationID, row.Name, row.City, row.Distance)
closestStationID = row.StationID
closestPSDistance = float(row.Distance)
psCount = psCount + 1
meanPSDistance = totdist / psCount if psCount else None
# Save back into table
query2 = """
UPDATE Bank
SET
ClosestStationID = ?
, ClosestPSDistance = ?
, MeanPSDistance = ?
, PSCount = ?
WHERE BankID = ?
;
"""
over10 = 10.5 #over 10 miles
if not closestStationID: #no closest station in 10 miles
closestStationID = None
closestPSDistance = over10
meanPSDistance = over10
psCount = 0
params2 = [closestStationID, closestPSDistance, meanPSDistance, psCount, bankID]
cursor.execute(query2, params2)
cnxn.commit()
#----------------------------------------------
# Calculate distance for all rows
cnxn = pyodbc.connect( 'DRIVER={ODBC Driver 13 for SQL Server};SERVER=' + cfg.mssql['server'] + ';DATABASE='
+ cfg.mssql['database'] + ';UID=' + cfg.mssql['username'] + ';PWD=' + cfg.mssql['password'] )
cursor = cnxn.cursor()
query = "SELECT bankID, lat, lng FROM Bank WHERE [ClosestPSDistance] IS NULL;"
rows = cursor.execute(query)
for row in rows:
calculate_distance(row.bankID, row.lat, row.lng)
|
i = 0
while i < 21474826:
i = i + 1
if i % 1000000 == 0:
print(i)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @author: Wesley
# @time: 2020-12-11 10:47
import os
import time
import torch
from torch import nn
from models.dinknet34 import DinkNet34
from loss import dice_bce_loss
from models.unet import UNet
from dataset import MyDataset
from torch.utils.data import DataLoader
img_path = r'E:\PyCharmProject\datasets\5k\train_set\JPEGImages'
mask_path = r'E:\PyCharmProject\datasets\5k\train_set\SegmentationClass'
val_img_path = r'E:\PyCharmProject\datasets\5k\validate_set\JPEGImages'
val_mask_path = r'E:\PyCharmProject\datasets\5k\validate_set\SegmentationClass'
log = './dinknet.txt'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch_size_per = 16
batch_size = batch_size_per * torch.cuda.device_count()
epoch_limit = 10
net = DinkNet34().to(device)
net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
weight = r'E:\PyCharmProject\Road-Detection\weights\dinknet34.pt'
# if os.path.exists(weight):
# net.load_state_dict(torch.load(weight))
train_dataset = MyDataset(img_path, mask_path)
val_dataset = MyDataset(val_img_path, val_mask_path)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_dataloader = DataLoader(val_dataset, batch_size=batch_size)
adam = torch.optim.Adam(net.parameters(), lr=2e-4)
sgd = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
loss_fun = dice_bce_loss()
if __name__ == '__main__':
epoch = 1
log = open(log, 'w', encoding='utf-8')
log.write('epoch' + '\t' + 'loss' + '\t' + 'pa' + '\t' + 'iou' + '\t' + 'precision' + '\n')
log.flush()
while epoch < 300:
s_time = time.time()
print('epoch - {} - training'.format(epoch))
net.train()
TP = FP = TN = FN = 0
pa = 0
iou = 0
stop = 0
flag = 0
train_loss = 0
batch = len(train_dataloader)
for i, (img, mask) in enumerate(train_dataloader):
img = img.to(device)
mask = mask.to(device)
out = net(img)
loss = loss_fun(mask, out)
adam.zero_grad()
loss.backward()
adam.step()
if i % 10 == 0:
print('{}: {}/{} - loss: {}'.format(epoch, i, batch, loss.item()))
# torch.save(net.state_dict(), weight)
# print('save success')
train_loss += loss.item()
epoch_loss = train_loss / len(train_dataloader)
e_time = time.time()
print('epoch - {} - epoch_loss: {}'.format(epoch, epoch_loss))
print('total-time: ', e_time - s_time)
print('epoch - {} - evaluating'.format(epoch))
net.eval()
for img, mask in val_dataloader:
img = img.to(device)
mask = mask.to(device)
with torch.no_grad():
pred = net(img)
pred[pred >= 0.5] = 1
pred[pred < 0.5] = 0
TP += ((pred == 1) & (mask == 1)).cpu().sum().item()
TN += ((pred == 0) & (mask == 0)).cpu().sum().item()
FN += ((pred == 0) & (mask == 1)).cpu().sum().item()
FP += ((pred == 1) & (mask == 0)).cpu().sum().item()
pa = (TP + TN) / (TP + TN + FP + FN)
precision = TP / (TP + FP)  # TP / (TP + FN) would be recall, not precision
iou = TP / (TP + FP + FN)
print('pa: ', pa)
print('iou: ', iou)
print('precision', precision)
log.write(
str(epoch) + '\t' + str(epoch_loss) + '\t' + str(pa) + '\t' + str(iou) + '\t' + str(precision) + '\n')
log.flush()
if iou > stop:
stop = iou
torch.save(net.state_dict(), weight)
print("save success,iou updated to: {}".format(iou))
flag = 0
else:
flag += 1
print("pa为{},没有提升,参数未更新,iou为{},第{}次未更新".format(iou, stop, flag))
if flag >= epoch_limit:
print("early stop at epoch {}, finally iou: {}".format(epoch, stop))
break
epoch += 1
log.close()
|
"""
Convert a large audio wav file (album length, i.e. > 30 minutes typically)
into a series of videos consisting of the audio synchronized with images of the
spectrogram.
"""
import os
import sys
import multiprocessing as mp
import subprocess
import tqdm
import numpy as np
import librosa.core
import librosa.display
import librosa.feature
import matplotlib.pyplot as plt
plt.switch_backend("agg")
SAMPLERATE = 44.1e3 # samples/sec
WAVPATH = sys.argv[1]
BASENAME = os.path.basename(WAVPATH).replace(".wav", "")
ROOT = "/mnt/nfs-share/music/data"
FRAMEROOT = ROOT + "/frames/" + BASENAME
DURATION = 20  # seconds of audio per video chunk
NUMPROC = 8
FFTFREQ = librosa.fft_frequencies(sr=SAMPLERATE)
F_MAX = np.max(FFTFREQ)
N_FFT = 2048
N_HOP = int(1.0 / 4 * N_FFT)
FILETIME = librosa.core.get_duration(filename=WAVPATH)
NFRAME = int(FILETIME) // DURATION  # integer division so range() below gets an int
DUMPFILE = "data.npy"
FPS = 5
def single_image(argtuple):
y, i_frame, i_second = argtuple
fractional_second = float(i_second) / FPS
abs_index = i_frame * DURATION * FPS + i_second
time = DURATION*i_frame + fractional_second
titlestr = "%s - file time %0.2f seconds" % (BASENAME, time)
# display the spectrogram
plt.figure(figsize=(18, 8))
librosa.display.specshow(
y, x_axis='time', y_axis='mel', sr=SAMPLERATE, hop_length=N_HOP)
plt.vlines(
fractional_second, 0, F_MAX,
linestyles='dashed', colors='w', alpha=0.6)
plt.title(titlestr)
plt.tight_layout()
plt.savefig(FRAMEROOT + "/%05d.png" % (abs_index))
plt.close()
def main():
""" main
"""
pbar = tqdm.tqdm(total=NFRAME)
pool = mp.Pool(NUMPROC)
init = False
if not os.path.exists(FRAMEROOT):
os.makedirs(FRAMEROOT)
for i_frame in range(10, NFRAME):
# load the audio
x, sr = librosa.core.load(
WAVPATH, sr=SAMPLERATE,
offset=DURATION * i_frame, duration=DURATION)
# compute the spectrogram
x = librosa.power_to_db(
librosa.feature.melspectrogram(
y=x, hop_length=N_HOP, n_fft=N_FFT, sr=SAMPLERATE),
ref=np.max)
if not init:
f_mean = np.sum(x, axis=1)
init = True
else:
f_mean += np.sum(x, axis=1)
# loop updates
pbar.update(1)
pool.map(
single_image,
[(x, i_frame, i_second) for i_second in range(FPS*DURATION)])
np.save(BASENAME + 'f_mean.npy', f_mean)
pbar.close()
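# Stitch the saved frames into a 5 fps video and mux in the original audio.
# '-shortest' ends the output at the shorter of the two streams, and
# '-pix_fmt yuv420p' keeps the result playable in most players.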
subprocess.call([
"ffmpeg", '-r', '5', '-i', FRAMEROOT + '%05d.png', '-i', WAVPATH,
'-shortest', '-c:v', 'libx264', '-c:a', 'aac', '-strict', '-2',
'-pix_fmt', 'yuv420p', '-crf', '23', '-r', '5', '-y',
ROOT + "/videos/" + BASENAME + '.mp4'])
if __name__ == '__main__':
main()
|
import requests
parameters = {
"amount": 10,
"type": "multiple"
}
response = requests.get(url="https://opentdb.com/api.php", params=parameters)
question_data = response.json()["results"]
"""
Sample Response
[
{
'category': 'Sports',
'type': 'multiple',
'difficulty': 'medium',
'question': 'Which Formula One driver was nicknamed 'The Professor'?',
'correct_answer': 'Alain Prost',
'incorrect_answers': [
'Ayrton Senna',
'Niki Lauda',
'Emerson Fittipaldi'
]
},
{
'category': 'Entertainment: Music',
'type': 'multiple',
'difficulty': 'medium',
'question': 'In which city did American rap producer DJ Khaled originate from?',
'correct_answer': 'Miami',
'incorrect_answers': [
'New York',
'Detroit',
'Atlanta'
]
}
]
"""
|
from imageio import imread
import matplotlib.pyplot as plt
def plot_animal_tree(ax=None):
import graphviz
if ax is None:
ax = plt.gca()
mygraph = graphviz.Digraph(node_attr={'shape': 'box'},
edge_attr={'labeldistance': "10.5"},
format="png")
mygraph.node("0", "Has feathers?")
mygraph.node("1", "Can fly?")
mygraph.node("2", "Has fins?")
mygraph.node("3", "Hawk")
mygraph.node("4", "Penguin")
mygraph.node("5", "Dolphin")
mygraph.node("6", "Bear")
mygraph.edge("0", "1", label="True")
mygraph.edge("0", "2", label="False")
mygraph.edge("1", "3", label="True")
mygraph.edge("1", "4", label="False")
mygraph.edge("2", "5", label="True")
mygraph.edge("2", "6", label="False")
mygraph.render("tmp")
ax.imshow(imread("tmp.png"))
ax.set_axis_off()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-21 23:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('property_api', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='ckanresource',
name='ckan_instance',
),
migrations.AddField(
model_name='ckanresource',
name='slug',
field=models.CharField(default='', max_length=200),
preserve_default=False,
),
migrations.DeleteModel(
name='CKANInstance',
),
]
|
import _plotly_utils.basevalidators
class SizemodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="sizemode", parent_name="scattercarpet.marker", **kwargs
):
super(SizemodeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
values=kwargs.pop("values", ["diameter", "area"]),
**kwargs,
)
|
from typing import List
from fastapi import APIRouter, Depends, HTTPException, Query
from sqlalchemy.orm import Session
from dispatch.database import get_db, search_filter_sort_paginate
from .models import (
TeamContactCreate,
TeamContactRead,
TeamContactUpdate,
TeamPagination,
)
from .service import create, delete, get, get_by_email, update
router = APIRouter()
@router.get("/", response_model=TeamPagination)
def get_teams(
db_session: Session = Depends(get_db),
page: int = 1,
items_per_page: int = Query(5, alias="itemsPerPage"),
query_str: str = Query(None, alias="q"),
sort_by: List[str] = Query([], alias="sortBy[]"),
descending: List[bool] = Query([], alias="descending[]"),
fields: List[str] = Query([], alias="fields[]"),
ops: List[str] = Query([], alias="ops[]"),
values: List[str] = Query([], alias="values[]"),
):
"""
Get all team contacts.
"""
return search_filter_sort_paginate(
db_session=db_session,
model="TeamContact",
query_str=query_str,
page=page,
items_per_page=items_per_page,
sort_by=sort_by,
descending=descending,
fields=fields,
values=values,
ops=ops,
)
@router.post("/", response_model=TeamContactRead)
def create_team(*, db_session: Session = Depends(get_db), team_contact_in: TeamContactCreate):
"""
Create a new team contact.
"""
team = get_by_email(db_session=db_session, email=team_contact_in.email)
if team:
raise HTTPException(status_code=400, detail="The team with this email already exists.")
team = create(db_session=db_session, team_contact_in=team_contact_in)
return team
@router.get("/{team_id}", response_model=TeamContactRead)
def get_team(*, db_session: Session = Depends(get_db), team_contact_id: int):
"""
Get a team contact.
"""
team = get(db_session=db_session, team_contact_id=team_contact_id)
if not team:
raise HTTPException(status_code=404, detail="The team with this id does not exist.")
return team
@router.put("/{team_contact_id}", response_model=TeamContactRead)
def update_team(
*,
db_session: Session = Depends(get_db),
team_contact_id: int,
team_contact_in: TeamContactUpdate,
):
"""
Update a team contact.
"""
team = get(db_session=db_session, team_contact_id=team_contact_id)
if not team:
raise HTTPException(status_code=404, detail="The team with this id does not exist.")
team = update(db_session=db_session, team_contact=team, team_contact_in=team_contact_in)
return team
@router.delete("/{team_contact_id}", response_model=TeamContactRead)
def delete_team(*, db_session: Session = Depends(get_db), team_contact_id: int):
"""
Delete a team contact.
"""
team = get(db_session=db_session, team_contact_id=team_contact_id)
if not team:
raise HTTPException(status_code=404, detail="The team with this id does not exist.")
delete(db_session=db_session, team_contact_id=team_contact_id)
return team
|
'''
only for RRP Hopper
Shihao Feng
2021.10.28
'''
import numpy as np
import pybullet as p
from leg_kinematics import LegKinematicsRRP
import pinocchio as pin
class JointPDController(object):
def __init__ (self):
self.kp = np.array([70, 70, 1500])
self.kd = np.array([2, 2, 10])
def solve(self, q_d, dq_d, q_state, dq_state):
q = q_state[7:10]
dq = dq_state[6:9]
ddq_d = np.zeros(3) # the desired acceleration is expensive to compute, so simply set it to zero
tau_d = ddq_d + self.kd*(dq_d - dq) + self.kp*(q_d - q) # (3,)
return tau_d
class SLIPController(object):
def __init__(self):
self.q_d = np.array([0., 0., 0.]) # q_d[2] always 0
self.dq_d = np.array([0., 0., 0.]) # always 0
# joint gains
self.kp = np.array([70., 70., 3000.])
self.kd = np.array([2., 2., 10.]) # damping models energy loss and keeps the leg from chattering
# body attitude gains
self.kp_pose = 5. * np.ones(2)
self.kd_pose = 1. * np.ones(2)
# horizontal velocity gain
self.kp_vel = 0.1 * np.ones(2)
self.leg_length_normal = 0.55
self.RRP = LegKinematicsRRP(L=self.leg_length_normal)
# private methods
def __w_to_drpy(self, rpy, w):
'''
rpy -> (3,), w -> (3,),drpy -> (3,)
'''
H = np.array([[np.cos(rpy[2])/np.cos(rpy[1]), np.sin(rpy[2])/np.cos(rpy[1]), 0.],
[-np.sin(rpy[2]), np.cos(rpy[2]), 0.],
[np.cos(rpy[2])*np.tan(rpy[1]), np.sin(rpy[2])*np.tan(rpy[1]), 0.]])
drpy = (H @ w.reshape(-1,1)).ravel()
return drpy
def solve(self, q_state, dq_state, robot_state_machine, T_s, vel, dir, F_thrust):
tau_d = np.zeros(3) # initialize
orn_body = q_state[3:7] # body orientation (quaternion)
rpy = np.array(p.getEulerFromQuaternion(orn_body))
w_body = dq_state[3:6] # body angular velocity w
drpy = self.__w_to_drpy(rpy, w_body)
q = q_state[7:10] # joint positions
dq = dq_state[6:9] # joint velocities
# control the virtual spring force
tau_d[2] = self.kd[2]*(self.dq_d[2] - dq[2]) \
+ self.kp[2]*(self.q_d[2] - q[2])
# while the spring extends, apply thrust to offset energy losses
if robot_state_machine == 'THRUST':
tau_d[2] += F_thrust
# at touchdown or liftoff, hip joint torques are zero
if (robot_state_machine == 'LOADING' or robot_state_machine == 'UNLOADING'):
tau_d[0:2] = np.zeros(2)
# while the spring compresses or extends, apply hip torques to control body attitude
if (robot_state_machine == 'COMPRESSION' or robot_state_machine == 'THRUST'):
# linear servo control of the attitude
tau_d[0:2] = - (self.kd_pose*(np.zeros(2) - drpy[0:2]) \
+ self.kp_pose*(np.zeros(2) - rpy[0:2])) # (2,)
# during flight, move the foot to the desired touchdown point
if robot_state_machine == 'FLIGHT':
vel_xy_d = np.array([vel*np.cos(dir), vel*np.sin(dir)])
v_body = dq_state[0:2] # current horizontal velocity
# expressed in the H frame: origin coincident with the body frame, axes parallel to the world frame
xy_d = v_body*T_s/2 - self.kp_vel*(vel_xy_d - v_body) # compute the foot touchdown point
r = q[2] + self.leg_length_normal
z_d = - (r**2 - xy_d[0]**2 - xy_d[1]**2)**0.5
# transform into the B frame (body frame)
R_HB = pin.rpy.rpyToMatrix(rpy)
R_BH = R_HB.T
p_H = np.array([xy_d[0], xy_d[1], z_d])
p_B = (R_BH @ p_H.reshape(-1,1)).ravel() # (3,)
q_d = self.RRP.IK(p_B)
self.q_d[0:2] = q_d[0:2]
# joint PD control
tau_d[0:2] = self.kd[0:2]*(self.dq_d[0:2] - dq[0:2]) \
+ self.kp[0:2]*(self.q_d[0:2] - q[0:2]) # (2,)
print('tau_d: ', tau_d)
return tau_d
|
"""A collection of classes and methods to deal with collections of
rates that together make up a network."""
# Common Imports
from __future__ import print_function
import functools
import math
from operator import mul
import os
from collections import OrderedDict
from ipywidgets import interact
import matplotlib as mpl
import matplotlib.pyplot as plt
#from mpl_toolkits.axes_grid1 import make_axes_locatable
import networkx as nx
# Import Rate
from pynucastro.rates import Rate, Nucleus, Library
mpl.rcParams['figure.dpi'] = 100
class Composition(object):
"""a composition holds the mass fractions of the nuclei in a network
-- useful for evaluating the rates
"""
def __init__(self, nuclei, small=1.e-16):
"""nuclei is an iterable of the nuclei (Nucleus objects) in the network"""
if not isinstance(nuclei[0], Nucleus):
raise ValueError("must supply an iterable of Nucleus objects")
else:
self.X = {k: small for k in nuclei}
def set_solar_like(self, Z=0.02):
""" approximate a solar abundance, setting p to 0.7, He4 to 0.3 - Z and
the remainder evenly distributed with Z """
num = len(self.X)
rem = Z/(num-2)
for k in self.X:
if k == Nucleus("p"):
self.X[k] = 0.7
elif k.raw == "he4":
self.X[k] = 0.3 - Z
else:
self.X[k] = rem
self.normalize()
def set_all(self, xval):
""" set all species to a particular value """
for k in self.X:
self.X[k] = xval
def set_nuc(self, name, xval):
""" set nuclei name to the mass fraction xval """
for k in self.X:
if k.raw == name:
self.X[k] = xval
break
def normalize(self):
""" normalize the mass fractions to sum to 1 """
X_sum = sum([self.X[k] for k in self.X])
for k in self.X:
self.X[k] /= X_sum
def get_molar(self):
""" return a dictionary of molar fractions"""
molar_frac = {k: v/k.A for k, v in self.X.items()}
return molar_frac
def __str__(self):
ostr = ""
for k in self.X:
ostr += " X({}) : {}\n".format(k, self.X[k])
return ostr
class RateCollection(object):
""" a collection of rates that together define a network """
pynucastro_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def __init__(self, rate_files=None, libraries=None, rates=None):
"""
rate_files are the files that together define the network. This
can be any iterable or single string.
This can include Reaclib library files storing multiple rates.
If libraries is supplied, initialize a RateCollection using the rates
in the Library object(s) in list 'libraries'.
If rates is supplied, initialize a RateCollection using the
Rate objects in the list 'rates'.
Any combination of these options may be combined.
"""
self.files = []
self.rates = []
self.library = None
if rate_files:
if isinstance(rate_files, str):
rate_files = [rate_files]
self._read_rate_files(rate_files)
if rates:
if isinstance(rates, Rate):
rates = [rates]
try:
for r in rates:
assert(isinstance(r, Rate))
except:
print('Expected Rate object or list of Rate objects passed as the rates argument.')
raise
else:
rlib = Library(rates=rates)
if not self.library:
self.library = rlib
else:
self.library = self.library + rlib
if libraries:
if isinstance(libraries, Library):
libraries = [libraries]
try:
for lib in libraries:
assert(isinstance(lib, Library))
except:
print('Expected Library object or list of Library objects passed as the libraries argument.')
raise
else:
if not self.library:
self.library = libraries.pop(0)
for lib in libraries:
self.library = self.library + lib
if self.library:
self.rates = self.rates + self.library.get_rates()
# get the unique nuclei
u = []
for r in self.rates:
t = set(r.reactants + r.products)
u = set(list(u) + list(t))
self.unique_nuclei = sorted(u)
# now make a list of each rate that touches each nucleus
# we'll store this in a dictionary keyed on the nucleus
self.nuclei_consumed = OrderedDict()
self.nuclei_produced = OrderedDict()
for n in self.unique_nuclei:
self.nuclei_consumed[n] = [r for r in self.rates if n in r.reactants]
self.nuclei_produced[n] = [r for r in self.rates if n in r.products]
# Re-order self.rates so Reaclib rates come first,
# followed by Tabular rates. This is needed if
# reaclib coefficients are targets of a pointer array
# in the Fortran network.
# It is desired to avoid wasting array size
# storing meaningless Tabular coefficient pointers.
self.rates = sorted(self.rates,
key=lambda r: r.chapter == 't')
self.tabular_rates = []
self.reaclib_rates = []
for n, r in enumerate(self.rates):
if r.chapter == 't':
self.tabular_rates.append(n)
elif isinstance(r.chapter, int):
self.reaclib_rates.append(n)
else:
print('ERROR: Chapter type unknown for rate chapter {}'.format(
str(r.chapter)))
exit()
def _read_rate_files(self, rate_files):
# get the rates
self.files = rate_files
for rf in self.files:
try:
rflib = Library(rf)
except:
print("Error reading library from file: {}".format(rf))
raise
else:
if not self.library:
self.library = rflib
else:
self.library = self.library + rflib
def get_nuclei(self):
""" get all the nuclei that are part of the network """
return self.unique_nuclei
def evaluate_rates(self, rho, T, composition):
"""evaluate the rates for a specific density, temperature, and
composition"""
rvals = OrderedDict()
ys = composition.get_molar()
for r in self.rates:
val = r.prefactor * rho**r.dens_exp * r.eval(T)
yfac = functools.reduce(mul, [ys[q] for q in r.reactants])
rvals[r] = yfac * val
return rvals
def network_overview(self):
""" return a verbose network overview """
ostr = ""
for n in self.unique_nuclei:
ostr += "{}\n".format(n)
ostr += " consumed by:\n"
for r in self.nuclei_consumed[n]:
ostr += " {}\n".format(r.string)
ostr += " produced by:\n"
for r in self.nuclei_produced[n]:
ostr += " {}\n".format(r.string)
ostr += "\n"
return ostr
def write_network(self, *args, **kwargs):
"""Before writing the network, check to make sure the rates
are distinguishable by name."""
assert self._distinguishable_rates(), "ERROR: Rates not uniquely identified by Rate.fname"
self._write_network(*args, **kwargs)
def _distinguishable_rates(self):
"""Every Rate in this RateCollection should have a unique Rate.fname,
as the network writers distinguish the rates on this basis."""
names = [r.fname for r in self.rates]
return len(set(names)) == len(self.rates)
def _write_network(self, *args, **kwargs):
"""A stub for function to output the network -- this is implementation
dependent."""
print('To create network integration source code, use a class that implements a specific network type.')
return
def plot(self, outfile=None, rho=None, T=None, comp=None, size=(800, 600), dpi=100):
"""Make a plot of the network structure showing the links between nuclei"""
G = nx.MultiDiGraph()
G.position = {}
G.labels = {}
fig, ax = plt.subplots()
#divider = make_axes_locatable(ax)
#cax = divider.append_axes('right', size='15%', pad=0.05)
ax.plot([0, 0], [8, 8], 'b-')
# nodes -- the node nuclei will be all of the heavies, but not
# p, n, alpha, unless we have p + p, 3-a, etc.
node_nuclei = []
for n in self.unique_nuclei:
if n.raw not in ["p", "n", "he4"]:
node_nuclei.append(n)
else:
for r in self.rates:
if r.reactants.count(n) > 1:
node_nuclei.append(n)
break
for n in node_nuclei:
G.add_node(n)
G.position[n] = (n.N, n.Z)
G.labels[n] = r"${}$".format(n.pretty)
if rho is not None and T is not None and comp is not None:
ydots = self.evaluate_rates(rho, T, comp)
else:
ydots = None
#for rr in ydots:
# print("{}: {}".format(rr, ydots[rr]))
# edges
for n in node_nuclei:
for r in self.nuclei_consumed[n]:
for p in r.products:
if p in node_nuclei:
# networkx doesn't seem to keep the edges in
# any particular order, so we associate data
# to the edges here directly, in this case,
# the reaction rate, which will be used to
# color it
if ydots is None:
G.add_edges_from([(n, p)], weight=0.5)
else:
try:
rate_weight = math.log10(ydots[r])
except ValueError:
# if ydots[r] is zero, then set the weight
# to roughly the minimum exponent possible
# for python floats
rate_weight = -308
except:
raise
G.add_edges_from([(n, p)], weight=rate_weight)
nx.draw_networkx_nodes(G, G.position,
node_color="#A0CBE2", alpha=1.0,
node_shape="o", node_size=1000, linewidth=2.0, zorder=10, ax=ax)
nx.draw_networkx_labels(G, G.position, G.labels,
font_size=13, font_color="w", zorder=100, ax=ax)
# get the edges and weights coupled in the same order
edges, weights = zip(*nx.get_edge_attributes(G, 'weight').items())
edges_lc = nx.draw_networkx_edges(G, G.position, width=3,
edgelist=edges, edge_color=weights,
node_size=1000,
edge_cmap=plt.cm.viridis, zorder=1, ax=ax)
# for networkx <= 2.0 draw_networkx_edges returns a
# LineCollection matplotlib type which we can use for the
# colorbar directly. For networkx >= 2.1, it is a collection
# of FancyArrowPatch-s, which we need to run through a
# PatchCollection. See:
# https://stackoverflow.com/questions/18658047/adding-a-matplotlib-colorbar-from-a-patchcollection
if ydots is not None:
pc = mpl.collections.PatchCollection(edges_lc, cmap=plt.cm.viridis)
pc.set_array(weights)
plt.colorbar(pc, label="log10(rate)")
Ns = [n.N for n in node_nuclei]
Zs = [n.Z for n in node_nuclei]
plt.xlim(min(Ns)-1, max(Ns)+1)
#plt.ylim(min(Zs)-1, max(Zs)+1)
plt.xlabel(r"$N$", fontsize="large")
plt.ylabel(r"$Z$", fontsize="large")
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_aspect("equal", "datalim")
fig.set_size_inches(size[0]/dpi, size[1]/dpi)
if outfile is None:
plt.show()
else:
plt.tight_layout()
plt.savefig(outfile, dpi=dpi)
def __repr__(self):
string = ""
for r in self.rates:
string += "{}\n".format(r.string)
return string
class Explorer(object):
""" interactively explore a rate collection """
def __init__(self, rc, comp, size=(800, 600)):
""" take a RateCollection and a composition """
self.rc = rc
self.comp = comp
self.size = size
def _make_plot(self, logrho, logT):
self.rc.plot(rho=10.0**logrho, T=10.0**logT, comp=self.comp, size=self.size)
def explore(self, logrho=(2, 6, 0.1), logT=(7, 9, 0.1)):
"""Perform interactive exploration of the network structure."""
interact(self._make_plot, logrho=logrho, logT=logT)
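if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): build a Composition for a few
    # nuclei and set a solar-like abundance, as described in set_solar_like().
    nuclei = [Nucleus("p"), Nucleus("he4"), Nucleus("c12"), Nucleus("o16")]
    comp = Composition(nuclei)
    comp.set_solar_like(Z=0.02)
    print(comp)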
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import torch
from torch import nn  # needed below for the NeuralNetwork module definition
import numpy as np
import matplotlib.pyplot as plt
# Balancing helpers used below (OptimalTransportBalancing, MMDBalancing,
# Adversarial_Balancing, Shallow_GIPW) are assumed to be provided either by the
# modules in the commented imports or by the wildcard import from utils_balancing.
# from MMDBalancing import MMDBalancing as MMDB
# from OptimalTransportBalancing import OptimalTransportBalancing as OTB
# from NeuralAdversarialBalancing import NeuralAdversarialBalancing as NAB
#get_ipython().run_line_magic('matplotlib', 'inline')
import pandas as pd
# utils
from utils_balancing import *
# In[2]:
def static_simulation():
n = 5000
m = 5000
d = 1
r = lambda x:(x-3).square() + (x>-2)*(x+3).square() +x.abs()
#r = lambda x:x.square()
def get_data(n = 500,m = 500, r = r, d = d):
def pi(x):
return torch.sin(x)+ 2*torch.rand(x.shape)-1
def pi_ring(x):
return torch.sin(x)+ 1*torch.rand(x.shape)-0.5
xi = torch.normal(mean = -1, std = 2, size = (n,d))
xi_ring = torch.zeros(size = (m,d))
for i in range(m):
if torch.rand(1).item()>0.3:
xi_ring[i,0] = torch.normal(mean = -4, std = 2, size = (1,)).item()
else:
xi_ring[i,0] = torch.normal(mean = 3, std = 0.2, size = (1,)).item()
w = torch.ones(n)
w_ring = torch.ones(m)
xi_natural = torch.cat((xi, pi(xi)),axis = 1)
xi_ring_natural = torch.cat((xi_ring, pi_ring(xi_ring)), axis = 1)
Z =xi_natural[:,0]+xi_natural[:,1] + torch.rand((n,))
Z_ring =xi_ring_natural[:,0]+xi_ring_natural[:,1]+torch.rand((m,))
R = r(Z)
return xi_natural,xi_ring_natural,R,Z,Z_ring
# ## Reference value
# In[7]:
xi_natural, xi_ring_natural,R,Z,Z_ring = get_data(n = 50000, m = 50000)
ref = r(Z_ring).mean()
# ### Re-generate data set with $n=m=500$.
# In[8]:
n = 500
m = 500
xi_natural, xi_ring_natural,R,Z,Z_ring = get_data(n = n, m = m, r = r)
# # GIPWE: DE and DRE
#
# 1. Data splitting (K-folds with K = 3)
# In[9]:
def get_split_ind(n,K = 3):
# torch.randperm gives a uniform random permutation of 0..n-1 (multinomial over
# arange weights would fail here, since index 0 has zero weight)
rand_ind_n = torch.randperm(n)
num_folds_n = int(n/K)
Ind = []
for i in range(K):
if (i+1)*num_folds_n <= n:
Ind.append(list(rand_ind_n[i*num_folds_n:(i+1)*num_folds_n].detach().numpy()))
else:
Ind.append(list(rand_ind_n[i*num_folds_n:].detach().numpy()))
Ind_split = []
for i in range(K):
list_n = []
for j in range(n):
if j >= i*num_folds_n and j < (i+1)*num_folds_n:
pass
else:
list_n.append(rand_ind_n[j].item())
Ind_split.append(list_n)
return Ind_split,Ind
# In[10]:
K = 3
Ind_out, Ind_in = get_split_ind(n,K)
# 2. Get GIPW weights
# In[11]:
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
from sklearn.linear_model import LogisticRegression
# In[12]:
XGB = xgb.XGBRegressor(gamma = 5e0)
RF = RandomForestRegressor(n_estimators = 20, min_samples_split = 20)
LR = LogisticRegression()
def get_GIPW_weights(model):
eta = np.zeros(n)
for k in range(K):
SGIPW = Shallow_GIPW(xi_natural[Ind_out[k],:], xi_ring_natural)
SGIPW.train(model,xi = np.array(xi_natural[Ind_in[k],:]),log=False)
eta[Ind_in[k]] = SGIPW.weights*(SGIPW.weights>0)
return eta
eta_XGB = get_GIPW_weights(XGB)
eta_RF = get_GIPW_weights(RF)
eta_LR = get_GIPW_weights(LR)
# In[13]:
# OT
OTB = OptimalTransportBalancing()
eta_OT = OTB.get_weights(xi_natural,xi_ring_natural)
eta_OT = eta_OT.detach().numpy()
# In[17]:
# MMD weights
lambda_RKHS = 1e2
lambda_l2 = 1e-3
MMDB = MMDBalancing(xi_natural,xi_ring_natural,sigma = 5e-1,D = 2000)
eta_MMD = MMDB.get_weights(lambda_RKHS = lambda_RKHS, lambda_l2 = lambda_l2)
eta_MMD = eta_MMD.to("cpu").detach().numpy()
# In[18]:
# In[20]:
# Neural Adversarial Balancing
class NeuralNetwork(nn.Module):
def __init__(self,input_dim = 1, num_nodes = 32):
super(NeuralNetwork, self).__init__()
self.flatten = nn.Flatten()
self.linear_relu_stack = nn.Sequential(
nn.Linear(input_dim, num_nodes),
nn.ReLU(),
#nn.Dropout(0.3),
#nn.BatchNorm1d(num_nodes),
nn.Linear(num_nodes, num_nodes),
nn.ReLU(),
nn.Linear(num_nodes, num_nodes),
nn.ReLU(),
#nn.Dropout(0.3),
#nn.BatchNorm1d(num_nodes),
#nn.Linear(num_nodes, num_nodes),
#nn.ReLU(),
# # #nn.Dropout(0.3),
# # nn.BatchNorm1d(num_nodes),
nn.Linear(num_nodes, 1),
)
def forward(self, x):
x = self.flatten(x)
target = self.linear_relu_stack(x)
return target
# In[21]:
AB = Adversarial_Balancing(xi_natural,xi_ring_natural)
num_nodes_IPM = 24
model_IPM = NeuralNetwork(input_dim = d*2,num_nodes = 2*num_nodes_IPM).to(AB.dev)
model_reweighting = NeuralNetwork(input_dim = d*2, num_nodes = num_nodes_IPM).to(AB.dev)
learning_rate = 1e-3
optimizer_IPM = torch.optim.Adam(model_IPM.parameters(), lr=learning_rate, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=True)
optimizer_reweighting = torch.optim.Adam(model_reweighting.parameters(), lr=learning_rate, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=True)
# In[22]:
epochs = 50
loss_trace = []
for t in range(epochs):
#print(f"Epoch {t+1}\n-------------------------------")
current_test_loss = AB.train_loop(model_IPM = model_IPM,
model_reweighting = model_reweighting,
optimizer_IPM = optimizer_IPM,
optimizer_reweighting = optimizer_reweighting,
IPM_steps = 3,
reweight_steps = 3,
lambda_l2_weight = 5e-2,
lambda_l2_IPM = 1e-2,
lambda_l1_IPM = 1e-2,
)
loss_trace.append(current_test_loss.to("cpu").detach().numpy())
weights = model_reweighting(xi_natural.to("cuda:0"))
#weights /=weights.mean()
eta_NAB = weights.to("cpu").detach().numpy()
# 4. Get $r^{\natural}$ estimation with the same K-fold splitting
# In[26]:
from sklearn.linear_model import LinearRegression
RF_R = RandomForestRegressor(n_estimators = 20, min_samples_split = 5)
#model_r = RF_R
model_r = LinearRegression()
# In[27]:
def get_r_estimation(model, K = 3):
r_hat = np.zeros(n)
r_hat_ring = np.zeros(m)
for k in range(K):
SGIPW = Shallow_GIPW(xi_natural[Ind_out[k],:], xi_ring_natural)
model_k = model
model_k.fit(xi_natural[Ind_out[k],:].detach().numpy(), R[Ind_out[k]].detach().numpy())
r_hat[Ind_in[k]] = model_k.predict(xi_natural[Ind_in[k]].detach().numpy())
r_hat_ring += model_k.predict(xi_ring_natural.detach().numpy())
r_hat_ring /= K
return r_hat, r_hat_ring
# In[28]:
r_hat,r_hat_ring = get_r_estimation(model_r)
# In[29]:
# ## Estimators
# In[30]:
def get_DE(eta, R = R, ref= ref):
try:
eta = torch.from_numpy(eta)
except:
pass
pred = (eta*R).mean().item()
error = torch.abs(pred - ref).item()
return pred, error
def get_DRE(eta,r_hat, r_hat_ring, R = R, ref = ref):
try:
eta = torch.from_numpy(eta)
r_hat = torch.from_numpy(r_hat)
except:
pass
pred = (eta*(R -r_hat)).mean() + r_hat_ring.mean()
error = torch.abs(pred - ref).item()
return pred.item(), error
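# In words: DE is the plain weighted mean (1/n) * sum_i eta_i * R_i, while DRE is
# the doubly robust correction (1/n) * sum_i eta_i * (R_i - r_hat_i)
# + (1/m) * sum_j r_hat_ring_j; both are compared against the reference value `ref`.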
# In[31]:
#pd.set_option("display.precision", 2)
#pd.set_option('display.float_format', lambda x: '%.2f' % x)
table_bad_reg = pd.DataFrame(
    [
        [get_DE(eta_OT)[1], get_DRE(eta_OT, r_hat, r_hat_ring)[1]],
        [get_DE(eta_MMD)[1], get_DRE(eta_MMD, r_hat, r_hat_ring)[1]],
        [get_DE(eta_NAB)[1], get_DRE(eta_NAB, r_hat, r_hat_ring)[1]],
        [get_DE(eta_RF)[1], get_DRE(eta_RF, r_hat, r_hat_ring)[1]],
        [get_DE(eta_XGB)[1], get_DRE(eta_XGB, r_hat, r_hat_ring)[1]],
        [get_DE(eta_LR)[1], get_DRE(eta_LR, r_hat, r_hat_ring)[1]],
        [None, torch.abs(r_hat_ring.mean() - ref).item()],
    ],
    columns=("DE", "DRE"),
    index=("OT", "MMD", "NAB", "GIPW-RF", "GIPW-XGB", "GIPW-LR", "G-computation"),
)
# ## Bad regression model: Linear regression
# In[32]:
# In[ ]:
# ## Good regression model: XGBoosting
# In[33]:
XGB_R = xgb.XGBRegressor(n_estimators = 20, gamma = 1e-0)
model_r = XGB_R
r_hat,r_hat_ring = get_r_estimation(model_r)
# In[34]:
pd.set_option("display.precision", 2)
table_good_reg = pd.DataFrame(
    [
        [get_DE(eta_OT)[1], get_DRE(eta_OT, r_hat, r_hat_ring)[1]],
        [get_DE(eta_MMD)[1], get_DRE(eta_MMD, r_hat, r_hat_ring)[1]],
        [get_DE(eta_NAB)[1], get_DRE(eta_NAB, r_hat, r_hat_ring)[1]],
        [get_DE(eta_RF)[1], get_DRE(eta_RF, r_hat, r_hat_ring)[1]],
        [get_DE(eta_XGB)[1], get_DRE(eta_XGB, r_hat, r_hat_ring)[1]],
        [get_DE(eta_LR)[1], get_DRE(eta_LR, r_hat, r_hat_ring)[1]],
        [None, torch.abs(r_hat_ring.mean() - ref).item()],
    ],
    columns=("DE", "DRE"),
    index=("OT", "MMD", "NAB", "GIPW-RF", "GIPW-XGB", "GIPW-LR", "G-computation"),
)
# In[35]:
return table_bad_reg, table_good_reg
|
# -*- coding: utf-8 -*-
import datetime
from django.db.models import Count
import os
from django.db import models
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.db.models.signals import post_save
from django.dispatch import receiver
from uuslug import uuslug as slugify
from sorl.thumbnail import ImageField
from tags.models import Tag, TaggedItem
from comments.models import Comment, CommentAnswer
def category_upload_path(instance, filename):
from utils import timestampbased_filename
category_slug = slugify(instance.title)
path = os.path.join(
'article_category',
category_slug,
timestampbased_filename(filename)
)
return path
def article_upload_path(instance, filename):
from utils import timestampbased_filename
article_slug = slugify(instance.title)
path = os.path.join(
'articles',
article_slug,
timestampbased_filename(filename)
)
return path
class ArticleCategory(models.Model):
title = models.CharField("Заголовок", max_length=255)
slug = models.SlugField("URL", unique=True)
image = ImageField("Изображение", upload_to=category_upload_path, blank=True, null=True)
image_alt = models.CharField("ALT изображения", max_length=255, blank=True, null=True)
image_title = models.CharField("TITLE изображения", max_length=255, blank=True, null=True)
add_watermark = models.BooleanField("Добавлять водяной знак?", default=False)
description = models.TextField("Описание", blank=True, null=True)
author = models.ForeignKey(User, verbose_name="Автор")
published = models.BooleanField("Опубликовано", default=True)
created = models.DateTimeField("Время создания", auto_now_add=True)
updated = models.DateTimeField("Время последнего обновления", auto_now=True)
visits_num = models.PositiveIntegerField("Кол. посещений", default=0, editable=False)
def set_tags(self, tags):
Tag.objects.update_tags(self, tags)
def get_tags(self, tags):
return Tag.objects.get_for_object(self)
def inc_visits(self):
self.visits_num += 1
self.save()
@property
def tags(self):
content_type = ContentType.objects.get_for_model(self)
try:
# .get() returns a model instance, which has no prefetch_related(); fetch it directly
tagged_item = TaggedItem.objects.get(content_type=content_type, object_id=self.id)
except TaggedItem.DoesNotExist:
return []
return tagged_item.tags.all()
def get_absolute_url(self):
return reverse("articlecategory_details", args=(self.slug, ))
def __unicode__(self):
return self.title
class Meta:
verbose_name = "Категория статей"
verbose_name_plural = "Категории статей"
class Article(models.Model):
title = models.CharField("Заголовок", max_length=255)
slug = models.SlugField("URL", unique=True)
old_id = models.IntegerField("Старый ID", unique=True, blank=True, null=True)
image = models.ImageField("Изображение", upload_to=article_upload_path,
blank=True, null=True
)
image_alt = models.CharField("ALT изображения", max_length=255, blank=True, null=True)
image_title = models.CharField("TITLE изображения", max_length=255, blank=True, null=True)
add_watermark = models.BooleanField("Добавлять водяной знак?", default=False)
description = models.TextField("Описание", blank=True, null=True)
body = models.TextField("Текст статьи")
author = models.ForeignKey(User, verbose_name="Автор")
category = models.ForeignKey(ArticleCategory, verbose_name="Категория", related_name="articles")
verified = models.BooleanField("Проверена", default=False)
published = models.BooleanField("Опубликовано", default=True)
pub_date = models.DateTimeField("Опубликовано", blank=True)
created = models.DateTimeField("Создано", auto_now_add=True)
updated = models.DateTimeField("Обновлено", auto_now=True)
visits_num = models.PositiveIntegerField("Кол. посещений", default=0, editable=False)
comments_num = models.PositiveIntegerField(u"Кол. коментариев", default=0, editable=False)
def inc_visits(self):
self.visits_num += 1
self.save()
@property
def num_comments(self):
comments = Comment.objects.filter(
content_type_id=ContentType.objects.get_for_model(self).id,
object_id=self.id
).annotate(answer_count=Count('answers')).values_list('answer_count', flat=True)
cnt = 0
for i in range(len(comments)):
cnt += 1 + comments[i]
return cnt
@property
def tags(self):
content_type = ContentType.objects.get_for_model(self)
try:
tagged_item = TaggedItem.objects.get(content_type=content_type, object_id=self.id)
except TaggedItem.DoesNotExist:
return []
return tagged_item.tags.all()
def save(self, *args, **kwargs):
if not self.pub_date:
self.pub_date = datetime.datetime.now()
super(Article, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse("article_details", args=(self.slug, ))
def get_contenttype_id(self):
return ContentType.objects.get_for_model(Article).id
def __unicode__(self):
return self.title
class Meta:
verbose_name = "Статья"
verbose_name_plural = "Статьи"
@receiver(post_save, sender=Article)
def publish_article_task(sender, instance, created, **kwargs):
from articles.tasks import publish_article
if not instance.published:
publish_article.apply_async(args=(instance, ), eta=instance.pub_date)
@receiver(post_save, sender=Article)
def article_watermark(sender, instance, created, **kwargs):
if not instance.add_watermark:
return
from utils import add_watermark
marked_img = add_watermark(instance.image)
if not marked_img:
return
instance.image = marked_img
instance.save()
@receiver(post_save, sender=ArticleCategory)
def articlecategory_watermark(sender, instance, created, **kwargs):
if not instance.add_watermark:
return
from utils import add_watermark
marked_img = add_watermark(instance.image)
if not marked_img:
return
instance.image = marked_img
instance.save()
|
from pathlib import Path
from PIL import Image, ImageOps
def generate_thumbnail(file_path, max_height):
size = (max_height, max_height)
thumbnail = ImageOps.fit(Image.open(file_path), size, Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
thumbnail.save(f'{Path(file_path).stem}_thumb_{max_height}.jpg', 'JPEG')
return thumbnail
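# Usage sketch (hypothetical path): generate_thumbnail("photos/cat.jpg", 128)
# writes "cat_thumb_128.jpg" into the current working directory, since only the
# file stem is used when building the output name.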
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""pytest fixtures for use with the aiida.restapi tests"""
import pytest
@pytest.fixture(scope='function')
def restapi_server():
"""Make REST API server"""
from werkzeug.serving import make_server
from aiida.restapi.common.config import CLI_DEFAULTS
from aiida.restapi.run_api import configure_api
def _restapi_server(restapi=None):
if restapi is None:
flask_restapi = configure_api()
else:
flask_restapi = configure_api(flask_api=restapi)
return make_server(
host=CLI_DEFAULTS['HOST_NAME'],
port=int(CLI_DEFAULTS['PORT']),
app=flask_restapi.app,
threaded=True,
processes=1,
request_handler=None,
passthrough_errors=True,
ssl_context=None,
fd=None
)
return _restapi_server
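# Typical use in a test (illustrative): the fixture returns a factory, so a test
# can call `server = restapi_server()`, serve it from a background thread, issue
# requests against `server_url`, and finally call `server.shutdown()`.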
@pytest.fixture
def server_url():
from aiida.restapi.common.config import CLI_DEFAULTS, API_CONFIG
return f"http://{CLI_DEFAULTS['HOST_NAME']}:{CLI_DEFAULTS['PORT']}{API_CONFIG['PREFIX']}"
@pytest.fixture
def restrict_sqlalchemy_queuepool(aiida_profile):
"""Create special SQLAlchemy engine for use with QueryBuilder - backend-agnostic"""
from aiida.manage.manager import get_manager
backend_manager = get_manager().get_backend_manager()
backend_manager.reset_backend_environment()
backend_manager.load_backend_environment(aiida_profile, pool_timeout=1, max_overflow=0)
@pytest.fixture
def populate_restapi_database(clear_database_before_test):
"""Populates the database with a considerable set of nodes to test the restAPI"""
# pylint: disable=unused-argument
from aiida import orm
struct_forcif = orm.StructureData().store()
orm.StructureData().store()
orm.StructureData().store()
orm.Dict().store()
orm.Dict().store()
orm.CifData(ase=struct_forcif.get_ase()).store()
orm.KpointsData().store()
orm.FolderData().store()
orm.CalcFunctionNode().store()
orm.CalcJobNode().store()
orm.CalcJobNode().store()
orm.WorkFunctionNode().store()
orm.WorkFunctionNode().store()
orm.WorkChainNode().store()
|
import matplotlib.pyplot as plt
import seaborn as sns
class PlotTree():
def __init__(self,tree_class):
self._tree_class=tree_class
self._decision_node = dict(boxstyle="sawtooth", fc="0.8")
self._leaf_node = dict(boxstyle="round4", fc="0.8")
self._arrow_args = dict(arrowstyle="<-")
def __get_tree_depth(self,tree):
"""获取树的深度"""
depth = 0
# 定义的dict中首位储存的是节点信息,不计入计数
for key in ('Left', 'Right'):
# 记录各子节点的深度
sub_tree = tree[key]
if type(sub_tree).__name__ == "dict":
# 如果该节点有分支,迭代计算该节点的深度
thisdepth = self.__get_tree_depth(sub_tree)
else:
# 否则深度为一
thisdepth = 1
# 比较各分支深度,保留最深记录
if thisdepth > depth:
depth = thisdepth
# 分支深度加一即为当前节点深度
return depth + 1
def __plot_node(self,node_txt, cntr_pt, prnt_pt, node_type):
self._ax1.annotate(node_txt, xy=prnt_pt, xycoords='axes fraction',
xytext=cntr_pt, textcoords='axes fraction',
va="center", ha="center", bbox=node_type, arrowprops=self._arrow_args)
def __plot_mid_text(self,cntr_pt, prnt_pt, txt_string):
xMid = (prnt_pt[0] - cntr_pt[0]) / 2.0 + cntr_pt[0]
yMid = (prnt_pt[1] - cntr_pt[1]) / 2.0 + cntr_pt[1]
self._ax1.text(xMid, yMid, txt_string, va="center",
ha="center", rotation=30)
def __plot_tree(self,tree, prnt_pt, node_txt, branch=None):
self._layer += 1
diff = 1 / 2**(self._layer)
keys = list(tree.keys())
text = tree[keys[0]]
if branch == 'Left':
self._xOff -= diff
elif branch == 'Right':
self._xOff += diff
else:
pass
cntr_pt = (self._xOff, self._yOff)
self.__plot_mid_text(cntr_pt, prnt_pt, node_txt)
self.__plot_node(text, cntr_pt, prnt_pt, self._decision_node)
self._yOff = self._yOff - 1.0 / self._totalD
for key in keys[1:]:
sub_tree = tree[key]
if type(sub_tree).__name__ == 'dict':
self.__plot_tree(sub_tree, cntr_pt, str(key), key)
else:
if key == 'Left':
x = self._xOff - diff / 2
elif key == 'Right':
x = self._xOff + diff / 2
else:
pass
self.__plot_node(sub_tree, (x, self._yOff), cntr_pt, self._leaf_node)
self.__plot_mid_text((x, self._yOff), cntr_pt, str(key))
if branch == 'Left':
self._xOff += diff
elif branch == 'Right':
self._xOff -= diff
else:
pass
self._layer -= 1
self._yOff = self._yOff + 1.0 / self._totalD
def tree_structure_plot(self):
fig = plt.figure(1, facecolor='white')
fig.clf()
axprops = dict(xticks=[], yticks=[])
self._ax1 = plt.subplot(111, frameon=False, **axprops)
self._totalD = float(self.__get_tree_depth(self._tree_class.tree))
self._xOff = 0.5
self._yOff = 1.0
self._layer = 0
self.__plot_tree(self._tree_class.tree, (0.5, 1.0), '')
plt.show()
def confusion_matrix_plot(self):
mat=self._tree_class.confusion_matrix
if mat is None:
print("The confusion matrix is not computed. Please use 'test()' in 'DecisionTree' class to get it.")
else:
fig, ax = plt.subplots(figsize=(6, 6))
sns.heatmap(mat,xticklabels=mat.columns,yticklabels=mat.index,
cbar_kws={"shrink": .5}, ax=ax)
plt.tight_layout()
plt.show()
|
"""
Delta E z.
https://www.osapublishing.org/oe/fulltext.cfm?uri=oe-25-13-15131&id=368272
"""
from ..distance import DeltaE
import math
from .. import util
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING: # pragma: no cover
from ..color import Color
class DEZ(DeltaE):
"""Delta E z class."""
NAME = "jz"
@classmethod
def distance(cls, color: 'Color', sample: 'Color', **kwargs: Any) -> float:
"""Delta E z color distance formula."""
jz1, az1, bz1 = util.no_nans(color.convert('jzazbz').coords())
jz2, az2, bz2 = util.no_nans(sample.convert('jzazbz').coords())
cz1 = math.sqrt(az1 ** 2 + bz1 ** 2)
cz2 = math.sqrt(az2 ** 2 + bz2 ** 2)
hz1 = math.atan2(bz1, az1)
hz2 = math.atan2(bz2, az2)
djz = jz1 - jz2
dcz = cz1 - cz2
dhz = 2 * math.sqrt(cz1 * cz2) * math.sin((hz1 - hz2) / 2)
return math.sqrt(djz ** 2 + dcz ** 2 + dhz ** 2)
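# In short: dEz = sqrt(dJz**2 + dCz**2 + dHz**2), with Cz = sqrt(az**2 + bz**2),
# hz = atan2(bz, az) and dHz = 2 * sqrt(Cz1 * Cz2) * sin((hz1 - hz2) / 2), i.e. a
# Euclidean distance in Jzazbz-derived lightness/chroma/hue terms, as computed above.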
|
from typing import Union, Tuple, Sized, Container, Any, TypeVar, Callable
from typing import Iterable, Iterator, Sequence, Dict, Generic, cast
from typing import Optional, List, overload
from dataclasses import dataclass
import numpy
import sys
try:
import cupy
get_array_module = cupy.get_array_module
except ImportError:
get_array_module = lambda obj: numpy
# Use typing_extensions for Python versions < 3.8
if sys.version_info < (3, 8):
from typing_extensions import Protocol, Literal
else:
from typing import Protocol, Literal # noqa: F401
# fmt: off
XY_YZ_OutT = TypeVar("XY_YZ_OutT")
XY_XY_OutT = TypeVar("XY_XY_OutT")
DeviceTypes = Literal["cpu", "gpu", "tpu"]
Batchable = Union["Pairs", "Ragged", "Padded", "ArrayXd", List, Tuple]
Xp = Union["numpy", "cupy"] # type: ignore
Shape = Tuple[int, ...]
DTypes = Literal["f", "i", "float16", "float32", "float64", "int32", "int64", "uint32", "uint64"]
DTypesFloat = Literal["f", "float32", "float16", "float64"]
DTypesInt = Literal["i", "int32", "int64", "uint32", "uint64"]
Array1d = Union["Floats1d", "Ints1d"]
Array2d = Union["Floats2d", "Ints2d"]
Array3d = Union["Floats3d", "Ints3d"]
Array4d = Union["Floats4d", "Ints4d"]
FloatsXd = Union["Floats1d", "Floats2d", "Floats3d", "Floats4d"]
IntsXd = Union["Ints1d", "Ints2d", "Ints3d", "Ints4d"]
ArrayXd = Union[FloatsXd, IntsXd]
List1d = Union[List["Floats1d"], List["Ints1d"]]
List2d = Union[List["Floats2d"], List["Ints2d"]]
List3d = Union[List["Floats3d"], List["Ints3d"]]
List4d = Union[List["Floats4d"], List["Ints4d"]]
ListXd = Union[List["FloatsXd"], List["IntsXd"]]
ArrayT = TypeVar("ArrayT")
SelfT = TypeVar("SelfT")
Array1dT = TypeVar("Array1dT", bound="Array1d")
# These all behave the same as far as indexing is concerned
Slicish = Union[slice, List[int], "ArrayXd"]
_1_KeyScalar = int
_1_Key1d = Slicish
_1_AllKeys = Union[_1_KeyScalar, _1_Key1d]
_F1_AllReturns = Union[float, "Floats1d"]
_I1_AllReturns = Union[int, "Ints1d"]
_2_KeyScalar = Tuple[int, int]
_2_Key1d = Union[int, Tuple[Slicish, int], Tuple[int, Slicish]]
_2_Key2d = Union[Tuple[Slicish, Slicish], Slicish]
_2_AllKeys = Union[_2_KeyScalar, _2_Key1d, _2_Key2d]
_F2_AllReturns = Union[float, "Floats1d", "Floats2d"]
_I2_AllReturns = Union[int, "Ints1d", "Ints2d"]
_3_KeyScalar = Tuple[int, int, int]
_3_Key1d = Union[Tuple[int, int], Tuple[int, int, Slicish], Tuple[int, Slicish, int], Tuple[Slicish, int, int]]
_3_Key2d = Union[int, Tuple[int, Slicish], Tuple[Slicish, int], Tuple[int, Slicish, Slicish], Tuple[Slicish, int, Slicish], Tuple[Slicish, Slicish, int]]
_3_Key3d = Union[Slicish, Tuple[Slicish, Slicish], Tuple[Slicish, Slicish, Slicish]]
_3_AllKeys = Union[_3_KeyScalar, _3_Key1d, _3_Key2d, _3_Key3d]
_F3_AllReturns = Union[float, "Floats1d", "Floats2d", "Floats3d"]
_I3_AllReturns = Union[int, "Ints1d", "Ints2d", "Ints3d"]
_4_KeyScalar = Tuple[int, int, int, int]
_4_Key1d = Union[Tuple[int, int, int], Tuple[int, int, int, Slicish], Tuple[int, int, Slicish, int], Tuple[int, Slicish, int, int], Tuple[Slicish, int, int, int]]
_4_Key2d = Union[Tuple[int, int], Tuple[int, int, Slicish], Tuple[int, Slicish, int], Tuple[Slicish, int, int], Tuple[int, int, Slicish, Slicish], Tuple[int, Slicish, int, Slicish], Tuple[int, Slicish, Slicish, int], Tuple[Slicish, int, int, Slicish], Tuple[Slicish, int, Slicish, int], Tuple[Slicish, Slicish, int, int]]
_4_Key3d = Union[int, Tuple[int, Slicish], Tuple[Slicish, int], Tuple[int, Slicish, Slicish], Tuple[Slicish, int, Slicish], Tuple[Slicish, Slicish, int], Tuple[int, Slicish, Slicish, Slicish], Tuple[Slicish, int, Slicish, Slicish], Tuple[Slicish, Slicish, int, Slicish], Tuple[Slicish, Slicish, Slicish, int]]
_4_Key4d = Union[Slicish, Tuple[Slicish, Slicish], Tuple[Slicish, Slicish, Slicish], Tuple[Slicish, Slicish, Slicish, Slicish]]
_4_AllKeys = Union[_4_KeyScalar, _4_Key1d, _4_Key2d, _4_Key3d, _4_Key4d]
_F4_AllReturns = Union[float, "Floats1d", "Floats2d", "Floats3d", "Floats4d"]
_I4_AllReturns = Union[int, "Ints1d", "Ints2d", "Ints3d", "Ints4d"]
# Typedefs for the reduction methods.
Tru = Literal[True]
Fal = Literal[False]
OneAx = Union[int, Tuple[int]]
TwoAx = Tuple[int, int]
ThreeAx = Tuple[int, int, int]
FourAx = Tuple[int, int, int, int]
_1_AllAx = Optional[OneAx]
_2_AllAx = Union[Optional[TwoAx], OneAx]
_3_AllAx = Union[Optional[ThreeAx], TwoAx, OneAx]
_4_AllAx = Union[Optional[FourAx], ThreeAx, TwoAx, OneAx]
_1F_ReduceResults = Union[float, "Floats1d"]
_2F_ReduceResults = Union[float, "Floats1d", "Floats2d"]
_3F_ReduceResults = Union[float, "Floats1d", "Floats2d", "Floats3d"]
_4F_ReduceResults = Union[float, "Floats1d", "Floats2d", "Floats3d", "Floats4d"]
_1I_ReduceResults = Union[int, "Ints1d"]
_2I_ReduceResults = Union[int, "Ints1d", "Ints2d"]
_3I_ReduceResults = Union[int, "Ints1d", "Ints2d", "Ints3d"]
_4I_ReduceResults = Union[int, "Ints1d", "Ints2d", "Ints3d", "Ints4d"]
# TODO:
# We need to get correct overloads in for the following reduction methods.
# The 'sum' reduction is correct --- the others need to be just the same,
# but with a different name.
# max, min, prod, round, var, mean, ptp, std
# There's also one *slightly* different function, cumsum. This doesn't
# have a scalar version -- it always makes an array.
class _Array(Sized, Container):
@classmethod
def __get_validators__(cls):
"""Runtime validation for pydantic."""
yield lambda v: validate_array(v)
@property
def dtype(self) -> DTypes: ...
@property
def data(self) -> memoryview: ...
@property
def flags(self) -> Any: ...
@property
def size(self) -> int: ...
@property
def itemsize(self) -> int: ...
@property
def nbytes(self) -> int: ...
@property
def ndim(self) -> int: ...
@property
def shape(self) -> Shape: ...
@property
def strides(self) -> Tuple[int, ...]: ...
# TODO: Is ArrayT right?
def astype(self: ArrayT, dtype: DTypes, order: str = ..., casting: str = ..., subok: bool = ..., copy: bool = ...) -> ArrayT: ...
def copy(self: ArrayT, order: str = ...) -> ArrayT: ...
def fill(self, value: Any) -> None: ...
# Shape manipulation
def reshape(self: ArrayT, shape: Shape, *, order: str = ...) -> ArrayT: ...
def transpose(self: ArrayT, axes: Shape) -> ArrayT: ...
# TODO: is this right? It returns 1d
def flatten(self, order: str = ...): ...
# TODO: is this right? It returns 1d
def ravel(self, order: str = ...): ...
def squeeze(self, axis: Union[int, Shape] = ...): ...
def __len__(self) -> int: ...
def __setitem__(self, key, value): ...
def __iter__(self) -> Iterator[Any]: ...
def __contains__(self, key) -> bool: ...
def __index__(self) -> int: ...
def __int__(self) -> int: ...
def __float__(self) -> float: ...
def __complex__(self) -> complex: ...
def __bool__(self) -> bool: ...
def __bytes__(self) -> bytes: ...
def __str__(self) -> str: ...
def __repr__(self) -> str: ...
def __copy__(self, order: str = ...): ...
def __deepcopy__(self, memo: dict) -> ArrayT: ...
def __lt__(self, other): ...
def __le__(self, other): ...
def __eq__(self, other): ...
def __ne__(self, other): ...
def __gt__(self, other): ...
def __ge__(self, other): ...
def __add__(self, other): ...
def __radd__(self, other): ...
def __iadd__(self, other): ...
def __sub__(self, other): ...
def __rsub__(self, other): ...
def __isub__(self, other): ...
def __mul__(self, other): ...
def __rmul__(self, other): ...
def __imul__(self, other): ...
def __truediv__(self, other): ...
def __rtruediv__(self, other): ...
def __itruediv__(self, other): ...
def __floordiv__(self, other): ...
def __rfloordiv__(self, other): ...
def __ifloordiv__(self, other): ...
def __mod__(self, other): ...
def __rmod__(self, other): ...
def __imod__(self, other): ...
def __divmod__(self, other): ...
def __rdivmod__(self, other): ...
# NumPy's __pow__ doesn't handle a third argument
def __pow__(self, other): ...
def __rpow__(self, other): ...
def __ipow__(self, other): ...
def __lshift__(self, other): ...
def __rlshift__(self, other): ...
def __ilshift__(self, other): ...
def __rshift__(self, other): ...
def __rrshift__(self, other): ...
def __irshift__(self, other): ...
def __and__(self, other): ...
def __rand__(self, other): ...
def __iand__(self, other): ...
def __xor__(self, other): ...
def __rxor__(self, other): ...
def __ixor__(self, other): ...
def __or__(self, other): ...
def __ror__(self, other): ...
def __ior__(self, other): ...
def __matmul__(self, other): ...
def __rmatmul__(self, other): ...
def __neg__(self: ArrayT) -> ArrayT: ...
def __pos__(self: ArrayT) -> ArrayT: ...
def __abs__(self: ArrayT) -> ArrayT: ...
def __invert__(self: ArrayT) -> ArrayT: ...
def get(self: ArrayT) -> ArrayT: ...
def all(self, axis: int = -1, out: Optional[ArrayT] = None, keepdims: bool = False) -> ArrayT: ...
def any(self, axis: int = -1, out: Optional[ArrayT] = None, keepdims: bool = False) -> ArrayT: ...
# def argmax(self, axis: int = -1, out: Optional["Array"] = None, keepdims: Union[Tru, Fal]=False) -> Union[int, "Ints1d"]: ...
def argmin(self, axis: int = -1, out: Optional[ArrayT] = None) -> ArrayT: ...
def clip(self, a_min: Any, a_max: Any, out: Optional[ArrayT]) -> ArrayT: ...
#def cumsum( self: ArrayT, axis: int = -1, dtype: Optional[DTypes] = None, out: Optional[ArrayT] = None) -> ArrayT: ...
def max(self, axis: int = -1, out: Optional[ArrayT] = None) -> ArrayT: ...
# def mean(self, axis: int = -1, dtype: Optional[DTypes] = None, out: Optional[SelfT] = None, keepdims: bool = False) -> "Array": ...
def min(self, axis: int = -1, out: Optional[ArrayT] = None) -> ArrayT: ...
def nonzero(self) -> ArrayT: ...
def prod(self, axis: int = -1, dtype: Optional[DTypes] = None, out: Optional[ArrayT] = None, keepdims: bool = False) -> ArrayT: ...
def round(self, decimals: int = 0, out: Optional[ArrayT] = None) -> ArrayT: ...
# def sum(self, axis: int = -1, dtype: Optional[DTypes] = None, out: Optional[ArrayT] = None, keepdims: bool = False) -> ArrayT: ...
def tobytes(self, order: str = "C") -> bytes: ...
def tolist(self) -> List[Any]: ...
def var(self: SelfT, axis: int = -1, dtype: Optional[DTypes] = None, out: Optional[ArrayT] = None, ddof: int = 0, keepdims: bool = False) -> SelfT: ...
class _Floats(_Array):
@property
def dtype(self) -> DTypesFloat: ...
def fill(self, value: float) -> None: ...
def reshape(self, shape: Shape, *, order: str = ...) -> "_Floats": ...
class _Ints(_Array):
@property
def dtype(self) -> DTypesInt: ...
def fill(self, value: int) -> None: ...
def reshape(self, shape: Shape, *, order: str = ...) -> "_Ints": ...
"""
Extensive overloads to represent __getitem__ behaviour.
In an N+1 dimensional array, there will be N possible return types. For instance,
if you have a 2d array, you could get back a float (array[i, j]), a floats1d
(array[i]) or a floats2d (array[:i, :j]). You'll get the scalar if you have N
ints in the index, a 1d array if you have N-1 ints, etc.
So the trick here is to make a union with the various combinations that produce
each result type, and then only have one overload per result. If we overloaded
on each *key* type, that would get crazy, because there's tonnes of combinations.
In each rank, we can use the same key-types for float and int, but we need a
different return-type union.
"""
class _Array1d(_Array):
"""1-dimensional array."""
@classmethod
def __get_validators__(cls):
"""Runtime validation for pydantic."""
yield lambda v: validate_array(v, ndim=1)
@property
def ndim(self) -> Literal[1]: ...
@property
def shape(self) -> Tuple[int]: ...
def __iter__(self) -> Iterator[Union[float, int]]: ...
def astype(self, dtype: DTypes, order: str = ..., casting: str = ..., subok: bool = ..., copy: bool = ...) -> "_Array1d": ...
def flatten(self: SelfT, order: str = ...) -> SelfT: ...
def ravel(self: SelfT, order: str = ...) -> SelfT: ...
# This is actually a bit too strict: it's legal to say 'array1d + array2d'.
# That's kind of bad code though; it's better to write array2d + array1d.
# We could relax this, but let's try the strict version.
def __add__(self: SelfT, other: Union[float, int, "Array1d"]) -> SelfT: ...
def __sub__(self: SelfT, other: Union[float, int, "Array1d"]) -> SelfT: ...
def __mul__(self: SelfT, other: Union[float, int, "Array1d"]) -> SelfT: ...
def __pow__(self: SelfT, other: Union[float, int, "Array1d"]) -> SelfT: ...
def __matmul__(self: SelfT, other: Union[float, int, "Array1d"]) -> SelfT: ...
# These are not too strict though: you can't do += with higher dimensional.
def __iadd__(self, other: Union[float, int, "Array1d"]): ...
def __isub__(self, other: Union[float, int, "Array1d"]): ...
def __imul__(self, other: Union[float, int, "Array1d"]): ...
def __ipow__(self, other: Union[float, int, "Array1d"]): ...
@overload
def argmax(self, keepdims: Fal = False, axis: int = -1, out: Optional[_Array] = None) -> int: ...
@overload
def argmax(self, keepdims: Tru, axis: int = -1, out: Optional[_Array] = None) -> "Ints1d": ...
def argmax(self, keepdims: bool = False, axis: int = -1, out: Optional[_Array] = None) -> Union[int, "Ints1d"]: ...
@overload
def mean(self, keepdims: Tru, axis: int = -1, dtype: Optional[DTypes] = None, out: Optional["Floats1d"] = None) -> "Floats1d": ...
@overload
def mean(self, keepdims: Fal = False, axis: int = -1, dtype: Optional[DTypes] = None, out: Optional["Floats1d"] = None) -> float: ...
def mean(self, keepdims: bool = False, axis: int = -1, dtype: Optional[DTypes] = None, out: Optional["Floats1d"] = None) -> Union["Floats1d", float]: ...
class Floats1d(_Array1d, _Floats):
"""1-dimensional array of floats."""
T: "Floats1d"
@classmethod
def __get_validators__(cls):
"""Runtine validation for pydantic."""
yield lambda v: validate_array(v, ndim=1, dtype="f")
def __iter__(self) -> Iterator[float]: ...
@overload
def __getitem__(self, key: _1_KeyScalar) -> float: ...
@overload
def __getitem__(self, key: _1_Key1d) -> "Floats1d": ...
def __getitem__(self, key: _1_AllKeys) -> _F1_AllReturns: ...
@overload
def __setitem__(self, key: _1_KeyScalar, value: float) -> None: ...
@overload
def __setitem__(self, key: _1_Key1d, value: "Floats1d") -> None: ...
def __setitem__(self, key: _1_AllKeys, value: _F1_AllReturns) -> None: ...
@overload
def cumsum(self, *, keepdims: Tru, axis: Optional[OneAx] = None, out: Optional["Floats1d"] = None) -> "Floats1d": ...
@overload  # Cumsum is unusual here: it returns Floats1d whether or not keepdims is set
def cumsum(self, *, keepdims: Fal, axis: Optional[OneAx] = None, out: Optional["Floats1d"] = None) -> "Floats1d": ...
def cumsum(self, *, keepdims: bool = False, axis: _1_AllAx = None, out: Optional["Floats1d"] = None) -> "Floats1d": ...
@overload
def sum(self, *, keepdims: Tru, axis: Optional[OneAx] = None, out: Optional["Floats1d"] = None) -> "Floats1d": ...
@overload
def sum(self, *, keepdims: Fal, axis: Optional[OneAx] = None, out = None) -> float: ...
def sum(self, *, keepdims: bool = False, axis: _1_AllAx = None, out: Optional["Floats1d"] = None) -> _1F_ReduceResults: ...
class Ints1d(_Array1d, _Ints):
"""1-dimensional array of ints."""
T: "Ints1d"
@classmethod
def __get_validators__(cls):
"""Runtime validation for pydantic."""
yield lambda v: validate_array(v, ndim=1, dtype="i")
def __iter__(self) -> Iterator[int]: ...
@overload
def __getitem__(self, key: _1_KeyScalar) -> int: ...
@overload
def __getitem__(self, key: _1_Key1d) -> "Ints1d": ...
def __getitem__(self, key: _1_AllKeys) -> _I1_AllReturns: ...
@overload
def __setitem__(self, key: _1_KeyScalar, value: int) -> None: ...
@overload
def __setitem__(self, key: _1_Key1d, value: Union[int, "Ints1d"]) -> None: ...
def __setitem__(self, key: _1_AllKeys, value: _I1_AllReturns) -> None: ...
@overload
def cumsum(self, *, keepdims: Tru, axis: Optional[OneAx] = None, out: Optional["Ints1d"] = None) -> "Ints1d": ...
@overload
def cumsum(self, *, keepdims: Fal = False, axis: Optional[OneAx] = None, out: Optional["Ints1d"] = None) -> "Ints1d": ...
def cumsum(self, *, keepdims: bool = False, axis: _1_AllAx = None, out: Optional["Ints1d"] = None) -> "Ints1d": ...
@overload
def sum(self, *, keepdims: Tru, axis: Optional[OneAx] = None, out: Optional["Ints1d"] = None) -> "Ints1d": ...
@overload
def sum(self, *, keepdims: Fal = False, axis: Optional[OneAx] = None, out = None) -> int: ...
def sum(self, *, keepdims: bool = False, axis: _1_AllAx = None, out: Optional["Ints1d"] = None) -> _1I_ReduceResults: ...
class _Array2d(_Array):
@classmethod
def __get_validators__(cls):
"""Runtime validation for pydantic."""
yield lambda v: validate_array(v, ndim=2)
@property
def ndim(self) -> Literal[2]: ...
@property
def shape(self) -> Tuple[int, int]: ...
def __iter__(self) -> Iterator[Array1d]: ...
def astype(self, dtype: DTypes, order: str = ..., casting: str = ..., subok: bool = ..., copy: bool = ...) -> "Array2d": ...
# This is actually a bit too strict: it's legal to say 'array2d + array3d'.
# That's kind of bad code though; it's better to write array3d + array2d.
# We could relax this, but let's try the strict version.
def __add__(self: ArrayT, other: Union[float, int, Array1d, "Array2d"]) -> ArrayT: ...
def __sub__(self: ArrayT, other: Union[float, int, Array1d, "Array2d"]) -> ArrayT: ...
def __mul__(self: ArrayT, other: Union[float, int, Array1d, "Array2d"]) -> ArrayT: ...
def __pow__(self: ArrayT, other: Union[float, int, Array1d, "Array2d"]) -> ArrayT: ...
def __matmul__(self: ArrayT, other: Union[float, int, Array1d, "Array2d"]) -> ArrayT: ...
# These are not too strict though: you can't do += with higher dimensional.
def __iadd__(self, other: Union[float, int, Array1d, "Array2d"]): ...
def __isub__(self, other: Union[float, int, Array1d, "Array2d"]): ...
def __imul__(self, other: Union[float, int, Array1d, "Array2d"]): ...
def __ipow__(self, other: Union[float, int, Array1d, "Array2d"]): ...
@overload
def argmax(self, keepdims: Fal = False, axis: int = -1, out: Optional[_Array] = None) -> Ints1d: ...
@overload
def argmax(self, keepdims: Tru, axis: int = -1, out: Optional[_Array] = None) -> "Ints2d": ...
def argmax(self, keepdims: bool = False, axis: int = -1, out: Optional[_Array] = None) -> Union[Ints1d, "Ints2d"]: ...
@overload
def mean(self, keepdims: Fal = False, axis: int = -1, dtype: Optional[DTypes] = None, out: Optional["Floats2d"] = None) -> Floats1d: ...
@overload
def mean(self, keepdims: Tru, axis: int = -1, dtype: Optional[DTypes] = None, out: Optional["Floats2d"] = None) -> "Floats2d": ...
def mean(self, keepdims: bool = False, axis: int = -1, dtype: Optional[DTypes] = None, out: Optional["Floats2d"] = None) -> Union["Floats2d", Floats1d]: ...
class Floats2d(_Array2d, _Floats):
"""2-dimensional array of floats"""
T: "Floats2d"
@classmethod
def __get_validators__(cls):
"""Runtime validation for pydantic."""
yield lambda v: validate_array(v, ndim=2, dtype="f")
def __iter__(self) -> Iterator[Floats1d]: ...
@overload
def __getitem__(self, key: _2_KeyScalar) -> float: ...
@overload
def __getitem__(self, key: _2_Key1d) -> Floats1d: ...
@overload
def __getitem__(self, key: _2_Key2d) -> "Floats2d": ...
def __getitem__(self, key: _2_AllKeys) -> _F2_AllReturns: ...
@overload
def __setitem__(self, key: _2_KeyScalar, value: float) -> None: ...
@overload
def __setitem__(self, key: _2_Key1d, value: Union[float, Floats1d]) -> None: ...
@overload
def __setitem__(self, key: _2_Key2d, value: _F2_AllReturns) -> None: ...
def __setitem__(self, key: _2_AllKeys, value: _F2_AllReturns) -> None: ...
@overload
def sum(self, *, keepdims: Tru, axis: _2_AllAx = None, out: Optional["Floats2d"] = None) -> "Floats2d": ...
@overload
def sum(self, *, keepdims: Fal = False, axis: OneAx, out: Optional[Floats1d] = None) -> Floats1d: ...
@overload
def sum(self, *, keepdims: Fal = False, axis: TwoAx, out = None) -> float: ...
def sum(self, *, keepdims: bool = False, axis: _2_AllAx = None, out: Union[None, "Floats1d", "Floats2d"] = None) -> _2F_ReduceResults: ...
class Ints2d(_Array2d, _Ints):
"""2-dimensional array of ints."""
T: "Ints2d"
@classmethod
def __get_validators__(cls):
"""Runtime validation for pydantic."""
yield lambda v: validate_array(v, ndim=2, dtype="i")
def __iter__(self) -> Iterator[Ints1d]: ...
@overload
def __getitem__(self, key: _2_KeyScalar) -> int: ...
@overload
def __getitem__(self, key: _2_Key1d) -> Ints1d: ...
@overload
def __getitem__(self, key: _2_Key2d) -> "Ints2d": ...
def __getitem__(self, key: _2_AllKeys) -> _I2_AllReturns: ...
@overload
def __setitem__(self, key: _2_KeyScalar, value: int) -> None: ...
@overload
def __setitem__(self, key: _2_Key1d, value: Ints1d) -> None: ...
@overload
def __setitem__(self, key: _2_Key2d, value: "Ints2d") -> None: ...
def __setitem__(self, key: _2_AllKeys, value: _I2_AllReturns) -> None: ...
@overload
def sum(self, keepdims: Fal = False, axis: int = -1, out: Optional["Ints1d"] = None) -> Ints1d: ...
@overload
def sum(self, keepdims: Tru, axis: int = -1, out: Optional["Ints2d"] = None) -> "Ints2d": ...
def sum(self, keepdims: bool = False, axis: int = -1, out: Optional[Union["Ints1d", "Ints2d"]] = None) -> Union["Ints2d", Ints1d]: ...
class _Array3d(_Array):
"""3-dimensional array of floats"""
@classmethod
def __get_validators__(cls):
"""Runtime validation for pydantic."""
yield lambda v: validate_array(v, ndim=3)
@property
def ndim(self) -> Literal[3]: ...
@property
def shape(self) -> Tuple[int, int, int]: ...
def __iter__(self) -> Iterator[Array2d]: ...
def astype(self, dtype: DTypes, order: str = ..., casting: str = ..., subok: bool = ..., copy: bool = ...) -> "Array3d": ...
# This is actually a bit too strict: it's legal to say 'array2d + array3d'.
# That's kind of bad code though; it's better to write array3d + array2d.
# We could relax this, but let's try the strict version.
def __add__(self: SelfT, other: Union[float, int, Array1d, Array2d, "Array3d"]) -> SelfT: ...
def __sub__(self: SelfT, other: Union[float, int, Array1d, Array2d, "Array3d"]) -> SelfT: ...
def __mul__(self: SelfT, other: Union[float, int, Array1d, Array2d, "Array3d"]) -> SelfT: ...
def __pow__(self: SelfT, other: Union[float, int, Array1d, Array2d, "Array3d"]) -> SelfT: ...
def __matmul__(self: SelfT, other: Union[float, int, Array1d, Array2d, "Array3d"]) -> SelfT: ...
# These are not too strict though: you can't do += with higher dimensional.
def __iadd__(self, other: Union[float, int, Array1d, Array2d, "Array3d"]): ...
def __isub__(self, other: Union[float, int, Array1d, Array2d, "Array3d"]): ...
def __imul__(self, other: Union[float, int, Array1d, Array2d, "Array3d"]): ...
def __ipow__(self, other: Union[float, int, Array1d, Array2d, "Array3d"]): ...
@overload
def argmax(self, keepdims: Fal = False, axis: int = -1, out: Optional[_Array] = None) -> Ints2d: ...
@overload
def argmax(self, keepdims: Tru, axis: int = -1, out: Optional[_Array] = None) -> "Ints3d": ...
def argmax(self, keepdims: bool = False, axis: int = -1, out: Optional[_Array] = None) -> Union[Ints2d, "Ints3d"]: ...
class Floats3d(_Array3d, _Floats):
"""3-dimensional array of floats"""
T: "Floats3d"
@classmethod
def __get_validators__(cls):
"""Runtime validation for pydantic."""
yield lambda v: validate_array(v, ndim=3, dtype="f")
def __iter__(self) -> Iterator[Floats2d]: ...
@overload
def __getitem__(self, key: _3_KeyScalar) -> float: ...
@overload
def __getitem__(self, key: _3_Key1d) -> Floats1d: ...
@overload
def __getitem__(self, key: _3_Key2d) -> Floats2d: ...
@overload
def __getitem__(self, key: _3_Key3d) -> "Floats3d": ...
def __getitem__(self, key: _3_AllKeys) -> _F3_AllReturns: ...
@overload
def __setitem__(self, key: _3_KeyScalar, value: float) -> None: ...
@overload
def __setitem__(self, key: _3_Key1d, value: Floats1d) -> None: ...
@overload
def __setitem__(self, key: _3_Key2d, value: Floats2d) -> None: ...
@overload
def __setitem__(self, key: _3_Key3d, value: "Floats3d") -> None: ...
def __setitem__(self, key: _3_AllKeys, value: _F3_AllReturns) -> None: ...
@overload
def sum(self, *, keepdims: Tru, axis: _3_AllAx = None, out: Optional["Floats3d"] = None) -> "Floats3d": ...
@overload
def sum(self, *, keepdims: Fal, axis: OneAx, out: Optional[Floats2d] = None) -> Floats2d: ...
@overload
def sum(self, *, keepdims: Fal, axis: TwoAx, out: Optional[Floats1d] = None) -> Floats1d: ...
@overload
def sum(self, *, keepdims: Fal, axis: Optional[ThreeAx], out = None) -> float: ...
def sum(self, *, keepdims: bool = False, axis: _3_AllAx = None, out: Union[None, Floats1d, Floats2d, "Floats3d"] = None) -> _3F_ReduceResults: ...
class Ints3d(_Array3d, _Ints):
"""3-dimensional array of ints."""
T: "Ints3d"
@classmethod
def __get_validators__(cls):
"""Runtime validation for pydantic."""
yield lambda v: validate_array(v, ndim=3, dtype="i")
def __iter__(self) -> Iterator[Ints2d]: ...
@overload
def __getitem__(self, key: _3_KeyScalar) -> int: ...
@overload
def __getitem__(self, key: _3_Key1d) -> Ints1d: ...
@overload
def __getitem__(self, key: _3_Key2d) -> Ints2d: ...
@overload
def __getitem__(self, key: _3_Key3d) -> "Ints3d": ...
def __getitem__(self, key: _3_AllKeys) -> _I3_AllReturns: ...
@overload
def __setitem__(self, key: _3_KeyScalar, value: int) -> None: ...
@overload
def __setitem__(self, key: _3_Key1d, value: Ints1d) -> None: ...
@overload
def __setitem__(self, key: _3_Key2d, value: Ints2d) -> None: ...
@overload
def __setitem__(self, key: _3_Key3d, value: "Ints3d") -> None: ...
def __setitem__(self, key: _3_AllKeys, value: _I3_AllReturns) -> None: ...
@overload
def sum(self, *, keepdims: Tru, axis: _3_AllAx = None, out: Optional["Ints3d"] = None) -> "Ints3d": ...
@overload
def sum(self, *, keepdims: Fal, axis: OneAx, out: Optional[Ints2d] = None) -> Ints2d: ...
@overload
def sum(self, *, keepdims: Fal, axis: TwoAx, out: Optional[Ints1d] = None) -> Ints1d: ...
@overload
def sum(self, *, keepdims: Fal, axis: Optional[ThreeAx], out = None) -> int: ...
def sum(self, *, keepdims: bool = False, axis: _3_AllAx = None, out: Union[None, Ints1d, Ints2d, "Ints3d"] = None) -> _3I_ReduceResults: ...
class _Array4d(_Array):
"""4-dimensional array."""
@classmethod
def __get_validators__(cls):
"""Runtime validation for pydantic."""
yield lambda v: validate_array(v, ndim=4)
@property
def ndim(self) -> Literal[4]: ...
@property
def shape(self) -> Tuple[int, int, int, int]: ...
def __iter__(self) -> Iterator[Array3d]: ...
def astype(self, dtype: DTypes, order: str = ..., casting: str = ..., subok: bool = ..., copy: bool = ...) -> "_Array4d": ...
# This is actually a bit too strict: it's legal to say 'array4d + array5d'.
# That's kind of bad code though; it's better to write array5d + array4d.
# We could relax this, but let's try the strict version.
def __add__(self: SelfT, other: Union[float, int, Array1d, Array2d, Array3d, "Array4d"]) -> SelfT: ...
def __sub__(self: SelfT, other: Union[float, int, Array1d, Array2d, Array3d, "Array4d"]) -> SelfT: ...
def __mul__(self: SelfT, other: Union[float, int, Array1d, Array2d, Array3d, "Array4d"]) -> SelfT: ...
def __pow__(self: SelfT, other: Union[float, int, Array1d, Array2d, Array3d, "Array4d"]) -> SelfT: ...
def __matmul__(self: SelfT, other: Union[float, int, Array1d, Array2d, Array3d, "Array4d"]) -> SelfT: ...
# These are not too strict though: you can't do += with higher dimensional.
def __iadd__(self, other: Union[float, int, Array1d, Array2d, Array3d, "Array4d"]): ...
def __isub__(self, other: Union[float, int, Array1d, Array2d, Array3d, "Array4d"]): ...
def __imul__(self, other: Union[float, int, Array1d, Array2d, Array3d, "Array4d"]): ...
def __ipow__(self, other: Union[float, int, Array1d, Array2d, Array3d, "Array4d"]): ...
class Floats4d(_Array4d, _Floats):
"""4-dimensional array of floats."""
T: "Floats4d"
@classmethod
def __get_validators__(cls):
"""Runtime validation for pydantic."""
yield lambda v: validate_array(v, ndim=4, dtype="f")
def __iter__(self) -> Iterator[Floats3d]: ...
@overload
def __getitem__(self, key: _4_KeyScalar) -> float: ...
@overload
def __getitem__(self, key: _4_Key1d) -> Floats1d: ...
@overload
def __getitem__(self, key: _4_Key2d) -> Floats2d: ...
@overload
def __getitem__(self, key: _4_Key3d) -> Floats3d: ...
@overload
def __getitem__(self, key: _4_Key4d) -> "Floats4d": ...
def __getitem__(self, key: _4_AllKeys) -> _F4_AllReturns: ...
@overload
def __setitem__(self, key: _4_KeyScalar, value: float) -> None: ...
@overload
def __setitem__(self, key: _4_Key1d, value: Floats1d) -> None: ...
@overload
def __setitem__(self, key: _4_Key2d, value: Floats2d) -> None: ...
@overload
def __setitem__(self, key: _4_Key3d, value: Floats3d) -> None: ...
@overload
def __setitem__(self, key: _4_Key4d, value: "Floats4d") -> None: ...
def __setitem__(self, key: _4_AllKeys, value: _F4_AllReturns) -> None: ...
@overload
def sum(self, *, keepdims: Tru, axis: _4_AllAx = None, out: Optional["Floats4d"] = None) -> "Floats4d": ...
@overload
def sum(self, *, keepdims: Fal = False, axis: OneAx, out: Optional[Floats3d] = None) -> Floats3d: ...
@overload
def sum(self, *, keepdims: Fal = False, axis: TwoAx, out: Optional[Floats2d] = None) -> Floats2d: ...
@overload
def sum(self, *, keepdims: Fal = False, axis: ThreeAx, out: Optional[Floats1d] = None) -> Floats1d: ...
@overload
def sum(self, *, keepdims: Fal = False, axis: Optional[FourAx], out = None) -> float: ...
def sum(self, *, keepdims: bool = False, axis: _4_AllAx = None, out: Union[None, Floats1d, Floats2d, Floats3d, "Floats4d"] = None) -> _4F_ReduceResults: ...
class Ints4d(_Array4d, _Ints):
"""4-dimensional array of ints."""
T: "Ints4d"
@classmethod
def __get_validators__(cls):
"""Runtime validation for pydantic."""
yield lambda v: validate_array(v, ndim=4, dtype="i")
def __iter__(self) -> Iterator[Ints3d]: ...
# def __getitem__(self, key: int) -> Ints3d: ...
@overload
def sum(self, *, keepdims: Tru, axis: _4_AllAx = None, out: Optional["Ints4d"] = None) -> "Ints4d": ...
@overload
def sum(self, *, keepdims: Fal = False, axis: OneAx, out: Optional[Ints3d] = None) -> Ints3d: ...
@overload
def sum(self, *, keepdims: Fal = False, axis: TwoAx, out: Optional[Ints2d] = None) -> Ints2d: ...
@overload
def sum(self, *, keepdims: Fal = False, axis: ThreeAx, out: Optional[Ints1d] = None) -> Ints1d: ...
@overload
def sum(self, *, keepdims: Fal = False, axis: Optional[FourAx] = None, out = None) -> int: ...
def sum(self, *, keepdims: bool = False, axis: _4_AllAx = None, out: Optional[Union[Ints1d, Ints2d, Ints3d, "Ints4d"]] = None) -> _4I_ReduceResults: ...
_DIn = TypeVar("_DIn")
class Decorator(Protocol):
"""Protocol to mark a function as returning its child with identical signature."""
def __call__(self, name: str) -> Callable[[_DIn], _DIn]: ...
# fmt: on
class Generator(Iterator):
"""Custom generator type. Used to annotate function arguments that accept
generators so they can be validated by pydantic (which doesn't support
iterators/iterables otherwise).
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, v):
if not hasattr(v, "__iter__") and not hasattr(v, "__next__"):
raise TypeError("not a valid iterator")
return v
@dataclass
class SizedGenerator:
"""A generator that has a __len__ and can repeatedly call the generator
function.
"""
get_items: Callable[[], Generator]
length: int
def __len__(self):
return self.length
def __iter__(self):
yield from self.get_items()
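# A minimal sketch of how SizedGenerator is meant to be used (the names below
# are illustrative, not part of this module):
#   def get_batches():
#       yield from range(3)
#   batches = SizedGenerator(get_items=get_batches, length=3)
#   len(batches)     # 3
#   list(batches)    # [0, 1, 2]; each __iter__ call re-invokes get_batches()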
@dataclass
class Padded:
"""A batch of padded sequences, sorted by decreasing length. The data array
is of shape (step, batch, ...). The auxiliary array size_at_t indicates the
length of the batch at each timestep, so you can do data[:, :size_at_t[t]] to
shrink the batch. The lengths array gives the length of each sequence b,
and the indices array records the original ordering.
"""
data: Floats3d
size_at_t: Ints1d
lengths: Ints1d
indices: Ints1d
def copy(self):
return Padded(
self.data.copy(),
self.size_at_t.copy(),
self.lengths.copy(),
self.indices.copy()
)
def __len__(self) -> int:
return self.lengths.shape[0]
def __getitem__(self, index: Union[int, slice, Ints1d]) -> "Padded":
if isinstance(index, int):
# Slice to keep the dimensionality
return Padded(
self.data[:, index : index + 1],
self.lengths[index : index + 1],
self.lengths[index : index + 1],
self.indices[index : index + 1],
)
elif isinstance(index, slice):
return Padded(
self.data[:, index],
self.lengths[index],
self.lengths[index],
self.indices[index],
)
else:
# If we get a sequence of indices, we need to be careful that
# we maintain the length-sorting, while also keeping the mapping
# back to the original order correct.
sorted_index = list(sorted(index))
return Padded(
self.data[sorted_index],
self.size_at_t[sorted_index],
self.lengths[sorted_index],
self.indices[index], # Use original, to maintain order.
)
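# Illustrative use of the layout described in Padded's docstring (assumes the
# arrays were built consistently; shapes only):
#   padded.data.shape                        # (step, batch, width)
#   padded.data[:, : padded.size_at_t[t]]    # batch shrunk at timestep t
#   padded[i], padded[1:3]                   # Padded objects for a sub-batch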
@dataclass
class Ragged:
"""A batch of concatenated sequences, that vary in the size of their
first dimension. Ragged allows variable-length sequence data to be contiguous
in memory, without padding.
Indexing into Ragged is just like indexing into the *lengths* array, except
it returns a Ragged object with the accompanying sequence data. For instance,
you can write ragged[1:4] to get a Ragged object with sequences 1, 2 and 3.
"""
data: Array2d
lengths: Ints1d
data_shape: Tuple[int, ...]
starts_ends: Optional[Ints1d] = None
def __init__(self, data: _Array, lengths: Ints1d):
self.lengths = lengths
# Frustratingly, the -1 dimension doesn't work with 0 size...
if data.size:
self.data = cast(Array2d, data.reshape((data.shape[0], -1)))
else:
self.data = cast(Array2d, data.reshape((0, 0)))
self.data_shape = (-1,) + data.shape[1:]
@property
def dataXd(self) -> ArrayXd:
if self.data.size:
reshaped = self.data.reshape(self.data_shape)
else:
reshaped = self.data.reshape((self.data.shape[0],) + self.data_shape[1:])
return cast(ArrayXd, reshaped)
def __len__(self) -> int:
return self.lengths.shape[0]
def __getitem__(self, index: Union[int, slice, Array1d]) -> "Ragged":
if isinstance(index, tuple):
raise IndexError("Ragged arrays do not support 2d indexing.")
starts = self._get_starts()
ends = self._get_ends()
if isinstance(index, int):
s = starts[index]
e = ends[index]
return Ragged(self.data[s:e], self.lengths[index : index + 1])
elif isinstance(index, slice):
lengths = self.lengths[index]
if len(lengths) == 0:
return Ragged(self.data[0:0].reshape(self.data_shape), lengths)
start = starts[index][0] if index.start >= 1 else 0
end = ends[index][-1]
return Ragged(self.data[start:end].reshape(self.data_shape), lengths)
else:
# There must be a way to do this "properly" :(. Sigh, hate numpy.
xp = get_array_module(self.data)
data = xp.vstack([self[int(i)].data for i in index])
return Ragged(data.reshape(self.data_shape), self.lengths[index])
def _get_starts_ends(self) -> Ints1d:
if self.starts_ends is None:
xp = get_array_module(self.lengths)
self.starts_ends = xp.empty(self.lengths.size + 1, dtype="i")
self.starts_ends[0] = 0
self.lengths.cumsum(out=self.starts_ends[1:])
return self.starts_ends
def _get_starts(self) -> Ints1d:
return self._get_starts_ends()[:-1]
def _get_ends(self) -> Ints1d:
return self._get_starts_ends()[1:]
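# Illustrative indexing, assuming numpy arrays (so get_array_module resolves
# to numpy); the values are made up for the sketch:
#   data = numpy.zeros((6, 4), dtype="float32")        # 3 sequences concatenated
#   lengths = numpy.asarray([1, 2, 3], dtype="int32")  # their lengths
#   r = Ragged(data, lengths)
#   r[1:].data.shape    # (5, 4): the rows for the sequences of length 2 and 3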
_P = TypeVar("_P", bound=Sequence)
@dataclass
class Pairs(Generic[_P]):
"""Dataclass for pairs of sequences that allows indexing into the sequences
while keeping them aligned.
"""
one: _P
two: _P
def __getitem__(self, index) -> "Pairs[_P]":
return Pairs(self.one[index], self.two[index])
def __len__(self) -> int:
return len(self.one)
@dataclass
class ArgsKwargs:
"""A tuple of (args, kwargs) that can be spread into some function f:
f(*args, **kwargs)
"""
args: Tuple[Any, ...]
kwargs: Dict[str, Any]
@classmethod
def from_items(cls, items: Sequence[Tuple[Union[int, str], Any]]) -> "ArgsKwargs":
"""Create an ArgsKwargs object from a sequence of (key, value) tuples,
such as produced by argskwargs.items(). Each key should be either a string
or an integer. Items with int keys are added to the args list, and
items with string keys are added to the kwargs dict. The args list is
determined by sequence order, not the value of the integer.
"""
args = []
kwargs = {}
for key, value in items:
if isinstance(key, int):
args.append(value)
else:
kwargs[key] = value
return cls(args=tuple(args), kwargs=kwargs)
def keys(self) -> Iterable[Union[int, str]]:
"""Yield indices from self.args, followed by keys from self.kwargs."""
yield from range(len(self.args))
yield from self.kwargs.keys()
def values(self) -> Iterable[Any]:
"""Yield elements of from self.args, followed by values from self.kwargs."""
yield from self.args
yield from self.kwargs.values()
def items(self) -> Iterable[Tuple[Union[int, str], Any]]:
"""Yield enumerate(self.args), followed by self.kwargs.items()"""
yield from enumerate(self.args)
yield from self.kwargs.items()
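# Illustrative round trip through the helpers above (no external assumptions):
#   ak = ArgsKwargs(args=(1, 2), kwargs={"axis": -1})
#   list(ak.items())                   # [(0, 1), (1, 2), ('axis', -1)]
#   ArgsKwargs.from_items(ak.items())  # rebuilds an equivalent ArgsKwargs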
@dataclass
class Unserializable:
"""Wrap a value to prevent it from being serialized by msgpack."""
obj: Any
def validate_array(obj, ndim=None, dtype=None):
"""Runtime validator for pydantic to validate array types."""
xp = get_array_module(obj)
if not isinstance(obj, xp.ndarray):
raise TypeError("not a valid numpy or cupy array")
errors = []
if ndim is not None and obj.ndim != ndim:
errors.append(f"wrong array dimensions (expected {ndim}, got {obj.ndim})")
if dtype is not None:
dtype_mapping = {"f": ["float32"], "i": ["int32", "int64", "uint32", "uint64"]}
expected_types = dtype_mapping.get(dtype, [])
if obj.dtype not in expected_types:
expected = "/".join(expected_types)
err = f"wrong array data type (expected {expected}, got {obj.dtype})"
errors.append(err)
if errors:
raise ValueError(", ".join(errors))
return obj
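# Illustrative checks (assuming a numpy array, so get_array_module picks numpy):
#   arr = numpy.zeros((2, 3), dtype="float32")
#   validate_array(arr, ndim=2, dtype="f")   # returns arr unchanged
#   validate_array(arr, ndim=1)              # raises ValueError (wrong ndim)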
|
"""Talk"""
|
"""Debugger basics"""
import fnmatch
import sys
import os
__all__ = ["BdbQuit", "Bdb", "Breakpoint"]
class BdbQuit(Exception):
"""Exception to give up completely."""
class Bdb:
"""Generic Python debugger base class.
This class takes care of details of the trace facility;
a derived class should implement user interaction.
The standard debugger class (pdb.Pdb) is an example.
"""
def __init__(self, skip=None):
self.skip = set(skip) if skip else None
self.breaks = {}
self.fncache = {}
def canonic(self, filename):
if filename == "<" + filename[1:-1] + ">":
return filename
canonic = self.fncache.get(filename)
if not canonic:
canonic = os.path.abspath(filename)
canonic = os.path.normcase(canonic)
self.fncache[filename] = canonic
return canonic
def reset(self):
import linecache
linecache.checkcache()
self.botframe = None
self._set_stopinfo(None, None)
def trace_dispatch(self, frame, event, arg):
if self.quitting:
return # None
if event == 'line':
return self.dispatch_line(frame)
if event == 'call':
return self.dispatch_call(frame, arg)
if event == 'return':
return self.dispatch_return(frame, arg)
if event == 'exception':
return self.dispatch_exception(frame, arg)
if event == 'c_call':
return self.trace_dispatch
if event == 'c_exception':
return self.trace_dispatch
if event == 'c_return':
return self.trace_dispatch
print('bdb.Bdb.dispatch: unknown debugging event:', repr(event))
return self.trace_dispatch
def dispatch_line(self, frame):
if self.stop_here(frame) or self.break_here(frame):
self.user_line(frame)
if self.quitting: raise BdbQuit
return self.trace_dispatch
def dispatch_call(self, frame, arg):
# XXX 'arg' is no longer used
if self.botframe is None:
# First call of dispatch since reset()
self.botframe = frame.f_back # (CT) Note that this may also be None!
return self.trace_dispatch
if not (self.stop_here(frame) or self.break_anywhere(frame)):
# No need to trace this function
return # None
self.user_call(frame, arg)
if self.quitting: raise BdbQuit
return self.trace_dispatch
def dispatch_return(self, frame, arg):
if self.stop_here(frame) or frame == self.returnframe:
self.user_return(frame, arg)
if self.quitting: raise BdbQuit
return self.trace_dispatch
def dispatch_exception(self, frame, arg):
if self.stop_here(frame):
self.user_exception(frame, arg)
if self.quitting: raise BdbQuit
return self.trace_dispatch
# Normally derived classes don't override the following
# methods, but they may if they want to redefine the
# definition of stopping and breakpoints.
def is_skipped_module(self, module_name):
for pattern in self.skip:
if fnmatch.fnmatch(module_name, pattern):
return True
return False
def stop_here(self, frame):
# (CT) stopframe may now also be None, see dispatch_call.
# (CT) the former test for None is therefore removed from here.
if self.skip and \
self.is_skipped_module(frame.f_globals.get('__name__')):
return False
if frame is self.stopframe:
if self.stoplineno == -1:
return False
return frame.f_lineno >= self.stoplineno
while frame is not None and frame is not self.stopframe:
if frame is self.botframe:
return True
frame = frame.f_back
return False
def break_here(self, frame):
filename = self.canonic(frame.f_code.co_filename)
if filename not in self.breaks:
return False
lineno = frame.f_lineno
if lineno not in self.breaks[filename]:
# The line itself has no breakpoint, but maybe the line is the
# first line of a function with breakpoint set by function name.
lineno = frame.f_code.co_firstlineno
if lineno not in self.breaks[filename]:
return False
# flag says ok to delete temp. bp
(bp, flag) = effective(filename, lineno, frame)
if bp:
self.currentbp = bp.number
if (flag and bp.temporary):
self.do_clear(str(bp.number))
return True
else:
return False
def do_clear(self, arg):
raise NotImplementedError("subclass of bdb must implement do_clear()")
def break_anywhere(self, frame):
return self.canonic(frame.f_code.co_filename) in self.breaks
# Derived classes should override the user_* methods
# to gain control.
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
pass
def user_line(self, frame):
"""This method is called when we stop or break at this line."""
pass
def user_return(self, frame, return_value):
"""This method is called when a return trap is set here."""
pass
def user_exception(self, frame, exc_info):
"""This method is called if an exception occurs,
but only if we are to stop at or just below this level."""
pass
def _set_stopinfo(self, stopframe, returnframe, stoplineno=0):
self.stopframe = stopframe
self.returnframe = returnframe
self.quitting = False
# stoplineno >= 0 means: stop at line >= the stoplineno
# stoplineno -1 means: don't stop at all
self.stoplineno = stoplineno
# Derived classes and clients can call the following methods
# to affect the stepping state.
def set_until(self, frame, lineno=None):
"""Stop when the line with the line no greater than the current one is
reached or when returning from current frame"""
# the name "until" is borrowed from gdb
if lineno is None:
lineno = frame.f_lineno + 1
self._set_stopinfo(frame, frame, lineno)
def set_step(self):
"""Stop after one line of code."""
self._set_stopinfo(None, None)
def set_next(self, frame):
"""Stop on the next line in or below the given frame."""
self._set_stopinfo(frame, None)
def set_return(self, frame):
"""Stop when returning from the given frame."""
self._set_stopinfo(frame.f_back, frame)
def set_trace(self, frame=None):
"""Start debugging from `frame`.
If frame is not specified, debugging starts from caller's frame.
"""
if frame is None:
frame = sys._getframe().f_back
self.reset()
while frame:
frame.f_trace = self.trace_dispatch
self.botframe = frame
frame = frame.f_back
self.set_step()
sys.settrace(self.trace_dispatch)
def set_continue(self):
# Don't stop except at breakpoints or when finished
self._set_stopinfo(self.botframe, None, -1)
if not self.breaks:
# no breakpoints; run without debugger overhead
sys.settrace(None)
frame = sys._getframe().f_back
while frame and frame is not self.botframe:
del frame.f_trace
frame = frame.f_back
def set_quit(self):
self.stopframe = self.botframe
self.returnframe = None
self.quitting = True
sys.settrace(None)
# Derived classes and clients can call the following methods
# to manipulate breakpoints. These methods return an
# error message if something went wrong, None if all is well.
# Set_break prints out the breakpoint line and file:lineno.
# Call self.get_*break*() to see the breakpoints or better
# for bp in Breakpoint.bpbynumber: if bp: bp.bpprint().
def set_break(self, filename, lineno, temporary=False, cond=None,
funcname=None):
filename = self.canonic(filename)
import linecache # Import as late as possible
line = linecache.getline(filename, lineno)
if not line:
return 'Line %s:%d does not exist' % (filename, lineno)
list = self.breaks.setdefault(filename, [])
if lineno not in list:
list.append(lineno)
bp = Breakpoint(filename, lineno, temporary, cond, funcname)
def _prune_breaks(self, filename, lineno):
if (filename, lineno) not in Breakpoint.bplist:
self.breaks[filename].remove(lineno)
if not self.breaks[filename]:
del self.breaks[filename]
def clear_break(self, filename, lineno):
filename = self.canonic(filename)
if filename not in self.breaks:
return 'There are no breakpoints in %s' % filename
if lineno not in self.breaks[filename]:
return 'There is no breakpoint at %s:%d' % (filename, lineno)
# If there's only one bp in the list for that file,line
# pair, then remove the breaks entry
for bp in Breakpoint.bplist[filename, lineno][:]:
bp.deleteMe()
self._prune_breaks(filename, lineno)
def clear_bpbynumber(self, arg):
try:
bp = self.get_bpbynumber(arg)
except ValueError as err:
return str(err)
bp.deleteMe()
self._prune_breaks(bp.file, bp.line)
def clear_all_file_breaks(self, filename):
filename = self.canonic(filename)
if filename not in self.breaks:
return 'There are no breakpoints in %s' % filename
for line in self.breaks[filename]:
blist = Breakpoint.bplist[filename, line]
for bp in blist:
bp.deleteMe()
del self.breaks[filename]
def clear_all_breaks(self):
if not self.breaks:
return 'There are no breakpoints'
for bp in Breakpoint.bpbynumber:
if bp:
bp.deleteMe()
self.breaks = {}
def get_bpbynumber(self, arg):
if not arg:
raise ValueError('Breakpoint number expected')
try:
number = int(arg)
except ValueError:
raise ValueError('Non-numeric breakpoint number %s' % arg)
try:
bp = Breakpoint.bpbynumber[number]
except IndexError:
raise ValueError('Breakpoint number %d out of range' % number)
if bp is None:
raise ValueError('Breakpoint %d already deleted' % number)
return bp
def get_break(self, filename, lineno):
filename = self.canonic(filename)
return filename in self.breaks and \
lineno in self.breaks[filename]
def get_breaks(self, filename, lineno):
filename = self.canonic(filename)
return filename in self.breaks and \
lineno in self.breaks[filename] and \
Breakpoint.bplist[filename, lineno] or []
def get_file_breaks(self, filename):
filename = self.canonic(filename)
if filename in self.breaks:
return self.breaks[filename]
else:
return []
def get_all_breaks(self):
return self.breaks
# Derived classes and clients can call the following method
# to get a data structure representing a stack trace.
def get_stack(self, f, t):
stack = []
if t and t.tb_frame is f:
t = t.tb_next
while f is not None:
stack.append((f, f.f_lineno))
if f is self.botframe:
break
f = f.f_back
stack.reverse()
i = max(0, len(stack) - 1)
while t is not None:
stack.append((t.tb_frame, t.tb_lineno))
t = t.tb_next
if f is None:
i = max(0, len(stack) - 1)
return stack, i
def format_stack_entry(self, frame_lineno, lprefix=': '):
import linecache, reprlib
frame, lineno = frame_lineno
filename = self.canonic(frame.f_code.co_filename)
s = '%s(%r)' % (filename, lineno)
if frame.f_code.co_name:
s += frame.f_code.co_name
else:
s += "<lambda>"
if '__args__' in frame.f_locals:
args = frame.f_locals['__args__']
else:
args = None
if args:
s += reprlib.repr(args)
else:
s += '()'
if '__return__' in frame.f_locals:
rv = frame.f_locals['__return__']
s += '->'
s += reprlib.repr(rv)
line = linecache.getline(filename, lineno, frame.f_globals)
if line:
s += lprefix + line.strip()
return s
# The following methods can be called by clients to use
# a debugger to debug a statement or an expression.
# Both can be given as a string, or a code object.
def run(self, cmd, globals=None, locals=None):
if globals is None:
import __main__
globals = __main__.__dict__
if locals is None:
locals = globals
self.reset()
if isinstance(cmd, str):
cmd = compile(cmd, "<string>", "exec")
sys.settrace(self.trace_dispatch)
try:
exec(cmd, globals, locals)
except BdbQuit:
pass
finally:
self.quitting = True
sys.settrace(None)
def runeval(self, expr, globals=None, locals=None):
if globals is None:
import __main__
globals = __main__.__dict__
if locals is None:
locals = globals
self.reset()
sys.settrace(self.trace_dispatch)
try:
return eval(expr, globals, locals)
except BdbQuit:
pass
finally:
self.quitting = True
sys.settrace(None)
def runctx(self, cmd, globals, locals):
# B/W compatibility
self.run(cmd, globals, locals)
# This method is more useful to debug a single function call.
def runcall(self, func, *args, **kwds):
self.reset()
sys.settrace(self.trace_dispatch)
res = None
try:
res = func(*args, **kwds)
except BdbQuit:
pass
finally:
self.quitting = True
sys.settrace(None)
return res
def set_trace():
Bdb().set_trace()
class Breakpoint:
"""Breakpoint class.
Implements temporary breakpoints, ignore counts, disabling and
(re)-enabling, and conditionals.
Breakpoints are indexed by number through bpbynumber and by
the file,line tuple using bplist. The former points to a
single instance of class Breakpoint. The latter points to a
list of such instances since there may be more than one
breakpoint per line.
"""
# XXX Keeping state in the class is a mistake -- this means
# you cannot have more than one active Bdb instance.
next = 1 # Next bp to be assigned
bplist = {} # indexed by (file, lineno) tuple
bpbynumber = [None] # Each entry is None or an instance of Bpt
# index 0 is unused, except for marking an
# effective break .... see effective()
def __init__(self, file, line, temporary=False, cond=None, funcname=None):
self.funcname = funcname
# Needed if funcname is not None.
self.func_first_executable_line = None
self.file = file # This better be in canonical form!
self.line = line
self.temporary = temporary
self.cond = cond
self.enabled = True
self.ignore = 0
self.hits = 0
self.number = Breakpoint.next
Breakpoint.next += 1
# Build the two lists
self.bpbynumber.append(self)
if (file, line) in self.bplist:
self.bplist[file, line].append(self)
else:
self.bplist[file, line] = [self]
def deleteMe(self):
index = (self.file, self.line)
self.bpbynumber[self.number] = None # No longer in list
self.bplist[index].remove(self)
if not self.bplist[index]:
# No more bp for this f:l combo
del self.bplist[index]
def enable(self):
self.enabled = True
def disable(self):
self.enabled = False
def bpprint(self, out=None):
if out is None:
out = sys.stdout
print(self.bpformat(), file=out)
def bpformat(self):
if self.temporary:
disp = 'del '
else:
disp = 'keep '
if self.enabled:
disp = disp + 'yes '
else:
disp = disp + 'no '
ret = '%-4dbreakpoint %s at %s:%d' % (self.number, disp,
self.file, self.line)
if self.cond:
ret += '\n\tstop only if %s' % (self.cond,)
if self.ignore:
ret += '\n\tignore next %d hits' % (self.ignore,)
if self.hits:
if self.hits > 1:
ss = 's'
else:
ss = ''
ret += '\n\tbreakpoint already hit %d time%s' % (self.hits, ss)
return ret
def __str__(self):
return 'breakpoint %s at %s:%s' % (self.number, self.file, self.line)
# -----------end of Breakpoint class----------
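# Illustrative use of the two indexes described in the Breakpoint docstring
# (the path is a placeholder and should already be in canonical form):
#   bp = Breakpoint('/abs/path/spam.py', 3)
#   Breakpoint.bpbynumber[bp.number] is bp              # lookup by number
#   bp in Breakpoint.bplist[('/abs/path/spam.py', 3)]   # lookup by (file, line)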
def checkfuncname(b, frame):
"""Check whether we should break here because of `b.funcname`."""
if not b.funcname:
# Breakpoint was set via line number.
if b.line != frame.f_lineno:
# Breakpoint was set at a line with a def statement and the function
# defined is called: don't break.
return False
return True
# Breakpoint set via function name.
if frame.f_code.co_name != b.funcname:
# It's not a function call, but rather execution of def statement.
return False
# We are in the right frame.
if not b.func_first_executable_line:
# The function is entered for the 1st time.
b.func_first_executable_line = frame.f_lineno
if b.func_first_executable_line != frame.f_lineno:
# But we are not at the first line number: don't break.
return False
return True
# Determines if there is an effective (active) breakpoint at this
# line of code. Returns a (breakpoint, flag) pair, or (None, None) if there is none.
def effective(file, line, frame):
"""Determine which breakpoint for this file:line is to be acted upon.
Called only if we know there is a bpt at this
location. Returns breakpoint that was triggered and a flag
that indicates if it is ok to delete a temporary bp.
"""
possibles = Breakpoint.bplist[file, line]
for b in possibles:
if not b.enabled:
continue
if not checkfuncname(b, frame):
continue
# Count every hit when bp is enabled
b.hits += 1
if not b.cond:
# If unconditional, and ignoring go on to next, else break
if b.ignore > 0:
b.ignore -= 1
continue
else:
# breakpoint and marker that it's ok to delete if temporary
return (b, True)
else:
# Conditional bp.
# Ignore count applies only to those bpt hits where the
# condition evaluates to true.
try:
val = eval(b.cond, frame.f_globals, frame.f_locals)
if val:
if b.ignore > 0:
b.ignore -= 1
# continue
else:
return (b, True)
# else:
# continue
except:
# if eval fails, most conservative thing is to stop on
# breakpoint regardless of ignore count. Don't delete
# temporary, as another hint to user.
return (b, False)
return (None, None)
# -------------------- testing --------------------
class Tdb(Bdb):
def user_call(self, frame, args):
name = frame.f_code.co_name
if not name: name = '???'
print('+++ call', name, args)
def user_line(self, frame):
import linecache
name = frame.f_code.co_name
if not name: name = '???'
fn = self.canonic(frame.f_code.co_filename)
line = linecache.getline(fn, frame.f_lineno, frame.f_globals)
print('+++', fn, frame.f_lineno, name, ':', line.strip())
def user_return(self, frame, retval):
print('+++ return', retval)
def user_exception(self, frame, exc_stuff):
print('+++ exception', exc_stuff)
self.set_continue()
def foo(n):
print('foo(', n, ')')
x = bar(n*10)
print('bar returned', x)
def bar(a):
print('bar(', a, ')')
return a/2
def test():
t = Tdb()
t.run('import bdb; bdb.foo(10)')
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import os
ANSIBLE_SSH_PORT = '2222'
def get_args():
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--list', action='store_true')
parser.add_argument('--host')
return parser.parse_args()
def wd_to_script_dir():
script_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(script_dir)
def terraform_output(key):
ret = os.popen('terraform output -json ' + key).read()
return json.loads(ret)
def main():
args = get_args()
wd_to_script_dir()
primary_managers = terraform_output('swarm-primary-managers')
secondary_managers = terraform_output('swarm-secondary-managers')
workers = terraform_output('swarm-workers')
ssh_public_key = terraform_output('ssh-public-key')
if args.list:
inventory = {
'swarm-primary-managers': list(primary_managers.keys()),
'swarm-secondary-managers': list(secondary_managers.keys()),
'swarm-workers': list(workers.keys())
}
print(json.dumps(inventory))
if args.host:
hosts = {**primary_managers, **secondary_managers, **workers}
print(json.dumps({
'ansible_host': hosts[args.host],
'ansible_port': ANSIBLE_SSH_PORT,
'ssh_public_key': ssh_public_key
}))
if __name__ == "__main__":
main()
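# Example usage as an Ansible dynamic inventory (the script and playbook names
# are placeholders):
#   ./inventory.py --list          # prints group -> hostname lists
#   ./inventory.py --host NAME     # prints connection variables for one host
#   ansible-playbook -i inventory.py site.yml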
|
"""ThreatConnect TI Address"""
from ..indicator import Indicator
class Address(Indicator):
"""Unique API calls for Address API Endpoints"""
def __init__(self, tcex, **kwargs):
"""Initialize Class Properties.
Args:
ip (str): The value for this Indicator.
active (bool, kwargs): If False the indicator is marked "inactive" in TC.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): [Read-Only] The date timestamp the Indicator was created.
last_modified (str, kwargs): [Read-Only] The date timestamp the Indicator was last
modified.
private_flag (bool, kwargs): If True the indicator is marked as private in TC.
rating (str, kwargs): The threat rating for this Indicator.
"""
super().__init__(
tcex, sub_type='Address', api_entity='address', api_branch='addresses', **kwargs
)
self.unique_id = kwargs.get('unique_id', kwargs.get('ip'))
self.data['ip'] = self.unique_id
def _set_unique_id(self, json_response):
"""Set the unique_id provided a json response.
Args:
json_response:
"""
self.unique_id = json_response.get('ip', '')
def can_create(self):
"""Return True if address can be created.
If the ip address has been provided returns that the address can be created, otherwise
returns that the address cannot be created.
"""
return not self.data.get('ip') is None
# TODO: @burdy - is this correct for address?
def dns_resolution(self):
"""Update the DNS resolution.
Returns:
"""
if not self.can_update():
self._tcex.handle_error(910, [self.type])
return self.tc_requests.dns_resolution(
self.api_type, self.api_branch, self.unique_id, owner=self.owner
)
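# Illustrative construction (assumes a configured `tcex` instance; the values
# are placeholders):
#   address = Address(tcex, ip='192.0.2.1', rating='3', confidence='75')
#   address.can_create()    # True, because an ip value was provided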
|
#
# PySNMP MIB module A3COM-HUAWEI-LswIGSP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/A3COM-HUAWEI-LswIGSP-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 16:51:01 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
lswCommon, = mibBuilder.importSymbols("A3COM-HUAWEI-OID-MIB", "lswCommon")
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
IpAddress, Unsigned32, ModuleIdentity, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, iso, TimeTicks, NotificationType, Counter32, Integer32, ObjectIdentity, Gauge32, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "Unsigned32", "ModuleIdentity", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "iso", "TimeTicks", "NotificationType", "Counter32", "Integer32", "ObjectIdentity", "Gauge32", "Bits")
RowStatus, TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "TextualConvention", "DisplayString")
hwLswIgmpsnoopingMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7))
hwLswIgmpsnoopingMib.setRevisions(('2001-06-29 00:00',))
if mibBuilder.loadTexts: hwLswIgmpsnoopingMib.setLastUpdated('200106290000Z')
if mibBuilder.loadTexts: hwLswIgmpsnoopingMib.setOrganization('')
class EnabledStatus(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("enabled", 1), ("disabled", 2))
hwLswIgmpsnoopingMibObject = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1))
hwIgmpSnoopingStatus = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 1), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwIgmpSnoopingStatus.setStatus('current')
hwIgmpSnoopingRouterPortAge = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1000)).clone(105)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwIgmpSnoopingRouterPortAge.setStatus('current')
hwIgmpSnoopingResponseTime = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 25)).clone(10)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwIgmpSnoopingResponseTime.setStatus('current')
hwIgmpSnoopingHostTime = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(200, 1000)).clone(260)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwIgmpSnoopingHostTime.setStatus('current')
hwIgmpSnoopingGroupLimitTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 5), )
if mibBuilder.loadTexts: hwIgmpSnoopingGroupLimitTable.setStatus('current')
hwIgmpSnoopingGroupLimitEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 5, 1), ).setIndexNames((0, "A3COM-HUAWEI-LswIGSP-MIB", "hwIgmpSnoopingGroupIfIndex"))
if mibBuilder.loadTexts: hwIgmpSnoopingGroupLimitEntry.setStatus('current')
hwIgmpSnoopingGroupIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 5, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hwIgmpSnoopingGroupIfIndex.setStatus('current')
hwIgmpSnoopingGroupLimitNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 5, 1, 2), Unsigned32().clone(4294967295)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwIgmpSnoopingGroupLimitNumber.setStatus('current')
hwIgmpSnoopingFastLeaveTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 6), )
if mibBuilder.loadTexts: hwIgmpSnoopingFastLeaveTable.setStatus('current')
hwIgmpSnoopingFastLeaveEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 6, 1), ).setIndexNames((0, "A3COM-HUAWEI-LswIGSP-MIB", "hwIgmpSnoopingFastLeaveIfIndex"))
if mibBuilder.loadTexts: hwIgmpSnoopingFastLeaveEntry.setStatus('current')
hwIgmpSnoopingFastLeaveIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 6, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hwIgmpSnoopingFastLeaveIfIndex.setStatus('current')
hwIgmpSnoopingFastLeaveStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 6, 1, 2), EnabledStatus().clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwIgmpSnoopingFastLeaveStatus.setStatus('current')
hwIgmpSnoopingGroupPolicyTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 7), )
if mibBuilder.loadTexts: hwIgmpSnoopingGroupPolicyTable.setStatus('current')
hwIgmpSnoopingGroupPolicyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 7, 1), ).setIndexNames((0, "A3COM-HUAWEI-LswIGSP-MIB", "hwIgmpSnoopingGroupPolicyIfIndex"), (0, "A3COM-HUAWEI-LswIGSP-MIB", "hwIgmpSnoopingGroupPolicyVlanID"))
if mibBuilder.loadTexts: hwIgmpSnoopingGroupPolicyEntry.setStatus('current')
hwIgmpSnoopingGroupPolicyIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 7, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hwIgmpSnoopingGroupPolicyIfIndex.setStatus('current')
hwIgmpSnoopingGroupPolicyVlanID = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 7, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094)))
if mibBuilder.loadTexts: hwIgmpSnoopingGroupPolicyVlanID.setStatus('current')
hwIgmpSnoopingGroupPolicyParameter = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 7, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(2000, 2999))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwIgmpSnoopingGroupPolicyParameter.setStatus('current')
hwIgmpSnoopingGroupPolicyStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 7, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwIgmpSnoopingGroupPolicyStatus.setStatus('current')
hwIgmpSnoopingNonFloodingStatus = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 8), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwIgmpSnoopingNonFloodingStatus.setStatus('current')
hwIgmpSnoopingVlanStatusTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 9), )
if mibBuilder.loadTexts: hwIgmpSnoopingVlanStatusTable.setStatus('current')
hwIgmpSnoopingVlanStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 9, 1), ).setIndexNames((0, "A3COM-HUAWEI-LswIGSP-MIB", "hwIgmpSnoopingVlanID"))
if mibBuilder.loadTexts: hwIgmpSnoopingVlanStatusEntry.setStatus('current')
hwIgmpSnoopingVlanID = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 9, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094)))
if mibBuilder.loadTexts: hwIgmpSnoopingVlanID.setStatus('current')
hwIgmpSnoopingVlanEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 9, 1, 2), EnabledStatus().clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwIgmpSnoopingVlanEnabled.setStatus('current')
hwIgmpSnoopingStatsObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 10))
hwRecvIGMPGQueryNum = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 10, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRecvIGMPGQueryNum.setStatus('current')
hwRecvIGMPSQueryNum = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 10, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRecvIGMPSQueryNum.setStatus('current')
hwRecvIGMPV1ReportNum = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 10, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRecvIGMPV1ReportNum.setStatus('current')
hwRecvIGMPV2ReportNum = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 10, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRecvIGMPV2ReportNum.setStatus('current')
hwRecvIGMPLeaveNum = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 10, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRecvIGMPLeaveNum.setStatus('current')
hwRecvErrorIGMPPacketNum = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 10, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRecvErrorIGMPPacketNum.setStatus('current')
hwSentIGMPSQueryNum = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 10, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSentIGMPSQueryNum.setStatus('current')
hwIgmpSnoopingClearStats = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 7, 1, 10, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("clear", 1), ("counting", 2))).clone('counting')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwIgmpSnoopingClearStats.setStatus('current')
mibBuilder.exportSymbols("A3COM-HUAWEI-LswIGSP-MIB", hwIgmpSnoopingStatus=hwIgmpSnoopingStatus, hwIgmpSnoopingResponseTime=hwIgmpSnoopingResponseTime, hwIgmpSnoopingGroupPolicyParameter=hwIgmpSnoopingGroupPolicyParameter, hwIgmpSnoopingRouterPortAge=hwIgmpSnoopingRouterPortAge, hwIgmpSnoopingHostTime=hwIgmpSnoopingHostTime, hwRecvIGMPV2ReportNum=hwRecvIGMPV2ReportNum, hwIgmpSnoopingGroupPolicyEntry=hwIgmpSnoopingGroupPolicyEntry, hwIgmpSnoopingGroupPolicyVlanID=hwIgmpSnoopingGroupPolicyVlanID, hwIgmpSnoopingGroupLimitEntry=hwIgmpSnoopingGroupLimitEntry, hwSentIGMPSQueryNum=hwSentIGMPSQueryNum, hwIgmpSnoopingGroupPolicyStatus=hwIgmpSnoopingGroupPolicyStatus, hwLswIgmpsnoopingMibObject=hwLswIgmpsnoopingMibObject, hwRecvIGMPSQueryNum=hwRecvIGMPSQueryNum, hwIgmpSnoopingGroupIfIndex=hwIgmpSnoopingGroupIfIndex, hwLswIgmpsnoopingMib=hwLswIgmpsnoopingMib, hwIgmpSnoopingVlanEnabled=hwIgmpSnoopingVlanEnabled, hwIgmpSnoopingClearStats=hwIgmpSnoopingClearStats, hwIgmpSnoopingStatsObjects=hwIgmpSnoopingStatsObjects, hwRecvErrorIGMPPacketNum=hwRecvErrorIGMPPacketNum, PYSNMP_MODULE_ID=hwLswIgmpsnoopingMib, hwIgmpSnoopingFastLeaveIfIndex=hwIgmpSnoopingFastLeaveIfIndex, hwRecvIGMPLeaveNum=hwRecvIGMPLeaveNum, hwIgmpSnoopingGroupLimitNumber=hwIgmpSnoopingGroupLimitNumber, hwIgmpSnoopingNonFloodingStatus=hwIgmpSnoopingNonFloodingStatus, hwIgmpSnoopingGroupLimitTable=hwIgmpSnoopingGroupLimitTable, hwIgmpSnoopingFastLeaveTable=hwIgmpSnoopingFastLeaveTable, hwRecvIGMPGQueryNum=hwRecvIGMPGQueryNum, EnabledStatus=EnabledStatus, hwIgmpSnoopingVlanStatusEntry=hwIgmpSnoopingVlanStatusEntry, hwIgmpSnoopingGroupPolicyIfIndex=hwIgmpSnoopingGroupPolicyIfIndex, hwIgmpSnoopingFastLeaveStatus=hwIgmpSnoopingFastLeaveStatus, hwIgmpSnoopingVlanID=hwIgmpSnoopingVlanID, hwIgmpSnoopingGroupPolicyTable=hwIgmpSnoopingGroupPolicyTable, hwIgmpSnoopingVlanStatusTable=hwIgmpSnoopingVlanStatusTable, hwIgmpSnoopingFastLeaveEntry=hwIgmpSnoopingFastLeaveEntry, hwRecvIGMPV1ReportNum=hwRecvIGMPV1ReportNum)
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Interfaces for Hadoop filesystem access via HttpFs/WebHDFS
"""
import errno
import logging
import posixpath
import stat
import threading
import time
from django.utils.encoding import smart_str
from django.utils.translation import ugettext as _
from desktop.lib.rest import http_client, resource
from hadoop.fs import normpath, SEEK_SET, SEEK_CUR, SEEK_END
from hadoop.fs.hadoopfs import Hdfs
from hadoop.fs.exceptions import WebHdfsException
from hadoop.fs.webhdfs_types import WebHdfsStat, WebHdfsContentSummary
from hadoop.conf import UPLOAD_CHUNK_SIZE
from hadoop.hdfs_site import get_nn_sentry_prefixes
import hadoop.conf
import desktop.conf
DEFAULT_HDFS_SUPERUSER = desktop.conf.DEFAULT_HDFS_SUPERUSER.get()
# The number of bytes to read if not specified
DEFAULT_READ_SIZE = 1024*1024 # 1MB
LOG = logging.getLogger(__name__)
class WebHdfs(Hdfs):
"""
WebHdfs implements the filesystem interface via the WebHDFS rest protocol.
"""
DEFAULT_USER = desktop.conf.DEFAULT_USER.get() # This should be the user running Hue
TRASH_CURRENT = 'Current'
def __init__(self, url,
fs_defaultfs,
logical_name=None,
hdfs_superuser=None,
security_enabled=False,
temp_dir="/tmp",
umask=01022):
self._url = url
self._superuser = hdfs_superuser
self._security_enabled = security_enabled
self._temp_dir = temp_dir
self._umask = umask
self._fs_defaultfs = fs_defaultfs
self._logical_name = logical_name
self._client = self._make_client(url, security_enabled)
self._root = resource.Resource(self._client)
# To store user info
self._thread_local = threading.local()
LOG.debug("Initializing Hadoop WebHdfs: %s (security: %s, superuser: %s)" %
(self._url, self._security_enabled, self._superuser))
@classmethod
def from_config(cls, hdfs_config):
fs_defaultfs = hdfs_config.FS_DEFAULTFS.get()
return cls(url=_get_service_url(hdfs_config),
fs_defaultfs=fs_defaultfs,
logical_name=hdfs_config.LOGICAL_NAME.get(),
security_enabled=hdfs_config.SECURITY_ENABLED.get(),
temp_dir=hdfs_config.TEMP_DIR.get(),
umask=hdfs_config.UMASK.get())
def __str__(self):
return "WebHdfs at %s" % self._url
def _make_client(self, url, security_enabled):
client = http_client.HttpClient(
url, exc_class=WebHdfsException, logger=LOG)
if security_enabled:
client.set_kerberos_auth()
return client
@property
def uri(self):
return self._url
@property
def logical_name(self):
return self._logical_name
@classmethod
def is_sentry_managed(cls, path):
prefixes = get_nn_sentry_prefixes().split(',')
return any([path.startswith(p) for p in prefixes if p])
@property
def fs_defaultfs(self):
return self._fs_defaultfs
@property
def umask(self):
return self._umask
@property
def security_enabled(self):
return self._security_enabled
@property
def superuser(self):
if self._superuser is None:
try:
# The owner of '/' is usually the superuser
sb = self.stats('/')
self._superuser = sb.user
except Exception, ex:
LOG.exception('Failed to determine superuser of %s: %s' % (self, ex))
self._superuser = DEFAULT_HDFS_SUPERUSER
return self._superuser
@property
def user(self):
try:
return self._thread_local.user
except AttributeError:
return WebHdfs.DEFAULT_USER
@property
def trash_path(self):
return self.join(self.get_home_dir(), '.Trash')
@property
def current_trash_path(self):
return self.join(self.trash_path, self.TRASH_CURRENT)
def _getparams(self):
return {
"user.name" : WebHdfs.DEFAULT_USER,
"doas" : self.user
}
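# Note on the parameters above: every request is sent as DEFAULT_USER and the
# effective user is passed through HttpFS/WebHDFS impersonation via "doas".
# A LISTSTATUS call would therefore carry a query string roughly like this
# (illustrative values only):
#   ?user.name=hue&doas=bob&op=LISTSTATUS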
def setuser(self, user):
"""Set a new user. Return the current user."""
curr = self.user
self._thread_local.user = user
return curr
def listdir_stats(self, path, glob=None):
"""
listdir_stats(path, glob=None) -> [ WebHdfsStat ]
Get directory listing with stats.
"""
path = Hdfs.normpath(path)
params = self._getparams()
if glob is not None:
params['filter'] = glob
params['op'] = 'LISTSTATUS'
json = self._root.get(path, params)
filestatus_list = json['FileStatuses']['FileStatus']
return [ WebHdfsStat(st, path) for st in filestatus_list ]
def listdir(self, path, glob=None):
"""
listdir(path, glob=None) -> [ entry names ]
Get directory entry names without stats.
"""
dirents = self.listdir_stats(path, glob)
return [Hdfs.basename(x.path) for x in dirents]
def get_content_summary(self, path):
"""
get_content_summary(path) -> WebHdfsContentSummary
"""
path = Hdfs.normpath(path)
params = self._getparams()
params['op'] = 'GETCONTENTSUMMARY'
json = self._root.get(path, params)
return WebHdfsContentSummary(json['ContentSummary'])
def _stats(self, path):
"""This version of stats returns None if the entry is not found"""
path = Hdfs.normpath(path)
params = self._getparams()
params['op'] = 'GETFILESTATUS'
try:
json = self._root.get(path, params)
return WebHdfsStat(json['FileStatus'], path)
except WebHdfsException, ex:
if ex.server_exc == 'FileNotFoundException' or ex.code == 404:
return None
raise ex
def stats(self, path):
"""
stats(path) -> WebHdfsStat
"""
res = self._stats(path)
if res is not None:
return res
raise IOError(errno.ENOENT, _("File %s not found") % path)
def exists(self, path):
return self._stats(path) is not None
def isdir(self, path):
sb = self._stats(path)
if sb is None:
return False
return sb.isDir
def isfile(self, path):
sb = self._stats(path)
if sb is None:
return False
return not sb.isDir
def _ensure_current_trash_directory(self):
"""Create trash directory for a user if it doesn't exist."""
if not self.exists(self.current_trash_path):
self.mkdir(self.current_trash_path)
return self.current_trash_path
def _trash(self, path, recursive=False):
"""
_trash(path, recursive=False)
Move a file or directory to trash.
Will create a timestamped directory underneath /user/<username>/.Trash.
Trash must be enabled for this to work.
"""
if not self.exists(path):
raise IOError(errno.ENOENT, _("File %s not found") % path)
if not recursive and self.isdir(path):
raise IOError(errno.EISDIR, _("File %s is a directory") % path)
if path.startswith(self.trash_path):
raise IOError(errno.EPERM, _("File %s is already trashed") % path)
# Make path (with timestamp suffix if necessary)
base_trash_path = self.join(self._ensure_current_trash_directory(), path[1:])
trash_path = base_trash_path
while self.exists(trash_path):
trash_path = base_trash_path + str(time.time())
# Move path to trash path
self.mkdir(self.dirname(trash_path))
self.rename(path, trash_path)
def _delete(self, path, recursive=False):
"""
_delete(path, recursive=False)
Delete a file or directory.
"""
path = Hdfs.normpath(path)
params = self._getparams()
params['op'] = 'DELETE'
params['recursive'] = recursive and 'true' or 'false'
result = self._root.delete(path, params)
# This part of the API is nonsense.
# The lack of exception should indicate success.
if not result['boolean']:
raise IOError(_('Delete failed: %s') % path)
def remove(self, path, skip_trash=False):
"""Delete a file."""
if skip_trash:
self._delete(path, recursive=False)
else:
self._trash(path, recursive=False)
def rmdir(self, path, skip_trash=False):
"""Delete a directory."""
self.remove(path, skip_trash)
def rmtree(self, path, skip_trash=False):
"""Delete a tree recursively."""
if skip_trash:
self._delete(path, recursive=True)
else:
self._trash(path, recursive=True)
def restore(self, path):
"""
restore(path)
The root of ``path`` will be /user/<current user>/.Trash/<timestamp>.
Removing the root from ``path`` will provide the original path.
Ensure parent directories exist and rename path.
"""
if not path.startswith(self.trash_path):
raise IOError(errno.EPERM, _("File %s is not in trash") % path)
# Build original path
original_path = []
split_path = self.split(path)
while split_path[0] != self.trash_path:
original_path.append(split_path[1])
split_path = self.split(split_path[0])
original_path.reverse()
original_path = self.join(posixpath.sep, *original_path)
# move to original path
# the path could have been expunged.
if self.exists(original_path):
raise IOError(errno.EEXIST, _("Path %s already exists.") % str(smart_str(original_path)))
self.rename(path, original_path)
def purge_trash(self):
"""
purge_trash()
Purge all trash in the user's ``trash_path``
"""
for timestamped_directory in self.listdir(self.trash_path):
self.rmtree(self.join(self.trash_path, timestamped_directory), True)
def mkdir(self, path, mode=None):
"""
mkdir(path, mode=None)
Creates a directory and any parent directory if necessary.
"""
path = Hdfs.normpath(path)
params = self._getparams()
params['op'] = 'MKDIRS'
if mode is None:
mode = self.getDefaultDirPerms()
params['permission'] = safe_octal(mode)
success = self._root.put(path, params)
if not success:
raise IOError(_("Mkdir failed: %s") % path)
def rename(self, old, new):
"""rename(old, new)"""
old = Hdfs.normpath(old)
if not new.startswith('/'):
new = Hdfs.join(Hdfs.dirname(old), new)
new = Hdfs.normpath(new)
params = self._getparams()
params['op'] = 'RENAME'
# Encode `new' because it's in the params
params['destination'] = smart_str(new)
result = self._root.put(old, params)
if not result['boolean']:
raise IOError(_("Rename failed: %s -> %s") %
(str(smart_str(old)), str(smart_str(new))))
def rename_star(self, old_dir, new_dir):
"""Equivalent to `mv old_dir/* new"""
if not self.isdir(old_dir):
raise IOError(errno.ENOTDIR, _("'%s' is not a directory") % old_dir)
if not self.exists(new_dir):
self.mkdir(new_dir)
elif not self.isdir(new_dir):
raise IOError(errno.ENOTDIR, _("'%s' is not a directory") % new_dir)
ls = self.listdir(old_dir)
for dirent in ls:
self.rename(Hdfs.join(old_dir, dirent), Hdfs.join(new_dir, dirent))
def chown(self, path, user=None, group=None, recursive=False):
"""chown(path, user=None, group=None, recursive=False)"""
path = Hdfs.normpath(path)
params = self._getparams()
params['op'] = 'SETOWNER'
if user is not None:
params['owner'] = user
if group is not None:
params['group'] = group
if recursive:
for xpath in self.listdir_recursive(path):
self._root.put(xpath, params)
else:
self._root.put(path, params)
def chmod(self, path, mode, recursive=False):
"""
chmod(path, mode, recursive=False)
`mode' should be an octal integer or string.
"""
path = Hdfs.normpath(path)
params = self._getparams()
params['op'] = 'SETPERMISSION'
params['permission'] = safe_octal(mode)
if recursive:
for xpath in self.listdir_recursive(path):
self._root.put(xpath, params)
else:
self._root.put(path, params)
def get_home_dir(self):
"""get_home_dir() -> Home directory for the current user"""
params = self._getparams()
params['op'] = 'GETHOMEDIRECTORY'
res = self._root.get(params=params)
return res['Path']
def read(self, path, offset, length, bufsize=None):
"""
read(path, offset, length[, bufsize]) -> data
Read data from a file.
"""
path = Hdfs.normpath(path)
params = self._getparams()
params['op'] = 'OPEN'
params['offset'] = long(offset)
params['length'] = long(length)
if bufsize is not None:
params['bufsize'] = bufsize
try:
return self._root.get(path, params)
except WebHdfsException, ex:
if "out of the range" in ex.message:
return ""
raise ex
def open(self, path, mode='r'):
"""
DEPRECATED!
open(path, mode='r') -> File object
This exists for legacy support and backwards compatibility only.
Please use read().
"""
return File(self, path, mode)
def getDefaultFilePerms(self):
return 0666 & (01777 ^ self.umask)
def getDefaultDirPerms(self):
return 01777 & (01777 ^ self.umask)
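# Worked example of the two derivations above, assuming the default umask of
# 01022 from __init__ (Python 2 octal literals):
#   01777 ^ 01022 = 00755
#   files:       0666  & 00755 = 0644
#   directories: 01777 & 00755 = 0755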
def create(self, path, overwrite=False, blocksize=None, replication=None, permission=None, data=None):
"""
create(path, overwrite=False, blocksize=None, replication=None, permission=None)
Creates a file with the specified parameters.
`permission' should be an octal integer or string.
"""
path = Hdfs.normpath(path)
params = self._getparams()
params['op'] = 'CREATE'
params['overwrite'] = overwrite and 'true' or 'false'
if blocksize is not None:
params['blocksize'] = long(blocksize)
if replication is not None:
params['replication'] = int(replication)
if permission is None:
permission = self.getDefaultFilePerms()
params['permission'] = safe_octal(permission)
self._invoke_with_redirect('PUT', path, params, data)
def append(self, path, data):
"""
append(path, data)
Append data to a given file.
"""
path = Hdfs.normpath(path)
params = self._getparams()
params['op'] = 'APPEND'
self._invoke_with_redirect('POST', path, params, data)
# e.g. ACLSPEC = user:joe:rwx,user::rw-
def modify_acl_entries(self, path, aclspec):
path = Hdfs.normpath(path)
params = self._getparams()
params['op'] = 'MODIFYACLENTRIES'
params['aclspec'] = aclspec
return self._root.put(path, params)
def remove_acl_entries(self, path, aclspec):
path = Hdfs.normpath(path)
params = self._getparams()
params['op'] = 'REMOVEACLENTRIES'
params['aclspec'] = aclspec
return self._root.put(path, params)
def remove_default_acl(self, path):
path = Hdfs.normpath(path)
params = self._getparams()
params['op'] = 'REMOVEDEFAULTACL'
return self._root.put(path, params)
def remove_acl(self, path):
path = Hdfs.normpath(path)
params = self._getparams()
params['op'] = 'REMOVEACL'
return self._root.put(path, params)
def set_acl(self, path, aclspec):
path = Hdfs.normpath(path)
params = self._getparams()
params['op'] = 'SETACL'
params['aclspec'] = aclspec
return self._root.put(path, params)
def get_acl_status(self, path):
path = Hdfs.normpath(path)
params = self._getparams()
params['op'] = 'GETACLSTATUS'
return self._root.get(path, params)
def copyfile(self, src, dst, skip_header=False):
sb = self._stats(src)
if sb is None:
raise IOError(errno.ENOENT, _("Copy src '%s' does not exist") % src)
if sb.isDir:
raise IOError(errno.EINVAL, _("Copy src '%s' is a directory") % src)
if self.isdir(dst):
raise IOError(errno.EINVAL, _("Copy dst '%s' is a directory") % dst)
offset = 0
while True:
data = self.read(src, offset, UPLOAD_CHUNK_SIZE.get())
if offset == 0:
if skip_header:
n = data.index('\n')
if n > 0:
data = data[n + 1:]
self.create(dst,
overwrite=True,
blocksize=sb.blockSize,
replication=sb.replication,
permission=oct(stat.S_IMODE(sb.mode)),
data=data)
if offset != 0:
self.append(dst, data)
cnt = len(data)
if cnt < UPLOAD_CHUNK_SIZE.get():
break
offset += cnt
def copy_remote_dir(self, source, destination, dir_mode=None, owner=None):
if owner is None:
owner = self.DEFAULT_USER
if dir_mode is None:
dir_mode = self.getDefaultDirPerms()
self.do_as_user(owner, self.mkdir, destination, mode=dir_mode)
for stat in self.listdir_stats(source):
source_file = stat.path
destination_file = posixpath.join(destination, stat.name)
if stat.isDir:
self.copy_remote_dir(source_file, destination_file, dir_mode, owner)
else:
self.do_as_user(owner, self.copyfile, source_file, destination_file)
self.do_as_superuser(self.chown, destination_file, owner, owner)
def copy(self, src, dest, recursive=False, dir_mode=None, owner=None):
"""
Copy file, or directory, in HDFS to another location in HDFS.
``src`` -- The directory, or file, to copy from.
``dest`` -- the directory, or file, to copy to.
If 'dest' is a directory that exists, copy 'src' into dest.
If 'dest' is a file that exists and 'src' is a file, overwrite dest.
If 'dest' does not exist, create 'src' as 'dest'.
``recursive`` -- Recursively copy contents of 'src' to 'dest'.
This is required for directories.
``dir_mode`` and ``owner`` are used to define permissions on the newly
copied files and directories.
This method will overwrite any pre-existing files that collide with what is being copied.
Copying a directory to a file is not allowed.
"""
if owner is None:
owner = self.user
# Hue was defaulting permissions on copied files to the permissions
# of the original file, but was not doing the same for directories;
# changed below so that directories remain consistent.
if dir_mode is None:
sb = self._stats(src)
dir_mode=oct(stat.S_IMODE(sb.mode))
src = self.abspath(src)
dest = self.abspath(dest)
if not self.exists(src):
raise IOError(errno.ENOENT, _("File not found: %s") % src)
if self.isdir(src):
# 'src' is directory.
# Skip if not recursive copy and 'src' is directory.
if not recursive:
LOG.debug("Skipping contents of %s" % src)
return None
# If 'dest' is a directory change 'dest'
# to include 'src' basename.
# create 'dest' if it doesn't already exist.
if self.exists(dest):
if self.isdir(dest):
dest = self.join(dest, self.basename(src))
else:
raise IOError(errno.EEXIST, _("Destination file %s exists and is not a directory.") % dest)
self.do_as_user(owner, self.mkdir, dest, mode=dir_mode)
# Copy files in 'src' directory to 'dest'.
self.copy_remote_dir(src, dest, dir_mode, owner)
else:
# 'src' is a file.
# If 'dest' is a directory, then copy 'src' into that directory.
# Other wise, copy to 'dest'.
if self.exists(dest) and self.isdir(dest):
self.copyfile(src, self.join(dest, self.basename(src)))
else:
self.copyfile(src, dest)
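# Minimal sketch of the copy semantics described in the docstring above
# (paths are hypothetical; assumes /backups already exists as a directory):
#   fs.copy('/data/in.csv', '/backups')                 # -> /backups/in.csv
#   fs.copy('/data/logs', '/backups', recursive=True)   # -> /backups/logs/... (directories need recursive=True)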
@staticmethod
def urlsplit(url):
return Hdfs.urlsplit(url)
def get_hdfs_path(self, path):
return posixpath.join(self.fs_defaultfs, path.lstrip('/'))
def _invoke_with_redirect(self, method, path, params=None, data=None):
"""
Issue a request, and expect a redirect, and then submit the data to
the redirected location. This is used for create, write, etc.
Returns the response from the redirected request.
"""
next_url = None
try:
# Do not pass data in the first leg.
self._root.invoke(method, path, params)
except WebHdfsException, ex:
# This is expected. We get a 307 redirect.
# The following call may throw.
next_url = self._get_redirect_url(ex)
if next_url is None:
raise WebHdfsException(
_("Failed to create '%s'. HDFS did not return a redirect") % path)
# Now talk to the real thing. The redirect url already includes the params.
client = self._make_client(next_url, self.security_enabled)
headers = {'Content-Type': 'application/octet-stream'}
return resource.Resource(client).invoke(method, data=data, headers=headers)
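# Illustrative two-hop exchange performed by _invoke_with_redirect for a create
# (host names, ports and the redirect target are hypothetical):
#   PUT http://httpfs.example.com:14000/webhdfs/v1/tmp/f?op=CREATE&user.name=hue&doas=bob
#     -> 307 Temporary Redirect, Location: http://dn1.example.com:50075/webhdfs/v1/tmp/f?op=CREATE&...
#   PUT http://dn1.example.com:50075/webhdfs/v1/tmp/f?op=CREATE&...  (body: the file data)
#     -> 201 Created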
def _get_redirect_url(self, webhdfs_ex):
"""Retrieve the redirect url from an exception object"""
try:
# The actual HttpError (307) is wrapped inside the WebHdfsException.
http_error = webhdfs_ex.get_parent_ex()
if http_error is None:
raise webhdfs_ex
if http_error.response.status_code not in (301, 302, 303, 307):
LOG.error("Response is not a redirect: %s" % webhdfs_ex)
raise webhdfs_ex
return http_error.response.headers['location']
except Exception, ex:
LOG.error("Failed to read redirect from response: %s (%s)" %
(webhdfs_ex, ex))
raise webhdfs_ex
def get_delegation_token(self, renewer):
"""get_delegation_token(user) -> Delegation token"""
# Workaround for HDFS-3988
if self._security_enabled:
self.get_home_dir()
params = self._getparams()
params['op'] = 'GETDELEGATIONTOKEN'
params['renewer'] = renewer
res = self._root.get(params=params)
return res['Token']['urlString']
def do_as_user(self, username, fn, *args, **kwargs):
prev_user = self.user
try:
self.setuser(username)
return fn(*args, **kwargs)
finally:
self.setuser(prev_user)
def do_as_superuser(self, fn, *args, **kwargs):
return self.do_as_user(self.superuser, fn, *args, **kwargs)
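# Minimal sketch of the impersonation helpers above (user names and paths are
# hypothetical; assumes an already configured WebHdfs instance `fs`):
#   fs.do_as_user('bob', fs.mkdir, '/user/bob/reports')
#   fs.do_as_superuser(fs.chown, '/user/bob/reports', 'bob', 'bob')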
def do_recursively(self, fn, path, *args, **kwargs):
for stat in self.listdir_stats(path):
try:
if stat.isDir:
self.do_recursively(fn, stat.path, *args, **kwargs)
fn(stat.path, *args, **kwargs)
except Exception:
pass
class File(object):
"""
DEPRECATED!
Represent an open file on HDFS. This exists to mirror the old thriftfs
interface, for backwards compatibility only.
"""
def __init__(self, fs, path, mode='r'):
self._fs = fs
self._path = normpath(path)
self._pos = 0
self._mode = mode
try:
self._stat = fs.stats(path)
if self._stat.isDir:
raise IOError(errno.EISDIR, _("Is a directory: '%s'") % path)
except IOError, ex:
if ex.errno == errno.ENOENT and 'w' in self._mode:
self._fs.create(self._path)
self.stat()
else:
raise ex
def seek(self, offset, whence=0):
"""Set the file pointer to the given spot. @see file.seek"""
if whence == SEEK_SET:
self._pos = offset
elif whence == SEEK_CUR:
self._pos += offset
elif whence == SEEK_END:
self.stat()
self._pos = self._fs.stats(self._path).size + offset
else:
raise IOError(errno.EINVAL, _("Invalid argument to seek for whence"))
def stat(self):
self._stat = self._fs.stats(self._path)
return self._stat
def tell(self):
return self._pos
def read(self, length=DEFAULT_READ_SIZE):
data = self._fs.read(self._path, self._pos, length)
self._pos += len(data)
return data
def write(self, data):
"""Append the data to the end of the file"""
self.append(data)
def append(self, data):
if 'w' not in self._mode:
raise IOError(errno.EINVAL, _("File not open for writing"))
self._fs.append(self._path, data=data)
def flush(self):
pass
def close(self):
pass
def safe_octal(octal_value):
"""
safe_octal(octal_value) -> octal value in string
This correctly handles octal values specified as a string or as a numeric.
"""
try:
return oct(octal_value)
except TypeError:
return str(octal_value)
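# A couple of illustrative conversions (Python 2 semantics assumed):
#   safe_octal(0644)  -> '0644'
#   safe_octal('644') -> '644'   (strings pass through unchanged)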
def _get_service_url(hdfs_config):
override = hdfs_config.WEBHDFS_URL.get()
if override:
return override
fs_defaultfs = hdfs_config.FS_DEFAULTFS.get()
netloc = Hdfs.urlsplit(fs_defaultfs)[1]
host = netloc.split(':')[0]
port = hadoop.conf.DEFAULT_NN_HTTP_PORT
return "http://%s:%s/webhdfs/v1" % (host, port)
def test_fs_configuration(fs_config):
"""
This is a config validation method. Returns a list of
[ (config_variable, error_message) ]
"""
fs = WebHdfs.from_config(fs_config)
fs.setuser(fs.superuser)
# Access root
try:
statbuf = fs.stats('/')
if statbuf.user != DEFAULT_HDFS_SUPERUSER:
return [(fs_config.WEBHDFS_URL, _("Filesystem root '/' should be owned by 'hdfs'"))]
except Exception, ex:
LOG.info("%s -- Validation error: %s" % (fs, ex))
return [(fs_config.WEBHDFS_URL, _('Failed to access filesystem root'))]
# Write a file
tmpname = fs.mktemp(prefix='hue_config_validation')
try:
fs.create(tmpname)
except Exception, ex:
LOG.info("%s -- Validation error: %s" % (fs, ex))
return [(fs_config.WEBHDFS_URL,
_('Failed to create temporary file "%s"') % tmpname)]
# Check superuser has super power
try:
try:
fs.chown(tmpname, fs.superuser)
except Exception, ex:
LOG.info("%s -- Validation error: %s" % (fs, ex))
return [(fs_config.WEBHDFS_URL,
'Failed to chown file. Please make sure that the filesystem root '
'is owned by the cluster superuser ("hdfs" in most cases).')]
finally:
try:
fs.remove(tmpname)
except Exception, ex:
LOG.error("Failed to remove '%s': %s" % (tmpname, ex))
return [(fs_config.WEBHDFS_URL,
_('Failed to remove temporary file "%s"') % tmpname)]
return [ ]
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2020
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the PicklePersistence class."""
import pickle
from collections import defaultdict
from copy import deepcopy
from typing import Any, DefaultDict, Dict, Optional, Tuple
from telegram.ext import BasePersistence
from telegram.utils.types import ConversationDict
class PicklePersistence(BasePersistence):
"""Using python's builtin pickle for making you bot persistent.
Warning:
:class:`PicklePersistence` will try to replace :class:`telegram.Bot` instances by
:attr:`REPLACED_BOT` and insert the bot set with
:meth:`telegram.ext.BasePersistence.set_bot` upon loading of the data. This is to ensure
that changes to the bot apply to the saved objects, too. If you change the bot's token, this
may lead to e.g. ``Chat not found`` errors. For the limitations on replacing bots see
:meth:`telegram.ext.BasePersistence.replace_bot` and
:meth:`telegram.ext.BasePersistence.insert_bot`.
Attributes:
filename (:obj:`str`): The filename for storing the pickle files. When :attr:`single_file`
is :obj:`False` this will be used as a prefix.
store_user_data (:obj:`bool`): Optional. Whether user_data should be saved by this
persistence class.
store_chat_data (:obj:`bool`): Optional. Whether chat_data should be saved by this
persistence class.
store_bot_data (:obj:`bool`): Optional. Whether bot_data should be saved by this
persistence class.
single_file (:obj:`bool`): Optional. When :obj:`False` will store 4 separate files of
`filename_user_data`, `filename_chat_data`, `filename_bot_data` and
`filename_conversations`. Default is :obj:`True`.
on_flush (:obj:`bool`, optional): When :obj:`True` will only save to file when
:meth:`flush` is called and keep data in memory until that happens. When
:obj:`False` will store data on any transaction *and* on call to :meth:`flush`.
Default is :obj:`False`.
Args:
filename (:obj:`str`): The filename for storing the pickle files. When :attr:`single_file`
is :obj:`False` this will be used as a prefix.
store_user_data (:obj:`bool`, optional): Whether user_data should be saved by this
persistence class. Default is :obj:`True`.
store_chat_data (:obj:`bool`, optional): Whether chat_data should be saved by this
persistence class. Default is :obj:`True`.
store_bot_data (:obj:`bool`, optional): Whether bot_data should be saved by this
persistence class. Default is :obj:`True`.
single_file (:obj:`bool`, optional): When :obj:`False` will store 4 separate files of
`filename_user_data`, `filename_chat_data`, `filename_bot_data` and
`filename_conversations`. Default is :obj:`True`.
on_flush (:obj:`bool`, optional): When :obj:`True` will only save to file when
:meth:`flush` is called and keep data in memory until that happens. When
:obj:`False` will store data on any transaction *and* on call to :meth:`flush`.
Default is :obj:`False`.
"""
def __init__(
self,
filename: str,
store_user_data: bool = True,
store_chat_data: bool = True,
store_bot_data: bool = True,
single_file: bool = True,
on_flush: bool = False,
):
super().__init__(
store_user_data=store_user_data,
store_chat_data=store_chat_data,
store_bot_data=store_bot_data,
)
self.filename = filename
self.single_file = single_file
self.on_flush = on_flush
self.user_data: Optional[DefaultDict[int, Dict]] = None
self.chat_data: Optional[DefaultDict[int, Dict]] = None
self.bot_data: Optional[Dict] = None
self.conversations: Optional[Dict[str, Dict[Tuple, Any]]] = None
def load_singlefile(self) -> None:
try:
filename = self.filename
with open(self.filename, "rb") as file:
data = pickle.load(file)
self.user_data = defaultdict(dict, data['user_data'])
self.chat_data = defaultdict(dict, data['chat_data'])
# For backwards compatibility with files not containing bot data
self.bot_data = data.get('bot_data', {})
self.conversations = data['conversations']
except IOError:
self.conversations = dict()
self.user_data = defaultdict(dict)
self.chat_data = defaultdict(dict)
self.bot_data = {}
except pickle.UnpicklingError as exc:
raise TypeError(f"File {filename} does not contain valid pickle data") from exc
except Exception as exc:
raise TypeError(f"Something went wrong unpickling {filename}") from exc
@staticmethod
def load_file(filename: str) -> Any:
try:
with open(filename, "rb") as file:
return pickle.load(file)
except IOError:
return None
except pickle.UnpicklingError as exc:
raise TypeError(f"File {filename} does not contain valid pickle data") from exc
except Exception as exc:
raise TypeError(f"Something went wrong unpickling {filename}") from exc
def dump_singlefile(self) -> None:
with open(self.filename, "wb") as file:
data = {
'conversations': self.conversations,
'user_data': self.user_data,
'chat_data': self.chat_data,
'bot_data': self.bot_data,
}
pickle.dump(data, file)
@staticmethod
def dump_file(filename: str, data: Any) -> None:
with open(filename, "wb") as file:
pickle.dump(data, file)
def get_user_data(self) -> DefaultDict[int, Dict[Any, Any]]:
"""Returns the user_data from the pickle file if it exists or an empty :obj:`defaultdict`.
Returns:
:obj:`defaultdict`: The restored user data.
"""
if self.user_data:
pass
elif not self.single_file:
filename = f"{self.filename}_user_data"
data = self.load_file(filename)
if not data:
data = defaultdict(dict)
else:
data = defaultdict(dict, data)
self.user_data = data
else:
self.load_singlefile()
return deepcopy(self.user_data) # type: ignore[arg-type]
def get_chat_data(self) -> DefaultDict[int, Dict[Any, Any]]:
"""Returns the chat_data from the pickle file if it exists or an empty :obj:`defaultdict`.
Returns:
:obj:`defaultdict`: The restored chat data.
"""
if self.chat_data:
pass
elif not self.single_file:
filename = f"{self.filename}_chat_data"
data = self.load_file(filename)
if not data:
data = defaultdict(dict)
else:
data = defaultdict(dict, data)
self.chat_data = data
else:
self.load_singlefile()
return deepcopy(self.chat_data) # type: ignore[arg-type]
def get_bot_data(self) -> Dict[Any, Any]:
"""Returns the bot_data from the pickle file if it exists or an empty :obj:`dict`.
Returns:
:obj:`dict`: The restored bot data.
"""
if self.bot_data:
pass
elif not self.single_file:
filename = f"{self.filename}_bot_data"
data = self.load_file(filename)
if not data:
data = {}
self.bot_data = data
else:
self.load_singlefile()
return deepcopy(self.bot_data) # type: ignore[arg-type]
def get_conversations(self, name: str) -> ConversationDict:
"""Returns the conversations from the pickle file if it exsists or an empty dict.
Args:
name (:obj:`str`): The handlers name.
Returns:
:obj:`dict`: The restored conversations for the handler.
"""
if self.conversations:
pass
elif not self.single_file:
filename = f"{self.filename}_conversations"
data = self.load_file(filename)
if not data:
data = {name: {}}
self.conversations = data
else:
self.load_singlefile()
return self.conversations.get(name, {}).copy() # type: ignore[union-attr]
def update_conversation(
self, name: str, key: Tuple[int, ...], new_state: Optional[object]
) -> None:
"""Will update the conversations for the given handler and depending on :attr:`on_flush`
save the pickle file.
Args:
name (:obj:`str`): The handler's name.
key (:obj:`tuple`): The key the state is changed for.
new_state (:obj:`tuple` | :obj:`any`): The new state for the given key.
"""
if not self.conversations:
self.conversations = dict()
if self.conversations.setdefault(name, {}).get(key) == new_state:
return
self.conversations[name][key] = new_state
if not self.on_flush:
if not self.single_file:
filename = f"{self.filename}_conversations"
self.dump_file(filename, self.conversations)
else:
self.dump_singlefile()
def update_user_data(self, user_id: int, data: Dict) -> None:
"""Will update the user_data and depending on :attr:`on_flush` save the pickle file.
Args:
user_id (:obj:`int`): The user the data might have been changed for.
data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.user_data` [user_id].
"""
if self.user_data is None:
self.user_data = defaultdict(dict)
if self.user_data.get(user_id) == data:
return
self.user_data[user_id] = data
if not self.on_flush:
if not self.single_file:
filename = f"{self.filename}_user_data"
self.dump_file(filename, self.user_data)
else:
self.dump_singlefile()
def update_chat_data(self, chat_id: int, data: Dict) -> None:
"""Will update the chat_data and depending on :attr:`on_flush` save the pickle file.
Args:
chat_id (:obj:`int`): The chat the data might have been changed for.
data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.chat_data` [chat_id].
"""
if self.chat_data is None:
self.chat_data = defaultdict(dict)
if self.chat_data.get(chat_id) == data:
return
self.chat_data[chat_id] = data
if not self.on_flush:
if not self.single_file:
filename = f"{self.filename}_chat_data"
self.dump_file(filename, self.chat_data)
else:
self.dump_singlefile()
def update_bot_data(self, data: Dict) -> None:
"""Will update the bot_data and depending on :attr:`on_flush` save the pickle file.
Args:
data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.bot_data`.
"""
if self.bot_data == data:
return
self.bot_data = data.copy()
if not self.on_flush:
if not self.single_file:
filename = f"{self.filename}_bot_data"
self.dump_file(filename, self.bot_data)
else:
self.dump_singlefile()
def flush(self) -> None:
"""Will save all data in memory to pickle file(s)."""
if self.single_file:
if self.user_data or self.chat_data or self.bot_data or self.conversations:
self.dump_singlefile()
else:
if self.user_data:
self.dump_file(f"{self.filename}_user_data", self.user_data)
if self.chat_data:
self.dump_file(f"{self.filename}_chat_data", self.chat_data)
if self.bot_data:
self.dump_file(f"{self.filename}_bot_data", self.bot_data)
if self.conversations:
self.dump_file(f"{self.filename}_conversations", self.conversations)
|
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import logging
from lineuzinho import Lineuzinho
def main():
logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s [%(levelname)s]: %(message)s', level=logging.INFO)
lineuzinho = Lineuzinho()
updater = Updater(lineuzinho.API_TOKEN)
dp = updater.dispatcher
dp.add_handler(CommandHandler("start", lineuzinho.start))
dp.add_handler(CommandHandler("links", lineuzinho.getGeneralRelevantLinks))
dp.add_handler(CommandHandler("repo", lineuzinho.getRepo))
dp.add_handler(CommandHandler("contatinhos", lineuzinho.getContatinhosLink))
dp.add_handler(CommandHandler("feijao", lineuzinho.getBeanFlavor))
dp.add_handler(CommandHandler("docs", lineuzinho.getDocsChannel))
dp.add_handler(CommandHandler("save", lineuzinho.saveMessage))
dp.add_handler(CommandHandler("help", lineuzinho.getHelpText))
dp.add_handler(CommandHandler("pi_rank", lineuzinho.getPiRanking))
dp.add_handler(CommandHandler("pi_index", lineuzinho.publishUserPiRanking))
dp.add_handler(CommandHandler("birthday", lineuzinho.getBirthdaySongAudio))
dp.add_handler(CommandHandler("beni", lineuzinho.getBeniSongAudio))
dp.add_handler(CommandHandler("ain", lineuzinho.getRandomAin))
dp.add_handler(CommandHandler("grupos", lineuzinho.getSubjectsGroupsLinks))
dp.add_handler(CommandHandler("meet", lineuzinho.getSubjectMeetLinks))
dp.add_handler(MessageHandler(Filters.text, lineuzinho.agiotar))
dp.add_handler(MessageHandler(Filters.status_update.new_chat_members, lineuzinho.greet))
updater.start_polling()
logging.info("=== Lineuzinho up&running! ===")
updater.idle()
logging.info("=== Lineuzinho shutting down :( ===")
if __name__ == "__main__":
main()
|
# Goal: get ebola/Lassa for Bonnie's plasma samples.
# Simple clean and merge
import pandas as pd
import os
os.chdir("/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/")
import helpers
df = pd.read_excel("/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/input_data/sample_rosters/one_offs/CViSB Plasma Samples_Bonnie_2019-06-03.xlsx")
df.shape
df['privatePatientID'] = df["Sample ID"].apply(helpers.interpretID)
# id dictionary
ids = pd.read_json("/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/output_data/patients/patients_2019-06-03_PRIVATE_dict.json")
ids.reset_index(inplace=True)
ids.head()
merged = pd.merge(df, ids, how="left", left_on="privatePatientID", right_on="index", indicator=True)
merged._merge.value_counts()
merged[merged._merge == "left_only"]
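# The rows flagged "left_only" above are roster samples whose interpreted
# private patient ID has no match in the patient ID dictionary; they are
# surfaced here for manual review before trimming and exporting the merge.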
merged = merged[['Sample ID', "Date of collection", "Sample type", "cohort", "elisa", "sID", "gID", "patientID"]]
merged.to_csv("/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/input_data/sample_rosters/one_offs/2019-06-03_CViSBplasma_Bonnie.csv", index = False)
|
#!/usr/bin/env python
#
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
#
##
# Title :run-pmi-diffs.py
#
# Notes:
#
# TODO: Instead of downloading and extracting the dotnet CLI, can we convert
# to using init-tools.cmd/sh and the Tools/dotnetcli "last known good"
# version? (This maybe should be done for format.py as well.)
#
# Script to automate running PMI diffs on a pull request
#
##########################################################################
##########################################################################
import argparse
import distutils.dir_util
import os
import re
import shutil
import subprocess
import urllib
import sys
import tarfile
import zipfile
# Version specific imports
if sys.version_info.major < 3:
import urllib
else:
import urllib.request
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), "scripts"))
from coreclr_arguments import *
##########################################################################
# Globals
##########################################################################
testing = False
Coreclr_url = 'https://github.com/dotnet/coreclr.git'
Jitutils_url = 'https://github.com/dotnet/jitutils.git'
# The Docker file and possibly options should be hoisted out to a text file to be shared between scripts.
Docker_name_arm32 = 'mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-14.04-cross-e435274-20180426002420'
Docker_opts_arm32 = '-e ROOTFS_DIR=/crossrootfs/arm'
Docker_name_arm64 = 'mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-16.04-cross-arm64-a3ae44b-20180315221921'
Docker_opts_arm64 = '-e ROOTFS_DIR=/crossrootfs/arm64'
Is_illumos = ('illumos' in subprocess.Popen(["uname", "-o"], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0].decode('utf-8'))
# This should be factored out of build.sh
Unix_name_map = {
'Linux': 'Linux',
'Darwin': 'OSX',
'FreeBSD': 'FreeBSD',
'OpenBSD': 'OpenBSD',
'NetBSD': 'NetBSD',
'SunOS': 'illumos' if Is_illumos else 'Solaris'
}
Is_windows = (os.name == 'nt')
Clr_os = 'windows' if Is_windows else Unix_name_map[os.uname()[0]]
##########################################################################
# Delete protocol
##########################################################################
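# shutil.rmtree onerror handler: on Windows, read-only files cannot be removed
# directly, so re-permission the offending path and retry the removal.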
def del_rw(action, name, exc):
os.chmod(name, 0o651)
os.remove(name)
##########################################################################
# Argument Parser
##########################################################################
description = 'Tool to generate JIT assembly diffs from the CoreCLR repo'
parser = argparse.ArgumentParser(description=description)
# base_root is normally expected to be None, in which case we'll clone the
# coreclr tree and build it. If base_root is passed, we'll use it, and not
# clone or build the base.
parser.add_argument('-arch', dest='arch', default='x64')
parser.add_argument('-ci_arch', dest='ci_arch', default=None)
parser.add_argument('-build_type', dest='build_type', default='Checked')
parser.add_argument('-base_root', dest='base_root', default=None)
parser.add_argument('-diff_root', dest='diff_root', default=None)
parser.add_argument('-scratch_root', dest='scratch_root', default=None)
parser.add_argument('--skip_baseline_build', dest='skip_baseline_build', action='store_true', default=False)
parser.add_argument('--skip_diffs', dest='skip_diffs', action='store_true', default=False)
parser.add_argument('-target_branch', dest='target_branch', default='main')
parser.add_argument('-commit_hash', dest='commit_hash', default=None)
##########################################################################
# Class to change the current directory, and automatically restore the
# directory back to what it used to be, on exit.
##########################################################################
class ChangeDir:
def __init__(self, dir):
self.dir = dir
self.cwd = None
def __enter__(self):
self.cwd = os.getcwd()
log('[cd] %s' % self.dir)
if not testing:
os.chdir(self.dir)
def __exit__(self, exc_type, exc_val, exc_tb):
log('[cd] %s' % self.cwd)
if not testing:
os.chdir(self.cwd)
##########################################################################
# Helper Functions
##########################################################################
def validate_args(args):
""" Validate all of the arguments parsed.
Args:
args (argparser.ArgumentParser) : Args parsed by the argument parser.
Returns:
args (CoreclrArguments) : Args parsed
Notes:
If the arguments are valid then return them all in a tuple. If not,
raise an exception stating x argument is incorrect.
"""
coreclr_setup_args = CoreclrArguments(args,
require_built_test_dir=False,
require_built_core_root=True,
require_built_product_dir=False)
coreclr_setup_args.verify(args,
"base_root",
lambda directory: os.path.isdir(directory) if directory is not None else True,
"Base root is not a valid directory")
coreclr_setup_args.verify(args,
"diff_root",
lambda directory: os.path.isdir(directory) if directory is not None else True,
"Diff root is not a valid directory",
modify_arg=lambda directory: nth_dirname(os.path.abspath(sys.argv[0]), 3) if directory is None else os.path.abspath(directory))
coreclr_setup_args.verify(args,
"scratch_root",
lambda unused: True,
"Error setting scratch_root",
modify_arg=lambda directory: os.path.join(coreclr_setup_args.diff_root, '_', 'pmi') if directory is None else os.path.abspath(directory))
coreclr_setup_args.verify(args,
"skip_baseline_build",
lambda unused: True,
"Error setting baseline build")
coreclr_setup_args.verify(args,
"skip_diffs",
lambda unused: True,
"Error setting skip_diffs")
coreclr_setup_args.verify(args,
"target_branch",
lambda unused: True,
"Error setting target_branch")
coreclr_setup_args.verify(args,
"commit_hash",
lambda unused: True,
"Error setting commit_hash")
coreclr_setup_args.verify(args,
"ci_arch",
lambda ci_arch: ci_arch in coreclr_setup_args.valid_arches + ['x86_arm_altjit', 'x64_arm64_altjit'],
"Error setting ci_arch")
args = (
coreclr_setup_args.arch,
coreclr_setup_args.ci_arch,
coreclr_setup_args.build_type,
coreclr_setup_args.base_root,
coreclr_setup_args.diff_root,
coreclr_setup_args.scratch_root,
coreclr_setup_args.skip_baseline_build,
coreclr_setup_args.skip_diffs,
coreclr_setup_args.target_branch,
coreclr_setup_args.commit_hash
)
log('Configuration:')
log(' arch: %s' % coreclr_setup_args.arch)
log(' ci_arch: %s' % coreclr_setup_args.ci_arch)
log(' build_type: %s' % coreclr_setup_args.build_type)
log(' base_root: %s' % coreclr_setup_args.base_root)
log(' diff_root: %s' % coreclr_setup_args.diff_root)
log(' scratch_root: %s' % coreclr_setup_args.scratch_root)
log(' skip_baseline_build: %s' % coreclr_setup_args.skip_baseline_build)
log(' skip_diffs: %s' % coreclr_setup_args.skip_diffs)
log(' target_branch: %s' % coreclr_setup_args.target_branch)
log(' commit_hash: %s' % coreclr_setup_args.commit_hash)
return args
def nth_dirname(path, n):
""" Find the Nth parent directory of the given path
Args:
path (str): path name containing at least N components
n (int): num of basenames to remove
Returns:
outpath (str): path with the last n components removed
Notes:
If n is 0, path is returned unmodified
"""
assert n >= 0
for i in range(0, n):
path = os.path.dirname(path)
return path
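# Worked example (hypothetical path):
#   nth_dirname('/repos/coreclr/scripts/run-pmi-diffs.py', 3) -> '/repos'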
def log(message):
""" Print logging information
Args:
message (str): message to be printed
"""
print('[%s]: %s' % (sys.argv[0], message))
def copy_files(source_dir, target_dir):
""" Copy any files in the source_dir to the target_dir.
The copy is not recursive.
The directories must already exist.
Args:
source_dir (str): source directory path
target_dir (str): target directory path
Returns:
Nothing
"""
global testing
assert os.path.isdir(source_dir)
assert os.path.isdir(target_dir)
for source_filename in os.listdir(source_dir):
source_pathname = os.path.join(source_dir, source_filename)
if os.path.isfile(source_pathname):
target_pathname = os.path.join(target_dir, source_filename)
log('Copy: %s => %s' % (source_pathname, target_pathname))
if not testing:
shutil.copy2(source_pathname, target_pathname)
def run_command(command, command_env):
""" Run a command (process) in a given environment. stdout/stderr are output piped through.
Args:
command (array): the command to run, with components of the command as separate elements.
command_env (map): environment in which the command should be run
Returns:
The return code of the command.
"""
returncode = 0
log('Invoking: %s' % (' '.join(command)))
if not testing:
proc = subprocess.Popen(command, env=command_env)
output,error = proc.communicate()
returncode = proc.returncode
if returncode != 0:
log('Return code = %s' % returncode)
return returncode
##########################################################################
# Do baseline build:
# 1. determine appropriate commit,
# 2. clone coreclr,
# 3. do build
##########################################################################
def baseline_build():
if not testing:
if os.path.isdir(baseCoreClrPath):
log('Removing existing tree: %s' % baseCoreClrPath)
shutil.rmtree(baseCoreClrPath, onerror=del_rw)
# Find the baseline commit
# Clone at that commit
command = 'git clone -b %s --single-branch %s %s' % (
target_branch, Coreclr_url, baseCoreClrPath)
log(command)
returncode = 0 if testing else os.system(command)
if returncode != 0:
log('ERROR: git clone failed')
return 1
# Change directory to the baseline root
with ChangeDir(baseCoreClrPath):
# Set up for possible docker usage
scriptPath = '.'
buildOpts = ''
dockerCmd = ''
if not Is_windows and (arch == 'arm' or arch == 'arm64'):
# Linux arm and arm64 builds are cross-compilation builds using Docker.
if arch == 'arm':
dockerFile = Docker_name_arm32
dockerOpts = Docker_opts_arm32
else:
# arch == 'arm64'
dockerFile = Docker_name_arm64
dockerOpts = Docker_opts_arm64
dockerCmd = 'docker run -i --rm -v %s:%s -w %s %s %s ' % (baseCoreClrPath, baseCoreClrPath, baseCoreClrPath, dockerOpts, dockerFile)
buildOpts = 'cross'
scriptPath = baseCoreClrPath
# Build a checked baseline jit
if Is_windows:
command = 'set __TestIntermediateDir=int&&build.cmd %s checked skiptests skipbuildpackages' % arch
else:
command = '%s%s/build.sh %s checked skipbuildpackages %s' % (dockerCmd, scriptPath, arch, buildOpts)
log(command)
returncode = 0 if testing else os.system(command)
if returncode != 0:
log('ERROR: build failed')
return 1
# Build the layout (Core_Root) directory
# For Windows, invoke build-test.cmd to restore packages before generating the layout.
if Is_windows:
command = 'build-test.cmd %s %s skipmanaged skipnative' % (build_type, arch)
log(command)
returncode = 0 if testing else os.system(command)
if returncode != 0:
log('ERROR: restoring packages failed')
return 1
if Is_windows:
command = 'tests\\runtest.cmd %s checked GenerateLayoutOnly' % arch
else:
command = '%s%s/build-test.sh %s checked generatelayoutonly' % (dockerCmd, scriptPath, arch)
log(command)
returncode = 0 if testing else os.system(command)
if returncode != 0:
log('ERROR: generating layout failed')
return 1
return 0
##########################################################################
# Do PMI diff run:
# 1. download dotnet CLI (needed by jitutils)
# 2. clone jitutils repo
# 3. build jitutils
# 4. run PMI asm generation on baseline and diffs
# 5. run jit-analyze to compare baseline and diff
##########################################################################
def do_pmi_diffs():
global baseCoreClrPath
# Setup scratch directories. Names are short to avoid path length problems on Windows.
dotnetcliPath = os.path.abspath(os.path.join(scratch_root, 'cli'))
jitutilsPath = os.path.abspath(os.path.join(scratch_root, 'jitutils'))
asmRootPath = os.path.abspath(os.path.join(scratch_root, 'asm'))
dotnet_tool = 'dotnet.exe' if Is_windows else 'dotnet'
# Make sure the temporary directories do not exist. If they do already, delete them.
if not testing:
# If we can't delete the dotnet tree, it might be because a previous run failed or was
# cancelled, and the build servers are still running. Try to stop it if that happens.
if os.path.isdir(dotnetcliPath):
try:
log('Removing existing tree: %s' % dotnetcliPath)
shutil.rmtree(dotnetcliPath, onerror=del_rw)
except OSError:
if os.path.isfile(os.path.join(dotnetcliPath, dotnet_tool)):
log('Failed to remove existing tree; trying to shutdown the dotnet build servers before trying again.')
# Looks like the dotnet tool is still there; try to run it to shut down the build servers.
temp_env = my_env
temp_env["PATH"] = dotnetcliPath + os.pathsep + my_env["PATH"]
log('Shutting down build servers')
command = ["dotnet", "build-server", "shutdown"]
returncode = run_command(command, temp_env)
# Try again
log('Trying again to remove existing tree: %s' % dotnetcliPath)
shutil.rmtree(dotnetcliPath, onerror=del_rw)
else:
log('Failed to remove existing tree')
return 1
if os.path.isdir(jitutilsPath):
log('Removing existing tree: %s' % jitutilsPath)
shutil.rmtree(jitutilsPath, onerror=del_rw)
if os.path.isdir(asmRootPath):
log('Removing existing tree: %s' % asmRootPath)
shutil.rmtree(asmRootPath, onerror=del_rw)
try:
os.makedirs(dotnetcliPath)
os.makedirs(jitutilsPath)
os.makedirs(asmRootPath)
except OSError:
if not os.path.isdir(dotnetcliPath):
log('ERROR: cannot create CLI install directory %s' % dotnetcliPath)
return 1
if not os.path.isdir(jitutilsPath):
log('ERROR: cannot create jitutils install directory %s' % jitutilsPath)
return 1
if not os.path.isdir(asmRootPath):
log('ERROR: cannot create asm directory %s' % asmRootPath)
return 1
log('dotnet CLI install directory: %s' % dotnetcliPath)
log('jitutils install directory: %s' % jitutilsPath)
log('asm directory: %s' % asmRootPath)
# Download .NET CLI
log('Downloading .NET CLI')
dotnetcliUrl = ""
dotnetcliFilename = ""
if Clr_os == 'Linux' and arch == 'x64':
dotnetcliUrl = "https://dotnetcli.azureedge.net/dotnet/Sdk/2.1.402/dotnet-sdk-2.1.402-linux-x64.tar.gz"
elif Clr_os == 'Linux' and arch == 'arm':
dotnetcliUrl = "https://dotnetcli.blob.core.windows.net/dotnet/Sdk/release/2.1.4xx/dotnet-sdk-latest-linux-arm.tar.gz"
elif Clr_os == 'Linux' and arch == 'arm64':
# Use the latest (3.0) dotnet SDK. Earlier versions don't work.
dotnetcliUrl = "https://dotnetcli.blob.core.windows.net/dotnet/Sdk/master/dotnet-sdk-latest-linux-arm64.tar.gz"
elif Clr_os == 'OSX':
dotnetcliUrl = "https://dotnetcli.azureedge.net/dotnet/Sdk/2.1.402/dotnet-sdk-2.1.402-osx-x64.tar.gz"
elif Clr_os == 'windows':
dotnetcliUrl = "https://dotnetcli.azureedge.net/dotnet/Sdk/2.1.402/dotnet-sdk-2.1.402-win-x64.zip"
else:
log('ERROR: unknown or unsupported OS (%s) architecture (%s) combination' % (Clr_os, arch))
return 1
if Is_windows:
dotnetcliFilename = os.path.join(dotnetcliPath, 'dotnetcli-jitutils.zip')
else:
dotnetcliFilename = os.path.join(dotnetcliPath, 'dotnetcli-jitutils.tar.gz')
log('Downloading: %s => %s' % (dotnetcliUrl, dotnetcliFilename))
if not testing:
urlretrieve = urllib.urlretrieve if sys.version_info.major < 3 else urllib.request.urlretrieve
urlretrieve(dotnetcliUrl, dotnetcliFilename)
if not os.path.isfile(dotnetcliFilename):
log('ERROR: Did not download .NET CLI')
return 1
# Install .NET CLI
log('Unpacking .NET CLI')
if not testing:
if Is_windows:
with zipfile.ZipFile(dotnetcliFilename, "r") as z:
z.extractall(dotnetcliPath)
else:
tar = tarfile.open(dotnetcliFilename)
tar.extractall(dotnetcliPath)
tar.close()
if not os.path.isfile(os.path.join(dotnetcliPath, dotnet_tool)):
log('ERROR: did not extract .NET CLI from download')
return 1
# Add dotnet CLI to PATH we'll use to spawn processes.
log('Add %s to my PATH' % dotnetcliPath)
my_env["PATH"] = dotnetcliPath + os.pathsep + my_env["PATH"]
# To aid diagnosing problems, do "dotnet --info" to output to any capturing logfile.
command = ["dotnet", "--info"]
returncode = run_command(command, my_env)
# Clone jitutils
command = 'git clone -b main --single-branch %s %s' % (Jitutils_url, jitutilsPath)
log(command)
returncode = 0 if testing else os.system(command)
if returncode != 0:
log('ERROR: cannot clone jitutils')
return 1
# We're going to start running dotnet CLI commands. Unfortunately, once you've done that,
# the dotnet CLI sticks around with a set of build server processes running. Put all this
# in a try/finally, and stop the build servers under any circumstance.
try:
#
# Build jitutils, including "dotnet restore"
#
# Change directory to the jitutils root
with ChangeDir(jitutilsPath):
# Do "dotnet restore"
command = ["dotnet", "restore"]
returncode = run_command(command, my_env)
# Do build
command = ['build.cmd', '-p'] if Is_windows else ['bash', './build.sh', '-p']
returncode = run_command(command, my_env)
if returncode != 0:
log('ERROR: jitutils build failed')
return 1
jitutilsBin = os.path.join(jitutilsPath, "bin")
if not testing and not os.path.isdir(jitutilsBin):
log("ERROR: jitutils not correctly built")
return 1
jitDiffPath = os.path.join(jitutilsBin, "jit-diff.dll")
if not testing and not os.path.isfile(jitDiffPath):
log("ERROR: jit-diff.dll not built")
return 1
jitAnalyzePath = os.path.join(jitutilsBin, "jit-analyze.dll")
if not testing and not os.path.isfile(jitAnalyzePath):
log("ERROR: jit-analyze.dll not built")
return 1
# Add jitutils bin to path for spawned processes
log('Add %s to my PATH' % jitutilsBin)
my_env["PATH"] = jitutilsBin + os.pathsep + my_env["PATH"]
#
# Run PMI asm diffs
#
# We want this script as a whole to return 0 if it succeeds (even if there are diffs) and only
# return non-zero if there are any fatal errors.
#
# TO DO: figure out how to differentiate fatal errors and a return code indicating there are diffs,
# and have the invoking netci.groovy code act differently for each case.
# Generate the diffs
#
# Invoke command like:
# dotnet c:\gh\jitutils\artifacts\jit-diff.dll diff --pmi --base --base_root f:\gh\coreclr12 --diff --diff_root f:\gh\coreclr10 --arch x64 --build Checked --tag 1 --noanalyze --output f:\output --corelib
#
# We pass --noanalyze and call jit-analyze manually. This isn't really necessary, but it does give us better output
# due to https://github.com/dotnet/jitutils/issues/175.
altjit_args = []
if ci_arch is not None and (ci_arch == 'x86_arm_altjit' or ci_arch == 'x64_arm64_altjit'):
altjit_args = ["--altjit", "protononjit.dll"]
# Over which set of assemblies should we generate asm?
# TODO: parameterize this
asm_source_args = ["--frameworks", "--benchmarks"]
command = ["dotnet", jitDiffPath, "diff", "--pmi", "--base", "--base_root", baseCoreClrPath, "--diff", "--diff_root", diff_root, "--arch", arch, "--build", build_type, "--tag", "1", "--noanalyze", "--output", asmRootPath] + asm_source_args + altjit_args
returncode = run_command(command, my_env)
# We ignore the return code: it is non-zero if there are any diffs. If there are fatal errors here, we will miss them.
# Question: does jit-diff distinguish between non-zero fatal error code and the existence of diffs?
# Did we get any diffs?
baseOutputDir = os.path.join(asmRootPath, "1", "base")
if not testing and not os.path.isdir(baseOutputDir):
log("ERROR: base asm not generated")
return 1
diffOutputDir = os.path.join(asmRootPath, "1", "diff")
if not testing and not os.path.isdir(diffOutputDir):
log("ERROR: diff asm not generated")
return 1
# Do the jit-analyze comparison:
# dotnet c:\gh\jitutils\artifacts\jit-analyze.dll --base f:\output\diffs\1\base --recursive --diff f:\output\diffs\1\diff
command = ["dotnet", jitAnalyzePath, "--recursive", "--base", baseOutputDir, "--diff", diffOutputDir]
returncode = run_command(command, my_env)
if returncode != 0:
# This is not a fatal error.
log('Compare: %s %s' % (baseOutputDir, diffOutputDir))
finally:
# Shutdown the dotnet build servers before cleaning things up
# TODO: make this shutdown happen anytime after we've run any 'dotnet' commands. I.e., try/finally style.
log('Shutting down build servers')
command = ["dotnet", "build-server", "shutdown"]
returncode = run_command(command, my_env)
return 0
##########################################################################
# Main
##########################################################################
def main(args):
global arch, ci_arch, build_type, base_root, diff_root, scratch_root, skip_baseline_build, skip_diffs, target_branch, commit_hash
global my_env
global base_layout_root
global diff_layout_root
global baseCoreClrPath
global testing
arch, ci_arch, build_type, base_root, diff_root, scratch_root, skip_baseline_build, skip_diffs, target_branch, commit_hash = validate_args(args)
my_env = os.environ
if not testing and not os.path.isdir(diff_root):
log('ERROR: root directory for coreclr diff tree not found: %s' % diff_root)
return 1
# Check the diff layout directory before going too far.
diff_layout_root = os.path.join(diff_root,
'bin',
'tests',
'%s.%s.%s' % (Clr_os, arch, build_type),
'Tests',
'Core_Root')
if not testing and not os.path.isdir(diff_layout_root):
log('ERROR: diff test overlay not found or is not a directory: %s' % diff_layout_root)
return 1
# Create the scratch root directory
if not testing:
try:
os.makedirs(scratch_root)
except OSError:
if not os.path.isdir(scratch_root):
log('ERROR: cannot create scratch directory %s' % scratch_root)
return 1
# Set up baseline root directory. If one is passed to us, we use it. Otherwise, we create
# a temporary directory.
if base_root is None:
# Setup scratch directories. Names are short to avoid path length problems on Windows.
# No need to create this directory now, as the "git clone" will do it later.
baseCoreClrPath = os.path.abspath(os.path.join(scratch_root, 'base'))
else:
baseCoreClrPath = os.path.abspath(base_root)
if not testing and not os.path.isdir(baseCoreClrPath):
log('ERROR: base root directory not found or is not a directory: %s' % baseCoreClrPath)
return 1
# Do the baseline build, if needed
if not skip_baseline_build and base_root is None:
returncode = baseline_build()
if returncode != 0:
return 1
# Check that the baseline root directory was created.
base_layout_root = os.path.join(baseCoreClrPath,
'bin',
'tests',
'%s.%s.%s' % (Clr_os, arch, build_type),
'Tests',
'Core_Root')
if not testing and not os.path.isdir(base_layout_root):
log('ERROR: baseline test overlay not found or is not a directory: %s' % base_layout_root)
return 1
# Do the diff run, if needed
if not skip_diffs:
returncode = do_pmi_diffs()
if returncode != 0:
return 1
return 0
##########################################################################
# setup for Main
##########################################################################
if __name__ == '__main__':
Args = parser.parse_args(sys.argv[1:])
return_code = main(Args)
log('Exit code: %s' % return_code)
sys.exit(return_code)
|
# coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ListApisBindedToRequestThrottlingPolicyV2Response(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'total': 'int',
'size': 'int',
'apis': 'list[ThrottleBindingApiResp]'
}
attribute_map = {
'total': 'total',
'size': 'size',
'apis': 'apis'
}
def __init__(self, total=None, size=None, apis=None):
"""ListApisBindedToRequestThrottlingPolicyV2Response - a model defined in huaweicloud sdk"""
super().__init__()
self._total = None
self._size = None
self._apis = None
self.discriminator = None
if total is not None:
self.total = total
if size is not None:
self.size = size
if apis is not None:
self.apis = apis
@property
def total(self):
"""Gets the total of this ListApisBindedToRequestThrottlingPolicyV2Response.
Total number of APIs that match the query criteria
:return: The total of this ListApisBindedToRequestThrottlingPolicyV2Response.
:rtype: int
"""
return self._total
@total.setter
def total(self, total):
"""Sets the total of this ListApisBindedToRequestThrottlingPolicyV2Response.
Total number of APIs that match the query criteria
:param total: The total of this ListApisBindedToRequestThrottlingPolicyV2Response.
:type: int
"""
self._total = total
@property
def size(self):
"""Gets the size of this ListApisBindedToRequestThrottlingPolicyV2Response.
Length of the API list returned in this response
:return: The size of this ListApisBindedToRequestThrottlingPolicyV2Response.
:rtype: int
"""
return self._size
@size.setter
def size(self, size):
"""Sets the size of this ListApisBindedToRequestThrottlingPolicyV2Response.
Length of the API list returned in this response
:param size: The size of this ListApisBindedToRequestThrottlingPolicyV2Response.
:type: int
"""
self._size = size
@property
def apis(self):
"""Gets the apis of this ListApisBindedToRequestThrottlingPolicyV2Response.
List of APIs returned by this query
:return: The apis of this ListApisBindedToRequestThrottlingPolicyV2Response.
:rtype: list[ThrottleBindingApiResp]
"""
return self._apis
@apis.setter
def apis(self, apis):
"""Sets the apis of this ListApisBindedToRequestThrottlingPolicyV2Response.
List of APIs returned by this query
:param apis: The apis of this ListApisBindedToRequestThrottlingPolicyV2Response.
:type: list[ThrottleBindingApiResp]
"""
self._apis = apis
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListApisBindedToRequestThrottlingPolicyV2Response):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
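# A minimal usage sketch (illustrative, not part of the generated SDK file): it only
# exercises the response model defined above with made-up attribute values.
if __name__ == '__main__':
    _resp = ListApisBindedToRequestThrottlingPolicyV2Response(total=1, size=1, apis=[])
    print(_resp.to_dict())  # expected: {'total': 1, 'size': 1, 'apis': []}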
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .base_weapon import Weapon
from ... import dice as D, material as M
class BaseClub(Weapon):
pass
class Club(BaseClub):
def __init__(self):
super().__init__('club', weight=30, damage=D.Dice.from_str('d3'), material=M.Wood, hit=0)
class Aklys(BaseClub):
def __init__(self):
super().__init__('aklys', weight=15,
damage=D.Dice.from_str('d3'), material=M.Iron, hit=0)
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 6
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AuditProgressProgress(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'protocol_audit_cee_time': 'int',
'protocol_audit_log_time': 'int',
'protocol_audit_syslog_time': 'int'
}
attribute_map = {
'protocol_audit_cee_time': 'protocol_audit_cee_time',
'protocol_audit_log_time': 'protocol_audit_log_time',
'protocol_audit_syslog_time': 'protocol_audit_syslog_time'
}
def __init__(self, protocol_audit_cee_time=None, protocol_audit_log_time=None, protocol_audit_syslog_time=None): # noqa: E501
"""AuditProgressProgress - a model defined in Swagger""" # noqa: E501
self._protocol_audit_cee_time = None
self._protocol_audit_log_time = None
self._protocol_audit_syslog_time = None
self.discriminator = None
if protocol_audit_cee_time is not None:
self.protocol_audit_cee_time = protocol_audit_cee_time
if protocol_audit_log_time is not None:
self.protocol_audit_log_time = protocol_audit_log_time
if protocol_audit_syslog_time is not None:
self.protocol_audit_syslog_time = protocol_audit_syslog_time
@property
def protocol_audit_cee_time(self):
"""Gets the protocol_audit_cee_time of this AuditProgressProgress. # noqa: E501
Specifies the last protocol audit event time consumed by the CEE forwarder. # noqa: E501
:return: The protocol_audit_cee_time of this AuditProgressProgress. # noqa: E501
:rtype: int
"""
return self._protocol_audit_cee_time
@protocol_audit_cee_time.setter
def protocol_audit_cee_time(self, protocol_audit_cee_time):
"""Sets the protocol_audit_cee_time of this AuditProgressProgress.
Specifies the last protocol audit event time consumed by the CEE forwarder. # noqa: E501
:param protocol_audit_cee_time: The protocol_audit_cee_time of this AuditProgressProgress. # noqa: E501
:type: int
"""
if protocol_audit_cee_time is not None and protocol_audit_cee_time > 4294967295: # noqa: E501
raise ValueError("Invalid value for `protocol_audit_cee_time`, must be a value less than or equal to `4294967295`") # noqa: E501
if protocol_audit_cee_time is not None and protocol_audit_cee_time < 0: # noqa: E501
raise ValueError("Invalid value for `protocol_audit_cee_time`, must be a value greater than or equal to `0`") # noqa: E501
self._protocol_audit_cee_time = protocol_audit_cee_time
@property
def protocol_audit_log_time(self):
"""Gets the protocol_audit_log_time of this AuditProgressProgress. # noqa: E501
Specifies the last logged audit protocol event time. # noqa: E501
:return: The protocol_audit_log_time of this AuditProgressProgress. # noqa: E501
:rtype: int
"""
return self._protocol_audit_log_time
@protocol_audit_log_time.setter
def protocol_audit_log_time(self, protocol_audit_log_time):
"""Sets the protocol_audit_log_time of this AuditProgressProgress.
Specifies the last logged audit protocol event time. # noqa: E501
:param protocol_audit_log_time: The protocol_audit_log_time of this AuditProgressProgress. # noqa: E501
:type: int
"""
if protocol_audit_log_time is not None and protocol_audit_log_time > 4294967295: # noqa: E501
raise ValueError("Invalid value for `protocol_audit_log_time`, must be a value less than or equal to `4294967295`") # noqa: E501
if protocol_audit_log_time is not None and protocol_audit_log_time < 0: # noqa: E501
raise ValueError("Invalid value for `protocol_audit_log_time`, must be a value greater than or equal to `0`") # noqa: E501
self._protocol_audit_log_time = protocol_audit_log_time
@property
def protocol_audit_syslog_time(self):
"""Gets the protocol_audit_syslog_time of this AuditProgressProgress. # noqa: E501
Specifies the last protocol audit event time consumed by the Syslog forwarder. # noqa: E501
:return: The protocol_audit_syslog_time of this AuditProgressProgress. # noqa: E501
:rtype: int
"""
return self._protocol_audit_syslog_time
@protocol_audit_syslog_time.setter
def protocol_audit_syslog_time(self, protocol_audit_syslog_time):
"""Sets the protocol_audit_syslog_time of this AuditProgressProgress.
Specifies the last protocol audit event time consumed by the Syslog forwarder. # noqa: E501
:param protocol_audit_syslog_time: The protocol_audit_syslog_time of this AuditProgressProgress. # noqa: E501
:type: int
"""
if protocol_audit_syslog_time is not None and protocol_audit_syslog_time > 4294967295: # noqa: E501
raise ValueError("Invalid value for `protocol_audit_syslog_time`, must be a value less than or equal to `4294967295`") # noqa: E501
if protocol_audit_syslog_time is not None and protocol_audit_syslog_time < 0: # noqa: E501
raise ValueError("Invalid value for `protocol_audit_syslog_time`, must be a value greater than or equal to `0`") # noqa: E501
self._protocol_audit_syslog_time = protocol_audit_syslog_time
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AuditProgressProgress):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
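# A minimal usage sketch (illustrative, not part of the generated bindings): it shows
# the 32-bit unsigned range check enforced by the property setters above, using a
# made-up epoch value.
if __name__ == '__main__':
    _progress = AuditProgressProgress(protocol_audit_log_time=1514764800)
    print(_progress.to_dict())
    try:
        _progress.protocol_audit_cee_time = 4294967296  # one past the allowed maximum
    except ValueError as err:
        print(err)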
|
"""
LoRaWAN Specification v1.0.2
Test Case Group: Functionality
Test Name: FUN_02
"""
#################################################################################
# MIT License
#
# Copyright (c) 2018, Pablo D. Modernell, Universitat Oberta de Catalunya (UOC),
# Universidad de la Republica Oriental del Uruguay (UdelaR).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#################################################################################
import lorawan.lorawan_conformance.lorawan_steps as lorawan_steps
import conformance_testing.test_step_sequence
import conformance_testing.test_errors as test_errors
class ActokToPingDelay(lorawan_steps.ActokToPing):
"""
Checks the tolerance to delays in timing from the specified start of the reception windows.
Expected reception: Activation Ok.
Sends after check: Ping message with an extra delay.
"""
def __init__(self, ctx_test_manager, step_name, delay, next_step, default_rx1_window=True):
"""
:param ctx_test_manager: Test Manager of the Test Case.
:param step_name: string representation of the step name.
:param delay: extra delay in microseconds (positive or negative).
:param next_step: next step of the test.
:param default_rx1_window: flag to indicate if the default behaviour should be sending
downlink in RX1 (or RX2).
"""
super().__init__(ctx_test_manager=ctx_test_manager, step_name=step_name,
default_rx1_window=default_rx1_window,
next_step=next_step)
self.delay = delay
def step_handler(self, ch, method, properties, body):
self.ctx_test_manager.device_under_test.loramac_params.rx1_delay += self.delay
try:
super().step_handler(ch, method, properties, body)
except test_errors.TestingToolError as tt_e:
raise tt_e
finally:
self.ctx_test_manager.device_under_test.loramac_params.rx1_delay -= self.delay
class TestAppManager(conformance_testing.test_step_sequence.TestManager):
"""
The TestAppManager (Test Application Manager) is a TestManager defined in each test,
it specifies the different steps that the test performs.
LoRaWAN Test FUN 02:
Test the node's tolerance to timing errors in the downlink reception windows.
PRECONDITION: DUT (Device Under Test) is already in TEST MODE.
"""
def __init__(self, test_session_coordinator):
super().__init__(test_name=__name__.split(".")[-1],
ctx_test_session_coordinator=test_session_coordinator)
# -----------------------------------------------------------------------------------------
self.s8_check_pong = lorawan_steps.PongFinalStep(ctx_test_manager=self,
step_name="S8WaitPong",
next_step=None)
self.add_step_description(step_name="Step 8: 8WaitPong",
description=(
"Checks the reception of the PONG message.\n"
"- Reception from DUT: PONG message.\n"
"- TAS sends: None.\n"))
# -----------------------------------------------------------------------------------------
self.s7_actok_to_ping_m20rx2 = ActokToPingDelay(ctx_test_manager=self,
step_name="S7ActokToPingDelayMinus20",
delay=-20,
next_step=self.s8_check_pong,
default_rx1_window=False)
self.add_step_description(
step_name="Step 7: S7ActokToPingDelayMinus20",
description=(
"Waits and Activation Ok message with the current downlink counter of "
"the session and after it's received a PING PONG exchange will be initiated, "
"using RX2 with a timing error of -20 micro seconds.\n"
"- Reception from DUT: TAOK message with the downlink counter.\n"
"- TAS sends: PING message with a -20 micro seconds delay in RX2.\n"))
# -----------------------------------------------------------------------------------------
self.s6_check_pong = lorawan_steps.ProcessPong(ctx_test_manager=self,
step_name="S6WaitPong",
next_step=self.s7_actok_to_ping_m20rx2)
self.add_step_description(step_name="Step 6: 6WaitPong",
description=(
"Checks the reception of the PONG message.\n"
"- Reception from DUT: PONG message.\n"
"- TAS sends: None.\n"))
# -----------------------------------------------------------------------------------------
self.s5_actok_to_ping_m20rx1 = ActokToPingDelay(ctx_test_manager=self,
step_name="S5ActokToPingDelay",
delay=-20,
next_step=self.s6_check_pong,
default_rx1_window=True)
self.add_step_description(
step_name="Step 5: S5ActokToPingDelayMinus20",
description=(
"Waits and Activation Ok message with the current downlink counter of "
"the session and after it's received a PING PONG exchange will be initiated, "
"using RX1 with a timing error of -20 micro seconds.\n"
"- Reception from DUT: TAOK message with the downlink counter.\n"
"- TAS sends: PING message with a -20 micro seconds delay in RX1.\n"))
# -----------------------------------------------------------------------------------------
self.s4_check_pong = lorawan_steps.ProcessPong(ctx_test_manager=self,
step_name="S4WaitPong",
next_step=self.s5_actok_to_ping_m20rx1)
self.add_step_description(step_name="Step 4: S4WaitPong",
description=(
"Checks the reception of the PONG message.\n"
"- Reception from DUT: PONG message.\n"
"- TAS sends: None.\n"))
# -----------------------------------------------------------------------------------------
self.s3_actok_to_ping_20rx2 = ActokToPingDelay(ctx_test_manager=self,
step_name="S3ActokToPingDelayPlus20",
delay=20,
next_step=self.s4_check_pong,
default_rx1_window=False)
self.add_step_description(
step_name="Step 2: S3ActokToPingDelayPlus20",
description=(
"Waits and Activation Ok message with the current downlink counter of "
"the session and after it's received a PING PONG exchange will be initiated, "
"using RX2 with a timing error of +20 micro seconds.\n"
"- Reception from DUT: TAOK message with the downlink counter.\n"
"- TAS sends: PING message with a +20 micro seconds delay in RX2.\n"))
# -----------------------------------------------------------------------------------------
self.s2_check_pong = lorawan_steps.ProcessPong(ctx_test_manager=self,
step_name="S2WaitPong",
next_step=self.s3_actok_to_ping_20rx2)
self.add_step_description(step_name="Step 2: S2WaitPong",
description=(
"Checks the reception of the PONG message.\n"
"- Reception from DUT: PONG message.\n"
"- TAS sends: None.\n"))
# -----------------------------------------------------------------------------------------
self.s1_actok_to_ping_20rx1 = ActokToPingDelay(ctx_test_manager=self,
step_name="S1ActokToPingDelayPlus20",
delay=20,
next_step=self.s2_check_pong,
default_rx1_window=True)
self.add_step_description(
step_name="Step 1: S1ActokToPingDelayPlus20",
description=(
"Waits and Activation Ok message with the current downlink counter of "
"the session and after it's received a PING PONG exchange will be "
"initiated, using RX1 with a timing error of +20 micro seconds.\n"
"- Reception from DUT: TAOK message with the downlink counter.\n"
"- TAS sends: PING message with a +20 micro seconds delay in RX1.\n"))
# -----------------------------------------------------------------------------------------
# Set Initial Step
self.current_step = self.s1_actok_to_ping_20rx1
self.add_step_description(
step_name="Test ID: TD_LoRaWAN_FUN_02",
description=(
"Objective: Test the node's tolerance to timing errors in the download "
"reception windows. Verifies that downlink messages with +/- 20us in RX1 "
"and RX2 are correctly received.\n"
"References: LoRaWAN Specification v1.0.2.\n"
"Pre-test conditions: The DUT is in Test Mode and supports "
"Over The Air Activation (OTAA).\n"))
|
#!/usr/bin/env python
"""settings.py
Udacity conference server-side Python App Engine app user settings
$Id$
created/forked from conference.py by wesc on 2014 may 24
"""
# Replace the following lines with client IDs obtained from the APIs
# Console or Cloud Console.
WEB_CLIENT_ID = '1006324622497-9qlhtun5go635oe57l2vevfrq3f16u57.apps.googleusercontent.com'
|
"""
Another Main module.
"""
if __name__ == "__main__":
import stuntcat.cli
cli = stuntcat.cli.Cli()
cli.cli_main()
|
import importlib, os, shutil, subprocess
import ase.build
import pypospack.io.vasp as vasp
import pypospack.crystal as crystal
import pypospack.io.slurm as slurm
class VaspCalculateBulkProperties(object):
def __init__(self,sim_dir,obj_structure):
self.sim_dir = sim_dir
self.obj_structure = obj_structure
self.sim_task_list = ['min0','conv_encut','conv_kpoints','minf','elas']
self.dir_dict = {}
for task in self.sim_task_list:
self.dir_dict[task] = os.path.join(self.sim_dir,task)
# define the tasks
self.sim_task_class_name = {}
self.sim_task_class_name['min0'] = ['pypospack.task.vasp','VaspMinimizeStructure']
self.sim_task_class_name['conv_encut'] = ['pypospack.task.vasp','VaspConvergeEncut']
self.sim_task_class_name['conv_kpoints'] = ['pypospack.task.vasp','VaspConvergeKpoints']
self.sim_task_class_name['minf'] = ['pypospack.task.vasp','VaspMinimizeStructure']
self.sim_task_class_name['elas'] = ['pypospack.task.vasp', 'VaspCalculateElastic']
# define dependency
self.sim_task_dependency = {}
# NOTE: the original assignment was incomplete; a plausible task ordering is
# assumed here (convergence runs depend on the initial minimization, the final
# minimization on both convergence runs, and elasticity on the final minimization).
self.sim_task_dependency['min0'] = None
self.sim_task_dependency['conv_encut'] = 'min0'
self.sim_task_dependency['conv_kpoints'] = 'min0'
self.sim_task_dependency['minf'] = ['conv_encut','conv_kpoints']
self.sim_task_dependency['elas'] = 'minf'
self.min_0_dir = os.path.join(self.sim_dir,'min_0')
self.conv_encut_dir = os.path.join(self.sim_dir,'conv_encut')
self.conv_kpoints_dir = os.path.join(self.sim_dir,'conv_kpoints')
self.min_f_dir = os.path.join(self.sim_dir,'min_f')
self.elas_dir = os.path.join(self.sim_dir,'elas')
def run(self):
self.sims = {}
if os.path.exists(self.sim_dir):
for task in self.sim_task_list:
# Reconstructed from the broken original line: resolve each task class from its
# (module name, class name) pair; assumes the named modules are importable.
module_name, class_name = self.sim_task_class_name[task]
self.sims[task] = getattr(importlib.import_module(module_name), class_name)
else:
os.mkdir(self.sim_dir)
self.sims['min_0'] = VaspMinimizeStructure(\
sim_dir = self.min_0_dir,
obj_structure = self.obj_structure)
if self.sims['min_0'].is_job_completed:
pass
else:
pass
self.sims['conv_encut'] = VaspConvergeEncut()
self.sims['conv_kpoints'] = VaspMinimizeStructure()
is_encut_complete = self.sims['conv_encut'].is_job_complete
is_kpoints_complete = self.sims['conv_kpoints'].is_job_complete
if is_encut_complete and is_kpoints_complete:
self.sims['min_f'] = VaspMinimizeStructure(\
sim_dir = self.min_f_dir,
obj_structure = self.obj_structure)
VaspConvergeKpoints()
VaspMinimizeStructure()
VaspCalculateElastic()
class VaspMinimizeStructure(object):
def __init__(self,sim_dir,obj_structure, xc = 'GGA'):
assert isinstance(obj_structure, crystal.SimulationCell)
self.obj_structure = obj_structure
self.xc = xc
self.sim_dir = sim_dir
self.is_job_submitted = False
self.is_job_completed = False
if os.path.exists(self.sim_dir):
self.is_job_submitted = os.path.exists(os.path.join(self.sim_dir,'job.submitted'))
self.is_job_completed = os.path.exists(os.path.join(self.sim_dir,'job.completed'))
if self.is_job_completed:
self.postprocess()
else:
os.mkdir(self.sim_dir)
self.create_simulation()
self.submit_job()
def create_simulation(self):
# initialize input files
self.vs = vasp.VaspSimulation()
self.vs.poscar = vasp.Poscar(self.obj_structure)
self.vs.incar = vasp.Incar()
self.vs.potcar = vasp.Potcar()
self.vs.kpoints = vasp.Kpoints()
self.vs.xc = self.xc
self.vs.simulation_directory = self.sim_dir
self.vs.symbols = self.vs.poscar.symbols
# configure potcar file
self.vs.potcar.symbols = self.vs.symbols
self.vs.potcar.xc = self.vs.xc
fn_potcar = os.path.join(self.vs.simulation_directory,'POTCAR')
self.vs.potcar.write(fn_potcar)
self.vs.potcar.read(fn_potcar)
self.vs.encut = max(self.vs.potcar.encut_max)
self.vs.natoms = self.vs.poscar.n_atoms
# configure incar file
magmom_0 = 1.0
self.vs.incar.ismear = 1
self.vs.incar.sigma = 0.20
self.vs.incar.ispin = 2
self.vs.incar.magmom = "{}*{}".format(self.vs.natoms,magmom_0)
self.vs.incar.ibrion = 2
self.vs.incar.isif = 3
self.vs.incar.potim = 0.5
self.vs.incar.ediffg = -0.001
self.vs.poscar.write(os.path.join(\
self.vs.simulation_directory,"POSCAR"))
self.vs.incar.write(os.path.join(\
self.vs.simulation_directory,"INCAR"))
self.vs.kpoints.write(os.path.join(\
self.vs.simulation_directory,"KPOINTS"))
def submit_job(self):
pass
def postprocess(self):
pass
class VaspConvergeEncut(object):
pass
class VaspConvergeKpoints(object):
pass
class VaspCalculateElastic(object):
pass
def calculate_bulk_properties(sim_dir):
os.mkdir(sim_dir)
if __name__ == '__main__':
structures = {}
structures['Ni_fcc_cubic'] = {'symbols':['Ni'],
'sg':'fcc',
'a0':3.508,
'shape':'cubic'}
structures['Ni_bcc_cubic'] = {'symbols':['Ni'],
'sg':'bcc',
'a0':3.508,
'shape':'cubic'}
structures['Ni_hcp_cubic'] = {'symbols':['Ni'],
'sg':'hcp',
'a0':3.508,
'shape':'cubic'}
structures['Ni_dia_cubic'] = {'symbols':['Ni'],
'sg':'diamond',
'a0':3.508,
'shape':'cubic'}
structures['Ni_sc_cubic'] = {'symbols':['Ni'],
'sg':'sc',
'a0':3.508,
'shape':'cubic'}
root_dir = os.getcwd()
for k,v in structures.items():
sim_dir = os.path.join(root_dir,k)
# add in aliases for space groups here
if v['sg'] in ['dia']:
v['sg'] = 'diamond'
if v['sg'] in ['fcc','bcc','diamond','sc','hcp']:
if isinstance(v['symbols'],list):
if len(v['symbols']) == 1:
v['symbols'] = v['symbols'][0]
else:
raise KeyError('cannot have more than one symbol in {}'.format(v['sg']))
else:
raise KeyError('unsupported space group passed in: {}'.format(v['sg']))
obj_poscar = None
if v['shape'] == 'cubic':
obj_poscar = vasp.Poscar(\
ase.build.bulk(\
v['symbols'],
v['sg'],
a=v['a0'],
cubic=True))
elif v['shape'] == 'ortho':
obj_poscar= vasp.Poscar(\
ase.build.bulk(\
v['symbols'],
v['sg'],
a=v['a0'],
ortho=True))
elif v['shape'] == 'prim':
obj_poscar = vasp.Poscar(\
ase.build.bulk(\
v['symbols'],
v['sg'],
a=v['a0']))
else:
raise KeyError('unsupported cell shape passed in: {}'.format(v['shape']))
VaspCalculateBulkProperties(sim_dir,obj_poscar)
|
import electric_car
my_tesla = electric_car.ElectricCar('tesla', 'roadster', 2016)
print(my_tesla.get_descriptive_name())
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 23 22:07:01 2018
@author: yoelr
"""
from ._tank import MixTank
from ._hx import HXutility
class EnzymeTreatment(MixTank):
"""Create an EnzymeTreatment unit that is cost as a MixTank with a heat exchanger."""
_N_outs = 1
#: Residence time (hr)
_tau = 1
def __init__(self, ID='', ins=None, outs=(), *, T):
super().__init__(ID, ins, outs)
self.T = T #: Operating temperature
self._heat_exchanger = he = HXutility(None, None, T=T)
self._heat_utilities = he._heat_utilities
he._ins = self._ins
he._outs = self._outs
def _run(self):
feed = self.ins[0]
out = self.outs[0]
out._mol[:] = self._mol_in
out.phase = feed.phase
out.P = feed.P
out.T = self.T
def _design(self):
super()._design()
self._heat_exchanger._design()
def _cost(self):
super()._cost()
he = self._heat_exchanger
he._cost()
self._Cost['Heat exchanger'] = he._Cost['Heat exchanger']
|
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import datetime
import logging
import re
from collections import defaultdict
from operator import attrgetter
from random import randint
from typing import (Any, Dict, Generator, List, Optional, Set, Tuple, Type,
Union)
from amundsen_common.entity.resource_type import ResourceType
from amundsen_common.models.dashboard import DashboardSummary
from amundsen_common.models.feature import Feature
from amundsen_common.models.generation_code import GenerationCode
from amundsen_common.models.lineage import Lineage, LineageItem
from amundsen_common.models.popular_table import PopularTable
from amundsen_common.models.table import (Application, Badge, Column,
ProgrammaticDescription, Reader,
ResourceReport, Stat, Table, Tag,
User, Watermark)
from amundsen_common.models.user import User as UserEntity
from amundsen_common.utils.atlas import (AtlasColumnKey, AtlasCommonParams,
AtlasCommonTypes, AtlasDashboardTypes,
AtlasStatus, AtlasTableKey,
AtlasTableTypes)
from apache_atlas.client.base_client import AtlasClient
from apache_atlas.model.glossary import (AtlasGlossary, AtlasGlossaryHeader,
AtlasGlossaryTerm)
from apache_atlas.model.instance import (AtlasEntitiesWithExtInfo, AtlasEntity,
AtlasEntityHeader,
AtlasEntityWithExtInfo,
AtlasRelatedObjectId)
from apache_atlas.model.relationship import AtlasRelationship
from apache_atlas.utils import type_coerce
from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options
from flask import current_app as app
from werkzeug.exceptions import BadRequest
from metadata_service.entity.dashboard_detail import \
DashboardDetail as DashboardDetailEntity
from metadata_service.entity.dashboard_query import DashboardQuery
from metadata_service.entity.description import Description
from metadata_service.entity.tag_detail import TagDetail
from metadata_service.exception import NotFoundException
from metadata_service.proxy import BaseProxy
from metadata_service.util import UserResourceRel
LOGGER = logging.getLogger(__name__)
# Expire cache every 11 hours + jitter
_ATLAS_PROXY_CACHE_EXPIRY_SEC = 11 * 60 * 60 + randint(0, 3600)
# noinspection PyMethodMayBeStatic
class AtlasProxy(BaseProxy):
"""
Atlas Proxy client for the amundsen metadata
{ATLAS_API_DOCS} = https://atlas.apache.org/api/v2/
"""
DB_ATTRIBUTE = 'db'
STATISTICS_FORMAT_SPEC = app.config['STATISTICS_FORMAT_SPEC']
# Qualified Name of the Glossary, that holds the user defined terms.
# For Amundsen, we are using Glossary Terms as the Tags.
AMUNDSEN_USER_TAGS = 'amundsen_user_tags'
_CACHE = CacheManager(**parse_cache_config_options({'cache.regions': 'atlas_proxy',
'cache.atlas_proxy.type': 'memory',
'cache.atlas_proxy.expire': _ATLAS_PROXY_CACHE_EXPIRY_SEC}))
def __init__(self, *,
host: str,
port: int,
user: str = 'admin',
password: str = '',
encrypted: bool = False,
validate_ssl: bool = False,
client_kwargs: dict = dict()) -> None:
"""
Initiate the Apache Atlas client with the provided credentials
"""
protocol = 'https' if encrypted else 'http'
self.client = AtlasClient(f'{protocol}://{host}:{port}', (user, password))
self.client.session.verify = validate_ssl
def _parse_dashboard_bookmark_qn(self, bookmark_qn: str) -> Dict:
"""
Parse bookmark qualifiedName and extract the info
:param bookmark_qn: Qualified Name of Bookmark entity
:return: Dictionary object containing following information:
product: dashboard product
cluster: cluster information
dashboard_group: Dashboard group name
dashboard_id: Dashboard identifier
user_id: User id
"""
pattern = re.compile(r"""
^(?P<product>[^.]*)_dashboard
://
(?P<cluster>[^.]*)
\.
(?P<dashboard_group>[^.]*)
/
(?P<dashboard_id>[^.]*)
/
(?P<type>[^.]*)
/
bookmark
/
(?P<user_id>[^.]*)
$
""", re.X)
result = pattern.match(bookmark_qn)
return result.groupdict() if result else dict()
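# Illustrative example (hypothetical values): a dashboard bookmark qualifiedName such as
#   'superset_dashboard://datalab.reporting/dashboard_1/dashboard/bookmark/jdoe'
# is parsed into
#   {'product': 'superset', 'cluster': 'datalab', 'dashboard_group': 'reporting',
#    'dashboard_id': 'dashboard_1', 'type': 'dashboard', 'user_id': 'jdoe'}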
def _parse_table_bookmark_qn(self, bookmark_qn: str) -> Dict:
"""
Parse bookmark qualifiedName and extract the info
:param bookmark_qn: Qualified Name of Bookmark entity
:return: Dictionary object containing following information:
cluster: cluster information
db: schema name
table: table name
entity_type: table entity type (e.g. hive_table)
user_id: user id
"""
pattern = re.compile(r"""
^(?P<db>[^.]*)
\.
(?P<table>[^.]*)
\.
(?P<entity_type>[^.]*)
\.
(?P<user_id>[^.]*)\.bookmark
\@
(?P<cluster>.*)
$
""", re.X)
result = pattern.match(bookmark_qn)
return result.groupdict() if result else dict()
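# Illustrative example (hypothetical values): a table bookmark qualifiedName such as
#   'default.users.hive_table.jdoe.bookmark@gold'
# is parsed into
#   {'db': 'default', 'table': 'users', 'entity_type': 'hive_table',
#    'user_id': 'jdoe', 'cluster': 'gold'}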
@classmethod
def _filter_active(cls, entities: List[dict]) -> List[dict]:
"""
Keep only active entities, based on both the entity and relationship status.
"""
result = [e for e in entities
if e.get('relationshipStatus') == AtlasStatus.ACTIVE
and e.get('entityStatus') == AtlasStatus.ACTIVE]
return result
def _get_table_entity(self, *, table_uri: str) -> AtlasEntityWithExtInfo:
"""
Fetch information from table_uri and then find the appropriate entity
:param table_uri: The table URI coming from Amundsen Frontend
:return: A table entity matching the Qualified Name derived from table_uri
"""
key = AtlasTableKey(table_uri)
try:
return self.client.entity.get_entity_by_attribute(type_name=key.entity_type,
uniq_attributes=[
(AtlasCommonParams.qualified_name,
key.qualified_name)])
except Exception as ex:
LOGGER.exception(f'Table not found. {str(ex)}')
raise NotFoundException(f'Table URI( {table_uri} ) does not exist')
def _get_user_entity(self, user_id: str) -> AtlasEntityWithExtInfo:
"""
Fetch a user entity by its id
:param user_id: User ID
:return: A User entity matching the user_id
"""
try:
return self.client.entity.get_entity_by_attribute(type_name=AtlasCommonTypes.user,
uniq_attributes=[
(AtlasCommonParams.qualified_name, user_id)])
except Exception:
raise NotFoundException(f'(User {user_id}) does not exist')
def _create_bookmark(self, entity: AtlasEntityWithExtInfo, user_guid: str, bookmark_qn: str,
entity_uri: str) -> None:
"""
Creates a bookmark entity for a specific user and entity uri.
:param entity: bookmarked entity
:param user_guid: User's guid
:param bookmark_qn: Bookmark qualifiedName
:param entity_uri: uri of bookmarked entity
:return:
"""
bookmark_entity = {
'entity': {
'typeName': AtlasCommonTypes.bookmark,
AtlasCommonParams.attributes: {
AtlasCommonParams.qualified_name: bookmark_qn,
AtlasStatus.ACTIVE.lower(): True,
'entityUri': entity_uri,
'entityName': entity.entity[AtlasCommonParams.attributes]['name'],
'user': {AtlasCommonParams.guid: user_guid},
'entity': {AtlasCommonParams.guid: entity.entity[AtlasCommonParams.guid]}}
}
}
bookmark_entity = type_coerce(bookmark_entity, AtlasEntityWithExtInfo)
self.client.entity.create_entity(bookmark_entity)
def _get_bookmark_entity(self, entity_uri: str, user_id: str,
resource_type: ResourceType) -> AtlasEntityWithExtInfo:
"""
Fetch a Bookmark entity from parsing entity uri and user id.
If Bookmark is not present, create one for the user.
:param entity_uri:
:param user_id: Qualified Name of a user
:return:
"""
if resource_type == ResourceType.Table:
entity_info = AtlasTableKey(entity_uri).get_details()
schema = entity_info.get('schema')
table = entity_info.get('table')
database = entity_info.get('database', 'hive_table')
cluster = entity_info.get('cluster')
bookmark_qn = f'{schema}.{table}.{database}.{user_id}.bookmark@{cluster}'
else:
bookmark_qn = f'{entity_uri}/{resource_type.name.lower()}/bookmark/{user_id}'
try:
bookmark_entity = self.client.entity.get_entity_by_attribute(type_name=AtlasCommonTypes.bookmark,
uniq_attributes=[
(AtlasCommonParams.qualified_name,
bookmark_qn)])
except Exception as ex:
LOGGER.exception(f'Bookmark not found. {str(ex)}')
if resource_type == ResourceType.Table:
bookmarked_entity = self._get_table_entity(table_uri=entity_uri)
elif resource_type == ResourceType.Dashboard:
bookmarked_entity = self._get_dashboard(qualified_name=entity_uri)
else:
raise NotImplementedError(f'Bookmarks for Resource Type ({resource_type}) are not yet implemented')
# Fetch user entity from user_id for relation
user_entity = self._get_user_entity(user_id)
# Create bookmark entity with the user relation.
self._create_bookmark(bookmarked_entity,
user_entity.entity[AtlasCommonParams.guid],
bookmark_qn,
entity_uri)
# Fetch bookmark entity after creating it.
bookmark_entity = self.client.entity.get_entity_by_attribute(type_name=AtlasCommonTypes.bookmark,
uniq_attributes=[
(AtlasCommonParams.qualified_name,
bookmark_qn)])
return bookmark_entity
def _get_column(self, *, table_uri: str, column_name: str) -> Dict:
"""
Fetch the column information from referredEntities of the table entity
:param table_uri:
:param column_name:
:return: A dictionary containing the column details
"""
try:
table_entity = self._get_table_entity(table_uri=table_uri)
columns = table_entity.entity[AtlasCommonParams.relationships].get('columns')
for column in columns or list():
col_details = table_entity.referredEntities[column[AtlasCommonParams.guid]]
if column_name == col_details[AtlasCommonParams.attributes]['name']:
return col_details
raise NotFoundException(f'Column not found: {column_name}')
except KeyError as ex:
LOGGER.exception(f'Column not found: {str(ex)}')
raise NotFoundException(f'Column not found: {column_name}')
def _serialize_columns(self, *, entity: AtlasEntityWithExtInfo) -> \
Union[List[Column], List]:
"""
Helper function to fetch the columns from entity and serialize them
using Column and Stat model.
:param entity: AtlasEntityWithExtInfo object,
along with relationshipAttributes
:return: A list of Column objects, if there are any columns available,
else an empty list.
"""
columns = list()
for column in entity.entity[AtlasCommonParams.relationships].get('columns') or list():
column_status = column.get('entityStatus', 'inactive').lower()
if column_status != 'active':
continue
col_entity = entity.referredEntities[column[AtlasCommonParams.guid]]
col_attrs = col_entity[AtlasCommonParams.attributes]
statistics = list()
badges = list()
for column_classification in col_entity.get('classifications') or list():
if column_classification.get('entityStatus') == AtlasStatus.ACTIVE:
name = column_classification.get('typeName')
badges.append(Badge(badge_name=name, category='default'))
for stats in col_attrs.get('statistics') or list():
stats_attrs = stats[AtlasCommonParams.attributes]
stat_type = stats_attrs.get('stat_name')
stat_format = self.STATISTICS_FORMAT_SPEC.get(stat_type, dict())
if not stat_format.get('drop', False):
stat_type = stat_format.get('new_name', stat_type)
stat_val = stats_attrs.get('stat_val')
format_val = stat_format.get('format')
if format_val:
stat_val = format_val.format(stat_val)
else:
stat_val = str(stat_val)
start_epoch = stats_attrs.get('start_epoch')
end_epoch = stats_attrs.get('end_epoch')
statistics.append(
Stat(
stat_type=stat_type,
stat_val=stat_val,
start_epoch=start_epoch,
end_epoch=end_epoch,
)
)
columns.append(
Column(
name=col_attrs.get('name'),
description=col_attrs.get('description') or col_attrs.get('comment'),
col_type=col_attrs.get('type') or col_attrs.get('dataType') or col_attrs.get('data_type'),
sort_order=col_attrs.get('position') or 9999,
stats=statistics,
badges=badges
)
)
return sorted(columns, key=lambda item: item.sort_order)
def _get_reports(self, guids: List[str]) -> List[ResourceReport]:
reports = []
if guids:
report_entities = self.client.entity.get_entities_by_guids(guids=guids)
for report_entity in report_entities.entities:
try:
if report_entity.status == AtlasStatus.ACTIVE:
report_attrs = report_entity.attributes
reports.append(
ResourceReport(
name=report_attrs['name'],
url=report_attrs['url']
)
)
except (KeyError, AttributeError):
LOGGER.exception(f'Error while accessing table report: {str(report_entity)}', exc_info=True)
parsed_reports = app.config['RESOURCE_REPORT_CLIENT'](reports) \
if app.config['RESOURCE_REPORT_CLIENT'] else reports
return sorted(parsed_reports)
def _get_owners(self, data_owners: list, fallback_owner: str = None) -> List[User]:
owners_detail = list()
active_owners_list = list()
for owner in self._filter_active(data_owners):
owner_qn = owner['displayText']
owner_data = self._get_user_details(owner_qn)
owners_detail.append(User(**owner_data))
active_owners_list.append(owner_qn)
# To avoid the duplication,
# we are checking if the fallback is not in data_owners
if fallback_owner and (fallback_owner not in active_owners_list):
owners_detail.append(User(**self._get_user_details(fallback_owner)))
return owners_detail
def get_user(self, *, id: str) -> Union[UserEntity, None]:
pass
def create_update_user(self, *, user: User) -> Tuple[User, bool]:
pass
def get_users(self) -> List[UserEntity]:
pass
def _serialize_badges(self, entity: AtlasEntityWithExtInfo) -> List[Badge]:
"""
Return list of Badges for entity. Badges in Amundsen <> Atlas integration are based on Atlas Classification.
:param entity: entity for which badges should be collected
:return : List of Amundsen Badge objects.
"""
result = []
classifications = entity.get('classifications')
for classification in classifications or list():
result.append(Badge(badge_name=classification.get('typeName'), category='default'))
return result
def _serialize_tags(self, entity: AtlasEntityWithExtInfo) -> List[Tag]:
"""
Return list of Tags for entity. Tags in Amundsen <> Atlas integration are based on Atlas Glossary.
:param entity: entity for which tags should be collected
:return : List of Amundsen Tag objects.
"""
result = []
meanings = self._filter_active(entity.get(AtlasCommonParams.relationships, dict()).get('meanings', []))
for term in meanings or list():
result.append(Tag(tag_name=term.get('displayText', ''), tag_type='default'))
return result
def get_table(self, *, table_uri: str) -> Table:
"""
Gathers all the information needed for the Table Detail Page.
:param table_uri:
:return: A Table object with all the information available
or gathered from different entities.
"""
entity = self._get_table_entity(table_uri=table_uri)
table_details = entity.entity
try:
attrs = table_details[AtlasCommonParams.attributes]
programmatic_descriptions = self._get_programmatic_descriptions(attrs.get('parameters', dict()) or dict())
table_info = AtlasTableKey(attrs.get(AtlasCommonParams.qualified_name)).get_details()
badges = self._serialize_badges(table_details)
tags = self._serialize_tags(table_details)
columns = self._serialize_columns(entity=entity)
reports_guids = [report.get("guid") for report in attrs.get("reports") or list()]
table_type = attrs.get('tableType') or 'table'
is_view = 'view' in table_type.lower()
readers = self._get_readers(table_details, Reader)
application = self._get_application(table_details)
table = Table(
table_writer=application,
database=AtlasTableKey(table_uri).get_details()['database'],
cluster=table_info.get('cluster', ''),
schema=table_info.get('schema', ''),
name=attrs.get('name') or table_info.get('table', ''),
badges=badges,
tags=tags,
description=attrs.get('description') or attrs.get('comment'),
owners=self._get_owners(
table_details[AtlasCommonParams.relationships].get('ownedBy', []), attrs.get('owner')),
resource_reports=self._get_reports(guids=reports_guids),
columns=columns,
is_view=is_view,
table_readers=readers,
last_updated_timestamp=self._parse_date(table_details.get('updateTime')),
programmatic_descriptions=programmatic_descriptions,
watermarks=self._get_table_watermarks(table_details))
return table
except KeyError:
LOGGER.exception('Error while accessing table information. {}', exc_info=True)
raise BadRequest(f'Some of the required attributes are missing in: {table_uri}')
@staticmethod
def _validate_date(text_date: str, date_format: str) -> Tuple[Optional[datetime.datetime], Optional[str]]:
try:
return datetime.datetime.strptime(text_date, date_format), date_format
except (ValueError, TypeError):
return None, None
@staticmethod
def _select_watermark_format(partition_names: Optional[List[Any]]) -> Optional[str]:
result = None
if partition_names:
for partition_name in partition_names:
# Assume that all partitions for given table have the same date format. Only thing that needs to be done
# is establishing which format out of the supported ones it is and then we validate every partition
# against it.
for df in app.config['WATERMARK_DATE_FORMATS']:
_, result = AtlasProxy._validate_date(partition_name, df)
if result:
LOGGER.debug('Established date format', extra=dict(date_format=result))
return result
return result
@staticmethod
def _render_partition_key_name(entity: AtlasEntityWithExtInfo) -> Optional[str]:
_partition_keys = []
for partition_key in entity.get(AtlasCommonParams.attributes, dict()).get('partitionKeys', []):
partition_key_column_name = partition_key.get('displayName')
if partition_key_column_name:
_partition_keys.append(partition_key_column_name)
partition_key = ' '.join(_partition_keys).strip()
return partition_key
def _get_table_watermarks(self, entity: AtlasEntityWithExtInfo) -> List[Watermark]:
partition_value_format = '%Y-%m-%d %H:%M:%S'
_partitions = entity.get(AtlasCommonParams.relationships, dict()).get('partitions', list())
names = [_partition.get('displayText') for _partition in self._filter_active(_partitions)]
if not names:
return []
partition_key = self._render_partition_key_name(entity)
watermark_date_format = self._select_watermark_format(names)
partitions = {}
for _partition in _partitions:
partition_name = _partition.get('displayText')
if partition_name and watermark_date_format:
partition_date, _ = self._validate_date(partition_name, watermark_date_format)
if partition_date:
common_values = {'partition_value': datetime.datetime.strftime(partition_date,
partition_value_format),
'create_time': 0,
'partition_key': partition_key}
partitions[partition_date] = common_values
if partitions:
low_watermark_date = min(partitions.keys())
high_watermark_date = max(partitions.keys())
low_watermark = Watermark(watermark_type='low_watermark', **partitions.get(low_watermark_date))
high_watermark = Watermark(watermark_type='high_watermark', **partitions.get(high_watermark_date))
return [low_watermark, high_watermark]
else:
return []
def delete_owner(self, *, table_uri: str, owner: str) -> None:
"""
:param table_uri:
:param owner:
:return:
"""
table = self._get_table_entity(table_uri=table_uri)
table_entity = table.entity
if table_entity[AtlasCommonParams.relationships].get("ownedBy"):
try:
active_owner = next(filter(lambda item:
item['relationshipStatus'] == AtlasStatus.ACTIVE
and item['displayText'] == owner,
table_entity[AtlasCommonParams.relationships]['ownedBy']), None)
if active_owner:
self.client.relationship.delete_relationship_by_guid(
guid=active_owner.get('relationshipGuid')
)
else:
raise BadRequest('You can not delete this owner.')
except Exception:
LOGGER.exception('Error while removing table data owner.', exc_info=True)
def add_owner(self, *, table_uri: str, owner: str) -> None:
"""
Query the Atlas User entity to find whether an entity exists for the
owner string passed in; if not, create one. Then use that User entity's
GUID to add a relationship between Table and User on the ownedBy field.
:param table_uri:
:param owner: Email address of the owner
:return: None, as it simply adds the owner.
"""
owner_info = self._get_user_details(owner)
if not owner_info:
raise NotFoundException(f'User "{owner}" does not exist.')
user_dict = type_coerce({
"entity": {
"typeName": "User",
"attributes": {"qualifiedName": owner},
}
}, AtlasEntityWithExtInfo)
# Get or Create a User
user_entity = self.client.entity.create_entity(user_dict)
user_guid = next(iter(user_entity.guidAssignments.values()))
table = self._get_table_entity(table_uri=table_uri)
entity_def = {
"typeName": "DataSet_Users_Owner",
"end1": {
"guid": table.entity.get("guid"), "typeName": "Table",
},
"end2": {
"guid": user_guid, "typeName": "User",
},
}
try:
relationship = type_coerce(entity_def, AtlasRelationship)
self.client.relationship.create_relationship(relationship=relationship)
except Exception:
LOGGER.exception('Error while adding the owner information. {}', exc_info=True)
raise BadRequest(f'User {owner} is already added as a data owner for table {table_uri}.')
def get_table_description(self, *,
table_uri: str) -> Union[str, None]:
"""
:param table_uri:
:return: The description of the table as a string
"""
entity = self._get_table_entity(table_uri=table_uri)
return entity.entity[AtlasCommonParams.attributes].get('description')
def put_table_description(self, *,
table_uri: str,
description: str) -> None:
"""
Update the description of the given table.
:param table_uri:
:param description: Description string
:return: None
"""
table = self._get_table_entity(table_uri=table_uri)
self.client.entity.partial_update_entity_by_guid(
entity_guid=table.entity.get("guid"), attr_value=description, attr_name='description'
)
@_CACHE.cache('_get_user_defined_glossary_guid')
def _get_user_defined_glossary_guid(self) -> str:
"""
This function looks for the user defined glossary, i.e., self.AMUNDSEN_USER_TAGS.
If one is not available, it creates a new glossary.
The main reason to put this functionality into a separate function is to avoid
the lookup each time someone assigns a tag to a data source.
:return: GUID of the glossary that holds the user defined terms.
"""
# Check if the user glossary already exists
glossaries = self.client.glossary.get_all_glossaries()
for glossary in glossaries:
if glossary.get(AtlasCommonParams.qualified_name) == self.AMUNDSEN_USER_TAGS:
return glossary[AtlasCommonParams.guid]
# If not already exists, create one
glossary_def = AtlasGlossary({"name": self.AMUNDSEN_USER_TAGS,
"shortDescription": "Amundsen User Defined Terms"})
glossary = self.client.glossary.create_glossary(glossary_def)
return glossary.guid
@_CACHE.cache('_get_create_glossary_term')
def _get_create_glossary_term(self, term_name: str) -> Union[AtlasGlossaryTerm, AtlasEntityHeader]:
"""
Since Atlas does not provide any API to find a term directly by a qualified name,
we need to look for AtlasGlossaryTerm via basic search, if found then return, else
create a new glossary term under the user defined glossary.
:param term_name: Name of the term. NOTE: this is different from qualified name.
:return: Term Object.
"""
params = {
'typeName': "AtlasGlossaryTerm",
'excludeDeletedEntities': True,
'includeSubTypes': True,
AtlasCommonParams.attributes: ["assignedEntities", ],
'entityFilters': {'condition': "AND",
'criterion': [{'attributeName': "name", 'operator': "=", 'attributeValue': term_name}]
}
}
result = self.client.discovery.faceted_search(search_parameters=params)
if result.approximateCount:
term = result.entities[0]
else:
glossary_guid = self._get_user_defined_glossary_guid()
glossary_def = AtlasGlossaryHeader({'glossaryGuid': glossary_guid})
term_def = AtlasGlossaryTerm({'name': term_name, 'anchor': glossary_def})
term = self.client.glossary.create_glossary_term(term_def)
return term
def add_tag(self, *, id: str, tag: str, tag_type: str = "default",
resource_type: ResourceType = ResourceType.Table) -> None:
"""
Assign the Glossary Term to the given table. If the term is not there, it will
create a new term under the Glossary self.AMUNDSEN_USER_TAGS.
:param id: Table URI / Dashboard ID etc.
:param tag: Tag Name
:param tag_type
:return: None
"""
entity = self._get_table_entity(table_uri=id)
term = self._get_create_glossary_term(tag)
related_entity = AtlasRelatedObjectId({AtlasCommonParams.guid: entity.entity[AtlasCommonParams.guid],
"typeName": resource_type.name})
self.client.glossary.assign_term_to_entities(term.guid, [related_entity])
def add_badge(self, *, id: str, badge_name: str, category: str = '',
resource_type: ResourceType) -> None:
# Not implemented
raise NotImplementedError
def delete_tag(self, *, id: str, tag: str, tag_type: str,
resource_type: ResourceType = ResourceType.Table) -> None:
"""
Removes the Glossary Term assignment from the provided source.
:param id: Table URI / Dashboard ID etc.
:param tag: Tag Name
:return:None
"""
entity = self._get_table_entity(table_uri=id)
term = self._get_create_glossary_term(tag)
if not term:
return
assigned_entities = self.client.glossary.get_entities_assigned_with_term(term.guid, "ASC", -1, 0)
for item in assigned_entities or list():
if item.get(AtlasCommonParams.guid) == entity.entity[AtlasCommonParams.guid]:
related_entity = AtlasRelatedObjectId(item)
return self.client.glossary.disassociate_term_from_entities(term.guid, [related_entity])
def delete_badge(self, *, id: str, badge_name: str, category: str,
resource_type: ResourceType) -> None:
# Not implemented
raise NotImplementedError
def put_column_description(self, *,
table_uri: str,
column_name: str,
description: str) -> None:
"""
:param table_uri:
:param column_name: Name of the column to update the description
:param description: The description string
:return: None, as it simply updates the description of a column
"""
column_detail = self._get_column(
table_uri=table_uri,
column_name=column_name)
col_guid = column_detail[AtlasCommonParams.guid]
self.client.entity.partial_update_entity_by_guid(
entity_guid=col_guid, attr_value=description, attr_name='description'
)
def get_column_description(self, *,
table_uri: str,
column_name: str) -> Union[str, None]:
"""
:param table_uri:
:param column_name:
:return: The column description using the referredEntities
information of a table entity
"""
column_detail = self._get_column(
table_uri=table_uri,
column_name=column_name)
return column_detail[AtlasCommonParams.attributes].get('description')
def _serialize_popular_tables(self, entities: AtlasEntitiesWithExtInfo) -> List[PopularTable]:
"""
Takes a list of entities and serializes them into popular tables.
:param entities: List of entities from atlas client
:return: a list of PopularTable objects
"""
popular_tables = list()
for table in entities.entities or []:
table_attrs = table.attributes
table_info = AtlasTableKey(table_attrs.get(AtlasCommonParams.qualified_name)).get_details()
table_name = table_info.get('table') or table_attrs.get('name')
schema_name = table_info.get('schema', '')
db_cluster = table_info.get('cluster', '')
popular_table = PopularTable(
database=table_info.get('database') or table.typeName,
cluster=db_cluster,
schema=schema_name,
name=table_name,
description=table_attrs.get('description') or table_attrs.get('comment'))
popular_tables.append(popular_table)
return popular_tables
def get_popular_tables(self, *,
num_entries: int,
user_id: Optional[str] = None) -> List[PopularTable]:
"""
Generates a list of Popular tables to be shown on the home page of Amundsen.
:param num_entries: Number of popular tables to fetch
:return: A List of popular tables instances
"""
popular_query_params = {'typeName': AtlasTableTypes.table,
'sortBy': 'popularityScore',
'sortOrder': 'DESCENDING',
'excludeDeletedEntities': True,
'limit': num_entries}
search_results = self.client.discovery.faceted_search(search_parameters=popular_query_params)
return self._serialize_popular_tables(search_results)
def get_latest_updated_ts(self) -> int:
date = None
metrics = self.client.admin.get_metrics()
try:
date = self._parse_date(metrics.general.get('stats', {}).get('Notification:lastMessageProcessedTime'))
except AttributeError:
pass
return date or 0
def get_statistics(self) -> Dict[str, Any]:
# Not implemented
pass
@_CACHE.cache('get_tags')
def get_tags(self) -> List:
"""
Fetch all the glossary terms from atlas, along with their assigned entities as this
will be used to generate the autocomplete on the table detail page
:return: A list of TagDetail Objects
"""
tags = []
params = {
'typeName': "AtlasGlossaryTerm",
'limit': 1000,
'offset': 0,
'excludeDeletedEntities': True,
'includeSubTypes': True,
AtlasCommonParams.attributes: ["assignedEntities", ]
}
glossary_terms = self.client.discovery.faceted_search(search_parameters=params)
for item in glossary_terms.entities or list():
tags.append(
TagDetail(
tag_name=item.attributes.get("name"),
tag_count=len(item.attributes.get("assignedEntities"))
)
)
return tags
@_CACHE.cache('get_badges')
def get_badges(self) -> List:
badges = list()
metrics = self.client.admin.get_metrics()
try:
system_badges = metrics["tag"].get("tagEntities").keys()
for item in system_badges:
badges.append(
Badge(badge_name=item, category="default")
)
except AttributeError:
LOGGER.info("No badges/classifications available in the system.")
return badges
def _get_resources_followed_by_user(self, user_id: str, resource_type: str) \
-> List[Union[PopularTable, DashboardSummary]]:
"""
Helper function to get the resources (table, dashboard, etc.) followed by a user.
:param user_id: User ID of a user
:param resource_type: Type of resource to return; could be table, dashboard, etc.
:return: A list of PopularTable, DashboardSummary or any other resource.
"""
if resource_type == ResourceType.Table.name:
bookmark_qn_search_pattern = f'_{resource_type.lower()}.{user_id}.bookmark'
else:
bookmark_qn_search_pattern = f'/{resource_type.lower()}/bookmark/{user_id}'
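# Illustrative examples (hypothetical user id 'jdoe'): for tables the pattern looks like
# '_table.jdoe.bookmark' (a substring of '<schema>.<table>.hive_table.jdoe.bookmark@<cluster>'),
# while for dashboards it looks like '/dashboard/bookmark/jdoe'.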
params = {
'typeName': AtlasCommonTypes.bookmark,
'offset': '0',
'limit': '1000',
'excludeDeletedEntities': True,
'entityFilters': {
'condition': 'AND',
'criterion': [
{
'attributeName': AtlasCommonParams.qualified_name,
'operator': 'contains',
'attributeValue': bookmark_qn_search_pattern
},
{
'attributeName': AtlasStatus.ACTIVE.lower(),
'operator': 'eq',
'attributeValue': 'true'
}
]
},
AtlasCommonParams.attributes: ['count', AtlasCommonParams.qualified_name,
AtlasCommonParams.uri, 'entityName']
}
# Fetches the bookmark entities based on filters
search_results = self.client.discovery.faceted_search(search_parameters=params)
resources: List[Union[PopularTable, DashboardSummary]] = []
for record in search_results.entities or []:
if resource_type == ResourceType.Table.name:
table_info = AtlasTableKey(record.attributes[AtlasCommonParams.uri]).get_details()
res = self._parse_table_bookmark_qn(record.attributes[AtlasCommonParams.qualified_name])
resources.append(PopularTable(
database=table_info['database'],
cluster=res['cluster'],
schema=res['db'],
name=res['table']))
elif resource_type == ResourceType.Dashboard.name:
dashboard_info = self._parse_dashboard_bookmark_qn(record.attributes[AtlasCommonParams.qualified_name])
resources.append(DashboardSummary(
uri=record.attributes[AtlasCommonParams.uri],
cluster=dashboard_info['cluster'],
name=record.attributes['entityName'],
group_name=dashboard_info['dashboard_group'],
group_url='',
product=dashboard_info['product'],
url=''
))
else:
raise NotImplementedError(f'resource type {resource_type} is not supported')
return resources
def _get_resources_owned_by_user(self, user_id: str, resource_type: str) \
-> List[Union[PopularTable, DashboardSummary, Any]]:
"""
        Helper function to get the resources (table, dashboard, etc.) owned by a user.
        :param user_id: User ID of a user
        :param resource_type: Type of resource to return; could be table, dashboard, etc.
:return: A list of PopularTable, DashboardSummary or any other resource.
"""
resources: List[Union[PopularTable, DashboardSummary, Any]] = list()
if resource_type == ResourceType.Table.name:
type_regex = "(.*)_table$"
entity_type = AtlasTableTypes.table
serialize_function = self._serialize_popular_tables
elif resource_type == ResourceType.Dashboard.name:
type_regex = 'Dashboard'
entity_type = AtlasDashboardTypes.metadata
serialize_function = self._serialize_dashboard_summaries
else:
raise NotImplementedError(f'Resource Type ({resource_type}) is not yet implemented')
user_entity = self.client.entity.get_entity_by_attribute(type_name=AtlasCommonTypes.user,
uniq_attributes=[
(
AtlasCommonParams.qualified_name,
user_id)]).entity
if not user_entity:
raise NotFoundException(f'User {user_id} not found.')
resource_guids = set()
for item in self._filter_active(user_entity[AtlasCommonParams.relationships].get('owns')) or list():
if re.compile(type_regex).match(item['typeName']):
resource_guids.add(item[AtlasCommonParams.guid])
owned_resources_query = f'{entity_type} where owner like "{user_id.lower()}*" and __state = "ACTIVE"'
entities = self.client.discovery.dsl_search(owned_resources_query)
for entity in entities.entities or list():
resource_guids.add(entity.guid)
if resource_guids:
resource_guids_chunks = AtlasProxy.split_list_to_chunks(list(resource_guids), 100)
for chunk in resource_guids_chunks:
entities = self.client.entity.get_entities_by_guids(guids=list(chunk), ignore_relationships=True)
resources += serialize_function(entities)
else:
LOGGER.info(f'User ({user_id}) does not own any "{resource_type}"')
return resources
@staticmethod
def split_list_to_chunks(input_list: List[Any], n: int) -> Generator:
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(input_list), n):
yield input_list[i:i + n]
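        # Illustrative: list(AtlasProxy.split_list_to_chunks([1, 2, 3, 4, 5], 2)) evaluates to
        # [[1, 2], [3, 4], [5]].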
def _get_resource_by_user_relation(self, user_email: str, relation_type: UserResourceRel,
resource_type: ResourceType) -> Dict[str, Any]:
resources: List[Union[PopularTable, DashboardSummary]] = list()
if resource_type.name == ResourceType.Table.name:
resource = ResourceType.Table.name.lower()
elif resource_type.name == ResourceType.Dashboard.name:
resource = ResourceType.Dashboard.name.lower()
else:
raise NotImplementedError(f'Resource type {resource_type.name} not supported.')
if relation_type == UserResourceRel.follow:
resources = self._get_resources_followed_by_user(user_id=user_email,
resource_type=resource_type.name)
elif relation_type == UserResourceRel.own:
resources = self._get_resources_owned_by_user(user_id=user_email,
resource_type=resource_type.name)
return {resource: resources}
def get_dashboard_by_user_relation(self, *, user_email: str, relation_type: UserResourceRel) -> Dict[str, Any]:
return self._get_resource_by_user_relation(user_email, relation_type, ResourceType.Dashboard)
def get_table_by_user_relation(self, *, user_email: str, relation_type: UserResourceRel) -> Dict[str, Any]:
return self._get_resource_by_user_relation(user_email, relation_type, ResourceType.Table)
def get_frequently_used_tables(self, *, user_email: str) -> Dict[str, List[PopularTable]]:
user = self.client.entity.get_entity_by_attribute(type_name=AtlasCommonTypes.user,
uniq_attributes=[
(AtlasCommonParams.qualified_name, user_email)]).entity
readers_guids = []
for user_reads in self._filter_active(user[AtlasCommonParams.relationships].get('entityReads')):
readers_guids.append(user_reads.get(AtlasCommonParams.guid))
readers = self.client.entity.get_entities_by_guids(guids=list(readers_guids), ignore_relationships=True)
_results = {}
for reader in readers.entities or list():
entity_uri = reader.attributes.get(AtlasCommonParams.uri)
count = reader.attributes.get('count')
if count:
table_info = AtlasTableKey(entity_uri).get_details()
_results[count] = dict(cluster=table_info.get('cluster'),
name=table_info.get('table'),
schema=table_info.get('schema'),
database=table_info.get('database'))
sorted_counts = sorted(_results.keys())
results = []
for count in sorted_counts:
data: dict = _results.get(count, dict())
table = PopularTable(**data)
results.append(table)
return {'table': results}
def add_resource_relation_by_user(self, *,
id: str,
user_id: str,
relation_type: UserResourceRel,
resource_type: ResourceType) -> None:
if resource_type not in [ResourceType.Table, ResourceType.Dashboard]:
raise NotImplementedError(f'resource type {resource_type} is not supported')
entity = self._get_bookmark_entity(entity_uri=id, user_id=user_id, resource_type=resource_type) # type: ignore
entity.entity[AtlasCommonParams.attributes][AtlasStatus.ACTIVE.lower()] = True
self.client.entity.update_entity(entity)
def delete_resource_relation_by_user(self, *,
id: str,
user_id: str,
relation_type: UserResourceRel,
resource_type: ResourceType) -> None:
if resource_type not in [ResourceType.Table, ResourceType.Dashboard]:
raise NotImplementedError(f'resource type {resource_type} is not supported')
entity = self._get_bookmark_entity(entity_uri=id, user_id=user_id, resource_type=resource_type) # type: ignore
entity.entity[AtlasCommonParams.attributes][AtlasStatus.ACTIVE.lower()] = False
self.client.entity.update_entity(entity)
def _parse_date(self, date: int) -> Optional[int]:
try:
date_str = str(date)
date_trimmed = date_str[:10]
assert len(date_trimmed) == 10
return int(date_trimmed)
except Exception:
return None
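        # Illustrative: a millisecond-epoch value such as 1643146500000 is trimmed to its first
        # 10 digits and returned as 1643146500 (seconds); values with fewer than 10 digits yield None.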
def _get_readers(self, entity: AtlasEntityWithExtInfo, model: Any = Reader, top: Optional[int] = 15) \
-> List[Union[Reader, User]]:
_readers = entity.get(AtlasCommonParams.relationships, dict()).get('readers', list())
guids = [_reader.get(AtlasCommonParams.guid) for _reader in self._filter_active(_readers)]
if not guids:
return []
readers = self.client.entity.get_entities_by_guids(guids=list(guids), ignore_relationships=False)
_result = []
for _reader in readers.entities or list():
read_count = _reader.attributes['count']
if read_count >= int(app.config['POPULAR_RESOURCES_MINIMUM_READER_COUNT']):
reader_qn = _reader.relationshipAttributes['user']['displayText']
reader_details = self._get_user_details(reader_qn)
if model == Reader:
reader = Reader(user=User(**reader_details), read_count=read_count)
elif model == User:
reader = User(**reader_details)
else:
return []
_result.append(reader)
if model == Reader:
result = sorted(_result, key=attrgetter('read_count'), reverse=True)[:top]
else:
result = _result
result = result[:top]
return result
def _get_application(self, entity: AtlasEntityWithExtInfo) -> Optional[Application]:
_applications = entity.get(AtlasCommonParams.relationships, dict()).get('applications', list())
guids = [a.get(AtlasCommonParams.guid) for a in self._filter_active(_applications)]
if not guids:
return None
applications = self.client.entity.get_entities_by_guids(guids=list(guids), ignore_relationships=False)
        app = None
        for _app in applications.entities or list():
url = _app.attributes.get('application_url', '')
description = _app.attributes.get('description', '')
id = _app.attributes.get('id', '')
name = _app.attributes.get('name', '')
app = Application(application_url=url, description=description, id=id, name=name)
# only single app per table is supported
break
return app
def _get_programmatic_descriptions(self, parameters: dict) -> List[ProgrammaticDescription]:
programmatic_descriptions: Dict[str, ProgrammaticDescription] = {}
for source, text in parameters.items():
use_parameter = True
for regex_filter in app.config['PROGRAMMATIC_DESCRIPTIONS_EXCLUDE_FILTERS']:
pattern = re.compile(regex_filter)
if pattern.match(source):
use_parameter = False
break
if use_parameter:
                source = re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", source).lower()
programmatic_descriptions[source] = ProgrammaticDescription(source=source, text=text)
result = dict(sorted(programmatic_descriptions.items()))
return list(result.values())
def _serialize_dashboard_queries(self, queries: List[Dict]) -> List[DashboardQuery]:
"""
Renders DashboardQuery from attributes
:param queries: list of dicts with query attributes
:returns List of DashboardQuery objects.
"""
result = []
for query in queries:
name = query.get('name', '')
query_text = query.get('queryText', '')
url = query.get('url', '')
dashboard_query = DashboardQuery(name=name, query_text=query_text, url=url)
result.append(dashboard_query)
return result
def _get_dashboard_group(self, group_guid: str) -> AtlasEntityWithExtInfo:
"""
Return raw DashboardGroup entity.
:param group_guid: guid of dashboard group entity.
:return : Atlas DashboardGroup entity.
"""
entity = self.client.entity.get_entities_by_guids(guids=[group_guid]).entities[0]
return entity
def _get_dashboard_summary(self, entity: AtlasEntityWithExtInfo, executions: List[AtlasEntity]) -> Dict:
attributes = entity.entity[AtlasCommonParams.attributes]
relationships = entity.entity[AtlasCommonParams.relationships]
group = self._get_dashboard_group(relationships.get('group').get(AtlasCommonParams.guid))[
AtlasCommonParams.attributes]
successful_executions = [e for e in executions if e.get('state') == 'succeeded']
try:
last_successful_execution = successful_executions[0]
except IndexError:
last_successful_execution = dict(timestamp=0)
chart_names = [e[AtlasCommonParams.attributes]['name'] for _, e in entity['referredEntities'].items()
if e['typeName'] == AtlasDashboardTypes.chart]
result = dict(
uri=attributes.get(AtlasCommonParams.qualified_name, ''),
cluster=attributes.get('cluster', ''),
group_name=relationships.get('group', dict()).get('displayText', ''),
group_url=group.get('url', ''),
product=attributes.get('product', ''),
name=attributes.get('name', ''),
url=attributes.get('url', ''),
last_successful_run_timestamp=last_successful_execution.get('timestamp', 0),
description=attributes.get('description', ''),
chart_names=chart_names)
return result
def _get_dashboard_details(self, entity: AtlasEntityWithExtInfo) -> Dict:
try:
attributes = entity.entity[AtlasCommonParams.attributes]
relationships = entity.entity[AtlasCommonParams.relationships]
referred_entities = entity['referredEntities']
badges = self._serialize_badges(entity)
tags = self._serialize_tags(entity)
_executions = []
_queries = []
for k, v in referred_entities.items():
entity_type = v.get('typeName')
_attributes = v[AtlasCommonParams.attributes]
if entity_type == AtlasDashboardTypes.execution:
_executions.append(_attributes)
elif entity_type == AtlasDashboardTypes.query:
_queries.append(_attributes)
queries = self._serialize_dashboard_queries(_queries)
query_names = [q.name for q in queries]
table_guids = [t.get(AtlasCommonParams.guid) for t in self._filter_active(relationships.get('tables', []))]
if table_guids:
_tables = self.client.entity.get_entities_by_guids(guids=table_guids)
tables = self._serialize_popular_tables(_tables)
else:
tables = []
executions_attributes = sorted(_executions, key=lambda x: x.get('timestamp', 0), reverse=True)
try:
last_execution = executions_attributes[0]
except IndexError:
last_execution = dict(timestamp=0, state='Unknown')
owners = self._get_owners(relationships.get('ownedBy', []))
readers = self._get_readers(entity.entity, User)
result = self._get_dashboard_summary(entity, executions_attributes)
extra_spec = dict(
created_timestamp=attributes.get('createdTimestamp', 0),
updated_timestamp=attributes.get('lastModifiedTimestamp', 0),
owners=owners,
last_run_timestamp=last_execution.get('timestamp', 0),
last_run_state=last_execution.get('state', 'Unknown'),
query_names=query_names,
queries=queries,
tables=tables,
tags=tags,
badges=badges,
recent_view_count=attributes.get('popularityScore', 0),
frequent_users=readers)
result.update(extra_spec)
return result
except Exception as e:
raise e
def _get_dashboard(self, qualified_name: str) -> AtlasEntityWithExtInfo:
"""
        Return raw Dashboard entity.
:param qualified_name: qualified name of the dashboard
:return : Atlas Dashboard entity.
"""
entity = self.client.entity.get_entity_by_attribute(type_name=AtlasDashboardTypes.metadata,
uniq_attributes=[
(AtlasCommonParams.qualified_name, qualified_name)])
return entity
def get_dashboard(self, id: str) -> DashboardDetailEntity:
entity = self._get_dashboard(id)
attributes = self._get_dashboard_details(entity)
return DashboardDetailEntity(**attributes)
def get_dashboard_description(self, *, id: str) -> Description:
"""
Return dashboard description.
:param id:
:return: The description of the dashboard as a string
"""
entity = self.client.entity.get_entity_by_attribute(type_name=AtlasDashboardTypes.metadata,
uniq_attributes=[(AtlasCommonParams.qualified_name, id)])
return entity.entity[AtlasCommonParams.attributes].get('description')
def put_dashboard_description(self, *,
id: str,
description: str) -> None:
"""
Update the description of the given dashboard.
:param id: dashboard id (uri)
:param description: Description string
:return: None
"""
entity = self.client.entity.get_entity_by_attribute(type_name=AtlasDashboardTypes.metadata,
uniq_attributes=[(AtlasCommonParams.qualified_name, id)])
self.client.entity.partial_update_entity_by_guid(
entity_guid=entity.entity.get(AtlasCommonParams.guid), attr_value=description, attr_name='description'
)
def _serialize_dashboard_summaries(self, entities: AtlasEntitiesWithExtInfo) -> List[DashboardSummary]:
"""
        Returns dashboard summaries for dashboards using a specific table.
"""
result = []
for _dashboard in entities.entities:
try:
if _dashboard.status == AtlasStatus.ACTIVE:
executions = [
entities['referredEntities'].get(e.get(AtlasCommonParams.guid))[AtlasCommonParams.attributes]
for e in
self._filter_active(
_dashboard[AtlasCommonParams.relationships].get('executions', []))]
dashboard = AtlasEntityWithExtInfo(attrs=dict(entity=_dashboard, referredEntities={}))
summary = DashboardSummary(**self._get_dashboard_summary(dashboard, executions))
result.append(summary)
except (KeyError, AttributeError):
                LOGGER.exception(f'Error while accessing table report: {str(_dashboard)}.', exc_info=True)
return result
def get_resources_using_table(self, *,
id: str,
resource_type: ResourceType) -> Dict[str, List[DashboardSummary]]:
if resource_type == ResourceType.Dashboard:
resource = 'dashboards'
serialize_function = self._serialize_dashboard_summaries
else:
raise NotImplementedError(f'{resource_type} is not supported')
table = self._get_table_entity(table_uri=id)
guids = [d.get(AtlasCommonParams.guid) for d in
self._filter_active(table.entity[AtlasCommonParams.relationships].get(resource, []))]
if guids:
entities = self.client.entity.get_entities_by_guids(guids=guids)
result = serialize_function(entities)
else:
result = []
return {resource: result}
@classmethod
def _generate_edges(cls, graph: Dict[str, List[str]]) -> List[Tuple[str, str]]:
"""
Generates list of edge pairs from the graph.
:param graph: Graph of nodes
:return: List of tuples with graph edges
"""
edges = []
# for each node in graph
for node in graph:
# for each neighbour node of a single node
for neighbour in graph[node]:
# if edge exists then append
edges.append((node, neighbour))
return edges
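        # Illustrative: for graph {'a': ['b', 'c'], 'b': ['c']} this returns
        # [('a', 'b'), ('a', 'c'), ('b', 'c')].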
@classmethod
def _find_shortest_path(cls, graph: Dict[str, List[str]], start: str, end: str, path: List[Any] = []) -> List[str]:
"""
Find shortest path between graph nodes. Used to calculate 'level' parameter
__source__='https://www.python.org/doc/essays/graphs/'
__author__='Guido van Rossum'
:param graph: Dictionary of str (node key) and List[str] (connected nodes)
:param start: Starting node for finding the path
:param end: Ending node for finding the path
:param path: Accumulator for recursive calls
:return: Shortest path between start and end nodes
"""
path = path + [start]
if start == end:
return path
if not graph.get(start):
return []
shortest: List[str] = []
for node in graph[start]:
if node not in path:
newpath = AtlasProxy._find_shortest_path(graph, node, end, path)
if newpath:
if not shortest or len(newpath) < len(shortest):
shortest = newpath
return shortest
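        # Illustrative: for graph {'a': ['b', 'c'], 'b': ['c']},
        # AtlasProxy._find_shortest_path(graph, 'a', 'c') returns ['a', 'c'],
        # so the 'level' of 'c' relative to 'a' is len(path) - 1 == 1.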
@staticmethod
def _find_parent_nodes(graph: Dict) -> Dict[str, Set[str]]:
"""
        Rewrite the graph dict into a form that makes it easy to look up the parents of each node.
:param graph: Dictionary of str (node key) and List[str]
:return: Dict with keys (node) and values (parents of a node)
"""
relations: Dict[str, Set[str]] = {}
for parent, ancestors in graph.items():
for ancestor in ancestors:
if not relations.get(ancestor):
relations[ancestor] = set()
relations[ancestor].add(parent)
return relations
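        # Illustrative: for graph {'a': ['b', 'c'], 'd': ['c']} this returns
        # {'b': {'a'}, 'c': {'a', 'd'}}.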
def _get_lineage_graph(self, lineage: Dict, entity_type: str, key_class: Any) -> Dict[str, List[str]]:
"""
        Since Atlas captures lineage through an additional entity (Process(input=A, output=B)),
        we need to create a graph that has direct A > B relationships with the Process entities removed.
        :param lineage: Raw lineage captured from Atlas
:param entity_type: Type of entity for which lineage is captured
:param key_class: Helper class used for Amundsen key <> Atlas qualified name serialization/deserialization
:return: Graph of nodes with relations.
"""
processes: Dict[str, Dict[str, Any]] = dict()
entity_type = entity_type.lower()
entities = lineage.get('guidEntityMap', dict())
relations = lineage.get('relations', [])
for relation in relations:
input_guid = relation['fromEntityId']
input_type = entities.get(input_guid)['typeName'].lower()
output_guid = relation['toEntityId']
output_type = entities.get(output_guid)['typeName'].lower()
if input_type.endswith('process') and output_type.endswith(entity_type):
output_qn = entities.get(output_guid)[AtlasCommonParams.attributes][AtlasCommonParams.qualified_name]
output_key = key_class(output_qn, output_type).amundsen_key # type: ignore
if not processes.get(input_guid):
processes[input_guid] = dict(inputs=set(), outputs=set())
processes[input_guid]['outputs'].add(output_key)
elif output_type.endswith('process') and input_type.endswith(entity_type):
input_qn = entities.get(input_guid)[AtlasCommonParams.attributes][AtlasCommonParams.qualified_name]
input_key = key_class(input_qn, input_type).amundsen_key # type: ignore
if not processes.get(output_guid):
processes[output_guid] = dict(inputs=set(), outputs=set())
processes[output_guid]['inputs'].add(input_key)
graph: Dict[str, List[str]] = defaultdict(list)
for _, spec in processes.items():
for input_key in spec['inputs']:
for output_key in spec['outputs']:
graph[input_key].append(output_key)
return dict(graph)
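        # Illustrative: given Atlas relations table_a -> process and process -> table_b,
        # the Process node is dropped and the resulting graph is
        # {<amundsen key of table_a>: [<amundsen key of table_b>]}.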
def _serialize_lineage_item(self, edge: Tuple[str, str], direction: str, key_class: Any,
graph: Dict, root_node: str, parent_nodes: Dict[str, Set[str]]) -> List[LineageItem]:
"""
Serializes LineageItem object.
:param edge: tuple containing two node keys that are connected with each other.
:param direction: Lineage direction upstream/downstream
:param key_class: Helper class used for managing Atlas <> Amundsen key formats.
        :param graph: Graph from which the edge was derived. Used to find the distance between the edge node and
            the entity for which lineage is retrieved.
:param parent_nodes: Dict of keys (nodes) with set of keys (parents).
:return: Serialized LineageItem list.
"""
result: List[LineageItem] = []
if direction == 'upstream':
key, _ = edge
level = len(AtlasProxy._find_shortest_path(graph, key, root_node)) - 1
elif direction == 'downstream':
_, key = edge
level = len(AtlasProxy._find_shortest_path(graph, root_node, key)) - 1
else:
raise ValueError(f'Direction {direction} not supported!')
parents = parent_nodes.get(key, [''])
while True:
try:
parent = parents.pop()
except Exception:
break
badges: List[str] = []
usage = 0
source = key_class(key).get_details()['database']
spec = dict(key=key,
parent=parent,
source=source,
badges=badges,
usage=usage,
level=level)
result.append(LineageItem(**spec))
return result
def _serialize_lineage(self, lineage: dict, entity_type: str, root_node: str, direction: str,
key_class: Union[Type[AtlasTableKey], Type[AtlasColumnKey]]) -> List[LineageItem]:
"""
Serializes lineage to Amundsen format based on Atlas lineage output.
The assumption for Atlas <> Amundsen lineage is that every Process entity in Atlas lineage contains at least
one entity of entity_type both in inputs and outputs.
If your lineage is A > B > C where:
A - is table
B - is file
C - is table
It won't render A > C table lineage in Amundsen.
        The implementation follows a simplified set of expectations and might be subject to change if such a
        requirement arises.
:param lineage: Raw lineage from Atlas
:param entity_type: Type of the entity for which lineage is being retrieved
:param root_node: key of entity for which lineage will be rendered. Required to calculate 'level' dynamically
based on nodes distance.
:param direction: upstream/downstream
:param key_class: Class for serializing entities keys
:return: The Lineage object with upstream & downstream lineage items
"""
result: List[LineageItem] = []
graph = self._get_lineage_graph(lineage, entity_type, key_class)
edges = AtlasProxy._generate_edges(graph)
parent_nodes = self._find_parent_nodes(graph)
for edge in edges:
lineage_items = self._serialize_lineage_item(edge, direction, key_class, graph, root_node, parent_nodes)
result += lineage_items
return result
def get_lineage(self, *, id: str, resource_type: ResourceType, direction: str, depth: int) -> Lineage:
"""
Retrieves the lineage information for the specified resource type.
:param id: Key of entity for which lineage will be collected
:param resource_type: Type of the entity for which lineage is being retrieved
:param direction: Whether to get the upstream/downstream or both directions
:param depth: Depth or level of lineage information. 0=only parent, 1=immediate nodes, 2=...
:return: The Lineage object with upstream & downstream lineage items
"""
lineage_spec: Dict[str, Any] = dict(key=id,
direction=direction,
depth=depth,
upstream_entities=[],
downstream_entities=[])
# Atlas returns complete lineage when depth=0. In Amundsen depth=0 means only parent.
if depth > 0:
key_class: Union[Type[AtlasTableKey], Type[AtlasColumnKey]]
if resource_type == ResourceType.Column:
key_class = AtlasColumnKey
elif resource_type == ResourceType.Table:
key_class = AtlasTableKey
else:
raise NotImplementedError(f'Resource {resource_type.name} not supported!')
key = key_class(id) # type: ignore
entity = self.client.entity.get_entity_by_attribute(type_name=resource_type.name,
uniq_attributes=[(AtlasCommonParams.qualified_name,
key.qualified_name)])
entity_guid = entity.entity.guid
_upstream: Dict[str, Any] = {}
_downstream: Dict[str, Any] = {}
if not direction == 'downstream':
_upstream = self.client.lineage.get_lineage_info(entity_guid, 'INPUT', depth)
if not direction == 'upstream':
_downstream = self.client.lineage.get_lineage_info(entity_guid, 'OUTPUT', depth)
upstream = self._serialize_lineage(_upstream, resource_type.name, id, 'upstream', key_class)
downstream = self._serialize_lineage(_downstream, resource_type.name, id, 'downstream', key_class)
lineage_spec['upstream_entities'] = upstream
lineage_spec['downstream_entities'] = downstream
lineage = Lineage(**lineage_spec)
return lineage
def get_feature(self, *, feature_uri: str) -> Feature:
pass
def get_resource_description(self, *,
resource_type: ResourceType,
uri: str) -> Description:
pass
def put_resource_description(self, *,
resource_type: ResourceType,
uri: str,
description: str) -> None:
pass
def add_resource_owner(self, *,
uri: str,
resource_type: ResourceType,
owner: str) -> None:
pass
def delete_resource_owner(self, *,
uri: str,
resource_type: ResourceType,
owner: str) -> None:
pass
def get_resource_generation_code(self, *,
uri: str,
resource_type: ResourceType) -> GenerationCode:
pass
def get_popular_resources(self, *,
num_entries: int,
resource_types: List[str],
user_id: Optional[str] = None) -> Dict[str, List]:
raise NotImplementedError
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 25 12:13:19 2020
@author: metalcorebear
"""
from model import propagation_model
import model_params
import argparse
import os
import pandas as pd
# Specify arguments
def get_path():
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output', help='Enter the output path.', required=True)
args = vars(parser.parse_args())
output_path = str(args['output'])
return output_path
# Generate output file name parameters
output_path = get_path()
density = model_params.parameters['density']
nodes = model_params.parameters['network_size']
neg_bias = model_params.parameters['neg_bias']
filename = 'ABM_' + str(density) + '_' + str(nodes) + '_' + str(neg_bias) + '.csv'
output_file = os.path.join(output_path, filename)
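# Illustrative (hypothetical parameter values): density=0.1, network_size=500 and neg_bias=0.3
# would produce the filename 'ABM_0.1_500_0.3.csv'.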
# Instantiate model
meme_model = propagation_model()
# Number of steps to run model.
steps = model_params.parameters['steps']
for i in range(steps):
print("Step: " + str(i))
meme_model.step()
# Generate output
output_data = meme_model.datacollector.get_model_vars_dataframe()
output_data.to_csv(output_file, encoding='UTF8')
print (output_data)
print('Filename:')
print(filename)
print('You are a great American!!')
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
from google.api_core.protobuf_helpers import get_messages
from google.api import http_pb2
from google.cloud.vision_v1p2beta1.proto import geometry_pb2
from google.cloud.vision_v1p2beta1.proto import image_annotator_pb2
from google.cloud.vision_v1p2beta1.proto import text_annotation_pb2
from google.cloud.vision_v1p2beta1.proto import web_detection_pb2
from google.longrunning import operations_pb2
from google.protobuf import any_pb2
from google.protobuf import descriptor_pb2
from google.protobuf import empty_pb2
from google.protobuf import timestamp_pb2
from google.protobuf import wrappers_pb2
from google.rpc import status_pb2
from google.type import color_pb2
from google.type import latlng_pb2
_shared_modules = [
http_pb2,
operations_pb2,
any_pb2,
descriptor_pb2,
empty_pb2,
timestamp_pb2,
wrappers_pb2,
status_pb2,
color_pb2,
latlng_pb2,
]
_local_modules = [
geometry_pb2,
image_annotator_pb2,
text_annotation_pb2,
web_detection_pb2,
]
names = []
for module in _shared_modules:
for name, message in get_messages(module).items():
setattr(sys.modules[__name__], name, message)
names.append(name)
for module in _local_modules:
for name, message in get_messages(module).items():
message.__module__ = 'google.cloud.vision_v1p2beta1.types'
setattr(sys.modules[__name__], name, message)
names.append(name)
__all__ = tuple(sorted(names))
|
# -*- coding: utf-8 -*-
# from cms_bs3_theme.models import ThemeSite
def settings(request):
"""
"""
from . import conf
conf = dict(vars(conf))
# conf.update(ThemeSite.objects.get_theme_conf(request=request, fail=False))
data = request.session.get('cms_bs3_theme_conf', {})
conf.update(data)
return {'bs3_conf': conf}
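# Usage sketch (the dotted path below is assumed, not taken from this file): the context
# processor is typically enabled by appending it to the Django TEMPLATES setting, e.g.
# TEMPLATES[0]['OPTIONS']['context_processors'] += ['cms_bs3_theme.context_processors.settings'],
# after which templates can read the merged configuration via {{ bs3_conf }}.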
|
#!/usr/bin/env python
# ***********************IMPORTANT NMAP LICENSE TERMS************************
# * *
# * The Nmap Security Scanner is (C) 1996-2013 Insecure.Com LLC. Nmap is *
# * also a registered trademark of Insecure.Com LLC. This program is free *
# * software; you may redistribute and/or modify it under the terms of the *
# * GNU General Public License as published by the Free Software *
# * Foundation; Version 2 ("GPL"), BUT ONLY WITH ALL OF THE CLARIFICATIONS *
# * AND EXCEPTIONS DESCRIBED HEREIN. This guarantees your right to use, *
# * modify, and redistribute this software under certain conditions. If *
# * you wish to embed Nmap technology into proprietary software, we sell *
# * alternative licenses (contact sales@insecure.com). Dozens of software *
# * vendors already license Nmap technology such as host discovery, port *
# * scanning, OS detection, version detection, and the Nmap Scripting *
# * Engine. *
# * *
# * Note that the GPL places important restrictions on "derivative works", *
# * yet it does not provide a detailed definition of that term. To avoid *
# * misunderstandings, we interpret that term as broadly as copyright law *
# * allows. For example, we consider an application to constitute a *
# * derivative work for the purpose of this license if it does any of the *
# * following with any software or content covered by this license *
# * ("Covered Software"): *
# * *
# * o Integrates source code from Covered Software. *
# * *
# * o Reads or includes copyrighted data files, such as Nmap's nmap-os-db *
# * or nmap-service-probes. *
# * *
# * o Is designed specifically to execute Covered Software and parse the *
# * results (as opposed to typical shell or execution-menu apps, which will *
# * execute anything you tell them to). *
# * *
# * o Includes Covered Software in a proprietary executable installer. The *
# * installers produced by InstallShield are an example of this. Including *
# * Nmap with other software in compressed or archival form does not *
# * trigger this provision, provided appropriate open source decompression *
# * or de-archiving software is widely available for no charge. For the *
# * purposes of this license, an installer is considered to include Covered *
# * Software even if it actually retrieves a copy of Covered Software from *
# * another source during runtime (such as by downloading it from the *
# * Internet). *
# * *
# * o Links (statically or dynamically) to a library which does any of the *
# * above. *
# * *
# * o Executes a helper program, module, or script to do any of the above. *
# * *
# * This list is not exclusive, but is meant to clarify our interpretation *
# * of derived works with some common examples. Other people may interpret *
# * the plain GPL differently, so we consider this a special exception to *
# * the GPL that we apply to Covered Software. Works which meet any of *
# * these conditions must conform to all of the terms of this license, *
# * particularly including the GPL Section 3 requirements of providing *
# * source code and allowing free redistribution of the work as a whole. *
# * *
# * As another special exception to the GPL terms, Insecure.Com LLC grants *
# * permission to link the code of this program with any version of the *
# * OpenSSL library which is distributed under a license identical to that *
# * listed in the included docs/licenses/OpenSSL.txt file, and distribute *
# * linked combinations including the two. *
# * *
# * Any redistribution of Covered Software, including any derived works, *
# * must obey and carry forward all of the terms of this license, including *
# * obeying all GPL rules and restrictions. For example, source code of *
# * the whole work must be provided and free redistribution must be *
# * allowed. All GPL references to "this License", are to be treated as *
# * including the special and conditions of the license text as well. *
# * *
# * Because this license imposes special exceptions to the GPL, Covered *
# * Work may not be combined (even as part of a larger work) with plain GPL *
# * software. The terms, conditions, and exceptions of this license must *
# * be included as well. This license is incompatible with some other open *
# * source licenses as well. In some cases we can relicense portions of *
# * Nmap or grant special permissions to use it in other open source *
# * software. Please contact fyodor@nmap.org with any such requests. *
# * Similarly, we don't incorporate incompatible open source software into *
# * Covered Software without special permission from the copyright holders. *
# * *
# * If you have any questions about the licensing restrictions on using *
# * Nmap in other works, are happy to help. As mentioned above, we also *
# * offer alternative license to integrate Nmap into proprietary *
# * applications and appliances. These contracts have been sold to dozens *
# * of software vendors, and generally include a perpetual license as well *
# * as providing for priority support and updates. They also fund the *
# * continued development of Nmap. Please email sales@insecure.com for *
# * further information. *
# * *
# * If you received these files with a written license agreement or *
# * contract stating terms other than the terms above, then that *
# * alternative license agreement takes precedence over these comments. *
# * *
# * Source is provided to this software because we believe users have a *
# * right to know exactly what a program is going to do before they run it. *
# * This also allows you to audit the software for security holes (none *
# * have been found so far). *
# * *
# * Source code also allows you to port Nmap to new platforms, fix bugs, *
# * and add new features. You are highly encouraged to send your changes *
# * to the dev@nmap.org mailing list for possible incorporation into the *
# * main distribution. By sending these changes to Fyodor or one of the *
# * Insecure.Org development mailing lists, or checking them into the Nmap *
# * source code repository, it is understood (unless you specify otherwise) *
# * that you are offering the Nmap Project (Insecure.Com LLC) the *
# * unlimited, non-exclusive right to reuse, modify, and relicense the *
# * code. Nmap will always be available Open Source, but this is important *
# * because the inability to relicense code has caused devastating problems *
# * for other Free Software projects (such as KDE and NASM). We also *
# * occasionally relicense the code to third parties as discussed above. *
# * If you wish to specify special license conditions of your *
# * contributions, just say so when you send them. *
# * *
# * This program is distributed in the hope that it will be useful, but *
# * WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Nmap *
# * license file for more details (it's in a COPYING file included with *
# * Nmap, and also available from https://svn.nmap.org/nmap/COPYING *
# * *
# ***************************************************************************/
# This module has two classes. ScriptDB is responsible for parsing the
# script.db file and fetching each script's name and categories.
# ScriptMetadata gets the description, categories, @usage, @output, and
# arguments from the script itself.
import re
import os
import sys
from zenmapCore.Paths import Path
from zenmapCore.UmitLogging import log
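# The parser below raises ScriptDBSyntaxError on malformed script.db input, but the exception
# class is not defined in this excerpt; a minimal definition is assumed here.
class ScriptDBSyntaxError (Exception):
    pass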
class ScriptDB (object):
"""Class responsible for parsing the script.db file, fetching script
names and categories."""
LUA_STRING_ESCAPES = {
"a": "\a", "b": "\b", "f": "\f", "n": "\n", "r": "\r",
"t": "\t", "v": "\v", "\\": "\\", "\"": "\"", "'": "'", "0": "\0"
}
def __init__(self, script_db_path = None):
self.unget_buf = ""
self.f = open(script_db_path, "r")
try:
self.entries_list = self.parse()
finally:
self.f.close()
def getchar(self):
if self.unget_buf:
c = self.unget_buf[-1]
self.unget_buf = self.unget_buf[:-1]
return c
else:
return self.f.read(1)
def unget(self, data):
if data:
self.unget_buf += data
def parse(self):
"""Parses a script.db entry and returns it as a dictionary. An entry
looks like this:
Entry { filename = "afp-brute.nse", categories = { "auth", "intrusive", } }
"""
entries = []
while True:
entry = self.parse_entry()
if not entry:
break
entries.append(entry)
return entries
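        # Illustrative: the example entry shown in the docstring above parses to
        # {'filename': 'afp-brute.nse', 'categories': ['auth', 'intrusive']}.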
def token(self):
"""Returns a tuple whose first element is a type ("string", "ident", or
"delim") and whose second element is the token text."""
c = self.getchar()
while c.isspace():
c = self.getchar()
if not c:
return None
if c.isalpha() or c == "_":
ident = []
while c.isalpha() or c.isdigit() or c == "_":
ident.append(c)
c = self.getchar()
self.unget(c)
return ("ident", "".join(ident))
elif c in "'\"":
string = []
begin_quote = c
c = self.getchar()
while c != begin_quote:
if c == "\\":
repl = None
c = self.getchar()
if not c:
raise ScriptDBSyntaxError()
if c.isdigit():
d1 = c
d2 = self.getchar()
d3 = self.getchar()
if d1 and d2 and d3:
n = int(d1 + d2 + d3)
if n > 255:
raise ScriptDBSyntaxError()
repl = chr(n)
else:
self.unget(d3)
self.unget(d2)
if not repl:
repl = self.LUA_STRING_ESCAPES.get(c)
if not repl:
raise ScriptDBSyntaxError()
c = repl
string.append(c)
c = self.getchar()
return ("string", "".join(string))
elif c in "{},=":
return ("delim", c)
else:
raise ScriptDBSyntaxError()
def expect(self, tokens):
for token in tokens:
t = self.token()
if t != token:
raise ScriptDBSyntaxError()
def parse_entry(self):
entry = {}
token = self.token()
if not token:
return None
self.expect((("delim", "{"), ("ident", "filename"), ("delim", "=")))
token = self.token()
if not token or token[0] != "string":
raise ScriptDBSyntaxError()
entry["filename"] = token[1]
self.expect((("delim", ","), ("ident", "categories"), ("delim", "="), ("delim", "{")))
entry["categories"] = []
token = self.token()
if token and token[0] == "string":
entry["categories"].append(token[1])
token = self.token()
while token == ("delim", ","):
token = self.token()
if token and token[0] == "string":
entry["categories"].append(token[1])
else:
break
token = self.token()
if token != ("delim", "}"):
raise ScriptDBSyntaxError()
token = self.token()
if token == ("delim", ","):
token = self.token()
if token != ("delim", "}"):
raise ScriptDBSyntaxError()
return entry
def get_entries_list(self):
return self.entries_list
def nsedoc_tags_iter(f):
in_doc_comment = False
tag_name = None
tag_text = None
for line in f:
# New LuaDoc comment?
if re.match(r'^\s*---', line):
in_doc_comment = True
if not in_doc_comment:
continue
# New LuaDoc tag?
m = re.match(r'^\s*--+\s*@(\w+)\s*(.*)', line, re.S)
if m:
if tag_name:
yield tag_name, tag_text
tag_name = None
tag_text = None
tag_name = m.group(1)
tag_text = m.group(2)
else:
# Still in comment?
m = re.match(r'^\s*--+\s*(.*)', line)
if m:
# Add to text if we're in a tag.
if tag_name:
tag_text += m.group(1) + "\n"
else:
in_doc_comment = False
if tag_name:
yield tag_name, tag_text
tag_name = None
tag_text = None
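# Illustrative example for nsedoc_tags_iter: iterating over a file containing
#   --- Example script.
#   -- @usage
#   -- nmap --script example
# yields ('usage', 'nmap --script example\n'); a tag is emitted when the next tag
# starts or the input ends.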
class ScriptMetadata (object):
"""Class responsible for parsing all the script information."""
class Entry (object):
"""An instance of this class is used to store all the information
related to a particular script."""
def __init__(self, filename):
self.filename = filename
self.categories = []
self.arguments = [] # Arguments including library arguments.
self.license = ""
self.author = ""
self.description = ""
self.output = ""
self.usage = ""
url = property(lambda self: "http://nmap.org/nsedoc/scripts/" + os.path.splitext(self.filename)[0] + ".html")
def __init__(self, scripts_dir, nselib_dir):
self.scripts_dir = scripts_dir
self.nselib_dir = nselib_dir
self.library_arguments = {}
self.library_requires = {}
self.construct_library_arguments()
def get_metadata(self, filename):
entry = self.Entry(filename)
entry.description = self.get_string_variable(filename, "description")
entry.arguments = self.get_arguments(entry.filename)
entry.license = self.get_string_variable(filename, "license")
entry.author = self.get_string_variable(filename, "author")
filepath = os.path.join(self.scripts_dir, filename)
f = open(filepath, "r")
try:
for tag_name, tag_text in nsedoc_tags_iter(f):
if tag_name == "output" and not entry.output:
entry.output = tag_text
elif tag_name == "usage" and not entry.usage:
entry.usage = tag_text
finally:
f.close()
return entry
@staticmethod
def get_file_contents(filename):
f = open(filename, "r")
try:
contents = f.read()
finally:
f.close()
return contents
def get_string_variable(self, filename, varname):
contents = ScriptMetadata.get_file_contents(os.path.join(self.scripts_dir, filename))
# Short string?
m = re.search(re.escape(varname) + r'\s*=\s*(["\'])(.*?[^\\])\1', contents)
if m:
return m.group(2)
# Long string?
m = re.search(re.escape(varname) + r'\s*=\s*\[(=*)\[(.*?)\]\1\]', contents, re.S)
if m:
return m.group(2)
return None
@staticmethod
def get_requires(filename):
f = open(filename, "r")
try:
requires = ScriptMetadata.get_requires_from_file(f)
finally:
f.close()
return requires
@staticmethod
def get_requires_from_file(f):
require_expr = re.compile(r'.*\brequire\s*\(?([\'\"])([\w._-]+)\1\)?')
requires = []
for line in f.readlines():
m = require_expr.match(line)
if m:
requires.append(m.group(2))
return requires
@staticmethod
def get_script_args(filename):
f = open(filename, "r")
try:
args = ScriptMetadata.get_script_args_from_file(f)
finally:
f.close()
return args
@staticmethod
def get_script_args_from_file(f):
"""Extracts a list of script arguments from the file given. Results are
returned as a list of (argname, description) tuples."""
args = []
for tag_name, tag_text in nsedoc_tags_iter(f):
m = re.match(r'([\w._-]+)', tag_text)
if (tag_name == "arg" or tag_name == "args") and m:
args.append((m.group(1), re.sub(r'^[\w._-]+','',tag_text)))
return args
def get_arguments(self, filename):
"""Returns list of arguments including library arguments on
passing the file name."""
filepath = os.path.join(self.scripts_dir, filename)
script_args = self.get_script_args(filepath)
# Recursively walk through the libraries required by the script (and
# the libraries they require, etc.), adding all arguments.
library_args = []
seen = set()
pool = set(self.get_requires(filepath))
while pool:
require = pool.pop()
if require in seen:
continue
seen.add(require)
sub_requires = self.library_requires.get(require)
if sub_requires:
pool.update(set(sub_requires))
require_args = self.library_arguments.get(require)
if require_args:
library_args += require_args
return script_args + library_args
def construct_library_arguments(self):
"""Constructs a dictionary of library arguments using library
names as keys and arguments as values. Each argument is really a
(name, description) tuple."""
for filename in os.listdir(self.nselib_dir):
filepath = os.path.join(self.nselib_dir, filename)
if not os.path.isfile(filepath):
continue
base, ext = os.path.splitext(filename)
if ext == ".lua" or ext == ".luadoc":
libname = base
else:
libname = filename
self.library_arguments[libname] = self.get_script_args(filepath)
self.library_requires[libname] = self.get_requires(filepath)
def get_script_entries(scripts_dir, nselib_dir):
"""Merge the information obtained so far into one single entry for
each script and return it."""
metadata = ScriptMetadata(scripts_dir, nselib_dir)
try:
scriptdb = ScriptDB(os.path.join(scripts_dir, "script.db"))
except IOError:
return []
entries = []
for dbentry in scriptdb.get_entries_list():
entry = metadata.get_metadata(dbentry["filename"])
# Categories is the only thing ScriptMetadata doesn't take care of.
entry.categories = dbentry["categories"]
entries.append(entry)
return entries
if __name__ == '__main__':
    # get_script_entries() requires the scripts and nselib directories; when run directly,
    # take them from the command line.
    for entry in get_script_entries(sys.argv[1], sys.argv[2]):
print "*" * 75
print "Filename:", entry.filename
print "Categories:", entry.categories
print "License:", entry.license
print "Author:", entry.author
print "URL:", entry.url
print "Description:", entry.description
print "Arguments:", [x[0] for x in entry.arguments]
print "Output:"
print entry.output
print "Usage:"
print entry.usage
print "*" * 75
|
import sys
import cli
intf = sys.argv[1]
print "\n\n *** Configuring interface %s with 'configurep' function *** \n\n" %intf
cli.configurep(["interface loopback55","ip address 10.55.55.55 255.255.255.0","no shut","end"])
print "\n\n *** Configuring interface %s with 'configure' function *** \n\n"
cmd='interface %s,logging event link-status ,end' % intf
cli.configure(cmd.split(','))
print "\n\n *** Printing show cmd with 'executep' function *** \n\n"
cli.executep('show ip interface brief')
print "\n\n *** Printing show cmd with 'execute' function *** \n\n"
output= cli.execute('show run interface %s' %intf)
print (output)
print "\n\n *** Printing show cmd with 'clip' function *** \n\n"
cli.clip('show run interface %s' %intf)
|
from tello import Tello
import sys
from datetime import datetime
import time
import TelloPro
tello = Tello()
command_lst = []
command_lst.append(TelloPro.get_instance('takeoff', -1, ""))
command_lst.append(TelloPro.get_instance('up', 30, ""))
command_lst.append(TelloPro.get_instance('down', 30, ""))
command_lst.append(TelloPro.get_instance('left', 30, ""))
command_lst.append(TelloPro.get_instance('right', 30, ""))
command_lst.append(TelloPro.get_instance('forward', 30, ""))
command_lst.append(TelloPro.get_instance('back', 30, ""))
command_lst.append(TelloPro.get_instance('cw', 60, ""))
command_lst.append(TelloPro.get_instance('ccw', 60, ""))
command_lst.append(TelloPro.get_instance('flip', -1, "l"))
command_lst.append(TelloPro.get_instance('land', -1, ""))
for command in command_lst:
tello.send_command_instance(command)
|
"""
Skeleton data structures
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import os

from yt.data_objects.grid_patch import \
    AMRGridPatch
from yt.geometry.grid_geometry_handler import \
GridIndex
from yt.data_objects.static_output import \
Dataset
from .fields import SkeletonFieldInfo
class SkeletonGrid(AMRGridPatch):
_id_offset = 0
def __init__(self, id, index, level, start, dimensions):
AMRGridPatch.__init__(self, id, filename=index.index_filename,
index=index)
self.Parent = []
self.Children = []
self.Level = level
self.start_index = start.copy()
self.stop_index = self.start_index + dimensions
self.ActiveDimensions = dimensions.copy()
def __repr__(self):
return "SkeletonGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
class SkeletonHierarchy(GridIndex):
grid = SkeletonGrid
def __init__(self, ds, dataset_type='skeleton'):
self.dataset_type = dataset_type
# for now, the index file is the dataset!
self.index_filename = self.dataset.parameter_filename
self.directory = os.path.dirname(self.index_filename)
GridIndex.__init__(self, ds, dataset_type)
def _detect_output_fields(self):
# This needs to set a self.field_list that contains all the available,
# on-disk fields.
# NOTE: Each should be a tuple, where the first element is the on-disk
# fluid type or particle type. Convention suggests that the on-disk
# fluid type is usually the dataset_type and the on-disk particle type
# (for a single population of particles) is "io".
pass
def _count_grids(self):
# This needs to set self.num_grids
pass
def _parse_index(self):
# This needs to fill the following arrays, where N is self.num_grids:
# self.grid_left_edge (N, 3) <= float64
# self.grid_right_edge (N, 3) <= float64
# self.grid_dimensions (N, 3) <= int
# self.grid_particle_count (N, 1) <= int
# self.grid_levels (N, 1) <= int
# self.grids (N, 1) <= grid objects
#
pass
def _populate_grid_objects(self):
# For each grid, this must call:
# grid._prepare_grid()
# grid._setup_dx()
# This must also set:
# grid.Children <= list of child grids
# grid.Parent <= parent grid
# This is handled by the frontend because often the children must be
# identified.
pass
class SkeletonDataset(Dataset):
_index_class = SkeletonHierarchy
_field_info_class = SkeletonFieldInfo
def __init__(self, filename, dataset_type='skeleton',
storage_filename=None,
units_override=None):
self.fluid_types += ('skeleton',)
Dataset.__init__(self, filename, dataset_type,
units_override=units_override)
self.storage_filename = storage_filename
def _set_code_unit_attributes(self):
# This is where quantities are created that represent the various
# on-disk units. These are the currently available quantities which
# should be set, along with examples of how to set them to standard
# values.
#
# self.length_unit = self.quan(1.0, "cm")
# self.mass_unit = self.quan(1.0, "g")
# self.time_unit = self.quan(1.0, "s")
# self.time_unit = self.quan(1.0, "s")
#
# These can also be set:
# self.velocity_unit = self.quan(1.0, "cm/s")
# self.magnetic_unit = self.quan(1.0, "gauss")
pass
def _parse_parameter_file(self):
# This needs to set up the following items. Note that these are all
# assumed to be in code units; domain_left_edge and domain_right_edge
# will be updated to be in code units at a later time. This includes
# the cosmological parameters.
#
# self.unique_identifier
# self.parameters <= full of code-specific items of use
# self.domain_left_edge <= array of float64
# self.domain_right_edge <= array of float64
# self.dimensionality <= int
# self.domain_dimensions <= array of int64
# self.periodicity <= three-element tuple of booleans
# self.current_time <= simulation time in code units
#
# We also set up cosmological information. Set these to zero if
# non-cosmological.
#
# self.cosmological_simulation <= int, 0 or 1
# self.current_redshift <= float
# self.omega_lambda <= float
# self.omega_matter <= float
# self.hubble_constant <= float
pass
@classmethod
def _is_valid(self, *args, **kwargs):
# This accepts a filename or a set of arguments and returns True or
# False depending on if the file is of the type requested.
return False
|
from django.contrib import admin
from .models import Article, Category, User
from django import forms
from pagedown.widgets import AdminPagedownWidget
class ArticleForm(forms.ModelForm):
text = forms.CharField(widget=AdminPagedownWidget())
class Meta:
model = Article
fields = '__all__'
class ArticleAdmin(admin.ModelAdmin):
list_display = ('title', 'publish_time', 'last_modify_time', 'id')
class CategoryAdmin(admin.ModelAdmin):
list_display = ('name', 'created_time', 'last_modify_time', 'id')
class UserAdmin(admin.ModelAdmin):
list_display = ('username', 'nickname', 'created_time', 'id')
admin.site.register(Article, ArticleAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(User, UserAdmin)
|
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("//third_party:repo.bzl", "clean_dep")
def configure_snappy():
http_archive(
name = "com_github_google_snappy",
build_file = clean_dep("//third_party/snappy:BUILD.bzl"),
sha256 = "e170ce0def2c71d0403f5cda61d6e2743373f9480124bcfcd0fa9b3299d428d9",
strip_prefix = "snappy-1.1.9",
url = "https://github.com/google/snappy/archive/1.1.9.zip",
)
|
#!/usr/bin/env python
import logging
import os
import sys
if os.environ.get("ALLENNLP_DEBUG"):
LEVEL = logging.DEBUG
else:
level_name = os.environ.get("ALLENNLP_LOG_LEVEL")
LEVEL = logging._nameToLevel.get(level_name, logging.INFO)
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))))
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=LEVEL)
# filelock emits too many messages, so tell it to be quiet unless it has something
# important to say.
_filelock_logger = logging.getLogger("filelock")
_filelock_logger.setLevel(logging.WARNING)
from allennlp.commands import main # noqa
def run():
main(prog="allennlp")
if __name__ == "__main__":
run()
|
from datetime import datetime
from jose import jwt
from jose.utils import base64url_decode
from jose.exceptions import JWTError
from vds_vault_oauth.utilities import OAuthContainer
# Token that stores the necessary tokens and provides the ability to decode & log them.
class Token():
def __init__(self, token_value, token_type, logger):
self.token_value = token_value
self.token_type = token_type
self.token_claims = dict()
self.logger = logger
def decodeTokens(self):
if (self.token_value != None and self.token_type != "refresh_token"):
try:
claims = jwt.get_unverified_claims(self.token_value)
headers = jwt.get_unverified_headers(self.token_value)
self.token_claims.update(headers)
self.token_claims.update(claims)
except JWTError as e:
import sys
self.logger.log(("\t%s: %s\n" % (str(sys.exc_info()[0]), str(e))))
self.logger.log(("\t%s: %s\n\n" % ("Error", "Non-JWT token detected. Verifying against introspection endpoint.")))
return False
return True
def verifyTokenClaims(self):
self.logger.log(("\n\t" + '{s:{c}^{n}}'.format(s=" Verifying '" + self.token_type + "' Claims ",n=65,c='-') + "\n\n"))
if ('sub' in self.token_claims):
self.logger.log(("\t%s: %s\n" % ("The 'sub' claim exists", self.token_claims['sub'])))
else:
self.logger.log(("\n\tINVALID: The 'sub' claim does not exist. This is required.\n"), "ERROR")
if ('aud' in self.token_claims):
self.logger.log(("\t%s: %s\n" % ("The 'aud' claim exists", self.token_claims['aud'])))
else:
self.logger.log(("\n\tINVALID: The 'aud' claim does not exist. This is optionally required.\n"), "ERROR")
if ('exp' in self.token_claims):
expiry = datetime.utcfromtimestamp(int(self.token_claims['exp'])).strftime('%Y-%m-%d %H:%M:%S')
self.logger.log(("\t%s: %s\n" % ("The 'exp' claim exists", str(self.token_claims['exp']) + " (" + str(expiry) + " UTC)")))
else:
self.logger.log(("\n\tINVALID: The 'exp' claim does not exist.\n"), "ERROR")
if self.token_type == "access_token":
if ('cid' in self.token_claims):
self.logger.log(("\t%s: %s\n" % ("The 'cid' claim exists", self.token_claims['cid'])))
elif ('appid' in self.token_claims):
self.logger.log(("\t%s: %s\n" % ("The 'appid' claim exists", self.token_claims['appid'])))
else:
self.logger.log(("\n\tINVALID: The 'cid' or 'appid' claim does not exist.\n"), "ERROR")
self.logger.log(("\n\n"))
def logTokenClaims(self):
for key, value in self.token_claims.items():
self.logger.log(("\t%s: %s\n" % (key, value)))
self.logger.log(("\n\t" + '{s:{c}^{n}}'.format(s='',n=65,c='-') + "\n\n"))
self.logger.log(("\n"))
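# Minimal usage sketch (the token value and logger object here are hypothetical):
#   token = Token(raw_jwt, "access_token", logger)
#   if token.decodeTokens():
#       token.verifyTokenClaims()
#       token.logTokenClaims()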
|
from django.views.generic import (TemplateView, ListView,
DetailView, CreateView, UpdateView)
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from django.conf import settings
from .forms import ChildEditForm, ChildAddForm
from .models import Child, MedicalUpdate
from images.models import Photo
from django.contrib.auth.models import User
from django.shortcuts import render, get_object_or_404, redirect
class ChildListView(LoginRequiredMixin, ListView):
"""View to see a list of all children."""
template_name = "child_list.html"
login_url = reverse_lazy('auth_login')
context_object_name = 'children'
model = Child
def get(self, *args, **kwargs):
"""get args and kwargs"""
return super().get(*args, **kwargs)
def get_context_data(self, **kwargs):
"""return context data"""
context = super().get_context_data(**kwargs)
return context
class ChildMedicalUpdateView(LoginRequiredMixin, DetailView):
template_name = 'child_medical_veiw.html'
model = MedicalUpdate
login_url = reverse_lazy('auth_url')
context_object_name = 'medicalupdate'
def get_queryset(self):
return MedicalUpdate.objects.filter(child__id=self.kwargs['pk'])
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context['child'] = Child.objects.filter(id=self.kwargs['pk']).first()
context['medicalupdate'] = self.get_queryset()
return context
#
# OLD VERSION:------------------------------------------------------
# don't delete until presentation
# class ChildEditView(LoginRequiredMixin, UpdateView):
# """Lets admin or super edit a child profile."""
# template_name = 'child_edit.html'
# model = Child
# form_class = ChildEditForm
# login_url = reverse_lazy('auth_login')
# success_url = reverse_lazy('children')
# # DO
# # change to use pk
# slug_url_kwarg = 'username'
# slug_field = 'user__username'
# def get(self, *args, **kwargs):
# """Get."""
# self.kwargs['username'] = self.request.user.get_username()
# return super().get(*args, **kwargs)
# def post(self, *args, **kwargs):
# """Post."""
# self.kwargs['username'] = self.request.user.get_username()
# return super().post(*args, **kwargs)
# def get_form_kwargs(self):
# """Get kwargs."""
# kwargs = super().get_form_kwargs()
# kwargs.update({'username': self.request.user.get_username()})
# return kwargs
# def form_valid(self, form):
# """Validate form."""
# # form.instance.user.email = form.data['email']
# form.instance.user.first_name = form.data['first_name']
# form.instance.user.last_name = form.data['last_name']
# form.instance.user.save()
# return super().form_valid(form)
class ChildEditView(LoginRequiredMixin, UpdateView):
"""Lets admin or super edit a child profile."""
template_name = 'child_edit.html'
model = Child
form_class = ChildEditForm
login_url = reverse_lazy('auth_login')
success_url = reverse_lazy('childlist')
    def get(self, *args, **kwargs):
        """Handle GET requests for the child edit form."""
        # TODO: this may only need to return super().get()
        self.kwargs['username'] = self.request.user.get_username()
        return super().get(*args, **kwargs)
def post(self, *args, **kwargs):
"""Post for child edit form."""
# Do
# same as the get comment
self.kwargs['username'] = self.request.user.get_username()
return super().post(*args, **kwargs)
def get_form_kwargs(self):
"""Get kwargs from edit form."""
kwargs = super().get_form_kwargs()
kwargs.update({'username': self.request.user.get_username()})
return kwargs
def form_valid(self, form):
"""Validate form data."""
# form.instance.user = self.request.user
# form.instance.save()
# import pdb; pdb.set_trace()
photo = form.instance.photos.first()
if photo and 'image' not in form.files:
photo.delete()
elif photo:
photo.image = form.files['image']
photo.description = form.data['description']
photo.save()
elif 'image' in form.files:
# create new photo instance
photo = Photo(
child=form.instance,
image=form.files['image'],
description=form.data['description']
)
photo.save()
return super().form_valid(form)
class ChildDetailView(LoginRequiredMixin, DetailView):
template_name = 'child_profile.html'
model = Child
login_url = reverse_lazy('auth_url')
pk_url_kwarg = 'pk'
# DON'T DELETE
# NEED FOR REFERENCE ----------------------------------------
# def get_context_data(self, **kwargs):
# context = super().get_context_data(**kwargs)
# # photos = Photo.objects.filter(user__username=self.request.user.username)
# # import pdb; pdb.set_trace()
# # context['child_photos'] = photos
# return context
# def get_queryset(self):
# return Child.objects.filter(published='PUBLIC')
# -------------------------------------------------------------
class ChildCreateView(LoginRequiredMixin, CreateView):
"""Lets a staff with appropriate permissions add a child to the system."""
template_name = 'child_create.html'
model = Child
form_class = ChildAddForm
success_url = reverse_lazy('childlist')
login_url = reverse_lazy('auth_login')
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs.update({'username': self.request.user.username})
return kwargs
def form_valid(self, form):
form.instance.user = self.request.user
form.instance.save()
if 'image' in form.files:
# create new photo instance
photo = Photo(
child=form.instance,
image=form.files['image'],
description=form.data['description']
)
photo.save()
return super().form_valid(form)
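# Illustrative URL wiring sketch (not part of this module). The 'childlist' name
# mirrors the reverse_lazy('childlist') used above; the other route names and URL
# patterns are assumptions and the project's actual urls.py may differ.
#
#   from django.urls import path
#   from .views import (ChildListView, ChildDetailView, ChildEditView,
#                       ChildCreateView, ChildMedicalUpdateView)
#
#   urlpatterns = [
#       path('children/', ChildListView.as_view(), name='childlist'),
#       path('children/add/', ChildCreateView.as_view(), name='child_create'),
#       path('children/<int:pk>/', ChildDetailView.as_view(), name='child_detail'),
#       path('children/<int:pk>/edit/', ChildEditView.as_view(), name='child_edit'),
#       path('children/<int:pk>/medical/', ChildMedicalUpdateView.as_view(), name='child_medical'),
#   ]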
|
# Generated by Django 3.1.7 on 2021-05-11 21:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('MatchApp', '0061_apadrinamiento'),
]
operations = [
migrations.AlterField(
model_name='apadrinamiento',
name='tipo_ingreso',
field=models.CharField(blank=True, choices=[('Combinado', 'Combinado'), ('Unico', 'Unico'), ('Directo Albergue', 'Directo Albergue')], max_length=20, null=True, verbose_name='Tipo de Ingreso'),
),
]
|
from torch import nn
class ConvolutionalBlock(nn.Module):
def __init__(self, in_channels=128, out_channels=256, kernel_size=3, padding=1, stride=1, padding_mode='zeros'):
super().__init__()
self.conv1 = nn.Conv1d(in_channels, out_channels, kernel_size=kernel_size, padding=padding, stride=stride,
padding_mode=padding_mode)
self.bn1 = nn.BatchNorm1d(out_channels)
self.relu1 = nn.ReLU()
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
return out
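# Illustrative usage sketch (not part of the original module): with the default
# arguments (kernel_size=3, padding=1, stride=1) the block preserves the sequence
# length and only changes the channel dimension. The tensor shapes below are
# assumptions chosen for demonstration.
if __name__ == '__main__':
    import torch
    block = ConvolutionalBlock(in_channels=128, out_channels=256)
    x = torch.randn(8, 128, 64)   # (batch, channels, sequence length)
    y = block(x)
    print(y.shape)                # torch.Size([8, 256, 64])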
|
import sklearn.metrics as metrics
import pandas as pd
import numpy as np
def repair_chrdata(df, tCol):
### Parameters:
# df: input dataframe
# tCol: targeted column label with NaN
### Output
# df: repaired dataframe
# word: string of related dataframe column with some records have NaN in targeted column
# count: number of records fixed in the targeted column with NaN
# work out number of NaN records need to fix
dFrm = df[df[tCol].isnull()]
count = len(dFrm)
    # work out the fill-in string (most frequent non-null value) for the targeted column
    tword = df[tCol].unique().tolist()
    # print(tword)
    wordLT = df[tCol].value_counts(dropna=True)
word = ''
wordCnt = 0
for index, value in wordLT.items():
print(f'[COUNT] Index: {index}, Value: {value}')
if wordCnt < value:
word = index
wordCnt = value
# print(word)
# print(wordLT)
# update the targeted NaN with the most frequent string
mask = df[tCol].isnull()
df.loc[mask, tCol] = word
print(f'[REPAIR] "{tCol}" with string: {word}, Count: {count}')
return df, word, count
# Repair a single number data column contained NaN with median value
def repair_numdata(df, tCol):
### Parameters:
# df: input dataframe
# tCol: targeted column label with NaN
### Output
# df: repaired dataframe
# medianVal: median value of related dataframe column with some records have NaN in targeted column
# count: number of records fixed in the targeted column with NaN
# work out number of NaN records need to fix
dFrm = df[df[tCol].isnull()]
count = len(dFrm)
# work out the median value of the records from targeted column
medianVal = df[tCol].median()
# update the targeted NaN with the median value
mask = df[tCol].isnull()
df.loc[mask, tCol] = medianVal
print(f'[REPAIR] "{tCol}" Median: {medianVal}, Count: {count}')
return df, medianVal, count
### Work out the educated guess targets to repair dataframe with NaN in 'repair_rdata' function
def repair_target(df, tCol, rCol):
### Parameters:
# df: input dataframe
# tCol: targeted column label with NaN
# rCol: related column label without NaN for educated guess
### Output
# target: column value of related column that have NaN in targeted column
repair = df[df[tCol].isnull()]
# print(repair[[rCol, tCol]])
target = sorted(repair[rCol].unique().tolist())
print(f'[TARGET] {tCol} NaN target: {target}')
return target
### Educated guess to repair dataframe column contained NaN with mean value of related
### dataframe column
def repair_rcdata(df, tCol, rCol, target):
### Parameters:
# df: input dataframe
# tCol: targeted column label with NaN
# rCol: related column label without NaN for educated guess
# target: column value of related column that have NaN in targeted column
### Output
# df: repaired dataframe
# meanVal: mean value of related dataframe column with some records have NaN in targeted column
# count: number of records fixed in the targeted column with NaN
### Main coding
# work out number of NaN records need to fix
dFrm = df[df[tCol].isnull()]
dFrm = dFrm[dFrm[rCol] == target]
count = len(dFrm)
# work out the mean value of the records from related column
repair = df.loc[df[rCol] == target]
meanVal = round(repair[tCol].mean(), 3)
if np.isnan(meanVal):
meanVal = np.float64(0)
# update the targeted NaN with the calculated mean value of related records
df[tCol] = df.apply(
lambda row: meanVal if np.isnan(row[tCol]) & (row[rCol] == target)
else row[tCol], axis=1
)
print(f'[REPAIR] {tCol}({target}) Mean: {meanVal}, Count: {count}')
return df, meanVal, count
def regression_results(y_true, y_pred):
# Regression metrics
explained_variance=metrics.explained_variance_score(y_true, y_pred)
mean_absolute_error=metrics.mean_absolute_error(y_true, y_pred)
mse=metrics.mean_squared_error(y_true, y_pred)
# mean_squared_log_error=metrics.mean_squared_log_error(y_true, y_pred)
# median_absolute_error=metrics.median_absolute_error(y_true, y_pred)
r2=metrics.r2_score(y_true, y_pred)
print('explained_variance: ', round(explained_variance,4))
# print('mean_squared_log_error: ', round(mean_squared_log_error,4))
print('r-squared (r2): ', round(r2,4))
print('mean_absolute_error (MAE): ', round(mean_absolute_error,4))
print('mean_squared_error (MSE): ', round(mse,4))
print('root_mean_squared_error (RMSE): ', round(np.sqrt(mse),4))
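# Illustrative usage sketch (not part of the original module). The tiny DataFrame
# and column names below are assumptions; it shows how the repair helpers fill
# NaN values and how regression_results reports metrics for a dummy prediction.
if __name__ == '__main__':
    demo = pd.DataFrame({
        'city': ['A', 'A', 'B', np.nan],
        'price': [10.0, np.nan, 30.0, 40.0],
    })
    demo, fill_word, n_fixed = repair_chrdata(demo, 'city')    # fills NaN with 'A'
    demo, median_val, n_fixed = repair_numdata(demo, 'price')  # fills NaN with the median (30.0)
    regression_results(demo['price'], demo['price'] * 1.1)     # metrics for a dummy prediction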
|
import os
import numpy as np
import pytest
from ci_framework import FlopyTestSetup, base_test_dir
import flopy
base_dir = base_test_dir(__file__, rel_path="temp", verbose=True)
ex_pth = os.path.join("..", "examples", "data", "mf2005_test")
testmodels = [
os.path.join(ex_pth, f) for f in os.listdir(ex_pth) if f.endswith(".nam")
]
@pytest.mark.parametrize(
"namfile",
testmodels,
)
def test_checker_on_load(namfile):
# load all of the models in the mf2005_test folder
# model level checks are performed by default on load()
checker_on_load(namfile)
def checker_on_load(mfnam):
f = os.path.basename(mfnam)
d = os.path.dirname(mfnam)
m = flopy.modflow.Modflow.load(f, model_ws=d)
assert isinstance(
m, flopy.modflow.Modflow
), "Not a flopy.modflow.Modflow instance"
def test_bcs_check():
model_ws = f"{base_dir}_test_bcs_check"
test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws)
mf = flopy.modflow.Modflow(version="mf2005", model_ws=model_ws)
# test check for isolated cells
dis = flopy.modflow.ModflowDis(
mf, nlay=2, nrow=3, ncol=3, top=100, botm=95
)
bas = flopy.modflow.ModflowBas(mf, ibound=np.ones((2, 3, 3), dtype=int))
chk = bas.check()
dis = flopy.modflow.ModflowDis(
mf, nlay=3, nrow=5, ncol=5, top=100, botm=95
)
ibound = np.zeros((3, 5, 5), dtype=int)
ibound[1, 1, 1] = 1 # fully isolated cell
ibound[0:2, 4, 4] = 1 # cell connected vertically to one other cell
bas = flopy.modflow.ModflowBas(mf, ibound=ibound)
mf._mg_resync = True
chk = bas.check()
assert chk.summary_array["desc"][0] == "isolated cells in ibound array"
assert (
        chk.summary_array.k[0] == 1
and chk.summary_array.i[0] == 1
and chk.summary_array.j[0] == 1
)
assert len(chk.summary_array) == 1
ghb = flopy.modflow.ModflowGhb(
mf, stress_period_data={0: [0, 0, 0, 100, 1]}
)
riv = flopy.modflow.ModflowRiv(
mf,
stress_period_data={
0: [[0, 0, 0, 101, 10, 100], [0, 0, 1, 80, 10, 90]]
},
)
chk = ghb.check()
assert chk.summary_array["desc"][0] == "BC in inactive cell"
chk = riv.check()
assert chk.summary_array["desc"][4] == "RIV stage below rbots"
assert np.array_equal(chk.summary_array["j"], np.array([0, 1, 1, 1, 1]))
def test_properties_check():
# test that storage values ignored for steady state
model_ws = f"{base_dir}_test_properties_check"
test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws)
mf = flopy.modflow.Modflow(
version="mf2005",
model_ws=model_ws,
)
dis = flopy.modflow.ModflowDis(
mf,
nrow=2,
ncol=2,
top=np.array([[100, np.nan], [100, 100]]),
nper=3,
steady=True,
)
chk = dis.check()
assert len(chk.summary_array) == 1
kij = (
chk.summary_array["k"][0],
chk.summary_array["i"][0],
chk.summary_array["j"][0],
)
assert kij == (0, 0, 1)
lpf = flopy.modflow.ModflowLpf(mf, sy=np.ones((2, 2)), ss=np.ones((2, 2)))
chk = lpf.check()
assert len(chk.summary_array) == 0
# test k values check
lpf = flopy.modflow.ModflowLpf(
mf,
hk=np.array([[1, 1e10], [1, -1]]),
hani=np.array([[1, 1], [1, -1]]),
vka=np.array([[1e10, 0], [1, 1e-20]]),
)
chk = lpf.check()
ind1 = np.array(
[
True if list(inds) == [0, 1, 1] else False
for inds in chk.view_summary_array_fields(["k", "i", "j"])
]
)
ind1_errors = chk.summary_array[ind1]["desc"]
ind2 = np.array(
[
True if list(inds) == [0, 0, 1] else False
for inds in chk.view_summary_array_fields(["k", "i", "j"])
]
)
ind2_errors = chk.summary_array[ind2]["desc"]
ind3 = np.array(
[
True if list(inds) == [0, 0, 0] else False
for inds in chk.view_summary_array_fields(["k", "i", "j"])
]
)
ind3_errors = chk.summary_array[ind3]["desc"]
assert (
"zero or negative horizontal hydraulic conductivity values"
in ind1_errors
)
assert (
"horizontal hydraulic conductivity values below checker threshold of 1e-11"
in ind1_errors
)
assert "negative horizontal anisotropy values" in ind1_errors
assert (
"vertical hydraulic conductivity values below checker threshold of 1e-11"
in ind1_errors
)
assert (
"horizontal hydraulic conductivity values above checker threshold of 100000.0"
in ind2_errors
)
assert (
"zero or negative vertical hydraulic conductivity values"
in ind2_errors
)
assert (
"vertical hydraulic conductivity values above checker threshold of 100000.0"
in ind3_errors
)
def test_oc_check():
m = flopy.modflow.Modflow()
oc = flopy.modflow.mfoc.ModflowOc(m)
chk = oc.check()
assert len(chk.summary_array) == 1, len(chk.summary_array)
assert "DIS package not available" in chk.summary_array[0]["desc"]
flopy.modflow.ModflowDis(m)
oc.stress_period_data = {(0, 0): ["save head", "save budget"]}
    chk = oc.check()  # check passed
assert len(chk.summary_array) == 0, len(chk.summary_array)
oc.stress_period_data = {(0, 0): ["save"]}
chk = oc.check()
assert len(chk.summary_array) == 1, len(chk.summary_array)
assert "too few words" in chk.summary_array[0]["desc"]
oc.stress_period_data = {(0, 0): ["save it"]}
chk = oc.check()
assert len(chk.summary_array) == 1, len(chk.summary_array)
assert "action 'save it' ignored" in chk.summary_array[0]["desc"]
oc.stress_period_data = {(1, 1): ["save head", "save budget"]}
chk = oc.check()
assert len(chk.summary_array) == 1, len(chk.summary_array)
assert "OC stress_period_data ignored" in chk.summary_array[0]["desc"]
if __name__ == "__main__":
print(f"numpy version: {np.__version__}")
for mfnam in testmodels:
checker_on_load(mfnam)
test_bcs_check()
test_properties_check()
test_oc_check()
|
from urlparse import urlparse
from django import forms
from tower import ugettext_lazy as _lazy
import amo
from mkt.api.forms import SluggableModelChoiceField
from mkt.webapps.models import Addon
class ReceiptForm(forms.Form):
app = SluggableModelChoiceField(
queryset=Addon.objects.filter(type=amo.ADDON_WEBAPP),
sluggable_to_field_name='app_slug')
class TestInstall(forms.Form):
TYPE_CHOICES = (('none', _lazy('No receipt')),
('ok', _lazy(u'Test receipt')),
('expired', _lazy(u'Expired test receipt')),
('invalid', _lazy(u'Invalid test receipt')),
('refunded', _lazy(u'Refunded test receipt')))
receipt_type = forms.ChoiceField(choices=TYPE_CHOICES)
manifest_url = forms.URLField()
def clean(self):
data = self.cleaned_data
url = data.get('manifest_url')
if url:
parsed = urlparse(url)
data['root'] = '%s://%s' % (parsed.scheme, parsed.netloc)
return data
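# Illustrative sketch (not part of the original module): how the clean() hook
# derives the app root from the manifest URL. The field values are made up and
# running this requires a configured Django settings module.
#   form = TestInstall({'receipt_type': 'ok',
#                       'manifest_url': 'https://example.com/app/manifest.webapp'})
#   form.is_valid()             # True
#   form.cleaned_data['root']   # 'https://example.com'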
|
from entities.workflow import Workflow
class Main(Workflow):
def _run(self, job):
return {}
|
import operator
s1 = "#include <boost/"
lines1 = {}
with open('./boost_includes_1') as f:
lines=f.readlines()
for line in lines:
if s1 in line:
line1 = line[line.find('#'):line.find('\n')]
            if line1 in lines1:
                lines1[line1] = lines1[line1] + 1
            else:
                lines1[line1] = 1
sorted_x = sorted(lines1.items(), key=operator.itemgetter(1))
for line in sorted_x:
print line
'''
sed scripts to remove some boost deps:
make_shared:
find . -type f -not -path '*/\.*' -exec sed -i 's/boost::make_shared/std::make_shared/g' {} +
find . -type f -not -path '*/\.*' -exec sed -i 's/#include <boost\/make_shared.hpp>/#include <memory>/g' {} +
shared_ptr:
find . -type f -not -path '*/\.*' -exec sed -i 's/boost::shared_ptr/std::shared_ptr/g' {} +
find . -type f -not -path '*/\.*' -exec sed -i 's/#include <boost\/shared_ptr.hpp>/#include <memory>/g' {} +
bind:
find . -type f -not -path '*/\.*' -exec sed -i 's/boost::bind/std::bind/g' {} +
find . -type f -not -path '*/\.*' -exec sed -i 's/#include <boost\/bind.hpp>/#include <functional>/g' {} +
function:
find . -type f -not -path '*/\.*' -exec sed -i 's/boost::function/std::function/g' {} +
find . -type f -not -path '*/\.*' -exec sed -i 's/#include <boost\/function.hpp>/#include <functional>/g' {} +
scoped_ptr
find . -type f -not -path '*/\.*' -exec sed -i 's/boost::scoped_ptr/std::unique_ptr/g' {} +
find . -type f -not -path '*/\.*' -exec sed -i 's/#include <boost\/scoped_ptr.hpp>/#include <memory>/g' {} +
To remove:
<boost/math/special_functions/fpclassify.hpp>: marketmodel.cpp, matrices.cpp, simulatedannealing.cpp
change to <cmath>
change boost::math::isnan to std::isnan and boost::math::isinf to std::isinf
script:
find . -type f -not -path '*/\.*' -not -path '*/extra' -exec sed -i 's/boost::math::isnan/std::isnan/g' {} +
find . -type f -not -path '*/\.*' -not -path '*/extra' -exec sed -i 's/boost::math::isinf/std::isinf/g' {} +
find */marketmodel.cpp */matrices.cpp */*/*/simulatedannealing.hpp -type f -not -path '*/\.*' -not -path '*/extra' -exec sed -i 's/#include <boost\/math\/special_functions\/fpclassify.hpp>/#include <cmath>/g' {} +
Other math/special_functions also similar:
gamma.hpp = not replaceable - only gamma_q and gamma_q_inv in noarbsabr.cpp
atanh.hpp:
find . -type f -not -path '*/\.*' -exec sed -i 's/boost::math::atanh/std::atanh/g' {} +
find blackformula.cpp -type f -not -path '*/\.*' -exec sed -i 's/#include <boost\/math\/special_functions\/atanh.hpp>/#include <cmath>/g' {} +
erf.hpp
find */*/*/*/gaussian1dmodel.cpp -type f -not -path '*/\.*' -not -path '*/extra' -exec sed -i 's/boost::math::erf/std::erf/g' {} +
find */*/*/*/gaussian1dmodel.hpp -type f -not -path '*/\.*' -not -path '*/extra' -exec sed -i 's/#include <boost\/math\/special_functions\/erf.hpp>/#include <cmath>/g' {} +
---
boost/atomic:
find observable.hpp observable.cpp singleton.hpp -type f -not -path '*/\.*' -not -path '*/extra' -exec sed -i 's/boost::atomic/std::atomic/g' {} +
find observable.hpp observable.cpp singleton.hpp -type f -not -path '*/\.*' -not -path '*/extra' -exec sed -i 's/#include <boost\/atomic.hpp>/#include <atomic>/g' {} +
boost/random
not converting boost/lambda for now (std lambdas are non-polymorphic, so replacement may only be partial)
not converting boost/thread at present
'''
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from google.appengine.ext import ndb
from ndb_relations.relations import OneToMany
class User2(ndb.Model):
name = ndb.StringProperty()
class Order2(ndb.Model):
pass
class OrderItem2(ndb.Model):
name = ndb.StringProperty()
price = ndb.FloatProperty()
class OrderOwner(OneToMany):
origin = ndb.KeyProperty(User2)
destin = ndb.KeyProperty(Order2)
class Item(ndb.Model):
name = ndb.StringProperty()
class OrderItemRelation(OneToMany):
origin = ndb.KeyProperty(Order2)
destin = ndb.KeyProperty(Item)
|
# Copyright 2017 Google Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for VariantCalling CLIF python wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from third_party.nucleus.io import fasta
from third_party.nucleus.io import sam
from third_party.nucleus.util import ranges
from deepvariant import testdata
from deepvariant.protos import deepvariant_pb2
from deepvariant.python import allelecounter as _allelecounter
from deepvariant.python import variant_calling
def setUpModule():
testdata.init()
class WrapVariantCallingTest(absltest.TestCase):
def test_call_from_allele_counter(self):
ref = fasta.IndexedFastaReader(testdata.CHR20_FASTA)
sam_reader = sam.SamReader(testdata.CHR20_BAM)
size = 1000
region = ranges.make_range('chr20', 10000000, 10000000 + size)
allele_counter = _allelecounter.AlleleCounter(
ref.c_reader,
region,
deepvariant_pb2.AlleleCounterOptions(partition_size=size))
caller = variant_calling.VariantCaller(
deepvariant_pb2.VariantCallerOptions(
min_count_snps=2,
min_count_indels=2,
min_fraction_snps=0.12,
min_fraction_indels=0.12,
sample_name='sample_name',
p_error=0.001,
max_gq=50,
gq_resolution=1,
ploidy=2))
# Grab all of the reads in our region and add them to the allele_counter.
reads = list(sam_reader.query(region))
self.assertNotEmpty(reads)
for read in reads:
allele_counter.add(read)
# Get the candidates records for this whole region.
candidates = caller.calls_from_allele_counter(allele_counter)
# We should have at least some candidates and some gvcf records.
self.assertNotEmpty(candidates)
# Each candidate should be a DeepVariantCall.
for candidate in candidates:
self.assertIsInstance(candidate, deepvariant_pb2.DeepVariantCall)
if __name__ == '__main__':
absltest.main()
|
from os import environ
import os
import time
from urllib.parse import urlparse
import aiohttp
from pyshorteners import Shortener
from bs4 import BeautifulSoup
import requests
import re
from pyrogram import Client, filters
API_ID = environ.get('API_ID')
API_HASH = environ.get('API_HASH')
BOT_TOKEN = environ.get('BOT_TOKEN')
API_KEY = environ.get('API_KEY')
CHANNEL = environ.get('CHANNEL')
HOWTO = environ.get('HOWTO')
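# Environment variables expected by this bot (values are placeholders, not real
# credentials): API_ID / API_HASH from my.telegram.org, BOT_TOKEN from @BotFather,
# API_KEY for the doodurl.in shortener API, and CHANNEL / HOWTO for the
# (currently commented-out) footer helper below.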
bot = Client('Droplink bot',
api_id=API_ID,
api_hash=API_HASH,
bot_token=BOT_TOKEN)
@bot.on_message(filters.command('start') & filters.private)
async def start(bot, message):
await message.reply(
f"**Hi {message.chat.first_name}!**\n\n"
"I'm doodurl bot. Just send me link and get short link")
@bot.on_message(filters.command('help') & filters.private)
async def help_command(bot, message):
await message.reply(
f"**Hello, {message.chat.first_name}!**\n\n"
"**If you send post which had doodstream Links, texts & images... Than I'll convert & replace all pdisk links with your doodurk links \nMessage me @mrpunisher52 For more help-**")
@bot.on_message(filters.command('support') & filters.private)
async def support_command(bot, message):
await message.reply(
f"**Hey, {message.chat.first_name}!**\n\n"
"**please contact me on @mrpunisher52 or for more join @hornyworld22**")
@bot.on_message(filters.text & filters.private)
async def pdisk_uploader(bot, message):
new_string = str(message.text)
conv = await message.reply("Converting...")
dele = conv["message_id"]
try:
pdisk_link = await multi_pdisk_up(new_string)
await bot.delete_messages(chat_id=message.chat.id, message_ids=dele)
await message.reply(f'{pdisk_link}' , quote=True)
except Exception as e:
await message.reply(f'Error: {e}', quote=True)
@bot.on_message(filters.photo & filters.private)
async def pdisk_photo_uploader(bot, message):
new_string = str(message.caption)
conv = await message.reply("Converting...")
dele = conv["message_id"]
try:
pdisk_link = await multi_pdisk_up(new_string)
if(len(pdisk_link) > 1020):
await bot.delete_messages(chat_id=message.chat.id, message_ids=dele)
await message.reply(f'{pdisk_link}' , quote=True)
else:
await bot.delete_messages(chat_id=message.chat.id, message_ids=dele)
await bot.send_photo(message.chat.id, message.photo.file_id, caption=f'{pdisk_link}')
except Exception as e:
await message.reply(f'Error: {e}', quote=True)
async def pdisk_up(link):
if ('pdisk' in link or 'kuklink' in link or 'kofilink' in link or 'cofilink' in link or 'bit' in link or 'vdshort' in link or 'vidrivers' in link or 'dplinks' in link or 'wslinker' in link or 'mdisk' in link or 'dood' in link):
url = 'https://doodurl.in/api'
params = {'api': API_KEY, 'url': link}
async with aiohttp.ClientSession() as session:
async with session.get(url, params=params, raise_for_status=True) as response:
data = await response.json()
v_url = data["shortenedUrl"]
else:
v_url = link
return (v_url)
async def multi_pdisk_up(ml_string):
list_string = ml_string.splitlines()
ml_string = ' \n'.join(list_string)
new_ml_string = list(map(str, ml_string.split(" ")))
new_ml_string = [sub.replace('https://t.me/Desi_Bhabhi_Aunty_hot_Video/41', 'https://t.me/Desi_Bhabhi_Aunty_hot_Video/61') for sub in new_ml_string]
#new_ml_string = await remove_footer(new_ml_string)
new_join_str = "".join(new_ml_string)
urls = re.findall(r'(https?://[^\s]+)', new_join_str)
nml_len = len(new_ml_string)
u_len = len(urls)
url_index = []
count = 0
for i in range(nml_len):
for j in range(u_len):
if (urls[j] in new_ml_string[i]):
url_index.append(count)
count += 1
new_urls = await new_pdisk_url(urls)
url_index = list(dict.fromkeys(url_index))
i = 0
for j in url_index:
new_ml_string[j] = new_ml_string[j].replace(urls[i], new_urls[i])
i += 1
new_string = " ".join(new_ml_string)
#return await addFooter(new_string)
return (new_string)
async def new_pdisk_url(urls):
new_urls = []
for i in urls:
time.sleep(0.2)
new_urls.append(await pdisk_up(i))
return new_urls
'''async def remove_footer(new_List):
for i in new_List:
if('https://t.me/Desi_Bhabhi_Aunty_hot_Video/41' in i):
i = i.replace("41","61")
#new_List.remove(i)
return new_List'''
'''async def addFooter(str):
footer = """
━━━━━━━━━━━━━━━
⚙️ How to Download / Watch Online :""" + HOWTO + """
━━━━━━━━━━━━━━━
⭐️JOIN CHANNEL ➡️ t.me/""" + CHANNEL
return str + footer'''
bot.run()
|
from .aggregate_interval_play import AggregateIntervalPlay
from .milestone import Milestone
from .models import (
AggregateDailyAppNameMetrics,
AggregateDailyTotalUsersMetrics,
AggregateDailyUniqueUsersMetrics,
AggregateMonthlyAppNameMetrics,
AggregateMonthlyPlays,
AggregateMonthlyTotalUsersMetrics,
AggregateMonthlyUniqueUsersMetrics,
AggregatePlaylist,
AggregatePlays,
AggregateTrack,
AggregateUser,
AppMetricsAllTime,
AppMetricsTrailingMonth,
AppMetricsTrailingWeek,
AppNameMetrics,
AssociatedWallet,
Base,
BlacklistedIPLD,
Block,
BlockMixin,
Challenge,
ChallengeDisbursement,
ChallengeType,
Follow,
HourlyPlayCounts,
IndexingCheckpoints,
IPLDBlacklistBlock,
ListenStreakChallenge,
Play,
Playlist,
PlaysArchive,
ProfileCompletionChallenge,
Remix,
Repost,
RepostType,
RouteMetrics,
RouteMetricsAllTime,
RouteMetricsDayMatview,
RouteMetricsMonthMatview,
RouteMetricsTrailingMonth,
RouteMetricsTrailingWeek,
Save,
SaveType,
SkippedTransaction,
SkippedTransactionLevel,
Stem,
TagTrackUserMatview,
Track,
URSMContentNode,
User,
UserBalance,
UserBalanceChange,
UserChallenge,
UserListeningHistory,
WalletChain,
)
from .related_artist import RelatedArtist
from .reward_manager import RewardManagerTransaction
from .spl_token_transaction import SPLTokenTransaction
from .track_route import TrackRoute
from .track_trending_score import TrackTrendingScore
from .trending_param import TrendingParam
from .trending_result import TrendingResult
from .user_bank import UserBankAccount, UserBankTransaction
from .user_events import UserEvents
__all__ = [
"AggregateDailyAppNameMetrics",
"AggregateDailyTotalUsersMetrics",
"AggregateDailyUniqueUsersMetrics",
"AggregateMonthlyAppNameMetrics",
"AggregateMonthlyTotalUsersMetrics",
"AggregateMonthlyUniqueUsersMetrics",
"AggregatePlaylist",
"AggregatePlays",
"AggregateMonthlyPlays",
"AggregateTrack",
"AggregateUser",
"AggregateIntervalPlay",
"AppMetricsAllTime",
"AppMetricsTrailingMonth",
"AppMetricsTrailingWeek",
"AppNameMetrics",
"AssociatedWallet",
"Base",
"BlacklistedIPLD",
"Block",
"BlockMixin",
"Challenge",
"ChallengeDisbursement",
"ChallengeType",
"Follow",
"HourlyPlayCounts",
"IPLDBlacklistBlock",
"IndexingCheckpoints",
"ListenStreakChallenge",
"Milestone",
"Play",
"PlaysArchive",
"Playlist",
"ProfileCompletionChallenge",
"RelatedArtist",
"Remix",
"Repost",
"RepostType",
"RewardManagerTransaction",
"RouteMetrics",
"RouteMetricsAllTime",
"RouteMetricsDayMatview",
"RouteMetricsMonthMatview",
"RouteMetricsTrailingMonth",
"RouteMetricsTrailingWeek",
"Save",
"SaveType",
"SkippedTransaction",
"SkippedTransactionLevel",
"SPLTokenTransaction",
"Stem",
"TagTrackUserMatview",
"Track",
"TrackRoute",
"TrackTrendingScore",
"TrendingParam",
"TrendingResult",
"URSMContentNode",
"User",
"UserBalance",
"UserBalanceChange",
"UserChallenge",
"UserBankTransaction",
"UserBankAccount",
"UserEvents",
"UserListeningHistory",
"WalletChain",
]
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from pip.req import parse_requirements
import re, ast
# get version from __version__ variable in file_management/__init__.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('file_management/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
requirements = parse_requirements("requirements.txt", session="")
setup(
name='file_management',
version=version,
description='App for managing File',
author='Indictrans',
author_email='sangram.p@indictranstech.com',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=[str(ir.req) for ir in requirements],
dependency_links=[str(ir._link) for ir in requirements if ir._link]
)
|
# Generated by Django 1.11.7 on 2018-01-12 17:21
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("letters", "0009_auto_20170826_0742")]
operations = [
migrations.AlterModelOptions(
name="letter",
options={
"ordering": ["created"],
"permissions": (
("can_filter_eml", "Can filter eml"),
("recognize_letter", "Can recognize letter"),
),
"verbose_name": "Letter",
"verbose_name_plural": "Letters",
},
)
]
|
from ranger.api.commands import Command
class paste_as_root(Command):
def execute(self):
if self.fm.do_cut:
self.fm.execute_console('shell sudo mv %c .')
else:
self.fm.execute_console('shell sudo cp -r %c .')
class fzf_select(Command):
"""
:fzf_select
Find a file using fzf.
With a prefix argument select only directories.
See: https://github.com/junegunn/fzf
"""
def execute(self):
import subprocess
import os.path
if self.quantifier:
# match only directories
command="find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
-o -type d -print 2> /dev/null | sed 1d | cut -b3- | fzf +m --reverse --header='Jump to file'"
else:
# match files and directories
command="find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
            -o -print 2> /dev/null | sed 1d | cut -b3- | fzf +m --reverse --header='Jump to file'"
fzf = self.fm.execute_command(command, universal_newlines=True, stdout=subprocess.PIPE)
stdout, stderr = fzf.communicate()
if fzf.returncode == 0:
fzf_file = os.path.abspath(stdout.rstrip('\n'))
if os.path.isdir(fzf_file):
self.fm.cd(fzf_file)
else:
self.fm.select_file(fzf_file)
import os
from ranger.core.loader import CommandLoader
class extract_here(Command):
def execute(self):
""" extract selected files to current directory."""
cwd = self.fm.thisdir
marked_files = tuple(cwd.get_selection())
def refresh(_):
cwd = self.fm.get_directory(original_path)
cwd.load_content()
one_file = marked_files[0]
cwd = self.fm.thisdir
original_path = cwd.path
au_flags = ['-x', cwd.path]
au_flags += self.line.split()[1:]
au_flags += ['-e']
self.fm.copy_buffer.clear()
self.fm.cut_buffer = False
if len(marked_files) == 1:
descr = "extracting: " + os.path.basename(one_file.path)
else:
descr = "extracting files from: " + os.path.basename(
one_file.dirname)
obj = CommandLoader(args=['aunpack'] + au_flags
+ [f.path for f in marked_files], descr=descr,
read=True)
obj.signal_bind('after', refresh)
self.fm.loader.add(obj)
import os
from ranger.core.loader import CommandLoader
class compress(Command):
def execute(self):
""" Compress marked files to current directory """
cwd = self.fm.thisdir
marked_files = cwd.get_selection()
if not marked_files:
return
def refresh(_):
cwd = self.fm.get_directory(original_path)
cwd.load_content()
original_path = cwd.path
parts = self.line.split()
au_flags = parts[1:]
descr = "compressing files in: " + os.path.basename(parts[1])
obj = CommandLoader(args=['apack'] + au_flags + \
[os.path.relpath(f.path, cwd.path) for f in marked_files], descr=descr, read=True)
obj.signal_bind('after', refresh)
self.fm.loader.add(obj)
def tab(self, tabnum):
""" Complete with current folder name """
extension = ['.zip', '.tar.gz', '.rar', '.7z']
return ['compress ' + os.path.basename(self.fm.thisdir.path) + ext for ext in extension]
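# Illustrative rc.conf bindings for the commands above (the key choices are assumptions):
#   map pr paste_as_root
#   map <C-f> fzf_select
#   map ex extract_here
# compress takes the archive name as an argument, e.g. type ":compress backup.zip"
# in the ranger console; tab completion suggests names based on the current folder.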
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
# replace mydatabase the same way you did in "setup.py"
cfg.parentdir_prefix = "pyannote-db-callhome-"
# replace MyDatabase the same way you did in "setup.py"
cfg.versionfile_source = "CallHome/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Check that it's not possible to start a second muskcoind instance using the same datadir or wallet."""
import os
from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import ErrorMatch
class FilelockTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def setup_network(self):
self.add_nodes(self.num_nodes, extra_args=None)
self.nodes[0].start([])
self.nodes[0].wait_for_rpc_connection()
def run_test(self):
datadir = os.path.join(self.nodes[0].datadir, 'regtest')
self.log.info("Using datadir {}".format(datadir))
self.log.info("Check that we can't start a second muskcoind instance using the same datadir")
expected_msg = "Error: Cannot obtain a lock on data directory {}. Muskcoin Core is probably already running.".format(datadir)
self.nodes[1].assert_start_raises_init_error(extra_args=['-datadir={}'.format(self.nodes[0].datadir), '-noserver'], expected_msg=expected_msg)
if self.is_wallet_compiled():
wallet_dir = os.path.join(datadir, 'wallets')
self.log.info("Check that we can't start a second muskcoind instance using the same wallet")
expected_msg = "Error: Error initializing wallet database environment"
self.nodes[1].assert_start_raises_init_error(extra_args=['-walletdir={}'.format(wallet_dir), '-noserver'], expected_msg=expected_msg, match=ErrorMatch.PARTIAL_REGEX)
if __name__ == '__main__':
FilelockTest().main()
|
from ConfigParser import SafeConfigParser
import errno
import logging
import os
import urllib2
class Config(object):
# S3 settings
AWS_ACCESS_KEY_CONFIG = ('aws', 'access_key', 'AWS_ACCESS_KEY')
AWS_SECRET_KEY_CONFIG = ('aws', 'secret_key', 'AWS_SECRET_KEY')
AWS_TEST_RESULT_BUCKET_CONFIG = ('aws', 'test_result_bucket', 'TEST_RESULT_BUCKET')
# MySQL settings
MYSQL_HOST_CONFIG = ('mysql', 'host', 'MYSQL_HOST')
MYSQL_PORT_CONFIG = ('mysql', 'port', 'MYSQL_PORT')
MYSQL_USER_CONFIG = ('mysql', 'user', 'MYSQL_USER')
MYSQL_PWD_CONFIG = ('mysql', 'password', 'MYSQL_PWD')
MYSQL_DB_CONFIG = ('mysql', 'database', 'MYSQL_DB')
# Isolate settings
ISOLATE_HOME_CONFIG = ('isolate', 'home', "ISOLATE_HOME")
ISOLATE_SERVER_CONFIG = ('isolate', 'server', "ISOLATE_SERVER")
ISOLATE_CACHE_DIR_CONFIG = ('isolate', 'cache_dir', "ISOLATE_CACHE_DIR")
# Beanstalk settings
BEANSTALK_HOST_CONFIG = ('beanstalk', 'host', 'BEANSTALK_HOST')
# Dist test settings
DIST_TEST_MASTER_CONFIG = ('dist_test', 'master', "DIST_TEST_MASTER")
DIST_TEST_JOB_PATH_CONFIG = ('dist_test', 'job_path', 'DIST_TEST_JOB_PATH')
DIST_TEST_USER_CONFIG = ('dist_test', 'user', 'DIST_TEST_USER')
DIST_TEST_PASSWORD_CONFIG = ('dist_test', 'password', 'DIST_TEST_PASSWORD')
DIST_TEST_URL_TIMEOUT_CONFIG = ('dist_test', 'url_timeout', 'DIST_TEST_URL_TIMEOUT')
def __init__(self, path=None):
if path is None:
path = os.getenv("DIST_TEST_CNF")
if path is None:
path = os.path.join(os.getenv("HOME"), ".dist_test.cnf")
logging.info("Reading configuration from %s", path)
# Populate parser with default values
defaults = {
"log_dir" : os.path.join(os.path.dirname(os.path.realpath(__file__)), "logs"),
"submit_gce_metrics" : "True",
"allowed_ip_ranges": "0.0.0.0/0",
"accounts": "{}",
}
self.config = SafeConfigParser(defaults)
self.config.read(path)
# Isolate settings
self.ISOLATE_HOME = self._get_with_env_override(*self.ISOLATE_HOME_CONFIG)
self.ISOLATE_SERVER = self._get_with_env_override(*self.ISOLATE_SERVER_CONFIG)
self.ISOLATE_CACHE_DIR = self._get_with_env_override(*self.ISOLATE_CACHE_DIR_CONFIG)
# S3 settings
self.AWS_ACCESS_KEY = self._get_with_env_override(*self.AWS_ACCESS_KEY_CONFIG)
self.AWS_SECRET_KEY = self._get_with_env_override(*self.AWS_SECRET_KEY_CONFIG)
self.AWS_TEST_RESULT_BUCKET = self._get_with_env_override(*self.AWS_TEST_RESULT_BUCKET_CONFIG)
# MySQL settings
self.MYSQL_HOST = self._get_with_env_override(*self.MYSQL_HOST_CONFIG)
try:
self.MYSQL_PORT = int(self._get_with_env_override(*self.MYSQL_PORT_CONFIG))
        except (TypeError, ValueError):
self.MYSQL_PORT = 3306
self.MYSQL_USER = self._get_with_env_override(*self.MYSQL_USER_CONFIG)
self.MYSQL_PWD = self._get_with_env_override(*self.MYSQL_PWD_CONFIG)
self.MYSQL_DB = self._get_with_env_override(*self.MYSQL_DB_CONFIG)
# Beanstalk settings
self.BEANSTALK_HOST = self._get_with_env_override(*self.BEANSTALK_HOST_CONFIG)
# dist_test settings
if not self.config.has_section('dist_test'):
self.config.add_section('dist_test')
self.DIST_TEST_MASTER = self._get_with_env_override(*self.DIST_TEST_MASTER_CONFIG)
self.DIST_TEST_JOB_PATH = self._get_with_env_override(*self.DIST_TEST_JOB_PATH_CONFIG)
if self.DIST_TEST_JOB_PATH is None:
self.DIST_TEST_JOB_PATH = os.path.expanduser("~/.dist-test-last-job")
self.DIST_TEST_USER = self._get_with_env_override(*self.DIST_TEST_USER_CONFIG)
self.DIST_TEST_PASSWORD = self._get_with_env_override(*self.DIST_TEST_PASSWORD_CONFIG)
self.DIST_TEST_URL_TIMEOUT = self._get_with_env_override(*self.DIST_TEST_URL_TIMEOUT_CONFIG)
if self.DIST_TEST_URL_TIMEOUT is not None:
self.DIST_TEST_URL_TIMEOUT = float(self.DIST_TEST_URL_TIMEOUT)
# dist_test master configs (in the 'dist_test' section)
self.DIST_TEST_ALLOWED_IP_RANGES = self.config.get('dist_test', 'allowed_ip_ranges')
self.ACCOUNTS = self.config.get('dist_test', 'accounts')
self.log_dir = self.config.get('dist_test', 'log_dir')
# Make the log directory if it doesn't exist
Config.mkdir_p(self.log_dir)
self.SERVER_ACCESS_LOG = os.path.join(self.log_dir, "server-access.log")
self.SERVER_ERROR_LOG = os.path.join(self.log_dir, "server-error.log")
self.SERVER_LOG = os.path.join(self.log_dir, "server.log")
self.SLAVE_LOG = os.path.join(self.log_dir, "slave.log")
@staticmethod
def mkdir_p(path):
"""Similar to mkdir -p, make a directory ignoring EEXIST"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def _get_with_env_override(self, section, option, env_key):
env_value = os.environ.get(env_key)
if env_value is not None:
return env_value
file_value = None
if self.config.has_option(section, option):
file_value = self.config.get(section, option)
return file_value
def ensure_aws_configured(self):
self._ensure_configs([self.AWS_ACCESS_KEY_CONFIG,
self.AWS_SECRET_KEY_CONFIG,
self.AWS_TEST_RESULT_BUCKET_CONFIG])
def ensure_isolate_configured(self):
self._ensure_configs([self.ISOLATE_HOME_CONFIG,
self.ISOLATE_SERVER_CONFIG,
self.ISOLATE_CACHE_DIR_CONFIG])
def ensure_mysql_configured(self):
self._ensure_configs([self.MYSQL_HOST_CONFIG,
self.MYSQL_USER_CONFIG,
self.MYSQL_PWD_CONFIG,
self.MYSQL_DB_CONFIG])
def ensure_beanstalk_configured(self):
self._ensure_configs([self.BEANSTALK_HOST_CONFIG])
def ensure_dist_test_configured(self):
self._ensure_configs([self.DIST_TEST_MASTER_CONFIG])
def _ensure_configs(self, configs):
for config in configs:
if self._get_with_env_override(*config) is None:
raise Exception(("Missing configuration %s.%s. Please set in the config file or " +
"set the environment variable %s.") % config)
def configure_auth(self):
"""
Configure urllib2 to pass authentication information if provided
in the configuration.
"""
if not self.DIST_TEST_USER:
return
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_mgr.add_password(None, self.DIST_TEST_MASTER,
self.DIST_TEST_USER, self.DIST_TEST_PASSWORD)
handler = urllib2.HTTPDigestAuthHandler(password_mgr)
opener = urllib2.build_opener(handler)
urllib2.install_opener(opener)
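# Illustrative usage sketch (not part of the original module); the config is read
# from DIST_TEST_CNF or ~/.dist_test.cnf as documented above, and
# ensure_mysql_configured() raises if the MySQL settings are missing.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    config = Config()
    config.ensure_mysql_configured()
    config.configure_auth()
    logging.info("Using MySQL host %s:%d", config.MYSQL_HOST, config.MYSQL_PORT)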
|
######################################################################
# This file should be kept compatible with Python 2.3, see PEP 291. #
######################################################################
"""
Generic dylib path manipulation
"""
import re
__all__ = ['dylib_info']
DYLIB_RE = re.compile(r"""(?x)
(?P<location>^.*)(?:^|/)
(?P<name>
(?P<shortname>\w+?)
(?:\.(?P<version>[^._]+))?
(?:_(?P<suffix>[^._]+))?
\.dylib$
)
""")
def dylib_info(filename):
"""
A dylib name can take one of the following four forms:
Location/Name.SomeVersion_Suffix.dylib
Location/Name.SomeVersion.dylib
Location/Name_Suffix.dylib
Location/Name.dylib
returns None if not found or a mapping equivalent to:
dict(
location='Location',
name='Name.SomeVersion_Suffix.dylib',
shortname='Name',
version='SomeVersion',
suffix='Suffix',
)
Note that SomeVersion and Suffix are optional and may be None
if not present.
"""
is_dylib = DYLIB_RE.match(filename)
if not is_dylib:
return None
return is_dylib.groupdict()
def test_dylib_info():
def d(location=None, name=None, shortname=None, version=None, suffix=None):
return dict(
location=location,
name=name,
shortname=shortname,
version=version,
suffix=suffix
)
assert dylib_info('completely/invalid') is None
assert dylib_info('completely/invalide_debug') is None
assert dylib_info('P/Foo.dylib') == d('P', 'Foo.dylib', 'Foo')
assert dylib_info('P/Foo_debug.dylib') == d('P', 'Foo_debug.dylib', 'Foo', suffix='debug')
assert dylib_info('P/Foo.A.dylib') == d('P', 'Foo.A.dylib', 'Foo', 'A')
assert dylib_info('P/Foo_debug.A.dylib') == d('P', 'Foo_debug.A.dylib', 'Foo_debug', 'A')
assert dylib_info('P/Foo.A_debug.dylib') == d('P', 'Foo.A_debug.dylib', 'Foo', 'A', 'debug')
if __name__ == '__main__':
test_dylib_info()
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import math
import operator
import warnings
from typing import Any, List, Union, Dict, Optional, Callable, Iterable, NoReturn, TypeVar
import torch
import torch.nn as nn
from nni.common.serializer import Translatable
from nni.retiarii.serializer import basic_unit
from nni.retiarii.utils import STATE_DICT_PY_MAPPING_PARTIAL
from .utils import Mutable, generate_new_label, get_fixed_value
__all__ = ['LayerChoice', 'InputChoice', 'ValueChoice', 'Placeholder', 'ChosenInputs']
class LayerChoice(Mutable):
"""
Layer choice selects one of the ``candidates``, then apply it on inputs and return results.
Layer choice does not allow itself to be nested.
Parameters
----------
candidates : list of nn.Module or OrderedDict
A module list to be selected from.
prior : list of float
Prior distribution used in random sampling.
label : str
Identifier of the layer choice.
Attributes
----------
length : int
Deprecated. Number of ops to choose from. ``len(layer_choice)`` is recommended.
names : list of str
Names of candidates.
choices : list of Module
Deprecated. A list of all candidate modules in the layer choice module.
``list(layer_choice)`` is recommended, which will serve the same purpose.
Notes
-----
    ``candidates`` can be a list of modules or an ordered dict of named modules, for example,
.. code-block:: python
self.op_choice = LayerChoice(OrderedDict([
("conv3x3", nn.Conv2d(3, 16, 128)),
("conv5x5", nn.Conv2d(5, 16, 128)),
("conv7x7", nn.Conv2d(7, 16, 128))
]))
Elements in layer choice can be modified or deleted. Use ``del self.op_choice["conv5x5"]`` or
``self.op_choice[1] = nn.Conv3d(...)``. Adding more choices is not supported yet.
"""
# FIXME: prior is designed but not supported yet
@classmethod
def create_fixed_module(cls, candidates: Union[Dict[str, nn.Module], List[nn.Module]], *,
label: Optional[str] = None, **kwargs):
chosen = get_fixed_value(label)
if isinstance(candidates, list):
result = candidates[int(chosen)]
else:
result = candidates[chosen]
# map the named hierarchies to support weight inheritance for python engine
if hasattr(result, STATE_DICT_PY_MAPPING_PARTIAL):
# handle cases where layer choices are nested
# already has a mapping, will merge with it
prev_mapping = getattr(result, STATE_DICT_PY_MAPPING_PARTIAL)
setattr(result, STATE_DICT_PY_MAPPING_PARTIAL, {k: f'{chosen}.{v}' for k, v in prev_mapping.items()})
else:
# "result" needs to know where to map itself.
# Ideally, we should put a _mapping_ in the module where "result" is located,
# but it's impossible to put mapping into parent module here.
setattr(result, STATE_DICT_PY_MAPPING_PARTIAL, {'__self__': str(chosen)})
return result
def __init__(self, candidates: Union[Dict[str, nn.Module], List[nn.Module]], *,
prior: Optional[List[float]] = None, label: Optional[str] = None, **kwargs):
super(LayerChoice, self).__init__()
if 'key' in kwargs:
warnings.warn(f'"key" is deprecated. Assuming label.')
label = kwargs['key']
if 'return_mask' in kwargs:
warnings.warn(f'"return_mask" is deprecated. Ignoring...')
if 'reduction' in kwargs:
warnings.warn(f'"reduction" is deprecated. Ignoring...')
self.candidates = candidates
self.prior = prior or [1 / len(candidates) for _ in range(len(candidates))]
assert abs(sum(self.prior) - 1) < 1e-5, 'Sum of prior distribution is not 1.'
self._label = generate_new_label(label)
self.names = []
if isinstance(candidates, dict):
for name, module in candidates.items():
assert name not in ["length", "reduction", "return_mask", "_key", "key", "names"], \
"Please don't use a reserved name '{}' for your module.".format(name)
self.add_module(name, module)
self.names.append(name)
elif isinstance(candidates, list):
for i, module in enumerate(candidates):
self.add_module(str(i), module)
self.names.append(str(i))
else:
raise TypeError("Unsupported candidates type: {}".format(type(candidates)))
self._first_module = self._modules[self.names[0]] # to make the dummy forward meaningful
@property
def key(self):
return self._key()
@torch.jit.ignore
def _key(self):
warnings.warn('Using key to access the identifier of LayerChoice is deprecated. Please use label instead.',
category=DeprecationWarning)
return self._label
@property
def label(self):
return self._label
def __getitem__(self, idx):
if isinstance(idx, str):
return self._modules[idx]
return list(self)[idx]
def __setitem__(self, idx, module):
key = idx if isinstance(idx, str) else self.names[idx]
return setattr(self, key, module)
def __delitem__(self, idx):
if isinstance(idx, slice):
for key in self.names[idx]:
delattr(self, key)
else:
if isinstance(idx, str):
key, idx = idx, self.names.index(idx)
else:
key = self.names[idx]
delattr(self, key)
del self.names[idx]
def __len__(self):
return len(self.names)
def __iter__(self):
return map(lambda name: self._modules[name], self.names)
@property
def choices(self):
return self._choices()
@torch.jit.ignore
def _choices(self):
warnings.warn("layer_choice.choices is deprecated. Use `list(layer_choice)` instead.", category=DeprecationWarning)
return list(self)
def forward(self, x):
warnings.warn('You should not run forward of this module directly.')
return self._first_module(x)
def __repr__(self):
return f'LayerChoice({self.candidates}, label={repr(self.label)})'
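# Illustrative sketch (not part of the original module): LayerChoice behaves as
# a module container, assuming an OrderedDict of candidates as in the docstring:
#
#   choice = LayerChoice(OrderedDict([
#       ("conv3x3", nn.Conv2d(16, 16, 3)),
#       ("conv5x5", nn.Conv2d(16, 16, 5)),
#   ]))
#   choice["conv3x3"]       # access a candidate by name
#   len(choice)             # 2
#   list(choice)            # candidate modules, in insertion order
#   del choice["conv5x5"]   # deletion is supported; adding new choices is not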
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
ReductionType = Literal['mean', 'concat', 'sum', 'none']
class InputChoice(Mutable):
"""
Input choice selects ``n_chosen`` inputs from ``choose_from`` (contains ``n_candidates`` keys).
Use ``reduction`` to specify how chosen inputs are reduced into one output. A few options are:
* ``none``: do nothing and return the list directly.
* ``sum``: summing all the chosen inputs.
* ``mean``: taking the average of all chosen inputs.
* ``concat``: concatenate all chosen inputs at dimension 1.
We don't support customizing reduction yet.
Parameters
----------
n_candidates : int
Number of inputs to choose from. It is required.
n_chosen : int
Recommended inputs to choose. If None, mutator is instructed to select any.
reduction : str
``mean``, ``concat``, ``sum`` or ``none``.
prior : list of float
Prior distribution used in random sampling.
label : str
Identifier of the input choice.
"""
@classmethod
def create_fixed_module(cls, n_candidates: int, n_chosen: Optional[int] = 1,
reduction: ReductionType = 'sum', *,
prior: Optional[List[float]] = None, label: Optional[str] = None, **kwargs):
return ChosenInputs(get_fixed_value(label), reduction=reduction)
def __init__(self, n_candidates: int, n_chosen: Optional[int] = 1,
reduction: str = 'sum', *,
prior: Optional[List[float]] = None, label: Optional[str] = None, **kwargs):
super(InputChoice, self).__init__()
if 'key' in kwargs:
warnings.warn(f'"key" is deprecated. Assuming label.')
label = kwargs['key']
if 'return_mask' in kwargs:
warnings.warn(f'"return_mask" is deprecated. Ignoring...')
if 'choose_from' in kwargs:
warnings.warn(f'"reduction" is deprecated. Ignoring...')
self.n_candidates = n_candidates
self.n_chosen = n_chosen
self.reduction = reduction
self.prior = prior or [1 / n_candidates for _ in range(n_candidates)]
assert self.reduction in ['mean', 'concat', 'sum', 'none']
self._label = generate_new_label(label)
@property
def key(self):
return self._key()
@torch.jit.ignore
def _key(self):
warnings.warn('Using key to access the identifier of InputChoice is deprecated. Please use label instead.',
category=DeprecationWarning)
return self._label
@property
def label(self):
return self._label
def forward(self, candidate_inputs: List[torch.Tensor]) -> torch.Tensor:
warnings.warn('You should not run forward of this module directly.')
return candidate_inputs[0]
def __repr__(self):
return f'InputChoice(n_candidates={self.n_candidates}, n_chosen={self.n_chosen}, ' \
f'reduction={repr(self.reduction)}, label={repr(self.label)})'
class ChosenInputs(nn.Module):
"""
A module that chooses from a tensor list and outputs a reduced tensor.
The already-chosen version of InputChoice.
When forward, ``chosen`` will be used to select inputs from ``candidate_inputs``,
and ``reduction`` will be used to choose from those inputs to form a tensor.
Attributes
----------
chosen : list of int
Indices of chosen inputs.
reduction : ``mean`` | ``concat`` | ``sum`` | ``none``
How to reduce the inputs when multiple are selected.
"""
def __init__(self, chosen: Union[List[int], int], reduction: ReductionType):
super().__init__()
self.chosen = chosen if isinstance(chosen, list) else [chosen]
self.reduction = reduction
def forward(self, candidate_inputs):
return self._tensor_reduction(self.reduction, [candidate_inputs[i] for i in self.chosen])
def _tensor_reduction(self, reduction_type, tensor_list):
if reduction_type == 'none':
return tensor_list
if not tensor_list:
return None # empty. return None for now
if len(tensor_list) == 1:
return tensor_list[0]
if reduction_type == 'sum':
return sum(tensor_list)
if reduction_type == 'mean':
return sum(tensor_list) / len(tensor_list)
if reduction_type == 'concat':
return torch.cat(tensor_list, dim=1)
raise ValueError(f'Unrecognized reduction policy: "{reduction_type}"')
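# Illustrative sketch (not part of the original module): ChosenInputs selects
# the listed indices and reduces them, e.g. with hypothetical tensors a, b, c:
#
#   chosen = ChosenInputs([0, 2], reduction='sum')
#   out = chosen([a, b, c])   # equivalent to a + c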
# the code in ValueChoice can be generated with this codegen
# this is not done online because I want to have type-hint supports
# $ python -c "from nni.retiarii.nn.pytorch.api import _valuechoice_codegen; _valuechoice_codegen(_internal=True)"
def _valuechoice_codegen(*, _internal: bool = False):
if not _internal:
raise RuntimeError("This method is set to be internal. Please don't use it directly.")
MAPPING = {
# unary
'neg': '-', 'pos': '+', 'invert': '~',
# binary
'add': '+', 'sub': '-', 'mul': '*', 'matmul': '@',
        'truediv': '/', 'floordiv': '//', 'mod': '%',
'lshift': '<<', 'rshift': '>>',
'and': '&', 'xor': '^', 'or': '|',
# no reflection
'lt': '<', 'le': '<=', 'eq': '==',
'ne': '!=', 'ge': '>=', 'gt': '>',
# NOTE
# Currently we don't support operators like __contains__ (b in a),
# Might support them in future when we actually need them.
}
binary_template = """ def __{op}__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.{opt}, '{{}} {sym} {{}}', [self, other])"""
binary_r_template = """ def __r{op}__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.{opt}, '{{}} {sym} {{}}', [other, self])"""
unary_template = """ def __{op}__(self) -> 'ValueChoiceX':
return ValueChoiceX(operator.{op}, '{sym}{{}}', [self])"""
for op, sym in MAPPING.items():
if op in ['neg', 'pos', 'invert']:
print(unary_template.format(op=op, sym=sym) + '\n')
else:
opt = op + '_' if op in ['and', 'or'] else op
print(binary_template.format(op=op, opt=opt, sym=sym) + '\n')
if op not in ['lt', 'le', 'eq', 'ne', 'ge', 'gt']:
print(binary_r_template.format(op=op, opt=opt, sym=sym) + '\n')
def _valuechoice_staticmethod_helper(orig_func):
orig_func.__doc__ += """
Notes
-----
This function performs lazy evaluation.
Only the expression will be recorded when the function is called.
The real evaluation happens when the inner value choice has determined its final decision.
    If no value choice is contained in the parameter list, the evaluation happens immediately."""
return orig_func
class ValueChoiceX(Translatable):
"""Internal API. Implementation note:
The transformed (X) version of value choice.
It can be the result of composition (transformation) of one or several value choices. For example,
.. code-block:: python
nn.ValueChoice([1, 2]) + nn.ValueChoice([3, 4]) + 5
The instance of base class cannot be created directly. Instead, they should be only the result of transformation of value choice.
Therefore, there is no need to implement ``create_fixed_module`` in this class, because,
1. For python-engine, value choice itself has create fixed module. Consequently, the transformation is born to be fixed.
2. For graph-engine, it uses evaluate to calculate the result.
Potentially, we have to implement the evaluation logic in oneshot algorithms. I believe we can postpone the discussion till then.
"""
def __init__(self, function: Callable[..., Any], repr_template: str, arguments: List[Any], dry_run: bool = True):
super().__init__()
if function is None:
# this case is a hack for ValueChoice subclass
# it will reach here only because ``__init__`` in ``nn.Module`` is useful.
return
self.function = function
self.repr_template = repr_template
self.arguments = arguments
assert any(isinstance(arg, ValueChoiceX) for arg in self.arguments)
if dry_run:
# for sanity check
self.dry_run()
def inner_choices(self) -> Iterable['ValueChoice']:
"""
Return an iterable of all leaf value choices.
Useful for composition of value choices.
No deduplication on labels. Mutators should take care.
"""
for arg in self.arguments:
if isinstance(arg, ValueChoiceX):
yield from arg.inner_choices()
def dry_run(self) -> Any:
"""
Dry run the value choice to get one of its possible evaluation results.
"""
# values are not used
return self._evaluate(iter([]), True)
def evaluate(self, values: Iterable[Any]) -> Any:
"""
Evaluate the result of this group.
``values`` should in the same order of ``inner_choices()``.
"""
return self._evaluate(iter(values), False)
def _evaluate(self, values: Iterable[Any], dry_run: bool = False) -> Any:
# "values" iterates in the recursion
eval_args = []
for arg in self.arguments:
if isinstance(arg, ValueChoiceX):
# recursive evaluation
eval_args.append(arg._evaluate(values, dry_run))
# the recursion will stop when it hits a leaf node (value choice)
# the implementation is in `ValueChoice`
else:
# constant value
eval_args.append(arg)
return self.function(*eval_args)
def _translate(self):
"""
Try to behave like one of its candidates when used in ``basic_unit``.
"""
return self.dry_run()
def __repr__(self):
reprs = []
for arg in self.arguments:
if isinstance(arg, ValueChoiceX) and not isinstance(arg, ValueChoice):
reprs.append('(' + repr(arg) + ')') # add parenthesis for operator priority
else:
reprs.append(repr(arg))
return self.repr_template.format(*reprs)
# the following are a series of methods to create "ValueChoiceX"
# which is a transformed version of value choice
# https://docs.python.org/3/reference/datamodel.html#special-method-names
# Special operators that can be useful in place of built-in conditional operators.
@staticmethod
@_valuechoice_staticmethod_helper
def to_int(obj: 'ValueChoiceOrAny') -> Union['ValueChoiceX', int]:
"""
Convert a ``ValueChoice`` to an integer.
"""
if isinstance(obj, ValueChoiceX):
return ValueChoiceX(int, 'int({})', [obj])
return int(obj)
@staticmethod
@_valuechoice_staticmethod_helper
def to_float(obj: 'ValueChoiceOrAny') -> Union['ValueChoiceX', float]:
"""
Convert a ``ValueChoice`` to a float.
"""
if isinstance(obj, ValueChoiceX):
return ValueChoiceX(float, 'float({})', [obj])
return float(obj)
@staticmethod
@_valuechoice_staticmethod_helper
def condition(pred: 'ValueChoiceOrAny',
true: 'ValueChoiceOrAny',
false: 'ValueChoiceOrAny') -> 'ValueChoiceOrAny':
"""
Return ``true`` if the predicate ``pred`` is true else ``false``.
Examples
--------
>>> ValueChoice.condition(ValueChoice([1, 2]) > ValueChoice([0, 3]), 2, 1)
"""
if any(isinstance(obj, ValueChoiceX) for obj in [pred, true, false]):
return ValueChoiceX(lambda t, c, f: t if c else f, '{} if {} else {}', [true, pred, false])
return true if pred else false
@staticmethod
@_valuechoice_staticmethod_helper
def max(arg0: Union[Iterable['ValueChoiceOrAny'], 'ValueChoiceOrAny'],
*args: List['ValueChoiceOrAny']) -> 'ValueChoiceOrAny':
"""
Returns the maximum value from a list of value choices.
        The usage should be similar to Python's built-in ``max`` function:
        the parameters can either be an iterable, or at least two arguments.
"""
if not args:
return ValueChoiceX.max(*list(arg0))
lst = [arg0] + list(args)
if any(isinstance(obj, ValueChoiceX) for obj in lst):
return ValueChoiceX(max, 'max({})', lst)
return max(lst)
@staticmethod
@_valuechoice_staticmethod_helper
def min(arg0: Union[Iterable['ValueChoiceOrAny'], 'ValueChoiceOrAny'],
*args: List['ValueChoiceOrAny']) -> 'ValueChoiceOrAny':
"""
        Returns the minimum value from a list of value choices.
        The usage should be similar to Python's built-in ``min`` function:
        the parameters can either be an iterable, or at least two arguments.
"""
if not args:
return ValueChoiceX.min(*list(arg0))
lst = [arg0] + list(args)
if any(isinstance(obj, ValueChoiceX) for obj in lst):
return ValueChoiceX(min, 'min({})', lst)
return min(lst)
def __hash__(self):
# this is required because we have implemented ``__eq__``
return id(self)
# NOTE:
# Write operations are not supported. Reasons follow:
# - Semantics are not clear. It can be applied to "all" the inner candidates, or only the chosen one.
# - Implementation effort is too huge.
# As a result, inplace operators like +=, *=, magic methods like `__getattr__` are not included in this list.
def __getitem__(self, key: Any) -> 'ValueChoiceX':
return ValueChoiceX(lambda x, y: x[y], '{}[{}]', [self, key])
# region implement int, float, round, trunc, floor, ceil
# because I believe sometimes we need them to calculate #channels
# `__int__` and `__float__` are not supported because `__int__` is required to return int.
def __round__(self, ndigits: Optional[Any] = None) -> 'ValueChoiceX':
if ndigits is not None:
return ValueChoiceX(round, 'round({}, {})', [self, ndigits])
return ValueChoiceX(round, 'round({})', [self])
def __trunc__(self) -> 'ValueChoiceX':
raise RuntimeError("Try to use `ValueChoice.to_int()` instead of `math.trunc()` on value choices.")
def __floor__(self) -> 'ValueChoiceX':
return ValueChoiceX(math.floor, 'math.floor({})', [self])
def __ceil__(self) -> 'ValueChoiceX':
return ValueChoiceX(math.ceil, 'math.ceil({})', [self])
def __index__(self) -> NoReturn:
# https://docs.python.org/3/reference/datamodel.html#object.__index__
raise RuntimeError("`__index__` is not allowed on ValueChoice, which means you can't "
"use int(), float(), complex(), range() on a ValueChoice.")
def __bool__(self) -> NoReturn:
raise RuntimeError('Cannot use bool() on ValueChoice. That means, using ValueChoice in a if-clause is illegal. '
'Please try methods like `ValueChoice.max(a, b)` to see whether that meets your needs.')
# endregion
# region the following code is generated with codegen (see above)
# Annotated with "region" because I want to collapse them in vscode
def __neg__(self) -> 'ValueChoiceX':
return ValueChoiceX(operator.neg, '-{}', [self])
def __pos__(self) -> 'ValueChoiceX':
return ValueChoiceX(operator.pos, '+{}', [self])
def __invert__(self) -> 'ValueChoiceX':
return ValueChoiceX(operator.invert, '~{}', [self])
def __add__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.add, '{} + {}', [self, other])
def __radd__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.add, '{} + {}', [other, self])
def __sub__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.sub, '{} - {}', [self, other])
def __rsub__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.sub, '{} - {}', [other, self])
def __mul__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.mul, '{} * {}', [self, other])
def __rmul__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.mul, '{} * {}', [other, self])
def __matmul__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.matmul, '{} @ {}', [self, other])
def __rmatmul__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.matmul, '{} @ {}', [other, self])
    def __truediv__(self, other: Any) -> 'ValueChoiceX':
        return ValueChoiceX(operator.truediv, '{} / {}', [self, other])
    def __rtruediv__(self, other: Any) -> 'ValueChoiceX':
        return ValueChoiceX(operator.truediv, '{} / {}', [other, self])
    def __floordiv__(self, other: Any) -> 'ValueChoiceX':
        return ValueChoiceX(operator.floordiv, '{} // {}', [self, other])
    def __rfloordiv__(self, other: Any) -> 'ValueChoiceX':
        return ValueChoiceX(operator.floordiv, '{} // {}', [other, self])
def __mod__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.mod, '{} % {}', [self, other])
def __rmod__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.mod, '{} % {}', [other, self])
def __lshift__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.lshift, '{} << {}', [self, other])
def __rlshift__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.lshift, '{} << {}', [other, self])
def __rshift__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.rshift, '{} >> {}', [self, other])
def __rrshift__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.rshift, '{} >> {}', [other, self])
def __and__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.and_, '{} & {}', [self, other])
def __rand__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.and_, '{} & {}', [other, self])
def __xor__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.xor, '{} ^ {}', [self, other])
def __rxor__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.xor, '{} ^ {}', [other, self])
def __or__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.or_, '{} | {}', [self, other])
def __ror__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.or_, '{} | {}', [other, self])
def __lt__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.lt, '{} < {}', [self, other])
def __le__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.le, '{} <= {}', [self, other])
def __eq__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.eq, '{} == {}', [self, other])
def __ne__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.ne, '{} != {}', [self, other])
def __ge__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.ge, '{} >= {}', [self, other])
def __gt__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(operator.gt, '{} > {}', [self, other])
# endregion
# __pow__, __divmod__, __abs__ are special ones.
# Not easy to cover those cases with codegen.
def __pow__(self, other: Any, modulo: Optional[Any] = None) -> 'ValueChoiceX':
if modulo is not None:
return ValueChoiceX(pow, 'pow({}, {}, {})', [self, other, modulo])
return ValueChoiceX(lambda a, b: a ** b, '{} ** {}', [self, other])
def __rpow__(self, other: Any, modulo: Optional[Any] = None) -> 'ValueChoiceX':
if modulo is not None:
return ValueChoiceX(pow, 'pow({}, {}, {})', [other, self, modulo])
return ValueChoiceX(lambda a, b: a ** b, '{} ** {}', [other, self])
def __divmod__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(divmod, 'divmod({}, {})', [self, other])
def __rdivmod__(self, other: Any) -> 'ValueChoiceX':
return ValueChoiceX(divmod, 'divmod({}, {})', [other, self])
def __abs__(self) -> 'ValueChoiceX':
return ValueChoiceX(abs, 'abs({})', [self])
ValueChoiceOrAny = TypeVar('ValueChoiceOrAny', ValueChoiceX, Any)
class ValueChoice(ValueChoiceX, Mutable):
"""
ValueChoice is to choose one from ``candidates``.
In most use scenarios, ValueChoice should be passed to the init parameters of a serializable module. For example,
.. code-block:: python
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, nn.ValueChoice([32, 64]), kernel_size=nn.ValueChoice([3, 5, 7]))
def forward(self, x):
return self.conv(x)
    In case you want to search a parameter that is used repeatedly, you can share the same value choice instance.
(Sharing the label should have the same effect.) For example,
.. code-block:: python
class Net(nn.Module):
def __init__(self):
super().__init__()
hidden_dim = nn.ValueChoice([128, 512])
self.fc = nn.Sequential(
nn.Linear(64, hidden_dim),
nn.Linear(hidden_dim, 10)
)
# the following code has the same effect.
# self.fc = nn.Sequential(
# nn.Linear(64, nn.ValueChoice([128, 512], label='dim')),
# nn.Linear(nn.ValueChoice([128, 512], label='dim'), 10)
# )
def forward(self, x):
return self.fc(x)
Note that ValueChoice should be used directly. Transformations like ``nn.Linear(32, nn.ValueChoice([64, 128]) * 2)``
are not supported.
Another common use case is to initialize the values to choose from in init and call the module in forward to get the chosen value.
    Usually, this is used to pass a mutable value to a functional API like ``torch.xxx`` or ``nn.functional.xxx``.
For example,
.. code-block:: python
class Net(nn.Module):
def __init__(self):
super().__init__()
self.dropout_rate = nn.ValueChoice([0., 1.])
def forward(self, x):
return F.dropout(x, self.dropout_rate())
Parameters
----------
candidates : list
List of values to choose from.
prior : list of float
Prior distribution to sample from.
label : str
Identifier of the value choice.
"""
# FIXME: prior is designed but not supported yet
@classmethod
def create_fixed_module(cls, candidates: List[Any], *, label: Optional[str] = None, **kwargs):
value = get_fixed_value(label)
if value not in candidates:
raise ValueError(f'Value {value} does not belong to the candidates: {candidates}.')
return value
def __init__(self, candidates: List[Any], *, prior: Optional[List[float]] = None, label: Optional[str] = None):
super().__init__(None, None, None)
self.candidates = candidates
self.prior = prior or [1 / len(candidates) for _ in range(len(candidates))]
assert abs(sum(self.prior) - 1) < 1e-5, 'Sum of prior distribution is not 1.'
self._label = generate_new_label(label)
self._accessor = []
@property
def label(self):
return self._label
def forward(self):
warnings.warn('You should not run forward of this module directly.')
return self.candidates[0]
def inner_choices(self) -> Iterable['ValueChoice']:
# yield self because self is the only value choice here
yield self
def dry_run(self) -> Any:
return self.candidates[0]
def _evaluate(self, values: Iterable[Any], dry_run: bool = False) -> Any:
if dry_run:
return self.candidates[0]
try:
value = next(values)
except StopIteration:
raise ValueError(f'Value list {values} is exhausted when trying to get a chosen value of {self}.')
if value not in self.candidates:
raise ValueError(f'Value {value} does not belong to the candidates of {self}.')
return value
def __repr__(self):
return f'ValueChoice({self.candidates}, label={repr(self.label)})'
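# Illustrative sketch (not part of the original module): value choices compose
# lazily into ValueChoiceX expressions, which are evaluated only once the
# search has fixed a decision:
#
#   vc = ValueChoice([2, 4, 8], label='width')
#   expr = vc * 16 + 1            # a ValueChoiceX, not an int
#   list(expr.inner_choices())    # [vc]
#   expr.evaluate([4])            # 4 * 16 + 1 == 65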
@basic_unit
class Placeholder(nn.Module):
"""
The API that creates an empty module for later mutations.
For advanced usages only.
"""
def __init__(self, label, **related_info):
self.label = label
self.related_info = related_info
super().__init__()
def forward(self, x):
return x
|
# -*-coding:utf-8-*-
import logging
"""避免被ban策略之一:使用useragent池。
使用注意:需在settings.py中进行相应的设置。
"""
import random
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
class RotateUserAgentMiddleware(UserAgentMiddleware):
def __init__(self, user_agent=''):
self.user_agent = user_agent
def process_request(self, request, spider):
ua = random.choice(self.user_agent_list)
if ua:
            # show the user agent currently in use
            #print "********Current UserAgent:%s************" %ua
            # log it
logging.log(msg='Current UserAgent: ' + ua, level=logging.DEBUG)
request.headers.setdefault('User-Agent', ua)
    # the default user_agent_list comprises Chrome, IE, Firefox, Mozilla, Opera and Netscape
    # for more user agent strings, see http://www.useragentstring.com/pages/useragentstring.php
    # adjacent string literals concatenate, so each entry below is one complete
    # user-agent string split across two source lines
    user_agent_list = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 "
        "(KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 "
        "(KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 "
        "(KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 "
        "(KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1"
        "(KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5"
        "(KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5"
        "(KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3"
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3"
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3"
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3"
        "(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3"
        "(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3"
        "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3"
        "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3"
        "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3"
        "(KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24"
        "(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24"
        "(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
    ]
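    # Illustrative settings.py sketch (the project/module path is an assumption):
    # enable this middleware and disable Scrapy's built-in UserAgentMiddleware so
    # the rotated User-Agent header is not overwritten.
    #
    #   DOWNLOADER_MIDDLEWARES = {
    #       'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
    #       'myproject.middlewares.RotateUserAgentMiddleware': 400,
    #   }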
|
# Copyright (C) 2010 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for jsonchecker.py."""
import unittest
from blinkpy.style.checkers import jsonchecker
class MockErrorHandler(object):
def __init__(self, handle_style_error):
self.turned_off_filtering = False
self._handle_style_error = handle_style_error
def turn_off_line_filtering(self):
self.turned_off_filtering = True
def __call__(self, line_number, category, confidence, message):
self._handle_style_error(self, line_number, category, confidence, message)
return True
class JSONCheckerTest(unittest.TestCase):
"""Tests JSONChecker class."""
def test_line_number_from_json_exception(self):
tests = (
(0, 'No JSON object could be decoded'),
(2, 'Expecting property name: line 2 column 1 (char 2)'),
(3, 'Expecting object: line 3 column 1 (char 15)'),
(9, 'Expecting property name: line 9 column 21 (char 478)'),
)
for expected_line, message in tests:
self.assertEqual(expected_line, jsonchecker.JSONChecker.line_number_from_json_exception(ValueError(message)))
def assert_no_error(self, json_data):
def handle_style_error(mock_error_handler, line_number, category, confidence, message):
self.fail('Unexpected error: %d %s %d %s' % (line_number, category, confidence, message))
error_handler = MockErrorHandler(handle_style_error)
checker = jsonchecker.JSONChecker('foo.json', error_handler)
checker.check(json_data.split('\n'))
self.assertTrue(error_handler.turned_off_filtering)
def assert_error(self, expected_line_number, expected_category, json_data):
def handle_style_error(mock_error_handler, line_number, category, confidence, message):
mock_error_handler.had_error = True
self.assertEqual(expected_line_number, line_number)
self.assertEqual(expected_category, category)
self.assertIn(category, jsonchecker.JSONChecker.categories)
error_handler = MockErrorHandler(handle_style_error)
error_handler.had_error = False
checker = jsonchecker.JSONChecker('foo.json', error_handler)
checker.check(json_data.split('\n'))
self.assertTrue(error_handler.had_error)
self.assertTrue(error_handler.turned_off_filtering)
def mock_handle_style_error(self):
pass
def test_conflict_marker(self):
self.assert_error(0, 'json/syntax', '<<<<<<< HEAD\n{\n}\n')
def test_single_quote(self):
self.assert_error(2, 'json/syntax', "{\n'slaves': []\n}\n")
def test_init(self):
error_handler = MockErrorHandler(self.mock_handle_style_error)
checker = jsonchecker.JSONChecker('foo.json', error_handler)
self.assertEqual(checker._handle_style_error, error_handler)
def test_no_error(self):
self.assert_no_error("""{
"slaves": [ { "name": "test-slave", "platform": "*" },
{ "name": "apple-xserve-4", "platform": "mac-snowleopard" }
],
"builders": [ { "name": "SnowLeopard Intel Release (Build)", "type": "Build", "builddir": "snowleopard-intel-release",
"platform": "mac-snowleopard", "configuration": "release", "architectures": ["x86_64"],
"slavenames": ["apple-xserve-4"]
}
],
"schedulers": [ { "type": "PlatformSpecificScheduler", "platform": "mac-snowleopard", "branch": "trunk", "treeStableTimer": 45.0,
"builderNames": ["SnowLeopard Intel Release (Build)", "SnowLeopard Intel Debug (Build)"]
}
]
}
""")
|
"""Copyright 2021 Google LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import logging
import os
import sys
import time
from typing import Any, Dict
import yaml
from oc_config_validate import (context, formatter, runner, schema, target,
testbase)
__version__ = "2.0.0"
LOGGING_FORMAT = "%(levelname)s(%(filename)s:%(lineno)d):%(message)s"
def createArgsParser() -> argparse.ArgumentParser:
"""Create parser for arguments passed into the program from the CLI.
Returns:
argparse.ArgumentParser object.
"""
parser = argparse.ArgumentParser(
description="OpenConfig Configuration Validation utility.")
parser.add_argument(
"-tgt",
"--target",
type=str,
help="The gNMI Target, as hostname:port.",
)
parser.add_argument(
"-user",
"--username",
type=str,
help="Username to use when establishing a gNMI Channel to the Target.",
)
parser.add_argument(
"-pass",
"--password",
type=str,
help="Password to use when establishing a gNMI Channel to the Target.",
)
parser.add_argument(
"-key",
"--private_key",
type=str,
help="Path to the Private key to use when establishing"
"a gNMI Channel to the Target.",
)
parser.add_argument(
"-ca",
"--root_ca_cert",
type=str,
help="Path to Root CA to use when building the gNMI Channel.",
)
parser.add_argument(
"-cert",
"--cert_chain",
type=str,
help="Path to Certificate chain to use when"
"establishing a gNMI Channel to the Target.")
parser.add_argument(
"-tests",
"--tests_file",
type=str,
action="store",
help="YAML file to read the test to run.")
parser.add_argument(
"-init",
"--init_config_file",
type=str,
action="store",
help="JSON file with the initial full OpenConfig configuration to "
"apply.")
parser.add_argument(
"-xpath",
"--init_config_xpath",
type=str,
action="store",
help="gNMI xpath where to apply the initial config.",
default="/")
parser.add_argument(
"-results",
"--results_file",
type=str,
action="store",
help="Filename where to write the test results.")
parser.add_argument(
"-f",
"--format",
type=str,
action="store",
help="Format "
"of the GetResponse to be printed. Default=JSON.",
choices=["json", "protobuff"],
default="json")
parser.add_argument(
"-v", "--version", help="Print program version", action="store_true")
parser.add_argument(
"-V", "--verbose", help="Enable gRPC debugging and extra logging.",
action="store_true")
parser.add_argument(
"-models", "--oc_models_versions", help="Print OC models versions.",
action="store_true")
parser.add_argument(
"--no_tls", help="gRPC insecure mode.", action="store_true")
parser.add_argument(
"-o",
"--tls_host_override",
type=str,
action="store",
help="Hostname to use during the TLS certificate check.",
)
parser.add_argument(
"-set_cooldown",
"--gnmi_set_cooldown_secs",
type=int,
action="store",
help="Seconds to wait after a successful gNMI Set message.",
)
parser.add_argument(
"--stop_on_error",
action="store_true",
help="Stop the execution if a test fails.",
)
parser.add_argument(
"--log_gnmi",
action="store_true",
help="Log the gnmi requests to the tests results.",
)
return parser
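# Illustrative invocation sketch (target address and file names are made up;
# adjust to however this module is launched in your environment):
#
#   python -m oc_config_validate -tgt device.example.com:9339 \
#       -user admin -pass admin \
#       -tests tests.yaml -results results.json --no_tls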
def validateArgs(args: Dict[str, Any]):
"""Returns True if the arguments are valid.
Raises:
ValueError if any argument is invalid.
        IOError if unable to open a file given as an argument.
"""
def isFileOK(filename: str, writable: bool = False):
try:
file = open(filename, "w+" if writable else "r", encoding="utf8")
file.close()
except IOError as io_error:
logging.error("Unable to open %s: %s", filename, io_error)
raise
# Mandatory args for tests
for arg, write in [("tests_file", False), ("results_file", True)]:
if not args[arg]:
raise ValueError("Needed --%s file" % arg)
isFileOK(args[arg], write)
if args["init_config_file"]:
isFileOK(args["init_config_file"], False)
# Output format supported
if (args["format"] and
args["format"].lower() not in formatter.SUPPORTED_FORMATS):
raise ValueError("Output format %s is not supported.")
def main(): # noqa
"""Executes this library."""
argparser = createArgsParser()
args = vars(argparser.parse_args())
if args["version"]:
print(__version__)
sys.exit()
if args["oc_models_versions"]:
print(schema.getOcModelsVersions())
sys.exit()
if args["verbose"]:
# os.environ["GRPC_TRACE"] = "all"
os.environ["GRPC_VERBOSITY"] = "DEBUG"
logging.basicConfig(
level=logging.DEBUG if args["verbose"] else logging.INFO,
format=LOGGING_FORMAT)
try:
validateArgs(args)
except (IOError, ValueError) as error:
sys.exit("Invalid arguments: %s" % error)
if args["log_gnmi"]:
testbase.LOG_GNMI = args["log_gnmi"]
try:
ctx = context.fromFile(args["tests_file"])
except IOError as io_error:
sys.exit("Unable to read %s: %s" % (args["tests_file"], io_error))
except yaml.YAMLError as yaml_error:
sys.exit("Unable to parse YAML file %s: %s" % (args["tests_file"],
yaml_error))
logging.info("Read tests file '%s': %d tests to run",
args["tests_file"], len(ctx.tests))
if not ctx.target:
ctx.target = context.Target()
# Override Target options
for arg in ["target", "username", "password", "no_tls", "private_key",
"cert_chain", "root_ca_cert", "tls_host_override",
"gnmi_set_cooldown_secs"]:
if args[arg]:
setattr(ctx.target, arg, args[arg])
tgt = target.TestTarget(ctx.target)
try:
tgt.validate()
except ValueError as error:
sys.exit("Invalid Target: %s" % error)
logging.info("Testing gNMI Target %s.", tgt)
if tgt.gnmi_set_cooldown_secs:
logging.info("Using gNMI Set Cooldown of %d secs",
tgt.gnmi_set_cooldown_secs)
# Apply initial configuration
if args["init_config_file"]:
ctx.init_configs.append(context.InitConfig(args["init_config_file"],
args["init_config_xpath"]))
if not runner.setInitConfigs(ctx, tgt,
stop_on_error=args["stop_on_error"]):
sys.exit(1)
start_t = time.time()
results = runner.runTests(ctx, tgt, stop_on_error=args["stop_on_error"])
end_t = time.time()
test_run = testbase.TestRun(ctx)
test_run.copyResults(results, start_t, end_t)
logging.info("Results Summary: %s", test_run.summary())
try:
fmtr = formatter.makeFormatter(args["format"])
fmtr.writeResultsToFile(test_run, args["results_file"])
logging.info("Test results written to %s", args["results_file"])
except IOError as io_error:
logging.exception("Unable to write file %s: %s", args["results_file"],
io_error)
except TypeError as type_error:
logging.exception("Unable to parse results into a JSON text: %s",
type_error)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Create configuration for model openconfig-mpls.
usage: nc-create-oc-mpls-54-ydk.py [-h] [-v] device
positional arguments:
device NETCONF device (ssh://user:password@host:port)
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.services import CRUDService
from ydk.providers import NetconfServiceProvider
from ydk.models.openconfig import openconfig_mpls \
as oc_mpls
from ydk.models.openconfig import openconfig_mpls_types as oc_mpls_types
import logging
def config_mpls(mpls):
"""Add config data to mpls object."""
# tunnel with protection requested
tunnel = mpls.lsps.constrained_path.Tunnel()
tunnel.name = "LER1-LER2-t54"
tunnel.config.name = "LER1-LER2-t54"
tunnel.config.type = oc_mpls_types.P2P()
tunnel.config.protection_style_requested = oc_mpls_types.LinkProtectionRequested()
tunnel.type = oc_mpls_types.P2P()
p2p_primary_paths = tunnel.p2p_tunnel_attributes.P2PPrimaryPaths()
p2p_primary_paths.name = "DYNAMIC"
p2p_primary_paths.config.name = "DYNAMIC"
p2p_primary_paths.config.preference = 10
path_computation_method = oc_mpls.LocallyComputed()
p2p_primary_paths.config.path_computation_method = path_computation_method
tunnel.p2p_tunnel_attributes.p2p_primary_paths.append(p2p_primary_paths)
tunnel.p2p_tunnel_attributes.config.destination = "172.16.255.2"
tunnel.bandwidth.config.set_bandwidth = 100000
mpls.lsps.constrained_path.tunnel.append(tunnel)
if __name__ == "__main__":
"""Execute main program."""
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
parser.add_argument("device",
help="NETCONF device (ssh://user:password@host:port)")
args = parser.parse_args()
device = urlparse(args.device)
# log debug messages if verbose argument specified
if args.verbose:
logger = logging.getLogger("ydk")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
# create NETCONF provider
provider = NetconfServiceProvider(address=device.hostname,
port=device.port,
username=device.username,
password=device.password,
protocol=device.scheme)
# create CRUD service
crud = CRUDService()
mpls = oc_mpls.Mpls() # create object
config_mpls(mpls) # add object configuration
# create configuration on NETCONF device
crud.create(provider, mpls)
exit()
# End of script
|
"""
Django settings for startupmoney project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7_bcd_om-v=oud6403zs5#snm5(&_&d(l38#&qc2=(xb77g)^j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admindocs',
'handlemoney',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'startupmoney.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'startupmoney.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
from django.test import TestCase
from core.govdelivery import MockGovDelivery
from data_research.forms import ConferenceRegistrationForm
from data_research.models import ConferenceRegistration
class ConferenceRegistrationFormTests(TestCase):
capacity = 100
govdelivery_code = 'TEST-CODE'
govdelivery_question_id = '12345'
govdelivery_answer_id = '67890'
def test_invalid_form_if_fields_are_missing(self):
form = ConferenceRegistrationForm(
capacity=self.capacity,
govdelivery_code=self.govdelivery_code,
govdelivery_question_id=self.govdelivery_question_id,
govdelivery_answer_id=self.govdelivery_answer_id,
data={'foo': 'bar'}
)
self.assertFalse(form.is_valid())
def get_valid_form(
self,
attendee_type=ConferenceRegistrationForm.ATTENDEE_IN_PERSON,
govdelivery_question_id=None,
govdelivery_answer_id=None
):
return ConferenceRegistrationForm(
capacity=self.capacity,
govdelivery_code=self.govdelivery_code,
govdelivery_question_id=govdelivery_question_id,
govdelivery_answer_id=govdelivery_answer_id,
data={
'attendee_type': attendee_type,
'name': 'A User',
'organization': 'An Organization',
'email': 'user@domain.com',
}
)
def test_valid_form_if_required_fields_are_provided(self):
form = self.get_valid_form()
self.assertTrue(form.is_valid())
def test_form_save_commit_false_doesnt_save_user(self):
form = self.get_valid_form()
form.is_valid()
form.save(commit=False)
self.assertFalse(ConferenceRegistration.objects.exists())
def test_form_save_commit_false_doesnt_subscribe_to_govdelivery(self):
calls_before = list(MockGovDelivery.calls)
form = self.get_valid_form()
form.is_valid()
form.save(commit=False)
self.assertEqual(MockGovDelivery.calls, calls_before)
def test_form_save_sets_registration_code_and_details(self):
form = self.get_valid_form()
form.is_valid()
registrant = form.save(commit=False)
self.assertEqual(registrant.govdelivery_code, 'TEST-CODE')
self.assertEqual(registrant.details, {
'attendee_type': ConferenceRegistrationForm.ATTENDEE_IN_PERSON,
'name': 'A User',
'organization': 'An Organization',
'email': 'user@domain.com',
'dietary_restrictions': [],
'other_dietary_restrictions': '',
'accommodations': [],
'other_accommodations': '',
})
def test_form_save_commit_true_saves_to_db(self):
form = self.get_valid_form()
form.is_valid()
registrant = form.save()
self.assertEqual(registrant, ConferenceRegistration.objects.first())
def test_form_save_commit_true_subscribes_to_gd(self):
form = self.get_valid_form()
form.is_valid()
form.save()
self.assertEqual(
MockGovDelivery.calls,
[(
'set_subscriber_topics',
(),
{
'contact_details': 'user@domain.com',
'topic_codes': ['TEST-CODE'],
'send_notifications': True,
}
)]
)
def test_form_save_commit_true_subscribes_and_sets_question(self):
form = self.get_valid_form(
govdelivery_question_id='12345',
govdelivery_answer_id='67890'
)
form.is_valid()
form.save()
self.assertEqual(MockGovDelivery.calls, [
(
'set_subscriber_topics',
(),
{
'contact_details': 'user@domain.com',
'topic_codes': ['TEST-CODE'],
'send_notifications': True,
}
),
(
'set_subscriber_answer_to_select_question',
(),
{
'contact_details': 'user@domain.com',
'question_id': '12345',
'answer_id': '67890',
}
),
])
def make_capacity_registrants(self, govdelivery_code, attendee_type):
registrant = ConferenceRegistration(
govdelivery_code=govdelivery_code,
details={'attendee_type': attendee_type}
)
ConferenceRegistration.objects.bulk_create(
[registrant] * self.capacity
)
def test_form_not_at_capacity(self):
self.assertFalse(self.get_valid_form().at_capacity)
def test_form_at_capacity(self):
self.make_capacity_registrants(
self.govdelivery_code,
ConferenceRegistrationForm.ATTENDEE_IN_PERSON
)
self.assertTrue(self.get_valid_form().at_capacity)
def test_form_at_capacity_for_some_other_code(self):
self.make_capacity_registrants(
'some-other-code',
ConferenceRegistrationForm.ATTENDEE_IN_PERSON
)
self.assertFalse(self.get_valid_form().at_capacity)
def test_form_at_capacity_invalid(self):
self.make_capacity_registrants(
self.govdelivery_code,
ConferenceRegistrationForm.ATTENDEE_IN_PERSON
)
form = self.get_valid_form()
self.assertFalse(form.is_valid())
def test_form_at_capacity_still_valid_for_virtual_attendees(self):
self.make_capacity_registrants(
self.govdelivery_code,
ConferenceRegistrationForm.ATTENDEE_IN_PERSON
)
form = self.get_valid_form(
attendee_type=ConferenceRegistrationForm.ATTENDEE_VIRTUALLY
)
self.assertTrue(form.is_valid())
def test_form_virtual_attendees_dont_count_against_capacity(self):
self.make_capacity_registrants(
self.govdelivery_code,
ConferenceRegistrationForm.ATTENDEE_VIRTUALLY
)
self.assertFalse(self.get_valid_form().at_capacity)
|
# mysql/__init__.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import base, mysqldb, oursql, \
pyodbc, zxjdbc, mysqlconnector, pymysql,\
gaerdbms, cymysql
# default dialect
base.dialect = mysqldb.dialect
from .base import \
BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, DATETIME, \
    DECIMAL, DOUBLE, ENUM, \
    FLOAT, INTEGER, JSON, LONGBLOB, LONGTEXT, MEDIUMBLOB, \
MEDIUMINT, MEDIUMTEXT, NCHAR, \
NVARCHAR, NUMERIC, SET, SMALLINT, REAL, TEXT, TIME, TIMESTAMP, \
TINYBLOB, TINYINT, TINYTEXT,\
VARBINARY, VARCHAR, YEAR, dialect
from .dml import insert, Insert
__all__ = (
'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME',
    'DECIMAL', 'DOUBLE', 'ENUM', 'FLOAT', 'INTEGER',
'JSON', 'LONGBLOB', 'LONGTEXT', 'MEDIUMBLOB', 'MEDIUMINT', 'MEDIUMTEXT',
'NCHAR', 'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'REAL', 'TEXT', 'TIME',
'TIMESTAMP', 'TINYBLOB', 'TINYINT', 'TINYTEXT', 'VARBINARY', 'VARCHAR',
'YEAR', 'dialect'
)
|
# Southern trees bear strange fruit
days = "Mon Tue Wed Thu Fri Sat Sun"
months = 'Jan\nFeb\nMar\nApr\nMay\nJun\nJul\nAug\nSep\nOct\nNov\nDec'
print "Here are the days:", days
print 'Here are the months:', months
print """
There's something going on here.
With the three double-quotes.
We'll be able to type as much as we like ass.
Even 4 lines if we want, or 5, or whack.
"""
print '''
ahashashas
aashhsahsa
'''
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkemr.endpoint import endpoint_data
class ModifyFlowProjectClusterSettingRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Emr', '2016-04-08', 'ModifyFlowProjectClusterSetting','emr')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_UserLists(self):
return self.get_query_params().get('UserLists')
def set_UserLists(self,UserLists):
for i in range(len(UserLists)):
if UserLists[i] is not None:
self.add_query_param('UserList.' + str(i + 1) , UserLists[i]);
def get_QueueLists(self):
return self.get_query_params().get('QueueLists')
def set_QueueLists(self,QueueLists):
for i in range(len(QueueLists)):
if QueueLists[i] is not None:
self.add_query_param('QueueList.' + str(i + 1) , QueueLists[i]);
def get_HostLists(self):
return self.get_query_params().get('HostLists')
def set_HostLists(self,HostLists):
for i in range(len(HostLists)):
if HostLists[i] is not None:
self.add_query_param('HostList.' + str(i + 1) , HostLists[i]);
def get_ClusterId(self):
return self.get_query_params().get('ClusterId')
def set_ClusterId(self,ClusterId):
self.add_query_param('ClusterId',ClusterId)
def get_DefaultQueue(self):
return self.get_query_params().get('DefaultQueue')
def set_DefaultQueue(self,DefaultQueue):
self.add_query_param('DefaultQueue',DefaultQueue)
def get_ProjectId(self):
return self.get_query_params().get('ProjectId')
def set_ProjectId(self,ProjectId):
self.add_query_param('ProjectId',ProjectId)
def get_DefaultUser(self):
return self.get_query_params().get('DefaultUser')
def set_DefaultUser(self,DefaultUser):
self.add_query_param('DefaultUser',DefaultUser)
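# Illustrative usage sketch (the client construction and IDs are assumptions):
#
#   request = ModifyFlowProjectClusterSettingRequest()
#   request.set_ClusterId('C-XXXXXXXX')
#   request.set_ProjectId('FP-XXXXXXXX')
#   request.set_UserLists(['hadoop'])
#   response = client.do_action_with_exception(request)  # client: aliyunsdkcore AcsClient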
|
from threading import Thread, Event
from time import sleep
def func1():
sleep(2) # Initially sleep for 2 secs
myeventobj.set() # E2
print("func1 sleeping for 3 secs....")
sleep(3) # E3
myeventobj.clear() # E4
def func2():
print("Initially myeventobj is: ", myeventobj.isSet()) # E1
myeventobj.wait()
if myeventobj.isSet(): # E5
print("True when myeventobj.set() is called from func1 .i.e. Internal flag is set")
print("func2 sleeping for 4 secs....")
sleep(4) # E6
if myeventobj.isSet() == False: # E7
print("False when myeventobj.clear() is called from func1.i.e. Internal flag is reset")
myeventobj = Event()
myt1 = Thread(target=func1)
myt2 = Thread(target=func2)
myt1.start()
myt2.start()
myt1.join()
myt2.join()
print("Main Thread Completed")
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RPhantompeakqualtools(RPackage):
"""Computes informative enrichment and quality measures for
ChIP-seq/DNase-seq/FAIRE-seq/MNase-seq data. This is a modified version
of r-spp to be used in conjunction with the phantompeakqualtools
package."""
homepage = "https://github.com/kundajelab/phantompeakqualtools"
url = "https://github.com/kundajelab/phantompeakqualtools/raw/master/spp_1.14.tar.gz"
version('1.14', '4de207d570999170c1bf45bcba8c6d2d')
depends_on('boost@1.41.0:')
depends_on('r-catools', type=('build', 'run'))
depends_on('r-snow', type=('build', 'run'))
depends_on('r-snowfall', type=('build', 'run'))
depends_on('r-bitops', type=('build', 'run'))
depends_on('r-rsamtools', type=('build', 'run'))
conflicts('%gcc@6:')
def setup_environment(self, spack_env, run_env):
spack_env.set('BOOST_ROOT', self.spec['boost'].prefix)
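Note that `setup_environment(self, spack_env, run_env)` is the older Spack hook; newer Spack releases split it into `setup_build_environment` / `setup_run_environment`. Under that API the same BOOST_ROOT export would look roughly like this (a sketch, assuming a recent Spack):

    def setup_build_environment(self, env):
        env.set('BOOST_ROOT', self.spec['boost'].prefix)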
|
from typing import Tuple
import unittest
import numpy as np
from openml.tasks import get_task
from .test_task import OpenMLTaskTest
class OpenMLSupervisedTaskTest(OpenMLTaskTest):
"""
A helper class. The methods of the test case
are only executed in subclasses of the test case.
"""
__test__ = False
@classmethod
def setUpClass(cls):
if cls is OpenMLSupervisedTaskTest:
raise unittest.SkipTest(
"Skip OpenMLSupervisedTaskTest tests,"
" it's a base class"
)
super(OpenMLSupervisedTaskTest, cls).setUpClass()
def setUp(self, n_levels: int = 1):
super(OpenMLSupervisedTaskTest, self).setUp()
def test_get_X_and_Y(self) -> Tuple[np.ndarray, np.ndarray]:
task = get_task(self.task_id)
X, Y = task.get_X_and_y()
return X, Y
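A hypothetical concrete subclass showing how this helper is intended to be used: it re-enables test collection with `__test__ = True` and supplies a `task_id` in `setUp` (the id below is a placeholder, not a reference to a specific OpenML task).

class OpenMLExampleSupervisedTaskTest(OpenMLSupervisedTaskTest):
    __test__ = True

    def setUp(self):
        super().setUp()
        self.task_id = 1  # placeholder task id

    def test_get_X_and_Y(self):
        X, Y = super().test_get_X_and_Y()
        # The exact shapes depend on the chosen task; only check that data came back.
        self.assertIsNotNone(X)
        self.assertIsNotNone(Y)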
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class CollectiveAllReduceStrategy implementing DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import threading
import time
import weakref
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python.distribute import collective_util
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import numpy_dataset
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import values
from tensorflow.python.distribute.cluster_resolver import ClusterResolver
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import base
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=line-too-long
@tf_export("distribute.MultiWorkerMirroredStrategy", v1=[])
class CollectiveAllReduceStrategy(distribute_lib.Strategy):
"""A distribution strategy for synchronous training on multiple workers.
This strategy implements synchronous distributed training across multiple
workers, each with potentially multiple GPUs. Similar to
`tf.distribute.MirroredStrategy`, it replicates all variables and computations
to each local device. The difference is that it uses a distributed collective
implementation (e.g. all-reduce), so that multiple workers can work together.
You need to launch your program on each worker and configure
`cluster_resolver` correctly. For example, if you are using
`tf.distribute.cluster_resolver.TFConfigClusterResolver`, each worker needs to
have its corresponding `task_type` and `task_id` set in the `TF_CONFIG`
environment variable. An example TF_CONFIG on worker-0 of a two worker cluster
is:
```
TF_CONFIG = '{"cluster": {"worker": ["localhost:12345", "localhost:23456"]}, "task": {"type": "worker", "index": 0} }'
```
Your program runs on each worker as-is. Note that collectives require each
  worker to participate. Both `tf.distribute` and non-`tf.distribute` APIs may
  use collectives internally, e.g. checkpointing and saving, since reading a
`tf.Variable` with `tf.VariableSynchronization.ON_READ` all-reduces the value.
Therefore it's recommended to run exactly the same program on each worker.
Dispatching based on `task_type` or `task_id` of the worker is error-prone.
`cluster_resolver.num_accelerators()` determines the number of GPUs the
strategy uses. If it's zero, the strategy uses the CPU. All workers need to
use the same number of devices, otherwise the behavior is undefined.
This strategy is not intended for TPU. Use `tf.distribute.TPUStrategy`
instead.
After setting up TF_CONFIG, using this strategy is similar to using
`tf.distribute.MirroredStrategy` and `tf.distribute.TPUStrategy`.
```
strategy = tf.distribute.MultiWorkerMirroredStrategy()
with strategy.scope():
model = tf.keras.Sequential([
tf.keras.layers.Dense(2, input_shape=(5,)),
])
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
def dataset_fn(ctx):
x = np.random.random((2, 5)).astype(np.float32)
y = np.random.randint(2, size=(2, 1))
dataset = tf.data.Dataset.from_tensor_slices((x, y))
return dataset.repeat().batch(1, drop_remainder=True)
dist_dataset = strategy.distribute_datasets_from_function(dataset_fn)
model.compile()
model.fit(dist_dataset)
```
You can also write your own training loop:
```
@tf.function
def train_step(iterator):
def step_fn(inputs):
features, labels = inputs
with tf.GradientTape() as tape:
logits = model(features, training=True)
loss = tf.keras.losses.sparse_categorical_crossentropy(
labels, logits)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
strategy.run(step_fn, args=(next(iterator),))
for _ in range(NUM_STEP):
train_step(iterator)
```
See
[Multi-worker training with Keras](https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras)
for a detailed tutorial.
__Saving__
You need to save and checkpoint on all workers instead of just one. This is
  because variables with synchronization=ON_READ trigger aggregation during
saving. It's recommended to save to a different path on each worker to avoid
race conditions. Each worker saves the same thing. See
[Multi-worker training with Keras](https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras#model_saving_and_loading)
tutorial for examples.
__Known Issues__
* `tf.distribute.cluster_resolver.TFConfigClusterResolver` does not return the
correct number of accelerators. The strategy uses all available GPUs if
`cluster_resolver` is `tf.distribute.cluster_resolver.TFConfigClusterResolver`
or `None`.
* In eager mode, the strategy needs to be created before calling any other
Tensorflow API.
"""
# pylint: enable=line-too-long
# TODO(anjalisridhar): Update our guides with examples showing how we can use
# the cluster_resolver argument.
# The starting number for collective keys. This should only be set in tests.
_collective_key_base = 0
def __init__(self,
cluster_resolver=None,
communication_options=None):
"""Creates the strategy.
Args:
cluster_resolver: optional
`tf.distribute.cluster_resolver.ClusterResolver`. If `None`,
`tf.distribute.cluster_resolver.TFConfigClusterResolver` is used.
communication_options: optional
`tf.distribute.experimental.CommunicationOptions`. This configures the
default options for cross device communications. It can be overridden by
options provided to the communication APIs like
`tf.distribute.ReplicaContext.all_reduce`. See
`tf.distribute.experimental.CommunicationOptions` for details.
"""
if communication_options is None:
communication_options = collective_util.Options()
super(CollectiveAllReduceStrategy, self).__init__(
CollectiveAllReduceExtended(
self,
cluster_resolver=cluster_resolver,
communication_options=communication_options))
distribute_lib.distribution_strategy_gauge.get_cell("V2").set(
"MultiWorkerMirroredStrategy")
# pylint: disable=protected-access
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_workers").set(self.extended._num_workers)
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_replicas_per_worker").set(self.extended._num_gpus_per_worker)
@classmethod
def _from_local_devices(cls, devices, communication_options=None):
"""A convenience method to create an object with a list of devices."""
obj = cls(communication_options=communication_options)
obj.extended._initialize_local(TFConfigClusterResolver(), devices=devices) # pylint: disable=protected-access
return obj
@property
def cluster_resolver(self):
"""Returns the cluster resolver associated with this strategy.
As a multi-worker strategy, `tf.distribute.MultiWorkerMirroredStrategy`
provides the associated `tf.distribute.cluster_resolver.ClusterResolver`. If
the user provides one in `__init__`, that instance is returned; if the user
does not, a default `TFConfigClusterResolver` is provided.
"""
return self.extended._cluster_resolver # pylint: disable=protected-access
class _CollectiveAllReduceStrategyExperimentalMeta(type):
@classmethod
def __instancecheck__(cls, instance):
    # This makes isinstance(tf.distribute.MultiWorkerMirroredStrategy(),
    # tf.distribute.experimental.MultiWorkerMirroredStrategy) return True, since
    # some libraries perform such a check.
return isinstance(instance, CollectiveAllReduceStrategy)
@tf_export("distribute.experimental.MultiWorkerMirroredStrategy", v1=[])
class _CollectiveAllReduceStrategyExperimental(
CollectiveAllReduceStrategy,
metaclass=_CollectiveAllReduceStrategyExperimentalMeta):
__doc__ = CollectiveAllReduceStrategy.__doc__
@deprecation.deprecated(
None, "use distribute.MultiWorkerMirroredStrategy instead")
def __init__(self,
communication=collective_util.CommunicationImplementation.AUTO,
cluster_resolver=None):
"""Creates the strategy.
Args:
communication: optional
`tf.distribute.experimental.CommunicationImplementation`. This is a hint
on the preferred collective communication implementation. Possible
values include `AUTO`, `RING`, and `NCCL`.
cluster_resolver: optional
`tf.distribute.cluster_resolver.ClusterResolver`. If `None`,
`tf.distribute.cluster_resolver.TFConfigClusterResolver` is used.
"""
communication_options = collective_util.Options(
implementation=communication)
super(_CollectiveAllReduceStrategyExperimental,
self).__init__(cluster_resolver, communication_options)
@classmethod
def _from_local_devices(
cls,
devices,
communication=collective_util.CommunicationImplementation.AUTO):
"""A convenience method to create an object with a list of devices."""
obj = cls(communication)
obj.extended._initialize_local(TFConfigClusterResolver(), devices=devices) # pylint: disable=protected-access
return obj
_CollectiveAllReduceStrategyExperimental.__name__ = CollectiveAllReduceStrategy.__name__
@tf_export(v1=["distribute.experimental.MultiWorkerMirroredStrategy"]) # pylint: disable=missing-docstring
class CollectiveAllReduceStrategyV1(distribute_lib.StrategyV1):
__doc__ = CollectiveAllReduceStrategy.__doc__
# The starting number for collective keys. This should only be set in tests.
_collective_key_base = 0
def __init__(self,
communication=collective_util.CommunicationImplementation.AUTO,
cluster_resolver=None):
"""Initializes the object."""
communication_options = collective_util.Options(
implementation=communication)
super(CollectiveAllReduceStrategyV1, self).__init__(
CollectiveAllReduceExtended(
self,
cluster_resolver=cluster_resolver,
communication_options=communication_options))
distribute_lib.distribution_strategy_gauge.get_cell("V1").set(
"MultiWorkerMirroredStrategy")
# pylint: disable=protected-access
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_workers").set(self.extended._num_workers)
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_gpu_per_worker").set(self.extended._num_gpus_per_worker)
class CollectiveAllReduceExtended(mirrored_strategy.MirroredExtended):
"""Implementation of CollectiveAllReduceStrategy."""
  # Whether to periodically check the health of the cluster. If any worker is
  # not reachable, collectives are aborted and the user program should get a
  # tf.errors.UnavailableError. It's required to restart in order to recover.
  _enable_check_health = True
  # Check health interval in seconds.
  _check_health_interval = 30
  # Timeout in seconds for the first health check. The first health check needs
  # to wait for the cluster to be up, which may take longer than later checks.
  _check_health_initial_timeout = 0
  # Number of times to retry before considering the peer down.
  _check_health_retry_limit = 3
  # Timeout in seconds for each health check.
  _check_health_timeout = 10
def __init__(self, container_strategy, cluster_resolver,
communication_options):
if not isinstance(communication_options, collective_util.Options):
raise ValueError("communication_options must be an instance of "
"tf.distribute.experimental.CommunicationOptions")
self._cluster_resolver = cluster_resolver or TFConfigClusterResolver()
if not isinstance(self._cluster_resolver, ClusterResolver):
raise ValueError("cluster_resolver must be an instance of "
"tf.distribute.cluster_resolver.ClusterResolver")
distribute_lib.StrategyExtendedV1.__init__(self, container_strategy)
self._communication_options = communication_options
self._collective_key_base = container_strategy._collective_key_base # pylint: disable=protected-access
self._initialize_strategy(self._cluster_resolver)
self._cfer_fn_cache = weakref.WeakKeyDictionary()
self.experimental_enable_get_next_as_optional = True
assert isinstance(self._cross_device_ops,
cross_device_ops_lib.CollectiveAllReduce)
def _use_merge_call(self):
"""XLA is not supported for multi-worker strategy."""
return True
def _initialize_strategy(self, cluster_resolver):
if cluster_resolver.cluster_spec().as_dict():
self._initialize_multi_worker(cluster_resolver)
else:
self._initialize_local(cluster_resolver)
def _initialize_local(self, cluster_resolver, devices=None):
"""Initializes the object for local training."""
self._is_chief = True
self._num_workers = 1
if ops.executing_eagerly_outside_functions():
try:
context.context().configure_collective_ops(
scoped_allocator_enabled_ops=("CollectiveReduce",))
except RuntimeError:
logging.warning("Collective ops is not configured at program startup. "
"Some performance features may not be enabled.")
self._collective_ops_configured = True
# TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in
# some cases.
if isinstance(cluster_resolver, TFConfigClusterResolver):
num_gpus = context.num_gpus()
else:
num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
if devices:
local_devices = devices
else:
if num_gpus:
local_devices = tuple("/device:GPU:%d" % i for i in range(num_gpus))
else:
local_devices = ("/device:CPU:0",)
self._worker_device = device_util.canonicalize("/device:CPU:0")
self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)
self._collective_keys = cross_device_utils.CollectiveKeys(
group_key_start=1 + self._collective_key_base)
self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
devices=local_devices,
group_size=len(local_devices),
collective_keys=self._collective_keys)
# CrossDeviceOps for per host tensors.
self._host_cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
devices=[self._worker_device],
group_size=self._num_workers,
collective_keys=self._collective_keys)
super(CollectiveAllReduceExtended, self)._initialize_single_worker(
local_devices)
self._cluster_spec = None
self._task_type = None
self._task_id = None
self._id_in_cluster = 0
    # This is a mark to tell whether we are running with a standalone client or
    # an independent worker. Right now with a standalone client, the strategy
    # object is created as a local strategy and then turned into a multi-worker
    # strategy via a configure call.
self._local_or_standalone_client_mode = True
# Save the num_gpus_per_worker and rpc_layer for configure method.
self._num_gpus_per_worker = num_gpus
self._rpc_layer = cluster_resolver.rpc_layer
self._warn_nccl_no_gpu()
logging.info(
"Single-worker MultiWorkerMirroredStrategy with local_devices "
"= %r, communication = %s", local_devices,
self._communication_options.implementation)
def _initialize_multi_worker(self, cluster_resolver):
"""Initializes the object for multi-worker training."""
cluster_spec = multi_worker_util.normalize_cluster_spec(
cluster_resolver.cluster_spec())
task_type = cluster_resolver.task_type
task_id = cluster_resolver.task_id
if task_type is None or task_id is None:
raise ValueError("When `cluster_spec` is given, you must also specify "
"`task_type` and `task_id`.")
self._cluster_spec = cluster_spec
self._task_type = task_type
self._task_id = task_id
self._id_in_cluster = multi_worker_util.id_in_cluster(
self._cluster_spec, self._task_type, self._task_id)
self._num_workers = multi_worker_util.worker_count(cluster_spec, task_type)
if not self._num_workers:
raise ValueError("No `worker`, `chief` or `evaluator` tasks can be found "
"in `cluster_spec`.")
self._is_chief = multi_worker_util.is_chief(cluster_spec, task_type,
task_id)
self._worker_device = "/job:%s/task:%d" % (task_type, task_id)
self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)
if (ops.executing_eagerly_outside_functions() and
not getattr(self, "_local_or_standalone_client_mode", False)):
context.context().configure_collective_ops(
collective_leader=multi_worker_util.collective_leader(
cluster_spec, task_type, task_id),
scoped_allocator_enabled_ops=("CollectiveReduce",),
device_filters=("/job:%s/task:%d" % (task_type, task_id),))
self._collective_ops_configured = True
# Starting a std server in eager mode and in independent worker mode.
if (context.executing_eagerly() and
not getattr(self, "_std_server_started", False) and
not getattr(self, "_local_or_standalone_client_mode", False)):
# Checking _local_or_standalone_client_mode as well because we should not
# create the std server in standalone client mode.
config_proto = copy.deepcopy(context.context().config)
config_proto = self._update_config_proto(config_proto)
# If coordination service is enabled, use its internal heartbeat to detect
# peer failures instead of the Python-level health check.
if config_proto.experimental.coordination_service:
self._enable_check_health = False
if hasattr(cluster_resolver, "port"):
port = cluster_resolver.port
else:
port = 0
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_spec.as_cluster_def(),
default_session_config=config_proto,
job_name=task_type,
task_index=task_id,
protocol=cluster_resolver.rpc_layer or "grpc",
port=port)
context.context().enable_collective_ops(server_def)
self._std_server_started = True
# The `ensure_initialized` is needed before calling
# `context.context().devices()`.
context.context().ensure_initialized()
logging.info(
"Enabled multi-worker collective ops with available devices: %r",
context.context().devices())
# TODO(yuefengz): The `num_gpus` is only for this particular task. It
# assumes all workers have the same number of GPUs. We should remove this
# assumption by querying all tasks for their numbers of GPUs.
# TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in
# some cases.
if isinstance(cluster_resolver, TFConfigClusterResolver):
num_gpus = context.num_gpus()
else:
num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
if num_gpus:
local_devices = tuple("%s/device:GPU:%d" % (self._worker_device, i)
for i in range(num_gpus))
else:
local_devices = (self._worker_device,)
self._collective_keys = cross_device_utils.CollectiveKeys(
group_key_start=1 + self._collective_key_base)
self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
devices=local_devices,
group_size=len(local_devices) * self._num_workers,
collective_keys=self._collective_keys)
# CrossDeviceOps for per host tensors.
self._host_cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
devices=[self._worker_device],
group_size=self._num_workers,
collective_keys=self._collective_keys)
super(CollectiveAllReduceExtended, self)._initialize_single_worker(
local_devices)
# Add a default device so that ops without specified devices will not end up
# on other workers.
self._default_device = "/job:%s/task:%d" % (task_type, task_id)
# Save the num_gpus_per_worker and rpc_layer for configure method.
self._num_gpus_per_worker = num_gpus
self._rpc_layer = cluster_resolver.rpc_layer
self._warn_nccl_no_gpu()
if self._enable_check_health and context.executing_eagerly():
self._start_check_health_thread()
else:
logging.info("Check health not enabled.")
logging.info(
"MultiWorkerMirroredStrategy with cluster_spec = %r, task_type = %r, "
"task_id = %r, num_workers = %r, local_devices = %r, "
"communication = %s", cluster_spec.as_dict(), task_type, task_id,
self._num_workers, local_devices,
self._communication_options.implementation)
def __del__(self):
self._stop_check_health_thread()
def _input_workers_with_options(self, options=None):
host_device = device_util.get_host_for_device(self._worker_device)
if not options or options.experimental_fetch_to_device:
return input_lib.InputWorkers([(host_device, self.worker_devices)])
else:
return input_lib.InputWorkers([(
host_device,
[device_util.get_host_for_device(worker) for worker in
self.worker_devices])])
@property
def _input_workers(self):
return self._input_workers_with_options()
def _get_variable_creator_initial_value(self,
replica_id,
device,
primary_var,
**kwargs):
if replica_id == 0: # First replica on each worker.
assert device is not None
assert primary_var is None
def initial_value_fn(): # pylint: disable=g-missing-docstring
# Only the first device participates in the broadcast of initial values.
group_key = self._collective_keys.get_group_key([device])
group_size = self._num_workers
collective_instance_key = (
self._collective_keys.get_instance_key(group_key, device))
with ops.device(device):
initial_value = kwargs["initial_value"]
if callable(initial_value):
initial_value = initial_value()
if isinstance(initial_value, base.CheckpointInitialValue):
initial_value = initial_value.wrapped_value
assert not callable(initial_value)
initial_value = ops.convert_to_tensor(
initial_value, dtype=kwargs.get("dtype", None))
if self._num_workers > 1:
if self._is_chief:
bcast_send = collective_ops.broadcast_send(
initial_value, initial_value.shape, initial_value.dtype,
group_size, group_key, collective_instance_key)
with ops.control_dependencies([bcast_send]):
return array_ops.identity(initial_value)
else:
return collective_ops.broadcast_recv(initial_value.shape,
initial_value.dtype,
group_size, group_key,
collective_instance_key)
return initial_value
return initial_value_fn
else:
return super(CollectiveAllReduceExtended,
self)._get_variable_creator_initial_value(
replica_id=replica_id,
device=device,
primary_var=primary_var,
**kwargs)
def _make_input_context(self):
input_context = distribute_lib.InputContext(
num_input_pipelines=self._num_workers,
input_pipeline_id=self._id_in_cluster,
num_replicas_in_sync=self._num_replicas_in_sync)
return input_context
def _experimental_distribute_dataset(self, dataset, options):
if (options and options.experimental_replication_mode ==
distribute_lib.InputReplicationMode.PER_REPLICA):
raise NotImplementedError(
"InputReplicationMode.PER_REPLICA "
"is only supported in "
"`distribute_datasets_from_function` "
"of tf.distribute.MirroredStrategy"
)
input_context = self._make_input_context()
return input_lib.get_distributed_dataset(
dataset,
self._input_workers_with_options(options),
self._container_strategy(),
num_replicas_in_sync=self._num_replicas_in_sync,
input_context=input_context,
options=options)
def _distribute_datasets_from_function(self, dataset_fn, options):
if (options and options.experimental_replication_mode ==
distribute_lib.InputReplicationMode.PER_REPLICA):
raise NotImplementedError(
"InputReplicationMode.PER_REPLICA "
"is only supported in "
"`distribute_datasets_from_function` "
"of tf.distribute.MirroredStrategy")
input_context = self._make_input_context()
return input_lib.get_distributed_datasets_from_function(
dataset_fn=dataset_fn,
input_workers=self._input_workers_with_options(options),
input_contexts=[input_context],
strategy=self._container_strategy(),
options=options)
def _experimental_distribute_values_from_function(self, value_fn):
per_replica_values = []
num_local_replicas = len(self.worker_devices)
for local_replica_id in range(num_local_replicas):
replica_id = (self._id_in_cluster * num_local_replicas +
local_replica_id)
value_context = distribute_lib.ValueContext(
replica_id, self._num_replicas_in_sync)
per_replica_values.append(value_fn(value_context))
return distribute_utils.regroup(per_replica_values, always_wrap=True)
def _make_dataset_iterator(self, dataset):
"""Distributes the dataset to each local GPU."""
input_context = self._make_input_context()
return input_lib.DatasetIterator(
dataset,
self._input_workers,
self._container_strategy(),
num_replicas_in_sync=self._num_replicas_in_sync,
input_context=input_context)
def _make_input_fn_iterator(
self,
input_fn,
replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
"""Distributes the input function to each local GPU."""
input_context = self._make_input_context()
return input_lib.InputFunctionIterator(input_fn, self._input_workers,
[input_context],
self._container_strategy())
def _configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
"""Configures the object.
Args:
session_config: a `tf.compat.v1.ConfigProto`
cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the
cluster configurations.
task_type: the current task type, such as "worker".
task_id: the current task id.
Raises:
ValueError: if `task_type` is not in the `cluster_spec`.
"""
if cluster_spec:
# Use the num_gpus_per_worker recorded in constructor since _configure
# doesn't take num_gpus.
cluster_resolver = SimpleClusterResolver(
cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec),
task_type=task_type,
task_id=task_id,
num_accelerators={"GPU": self._num_gpus_per_worker},
rpc_layer=self._rpc_layer)
self._initialize_multi_worker(cluster_resolver)
assert isinstance(self._cross_device_ops,
cross_device_ops_lib.CollectiveAllReduce)
if session_config:
session_config.CopyFrom(self._update_config_proto(session_config))
def _update_config_proto(self, config_proto):
updated_config = copy.deepcopy(config_proto)
# Enable the scoped allocator optimization for CollectiveOps. This
# optimization converts many small all-reduces into fewer larger
# all-reduces.
rewrite_options = updated_config.graph_options.rewrite_options
rewrite_options.scoped_allocator_optimization = (
rewriter_config_pb2.RewriterConfig.ON)
# We turn on ScopedAllocator only for CollectiveReduce op, i.e. enable_op =
# ["CollectiveReduce"]. Since we can't assign to a repeated proto field, we
# clear and then append.
del rewrite_options.scoped_allocator_opts.enable_op[:]
rewrite_options.scoped_allocator_opts.enable_op.append("CollectiveReduce")
if (not ops.executing_eagerly_outside_functions() and
self._communication_options.implementation ==
collective_util.CommunicationImplementation.NCCL):
updated_config.experimental.collective_nccl = True
if not self._cluster_spec:
return updated_config
assert self._task_type
assert self._task_id is not None
# Collective group leader is needed for collective ops to coordinate
# workers.
updated_config.experimental.collective_group_leader = (
multi_worker_util.collective_leader(self._cluster_spec, self._task_type,
self._task_id))
# The device filters prevent communication between workers.
del updated_config.device_filters[:]
updated_config.device_filters.append(
"/job:%s/task:%d" % (self._task_type, self._task_id))
return updated_config
def _get_cross_device_ops(self, value):
# CollectiveAllReduce works on a predefined set of devices. In most cases
# they should be the compute devices, but certain use cases may reduce host
# tensors as well (e.g. early stopping). We infer the cross_device_ops to
# use based on the number of devices, since inputs don't always have device
# annotations. The compute devices one is preferred since we can potentially
# leverage NCCL.
if isinstance(value, values.DistributedValues):
num_devices = len(value._values) # pylint: disable=protected-access
else:
num_devices = 1
if num_devices == len(self.worker_devices):
return self._cross_device_ops
else:
return self._host_cross_device_ops
def _gather_to_implementation(self, value, destinations, axis, options):
return self._get_cross_device_ops(value)._gather( # pylint: disable=protected-access
value,
destinations=destinations,
axis=axis,
options=options)
def _reduce_to(self, reduce_op, value, destinations, options):
if (isinstance(value, values.Mirrored) and
reduce_op == reduce_util.ReduceOp.MEAN):
return value
assert not isinstance(value, values.Mirrored)
if (isinstance(value, values.DistributedValues) and
len(self.worker_devices) == 1):
value = value.values[0]
# When there are multiple workers, we need to reduce across workers using
# collective ops.
if (not isinstance(value, values.DistributedValues) and
self._num_workers == 1):
# This function handles reducing values that are not PerReplica or
# Mirrored values. For example, the same value could be present on all
# replicas in which case `value` would be a single value or value could
# be 0.
return cross_device_ops_lib.reduce_non_distributed_value(
reduce_op, value, destinations, len(self.worker_devices))
return self._get_cross_device_ops(value).reduce(
reduce_op,
value,
destinations=destinations,
options=self._communication_options.merge(options))
def _replica_ctx_all_reduce(self, reduce_op, value, options=None):
"""Implements `StrategyExtendedV2._replica_ctx_all_reduce`."""
# This implementation avoids using `merge_call` and just launches collective
# ops in one replica.
if options is None:
options = collective_util.Options()
if context.executing_eagerly():
      # In eager mode, fall back to the default implementation that uses
      # `merge_call`. Replica functions run sequentially in eager mode, and due
      # to the blocking nature of collective ops, execution will hang if
      # collective ops are launched sequentially.
return super()._replica_ctx_all_reduce(reduce_op, value, options)
replica_context = ds_context.get_replica_context()
assert replica_context, (
"`StrategyExtended._replica_ctx_all_reduce` must be called in a "
"replica context")
return self._cross_device_ops._all_reduce( # pylint: disable=protected-access
reduce_op,
value,
replica_context._replica_id, # pylint: disable=protected-access
options)
def _check_health(self):
while True:
if self._check_health_thread_should_stop.is_set():
return
for job in self._cluster_spec.jobs:
for task_id in range(self._cluster_spec.num_tasks(job)):
peer = "/job:{}/replica:0/task:{}".format(job, task_id)
attempts = 0
while True:
attempts += 1
try:
context.context().check_collective_ops_peer_health(
peer, timeout_in_ms=self._check_health_timeout * 1000)
# If check_collective_ops_peer_health doesn't raise an Exception,
# the peer is healthy.
break
except (errors.UnavailableError, errors.FailedPreconditionError,
errors.DeadlineExceededError) as e:
# TODO(b/151232436): Always raise UnavailableError when a peer
# fails. Now there could be many kinds of errors:
# - Unavailable: when the peer is not reachable, e.g. it's down.
# - FailedPrecondition: when the peer has restarted.
if attempts < self._check_health_retry_limit:
logging.warning("%s seems down, retrying %d/%d", peer, attempts,
self._check_health_retry_limit)
continue
logging.error(
"Cluster check alive failed, %s is down, "
"aborting collectives: %s", peer, e)
context.context().abort_collective_ops(
errors.UNAVAILABLE,
"cluster check alive failed, {} is down".format(peer))
return
except Exception as e: # pylint: disable=broad-except
logging.error("Unexpected exception in check alive: %s", e)
context.context().abort_collective_ops(
errors.INTERNAL,
"unexecpted exception in check alive: %s" % e)
return
time.sleep(self._check_health_interval)
def _start_check_health_thread(self):
    # Use a dummy all-reduce as a barrier to wait for all workers to be up,
    # otherwise the health check may fail immediately.
    # Use array_ops.identity to create the dummy tensor so that we have a new
    # Tensor. If we use a constant it may be cached on a /job:localhost device,
    # which will cause code that relies on tensor.device to error.
#
# TODO(b/151232436): change to an explicit barrier if we have it.
dummy_value = array_ops.identity([])
logging.info("Waiting for the cluster, timeout = %s",
self._check_health_initial_timeout or "inf")
try:
self._host_cross_device_ops.reduce(
reduce_util.ReduceOp.SUM,
dummy_value,
dummy_value,
options=collective_util.Options(
timeout_seconds=self._check_health_initial_timeout,
implementation=collective_util.CommunicationImplementation.RING))
if context.is_async():
context.async_wait()
except errors.DeadlineExceededError:
raise RuntimeError(
"Timeout waiting for the cluster, timeout is %d seconds" %
self._check_health_initial_timeout)
logging.info("Cluster is ready.")
self._check_health_thread_should_stop = threading.Event()
# Start the thread as daemon to avoid it blocking the program from exiting.
    # We try our best to shut down the thread, but __del__ is not guaranteed to
    # be called when the program exits.
self._check_health_thread = threading.Thread(
target=self._check_health,
daemon=True)
self._check_health_thread.start()
def _stop_check_health_thread(self):
if getattr(self, "_check_health_thread", None):
logging.info("stopping check health thread")
self._check_health_thread_should_stop.set()
self._check_health_thread.join()
self._check_health_thread = None
logging.info("check health thread stopped")
def _warn_nccl_no_gpu(self):
if ((self._communication_options.implementation ==
collective_util.CommunicationImplementation.NCCL) and
self._num_gpus_per_worker == 0):
logging.warning("Enabled NCCL communication but no GPUs detected/"
"specified.")
def _in_multi_worker_mode(self):
"""Whether this strategy indicates working in multi-worker settings."""
return self._num_workers > 1
@property
def experimental_between_graph(self):
return True
@property
def experimental_should_init(self):
return True
@property
def should_checkpoint(self):
return self._is_chief
@property
def should_save_summary(self):
return self._is_chief
@property
def _num_replicas_in_sync(self):
return len(self.worker_devices) * self._num_workers
# TODO(priyag): Delete this once all strategies use global batch size.
@property
def _global_batch_size(self):
"""`make_dataset_iterator` and `make_numpy_iterator` use global batch size.
`make_input_fn_iterator` assumes per-replica batching.
Returns:
Boolean.
"""
return True
def _get_replica_id_in_sync_group(self, replica_id):
return self._id_in_cluster * len(self.worker_devices) + replica_id
def _get_local_replica_id(self, replica_id_in_sync_group):
return (replica_id_in_sync_group -
self._id_in_cluster * len(self.worker_devices))
def __deepcopy__(self, memo):
# We check the check health thread instead of whether we are in eager mode
# to limit the backward incompatibility.
if hasattr(self, "_check_health_thread"):
raise ValueError(
"MultiWorkerMirroredStrategy cannot be deep copied in eager mode. "
"If you're using Estimator and see this error message, call "
"tf.compat.v1.disable_eager_execution() at the beginning of your "
"program")
# Otherwise, do a regular deepcopy.
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, copy.deepcopy(v, memo))
return result
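A small construction sketch pulled together from the docstrings above (not part of the TensorFlow module itself): it assumes `TF_CONFIG` is already set on each worker as described in the class docstring, and passes explicit `CommunicationOptions`.

import tensorflow as tf

options = tf.distribute.experimental.CommunicationOptions(
    implementation=tf.distribute.experimental.CommunicationImplementation.RING)
strategy = tf.distribute.MultiWorkerMirroredStrategy(communication_options=options)

with strategy.scope():
    # Variables created under the scope are replicated on every worker/device.
    model = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(5,))])
print("Replicas in sync:", strategy.num_replicas_in_sync)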
|
QUERY_HASH = '42323d64886122307be10013ad2dcc44'
STORIES_QUERY_HASH = '45246d3fe16ccc6577e0bd297a5db1ab'
SHORTCODE_QUERY_HASH = 'fead941d698dc1160a298ba7bec277ac'
BASE_URL = "https://www.instagram.com"
LOGIN_REFERER = f'{BASE_URL}/accounts/login'
LOGIN_URL = f'{BASE_URL}/accounts/login/ajax/'
LOGOUT_URL = f'{BASE_URL}/accounts/logout/'
QUERY_URL = f'{BASE_URL}/graphql/query/'
QUERY_POST_URL = f'{QUERY_URL}?' + \
f'query_hash={QUERY_HASH}&' + \
'variables=%7B"id"%3A"{id}"%2C"first"%3A{first}%2C"after"%3A"{after}"%7D'
SHORTCODE_URL = f'{QUERY_URL}?' + \
f'query_hash={SHORTCODE_QUERY_HASH}&' + \
'variables=%7B"shortcode"%3A"{shortcode}"%2C"child_comment_count"%3A{child_comment_count}%2C"fetch_comment_count"%3A{fetch_comment_count}%2C"parent_comment_count"%3A{parent_comment_count}%2C"has_threaded_comments"%3A{has_threaded_comments}%7D'
STORIES_API_URL = BASE_URL + '/graphql/query/?' + \
f'query_hash={STORIES_QUERY_HASH}&' + \
'variables=%7B%22' + \
'reel_ids%22%3A%5B%22{id}%22%5D%2C%22' + \
'tag_names%22%3A%5B%5D%2C%22' + \
'location_ids%22%3A%5B%5D%2C%22' + \
'highlight_reel_ids%22%3A%5B%5D%2C%22' + \
'precomposed_overlay%22%3Afalse%7D'
# JSON-style literal aliases so that raw JSON/GraphQL response text can be
# evaluated as Python objects without any conversion
null = None
true = True
false = False
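These templates deliberately leave `{id}`, `{first}`, `{after}`, `{shortcode}`, etc. unresolved so they can be filled in later with `str.format`; a small sketch of how they might be expanded (the id and shortcode values are placeholders):

posts_url = QUERY_POST_URL.format(id="123456789", first=12, after="")
post_url = SHORTCODE_URL.format(shortcode="<shortcode>",
                                child_comment_count=3,
                                fetch_comment_count=40,
                                parent_comment_count=24,
                                has_threaded_comments="true")
stories_url = STORIES_API_URL.format(id="123456789")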
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
'''
@File : main.py
@Author : guoliang.wgl
@version : 1.0
@Description: smart_fan demo - smart control of a small fan
              board.json - hardware resource configuration file
'''
from fan import Fan
from aht21b import AHT21B
from driver import PWM, I2C
import time
from aliyunIoT import Device  # the aliyunIoT component connects the device to the Alibaba Cloud IoT platform
import json
import network  # MicroPython Wi-Fi module, used by network.WLAN in the main block below
# Flag indicating whether the device is connected to the IoT platform
iot_connected = False
wlan = None
# Device "triple" credentials issued by the IoT platform
productKey = "your-product-key"
deviceName = "your-device-name"
deviceSecret = "your-device-secret"
# IoT device instance
device = None
# Wi-Fi SSID and password
wifiSsid = "your-router-ssid"
wifiPassword = "your-router-password"
# Temperature thresholds (in degrees Celsius) for the three fan gears
gear1_temp = 22
gear2_temp = 27
gear3_temp = 32
FLAG_CUR_TEMP = "cur_temp"
FLAG_GEAR1 = "gear1"
FLAG_GEAR2 = "gear2"
FLAG_GEAR3 = "gear3"
cur_gear = 0
# Wait until Wi-Fi has successfully connected to the router
def get_wifi_status():
global wlan
wifi_connected = False
    wlan.active(True)  # activate the WLAN interface
    wlan.scan()  # scan for access points
    #print("start to connect ", wifiSsid)
    # connect to the specified router (SSID: wifiSsid, password: wifiPassword)
wlan.connect(wifiSsid, wifiPassword)
while True:
        wifi_connected = wlan.isconnected()  # get the Wi-Fi connection status
        if wifi_connected:  # exit the while loop once Wi-Fi is connected
break
else:
time.sleep(0.5)
print("wifi_connected:", wifi_connected)
    ifconfig = wlan.ifconfig()  # get the interface's IP/netmask/gateway/DNS addresses
print(ifconfig)
time.sleep(0.5)
# Callback invoked when the connection to the IoT platform succeeds
def on_connect(data):
global iot_connected
iot_connected = True
# Callback for property set events (invoked when the cloud pushes properties to the device)
def on_props(request):
global FLAG_GEAR1, FLAG_GEAR2, FLAG_GEAR3, gear1_temp, gear2_temp, gear3_temp
try:
props = eval(request['params'])
if FLAG_GEAR1 in props.keys():
gear1_temp = props[FLAG_GEAR1]
print('on_props: name is {},value is {}'.format(
FLAG_GEAR1, gear1_temp))
elif FLAG_GEAR2 in props.keys():
gear2_temp = props[FLAG_GEAR2]
print('on_props: name is {},value is {}'.format(
FLAG_GEAR2, gear2_temp))
elif FLAG_GEAR3 in props.keys():
gear3_temp = props[FLAG_GEAR3]
print('on_props: name is {},value is {}'.format(
FLAG_GEAR3, gear3_temp))
post_default_value()
except Exception as e:
print(e)
def post_props(data):
global device
if isinstance(data, dict):
data = {'params': json.dumps(data)}
ret = device.postProps(data)
return ret
def connect_lk(productKey, deviceName, deviceSecret):
global device, iot_connected
key_info = {
'region': 'cn-shanghai',
'productKey': productKey,
'deviceName': deviceName,
'deviceSecret': deviceSecret,
'keepaliveSec': 60
}
    # Set the device "triple" credentials on the aliyunIoT component
    device = Device()
    # Register the on_connect callback, invoked once the connection to the IoT platform succeeds
    device.on(Device.ON_CONNECT, on_connect)
    # Register the callback for property control messages from the cloud:
    # on_props is invoked when the IoT platform pushes a property control message
    device.on(Device.ON_PROPS, on_props)
    # Start connecting to the Alibaba Cloud IoT platform
    device.connect(key_info)
    # Wait until the device has connected to the IoT platform
while True:
if iot_connected:
            print('Connected to the IoT platform')
break
else:
print('sleep for 1 s')
time.sleep(1)
time.sleep(2)
def post_default_value():
global FLAG_GEAR1, FLAG_GEAR2, FLAG_GEAR3, gear1_temp, gear2_temp, gear3_temp
value = {FLAG_GEAR1: gear1_temp}
post_props(value)
value = {FLAG_GEAR2: gear2_temp}
post_props(value)
value = {FLAG_GEAR3: gear3_temp}
post_props(value)
def upload_temp(temp):
value = {FLAG_CUR_TEMP: temp}
post_props(value)
if __name__ == '__main__':
    wlan = network.WLAN(network.STA_IF)  # create a WLAN (station) object
    # Replace with the product and device info obtained from the IoT platform console
# global productKey, deviceName, deviceSecret ,on_request, on_play
get_wifi_status()
connect_lk(productKey, deviceName, deviceSecret)
post_default_value()
    # Initialize the PWM that drives the fan
pwmObj = PWM()
pwmObj.open("fan")
fan = Fan(pwmObj)
fan.control(0)
    # Initialize the temperature sensor
i2c = I2C()
i2c.open('aht21b')
aht = AHT21B(i2c)
while True:
temp = aht.getTemperature()
print('cur temp is {}'.format(temp))
upload_temp(temp)
if temp <= gear1_temp and cur_gear != 0:
cur_gear = 0
fan.control(cur_gear)
print('fan change to gear {}'.format(cur_gear))
elif temp > gear1_temp and temp <= gear2_temp and cur_gear != 1:
cur_gear = 1
fan.control(cur_gear)
print('fan change to gear {}'.format(cur_gear))
elif temp > gear2_temp and temp <= gear3_temp and cur_gear != 2:
cur_gear = 2
fan.control(cur_gear)
print('fan change to gear {}'.format(cur_gear))
elif temp > gear3_temp and cur_gear != 3:
cur_gear = 3
fan.control(cur_gear)
print('fan change to gear {}'.format(cur_gear))
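The loop above reads the sensor and posts to the cloud as fast as it can iterate; in practice a short delay at the end of each iteration is usually added so the device is not busy-polling. A suggested tweak (not in the original sample) for the end of the while body:

        time.sleep(2)  # sample the temperature roughly every 2 seconds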
|
# coding=utf-8
#
# Copyright 2020 Heinrich Heine University Duesseldorf
#
# Part of this code is based on the source code of BERT-DST
# (arXiv:1907.03040)
# Part of this code is based on the source code of Transformers
# (arXiv:1910.03771)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import logging
import os
import sys
from typing import Dict, Union
import hydra
import numpy as np
import torch
import transformers
from fairscale.nn.data_parallel.fully_sharded_data_parallel import FullyShardedDataParallel as FullyShardedDDP
from fairscale.nn.wrap.auto_wrap import auto_wrap
from fairscale.optim.grad_scaler import ShardedGradScaler
from omegaconf import DictConfig, OmegaConf
from torch import distributed as dist
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm, trange
from transformers import (get_linear_schedule_with_warmup, AutoTokenizer, PreTrainedTokenizer)
from general_util.logger import setting_logger
from general_util.training_utils import batch_to_device, unwrap_model, set_seed, note_best_checkpoint, initialize_optimizer
logger: logging.Logger
# transformers.logging.set_verbosity_error()
def save_model(model: Union[torch.nn.Module, FullyShardedDDP], cfg: DictConfig, output_dir: str, tokenizer: PreTrainedTokenizer = None):
# Save model checkpoint.
if cfg.local_rank != -1:
state_dict = model.state_dict()
if cfg.local_rank == 0:
unwrap_model(model).save_pretrained(output_dir, state_dict=state_dict)
else:
model.save_pretrained(output_dir)
# Save tokenizer and training args.
if cfg.local_rank in [-1, 0]:
if tokenizer is not None:
tokenizer.save_pretrained(output_dir)
OmegaConf.save(cfg, os.path.join(output_dir, "training_config.yaml"))
logger.info("Saving model checkpoint to %s", output_dir)
def forward_step(model, inputs: Dict[str, torch.Tensor], cfg, scaler):
if cfg.fp16:
with torch.cuda.amp.autocast():
outputs = model(**inputs)
loss = outputs["loss"] # model outputs are always tuple in transformers (see doc)
else:
outputs = model(**inputs)
loss = outputs["loss"] # model outputs are always tuple in pytorch-transformers (see doc)
if cfg.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training
if cfg.gradient_accumulation_steps > 1:
loss = loss / cfg.gradient_accumulation_steps
if cfg.fp16:
scaler.scale(loss).backward()
else:
loss.backward()
return loss.item()
def train(cfg, train_dataset, features, model, tokenizer, continue_from_global_step=0):
""" Train the model """
if cfg.local_rank in [-1, 0]:
_dir_splits = cfg.output_dir.split('/')
_log_dir = '/'.join([_dir_splits[0], 'runs'] + _dir_splits[1:])
tb_writer = SummaryWriter(log_dir=_log_dir)
else:
tb_writer = None
cfg.train_batch_size = cfg.per_gpu_train_batch_size * max(1, cfg.n_gpu)
train_sampler = RandomSampler(train_dataset) if cfg.local_rank == -1 else DistributedSampler(train_dataset)
train_collator = hydra.utils.instantiate(cfg.collator) if "collator" in cfg and cfg.collator else None
train_dataloader = DataLoader(dataset=train_dataset, sampler=train_sampler, batch_size=cfg.train_batch_size,
collate_fn=train_collator, num_workers=cfg.num_workers, pin_memory=True,
prefetch_factor=cfg.prefetch_factor)
if "extended_vocab" in cfg and cfg.extended_vocab:
logger.info(f"Extended extra vocab size: {cfg.extended_vocab}")
model.resize_token_embeddings(model.config.vocab_size + cfg.extended_vocab)
if cfg.max_steps > 0:
t_total = cfg.max_steps
cfg.num_train_epochs = cfg.max_steps // (len(train_dataloader) // cfg.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // cfg.gradient_accumulation_steps * cfg.num_train_epochs
num_warmup_steps = int(t_total * cfg.warmup_proportion) if cfg.warmup_proportion else cfg.warmup_steps
optimizer = scheduler = None
# Prepare optimizer and schedule (linear warmup and decay)
if cfg.local_rank == -1:
no_decay = ['bias', 'LayerNorm.weight', 'layer_norm.weight']
optimizer_grouped_parameters = [
{
'params': [p for n, p in model.named_parameters() if (not any(nd in n for nd in no_decay)) and p.requires_grad],
'weight_decay': cfg.weight_decay
},
{
'params': [p for n, p in model.named_parameters() if (any(nd in n for nd in no_decay)) and p.requires_grad],
'weight_decay': 0.0
}
]
optimizer = initialize_optimizer(cfg, optimizer_grouped_parameters)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total)
if cfg.fp16:
if cfg.local_rank != -1:
scaler = ShardedGradScaler()
else:
from torch.cuda.amp.grad_scaler import GradScaler
scaler = GradScaler()
else:
scaler = None
# multi-gpu training (should be after apex fp16 initialization)
model_single_gpu = model
if cfg.n_gpu > 1:
model = torch.nn.DataParallel(model_single_gpu)
# Distributed training (should be after apex fp16 initialization)
if cfg.local_rank != -1:
model = auto_wrap(model)
model = FullyShardedDDP(model,
mixed_precision=cfg.fp16,
flatten_parameters=getattr(cfg, "flatten_parameters", True),
reshard_after_forward=cfg.reshard_after_forward,
move_grads_to_cpu=cfg.move_grads_to_cpu,
move_params_to_cpu=cfg.move_params_to_cpu)
if not cfg.move_params_to_cpu:
model = model.to(cfg.device)
no_decay = ['bias', 'LayerNorm.weight', 'layer_norm.weight']
optimizer_grouped_parameters = [
{
'params': [p for n, p in model.named_parameters() if (not any(nd in n for nd in no_decay)) and p.requires_grad],
'weight_decay': cfg.weight_decay
},
{
'params': [p for n, p in model.named_parameters() if (any(nd in n for nd in no_decay)) and p.requires_grad],
'weight_decay': 0.0
}
]
optimizer = initialize_optimizer(cfg, optimizer_grouped_parameters)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total)
logger.info(optimizer)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", cfg.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", cfg.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
cfg.train_batch_size * cfg.gradient_accumulation_steps * (dist.get_world_size() if cfg.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", cfg.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
logger.info(" Warmup steps = %d", num_warmup_steps)
if continue_from_global_step > 0:
logger.info("Fast forwarding to global step %d to resume training from latest checkpoint...", continue_from_global_step)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(cfg.num_train_epochs), desc="Epoch", disable=cfg.local_rank not in [-1, 0])
set_seed(cfg) # Added here for reproducibility (even between python 2 and 3)
for epoch in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=cfg.local_rank not in [-1, 0], dynamic_ncols=True)
if cfg.local_rank != -1:
train_dataloader.sampler.set_epoch(epoch)
for step, batch in enumerate(epoch_iterator):
# If training is continued from a checkpoint, fast forward
# to the state of that checkpoint.
if global_step < continue_from_global_step:
if (step + 1) % cfg.gradient_accumulation_steps == 0:
scheduler.step() # Update learning rate schedule
global_step += 1
continue
model.train()
batch = batch_to_device(batch, cfg.device)
if (step + 1) % cfg.gradient_accumulation_steps != 0 and cfg.local_rank != -1:
# Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
with model.no_sync():
loss = forward_step(model, batch, cfg, scaler)
else:
loss = forward_step(model, batch, cfg, scaler)
tr_loss += loss
if (step + 1) % cfg.gradient_accumulation_steps == 0:
if cfg.fp16:
scaler.unscale_(optimizer)
if cfg.max_grad_norm:
if hasattr(optimizer, "clip_grad_norm"):
optimizer.clip_grad_norm(cfg.max_grad_norm)
elif hasattr(model, "clip_grad_norm_"):
model.clip_grad_norm_(cfg.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), cfg.max_grad_norm)
if cfg.fp16:
scaler.step(optimizer)
scaler.update()
else:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad(set_to_none=True)
global_step += 1
# Log metrics
if cfg.local_rank in [-1, 0] and cfg.logging_steps > 0 and global_step % cfg.logging_steps == 0:
tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
tb_writer.add_scalar('loss', (tr_loss - logging_loss) / cfg.logging_steps, global_step)
logging_loss = tr_loss
# Save model checkpoint
if cfg.save_steps > 0 and global_step % cfg.save_steps == 0:
output_dir = os.path.join(cfg.output_dir, 'checkpoint-{}'.format(global_step))
if cfg.local_rank in [-1, 0] and not os.path.exists(output_dir):
os.makedirs(output_dir)
save_model(model, cfg, output_dir, tokenizer)
# Evaluation
if cfg.evaluate_during_training and cfg.eval_steps > 0 and global_step % cfg.eval_steps == 0:
state_dict = model.state_dict()
if cfg.local_rank in [-1, 0]:
results = evaluate(cfg, model, tokenizer, prefix=str(global_step), _split="dev")
for key, value in results.items():
tb_writer.add_scalar(f"eval/{key}", value, global_step)
sub_path = os.path.join(cfg.output_dir, 'checkpoint-{}'.format(global_step))
flag = note_best_checkpoint(cfg, results, sub_path)
if cfg.save_best and flag:
if cfg.local_rank == 0:
unwrap_model(model).save_pretrained(cfg.output_dir, state_dict=state_dict)
else:
model.save_pretrained(cfg.output_dir)
tokenizer.save_pretrained(cfg.output_dir)
OmegaConf.save(cfg, os.path.join(cfg.output_dir, "training_config.yaml"))
logger.info("Saving best model checkpoint to %s", cfg.output_dir)
if 0 < cfg.max_steps < global_step:
epoch_iterator.close()
break
if 0 < cfg.max_steps < global_step:
train_iterator.close()
break
if cfg.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def evaluate(cfg, model, tokenizer: PreTrainedTokenizer, prefix="", _split="dev"):
dataset, features = load_and_cache_examples(cfg, tokenizer, _split=_split)
if not os.path.exists(os.path.join(cfg.output_dir, prefix)):
os.makedirs(os.path.join(cfg.output_dir, prefix))
cfg.eval_batch_size = cfg.per_gpu_eval_batch_size
eval_sampler = SequentialSampler(dataset) # Note that DistributedSampler samples randomly
eval_collator = hydra.utils.instantiate(cfg.collator) if "collator" in cfg and cfg.collator else None
eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=cfg.eval_batch_size,
collate_fn=eval_collator)
single_model_gpu = unwrap_model(model)
single_model_gpu.get_eval_log(reset=True)
# Eval!
torch.cuda.empty_cache()
logger.info("***** Running evaluation {}.{} *****".format(_split, prefix))
logger.info(" Num examples = %d", len(dataset))
logger.info(" Batch size = %d", cfg.eval_batch_size)
# Seems FSDP does not need to unwrap the model for evaluating.
model.eval()
pred_list = []
prob_list = []
for batch in tqdm(eval_dataloader, desc="Evaluating", dynamic_ncols=True):
batch = batch_to_device(batch, cfg.device)
with torch.cuda.amp.autocast():
with torch.no_grad():
outputs = model(**batch)
probs = outputs["logits"].softmax(dim=-1).detach().float().cpu()
prob, pred = probs.max(dim=-1)
pred_list.extend(pred.tolist())
prob_list.extend(prob.tolist())
metric_log, results = single_model_gpu.get_eval_log(reset=True)
logger.info("****** Evaluation Results ******")
logger.info(f"Global Steps: {prefix}")
logger.info(metric_log)
prediction_file = os.path.join(cfg.output_dir, prefix, "eval_predictions.npy")
np.save(prediction_file, pred_list)
json.dump(prob_list, open(os.path.join(cfg.output_dir, prefix, "eval_probs.json"), "w"))
return results
def load_and_cache_examples(cfg, tokenizer: PreTrainedTokenizer, _split="train"):
if cfg.local_rank not in [-1, 0] and _split == "train":
        dist.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
if _split == "train":
input_file = cfg.train_file
elif _split == "dev":
input_file = cfg.dev_file
elif _split == "test":
input_file = cfg.test_file
else:
raise RuntimeError(_split)
examples, features, tensors = hydra.utils.call(cfg.read_tensor, file_path=input_file, tokenizer=tokenizer)
if cfg.local_rank == 0 and _split == "train":
        dist.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
dataset = TensorDataset(*tensors)
return dataset, features
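# The `cfg.read_tensor` entry above is a Hydra-configured callable; the concrete
# reader used by this project lives in its config, but a minimal hypothetical
# sketch that satisfies the call signature (file_path, tokenizer) and the
# expected (examples, features, tensors) return value could look like this.
# The JSON record format and field names below are assumptions for illustration only.
def _example_read_tensor(file_path: str, tokenizer: PreTrainedTokenizer, max_seq_length: int = 128):
    with open(file_path) as f:
        examples = json.load(f)  # hypothetical format: [{"premise", "hypothesis", "label"}, ...]
    encoded = tokenizer([ex["premise"] for ex in examples],
                        [ex["hypothesis"] for ex in examples],
                        padding="max_length", truncation=True,
                        max_length=max_seq_length, return_tensors="pt")
    labels = torch.tensor([ex["label"] for ex in examples], dtype=torch.long)
    features = encoded  # keep the BatchEncoding around as the "features" object
    tensors = (encoded["input_ids"], encoded["attention_mask"], labels)
    return examples, features, tensors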
@hydra.main(config_path="conf", config_name="config")
def main(cfg: DictConfig):
if cfg.local_rank == -1 or cfg.no_cuda:
device = str(torch.device("cuda" if torch.cuda.is_available() and not cfg.no_cuda else "cpu"))
cfg.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(cfg.local_rank)
device = str(torch.device("cuda", cfg.local_rank))
dist.init_process_group(backend='nccl')
cfg.n_gpu = 1
cfg.world_size = dist.get_world_size()
cfg.device = device
global logger
logger = setting_logger(cfg.output_dir, local_rank=cfg.local_rank)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
cfg.local_rank, device, cfg.n_gpu, bool(cfg.local_rank != -1), cfg.fp16)
# Set seed
set_seed(cfg)
# Load pre-trained model and tokenizer
if cfg.local_rank not in [-1, 0]:
dist.barrier() # Make sure only the first process in distributed training will download model & vocab
if cfg.pretrain:
pretrain_state_dict = torch.load(cfg.pretrain, map_location='cpu')
else:
pretrain_state_dict = None
tokenizer = AutoTokenizer.from_pretrained(cfg.model_name_or_path)
model = hydra.utils.call(cfg.model, cfg.model_name_or_path, state_dict=pretrain_state_dict)
if cfg.local_rank == 0:
dist.barrier() # Make sure only the first process in distributed training will download model & vocab
if cfg.local_rank == -1: # For FullyShardedDDP, place the model on cpu first.
model.to(cfg.device)
# logger.info("Training/evaluation parameters %s", OmegaConf.to_yaml(cfg))
if cfg.local_rank in [-1, 0] and cfg.do_train:
if not os.path.exists(cfg.output_dir):
os.makedirs(cfg.output_dir)
OmegaConf.save(cfg, os.path.join(cfg.output_dir, "training_config.yaml"))
# Training
if cfg.do_train:
# TODO: Add option for continuously training from checkpoint.
# The operation should be introduced in ``train`` method since both the state dict
# of schedule and optimizer (and scaler, if any) should be loaded.
# If output files already exist, assume we continue training from the latest checkpoint (unless overwrite_output_dir is set)
continue_from_global_step = 0 # If set to 0, start training from the beginning
# if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
# checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/*/' + WEIGHTS_NAME, recursive=True)))
# if len(checkpoints) > 0:
# checkpoint = checkpoints[-1]
# logger.info("Resuming training from the latest checkpoint: %s", checkpoint)
# continue_from_global_step = int(checkpoint.split('-')[-1])
# model = model_class.from_pretrained(checkpoint)
# model.to(args.device)
train_dataset, features = load_and_cache_examples(cfg, tokenizer, _split="train")
global_step, tr_loss = train(cfg, train_dataset, features, model, tokenizer, continue_from_global_step)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Test
results = {}
if cfg.do_eval and cfg.local_rank in [-1, 0]:
checkpoints = [cfg.output_dir]
if cfg.save_best:
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
elif cfg.prediction_cfg.best_checkpoint and os.path.exists(cfg.prediction_cfg.best_checkpoint):
checkpoints = [cfg.prediction_cfg.best_checkpoint]
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
elif cfg.eval_sub_path:
checkpoints = list(
os.path.dirname(c) for c in
sorted(glob.glob(cfg.output_dir + f"/{cfg.eval_sub_path}/" + "pytorch_model.bin", recursive=True))
)
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info(" the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
split = "dev"
model = hydra.utils.call(cfg.model, checkpoint)
model.to(device)
if cfg.test_file:
prefix = f'test' + (f'-{prefix}' if prefix != "" else "")
split = "test"
result = evaluate(cfg, model, tokenizer, prefix=prefix, _split=split)
result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
results.update(result)
return results
if __name__ == "__main__":
hydra_formatted_args = []
# convert the cli params added by torch.distributed.launch into Hydra format
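# e.g. (hedged illustration): torch.distributed.launch passes flags such as "--local_rank=0";
# stripping the leading "--" turns them into Hydra overrides: ["--local_rank=0"] -> ["local_rank=0"]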
for arg in sys.argv:
if arg.startswith("--"):
hydra_formatted_args.append(arg[len("--"):])
else:
hydra_formatted_args.append(arg)
sys.argv = hydra_formatted_args
main()
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_axis17.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [43812736, 45705088]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_y_axis({'log_base': 10})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
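# A hedged note: the option exercised by this comparison test is the y-axis 'log_base'
# setting above, checked against Excel's reference file chart_axis17.xlsx.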
|
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 4250.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 328000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
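# A minimal simulation sketch (assumption: PySB's ScipyOdeSimulator backend is available;
# the time span and the observable inspected below are illustrative choices, not part of the export).
import numpy as np
from pysb.simulator import ScipyOdeSimulator
tspan = np.linspace(0, 20000, 100)
simulation_result = ScipyOdeSimulator(model, tspan=tspan).run()
print(simulation_result.observables['ParpC_obs'][-1])  # cleaved PARP at the final time point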
|
import tensorflow as tf
import keras.backend as K
import numpy as np
from Utils import *
from generators.MotionBlurGenerator import *
from generators.CelebAGenerator import *
K.set_learning_phase(0)
from glob import glob
import os
# paths
Orig_Path = './results/CelebA/Original Images/*.png'
Range_Path = './results/CelebA/Range Images/*.png'
Blur_Path = './results/CelebA/Original Blurs/Test Blurs.npy'
# constants
REGULARIZORS = [0.01 , 0.01]
RANDOM_RESTARTS = 10
NOISE_STD = 0.01
STEPS = 10000
IMAGE_RANGE = [-1,1]
def step_size(t):
return 0.01 * np.exp( - t / 1000 )
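# (hedged note) the step size starts at 0.01 and decays by a factor of e every 1000 steps (~0.0037 at t = 1000)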
SAVE_PATH = './results/CelebA/deblurring - alg1 - ' +str(int(NOISE_STD*100)) + 'perc noise - ' +str(RANDOM_RESTARTS) + 'RR/deblurring_'
# -----------------------------------------------------------------------
# loading test blur images
W = np.load(Blur_Path)
BLUR_RES = W.shape[1]
# loading test celeba images
X_Orig = np.array([ imread(path) for path in glob(Orig_Path)])/255
X_Range = np.array([ imread(path) for path in glob(Range_Path)])/255
IMAGE_RES = X_Orig.shape[1]
CHANNELS = X_Orig.shape[-1]
# loading celeba generator
CelebAGen = CelebAGenerator()
CelebAGen.GenerateModel()
CelebAGen.LoadWeights()
CelebAGAN = CelebAGen.GetModels()
celeba_latent_dim = CelebAGen.latent_dim
# loading motion blur generator
BLURGen = MotionBlur()
BLURGen.GenerateModel()
BLURGen.LoadWeights()
blur_vae, blur_encoder, blur_decoder = BLURGen.GetModels()
blur_latent_dim = BLURGen.latent_dim
# check if save dir exists, if not create a new one
try:
os.stat(SAVE_PATH[:-11])
except OSError:
os.mkdir(SAVE_PATH[:-11])
# generating blurry images from test
Y_np = []
Blurry_Images = []
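# each blurry measurement is replicated RANDOM_RESTARTS times so that every random restart
# in the parallel optimization below operates on its own copy of the same observation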
for i in tqdm(range(len(X_Orig)), ascii=True, desc ='Gen-Test-Blurry'):
x_np = X_Orig[i]
w_np = W[i]
y_np, y_f = GenerateBlurry(x_np, w_np, noise_std = NOISE_STD )
Y_np.append(y_np)
for _ in range(RANDOM_RESTARTS):
Blurry_Images.append(y_f)
Y_np = np.array(Y_np)
Blurry_Images = np.array(Blurry_Images)
# generating blurry images from range
Blurry_Images_range = []
Y_np_range = []
for i in tqdm(range(len(X_Orig)), ascii=True, desc ='Gen-Range-Blurry'):
y_np, y_f = GenerateBlurry(X_Range[i], W[i], noise_std = NOISE_STD )
Y_np_range.append(y_np)
for _ in range(RANDOM_RESTARTS):
Blurry_Images_range.append(y_f)
Y_np_range = np.array(Y_np_range)
Blurry_Images_range = np.array(Blurry_Images_range)
# alternating gradient descent for test images
image_gradients, blur_gradients, get_loss = Generate_Gradient_Functions(rr = Blurry_Images.shape[0],
reg = REGULARIZORS, image_range = IMAGE_RANGE,
decoder = CelebAGAN, blur_decoder = blur_decoder,
image_res = IMAGE_RES, blur_res = BLUR_RES,
channels = CHANNELS)
m_hat, h_hat, Loss = Optimize_Parallel(blurry_fourier = Blurry_Images, stepsize=step_size,steps = STEPS,
image_grad = image_gradients , blur_grad = blur_gradients,
getloss = get_loss, latent_image_dim = celeba_latent_dim , latent_blur_dim = blur_latent_dim)
X_hat_test = []
W_hat_test = []
for i in range(len(X_Orig)):
m_hat_i = m_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]
h_hat_i = h_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]
Loss_i = Loss[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]
x_hat_test, w_hat_test, loss_last_iter_test = Get_Min_Loss(Loss_i, m_hat_i, h_hat_i, decoder = CelebAGAN, blur_decoder = blur_decoder,
latent_image_dim = celeba_latent_dim, latent_blur_dim = blur_latent_dim, print_grad=False)
X_hat_test.append(x_hat_test)
W_hat_test.append(w_hat_test)
X_hat_test = np.array(X_hat_test)
W_hat_test = np.array(W_hat_test)
# alternating gradient descent for range images
m_hat, h_hat, Loss = Optimize_Parallel(blurry_fourier = Blurry_Images_range, stepsize=step_size,steps = STEPS,
image_grad = image_gradients , blur_grad = blur_gradients,
getloss = get_loss, latent_image_dim = celeba_latent_dim , latent_blur_dim = blur_latent_dim)
X_hat_range = []
W_hat_range = []
for i in range(len(X_Orig)):
m_hat_i = m_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]
h_hat_i = h_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]
Loss_i = Loss[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]
x_hat_range, w_hat_range, loss_last_iter_range = Get_Min_Loss(Loss_i, m_hat_i, h_hat_i, decoder = CelebAGAN, blur_decoder = blur_decoder,
latent_image_dim = celeba_latent_dim, latent_blur_dim = blur_latent_dim, print_grad=False)
X_hat_range.append(x_hat_range)
W_hat_range.append(w_hat_range)
X_hat_range = np.array(X_hat_range)
W_hat_range = np.array(W_hat_range)
X_hat_test = (X_hat_test + 1)/2
X_hat_range = (X_hat_range + 1)/2
Max = 10**len(str(len(X_Orig)-1))
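# (hedged note) Max is a power of ten with one more digit than the largest index,
# so str(i + Max)[1:] below produces zero-padded, fixed-width file indices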
# saving results
for i in range(len(X_Orig)):
Save_Results(path = SAVE_PATH + str(i+Max)[1:],
x_np = None,
w_np = None,
y_np = Y_np[i],
y_np_range = Y_np_range[i] ,
x_hat_test = X_hat_test[i],
w_hat_test = W_hat_test[i],
x_range = None,
x_hat_range = X_hat_range[i],
w_hat_range = W_hat_range[i], clip=True)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base PXE Interface Methods
"""
from futurist import periodics
from ironic_lib import metrics_utils
from oslo_config import cfg
from oslo_log import log as logging
from ironic.common import boot_devices
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common.i18n import _
from ironic.common import pxe_utils
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import boot_mode_utils
from ironic.drivers.modules import deploy_utils
from ironic.drivers import utils as driver_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
METRICS = metrics_utils.get_metrics_logger(__name__)
REQUIRED_PROPERTIES = {
'deploy_kernel': _("UUID (from Glance) of the deployment kernel. "
"Required."),
'deploy_ramdisk': _("UUID (from Glance) of the ramdisk that is "
"mounted at boot time. Required."),
}
RESCUE_PROPERTIES = {
'rescue_kernel': _('UUID (from Glance) of the rescue kernel. This value '
'is required for rescue mode.'),
'rescue_ramdisk': _('UUID (from Glance) of the rescue ramdisk with agent '
'that is used at node rescue time. This value is '
'required for rescue mode.'),
}
OPTIONAL_PROPERTIES = {
'kernel_append_params': _("Additional kernel parameters to pass down to "
"instance kernel. These parameters can be "
"consumed by the kernel or by the applications "
"by reading /proc/cmdline. Mind severe cmdline "
"size limit. Overrides "
"[pxe]/kernel_append_params ironic "
"option."),
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(driver_utils.OPTIONAL_PROPERTIES)
COMMON_PROPERTIES.update(RESCUE_PROPERTIES)
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
class PXEBaseMixin(object):
ipxe_enabled = False
def get_properties(self):
"""Return the properties of the interface.
:returns: dictionary of <property name>:<property description> entries.
"""
return COMMON_PROPERTIES
@METRICS.timer('PXEBaseMixin.clean_up_ramdisk')
def clean_up_ramdisk(self, task):
"""Cleans up the boot of ironic ramdisk.
This method cleans up the PXE environment that was setup for booting
the deploy or rescue ramdisk. It unlinks the deploy/rescue
kernel/ramdisk in the node's directory in tftproot and removes its PXE
config.
:param task: a task from TaskManager.
Note: the mode ('deploy' or 'rescue') is not passed in; it is derived
from the node's state.
:returns: None
"""
node = task.node
mode = deploy_utils.rescue_or_deploy_mode(node)
try:
images_info = pxe_utils.get_image_info(
node, mode=mode, ipxe_enabled=self.ipxe_enabled)
except exception.MissingParameterValue as e:
LOG.warning('Could not get %(mode)s image info '
'to clean up images for node %(node)s: %(err)s',
{'mode': mode, 'node': node.uuid, 'err': e})
else:
pxe_utils.clean_up_pxe_env(
task, images_info, ipxe_enabled=self.ipxe_enabled)
@METRICS.timer('PXEBaseMixin.clean_up_instance')
def clean_up_instance(self, task):
"""Cleans up the boot of instance.
This method cleans up the environment that was setup for booting
the instance. It unlinks the instance kernel/ramdisk in node's
directory in tftproot and removes the PXE config.
:param task: a task from TaskManager.
:returns: None
"""
node = task.node
try:
images_info = pxe_utils.get_instance_image_info(
task, ipxe_enabled=self.ipxe_enabled)
except exception.MissingParameterValue as e:
LOG.warning('Could not get instance image info '
'to clean up images for node %(node)s: %(err)s',
{'node': node.uuid, 'err': e})
else:
pxe_utils.clean_up_pxe_env(task, images_info,
ipxe_enabled=self.ipxe_enabled)
boot_mode_utils.deconfigure_secure_boot_if_needed(task)
@METRICS.timer('PXEBaseMixin.prepare_ramdisk')
def prepare_ramdisk(self, task, ramdisk_params):
"""Prepares the boot of Ironic ramdisk using PXE.
This method prepares the boot of the deploy or rescue kernel/ramdisk
after reading relevant information from the node's driver_info and
instance_info.
:param task: a task from TaskManager.
:param ramdisk_params: the parameters to be passed to the ramdisk.
pxe driver passes these parameters as kernel command-line
arguments.
:returns: None
:raises: MissingParameterValue, if some information is missing in
node's driver_info or instance_info.
:raises: InvalidParameterValue, if some information provided is
invalid.
:raises: IronicException, if some power or set boot device
operation failed on the node.
"""
node = task.node
# Label indicating a deploy or rescue operation being carried out on
# the node, 'deploy' or 'rescue'. Unless the node is in a rescue like
# state, the mode is set to 'deploy', indicating deploy operation is
# being carried out.
mode = deploy_utils.rescue_or_deploy_mode(node)
if self.ipxe_enabled:
# NOTE(mjturek): At this point, the ipxe boot script should
# already exist as it is created at startup time. However, we
# call the boot script create method here to assert its
# existence and handle the unlikely case that it wasn't created
# or was deleted.
pxe_utils.create_ipxe_boot_script()
# Generate options for both IPv4 and IPv6, and they can be
# filtered down later based upon the port options.
# TODO(TheJulia): This should be re-tooled during the Victoria
# development cycle so that we call a single method and return
# combined options. The method we currently call is relied upon
# by two external projects, so changing the behavior is not ideal.
dhcp_opts = pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=self.ipxe_enabled, ip_version=4)
dhcp_opts += pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=self.ipxe_enabled, ip_version=6)
provider = dhcp_factory.DHCPFactory()
provider.update_dhcp(task, dhcp_opts)
pxe_info = pxe_utils.get_image_info(node, mode=mode,
ipxe_enabled=self.ipxe_enabled)
# NOTE: Try to validate and fetch instance images only
# if we are in DEPLOYING state.
if node.provision_state == states.DEPLOYING:
pxe_info.update(
pxe_utils.get_instance_image_info(
task, ipxe_enabled=self.ipxe_enabled))
boot_mode_utils.sync_boot_mode(task)
pxe_options = pxe_utils.build_pxe_config_options(
task, pxe_info, ipxe_enabled=self.ipxe_enabled,
ramdisk_params=ramdisk_params)
# TODO(dtantsur): backwards compatibility hack, remove in the V release
if ramdisk_params.get("ipa-api-url"):
pxe_options["ipa-api-url"] = ramdisk_params["ipa-api-url"]
if self.ipxe_enabled:
pxe_config_template = deploy_utils.get_ipxe_config_template(node)
else:
pxe_config_template = deploy_utils.get_pxe_config_template(node)
pxe_utils.create_pxe_config(task, pxe_options,
pxe_config_template,
ipxe_enabled=self.ipxe_enabled)
manager_utils.node_set_boot_device(task, boot_devices.PXE,
persistent=False)
if self.ipxe_enabled and CONF.pxe.ipxe_use_swift:
kernel_label = '%s_kernel' % mode
ramdisk_label = '%s_ramdisk' % mode
pxe_info.pop(kernel_label, None)
pxe_info.pop(ramdisk_label, None)
if pxe_info:
pxe_utils.cache_ramdisk_kernel(task, pxe_info,
ipxe_enabled=self.ipxe_enabled)
LOG.debug('Ramdisk (i)PXE boot for node %(node)s has been prepared '
'with kernel params %(params)s',
{'node': node.uuid, 'params': pxe_options})
@METRICS.timer('PXEBaseMixin.prepare_instance')
def prepare_instance(self, task):
"""Prepares the boot of instance.
This method prepares the boot of the instance after reading
relevant information from the node's instance_info. In case of netboot,
it updates the dhcp entries and switches the PXE config. In case of
localboot, it cleans up the PXE config.
:param task: a task from TaskManager.
:returns: None
"""
boot_mode_utils.sync_boot_mode(task)
boot_mode_utils.configure_secure_boot_if_needed(task)
node = task.node
boot_option = deploy_utils.get_boot_option(node)
boot_device = None
instance_image_info = {}
if boot_option == "ramdisk" or boot_option == "kickstart":
instance_image_info = pxe_utils.get_instance_image_info(
task, ipxe_enabled=self.ipxe_enabled)
pxe_utils.cache_ramdisk_kernel(task, instance_image_info,
ipxe_enabled=self.ipxe_enabled)
if 'ks_template' in instance_image_info:
ks_cfg = pxe_utils.validate_kickstart_template(
instance_image_info['ks_template'][1]
)
pxe_utils.validate_kickstart_file(ks_cfg)
if (deploy_utils.is_iscsi_boot(task) or boot_option == "ramdisk"
or boot_option == "kickstart"):
pxe_utils.prepare_instance_pxe_config(
task, instance_image_info,
iscsi_boot=deploy_utils.is_iscsi_boot(task),
ramdisk_boot=(boot_option == "ramdisk"),
anaconda_boot=(boot_option == "kickstart"),
ipxe_enabled=self.ipxe_enabled)
pxe_utils.prepare_instance_kickstart_config(
task, instance_image_info,
anaconda_boot=(boot_option == "kickstart"))
boot_device = boot_devices.PXE
elif boot_option != "local":
if task.driver.storage.should_write_image(task):
# Make sure that the instance kernel/ramdisk is cached.
# This is for the takeover scenario for active nodes.
instance_image_info = pxe_utils.get_instance_image_info(
task, ipxe_enabled=self.ipxe_enabled)
pxe_utils.cache_ramdisk_kernel(task, instance_image_info,
ipxe_enabled=self.ipxe_enabled)
# If it's going to PXE boot we need to update the DHCP server
dhcp_opts = pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=self.ipxe_enabled, ip_version=4)
dhcp_opts += pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=self.ipxe_enabled, ip_version=6)
provider = dhcp_factory.DHCPFactory()
provider.update_dhcp(task, dhcp_opts)
iwdi = task.node.driver_internal_info.get('is_whole_disk_image')
try:
root_uuid_or_disk_id = task.node.driver_internal_info[
'root_uuid_or_disk_id'
]
except KeyError:
if not task.driver.storage.should_write_image(task):
pass
elif not iwdi:
LOG.warning("The UUID for the root partition can't be "
"found, unable to switch the pxe config from "
"deployment mode to service (boot) mode for "
"node %(node)s", {"node": task.node.uuid})
else:
LOG.warning("The disk id for the whole disk image can't "
"be found, unable to switch the pxe config "
"from deployment mode to service (boot) mode "
"for node %(node)s. Booting the instance "
"from disk.", {"node": task.node.uuid})
pxe_utils.clean_up_pxe_config(
task, ipxe_enabled=self.ipxe_enabled)
boot_device = boot_devices.DISK
else:
pxe_utils.build_service_pxe_config(
task, instance_image_info, root_uuid_or_disk_id,
ipxe_enabled=self.ipxe_enabled)
boot_device = boot_devices.PXE
else:
# NOTE(dtantsur): create a PXE configuration as a safety net for
# hardware incapable of persistent boot. If on a reboot it tries
# to boot from PXE, this configuration will bring it back.
if CONF.pxe.enable_netboot_fallback:
pxe_utils.build_service_pxe_config(
task, instance_image_info,
task.node.driver_internal_info.get('root_uuid_or_disk_id'),
ipxe_enabled=self.ipxe_enabled,
# PXE config for whole disk images is identical to what
# we need to boot from local disk, so use True even
# for partition images.
is_whole_disk_image=True)
else:
# Clean up the deployment configuration
pxe_utils.clean_up_pxe_config(
task, ipxe_enabled=self.ipxe_enabled)
boot_device = boot_devices.DISK
# NOTE(pas-ha) do not re-set boot device on ACTIVE nodes
# during takeover
if boot_device and task.node.provision_state != states.ACTIVE:
manager_utils.node_set_boot_device(task, boot_device,
persistent=True)
def _validate_common(self, task):
node = task.node
if not driver_utils.get_node_mac_addresses(task):
raise exception.MissingParameterValue(
_("Node %s does not have any port associated with it.")
% node.uuid)
if self.ipxe_enabled:
if not CONF.deploy.http_url or not CONF.deploy.http_root:
raise exception.MissingParameterValue(_(
"iPXE boot is enabled but no HTTP URL or HTTP "
"root was specified."))
# NOTE(zer0c00l): When 'kickstart' boot option is used we need to store
# kickstart and squashfs files in http_root directory. These files
# will be eventually requested by anaconda installer during deployment
# over http(s).
if deploy_utils.get_boot_option(node) == 'kickstart':
if not CONF.deploy.http_url or not CONF.deploy.http_root:
raise exception.MissingParameterValue(_(
"'kickstart' boot option is set on the node but no HTTP "
"URL or HTTP root was specified."))
if not CONF.anaconda.default_ks_template:
raise exception.MissingParameterValue(_(
"'kickstart' boot option is set on the node but no "
"default kickstart template is specified."))
# Check the trusted_boot capabilities value.
deploy_utils.validate_capabilities(node)
if deploy_utils.is_trusted_boot_requested(node):
# Check if 'boot_option' and boot mode is compatible with
# trusted boot.
if self.ipxe_enabled:
# NOTE(TheJulia): So in theory (huge theory here, not put to
# practice or tested), that one can define the kernel as tboot
# and define the actual kernel and ramdisk as appended data.
# Similar to how one can iPXE load the XEN hypervisor.
# The tboot mailing list seems to indicate pxe/ipxe support, or
# more specifically an intent to avoid breaking existing scenarios of use,
# but there is no definitive documentation on the subject.
LOG.warning('Trusted boot has been requested for %(node)s in '
'concert with iPXE. This is not a supported '
'configuration for an ironic deployment.',
{'node': node.uuid})
pxe_utils.validate_boot_parameters_for_trusted_boot(node)
# Check if we have invalid parameters being passed which will not work
# for ramdisk configurations.
if (node.instance_info.get('image_source')
and node.instance_info.get('boot_iso')):
raise exception.InvalidParameterValue(_(
"An 'image_source' and 'boot_iso' parameter may not be "
"specified at the same time."))
pxe_utils.parse_driver_info(node)
@METRICS.timer('PXEBaseMixin.validate')
def validate(self, task):
"""Validate the PXE-specific info for booting deploy/instance images.
This method validates the PXE-specific info for booting the
ramdisk and instance on the node. If invalid, raises an
exception; otherwise returns None.
:param task: a task from TaskManager.
:returns: None
:raises: InvalidParameterValue, if some parameters are invalid.
:raises: MissingParameterValue, if some required parameters are
missing.
"""
self._validate_common(task)
node = task.node
# NOTE(TheJulia): If we're not writing an image, we can skip
# the remainder of this method.
# NOTE(dtantsur): if we are writing an image with local boot
# the boot interface does not care about image parameters and
# must not validate them.
boot_option = deploy_utils.get_boot_option(node)
if (not task.driver.storage.should_write_image(task)
or boot_option == 'local'):
return
d_info = deploy_utils.get_image_instance_info(node)
if node.driver_internal_info.get('is_whole_disk_image'):
props = []
elif d_info.get('boot_iso'):
props = ['boot_iso']
elif service_utils.is_glance_image(d_info['image_source']):
props = ['kernel_id', 'ramdisk_id']
if boot_option == 'kickstart':
props.append('squashfs_id')
else:
props = ['kernel', 'ramdisk']
deploy_utils.validate_image_properties(task.context, d_info, props)
@METRICS.timer('PXEBaseMixin.validate_rescue')
def validate_rescue(self, task):
"""Validate that the node has required properties for rescue.
:param task: a TaskManager instance with the node being checked
:raises: MissingParameterValue if node is missing one or more required
parameters
"""
pxe_utils.parse_driver_info(task.node, mode='rescue')
@METRICS.timer('PXEBaseMixin.validate_inspection')
def validate_inspection(self, task):
"""Validate that the node has required properties for inspection.
:param task: A TaskManager instance with the node being checked
:raises: UnsupportedDriverExtension
"""
try:
self._validate_common(task)
except exception.MissingParameterValue:
# Fall back to non-managed in-band inspection
raise exception.UnsupportedDriverExtension(
driver=task.node.driver, extension='inspection')
_RETRY_ALLOWED_STATES = {states.DEPLOYWAIT, states.CLEANWAIT,
states.RESCUEWAIT}
@METRICS.timer('PXEBaseMixin._check_boot_timeouts')
@periodics.periodic(spacing=CONF.pxe.boot_retry_check_interval,
enabled=bool(CONF.pxe.boot_retry_timeout))
def _check_boot_timeouts(self, manager, context):
"""Periodically checks whether boot has timed out and retry it.
:param manager: conductor manager.
:param context: request context.
"""
filters = {'provision_state_in': self._RETRY_ALLOWED_STATES,
'reserved': False,
'maintenance': False,
'provisioned_before': CONF.pxe.boot_retry_timeout}
node_iter = manager.iter_nodes(filters=filters)
for node_uuid, driver, conductor_group in node_iter:
try:
lock_purpose = 'checking PXE boot status'
with task_manager.acquire(context, node_uuid,
shared=True,
purpose=lock_purpose) as task:
self._check_boot_status(task)
except (exception.NodeLocked, exception.NodeNotFound):
continue
def _check_boot_status(self, task):
if not isinstance(task.driver.boot, PXEBaseMixin):
return
if not _should_retry_boot(task.node):
return
task.upgrade_lock(purpose='retrying PXE boot')
# Retry critical checks after acquiring the exclusive lock.
if (task.node.maintenance or task.node.provision_state
not in self._RETRY_ALLOWED_STATES
or not _should_retry_boot(task.node)):
return
LOG.info('Booting the ramdisk on node %(node)s is taking more than '
'%(timeout)d seconds, retrying boot',
{'node': task.node.uuid,
'timeout': CONF.pxe.boot_retry_timeout})
manager_utils.node_power_action(task, states.POWER_OFF)
manager_utils.node_set_boot_device(task, boot_devices.PXE,
persistent=False)
manager_utils.node_power_action(task, states.POWER_ON)
def _should_retry_boot(node):
# NOTE(dtantsur): this assumes IPA, do we need to make it generic?
for field in ('agent_last_heartbeat', 'last_power_state_change'):
if manager_utils.value_within_timeout(
node.driver_internal_info.get(field),
CONF.pxe.boot_retry_timeout):
# Alive and heartbeating, probably busy with something long
LOG.debug('Not retrying PXE boot for node %(node)s; its '
'%(event)s happened less than %(timeout)d seconds ago',
{'node': node.uuid, 'event': field,
'timeout': CONF.pxe.boot_retry_timeout})
return False
return True
|
class cryto:
def decryp_Vige() :
cyphertext=input("cyphertext=")
key=input("key=")
print("plaintext=",end='')
j=0
for i in cyphertext :
c=ord(key[j % len(key)])
if c < 97 :
c=c+32
c=c-97
x=ord(i)+26
if x < 123 :
x=x-c
if x > 90 :
x=x-26
else :
x=x-c
if x > 122 :
x=x-26
print(chr(x),end='')
j=j+1
print("\n")
def encryp_Vige() :
plaintext=input("plaintext=")
key=input("key=")
print()
print("cyphertext=",end='')
j=0
for i in plaintext :
c=ord(key[j % len(key)])
if c < 97 :
c=c+32
c=c-97
x=ord(i)-26
if x < 65 :
x=x+c
if x < 65 :
x=x+26
else :
x=x+c
if x < 97 :
x=x+26
print(chr(x),end='')
j=j+1
print("\n")
def Make_a_rsa() :
print("公鑰(n,e) 只能加密小於n的整数m!!!")
while(1) :
p,q=map(int,input("choose two Prime number :(split with space)").split())
if p > 1 :
t=0
for i in range ( 2 , p ) :
if ( p % i ) == 0 :
print ( "請輸入質數",end="")
t=1
break
if t == 1 :
continue
if q > 1 :
t=0
for i in range ( 2 , q ) :
if ( q % i ) == 0 :
print ( "請輸入質數",end="")
t=1
break
if t == 1 :
continue
break
n=p*q
r=(p-1)*(q-1)
e=0
d=0
for i in range ( 2 , r ) :
if ( r-int(r/i)*i ) == 1 :
e=i
break
for i in range ( 2 , r ) :
if ( (i*e) % r ) == 1 :
d=i
break
print("Public key(N,e)=({0},{1})\nPrivate key(N,d)=({2},{3})".format(n, e, n, d))
def rsa_send() :
import math
import array as arr
n,k=map(int,input("input your key :(split with space)").split())
name=input("enter the path of your bin :(Don't use the used name of bin!)")
output_file = open(name+".bin", 'wb')
text=input("plaintext/cyphertext=")
fb=[]
for i in text :
i=ord(i)
i=pow(i,k,n)
fb.append(i)
int_array = arr.array('i', fb)
int_array.tofile(output_file)
output_file.close()
def rsa_read() :
n,k=map(int,input("input your key :(split with space)").split())
name=input("enter the path of your bin :")
with open(name + ".bin" , 'rb') as file:
int_bytes = file.read()
for i in int_bytes :
if i == 0 :
continue
i=pow(i,k,n)
print(chr(i), end="")
def linr_radom() :
text=input("plaintext/cyphertext=")
LFSR=input("LFSR_4=")
print()
print("cyphertext/plaintext=",end='')
a=int(LFSR[0])
b=int(LFSR[1])
c=int(LFSR[2])
d=int(LFSR[3])
for i in text :
print(int(i) ^ a,end="")
t= a ^ d
d=a
a=b
b=c
c=t
print()
def wood_decry() :
text=input("input the cryto :")
n=0
for i in text :
if n%4==0 :
print(i,end="")
n=n+1
def wood_encry() :
import random
text=input("input the plaintext :")
l=[]
for i in range(48,122) :
if (i>48 and i<57) or (i>65 and i<90) or (i>97 and i<122) :
l.append(i)
for i in text :
print(i,end="")
for j in range(3) :
r=random.choice(l)
print(chr(r),end="")
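# A minimal round-trip sketch (assumption: a textbook toy key pair built from p=61, q=53 is used
# here purely for illustration; these values are not produced by Make_a_rsa above).
# RSA encryption/decryption is modular exponentiation via pow(), and the message is recovered
# only when m < n, as noted in Make_a_rsa.
demo_n, demo_e, demo_d = 3233, 17, 2753
demo_m = ord("A")                              # 65 < n
demo_c = pow(demo_m, demo_e, demo_n)           # encrypt with the public key (n, e)
assert pow(demo_c, demo_d, demo_n) == demo_m   # decrypt with the private key (n, d)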
|
""" Calcualte p-values, ROC, AUC, and proportion of significant observations for
a set of observations given the null hypothesis distribution
Args:
variable: array of observed values
hypothesis: optional null hypothesis distribution (beta distribution by default)
alpha: optional significance parameter (.05 by default)
Returns:
pvalues: for every observation in variable
ROC: on a grid of 1000 points
AUC: integral of ROC
proportion of significant observations
"""
import numpy as np
def pvalue(variable=None, hypothesis=None, alpha=.05):
""" calcualte pvalues, AUC and fraction of significant observations
"""
#set model
if variable is None:
variable = np.random.beta(a=3, b=5, size=5000)
else:
variable = np.array(variable)
#set null-hypothesis
if hypothesis is None:
hypothesis = np.random.beta(a=5, b=5, size=1000)
else:
hypothesis = np.array(hypothesis)
#calculate prob of left-tail event p(H<=x|H) for every instance of X
prob = []
for var in variable:
prob.append((hypothesis <= var).sum())
#normalize p
prob = np.divide(prob, hypothesis.size)
#scan alpha from 0 to 1 and find prob(p<=alpha)
scanprob = []
alphagrid = np.linspace(0, 1, num=1000)
for val in alphagrid:
#calculate prob p<=alpha
scanprob.append((prob <= val).sum() / variable.size)
return prob, scanprob, np.sum(scanprob) / alphagrid.size, (prob <= alpha).sum() / variable.size
def lefttailpvalue(variable=None, hypothesis=None):
""" calcualte left-tail pvalues
"""
#set model
if variable is None:
variable = np.random.beta(a=3, b=5, size=5000)
else:
variable = np.array(variable)
#set null-hypothesis
if hypothesis is None:
hypothesis = np.random.beta(a=5, b=5, size=1000)
else:
hypothesis = np.array(hypothesis)
#calculate prob of left-tail event p(H<=x|H) for every instance of X
prob = []
for var in variable:
prob.append((hypothesis <= var).sum())
#normalize p
prob = np.divide(prob, hypothesis.size)
return prob
def righttailpvalue(variable=None, hypothesis=None):
""" calcualte left-tail pvalues
"""
#set model
if variable is None:
variable = np.random.beta(a=3, b=5, size=5000)
else:
variable = np.array(variable)
#set null-hypothesis
if hypothesis is None:
hypothesis = np.random.beta(a=5, b=5, size=1000)
else:
hypothesis = np.array(hypothesis)
#calculate prob of right-tail event p(H>=x|H) for every instance of X
prob = []
for var in variable:
prob.append((hypothesis >= var).sum())
#normalize p
prob = np.divide(prob, hypothesis.size)
return prob
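# A minimal usage sketch (the beta parameters below are illustrative assumptions, not data from a real study):
_observed = np.random.beta(a=3, b=5, size=2000)
_null = np.random.beta(a=5, b=5, size=1000)
_pvals, _roc, _auc, _frac_significant = pvalue(_observed, _null, alpha=.05)
print("AUC = {:.3f}, fraction significant = {:.3f}".format(_auc, _frac_significant))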
|
# Plot polynomial regression on 1d problem
# Based on https://github.com/probml/pmtk3/blob/master/demos/linregPolyVsDegree.m
import numpy as np
import matplotlib.pyplot as plt
from pyprobml_utils import save_fig
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import MinMaxScaler
import sklearn.metrics
from sklearn.metrics import mean_squared_error as mse
def make_1dregression_data(n=21):
np.random.seed(0)
xtrain = np.linspace(0.0, 20, n)
xtest = np.arange(0.0, 20, 0.1)
sigma2 = 4
w = np.array([-1.5, 1/9.])
fun = lambda x: w[0]*x + w[1]*np.square(x)
ytrain = fun(xtrain) + np.random.normal(0, 1, xtrain.shape) * \
np.sqrt(sigma2)
ytest= fun(xtest) + np.random.normal(0, 1, xtest.shape) * \
np.sqrt(sigma2)
return xtrain, ytrain, xtest, ytest
xtrain, ytrain, xtest, ytest = make_1dregression_data(n=21)
#Rescaling data
scaler = MinMaxScaler(feature_range=(-1, 1))
Xtrain = scaler.fit_transform(xtrain.reshape(-1, 1))
Xtest = scaler.transform(xtest.reshape(-1, 1))
degs = np.arange(1, 21, 1)
ndegs = np.max(degs)
mse_train = np.empty(ndegs)
mse_test = np.empty(ndegs)
ytest_pred_stored = np.empty(ndegs, dtype=np.ndarray)
ytrain_pred_stored = np.empty(ndegs, dtype=np.ndarray)
for deg in degs:
model = LinearRegression()
poly_features = PolynomialFeatures(degree=deg, include_bias=False)
Xtrain_poly = poly_features.fit_transform(Xtrain)
model.fit(Xtrain_poly, ytrain)
ytrain_pred = model.predict(Xtrain_poly)
ytrain_pred_stored[deg-1] = ytrain_pred
Xtest_poly = poly_features.transform(Xtest)
ytest_pred = model.predict(Xtest_poly)
mse_train[deg-1] = mse(ytrain_pred, ytrain)
mse_test[deg-1] = mse(ytest_pred, ytest)
ytest_pred_stored[deg-1] = ytest_pred
# Plot MSE vs degree
fig, ax = plt.subplots()
mask = degs <= 15
ax.plot(degs[mask], mse_test[mask], color = 'r', marker = 'x',label='test')
ax.plot(degs[mask], mse_train[mask], color='b', marker = 's', label='train')
ax.legend(loc='upper right', shadow=True)
plt.xlabel('degree')
plt.ylabel('mse')
save_fig('polyfitVsDegree.pdf')
plt.show()
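# A short follow-up sketch (choosing the degree by test MSE is an added illustration, not part of the original demo)
best_deg = degs[mask][np.argmin(mse_test[mask])]
print('Degree with lowest test MSE: {}'.format(best_deg))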
# Plot fitted functions
chosen_degs = [1, 2, 14, 20]
for deg in chosen_degs:
fig, ax = plt.subplots()
ax.scatter(xtrain, ytrain)
ax.plot(xtest, ytest_pred_stored[deg-1])
ax.set_ylim((-10, 15))
plt.title('degree {}'.format(deg))
save_fig('polyfitDegree{}.pdf'.format(deg))
plt.show()
# Plot residuals
#https://blog.minitab.com/blog/adventures-in-statistics-2/why-you-need-to-check-your-residual-plots-for-regression-analysis
chosen_degs = [1, 2, 14, 20]
for deg in chosen_degs:
fig, ax = plt.subplots()
ypred = ytrain_pred_stored[deg-1]
residuals = ytrain - ypred
ax.plot(ypred, residuals, 'o')
ax.set_xlabel('predicted y')
ax.set_ylabel('residual')
plt.title('degree {}. Predictions on the training set'.format(deg))
save_fig('polyfitDegree{}Residuals.pdf'.format(deg))
plt.show()
# Plot fit vs actual
# https://blog.minitab.com/blog/adventures-in-statistics-2/regression-analysis-how-do-i-interpret-r-squared-and-assess-the-goodness-of-fit
chosen_degs = [1, 2, 14, 20]
for deg in chosen_degs:
for train in [True, False]:
if train:
ytrue = ytrain
ypred = ytrain_pred_stored[deg-1]
dataset = 'Train'
else:
ytrue = ytest
ypred = ytest_pred_stored[deg-1]
dataset = 'Test'
fig, ax = plt.subplots()
ax.scatter(ytrue, ypred)
ax.plot(ax.get_xlim(), ax.get_ylim(), ls="--", c=".3")
ax.set_xlabel('true y')
ax.set_ylabel('predicted y')
r2 = sklearn.metrics.r2_score(ytrue, ypred)
plt.title('degree {}. R2 on {} = {:0.3f}'.format(deg, dataset, r2))
save_fig('polyfitDegree{}FitVsActual{}.pdf'.format(deg, dataset))
plt.show()
|
# coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Widget(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'last_updated_by': 'str',
'user_permission': 'str',
'dashboard_id': 'int',
'name': 'str',
'description': 'str',
'last_updated_on': 'int',
'theme': 'str',
'interval': 'int',
'id': 'int',
'type': 'str',
'timescale': 'str'
}
attribute_map = {
'last_updated_by': 'lastUpdatedBy',
'user_permission': 'userPermission',
'dashboard_id': 'dashboardId',
'name': 'name',
'description': 'description',
'last_updated_on': 'lastUpdatedOn',
'theme': 'theme',
'interval': 'interval',
'id': 'id',
'type': 'type',
'timescale': 'timescale'
}
discriminator_value_class_map = {
'batchjob': 'BatchJobWidget',
'netflow': 'NetflowWidget',
'html': 'HtmlWidget',
'sgraph': 'WebsiteGraphWidget',
'devicesla': 'DeviceSLAWidget',
'groupnetflowgraph': 'NetflowGroupGraphWidget',
'gauge': 'GaugeWidget',
'ograph': 'OverviewGraphWidget',
'statsd': 'StatsDWidget',
'netflowgraph': 'NetflowGraphWidget',
'devicestatus': 'DeviceStatus',
'text': 'TextWidget',
'flash': 'FlashWidget',
'ngraph': 'NormalGraphWidget',
'groupnetflow': 'NetflowGroupWidget',
'bignumber': 'BigNumberWidget',
'cgraph': 'CustomerGraphWidget',
'dynamictable': 'DynamicTableWidget',
'table': 'TableWidget',
'gmap': 'GoogleMapWidget',
'noc': 'NOCWidget',
'': 'ServiceAlert',
'alert': 'AlertWidget',
'websiteindividualstatus': 'WebsiteIndividualsStatusWidget',
'websiteoverallstatus': 'WebsiteOverallStatusWidget',
'piechart': 'PieChartWidget',
'websiteoverview': 'WebsiteOverviewWidget',
'websitesla': 'WebsiteSLAWidget'
}
def __init__(self, last_updated_by=None, user_permission=None, dashboard_id=None, name=None, description=None, last_updated_on=None, theme=None, interval=None, id=None, type=None, timescale=None): # noqa: E501
"""Widget - a model defined in Swagger""" # noqa: E501
self._last_updated_by = None
self._user_permission = None
self._dashboard_id = None
self._name = None
self._description = None
self._last_updated_on = None
self._theme = None
self._interval = None
self._id = None
self._type = None
self._timescale = None
self.discriminator = 'type'
if last_updated_by is not None:
self.last_updated_by = last_updated_by
if user_permission is not None:
self.user_permission = user_permission
self.dashboard_id = dashboard_id
self.name = name
if description is not None:
self.description = description
if last_updated_on is not None:
self.last_updated_on = last_updated_on
if theme is not None:
self.theme = theme
if interval is not None:
self.interval = interval
if id is not None:
self.id = id
self.type = type
if timescale is not None:
self.timescale = timescale
@property
def last_updated_by(self):
"""Gets the last_updated_by of this Widget. # noqa: E501
The user that last updated the widget # noqa: E501
:return: The last_updated_by of this Widget. # noqa: E501
:rtype: str
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, last_updated_by):
"""Sets the last_updated_by of this Widget.
The user that last updated the widget # noqa: E501
:param last_updated_by: The last_updated_by of this Widget. # noqa: E501
:type: str
"""
self._last_updated_by = last_updated_by
@property
def user_permission(self):
"""Gets the user_permission of this Widget. # noqa: E501
The permission level of the user who last modified the widget # noqa: E501
:return: The user_permission of this Widget. # noqa: E501
:rtype: str
"""
return self._user_permission
@user_permission.setter
def user_permission(self, user_permission):
"""Sets the user_permission of this Widget.
The permission level of the user who last modified the widget # noqa: E501
:param user_permission: The user_permission of this Widget. # noqa: E501
:type: str
"""
self._user_permission = user_permission
@property
def dashboard_id(self):
"""Gets the dashboard_id of this Widget. # noqa: E501
The id of the dashboard the widget belongs to # noqa: E501
:return: The dashboard_id of this Widget. # noqa: E501
:rtype: int
"""
return self._dashboard_id
@dashboard_id.setter
def dashboard_id(self, dashboard_id):
"""Sets the dashboard_id of this Widget.
The id of the dashboard the widget belongs to # noqa: E501
:param dashboard_id: The dashboard_id of this Widget. # noqa: E501
:type: int
"""
if dashboard_id is None:
raise ValueError("Invalid value for `dashboard_id`, must not be `None`") # noqa: E501
self._dashboard_id = dashboard_id
@property
def name(self):
"""Gets the name of this Widget. # noqa: E501
The name of the widget # noqa: E501
:return: The name of this Widget. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Widget.
The name of the widget # noqa: E501
:param name: The name of this Widget. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def description(self):
"""Gets the description of this Widget. # noqa: E501
The description of the widget # noqa: E501
:return: The description of this Widget. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this Widget.
The description of the widget # noqa: E501
:param description: The description of this Widget. # noqa: E501
:type: str
"""
self._description = description
@property
def last_updated_on(self):
"""Gets the last_updated_on of this Widget. # noqa: E501
The time that corresponds to when the widget was last updated, in epoch format # noqa: E501
:return: The last_updated_on of this Widget. # noqa: E501
:rtype: int
"""
return self._last_updated_on
@last_updated_on.setter
def last_updated_on(self, last_updated_on):
"""Sets the last_updated_on of this Widget.
The time that corresponds to when the widget was last updated, in epoch format # noqa: E501
:param last_updated_on: The last_updated_on of this Widget. # noqa: E501
:type: int
"""
self._last_updated_on = last_updated_on
@property
def theme(self):
"""Gets the theme of this Widget. # noqa: E501
The color scheme of the widget. Options are: borderPurple | borderGray | borderBlue | solidPurple | solidGray | solidBlue | simplePurple | simpleBlue | simpleGray | newBorderGray | newBorderBlue | newBorderDarkBlue | newSolidGray | newSolidBlue | newSolidDarkBlue | newSimpleGray | newSimpleBlue |newSimpleDarkBlue # noqa: E501
:return: The theme of this Widget. # noqa: E501
:rtype: str
"""
return self._theme
@theme.setter
def theme(self, theme):
"""Sets the theme of this Widget.
The color scheme of the widget. Options are: borderPurple | borderGray | borderBlue | solidPurple | solidGray | solidBlue | simplePurple | simpleBlue | simpleGray | newBorderGray | newBorderBlue | newBorderDarkBlue | newSolidGray | newSolidBlue | newSolidDarkBlue | newSimpleGray | newSimpleBlue |newSimpleDarkBlue # noqa: E501
:param theme: The theme of this Widget. # noqa: E501
:type: str
"""
self._theme = theme
@property
def interval(self):
"""Gets the interval of this Widget. # noqa: E501
The refresh interval of the widget, in minutes # noqa: E501
:return: The interval of this Widget. # noqa: E501
:rtype: int
"""
return self._interval
@interval.setter
def interval(self, interval):
"""Sets the interval of this Widget.
The refresh interval of the widget, in minutes # noqa: E501
:param interval: The interval of this Widget. # noqa: E501
:type: int
"""
self._interval = interval
@property
def id(self):
"""Gets the id of this Widget. # noqa: E501
The Id of the widget # noqa: E501
:return: The id of this Widget. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Widget.
The Id of the widget # noqa: E501
:param id: The id of this Widget. # noqa: E501
:type: int
"""
self._id = id
@property
def type(self):
"""Gets the type of this Widget. # noqa: E501
alert | deviceNOC | html | serviceOverallStatus | sgraph | ngraph | serviceNOC | serviceSLA | bigNumber | gmap | serviceIndividualStatus | gauge | pieChart | ngraph | batchjob # noqa: E501
:return: The type of this Widget. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this Widget.
alert | deviceNOC | html | serviceOverallStatus | sgraph | ngraph | serviceNOC | serviceSLA | bigNumber | gmap | serviceIndividualStatus | gauge | pieChart | ngraph | batchjob # noqa: E501
:param type: The type of this Widget. # noqa: E501
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def timescale(self):
"""Gets the timescale of this Widget. # noqa: E501
The default timescale of the widget # noqa: E501
:return: The timescale of this Widget. # noqa: E501
:rtype: str
"""
return self._timescale
@timescale.setter
def timescale(self, timescale):
"""Sets the timescale of this Widget.
The default timescale of the widget # noqa: E501
:param timescale: The timescale of this Widget. # noqa: E501
:type: str
"""
self._timescale = timescale
def get_real_child_model(self, data):
"""Returns the real base class specified by the discriminator"""
discriminator_value = data[self.discriminator].lower()
return self.discriminator_value_class_map.get(discriminator_value)
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Widget, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Widget):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
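# A small usage sketch (not part of the generated module; the field values are illustrative).
# It builds a bare Widget, serializes it with to_dict(), and shows how the discriminator maps
# a payload's `type` field to a concrete subclass name.
if __name__ == "__main__":
    w = Widget(dashboard_id=12, name="CPU overview", type="ngraph", interval=5)
    print(w.to_dict())  # dict of all declared attributes; unset ones are None
    print(w.get_real_child_model({"type": "Alert"}))  # 'AlertWidget' (lookup is lowercased)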
|
from pydantic import BaseModel
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
import pkg_resources
from typing import Any, Optional
def build_api(handler, endpoint):
def get_version():
pkg_name = "msgflow"
try:
version = pkg_resources.get_distribution(pkg_name).version
except pkg_resources.DistributionNotFound:
print(f"Package name not found: {pkg_name}")
version = "package version info not found"
return version
app = FastAPI(
title="msgFlow",
description="",
version=get_version(),
)
app.add_api_route(endpoint, handler.handle, methods=["POST"])
return app
class Request(BaseModel):
    text: str
    dialog_id: str = "0"
    data: Optional[dict[str, Any]] = None
class Response(BaseModel):
texts: list[str]
request: Request
class Handler:
def __init__(self, bot):
self._bot = bot
def handle(self, req: Request):
msg = WebapiMessage(text=req.text, dialog_id=req.dialog_id, req=req)
self._bot.handle(msg)
return Response(texts=msg.msgs, request=req)
class WebapiMessage:
def __init__(self, text: str, dialog_id: str, req):
""""""
self._text = text
self._cid = dialog_id
self._req = req
self._msgs = []
@property
def text(self):
return self._text
@property
def dialog_id(self) -> str:
        # A conversation is identified by the dialog_id supplied in the request
return self._cid
def respond(self, text):
self._msgs.append(text)
@property
def source(self) -> Any:
return self._req
@property
def msgs(self):
return self._msgs
class WebapiService:
def __init__(self, config):
"""
Args:
config (Dict[str, Any])
"""
# Set attributes
self._config = config
@classmethod
def from_config(cls, config: dict[str, object]):
cfg = WebapiConfig(**config)
return cls(config=cfg)
def flow(self, bot):
handler = Handler(bot=bot)
app = build_api(
handler,
endpoint=self._config.endpoint,
)
uvicorn.run(app=app, host=self._config.host, port=self._config.port)
def post(self, text):
raise NotImplementedError()
class WebapiConfig(BaseModel):
host: str
port: int
endpoint: str = "/handle"
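# A hedged usage sketch (EchoBot and the host/port values are illustrative stand-ins, not part
# of msgflow): the service wraps any object exposing handle(msg), where msg offers .text and
# .respond(); WebapiMessage collects the replies in msg.msgs.
class EchoBot:
    def handle(self, msg):
        msg.respond(f"echo: {msg.text}")

if __name__ == "__main__":
    service = WebapiService.from_config({"host": "127.0.0.1", "port": 8080})
    # POST {"text": "hi", "dialog_id": "1"} to http://127.0.0.1:8080/handle
    service.flow(EchoBot())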
|
import os
from montreal_forced_aligner.corpus.acoustic_corpus import AcousticCorpus
def test_save_text_lab(
basic_corpus_dir,
generated_dir,
):
output_directory = os.path.join(generated_dir, "gui_tests")
corpus = AcousticCorpus(
corpus_directory=basic_corpus_dir,
use_mp=True,
temporary_directory=output_directory,
)
corpus._load_corpus()
corpus.get_file(name="acoustic_corpus").save()
def test_file_properties(
stereo_corpus_dir,
generated_dir,
):
output_directory = os.path.join(generated_dir, "gui_tests")
corpus = AcousticCorpus(
corpus_directory=stereo_corpus_dir,
use_mp=True,
temporary_directory=output_directory,
)
corpus._load_corpus()
file = corpus.get_file(name="michaelandsickmichael")
assert file.sound_file.num_channels == 2
assert file.num_speakers == 2
assert file.num_utterances == 7
x, y = file.sound_file.normalized_waveform()
assert y.shape[0] == 2
def test_flac_tg(flac_tg_corpus_dir, generated_dir):
output_directory = os.path.join(generated_dir, "gui_tests")
corpus = AcousticCorpus(
corpus_directory=flac_tg_corpus_dir,
use_mp=True,
temporary_directory=output_directory,
)
corpus._load_corpus()
corpus.get_file(name="61-70968-0000").save()
|
import os
def tfoutputtoAzdo(outputlist, jsonObject):
"""
This function convert a dict to Azure DevOps pipelines variable
outputlist : dict { terraform_output : azure devpops variable}
jsonOject : the terraform output in Json format (terraform output -json)
"""
if(len(outputlist) > 0):
for k, v in outputlist.items():
tfoutput_name = k
azdovar = str(v)
if tfoutput_name in jsonObject.keys():
var_value = jsonObject[tfoutput_name]["value"]
print(
"Run [echo ##vso[task.setvariable variable="+azdovar+";]"+var_value+"]")
os.system(
"echo ##vso[task.setvariable variable="+azdovar+";]"+var_value+"")
else:
print("key {} is not present in terraform output".format(
tfoutput_name))
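# A minimal usage sketch (the sample payload below is illustrative, not real pipeline data):
# it mirrors the structure of `terraform output -json` and maps one output to an AzDO variable.
if __name__ == "__main__":
    sample_tf_output = {
        "resource_group_name": {"sensitive": False, "type": "string", "value": "rg-demo"},
    }
    # Emits: echo ##vso[task.setvariable variable=RG_NAME;]rg-demo
    tfoutputtoAzdo({"resource_group_name": "RG_NAME"}, sample_tf_output)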
|
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from tqdm import tqdm
class _BaseWrapper():
def __init__(self, model):
super().__init__()
self.model = model
self.handlers = []
def forward(self, images):
self.image_shape = images.shape[2:]
print(self.image_shape)
self.logits = self.model(images)
self.probs = F.softmax(self.logits, dim=1)
return self.probs.sort(dim=1, descending=True)
    def backward(self, ids):
        one_hot = F.one_hot(ids, self.logits.shape[-1])
        # keep the batch dimension and match the logits dtype so backward() accepts the gradient
        one_hot = one_hot.squeeze(1).float()
        self.model.zero_grad()
        self.logits.backward(gradient=one_hot, retain_graph=True)
        # Passing `gradient` backpropagates only with respect to the selected class index,
        # i.e. it measures how much each feature map contributed to the class of interest.
def generate(self):
raise NotImplementedError
class GradCAM(_BaseWrapper):
def __init__(self, model, layers=None):
super().__init__(model)
self.feature_map = {}
self.grad_map = {}
self.layers = layers
def save_fmaps(key):
def forward_hook(module, input, output):
self.feature_map[key]=output.detach()
return forward_hook
def save_grads(key):
            def backward_hook(module, grad_in, grad_out):
self.grad_map[key] = grad_out[0].detach()
return backward_hook
for name, module in self.model.named_modules():
if self.layers is None or name in self.layers:
self.handlers.append(module.register_forward_hook(save_fmaps(name)))
self.handlers.append(module.register_backward_hook(save_grads(name)))
def findLayers(self, layers, target_layer):
if target_layer in layers.keys():
return layers[target_layer]
else:
raise ValueError(f"{target_layer} not exists")
def generate(self, target_layer):
feature_maps = self.findLayers(self.feature_map, target_layer)
grad_maps = self.findLayers(self.grad_map, target_layer)
weights = F.adaptive_avg_pool2d(grad_maps, 1)
grad_cam = torch.mul(feature_maps, weights).sum(dim=1, keepdim=True)
grad_cam = F.relu(grad_cam)
grad_cam = F.interpolate(grad_cam, self.image_shape, mode="bilinear", align_corners=False)
B, C, H, W = grad_cam.shape
        # C is expected to be 1 here (a single-channel CAM per image)
grad_cam = grad_cam.view(B, -1)
grad_cam -= grad_cam.min(dim=1, keepdim=True)[0]
        # subtract the per-image minimum so all values are non-negative before normalizing
grad_cam /= grad_cam.max(dim=1, keepdim=True)[0]
grad_cam = grad_cam.view(B, C, H, W)
return grad_cam
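# A usage sketch under stated assumptions: torchvision is available and "layer4" is the target
# layer name of a (here untrained) ResNet-18; both choices are illustrative, not part of the
# original module.
if __name__ == "__main__":
    from torchvision import models

    model = models.resnet18().eval()
    images = torch.randn(2, 3, 224, 224)       # stand-in batch of two RGB images

    cam = GradCAM(model, layers=["layer4"])
    probs, ids = cam.forward(images)            # class probabilities sorted per image
    cam.backward(ids[:, [0]])                   # backprop w.r.t. each image's top-1 class
    heatmaps = cam.generate(target_layer="layer4")
    print(heatmaps.shape)                       # torch.Size([2, 1, 224, 224]), values in [0, 1]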
|
import unittest
import numpy as np
from hmc.applications.cox_poisson import forward_transform, inverse_transform, generate_data, gaussian_posterior_factory, hyperparameter_posterior_factory
from hmc.applications.cox_poisson.prior import log_prior, grad_log_prior, hess_log_prior, grad_hess_log_prior
class TestCoxPoisson(unittest.TestCase):
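    # Each test below checks an analytic derivative against a central finite difference
    # along a random direction u:
    #     (f(q + 0.5*delta*u) - f(q - 0.5*delta*u)) / delta  ~=  grad_f(q) @ u
    # which should agree up to the truncation error of the step size delta.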
def test_prior(self):
def transformed_log_prior(qt):
return log_prior(*inverse_transform(qt)[0])
transformed_grad_log_prior = lambda qt: grad_log_prior(*qt)
transformed_hess_log_prior = lambda qt: hess_log_prior(*qt)
transformed_grad_hess_log_prior = lambda qt: grad_hess_log_prior(*qt)
q = np.random.uniform(size=(2, ))
qt, _ = forward_transform(q)
delta = 1e-5
u = np.random.normal(size=qt.shape)
fd = (transformed_log_prior(qt + 0.5*delta*u) - transformed_log_prior(qt - 0.5*delta*u)) / delta
dd = transformed_grad_log_prior(qt)@u
self.assertTrue(np.allclose(fd, dd))
fd = (transformed_grad_log_prior(qt + 0.5*delta*u) - transformed_grad_log_prior(qt - 0.5*delta*u)) / delta
dd = transformed_hess_log_prior(qt)@u
self.assertTrue(np.allclose(fd, dd))
fd = (transformed_hess_log_prior(qt + 0.5*delta*u) - transformed_hess_log_prior(qt - 0.5*delta*u)) / delta
dd = transformed_grad_hess_log_prior(qt)@u
self.assertTrue(np.allclose(fd, dd))
def test_gaussian_posterior(self):
sigmasq, beta = np.random.uniform(size=(2, ))
mu = np.log(126.0) - sigmasq / 2.0
dist, x, y = generate_data(10, mu, beta, sigmasq)
euclidean_auxiliaries, metric = gaussian_posterior_factory(dist, mu, sigmasq, beta, y)
log_posterior = lambda x: euclidean_auxiliaries(x)[0]
grad_log_posterior = lambda x: euclidean_auxiliaries(x)[1]
delta = 1e-6
u = np.random.normal(size=x.shape)
fd = (log_posterior(x + 0.5*delta*u) - log_posterior(x - 0.5*delta*u)) / delta
dd = grad_log_posterior(x)@u
self.assertTrue(np.allclose(fd, dd))
def test_hyperparameter_posterior(self):
sigmasq, beta = np.random.uniform(size=(2, ))
mu = np.log(126.0) - sigmasq / 2.0
dist, x, y = generate_data(16, mu, beta, sigmasq)
log_posterior, metric, _, euclidean_auxiliaries, riemannian_auxiliaries = hyperparameter_posterior_factory(dist, mu, x, y)
grad_log_posterior = lambda qt: euclidean_auxiliaries(qt)[1]
grad_metric = lambda qt: riemannian_auxiliaries(qt)[3]
q = np.array([sigmasq, beta])
qt, _ = forward_transform(q)
delta = 1e-4
u = np.random.normal(size=(2, ))
fd = (log_posterior(qt + 0.5*delta*u) - log_posterior(qt - 0.5*delta*u)) / delta
dd = grad_log_posterior(qt)@u
self.assertTrue(np.allclose(fd, dd))
fd = (metric(qt + 0.5*delta*u) - metric(qt - 0.5*delta*u)) / delta
dd = grad_metric(qt)@u
self.assertTrue(np.allclose(fd, dd))
|