import json
from time import sleep
import gspread
import requests
from gspread_formatting import *
from oauth2client.service_account import ServiceAccountCredentials
class Spreadsheet:
# comment out all but one of these, depending on which spreadsheet is being used
# URL = 'https://docs.google.com/spreadsheets/d/1WhExw_ReHnyPQYXl0p-kT6jYXpZW5w8-cq2ffK7niOs' # Sample Deep Space Scouting Sheet Machine
# URL = 'https://docs.google.com/spreadsheets/d/1lOTML4TgNqv5OKUJU32keWu62__T9cFT3IL52kmPbKk' # Bethesda Week 2 Scouting Sheet Machine
# URL = 'https://docs.google.com/spreadsheets/d/1C8hjCqMZmacyUe3SlRgW4o4HGqTRFozviK4WZ6Mu4yc' # Week 0 Scouting Sheet Machine
# URL = 'https://docs.google.com/spreadsheets/d/1uYb9n_2IaGSRvOPZcuE59eUQjinaTSIN1SKqTQ6z2lQ' # Dickinson Center Week 0 Scouting Sheet Machine
# URL = 'https://docs.google.com/spreadsheets/d/1_8tFjgxjGVA0__1BLkMV-ookfPLrnGDE8gZj6pQc1_k' # Centurion-KnightKrawler Week 0 Scouting Sheet Machine
# URL = 'https://docs.google.com/spreadsheets/d/1Ftzcn5u5axYUkob1MXI8wV1KAD-8qjGkywqQjP4_AMo' # Haymarket Week 1 Scouting Sheet Machine
# URL = 'https://docs.google.com/spreadsheets/d/1fRm4nZIT457zIpW5cyZrIvR0gSGt6oEcphVYiaH6eK8' # Owings Mills Week 3 Scouting Sheet Machine
URL = 'https://docs.google.com/spreadsheets/d/1y8xtKJftg1mDbhfcmISWkyi4MgmSauveD9BY2bPNUCo/edit#gid=168604214' # CHCMP Scouting Sheet Machine
# google sheets setup
scope = ['https://spreadsheets.google.com/feeds']
creds = ServiceAccountCredentials.from_json_keyfile_name('client_secret_gsheets.json', scope)
client = gspread.authorize(creds)
# google sheets document
sheet = client.open_by_url(URL)
# individual worksheets of google sheets document
key_worksheet = sheet.worksheet('Key')
teams_worksheet = sheet.worksheet('Teams')
sample_team_worksheet = sheet.worksheet('Sample Team')
schedule_worksheet = sheet.worksheet('Schedule')
team_data_worksheet = sheet.worksheet('Team Data')
# setting event key to value in A1 of Key worksheet
event_key = key_worksheet.cell(1, 1).value
# 2537 cell format
format_2537 = CellFormat(backgroundColor=Color(.145, .980, .216)) # hex 25fa37 converted to rgb fractions out of 1
# tba setup
tba_session = requests.Session()
BASE_URL = 'https://www.thebluealliance.com/api/v3'
# tba credentials setup
with open('client_secret_tba.json') as json_file:
data = json.load(json_file)
tba_auth_key = data['tba_auth_key']
def __init__(self):
"""All TBA requests will have authentication key in header"""
self.tba_session.headers.update({'X-TBA-Auth-Key': self.tba_auth_key})
def get_teams_from_event(self, event):
"""Returns all team keys from event in a list
event: event key of intended competition (e.g. 2018vahay)
"""
teams_raw = self.tba_session.get(self.BASE_URL + '/event/%s/teams/keys' % event).json()
teams = []
for team_raw in teams_raw:
teams.append(team_raw[3:])
return teams
def fill_teams(self, sheet, event):
"""Fills first column of specified sheet with all teams from specified sheet
sheet: intended google sheet
event: event key of intended competition (e.g. 2018vahay)
"""
column = []
for team in self.get_teams_from_event(event):
column.append(team)
for index in range(0, len(column)):
sheet.update_cell(index + 1, 1, column[index])
def create_team_sheets(self):
"""Creates a scouting sheet for each team in competition
event: event key of intended competition (e.g. 2018 vahay)
"""
teams = self.teams_worksheet.col_values(1)
for team in teams:
self.sheet.add_worksheet(team, self.sample_team_worksheet.row_count, self.sample_team_worksheet.col_count)
def delete_team_sheets(self):
"""Deletes all individual team worksheets
Used for testing
"""
teams = self.teams_worksheet.col_values(1)
for team in teams:
self.sheet.del_worksheet(self.sheet.worksheet(team))
def get_sample_sheet(self):
"""Returns the sample team sheet in 2D list format [row][column]"""
sample_sheet = []
for row in range(1, self.sample_team_worksheet.row_count + 1):
sample_sheet.append(self.sample_team_worksheet.row_values(row, value_render_option='FORMULA'))
return sample_sheet
def copy_sheet(self, copy_from, copy_to, team_num):
"""Copies every element from a list of values to a specified sheet
copy_from: list from which values are copied
copy_to: sheet to which values are copied
"""
i, j = 1, 1
for row in copy_from:
for col in row:
if col == 'Team #':
copy_to.update_cell(i, j, team_num)
sleep(1.01)
elif col != '':
copy_to.update_cell(i, j, col)
sleep(1.01) # Quota is 100 requests per 100s, this does 100 requests per 101s
j += 1
i += 1
j = 1
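    # A batched alternative, sketched under the assumption that the installed
    # gspread version provides Worksheet.update(range_name, values) and
    # gspread.utils.rowcol_to_a1 (true for recent releases): one request per
    # sheet replaces the per-cell update_cell/sleep loop above.
    def copy_sheet_batched(self, copy_from, copy_to, team_num):
        """Copies all values to a sheet in a single batched update
        copy_from: list from which values are copied
        copy_to: sheet to which values are copied
        team_num: team number substituted for any 'Team #' placeholder
        """
        values = [[team_num if col == 'Team #' else col for col in row]
                  for row in copy_from]
        if not values:
            return
        num_cols = max(len(row) for row in values)
        last_cell = gspread.utils.rowcol_to_a1(len(values), num_cols)
        copy_to.update('A1:%s' % last_cell, values)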
def copy_sample_to_team_sheets(self):
"""Copies sample sheet format to every team sheet"""
sample_sheet = self.get_sample_sheet()
for team in self.teams_worksheet.col_values(1):
self.copy_sheet(sample_sheet, self.sheet.worksheet(team), team)
def get_color_schedule(self, event, color):
"""Returns match schedule of specified color alliance in list
event: event key of intended competition (e.g. 2018vahay)
color: color of desired alliance schedule (e.g. red or blue)
"""
# event schedules get updated to elims event schedules once elims are reached
# only elims schedule accessible in finished events
schedule = []
event_list = self.tba_session.get(self.BASE_URL + '/event/%s/matches/simple' % event).json() # list of dicts
for match in event_list:
schedule.append(match['alliances'][color]['team_keys'])
for alliance in schedule:
for i in range(len(alliance)):
alliance[i] = alliance[i][3:]
# trims 'frc' from beginning of every team number
return schedule
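    # For example, get_color_schedule(event, 'red') returns something like
    # [['2537', '1086', '449'], ['612', '614', '1418'], ...]
    # (team numbers illustrative)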
def fill_schedule(self, event):
"""Auto fills Schedule worksheet with schedule
event: event key of intended competition (e.g. 2018vahay)
"""
red_schedule = self.get_color_schedule(event, 'red')
blue_schedule = self.get_color_schedule(event, 'blue')
# counts the matches and fills column 1 of the spreadsheet with match numbers
num_matches = 0
for match in range(len(red_schedule)):
self.schedule_worksheet.update_cell(match + 1, 1, match + 1)
num_matches += 1
sleep(1.01)
for i in range(num_matches):
for j in range(3):
self.schedule_worksheet.update_cell(i + 1, j + 2, red_schedule[i][j])
sleep(1.01)
self.schedule_worksheet.update_cell(i + 1, j + 5, blue_schedule[i][j])
sleep(1.01)
def get_team_metrics_from_event(self, event):
"""Returns OPRs, DPRs, and CCWMs of all teams at event in dictionary of dictionaries
event: event key of intended competition (e.g. 2018vahay)
"""
return self.tba_session.get(self.BASE_URL + '/event/%s/oprs' % event).json()
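    # The TBA /event/{key}/oprs response is shaped exactly as consumed by
    # fill_team_data below, e.g.
    # {'oprs': {'frc2537': 61.3, ...}, 'dprs': {...}, 'ccwms': {...}}
    # (values illustrative)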
def fill_team_data(self, event):
"""Auto fills Team Data worksheet with teams and their corresponding OPR, DPR, and CCWM
event: event key of intended competition (e.g. 2018vahay)
"""
teams = self.get_teams_from_event(event)
metrics = self.get_team_metrics_from_event(event)
row = 2
team_col, opr_col, dpr_col, ccwm_col = 1, 2, 3, 4
for team in teams:
self.team_data_worksheet.update_cell(row, team_col, team)
sleep(1.01)
self.team_data_worksheet.update_cell(row, opr_col, metrics['oprs']['frc' + team])
sleep(1.01)
self.team_data_worksheet.update_cell(row, dpr_col, metrics['dprs']['frc' + team])
sleep(1.01)
self.team_data_worksheet.update_cell(row, ccwm_col, metrics['ccwms']['frc' + team])
sleep(1.01)
row += 1
def get_predictions_from_event(self, event):
return self.tba_session.get(self.BASE_URL + '/event/%s/predictions' % event).json()
def format_cells_in_schedule(self):
cells_2537_raw = self.schedule_worksheet.findall('2537')
cells_2537 = []
for cell in cells_2537_raw:
cells_2537.append([cell.col + 64, cell.row]) # add 64 to column to match ascii character decimals
for cell in cells_2537:
    cell[0] = chr(cell[0])  # e.g. 65 -> 'A'; valid for columns A-Z
for i in range(len(cells_2537)):
format_cell_range(self.schedule_worksheet, '%s%i:%s%i' % (cells_2537[i][0], cells_2537[i][1], cells_2537[i][0], cells_2537[i][1]), self.format_2537)
def main(self):
self.fill_teams(self.teams_worksheet, self.event_key)
self.create_team_sheets()
# self.delete_team_sheets()
# print(self.get_sample_sheet())
# self.copy_sheet(self.get_sample_sheet(), self.sheet.worksheet('1086'), 1086) # testing on single sheet
# print(len(self.get_sample_sheet()))
self.copy_sample_to_team_sheets()
# print(self.get_color_schedule(self.event_key, 'red'))
self.fill_schedule(self.event_key)
self.fill_team_data(self.event_key)
# print(self.get_team_metrics_from_event(self.event_key))
# print(self.get_predictions_from_event(self.event_key))
self.format_cells_in_schedule()
if __name__ == '__main__':
spreadsheet = Spreadsheet()
spreadsheet.main()
|
import logging
import os
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
from torch.utils.data.distributed import DistributedSampler
from .dataset import CheXpert
def _get_mean_and_std(dataset: Dataset):
"""Compute the mean and std of dataset."""
data_loader = DataLoader(dataset, batch_size=1, shuffle=False)
mean = torch.zeros(3)
std = torch.zeros(3)
for i, (img, _) in enumerate(data_loader):
if i % 1000 == 0:
print(i)
mean += img.mean(dim=(0, 2, 3))
std += img.std(dim=(0, 2, 3))
mean /= len(data_loader)
std /= len(data_loader)
return mean, std
class Cutout(object):
def __init__(self, length):
self.length = length
def __call__(self, img):
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1:y2, x1:x2] = 0.0
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
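# Note: Cutout operates on C x H x W tensors, so it belongs after ToTensor()
# in a pipeline (see the commented-out append in _data_transforms_chexpert
# below for where it would be enabled).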
def _data_transforms_chexpert():
CHEXPERT_MEAN = [0.503, 0.503, 0.503]
CHEXPERT_STD = [0.291, 0.291, 0.291]
image_size = 256
train_transform = transforms.Compose(
[
# transforms.ToPILImage(),
transforms.RandomResizedCrop(image_size),
# transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(CHEXPERT_MEAN, CHEXPERT_STD),
]
)
# train_transform.transforms.append(Cutout(16))
test_transform = transforms.Compose(
[
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize(CHEXPERT_MEAN, CHEXPERT_STD),
]
)
return train_transform, test_transform
# for centralized training
def get_dataloader(dataset, datadir, train_bs, test_bs, dataidxs=None, policy="zeros"):
return get_dataloader_chexpert(datadir, train_bs, test_bs, dataidxs, policy=policy)
# for local devices
def get_dataloader_test(dataset, datadir, train_bs, test_bs, dataidxs_train, dataidxs_test, policy="zeros"):
return get_dataloader_test_chexpert(datadir, train_bs, test_bs, dataidxs_train, dataidxs_test, policy=policy)
def get_dataloader_chexpert(datadir, train_bs, test_bs, dataidxs=None, policy="zeros"):
dl_obj = CheXpert
transform_train, transform_test = _data_transforms_chexpert()
train_ds = dl_obj(
datadir,
dataidxs=dataidxs,
train=True,
transform=transform_train,
download=False,
policy=policy,
)
test_ds = dl_obj(
datadir,
dataidxs=None,
train=False,
transform=transform_test,
download=False,
policy=policy,
)
train_dl = DataLoader(
dataset=train_ds,
batch_size=train_bs,
shuffle=True,
drop_last=False,
pin_memory=True,
num_workers=4,
)
test_dl = DataLoader(
dataset=test_ds,
batch_size=test_bs,
shuffle=False,
drop_last=False,
pin_memory=True,
num_workers=4,
)
return train_dl, test_dl
def get_dataloader_test_chexpert(datadir, train_bs, test_bs, dataidxs_train=None, dataidxs_test=None, policy="zeros"):
dl_obj = CheXpert
transform_train, transform_test = _data_transforms_chexpert()
train_ds = dl_obj(
datadir,
dataidxs=dataidxs_train,
train=True,
transform=transform_train,
download=True,
policy=policy,
)
test_ds = dl_obj(
datadir,
dataidxs=dataidxs_test,
train=False,
transform=transform_test,
download=True,
policy=policy,
)
train_dl = DataLoader(
dataset=train_ds,
batch_size=train_bs,
shuffle=True,
drop_last=False,
pin_memory=True,
num_workers=4,
)
test_dl = DataLoader(
dataset=test_ds,
batch_size=test_bs,
shuffle=False,
drop_last=False,
pin_memory=True,
num_workers=4,
)
return train_dl, test_dl
def distributed_centralized_chexpert_loader(dataset, data_dir, world_size, rank, batch_size):
"""
Used for generating distributed dataloader for
accelerating centralized training
"""
train_bs = batch_size
test_bs = batch_size
transform_train, transform_test = _data_transforms_chexpert()
train_dataset = CheXpert(data_dir=data_dir, dataidxs=None, train=True, transform=transform_train)
test_dataset = CheXpert(data_dir=data_dir, dataidxs=None, train=False, transform=transform_test)
train_sam = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank)
test_sam = DistributedSampler(test_dataset, num_replicas=world_size, rank=rank)
train_dl = DataLoader(
train_dataset,
batch_size=train_bs,
sampler=train_sam,
pin_memory=True,
num_workers=4,
)
test_dl = DataLoader(
test_dataset,
batch_size=test_bs,
sampler=test_sam,
pin_memory=True,
num_workers=4,
)
class_num = train_dataset.num_classes  # derived from the dataset, as in load_partition_data_chexpert
train_data_num = len(train_dataset)
test_data_num = len(test_dataset)
return train_data_num, test_data_num, train_dl, test_dl, None, None, None, class_num
def load_partition_data_chexpert(
data_dir,
partition_method="random",
partition_alpha=None,
client_number=100,
batch_size=10,
policy="zeros",
):
transform_train, transform_test = _data_transforms_chexpert()
train_dataset = CheXpert(
data_dir=data_dir,
dataidxs=None,
train=True,
transform=transform_train,
policy=policy,
)
test_dataset = CheXpert(data_dir=data_dir, dataidxs=None, train=False, transform=transform_test, policy=policy)
# get local dataset
if partition_method == "random":
num_train_items = int(len(train_dataset) / client_number)
num_test_items = int(len(test_dataset) / client_number)
dict_client = {}
all_train_idxs = list(range(len(train_dataset)))
all_test_idxs = list(range(len(test_dataset)))
for client_idx in range(client_number):
dict_client[client_idx] = {}
dict_client[client_idx]["train"] = set(np.random.choice(all_train_idxs, num_train_items, replace=False))
dict_client[client_idx]["test"] = set(np.random.choice(all_test_idxs, num_test_items, replace=False))
all_train_idxs = list(set(all_train_idxs) - dict_client[client_idx]["train"])
all_test_idxs = list(set(all_test_idxs) - dict_client[client_idx]["test"])
if len(all_train_idxs) > 0:
all_client_idxs = list(range(client_number))
np.random.shuffle(all_client_idxs)
choiced_client_idxs = all_client_idxs[: len(all_train_idxs)]
for idx, client_idx in enumerate(choiced_client_idxs):
dict_client[client_idx]["train"].add(all_train_idxs[idx])
if len(all_test_idxs) > 0:
all_client_idxs = list(range(client_number))
np.random.shuffle(all_client_idxs)
choiced_client_idxs = all_client_idxs[: len(all_test_idxs)]
for idx, client_idx in enumerate(choiced_client_idxs):
dict_client[client_idx]["test"].add(all_test_idxs[idx])
else:
raise NotImplementedError
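    # At this point dict_client maps client_idx -> {'train': set of dataset
    # indices, 'test': set of dataset indices}, e.g.
    # {0: {'train': {3, 17, ...}, 'test': {5, ...}}, ...} (indices illustrative)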
# build dataloader
train_dl = []
test_dl = []
for client_idx in range(client_number):
train_data_idxs = list(dict_client[client_idx]["train"])
test_data_idxs = list(dict_client[client_idx]["test"])
train_dl_, test_dl_ = get_dataloader_test_chexpert(
datadir=data_dir,
dataidxs_train=train_data_idxs,
dataidxs_test=test_data_idxs,
train_bs=batch_size,
test_bs=batch_size,
policy=policy,
)
train_dl.append(train_dl_)
test_dl.append(test_dl_)
logging.info(f"Client {client_idx} train data num: {len(train_dl_)} test data num: {len(test_dl_)}")
logging.info("Partition data done")
# logging.info("Partition data for each client: {}".format(dict_client))
train_data_num = len(train_dataset)
test_data_num = len(test_dataset)
train_data_global = train_dataset
test_data_global = test_dataset
data_local_num_dict = {
client_idx: len(dict_client[client_idx]["train"]) + len(dict_client[client_idx]["test"])
for client_idx in range(client_number)
}
train_data_local_dict = {client_idx: train_dl_ for client_idx, train_dl_ in enumerate(train_dl)}
test_data_local_dict = {client_idx: test_dl_ for client_idx, test_dl_ in enumerate(test_dl)}
class_num = train_dataset.num_classes
return (
train_data_num,
test_data_num,
train_data_global,
test_data_global,
data_local_num_dict,
train_data_local_dict,
test_data_local_dict,
class_num,
)
if __name__ == "__main__":
data_path = os.path.join("D:\\", "dataset", "CheXpert", "CheXpert-v1.0-small")
data = CheXpert(data_dir=data_path, transform=transforms.ToTensor())
print(len(data))
print(data[0][0])
print(data[0][1])
# mean, std = _get_mean_and_std(data)
# print(mean, std)
# train_transform, valid_transform = _data_transforms_chexpert()
# print(train_transform)
# print(valid_transform)
(
train_data_num,
test_data_num,
train_data_global,
test_data_global,
data_local_num_dict,
train_data_local_dict,
test_data_local_dict,
class_num,
) = load_partition_data_chexpert(data_dir=data_path, client_number=10, batch_size=10, policy="zeros")
print(train_data_num, test_data_num, class_num)
|
import json
from collections.abc import Iterable
from pathlib import Path
import cadquery as cq
import matplotlib.pyplot as plt
import plotly.graph_objects as go
from cadquery import exporters
import paramak
from paramak.neutronics_utils import (add_stl_to_moab_core,
define_moab_core_and_tags)
from paramak.utils import get_hash
class Reactor:
"""The Reactor object allows shapes and components to be added and then
collective operations to be performed on them. Combining all the shapes is
required for creating images of the whole reactor and creating a Graveyard
(bounding box) that is needed for neutronics simulations.
Args:
shapes_and_components (list): list of paramak.Shape
"""
def __init__(self, shapes_and_components):
self.material_tags = []
self.stp_filenames = []
self.stl_filenames = []
self.tet_meshes = []
self.graveyard = None
self.solid = None
self.shapes_and_components = shapes_and_components
self.reactor_hash_value = None
self.graveyard_offset = None # set by the make_graveyard method
@property
def stp_filenames(self):
values = []
for shape_or_component in self.shapes_and_components:
values.append(shape_or_component.stp_filename)
return values
@stp_filenames.setter
def stp_filenames(self, value):
self._stp_filenames = value
@property
def stl_filenames(self):
values = []
for shape_or_component in self.shapes_and_components:
values.append(shape_or_component.stl_filename)
return values
@stl_filenames.setter
def stl_filenames(self, value):
self._stl_filenames = value
@property
def largest_dimension(self):
"""Calculates a bounding box for the Reactor and returns the largest
absolute value of the largest dimension of the bounding box"""
largest_dimension = 0
for component in self.shapes_and_components:
largest_dimension = max(
largest_dimension,
component.largest_dimension)
self._largest_dimension = largest_dimension
return largest_dimension
@largest_dimension.setter
def largest_dimension(self, value):
self._largest_dimension = value
@property
def material_tags(self):
"""Returns a set of all the materials_tags used in the Reactor
(excluding the plasma)"""
values = []
for shape_or_component in self.shapes_and_components:
if isinstance(
shape_or_component,
(paramak.Plasma,
paramak.PlasmaFromPoints,
paramak.PlasmaBoundaries)) is False:
values.append(shape_or_component.material_tag)
return values
@material_tags.setter
def material_tags(self, value):
self._material_tags = value
@property
def tet_meshes(self):
values = []
for shape_or_component in self.shapes_and_components:
    values.append(shape_or_component.tet_mesh)
return values
@tet_meshes.setter
def tet_meshes(self, value):
self._tet_meshes = value
@property
def shapes_and_components(self):
"""Adds a list of parametric shape(s) and or parametric component(s)
to the Reactor object. This allows collective operations to be
performed on all the shapes in the reactor. When adding a shape or
component the stp_filename of the shape or component should be unique"""
if hasattr(self, "create_solids"):
ignored_keys = ["reactor_hash_value"]
if get_hash(self, ignored_keys) != self.reactor_hash_value:
self.create_solids()
self.reactor_hash_value = get_hash(self, ignored_keys)
return self._shapes_and_components
@shapes_and_components.setter
def shapes_and_components(self, value):
if not isinstance(value, Iterable):
raise ValueError("shapes_and_components must be a list")
self._shapes_and_components = value
@property
def graveyard_offset(self):
return self._graveyard_offset
@graveyard_offset.setter
def graveyard_offset(self, value):
if value is None:
self._graveyard_offset = None
elif not isinstance(value, (float, int)):
raise ValueError("graveyard_offset must be a number")
elif value < 0:
raise ValueError("graveyard_offset must be positive")
self._graveyard_offset = value
@property
def solid(self):
"""This combines all the parametric shapes and compents in the reactor
object and rotates the viewing angle so that .solid operations in
jupyter notebook.
"""
list_of_cq_vals = []
for shape_or_compound in self.shapes_and_components:
if isinstance(
shape_or_compound.solid,
cq.occ_impl.shapes.Compound):
for solid in shape_or_compound.solid.Solids():
list_of_cq_vals.append(solid)
else:
list_of_cq_vals.append(shape_or_compound.solid.val())
compound = cq.Compound.makeCompound(list_of_cq_vals)
compound = compound.rotate(
startVector=(0, 1, 0), endVector=(0, 0, 1), angleDegrees=180
)
return compound
@solid.setter
def solid(self, value):
self._solid = value
def neutronics_description(self, include_plasma=False,
include_graveyard=True
):
"""A description of the reactor containing material tags, stp filenames,
and tet mesh instructions. This is used for neutronics simulations which
require linkage between volumes, materials and identification of which
volumes to tet mesh. The plasma geometry is not included by default as
it is typically not included in neutronics simulations. The reason for
this is that the low number density results in minimal interaction with
neutrons. However, it can be added if the include_plasma argument is set
to True.
Returns:
dictionary: a dictionary of materials and filenames for the reactor
"""
neutronics_description = []
for entry in self.shapes_and_components:
if include_plasma is False and isinstance(
entry,
(paramak.Plasma,
paramak.PlasmaFromPoints,
paramak.PlasmaBoundaries)) is True:
continue
if entry.stp_filename is None:
raise ValueError(
"Set Shape.stp_filename for all the \
Reactor entries before using this method"
)
if entry.material_tag is None:
raise ValueError(
"set Shape.material_tag for all the \
Reactor entries before using this method"
)
neutronics_description.append(entry.neutronics_description())
# This adds the neutronics description for the graveyard which is unique
# as it is automatically calculated instead of being added by the user.
# Also the graveyard must have 'Graveyard' as the material name
if include_graveyard is True:
self.make_graveyard()
neutronics_description.append(
self.graveyard.neutronics_description())
return neutronics_description
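    # A sketch of the returned structure (filenames and material tags are
    # illustrative, not produced by this class as-is):
    # [{'material': 'blanket_material', 'stp_filename': 'blanket.stp'},
    #  {'material': 'Graveyard', 'stp_filename': 'Graveyard.stp'}]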
def export_neutronics_description(
self,
filename="manifest.json",
include_plasma=False,
include_graveyard=True):
"""
Saves Reactor.neutronics_description to a json file. The resulting json
file contains a list of dictionaries. Each dictionary entry comprises
of a material and a filename and optionally a tet_mesh instruction. The
json file can then be used with the neutronics workflows to create a
neutronics model. Creating of the neutronics model requires linkage
between volumes, materials and identification of which volumes to
tet_mesh. If the filename does not end with .json then .json will be
added. The plasma geometry is not included by default as it is
typically not included in neutronics simulations. The reason for this
is that the low number density results in minimal interactions with
neutrons. However, the plasma can be added if the include_plasma
argument is set to True.
Args:
filename (str, optional): the filename used to save the neutronics
description
include_plasma (Boolean, optional): should the plasma be included.
Defaults to False as the plasma volume and material has very
little impact on the neutronics results due to the low density.
Including the plasma does however slow down the simulation.
include_graveyard (Boolean, optional): should the graveyard be
included. Defaults to True as this is needed for DAGMC models.
"""
path_filename = Path(filename)
if path_filename.suffix != ".json":
path_filename = path_filename.with_suffix(".json")
path_filename.parents[0].mkdir(parents=True, exist_ok=True)
with open(path_filename, "w") as outfile:
json.dump(
self.neutronics_description(
include_plasma=include_plasma,
include_graveyard=include_graveyard,
),
outfile,
indent=4,
)
print("saved geometry description to ", path_filename)
return str(path_filename)
def export_stp(self, output_folder="", graveyard_offset=100,
mode='solid'):
"""Writes stp files (CAD geometry) for each Shape object in the reactor
and the graveyard.
Args:
output_folder (str): the folder for saving the stp files to
graveyard_offset (float, optional): the offset between the largest
edge of the geometry and inner bounding shell created. Defaults
to 100.
mode (str, optional): the object to export can be either
'solid' which exports 3D solid shapes or the 'wire' which
exports the wire edges of the shape. Defaults to 'solid'.
Returns:
list: a list of stp filenames created
"""
if len(self.stp_filenames) != len(set(self.stp_filenames)):
raise ValueError(
"Set Reactor already contains a shape or component \
with this stp_filename",
self.stp_filenames,
)
filenames = []
for entry in self.shapes_and_components:
if entry.stp_filename is None:
raise ValueError(
"set .stp_filename property for \
Shapes before using the export_stp method"
)
filenames.append(
str(Path(output_folder) / Path(entry.stp_filename)))
entry.export_stp(
filename=Path(output_folder) / Path(entry.stp_filename),
mode=mode
)
# creates a graveyard (bounding shell volume) which is needed for
# neutronics simulations
self.make_graveyard(graveyard_offset=graveyard_offset)
filenames.append(
str(Path(output_folder) / Path(self.graveyard.stp_filename)))
self.graveyard.export_stp(
Path(output_folder) / Path(self.graveyard.stp_filename)
)
return filenames
def export_stl(self, output_folder="", tolerance=0.001):
"""Writes stl files (CAD geometry) for each Shape object in the reactor
:param output_folder: the folder for saving the stp files to
:type output_folder: str
:param tolerance: the precision of the faceting
:type tolerance: float
:return: a list of stl filenames created
:rtype: list
"""
if len(self.stl_filenames) != len(set(self.stl_filenames)):
raise ValueError(
"Set Reactor already contains a shape or component \
with this stl_filename",
self.stl_filenames,
)
filenames = []
for entry in self.shapes_and_components:
print("entry.stl_filename", entry.stl_filename)
if entry.stl_filename is None:
raise ValueError(
"set .stl_filename property for \
Shapes before using the export_stl method"
)
filenames.append(
str(Path(output_folder) / Path(entry.stl_filename)))
entry.export_stl(
Path(output_folder) /
Path(
entry.stl_filename),
tolerance)
# creates a graveyard (bounding shell volume) which is needed for
# neutronics simulations
self.make_graveyard()
filenames.append(
str(Path(output_folder) / Path(self.graveyard.stl_filename)))
self.graveyard.export_stl(
Path(output_folder) / Path(self.graveyard.stl_filename)
)
print("exported stl files ", filenames)
return filenames
def export_h5m(
self,
filename='dagmc.h5m',
skip_graveyard=False,
tolerance=0.001,
graveyard_offset=100):
"""Converts stl files into DAGMC compatible h5m file using PyMOAB. The
DAGMC file produced has not been imprinted and merged unlike the other
supported method which uses Trelis to produce an imprinted and merged
DAGMC geometry. If the provided filename doesn't end with .h5m it will
be added
Args:
filename (str, optional): filename of h5m outputfile
Defaults to "dagmc.h5m".
skip_graveyard (boolean, optional): whether to skip creating and
    including the graveyard volume. Defaults to False.
tolerance (float, optional): the precision of the faceting
Defaults to 0.001.
graveyard_offset (float, optional): the offset between the largest
edge of the geometry and inner bounding shell created. Defaults
to 100.
Returns:
filename: output h5m filename
"""
path_filename = Path(filename)
if path_filename.suffix != ".h5m":
path_filename = path_filename.with_suffix(".h5m")
path_filename.parents[0].mkdir(parents=True, exist_ok=True)
moab_core, moab_tags = define_moab_core_and_tags()
surface_id = 1
volume_id = 1
for item in self.shapes_and_components:
item.export_stl(item.stl_filename, tolerance=tolerance)
moab_core = add_stl_to_moab_core(
moab_core,
surface_id,
volume_id,
item.material_tag,
moab_tags,
item.stl_filename)
volume_id += 1
surface_id += 1
if skip_graveyard is False:
self.make_graveyard(graveyard_offset=graveyard_offset)
self.graveyard.export_stl(self.graveyard.stl_filename)
volume_id = 2
surface_id = 2
moab_core = add_stl_to_moab_core(
moab_core,
surface_id,
volume_id,
self.graveyard.material_tag,
moab_tags,
self.graveyard.stl_filename
)
all_sets = moab_core.get_entities_by_handle(0)
file_set = moab_core.create_meshset()
moab_core.add_entities(file_set, all_sets)
moab_core.write_file(str(path_filename))
return str(path_filename)
def export_physical_groups(self, output_folder=""):
"""Exports several JSON files containing a look up table which is
useful for identifying faces and volumes. The output file names are
generated from .stp_filename properties.
Args:
output_folder (str, optional): directory of outputfiles.
Defaults to "".
Raises:
ValueError: if one .stp_filename property is set to None
Returns:
list: list of output file names
"""
filenames = []
for entry in self.shapes_and_components:
if entry.stp_filename is None:
raise ValueError(
"set .stp_filename property for \
Shapes before using the export_stp method"
)
filenames.append(
str(Path(output_folder) / Path(entry.stp_filename)))
entry.export_physical_groups(
Path(output_folder) / Path(entry.stp_filename))
return filenames
def export_svg(self, filename):
"""Exports an svg file for the Reactor.solid. If the filename provided
doesn't end with .svg it will be added.
Args:
filename (str): the filename of the svg file to be exported
"""
path_filename = Path(filename)
if path_filename.suffix != ".svg":
path_filename = path_filename.with_suffix(".svg")
path_filename.parents[0].mkdir(parents=True, exist_ok=True)
with open(path_filename, "w") as out_file:
exporters.exportShape(self.solid, "SVG", out_file)
print("Saved file as ", path_filename)
def export_graveyard(
self,
graveyard_offset=100,
filename="Graveyard.stp"):
"""Writes an stp file (CAD geometry) for the reactor graveyard. This
is needed for DAGMC simulations. This method also calls
Reactor.make_graveyard with the offset.
Args:
filename (str): the filename for saving the stp file
graveyard_offset (float): the offset between the largest edge of
the geometry and inner bounding shell created. Defaults to
Reactor.graveyard_offset
Returns:
str: the stp filename created
"""
self.make_graveyard(graveyard_offset=graveyard_offset)
self.graveyard.export_stp(Path(filename))
return filename
def make_graveyard(self, graveyard_offset=100):
"""Creates a graveyard volume (bounding box) that encapsulates all
volumes. This is required by DAGMC when performing neutronics
simulations.
Args:
graveyard_offset (float): the offset between the largest edge of
the geometry and inner bounding shell created. Defaults to
Reactor.graveyard_offset
Returns:
CadQuery solid: a shell volume that bounds the geometry, referred
to as a graveyard in DAGMC
"""
self.graveyard_offset = graveyard_offset
for component in self.shapes_and_components:
if component.solid is None:
component.create_solid()
graveyard_shape = paramak.HollowCube(
length=self.largest_dimension * 2 + graveyard_offset * 2,
name="Graveyard",
material_tag="Graveyard",
stp_filename="Graveyard.stp",
stl_filename="Graveyard.stl",
)
self.graveyard = graveyard_shape
return graveyard_shape
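    # Sizing example: with largest_dimension == 500 and graveyard_offset == 100
    # the hollow cube has an edge length of 500 * 2 + 100 * 2 = 1200.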
def export_2d_image(
self,
filename="2d_slice.png",
xmin=0.0,
xmax=900.0,
ymin=-600.0,
ymax=600.0):
"""Creates a 2D slice image (png) of the reactor.
Args:
filename (str): output filename of the image created
Returns:
str: png filename created
"""
path_filename = Path(filename)
if path_filename.suffix != ".png":
path_filename = path_filename.with_suffix(".png")
path_filename.parents[0].mkdir(parents=True, exist_ok=True)
fig, ax = plt.subplots()
# creates individual patches for each Shape which are combined together
for entry in self.shapes_and_components:
patch = entry._create_patch()
ax.add_collection(patch)
ax.axis("equal")
ax.set(xlim=(xmin, xmax), ylim=(ymin, ymax))
ax.set_aspect("equal", "box")
plt.savefig(str(path_filename), dpi=100)
plt.close()
print("\n saved 2d image to ", str(path_filename))
return str(path_filename)
def export_html(self, filename="reactor.html"):
"""Creates a html graph representation of the points for the Shape
objects that make up the reactor. Note, If filename provided doesn't end
with .html then it will be appended.
Args:
filename (str): the filename to save the html graph
Returns:
plotly figure: figure object
"""
path_filename = Path(filename)
if path_filename.suffix != ".html":
path_filename = path_filename.with_suffix(".html")
path_filename.parents[0].mkdir(parents=True, exist_ok=True)
fig = go.Figure()
fig.update_layout(
{"title": "coordinates of components", "hovermode": "closest"}
)
# accesses the Shape traces for each Shape and adds them to the figure
for entry in self.shapes_and_components:
fig.add_trace(entry._trace())
fig.write_html(str(path_filename))
print("Exported html graph to ", str(path_filename))
return fig
|
#!/usr/bin/python2
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
def hive_service(
name,
action='start'):
import params
if name == 'metastore':
pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
cmd = format(
"env HADOOP_HOME={hadoop_home} JAVA_HOME={java64_home} {start_metastore_path} {hive_log_dir}/hive.out {hive_log_dir}/hive.err {pid_file} {hive_server_conf_dir}")
elif name == 'hiveserver2':
pid_file = format("{hive_pid_dir}/{hive_pid}")
cmd = format(
"env JAVA_HOME={java64_home} {start_hiveserver2_path} {hive_log_dir}/hive-server2.out {hive_log_dir}/hive-server2.err {pid_file} {hive_server_conf_dir}")
if action == 'start':
demon_cmd = format("{cmd}")
no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
Execute(demon_cmd,
user=params.hive_user,
not_if=no_op_test
)
if params.hive_jdbc_driver == "com.mysql.jdbc.Driver" or params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
db_connection_check_command = format(
"{java64_home}/bin/java -cp {check_db_connection_jar}:/usr/share/java/{jdbc_jar_name} org.apache.ambari.server.DBConnectionVerification {hive_jdbc_connection_url} {hive_metastore_user_name} {hive_metastore_user_passwd} {hive_jdbc_driver}")
Execute(db_connection_check_command,
path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin')
elif action == 'stop':
demon_cmd = format("kill `cat {pid_file}` >/dev/null 2>&1 && rm -f {pid_file}")
Execute(demon_cmd)
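# Usage sketch (the call sites below are illustrative; in Ambari this
# function is invoked from the service control scripts):
# hive_service(name='metastore', action='start')
# hive_service(name='hiveserver2', action='stop')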
|
"""
Logging module for printing status during an exploit, and internally
within ``pwntools``.
Exploit Developers
------------------
By using the standard ``from pwn import *``, an object named ``log`` will
be inserted into the global namespace. You can use this to print out
status messages during exploitation.
For example,::
log.info('Hello, world!')
prints::
[*] Hello, world!
Additionally, there are some nifty mechanisms for performing status updates
on a running job (e.g. when brute-forcing).::
p = log.progress('Working')
p.status('Reticulating splines')
time.sleep(1)
p.success('Got a shell!')
The verbosity of logging can be most easily controlled by setting
``log_level`` on the global ``context`` object.::
log.info("No you see me")
context.log_level = 'error'
log.info("Now you don't")
The purpose of this attribute is to control what gets printed to the screen,
not what gets emitted. This means that you can put all logging events into
a log file, while only wanting to see a small subset of them on your screen.
Pwnlib Developers
-----------------
A module-specific logger can be imported into the module via::
from pwnlib.log import getLogger
log = getLogger(__name__)
This provides an easy way to filter logging programmatically
or via a configuration file for debugging.
When using ``progress``, you should use the ``with``
keyword to manage scoping, to ensure the spinner stops if an
exception is thrown.
Technical details
-----------------
Familiarity with the :mod:`logging` module is assumed.
A pwnlib root logger named 'pwnlib' is created and a custom handler and
formatter is installed for it. The handler determines its logging level from
:data:`context.log_level`.
Ideally :data:`context.log_level` should only affect which records will be
emitted by the handler such that e.g. logging to a file will not be changed by
it. But for performance reasons it is not feasible to log everything in the normal
case. In particular there are tight loops inside :mod:`pwnlib.tubes.tube`, which
we would like to be able to debug, but if we are not debugging them, they should
not spit out messages (even to a log file). For this reason there are a few places
inside pwnlib, that will not even emit a record without :data:`context.log_level`
being set to `logging.DEBUG` or below.
Log records created by ``Progress`` and ``Logger`` objects will set
``'pwnlib_msgtype'`` on the ``extra`` field to signal which kind of message was
generated. This information is used by the formatter to prepend a symbol to the
message, e.g. ``'[+] '`` in ``'[+] got a shell!'``
This field is ignored when using the ``logging`` module's standard formatters.
All status updates (which are not dropped due to throttling) on progress loggers
result in a log record being created. The ``extra`` field then carries a
reference to the ``Progress`` logger as ``'pwnlib_progress'``.
If the custom handler determines that :data:`term.term_mode` is enabled, log
records that have a ``'pwnlib_progress'`` in their ``extra`` field will not
result in a message being emitted but rather an animated progress line (with a
spinner!) being created. Note that other handlers will still see a meaningful
log record.
The custom handler will only handle log records with a level of at least
:data:`context.log_level`. Thus if e.g. the level for the
``'pwnlib.tubes.ssh'`` is set to ``'DEBUG'`` no additional output will show up
unless :data:`context.log_level` is also set to ``'DEBUG'``. Other handlers
will however see the extra log records generated by the ``'pwnlib.tubes.ssh'``
logger.
"""
from __future__ import absolute_import
from __future__ import division
import logging
import os
import random
import re
import six
import sys
import threading
import time
from pwnlib import term
from pwnlib.config import register_config
from pwnlib.context import Thread
from pwnlib.context import context
from pwnlib.exception import PwnlibException
from pwnlib.term import spinners
from pwnlib.term import text
__all__ = [
'getLogger', 'install_default_handler', 'rootlogger'
]
# list of prefixes to use for the different message types. note that the `text`
# module won't add any escape codes if `pwnlib.context.log_console.isatty()` is `False`
_msgtype_prefixes = {
'status' : [text.magenta, 'x'],
'success' : [text.bold_green, '+'],
'failure' : [text.bold_red, '-'],
'debug' : [text.bold_red, 'DEBUG'],
'info' : [text.bold_blue, '*'],
'warning' : [text.bold_yellow, '!'],
'error' : [text.on_red, 'ERROR'],
'exception' : [text.on_red, 'ERROR'],
'critical' : [text.on_red, 'CRITICAL'],
'info_once' : [text.bold_blue, '*'],
'warning_once' : [text.bold_yellow, '!'],
}
def read_log_config(settings):
log = getLogger(__name__)
for key, value in settings.items():
if '.' not in key:
log.warn("Invalid configuration option %r in section %r" % (key, 'log'))
continue
msgtype, key = key.split('.', 1)
if key == 'color':
current = _msgtype_prefixes[msgtype][0]
_msgtype_prefixes[msgtype][0] = getattr(text, value, current)
elif key == 'symbol':
_msgtype_prefixes[msgtype][1] = value
else:
log.warn("Unknown configuration option %r in section %r" % (key, 'log'))
register_config('log', read_log_config)
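# For illustration, a user configuration section that read_log_config accepts
# might look like this (the file location depends on pwnlib.config; the
# values are hypothetical):
#
#     [log]
#     success.symbol = $
#     error.color    = red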
# the text decoration to use for spinners. the spinners themselves can be found
# in the `pwnlib.term.spinners` module
_spinner_style = text.bold_blue
class Progress(object):
"""
Progress logger used to generate log records associated with some running
job. Instances can be used as context managers which will automatically
declare the running job a success upon exit or a failure upon a thrown
exception. After :meth:`success` or :meth:`failure` is called the status
can no longer be updated.
This class is intended for internal use. Progress loggers should be created
using :meth:`Logger.progress`.
"""
def __init__(self, logger, msg, status, level, args, kwargs):
self._logger = logger
self._msg = msg
self._status = status
self._level = level
self._stopped = False
self.last_status = 0
self.rate = kwargs.pop('rate', 0)
self._log(status, args, kwargs, 'status')
# it is a common use case to create a logger and then immediately update
# its status line, so we reset `last_status` to accommodate this pattern
self.last_status = 0
def _log(self, status, args, kwargs, msgtype):
# Logs are strings, not bytes. Handle Python3 bytes() objects.
status = six.ensure_text(status)
# this progress logger is stopped, so don't generate any more records
if self._stopped:
return
msg = self._msg
if msg and status:
msg += ': '
msg += status
self._logger._log(self._level, msg, args, kwargs, msgtype, self)
def status(self, status, *args, **kwargs):
"""status(status, *args, **kwargs)
Logs a status update for the running job.
If the progress logger is animated the status line will be updated in
place.
Status updates are throttled at one update per 100ms.
"""
now = time.time()
if (now - self.last_status) > self.rate:
self.last_status = now
self._log(status, args, kwargs, 'status')
def success(self, status = 'Done', *args, **kwargs):
"""success(status = 'Done', *args, **kwargs)
Logs that the running job succeeded. No further status updates are
allowed.
If the Logger is animated, the animation is stopped.
"""
self._log(status, args, kwargs, 'success')
self._stopped = True
def failure(self, status = 'Failed', *args, **kwargs):
"""failure(message)
Logs that the running job failed. No further status updates are
allowed.
If the Logger is animated, the animation is stopped.
"""
self._log(status, args, kwargs, 'failure')
self._stopped = True
def __enter__(self):
return self
def __exit__(self, exc_typ, exc_val, exc_tb):
# if the progress logger is already stopped these are no-ops
if exc_typ is None:
self.success()
else:
self.failure()
class Logger(object):
"""
A class akin to the :class:`logging.LoggerAdapter` class. All public
methods defined on :class:`logging.Logger` instances are defined on this
class.
Also adds some ``pwnlib`` flavor:
* :meth:`progress` (alias :meth:`waitfor`)
* :meth:`success`
* :meth:`failure`
* :meth:`indented`
* :meth:`info_once`
* :meth:`warning_once` (alias :meth:`warn_once`)
Adds ``pwnlib``-specific information for coloring, indentation and progress
logging via log records ``extra`` field.
Loggers instantiated with :func:`getLogger` will be of this class.
"""
_one_time_infos = set()
_one_time_warnings = set()
def __init__(self, logger=None):
if logger is None:
# This is a minor hack to permit user-defined classes which inherit
# from a tube (which do not actually reside in the pwnlib library)
# to receive logging abilities that behave as they would expect from
# the rest of the library
module = self.__module__
if not module.startswith('pwnlib'):
module = 'pwnlib.' + module
# - end hack -
logger_name = '%s.%s.%s' % (module, self.__class__.__name__, id(self))
logger = logging.getLogger(logger_name)
self._logger = logger
def _getlevel(self, levelString):
if isinstance(levelString, six.integer_types):
return levelString
# logging.getLevelName returns the numeric level for a level name and,
# unlike the private _levelNames map, exists on both Python 2 and 3
return logging.getLevelName(levelString.upper())
def _log(self, level, msg, args, kwargs, msgtype, progress = None):
# Logs are strings, not bytes. Handle Python3 bytes() objects.
msg = six.ensure_text(msg)
extra = kwargs.get('extra', {})
extra.setdefault('pwnlib_msgtype', msgtype)
extra.setdefault('pwnlib_progress', progress)
kwargs['extra'] = extra
self._logger.log(level, msg, *args, **kwargs)
def progress(self, message, status = '', *args, **kwargs):
"""progress(message, status = '', *args, level = logging.INFO, **kwargs) -> Progress
Creates a new progress logger which creates log records with log level
`level`.
Progress status can be updated using :meth:`Progress.status` and stopped
using :meth:`Progress.success` or :meth:`Progress.failure`.
If `term.term_mode` is enabled the progress logger will be animated.
The progress manager also functions as a context manager. Using context
managers ensures that animations stop even if an exception is raised.
.. code-block:: python
with log.progress('Trying something...') as p:
for i in range(10):
p.status("At %i" % i)
time.sleep(0.5)
x = 1/0
"""
level = self._getlevel(kwargs.pop('level', logging.INFO))
return Progress(self, message, status, level, args, kwargs)
def waitfor(self, *args, **kwargs):
"""Alias for :meth:`progress`."""
return self.progress(*args, **kwargs)
def indented(self, message, *args, **kwargs):
"""indented(message, *args, level = logging.INFO, **kwargs)
Log a message but don't put a line prefix on it.
Arguments:
level(int): Alternate log level at which to set the indented
message. Defaults to :const:`logging.INFO`.
"""
level = self._getlevel(kwargs.pop('level', logging.INFO))
self._log(level, message, args, kwargs, 'indented')
def success(self, message, *args, **kwargs):
"""success(message, *args, **kwargs)
Logs a success message.
"""
self._log(logging.INFO, message, args, kwargs, 'success')
def failure(self, message, *args, **kwargs):
"""failure(message, *args, **kwargs)
Logs a failure message.
"""
self._log(logging.INFO, message, args, kwargs, 'failure')
def info_once(self, message, *args, **kwargs):
"""info_once(message, *args, **kwargs)
Logs an info message. The same message is never printed again.
"""
m = message % args
if m not in self._one_time_infos:
if self.isEnabledFor(logging.INFO):
self._one_time_infos.add(m)
self._log(logging.INFO, message, args, kwargs, 'info_once')
def warning_once(self, message, *args, **kwargs):
"""warning_once(message, *args, **kwargs)
Logs a warning message. The same message is never printed again.
"""
m = message % args
if m not in self._one_time_warnings:
if self.isEnabledFor(logging.WARNING):
self._one_time_warnings.add(m)
self._log(logging.WARNING, message, args, kwargs, 'warning_once')
def warn_once(self, *args, **kwargs):
"""Alias for :meth:`warning_once`."""
return self.warning_once(*args, **kwargs)
# logging functions also exposed by `logging.Logger`
def debug(self, message, *args, **kwargs):
"""debug(message, *args, **kwargs)
Logs a debug message.
"""
self._log(logging.DEBUG, message, args, kwargs, 'debug')
def info(self, message, *args, **kwargs):
"""info(message, *args, **kwargs)
Logs an info message.
"""
self._log(logging.INFO, message, args, kwargs, 'info')
def hexdump(self, message, *args, **kwargs):
    """hexdump(message, *args, **kwargs)
    Logs an info message containing a hexdump of `message`.
    """
    # cyclic dependencies FTW!
# TODO: Move pwnlib.util.fiddling.hexdump into a new module.
import pwnlib.util.fiddling
self.info(pwnlib.util.fiddling.hexdump(message, *args, **kwargs))
def warning(self, message, *args, **kwargs):
"""warning(message, *args, **kwargs)
Logs a warning message.
"""
self._log(logging.WARNING, message, args, kwargs, 'warning')
def warn(self, *args, **kwargs):
"""Alias for :meth:`warning`."""
return self.warning(*args, **kwargs)
def error(self, message, *args, **kwargs):
"""error(message, *args, **kwargs)
To be called outside an exception handler.
Logs an error message, then raises a ``PwnlibException``.
"""
self._log(logging.ERROR, message, args, kwargs, 'error')
raise PwnlibException(message % args)
def exception(self, message, *args, **kwargs):
"""exception(message, *args, **kwargs)
To be called from an exception handler.
Logs an error message, then re-raises the current exception.
"""
kwargs["exc_info"] = 1
self._log(logging.ERROR, message, args, kwargs, 'exception')
raise
def critical(self, message, *args, **kwargs):
"""critical(message, *args, **kwargs)
Logs a critical message.
"""
self._log(logging.CRITICAL, message, args, kwargs, 'critical')
def log(self, level, message, *args, **kwargs):
"""log(level, message, *args, **kwargs)
Logs a message with log level `level`. The ``pwnlib`` formatter will
use the default :mod:`logging` formatter to format this message.
"""
self._log(level, message, args, kwargs, None)
def isEnabledFor(self, level):
"""isEnabledFor(level) -> bool
See if the underlying logger is enabled for the specified level.
"""
effectiveLevel = self._logger.getEffectiveLevel()
if effectiveLevel == 1:
effectiveLevel = context.log_level
return effectiveLevel <= level
def setLevel(self, level):
"""setLevel(level)
Set the logging level for the underlying logger.
"""
with context.local(log_level=level):
self._logger.setLevel(context.log_level)
def addHandler(self, handler):
"""addHandler(handler)
Add the specified handler to the underlying logger.
"""
self._logger.addHandler(handler)
def removeHandler(self, handler):
"""removeHandler(handler)
Remove the specified handler from the underlying logger.
"""
self._logger.removeHandler(handler)
@property
def level(self):
return self._logger.level
@level.setter
def level(self, value):
with context.local(log_level=value):
self._logger.level = context.log_level
class Handler(logging.StreamHandler):
"""
A custom handler class. This class will report whatever
:data:`context.log_level` is currently set to as its log level.
If :data:`term.term_mode` is enabled log records originating from a progress
logger will not be emitted but rather an animated progress line will be
created.
An instance of this handler is added to the ``'pwnlib'`` logger.
"""
@property
def stream(self):
return context.log_console
@stream.setter
def stream(self, value):
pass
def emit(self, record):
"""
Emit a log record or create/update an animated progress logger
depending on whether :data:`term.term_mode` is enabled.
"""
# We have set the root 'pwnlib' logger to have a logLevel of 1,
# when logging has been enabled via install_default_handler.
#
# If the level is 1, we should only process the record if
# context.log_level is at most the record's log level.
#
# If the level is not 1, somebody else expressly set the log
# level somewhere on the tree, and we should use that value.
level = logging.getLogger(record.name).getEffectiveLevel()
if level == 1:
level = context.log_level
if level > record.levelno:
return
progress = getattr(record, 'pwnlib_progress', None)
# if the record originates from a `Progress` object and term handling
# is enabled we can have animated spinners! so check that
if progress is None or not term.term_mode:
super(Handler, self).emit(record)
return
# yay, spinners!
# since we want to be able to update the spinner we overwrite the
# message type so that the formatter doesn't output a prefix symbol
msgtype = record.pwnlib_msgtype
record.pwnlib_msgtype = 'animated'
msg = "%s\n" % self.format(record)
# we enrich the `Progress` object to keep track of the spinner
if not hasattr(progress, '_spinner_handle'):
spinner_handle = term.output('')
msg_handle = term.output(msg)
stop = threading.Event()
def spin():
'''Wheeeee!'''
state = 0
states = random.choice(spinners.spinners)
while True:
prefix = '[%s] ' % _spinner_style(states[state])
spinner_handle.update(prefix)
state = (state + 1) % len(states)
if stop.wait(0.1):
break
t = Thread(target = spin)
t.daemon = True
t.start()
progress._spinner_handle = spinner_handle
progress._msg_handle = msg_handle
progress._stop_event = stop
progress._spinner_thread = t
else:
progress._msg_handle.update(msg)
# if the message type was not a status message update, then we should
# stop the spinner
if msgtype != 'status':
progress._stop_event.set()
progress._spinner_thread.join()
style, symb = _msgtype_prefixes[msgtype]
prefix = '[%s] ' % style(symb)
progress._spinner_handle.update(prefix)
class Formatter(logging.Formatter):
"""
Logging formatter which performs custom formatting for log records
containing the ``'pwnlib_msgtype'`` attribute. Other records are formatted
using the `logging` modules default formatter.
If ``'pwnlib_msgtype'`` is set, it performs the following actions:
* A prefix looked up in `_msgtype_prefixes` is prepended to the message.
* The message is prefixed such that it starts on column four.
* If the message spans multiple lines they are split, and all subsequent
lines are indented.
This formatter is used by the handler installed on the ``'pwnlib'`` logger.
"""
# Indentation from the left side of the terminal.
# All log messages will be indented at least this far.
indent = ' '
# Newline, followed by an indent. Used to wrap multiple lines.
nlindent = '\n' + indent
def format(self, record):
# use the default formatter to actually format the record
msg = super(Formatter, self).format(record)
# then put on a prefix symbol according to the message type
msgtype = getattr(record, 'pwnlib_msgtype', None)
# if 'pwnlib_msgtype' is not set (or set to `None`) we just return the
# message as it is
if msgtype is None:
return msg
if msgtype in _msgtype_prefixes:
style, symb = _msgtype_prefixes[msgtype]
prefix = '[%s] ' % style(symb)
elif msgtype == 'indented':
prefix = self.indent
elif msgtype == 'animated':
# the handler will take care of updating the spinner, so we will
# not include it here
prefix = ''
else:
# this should never happen
prefix = '[?] '
msg = prefix + msg
msg = self.nlindent.join(msg.splitlines())
return msg
# `logging.getLogger` caches loggers by name, so multiple calls to `getLogger`
# with the same name will wrap the same underlying logger
def getLogger(name):
return Logger(logging.getLogger(name))
class LogfileHandler(logging.FileHandler):
def __init__(self):
super(LogfileHandler, self).__init__('', delay=1)
@property
def stream(self):
return context.log_file
@stream.setter
def stream(self, value):
pass
def handle(self, *a, **kw):
if self.stream.name is not None:
super(LogfileHandler, self).handle(*a, **kw)
iso_8601 = '%Y-%m-%dT%H:%M:%S'
fmt = '%(asctime)s:%(levelname)s:%(name)s:%(message)s'
log_file = LogfileHandler()
log_file.setFormatter(logging.Formatter(fmt, iso_8601))
#
# The root 'pwnlib' logger is declared here. To change the target of all
# 'pwntools'-specific logging, only this logger needs to be changed.
#
# Logging cascades upward through the hierarchy,
# so the only point that should ever need to be
# modified is the root 'pwnlib' logger.
#
# For example:
# map(rootlogger.removeHandler, rootlogger.handlers)
# logger.addHandler(myCoolPitchingHandler)
#
rootlogger = getLogger('pwnlib')
console = Handler()
formatter = Formatter()
console.setFormatter(formatter)
def install_default_handler():
'''install_default_handler()
Instantiates a :class:`Handler` and :class:`Formatter` and installs them for
the ``pwnlib`` root logger. This function is automatically called when
importing :mod:`pwn`.
'''
logger = logging.getLogger('pwnlib')
if console not in logger.handlers:
logger.addHandler(console)
logger.addHandler(log_file)
logger.setLevel(1)
|
import rows
import os
from timeit import default_timer
import json
output_path = '../package/data/'
class Brasilio(object):
def __init__(self, output_path='../package/data/', verbose=False):
self.verbose = verbose
self.output_path = output_path
self.timer = default_timer
def __enter__(self):
# Create the package directory
if not os.path.exists(self.output_path):
os.makedirs(self.output_path)
# Create an empty resources.json
json.dump([], open("resources.json", "w"), indent=2)
# Start Timer
self.start = self.timer()
return self
def __exit__(self, *args):
# Create the datapackage
create_datapackage(self.output_path, verbose=False)
# End Timer
end = self.timer()
self.elapsed_secs = end - self.start
self.elapsed = self.elapsed_secs  # seconds
if self.verbose:
print('Sucesso!\n Sua captura demorou: {0:.2f} s'.format(self.elapsed))
def generate_resources(filename, verbose=False):
data_path = os.path.join(output_path, filename)
if verbose:
print('Reading Data')
data = rows.import_from_csv(data_path)
translate = {int: 'integer',
str: 'string'}
resource = {'format': "csv",
"url": "http://brasil.io/dataset/{}?format=csv".format(filename.split('.')[0]),
"path": data_path,
"profile": "tabular-data-resource",
'schema': {
'fields': []}
}
for i, field in enumerate(data.field_names):
resource['schema']['fields'].append({'name': field,
'type': translate[data.field_types[i].TYPE[0]]})
if verbose:
print('Writing resources.json')
# print(type(resources))
# print(json.dumps(resources))
resources = json.load(open("resources.json", "r"))
resources.append(resource)
json.dump(resources, open("resources.json", "w"), indent=2)
def create_datapackage(output_path, verbose=False):
# Create datapackage.json
if verbose:
print("Criando datapackage.json")
with open("metadata.json", "r") as mfd:
output = json.load(mfd)
with open("resources.json", "r") as rfd:
output['resources'] = json.load(rfd)
with open("../package/datapackage.json", "w") as datapackage:
json.dump(output, datapackage, indent=2)
if __name__ == '__main__':
pass
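# A minimal usage sketch (assumed workflow; capture_data() is hypothetical):
# run a capture inside the context manager so resources.json is reset on entry
# and datapackage.json is assembled on exit. Requires a metadata.json in the
# working directory.
#
#   with Brasilio(verbose=True):
#       capture_data()  # writes e.g. '../package/data/population.csv'
#       generate_resources('population.csv', verbose=True)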
|
#!/usr/bin/env python3
import os
import pathlib
import sys
import subprocess
def has_cargo_fmt():
"""Runs a quick check to see if cargo fmt is installed."""
try:
c = subprocess.run(["cargo", "fmt", "--", "--help"], capture_output=True)
except OSError:
return False
else:
return c.returncode == 0
def get_modified_files():
"""Returns a list of all modified files."""
c = subprocess.run(
["git", "diff-index", "--cached", "--name-only", "HEAD"], capture_output=True
)
return [pathlib.Path(os.fsdecode(p)) for p in c.stdout.splitlines()]
def run_format_check(files):
rust_files = [x for x in files if x.suffix == ".rs" and x.is_file()]
if not rust_files:
return 0
ret = subprocess.run(
["cargo", "fmt", "--", "--check", "--color=always"] + rust_files
)
if ret.returncode != 0:
print("", file=sys.stderr)
print(
"\033[1m\033[2minfo: to fix this run `cargo fmt --all` and "
"commit again\033[0m",
file=sys.stderr,
)
return ret.returncode
def main():
if not has_cargo_fmt():
print("warning: cargo fmt not installed")
return
sys.exit(run_format_check(get_modified_files()))
if __name__ == "__main__":
main()
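# Usage sketch (paths are hypothetical; adjust to your repository layout):
# install this script as a git pre-commit hook so staged Rust files are
# checked with rustfmt before each commit:
#
#   cp tools/pre-commit-fmt.py .git/hooks/pre-commit
#   chmod +x .git/hooks/pre-commit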
|
import os
import distutils.spawn
import mpi4py
from mpi4py import MPI
def check_mpi():
mpiexec_path, _ = os.path.split(distutils.spawn.find_executable("mpiexec"))
for executable, path in mpi4py.get_config().items():
if executable not in ['mpicc', 'mpicxx', 'mpif77', 'mpif90', 'mpifort']:
continue
if mpiexec_path not in path:
raise ImportError("mpi4py may not be configured against the same version of 'mpiexec' that you are using. The 'mpiexec' path is {mpiexec_path} and mpi4py.get_config() returns:\n{mpi4py_config}\n".format(mpiexec_path=mpiexec_path, mpi4py_config=mpi4py.get_config()))
if 'Open MPI' not in MPI.get_vendor():
raise ImportError("mpi4py must have been installed against Open MPI in order for StructOpt to function correctly.")
vendor_number = ".".join([str(x) for x in MPI.get_vendor()[1]])
if vendor_number not in mpiexec_path:
raise ImportError("The MPI version that mpi4py was compiled against does not match the version of 'mpiexec'. mpi4py's version number is {}, and mpiexec's path is {}".format(MPI.get_vendor(), mpiexec_path))
check_mpi()
|
import queue
import time
import numpy as np
class CameraInformation:
def __init__(self, cam_id: str):
self._frame_queue: queue.Queue = queue.Queue(maxsize=1)
self._frame_shape = None
self._last_frame_time = None
self.is_online = True
self.node_id = cam_id
def write_frame(self, frame):
try:
self._frame_queue.get_nowait()
except queue.Empty:
pass
self._frame_shape = frame.shape
self._last_frame_time = time.time()
self._frame_queue.put_nowait(frame)
def read_frame(self):
    try:
        frame = self._frame_queue.get(timeout=2)
        if not self.is_online:
            self.is_online = True
        return frame
    except queue.Empty:
        if self.is_online:
            self.is_online = False
        # No frame arrived in time; fall back to a black frame. Guard against
        # the case where no frame was ever written and the shape is unknown.
        if self._frame_shape is None:
            return np.zeros((1, 1))
        return np.zeros(self._frame_shape)
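# A minimal usage sketch (assumed single-producer/single-consumer pattern, not
# part of this module): the writer always replaces any stale frame, and the
# reader falls back to a black frame when the camera stops producing.
#
#   cam = CameraInformation('cam-0')
#   cam.write_frame(np.zeros((480, 640, 3), dtype=np.uint8))
#   frame = cam.read_frame()  # blocks up to 2s; marks camera offline on timeout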
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..utils import Generate5tt
def test_Generate5tt_inputs():
input_map = dict(
algorithm=dict(
argstr='%s',
mandatory=True,
position=-3,
),
args=dict(argstr='%s', ),
bval_scale=dict(argstr='-bvalue_scaling %s', ),
environ=dict(
nohash=True,
usedefault=True,
),
grad_file=dict(
argstr='-grad %s',
extensions=None,
xor=['grad_fsl'],
),
grad_fsl=dict(
argstr='-fslgrad %s %s',
xor=['grad_file'],
),
in_bval=dict(extensions=None, ),
in_bvec=dict(
argstr='-fslgrad %s %s',
extensions=None,
),
in_file=dict(
argstr='%s',
extensions=None,
mandatory=True,
position=-2,
),
nthreads=dict(
argstr='-nthreads %d',
nohash=True,
),
out_file=dict(
argstr='%s',
extensions=None,
mandatory=True,
position=-1,
),
)
inputs = Generate5tt.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_Generate5tt_outputs():
output_map = dict(out_file=dict(extensions=None, ), )
outputs = Generate5tt.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
#!/usr/bin/env python
from setuptools import setup
def readme():
with open('README.md') as f:
return f.read()
setup(
name='impasse',
# Version chosen for parity with Assimp since we need ABI compatibility
version='5.0.6',
license='BSD',
description='Alternate Python bindings for the Open Asset Import Library (ASSIMP)',
long_description=readme(),
long_description_content_type="text/markdown",
url='https://github.com/SaladDais/Impasse',
author='Salad Dais',
author_email='SaladDais@users.noreply.github.com',
packages=['impasse'],
data_files=[
('share/impasse', ['README.md']),
# TODO: Make these proper console scripts
# ('share/examples/impasse', ['scripts/' + f for f in os.listdir('scripts/')]),
],
install_requires=['numpy', 'cffi'],
python_requires='>=3.7',
zip_safe=False,
tests_require=[
"pytest",
],
test_suite='tests',
)
|
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))))
from veriloggen import *
import veriloggen.thread as vthread
import veriloggen.types.axi as axi
def mkLed():
m = Module('blinkled')
clk = m.Input('CLK')
rst = m.Input('RST')
datawidth = 32
addrwidth = 10
myaxi = vthread.AXIM(m, 'myaxi', clk, rst, datawidth)
ram_a = vthread.RAM(m, 'ram_a', clk, rst, datawidth, addrwidth)
ram_b = vthread.RAM(m, 'ram_b', clk, rst, datawidth, addrwidth)
ram_c = vthread.RAM(m, 'ram_c', clk, rst, datawidth, addrwidth)
mulstrm = vthread.Stream(m, 'mul_stream', clk, rst)
mulx = mulstrm.source('x')
muly = mulstrm.source('y')
mulz = mulx * muly
mulstrm.sink(mulz, 'z')
macstrm = vthread.Stream(m, 'mac_stream', clk, rst)
a = macstrm.source('a')
b = macstrm.source('b')
a = a + 1
b = b + 1
sub = macstrm.substream(mulstrm)
sub.to_source('x', a)
sub.to_source('y', b)
c = sub.from_sink('z')
size = macstrm.parameter('size')
sum, sum_valid = macstrm.ReduceAddValid(c, size)
macstrm.sink(sum, 'sum', when=sum_valid, when_name='sum_valid')
actstrm = vthread.Stream(m, 'act_stream', clk, rst)
a = actstrm.source('a')
b = actstrm.source('b')
a = a + 1
b = b + 1
a = a + 1
b = b + 1
sub = actstrm.substream(mulstrm)
sub.to_source('x', a)
sub.to_source('y', b)
c = sub.from_sink('z')
size = actstrm.parameter('size')
sum, sum_valid = actstrm.ReduceAddValid(c, size)
sum = actstrm.Mux(sum > 0, sum, 0)
actstrm.sink(sum, 'sum', when=sum_valid, when_name='sum_valid')
all_ok = m.TmpReg(initval=0)
def comp_stream_mul(size, offset):
mulstrm.set_source('x', ram_a, offset, size)
mulstrm.set_source('y', ram_b, offset, size)
mulstrm.set_sink('z', ram_c, offset, size)
mulstrm.run()
mulstrm.join()
def comp_stream_mac(size, offset):
macstrm.set_source('a', ram_a, offset, size)
macstrm.set_source('b', ram_b, offset, size)
macstrm.set_parameter('size', size)
macstrm.set_sink('sum', ram_c, offset, 1)
macstrm.run()
macstrm.join()
def comp_stream_act(size, offset):
actstrm.set_source('a', ram_a, offset, size)
actstrm.set_source('b', ram_b, offset, size)
actstrm.set_parameter('size', size)
actstrm.set_sink('sum', ram_c, offset, 1)
actstrm.run()
actstrm.join()
def comp_sequential_mul(size, offset):
sum = 0
for i in range(size):
a = ram_a.read(i + offset)
b = ram_b.read(i + offset)
sum = a * b
ram_c.write(i + offset, sum)
def comp_sequential_mac(size, offset):
sum = 0
for i in range(size):
a = ram_a.read(i + offset) + 1
b = ram_b.read(i + offset) + 1
sum += a * b
ram_c.write(offset, sum)
def comp_sequential_act(size, offset):
sum = 0
for i in range(size):
a = ram_a.read(i + offset) + 1 + 1
b = ram_b.read(i + offset) + 1 + 1
sum += a * b
if sum <= 0:
sum = 0
ram_c.write(offset, sum)
def check(size, offset_stream, offset_seq):
for i in range(size):
st = ram_c.read(i + offset_stream)
sq = ram_c.read(i + offset_seq)
if vthread.verilog.NotEql(st, sq):
all_ok.value = False
if all_ok:
print('# verify: PASSED')
else:
print('# verify: FAILED')
def comp(size):
all_ok.value = True
# mul
# stream
offset = 0
myaxi.dma_read(ram_a, offset, 0, size)
myaxi.dma_read(ram_b, offset, 512, size)
comp_stream_mul(size, offset)
myaxi.dma_write(ram_c, offset, 1024, size)
# sequential
offset = size
myaxi.dma_read(ram_a, offset, 0, size)
myaxi.dma_read(ram_b, offset, 512, size)
comp_sequential_mul(size, offset)
myaxi.dma_write(ram_c, offset, 1024 * 2, size)
# verification
print('# MUL')
check(size, 0, offset)
# mac
# stream
offset = 0
myaxi.dma_read(ram_a, offset, 0, size)
myaxi.dma_read(ram_b, offset, 512, size)
comp_stream_mac(size, offset)
myaxi.dma_write(ram_c, offset, 1024, 1)
# sequential
offset = size
myaxi.dma_read(ram_a, offset, 0, size)
myaxi.dma_read(ram_b, offset, 512, size)
comp_sequential_mac(size, offset)
myaxi.dma_write(ram_c, offset, 1024 * 2, 1)
# verification
print('# MAC')
check(1, 0, offset)
# act
# stream
offset = 0
myaxi.dma_read(ram_a, offset, 0, size)
myaxi.dma_read(ram_b, offset, 512, size)
comp_stream_act(size, offset)
myaxi.dma_write(ram_c, offset, 1024, 1)
# sequential
offset = size
myaxi.dma_read(ram_a, offset, 0, size)
myaxi.dma_read(ram_b, offset, 512, size)
comp_sequential_act(size, offset)
myaxi.dma_write(ram_c, offset, 1024 * 2, 1)
# verification
print('# ACT')
check(1, 0, offset)
vthread.finish()
th = vthread.Thread(m, 'th_comp', clk, rst, comp)
fsm = th.start(32)
try:
actstrm.draw_graph()
except Exception:
pass
return m
def mkTest(memimg_name=None):
m = Module('test')
# target instance
led = mkLed()
# copy params and ports
params = m.copy_params(led)
ports = m.copy_sim_ports(led)
clk = ports['CLK']
rst = ports['RST']
memory = axi.AxiMemoryModel(m, 'memory', clk, rst, memimg_name=memimg_name)
memory.connect(ports, 'myaxi')
uut = m.Instance(led, 'uut',
params=m.connect_params(led),
ports=m.connect_ports(led))
#simulation.setup_waveform(m, uut)
simulation.setup_clock(m, clk, hperiod=5)
init = simulation.setup_reset(m, rst, m.make_reset(), period=100)
init.add(
Delay(1000000),
Systask('finish'),
)
return m
def run(filename='tmp.v', simtype='iverilog', outputfile=None):
if outputfile is None:
outputfile = os.path.splitext(os.path.basename(__file__))[0] + '.out'
memimg_name = 'memimg_' + outputfile
test = mkTest(memimg_name=memimg_name)
if filename is not None:
test.to_verilog(filename)
sim = simulation.Simulator(test, sim=simtype)
rslt = sim.run(outputfile=outputfile)
lines = rslt.splitlines()
if simtype == 'iverilog' or (simtype == 'verilator' and lines[-1].startswith('-')):
rslt = '\n'.join(lines[:-1])
return rslt
if __name__ == '__main__':
rslt = run(filename='tmp.v')
print(rslt)
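# Usage sketch: running this file directly writes the design to tmp.v and
# simulates it (Icarus Verilog is assumed to be on PATH):
#
#   python this_script.py
#
# The filename above is a placeholder; use this script's actual name.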
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Tests for coverage.numbits"""
import json
import sqlite3
from hypothesis import example, given, settings
from hypothesis.strategies import sets, integers
from coverage import env
from coverage.numbits import (
nums_to_numbits, numbits_to_nums, numbits_union, numbits_intersection,
numbits_any_intersection, num_in_numbits, register_sqlite_functions,
)
from tests.coveragetest import CoverageTest
# Hypothesis-generated line number data
line_numbers = integers(min_value=1, max_value=9999)
line_number_sets = sets(line_numbers)
# When coverage-testing ourselves, hypothesis complains about a test being
# flaky because the first run exceeds the deadline (and fails), and the second
# run succeeds. Disable the deadline if we are coverage-testing.
default_settings = settings()
if env.METACOV:
default_settings = settings(default_settings, deadline=None)
def good_numbits(numbits):
"""Assert that numbits is good."""
# It shouldn't end with a zero byte, that should have been trimmed off.
assert (not numbits) or (numbits[-1] != 0)
class NumbitsOpTest(CoverageTest):
"""Tests of the numbits operations in numbits.py."""
run_in_temp_dir = False
@given(line_number_sets)
@settings(default_settings)
def test_conversion(self, nums):
numbits = nums_to_numbits(nums)
good_numbits(numbits)
nums2 = numbits_to_nums(numbits)
assert nums == set(nums2)
@given(line_number_sets, line_number_sets)
@settings(default_settings)
def test_union(self, nums1, nums2):
nb1 = nums_to_numbits(nums1)
good_numbits(nb1)
nb2 = nums_to_numbits(nums2)
good_numbits(nb2)
nbu = numbits_union(nb1, nb2)
good_numbits(nbu)
union = numbits_to_nums(nbu)
assert nums1 | nums2 == set(union)
@given(line_number_sets, line_number_sets)
@settings(default_settings)
def test_intersection(self, nums1, nums2):
nb1 = nums_to_numbits(nums1)
good_numbits(nb1)
nb2 = nums_to_numbits(nums2)
good_numbits(nb2)
nbi = numbits_intersection(nb1, nb2)
good_numbits(nbi)
intersection = numbits_to_nums(nbi)
assert nums1 & nums2 == set(intersection)
@given(line_number_sets, line_number_sets)
@settings(default_settings)
def test_any_intersection(self, nums1, nums2):
nb1 = nums_to_numbits(nums1)
good_numbits(nb1)
nb2 = nums_to_numbits(nums2)
good_numbits(nb2)
inter = numbits_any_intersection(nb1, nb2)
expect = bool(nums1 & nums2)
assert expect == bool(inter)
@given(line_numbers, line_number_sets)
@settings(default_settings)
@example(152, {144})
def test_num_in_numbits(self, num, nums):
numbits = nums_to_numbits(nums)
good_numbits(numbits)
is_in = num_in_numbits(num, numbits)
assert (num in nums) == is_in
class NumbitsSqliteFunctionTest(CoverageTest):
"""Tests of the SQLite integration for numbits functions."""
run_in_temp_dir = False
def setup_test(self):
super().setup_test()
conn = sqlite3.connect(":memory:")
register_sqlite_functions(conn)
self.cursor = conn.cursor()
self.cursor.execute("create table data (id int, numbits blob)")
self.cursor.executemany(
"insert into data (id, numbits) values (?, ?)",
[
(i, nums_to_numbits(range(i, 100, i)))
for i in range(1, 11)
]
)
self.addCleanup(self.cursor.close)
def test_numbits_union(self):
res = self.cursor.execute(
"select numbits_union(" +
"(select numbits from data where id = 7)," +
"(select numbits from data where id = 9)" +
")"
)
expected = [
7, 9, 14, 18, 21, 27, 28, 35, 36, 42, 45, 49,
54, 56, 63, 70, 72, 77, 81, 84, 90, 91, 98, 99,
]
answer = numbits_to_nums(list(res)[0][0])
assert expected == answer
def test_numbits_intersection(self):
res = self.cursor.execute(
"select numbits_intersection(" +
"(select numbits from data where id = 7)," +
"(select numbits from data where id = 9)" +
")"
)
answer = numbits_to_nums(list(res)[0][0])
assert [63] == answer
def test_numbits_any_intersection(self):
res = self.cursor.execute(
"select numbits_any_intersection(?, ?)",
(nums_to_numbits([1, 2, 3]), nums_to_numbits([3, 4, 5]))
)
answer = [any_inter for (any_inter,) in res]
assert [1] == answer
res = self.cursor.execute(
"select numbits_any_intersection(?, ?)",
(nums_to_numbits([1, 2, 3]), nums_to_numbits([7, 8, 9]))
)
answer = [any_inter for (any_inter,) in res]
assert [0] == answer
def test_num_in_numbits(self):
res = self.cursor.execute("select id, num_in_numbits(12, numbits) from data order by id")
answer = [is_in for (id, is_in) in res]
assert [1, 1, 1, 1, 0, 1, 0, 0, 0, 0] == answer
def test_numbits_to_nums(self):
res = self.cursor.execute("select numbits_to_nums(?)", [nums_to_numbits([1, 2, 3])])
assert [1, 2, 3] == json.loads(res.fetchone()[0])
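# A minimal usage sketch outside the test harness (mirrors setup_test above):
# register the numbits functions on any SQLite connection to query coverage
# blobs directly in SQL.
#
#   conn = sqlite3.connect(":memory:")
#   register_sqlite_functions(conn)
#   res = conn.execute("select num_in_numbits(3, ?)", [nums_to_numbits([1, 2, 3])])
#   assert res.fetchone()[0] == 1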
|
#!/usr/bin/env python3
import os
import math
from cereal import car, log
from common.numpy_fast import clip, interp
from common.realtime import sec_since_boot, config_realtime_process, Priority, Ratekeeper, DT_CTRL
from common.profiler import Profiler
from common.params import Params, put_nonblocking
import cereal.messaging as messaging
from selfdrive.config import Conversions as CV
from selfdrive.swaglog import cloudlog
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.car.car_helpers import get_car, get_startup_event, get_one_can
from selfdrive.controls.lib.lane_planner import CAMERA_OFFSET
from selfdrive.controls.lib.drive_helpers import update_v_cruise, initialize_v_cruise
from selfdrive.controls.lib.longcontrol import LongControl, STARTING_TARGET_SPEED
from selfdrive.controls.lib.latcontrol_pid import LatControlPID
from selfdrive.controls.lib.latcontrol_indi import LatControlINDI
from selfdrive.controls.lib.latcontrol_lqr import LatControlLQR
from selfdrive.controls.lib.latcontrol_angle import LatControlAngle
from selfdrive.controls.lib.events import Events, ET
from selfdrive.controls.lib.alertmanager import AlertManager
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.controls.lib.longitudinal_planner import LON_MPC_STEP
from selfdrive.locationd.calibrationd import Calibration
from selfdrive.hardware import HARDWARE, TICI
from selfdrive.car.hyundai.scc_smoother import SccSmoother
from selfdrive.ntune import ntune_get, ntune_isEnabled
LDW_MIN_SPEED = 31 * CV.MPH_TO_MS
LANE_DEPARTURE_THRESHOLD = 0.1
STEER_ANGLE_SATURATION_TIMEOUT = 1.0 / DT_CTRL
STEER_ANGLE_SATURATION_THRESHOLD = 2.5 # Degrees
SIMULATION = "SIMULATION" in os.environ
NOSENSOR = "NOSENSOR" in os.environ
IGNORE_PROCESSES = set(["rtshield", "uploader", "deleter", "loggerd", "logmessaged", "tombstoned", "logcatd", "proclogd", "clocksd", "updated", "timezoned", "manage_athenad"])
ThermalStatus = log.DeviceState.ThermalStatus
State = log.ControlsState.OpenpilotState
PandaType = log.PandaState.PandaType
Desire = log.LateralPlan.Desire
LaneChangeState = log.LateralPlan.LaneChangeState
LaneChangeDirection = log.LateralPlan.LaneChangeDirection
EventName = car.CarEvent.EventName
class Controls:
def __init__(self, sm=None, pm=None, can_sock=None):
config_realtime_process(4 if TICI else 3, Priority.CTRL_HIGH)
# Setup sockets
self.pm = pm
if self.pm is None:
self.pm = messaging.PubMaster(['sendcan', 'controlsState', 'carState',
'carControl', 'carEvents', 'carParams'])
self.camera_packets = ["roadCameraState", "driverCameraState"]
if TICI:
self.camera_packets.append("wideRoadCameraState")
self.sm = sm
if self.sm is None:
ignore = ['driverCameraState', 'managerState'] if SIMULATION else None
self.sm = messaging.SubMaster(['deviceState', 'pandaState', 'modelV2', 'liveCalibration',
'driverMonitoringState', 'longitudinalPlan', 'lateralPlan', 'liveLocationKalman',
'managerState', 'liveParameters', 'radarState'] + self.camera_packets,
ignore_alive=ignore, ignore_avg_freq=['radarState', 'longitudinalPlan'])
self.can_sock = can_sock
if can_sock is None:
can_timeout = None if os.environ.get('NO_CAN_TIMEOUT', False) else 100
self.can_sock = messaging.sub_sock('can', timeout=can_timeout)
if TICI:
self.log_sock = messaging.sub_sock('androidLog')
# wait for one pandaState and one CAN packet
hw_type = messaging.recv_one(self.sm.sock['pandaState']).pandaState.pandaType
has_relay = hw_type in [PandaType.blackPanda, PandaType.uno, PandaType.dos]
print("Waiting for CAN messages...")
get_one_can(self.can_sock)
self.CI, self.CP = get_car(self.can_sock, self.pm.sock['sendcan'], has_relay)
# read params
params = Params()
self.is_metric = params.get_bool("IsMetric")
self.is_ldw_enabled = params.get_bool("IsLdwEnabled")
self.enable_lte_onroad = params.get_bool("EnableLteOnroad")
community_feature_toggle = params.get_bool("CommunityFeaturesToggle")
openpilot_enabled_toggle = params.get_bool("OpenpilotEnabledToggle")
passive = params.get_bool("Passive") or not openpilot_enabled_toggle
# detect sound card presence and ensure successful init
sounds_available = HARDWARE.get_sound_card_online()
car_recognized = self.CP.carName != 'mock'
fuzzy_fingerprint = self.CP.fuzzyFingerprint
# Controls are available if the stock camera is disconnected, car controls are loaded, and dashcam mode is off
controller_available = self.CP.enableCamera and self.CI.CC is not None and not passive and not self.CP.dashcamOnly
community_feature = self.CP.communityFeature or fuzzy_fingerprint
community_feature_disallowed = community_feature and (not community_feature_toggle)
self.read_only = not car_recognized or not controller_available or \
self.CP.dashcamOnly or community_feature_disallowed
if self.read_only:
self.CP.safetyModel = car.CarParams.SafetyModel.noOutput
# Write CarParams for radard
cp_bytes = self.CP.to_bytes()
params.put("CarParams", cp_bytes)
put_nonblocking("CarParamsCache", cp_bytes)
self.CC = car.CarControl.new_message()
self.AM = AlertManager()
self.events = Events()
self.LoC = LongControl(self.CP, self.CI.compute_gb)
self.VM = VehicleModel(self.CP)
if self.CP.steerControlType == car.CarParams.SteerControlType.angle:
self.LaC = LatControlAngle(self.CP)
elif self.CP.lateralTuning.which() == 'pid':
self.LaC = LatControlPID(self.CP)
elif self.CP.lateralTuning.which() == 'indi':
self.LaC = LatControlINDI(self.CP)
elif self.CP.lateralTuning.which() == 'lqr':
self.LaC = LatControlLQR(self.CP)
self.initialized = False
self.state = State.disabled
self.enabled = False
self.active = False
self.can_rcv_error = False
self.soft_disable_timer = 0
self.v_cruise_kph = 255
self.v_cruise_kph_last = 0
self.mismatch_counter = 0
self.can_error_counter = 0
self.last_blinker_frame = 0
self.saturated_count = 0
self.distance_traveled = 0
self.last_functional_fan_frame = 0
self.events_prev = []
self.current_alert_types = [ET.PERMANENT]
self.logged_comm_issue = False
# scc smoother
self.is_cruise_enabled = False
self.cruiseVirtualMaxSpeed = 0
self.clu_speed_ms = 0.
self.apply_accel = 0.
self.fused_accel = 0.
self.lead_drel = 0.
self.aReqValue = 0.
self.aReqValueMin = 0.
self.aReqValueMax = 0.
self.angle_steers_des = 0.
# TODO: no longer necessary, aside from process replay
self.sm['liveParameters'].valid = True
self.startup_event = get_startup_event(car_recognized, controller_available, fuzzy_fingerprint)
if not sounds_available:
self.events.add(EventName.soundsUnavailable, static=True)
if community_feature_disallowed:
self.events.add(EventName.communityFeatureDisallowed, static=True)
if not car_recognized:
self.events.add(EventName.carUnrecognized, static=True)
elif self.read_only:
self.events.add(EventName.dashcamMode, static=True)
# controlsd is driven by can recv, expected at 100Hz
self.rk = Ratekeeper(100, print_delay_threshold=None)
self.prof = Profiler(False) # off by default
def update_events(self, CS):
"""Compute carEvents from carState"""
self.events.clear()
self.events.add_from_msg(CS.events)
self.events.add_from_msg(self.sm['driverMonitoringState'].events)
# Handle startup event
if self.startup_event is not None:
self.events.add(self.startup_event)
self.startup_event = None
# Don't add any more events if not initialized
if not self.initialized:
self.events.add(EventName.controlsInitializing)
return
# Create events for battery, temperature, disk space, and memory
if self.sm['deviceState'].batteryPercent < 1 and self.sm['deviceState'].chargingError:
# at zero percent battery, while discharging, OP should not be allowed
self.events.add(EventName.lowBattery)
if self.sm['deviceState'].thermalStatus >= ThermalStatus.red:
self.events.add(EventName.overheat)
if self.sm['deviceState'].freeSpacePercent < 7:
# with under 7% of disk space free, engagement is not allowed
self.events.add(EventName.outOfSpace)
if self.sm['deviceState'].memoryUsagePercent > 90:
self.events.add(EventName.lowMemory)
# Alert if fan isn't spinning for 5 seconds
if self.sm['pandaState'].pandaType in [PandaType.uno, PandaType.dos]:
if self.sm['pandaState'].fanSpeedRpm == 0 and self.sm['deviceState'].fanSpeedPercentDesired > 50:
if (self.sm.frame - self.last_functional_fan_frame) * DT_CTRL > 5.0:
self.events.add(EventName.fanMalfunction)
else:
self.last_functional_fan_frame = self.sm.frame
# Handle calibration status
cal_status = self.sm['liveCalibration'].calStatus
if cal_status != Calibration.CALIBRATED:
if cal_status == Calibration.UNCALIBRATED:
self.events.add(EventName.calibrationIncomplete)
else:
self.events.add(EventName.calibrationInvalid)
# Handle lane change
if self.sm['lateralPlan'].laneChangeState == LaneChangeState.preLaneChange:
direction = self.sm['lateralPlan'].laneChangeDirection
if (CS.leftBlindspot and direction == LaneChangeDirection.left) or \
(CS.rightBlindspot and direction == LaneChangeDirection.right):
self.events.add(EventName.laneChangeBlocked)
elif self.sm['lateralPlan'].autoLaneChangeEnabled and self.sm['lateralPlan'].autoLaneChangeTimer > 0:
self.events.add(EventName.autoLaneChange)
else:
if direction == LaneChangeDirection.left:
self.events.add(EventName.preLaneChangeLeft)
else:
self.events.add(EventName.preLaneChangeRight)
elif self.sm['lateralPlan'].laneChangeState in [LaneChangeState.laneChangeStarting,
LaneChangeState.laneChangeFinishing]:
self.events.add(EventName.laneChange)
if self.can_rcv_error or not CS.canValid:
self.events.add(EventName.canError)
safety_mismatch = self.sm['pandaState'].safetyModel != self.CP.safetyModel or self.sm['pandaState'].safetyParam != self.CP.safetyParam
if safety_mismatch or self.mismatch_counter >= 200:
self.events.add(EventName.controlsMismatch)
if not self.sm['liveParameters'].valid:
self.events.add(EventName.vehicleModelInvalid)
if len(self.sm['radarState'].radarErrors):
self.events.add(EventName.radarFault)
elif not self.sm.valid["pandaState"]:
self.events.add(EventName.usbError)
elif not self.sm.all_alive_and_valid():
self.events.add(EventName.commIssue)
if not self.logged_comm_issue:
cloudlog.error(f"commIssue - valid: {self.sm.valid} - alive: {self.sm.alive}")
self.logged_comm_issue = True
else:
self.logged_comm_issue = False
if not self.sm['lateralPlan'].mpcSolutionValid and not (EventName.turningIndicatorOn in self.events.names):
self.events.add(EventName.plannerError)
if not self.sm['liveLocationKalman'].sensorsOK and not NOSENSOR:
if self.sm.frame > 5 / DT_CTRL: # Give locationd some time to receive all the inputs
self.events.add(EventName.sensorDataInvalid)
if not self.sm['liveLocationKalman'].posenetOK:
self.events.add(EventName.posenetInvalid)
if not self.sm['liveLocationKalman'].deviceStable:
self.events.add(EventName.deviceFalling)
if log.PandaState.FaultType.relayMalfunction in self.sm['pandaState'].faults:
self.events.add(EventName.relayMalfunction)
if self.sm['longitudinalPlan'].fcw or (self.enabled and self.sm['modelV2'].meta.hardBrakePredicted):
self.events.add(EventName.fcw)
if TICI and self.enable_lte_onroad:
logs = messaging.drain_sock(self.log_sock, wait_for_one=False)
messages = []
for m in logs:
try:
messages.append(m.androidLog.message)
except UnicodeDecodeError:
pass
for err in ["ERROR_CRC", "ERROR_ECC", "ERROR_STREAM_UNDERFLOW", "APPLY FAILED"]:
for m in messages:
if err not in m:
continue
csid = m.split("CSID:")[-1].split(" ")[0]
evt = {"0": EventName.wideRoadCameraError, "1": EventName.roadCameraError,
"2": EventName.driverCameraError}.get(csid, None)
if evt is not None:
self.events.add(evt)
# TODO: fix simulator
if not SIMULATION:
#if not NOSENSOR:
# if not self.sm['liveLocationKalman'].gpsOK and (self.distance_traveled > 1000) and \
# (not TICI or self.enable_lte_onroad):
# # Don't show in the first 1 km, to allow for driving out of the garage. This event shows after 5 minutes
# self.events.add(EventName.noGps)
if not self.sm.all_alive(self.camera_packets):
self.events.add(EventName.cameraMalfunction)
if self.sm['modelV2'].frameDropPerc > 20:
self.events.add(EventName.modeldLagging)
# Check if all manager processes are running
not_running = set(p.name for p in self.sm['managerState'].processes if not p.running)
if self.sm.rcv_frame['managerState'] and (not_running - IGNORE_PROCESSES):
self.events.add(EventName.processNotRunning)
# Only allow engagement with brake pressed when stopped behind another stopped car
#if CS.brakePressed and self.sm['longitudinalPlan'].vTargetFuture >= STARTING_TARGET_SPEED \
#and self.CP.openpilotLongitudinalControl and CS.vEgo < 0.3:
#self.events.add(EventName.noTarget)
def data_sample(self):
"""Receive data from sockets and update carState"""
# Update carState from CAN
can_strs = messaging.drain_sock_raw(self.can_sock, wait_for_one=True)
CS = self.CI.update(self.CC, can_strs)
self.sm.update(0)
all_valid = CS.canValid and self.sm.all_alive_and_valid()
if not self.initialized and (all_valid or self.sm.frame * DT_CTRL > 2.0):
self.initialized = True
Params().put_bool("ControlsReady", True)
# Check for CAN timeout
if not can_strs:
self.can_error_counter += 1
self.can_rcv_error = True
else:
self.can_rcv_error = False
# When the panda and controlsd do not agree on controls_allowed
# we want to disengage openpilot. However, the panda's status arrives over a
# different socket than the CAN messages, so one can arrive earlier than the other.
# Therefore we allow a mismatch for two samples, then trigger the disengagement.
if not self.enabled:
self.mismatch_counter = 0
if not self.sm['pandaState'].controlsAllowed and self.enabled:
self.mismatch_counter += 1
self.distance_traveled += CS.vEgo * DT_CTRL
return CS
def state_transition(self, CS):
"""Compute conditional state transitions and execute actions on state transitions"""
self.v_cruise_kph_last = self.v_cruise_kph
# if stock cruise is completely disabled, then we can use our own set speed logic
self.CP.enableCruise = self.CI.CP.enableCruise
#if not self.CP.enableCruise:
# self.v_cruise_kph = update_v_cruise(self.v_cruise_kph, CS.buttonEvents, self.enabled, self.is_metric)
#elif self.CP.enableCruise and CS.cruiseState.enabled:
# self.v_cruise_kph = CS.cruiseState.speed * CV.MS_TO_KPH
SccSmoother.update_cruise_buttons(self, CS, self.CP.openpilotLongitudinalControl)
# decrease the soft disable timer at every step, as it's reset on
# entrance in SOFT_DISABLING state
self.soft_disable_timer = max(0, self.soft_disable_timer - 1)
self.current_alert_types = [ET.PERMANENT]
# ENABLED, PRE ENABLING, SOFT DISABLING
if self.state != State.disabled:
# user and immediate disable always have priority in a non-disabled state
if self.events.any(ET.USER_DISABLE):
self.state = State.disabled
self.current_alert_types.append(ET.USER_DISABLE)
elif self.events.any(ET.IMMEDIATE_DISABLE):
self.state = State.disabled
self.current_alert_types.append(ET.IMMEDIATE_DISABLE)
else:
# ENABLED
if self.state == State.enabled:
if self.events.any(ET.SOFT_DISABLE):
self.state = State.softDisabling
self.soft_disable_timer = 50 # 0.5s
self.current_alert_types.append(ET.SOFT_DISABLE)
# SOFT DISABLING
elif self.state == State.softDisabling:
if not self.events.any(ET.SOFT_DISABLE):
# no more soft disabling condition, so go back to ENABLED
self.state = State.enabled
elif self.events.any(ET.SOFT_DISABLE) and self.soft_disable_timer > 0:
self.current_alert_types.append(ET.SOFT_DISABLE)
elif self.soft_disable_timer <= 0:
self.state = State.disabled
# PRE ENABLING
elif self.state == State.preEnabled:
if not self.events.any(ET.PRE_ENABLE):
self.state = State.enabled
else:
self.current_alert_types.append(ET.PRE_ENABLE)
# DISABLED
elif self.state == State.disabled:
if self.events.any(ET.ENABLE):
if self.events.any(ET.NO_ENTRY):
self.current_alert_types.append(ET.NO_ENTRY)
else:
if self.events.any(ET.PRE_ENABLE):
self.state = State.preEnabled
else:
self.state = State.enabled
self.current_alert_types.append(ET.ENABLE)
self.v_cruise_kph = initialize_v_cruise(CS.vEgo, CS.buttonEvents, self.v_cruise_kph_last)
# Check if actuators are enabled
self.active = self.state == State.enabled or self.state == State.softDisabling
if self.active:
self.current_alert_types.append(ET.WARNING)
# Check if openpilot is engaged
self.enabled = self.active or self.state == State.preEnabled
def state_control(self, CS):
"""Given the state, this function returns an actuators packet"""
# Update VehicleModel
params = self.sm['liveParameters']
x = max(params.stiffnessFactor, 0.1)
#sr = max(params.steerRatio, 0.1)
if ntune_isEnabled('useLiveSteerRatio'):
sr = max(params.steerRatio, 0.1)
else:
sr = max(ntune_get('steerRatio'), 0.1)
self.VM.update_params(x, sr)
lat_plan = self.sm['lateralPlan']
long_plan = self.sm['longitudinalPlan']
actuators = car.CarControl.Actuators.new_message()
if CS.leftBlinker or CS.rightBlinker:
self.last_blinker_frame = self.sm.frame
# State specific actions
if not self.active:
self.LaC.reset()
self.LoC.reset(v_pid=CS.vEgo)
long_plan_age = DT_CTRL * (self.sm.frame - self.sm.rcv_frame['longitudinalPlan'])
# no greater than the MPC timestep plus dt, to prevent extrapolating too far
dt = min(long_plan_age, LON_MPC_STEP + DT_CTRL) + DT_CTRL
a_acc_sol = long_plan.aStart + (dt / LON_MPC_STEP) * (long_plan.aTarget - long_plan.aStart)
v_acc_sol = long_plan.vStart + dt * (a_acc_sol + long_plan.aStart) / 2.0
# Gas/Brake PID loop
#actuators.gas, actuators.brake = self.LoC.update(self.active, CS, v_acc_sol, long_plan.vTargetFuture, a_acc_sol, self.CP)
# scc smoother
actuators.gas, actuators.brake = self.LoC.update(self.active and CS.cruiseState.speed > 1.,
CS,
v_acc_sol,
long_plan.vTargetFuture,
a_acc_sol,
self.CP,
self.sm['radarState'])
# Steering PID loop and lateral MPC
actuators.steer, actuators.steeringAngleDeg, lac_log = self.LaC.update(self.active, CS, self.CP, self.VM, params, lat_plan)
# Check for difference between desired angle and angle for angle based control
angle_control_saturated = self.CP.steerControlType == car.CarParams.SteerControlType.angle and \
abs(actuators.steeringAngleDeg - CS.steeringAngleDeg) > STEER_ANGLE_SATURATION_THRESHOLD
if angle_control_saturated and not CS.steeringPressed and self.active:
self.saturated_count += 1
else:
self.saturated_count = 0
# Send a "steering required alert" if saturation count has reached the limit
if (lac_log.saturated and not CS.steeringPressed) or \
(self.saturated_count > STEER_ANGLE_SATURATION_TIMEOUT):
if len(lat_plan.dPathPoints):
# Check if we deviated from the path
left_deviation = actuators.steer > 0 and lat_plan.dPathPoints[0] < -0.1
right_deviation = actuators.steer < 0 and lat_plan.dPathPoints[0] > 0.1
# if left_deviation or right_deviation:
# self.events.add(EventName.steerSaturated)
return actuators, v_acc_sol, a_acc_sol, lac_log
def publish_logs(self, CS, start_time, actuators, v_acc, a_acc, lac_log):
"""Send actuators and hud commands to the car, send controlsstate and MPC logging"""
CC = car.CarControl.new_message()
CC.enabled = self.enabled
CC.actuators = actuators
CC.cruiseControl.override = True
CC.cruiseControl.cancel = self.CP.enableCruise and not self.enabled and CS.cruiseState.enabled
# Some override values for Honda
# brake discount removes a sharp nonlinearity
brake_discount = (1.0 - clip(actuators.brake * 3., 0.0, 1.0))
speed_override = max(0.0, (self.LoC.v_pid + CS.cruiseState.speedOffset) * brake_discount)
CC.cruiseControl.speedOverride = float(speed_override if self.CP.enableCruise else 0.0)
CC.cruiseControl.accelOverride = self.CI.calc_accel_override(CS.aEgo, self.sm['longitudinalPlan'].aTarget, CS.vEgo, self.sm['longitudinalPlan'].vTarget)
CC.hudControl.setSpeed = float(self.v_cruise_kph * CV.KPH_TO_MS)
CC.hudControl.speedVisible = self.enabled
CC.hudControl.lanesVisible = self.enabled
CC.hudControl.leadVisible = self.sm['longitudinalPlan'].hasLead
right_lane_visible = self.sm['lateralPlan'].rProb > 0.5
left_lane_visible = self.sm['lateralPlan'].lProb > 0.5
CC.hudControl.rightLaneVisible = bool(right_lane_visible)
CC.hudControl.leftLaneVisible = bool(left_lane_visible)
recent_blinker = (self.sm.frame - self.last_blinker_frame) * DT_CTRL < 5.0 # 5s blinker cooldown
ldw_allowed = self.is_ldw_enabled and CS.vEgo > LDW_MIN_SPEED and not recent_blinker \
and not self.active and self.sm['liveCalibration'].calStatus == Calibration.CALIBRATED
meta = self.sm['modelV2'].meta
if len(meta.desirePrediction) and ldw_allowed:
l_lane_change_prob = meta.desirePrediction[Desire.laneChangeLeft - 1]
r_lane_change_prob = meta.desirePrediction[Desire.laneChangeRight - 1]
cameraOffset = ntune_get("cameraOffset")
l_lane_close = left_lane_visible and (self.sm['modelV2'].laneLines[1].y[0] > -(1.08 + cameraOffset))
r_lane_close = right_lane_visible and (self.sm['modelV2'].laneLines[2].y[0] < (1.08 - cameraOffset))
CC.hudControl.leftLaneDepart = bool(l_lane_change_prob > LANE_DEPARTURE_THRESHOLD and l_lane_close)
CC.hudControl.rightLaneDepart = bool(r_lane_change_prob > LANE_DEPARTURE_THRESHOLD and r_lane_close)
if CC.hudControl.rightLaneDepart or CC.hudControl.leftLaneDepart:
self.events.add(EventName.ldw)
clear_event = ET.WARNING if ET.WARNING not in self.current_alert_types else None
alerts = self.events.create_alerts(self.current_alert_types, [self.CP, self.sm, self.is_metric])
self.AM.add_many(self.sm.frame, alerts, self.enabled)
self.AM.process_alerts(self.sm.frame, clear_event)
CC.hudControl.visualAlert = self.AM.visual_alert
if not self.read_only and self.initialized:
# send car controls over can
can_sends = self.CI.apply(CC, self)
self.pm.send('sendcan', can_list_to_can_capnp(can_sends, msgtype='sendcan', valid=CS.canValid))
force_decel = (self.sm['driverMonitoringState'].awarenessStatus < 0.) or \
(self.state == State.softDisabling)
# Curvature & Steering angle
params = self.sm['liveParameters']
lat_plan = self.sm['lateralPlan']
steer_angle_without_offset = math.radians(CS.steeringAngleDeg - params.angleOffsetAverageDeg)
curvature = -self.VM.calc_curvature(steer_angle_without_offset, CS.vEgo)
self.angle_steers_des = math.degrees(self.VM.get_steer_from_curvature(-lat_plan.curvature, CS.vEgo))
self.angle_steers_des += params.angleOffsetDeg
# controlsState
dat = messaging.new_message('controlsState')
dat.valid = CS.canValid
controlsState = dat.controlsState
controlsState.alertText1 = self.AM.alert_text_1
controlsState.alertText2 = self.AM.alert_text_2
controlsState.alertSize = self.AM.alert_size
controlsState.alertStatus = self.AM.alert_status
controlsState.alertBlinkingRate = self.AM.alert_rate
controlsState.alertType = self.AM.alert_type
controlsState.alertSound = self.AM.audible_alert
controlsState.canMonoTimes = list(CS.canMonoTimes)
controlsState.longitudinalPlanMonoTime = self.sm.logMonoTime['longitudinalPlan']
controlsState.lateralPlanMonoTime = self.sm.logMonoTime['lateralPlan']
controlsState.enabled = self.enabled
controlsState.active = self.active
controlsState.curvature = curvature
controlsState.steeringAngleDesiredDeg = self.angle_steers_des
controlsState.state = self.state
controlsState.engageable = not self.events.any(ET.NO_ENTRY)
controlsState.longControlState = self.LoC.long_control_state
controlsState.vPid = float(self.LoC.v_pid)
controlsState.vCruise = float(self.cruiseVirtualMaxSpeed if self.CP.openpilotLongitudinalControl else self.v_cruise_kph)
controlsState.upAccelCmd = float(self.LoC.pid.p)
controlsState.uiAccelCmd = float(self.LoC.pid.i)
controlsState.ufAccelCmd = float(self.LoC.pid.f)
controlsState.vTargetLead = float(v_acc)
controlsState.aTarget = float(a_acc)
controlsState.cumLagMs = -self.rk.remaining * 1000.
controlsState.startMonoTime = int(start_time * 1e9)
controlsState.forceDecel = bool(force_decel)
controlsState.canErrorCounter = self.can_error_counter
controlsState.angleSteers = steer_angle_without_offset * CV.RAD_TO_DEG
controlsState.cluSpeedMs = self.clu_speed_ms
controlsState.applyAccel = self.apply_accel
controlsState.fusedAccel = self.fused_accel
controlsState.leadDist = self.lead_drel
controlsState.aReqValue = self.aReqValue
controlsState.aReqValueMin = self.aReqValueMin
controlsState.aReqValueMax = self.aReqValueMax
controlsState.steerRatio = self.VM.sR
controlsState.steerRateCost = ntune_get('steerRateCost')
controlsState.steerActuatorDelay = ntune_get('steerActuatorDelay')
if self.CP.steerControlType == car.CarParams.SteerControlType.angle:
controlsState.lateralControlState.angleState = lac_log
elif self.CP.lateralTuning.which() == 'pid':
controlsState.lateralControlState.pidState = lac_log
elif self.CP.lateralTuning.which() == 'lqr':
controlsState.lateralControlState.lqrState = lac_log
elif self.CP.lateralTuning.which() == 'indi':
controlsState.lateralControlState.indiState = lac_log
self.pm.send('controlsState', dat)
# carState
car_events = self.events.to_msg()
cs_send = messaging.new_message('carState')
cs_send.valid = CS.canValid
cs_send.carState = CS
cs_send.carState.events = car_events
self.pm.send('carState', cs_send)
# carEvents - logged every second or on change
if (self.sm.frame % int(1. / DT_CTRL) == 0) or (self.events.names != self.events_prev):
ce_send = messaging.new_message('carEvents', len(self.events))
ce_send.carEvents = car_events
self.pm.send('carEvents', ce_send)
self.events_prev = self.events.names.copy()
# carParams - logged every 50 seconds (> 1 per segment)
if (self.sm.frame % int(50. / DT_CTRL) == 0):
cp_send = messaging.new_message('carParams')
cp_send.carParams = self.CP
self.pm.send('carParams', cp_send)
# carControl
cc_send = messaging.new_message('carControl')
cc_send.valid = CS.canValid
cc_send.carControl = CC
self.pm.send('carControl', cc_send)
# copy CarControl to pass to CarInterface on the next iteration
self.CC = CC
def step(self):
start_time = sec_since_boot()
self.prof.checkpoint("Ratekeeper", ignore=True)
# Sample data from sockets and get a carState
CS = self.data_sample()
self.prof.checkpoint("Sample")
self.update_events(CS)
if not self.read_only and self.initialized:
# Update control state
self.state_transition(CS)
self.prof.checkpoint("State transition")
# Compute actuators (runs PID loops and lateral MPC)
actuators, v_acc, a_acc, lac_log = self.state_control(CS)
self.prof.checkpoint("State Control")
# Publish data
self.publish_logs(CS, start_time, actuators, v_acc, a_acc, lac_log)
self.prof.checkpoint("Sent")
def controlsd_thread(self):
while True:
self.step()
self.rk.monitor_time()
self.prof.display()
def main(sm=None, pm=None, logcan=None):
controls = Controls(sm, pm, logcan)
controls.controlsd_thread()
if __name__ == "__main__":
main()
|
import cozmo
name = input("What is your name? ")
def cozmo_program(robot: cozmo.robot.Robot):
robot.say_text(
f"Hi! My name is Cozmo. How are you, {name}?").wait_for_completed()
cozmo.run_program(cozmo_program)
|
# Copyright (c) 2016 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, unicode_literals, print_function
import pytest
from thriftrw._buffer import ReadBuffer
from thriftrw._buffer import WriteBuffer
from thriftrw.errors import EndOfInputError
def test_empty_write_buffer():
buff = WriteBuffer(10)
assert buff.length == 0
assert buff.capacity == 10
assert buff.value == b''
def test_empty_read_buffer():
buff = ReadBuffer(b'')
assert buff.take(0) == b''
with pytest.raises(EndOfInputError):
buff.take(1)
def test_simple_write():
buff = WriteBuffer(10)
buff.write_bytes(b'hello ')
buff.write_bytes(b'world')
assert buff.value == b'hello world'
assert buff.length == 11
def test_simple_read():
buff = ReadBuffer(b'abcd')
assert buff.take(1) == b'a'
assert buff.take(2) == b'bc'
with pytest.raises(EndOfInputError):
buff.take(2)
assert buff.take(1) == b'd'
def test_write_clear():
buff = WriteBuffer(10)
buff.write_bytes(b'foo')
buff.clear()
assert buff.value == b''
assert buff.capacity == 10
assert buff.length == 0
|
from binascii import crc32
from contextlib import contextmanager
from datetime import datetime, timedelta, timezone
from pathlib import Path
from osgeo import gdal
import pytest
import rasterio
from click.testing import CliRunner
from rasterio import DatasetReader
from rasterio.enums import Compression
from rio_cogeo import cogeo
import eodatasets3
from eodatasets3.model import DatasetDoc
from tests import assert_file_structure
from tests.common import assert_same_as_file
from . import assert_image
h5py = pytest.importorskip(
"h5py",
reason="Extra dependencies needed to run wagl package test. "
"Try pip install eodatasets3[wagl]",
)
# These test datasets come from running `tests/integration/h5downsample.py` on a real
# wagl output.
WAGL_LANDSAT_OUTPUT: Path = (
Path(__file__).parent
/ "data/wagl-input/LC80920842016180LGN01/LC80920842016180LGN01.wagl.h5"
)
WAGL_SENTINEL_OUTPUT: Path = (
Path(__file__).parent
/ "data/wagl-input/S2A_OPER_MSI_L1C_TL_EPAE_20201031T022859_A027984_T53JQJ_N02.09/"
"S2A_OPER_MSI_L1C_TL_EPAE_20201031T022859_A027984_T53JQJ_N02.09.wagl.h5"
)
# The matching Level1 metadata (produced by landsat_l1_prepare.py)
L1_METADATA_PATH: Path = (
Path(__file__).parent
/ "data/wagl-input/LC08_L1TP_092084_20160628_20170323_01_T1.yaml"
)
S2_L1_METADATA_PATH: Path = (
Path(__file__).parent
/ "data/wagl-input/S2A_MSIL1C_20201031T004711_N0209_R102_T53JQJ_20201031T022859.odc-metadata.yaml"
)
def test_whole_landsat_wagl_package(
l1_ls8_dataset: DatasetDoc, l1_ls8_folder: Path, tmp_path: Path
):
out = tmp_path
from eodatasets3.scripts import packagewagl
# No warnings should be logged during package.
# We could tighten this to specific warnings if it proves too noisy, but it's
# useful for catching things like unclosed files.
with expect_no_warnings():
res = CliRunner().invoke(
packagewagl.run,
map(
str,
(WAGL_LANDSAT_OUTPUT, "--level1", L1_METADATA_PATH, "--output", out),
),
catch_exceptions=False,
)
# The last line of output ends with the dataset path.
words, reported_metadata = res.output.splitlines()[-1].rsplit(" ", 1)
expected_folder = out / "ga_ls8c_ard_3/092/084/2016/06/28"
assert_file_structure(
expected_folder,
{
"ga_ls8c_ard_3-2-1_092084_2016-06-28_final.odc-metadata.yaml": "",
"ga_ls8c_ard_3-2-1_092084_2016-06-28_final.proc-info.yaml": "",
"ga_ls8c_ard_3-2-1_092084_2016-06-28_final.sha1": "",
"ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band01.tif": "",
"ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band02.tif": "",
"ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band03.tif": "",
"ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band04.tif": "",
"ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band05.tif": "",
"ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band06.tif": "",
"ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band07.tif": "",
"ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band08.tif": "",
"ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_thumbnail.jpg": "",
"ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band01.tif": "",
"ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band02.tif": "",
"ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band03.tif": "",
"ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band04.tif": "",
"ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band05.tif": "",
"ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band06.tif": "",
"ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band07.tif": "",
"ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band08.tif": "",
"ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_thumbnail.jpg": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_azimuthal-exiting.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_azimuthal-incident.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_combined-terrain-shadow.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_exiting-angle.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_fmask.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_incident-angle.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_nbar-contiguity.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_nbart-contiguity.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_relative-azimuth.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_relative-slope.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_satellite-azimuth.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_satellite-view.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_solar-azimuth.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_solar-zenith.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_time-delta.tif": "",
},
)
[output_metadata] = expected_folder.rglob("*.odc-metadata.yaml")
assert reported_metadata == str(
output_metadata
), "Cli didn't report the expected output path"
# Checksum should include all files other than itself.
[checksum_file] = expected_folder.rglob("*.sha1")
all_output_files = set(
p.relative_to(checksum_file.parent)
for p in expected_folder.rglob("*")
if p != checksum_file
)
files_in_checksum = {
Path(line.split("\t")[1]) for line in checksum_file.read_text().splitlines()
}
assert all_output_files == files_in_checksum
# Verify the computed contiguity looks the same. (metadata fields will depend on it)
[image] = expected_folder.rglob("*_oa_*nbar-contiguity.tif")
assert_image(image, nodata=255, unique_pixel_counts={0: 1978, 1: 4184})
[image] = expected_folder.rglob("*_oa_*nbart-contiguity.tif")
assert_image(image, nodata=255, unique_pixel_counts={0: 1979, 1: 4183})
assert_same_as_file(
{
"$schema": "https://schemas.opendatacube.org/dataset",
# A stable ID is taken from the WAGL doc.
"id": "787eb74c-e7df-43d6-b562-b796137330ae",
"label": "ga_ls8c_ard_3-2-1_092084_2016-06-28_final",
"product": {
"href": "https://collections.dea.ga.gov.au/product/ga_ls8c_ard_3",
"name": "ga_ls8c_ard_3",
},
"crs": "epsg:32655",
"geometry": {
"coordinates": [
[
[386_170.809_107_605_5, -3_787_581.737_315_514_6],
[393_422.698_122_467_44, -3_754_539.332_156_166_4],
[402_370.463_567_812_2, -3_717_207.883_853_628_3],
[405_296.703_429_750_9, -3_713_106.822_612_258_6],
[405_302.307_692_307_7, -3_713_085.0],
[560_999.714_134_832_8, -3_745_790.820_117_99],
[591_203.344_050_317_7, -3_755_934.776_849_929_2],
[593_107.5, -3_756_373.614_649_681_4],
[593_066.089_284_004_1, -3_756_560.384_007_281_6],
[593_115.0, -3_756_576.810_780_758],
[593_115.0, -3_769_934.639_090_926_4],
[555_895.771_981_598_6, -3_924_204.823_795_153],
[554_316.830_569_659_8, -3_931_326.117_549_759],
[553_913.572_308_820_1, -3_932_420.854_216_015],
[550_505.686_408_068, -3_946_546.219_392_854],
[548_673.645_879_151_9, -3_946_645.831_477_726_3],
[548_393.076_923_077, -3_947_407.5],
[543_888.417_289_877_3, -3_946_906.014_911_907],
[535_826.373_854_402_9, -3_947_344.365_997_631_6],
[362_232.941_315_876_84, -3_905_575.014_223_633],
[362_109.819_892_458_1, -3_904_490.351_889_350_5],
[360_592.5, -3_904_126.385_350_318_6],
[361_565.347_585_850_9, -3_899_693.716_286_561_5],
[360_585.0, -3_891_057.151_898_734_3],
[366_618.297_729_428_5, -3_863_717.869_440_751],
[386_170.809_107_605_5, -3_787_581.737_315_514_6],
]
],
"type": "Polygon",
},
"grids": {
"default": {
"shape": [79, 78],
"transform": [
2981.153_846_153_846,
0.0,
360_585.0,
0.0,
-2966.202_531_645_569_7,
-3_713_085.0,
0.0,
0.0,
1.0,
],
},
"panchromatic": {
"shape": [157, 156],
"transform": [
1490.480_769_230_769_3,
0.0,
360_592.5,
0.0,
-1492.452_229_299_363,
-3_713_092.5,
0.0,
0.0,
1.0,
],
},
},
"properties": {
"datetime": datetime(2016, 6, 28, 0, 2, 28, 624_635),
"dea:dataset_maturity": "final",
"dtr:end_datetime": datetime(2016, 6, 28, 0, 2, 43, 114_771),
"dtr:start_datetime": datetime(2016, 6, 28, 0, 2, 14, 25815),
"eo:cloud_cover": 63.069_613_577_531_236,
"eo:gsd": 1490.480_769_230_769_3,
"eo:instrument": "OLI_TIRS",
"eo:platform": "landsat-8",
"eo:sun_azimuth": 33.655_125_34,
"eo:sun_elevation": 23.988_361_72,
"fmask:clear": 32.735_343_657_403_305,
"fmask:cloud": 63.069_613_577_531_236,
"fmask:cloud_shadow": 4.139_470_857_647_722,
"fmask:snow": 0.005_053_323_801_138_007,
"fmask:water": 0.050_518_583_616_596_675,
"gqa:abs_iterative_mean_x": 0.21,
"gqa:abs_iterative_mean_xy": 0.27,
"gqa:abs_iterative_mean_y": 0.18,
"gqa:abs_x": 0.3,
"gqa:abs_xy": 0.39,
"gqa:abs_y": 0.25,
"gqa:cep90": 0.46,
"gqa:iterative_mean_x": -0.17,
"gqa:iterative_mean_xy": 0.21,
"gqa:iterative_mean_y": 0.12,
"gqa:iterative_stddev_x": 0.19,
"gqa:iterative_stddev_xy": 0.25,
"gqa:iterative_stddev_y": 0.17,
"gqa:mean_x": -0.1,
"gqa:mean_xy": 0.14,
"gqa:mean_y": 0.1,
"gqa:stddev_x": 0.35,
"gqa:stddev_xy": 0.45,
"gqa:stddev_y": 0.29,
"landsat:collection_category": "T1",
"landsat:collection_number": 1,
"landsat:landsat_product_id": "LC08_L1TP_092084_20160628_20170323_01_T1",
"landsat:landsat_scene_id": "LC80920842016180LGN01",
"landsat:wrs_path": 92,
"landsat:wrs_row": 84,
"odc:dataset_version": "3.2.1",
"odc:file_format": "GeoTIFF",
"odc:processing_datetime": datetime(2019, 7, 11, 23, 29, 29, 21245),
"odc:producer": "ga.gov.au",
"odc:product_family": "ard",
"odc:region_code": "092084",
},
"measurements": {
"nbar_blue": {
"path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band02.tif"
},
"nbar_coastal_aerosol": {
"path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band01.tif"
},
"nbar_green": {
"path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band03.tif"
},
"nbar_nir": {
"path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band05.tif"
},
"nbar_panchromatic": {
"grid": "panchromatic",
"path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band08.tif",
},
"nbar_red": {
"path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band04.tif"
},
"nbar_swir_1": {
"path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band06.tif"
},
"nbar_swir_2": {
"path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band07.tif"
},
"nbart_blue": {
"path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band02.tif"
},
"nbart_coastal_aerosol": {
"path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band01.tif"
},
"nbart_green": {
"path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band03.tif"
},
"nbart_nir": {
"path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band05.tif"
},
"nbart_panchromatic": {
"grid": "panchromatic",
"path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band08.tif",
},
"nbart_red": {
"path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band04.tif"
},
"nbart_swir_1": {
"path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band06.tif"
},
"nbart_swir_2": {
"path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band07.tif"
},
"oa_azimuthal_exiting": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_azimuthal-exiting.tif"
},
"oa_azimuthal_incident": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_azimuthal-incident.tif"
},
"oa_combined_terrain_shadow": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_combined-terrain-shadow.tif"
},
"oa_exiting_angle": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_exiting-angle.tif"
},
"oa_fmask": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_fmask.tif"
},
"oa_incident_angle": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_incident-angle.tif"
},
"oa_nbar_contiguity": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_nbar-contiguity.tif"
},
"oa_nbart_contiguity": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_nbart-contiguity.tif"
},
"oa_relative_azimuth": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_relative-azimuth.tif"
},
"oa_relative_slope": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_relative-slope.tif"
},
"oa_satellite_azimuth": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_satellite-azimuth.tif"
},
"oa_satellite_view": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_satellite-view.tif"
},
"oa_solar_azimuth": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_solar-azimuth.tif"
},
"oa_solar_zenith": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_solar-zenith.tif"
},
"oa_time_delta": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_time-delta.tif"
},
},
"accessories": {
"checksum:sha1": {
"path": "ga_ls8c_ard_3-2-1_092084_2016-06-28_final.sha1"
},
"metadata:processor": {
"path": "ga_ls8c_ard_3-2-1_092084_2016-06-28_final.proc-info.yaml"
},
"thumbnail:nbar": {
"path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_thumbnail.jpg"
},
"thumbnail:nbart": {
"path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_thumbnail.jpg"
},
},
"lineage": {"level1": ["fb1c622e-90aa-50e8-9d5e-ad69db82d0f6"]},
},
output_metadata,
)
[proc_info] = expected_folder.rglob("*.proc-info.yaml")
assert_same_as_file(
{
"fmask": {
"parameters": {
"cloud_buffer_distance_metres": 0.0,
"cloud_shadow_buffer_distance_metres": 0.0,
"frantz_parallax_sentinel_2": False,
},
"percent_class_distribution": {
"clear": 32.735_343_657_403_305,
"cloud": 63.069_613_577_531_236,
"cloud_shadow": 4.139_470_857_647_722,
"snow": 0.005_053_323_801_138_007,
"water": 0.050_518_583_616_596_675,
},
},
"software_versions": [
{
"name": "modtran",
"url": "http://www.ontar.com/software/productdetails.aspx?item=modtran",
"version": "6.0.1",
},
{
"name": "wagl",
"url": "https://github.com/GeoscienceAustralia/wagl.git",
"version": "5.3.1+118.g9edd420",
},
{
"name": "eugl",
"url": "https://github.com/OpenDataCubePipelines/eugl.git",
"version": "0.0.2+69.gb1d1231",
},
{"name": "gverify", "url": None, "version": "v0.25c"},
{
"name": "fmask",
"url": "https://bitbucket.org/chchrsc/python-fmask",
"version": "0.5.3",
},
{
"name": "tesp",
"url": "https://github.com/OpenDataCubePipelines/tesp.git",
"version": "0.6.1",
},
{
"name": "eodatasets3",
"url": "https://github.com/GeoscienceAustralia/eo-datasets",
"version": eodatasets3.__version__,
},
],
},
proc_info,
ignore_fields=("gqa", "wagl"),
)
# All produced tifs should be valid COGs
for image in expected_folder.rglob("*.tif"):
assert cogeo.cog_validate(image), f"Failed COG validation: {image}"
# Check one of the images explicitly.
[image] = expected_folder.rglob("*_nbar_*_band08.tif")
with rasterio.open(image) as d:
d: DatasetReader
assert d.count == 1, "Expected one band"
assert d.nodata == -999.0
# Verify the pixel values haven't changed.
assert crc32(d.read(1).tobytes()) == 3_381_159_350
# (Rasterio's checksum is zero on some datasets for some reason? So we use crc above...)
assert d.checksum(1) == 58403
# The last overview is an odd size because of the tiny test data image size.
assert d.overviews(1) == [8, 16, 31]
assert d.driver == "GTiff"
assert d.dtypes == ("int16",)
assert d.compression == Compression.deflate
assert d.height == 157
assert d.width == 156
# The reduced resolution makes it hard to test the chosen block size...
assert d.block_shapes == [(26, 156)]
# Check the overviews use default 512 block size.
# (Rasterio doesn't seem to have an api for this?)
assert gdal.Open(str(image)).GetRasterBand(1).GetOverview(1).GetBlockSize() == [
512,
512,
], "Expected overviews to have a larger block size."
# OA data should have no overviews.
[*oa_images] = expected_folder.rglob("*_oa_*.tif")
assert oa_images
for image in oa_images:
# fmask is the only OA that should have overviews according to spec (and Josh).
if "fmask" in image.name:
assert_image(image, overviews=[8, 16, 26])
else:
assert_image(image, overviews=[])
# Check we didn't get height/width mixed up again :)
# (The small size of our test data makes this slightly silly, though...)
[thumb_path] = expected_folder.rglob("*_nbar_*.jpg")
assert_image(thumb_path, bands=3, shape=(7, 8))
def test_maturity_calculation():
from eodatasets3 import wagl
# Simplified. Only a few ancillary parts that matter to us.
wagl_doc = {
"ancillary": {
"aerosol": {
"id": ["99d73c48-9985-51d2-9639-d37bcdfe119e"],
"tier": "AATSR_CMP_MONTH",
"value": 0.047_813_605_517_148_97,
},
"brdf": {
"alpha_1": {
"band_1": 0.407_471_513_826_581_4,
"band_2": 0.407_472_440_438_251_7,
"band_3": 0.564_374_828_124_185,
"band_4": 0.452_550_357_394_962_35,
"band_5": 0.720_394_875_348_492_4,
"band_6": 0.475_077_458_430_413_66,
"band_7": 0.549_934_518_094_732,
},
"alpha_2": {
"band_1": 0.177_715_841_252_848_28,
"band_2": 0.177_716_091_422_247_15,
"band_3": 0.136_703_039_045_401_32,
"band_4": 0.167_629_648_004_969_63,
"band_5": 0.090_148_975_875_461_32,
"band_6": 0.121_059_126_731_143_88,
"band_7": 0.181_073_714_539_622_23,
},
"id": [
"2e95bdec-42e4-50a2-9a4c-1ea970e2696d",
"d02e1c58-7379-5c2d-a080-995838550d0d",
],
"tier": "DEFINITIVE",
},
"elevation": {
"id": [
"8ad73086-72cf-561a-aa0f-1e3c64d53384",
"e75ac77d-1ed0-55a5-888b-9ae48080eae9",
]
},
"ozone": {
"id": ["83914de1-c12e-5035-af8d-e2dc1baa54d4"],
"tier": "DEFINITIVE",
"value": 0.295,
},
"water_vapour": {
"id": ["e68035cd-1cd3-57fc-9b0e-2bf710a3df87"],
"tier": "DEFINITIVE",
"value": 0.490_000_009_536_743_16,
},
}
}
# July 2002 is when we consider our BRDF to be good enough: both Aqua
# and Terra satellites were now operational.
acq_before_brdf = datetime(2002, 6, 29, tzinfo=timezone.utc)
acq_after_brdf = datetime(2002, 7, 1, tzinfo=timezone.utc)
proc_after_brdf = acq_after_brdf + timedelta(days=7)
# Normal, final dataset. Processed just outside of NRT window.
assert (
wagl._determine_maturity(
acq_after_brdf, acq_after_brdf + timedelta(hours=49), wagl_doc
)
== "final"
)
# NRT when processed < 48 hours
assert (
wagl._determine_maturity(
acq_after_brdf, acq_after_brdf + timedelta(hours=1), wagl_doc
)
== "nrt"
)
assert (
wagl._determine_maturity(
acq_before_brdf, acq_before_brdf + timedelta(hours=47), wagl_doc
)
== "nrt"
)
    # Before the BRDF period (July 2002): final if water vapour is definitive.
assert (
wagl._determine_maturity(
acq_before_brdf, acq_before_brdf + timedelta(days=3), wagl_doc
)
== "final"
)
# Interim whenever water vapour is fallback.
wagl_doc["ancillary"]["water_vapour"]["tier"] = "FALLBACK_DATASET"
assert (
wagl._determine_maturity(acq_after_brdf, proc_after_brdf, wagl_doc) == "interim"
)
assert (
wagl._determine_maturity(
acq_before_brdf, acq_before_brdf + timedelta(days=3), wagl_doc
)
== "interim"
)
wagl_doc["ancillary"]["water_vapour"]["tier"] = "DEFINITIVE"
# Fallback BRDF (when at least one is fallback)
wagl_doc["ancillary"]["brdf"]["tier"] = "FALLBACK_DEFAULT"
assert (
wagl._determine_maturity(acq_after_brdf, proc_after_brdf, wagl_doc) == "interim"
)
@contextmanager
def expect_no_warnings():
"""Throw an assertion error if any warnings are produced."""
with pytest.warns(None) as warning_record:
yield
# We could tighten this to specific warnings if it proves too noisy, but it's
# useful for catching things like unclosed files.
if warning_record:
messages = "\n".join(f"- {w.message} ({w})\n" for w in warning_record)
raise AssertionError(f"Expected no warnings to be produced, got:\n {messages}")
def test_sentinel_wagl_package(tmp_path: Path):
out = tmp_path
from eodatasets3.scripts import packagewagl
    # No warnings should be logged during packaging (see expect_no_warnings above).
with expect_no_warnings():
res = CliRunner().invoke(
packagewagl.run,
map(
str,
(
WAGL_SENTINEL_OUTPUT,
"--level1",
S2_L1_METADATA_PATH,
"--output",
out,
# Our weird scaled test dataset resolution
"--oa-resolution",
998.1818181818181,
),
),
catch_exceptions=False,
)
# The last line of output ends with the dataset path.
words, reported_metadata = res.output.splitlines()[-1].rsplit(" ", 1)
expected_folder = out / "ga_s2am_ard_3/53/JQJ/2020/10/31"
assert_file_structure(
expected_folder,
{
"20201031T022859": {
"ga_s2am_ard_3-2-1_53JQJ_2020-10-31_final.odc-metadata.yaml": "",
"ga_s2am_ard_3-2-1_53JQJ_2020-10-31_final.proc-info.yaml": "",
"ga_s2am_ard_3-2-1_53JQJ_2020-10-31_final.sha1": "",
"ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band01.tif": "",
"ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band02.tif": "",
"ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band03.tif": "",
"ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band04.tif": "",
"ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band05.tif": "",
"ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band06.tif": "",
"ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band07.tif": "",
"ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band08a.tif": "",
"ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band08.tif": "",
"ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band11.tif": "",
"ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band12.tif": "",
"ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_thumbnail.jpg": "",
"ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band01.tif": "",
"ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band02.tif": "",
"ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band03.tif": "",
"ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band04.tif": "",
"ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band05.tif": "",
"ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band06.tif": "",
"ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band07.tif": "",
"ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band08a.tif": "",
"ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band08.tif": "",
"ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band11.tif": "",
"ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band12.tif": "",
"ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_thumbnail.jpg": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_azimuthal-exiting.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_azimuthal-incident.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_combined-terrain-shadow.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_exiting-angle.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_fmask.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_incident-angle.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_nbar-contiguity.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_nbart-contiguity.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_relative-azimuth.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_relative-slope.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_satellite-azimuth.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_satellite-view.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_solar-azimuth.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_solar-zenith.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_time-delta.tif": "",
}
},
)
[output_metadata] = expected_folder.rglob("*.odc-metadata.yaml")
# Checksum should include all files other than itself.
[checksum_file] = expected_folder.rglob("*.sha1")
all_output_files = set(
p.relative_to(checksum_file.parent)
for p in expected_folder.rglob("*")
if p != checksum_file and not p.is_dir()
)
files_in_checksum = {
Path(line.split("\t")[1]) for line in checksum_file.read_text().splitlines()
}
assert all_output_files == files_in_checksum
# Verify the computed contiguity looks the same. (metadata fields will depend on it)
[image] = expected_folder.rglob("*_oa_*nbar-contiguity.tif")
assert_image(image, nodata=255, unique_pixel_counts={0: 5367, 1: 6733})
[image] = expected_folder.rglob("*_oa_*nbart-contiguity.tif")
assert_image(image, nodata=255, unique_pixel_counts={0: 5367, 1: 6733})
assert_same_as_file(
{
"$schema": "https://schemas.opendatacube.org/dataset",
"id": "14cfa990-7e2f-4f0c-bd5e-b4cb28c27e8d",
"label": "ga_s2am_ard_3-2-1_53JQJ_2020-10-31_final",
"product": {
"name": "ga_s2am_ard_3",
"href": "https://collections.dea.ga.gov.au/product/ga_s2am_ard_3",
},
"crs": "epsg:32753",
"geometry": {
"type": "Polygon",
"coordinates": [
[
[731901.8181818182, 6790240.0],
[728854.7368421053, 6790240.0],
[752174.154338321, 6890002.646902946],
[759379.8080509851, 6900040.0],
[762411.0326110948, 6900040.0],
[763218.8851094716, 6900040.0],
[809760.0, 6900040.0],
[809760.0, 6790240.0],
[732900.0, 6790240.0],
[731901.8181818182, 6790240.0],
]
],
},
"grids": {
"default": {
"shape": [110, 110],
"transform": [
998.1818181818181,
0.0,
699960.0,
0.0,
-998.1818181818181,
6900040.0,
0.0,
0.0,
1.0,
],
},
"a": {
"shape": [55, 55],
"transform": [
1996.3636363636363,
0.0,
699960.0,
0.0,
-1996.3636363636363,
6900040.0,
0.0,
0.0,
1.0,
],
},
"b": {
"shape": [19, 19],
"transform": [
5778.9473684210525,
0.0,
699960.0,
0.0,
-5778.9473684210525,
6900040.0,
0.0,
0.0,
1.0,
],
},
"c": {
"shape": [19, 19],
"transform": [
5778.947368421053,
0.0,
699960.0,
0.0,
-5778.947368421053,
6900040.0,
0.0,
0.0,
1.0,
],
},
},
"properties": {
"datetime": "2020-10-31T00:55:10.954414",
"dea:dataset_maturity": "final",
"eo:cloud_cover": 11.063428320692061,
"eo:gsd": 998.1818181818181,
"eo:instrument": "MSI",
"eo:platform": "sentinel-2a",
"eo:sun_azimuth": 62.9424764928076,
"eo:sun_elevation": 26.8398246645449,
"fmask:clear": 73.65382838133374,
"fmask:cloud": 11.063428320692061,
"fmask:cloud_shadow": 0.6983135097842945,
"fmask:snow": 14.583962676987106,
"fmask:water": 0.0004671112027989303,
"gqa:abs_iterative_mean_x": 0.42,
"gqa:abs_iterative_mean_xy": 0.53,
"gqa:abs_iterative_mean_y": 0.32,
"gqa:abs_x": 0.69,
"gqa:abs_xy": 1.07,
"gqa:abs_y": 0.82,
"gqa:cep90": 0.97,
"gqa:iterative_mean_x": 0.4,
"gqa:iterative_mean_xy": 0.4,
"gqa:iterative_mean_y": 0.04,
"gqa:iterative_stddev_x": 0.29,
"gqa:iterative_stddev_xy": 0.53,
"gqa:iterative_stddev_y": 0.44,
"gqa:mean_x": 0.38,
"gqa:mean_xy": 0.39,
"gqa:mean_y": -0.07,
"gqa:stddev_x": 1.18,
"gqa:stddev_xy": 2.24,
"gqa:stddev_y": 1.9,
"odc:dataset_version": "3.2.1",
"odc:file_format": "GeoTIFF",
"odc:processing_datetime": "2021-02-10T03:25:22.635668",
"odc:producer": "ga.gov.au",
"odc:product_family": "ard",
"odc:region_code": "53JQJ",
"sat:orbit_state": "descending",
"sat:relative_orbit": 102,
"sentinel:datastrip_id": "S2A_OPER_MSI_L1C_DS_EPAE_20201031T022859_S20201031T004711_N02.09",
"sentinel:sentinel_tile_id": "S2A_OPER_MSI_L1C_TL_EPAE_20201031T022859_A027984_T53JQJ_N02.09",
"sentinel:datatake_start_datetime": "2020-10-31T02:28:59",
},
"measurements": {
"nbar_blue": {
"path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band02.tif"
},
"nbar_coastal_aerosol": {
"path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band01.tif",
"grid": "b",
},
"nbar_green": {
"path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band03.tif"
},
"nbar_nir_1": {
"path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band08.tif"
},
"nbar_nir_2": {
"path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band08a.tif",
"grid": "a",
},
"nbar_red": {
"path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band04.tif"
},
"nbar_red_edge_1": {
"path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band05.tif",
"grid": "a",
},
"nbar_red_edge_2": {
"path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band06.tif",
"grid": "a",
},
"nbar_red_edge_3": {
"path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band07.tif",
"grid": "a",
},
"nbar_swir_2": {
"path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band11.tif",
"grid": "a",
},
"nbar_swir_3": {
"path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band12.tif",
"grid": "a",
},
"nbart_blue": {
"path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band02.tif"
},
"nbart_coastal_aerosol": {
"path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band01.tif",
"grid": "b",
},
"nbart_green": {
"path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band03.tif"
},
"nbart_nir_1": {
"path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band08.tif"
},
"nbart_nir_2": {
"path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band08a.tif",
"grid": "a",
},
"nbart_red": {
"path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band04.tif"
},
"nbart_red_edge_1": {
"path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band05.tif",
"grid": "a",
},
"nbart_red_edge_2": {
"path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band06.tif",
"grid": "a",
},
"nbart_red_edge_3": {
"path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band07.tif",
"grid": "a",
},
"nbart_swir_2": {
"path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band11.tif",
"grid": "a",
},
"nbart_swir_3": {
"path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band12.tif",
"grid": "a",
},
"oa_azimuthal_exiting": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_azimuthal-exiting.tif"
},
"oa_azimuthal_incident": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_azimuthal-incident.tif"
},
"oa_combined_terrain_shadow": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_combined-terrain-shadow.tif"
},
"oa_exiting_angle": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_exiting-angle.tif"
},
"oa_fmask": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_fmask.tif",
"grid": "c",
},
"oa_incident_angle": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_incident-angle.tif"
},
"oa_nbar_contiguity": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_nbar-contiguity.tif"
},
"oa_nbart_contiguity": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_nbart-contiguity.tif"
},
"oa_relative_azimuth": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_relative-azimuth.tif"
},
"oa_relative_slope": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_relative-slope.tif"
},
"oa_satellite_azimuth": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_satellite-azimuth.tif"
},
"oa_satellite_view": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_satellite-view.tif"
},
"oa_solar_azimuth": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_solar-azimuth.tif"
},
"oa_solar_zenith": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_solar-zenith.tif"
},
"oa_time_delta": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_time-delta.tif"
},
},
"accessories": {
"checksum:sha1": {
"path": "ga_s2am_ard_3-2-1_53JQJ_2020-10-31_final.sha1"
},
"metadata:processor": {
"path": "ga_s2am_ard_3-2-1_53JQJ_2020-10-31_final.proc-info.yaml"
},
"thumbnail:nbar": {
"path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_thumbnail.jpg"
},
"thumbnail:nbart": {
"path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_thumbnail.jpg"
},
},
"lineage": {"level1": ["e27200c1-0a9c-5e24-bfe1-bbbb3f3bdedc"]},
},
output_metadata,
)
[proc_info] = expected_folder.rglob("*.proc-info.yaml")
assert_same_as_file(
{
"fmask": {
"parameters": {
"cloud_buffer_distance_metres": 0.0,
"cloud_shadow_buffer_distance_metres": 0.0,
"frantz_parallax_sentinel_2": False,
},
"percent_class_distribution": {
"clear": 73.65382838133374,
"cloud": 11.063428320692061,
"cloud_shadow": 0.6983135097842945,
"snow": 14.583962676987106,
"water": 0.0004671112027989303,
},
},
"software_versions": [
{
"name": "modtran",
"url": "http://www.ontar.com/software/productdetails.aspx?item=modtran",
"version": "6.0.1",
},
{
"name": "wagl",
"url": "https://github.com/GeoscienceAustralia/wagl.git",
"version": "5.4.1",
},
{
"name": "eugl",
"url": "https://github.com/OpenDataCubePipelines/eugl.git",
"version": "0.2.1",
},
{"name": "gverify", "url": None, "version": "v0.25c"},
{
"name": "fmask",
"url": "https://bitbucket.org/chchrsc/python-fmask",
"version": "0.5.4",
},
{
"name": "tesp",
"url": "https://github.com/OpenDataCubePipelines/tesp.git",
"version": "0.6.2",
},
{
"name": "eodatasets3",
"url": "https://github.com/GeoscienceAustralia/eo-datasets",
"version": eodatasets3.__version__,
},
],
},
proc_info,
ignore_fields=("gqa", "wagl"),
)
# All produced tifs should be valid COGs
for image in expected_folder.rglob("*.tif"):
assert cogeo.cog_validate(image), f"Failed COG validation: {image}"
|
import scrapy, re
from alleco.objects.official import Official
class ross_t(scrapy.Spider):
name = "ross_t"
muniName = "ROSS"
muniType = "TOWNSHIP"
complete = True
def start_requests(self):
urls = ['https://www.ross.pa.us/245/Board-of-Commissioners',
'https://www.ross.pa.us/225/Other-Elected-Officials']
for url in urls:
yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
        # Crude page dispatch on the second-to-last URL character:
        # '...Board-of-Commissioners' ends in 'rs', '...Other-Elected-Officials' in 'ls'.
        if response.url[-2]=='r':
for quote in response.xpath('//div[@class="cpTabPanels"]'):
arr = [i.strip() for i in quote.xpath('.//text()').getall() if len(i.strip())>0 and '$' not in i]
temp = []
peeps = []
for i in arr:
temp.append(i)
if '@' in i:
peeps.append(temp)
temp = []
for pers in peeps:
name = self._name(pers[1]) if "Commissioner" in pers[1] else None
yield Official(
muniName=self.muniName,
muniType=self.muniType,
office="COMMISSIONER",
district=pers[0].upper(),
name=name,
email=pers[-1],
vacant=name==None,
url=response.url)
elif response.url[-2]=='l':
for quote in response.xpath('//div[contains(h2/text(),"Ross Tax Collector")]/p[1]'):
yield Official(
muniName=self.muniName,
muniType=self.muniType,
office="TAX COLLECTOR",
name=quote.xpath('text()[1]').get(),
email=quote.xpath('a/@href').get(),
phone=quote.xpath('text()[2]').get(),
url=response.url)
    def _name(self,string):
        # Drop everything after the first comma, then strip the
        # 13-character "Commissioner " prefix, leaving just the name.
        return string.split(",")[0][13:]
|
"""
状态模式
"""
from __future__ import annotations
from abc import ABC, abstractmethod
class Context:
    # The current state; the state pattern dispatches behaviour through it
_state: State = None
def __init__(self, state: State) -> None:
self.transition_to(state)
def transition_to(self, state: State) -> None:
        # Transition the context to a different state at runtime
self._state = state
self._state.context = self
    # The operations clients call; they delegate to the current state
def request1(self):
self._state.handle1()
def request2(self):
self._state.handle2()
class State(ABC):
@property
def context(self) -> Context:
return self._context
@context.setter
def context(self, context: Context) -> None:
self._context = context
@abstractmethod
def handle1(self) -> None:
pass
@abstractmethod
def handle2(self) -> None:
pass
class ConcreteStateA(State):
def handle1(self) -> None:
        print('Executed A-1')
self.context.transition_to(ConcreteStateB())
def handle2(self) -> None:
        print('Executed A-2')
class ConcreteStateB(State):
def handle1(self) -> None:
        print('Executed B-1')
def handle2(self) -> None:
        print('Executed B-2')
self.context.transition_to(ConcreteStateA())
if __name__ == '__main__':
context = Context(ConcreteStateA())
context.request1()
context.request2()
context.request2()
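# Expected output of the demo above (request1 runs A.handle1, which switches
# to state B; request2 then runs B.handle2, which switches back to A):
#   Executed A-1
#   Executed B-2
#   Executed A-2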
|
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import json
from mcfw.rpc import arguments, returns
from rogerthat.models import Message
from rogerthat.models.properties.forms import FormResult
from rogerthat.rpc import users
from rogerthat.service.api import messaging
from rogerthat.to.messaging.forms import TextBlockFormTO, TextBlockTO, FormTO
from rogerthat.to.messaging.service_callback_results import FormAcknowledgedCallbackResultTO
from rogerthat.to.service import UserDetailsTO
from rogerthat.utils.app import get_app_user_tuple
from solutions import translate
from solutions.common.dal import get_solution_main_branding, get_solution_settings
from solutions.common.models import SolutionInboxMessage
@arguments(service_user=users.User, service_identity=unicode, message_key=unicode, app_user=users.User, name=unicode,
answer_id=unicode, parent_inbox_message=SolutionInboxMessage)
def process_updated_customer_signup_message(service_user, service_identity, message_key, app_user, name, answer_id,
parent_inbox_message):
# type: (users.User, unicode, unicode, users.User, unicode, unicode, SolutionInboxMessage) -> None
from solutions.common.bizz.messaging import MESSAGE_TAG_DENY_SIGNUP
from solutions.common.restapi.services import rest_create_service_from_signup
with users.set_user(service_user):
sln_settings = get_solution_settings(service_user)
if answer_id == 'decline':
widget = TextBlockTO()
widget.max_chars = 1024
form = TextBlockFormTO()
form.type = TextBlockTO.TYPE
form.widget = widget
form.positive_button = translate(sln_settings.main_language, 'Confirm')
form.negative_button = translate(sln_settings.main_language, 'Cancel')
form.javascript_validation = """function run(result) {
return result.value ? true : '%s';
}""" % translate(sln_settings.main_language, 'this_field_is_required', _duplicate_backslashes=True)
human_user, app_id = get_app_user_tuple(app_user)
messaging.send_form(parent_key=parent_inbox_message.message_key,
parent_message_key=parent_inbox_message.message_key,
message=translate(sln_settings.main_language, 'signup_not_ok'),
member=human_user.email(),
app_id=app_id,
flags=Message.FLAG_AUTO_LOCK,
branding=get_solution_main_branding(service_user).branding_key,
tag=json.dumps({'__rt__.tag': MESSAGE_TAG_DENY_SIGNUP,
'signup_key': parent_inbox_message.category_key}),
form=form,
service_identity=service_identity,
alert_flags=Message.ALERT_FLAG_VIBRATE)
elif answer_id == 'approve':
result = rest_create_service_from_signup(parent_inbox_message.category_key,
force=True) # type: CreateServiceStatusTO
if not result.success:
messaging.send(parent_message_key=message_key,
message=result.errormsg,
answers=[],
flags=Message.FLAG_ALLOW_DISMISS,
branding=get_solution_main_branding(service_user).branding_key,
tag=None,
service_identity=service_identity)
@returns(FormAcknowledgedCallbackResultTO)
@arguments(service_user=users.User, status=int, form_result=FormResult, answer_id=unicode, member=unicode,
message_key=unicode, tag=unicode, received_timestamp=int, acked_timestamp=int, parent_message_key=unicode,
result_key=unicode, service_identity=unicode, user_details=[UserDetailsTO])
def deny_signup(service_user, status, form_result, answer_id, member, message_key, tag,
received_timestamp, acked_timestamp, parent_message_key, result_key,
service_identity, user_details):
from solutions.common.restapi import rest_customer_signup_reply
with users.set_user(service_user):
if answer_id == FormTO.POSITIVE:
tag_dict = json.loads(tag)
rest_customer_signup_reply(tag_dict['signup_key'], form_result.result.value)
|
import os
import time
import pickle
import math
import numpy as np
import linecache
import matplotlib.pyplot as plt
# from matplotlib.pyplot import MultipleLocator
import grid
data_path = 'E:/dataset/didi/processed'
save_path = 'E:/dataset/didi/processed/order_20161101_sampled_value_map_fig'
data_file_name = 'processed_data' # '.pkl' will be added for binary file
value_map_file_name = 'value_map' # '.pkl' will be added for binary file
n_time_unit = 144
size_hexagon_to_edge = 0.0048
hexagon_size_factor_for_plot = 1
range_map_longitude = [103.96, 104.18]
range_map_latitude = [30.59, 30.77]
size_hexagon = size_hexagon_to_edge * 2 / math.sqrt(3) # length to the point
if not os.path.exists(save_path):
os.mkdir(save_path)
with open(os.path.join(data_path, data_file_name+'.pkl'), 'rb') as f:
data = pickle.load(f)
with open(os.path.join(data_path, value_map_file_name+'.pkl'), 'rb') as f:
value_map = pickle.load(f)
# make hexagon patch (renamed so it no longer shadows the imported `grid` module)
hexagon = grid.Hexagon(size_to_edge=size_hexagon_to_edge*hexagon_size_factor_for_plot)
grid_interval_lo = size_hexagon * 1.5
grid_interval_la = size_hexagon_to_edge * 2
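# Hexagonal packing, for reference: columns sit 1.5 * circumradius apart,
# rows one full apothem-to-apothem diameter apart, and every other column is
# shifted half a cell vertically (the `count % 2` branch below).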
grid_centers = []
for la in np.arange(range_map_latitude[1]-size_hexagon, range_map_latitude[0]-0.00001, -grid_interval_la):
row = []
count = 0
for lo in np.arange(range_map_longitude[0], range_map_longitude[1]+0.00001, grid_interval_lo):
if count % 2 == 0:
row.append([lo, la])
else:
row.append([lo, la+size_hexagon_to_edge])
count += 1
grid_centers.append(row)
grid_centers_mat = np.array(grid_centers)
shape_grid_centers_mat = grid_centers_mat.shape
n_grids = shape_grid_centers_mat[0]*shape_grid_centers_mat[1]
grid_index_mat = np.arange(n_grids).reshape(shape_grid_centers_mat[:2])
print('shape of grids is', shape_grid_centers_mat)
print('number of grids is', n_grids)
grid_centers_flat_T = grid_centers_mat.reshape(n_grids, 2).T
max_value = np.max(value_map)
min_value = np.min(value_map)
print('maximum value in value_map is', max_value)
print('minimum value in value_map is', min_value)
# value_map = (value_map - min_value) / max_value
# max_value = np.max(value_map)
# min_value = np.min(value_map)
# print('maximum value in value_map after normalization is', max_value)
# print('minimum value in value_map after normalization is', min_value)
for t in range(n_time_unit):
fig = plt.figure()
plt.title('value map of time unit %d' % t)
plt.scatter(grid_centers_flat_T[0], grid_centers_flat_T[1], c=value_map[t], marker='H', s=100, alpha=0.5)
plt.colorbar()
fig.savefig(os.path.join(save_path, '%d.jpg'%t))
|
w = int(input())
h = int(input())
for i in range(h):
output = str()
for j in range(w):
if (i + j) % 2 == 0:
output += '0'
else:
output += '1'
print(output)
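# Example: w=4, h=3 prints a checkerboard pattern:
#   0101
#   1010
#   0101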
|
<warning descr="Python version 2.6, 2.7 do not support this syntax.">raise exception from cause</warning>
a = 1
|
from bridges.symbol import *
class Text(Symbol):
def __init__(self, label = None):
super(Text, self).__init__()
if label is not None:
self._text = label
else:
self._text = ""
self.stroke_width = 1.0
self._font_size = None
self._anchor_alignment_lr = None
self._anchor_alignment_tb = None
self._locx = 0.0
self._locy = 0.0
def get_shape_type(self):
return "text"
@property
def text(self):
return self._text
@text.setter
def text(self, t):
self._text = t
@property
def font_size(self):
return self._font_size
@font_size.setter
def font_size(self, s):
        if s < 0.0:
            raise ValueError("Font size cannot be negative")
        self._font_size = s
def set_anchor_alignment(self, typeLR, typeTB):
self._anchor_alignment_lr = typeLR
self._anchor_alignment_tb = typeTB
def set_anchor_location(self, x, y):
self._locx = x
self._locy = y
def get_json_representation(self):
json_builder = super(Text, self).get_json_representation()
json_builder['anchor-location'] = [self._locx, self._locy]
json_builder['text'] = self.text
if self.font_size is not None:
            json_builder['font-size'] = self.font_size
if self._anchor_alignment_lr is not None:
json_builder['anchor-alignmentLR'] = self._anchor_alignment_lr
if self._anchor_alignment_tb is not None:
json_builder['anchor-alignmentTB'] = self._anchor_alignment_tb
return json_builder
|
import torch
import torch.nn as nn
import rdkit.Chem as Chem
import torch.nn.functional as F
from hgraph.nnutils import *
from hgraph.encoder import IncHierMPNEncoder
from hgraph.mol_graph import MolGraph
from hgraph.inc_graph import IncTree, IncGraph
class HTuple():
def __init__(self, node=None, mess=None, vmask=None, emask=None):
self.node, self.mess = node, mess
self.vmask, self.emask = vmask, emask
class HierMPNDecoder(nn.Module):
def __init__(self, vocab, avocab, rnn_type, embed_size, hidden_size, latent_size, depthT, depthG, dropout, attention=False):
super(HierMPNDecoder, self).__init__()
self.vocab = vocab
self.avocab = avocab
self.hidden_size = hidden_size
self.embed_size = embed_size
self.latent_size = latent_size
self.use_attention = attention
self.itensor = torch.LongTensor([]).cuda()
self.hmpn = IncHierMPNEncoder(vocab, avocab, rnn_type, embed_size, hidden_size, depthT, depthG, dropout)
self.rnn_cell = self.hmpn.tree_encoder.rnn
self.E_assm = self.hmpn.E_i
self.E_order = torch.eye(MolGraph.MAX_POS).cuda()
self.topoNN = nn.Sequential(
nn.Linear(hidden_size + latent_size, hidden_size),
nn.ReLU(),
nn.Dropout(dropout),
nn.Linear(hidden_size, 1)
)
self.clsNN = nn.Sequential(
nn.Linear(hidden_size + latent_size, hidden_size),
nn.ReLU(),
nn.Dropout(dropout),
nn.Linear(hidden_size, vocab.size()[0])
)
self.iclsNN = nn.Sequential(
nn.Linear(hidden_size + latent_size, hidden_size),
nn.ReLU(),
nn.Dropout(dropout),
nn.Linear(hidden_size, vocab.size()[1])
)
self.matchNN = nn.Sequential(
nn.Linear(hidden_size + embed_size + MolGraph.MAX_POS, hidden_size),
nn.ReLU(),
)
self.W_assm = nn.Linear(hidden_size, latent_size)
if latent_size != hidden_size:
self.W_root = nn.Linear(latent_size, hidden_size)
if self.use_attention:
self.A_topo = nn.Linear(hidden_size, latent_size)
self.A_cls = nn.Linear(hidden_size, latent_size)
self.A_assm = nn.Linear(hidden_size, latent_size)
self.topo_loss = nn.BCEWithLogitsLoss(size_average=False)
self.cls_loss = nn.CrossEntropyLoss(size_average=False)
self.icls_loss = nn.CrossEntropyLoss(size_average=False)
self.assm_loss = nn.CrossEntropyLoss(size_average=False)
def apply_tree_mask(self, tensors, cur, prev):
fnode, fmess, agraph, bgraph, cgraph, scope = tensors
agraph = agraph * index_select_ND(cur.emask, 0, agraph)
bgraph = bgraph * index_select_ND(cur.emask, 0, bgraph)
cgraph = cgraph * index_select_ND(prev.vmask, 0, cgraph)
return fnode, fmess, agraph, bgraph, cgraph, scope
def apply_graph_mask(self, tensors, hgraph):
fnode, fmess, agraph, bgraph, scope = tensors
agraph = agraph * index_select_ND(hgraph.emask, 0, agraph)
bgraph = bgraph * index_select_ND(hgraph.emask, 0, bgraph)
return fnode, fmess, agraph, bgraph, scope
def update_graph_mask(self, graph_batch, new_atoms, hgraph):
new_atom_index = hgraph.vmask.new_tensor(new_atoms)
hgraph.vmask.scatter_(0, new_atom_index, 1)
new_atom_set = set(new_atoms)
new_bonds = [] #new bonds are the subgraph induced by new_atoms
for zid in new_atoms:
for nid in graph_batch[zid]:
if nid not in new_atom_set: continue
new_bonds.append( graph_batch[zid][nid]['mess_idx'] )
new_bond_index = hgraph.emask.new_tensor(new_bonds)
if len(new_bonds) > 0:
hgraph.emask.scatter_(0, new_bond_index, 1)
return new_atom_index, new_bond_index
def init_decoder_state(self, tree_batch, tree_tensors, src_root_vecs):
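        # Give each tree root an extra virtual incoming message (index
        # num_mess + i) whose initial hidden state is that batch item's root
        # latent vector, so decoding starts conditioned on the latent code.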
batch_size = len(src_root_vecs)
num_mess = len(tree_tensors[1])
agraph = tree_tensors[2].clone()
bgraph = tree_tensors[3].clone()
for i,tup in enumerate(tree_tensors[-1]):
root = tup[0]
assert agraph[root,-1].item() == 0
agraph[root,-1] = num_mess + i
for v in tree_batch.successors(root):
mess_idx = tree_batch[root][v]['mess_idx']
assert bgraph[mess_idx,-1].item() == 0
bgraph[mess_idx,-1] = num_mess + i
new_tree_tensors = tree_tensors[:2] + [agraph, bgraph] + tree_tensors[4:]
htree = HTuple()
htree.mess = self.rnn_cell.get_init_state(tree_tensors[1], src_root_vecs)
htree.emask = torch.cat( [bgraph.new_zeros(num_mess), bgraph.new_ones(batch_size)], dim=0 )
return htree, new_tree_tensors
def attention(self, src_vecs, batch_idx, queries, W_att):
size = batch_idx.size()
if batch_idx.dim() > 1:
batch_idx = batch_idx.view(-1)
queries = queries.view(-1, queries.size(-1))
src_vecs = src_vecs.index_select(0, batch_idx)
att_score = torch.bmm( src_vecs, W_att(queries).unsqueeze(-1) )
att_vecs = F.softmax(att_score, dim=1) * src_vecs
att_vecs = att_vecs.sum(dim=1)
return att_vecs if len(size) == 1 else att_vecs.view(size[0], size[1], -1)
def get_topo_score(self, src_tree_vecs, batch_idx, topo_vecs):
if self.use_attention:
topo_cxt = self.attention(src_tree_vecs, batch_idx, topo_vecs, self.A_topo)
else:
topo_cxt = src_tree_vecs.index_select(index=batch_idx, dim=0)
return self.topoNN( torch.cat([topo_vecs, topo_cxt], dim=-1) ).squeeze(-1)
def get_cls_score(self, src_tree_vecs, batch_idx, cls_vecs, cls_labs):
if self.use_attention:
cls_cxt = self.attention(src_tree_vecs, batch_idx, cls_vecs, self.A_cls)
else:
cls_cxt = src_tree_vecs.index_select(index=batch_idx, dim=0)
cls_vecs = torch.cat([cls_vecs, cls_cxt], dim=-1)
cls_scores = self.clsNN(cls_vecs)
if cls_labs is None: #inference mode
icls_scores = self.iclsNN(cls_vecs) #no masking
else:
vocab_masks = self.vocab.get_mask(cls_labs)
icls_scores = self.iclsNN(cls_vecs) + vocab_masks #apply mask by log(x + mask): mask=0 or -INF
return cls_scores, icls_scores
def get_assm_score(self, src_graph_vecs, batch_idx, assm_vecs):
if self.use_attention:
assm_cxt = self.attention(src_graph_vecs, batch_idx, assm_vecs, self.A_assm)
else:
assm_cxt = index_select_ND(src_graph_vecs, 0, batch_idx)
return (self.W_assm(assm_vecs) * assm_cxt).sum(dim=-1)
def forward(self, src_mol_vecs, graphs, tensors, orders):
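        # Teacher-forced training pass: replay each molecule's ground-truth
        # generation order, collecting topology / cluster / attachment
        # predictions along the way, then score them jointly at the end.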
batch_size = len(orders)
tree_batch, graph_batch = graphs
tree_tensors, graph_tensors = tensors
inter_tensors = tree_tensors
src_root_vecs, src_tree_vecs, src_graph_vecs = src_mol_vecs
init_vecs = src_root_vecs if self.latent_size == self.hidden_size else self.W_root(src_root_vecs)
htree, tree_tensors = self.init_decoder_state(tree_batch, tree_tensors, init_vecs)
hinter = HTuple(
mess = self.rnn_cell.get_init_state(inter_tensors[1]),
emask = self.itensor.new_zeros(inter_tensors[1].size(0))
)
hgraph = HTuple(
mess = self.rnn_cell.get_init_state(graph_tensors[1]),
vmask = self.itensor.new_zeros(graph_tensors[0].size(0)),
emask = self.itensor.new_zeros(graph_tensors[1].size(0))
)
all_topo_preds, all_cls_preds, all_assm_preds = [], [], []
new_atoms = []
tree_scope = tree_tensors[-1]
for i in range(batch_size):
root = tree_batch.nodes[ tree_scope[i][0] ]
clab, ilab = self.vocab[ root['label'] ]
all_cls_preds.append( (init_vecs[i], i, clab, ilab) ) #cluster prediction
new_atoms.extend(root['cluster'])
subgraph = self.update_graph_mask(graph_batch, new_atoms, hgraph)
graph_tensors = self.hmpn.embed_graph(graph_tensors) + (graph_tensors[-1],) #preprocess graph tensors
maxt = max([len(x) for x in orders])
max_cls_size = max( [len(attr) * 2 for node,attr in tree_batch.nodes(data='cluster')] )
for t in range(maxt):
batch_list = [i for i in range(batch_size) if t < len(orders[i])]
assert htree.emask[0].item() == 0 and hinter.emask[0].item() == 0 and hgraph.vmask[0].item() == 0 and hgraph.emask[0].item() == 0
subtree = [], []
for i in batch_list:
xid, yid, tlab = orders[i][t]
subtree[0].append(xid)
if yid is not None:
mess_idx = tree_batch[xid][yid]['mess_idx']
subtree[1].append(mess_idx)
subtree = htree.emask.new_tensor(subtree[0]), htree.emask.new_tensor(subtree[1])
htree.emask.scatter_(0, subtree[1], 1)
hinter.emask.scatter_(0, subtree[1], 1)
cur_tree_tensors = self.apply_tree_mask(tree_tensors, htree, hgraph)
cur_inter_tensors = self.apply_tree_mask(inter_tensors, hinter, hgraph)
cur_graph_tensors = self.apply_graph_mask(graph_tensors, hgraph)
htree, hinter, hgraph = self.hmpn(cur_tree_tensors, cur_inter_tensors, cur_graph_tensors, htree, hinter, hgraph, subtree, subgraph)
new_atoms = []
for i in batch_list:
xid, yid, tlab = orders[i][t]
all_topo_preds.append( (htree.node[xid], i, tlab) ) #topology prediction
if yid is not None:
mess_idx = tree_batch[xid][yid]['mess_idx']
new_atoms.extend( tree_batch.nodes[yid]['cluster'] ) #NOTE: regardless of tlab = 0 or 1
if tlab == 0: continue
cls = tree_batch.nodes[yid]['smiles']
clab, ilab = self.vocab[ tree_batch.nodes[yid]['label'] ]
mess_idx = tree_batch[xid][yid]['mess_idx']
hmess = self.rnn_cell.get_hidden_state(htree.mess)
all_cls_preds.append( (hmess[mess_idx], i, clab, ilab) ) #cluster prediction using message
inter_label = tree_batch.nodes[yid]['inter_label']
inter_label = [ (pos, self.vocab[(cls, icls)][1]) for pos,icls in inter_label ]
inter_size = self.vocab.get_inter_size(ilab)
if len(tree_batch.nodes[xid]['cluster']) > 2: #uncertainty occurs only when previous cluster is a ring
nth_child = tree_batch[yid][xid]['label'] #must be yid -> xid (graph order labeling is different from tree)
cands = tree_batch.nodes[yid]['assm_cands']
icls = list(zip(*inter_label))[1]
cand_vecs = self.enum_attach(hgraph, cands, icls, nth_child)
if len(cand_vecs) < max_cls_size:
pad_len = max_cls_size - len(cand_vecs)
cand_vecs = F.pad(cand_vecs, (0,0,0,pad_len))
batch_idx = hgraph.emask.new_tensor( [i] * max_cls_size )
all_assm_preds.append( (cand_vecs, batch_idx, 0) ) #the label is always the first of assm_cands
subgraph = self.update_graph_mask(graph_batch, new_atoms, hgraph)
topo_vecs, batch_idx, topo_labels = zip_tensors(all_topo_preds)
topo_scores = self.get_topo_score(src_tree_vecs, batch_idx, topo_vecs)
topo_loss = self.topo_loss(topo_scores, topo_labels.float())
topo_acc = get_accuracy_bin(topo_scores, topo_labels)
cls_vecs, batch_idx, cls_labs, icls_labs = zip_tensors(all_cls_preds)
cls_scores, icls_scores = self.get_cls_score(src_tree_vecs, batch_idx, cls_vecs, cls_labs)
cls_loss = self.cls_loss(cls_scores, cls_labs) + self.icls_loss(icls_scores, icls_labs)
cls_acc = get_accuracy(cls_scores, cls_labs)
icls_acc = get_accuracy(icls_scores, icls_labs)
if len(all_assm_preds) > 0:
assm_vecs, batch_idx, assm_labels = zip_tensors(all_assm_preds)
assm_scores = self.get_assm_score(src_graph_vecs, batch_idx, assm_vecs)
assm_loss = self.assm_loss(assm_scores, assm_labels)
assm_acc = get_accuracy_sym(assm_scores, assm_labels)
else:
assm_loss, assm_acc = 0, 1
loss = (topo_loss + cls_loss + assm_loss) / batch_size
return loss, cls_acc, icls_acc, topo_acc, assm_acc
def enum_attach(self, hgraph, cands, icls, nth_child):
cands = self.itensor.new_tensor(cands)
icls_vecs = self.itensor.new_tensor(icls * len(cands))
icls_vecs = self.E_assm( icls_vecs )
nth_child = self.itensor.new_tensor([nth_child] * len(cands.view(-1)))
order_vecs = self.E_order.index_select(0, nth_child)
cand_vecs = hgraph.node.index_select(0, cands.view(-1))
cand_vecs = torch.cat( [cand_vecs, icls_vecs, order_vecs], dim=-1 )
cand_vecs = self.matchNN(cand_vecs)
if len(icls) == 2:
cand_vecs = cand_vecs.view(-1, 2, self.hidden_size).sum(dim=1)
return cand_vecs
def decode(self, src_mol_vecs, greedy=True, max_decode_step=100, beam=5):
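        # Autoregressive decoding: keep a DFS stack per batch item; each step
        # predicts whether to expand or backtrack (topology), which cluster to
        # attach next (class), and how to attach it to the parent (assembly).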
src_root_vecs, src_tree_vecs, src_graph_vecs = src_mol_vecs
batch_size = len(src_root_vecs)
tree_batch = IncTree(batch_size, node_fdim=2, edge_fdim=3)
graph_batch = IncGraph(self.avocab, batch_size, node_fdim=self.hmpn.atom_size, edge_fdim=self.hmpn.atom_size + self.hmpn.bond_size)
stack = [[] for i in range(batch_size)]
init_vecs = src_root_vecs if self.latent_size == self.hidden_size else self.W_root(src_root_vecs)
batch_idx = self.itensor.new_tensor(range(batch_size))
cls_scores, icls_scores = self.get_cls_score(src_tree_vecs, batch_idx, init_vecs, None)
root_cls = cls_scores.max(dim=-1)[1]
icls_scores = icls_scores + self.vocab.get_mask(root_cls)
root_cls, root_icls = root_cls.tolist(), icls_scores.max(dim=-1)[1].tolist()
super_root = tree_batch.add_node()
for bid in range(batch_size):
clab, ilab = root_cls[bid], root_icls[bid]
root_idx = tree_batch.add_node( batch_idx.new_tensor([clab, ilab]) )
tree_batch.add_edge(super_root, root_idx)
stack[bid].append(root_idx)
root_smiles = self.vocab.get_ismiles(ilab)
new_atoms, new_bonds, attached = graph_batch.add_mol(bid, root_smiles, [], 0)
tree_batch.register_cgraph(root_idx, new_atoms, new_bonds, attached)
#invariance: tree_tensors is equal to inter_tensors (but inter_tensor's init_vec is 0)
tree_tensors = tree_batch.get_tensors()
graph_tensors = graph_batch.get_tensors()
htree = HTuple( mess = self.rnn_cell.get_init_state(tree_tensors[1]) )
hinter = HTuple( mess = self.rnn_cell.get_init_state(tree_tensors[1]) )
hgraph = HTuple( mess = self.rnn_cell.get_init_state(graph_tensors[1]) )
h = self.rnn_cell.get_hidden_state(htree.mess)
h[1 : batch_size + 1] = init_vecs #wiring root (only for tree, not inter)
for t in range(max_decode_step):
batch_list = [ bid for bid in range(batch_size) if len(stack[bid]) > 0 ]
if len(batch_list) == 0: break
batch_idx = batch_idx.new_tensor(batch_list)
cur_tree_nodes = [stack[bid][-1] for bid in batch_list]
subtree = batch_idx.new_tensor(cur_tree_nodes), batch_idx.new_tensor([])
subgraph = batch_idx.new_tensor( tree_batch.get_cluster_nodes(cur_tree_nodes) ), batch_idx.new_tensor( tree_batch.get_cluster_edges(cur_tree_nodes) )
htree, hinter, hgraph = self.hmpn(tree_tensors, tree_tensors, graph_tensors, htree, hinter, hgraph, subtree, subgraph)
topo_scores = self.get_topo_score(src_tree_vecs, batch_idx, htree.node.index_select(0, subtree[0]))
topo_scores = torch.sigmoid(topo_scores)
if greedy:
topo_preds = topo_scores.tolist()
else:
topo_preds = torch.bernoulli(topo_scores).tolist()
new_mess = []
expand_list = []
for i,bid in enumerate(batch_list):
if topo_preds[i] > 0.5 and tree_batch.can_expand(stack[bid][-1]):
expand_list.append( (len(new_mess), bid) )
new_node = tree_batch.add_node() #new node label is yet to be predicted
edge_feature = batch_idx.new_tensor( [stack[bid][-1], new_node, 0] ) #parent to child is 0
new_edge = tree_batch.add_edge(stack[bid][-1], new_node, edge_feature)
stack[bid].append(new_node)
new_mess.append(new_edge)
else:
child = stack[bid].pop()
if len(stack[bid]) > 0:
nth_child = tree_batch.graph.in_degree(stack[bid][-1]) #edge child -> father has not established
edge_feature = batch_idx.new_tensor( [child, stack[bid][-1], nth_child] )
new_edge = tree_batch.add_edge(child, stack[bid][-1], edge_feature)
new_mess.append(new_edge)
subtree = subtree[0], batch_idx.new_tensor(new_mess)
subgraph = [], []
htree, hinter, hgraph = self.hmpn(tree_tensors, tree_tensors, graph_tensors, htree, hinter, hgraph, subtree, subgraph)
cur_mess = self.rnn_cell.get_hidden_state(htree.mess).index_select(0, subtree[1])
if len(expand_list) > 0:
idx_in_mess, expand_list = zip(*expand_list)
idx_in_mess = batch_idx.new_tensor( idx_in_mess )
expand_idx = batch_idx.new_tensor( expand_list )
forward_mess = cur_mess.index_select(0, idx_in_mess)
cls_scores, icls_scores = self.get_cls_score(src_tree_vecs, expand_idx, forward_mess, None)
scores, cls_topk, icls_topk = hier_topk(cls_scores, icls_scores, self.vocab, beam)
if not greedy:
scores = torch.exp(scores) #score is output of log_softmax
shuf_idx = torch.multinomial(scores, beam, replacement=True).tolist()
for i,bid in enumerate(expand_list):
new_node, fa_node = stack[bid][-1], stack[bid][-2]
success = False
cls_beam = range(beam) if greedy else shuf_idx[i]
for kk in cls_beam: #try until one is chemically valid
if success: break
clab, ilab = cls_topk[i][kk], icls_topk[i][kk]
node_feature = batch_idx.new_tensor( [clab, ilab] )
tree_batch.set_node_feature(new_node, node_feature)
smiles, ismiles = self.vocab.get_smiles(clab), self.vocab.get_ismiles(ilab)
fa_cluster, _, fa_used = tree_batch.get_cluster(fa_node)
inter_cands, anchor_smiles, attach_points = graph_batch.get_assm_cands(fa_cluster, fa_used, ismiles)
if len(inter_cands) == 0:
continue
elif len(inter_cands) == 1:
sorted_cands = [(inter_cands[0], 0)]
nth_child = 0
else:
nth_child = tree_batch.graph.in_degree(fa_node)
icls = [self.vocab[ (smiles,x) ][1] for x in anchor_smiles]
cands = inter_cands if len(attach_points) <= 2 else [ (x[0],x[-1]) for x in inter_cands ]
cand_vecs = self.enum_attach(hgraph, cands, icls, nth_child)
batch_idx = batch_idx.new_tensor( [bid] * len(inter_cands) )
assm_scores = self.get_assm_score(src_graph_vecs, batch_idx, cand_vecs).tolist()
sorted_cands = sorted( list(zip(inter_cands, assm_scores)), key = lambda x:x[1], reverse=True )
for inter_label,_ in sorted_cands:
inter_label = list(zip(inter_label, attach_points))
if graph_batch.try_add_mol(bid, ismiles, inter_label):
new_atoms, new_bonds, attached = graph_batch.add_mol(bid, ismiles, inter_label, nth_child)
tree_batch.register_cgraph(new_node, new_atoms, new_bonds, attached)
tree_batch.update_attached(fa_node, inter_label)
success = True
break
if not success: #force backtrack
child = stack[bid].pop() #pop the dummy new_node which can't be added
nth_child = tree_batch.graph.in_degree(stack[bid][-1])
edge_feature = batch_idx.new_tensor( [child, stack[bid][-1], nth_child] )
new_edge = tree_batch.add_edge(child, stack[bid][-1], edge_feature)
child = stack[bid].pop()
if len(stack[bid]) > 0:
nth_child = tree_batch.graph.in_degree(stack[bid][-1])
edge_feature = batch_idx.new_tensor( [child, stack[bid][-1], nth_child] )
new_edge = tree_batch.add_edge(child, stack[bid][-1], edge_feature)
return graph_batch.get_mol()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import numpy as np
import numpy.testing as npt
import six
from caffe2.python import core, workspace
from ml.rl.caffe_utils import C2
from ml.rl.preprocessing import identify_types, normalization
from ml.rl.preprocessing.identify_types import BOXCOX, CONTINUOUS, ENUM
from ml.rl.preprocessing.normalization import (
NormalizationParameters,
sort_features_by_normalization,
)
from ml.rl.preprocessing.preprocessor_net import PreprocessorNet
from ml.rl.test.preprocessing_util import (
BOXCOX_FEATURE_ID,
ENUM_FEATURE_ID,
PROBABILITY_FEATURE_ID,
id_to_type,
read_data,
)
from ml.rl.test.utils import NumpyFeatureProcessor
from scipy import special
class TestNormalization(unittest.TestCase):
def _feature_type_override(self, feature_id):
"""
This should only be used to test CONTINUOUS_ACTION
"""
if id_to_type(feature_id) == identify_types.CONTINUOUS_ACTION:
return identify_types.CONTINUOUS_ACTION
return None
def test_prepare_normalization_and_normalize(self):
feature_value_map = read_data()
normalization_parameters = {}
for name, values in feature_value_map.items():
normalization_parameters[name] = normalization.identify_parameter(
values, 10, feature_type=self._feature_type_override(name)
)
for k, v in normalization_parameters.items():
if id_to_type(k) == CONTINUOUS:
self.assertEqual(v.feature_type, CONTINUOUS)
self.assertIs(v.boxcox_lambda, None)
self.assertIs(v.boxcox_shift, None)
elif id_to_type(k) == BOXCOX:
self.assertEqual(v.feature_type, BOXCOX)
self.assertIsNot(v.boxcox_lambda, None)
self.assertIsNot(v.boxcox_shift, None)
else:
assert v.feature_type == id_to_type(k)
sorted_features, _ = sort_features_by_normalization(normalization_parameters)
norm_net = core.Net("net")
C2.set_net(norm_net)
preprocessor = PreprocessorNet()
input_matrix = np.zeros([10000, len(sorted_features)], dtype=np.float32)
for i, feature in enumerate(sorted_features):
input_matrix[:, i] = feature_value_map[feature]
input_matrix_blob = "input_matrix_blob"
workspace.FeedBlob(input_matrix_blob, np.array([], dtype=np.float32))
output_blob, _ = preprocessor.normalize_dense_matrix(
input_matrix_blob, sorted_features, normalization_parameters, "", False
)
workspace.FeedBlob(input_matrix_blob, input_matrix)
workspace.RunNetOnce(norm_net)
normalized_feature_matrix = workspace.FetchBlob(output_blob)
normalized_features = {}
on_column = 0
for feature in sorted_features:
norm = normalization_parameters[feature]
if norm.feature_type == ENUM:
column_size = len(norm.possible_values)
else:
column_size = 1
normalized_features[feature] = normalized_feature_matrix[
:, on_column : (on_column + column_size)
]
on_column += column_size
self.assertTrue(
all(
[
np.isfinite(parameter.stddev) and np.isfinite(parameter.mean)
for parameter in normalization_parameters.values()
]
)
)
for k, v in six.iteritems(normalized_features):
self.assertTrue(np.all(np.isfinite(v)))
feature_type = normalization_parameters[k].feature_type
if feature_type == identify_types.PROBABILITY:
sigmoidv = special.expit(v)
self.assertTrue(
np.all(
np.logical_and(np.greater(sigmoidv, 0), np.less(sigmoidv, 1))
)
)
elif feature_type == identify_types.ENUM:
possible_values = normalization_parameters[k].possible_values
self.assertEqual(v.shape[0], len(feature_value_map[k]))
self.assertEqual(v.shape[1], len(possible_values))
possible_value_map = {}
for i, possible_value in enumerate(possible_values):
possible_value_map[possible_value] = i
for i, row in enumerate(v):
original_feature = feature_value_map[k][i]
self.assertEqual(
possible_value_map[original_feature], np.where(row == 1)[0][0]
)
elif feature_type == identify_types.QUANTILE:
for i, feature in enumerate(v[0]):
original_feature = feature_value_map[k][i]
expected = NumpyFeatureProcessor.value_to_quantile(
original_feature, normalization_parameters[k].quantiles
)
self.assertAlmostEqual(feature, expected, 2)
elif feature_type == identify_types.BINARY:
pass
elif (
feature_type == identify_types.CONTINUOUS
or feature_type == identify_types.BOXCOX
):
one_stddev = np.isclose(np.std(v, ddof=1), 1, atol=0.01)
zero_stddev = np.isclose(np.std(v, ddof=1), 0, atol=0.01)
zero_mean = np.isclose(np.mean(v), 0, atol=0.01)
self.assertTrue(
np.all(zero_mean),
"mean of feature {} is {}, not 0".format(k, np.mean(v)),
)
self.assertTrue(np.all(np.logical_or(one_stddev, zero_stddev)))
elif feature_type == identify_types.CONTINUOUS_ACTION:
less_than_max = v < 1
more_than_min = v > -1
self.assertTrue(
np.all(less_than_max),
"values are not less than 1: {}".format(v[less_than_max == False]),
)
self.assertTrue(
np.all(more_than_min),
"values are not more than -1: {}".format(v[more_than_min == False]),
)
else:
raise NotImplementedError()
def test_normalize_dense_matrix_enum(self):
normalization_parameters = {
1: NormalizationParameters(
identify_types.ENUM,
None,
None,
None,
None,
[12, 4, 2],
None,
None,
None,
),
2: NormalizationParameters(
identify_types.CONTINUOUS, None, 0, 0, 1, None, None, None, None
),
3: NormalizationParameters(
identify_types.ENUM, None, None, None, None, [15, 3], None, None, None
),
}
norm_net = core.Net("net")
C2.set_net(norm_net)
preprocessor = PreprocessorNet()
inputs = np.zeros([4, 3], dtype=np.float32)
feature_ids = [2, 1, 3] # Sorted according to feature type
inputs[:, feature_ids.index(1)] = [12, 4, 2, 2]
inputs[:, feature_ids.index(2)] = [1.0, 2.0, 3.0, 3.0]
inputs[:, feature_ids.index(3)] = [15, 3, 15, normalization.MISSING_VALUE]
input_blob = C2.NextBlob("input_blob")
workspace.FeedBlob(input_blob, np.array([0], dtype=np.float32))
normalized_output_blob, _ = preprocessor.normalize_dense_matrix(
input_blob, feature_ids, normalization_parameters, "", False
)
workspace.FeedBlob(input_blob, inputs)
workspace.RunNetOnce(norm_net)
normalized_feature_matrix = workspace.FetchBlob(normalized_output_blob)
np.testing.assert_allclose(
np.array(
[
[1.0, 1, 0, 0, 1, 0],
[2.0, 0, 1, 0, 0, 1],
[3.0, 0, 0, 1, 1, 0],
[3.0, 0, 0, 1, 0, 0], # Missing values should go to all 0
]
),
normalized_feature_matrix,
)
def test_persistency(self):
feature_value_map = read_data()
normalization_parameters = {}
for name, values in feature_value_map.items():
normalization_parameters[name] = normalization.identify_parameter(
values, feature_type=self._feature_type_override(name)
)
s = normalization.serialize(normalization_parameters)
read_parameters = normalization.deserialize(s)
        # Unfortunately, Thrift serialization seems to lose a bit of precision,
        # so comparing the parameters directly with `==` would be False.
self.assertEqual(read_parameters.keys(), normalization_parameters.keys())
for k in normalization_parameters:
self.assertEqual(
read_parameters[k].feature_type,
normalization_parameters[k].feature_type,
)
self.assertEqual(
read_parameters[k].possible_values,
normalization_parameters[k].possible_values,
)
for field in [
"boxcox_lambda",
"boxcox_shift",
"mean",
"stddev",
"quantiles",
"min_value",
"max_value",
]:
if getattr(normalization_parameters[k], field) is None:
self.assertEqual(
getattr(read_parameters[k], field),
getattr(normalization_parameters[k], field),
)
else:
npt.assert_allclose(
getattr(read_parameters[k], field),
getattr(normalization_parameters[k], field),
)
def test_preprocessing_network(self):
feature_value_map = read_data()
normalization_parameters = {}
for name, values in feature_value_map.items():
normalization_parameters[name] = normalization.identify_parameter(
values, feature_type=self._feature_type_override(name)
)
test_features = NumpyFeatureProcessor.preprocess(
feature_value_map, normalization_parameters
)
net = core.Net("PreprocessingTestNet")
C2.set_net(net)
preprocessor = PreprocessorNet()
name_preprocessed_blob_map = {}
for feature_name in feature_value_map:
workspace.FeedBlob(str(feature_name), np.array([0], dtype=np.int32))
preprocessed_blob, _ = preprocessor.preprocess_blob(
str(feature_name), [normalization_parameters[feature_name]]
)
name_preprocessed_blob_map[feature_name] = preprocessed_blob
workspace.CreateNet(net)
for feature_name, feature_value in six.iteritems(feature_value_map):
feature_value = np.expand_dims(feature_value, -1)
workspace.FeedBlob(str(feature_name), feature_value)
workspace.RunNetOnce(net)
for feature_name in feature_value_map:
normalized_features = workspace.FetchBlob(
name_preprocessed_blob_map[feature_name]
)
if feature_name != ENUM_FEATURE_ID:
normalized_features = np.squeeze(normalized_features, -1)
tolerance = 0.01
if feature_name == BOXCOX_FEATURE_ID:
# At the limit, boxcox has some numerical instability
tolerance = 0.5
non_matching = np.where(
np.logical_not(
np.isclose(
normalized_features,
test_features[feature_name],
rtol=tolerance,
atol=tolerance,
)
)
)
self.assertTrue(
np.all(
np.isclose(
normalized_features,
test_features[feature_name],
rtol=tolerance,
atol=tolerance,
)
),
"{} does not match: {} {}".format(
feature_name,
normalized_features[non_matching].tolist(),
test_features[feature_name][non_matching].tolist(),
),
)
def test_type_override(self):
# Take a feature that should be identified as probability
feature_value_map = read_data()
probability_values = feature_value_map[PROBABILITY_FEATURE_ID]
# And ask for a binary anyways
parameter = normalization.identify_parameter(
probability_values, feature_type=identify_types.BINARY
)
self.assertEqual(parameter.feature_type, "BINARY")
|
import speech_recognition as sr
r=sr.Recognizer()
with sr.Microphone() as source:
print("Say Something")
    audio = r.listen(source)  # fixed: was `sudio`, which broke the recognize_google(audio) call below
print("Time over")
try:
print("Text: "+r.recognize_google(audio))
except sr.UnknownValueError:
    print("Could not understand the audio")
except sr.RequestError as e:
    # recognize_google calls Google's web speech API, so network errors are possible
    print("Recognition request failed: {}".format(e))
|
from tkinter import *
win = Tk()
sb = Spinbox(win, from_=0, to=10)
sb.pack()
win.mainloop()
|
import citysim3d.envs
from visual_dynamics.envs import Env
class ServoingEnv(citysim3d.envs.SimpleQuadPanda3dServoingEnv, Env):
def _get_config(self):
config = super(ServoingEnv, self)._get_config()
config.update({'env': self.env,
'max_time_steps': self.max_time_steps,
'distance_threshold': self.distance_threshold})
return config
# class ServoingEnv(citysim3d.envs.ServoingEnv, Env):
# def _get_config(self):
# config = super(ServoingEnv, self)._get_config()
# config.update({'env': self.env})
# return config
#
#
# class SimpleQuadPanda3dServoingEnv(citysim3d.envs.SimpleQuadPanda3dServoingEnv, ServoingEnv):
# def _get_config(self):
# config = super(SimpleQuadPanda3dServoingEnv, self)._get_config()
# config.update({'env': self.env,
# 'max_time_steps': self.max_time_steps,
# 'distance_threshold': self.distance_threshold})
# return config
|
from .dynamic_iterbased_runner import DynamicIterBasedRunner
__all__ = ['DynamicIterBasedRunner']
|
#Django Imports
from django.conf import settings
#Python Imports
import requests, os
#Local Imports
from .at_utils import AfricasTalkingException
#Import Afica's Talking Settings
AFRICAS_TALKING_SETTINGS = getattr(settings,'AFRICAS_TALKING',{})
API_KEY = AFRICAS_TALKING_SETTINGS.get('API_KEY',None)
USERNAME = AFRICAS_TALKING_SETTINGS.get('USERNAME',None)
SHORTCODE = AFRICAS_TALKING_SETTINGS.get('SHORTCODE',None)
AFRICAS_TALKING_SEND = AFRICAS_TALKING_SETTINGS.get('SEND',False)
AFRICAS_TALKING_API_BASE = 'http://api.africastalking.com/version1'
HEADERS = {'Accept': 'application/json','apikey':API_KEY}
PARAMS = {'username':USERNAME,'bulkSMSMode':1}
if SHORTCODE:
PARAMS['from'] = SHORTCODE
def send_raw(to,message):
if not AFRICAS_TALKING_SEND:
raise AfricasTalkingException("Africas Talking called when send not set to True")
if API_KEY is None:
raise AfricasTalkingException('AFRICAS_TALKING var has not set API_KEY')
if USERNAME is None:
raise AfricasTalkingException('AFRICAS_TALKING var has not set a USERNAME')
params = {'to':to,'message':message}
params.update(PARAMS)
send_url = os.path.join(AFRICAS_TALKING_API_BASE,'messaging')
post = requests.post(send_url,data=params,headers=HEADERS)
#Raise requests.exceptions.HTTPError if 4XX or 5XX
post.raise_for_status()
return post.json()
def send(to,message):
data = send_raw(to,message)
'''
Example of JSON Response
{u'SMSMessageData':
{u'Message': u'Sent to 1/1 Total Cost: USD 0.0109',
u'Recipients': [{
u'status': u'Success', #u'status': u'Invalid Phone Number',
u'cost': u'KES 1.0000',
u'number': u'+254708054321',
u'messageId': u'ATXid_b50fada5b1af078f2277cacb58ef2447'
}]
}
}
'''
# Return tuple (messageId, messageSuccess, extra_data)
recipients = data['SMSMessageData']['Recipients']
if len(recipients) == 1:
msg_id = recipients[0]['messageId']
msg_success = recipients[0]['status'] == 'Success'
return msg_id, msg_success, {'status':recipients[0]['status']}
def balance():
if API_KEY is None:
raise AfricasTalkingException('AFRICAS_TALKING var has not set API_KEY')
if USERNAME is None:
raise AfricasTalkingException('AFRICAS_TALKING var has not set a USERNAME')
params = {'username':USERNAME}
send_url = os.path.join(AFRICAS_TALKING_API_BASE,'user')
post = requests.get(send_url,params=params,headers=HEADERS)
#Raise requests.exceptions.HTTPError if 4XX or 5XX
post.raise_for_status()
data = post.json()
return data['UserData']['balance']
def fetch(last_received_id=0):
if API_KEY is None:
raise AfricasTalkingException('AFRICAS_TALKING var has not set API_KEY')
if USERNAME is None:
raise AfricasTalkingException('AFRICAS_TALKING var has not set a USERNAME')
params = {'username':USERNAME,'lastReceivedId':last_received_id}
send_url = os.path.join(AFRICAS_TALKING_API_BASE,'messaging')
post = requests.get(send_url,params=params,headers=HEADERS)
return post
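# A minimal usage sketch (commented; assumes AFRICAS_TALKING is configured in
# Django settings with SEND=True, API_KEY and USERNAME; the number below is
# hypothetical):
#
# msg_id, ok, extra = send('+254700000000', 'Test message')
# if not ok:
#     print('Send failed with status: %s' % extra['status'])
# print('Account balance: %s' % balance())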
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os,sys,traceback,base64,signal
try:
import cPickle
except ImportError:
import pickle as cPickle
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
try:
TimeoutExpired=subprocess.TimeoutExpired
except AttributeError:
class TimeoutExpired(Exception):
pass
def run():
txt=sys.stdin.readline().strip()
if not txt:
sys.exit(1)
[cmd,kwargs,cargs]=cPickle.loads(base64.b64decode(txt))
cargs=cargs or{}
if not'close_fds'in kwargs:
kwargs['close_fds']=False
ret=1
out,err,ex,trace=(None,None,None,None)
try:
proc=subprocess.Popen(cmd,**kwargs)
try:
out,err=proc.communicate(**cargs)
except TimeoutExpired:
if kwargs.get('start_new_session')and hasattr(os,'killpg'):
os.killpg(proc.pid,signal.SIGKILL)
else:
proc.kill()
out,err=proc.communicate()
exc=TimeoutExpired(proc.args,timeout=cargs['timeout'],output=out)
exc.stderr=err
raise exc
ret=proc.returncode
except Exception as e:
exc_type,exc_value,tb=sys.exc_info()
exc_lines=traceback.format_exception(exc_type,exc_value,tb)
trace=str(cmd)+'\n'+''.join(exc_lines)
ex=e.__class__.__name__
tmp=[ret,out,err,ex,trace]
obj=base64.b64encode(cPickle.dumps(tmp))
sys.stdout.write(obj.decode())
sys.stdout.write('\n')
sys.stdout.flush()
while 1:
try:
run()
except KeyboardInterrupt:
break
|
# -*- coding: utf-8 -*-
"""
Class and program to colorize python source code for ANSI terminals.
Based on an HTML code highlighter by Jurgen Hermann found at:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52298
Modifications by Fernando Perez (fperez@colorado.edu).
Information on the original HTML highlighter follows:
MoinMoin - Python Source Parser
Title: Colorize Python source using the built-in tokenizer
Submitter: Jurgen Hermann
Last Updated:2001/04/06
Version no:1.2
Description:
This code is part of MoinMoin (http://moin.sourceforge.net/) and converts
Python source code to HTML markup, rendering comments, keywords,
operators, numeric and string literals in different colors.
It shows how to use the built-in keyword, token and tokenize modules to
scan Python source code and re-emit it with no changes to its original
formatting (which is the hard part).
"""
from __future__ import print_function
from __future__ import unicode_literals
__all__ = ['ANSICodeColors','Parser']
_scheme_default = 'Linux'
# Imports
import StringIO
import keyword
import os
import optparse
import sys
import token
import tokenize
try:
generate_tokens = tokenize.generate_tokens
except AttributeError:
# Python 3. Note that we use the undocumented _tokenize because it expects
# strings, not bytes. See also Python issue #9969.
generate_tokens = tokenize._tokenize
from IPython.utils.coloransi import *
#############################################################################
### Python Source Parser (does Hilighting)
#############################################################################
_KEYWORD = token.NT_OFFSET + 1
_TEXT = token.NT_OFFSET + 2
#****************************************************************************
# Builtin color schemes
Colors = TermColors # just a shorthand
# Build a few color schemes
NoColor = ColorScheme(
'NoColor',{
token.NUMBER : Colors.NoColor,
token.OP : Colors.NoColor,
token.STRING : Colors.NoColor,
tokenize.COMMENT : Colors.NoColor,
token.NAME : Colors.NoColor,
token.ERRORTOKEN : Colors.NoColor,
_KEYWORD : Colors.NoColor,
_TEXT : Colors.NoColor,
'normal' : Colors.NoColor # color off (usu. Colors.Normal)
} )
LinuxColors = ColorScheme(
'Linux',{
token.NUMBER : Colors.LightCyan,
token.OP : Colors.Yellow,
token.STRING : Colors.LightBlue,
tokenize.COMMENT : Colors.LightRed,
token.NAME : Colors.Normal,
token.ERRORTOKEN : Colors.Red,
_KEYWORD : Colors.LightGreen,
_TEXT : Colors.Yellow,
'normal' : Colors.Normal # color off (usu. Colors.Normal)
} )
LightBGColors = ColorScheme(
'LightBG',{
token.NUMBER : Colors.Cyan,
token.OP : Colors.Blue,
token.STRING : Colors.Blue,
tokenize.COMMENT : Colors.Red,
token.NAME : Colors.Normal,
token.ERRORTOKEN : Colors.Red,
_KEYWORD : Colors.Green,
_TEXT : Colors.Blue,
'normal' : Colors.Normal # color off (usu. Colors.Normal)
} )
# Build table of color schemes (needed by the parser)
ANSICodeColors = ColorSchemeTable([NoColor,LinuxColors,LightBGColors],
_scheme_default)
class Parser:
""" Format colored Python source.
"""
def __init__(self, color_table=None,out = sys.stdout):
""" Create a parser with a specified color table and output channel.
Call format() to process code.
"""
self.color_table = color_table and color_table or ANSICodeColors
self.out = out
def format(self, raw, out = None, scheme = ''):
return self.format2(raw, out, scheme)[0]
def format2(self, raw, out = None, scheme = ''):
""" Parse and send the colored source.
If out and scheme are not specified, the defaults (given to
constructor) are used.
out should be a file-type object. Optionally, out can be given as the
string 'str' and the parser will automatically return the output in a
string."""
string_output = 0
if out == 'str' or self.out == 'str' or \
isinstance(self.out,StringIO.StringIO):
# XXX - I don't really like this state handling logic, but at this
# point I don't want to make major changes, so adding the
# isinstance() check is the simplest I can do to ensure correct
# behavior.
out_old = self.out
self.out = StringIO.StringIO()
string_output = 1
elif out is not None:
self.out = out
# Fast return of the unmodified input for NoColor scheme
if scheme == 'NoColor':
error = False
self.out.write(raw)
if string_output:
return raw,error
else:
return None,error
# local shorthands
colors = self.color_table[scheme].colors
self.colors = colors # put in object so __call__ sees it
# Remove trailing whitespace and normalize tabs
self.raw = raw.expandtabs().rstrip()
# store line offsets in self.lines
self.lines = [0, 0]
pos = 0
raw_find = self.raw.find
lines_append = self.lines.append
while 1:
pos = raw_find('\n', pos) + 1
if not pos: break
lines_append(pos)
lines_append(len(self.raw))
# parse the source and write it
self.pos = 0
text = StringIO.StringIO(self.raw)
error = False
try:
for atoken in generate_tokens(text.readline):
self(*atoken)
except tokenize.TokenError as ex:
msg = ex.args[0]
line = ex.args[1][0]
self.out.write("%s\n\n*** ERROR: %s%s%s\n" %
(colors[token.ERRORTOKEN],
msg, self.raw[self.lines[line]:],
colors.normal)
)
error = True
self.out.write(colors.normal+'\n')
if string_output:
output = self.out.getvalue()
self.out = out_old
return (output, error)
return (None, error)
def __call__(self, toktype, toktext, start_pos, end_pos, line):
""" Token handler, with syntax highlighting."""
(srow,scol) = start_pos
(erow,ecol) = end_pos
colors = self.colors
owrite = self.out.write
# line separator, so this works across platforms
linesep = os.linesep
# calculate new positions
oldpos = self.pos
newpos = self.lines[srow] + scol
self.pos = newpos + len(toktext)
# send the original whitespace, if needed
if newpos > oldpos:
owrite(self.raw[oldpos:newpos])
# skip indenting tokens
if toktype in [token.INDENT, token.DEDENT]:
self.pos = newpos
return
# map token type to a color group
if token.LPAR <= toktype and toktype <= token.OP:
toktype = token.OP
elif toktype == token.NAME and keyword.iskeyword(toktext):
toktype = _KEYWORD
color = colors.get(toktype, colors[_TEXT])
#print '<%s>' % toktext, # dbg
# Triple quoted strings must be handled carefully so that backtracking
# in pagers works correctly. We need color terminators on _each_ line.
if linesep in toktext:
toktext = toktext.replace(linesep, '%s%s%s' %
(colors.normal,linesep,color))
# send text
owrite('%s%s%s' % (color,toktext,colors.normal))
def main(argv=None):
"""Run as a command-line script: colorize a python file or stdin using ANSI
color escapes and print to stdout.
Inputs:
- argv(None): a list of strings like sys.argv[1:] giving the command-line
arguments. If None, use sys.argv[1:].
"""
usage_msg = """%prog [options] [filename]
Colorize a python file or stdin using ANSI color escapes and print to stdout.
If no filename is given, or if filename is -, read standard input."""
parser = optparse.OptionParser(usage=usage_msg)
newopt = parser.add_option
newopt('-s','--scheme',metavar='NAME',dest='scheme_name',action='store',
choices=['Linux','LightBG','NoColor'],default=_scheme_default,
help="give the color scheme to use. Currently only 'Linux'\
(default) and 'LightBG' and 'NoColor' are implemented (give without\
quotes)")
opts,args = parser.parse_args(argv)
if len(args) > 1:
parser.error("you must give at most one filename.")
if len(args) == 0:
fname = '-' # no filename given; setup to read from stdin
else:
fname = args[0]
if fname == '-':
stream = sys.stdin
else:
try:
stream = open(fname)
except IOError as msg:
print(msg, file=sys.stderr)
sys.exit(1)
parser = Parser()
# we need nested try blocks because pre-2.5 python doesn't support unified
# try-except-finally
try:
try:
# write colorized version to stdout
parser.format(stream.read(),scheme=opts.scheme_name)
except IOError as msg:
# if user reads through a pager and quits, don't print traceback
if msg.args != (32,'Broken pipe'):
raise
finally:
if stream is not sys.stdin:
stream.close() # in case a non-handled exception happened above
if __name__ == "__main__":
main()
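# A minimal library-usage sketch (commented): format2 can return the colorized
# text as a string instead of writing to stdout by passing out='str'.
#
# p = Parser()
# colored, had_error = p.format2("print 'hi'", out='str', scheme='Linux')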
|
# Last Updated: 2.2
from datetime import datetime
from util.diagMessage import DiagMessage
debug = False  # module-level flag enabling verbose output in Logger._write
# Logger class
# Buffers and writes messages to a file
class Logger:
BUFFER_MAX = 10
DEFAULT_FN = "../log.txt"
# Constructor for logger class
# Params: fn - file name to use or leave default
# log - flag to keep a log file or not
# Return: Logger instance
def __init__(self, fn = DEFAULT_FN, log = True):
#{{{
self.keep_log = log
self.fn = fn
self.log_buffer = []
if self.keep_log:
self.log(DiagMessage("LOG0000I"))
#}}}
# Append line to internal log buffer, flush if needed
# Params: diag - DiagMessage to log
# flush - bool flag for flushing buffer early
# Return: None
def log(self, diag, flush=False):
#{{{
if self.keep_log:
self.log_buffer.append(str(datetime.now()) + " - " + diag.msg)
if len(self.log_buffer) >= self.BUFFER_MAX or flush:
self._write()
elif not flush:
print(diag.msg)
#}}}
# Write contents of buffer out to file
# Params: None
# Return: None
def _write(self):
#{{{
print("Writing log...") if debug else None
with open(self.fn,'a') as logfile:
for line in self.log_buffer:
try:
logfile.write(line)
except TypeError:
logfile.write(str(datetime.now())+" - LOG ERR")
except UnicodeEncodeError:
logfile.write(str(line.encode("utf-8","replace")))
logfile.write("\n")
del self.log_buffer[:]
#}}}
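# A minimal usage sketch (commented; assumes DiagMessage("LOG0000I") builds a
# message object exposing a .msg string, as used above):
#
# logger = Logger(fn="../log.txt", log=True)
# logger.log(DiagMessage("LOG0000I"))               # buffered in memory
# logger.log(DiagMessage("LOG0000I"), flush=True)   # force a write to disk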
|
# Standard Library
from copy import deepcopy
# 3rd Party
# Internal
# ########################################################################### #
class MetaData (dict):
"""
A class for holding information about an object
"""
def __init__ (self,*args,**kwargs):
super(MetaData,self).__init__(*args,**kwargs)
def __repr__ (self):
reprout = 'MetaData {'
if len(self) == 0:
return reprout + "}"
reprout += "\n"
for key in self:
value = str(repr(self[key])).split("\n")
reprout += " "+str(key)+" : "
reprout += value[0].strip()+"\n"
if len(value) > 1:
reprout += " "*(len(key))+" ...\n"
reprout += "}\n"
return reprout
def __str__ (self):
return super(MetaData,self).__repr__()
def _type_check_other (self,other):
if not isinstance(other,dict):
raise TypeError("other must be a subclass of dict")
def __add__ (self,other):
return self.combine(other,key_conflicts='raise')
def __iadd__ (self,other):
self._type_check_other(other)
for key in other:
if key in self:
continue
self[key] = other[key]
return self
def combine (self,other,key_conflicts='ignore',return_=False):
"""
Combine two MetaData dictionaries together.
Parameters
----------
other : dict subclass
Any dictionary object will work including other MetaData Dictionaries
key_conflicts : 'ignore' (default), 'merge', 'warn', 'raise'
Defined the method to handle key conflicts
* ignore : if key is in conflict, keep the current key with no warning
* merge : convert key to string and add integers until unique key is found
* warn : print a warning message for key conflicts. Keep current key
* raise : raise error message for key conflicts.
return_ : boolean
If True then it will keep the data in place and return a copy with
with the concatenation
Returns
-------
info : MetaData
Returns an information object with keys and information
concatenated from the two
Raises
------
KeyError : If key_conflicts=='raise' is True and conflicts exist between two keys
Notes
-----
__1)__ If a key is in conflict but the data the key refers to is the same then
no messages or errors will be raised
Special cases
-------------
add operator : info1 + info2
This will raise errors for key conflicts between the two
iadd operator : info1 += info2
This will ignore key conflicts
and always takes info1 keys as default
"""
self._type_check_other(other)
def errmsg (key):
return "Warning: key conflict '"+str(key)+"'"
key_conflicts = key_conflicts.lower()
if return_:
out = self.copy()
else:
out = self
if key_conflicts=='merge':
for key in other:
if key in self and self[key]==other[key]:
continue
i = 0
base_key = deepcopy(key)
while key in self:
key = str(base_key)+"_"+str(i)
i += 1
out[key] = other[base_key]
return out
# else:
for key in other:
if key in self:
# if the data's the same don't worry about it
if self[key]==other[key]:
continue
# resolve conflicts
if key_conflicts=='raise':
raise KeyError(errmsg(key))
elif key_conflicts=='warn':
print(errmsg(key))
else:
continue
out[key] = other[key]
if return_:
return out
def copy (self):
return deepcopy(self)
def header_list(self):
"""returns a list of the values belonging to keys beginning header_ """
keys = list(self.keys())
headers = []
for key in keys:
try:
keystart = key[:7]
if keystart == "header_":
headers.append(self[key])
            except Exception:
pass
return headers
def guess_observation_time(self, headers=None):
        if headers is None:
headers = self.header_list()
obs_time = None
for hdr in headers:
try:
obs_time = hdr["ut"]
break
            except Exception:
pass
return obs_time
    def guess_airmass(self, headers=None):
        if headers is None:
headers = self.header_list()
airmass = None
for hdr in headers:
try:
airmass = hdr["airmass"]
break
            except Exception:
pass
return airmass
def guess_object_name(self):
return None
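# A minimal sketch of the key-conflict policies described in combine():
#
# a = MetaData({'ra': 1.0, 'object': 'HD 1234'})
# b = MetaData({'ra': 2.0, 'dec': -30.0})
# merged = a.combine(b, key_conflicts='merge', return_=True)
# # merged keeps a['ra'], adds 'dec', and stores b's 'ra' under 'ra_0'
# a + b  # raises KeyError: 'ra' is in conflict and the values differ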
|
Inc('dfaccto/util.py', abs=True)
class _Event(ModuleContext):
def __init__(self):
ModuleContext.__init__(self)
self._setup_packages()
def _setup_packages(self):
self.pkg = Pkg('dfaccto_event',
x_templates={self.File('generic/package.vhd.tpl'): self.File('pkg/dfaccto_event.vhd')})
with self.pkg:
self.tEvent = self.TypeEvent('Event')
def TypeEvent(self, name, stb_bits=None, ack_bits=None):
tlogic = Util.tlogic
if stb_bits is not None:
tsdata = Util.TypeUnsigned('{}Strb'.format(name), width=stb_bits)
else:
tsdata = None
if ack_bits is not None:
tadata = Util.TypeUnsigned('{}Ack'.format(name), width=ack_bits)
else:
tadata = None
return TypeC(name, x_is_event=True,
x_definition=self.Part('types/definition/event.part.tpl'),
x_format_ms=self.Part('types/format/event_ms.part.tpl'),
x_format_sm=self.Part('types/format/event_sm.part.tpl'),
x_wrapeport=self.Part('types/wrapeport/event.part.tpl'),
x_wrapeconv=self.Part('types/wrapeconv/event.part.tpl'),
x_wrapipmap=self.Part('types/wrapipmap/event.part.tpl'),
x_wrapigmap=None,
x_tlogic=tlogic, x_tsdata=tsdata, x_tadata=tadata,
x_cnull=lambda t: Con('{}Null'.format(name), t, value=Lit({'stb': False, 'ack': False})))
Event = _Event()
|
# -*- coding: utf-8 -*-
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: ps-license@tuebingen.mpg.de
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import torch
import torch.nn.functional as F
from .utils import rot_mat_to_euler
def find_dynamic_lmk_idx_and_bcoords(vertices, pose, dynamic_lmk_faces_idx,
dynamic_lmk_b_coords,
neck_kin_chain, dtype=torch.float32):
''' Compute the faces, barycentric coordinates for the dynamic landmarks
To do so, we first compute the rotation of the neck around the y-axis
and then use a pre-computed look-up table to find the faces and the
barycentric coordinates that will be used.
Special thanks to Soubhik Sanyal (soubhik.sanyal@tuebingen.mpg.de)
for providing the original TensorFlow implementation and for the LUT.
Parameters
----------
vertices: torch.tensor BxVx3, dtype = torch.float32
The tensor of input vertices
pose: torch.tensor Bx(Jx3), dtype = torch.float32
The current pose of the body model
dynamic_lmk_faces_idx: torch.tensor L, dtype = torch.long
The look-up table from neck rotation to faces
dynamic_lmk_b_coords: torch.tensor Lx3, dtype = torch.float32
The look-up table from neck rotation to barycentric coordinates
neck_kin_chain: list
A python list that contains the indices of the joints that form the
kinematic chain of the neck.
dtype: torch.dtype, optional
Returns
-------
dyn_lmk_faces_idx: torch.tensor, dtype = torch.long
A tensor of size BxL that contains the indices of the faces that
will be used to compute the current dynamic landmarks.
        dyn_lmk_b_coords: torch.tensor, dtype = torch.float32
            A tensor of size BxLx3 that contains the barycentric coordinates
            that will be used to compute the current dynamic landmarks.
'''
batch_size = vertices.shape[0]
aa_pose = torch.index_select(pose.view(batch_size, -1, 3), 1,
neck_kin_chain)
rot_mats = batch_rodrigues(
aa_pose.view(-1, 3), dtype=dtype).view(batch_size, -1, 3, 3)
rel_rot_mat = torch.eye(3, device=vertices.device,
dtype=dtype).unsqueeze_(dim=0)
for idx in range(len(neck_kin_chain)):
rel_rot_mat = torch.bmm(rot_mats[:, idx], rel_rot_mat)
y_rot_angle = torch.round(
torch.clamp(-rot_mat_to_euler(rel_rot_mat) * 180.0 / np.pi,
max=39)).to(dtype=torch.long)
neg_mask = y_rot_angle.lt(0).to(dtype=torch.long)
mask = y_rot_angle.lt(-39).to(dtype=torch.long)
neg_vals = mask * 78 + (1 - mask) * (39 - y_rot_angle)
y_rot_angle = (neg_mask * neg_vals +
(1 - neg_mask) * y_rot_angle)
dyn_lmk_faces_idx = torch.index_select(dynamic_lmk_faces_idx,
0, y_rot_angle)
dyn_lmk_b_coords = torch.index_select(dynamic_lmk_b_coords,
0, y_rot_angle)
return dyn_lmk_faces_idx, dyn_lmk_b_coords
def vertices2landmarks(vertices, faces, lmk_faces_idx, lmk_bary_coords):
''' Calculates landmarks by barycentric interpolation
Parameters
----------
vertices: torch.tensor BxVx3, dtype = torch.float32
The tensor of input vertices
faces: torch.tensor Fx3, dtype = torch.long
The faces of the mesh
lmk_faces_idx: torch.tensor L, dtype = torch.long
The tensor with the indices of the faces used to calculate the
landmarks.
lmk_bary_coords: torch.tensor Lx3, dtype = torch.float32
The tensor of barycentric coordinates that are used to interpolate
the landmarks
Returns
-------
landmarks: torch.tensor BxLx3, dtype = torch.float32
The coordinates of the landmarks for each mesh in the batch
'''
# Extract the indices of the vertices for each face
# BxLx3
batch_size, num_verts = vertices.shape[:2]
device = vertices.device
lmk_faces = torch.index_select(faces, 0, lmk_faces_idx.view(-1)).view(
batch_size, -1, 3)
lmk_faces += torch.arange(
batch_size, dtype=torch.long, device=device).view(-1, 1, 1) * num_verts
lmk_vertices = vertices.view(-1, 3)[lmk_faces].view(
batch_size, -1, 3, 3)
landmarks = torch.einsum('blfi,blf->bli', [lmk_vertices, lmk_bary_coords])
return landmarks
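# A minimal shape-check sketch (commented): random inputs with B=2 batches,
# V=10 vertices, F=4 faces and L=3 landmarks yield a 2x3x3 output.
#
# verts = torch.rand(2, 10, 3)
# faces = torch.randint(0, 10, (4, 3))
# lmk_idx = torch.tensor([[0, 1, 2], [1, 2, 3]])  # BxL face indices
# lmk_bc = torch.rand(2, 3, 3)                    # BxLx3 barycentrics
# vertices2landmarks(verts, faces, lmk_idx, lmk_bc).shape  # (2, 3, 3)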
def lbs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents,
lbs_weights, pose2rot=True, dtype=torch.float32):
''' Performs Linear Blend Skinning with the given shape and pose parameters
Parameters
----------
betas : torch.tensor BxNB
The tensor of shape parameters
pose : torch.tensor Bx(J + 1) * 3
The pose parameters in axis-angle format
    v_template : torch.tensor BxVx3
The template mesh that will be deformed
shapedirs : torch.tensor 1xNB
The tensor of PCA shape displacements
posedirs : torch.tensor Px(V * 3)
The pose PCA coefficients
J_regressor : torch.tensor JxV
The regressor array that is used to calculate the joints from
the position of the vertices
parents: torch.tensor J
The array that describes the kinematic tree for the model
lbs_weights: torch.tensor N x V x (J + 1)
The linear blend skinning weights that represent how much the
rotation matrix of each part affects each vertex
pose2rot: bool, optional
Flag on whether to convert the input pose tensor to rotation
matrices. The default value is True. If False, then the pose tensor
should already contain rotation matrices and have a size of
Bx(J + 1)x9
dtype: torch.dtype, optional
Returns
-------
verts: torch.tensor BxVx3
The vertices of the mesh after applying the shape and pose
displacements.
joints: torch.tensor BxJx3
The joints of the model
'''
batch_size = max(betas.shape[0], pose.shape[0])
device = betas.device
# Add shape contribution
v_shaped = v_template + blend_shapes(betas, shapedirs)
# v_shaped *= scale
# Get the joints
# NxJx3 array
J = vertices2joints(J_regressor, v_shaped)
# 3. Add pose blend shapes
# N x J x 3 x 3
ident = torch.eye(3, dtype=dtype, device=device)
if pose2rot:
rot_mats = batch_rodrigues(
pose.view(-1, 3), dtype=dtype).view([batch_size, -1, 3, 3])
pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1])
# (N x P) x (P, V * 3) -> N x V x 3
pose_offsets = torch.matmul(pose_feature, posedirs) \
.view(batch_size, -1, 3)
else:
pose_feature = pose[:, 1:].view(batch_size, -1, 3, 3) - ident
rot_mats = pose.view(batch_size, -1, 3, 3)
pose_offsets = torch.matmul(pose_feature.view(batch_size, -1),
posedirs).view(batch_size, -1, 3)
v_posed = pose_offsets + v_shaped
# 4. Get the global joint location
J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype)
# 5. Do skinning:
# W is N x V x (J + 1)
W = lbs_weights.unsqueeze(dim=0).expand([batch_size, -1, -1])
# (N x V x (J + 1)) x (N x (J + 1) x 16)
num_joints = J_regressor.shape[0]
T = torch.matmul(W, A.view(batch_size, num_joints, 16)) \
.view(batch_size, -1, 4, 4)
homogen_coord = torch.ones([batch_size, v_posed.shape[1], 1],
dtype=dtype, device=device)
v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2)
v_homo = torch.matmul(T, torch.unsqueeze(v_posed_homo, dim=-1))
verts = v_homo[:, :, :3, 0]
return verts, J_transformed
def vertices2joints(J_regressor, vertices):
''' Calculates the 3D joint locations from the vertices
Parameters
----------
J_regressor : torch.tensor JxV
The regressor array that is used to calculate the joints from the
position of the vertices
vertices : torch.tensor BxVx3
The tensor of mesh vertices
Returns
-------
torch.tensor BxJx3
The location of the joints
'''
return torch.einsum('bik,ji->bjk', [vertices, J_regressor])
def blend_shapes(betas, shape_disps):
''' Calculates the per vertex displacement due to the blend shapes
Parameters
----------
betas : torch.tensor Bx(num_betas)
Blend shape coefficients
shape_disps: torch.tensor Vx3x(num_betas)
Blend shapes
Returns
-------
torch.tensor BxVx3
The per-vertex displacement due to shape deformation
'''
# Displacement[b, m, k] = sum_{l} betas[b, l] * shape_disps[m, k, l]
# i.e. Multiply each shape displacement by its corresponding beta and
# then sum them.
blend_shape = torch.einsum('bl,mkl->bmk', [betas, shape_disps])
return blend_shape
def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
''' Calculates the rotation matrices for a batch of rotation vectors
Parameters
----------
rot_vecs: torch.tensor Nx3
array of N axis-angle vectors
Returns
-------
R: torch.tensor Nx3x3
The rotation matrices for the given axis-angle parameters
'''
batch_size = rot_vecs.shape[0]
device = rot_vecs.device
    angle = torch.norm(rot_vecs + epsilon, dim=1, keepdim=True)
rot_dir = rot_vecs / angle
cos = torch.unsqueeze(torch.cos(angle), dim=1)
sin = torch.unsqueeze(torch.sin(angle), dim=1)
# Bx1 arrays
rx, ry, rz = torch.split(rot_dir, 1, dim=1)
K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device)
zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
.view((batch_size, 3, 3))
ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
return rot_mat
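# A minimal sanity-check sketch (commented): an axis-angle rotation of pi/2
# about the z-axis should map the x-axis onto the y-axis.
#
# aa = torch.tensor([[0.0, 0.0, np.pi / 2]])
# R = batch_rodrigues(aa)  # shape 1x3x3
# R @ torch.tensor([[1.0], [0.0], [0.0]])  # ~ [[0.], [1.], [0.]]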
def transform_mat(R, t):
''' Creates a batch of transformation matrices
Args:
- R: Bx3x3 array of a batch of rotation matrices
- t: Bx3x1 array of a batch of translation vectors
Returns:
- T: Bx4x4 Transformation matrix
'''
# No padding left or right, only add an extra row
return torch.cat([F.pad(R, [0, 0, 0, 1]),
F.pad(t, [0, 0, 0, 1], value=1)], dim=2)
def batch_rigid_transform(rot_mats, joints, parents, dtype=torch.float32):
"""
Applies a batch of rigid transformations to the joints
Parameters
----------
rot_mats : torch.tensor BxNx3x3
Tensor of rotation matrices
joints : torch.tensor BxNx3
Locations of joints
parents : torch.tensor BxN
The kinematic tree of each object
dtype : torch.dtype, optional:
The data type of the created tensors, the default is torch.float32
Returns
-------
posed_joints : torch.tensor BxNx3
The locations of the joints after applying the pose rotations
rel_transforms : torch.tensor BxNx4x4
The relative (with respect to the root joint) rigid transformations
for all the joints
"""
joints = torch.unsqueeze(joints, dim=-1)
rel_joints = joints.clone()
rel_joints[:, 1:] -= joints[:, parents[1:]]
transforms_mat = transform_mat(
rot_mats.reshape(-1, 3, 3),
rel_joints.reshape(-1, 3, 1)).reshape(-1, joints.shape[1], 4, 4)
# transforms_mat[:, 0][:,:3,:3] *= scale
transform_chain = [transforms_mat[:, 0]]
for i in range(1, parents.shape[0]):
# Subtract the joint location at the rest pose
# No need for rotation, since it's identity when at rest
curr_res = torch.matmul(transform_chain[parents[i]],
transforms_mat[:, i])
transform_chain.append(curr_res)
transforms = torch.stack(transform_chain, dim=1)
# The last column of the transformations contains the posed joints
posed_joints = transforms[:, :, :3, 3]
joints_homogen = F.pad(joints, [0, 0, 0, 1])
rel_transforms = transforms - F.pad(
torch.matmul(transforms, joints_homogen), [3, 0, 0, 0, 0, 0, 0, 0])
return posed_joints, rel_transforms
|
"""
foxBMS Software License
Copyright 2010-2016, Fraunhofer-Gesellschaft zur Foerderung
der angewandten Forschung e.V.
All rights reserved.
BSD 3-Clause License
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
We kindly request you to use one or more of the following phrases to refer
to foxBMS in your hardware, software, documentation or advertising
materials:
"This product uses parts of foxBMS"
"This product includes parts of foxBMS"
"This product is derived from foxBMS"
If you use foxBMS in your products, we encourage you to contact us at:
CONTACT INFORMATION
Fraunhofer IISB ; Schottkystrasse 10 ; 91058 Erlangen, Germany
Dr.-Ing. Vincent LORENTZ
+49 9131-761-346
info@foxbms.org
www.foxbms.org
:author: Martin Giegerich <martin.giegerich@iisb.fraunhofer.de>
"""
import stm32interface
import argparse
import sys
import logging
"""
flash tool implementation for the STM32F4 microcontroller
- for detailed insight into the USART protocol, refer to STM32 appnote AN3155
- for detailed insight into the device bootloader, refer to STM32 appnote AN2606
"""
class STM32Flasher(stm32interface.STM32Interface):
def __init__(self, port = None, file = None, baudrate=115200,
address = 0x08000000, goaddress = -1, bytes = 256,**kwargs):
stm32interface.STM32Interface.__init__(self, port, baudrate)
self._file = file
self.bytes = bytes
self.address = address
self._doBeforeInit()
self.init()
def _doBeforeInit(self):
''' abstract method to optionally reset microcontroller or toggle boot pins '''
pass
def __enter__(self):
return self
def read(self):
data = []
length = self.bytes
address = self.address
logging.debug("Flash Read Start, Length: {0}, Address: {1:#x} ".format(length, address))
while length > 256:
logging.debug("Read {0} bytes at {1:#x}".format(256, address))
data = data + self.readMemory(address, 256)
address += 256
length -= 256
logging.info("[{0}/{1}] read ".format(self.bytes-length, self.bytes))
logging.debug("Read {0} bytes at {1:#x}".format(length, address))
data = data + self.readMemory(address, length)
logging.info("[{0}/{1}] read".format(self.bytes, self.bytes))
return data
def write(self, data):
logging.debug("Flash Write Start")
length = len(data)
alllng = len(data)
address = self.address
offset = 0
while length > 256:
logging.debug("Write {0} bytes at {1:#x}".format(256, address))
self.writeMemory(address, data[offset:offset+256])
offset += 256
address += 256
length -= 256
logging.info("[{0}/{1}] written".format(alllng-length, alllng))
logging.debug("Write {0} bytes at {1:#x}".format(length, address))
self.writeMemory(address, data[offset:offset+length] )
logging.info("[{0}/{1}] written".format(alllng, alllng))
#logging.info("Flash Write End")
def erase(self):
logging.info("Flash Erase Start")
super(STM32Flasher, self).erase()
logging.info("Flash Erase End")
def verify(self, data):
logging.info("Flash verify")
self.bytes = len(data)
verify = self.read()
if data == verify:
logging.info("Verify successful")
return True
else:
self.veriFail = str(len(data)) + ' vs ' + str(len(verify)) + '\n'
for i in xrange(0, len(data)):
if data[i] != verify[i]:
self.veriFail += hex(i) + ': ' + hex(data[i]) + ' vs ' + hex(verify[i]) + '\n'
logging.error(self.veriFail)
return False
def __str__(self):
id = self.getId()[1:3] # id without length byte and ack byte
version = self.getVersion()
return "ID: %s Bootloader version: %x" % (hex(reduce(lambda x, y: x*0x100+y, id)), version[0])
def auto_int(x):
return int(x, 0)
def main():
parser = argparse.ArgumentParser(description='STM32 flash tool',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog = '''\
Example:
%s --port COM3 --erase --write --verify build/src/general/foxbms_flash.bin
Copyright (c) 2015, 2016 Fraunhofer IISB.
All rights reserved.
This program has been released under the conditions of the 3-clause BSD
license.
''' % sys.argv[0])
parser.add_argument('-v', '--verbosity', action='count', default=0, help="increase output verbosity")
parser.add_argument('--erase', '-e', action='store_true', help='erase firmware')
parser.add_argument('--read', '-r', action='store_true', help='read and store firmware')
parser.add_argument('--write', '-w', action='store_true', help='writes firmware')
parser.add_argument('--verify', '-y', action='store_true', help='verify the firmware')
parser.add_argument('--bytes', '-s', type=int, default = 256, help='bytes to read from the firmware')
parser.add_argument('--bauds', '-b', type=int, default=115200, help='transfer speed (bauds)')
parser.add_argument('--port', '-p', type=str, default='/dev/tty.usbserial-ftCYPMYJ', help='ttyUSB port')
parser.add_argument('--address', '-a', type=auto_int, default=0x08000000, help='target address')
parser.add_argument('--goaddress', '-g', type=auto_int, default=-1, help='start address (use -1 for default)')
parser.add_argument('firmware', metavar = 'FIRMWARE FILE', help='firmware binary')
args = parser.parse_args()
if args.verbosity == 1:
logging.basicConfig(level = logging.INFO)
elif args.verbosity > 1:
logging.basicConfig(level = logging.DEBUG)
else:
logging.basicConfig(level = logging.ERROR)
if args.read:
if args.erase:
parser.error('Cannot use --erase together with --read')
if args.write:
parser.error('Cannot use --write together with --read')
        if args.bytes is None:
parser.error('Please give a length (in bytes) to read')
with STM32Flasher(**vars(args)) as flasher:
if args.write or args.verify:
with open(args.firmware, 'rb') as f:
data = map(lambda c: ord(c), f.read())
if args.erase:
flasher.erase()
if args.write:
flasher.write(data)
if args.verify:
flasher.verify(data)
if args.read:
rdata = flasher.read()
with open(args.firmware, 'wb') as f:
f.write(''.join(map(chr,rdata)))
if args.goaddress > -1:
flasher.go(args.goaddress)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import country_converter as coco
import pandas as pd
from covsirphy.util.term import Term
from covsirphy.loading.db_base import _RemoteDatabase
class _OWID(_RemoteDatabase):
"""
Access "Our World In Data".
https://github.com/owid/covid-19-data/tree/master/public/data
https://ourworldindata.org/coronavirus
Args:
filename (str): CSV filename to save records
"""
# URL for vaccine data
URL_V = "https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/"
URL_V_REC = f"{URL_V}vaccinations.csv"
URL_V_LOC = f"{URL_V}locations.csv"
# URL for PCR data
URL_P = "https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/testing/"
URL_P_REC = f"{URL_P}covid-testing-all-observations.csv"
# Citation
CITATION = "Hasell, J., Mathieu, E., Beltekian, D. et al." \
" A cross-country database of COVID-19 testing. Sci Data 7, 345 (2020)." \
" https://doi.org/10.1038/s41597-020-00688-8"
# Column names and data types
# {"name in database": "name defined in Term class"}
COL_DICT = {
"date": Term.DATE,
"location": Term.COUNTRY,
Term.PROVINCE: Term.PROVINCE,
"iso_code": Term.ISO3,
"vaccines": Term.PRODUCT,
"total_vaccinations": Term.VAC,
"people_vaccinated": Term.V_ONCE,
"people_fully_vaccinated": Term.V_FULL,
"tests": Term.TESTS,
}
def download(self, verbose):
"""
Download the dataset from the server and set the list of primary sources.
Args:
verbose (int): level of verbosity
Returns:
pandas.DataFrame
Index
reset index
Columns
defined by the first values of self.COL_DICT.values()
Note:
            If @verbose is equal to or greater than 1, the data source is announced while downloading.
"""
# Download datasets
if verbose:
print("Retrieving datasets from Our World In Data https://github.com/owid/covid-19-data/")
# Vaccinations
v_rec_cols = [
"date", "location", "iso_code", "total_vaccinations", "people_vaccinated", "people_fully_vaccinated"]
v_rec_df = pd.read_csv(self.URL_V_REC, usecols=v_rec_cols)
v_loc_df = pd.read_csv(self.URL_V_LOC, usecols=["location", "vaccines"])
v_df = v_rec_df.merge(v_loc_df, how="left", on="location")
# Tests
pcr_rec_cols = ["ISO code", "Date", "Daily change in cumulative total", "Cumulative total"]
pcr_df = pd.read_csv(self.URL_P_REC, usecols=pcr_rec_cols)
pcr_df = pcr_df.rename(columns={"ISO code": "iso_code", "Date": "date"})
pcr_df["cumsum"] = pcr_df.groupby("iso_code")["Daily change in cumulative total"].cumsum()
pcr_df = pcr_df.assign(tests=lambda x: x["Cumulative total"].fillna(x["cumsum"]))
# Combine data (vaccinations/tests)
df = v_df.set_index(["iso_code", "date"])
df = df.combine_first(pcr_df.set_index(["iso_code", "date"]).loc[:, ["tests"]])
df = df.reset_index()
# Location (country/province)
df["location"] = df["location"].replace(
{
# COG
"Congo": "Republic of the Congo",
}
)
df = df.loc[~df["iso_code"].str.contains("OWID_")]
df["location"] = df.groupby("iso_code")["location"].bfill()
df.loc[df["location"] == df["iso_code"], "location"] = None
df.loc[df["location"].isna(), "location"] = df.loc[df["location"].isna(), "iso_code"].apply(
lambda x: coco.convert(x, to="name_short", not_found=None))
df[self.PROVINCE] = self.UNKNOWN
return df
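# A minimal sketch of the combine_first() step above: values from the
# vaccination frame win, and test counts only fill in the gaps.
#
# left = pd.DataFrame({'tests': [None, 5.0]}, index=['JPN', 'KEN'])
# right = pd.DataFrame({'tests': [10.0, 20.0]}, index=['JPN', 'KEN'])
# left.combine_first(right)  # JPN -> 10.0 (filled), KEN -> 5.0 (kept)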
|
#!/usr/bin/env python
# Quick hack to generate a sqlite db of chat logs.
# Currently used like so:
# find ~/Desktop/chatlogs -name "*.ichat" -exec python examples/imlog2db.py -d db.sqlite {} \;
# and so on..
import sys
import os
sys.path.append(os.path.dirname(__file__) + '/../imlog')
import re
import imlog
import sqlite3
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('files', metavar='FILE', nargs="+")
parser.add_argument("-d" , dest="db", required=True)
args = parser.parse_args()
def init_log(path):
if re.search("chatlog$", path):
return imlog.AdiumLog(path)
if re.search("ichat$", path):
return imlog.IChatLog(path)
conn = sqlite3.connect(args.db)
cur = conn.cursor()
cur.execute("select * from sqlite_master where type = 'table' and name = 'imlogs'")
if cur.fetchone() is None:
cur.execute("""
create table imlogs (
sender text,
txt text,
t text
)
""")
sql = "insert into imlogs values (?, ?, ?)"
for path in args.files:
log = init_log(path)
for msg in log.messages:
try:
cur.execute(sql, (msg.sender, msg.text, msg.time))
conn.commit()
except sqlite3.InterfaceError:
print msg.sender
print msg.text
print msg.time
|
# -----------------------------------------------------------------------------
#
# Copyright (C) 2021 CERN & Newcastle University for the benefit of the
# BioDynaMo collaboration. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# See the LICENSE file distributed with this work for details.
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# -----------------------------------------------------------------------------
class Print:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
@staticmethod
def success(message):
print(Print.BOLD + Print.GREEN + str(message) + Print.END)
@staticmethod
def error(message):
print(Print.RED + str(message) + Print.END)
@staticmethod
def warning(message):
print(Print.YELLOW + str(message) + Print.END)
@staticmethod
def new_step(message):
print('\n' + Print.BOLD + Print.BLUE + str(message) + Print.END)
|
"""
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
WARNING
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
This directory is for the internal of Theano.
You are strongly advised not to use it, except if you know
what you are doing!
If you want to use a scalar variable in a Theano graph,
you probably want to use theano.tensor.[c,z,f,d,b,w,i,l,]scalar!
"""
import math
import warnings
from copy import copy
from itertools import imap
from textwrap import dedent
import numpy
import theano
from theano.compat import PY3
from theano import gof
from theano.gof import (Op, utils, Variable, Constant, Type, Apply,
FunctionGraph)
from theano.gof.python25 import partial, all, any
from theano.configparser import config
from theano.gradient import DisconnectedType
from theano.gradient import grad_undefined
builtin_complex = complex
builtin_int = int
builtin_float = float
class ComplexError(Exception):
"""Raised if complex numbers are used in an unsupported operation."""
pass
class IntegerDivisionError(Exception):
"""Raised if someone tries to divide integers with '/' instead of '//'."""
pass
def upcast(dtype, *dtypes):
# Should we try to keep float32 instead of float64? This is used so that
# for instance mixing int64 with float32 yields float32 instead of float64.
# Note that we store this boolean as a one-element list so that it can be
# modified within `make_array`.
keep_float32 = [(config.cast_policy == 'numpy+floatX' and
config.floatX == 'float32')]
def make_array(dt):
if dt == 'float64':
# There is an explicit float64 dtype: we cannot keep float32.
keep_float32[0] = False
return numpy.zeros((), dtype=dt)
z = make_array(dtype)
for dt in dtypes:
z = z + make_array(dt=dt)
rval = str(z.dtype)
if rval == 'float64' and keep_float32[0]:
return 'float32'
else:
return rval
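# A minimal illustration (commented; the float32 short-circuit applies only
# when config.cast_policy == 'numpy+floatX' and config.floatX == 'float32'):
#
# upcast('int8', 'int16')     # -> 'int16'
# upcast('int64', 'float32')  # -> 'float32' under the setting above,
#                             #    'float64' otherwise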
def as_scalar(x, name=None):
if isinstance(x, gof.Apply):
if len(x.outputs) != 1:
raise ValueError("It is ambiguous which output of a multi-output"
" Op has to be fetched.", x)
else:
x = x.outputs[0]
if isinstance(x, Variable):
if not isinstance(x.type, Scalar):
raise TypeError("Variable type field must be a Scalar.", x, x.type)
return x
try:
return constant(x)
except TypeError:
raise TypeError("Cannot convert %s to Scalar" % x, type(x))
def constant(x):
# pass through numpy scalars, since they are already typed on
# purpose typically.
if hasattr(x, 'dtype'):
assert x.ndim == 0
return ScalarConstant(Scalar(str(x.dtype)), x)
if isinstance(x, builtin_float):
for dtype in ['float32', 'float64']:
x_ = theano._asarray(x, dtype=dtype)
if numpy.all(x == x_):
break
x_ = None
assert x_ is not None
return ScalarConstant(Scalar(str(x_.dtype)), x)
if isinstance(x, builtin_int):
for dtype in ['int8', 'int16', 'int32', 'int64']:
x_ = theano._asarray(x, dtype=dtype)
if numpy.all(x == x_):
break
x_ = None
assert x_ is not None
return ScalarConstant(Scalar(str(x_.dtype)), x)
if isinstance(x, builtin_complex):
#TODO: We have added the complex type, so this should be tested
raise NotImplementedError()
raise TypeError(x)
#return ScalarConstant(float64, float(x))
class Scalar(Type):
"""
Internal class, should not be used by clients
Primarily used by tensor.elemwise and tensor.reduce
Analogous to TensorType, but for zero-dimensional objects
Maps directly to C primitives
TODO: refactor to be named ScalarType for consistency with TensorType
"""
ndim = 0
def __init__(self, dtype):
if dtype == 'floatX':
dtype = config.floatX
self.dtype = dtype
self.dtype_specs() # error checking
def filter(self, data, strict=False, allow_downcast=None):
py_type = self.dtype_specs()[0]
if strict and not isinstance(data, py_type):
raise TypeError("%s expected a %s, got %s of type %s" % (
self, py_type, data, type(data)), data)
try:
converted_data = py_type(data)
if (allow_downcast or
(allow_downcast is None and
type(data) is float and
self.dtype == theano.config.floatX) or
data == converted_data):
return py_type(data)
else:
raise TypeError('Value cannot accurately be converted to dtype'
' (%s) and allow_downcast is not True' % self.dtype)
except Exception, e:
raise TypeError("Could not convert %s (value=%s) to %s" % (
type(data), data, self.dtype), e)
def values_eq_approx(self, a, b, tolerance=1e-4):
return abs(a - b) <= ((abs(a) + abs(b)) * tolerance)
def c_headers(self):
l = ['<math.h>']
l.append('<numpy/arrayscalars.h>')
if config.lib.amdlibm:
l += ['<amdlibm.h>']
return l
def c_libraries(self):
l = []
if config.lib.amdlibm:
l += ['amdlibm']
return l
def c_compile_args(self):
if config.lib.amdlibm:
return ['-DREPLACE_WITH_AMDLIBM']
else:
return []
def __eq__(self, other):
return type(self) == type(other) and other.dtype == self.dtype
def __hash__(self):
return hash('theano.scalar.Scalar') ^ hash(self.dtype)
def dtype_specs(self):
try:
return { # dtype: (py_type, c_type, cls_name)
'float32': (numpy.float32, 'npy_float32', 'Float32'),
'float64': (numpy.float64, 'npy_float64', 'Float64'),
'complex128': (numpy.complex128, 'theano_complex128',
'Complex128'),
'complex64': (numpy.complex64, 'theano_complex64',
'Complex64'),
'uint8': (numpy.uint8, 'npy_uint8', 'UInt8'),
'int8': (numpy.int8, 'npy_int8', 'Int8'),
'uint16': (numpy.uint16, 'npy_uint16', 'UInt16'),
'int16': (numpy.int16, 'npy_int16', 'Int16'),
'uint32': (numpy.uint32, 'npy_uint32', 'UInt32'),
'int32': (numpy.int32, 'npy_int32', 'Int32'),
'uint64': (numpy.uint64, 'npy_uint64', 'UInt64'),
'int64': (numpy.int64, 'npy_int64', 'Int64')
}[self.dtype]
except KeyError:
raise TypeError("Unsupported dtype for %s: %s" % (
self.__class__.__name__, self.dtype))
def upcast(self, *others):
return upcast(*[x.dtype for x in [self] + list(others)])
def make_variable(self, name=None):
return ScalarVariable(self, name=name)
def __str__(self):
return str(self.dtype)
def __repr__(self):
return "Scalar(%s)" % self.dtype
def c_literal(self, data):
if 'complex' in self.dtype:
raise NotImplementedError("No literal for complex values.")
return str(data)
def c_declare(self, name, sub):
return """
%(dtype)s %(name)s;
typedef %(dtype)s %(name)s_dtype;
""" % dict(name=name, dtype=self.dtype_specs()[1])
def c_init(self, name, sub):
return """
%(name)s = 0;
""" % locals()
def c_extract(self, name, sub):
specs = self.dtype_specs()
return """
if (!PyObject_TypeCheck(py_%(name)s, &%(pyarr_type)s))
{
PyErr_Format(PyExc_ValueError,
"Scalar check failed (%(dtype)s)");
%(fail)s
}
PyArray_ScalarAsCtype(py_%(name)s, &%(name)s);
""" % dict(sub,
name=name,
dtype=specs[1],
pyarr_type='Py%sArrType_Type' % specs[2])
def c_sync(self, name, sub):
specs = self.dtype_specs()
return """
Py_XDECREF(py_%(name)s);
py_%(name)s = PyArrayScalar_New(%(cls)s);
if (!py_%(name)s)
{
Py_XINCREF(Py_None);
py_%(name)s = Py_None;
PyErr_Format(PyExc_MemoryError,
"Instantiation of new Python scalar failed (%(dtype)s)");
%(fail)s
}
PyArrayScalar_ASSIGN(py_%(name)s, %(cls)s, %(name)s);
""" % dict(sub,
name=name,
dtype=specs[1],
cls=specs[2])
def c_cleanup(self, name, sub):
return ""
def c_support_code(self):
if self.dtype.startswith('complex'):
cplx_types = ['theano_complex64', 'theano_complex128']
real_types = ['npy_int8', 'npy_int16', 'npy_int32', 'npy_int64',
'npy_float32', 'npy_float64']
# If the 'int' C type is not exactly the same as an existing
# 'npy_intX', some C code may not compile, e.g. when assigning
# the value 0 (cast to 'int' in C) to a theano_complex64.
if (numpy.dtype('intc').num not in
[numpy.dtype(d[4:]).num for d in real_types]):
# In that case we add the 'int' type to the real types.
real_types.append('int')
template = """
struct theano_complex%(nbits)s : public npy_complex%(nbits)s
{
typedef theano_complex%(nbits)s complex_type;
typedef npy_float%(half_nbits)s scalar_type;
complex_type operator +(const complex_type &y) const {
complex_type ret;
ret.real = this->real + y.real;
ret.imag = this->imag + y.imag;
return ret;
}
complex_type operator -() const {
complex_type ret;
ret.real = -this->real;
ret.imag = -this->imag;
return ret;
}
bool operator ==(const complex_type &y) const {
return (this->real == y.real) && (this->imag == y.imag);
}
bool operator ==(const npy_float%(nbits)s &y) const {
return (this->real == y) && (this->imag == 0);
}
complex_type operator -(const complex_type &y) const {
complex_type ret;
ret.real = this->real - y.real;
ret.imag = this->imag - y.imag;
return ret;
}
complex_type operator *(const complex_type &y) const {
complex_type ret;
ret.real = this->real * y.real - this->imag * y.imag;
ret.imag = this->real * y.imag + this->imag * y.real;
return ret;
}
complex_type operator /(const complex_type &y) const {
complex_type ret;
scalar_type y_norm_square = y.real * y.real + y.imag * y.imag;
ret.real = (this->real * y.real + this->imag * y.imag) / y_norm_square;
ret.imag = (this->imag * y.real - this->real * y.imag) / y_norm_square;
return ret;
}
template <typename T>
complex_type& operator =(const T& y);
theano_complex%(nbits)s() {}
template <typename T>
theano_complex%(nbits)s(const T& y) { *this = y; }
template <typename TR, typename TI>
theano_complex%(nbits)s(const TR& r, const TI& i) { this->real=r; this->imag=i; }
};
"""
def operator_eq_real(mytype, othertype):
return '''
template <> %(mytype)s & %(mytype)s::operator=<%(othertype)s>(const %(othertype)s & y)
{ this->real=y; this->imag=0; return *this; }
''' % dict(mytype=mytype, othertype=othertype)
def operator_eq_cplx(mytype, othertype):
return '''
template <> %(mytype)s & %(mytype)s::operator=<%(othertype)s>(const %(othertype)s & y)
{ this->real=y.real; this->imag=y.imag; return *this; }
''' % dict(mytype=mytype, othertype=othertype)
operator_eq = ''.join(operator_eq_real(ctype, rtype)
for ctype in cplx_types
for rtype in real_types) \
+ ''.join(operator_eq_cplx(ctype1, ctype2)
for ctype1 in cplx_types
for ctype2 in cplx_types)
# We are not using C++ generic templating here, because this would
# generate two different functions for adding a complex64 and a
# complex128, one returning a complex64, the other a complex128,
# and the compiler complains it is ambiguous.
# Instead, we generate code for known and safe types only.
def operator_plus_real(mytype, othertype):
return '''
const %(mytype)s operator+(const %(mytype)s &x, const %(othertype)s &y)
{ return %(mytype)s(x.real+y, x.imag); }
const %(mytype)s operator+(const %(othertype)s &y, const %(mytype)s &x)
{ return %(mytype)s(x.real+y, x.imag); }
''' % dict(mytype=mytype, othertype=othertype)
operator_plus = ''.join(operator_plus_real(ctype, rtype)
for ctype in cplx_types
for rtype in real_types)
def operator_minus_real(mytype, othertype):
return '''
const %(mytype)s operator-(const %(mytype)s &x, const %(othertype)s &y)
{ return %(mytype)s(x.real-y, x.imag); }
const %(mytype)s operator-(const %(othertype)s &y, const %(mytype)s &x)
{ return %(mytype)s(y-x.real, -x.imag); }
''' % dict(mytype=mytype, othertype=othertype)
operator_minus = ''.join(operator_minus_real(ctype, rtype)
for ctype in cplx_types
for rtype in real_types)
def operator_mul_real(mytype, othertype):
return '''
const %(mytype)s operator*(const %(mytype)s &x, const %(othertype)s &y)
{ return %(mytype)s(x.real*y, x.imag*y); }
const %(mytype)s operator*(const %(othertype)s &y, const %(mytype)s &x)
{ return %(mytype)s(x.real*y, x.imag*y); }
''' % dict(mytype=mytype, othertype=othertype)
operator_mul = ''.join(operator_mul_real(ctype, rtype)
for ctype in cplx_types
for rtype in real_types)
return template % dict(nbits=64, half_nbits=32) \
+ template % dict(nbits=128, half_nbits=64) \
+ operator_eq \
+ operator_plus \
+ operator_minus \
+ operator_mul
else:
return ""
    def c_code_cache_version(self):
        # Use the correct type checking and conversion functions
        return (10, numpy.__version__)
        # History of earlier versions:
        # (9, numpy.__version__): make operators work with 64 and 128 bit
        #     arguments at the same time
        # (8, numpy.__version__): put const around operators and added the
        #     unary '-' operator
        # (7,): make complex c code optional (no need to put lib.amdlibm
        #     here as c_compile_args() are put in the key)
        # (6,): added implementations of operators that work with scalar
        #     arguments
        # (5,): added constructors to the theano_complex class
        # (4,): explicit T given in specialization of operator= lines;
        #     this makes it compile with open64
def get_shape_info(self, obj):
return obj.itemsize
def get_size(self, shape_info):
return shape_info
# Register C code for ViewOp on Scalars.
theano.compile.register_view_op_c_code(
Scalar,
"""
%(oname)s = %(iname)s;
""",
1)
int8 = Scalar('int8')
int16 = Scalar('int16')
int32 = Scalar('int32')
int64 = Scalar('int64')
uint8 = Scalar('uint8')
uint16 = Scalar('uint16')
uint32 = Scalar('uint32')
uint64 = Scalar('uint64')
float32 = Scalar('float32')
float64 = Scalar('float64')
complex64 = Scalar('complex64')
complex128 = Scalar('complex128')
int_types = int8, int16, int32, int64
uint_types = uint8, uint16, uint32, uint64
float_types = float32, float64
complex_types = complex64, complex128
discrete_types = int_types + uint_types
continuous_types = float_types + complex_types
all_types = discrete_types + continuous_types
class _scalar_py_operators:
# So that we can simplify checking code when we have a mixture of Scalar
# variables and Tensor variables
ndim = 0
dtype = property(lambda self: self.type.dtype)
""" The dtype of this scalar. """
#UNARY
def __abs__(self):
return abs_(self)
def __neg__(self):
return neg(self)
#CASTS
#def __int__(self): return AsInt(self).out
#def __float__(self): return AsDouble(self).out
#def __complex__(self): return AsComplex(self).out
#BITWISE
def __invert__(self):
return invert(self)
def __and__(self, other):
return and_(self, other)
def __or__(self, other):
return or_(self, other)
def __xor__(self, other):
return xor(self, other)
def __rand__(self, other):
return and_(other, self)
def __ror__(self, other):
return or_(other, self)
def __rxor__(self, other):
return xor(other, self)
#COMPARISONS
def __lt__(self, other):
return lt(self, other)
def __le__(self, other):
return le(self, other)
def __gt__(self, other):
return gt(self, other)
def __ge__(self, other):
return ge(self, other)
#ARITHMETIC - NORMAL
def __add__(self, other):
return add(self, other)
def __sub__(self, other):
return sub(self, other)
def __mul__(self, other):
return mul(self, other)
if PY3:
def __truediv__(self, other):
return div_proxy(self, other)
else:
def __div__(self, other):
return div_proxy(self, other)
def __floordiv__(self, other):
return int_div(self, other)
def __mod__(self, other):
return mod_check(self, other)
def __pow__(self, other):
return pow(self, other)
#ARITHMETIC - RIGHT-OPERAND
def __radd__(self, other):
return add(other, self)
def __rsub__(self, other):
return sub(other, self)
def __rmul__(self, other):
return mul(other, self)
def __rdiv__(self, other):
return div_proxy(other, self)
def __rmod__(self, other):
return mod(other, self)
def __rpow__(self, other):
return pow(other, self)
def zeros_like(self):
        # `second` is used so that, under Elemwise, the zeros broadcast
        # to the shape of self.
return second(self, ScalarConstant(Scalar(str(self.type.dtype)), 0))
def astype(self, dtype):
return cast(self, dtype)
class ScalarVariable(_scalar_py_operators, Variable):
pass
class ScalarConstant(_scalar_py_operators, Constant):
pass
# Register ScalarConstant as the type of Constant corresponding to Scalar
Scalar.Constant = ScalarConstant
# Easy constructors
def _multi(*fns):
def f2(f, names):
if len(names) == 1:
return f(names)
else:
return [f(name) for name in names]
if len(fns) == 1:
return partial(f2, fns[0])
else:
return [partial(f2, f) for f in fns]
ints = _multi(int64)
floats = _multi(float64)
complexs = _multi(complex128)
complexs64 = _multi(complex64)
complexs128 = _multi(complex128)
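# A minimal usage sketch (assuming the Scalar types defined above):
# `ints('x')` returns a single int64 variable named 'x', while
# `floats('xy')` returns a list of two float64 variables, one per name.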
# Using a class instead of a function makes it possible to deep-copy it in
# Python 2.4.
# Note that currently only a few functions use this mechanism, because it is
# enough to make the test-suite pass with Python 2.4. However, it may prove
# necessary to use this same mechanism in other places as well in the future.
class upcast_out(object):
def __new__(self, *types):
return Scalar(dtype=Scalar.upcast(*types)),
class upgrade_to_float(object):
def __new__(self, *types):
"""
Upgrade any int types to float32 or float64 to avoid losing precision.
"""
conv = {int8: float32,
int16: float32,
int32: float64,
int64: float64,
uint8: float32,
uint16: float32,
uint32: float64,
uint64: float64}
return Scalar(Scalar.upcast(*[conv.get(type, type)
for type in types])),
class same_out(object):
def __new__(self, type):
return type,
def upcast_out_no_complex(*types):
if any([type in complex_types for type in types]):
        raise TypeError('complex types are not supported')
return Scalar(dtype=Scalar.upcast(*types)),
def same_out_float_only(type):
if type not in float_types:
        raise TypeError('only float types are supported')
return type,
class transfer_type(gof.utils.object2):
def __init__(self, *transfer):
assert all(type(x) == int for x in transfer)
self.transfer = transfer
def __str__(self):
return 'transfer_type{%s}' % self.transfer
def __call__(self, *types):
upcast = upcast_out(*types)
retval = []
for i in self.transfer:
if i is None:
retval += [upcast]
else:
retval += [types[i]]
return retval
#return [upcast if i is None else types[i] for i in self.transfer]
def __eq__(self, other):
return type(self) == type(other) and self.transfer == other.transfer
def __hash__(self):
return hash(self.transfer)
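# A hedged sketch: transfer_type(1) makes an op's single output take the
# type of input 1 regardless of the other inputs, as done for `second`
# further below; transfer_type(0, 1) would do the same for two outputs.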
class specific_out(gof.utils.object2):
def __init__(self, *spec):
self.spec = spec
def __call__(self, *types):
return self.spec
def __eq__(self, other):
return type(self) == type(other) and self.spec == other.spec
def __hash__(self):
return hash(self.spec)
def int_out(*types):
return int64,
def float_out(*types):
return float64,
def upgrade_to_float_no_complex(*types):
"""
don't accept complex, otherwise call upgrade_to_float().
"""
for type in types:
if type in complex_types:
raise TypeError('complex argument not supported')
return upgrade_to_float(*types)
def same_out_nocomplex(type):
if type in complex_types:
raise TypeError('complex argument not supported')
return type,
def int_out_nocomplex(*types):
for type in types:
if type in complex_types:
raise TypeError('complex argument not supported')
return int64,
def float_out_nocomplex(*types):
for type in types:
if type in complex_types:
raise TypeError('complex argument not supported')
return float64,
class unary_out_lookup(gof.utils.object2):
"""
get a output_types_preference object by passing a dictionary:
unary_out_lookup({int8:int32, float32:complex128})
The result is an op that maps in8 to int32 and float32 to
complex128 and other input types lead to a TypeError.
"""
def __init__(self, type_table):
self.tbl = type_table
def __call__(self, *types):
if len(types) == 1:
types = types[0]
try:
rval = self.tbl[types]
except Exception:
raise TypeError(types)
if isinstance(types, (list, tuple)):
return rval
else:
return [rval]
def __eq__(self, other):
return type(self) == type(other) and self.tbl == other.tbl
def __hash__(self):
return hash(type(self)) # ignore hash of table
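# Usage sketch: with pref = unary_out_lookup({int8: int32}),
# pref(int8) returns [int32], while pref(float64) raises a TypeError.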
def real_out(type):
if type == complex64:
return float32,
if type == complex128:
return float64,
return type,
class ScalarOp(Op):
nin = -1
nout = 1
def __init__(self, output_types_preference=None, name=None):
self.name = name
if output_types_preference is not None:
if not callable(output_types_preference):
raise TypeError(
"Expected a callable for the 'output_types_preference' argument to %s. (got: %s)" % (self.__class__, output_types_preference))
self.output_types_preference = output_types_preference
def make_node(self, *inputs):
if self.nin >= 0:
if len(inputs) != self.nin:
raise TypeError("Wrong number of inputs for %s.make_node (got %i(%s), expected %i)" \
% (self, len(inputs), str(inputs), self.nin))
inputs = [as_scalar(input) for input in inputs]
        outputs = [t() for t in self.output_types(
            [input.type for input in inputs])]
if len(outputs) != self.nout:
raise TypeError("Not the right number of outputs produced for %s(%s). Expected %s, got %s."
% (self, ", ".join(str(input) for input in inputs), self.nout, len(outputs)))
return Apply(self, inputs, outputs)
def output_types(self, types):
if hasattr(self, 'output_types_preference'):
variables = self.output_types_preference(*types)
if not isinstance(variables, (list, tuple)) or any(not isinstance(x, Type) for x in variables):
raise TypeError(
"output_types_preference should return a list or a tuple of types", self.output_types_preference, variables)
if len(variables) != self.nout:
raise TypeError("Not the right number of outputs types produced for %s(%s) by %s. Expected %s, got %s."
% (self, ", ".join(str(type) for type in variables),
self.output_types_preference, self.nout, len(variables)))
return variables
else:
raise NotImplementedError(
"Cannot calculate the output types for %s" % self)
def perform(self, node, inputs, output_storage):
if self.nout == 1:
output_storage[0][0] = self.impl(*inputs)
else:
variables = utils.from_return_values(self.impl(*inputs))
assert len(variables) == len(output_storage)
for storage, variable in zip(output_storage, variables):
storage[0] = variable
def impl(self, *inputs):
raise utils.MethodNotDefined("impl", type(self),
self.__class__.__name__)
def grad(self, inputs, output_gradients):
raise utils.MethodNotDefined("grad", type(self),
self.__class__.__name__)
def __eq__(self, other):
test = type(self) == type(other) \
and getattr(self, 'output_types_preference', None) \
== getattr(other, 'output_types_preference', None)
return test
def __hash__(self):
return hash(type(self).__name__) ^ hash(
getattr(self, 'output_types_preference', 0))
def __str__(self):
if hasattr(self, 'name') and self.name:
return self.name
else:
param = [(k, v) for k, v in self.__dict__.items()
if k not in ["name", "_op_use_c_code"]]
if param:
return "%s{%s}" % (self.__class__.__name__,
", ".join("%s=%s" % (k, v)
for k, v in param))
else:
return self.__class__.__name__
def c_code_cache_version(self):
return (4,)
def c_code_contiguous(self, node, name, inp, out, sub):
"""This function is called by Elemwise when all inputs and
outputs are c_contiguous. This allows to use the SIMD version
of this op.
The inputs are the same as c_code except that:
- inp and out must be the names of the variables associated to the
ndarrays in the C code
- node must be the elemwise node (this is needed to know
the inputs/outputs types)
"""
raise theano.gof.utils.MethodNotDefined()
class UnaryScalarOp(ScalarOp):
nin = 1
amd_float32 = None
amd_float64 = None
def c_code_contiguous(self, node, name, (x, ), (z, ), sub):
if (not theano.config.lib.amdlibm or
# We compare the dtype AND the broadcast flag
                # as this function does not broadcast
node.inputs[0].type != node.outputs[0].type):
raise theano.gof.utils.MethodNotDefined()
dtype = node.inputs[0].dtype
if dtype == 'float32' and self.amd_float32 is not None:
dtype = 'float'
fct = self.amd_float32
elif dtype == 'float64' and self.amd_float64 is not None:
dtype = 'double'
fct = self.amd_float64
else:
raise theano.gof.utils.MethodNotDefined()
return """
npy_intp n = PyArray_SIZE(%(z)s);
%(dtype)s * x = (%(dtype)s*) PyArray_DATA(%(x)s);
%(dtype)s * z = (%(dtype)s*) PyArray_DATA(%(z)s);
%(fct)s(n, x, z);
""" % locals()
class BinaryScalarOp(ScalarOp):
# One may define in subclasses the following fields:
# - `identity`: for an associative operation, identity corresponds to
# the neutral element. For instance, it will be 0 for addition, 1 for
# multiplication, True for "and", False for "or".
# - `commutative`: whether op(a, b) == op(b, a)
# - `associative`: whether op(op(a, b), c) == op(a, op(b, c))
nin = 2
###############
# Comparisons
###############
class LogicalComparison(BinaryScalarOp):
def output_types(self, *input_dtypes):
return [int8]
def grad(self, inputs, output_gradients):
x, y = inputs
out = self(x, y)
assert str(out.type.dtype).find('int') != -1
return [x.zeros_like().astype(theano.config.floatX),
y.zeros_like().astype(theano.config.floatX)]
class FixedLogicalComparison(UnaryScalarOp):
"""
Comparison to a fixed value.
"""
def output_types(self, *input_dtypes):
return [int8]
def grad(self, inputs, output_gradients):
        x, = inputs
out = self(x)
assert str(out.type.dtype).find('int') != -1
return [x.zeros_like().astype(theano.config.floatX)]
class LT(LogicalComparison):
identity = False
commutative = False
associative = False
def impl(self, x, y):
        # built-in < doesn't support complex
return numpy.less(x, y)
def c_code(self, node, name, (x, y), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError()
return "%(z)s = (%(x)s < %(y)s);" % locals()
lt = LT()
class GT(LogicalComparison):
identity = False
commutative = False
associative = False
def impl(self, x, y):
        # built-in > doesn't support complex
return numpy.greater(x, y)
def c_code(self, node, name, (x, y), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError()
return "%(z)s = (%(x)s > %(y)s);" % locals()
gt = GT()
class LE(LogicalComparison):
identity = False
commutative = False
associative = False
def impl(self, x, y):
        # built-in <= doesn't support complex
return numpy.less_equal(x, y)
def c_code(self, node, name, (x, y), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError()
return "%(z)s = (%(x)s <= %(y)s);" % locals()
le = LE()
class GE(LogicalComparison):
identity = False
commutative = False
associative = False
def impl(self, x, y):
        # built-in >= doesn't support complex
return numpy.greater_equal(x, y)
def c_code(self, node, name, (x, y), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError()
return "%(z)s = (%(x)s >= %(y)s);" % locals()
ge = GE()
class EQ(LogicalComparison):
identity = False
commutative = True
associative = False
def impl(self, x, y):
return x == y
def c_code(self, node, name, (x, y), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError()
return "%(z)s = (%(x)s == %(y)s);" % locals()
eq = EQ()
class NEQ(LogicalComparison):
identity = False
commutative = True
associative = False
def impl(self, x, y):
return x != y
def c_code(self, node, name, (x, y), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError()
return "%(z)s = (%(x)s != %(y)s);" % locals()
neq = NEQ()
class IsNan(FixedLogicalComparison):
def impl(self, x):
return numpy.isnan(x)
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError()
return "%(z)s = isnan(%(x)s);" % locals()
isnan = IsNan()
class IsInf(FixedLogicalComparison):
def impl(self, x):
return numpy.isinf(x)
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError()
# Note that the C isinf returns -1 for -Inf and +1 for +Inf, while
# numpy simply returns True: we mimic numpy's behavior here, thus
# the absolute value.
return "%(z)s = abs(isinf(%(x)s));" % locals()
isinf = IsInf()
class InRange(LogicalComparison):
nin = 3
def __init__(self, openlow, openhi):
self.openlow = openlow
self.openhi = openhi
def impl(self, x, low, hi):
if self.openlow and x <= low:
return False
elif not self.openlow and x < low:
return False
if self.openhi and x >= hi:
return False
elif not self.openhi and x > hi:
return False
return True
def c_code(self, node, name, (x, low, hi), (z, ), sub):
if self.openlow:
cmp1 = '>'
else:
cmp1 = '>='
#backport
#cmp1 = '>' if self.openlow else '>='
if self.openhi:
cmp2 = '<'
else:
cmp2 = '<='
#backport
#cmp2 = '<' if self.openhi else '<='
return ("%(z)s = %(x)s %(cmp1)s %(low)s &&"
" %(x)s %(cmp2)s %(hi)s;" % locals())
def grad(self, (x, low, hi), (gz, )):
return None, None, None
inopenrange = InRange(True, True)
inclosedrange = InRange(False, False)
class Switch(ScalarOp):
nin = 3
def impl(self, cond, ift, iff):
if cond:
return ift
else:
return iff
#backport
#return ift if cond else iff
def c_code(self, node, name, (cond, ift, iff), (z, ), sub):
return "%(z)s = %(cond)s ? %(ift)s : %(iff)s;" % locals()
def grad(self, (cond, ift, iff), (gz, )):
first_part = switch(cond, gz, 0.)
second_part = switch(cond, 0., gz)
out = self(cond, ift, iff)
if out.type.dtype in discrete_types:
first_part = 0.
second_part = 0.
# cond does affect the elements of the output so it is connected.
# For the sake of making the gradient convenient we assume that
# condition + epsilon always triggers the same branch as condition
condition_grad = cond.zeros_like().astype(theano.config.floatX)
return (condition_grad, first_part, second_part)
def output_types(self, (cond_t, ift_t, iff_t)):
return upcast_out(ift_t, iff_t)
switch = Switch()
####################
# BIT-WISE OPERATORS
####################
class UnaryBitOp(UnaryScalarOp):
def output_types(self, *input_types):
for i in input_types[0]:
if i not in (int8, int16, int32, int64):
raise TypeError('input to a BitOp must have type int8,'
' int16, int32 or int64... not %s' % i)
return upcast_out(*input_types[0])
def grad(self, inputs, output_gradients):
return [inputs[0].zeros_like().astype(theano.config.floatX)]
class BinaryBitOp(BinaryScalarOp):
def output_types(self, *input_types):
t0, t1 = input_types[0]
for i in input_types[0]:
if i not in (int8, int16, int32, int64):
raise TypeError('input to a BitOp must have type int8,'
' int16, int32 or int64... not %s' % i)
return upcast_out(*input_types[0])
def grad(self, inputs, output_gradients):
        a, b = inputs
        return [a.zeros_like().astype(theano.config.floatX),
                b.zeros_like().astype(theano.config.floatX)]
class OR(BinaryBitOp):
identity = 0
commutative = True
associative = True
def impl(self, x, y):
return x | y
def c_code(self, node, name, (x, y), (z, ), sub):
return "%(z)s = (%(x)s | %(y)s);" % locals()
or_ = OR()
class XOR(BinaryBitOp):
identity = 0
commutative = True
associative = True
def impl(self, x, y):
return x ^ y
def c_code(self, node, name, (x, y), (z, ), sub):
return "%(z)s = (%(x)s ^ %(y)s);" % locals()
xor = XOR()
class AND(BinaryBitOp):
identity = 1
commutative = True
associative = True
def impl(self, x, y):
return x & y
def c_code(self, node, name, (x, y), (z, ), sub):
return "%(z)s = (%(x)s & %(y)s);" % locals()
and_ = AND()
class Invert(UnaryBitOp):
def impl(self, x):
return ~x
def c_code(self, node, name, (x,), (z, ), sub):
return "%(z)s = (~%(x)s);" % locals()
invert = Invert()
##############
# Arithmetic
##############
class Maximum(BinaryScalarOp):
commutative = True
associative = True
def impl(self, *inputs):
        # The built-in max function doesn't support complex types
return numpy.maximum(*inputs)
def c_code(self, node, name, (x, y), (z, ), sub):
if any([i.type in complex_types for i in node.inputs]):
raise NotImplementedError()
# Test for both y>x and x>=y to detect NaN
return ('%(z)s = ((%(y)s)>(%(x)s)? (%(y)s): '
'((%(x)s)>=(%(y)s)? (%(x)s): nan("")));' % locals())
def grad(self, (x, y), (gz, )):
assert gz.type not in complex_types
# max is not defined for complex_types
output = self(x, y)
if output.type in discrete_types:
return [x.zeros_like().astype(theano.config.floatX),
y.zeros_like().astype(theano.config.floatX)]
gx = eq(output, x) * gz
gy = eq(output, y) * gz
return (gx, gy)
maximum = Maximum(upcast_out, name='maximum')
class Minimum(BinaryScalarOp):
commutative = True
associative = True
def impl(self, *inputs):
        # The built-in min function doesn't support complex types
return numpy.minimum(*inputs)
def c_code(self, node, name, (x, y), (z, ), sub):
if any([i.type in complex_types for i in node.inputs]):
raise NotImplementedError()
return ('%(z)s = ((%(y)s)<(%(x)s)? (%(y)s): '
'((%(x)s)<=(%(y)s)? (%(x)s): nan("")));' % locals())
def grad(self, (x, y), (gz, )):
assert gz.type not in complex_types
        # min is not defined for complex_types
output = minimum(x, y)
if output.type in discrete_types:
return [x.zeros_like().astype(theano.config.floatX),
y.zeros_like().astype(theano.config.floatX)]
gx = eq(output, x) * gz
gy = eq(output, y) * gz
return (gx, gy)
minimum = Minimum(upcast_out, name='minimum')
class Add(ScalarOp):
identity = 0
commutative = True
associative = True
def impl(self, *inputs):
return sum(inputs)
def c_code(self, node, name, inputs, (z, ), sub):
if not inputs:
return z + " = 0;"
else:
return z + " = " + " + ".join(inputs) + ";"
def grad(self, inputs, (gz, )):
if gz.type in complex_types:
raise NotImplementedError()
if self(*inputs).type in discrete_types:
assert gz is not None
retval = []
for ii, inp in enumerate(inputs):
if hasattr(inp, 'zeros_like'):
retval.append(
inp.zeros_like().astype(theano.config.floatX))
else:
retval.append(grad_undefined(self, ii, inp))
else:
retval = []
for i in inputs:
retval += [gz]
return retval
add = Add(upcast_out, name='add')
class Mul(ScalarOp):
identity = 1
commutative = True
associative = True
def impl(self, *inputs):
return numpy.product(inputs)
def c_code(self, node, name, inputs, (z, ), sub):
if not inputs:
return z + " = 1;"
else:
return z + " = " + " * ".join(inputs) + ";"
def grad(self, inputs, (gz, )):
retval = []
# The following 3 lines verify that gz is complex when the
# output is complex. The rest of this function make this supposition.
output_type = self.output_types([i.type for i in inputs])[0]
if output_type in complex_types:
            if gz.type not in complex_types:
                raise TypeError(
                    'Mul with output_type ' + str(output_type) +
                    ' expected gz type to be complex, got gz with type ' +
                    str(gz.type))
if output_type in discrete_types:
return [ipt.zeros_like().astype(theano.config.floatX)
for ipt in inputs]
for input in inputs:
if gz.type in complex_types:
                # zr + zi*i = (xr + xi*i) * (yr + yi*i)
                #           = (xr*yr - xi*yi) + (xr*yi + xi*yr)*i
otherprod = mul(*(utils.difference(inputs, [input])))
yr = real(otherprod)
yi = imag(otherprod)
if input.type in complex_types:
retval += [complex(yr * real(gz) + yi * imag(gz),
yr * imag(gz) - yi * real(gz))]
else:
retval += [yr * real(gz) + yi * imag(gz)]
else:
retval += [mul(*([gz] + utils.difference(inputs,
[input])))]
return retval
mul = Mul(upcast_out, name='mul')
class Sub(BinaryScalarOp):
def impl(self, x, y):
return x - y
def c_code(self, node, name, (x, y), (z, ), sub):
return "%(z)s = %(x)s - %(y)s;" % locals()
def grad(self, (x, y), (gz, )):
if gz.type in complex_types:
raise NotImplementedError()
if (x - y).type in discrete_types:
return [x.zeros_like().astype(theano.config.floatX),
y.zeros_like().astype(theano.config.floatX)]
first_part = gz
second_part = -gz
return first_part, second_part
sub = Sub(upcast_out, name='sub')
def int_or_true_div(x_discrete, y_discrete):
"""
Return 'int' or 'true' depending on the type of division used for x / y.
:param x_discrete: True if `x` is discrete ([unsigned] integer).
    :param y_discrete: True if `y` is discrete ([unsigned] integer).
:returns: 'int' if `x / y` should be an integer division, or `true` if it
should be a true division.
Raises an IntegerDivisionError if both `x_discrete` and `y_discrete` are
True and `config.int_division` is set to 'raise'.
    This function is used by both scalar/basic.py and tensor/basic.py.
"""
if (x_discrete and y_discrete):
if config.int_division == 'raise':
raise IntegerDivisionError(
"With `config.int_division` set to 'raise', dividing two "
"integer types with '/' is forbidden to avoid confusion "
"between integer and floating point divisions. Please "
"use // for integer division, or if you want a float result "
"either cast one of the arguments to a float or directly call "
"`x.__truediv__(y)`.")
elif config.int_division == 'int':
warnings.warn(
"Division of two integer types with x / y is deprecated, "
"please use x // y for an integer division.",
DeprecationWarning,
stacklevel=4)
return 'int'
elif config.int_division == 'floatX':
return 'true'
else:
raise NotImplementedError(config.int_division)
else:
return 'true'
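# For instance, with config.int_division == 'int',
# int_or_true_div(True, True) returns 'int' (and warns); with it set to
# 'floatX' it returns 'true'; any non-discrete operand always yields 'true'.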
def div_proxy(x, y):
"""Proxy for either true_div or int_div, depending on types of x, y."""
f = eval('%s_div' % int_or_true_div(as_scalar(x).type in discrete_types,
as_scalar(y).type in discrete_types))
return f(x, y)
class TrueDiv(BinaryScalarOp):
def output_types(self, types):
if all(t in discrete_types for t in types):
return [Scalar(config.floatX)]
else:
return super(TrueDiv, self).output_types(types)
def impl(self, x, y):
x = numpy.asarray(x)
y = numpy.asarray(y)
if all(a.dtype in discrete_types for a in (x, y)):
return numpy.array(float(x) / y, dtype=config.floatX)
else:
return x / y
def c_code(self, node, name, (x, y), (z, ), sub):
        # We only generate good C code when the inputs are both complex or
        # both real; mixed real/complex division is not supported.
if sum([node.inputs[0].type in complex_types,
node.inputs[1].type in complex_types]) == 1:
raise NotImplementedError('type not supported', type)
if (node.inputs[0].type in discrete_types and
node.inputs[1].type in discrete_types):
return "%(z)s = ((double)%(x)s) / %(y)s;" % locals()
return "%(z)s = %(x)s / %(y)s;" % locals()
def grad(self, (x, y), (gz, )):
if x.type in complex_types:
raise NotImplementedError()
# If the output of this op is discrete, then it
# it is locally flat everywhere, so the gradient
# through it is 0.
# This is different from it not being connected
# to the output; x/y is still a function of x
# and y; it's just a step function.
if (x / y).type in discrete_types:
return [x.zeros_like(), y.zeros_like()]
first_part = gz / y
if y.type in complex_types:
raise NotImplementedError()
second_part = -(gz * x) / (y * y)
return first_part, second_part
true_div = TrueDiv(upcast_out, name='true_div')
class IntDiv(BinaryScalarOp):
complex_error = ComplexError(
"Theano does not support integer division (//) on "
"complex numbers, since numpy deprecated it.")
def impl(self, x, y):
return x // y
def c_support_code(self):
# We use a macro as python use % as a special string character,
# and the output of c_code may be run through another level
# of string formatting.
return "#define THEANO_MACRO_MOD(x,y) (x % y)"
def c_code(self, node, name, (x, y), (z,), sub):
t = node.inputs[0].type.upcast(*[i.type for i in node.inputs[1:]])
if t in imap(str, discrete_types):
x_div_y_pp = '(%(x)s / %(y)s)' % locals()
x_div_y_mp = '((-%(x)s) / %(y)s)' % locals()
x_mod_y_mp = 'THEANO_MACRO_MOD((-%(x)s), %(y)s)' % locals()
x_div_y_pm = '(%(x)s / (-%(y)s))' % locals()
x_mod_y_pm = 'THEANO_MACRO_MOD(%(x)s, (-%(y)s))' % locals()
x_div_y_mm = '((-%(x)s) / (-%(y)s))' % locals()
elif t in imap(str, float_types):
# We need to call different functions of math.h
# depending on the type
if t == 'float32':
floor = 'floorf'
fmod = 'fmodf'
elif t == 'float64':
floor = 'floor'
fmod = 'fmod'
else:
raise NotImplementedError('type not supported', t)
x_div_y_pp = '%(floor)s(%(x)s / %(y)s)' % locals()
x_div_y_mp = '%(floor)s((-%(x)s) / %(y)s)' % locals()
x_mod_y_mp = '%(fmod)s((-%(x)s), %(y)s)' % locals()
x_div_y_pm = '%(floor)s(%(x)s / (-%(y)s))' % locals()
x_mod_y_pm = '%(fmod)s(%(x)s, (-%(y)s))' % locals()
x_div_y_mm = '%(floor)s((-%(x)s) / (-%(y)s))' % locals()
        elif t in imap(str, complex_types):
raise self.complex_error
else:
raise NotImplementedError('type not supported', t)
return dedent("""
if (%(x)s < 0) {
if (%(y)s < 0) {
%(z)s = %(x_div_y_mm)s;
} else {
%(z)s = - %(x_div_y_mp)s - ((%(x_mod_y_mp)s == 0) ? 0 : 1);
}
} else {
if (%(y)s < 0) {
%(z)s = - %(x_div_y_pm)s - ((%(x_mod_y_pm)s == 0) ? 0 : 1);
} else {
%(z)s = %(x_div_y_pp)s;
}
}
""") % locals()
def c_code_cache_version(self):
return (2,)
def grad(self, inputs, g_output):
return [None] * len(inputs)
int_div = IntDiv(upcast_out, name='int_div')
floor_div = int_div
def mod_check(x, y):
if (as_scalar(x).type in complex_types or
as_scalar(y).type in complex_types):
# Currently forbidden.
raise Mod.complex_error
else:
return mod(x, y)
class Mod(BinaryScalarOp):
complex_error = ComplexError(
"Theano does not support the mod operator (%) on "
"complex numbers, since numpy deprecated it.")
def impl(self, x, y):
if isinstance(x, numpy.complex) or isinstance(y, numpy.complex):
raise self.complex_error
return x % y
def c_code_cache_version(self):
return (5,)
def c_support_code(self):
# We use a macro as python use % as a special string character,
# and the output of c_code may be run through another level
# of string formatting.
return "#define THEANO_MACRO_MOD(x,y) (x % y)"
def c_code(self, node, name, (x, y), (z, ), sub):
"""
        We want the result to have the same sign as Python's modulo,
        not C's, whose result takes the sign of the dividend.
"""
# raise NotImplementedError("Unlike Python, C's modulo returns negative
# modulo on negative dividend (to implement)")
t = node.inputs[0].type.upcast(*[i.type for i in node.inputs[1:]])
if (str(t) in imap(str, discrete_types) or
t in ['uint8', 'int8', 'uint16', 'int16'] or
t in ['uint32', 'int32', 'uint64', 'int64'] or
t in discrete_types):
# The above or's should not be needed anymore. However, for now we
            # keep them for safety, and verify they are useless with an
# assert.
assert str(t) in imap(str, discrete_types)
x_mod_y = "THEANO_MACRO_MOD(%(x)s, %(y)s)" % locals()
x_mod_ymm = "THEANO_MACRO_MOD(-%(x)s, -%(y)s)" % locals()
x_mod_ypm = "THEANO_MACRO_MOD(%(x)s, -%(y)s)" % locals()
x_mod_ymp = "THEANO_MACRO_MOD(-%(x)s, %(y)s)" % locals()
elif (str(t) in imap(str, float_types) or
t in ['float32', 'float64'] or
t in float_types):
# The above or's should not be needed anymore. However, for now we
            # keep them for safety, and verify they are useless with an
# assert.
assert str(t) in imap(str, float_types)
x_mod_y = "fmod(%(x)s,%(y)s)" % locals()
x_mod_ymm = "fmod(-%(x)s,-%(y)s)" % locals()
x_mod_ypm = "fmod(%(x)s,-%(y)s)" % locals()
x_mod_ymp = "fmod(-%(x)s,%(y)s)" % locals()
elif str(t) in imap(str, complex_types):
raise self.complex_error
else:
raise NotImplementedError('type not supported', t)
return dedent("""
if (%(x)s < 0){
if (%(y)s < 0){
%(z)s = -(%(x_mod_ymm)s);
}else{
%(z)s = - %(x_mod_ymp)s + (%(x_mod_ymp)s != 0 ? %(y)s : 0);
}
}else if (%(y)s < 0){
%(z)s = (%(x_mod_ypm)s) + (%(x_mod_ypm)s != 0 ? %(y)s : 0);
}else{
%(z)s = %(x_mod_y)s;
}
""") % locals()
def grad(self, (x, y), (gz, )):
return None, None
mod = Mod(upcast_out, name='mod')
class Pow(BinaryScalarOp):
def impl(self, x, y):
return x ** y
def c_code(self, node, name, (x, y), (z, ), sub):
if (node.inputs[0].type in complex_types or
node.inputs[1].type in complex_types):
raise NotImplementedError('type not supported', type)
return "%(z)s = pow(%(x)s, %(y)s);" % locals()
def grad(self, (x, y), (gz, )):
if gz.type in complex_types:
raise NotImplementedError()
if self(x, y).type in discrete_types:
return [x.zeros_like().astype(theano.config.floatX),
y.zeros_like().astype(theano.config.floatX)]
first_part = gz * y * x ** (y - 1)
second_part = gz * log(x) * x ** y
return (first_part, second_part)
def c_code_contiguous(self, node, name, (x, y), (z, ), sub):
if not theano.config.lib.amdlibm:
raise theano.gof.utils.MethodNotDefined()
# We compare the dtype AND the broadcast flag
        # as this function does not broadcast
if (node.inputs[0].type == node.outputs[0].type and
node.inputs[1].type == node.outputs[0].type and
                # amdlibm 3.0 does not have a float64 version of this SIMD function
node.inputs[0].dtype == 'float32' and
node.inputs[1].dtype == 'float32'):
dtype = 'float'
fct = "amd_vrsa_powf"
return """
npy_intp n = PyArray_SIZE(%(z)s);
%(dtype)s * x = (%(dtype)s*) PyArray_DATA(%(x)s);
%(dtype)s * y = (%(dtype)s*) PyArray_DATA(%(y)s);
%(dtype)s * z = (%(dtype)s*) PyArray_DATA(%(z)s);
%(fct)s(n, x, y, z);
""" % locals()
# We compare the dtype and check we broadcast a scalar
elif (node.inputs[0].type == node.outputs[0].type and
node.inputs[1].dtype == node.outputs[0].dtype and
all(node.inputs[1].broadcastable) and
                # amdlibm 3.0 does not have a float64 version of this SIMD function
node.inputs[0].dtype == 'float32' and
node.inputs[1].dtype == 'float32'):
dtype = 'float'
fct = "amd_vrsa_powxf"
return """
npy_intp n = PyArray_SIZE(%(z)s);
%(dtype)s * x = (%(dtype)s*) PyArray_DATA(%(x)s);
%(dtype)s * y = (%(dtype)s*) PyArray_DATA(%(y)s);
%(dtype)s * z = (%(dtype)s*) PyArray_DATA(%(z)s);
%(fct)s(n, x, *y, z);
""" % locals()
raise theano.gof.utils.MethodNotDefined()
pow = Pow(upcast_out, name='pow')
class Clip(ScalarOp):
nin = 3
def impl(self, x, min, max):
if x < min:
return min
elif x > max:
return max
else:
return x
def c_code(self, node, name, (x, min, max), (z, ), sub):
return "%(z)s = %(x)s < %(min)s ? %(min)s : %(x)s > %(max)s ? %(max)s : %(x)s;" % locals()
def grad(self, (x, mn, mx), (gz, )):
assert gz.type not in complex_types
gx = ((x > mn) & (x < mx)) * gz
gmn = (x < mn) * gz
gmx = (x > mx) * gz
out = self(x, mn, mx)
def handle_int(v):
if out.type in int_types:
return v.zeros_like().astype(config.floatX)
return v
return map(handle_int, [gx, gmn, gmx])
# Don't allow complex even if numpy does,
# as there is no mathematical reason for this function on complex numbers.
clip = Clip(upcast_out_no_complex, name='clip')
class Second(BinaryScalarOp):
def impl(self, x, y):
return y
def c_code(self, node, name, (x, y), (z, ), sub):
return "%(z)s = %(y)s;" % locals()
def connection_pattern(self, node):
# x is never connected because its elements are never used
# y is connected because its elements are copied over
return [[False], [True]]
def grad(self, (x, y), (gz, )):
if y.type in continuous_types:
# x is disconnected because the elements of x are not used
return DisconnectedType()(), gz
else:
#when y is discrete, we assume the function can be extended
#to deal with real-valued inputs by rounding them to the
#nearest integer. f(x+eps) thus equals f(x) so the gradient
#is zero, not disconnected or undefined
return DisconnectedType()(), y.zeros_like()
second = Second(transfer_type(1), name='second')
class Identity(UnaryScalarOp):
def impl(self, input):
return input
def c_code(self, node, name, (x, ), (z, ), sub):
return "%(z)s = %(x)s;" % locals()
def grad(self, (x, ), (gz, )):
if x.type in continuous_types:
return gz,
else:
return None,
identity = Identity(same_out, name='identity')
#### CASTING OPERATIONS
class Cast(UnaryScalarOp):
def __init__(self, o_type, name=None):
if not isinstance(o_type, Scalar):
raise TypeError(o_type)
super(Cast, self).__init__(specific_out(o_type), name=name)
self.o_type = o_type
self.ctor = getattr(numpy, o_type.dtype)
def __str__(self):
return '%s{%s}' % (self.__class__.__name__, self.o_type.dtype)
def impl(self, input):
return self.ctor(input)
def c_code(self, node, name, (x, ), (z, ), sub):
return "%s = (%s)%s;" % (z, node.outputs[0].type.dtype_specs()[1], x)
def grad(self, (x, ), (gz, )):
if self.o_type in continuous_types:
return [gz]
else:
return [x.zeros_like().astype(theano.config.floatX)]
def c_code_cache_version(self):
s = super(Cast, self).c_code_cache_version()
if s:
return (3,) + s
else:
return s
convert_to_int8 = Cast(int8, name='convert_to_int8')
convert_to_int16 = Cast(int16, name='convert_to_int16')
convert_to_int32 = Cast(int32, name='convert_to_int32')
convert_to_int64 = Cast(int64, name='convert_to_int64')
convert_to_uint8 = Cast(uint8, name='convert_to_uint8')
convert_to_uint16 = Cast(uint16, name='convert_to_uint16')
convert_to_uint32 = Cast(uint32, name='convert_to_uint32')
convert_to_uint64 = Cast(uint64, name='convert_to_uint64')
convert_to_float32 = Cast(float32, name='convert_to_float32')
convert_to_float64 = Cast(float64, name='convert_to_float64')
convert_to_complex64 = Cast(complex64, name='convert_to_complex64')
convert_to_complex128 = Cast(complex128, name='convert_to_complex128')
_cast_mapping = {
'int8': convert_to_int8,
'int16': convert_to_int16,
'int32': convert_to_int32,
'int64': convert_to_int64,
'uint8': convert_to_uint8,
'uint16': convert_to_uint16,
'uint32': convert_to_uint32,
'uint64': convert_to_uint64,
'float32': convert_to_float32,
'float64': convert_to_float64,
'complex64': convert_to_complex64,
'complex128': convert_to_complex128}
def cast(x, dtype):
"""Symbolically cast `x` to a Scalar of given `dtype`."""
if dtype == 'floatX':
dtype = config.floatX
_x = as_scalar(x)
if _x.type.dtype == dtype:
return _x
if _x.type.dtype.startswith('complex') and not dtype.startswith('complex'):
raise TypeError('Casting from complex to real is ambiguous: consider'
' real(), imag(), angle() or abs()')
return _cast_mapping[dtype](_x)
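# Example: cast(x, 'float32') applies convert_to_float32 to x (and is a
# no-op if x is already float32); cast(x, 'floatX') resolves config.floatX
# first, and complex -> real casts raise TypeError as above.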
class Abs(UnaryScalarOp):
def make_node(self, x):
inputs = [as_scalar(input) for input in [x]]
if inputs[0].type == complex64:
outputs = [float32()]
elif inputs[0].type == complex128:
outputs = [float64()]
else:
outputs = [t() for t in self.output_types(
[input.type for input in inputs])]
return Apply(self, inputs, outputs)
def impl(self, x):
return numpy.abs(x)
def grad(self, (x, ), (gz, )):
if x.type in float_types + complex_types:
return gz * x / abs(x), # formula works for complex and real
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
type = node.inputs[0].type
if type in int_types:
return "%(z)s = abs(%(x)s);" % locals()
if type in float_types:
return "%(z)s = fabs(%(x)s);" % locals()
if type in complex_types:
return "%(z)s = sqrt(%(x)s.real*%(x)s.real + %(x)s.imag*%(x)s.imag);" % locals()
raise NotImplementedError('type not supported', type)
abs_ = Abs(same_out)
class Sgn(UnaryScalarOp):
def impl(self, x):
#casting to output type is handled by filter
return numpy.sign(x)
def grad(self, (x, ), (gz, )):
rval = x.zeros_like()
if rval.type.dtype in discrete_types:
rval = rval.astype(theano.config.floatX)
return [rval]
def c_code(self, node, name, (x, ), (z, ), sub):
#casting is done by compiler
#TODO: use copysign
type = node.inputs[0].type
if type in float_types:
return "%(z)s = (%(x)s >= 0) ? (%(x)s == 0) ? 0.0 : 1.0 : -1.0;" % locals()
if type in int_types:
return "%(z)s = (%(x)s >= 0) ? (%(x)s == 0) ? 0 : 1 : -1;" % locals()
raise TypeError() # complex has no sgn
def c_code_cache_version(self):
s = super(Sgn, self).c_code_cache_version()
if s:
return (3,) + s
else: # if parent is unversioned, we are too
return s
sgn = Sgn(same_out_nocomplex, name='sgn')
class Ceil(UnaryScalarOp):
def impl(self, x):
return numpy.ceil(x)
def grad(self, (x,), (gz,)):
rval = x.zeros_like()
if rval.type.dtype in discrete_types:
rval = rval.astype(theano.config.floatX)
return [rval]
def c_code(self, node, name, (x,), (z,), sub):
return "%(z)s = ceil(%(x)s);" % locals()
ceil = Ceil(same_out_nocomplex, name='ceil')
class Floor(UnaryScalarOp):
def impl(self, x):
return numpy.floor(x)
def grad(self, (x,), (gz,)):
rval = x.zeros_like()
if rval.type.dtype in discrete_types:
rval = rval.astype(theano.config.floatX)
return [rval]
def c_code(self, node, name, (x,), (z,), sub):
return "%(z)s = floor(%(x)s);" % locals()
floor = Floor(same_out_nocomplex, name='floor')
class Trunc(UnaryScalarOp):
def impl(self, x):
return numpy.trunc(x)
def grad(self, (x,), (gz,)):
return [x.zeros_like().astype(theano.config.floatX)]
def c_code(self, node, name, (x,), (z,), sub):
return "%(z)s = %(x)s >= 0? floor(%(x)s): -floor(-%(x)s);" % locals()
trunc = Trunc(same_out_nocomplex, name='trunc')
class RoundHalfToEven(UnaryScalarOp):
"""
This function implement the same rounding than numpy: Round half to even
c/c++ round fct IS DIFFERENT!
See http://en.wikipedia.org/wiki/Rounding for more detail
"""
def impl(self, x):
return numpy.round(x)
    def c_code___(self, node, name, (x, ), (z, ), sub):
        # NOTE: the trailing underscores in the method name keep this C
        # implementation disabled; the Python impl() above is used instead.
        typ = node.outputs[0].type.dtype_specs()[1]
        if node.outputs[0].type.dtype not in ['float32', 'float64']:
            raise Exception("The output should be float32 or float64")
        return dedent("""
            #ifndef ROUNDING_EPSILON
            #define ROUNDING_EPSILON 0.0000001
            #endif
            if (%(x)s < 0.0){
              // We implement the else part like that: -else( -%(x)s);
              %(typ)s i;
              std::modf( -%(x)s, &i );
              // If -%(x)s is exactly halfway between two integers
              if (fabs( -%(x)s -(i +0.5)) < ROUNDING_EPSILON){
                  // If 'i' is even then return 'i'
                  if (std::fmod( i, 2.0 ) < ROUNDING_EPSILON){
                     %(z)s = - i;
                  }else{
                     // Else return the nearest even integer
                     %(z)s = - ceil( i +0.5 );
                  }
              }else{
                  // round to closest
                  %(z)s = - round(-%(x)s);
              }
            }else{
              %(typ)s i;
              std::modf( %(x)s, &i );
              // If %(x)s is exactly halfway between two integers
              if (fabs( %(x)s -(i +0.5)) < ROUNDING_EPSILON){
                  // If 'i' is even then return 'i'
                  if (std::fmod( i, 2.0 ) < ROUNDING_EPSILON){
                     %(z)s = i;
                  }else{
                     // Else return the nearest even integer
                     %(z)s = ceil( i +0.5 );
                  }
              }else{
                  // round to closest
                  %(z)s = round(%(x)s);
              }
            }
            #undef ROUNDING_EPSILON
            """) % locals()
round_half_to_even = RoundHalfToEven(same_out_float_only)
def round_half_away_from_zero_(a):
if a > 0:
return numpy.floor(a + 0.5)
else:
return numpy.ceil(a - 0.5)
round_half_away_from_zero_vec64 = numpy.vectorize(
round_half_away_from_zero_,
doc='round_half_away_from_zero_vec64')
round_half_away_from_zero_vec32 = numpy.vectorize(
round_half_away_from_zero_,
doc='round_half_away_from_zero_vec32',
otypes=['float32'])
def round_half_away_from_zero_vec(a):
if getattr(a, 'dtype', None) == numpy.float32:
return round_half_away_from_zero_vec32(a)
return round_half_away_from_zero_vec64(a)
class RoundHalfAwayFromZero(UnaryScalarOp):
"""
    Implements the same rounding algorithm as the C round() function:
    round half away from zero. numpy.round IS DIFFERENT!
    See http://en.wikipedia.org/wiki/Rounding for more detail.
"""
def impl(self, x):
return round_half_away_from_zero_vec(x)
def c_code(self, node, name, (x, ), (z, ), sub):
if node.outputs[0].type.dtype in ['float32', 'float64']:
return "%(z)s = round(%(x)s);" % locals()
else:
Exception("The output should be float32 or float64")
round_half_away_from_zero = RoundHalfAwayFromZero(same_out_float_only)
class Neg(UnaryScalarOp):
def impl(self, x):
return -x
def grad(self, (x,), (gz,)):
if x.type in continuous_types:
return -gz,
else:
return None,
def c_code(self, node, name, (x,), (z,), sub):
return "%(z)s = -%(x)s;" % locals()
neg = Neg(same_out, name='neg')
class Inv(UnaryScalarOp):
""" multiplicative inverse. Also called reciprocal"""
def impl(self, x):
return 1.0 / x
def grad(self, (x,), (gz,)):
if x.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return -gz / (x * x),
else:
return None,
def c_code(self, node, name, (x,), (z,), sub):
return "%(z)s = 1.0 / %(x)s;" % locals()
inv = Inv(upgrade_to_float, name='inv')
class Log(UnaryScalarOp):
""" log base e """
amd_float32 = "amd_vrsa_logf"
amd_float64 = "amd_vrda_log"
def impl(self, x):
return numpy.log(x)
def grad(self, (x,), (gz,)):
if x.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz / x,
else:
return None,
def c_code(self, node, name, (x,), (z,), sub):
#todo: the version using log2 seems to be very slightly faster
# on some machines for some reason, check if it's worth switching
#return "%(z)s = log2(%(x)s) * 0.69314718055994529;" % locals()
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = log(%(x)s);" % locals()
log = Log(upgrade_to_float, name='log')
class Log2(UnaryScalarOp):
""" log base 2 """
amd_float32 = "amd_vrsa_log2f"
amd_float64 = "amd_vrda_log2"
def impl(self, x):
return numpy.log2(x)
def grad(self, (x,), (gz,)):
if x.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz / (x * math.log(2.0)),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = log2(%(x)s);" % locals()
log2 = Log2(upgrade_to_float, name='log2')
class Log10(UnaryScalarOp):
""" log base 10 """
amd_float32 = "amd_vrsa_log10f"
amd_float64 = "amd_vrda_log10"
def impl(self, x):
return numpy.log10(x)
def grad(self, (x,), (gz,)):
if x.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz / (x * numpy.log(10.0)),
else:
            return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = log10(%(x)s);" % locals()
log10 = Log10(upgrade_to_float, name='log10')
class Log1p(UnaryScalarOp):
""" log(1+x) """
def impl(self, x):
return numpy.log1p(x)
def grad(self, (x,), (gz,)):
if gz.type in complex_types:
raise NotImplementedError()
if gz.type in float_types:
return [gz / (1 + x)]
return [None]
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = log1p(%(x)s);" % locals()
log1p = Log1p(upgrade_to_float, name='log1p')
class Exp(UnaryScalarOp):
amd_float32 = "amd_vrsa_expf"
amd_float64 = "amd_vrda_exp"
def impl(self, x):
return numpy.exp(x)
def grad(self, (x, ), (gz, )):
if x.type in complex_types:
raise NotImplementedError()
elif x.type in float_types:
return gz * exp(x),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = exp(%(x)s);" % locals()
exp = Exp(upgrade_to_float, name='exp')
class Exp2(UnaryScalarOp):
def impl(self, x):
return numpy.exp2(x)
def grad(self, (x, ), (gz, )):
if x.type in complex_types:
raise NotImplementedError()
elif x.type in float_types:
return gz * exp2(x) * log(numpy.cast[x.type](2)),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = exp2(%(x)s);" % locals()
exp2 = Exp2(upgrade_to_float, name='exp2')
class Expm1(UnaryScalarOp):
def impl(self, x):
return numpy.expm1(x)
def grad(self, (x, ), (gz, )):
if x.type in complex_types:
raise NotImplementedError()
elif x.type in float_types:
return gz * exp(x),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = exp(%(x)s) - 1;" % locals()
expm1 = Expm1(upgrade_to_float, name='expm1')
class Sqr(UnaryScalarOp):
def impl(self, x):
return x * x
def grad(self, (x, ), (gz, )):
if gz.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz * x * 2,
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
return "%(z)s = %(x)s * %(x)s;" % locals()
sqr = Sqr(same_out, name='sqr')
class Sqrt(UnaryScalarOp):
def impl(self, x):
return numpy.sqrt(x)
def grad(self, (x,), (gz,)):
if gz.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return (gz * 0.5) / sqrt(x),
else:
return None,
def c_code(self, node, name, (x,), (z,), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = sqrt(%(x)s);" % locals()
sqrt = Sqrt(upgrade_to_float, name='sqrt')
class Deg2Rad(UnaryScalarOp):
def impl(self, x):
return numpy.deg2rad(x)
def grad(self, (x,), (gz,)):
if gz.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz * numpy.asarray(numpy.pi / 180, gz.type),
else:
return None,
def c_code(self, node, name, (x,), (z,), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = %(x)s * (M_PI / 180.0);" % locals()
deg2rad = Deg2Rad(upgrade_to_float, name='deg2rad')
class Rad2Deg(UnaryScalarOp):
def impl(self, x):
return numpy.rad2deg(x)
def grad(self, (x,), (gz,)):
if gz.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz * numpy.asarray(180. / numpy.pi, gz.type),
else:
return None,
def c_code(self, node, name, (x,), (z,), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = %(x)s * (180.0 / M_PI);" % locals()
rad2deg = Rad2Deg(upgrade_to_float, name='rad2deg')
class Cos(UnaryScalarOp):
amd_float32 = "amd_vrsa_cosf"
amd_float64 = "amd_vrda_cos"
def impl(self, x):
return numpy.cos(x)
def grad(self, (x, ), (gz, )):
if gz.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return -gz * sin(x),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = cos(%(x)s);" % locals()
cos = Cos(upgrade_to_float, name='cos')
class ArcCos(UnaryScalarOp):
def impl(self, x):
return numpy.arccos(x)
def grad(self, (x,), (gz,)):
if gz.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return - gz / sqrt(numpy.cast[x.type](1) - sqr(x)),
else:
return None,
def c_code(self, node, name, (x,), (z,), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = acos(%(x)s);" % locals()
arccos = ArcCos(upgrade_to_float, name='arccos')
class Sin(UnaryScalarOp):
amd_float32 = "amd_vrsa_sinf"
amd_float64 = "amd_vrda_sin"
def impl(self, x):
return numpy.sin(x)
def grad(self, (x, ), (gz, )):
if x.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz * cos(x),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = sin(%(x)s);" % locals()
sin = Sin(upgrade_to_float, name='sin')
class ArcSin(UnaryScalarOp):
def impl(self, x):
return numpy.arcsin(x)
def grad(self, (x,), (gz,)):
if gz.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz / sqrt(numpy.cast[x.type](1) - sqr(x)),
else:
return None,
def c_code(self, node, name, (x,), (z,), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = asin(%(x)s);" % locals()
arcsin = ArcSin(upgrade_to_float, name='arcsin')
class Tan(UnaryScalarOp):
def impl(self, x):
return numpy.tan(x)
def grad(self, (x,), (gz,)):
if x.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz / sqr(cos(x)),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = tan(%(x)s);" % locals()
tan = Tan(upgrade_to_float, name='tan')
class ArcTan(UnaryScalarOp):
def impl(self, x):
return numpy.arctan(x)
def grad(self, (x,), (gz,)):
if gz.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz / (numpy.cast[x.type](1) + sqr(x)),
else:
return None,
def c_code(self, node, name, (x,), (z,), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = atan(%(x)s);" % locals()
arctan = ArcTan(upgrade_to_float, name='arctan')
class ArcTan2(BinaryScalarOp):
def impl(self, y, x):
return numpy.arctan2(y, x)
def grad(self, (y, x), (gz,)):
if gz.type in complex_types:
raise NotImplementedError()
if x.type in float_types and y.type in float_types:
return [gz * x / (sqr(x) + sqr(y)),
gz * neg(y) / (sqr(x) + sqr(y))]
else:
            return (None, None)
def c_code(self, node, name, (y, x), (z,), sub):
if (node.inputs[0].type in complex_types or
node.inputs[1].type in complex_types):
raise NotImplementedError('type not supported', type)
return "%(z)s = atan2(%(y)s, %(x)s);" % locals()
arctan2 = ArcTan2(upgrade_to_float, name='arctan2')
class Cosh(UnaryScalarOp):
"""
cosh(x) = (exp(x) + exp(-x)) / 2
"""
def impl(self, x):
return numpy.cosh(x)
def grad(self, (x, ), (gz, )):
if x.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz * sinh(x),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = cosh(%(x)s);" % locals()
cosh = Cosh(upgrade_to_float, name='cosh')
class ArcCosh(UnaryScalarOp):
def impl(self, x):
return numpy.arccosh(x)
def grad(self, (x, ), (gz, )):
if x.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz / sqrt(sqr(x) - numpy.cast[x.type](1)),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = acosh(%(x)s);" % locals()
arccosh = ArcCosh(upgrade_to_float, name='arccosh')
class Sinh(UnaryScalarOp):
"""
sinh(x) = (exp(x) - exp(-x)) / 2
"""
def impl(self, x):
return numpy.sinh(x)
def grad(self, (x, ), (gz, )):
if x.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz * cosh(x),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = sinh(%(x)s);" % locals()
sinh = Sinh(upgrade_to_float, name='sinh')
class ArcSinh(UnaryScalarOp):
def impl(self, x):
return numpy.arcsinh(x)
def grad(self, (x, ), (gz, )):
if x.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz / sqrt(sqr(x) + numpy.cast[x.type](1)),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = asinh(%(x)s);" % locals()
arcsinh = ArcSinh(upgrade_to_float, name='arcsinh')
class Tanh(UnaryScalarOp):
"""
tanh(x) = sinh(x) / cosh(x)
= (exp(2*x) - 1) / (exp(2*x) + 1)
"""
def impl(self, x):
return numpy.tanh(x)
def grad(self, (x, ), (gz, )):
if x.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz * (1 - sqr(tanh(x))),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = tanh(%(x)s);" % locals()
tanh = Tanh(upgrade_to_float, name='tanh')
class ArcTanh(UnaryScalarOp):
def impl(self, x):
return numpy.arctanh(x)
def grad(self, (x, ), (gz, )):
if x.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz / (numpy.cast[x.type](1) - sqr(x)),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = atanh(%(x)s);" % locals()
arctanh = ArcTanh(upgrade_to_float, name='arctanh')
class Real(UnaryScalarOp):
"""Extract the real coordinate of a complex number. """
def impl(self, x):
return numpy.real(x)
def grad(self, (x, ), (gz, )):
return [complex(gz, 0)]
real = Real(real_out, name='real')
class Imag(UnaryScalarOp):
def impl(self, x):
return numpy.imag(x)
def grad(self, (x, ), (gz, )):
if x.type in complex_types:
return [complex(0, gz)]
elif x.type in float_types:
return [second(x, 0)]
else:
return [None]
imag = Imag(real_out, name='imag')
class Angle(UnaryScalarOp):
def impl(self, x):
return numpy.angle(x)
def grad(self, (c, ), (gtheta, )):
# y = x.imag
# r = sqrt(y**2 + x.real**2)
# g = y/r
# if x == 0 and y == 0:
# theta = 0
# elif x >= 0:
# theta = numpy.arcsin(g)
# else:
# theta = -numpy.arcsin(g)+numpy.pi
x = real(c)
y = imag(c)
r = abs(c)
gr = -gtheta * y / (r ** 2 * sqrt(1 - (y / r) ** 2))
gx = gr * x / r
gy = gr * y / r
        if c.type in complex_types:
            return [cast(complex(gx, gy), x.type.dtype)]
        elif c.type in float_types:
return [cast(second(x, 0), x.type.dtype)]
else:
return [None]
angle = Angle(specific_out(float64), name='angle')
class Complex(BinaryScalarOp):
@staticmethod
def output_types_preference(x, y):
if x in complex_types:
raise TypeError(x)
if y in complex_types:
raise TypeError(y)
up = Scalar.upcast(x, y)
if up in ('float64', 'int64', 'uint64', 'int32', 'uint32'):
return [complex128]
else:
return [complex64]
def impl(self, x, y):
return numpy.complex(x, y)
def grad(self, (x, y), (gz,)):
return [cast(real(gz), x.type.dtype),
cast(imag(gz), y.type.dtype)]
complex = Complex(name='complex')
class Conj(UnaryScalarOp):
def impl(self, x):
return numpy.conj(x)
conj = Conj(same_out, name='conj')
class ComplexFromPolar(BinaryScalarOp):
@staticmethod
def output_types_preference(x, y):
return Complex.output_types_preference(x, y)
def impl(self, r, theta):
if r < 0:
raise ValueError('polar radius must be non-negative', r)
x = r * numpy.cos(theta)
y = r * numpy.sin(theta)
if x.dtype == 'float32':
return numpy.complex64(numpy.complex(x, y))
else:
return numpy.complex128(numpy.complex(x, y))
def grad(self, (r, theta), (gz,)):
gr = gz * complex_from_polar(1, theta)
gtheta = gz * complex_from_polar(r, -theta)
return [gr, gtheta]
complex_from_polar = ComplexFromPolar(name='complex_from_polar')
class Composite(ScalarOp):
"""
Composite is an Op that takes a graph of scalar operations and
produces c code for the whole graph. Its purpose is to implement loop
fusion.
Composite depends on all the Ops in its graph having C code.
"""
def __str__(self):
return self.name
def make_new_inplace(self, output_types_preference=None, name=None):
"""
This op.__init__ fct don't have the same parameter as other scalar op.
This break the insert_inplace_optimizer optimization.
This fct allow fix patch this.
"""
out = self.__class__(self.inputs, self.outputs)
if name:
out.name = name
else:
name = out.name
super(Composite, out).__init__(output_types_preference, name)
return out
def init_c_code(self):
"""Return the C code for this Composite Op. """
subd = dict(
zip(self.fgraph.inputs,
["%%(i%i)s" % i for i in xrange(len(self.fgraph.inputs))])
+ zip(self.fgraph.outputs,
["%%(o%i)s" % i for i in xrange(len(self.fgraph.outputs))]))
for orphan in self.fgraph.variables: # fgraph.orphans:
if orphan.owner is None and orphan not in self.fgraph.inputs:
if isinstance(orphan, Constant):
subd[orphan] = orphan.type.c_literal(orphan.data)
else:
raise ValueError(
"All orphans in the fgraph to Composite must"
" be Constant instances.")
_c_code = "{\n"
i = 0
j = 0
self.nodenames = ["%(nodename)s_" + ('subnode%i' % j)
for j, n in enumerate(self.fgraph.toposort())]
for j, node in enumerate(self.fgraph.toposort()):
for output in node.outputs:
if output not in subd:
i += 1
name = "V%%(id)s_tmp%i" % i
subd[output] = name
_c_code += "%s %s;\n" % (
output.type.dtype_specs()[1], name)
s = node.op.c_code(node,
self.nodenames[j],
[subd[input] for input in node.inputs],
[subd[output] for output in node.outputs],
dict(fail="%(fail)s",
id="%%(id)s_%i" % j))
_c_code += s
_c_code += "\n"
_c_code += "}\n"
self._c_code = _c_code
def init_py_impls(self):
"""Return a list of functions that compute each output of self
"""
def compose_impl(r):
# this is not optimal at all eg in add(*1 -> mul(x, y), *1)
# it will calculate *1 twice
# it also doesn't follow fgraph.toposort but that's (presumably)
# still correct since we only have scalar ops
if r in self.fgraph.inputs:
idx = self.fgraph.inputs.index(r)
return lambda inputs: inputs[idx]
elif r.owner is None: # in fgraph.orphans:
return lambda inputs: r.data
node = r.owner
producers = [compose_impl(input) for input in node.inputs]
return lambda inputs: node.op.impl(*[p(inputs) for p in producers])
self._impls = [compose_impl(r) for r in self.fgraph.outputs]
def init_name(self):
"""Return a readable string representation of self.fgraph
"""
try:
rval = self.name
except AttributeError:
if 0:
l = []
for n in self.fgraph.toposort():
if hasattr(n.op, "name") and n.op.name is not None:
v = n.op.name
if v.startswith("Composite"):
v = v[len("Composite"):]
else:
v = n.op.__class__.__name__
l.append(v)
rval = "Composite{" + ",".join(l) + "}"
else:
for i, r in enumerate(self.fgraph.inputs):
r.name = 'i%i' % i
for i, r in enumerate(self.fgraph.outputs):
r.name = 'o%i' % i
io = set(self.fgraph.inputs + self.fgraph.outputs)
for i, r in enumerate(self.fgraph.variables):
if r not in io and len(r.clients) > 1:
r.name = 't%i' % i
rval = "Composite{%s}" % str(self.fgraph)
self.name = rval
def init_fgraph(self):
fgraph = FunctionGraph(*gof.graph.clone(self.inputs, self.outputs))
gof.MergeOptimizer().optimize(fgraph)
for node in fgraph.apply_nodes:
if not isinstance(node.op, ScalarOp):
raise ValueError("The fgraph to Composite must be exclusively"
" composed of ScalarOp instances.")
self.fgraph = fgraph
def __init__(self, inputs, outputs):
self.inputs = copy(inputs)
self.outputs = copy(outputs)
self.inputs_type = tuple([input.type for input in inputs])
self.outputs_type = tuple([output.type for output in outputs])
self.nin = len(inputs)
self.nout = len(outputs)
self.init_fgraph() # self.fgraph
self.init_name() # self.name
self.init_c_code() # self._c_code and self.nodenames
self.init_py_impls() # self._impls
def output_types(self, input_types):
if tuple(input_types) != self.inputs_type:
raise TypeError("Wrong types for Composite. Expected %s, got %s."
% (self.inputs_type, tuple(input_types)))
return self.outputs_type
def make_node(self, *inputs):
if (tuple([i.type for i in self.inputs]) ==
tuple([i.type for i in inputs])):
return super(Composite, self).make_node(*inputs)
else:
# Make a new op with the right input type.
assert len(inputs) == self.nin
res = theano.compile.rebuild_collect_shared(
self.outputs,
replace=dict(zip(self.inputs, inputs)),
rebuild_strict=False)
            # After rebuild_collect_shared, the Variables in `inputs`
            # are not necessarily in the graph represented by res.
            # res[2][0] is a dict that maps the original variables to
            # the cloned ones.
cloned_inputs = [res[2][0][i] for i in inputs]
node = Composite(cloned_inputs, res[1]).make_node(*inputs)
return node
def perform(self, node, inputs, output_storage):
for storage, impl in zip(output_storage, self._impls):
storage[0] = impl(inputs)
def impl(self, *inputs):
output_storage = [[None] for i in xrange(self.nout)]
self.perform(None, inputs, output_storage)
return utils.to_return_values([storage[0] for storage in
output_storage])
def grad(self, inputs, output_grads):
raise NotImplementedError("grad is not implemented for Composite")
def c_code(self, node, nodename, inames, onames, sub):
d = dict(zip(["i%i" % i for i in xrange(len(inames))],
inames) +
zip(["o%i" % i for i in xrange(len(onames))],
onames),
**sub)
d['nodename'] = nodename
        if 'id' not in sub:
            # The use of a dummy id is safe as the code is in a separate
            # block; it won't generate conflicting variable names.
            d['id'] = '_DUMMY_ID_'
return self._c_code % d
def c_code_cache_version(self):
rval = [3]
for x in self.fgraph.toposort():
xv = x.op.c_code_cache_version()
if xv:
rval.append(xv)
else:
return ()
return tuple(rval)
def c_support_code(self):
rval = []
for subnode in self.fgraph.toposort():
try:
rval.append(subnode.op.c_support_code())
except gof.utils.MethodNotDefined:
pass
# remove duplicate code blocks
return "\n".join(sorted(set(rval)))
def c_support_code_apply(self, node, name):
rval = []
for subnode, subnodename in zip(self.fgraph.toposort(), self.nodenames):
try:
subnode_support_code = subnode.op.c_support_code_apply(
subnode,
subnodename % dict(nodename=name))
if subnode_support_code:
rval.append(subnode_support_code)
except gof.utils.MethodNotDefined:
pass
# there should be no need to remove duplicate code blocks because
# each block should have been specialized for the given nodename.
# Any block that isn't specialized should be returned via
# c_support_code instead of c_support_code_apply.
return "\n".join(rval)
def __eq__(self, other):
if self is other:
return True
if (type(self) != type(other)
or self.nin != other.nin
or self.nout != other.nout):
return False
# see __hash__ for comment on why there is no mention of fgraph
# or module cache key here.
return (self._c_code == other._c_code)
def __hash__(self):
rval = hash((type(self),
self.nin,
self.nout,
self._c_code))
# Note that in general, the configparser settings at the time
# of code generation (__init__) affect the semantics of this Op.
# This function assumes that all relevant info about the configparser
# is embodied in _c_code. So the _c_code, rather than self.fgraph,
# is the signature of the semantics of this Op.
# _c_code is preserved through unpickling, so the Op will not change
# semantics when it is reloaded with different configparser
# settings.
return rval
def __getstate__(self):
rval = dict(self.__dict__)
del rval['_impls']
del rval['fgraph']
return rval
def __setstate__(self, d):
self.__dict__.update(d)
# We must call init to set fgraph and _impls again, as otherwise
# self.perform will not work.
self.init_fgraph()
self.init_py_impls()
assert self._c_code
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from io import StringIO
import pytest
from jinja2 import Template
from flexget.entry import Entry
from flexget.logger import capture_output
from flexget.manager import get_parser, Session
from flexget.task import TaskAbort
from flexget.components.series import db
def age_series(**kwargs):
import datetime
session = Session()
session.query(db.EpisodeRelease).update({'first_seen': datetime.datetime.now() - datetime.timedelta(**kwargs)})
session.commit()
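# Example: age_series(hours=6) backdates the first_seen timestamp of every
# release by six hours, letting the tests below fast-forward past timeframes.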
@pytest.fixture(scope='class', params=['internal', 'guessit'], ids=['internal', 'guessit'], autouse=True)
def config(request):
"""Override and parametrize default config fixture for all series tests."""
newconfig = Template(request.cls.config).render({'parser': request.param})
# Make sure we remembered to put the section in config
assert request.cls.config != newconfig, 'config parameterization did nothing?'
return newconfig
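# With the 'internal' param, for example, the template line
#     series: {{parser}}
# in each class's config renders to
#     series: internal
# so every test class below runs once per parser implementation.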
class TestQuality(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
exact_quality:
mock:
- {title: 'QTest.S01E01.HDTV.XViD-FlexGet'}
- {title: 'QTest.S01E01.PDTV.XViD-FlexGet'}
- {title: 'QTest.S01E01.DSR.XViD-FlexGet'}
- {title: 'QTest.S01E01.1080p.XViD-FlexGet'}
- {title: 'QTest.S01E01.720p.XViD-FlexGet'}
series:
- QTest:
quality: 720p
quality_fail:
mock:
- {title: 'Q2Test.S01E01.HDTV.XViD-FlexGet'}
- {title: 'Q2Test.S01E01.PDTV.XViD-FlexGet'}
- {title: 'Q2Test.S01E01.DSR.XViD-FlexGet'}
series:
- Q2Test:
quality: 720p
min_quality:
mock:
- {title: 'MinQTest.S01E01.HDTV.XViD-FlexGet'}
- {title: 'MinQTest.S01E01.PDTV.XViD-FlexGet'}
- {title: 'MinQTest.S01E01.DSR.XViD-FlexGet'}
- {title: 'MinQTest.S01E01.1080p.XViD-FlexGet'}
- {title: 'MinQTest.S01E01.720p.XViD-FlexGet'}
series:
- MinQTest:
quality: ">720p"
max_quality:
mock:
- {title: 'MaxQTest.S01E01.HDTV.XViD-FlexGet'}
- {title: 'MaxQTest.S01E01.PDTV.XViD-FlexGet'}
- {title: 'MaxQTest.S01E01.DSR.XViD-FlexGet'}
- {title: 'MaxQTest.S01E01.1080p.XViD-FlexGet'}
- {title: 'MaxQTest.S01E01.720p.XViD-FlexGet'}
- {title: 'MaxQTest.S01E01.720p.bluray-FlexGet'}
series:
- MaxQTest:
quality: "<720p <=HDTV"
min_max_quality:
mock:
- {title: 'MinMaxQTest.S01E01.HDTV.XViD-FlexGet'}
- {title: 'MinMaxQTest.S01E01.PDTV.XViD-FlexGet'}
- {title: 'MinMaxQTest.S01E01.DSR.XViD-FlexGet'}
- {title: 'MinMaxQTest.S01E01.720p.XViD-FlexGet'}
- {title: 'MinMaxQTest.S01E01.HR.XViD-FlexGet'}
- {title: 'MinMaxQTest.S01E01.1080p.XViD-FlexGet'}
series:
- MinMaxQTest:
quality: 480p-hr
max_unknown_quality:
mock:
- {title: 'MaxUnknownQTest.S01E01.XViD-FlexGet'}
series:
- MaxUnknownQTest:
quality: "<=hdtv"
quality_from_group:
mock:
- {title: 'GroupQual.S01E01.HDTV.XViD-FlexGet'}
- {title: 'GroupQual.S01E01.PDTV.XViD-FlexGet'}
- {title: 'GroupQual.S01E01.DSR.XViD-FlexGet'}
- {title: 'GroupQual.S01E01.1080p.XViD-FlexGet'}
- {title: 'GroupQual.S01E01.720p.XViD-FlexGet'}
- {title: 'Other.S01E01.hdtv.dd5.1.XViD-FlexGet'}
- {title: 'Other.S01E01.720p.hdtv.XViD-FlexGet'}
series:
720P:
- GroupQual
# Test that an integer group name doesn't cause an exception.
1080:
- Test
hdtv <hr !dd5.1:
- Other
quality_in_series_name:
mock:
- title: my 720p show S01E01
- title: my 720p show S01E02 720p
series:
- my 720p show:
quality: '<720p'
"""
def test_exact_quality(self, execute_task):
"""Series plugin: choose by quality"""
task = execute_task('exact_quality')
assert task.find_entry('accepted', title='QTest.S01E01.720p.XViD-FlexGet'), \
'720p should have been accepted'
assert len(task.accepted) == 1, 'should have accepted only one'
def test_quality_fail(self, execute_task):
task = execute_task('quality_fail')
assert not task.accepted, 'No qualities should have matched'
def test_min_quality(self, execute_task):
"""Series plugin: min_quality"""
task = execute_task('min_quality')
assert task.find_entry('accepted', title='MinQTest.S01E01.1080p.XViD-FlexGet'), \
'MinQTest.S01E01.1080p.XViD-FlexGet should have been accepted'
assert len(task.accepted) == 1, 'should have accepted only one'
def test_max_quality(self, execute_task):
"""Series plugin: max_quality"""
task = execute_task('max_quality')
assert task.find_entry('accepted', title='MaxQTest.S01E01.HDTV.XViD-FlexGet'), \
'MaxQTest.S01E01.HDTV.XViD-FlexGet should have been accepted'
assert len(task.accepted) == 1, 'should have accepted only one'
def test_min_max_quality(self, execute_task):
"""Series plugin: min_quality with max_quality"""
task = execute_task('min_max_quality')
assert task.find_entry('accepted', title='MinMaxQTest.S01E01.HR.XViD-FlexGet'), \
'MinMaxQTest.S01E01.HR.XViD-FlexGet should have been accepted'
assert len(task.accepted) == 1, 'should have accepted only one'
def test_max_unknown_quality(self, execute_task):
"""Series plugin: max quality with unknown quality"""
task = execute_task('max_unknown_quality')
assert len(task.accepted) == 1, 'should have accepted'
def test_group_quality(self, execute_task):
"""Series plugin: quality from group name"""
task = execute_task('quality_from_group')
assert task.find_entry('accepted', title='GroupQual.S01E01.720p.XViD-FlexGet'), \
'GroupQual.S01E01.720p.XViD-FlexGet should have been accepted'
        assert len(task.accepted) == 1, 'should have accepted only one (no entries should pass for series `other`)'
def test_quality_in_series_name(self, execute_task):
"""Make sure quality in title does not get parsed as quality"""
task = execute_task('quality_in_series_name')
assert task.find_entry('accepted', title='my 720p show S01E01'), \
'quality in title should not have been parsed'
assert len(task.accepted) == 1, 'should not have accepted 720p entry'
class TestDatabase(object):
config = """
templates:
global:
parsing:
series: {{parser}}
series:
- some series
- progress
tasks:
test_1:
mock:
- {title: 'Some.Series.S01E20.720p.XViD-FlexGet'}
test_2:
mock:
- {title: 'Some.Series.S01E20.720p.XViD-DoppelGanger'}
progress_1:
mock:
- {title: 'Progress.S01E20.720p-FlexGet'}
- {title: 'Progress.S01E20.HDTV-FlexGet'}
progress_2:
mock:
- {title: 'Progress.S01E20.720p.Another-FlexGet'}
- {title: 'Progress.S01E20.HDTV-Another-FlexGet'}
"""
def test_database(self, execute_task):
"""Series plugin: simple database"""
task = execute_task('test_1')
task = execute_task('test_2')
assert task.find_entry('rejected', title='Some.Series.S01E20.720p.XViD-DoppelGanger'), \
'failed basic download remembering'
def test_doppelgangers(self, execute_task):
"""Series plugin: doppelganger releases (dupes)"""
task = execute_task('progress_1')
assert task.find_entry('accepted', title='Progress.S01E20.720p-FlexGet'), \
'best quality not accepted'
# should not accept anything
task = execute_task('progress_1')
assert not task.accepted, 'repeated execution accepted'
# introduce new doppelgangers
task = execute_task('progress_2')
assert not task.accepted, 'doppelgangers accepted'
class TestFilterSeries(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
test:
mock:
- {title: 'Some.Series.S01E20.720p.XViD-FlexGet'}
- {title: 'Another.Series.S01E20.720p.XViD-FlexGet'}
- {title: 'Another.Series.S01E21.1080p.H264-FlexGet'}
- {title: 'Date.Series.10-11-2008.XViD'}
- {title: 'Date.Series.10.12.2008.XViD'}
- {title: 'Date.Series.2008-10-13.XViD'}
- {title: 'Date.Series.10.14.09.XViD'}
- {title: 'Date Series 2010 11 17 XViD'}
- {title: 'Useless title', filename: 'Filename.Series.S01E26.XViD'}
- {title: 'Empty.Description.S01E22.XViD', description: ''}
# test chaining
regexp:
reject:
- 1080p
series:
- another series
- date series
- filename series
- empty description
- (some) series
metainfo_series_override:
metainfo_series: yes
mock:
- {title: 'Test.Series.with.extra.crap.S01E02.PDTV.XViD-FlexGet'}
- {title: 'Other.Show.with.extra.crap.S02E01.PDTV.XViD-FlexGet'}
series:
- Test Series
test_all_series_mode:
mock:
- {title: 'Test.Series.S01E02.PDTV.XViD-FlexGet'}
- {title: 'Test Series - 1x03 - PDTV XViD-FlexGet'}
- {title: 'Other.Show.S02E01.PDTV.XViD-FlexGet'}
- {title: 'other show season 2 episode 2'}
- {title: 'Date.Show.03-29-2012.HDTV.XViD-FlexGet'}
all_series: yes
test_alternate_name:
mock:
- title: The.Show.S01E01
- title: Other.Name.S01E02
- title: many.names.S01E01
- title: name.1.S01E02
- title: name.2.S01E03
- title: paren.title.2013.S01E01
series:
- The Show:
alternate_name: Other Name
- many names:
alternate_name:
- name 1
- name 2
- paren title (US):
alternate_name: paren title 2013
test_input_order_preserved:
series:
- Some Show
"""
def test_smoke(self, execute_task):
"""Series plugin: test several standard features"""
task = execute_task('test')
# normal passing
assert task.find_entry(title='Another.Series.S01E20.720p.XViD-FlexGet'), \
'Another.Series.S01E20.720p.XViD-FlexGet should have passed'
# series with brackets
assert task.find_entry('accepted', title='Some.Series.S01E20.720p.XViD-FlexGet'), \
'Some.Series.S01E20.720p.XViD-FlexGet should have been accepted'
# date formats
df = ['Date.Series.10-11-2008.XViD', 'Date.Series.10.12.2008.XViD', 'Date Series 2010 11 17 XViD',
'Date.Series.2008-10-13.XViD', 'Date.Series.10.14.09.XViD']
for d in df:
entry = task.find_entry(title=d)
assert entry, 'Date format did not match %s' % d
assert 'series_parser' in entry, 'series_parser missing from %s' % d
            assert entry['series_parser'].id_type == 'date', '%s was not identified as a date' % d
# parse from filename
assert task.find_entry(filename='Filename.Series.S01E26.XViD'), 'Filename parsing failed'
# empty description
assert task.find_entry(title='Empty.Description.S01E22.XViD'), 'Empty Description failed'
# chaining with regexp plugin
assert task.find_entry('rejected', title='Another.Series.S01E21.1080p.H264-FlexGet'), \
'regexp chaining'
def test_metainfo_series_override(self, execute_task):
"""Series plugin: override metainfo_series"""
task = execute_task('metainfo_series_override')
# Make sure the metainfo_series plugin is working first
entry = task.find_entry('entries', title='Other.Show.with.extra.crap.S02E01.PDTV.XViD-FlexGet')
assert entry['series_guessed'], 'series should have been guessed'
assert entry['series_name'] == entry['series_parser'].name == 'Other Show With Extra Crap', \
'metainfo_series is not running'
# Make sure the good series data overrode metainfo data for the listed series
entry = task.find_entry('accepted', title='Test.Series.with.extra.crap.S01E02.PDTV.XViD-FlexGet')
assert not entry.get('series_guessed'), 'series plugin should override series_guessed'
assert entry['series_name'] == entry['series_parser'].name == 'Test Series', \
'Series name should be \'Test Series\', was: entry: %s, parser: %s' % (
entry['series_name'], entry['series_parser'].name)
def test_all_series_mode(self, execute_task):
"""Series plugin: test all option"""
task = execute_task('test_all_series_mode')
assert task.find_entry('accepted', title='Test.Series.S01E02.PDTV.XViD-FlexGet')
        entry = task.find_entry('accepted', title='Test Series - 1x03 - PDTV XViD-FlexGet')
        assert entry
assert entry.get('series_name') == 'Test Series'
entry = task.find_entry('accepted', title='Other.Show.S02E01.PDTV.XViD-FlexGet')
assert entry.get('series_guessed')
entry2 = task.find_entry('accepted', title='other show season 2 episode 2')
# Make sure case is normalized so series are marked with the same name no matter the case in the title
assert entry.get('series_name') == entry2.get(
'series_name') == 'Other Show', 'Series names should be in title case'
entry = task.find_entry('accepted', title='Date.Show.03-29-2012.HDTV.XViD-FlexGet')
assert entry.get('series_guessed')
assert entry.get('series_name') == 'Date Show'
def test_alternate_name(self, execute_task):
task = execute_task('test_alternate_name')
assert all(e.accepted for e in task.all_entries), 'All releases should have matched a show'
@pytest.mark.parametrize('reverse', [False, True])
def test_input_order_preserved(self, manager, execute_task, reverse):
"""If multiple versions of an episode are acceptable, make sure the first one is accepted."""
entries = [
Entry(title='Some Show S01E01 720p proper', url='http://a'),
Entry(title='Some Show S01E01 1080p', url='http://b')
]
if reverse:
entries.reverse()
task = execute_task('test_input_order_preserved', options={'inject': entries})
assert task.accepted[0] == entries[0], 'first entry should have been accepted'
class TestEpisodeAdvancement(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
test_backwards_1:
mock:
- {title: 'backwards s02e12'}
- {title: 'backwards s02e10'}
series:
- backwards
test_backwards_2:
mock:
- {title: 'backwards s02e01'}
series:
- backwards
test_backwards_3:
mock:
- {title: 'backwards s01e01'}
series:
- backwards
test_backwards_okay_1:
mock:
- {title: 'backwards s01e02'}
series:
- backwards:
tracking: backfill
test_backwards_okay_2:
mock:
- {title: 'backwards s01e03'}
series:
- backwards:
tracking: no
test_forwards_1:
mock:
- {title: 'forwards s01e01'}
series:
- forwards
test_forwards_2:
mock:
- {title: 'forwards s02e01'}
series:
- forwards
test_forwards_3:
mock:
- {title: 'forwards s03e01'}
series:
- forwards
test_forwards_4:
mock:
- {title: 'forwards s04e02'}
series:
- forwards
test_forwards_5:
mock:
- {title: 'forwards s05e01'}
series:
- forwards
test_forwards_okay_1:
mock:
- {title: 'forwards s05e01'}
series:
- forwards:
tracking: no
test_unordered:
mock:
- {title: 'zzz s01e05'}
- {title: 'zzz s01e06'}
- {title: 'zzz s01e07'}
- {title: 'zzz s01e08'}
- {title: 'zzz s01e09'}
- {title: 'zzz s01e10'}
- {title: 'zzz s01e15'}
- {title: 'zzz s01e14'}
- {title: 'zzz s01e13'}
- {title: 'zzz s01e12'}
- {title: 'zzz s01e11'}
- {title: 'zzz s01e01'}
series:
- zzz
test_seq1:
mock:
- title: seq 05
series:
- seq
test_seq2:
mock:
- title: seq 06
series:
- seq
test_seq3:
mock:
- title: seq 10
series:
- seq
test_seq4:
mock:
- title: seq 01
series:
- seq
"""
def test_backwards(self, execute_task):
"""Series plugin: episode advancement (backwards)"""
task = execute_task('test_backwards_1')
assert task.find_entry('accepted', title='backwards s02e12'), \
'backwards s02e12 should have been accepted'
assert task.find_entry('accepted', title='backwards s02e10'), \
'backwards s02e10 should have been accepted within grace margin'
task = execute_task('test_backwards_2')
assert task.find_entry('accepted', title='backwards s02e01'), \
'backwards s02e01 should have been accepted, in current season'
task = execute_task('test_backwards_3')
assert task.find_entry('rejected', title='backwards s01e01'), \
'backwards s01e01 should have been rejected, in previous season'
task = execute_task('test_backwards_okay_1')
        assert task.find_entry('accepted', title='backwards s01e02'), \
            'backwards s01e02 should have been accepted, backfill enabled'
        task = execute_task('test_backwards_okay_2')
        assert task.find_entry('accepted', title='backwards s01e03'), \
            'backwards s01e03 should have been accepted, tracking off'
def test_forwards(self, execute_task):
"""Series plugin: episode advancement (future)"""
task = execute_task('test_forwards_1')
assert task.find_entry('accepted', title='forwards s01e01'), \
'forwards s01e01 should have been accepted'
task = execute_task('test_forwards_2')
assert task.find_entry('accepted', title='forwards s02e01'), \
'forwards s02e01 should have been accepted'
task = execute_task('test_forwards_3')
assert task.find_entry('accepted', title='forwards s03e01'), \
'forwards s03e01 should have been accepted'
task = execute_task('test_forwards_4')
assert task.find_entry('rejected', title='forwards s04e02'), \
'forwards s04e02 should have been rejected'
task = execute_task('test_forwards_5')
assert task.find_entry('rejected', title='forwards s05e01'), \
'forwards s05e01 should have been rejected'
task = execute_task('test_forwards_okay_1')
assert task.find_entry('accepted', title='forwards s05e01'), \
'forwards s05e01 should have been accepted with tracking turned off'
def test_unordered(self, execute_task):
"""Series plugin: unordered episode advancement"""
task = execute_task('test_unordered')
        assert len(task.accepted) == 12, \
            'not all episodes were accepted'
def test_sequence(self, execute_task):
# First should be accepted
task = execute_task('test_seq1')
entry = task.find_entry('accepted', title='seq 05')
assert entry['series_id'] == 5
# Next in sequence should be accepted
task = execute_task('test_seq2')
entry = task.find_entry('accepted', title='seq 06')
assert entry['series_id'] == 6
# Should be too far in the future
task = execute_task('test_seq3')
entry = task.find_entry(title='seq 10')
assert entry not in task.accepted, 'Should have been too far in future'
# Should be too far in the past
task = execute_task('test_seq4')
entry = task.find_entry(title='seq 01')
assert entry not in task.accepted, 'Should have been too far in the past'
class TestFilterSeriesPriority(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
test:
mock:
- {title: 'foobar 720p s01e01'}
- {title: 'foobar hdtv s01e01'}
regexp:
reject:
- 720p
series:
- foobar
"""
def test_priorities(self, execute_task):
"""Series plugin: regexp plugin is able to reject before series plugin"""
task = execute_task('test')
assert task.find_entry('rejected', title='foobar 720p s01e01'), \
'foobar 720p s01e01 should have been rejected'
assert task.find_entry('accepted', title='foobar hdtv s01e01'), \
'foobar hdtv s01e01 is not accepted'
class TestPropers(object):
config = """
templates:
global:
parsing:
series: {{parser}}
# prevents seen from rejecting on second execution,
# we want to see that series is able to reject
disable: builtins
series:
- test
- foobar
- asfd:
quality: HR-1080p
- V
- tftest:
propers: 3 hours
- notest:
propers: no
tasks:
propers_1:
mock:
- {title: 'Test.S01E01.720p-FlexGet'}
# introduce proper, should be accepted
propers_2:
mock:
- {title: 'Test.S01E01.720p.Proper-FlexGet'}
# introduce non-proper, should not be downloaded
propers_3:
mock:
- {title: 'Test.S01E01.FlexGet'}
# introduce proper at the same time, should nuke non-proper and get proper
proper_at_first:
mock:
- {title: 'Foobar.S01E01.720p.FlexGet'}
- {title: 'Foobar.S01E01.720p.proper.FlexGet'}
# test a lot of propers at once
lot_propers:
mock:
- {title: 'V.2009.S01E01.PROPER.HDTV.A'}
- {title: 'V.2009.S01E01.PROPER.HDTV.B'}
- {title: 'V.2009.S01E01.PROPER.HDTV.C'}
diff_quality_1:
mock:
- {title: 'Test.S01E02.720p-FlexGet'}
# low quality proper, should not be accepted
diff_quality_2:
mock:
- {title: 'Test.S01E02.HDTV.Proper-FlexGet'}
# min + max quality with propers
min_max_quality_1:
mock:
- {title: 'asfd.S01E01.720p-FlexGet'}
min_max_quality_2:
mock:
- {title: 'asfd.S01E01.720p.Proper-FlexGet'}
proper_timeframe_1:
mock:
- {title: 'TFTest.S01E01.720p-FlexGet'}
proper_timeframe_2:
mock:
- {title: 'TFTest.S01E01.720p.proper-FlexGet'}
no_propers_1:
mock:
- {title: 'NoTest.S01E01.720p-FlexGet'}
no_propers_2:
mock:
- {title: 'NoTest.S01E01.720p.proper-FlexGet'}
proper_upgrade_1:
mock:
- {title: 'Test.S02E01.hdtv.proper'}
proper_upgrade_2:
mock:
- {title: 'Test.S02E01.hdtv.real.proper'}
anime_proper_1:
mock:
- title: test 04v0 hdtv
anime_proper_2:
mock:
- title: test 04 hdtv
fastsub_proper_1:
mock:
- title: test s01e01 Fastsub hdtv
fastsub_proper_2:
mock:
- title: test s01e01 Fastsub repack hdtv
fastsub_proper_3:
mock:
- title: test s01e01 hdtv
fastsub_proper_4:
mock:
- title: test s01e01 proper hdtv
"""
def test_propers_timeframe(self, execute_task):
"""Series plugin: propers timeframe"""
task = execute_task('proper_timeframe_1')
assert task.find_entry('accepted', title='TFTest.S01E01.720p-FlexGet'), \
'Did not accept before timeframe'
# let 6 hours pass
age_series(hours=6)
task = execute_task('proper_timeframe_2')
assert task.find_entry('rejected', title='TFTest.S01E01.720p.proper-FlexGet'), \
'Did not reject after proper timeframe'
def test_no_propers(self, execute_task):
"""Series plugin: no propers at all"""
task = execute_task('no_propers_1')
assert len(task.accepted) == 1, 'broken badly'
task = execute_task('no_propers_2')
assert len(task.rejected) == 1, 'accepted proper'
def test_min_max_propers(self, execute_task):
"""Series plugin: min max propers"""
task = execute_task('min_max_quality_1')
assert len(task.accepted) == 1, 'uhh, broken badly'
task = execute_task('min_max_quality_2')
assert len(task.accepted) == 1, 'should have accepted proper'
def test_lot_propers(self, execute_task):
"""Series plugin: proper flood"""
task = execute_task('lot_propers')
assert len(task.accepted) == 1, 'should have accepted (only) one of the propers'
def test_diff_quality_propers(self, execute_task):
"""Series plugin: proper in different/wrong quality"""
task = execute_task('diff_quality_1')
assert len(task.accepted) == 1
task = execute_task('diff_quality_2')
assert len(task.accepted) == 0, 'should not have accepted lower quality proper'
def test_propers(self, execute_task):
"""Series plugin: proper accepted after episode is downloaded"""
# start with normal download ...
task = execute_task('propers_1')
assert task.find_entry('accepted', title='Test.S01E01.720p-FlexGet'), \
            'Test.S01E01.720p-FlexGet should have been accepted'
# rejects downloaded
task = execute_task('propers_1')
assert task.find_entry('rejected', title='Test.S01E01.720p-FlexGet'), \
            'Test.S01E01.720p-FlexGet should have been rejected'
# accepts proper
task = execute_task('propers_2')
assert task.find_entry('accepted', title='Test.S01E01.720p.Proper-FlexGet'), \
'new undownloaded proper should have been accepted'
# reject downloaded proper
task = execute_task('propers_2')
assert task.find_entry('rejected', title='Test.S01E01.720p.Proper-FlexGet'), \
'downloaded proper should have been rejected'
# reject episode that has been downloaded normally and with proper
task = execute_task('propers_3')
assert task.find_entry('rejected', title='Test.S01E01.FlexGet'), \
'Test.S01E01.FlexGet should have been rejected'
def test_proper_available(self, execute_task):
"""Series plugin: proper available immediately"""
task = execute_task('proper_at_first')
assert task.find_entry('accepted', title='Foobar.S01E01.720p.proper.FlexGet'), \
'Foobar.S01E01.720p.proper.FlexGet should have been accepted'
def test_proper_upgrade(self, execute_task):
"""Series plugin: real proper after proper"""
task = execute_task('proper_upgrade_1')
assert task.find_entry('accepted', title='Test.S02E01.hdtv.proper')
task = execute_task('proper_upgrade_2')
assert task.find_entry('accepted', title='Test.S02E01.hdtv.real.proper')
def test_anime_proper(self, execute_task):
task = execute_task('anime_proper_1')
        assert task.accepted, 'ep should have been accepted'
task = execute_task('anime_proper_2')
assert task.accepted, 'proper ep should have been accepted'
def test_fastsub_proper(self, execute_task):
task = execute_task('fastsub_proper_1')
        assert task.accepted, 'ep should have been accepted'
task = execute_task('fastsub_proper_2')
assert task.accepted, 'proper ep should have been accepted'
task = execute_task('fastsub_proper_3')
assert task.accepted, 'proper ep should have been accepted'
task = execute_task('fastsub_proper_4')
assert task.accepted, 'proper ep should have been accepted'
class TestSimilarNames(object):
    # hmm, not a very good way to test this... seriesparser should be tested alone?
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
test:
mock:
- {title: 'FooBar.S03E01.DSR-FlexGet'}
- {title: 'FooBar: FirstAlt.S02E01.DSR-FlexGet'}
- {title: 'FooBar: SecondAlt.S01E01.DSR-FlexGet'}
series:
- FooBar
- 'FooBar: FirstAlt'
- 'FooBar: SecondAlt'
test_ambiguous:
mock:
- title: Foo.2.2
series:
- Foo:
identified_by: sequence
- Foo 2:
identified_by: sequence
"""
def test_names(self, execute_task):
"""Series plugin: similar namings"""
task = execute_task('test')
assert task.find_entry('accepted', title='FooBar.S03E01.DSR-FlexGet'), 'Standard failed?'
assert task.find_entry('accepted', title='FooBar: FirstAlt.S02E01.DSR-FlexGet'), 'FirstAlt failed'
assert task.find_entry('accepted', title='FooBar: SecondAlt.S01E01.DSR-FlexGet'), 'SecondAlt failed'
def test_ambiguous(self, execute_task):
task = execute_task('test_ambiguous')
# In the event of ambiguous match, more specific one should be chosen
assert task.find_entry('accepted', title='Foo.2.2')['series_name'] == 'Foo 2'
class TestDuplicates(object):
config = """
templates:
global:
parsing:
series: {{parser}}
# just cleans log a bit ..
disable:
- seen
tasks:
test_dupes:
mock:
- {title: 'Foo.2009.S02E04.HDTV.XviD-2HD[FlexGet]'}
- {title: 'Foo.2009.S02E04.HDTV.XviD-2HD[ASDF]'}
series:
- Foo 2009
test_1:
mock:
- {title: 'Foo.Bar.S02E04.HDTV.XviD-2HD[FlexGet]'}
- {title: 'Foo.Bar.S02E04.HDTV.XviD-2HD[ASDF]'}
series:
- foo bar
test_2:
mock:
- {title: 'Foo.Bar.S02E04.XviD-2HD[ASDF]'}
- {title: 'Foo.Bar.S02E04.HDTV.720p.XviD-2HD[FlexGet]'}
- {title: 'Foo.Bar.S02E04.DSRIP.XviD-2HD[ASDF]'}
- {title: 'Foo.Bar.S02E04.HDTV.1080p.XviD-2HD[ASDF]'}
- {title: 'Foo.Bar.S02E03.HDTV.XviD-FlexGet'}
- {title: 'Foo.Bar.S02E05.720p.HDTV.XviD-YYY'}
series:
- foo bar
test_true_dupes:
mock:
- {title: 'Dupe.S02E04.HDTV.XviD-FlexGet'}
- {title: 'Dupe.S02E04.HDTV.XviD-FlexGet'}
- {title: 'Dupe.S02E04.HDTV.XviD-FlexGet'}
series:
- dupe
"""
def test_dupes(self, execute_task):
"""Series plugin: dupes with same quality"""
task = execute_task('test_dupes')
assert len(task.accepted) == 1, 'accepted both'
def test_true_dupes(self, execute_task):
"""Series plugin: true duplicate items"""
task = execute_task('test_true_dupes')
assert len(task.accepted) == 1, 'should have accepted (only) one'
def test_downloaded(self, execute_task):
"""Series plugin: multiple downloaded and new episodes are handled correctly"""
task = execute_task('test_1')
task = execute_task('test_2')
# these should be accepted
accepted = ['Foo.Bar.S02E03.HDTV.XviD-FlexGet', 'Foo.Bar.S02E05.720p.HDTV.XviD-YYY']
for item in accepted:
assert task.find_entry('accepted', title=item), \
'%s should have been accepted' % item
# these should be rejected
rejected = ['Foo.Bar.S02E04.XviD-2HD[ASDF]', 'Foo.Bar.S02E04.HDTV.720p.XviD-2HD[FlexGet]',
'Foo.Bar.S02E04.DSRIP.XviD-2HD[ASDF]', 'Foo.Bar.S02E04.HDTV.1080p.XviD-2HD[ASDF]']
for item in rejected:
assert task.find_entry('rejected', title=item), \
'%s should have been rejected' % item
class TestQualities(object):
config = """
templates:
global:
parsing:
series: {{parser}}
disable: builtins
series:
- FooBar:
qualities:
- SDTV
- 720p
- 1080p
- FooBaz:
upgrade: yes
qualities:
- hdtv
- hr
- 720p
- FooBum:
quality: 720p-1080i
upgrade: yes
- FooD:
target: 720p
timeframe: 0 hours
upgrade: yes
tasks:
test_1:
mock:
- {title: 'FooBar.S01E01.PDTV-FlexGet'}
- {title: 'FooBar.S01E01.1080p-FlexGet'}
- {title: 'FooBar.S01E01.HR-FlexGet'}
test_2:
mock:
- {title: 'FooBar.S01E01.720p-FlexGet'}
propers_1:
mock:
- {title: 'FooBar.S01E02.720p-FlexGet'}
propers_2:
mock:
- {title: 'FooBar.S01E02.720p.Proper-FlexGet'}
upgrade_1:
mock:
- {title: 'FooBaz.S01E02.pdtv-FlexGet'}
- {title: 'FooBaz.S01E02.HR-FlexGet'}
upgrade_2:
mock:
- {title: 'FooBaz.S01E02.720p-FlexGet'}
- {title: 'FooBaz.S01E02.1080p-FlexGet'}
upgrade_3:
mock:
- {title: 'FooBaz.S01E02.hdtv-FlexGet'}
- {title: 'FooBaz.S01E02.720p rc-FlexGet'}
quality_upgrade_1:
mock:
- title: FooBum.S03E01.1080p # too high
- title: FooBum.S03E01.hdtv # too low
- title: FooBum.S03E01.720p # in range
quality_upgrade_2:
mock:
- title: FooBum.S03E01.1080i # should be upgraded to
- title: FooBum.S03E01.720p-ver2 # Duplicate ep
target_1:
mock:
- title: Food.S06E11.hdtv
target_2:
mock:
- title: Food.S06E11.1080p
- title: Food.S06E11.720p
"""
def test_qualities(self, execute_task):
"""Series plugin: qualities"""
task = execute_task('test_1')
        assert task.find_entry('accepted', title='FooBar.S01E01.PDTV-FlexGet'), \
            'Didn\'t accept FooBar.S01E01.PDTV-FlexGet'
        assert task.find_entry('accepted', title='FooBar.S01E01.1080p-FlexGet'), \
            'Didn\'t accept FooBar.S01E01.1080p-FlexGet'
        assert not task.find_entry('accepted', title='FooBar.S01E01.HR-FlexGet'), \
            'Accepted FooBar.S01E01.HR-FlexGet'
        task = execute_task('test_2')
        assert task.find_entry('accepted', title='FooBar.S01E01.720p-FlexGet'), \
            'Didn\'t accept FooBar.S01E01.720p-FlexGet'
# test that it rejects them afterwards
task = execute_task('test_1')
assert task.find_entry('rejected', title='FooBar.S01E01.PDTV-FlexGet'), \
'Didn\'t reject FooBar.S01E01.PDTV-FlexGet'
assert task.find_entry('rejected', title='FooBar.S01E01.1080p-FlexGet'), \
'Didn\'t reject FooBar.S01E01.1080p-FlexGet'
assert not task.find_entry('accepted', title='FooBar.S01E01.HR-FlexGet'), \
'Accepted FooBar.S01E01.HR-FlexGet'
def test_propers(self, execute_task):
"""Series plugin: qualities + propers"""
task = execute_task('propers_1')
assert task.accepted
task = execute_task('propers_2')
assert task.accepted, 'proper not accepted'
task = execute_task('propers_2')
assert not task.accepted, 'proper accepted again'
def test_qualities_upgrade(self, execute_task):
task = execute_task('upgrade_1')
assert task.find_entry('accepted', title='FooBaz.S01E02.HR-FlexGet'), 'HR quality should be accepted'
assert len(task.accepted) == 1, 'Only best quality should be accepted'
task = execute_task('upgrade_2')
assert task.find_entry('accepted', title='FooBaz.S01E02.720p-FlexGet'), '720p quality should be accepted'
assert len(task.accepted) == 1, 'Only best quality should be accepted'
task = execute_task('upgrade_3')
assert not task.accepted, 'Should not have accepted worse qualities'
def test_quality_upgrade(self, execute_task):
task = execute_task('quality_upgrade_1')
assert len(task.accepted) == 1, 'Only one ep should have passed quality filter'
assert task.find_entry('accepted', title='FooBum.S03E01.720p')
task = execute_task('quality_upgrade_2')
assert len(task.accepted) == 1, 'one ep should be valid upgrade'
assert task.find_entry('accepted', title='FooBum.S03E01.1080i')
def test_target_upgrade(self, execute_task):
task = execute_task('target_1')
assert len(task.accepted) == 1, 'Only one ep should have been grabbed'
assert task.find_entry('accepted', title='Food.S06E11.hdtv')
task = execute_task('target_2')
assert len(task.accepted) == 1, 'one ep should be valid upgrade'
assert task.find_entry('accepted', title='Food.S06E11.720p'), 'Should upgrade to `target`'
class TestIdioticNumbering(object):
config = """
templates:
global:
parsing:
series: {{parser}}
series:
- FooBar:
identified_by: ep
tasks:
test_1:
mock:
- {title: 'FooBar.S01E01.PDTV-FlexGet'}
test_2:
mock:
- {title: 'FooBar.102.PDTV-FlexGet'}
"""
def test_idiotic(self, execute_task):
"""Series plugin: idiotic numbering scheme"""
task = execute_task('test_1')
task = execute_task('test_2')
entry = task.find_entry(title='FooBar.102.PDTV-FlexGet')
assert entry, 'entry not found?'
assert entry['series_season'] == 1, 'season not detected'
assert entry['series_episode'] == 2, 'episode not detected'
class TestNormalization(object):
config = """
templates:
global:
parsing:
series: {{parser}}
disable: [seen]
tasks:
test_1:
mock:
- {title: 'FooBar.S01E01.PDTV-FlexGet'}
series:
- FOOBAR
test_2:
mock:
- {title: 'FooBar.S01E01.PDTV-aoeu'}
series:
- foobar
test_3:
mock:
- title: Foo bar & co 2012.s01e01.sdtv.a
series:
- foo bar & co 2012
test_4:
mock:
- title: Foo bar & co 2012.s01e01.sdtv.b
series:
- Foo/Bar and Co. (2012)
"""
def test_capitalization(self, execute_task):
"""Series plugin: configuration capitalization"""
task = execute_task('test_1')
assert task.find_entry('accepted', title='FooBar.S01E01.PDTV-FlexGet')
task = execute_task('test_2')
assert task.find_entry('rejected', title='FooBar.S01E01.PDTV-aoeu')
def test_normalization(self, execute_task):
task = execute_task('test_3')
assert task.find_entry('accepted', title='Foo bar & co 2012.s01e01.sdtv.a')
task = execute_task('test_4')
assert task.find_entry('rejected', title='Foo bar & co 2012.s01e01.sdtv.b')
class TestMixedNumbering(object):
config = """
templates:
global:
parsing:
series: {{parser}}
series:
- FooBar:
identified_by: ep
tasks:
test_1:
mock:
- {title: 'FooBar.S03E07.PDTV-FlexGet'}
test_2:
mock:
- {title: 'FooBar.0307.PDTV-FlexGet'}
"""
def test_mixednumbering(self, execute_task):
"""Series plugin: Mixed series numbering"""
task = execute_task('test_1')
assert task.find_entry('accepted', title='FooBar.S03E07.PDTV-FlexGet')
task = execute_task('test_2')
assert task.find_entry('rejected', title='FooBar.0307.PDTV-FlexGet')
class TestExact(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
auto:
mock:
- {title: 'ABC.MIAMI.S01E01.PDTV-FlexGet'}
- {title: 'ABC.S01E01.PDTV-FlexGet'}
- {title: 'ABC.LA.S01E01.PDTV-FlexGet'}
series:
- ABC
- ABC LA
- ABC Miami
name_regexp:
mock:
- title: show s09e05 hdtv
- title: show a s09e06 hdtv
series:
- show:
name_regexp: ^show
exact: yes
date:
mock:
- title: date show 04.01.2011 hdtv
- title: date show b 04.02.2011 hdtv
series:
- date show:
exact: yes
"""
def test_auto(self, execute_task):
"""Series plugin: auto enable exact"""
task = execute_task('auto')
assert task.find_entry('accepted', title='ABC.S01E01.PDTV-FlexGet')
assert task.find_entry('accepted', title='ABC.LA.S01E01.PDTV-FlexGet')
assert task.find_entry('accepted', title='ABC.MIAMI.S01E01.PDTV-FlexGet')
def test_with_name_regexp(self, execute_task):
task = execute_task('name_regexp')
assert task.find_entry('accepted', title='show s09e05 hdtv')
assert not task.find_entry('accepted', title='show a s09e06 hdtv')
def test_dated_show(self, execute_task):
task = execute_task('date')
assert task.find_entry('accepted', title='date show 04.01.2011 hdtv')
assert not task.find_entry('accepted', title='date show b 04.02.2011 hdtv')
class TestTimeframe(object):
config = """
templates:
global:
parsing:
series: {{parser}}
series:
- test:
timeframe: 5 hours
target: 720p
tasks:
test_no_waiting:
mock:
- {title: 'Test.S01E01.720p-FlexGet'}
test_stop_waiting_1:
mock:
- {title: 'Test.S01E02.HDTV-FlexGet'}
test_stop_waiting_2:
mock:
- {title: 'Test.S01E02.720p-FlexGet'}
test_proper_afterwards:
mock:
- {title: 'Test.S01E02.720p.Proper-FlexGet'}
test_expires:
mock:
- {title: 'Test.S01E03.pdtv-FlexGet'}
test_min_max_fail:
series:
- mm test:
timeframe: 5 hours
target: 720p
quality: hdtv+ <=720p
mock:
- {title: 'MM Test.S01E02.pdtv-FlexGet'}
- {title: 'MM Test.S01E02.1080p-FlexGet'}
test_min_max_pass:
series:
- mm test:
timeframe: 5 hours
target: 720p
quality: hdtv+ <=720p
mock:
- {title: 'MM Test.S01E02.pdtv-FlexGet'}
- {title: 'MM Test.S01E02.hdtv-FlexGet'}
- {title: 'MM Test.S01E02.1080p-FlexGet'}
test_qualities_fail:
series:
- q test:
timeframe: 5 hours
qualities:
- hdtv
- 1080p
mock:
- {title: 'Q Test.S01E02.pdtv-FlexGet'}
- {title: 'Q Test.S01E02.1080p-FlexGet'}
test_qualities_pass:
series:
- q test:
timeframe: 5 hours
qualities:
- sdtv
- 720p
mock:
- {title: 'Q Test.S01E02.1080p-FlexGet'}
test_with_quality_1:
series:
- q test:
timeframe: 5 hours
quality: hdtv+
target: 720p
mock:
- title: q test s01e01 pdtv 720p
test_with_quality_2:
series:
- q test:
timeframe: 5 hours
quality: hdtv+
target: 720p
mock:
- title: q test s01e01 hdtv
"""
def test_no_waiting(self, execute_task):
"""Series plugin: no timeframe waiting needed"""
task = execute_task('test_no_waiting')
assert task.find_entry('accepted', title='Test.S01E01.720p-FlexGet'), \
            '720p not accepted immediately'
def test_stop_waiting(self, execute_task):
"""Series plugin: timeframe quality appears, stop waiting, proper appears"""
task = execute_task('test_stop_waiting_1')
assert task.entries and not task.accepted
task = execute_task('test_stop_waiting_2')
assert task.find_entry('accepted', title='Test.S01E02.720p-FlexGet'), \
'720p should have caused stop waiting'
task = execute_task('test_proper_afterwards')
assert task.find_entry('accepted', title='Test.S01E02.720p.Proper-FlexGet'), \
'proper should have been accepted'
def test_expires(self, execute_task):
"""Series plugin: timeframe expires"""
# first execution should not accept anything
task = execute_task('test_expires')
assert not task.accepted
# let 3 hours pass
age_series(hours=3)
task = execute_task('test_expires')
assert not task.accepted, 'expired too soon'
# let another 3 hours pass, should expire now!
age_series(hours=6)
task = execute_task('test_expires')
assert task.accepted, 'timeframe didn\'t expire'
def test_min_max_fail(self, execute_task):
task = execute_task('test_min_max_fail')
assert not task.accepted
        # Let 6 hours pass; the timeframe should not even have started, as pdtv doesn't meet min_quality
age_series(hours=6)
task = execute_task('test_min_max_fail')
assert task.entries and not task.accepted
def test_min_max_pass(self, execute_task):
task = execute_task('test_min_max_pass')
assert not task.accepted
# Let 6 hours pass, timeframe should expire and accept hdtv copy
age_series(hours=6)
task = execute_task('test_min_max_pass')
assert task.find_entry('accepted', title='MM Test.S01E02.hdtv-FlexGet')
assert len(task.accepted) == 1
def test_qualities_fail(self, execute_task):
task = execute_task('test_qualities_fail')
assert task.find_entry('accepted', title='Q Test.S01E02.1080p-FlexGet'), \
'should have accepted wanted quality'
assert len(task.accepted) == 1
        # Let 6 hours pass; the timeframe should not even have started, as we already have one of our qualities
age_series(hours=6)
task = execute_task('test_qualities_fail')
assert task.entries and not task.accepted
def test_qualities_pass(self, execute_task):
task = execute_task('test_qualities_pass')
assert not task.accepted, 'None of the qualities should have matched'
# Let 6 hours pass, timeframe should expire and accept 1080p copy
age_series(hours=6)
task = execute_task('test_qualities_pass')
assert task.find_entry('accepted', title='Q Test.S01E02.1080p-FlexGet')
assert len(task.accepted) == 1
def test_with_quality(self, execute_task):
task = execute_task('test_with_quality_1')
assert not task.accepted, 'Entry does not pass quality'
age_series(hours=6)
# Entry from first test feed should not pass quality
task = execute_task('test_with_quality_1')
assert not task.accepted, 'Entry does not pass quality'
# Timeframe should not yet have started
task = execute_task('test_with_quality_2')
assert not task.accepted, 'Timeframe should not yet have passed'
age_series(hours=6)
task = execute_task('test_with_quality_2')
assert task.accepted, 'Timeframe should have passed'
class TestBacklog(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
backlog:
mock:
- {title: 'Test.S01E01.hdtv-FlexGet'}
series:
- test: {timeframe: 6 hours}
"""
def testBacklog(self, manager, execute_task):
"""Series plugin: backlog"""
task = execute_task('backlog')
assert task.entries and not task.accepted, 'no entries at the start'
# simulate test going away from the task
        del manager.config['tasks']['backlog']['mock']
age_series(hours=12)
task = execute_task('backlog')
assert task.accepted, 'backlog is not injecting episodes'
class TestManipulate(object):
"""Tests that it's possible to manipulate entries before they're parsed by series plugin"""
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
test_1:
mock:
- {title: 'PREFIX: Test.S01E01.hdtv-FlexGet'}
series:
- test
test_2:
mock:
- {title: 'PREFIX: Test.S01E01.hdtv-FlexGet'}
series:
- test
manipulate:
- title:
extract: '^PREFIX: (.*)'
"""
def testManipulate(self, execute_task):
"""Series plugin: test manipulation priority"""
# should not work with the prefix
task = execute_task('test_1')
        assert not task.accepted, 'series accepted even with prefix?'
task = execute_task('test_2')
assert task.accepted, 'manipulate failed to pre-clean title'
class TestFromGroup(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
test:
mock:
- {title: '[Ignored] Test 12'}
- {title: '[FlexGet] Test 12'}
- {title: 'Test.13.HDTV-Ignored'}
- {title: 'Test.13.HDTV-FlexGet'}
- {title: 'Test.14.HDTV-Name'}
- {title: 'Test :: h264 10-bit | Softsubs (FlexGet) | Episode 3'}
- {title: 'Test :: h264 10-bit | Softsubs (Ignore) | Episode 3'}
series:
- test: {from_group: [Name, FlexGet]}
"""
def test_from_group(self, execute_task):
"""Series plugin: test from_group"""
task = execute_task('test')
assert task.find_entry('accepted', title='[FlexGet] Test 12')
assert task.find_entry('accepted', title='Test.13.HDTV-FlexGet')
assert task.find_entry('accepted', title='Test.14.HDTV-Name')
assert task.find_entry('accepted', title='Test :: h264 10-bit | Softsubs (FlexGet) | Episode 3')
class TestBegin(object):
config = """
templates:
global:
parsing:
series: {{parser}}
eps:
mock:
- {title: 'WTest.S02E03.HDTV.XViD-FlexGet'}
- {title: 'W2Test.S02E03.HDTV.XViD-FlexGet'}
tasks:
season_id_test:
template: eps
series:
- WTest:
begin: S02
- W2Test:
begin: S03
before_ep_test:
template: eps
series:
- WTest:
begin: S02E05
- W2Test:
begin: S03E02
after_ep_test:
template: eps
series:
- WTest:
begin: S02E03
- W2Test:
begin: S02E01
before_seq_test:
mock:
- title: WTest.1.HDTV.XViD-FlexGet
- title: W2Test.13.HDTV.XViD-FlexGet
series:
- WTest:
begin: 2
- W2Test:
begin: 120
after_seq_test:
mock:
- title: WTest.2.HDTV.XViD-FlexGet
- title: W2Test.123.HDTV.XViD-FlexGet
series:
- WTest:
begin: 2
- W2Test:
begin: 120
before_date_test:
mock:
- title: WTest.2001.6.6.HDTV.XViD-FlexGet
- title: W2Test.12.30.2012.HDTV.XViD-FlexGet
series:
- WTest:
begin: '2009-05-05'
- W2Test:
begin: '2012-12-31'
after_date_test:
mock:
- title: WTest.2009.5.5.HDTV.XViD-FlexGet
- title: W2Test.1.1.2013.HDTV.XViD-FlexGet
series:
- WTest:
begin: '2009-05-05'
- W2Test:
begin: '2012-12-31'
test_advancement1:
mock:
- title: WTest.S01E01
series:
- WTest
test_advancement2:
mock:
- title: WTest.S03E01
series:
- WTest
test_advancement3:
mock:
- title: WTest.S03E01
series:
- WTest:
begin: S03E01
"""
def test_season_id(self, execute_task):
task = execute_task('season_id_test')
assert task.find_entry('accepted', title='WTest.S02E03.HDTV.XViD-FlexGet'), \
'Entry should have been accepted, it\'s after the begin episode'
assert task.find_entry('rejected', title='W2Test.S02E03.HDTV.XViD-FlexGet'), \
'Entry should have been rejected, it\'s before the begin episode'
def test_before_ep(self, execute_task):
task = execute_task('before_ep_test')
assert not task.accepted, 'No entries should have been accepted, they are before the begin episode'
def test_after_ep(self, execute_task):
task = execute_task('after_ep_test')
assert len(task.accepted) == 2, 'Entries should have been accepted, they are not before the begin episode'
def test_before_seq(self, execute_task):
task = execute_task('before_seq_test')
assert not task.accepted, 'No entries should have been accepted, they are before the begin episode'
def test_after_seq(self, execute_task):
task = execute_task('after_seq_test')
assert len(task.accepted) == 2, 'Entries should have been accepted, they are not before the begin episode'
def test_before_date(self, execute_task):
task = execute_task('before_date_test')
assert not task.accepted, 'No entries should have been accepted, they are before the begin episode'
def test_after_date(self, execute_task):
task = execute_task('after_date_test')
assert len(task.accepted) == 2, 'Entries should have been accepted, they are not before the begin episode'
def test_advancement(self, execute_task):
# Put S01E01 into the database as latest download
task = execute_task('test_advancement1')
assert task.accepted
# Just verify regular ep advancement would block S03E01
task = execute_task('test_advancement2')
assert not task.accepted, 'Episode advancement should have blocked'
# Make sure ep advancement doesn't block it when we've set begin to that ep
task = execute_task('test_advancement3')
assert task.accepted, 'Episode should have been accepted'
class TestSeriesPremiere(object):
config = """
templates:
global:
parsing:
series: {{parser}}
metainfo_series: yes
series_premiere: yes
tasks:
test:
mock:
- {title: 'Foobar.S01E01.PDTV-FlexGet'}
- {title: 'Foobar.S01E11.1080p-FlexGet'}
- {title: 'Foobar.S02E02.HR-FlexGet'}
"""
def testOnlyPremieres(self, execute_task):
"""Test series premiere"""
task = execute_task('test')
assert task.find_entry('accepted', title='Foobar.S01E01.PDTV-FlexGet',
series_name='Foobar', series_season=1,
series_episode=1), 'Series premiere should have been accepted'
assert len(task.accepted) == 1
# TODO: Add more tests, test interaction with series plugin and series_exists
class TestImportSeries(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
timeframe_max:
configure_series:
settings:
propers: 12 hours
target: 720p
timeframe: 5 minutes
quality: "<=720p <=bluray"
from:
mock:
- title: the show
mock:
- title: the show s03e02 1080p bluray
- title: the show s03e02 hdtv
test_import_altnames:
configure_series:
from:
mock:
- {title: 'the show', configure_series_alternate_name: 'le show'}
mock:
- title: le show s03e03
"""
def test_timeframe_max(self, execute_task):
"""Tests configure_series as well as timeframe with max_quality."""
task = execute_task('timeframe_max')
        assert not task.accepted, 'Entry should not have been accepted on first run.'
age_series(minutes=6)
task = execute_task('timeframe_max')
assert task.find_entry('accepted', title='the show s03e02 hdtv'), \
'hdtv should have been accepted after timeframe.'
def test_import_altnames(self, execute_task):
"""Tests configure_series with alternate_name."""
task = execute_task('test_import_altnames')
entry = task.find_entry(title='le show s03e03')
assert entry.accepted, 'entry matching series alternate name should have been accepted.'
assert entry['series_name'] == 'the show', 'entry series should be set to the main name'
class TestIDTypes(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
all_types:
series:
- episode
- seasonless episode
- date
- sequence
- stupid id:
id_regexp: (\\dcat)
mock:
- title: episode S03E04
- title: episode 3x05
- title: date 2011.4.3 other crap hdtv
- title: date 4.5.11
- title: sequence 003
- title: sequence 4
- title: stupid id 3cat
- title: seasonless episode e01
"""
def test_id_types(self, execute_task):
task = execute_task('all_types')
for entry in task.entries:
assert entry['series_name'], '%s not parsed by series plugin' % entry['title']
assert entry['series_id_type'] in entry['series_name']
class TestCaseChange(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
first:
mock:
- title: theshow s02e04
series:
- TheShow
second:
mock:
- title: thEshoW s02e04 other
series:
- THESHOW
"""
def test_case_change(self, execute_task):
task = execute_task('first')
# Make sure series_name uses case from config, make sure episode is accepted
assert task.find_entry('accepted', title='theshow s02e04', series_name='TheShow')
task = execute_task('second')
# Make sure series_name uses new case from config, make sure ep is rejected because we have a copy
assert task.find_entry('rejected', title='thEshoW s02e04 other', series_name='THESHOW')
class TestInvalidSeries(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
blank:
mock:
- title: whatever
series:
- '':
quality: 720p
"""
def test_blank_series(self, execute_task):
"""Make sure a blank series doesn't crash."""
task = execute_task('blank')
assert not task.aborted, 'Task should not have aborted'
class TestDoubleEps(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
test_double1:
mock:
- title: double S01E02-E03
series:
- double
test_double2:
mock:
- title: double S01E03
series:
- double
"""
def test_double(self, execute_task):
# First should be accepted
task = execute_task('test_double1')
assert task.find_entry('accepted', title='double S01E02-E03')
# We already got ep 3 as part of double, should not be accepted
task = execute_task('test_double2')
assert not task.find_entry('accepted', title='double S01E03')
class TestAutoLockin(object):
config = """
templates:
global:
parsing:
series: {{parser}}
series:
- FooBar
- BarFood
tasks:
try_date_1:
mock:
- title: FooBar 2012-10-10 HDTV
lock_ep:
mock:
- title: FooBar S01E01 HDTV
- title: FooBar S01E02 HDTV
- title: FooBar S01E03 HDTV
try_date_2:
mock:
- title: FooBar 2012-10-11 HDTV
test_special_lock:
mock:
- title: BarFood christmas special HDTV
- title: BarFood easter special HDTV
- title: BarFood haloween special HDTV
- title: BarFood bad special HDTV
try_reg:
mock:
- title: BarFood S01E01 HDTV
- title: BarFood 2012-9-9 HDTV
"""
def test_ep_lockin(self, execute_task):
task = execute_task('try_date_1')
assert task.find_entry('accepted', title='FooBar 2012-10-10 HDTV'), \
'dates should be accepted before locked in on an identifier type'
task = execute_task('lock_ep')
assert len(task.accepted) == 3, 'All ep mode episodes should have been accepted'
task = execute_task('try_date_2')
assert not task.find_entry('accepted', title='FooBar 2012-10-11 HDTV'), \
'dates should not be accepted after series has locked in to ep mode'
def test_special_lock(self, execute_task):
"""Make sure series plugin does not lock in to type 'special'"""
task = execute_task('test_special_lock')
assert len(task.accepted) == 4, 'All specials should have been accepted'
task = execute_task('try_reg')
assert len(task.accepted) == 2, 'Specials should not have caused episode type lock-in'
class TestReruns(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
one_accept:
mock:
- title: the show s01e01
- title: the show s01e01 different
series:
- the show
rerun: 2
mock_output: yes
"""
def test_one_accept(self, execute_task):
task = execute_task('one_accept')
assert len(task.mock_output) == 1, \
'should have accepted once!: %s' % ', '.join(e['title'] for e in task.mock_output)
class TestSpecials(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
preferspecials:
mock:
- title: the show s03e04 special
series:
- the show:
prefer_specials: True
nopreferspecials:
mock:
- title: the show s03e05 special
series:
- the show:
prefer_specials: False
assumespecial:
mock:
- title: the show SOMETHING
series:
- the show:
assume_special: True
noassumespecial:
mock:
- title: the show SOMETHING
series:
- the show:
assume_special: False
special_looks_like_season_pack:
mock:
- title: Doctor.Who.S07.Special.The.Science.of.Doctor.Who.WS.XviD-Flexget
series:
- Doctor Who
"""
def test_prefer_specials(self, execute_task):
# Test that an entry matching both ep and special is flagged as a special when prefer_specials is True
task = execute_task('preferspecials')
entry = task.find_entry('accepted', title='the show s03e04 special')
assert entry.get('series_id_type') == 'special', 'Entry which should have been flagged a special was not.'
def test_not_prefer_specials(self, execute_task):
# Test that an entry matching both ep and special is flagged as an ep when prefer_specials is False
task = execute_task('nopreferspecials')
entry = task.find_entry('accepted', title='the show s03e05 special')
assert entry.get('series_id_type') != 'special', 'Entry which should not have been flagged a special was.'
def test_assume_special(self, execute_task):
# Test that an entry with no ID found gets flagged as a special and accepted if assume_special is True
task = execute_task('assumespecial')
entry = task.find_entry(title='the show SOMETHING')
assert entry.get('series_id_type') == 'special', 'Entry which should have been flagged as a special was not.'
assert entry.accepted, 'Entry which should have been accepted was not.'
def test_not_assume_special(self, execute_task):
# Test that an entry with no ID found does not get flagged as a special and accepted if assume_special is False
task = execute_task('noassumespecial')
entry = task.find_entry(title='the show SOMETHING')
assert entry.get('series_id_type') != 'special', 'Entry which should not have been flagged as a special was.'
assert not entry.accepted, 'Entry which should not have been accepted was.'
def test_special_looks_like_a_season_pack(self, execute_task):
"""Make sure special episodes are not being parsed as season packs"""
task = execute_task('special_looks_like_season_pack')
entry = task.find_entry(title='Doctor.Who.S07.Special.The.Science.of.Doctor.Who.WS.XviD-Flexget')
assert entry.get('series_id_type') == 'special', 'Entry should have been flagged as a special'
assert not entry['season_pack'], 'Entry should not have been flagged as a season pack'
        assert entry.accepted, 'Entry which should have been accepted was not.'
class TestAlternateNames(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
alternate_name:
series:
- Some Show:
begin: S01E01
alternate_name: Other Show
another_alternate_name:
series:
- Some Show:
alternate_name: Good Show
set_other_alternate_name:
mock:
- title: Third.Show.S01E01
- title: Other.Show.S01E01
series:
- Some Show:
alternate_name: Third Show
rerun: 0
duplicate_names_in_different_series:
series:
- First Show:
begin: S01E01
alternate_name: Third Show
- Second Show:
begin: S01E01
alternate_name: Third Show
"""
def test_set_alternate_name(self, execute_task):
# Tests that old alternate names are not kept in the database.
task = execute_task('alternate_name')
task = execute_task('set_other_alternate_name')
assert task.find_entry('accepted', title='Third.Show.S01E01'), \
'A new alternate name should have been associated with the series.'
assert task.find_entry('undecided', title='Other.Show.S01E01'), \
'The old alternate name for the series is still present.'
def test_duplicate_alternate_names_in_different_series(self, execute_task):
with pytest.raises(TaskAbort) as ex:
execute_task('duplicate_names_in_different_series')
# only test that the reason is about alternate names, not which names.
reason = 'Error adding alternate name'
assert ex.value.reason[:27] == reason, \
'Wrong reason for task abortion. Should be about duplicate alternate names.'
    # Test the DB behaves like we expect, i.e. old alternate names cannot linger in the database once replaced.
def test_alternate_names_are_removed_from_db(self, execute_task):
from flexget.manager import Session
with Session() as session:
execute_task('alternate_name')
# test the current state of alternate names
assert len(session.query(db.AlternateNames).all()) == 1, 'There should be one alternate name present.'
assert session.query(db.AlternateNames).first().alt_name == 'Other Show', \
'Alternate name should have been Other Show.'
# run another task that overwrites the alternate names
execute_task('another_alternate_name')
assert len(session.query(db.AlternateNames).all()) == 1, \
'The old alternate name should have been removed from the database.'
assert session.query(db.AlternateNames).first().alt_name == 'Good Show', \
'The alternate name in the database should be the new one, Good Show.'
class TestCLI(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
learn_series:
series:
- Some Show
- Other Show
mock:
- title: Some Series S01E01
- title: Other Series S01E02
"""
def test_series_list(self, manager, execute_task):
"""Very rudimentary test, mostly makes sure this doesn't crash."""
execute_task('learn_series')
options = get_parser().parse_args(['series', 'list', '--porcelain'])
buffer = StringIO()
with capture_output(buffer, loglevel='error'):
manager.handle_cli(options=options)
lines = buffer.getvalue().split('\n')
assert all(any(line.lstrip().startswith(series) for line in lines) for series in ['Some Show', 'Other Show'])
class TestSeriesRemove(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
get_episode:
seen: local
series:
- My Show
mock:
- title: My Show S01E01 1080p
- title: My Show S01E01 720p
remove_episode:
seen: no
mock:
- title: My Show S01E01
series_name: My Show
series_id: S01E01
accept_all: yes
series_remove: yes
"""
def test_remove_episode(self, execute_task):
task = execute_task('get_episode')
assert len(task.accepted) == 1
first_rls = task.accepted[0]
task = execute_task('get_episode')
assert not task.accepted, 'series plugin duplicate blocking not working?'
task = execute_task('remove_episode')
task = execute_task('get_episode')
assert len(task.accepted) == 1, 'new release not accepted after forgetting ep'
assert task.accepted[0] != first_rls, 'same release accepted on second run'
class TestSeriesSeasonPack(object):
_config = """
templates:
global:
parsing:
series: internal
series:
- foo:
season_packs: yes
- bar:
season_packs: yes
tracking: backfill
- baz:
season_packs: 3
- boo:
season_packs: always
- bla:
season_packs: only
- bro:
season_packs:
threshold: 1
reject_eps: yes
tasks:
multiple_formats:
mock:
- title: foo.s01.720p-flexget
- title: foo.2xALL.720p-flexget
foo_s01:
mock:
- title: foo.s01.720p-flexget
foo_s02:
mock:
- title: foo.s02.720p-flexget
foo_s03:
mock:
- title: foo.s03.720p-flexget
foo_s01ep1:
mock:
- title: foo.s01e1.720p-flexget
foo_s02ep1:
mock:
- title: foo.s02e1.720p-flexget
season_pack_priority:
mock:
- title: foo.s01e1.720p-flexget
- title: foo.s01e2.720p-flexget
- title: foo.s01e3.720p-flexget
- title: foo.s01e4.720p-flexget
- title: foo.s01e5.720p-flexget
- title: foo.s01.720p-flexget
respect_begin:
series:
- bar:
begin: s02e01
season_packs: yes
mock:
- title: bar.s01.720p-flexget
- title: bar.s02.720p-flexget
several_seasons:
mock:
- title: foo.s03.720p-flexget
- title: foo.s07.720p-flexget
- title: foo.s03.1080p-flexget
- title: foo.s06.720p-flexget
- title: foo.s09.720p-flexget
test_backfill_1:
mock:
- title: bar.s03.720p-flexget
test_backfill_2:
mock:
- title: bar.s02.720p-flexget
test_backfill_3:
mock:
- title: bar.s03e01.720p-flexget
test_backfill_4:
mock:
- title: bar.s02e01.1080p-flexget
test_specific_season_pack_threshold_1:
mock:
- title: baz.s01e01.720p-flexget
- title: baz.s01e02.720p-flexget
- title: baz.s01e03.720p-flexget
test_specific_season_pack_threshold_2:
mock:
- title: baz.s01.720p-flexget
test_specific_season_pack_threshold_3:
mock:
- title: baz.s01e01.720p-flexget
- title: baz.s01e02.720p-flexget
- title: baz.s01e03.720p-flexget
- title: baz.s01e04.720p-flexget
test_always_get_season_pack_1:
mock:
- title: boo.s01e01.720p-flexget
- title: boo.s01e02.720p-flexget
- title: boo.s01e03.720p-flexget
- title: boo.s01e04.720p-flexget
test_always_get_season_pack_2:
mock:
- title: boo.s01.720p-flexget
test_only_get_season_packs:
mock:
- title: bla.s01.720p-flexget
- title: bla.s02e01.720p-flexget
test_proper_season_pack:
mock:
- title: foo.s01.720p-flexget
- title: foo.s01.720p.proper-flexget
test_proper_season_pack_2:
mock:
- title: foo.s01.720p-flexget
test_proper_season_pack_3:
mock:
- title: foo.s01.720p.proper-flexget
test_all_series:
mock:
- title: show.name.s01.720p.HDTV-Group
all_series:
season_packs: yes
test_with_dict_config_1:
mock:
- title: bro.s01e01.720p.HDTV-Flexget
- title: bro.s01.720p.HDTV-Flexget
test_with_dict_config_2:
mock:
- title: bro.s02.720p.HDTV-Flexget
"""
@pytest.fixture()
def config(self):
"""Overrides outer config fixture since season pack support does not work with guessit parser"""
return self._config
def test_season_pack_simple(self, execute_task):
task = execute_task('foo_s01')
assert len(task.accepted) == 1
def test_basic_tracking(self, execute_task):
task = execute_task('foo_s01')
assert len(task.accepted) == 1
task = execute_task('foo_s01ep1')
assert len(task.accepted) == 0
task = execute_task('foo_s02ep1')
assert len(task.accepted) == 1
def test_season_pack_takes_priority(self, execute_task):
task = execute_task('season_pack_priority')
assert len(task.accepted) == 1
entry = task.find_entry(title='foo.s01.720p-flexget')
assert entry.accepted
def test_respect_begin(self, execute_task):
task = execute_task('respect_begin')
assert len(task.accepted) == 1
entry = task.find_entry(title='bar.s02.720p-flexget')
assert entry.accepted
def test_tracking_rules_old_eps(self, execute_task):
task = execute_task('foo_s01')
assert len(task.accepted) == 1
task = execute_task('foo_s02')
assert len(task.accepted) == 1
task = execute_task('foo_s01ep1')
assert not task.accepted
def test_tracking_rules_old_season(self, execute_task):
task = execute_task('foo_s02')
assert len(task.accepted) == 1
task = execute_task('foo_s01')
assert not task.accepted
def test_tracking_rules_new_season(self, execute_task):
task = execute_task('foo_s01')
assert len(task.accepted) == 1
task = execute_task('foo_s03')
assert not task.accepted
def test_several_seasons(self, execute_task):
task = execute_task('several_seasons')
assert len(task.accepted) == 4
def test_multiple_formats(self, execute_task):
task = execute_task('multiple_formats')
assert len(task.accepted) == 2
def test_backfill(self, execute_task):
task = execute_task('test_backfill_1')
assert len(task.accepted) == 1
task = execute_task('test_backfill_2')
assert len(task.accepted) == 1
task = execute_task('test_backfill_3')
assert not task.accepted
task = execute_task('test_backfill_4')
assert not task.accepted
def test_default_threshold(self, execute_task):
task = execute_task('foo_s01ep1')
assert len(task.accepted) == 1
task = execute_task('foo_s01')
assert len(task.accepted) == 0
def test_specific_season_pack_threshold_positive(self, execute_task):
task = execute_task('test_specific_season_pack_threshold_1')
assert len(task.accepted) == 3
task = execute_task('test_specific_season_pack_threshold_2')
assert len(task.accepted) == 1
def test_specific_season_pack_threshold_negative(self, execute_task):
task = execute_task('test_specific_season_pack_threshold_3')
assert len(task.accepted) == 4
task = execute_task('test_specific_season_pack_threshold_2')
assert not task.accepted
def test_loose_threshold(self, execute_task):
task = execute_task('test_always_get_season_pack_1')
assert len(task.accepted) == 4
task = execute_task('test_always_get_season_pack_2')
assert len(task.accepted) == 1
def test_exclusive(self, execute_task):
task = execute_task('test_only_get_season_packs')
assert len(task.accepted) == 1
entry = task.find_entry(title='bla.s01.720p-flexget')
assert entry.accepted
def test_proper_season_pack(self, execute_task):
"""Series plugin: proper available immediately"""
task = execute_task('test_proper_season_pack')
assert task.find_entry('accepted', title='foo.s01.720p.proper-flexget')
def test_proper_season_pack_2(self, execute_task):
"""Series plugin: proper available immediately"""
task = execute_task('test_proper_season_pack_2')
assert task.find_entry('accepted', title='foo.s01.720p-flexget')
task = execute_task('test_proper_season_pack_3')
assert task.find_entry('accepted', title='foo.s01.720p.proper-flexget')
def test_all_series(self, execute_task):
task = execute_task('test_all_series')
assert task.find_entry('accepted', title='show.name.s01.720p.HDTV-Group')
def test_advanced_config(self, execute_task):
task = execute_task('test_with_dict_config_1')
assert not task.find_entry('accepted', title='bro.s01e01.720p.HDTV-Flexget')
assert task.find_entry('accepted', title='bro.s01.720p.HDTV-Flexget')
execute_task('test_with_dict_config_2',
options={'inject': [Entry(title='bro.s02e01.720p.HDTV-Flexget', url='')],
'immortal': True})
task = execute_task('test_with_dict_config_2')
assert task.find_entry('accepted', title='bro.s02.720p.HDTV-Flexget')
class TestSeriesDDAudio(object):
_config = """
templates:
global:
parsing:
series: internal
tasks:
min_quality:
mock:
- {title: 'MinQATest.S01E01.720p.XViD.DD5.1-FlexGet'}
- {title: 'MinQATest.S01E01.720p.XViD.DDP5.1-FlexGet'}
series:
- MinQATest:
quality: ">dd5.1"
max_quality:
mock:
- {title: 'MaxQATest.S01E01.720p.XViD.DD5.1-FlexGet'}
- {title: 'MaxQATest.S01E01.720p.XViD.DD+5.1-FlexGet'}
series:
- MaxQATest:
quality: "<=dd5.1"
test_channels:
mock:
- {title: 'Channels.S01E01.1080p.HDTV.DD+2.0-FlexGet'}
- {title: 'Channels.S01E01.1080p.HDTV.DD+5.1-FlexGet'}
- {title: 'Channels.S01E01.1080p.HDTV.DD+7.1-FlexGet'}
series:
- Channels:
quality: dd+5.1
"""
@pytest.fixture()
def config(self):
"""Overrides outer config fixture since DD+ and arbitrary channels support does not work with guessit parser"""
return self._config
def test_min_quality(self, execute_task):
"""Series plugin: min_quality"""
task = execute_task('min_quality')
assert task.find_entry('accepted', title='MinQATest.S01E01.720p.XViD.DDP5.1-FlexGet'), \
'MinQATest.S01E01.720p.XViD.DDP5.1-FlexGet should have been accepted'
        assert len(task.accepted) == 1, 'should have accepted only one'
def test_max_quality(self, execute_task):
"""Series plugin: max_quality"""
task = execute_task('max_quality')
assert task.find_entry('accepted', title='MaxQATest.S01E01.720p.XViD.DD5.1-FlexGet'), \
'MaxQATest.S01E01.720p.XViD.DD5.1-FlexGet should have been accepted'
assert len(task.accepted) == 1, 'should have accepted only one'
def test_channels(self, execute_task):
"""Series plugin: max_quality"""
task = execute_task('test_channels')
assert task.find_entry(title='Channels.S01E01.1080p.HDTV.DD+7.1-FlexGet'), \
'Channels.S01E01.1080p.HDTV.DD+7.1-FlexGet should have been accepted'
assert len(task.accepted) == 1, 'should have accepted only one'
|
#!/usr/bin/env python
"""
Copyright 2017 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=unused-argument
import mock
def create(*args, **kwargs):
"""
Create a Mock object that imitates a valid Cloud module.
:param args: Not used
:param kwargs: Not used
:return: mock.MagicMock
"""
attrs = {"client.get_suite.return_value": True, "get_campaign_id.side_effect": [True, KeyError],
"get_campaigns.return_value": True, "update_testcase.return_value": True,
"upload_results.side_effect": [True, False]}
mock_module = mock.MagicMock()
mock_module.configure_mock(**attrs)
return mock_module
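# A short, hedged demonstration of the side effects configured above: the
# second get_campaign_id() call raises KeyError, and upload_results() returns
# True and then False. The values checked here are illustrative only.
if __name__ == "__main__":
    cloud = create()
    assert cloud.client.get_suite() is True
    assert cloud.get_campaign_id() is True
    try:
        cloud.get_campaign_id()  # second call raises, per the side_effect list
    except KeyError:
        pass
    assert cloud.upload_results() is True
    assert cloud.upload_results() is False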
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
import uuid
from random import sample
import cassandra.concurrent
from cassandra.cluster import Cluster
from cassandra.policies import RoundRobinPolicy, TokenAwarePolicy
from solrcloudpy import SolrConnection, SearchOptions
from six.moves import input
solr_connection = None
solr_collection = None
SOLR_UNIQUE_KEY = None
cassandra_cluster = None
cassandra_session = None
cassandra_table = None
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().handlers[0].setFormatter(
logging.Formatter(fmt="%(asctime)s %(levelname)s:%(name)s: %(message)s", datefmt="%Y-%m-%dT%H:%M:%S"))
def init(args):
global solr_connection
solr_connection = SolrConnection(args.solr)
global solr_collection
solr_collection = solr_connection[args.collection]
global SOLR_UNIQUE_KEY
SOLR_UNIQUE_KEY = args.solrIdField
dc_policy = RoundRobinPolicy()
token_policy = TokenAwarePolicy(dc_policy)
global cassandra_cluster
cassandra_cluster = Cluster(contact_points=args.cassandra, port=args.cassandraPort,
protocol_version=int(args.cassandraProtocolVersion),
load_balancing_policy=token_policy)
global cassandra_session
cassandra_session = cassandra_cluster.connect(keyspace=args.cassandraKeyspace)
global cassandra_table
cassandra_table = args.cassandraTable
def delete_by_query(args):
if args.query:
se = SearchOptions()
se.commonparams.q(args.query) \
.fl(SOLR_UNIQUE_KEY) \
.fl('id')
for fq in args.filterquery if args.filterquery is not None else []:
se.commonparams.fq(fq)
query = se
elif args.jsonparams:
se = SearchOptions(**json.loads(args.jsonparams))
se.commonparams.fl(SOLR_UNIQUE_KEY) \
.fl('id')
query = se
else:
raise RuntimeError("either query or jsonparams is required")
if check_query(query):
logging.info("Collecting tiles ....")
solr_docs = do_solr_query(query)
if confirm_delete(len(solr_docs)):
deleted_ids = do_delete(solr_docs, query)
logging.info("Deleted tile IDs %s" % json.dumps([str(doc_id) for doc_id in deleted_ids], indent=2))
else:
logging.info("Exiting")
return
else:
logging.info("Exiting")
return
def confirm_delete(num_found):
    do_continue = input(
        "This action will delete %s record(s) from SOLR and Cassandra. Are you sure you want to continue? y/n: " % num_found)
    while do_continue not in ['y', 'n']:
        do_continue = input(
            "This action will delete %s record(s) from SOLR and Cassandra. Are you sure you want to continue? y/n: " % num_found)
return do_continue == 'y'
def check_query(query):
solr_response = solr_collection.search(query)
num_found = solr_response.result.response.numFound
if num_found == 0:
logging.info("Query returned 0 results")
return False
do_continue = input("Query found %s matching documents. Continue? [y]/n/(s)ample: " % num_found)
while do_continue not in ['y', 'n', 's', '']:
do_continue = input("Query found %s matching documents. Continue? [y]/n/(s)ample: " % num_found)
if do_continue == 'y' or do_continue == '':
return True
elif do_continue == 'n':
return False
else:
se = SearchOptions()
se.commonparams.q('%s:%s' % (SOLR_UNIQUE_KEY, sample(solr_response.result.response.docs, 1)[0][SOLR_UNIQUE_KEY]))
logging.info(json.dumps(solr_collection.search(se).result.response.docs[0], indent=2))
return check_query(query)
def do_solr_query(query):
doc_ids = []
next_cursor_mark = "*"
query.commonparams.sort('%s asc' % SOLR_UNIQUE_KEY)
while True:
query.commonparams.remove_param('cursorMark')
query.commonparams.add_params(cursorMark=next_cursor_mark)
solr_response = solr_collection.search(query)
try:
result_next_cursor_mark = solr_response.result.nextCursorMark
except AttributeError:
# No Results
return []
if result_next_cursor_mark == next_cursor_mark:
break
else:
next_cursor_mark = solr_response.result.nextCursorMark
doc_ids.extend([uuid.UUID(doc['id']) for doc in solr_response.result.response.docs])
return doc_ids
def do_delete(doc_ids, query):
logging.info("Executing Cassandra delete...")
delete_from_cassandra(doc_ids)
logging.info("Executing Solr delete...")
delete_from_solr(query)
return doc_ids
def delete_from_cassandra(doc_ids):
statement = cassandra_session.prepare("DELETE FROM %s WHERE tile_id=?" % cassandra_table)
results = cassandra.concurrent.execute_concurrent_with_args(cassandra_session, statement,
[(doc_id,) for doc_id in doc_ids])
for (success, result) in results:
if not success:
logging.warning("Could not delete tile %s" % result)
def delete_from_solr(query):
solr_collection.delete(query, commit=False)
solr_collection.commit()
def parse_args():
parser = argparse.ArgumentParser(description='Delete data from NEXUS using a Solr Query',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--solr',
help='The url of the SOLR server.',
required=True,
metavar='127.0.0.1:8983')
parser.add_argument('--collection',
help='The name of the SOLR collection.',
required=True,
metavar='nexustiles')
parser.add_argument('--solrIdField',
help='The name of the unique ID field for this collection.',
required=False,
default='solr_id_s',
metavar='solr_id_s')
parser.add_argument('--cassandra',
help='The hostname(s) or IP(s) of the Cassandra server(s).',
required=True,
nargs='+',
metavar=('127.0.0.100', '127.0.0.101'))
parser.add_argument('-k', '--cassandraKeyspace',
help='The Cassandra keyspace.',
required=True,
metavar='nexustiles')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-q', '--query',
                       help='The "q" parameter passed to SOLR Search',
metavar='*:*')
group.add_argument('--jsonparams',
                       help='Full query parameters formatted as JSON')
parser.add_argument('-fq', '--filterquery',
                        help='The "fq" parameter passed to SOLR Search. Only used if --jsonparams is not provided',
required=False,
nargs='+')
parser.add_argument('-t', '--cassandraTable',
help='The name of the cassandra table.',
required=False,
default='sea_surface_temp')
parser.add_argument('-p', '--cassandraPort',
help='The port used to connect to Cassandra.',
required=False,
default='9042')
parser.add_argument('-pv', '--cassandraProtocolVersion',
help='The version of the Cassandra protocol the driver should use.',
required=False,
choices=['1', '2', '3', '4', '5'],
default='3')
return parser.parse_args()
if __name__ == "__main__":
the_args = parse_args()
init(the_args)
delete_by_query(the_args)
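# Example invocation (hypothetical host names, script name, and query; the
# flags match those declared in parse_args above):
#
#   python delete_by_query.py --solr 127.0.0.1:8983 --collection nexustiles \
#       --cassandra 127.0.0.100 127.0.0.101 -k nexustiles -q "*:*" \
#       -t sea_surface_temp -p 9042 -pv 3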
|
from decimal import Decimal
# Purpose: read in SSDEEP output and print findings.
# Author: Tanner G.
def main():
file = open("ssdeep_comparison", "r")
# read past first line of output
file.readline()
filea_data = file.readline()
fileb_data = file.readline()
file.close()
totalCount = 0
similarities = 0
index = 0
max_len = len(filea_data)
    while index < max_len:
        totalCount += 1
        if filea_data[index] == "," or fileb_data[index] == ",":
            totalCount -= 1
            break
        elif filea_data[index] == fileb_data[index]:
            similarities += 1
        index += 1
print("------------------")
print("Stats from ssdeep:")
print("------------------")
print("Total Count: " + str(totalCount))
print("Similarities: " + str(similarities))
ratio = (Decimal(similarities)/Decimal(totalCount) * 100)
print ("Hash similarity detected: " + str(ratio)[:5] + "%")
outputFile = open("ssdeep_stats", "w")
outputFile.write("count:"+str(totalCount)+",ratio:"+str(ratio)[:5]+"\n")
outputFile.close()
if __name__ == "__main__":
main()
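# A minimal, self-contained sketch of the comparison logic in main() above
# (a hypothetical helper, not part of the original script): count matching
# positions until the first comma in either ssdeep line.
def _similarity_ratio(line_a, line_b):
    total = matches = 0
    for char_a, char_b in zip(line_a, line_b):
        if char_a == "," or char_b == ",":
            break
        total += 1
        if char_a == char_b:
            matches += 1
    return (Decimal(matches) / Decimal(total)) * 100 if total else Decimal(0)
# e.g. _similarity_ratio("ab,x", "ax,y") == Decimal('50')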
|
#!/usr/bin/env python
import cPickle
from functools import wraps
def redis_lru(capacity=5000, slice=slice(None)):
def decorator(func):
cache_keys = "lru:keys:%s" % (func.__name__,)
cache_vals = "lru:vals:%s" % (func.__name__,)
cache_hits = "lru:hits:%s" % (func.__name__,)
cache_miss = "lru:miss:%s" % (func.__name__,)
lvars = [None] # closure mutable
def add(key, value):
eject()
conn = lvars[0]
conn.incr(cache_miss)
conn.hset(cache_vals, key, cPickle.dumps(value))
conn.zadd(cache_keys, 0, key)
return value
def get(key):
conn = lvars[0]
value = conn.hget(cache_vals, key)
if value:
conn.incr(cache_hits)
conn.zincrby(cache_keys, key, 1.0)
value = cPickle.loads(value)
return value
def eject():
conn = lvars[0]
count = min((capacity / 10) or 1, 1000)
if conn.zcard(cache_keys) >= capacity:
eject = conn.zrange(cache_keys, 0, count)
conn.zremrangebyrank(cache_keys, 0, count)
conn.hdel(cache_vals, *eject)
@wraps(func)
def wrapper(*args, **kwargs):
conn = lvars[0]
if conn:
items = args + tuple(sorted(kwargs.items()))
key = cPickle.dumps(items[slice])
return get(key) or add(key, func(*args, **kwargs))
else:
return func(*args, **kwargs)
def info():
conn = lvars[0]
size = int(conn.zcard(cache_keys) or 0)
hits, misses = int(conn.get(cache_hits) or 0), int(conn.get(cache_miss) or 0)
return hits, misses, capacity, size
def clear():
conn = lvars[0]
conn.delete(cache_keys, cache_vals)
conn.delete(cache_hits, cache_miss)
def init(conn):
lvars[0] = conn
wrapper.init = init
wrapper.info = info
wrapper.clear = clear
return wrapper
return decorator
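# A minimal usage sketch (hedged): assumes a running Redis server on localhost
# and a pre-3.0 redis-py StrictRedis client, whose zadd/zincrby argument order
# this decorator targets. The memoized function below is illustrative only.
if __name__ == "__main__":
    import redis
    @redis_lru(capacity=100)
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)
    fib.init(redis.StrictRedis())  # without init(), calls bypass the cache
    print("fib(20) = %d" % fib(20))
    print("(hits, misses, capacity, size) = %r" % (fib.info(),))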
|
swizzle_table = [
[
b"\x00",
b"\x01",
b"\x40",
b"\x03",
b"\x10",
b"\x21",
b"\x50",
b"\x23",
b"\x04",
b"\x09",
b"\x44",
b"\x0b",
b"\x14",
b"\x29",
b"\x54",
b"\x2b",
],
[
b"\x08",
b"\x11",
b"\x48",
b"\x13",
b"\x18",
b"\x31",
b"\x58",
b"\x33",
b"\x0c",
b"\x19",
b"\x4c",
b"\x1b",
b"\x1c",
b"\x39",
b"\x5c",
b"\x3b",
],
[
b"\x80",
b"\x05",
b"\xc0",
b"\x07",
b"\x90",
b"\x25",
b"\xd0",
b"\x27",
b"\x84",
b"\x0d",
b"\xc4",
b"\x0f",
b"\x94",
b"\x2d",
b"\xd4",
b"\x2f",
],
[
b"\x88",
b"\x15",
b"\xc8",
b"\x17",
b"\x98",
b"\x35",
b"\xd8",
b"\x37",
b"\x8c",
b"\x1d",
b"\xcc",
b"\x1f",
b"\x9c",
b"\x3d",
b"\xdc",
b"\x3f",
],
[
b"\x02",
b"\x41",
b"\x42",
b"\x43",
b"\x12",
b"\x61",
b"\x52",
b"\x63",
b"\x06",
b"\x49",
b"\x46",
b"\x4b",
b"\x16",
b"\x69",
b"\x56",
b"\x6b",
],
[
b"\x0a",
b"\x51",
b"\x4a",
b"\x53",
b"\x1a",
b"\x71",
b"\x5a",
b"\x73",
b"\x0e",
b"\x59",
b"\x4e",
b"\x5b",
b"\x1e",
b"\x79",
b"\x5e",
b"\x7b",
],
[
b"\x82",
b"\x45",
b"\xc2",
b"\x47",
b"\x92",
b"\x65",
b"\xd2",
b"\x67",
b"\x86",
b"\x4d",
b"\xc6",
b"\x4f",
b"\x96",
b"\x6d",
b"\xd6",
b"\x6f",
],
[
b"\x8a",
b"\x55",
b"\xca",
b"\x57",
b"\x9a",
b"\x75",
b"\xda",
b"\x77",
b"\x8e",
b"\x5d",
b"\xce",
b"\x5f",
b"\x9e",
b"\x7d",
b"\xde",
b"\x7f",
],
[
b"\x20",
b"\x81",
b"\x60",
b"\x83",
b"\x30",
b"\xa1",
b"\x70",
b"\xa3",
b"\x24",
b"\x89",
b"\x64",
b"\x8b",
b"\x34",
b"\xa9",
b"\x74",
b"\xab",
],
[
b"\x28",
b"\x91",
b"\x68",
b"\x93",
b"\x38",
b"\xb1",
b"\x78",
b"\xb3",
b"\x2c",
b"\x99",
b"\x6c",
b"\x9b",
b"\x3c",
b"\xb9",
b"\x7c",
b"\xbb",
],
[
b"\xa0",
b"\x85",
b"\xe0",
b"\x87",
b"\xb0",
b"\xa5",
b"\xf0",
b"\xa7",
b"\xa4",
b"\x8d",
b"\xe4",
b"\x8f",
b"\xb4",
b"\xad",
b"\xf4",
b"\xaf",
],
[
b"\xa8",
b"\x95",
b"\xe8",
b"\x97",
b"\xb8",
b"\xb5",
b"\xf8",
b"\xb7",
b"\xac",
b"\x9d",
b"\xec",
b"\x9f",
b"\xbc",
b"\xbd",
b"\xfc",
b"\xbf",
],
[
b"\x22",
b"\xc1",
b"\x62",
b"\xc3",
b"\x32",
b"\xe1",
b"\x72",
b"\xe3",
b"\x26",
b"\xc9",
b"\x66",
b"\xcb",
b"\x36",
b"\xe9",
b"\x76",
b"\xeb",
],
[
b"\x2a",
b"\xd1",
b"\x6a",
b"\xd3",
b"\x3a",
b"\xf1",
b"\x7a",
b"\xf3",
b"\x2e",
b"\xd9",
b"\x6e",
b"\xdb",
b"\x3e",
b"\xf9",
b"\x7e",
b"\xfb",
],
[
b"\xa2",
b"\xc5",
b"\xe2",
b"\xc7",
b"\xb2",
b"\xe5",
b"\xf2",
b"\xe7",
b"\xa6",
b"\xcd",
b"\xe6",
b"\xcf",
b"\xb6",
b"\xed",
b"\xf6",
b"\xef",
],
[
b"\xaa",
b"\xd5",
b"\xea",
b"\xd7",
b"\xba",
b"\xf5",
b"\xfa",
b"\xf7",
b"\xae",
b"\xdd",
b"\xee",
b"\xdf",
b"\xbe",
b"\xfd",
b"\xfe",
b"\xff",
],
]
MOSHI_SET_OFFSET = 0
MOSHI_TERMINATION = 2
MOSHI_VECTOR_SPEED = 5
MOSHI_RASTER_SPEED = 4
MOSHI_CUT_ABS = 15
MOSHI_CUT_HORIZ = 14
MOSHI_CUT_VERT = 11
MOSHI_MOVE_ABS = 7
MOSHI_MOVE_HORIZ = 6
MOSHI_MOVE_VERT = 3
MOSHI_FREEMOTOR = 1
MOSHI_ESTOP = 1
MOSHI_EPILOGUE = 2
MOSHI_PROLOGUE = 6
# 6 also seen at laser startup.
MOSHI_LASER = 7
MOSHI_READ = 14
# 14 is also sometimes done as a keepalive each 3.4 seconds.
class MoshiBlob:
"""
    MoshiBlobs are data blobs of Moshi types. These are series of commands which should be executed as a program within
    the MoshiController.
"""
def __init__(self, channel=None):
self.data = bytearray() # Queued additional commands programs.
self.channel = channel
self.last_x = 0
self.last_y = 0
self.offset_x = 0
self.offset_y = 0
self._stage = 0
def __len__(self):
return len(self.data)
def pipe_int8(self, value):
"""
        Write an 8-bit value to the current program.
"""
v = bytes(
bytearray(
[
value & 0xFF,
]
)
)
self.write(v)
def pipe_int16le(self, value):
"""
Write a 16 bit little-endian value to the current program.
"""
v = bytes(
bytearray(
[
(value >> 0) & 0xFF,
(value >> 8) & 0xFF,
]
)
)
self.write(v)
def write(self, bytes_to_write):
"""
        Writes data to the queue; it will be moved into the buffer by the thread in a threadsafe manner.
:param bytes_to_write: data to write to the queue.
:return:
"""
self.data += bytes_to_write
return self
def vector_speed(self, speed_mms, normal_speed_mms):
"""
        Vector Speed Byte (0x00 position), followed by 2 int8 values:
        jog and normal speed. These values are limited to integers in the
        range 1 to 256.
:return:
"""
assert self._stage == 0
self._stage = 1
if self.channel:
self.channel(
"Vector Cut Speed: %d mm/s Normal Speed: %d mm/s"
% (int(speed_mms), int(normal_speed_mms))
)
self.write(swizzle_table[MOSHI_VECTOR_SPEED][0])
if speed_mms > 256:
speed_mms = 256
if speed_mms < 1:
speed_mms = 1
self.pipe_int8(speed_mms - 1)
self.pipe_int8(normal_speed_mms - 1)
def raster_speed(self, speed_mms):
"""
Write speed for raster programs.
"""
assert self._stage == 0
self._stage = 1
if self.channel:
self.channel("Raster Header Speed: %d cm/s" % int(speed_mms))
self.write(swizzle_table[MOSHI_RASTER_SPEED][0])
speed_cms = int(round(speed_mms / 10))
if speed_cms == 0:
speed_cms = 1
self.pipe_int8(speed_cms - 1)
def set_offset(self, z, x, y):
"""
2nd Command For Jump. (0x03 position), followed by 3 int16le (2)
:return:
"""
assert self._stage == 1
self._stage = 2
self.offset_x = x
self.offset_y = y
if self.channel:
self.channel("Set Location z: %d, x: %d, y: %d" % (int(z), int(x), int(y)))
self.write(swizzle_table[MOSHI_SET_OFFSET][0])
self.pipe_int16le(z) # Unknown, always zero.
self.pipe_int16le(x) # x
self.pipe_int16le(y) # y
def termination(self):
"""
Terminal Commands for Jump/Program. (last 7 bytes). (4)
:return:
"""
# assert self._stage == 3
self._stage = 4
if self.channel:
self.channel("Termination.")
for i in range(7):
self.write(swizzle_table[MOSHI_TERMINATION][0])
def cut_abs(self, x, y):
"""
Write an absolute position cut value.
Laser will cut to this position from the current stored head position.
Head position is stored on the Moshiboard
"""
assert 2 <= self._stage <= 3
self._stage = 3
if x < 0:
x = 0
if y < 0:
y = 0
self.last_x = x
self.last_y = y
x -= self.offset_x
y -= self.offset_y
if self.channel:
self.channel("Cut x: %d y: %d" % (int(x), int(y)))
self.write(swizzle_table[MOSHI_CUT_ABS][1])
self.pipe_int16le(int(x))
self.pipe_int16le(int(y))
def move_abs(self, x, y):
"""
Write an absolute position move value.
Laser will move without cutting to this position from the current stored head position.
Head position is stored on the Moshiboard
"""
assert 2 <= self._stage <= 3
self._stage = 3
if x < 0:
x = 0
if y < 0:
y = 0
self.last_x = x
self.last_y = y
x -= self.offset_x
y -= self.offset_y
if self.channel:
self.channel("Move x: %d y: %d" % (int(x), int(y)))
self.write(swizzle_table[MOSHI_MOVE_ABS][0])
self.pipe_int16le(int(x))
self.pipe_int16le(int(y))
def move_vertical_abs(self, y):
"""
Write an absolute position vertical move.
Laser will move the y position without cutting to the new position from the head position
stored in the Moshiboard.
"""
assert 2 <= self._stage <= 3
self._stage = 3
self.last_y = y
y -= self.offset_y
if self.channel:
self.channel("Move Vertical y: %d" % int(y))
self.write(swizzle_table[MOSHI_MOVE_VERT][0])
self.pipe_int16le(int(y))
def move_horizontal_abs(self, x):
"""
Write an absolute position horizontal move.
Laser will move the x position without cutting to the new position from the head position
stored in the Moshiboard.
"""
assert 2 <= self._stage <= 3
self._stage = 3
self.last_x = x
x -= self.offset_x
if self.channel:
self.channel("Move Horizontal x: %d" % int(x))
self.write(swizzle_table[MOSHI_MOVE_HORIZ][0])
self.pipe_int16le(int(x))
def cut_horizontal_abs(self, x):
"""
Write an absolute position horizontal cut.
Laser will cut to the x position with laser firing to the new position from the head position
stored in the Moshiboard.
"""
assert 2 <= self._stage <= 3
self._stage = 3
self.last_x = x
x -= self.offset_x
if self.channel:
self.channel("Cut Horizontal x: %d" % int(x))
self.write(swizzle_table[MOSHI_CUT_HORIZ][0])
self.pipe_int16le(int(x))
def cut_vertical_abs(self, y):
"""
Write an absolute position vertical cut.
Laser will cut to the y position with laser firing to the new position from the head position
stored in the Moshiboard
"""
assert 2 <= self._stage <= 3
self._stage = 3
self.last_y = y
y -= self.offset_y
if self.channel:
self.channel("Cut Vertical y: %d" % int(y))
self.write(swizzle_table[MOSHI_CUT_VERT][0])
self.pipe_int16le(int(y))
@staticmethod
def _swizzle(b, p7, p6, p5, p4, p3, p2, p1, p0):
return (
((b >> 0) & 1) << p0
| ((b >> 1) & 1) << p1
| ((b >> 2) & 1) << p2
| ((b >> 3) & 1) << p3
| ((b >> 4) & 1) << p4
| ((b >> 5) & 1) << p5
| ((b >> 6) & 1) << p6
| ((b >> 7) & 1) << p7
)
@staticmethod
def convert(q):
"""
        Translate a Moshiboard swizzle into the correct Moshi command code.
        Moshiboard command codes have 16 values with 16 different swizzled values. There are
        two different swizzles depending on the parity of the particular code. These codes are used
        randomly by Moshi's native software. The board itself reads them all the same.
"""
if q & 1:
return MoshiBlob._swizzle(q, 7, 6, 2, 4, 3, 5, 1, 0)
else:
return MoshiBlob._swizzle(q, 5, 1, 7, 2, 4, 3, 6, 0)
@staticmethod
def reconvert(q):
"""
Counter-translate a particular command code back into correct values.
"""
for m in range(5):
q = MoshiBlob.convert(q)
return q
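# A minimal build sketch (hedged): the asserts above enforce the stage order
# speed header (0 -> 1), set_offset (1 -> 2), moves/cuts (2 -> 3), then
# termination. Coordinates and speeds below are illustrative only.
if __name__ == "__main__":
    blob = MoshiBlob(channel=print)  # channel just echoes the readable trace
    blob.vector_speed(20, 40)
    blob.set_offset(0, 100, 100)
    blob.move_abs(150, 150)
    blob.cut_abs(300, 150)
    blob.termination()
    print("program length: %d bytes" % len(blob))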
|
from core.models import User
from django.http import HttpResponse, JsonResponse
from django.shortcuts import get_object_or_404
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework import permissions, status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.parsers import JSONParser
from requests.exceptions import ConnectionError
from projects.models import (
Page,
Project,
ProjectMemberRole,
ProjectAuditParameters,
AvailableAuditParameters,
Script,
)
from projects.serializers import (
PageSerializer,
ProjectSerializer,
ProjectMemberRoleSerializer,
ProjectAuditParametersSerializer,
AvailableAuditParameterSerializer,
ScriptSerializer,
)
from projects.permissions import (
check_if_member_of_project,
check_if_admin_of_project,
is_admin_of_project,
)
from audits.tasks import get_wpt_audit_configurations
def get_user_projects(user_id):
return Project.objects.filter(members__id=user_id, is_active=True)
@swagger_auto_schema(
methods=["get"],
responses={
200: openapi.Response(
"Returns a list of all the user’s projects", ProjectSerializer(many=True)
)
},
tags=["Projects"],
)
@swagger_auto_schema(
methods=["post"],
request_body=ProjectSerializer,
responses={201: openapi.Response("Returns the created project", ProjectSerializer)},
tags=["Projects"],
)
@api_view(["GET", "POST"])
@permission_classes([permissions.IsAuthenticated])
def project_list(request):
if request.method == "GET":
projects = get_user_projects(request.user.id)
serializer = ProjectSerializer(
projects, many=True, context={"user_id": request.user.id}
)
return JsonResponse(serializer.data, safe=False)
elif request.method == "POST":
data = JSONParser().parse(request)
serializer = ProjectSerializer(data=data, context={"user_id": request.user.id})
if serializer.is_valid():
project = Project.objects.create(**serializer.validated_data)
project.save()
return JsonResponse(
{"uuid": project.uuid, **serializer.data},
status=status.HTTP_201_CREATED,
)
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@swagger_auto_schema(
methods=["get"],
responses={200: openapi.Response("", ProjectSerializer)},
tags=["Projects"],
)
@api_view(["GET"])
@permission_classes([permissions.IsAuthenticated])
def first_project(request):
"""Returns the first project of the user.
    This is used to speed up the loading of the first project page."""
projects = get_user_projects(request.user.id)
serializer = ProjectSerializer(
projects.first(), context={"user_id": request.user.id}
)
return JsonResponse(serializer.data)
@swagger_auto_schema(
methods=["get"],
responses={
200: openapi.Response("Returns details of a project.", ProjectSerializer)
},
tags=["Projects"],
)
@swagger_auto_schema(
methods=["put"],
request_body=ProjectSerializer,
responses={
200: openapi.Response(
"Updates a project. Allows for partial updates.", ProjectSerializer
)
},
tags=["Projects"],
)
@swagger_auto_schema(
methods=["delete"], responses={204: "No content"}, tags=["Projects"]
)
@api_view(["GET", "PUT", "DELETE"])
@permission_classes([permissions.IsAuthenticated])
def project_detail(request, project_uuid):
project = get_object_or_404(Project, pk=project_uuid)
check_if_member_of_project(request.user.id, project.uuid)
if request.method == "GET":
if is_admin_of_project(request.user.id, project.uuid):
serializer = ProjectSerializer(
project, context={"user_id": request.user.id}
)
return JsonResponse(serializer.data)
serializer = ProjectSerializer(
project,
fields=(
"uuid",
"name",
"project_members",
"pages",
"scripts",
"audit_parameters_list",
"screenshot_url",
"latest_audit_at",
"has_siblings",
),
context={"user_id": request.user.id},
)
return JsonResponse(serializer.data)
elif request.method == "PUT":
check_if_admin_of_project(request.user.id, project.uuid)
data = JSONParser().parse(request)
serializer = ProjectSerializer(
project, data=data, partial=True, context={"user_id": request.user.id}
)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data)
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == "DELETE":
check_if_admin_of_project(request.user.id, project.uuid)
project.delete()
return JsonResponse({}, status=status.HTTP_204_NO_CONTENT)
@swagger_auto_schema(
methods=["get"],
responses={
200: openapi.Response(
"Returns a list of all pages in the project", PageSerializer(many=True)
)
},
tags=["Pages"],
)
@swagger_auto_schema(
methods=["post"],
request_body=PageSerializer,
responses={201: openapi.Response("Returns the created page", PageSerializer)},
tags=["Pages"],
)
@api_view(["GET", "POST"])
@permission_classes([permissions.IsAuthenticated])
def project_page_list(request, project_uuid):
project = Project.objects.get(uuid=project_uuid)
check_if_member_of_project(request.user.id, project.uuid)
if request.method == "GET":
pages = project.pages.all()
serializer = PageSerializer(pages, many=True)
return JsonResponse(serializer.data, safe=False)
elif request.method == "POST":
check_if_admin_of_project(request.user.id, project.uuid)
data = JSONParser().parse(request)
serializer = PageSerializer(data=data)
if serializer.is_valid():
page = Page.objects.create(project=project, **serializer.validated_data)
page.save()
return JsonResponse(
{"uuid": page.uuid, **serializer.data}, status=status.HTTP_201_CREATED
)
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@swagger_auto_schema(
methods=["get"],
responses={200: openapi.Response("Returns details of a page.", PageSerializer)},
tags=["Pages"],
)
@swagger_auto_schema(
methods=["put"],
request_body=PageSerializer,
responses={
200: openapi.Response(
"Updates a page. Allows for partial updates.", PageSerializer
)
},
tags=["Pages"],
)
@swagger_auto_schema(methods=["delete"], responses={204: "No content"}, tags=["Pages"])
@api_view(["GET", "PUT", "DELETE"])
@permission_classes([permissions.IsAuthenticated])
def project_page_detail(request, project_uuid, page_uuid):
project = get_object_or_404(Project, pk=project_uuid)
page = get_object_or_404(Page, pk=page_uuid)
check_if_member_of_project(request.user.id, project.uuid)
if page.project != project:
return JsonResponse({}, status=status.HTTP_400_BAD_REQUEST)
if request.method == "GET":
serializer = PageSerializer(page)
return JsonResponse(serializer.data)
elif request.method == "PUT":
check_if_admin_of_project(request.user.id, project.uuid)
data = JSONParser().parse(request)
serializer = PageSerializer(page, data=data)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data)
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == "DELETE":
check_if_admin_of_project(request.user.id, project.uuid)
page.delete()
return JsonResponse({}, status=status.HTTP_204_NO_CONTENT)
@swagger_auto_schema(
methods=["post"],
request_body=ProjectAuditParametersSerializer,
responses={
201: openapi.Response(
"Returns the created project audit parameter",
ProjectAuditParametersSerializer,
)
},
tags=["Project Audit Parameters"],
)
@api_view(["POST"])
@permission_classes([permissions.IsAuthenticated])
def project_audit_parameter_list(request, project_uuid):
project = Project.objects.get(uuid=project_uuid)
check_if_admin_of_project(request.user.id, project.uuid)
data = JSONParser().parse(request)
serializer = ProjectAuditParametersSerializer(data=data)
if serializer.is_valid():
audit_parameter = ProjectAuditParameters.objects.create(
project=project, **serializer.validated_data
)
audit_parameter.save()
serializer = ProjectAuditParametersSerializer(audit_parameter)
return JsonResponse(serializer.data, status=status.HTTP_201_CREATED)
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@swagger_auto_schema(
methods=["get"],
responses={
200: openapi.Response(
"Returns the details of a project audit parameter.",
ProjectAuditParametersSerializer,
)
},
tags=["Project Audit Parameters"],
)
@swagger_auto_schema(
methods=["put"],
request_body=ProjectAuditParametersSerializer,
responses={
200: openapi.Response(
"Updates a project audit parameter. Allows for partial updates.",
ProjectAuditParametersSerializer,
)
},
tags=["Project Audit Parameters"],
)
@swagger_auto_schema(
methods=["delete"], responses={204: "No content"}, tags=["Project Audit Parameters"]
)
@api_view(["GET", "PUT", "DELETE"])
@permission_classes([permissions.IsAuthenticated])
def project_audit_parameters_detail(request, project_uuid, audit_parameters_uuid):
project = get_object_or_404(Project, pk=project_uuid)
audit_parameters = get_object_or_404(
ProjectAuditParameters, pk=audit_parameters_uuid
)
check_if_member_of_project(request.user.id, project.uuid)
if audit_parameters.project != project:
return JsonResponse({}, status=status.HTTP_400_BAD_REQUEST)
if request.method == "GET":
serializer = ProjectAuditParametersSerializer(audit_parameters)
return JsonResponse(serializer.data)
elif request.method == "PUT":
check_if_admin_of_project(request.user.id, project.uuid)
data = JSONParser().parse(request)
serializer = ProjectAuditParametersSerializer(audit_parameters, data=data)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data)
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == "DELETE":
check_if_admin_of_project(request.user.id, project.uuid)
audit_parameters.delete()
return JsonResponse({}, status=status.HTTP_204_NO_CONTENT)
@swagger_auto_schema(
methods=["put"],
request_body=ProjectMemberRoleSerializer,
responses={
200: openapi.Response(
"Updates a project member. Allows for partial updates.",
ProjectMemberRoleSerializer,
)
},
tags=["Project Members"],
)
@swagger_auto_schema(
methods=["delete"], responses={204: "No content"}, tags=["Project Members"]
)
@api_view(["PUT", "DELETE"])
@permission_classes([permissions.IsAuthenticated])
def project_member_detail(request, project_uuid, user_id):
project = get_object_or_404(Project, pk=project_uuid)
check_if_admin_of_project(request.user.id, project.uuid)
project_member = ProjectMemberRole.objects.filter(
project_id=project_uuid, user_id=user_id
)
if not project_member:
return HttpResponse(
"No project member was found", status=status.HTTP_404_NOT_FOUND
)
if request.method == "PUT":
data = JSONParser().parse(request)
if "is_admin" in data and type(data["is_admin"]) is bool:
project_member.update(is_admin=data["is_admin"])
serializer = ProjectMemberRoleSerializer(project_member.first())
return JsonResponse(serializer.data)
return HttpResponse(
"Please provide a valid 'is_admin' value.",
status=status.HTTP_400_BAD_REQUEST,
)
elif request.method == "DELETE":
project_member.delete()
return JsonResponse({}, status=status.HTTP_204_NO_CONTENT)
@swagger_auto_schema(
methods=["post"],
request_body=openapi.Schema(
type="object", properties={"user_id": openapi.Schema(type="string")}
),
responses={
201: openapi.Response(
"Returns the updated project with the new member.", ProjectSerializer
)
},
tags=["Project Members"],
)
@api_view(["POST"])
@permission_classes([permissions.IsAuthenticated])
def project_members(request, project_uuid):
project = get_object_or_404(Project, pk=project_uuid)
check_if_admin_of_project(request.user.id, project.uuid)
data = JSONParser().parse(request)
if "user_id" in data:
if not ProjectMemberRole.objects.filter(
project_id=project_uuid, user_id=data["user_id"]
):
user = User.objects.filter(id=data["user_id"])
if not user:
return HttpResponse(
"No user found with this id", status=status.HTTP_404_NOT_FOUND
)
project = Project.objects.filter(uuid=project_uuid).first()
project.members.add(user.first(), through_defaults={"is_admin": False})
serializer = ProjectSerializer(project)
return JsonResponse(serializer.data)
return HttpResponse(
"The user is already a member of the project",
status=status.HTTP_400_BAD_REQUEST,
)
return HttpResponse(
"You must provide a user_id", status=status.HTTP_400_BAD_REQUEST
)
@swagger_auto_schema(
methods=["post"],
request_body=openapi.Schema(
type="object", properties={"wpt_instance_url": openapi.Schema(type="string")}
),
responses={
201: openapi.Response(
"Returns discovered available audit parameters for the WPT instance URL passed in parameter",
AvailableAuditParameterSerializer,
)
},
tags=["Project Audit Parameters"],
)
@api_view(["POST"])
def discover_available_audit_parameters(request):
data = JSONParser().parse(request)
if "wpt_instance_url" in data:
try:
get_wpt_audit_configurations(data["wpt_instance_url"])
except ConnectionError:
return JsonResponse(
{
"error": "UNREACHABLE",
"details": "The WPT instance is not reachable, please check the URL",
},
status=status.HTTP_400_BAD_REQUEST,
)
available_audit_parameters = AvailableAuditParameters.objects.filter(
is_active=True
)
serializer = AvailableAuditParameterSerializer(
available_audit_parameters, many=True
)
return JsonResponse(serializer.data, safe=False)
return JsonResponse(
{
"error": "MISSING_PARAMETER",
"details": "You must provide a wpt_instance_url in the request body",
},
status=status.HTTP_400_BAD_REQUEST,
)
@swagger_auto_schema(
methods=["get"],
responses={
200: openapi.Response(
"Returns all WebPageTest available audit parameters",
AvailableAuditParameterSerializer,
)
},
tags=["Project Audit Parameters"],
)
@api_view(["GET"])
@permission_classes([permissions.IsAuthenticated])
def available_audit_parameters(request):
available_audit_parameters = AvailableAuditParameters.objects.filter(is_active=True)
serializer = AvailableAuditParameterSerializer(
available_audit_parameters, many=True
)
return JsonResponse(serializer.data, safe=False)
@swagger_auto_schema(
methods=["post"],
request_body=ScriptSerializer,
responses={201: openapi.Response("Returns the created script", ScriptSerializer)},
tags=["Scripts"],
)
@api_view(["POST"])
@permission_classes([permissions.IsAuthenticated])
def project_scripts(request, project_uuid):
project = Project.objects.get(uuid=project_uuid)
check_if_admin_of_project(request.user.id, project.uuid)
data = JSONParser().parse(request)
serializer = ScriptSerializer(data=data)
if serializer.is_valid():
script = Script.objects.create(project=project, **serializer.validated_data)
script.save()
return JsonResponse(
{"uuid": script.uuid, **serializer.data}, status=status.HTTP_201_CREATED
)
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@swagger_auto_schema(
methods=["put"],
request_body=ScriptSerializer,
responses={
200: openapi.Response(
"Updates a script. Allows for partial updates.", ScriptSerializer
)
},
tags=["Scripts"],
)
@swagger_auto_schema(
methods=["delete"], responses={204: "No content"}, tags=["Scripts"]
)
@api_view(["PUT", "DELETE"])
@permission_classes([permissions.IsAuthenticated])
def project_script_detail(request, project_uuid, script_uuid):
project = get_object_or_404(Project, pk=project_uuid)
script = get_object_or_404(Script, pk=script_uuid)
check_if_member_of_project(request.user.id, project.uuid)
if script.project != project:
return JsonResponse({}, status=status.HTTP_400_BAD_REQUEST)
elif request.method == "PUT":
check_if_admin_of_project(request.user.id, project.uuid)
data = JSONParser().parse(request)
serializer = ScriptSerializer(script, data=data)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data)
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == "DELETE":
check_if_admin_of_project(request.user.id, project.uuid)
script.delete()
return JsonResponse({}, status=status.HTTP_204_NO_CONTENT)
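# A minimal URL-routing sketch for the views above (hypothetical paths; the
# project's real urls.py may differ):
#
#     from django.urls import path
#     from projects import views
#
#     urlpatterns = [
#         path("api/projects/", views.project_list),
#         path("api/projects/<uuid:project_uuid>/", views.project_detail),
#         path("api/projects/<uuid:project_uuid>/pages/", views.project_page_list),
#         path("api/projects/<uuid:project_uuid>/pages/<uuid:page_uuid>/", views.project_page_detail),
#     ]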
|
"""Provides the Objector class."""
from json import loads
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
from .exceptions import ClientException, RedditAPIException
from .models.reddit.base import RedditBase
from .util import snake_case_keys
if TYPE_CHECKING: # pragma: no cover
from ... import praw
class Objector:
"""The objector builds :class:`.RedditBase` objects."""
@classmethod
def parse_error(
cls, data: Union[List[Any], Dict[str, Dict[str, str]]]
) -> Optional[RedditAPIException]:
"""Convert JSON response into an error object.
:param data: The dict to be converted.
:returns: An instance of :class:`~.RedditAPIException`, or ``None`` if ``data``
doesn't fit this model.
"""
if isinstance(data, list):
# Fetching a Submission returns a list (of two items). Although it's handled
# manually in `Submission._fetch()`, assume it's a possibility here.
return None
errors = data.get("json", {}).get("errors")
if errors is None:
return None
if len(errors) < 1:
# See `Collection._fetch()`.
raise ClientException("successful error response", data)
return RedditAPIException(errors)
@classmethod
def check_error(cls, data: Union[List[Any], Dict[str, Dict[str, str]]]):
"""Raise an error if the argument resolves to an error object."""
error = cls.parse_error(data)
if error:
raise error
def __init__(self, reddit: "praw.Reddit", parsers: Optional[Dict[str, Any]] = None):
"""Initialize an Objector instance.
:param reddit: An instance of :class:`~.Reddit`.
"""
self.parsers = {} if parsers is None else parsers
self._reddit = reddit
def _objectify_dict(self, data):
"""Create RedditBase objects from dicts.
:param data: The structured data, assumed to be a dict.
:returns: An instance of :class:`~.RedditBase`.
"""
if {"conversation", "messages", "modActions"}.issubset(data):
parser = self.parsers["ModmailConversation"]
elif {"actionTypeId", "author", "date"}.issubset(data):
# Modmail mod action
data = snake_case_keys(data)
parser = self.parsers["ModmailAction"]
elif {"bodyMarkdown", "isInternal"}.issubset(data):
# Modmail message
data = snake_case_keys(data)
parser = self.parsers["ModmailMessage"]
elif {"kind", "short_name", "violation_reason"}.issubset(data):
# This is a Rule
parser = self.parsers["rule"]
elif {"isAdmin", "isDeleted"}.issubset(data):
# Modmail author
data = snake_case_keys(data)
# Prevent clobbering base-36 id
del data["id"]
data["is_subreddit_mod"] = data.pop("is_mod")
parser = self.parsers[self._reddit.config.kinds["redditor"]]
elif {"banStatus", "muteStatus", "recentComments"}.issubset(data):
# Modmail user
data = snake_case_keys(data)
data["created_string"] = data.pop("created")
parser = self.parsers[self._reddit.config.kinds["redditor"]]
elif {"displayName", "id", "type"}.issubset(data):
# Modmail subreddit
data = snake_case_keys(data)
parser = self.parsers[self._reddit.config.kinds[data["type"]]]
elif {"date", "id", "name"}.issubset(data) or {
"id",
"name",
"permissions",
}.issubset(data):
parser = self.parsers[self._reddit.config.kinds["redditor"]]
elif {"text", "url"}.issubset(data):
if "color" in data or "linkUrl" in data:
parser = self.parsers["Button"]
else:
parser = self.parsers["MenuLink"]
elif {"children", "text"}.issubset(data):
parser = self.parsers["Submenu"]
elif {"height", "url", "width"}.issubset(data):
parser = self.parsers["Image"]
elif {"isSubscribed", "name", "subscribers"}.issubset(data):
# discards icon and subscribed information
return self._reddit.subreddit(data["name"])
elif {"authorFlairType", "name"}.issubset(data):
# discards flair information
return self._reddit.redditor(data["name"])
elif {"parent_id"}.issubset(data):
parser = self.parsers[self._reddit.config.kinds["comment"]]
elif "collection_id" in data.keys():
parser = self.parsers["Collection"]
elif {"moderators", "moderatorIds", "allUsersLoaded", "subredditId"}.issubset(
data
):
data = snake_case_keys(data)
moderators = []
for mod_id in data["moderator_ids"]:
mod = snake_case_keys(data["moderators"][mod_id])
mod["mod_permissions"] = list(mod["mod_permissions"].keys())
moderators.append(mod)
data["moderators"] = moderators
parser = self.parsers["moderator-list"]
elif "username" in data.keys():
data["name"] = data.pop("username")
parser = self.parsers[self._reddit.config.kinds["redditor"]]
else:
if "user" in data:
parser = self.parsers[self._reddit.config.kinds["redditor"]]
data["user"] = parser.parse({"name": data["user"]}, self._reddit)
return data
return parser.parse(data, self._reddit)
def objectify(
self, data: Optional[Union[Dict[str, Any], List[Any]]]
) -> Optional[Union[RedditBase, Dict[str, Any], List[Any]]]:
"""Create RedditBase objects from data.
:param data: The structured data.
:returns: An instance of :class:`~.RedditBase`, or ``None`` if given ``data`` is
``None``.
"""
# pylint: disable=too-many-return-statements
if data is None: # 204 no content
return None
if isinstance(data, list):
return [self.objectify(item) for item in data]
if "json" in data and "errors" in data["json"]:
errors = data["json"]["errors"]
if len(errors) > 0:
raise RedditAPIException(errors)
if "kind" in data and (
"shortName" in data or data["kind"] in ("menu", "moderators")
):
# This is a widget
parser = self.parsers.get(data["kind"], self.parsers["widget"])
return parser.parse(data, self._reddit)
if {"kind", "data"}.issubset(data) and data["kind"] in self.parsers:
parser = self.parsers[data["kind"]]
return parser.parse(data["data"], self._reddit)
if "json" in data and "data" in data["json"]:
if "websocket_url" in data["json"]["data"]:
return data
if "things" in data["json"]["data"]: # Submission.reply
return self.objectify(data["json"]["data"]["things"])
if "rules" in data["json"]["data"]:
return self.objectify(loads(data["json"]["data"]["rules"]))
if "url" in data["json"]["data"]: # Subreddit.submit
# The URL is the URL to the submission, so it's removed.
del data["json"]["data"]["url"]
parser = self.parsers[self._reddit.config.kinds["submission"]]
if data["json"]["data"]["id"].startswith(
f"{self._reddit.config.kinds['submission']}_"
):
                    # With polls, Reddit returns a fullname but calls it an "id". This
                    # coerces the fullname back into a plain id.
data["json"]["data"]["id"] = data["json"]["data"]["id"].split(
"_", 1
)[1]
else:
parser = self.parsers["LiveUpdateEvent"]
return parser.parse(data["json"]["data"], self._reddit)
if "rules" in data:
return self.objectify(data["rules"])
elif isinstance(data, dict):
return self._objectify_dict(data)
return data
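# A small, hedged illustration of parse_error on a typical Reddit error
# payload (the error triple below is illustrative only):
#
#     exc = Objector.parse_error(
#         {"json": {"errors": [["RATELIMIT", "try again later", "ratelimit"]]}}
#     )
#     isinstance(exc, RedditAPIException)  # True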
|
from .core import *
SCHEMA_VERSION = 'v2.6.5'
SCHEMA_URL = 'https://vega.github.io/schema/vega/v2.6.5.json'
|
"""Various input/output utility functions"""
from typing import Any, Optional
import os
import re
from io import BytesIO
import cloudpickle
import pandas as pd
from zstandard import ZstdCompressor, ZstdDecompressor
COMPRESSION_MAX_OUTPUT_SIZE = 10 ** 9 # 1GB
def pickle_dumps(variable: object) -> bytes:
pickle: bytes = cloudpickle.dumps(variable)
return pickle
def pickle_loads(dumped_pickle: bytes) -> Any:
return cloudpickle.loads(dumped_pickle)
def save_df(df: pd.DataFrame, format: str = "csv") -> bytes:
    pandas_version: int = int(re.sub("[^0-9]", "", pd.__version__))  # e.g. "1.2.0" -> 120
if format == "csv":
csv_buffer = BytesIO()
if pandas_version >= 120:
df.to_csv(csv_buffer, index=False)
else:
csv_buffer.write(df.to_csv(index=False).encode("utf-8"))
csv_buffer.seek(0)
return csv_buffer.getvalue()
else:
raise ValueError("Invalid method: {method}. Choose 'csv'.")
def compress(data: bytes, method: Optional[str] = "zstd") -> bytes:
if method == "zstd":
compressor = ZstdCompressor(level=3, write_checksum=True)
compressed_data = compressor.compress(data)
elif method is None:
compressed_data = data
# elif compression == "lz4":
# import lz4.frame
# data = lz4.frame.compress(data, compression_level=3, content_checksum=True)
else:
raise ValueError("Invalid compression method: {method}. Choose 'zstd' or None.")
return compressed_data
def decompress(
data: bytes, method: Optional[str] = "zstd", max_output_size: int = COMPRESSION_MAX_OUTPUT_SIZE
) -> bytes:
if method == "zstd":
decompressor = ZstdDecompressor()
decompressed_data = decompressor.decompress(data, max_output_size=max_output_size)
elif method is None:
decompressed_data = data
else:
raise ValueError("Invalid compression method: {method}. Choose 'zstd' or None.")
return decompressed_data
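# A hedged round-trip sketch of the helpers above: serialize, compress,
# decompress, and deserialize; the DataFrame is illustrative only.
if __name__ == "__main__":
    df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
    payload = compress(pickle_dumps(df))
    restored = pickle_loads(decompress(payload))
    assert restored.equals(df)
    assert save_df(df).startswith(b"a,b")  # CSV header row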
|
# coding: utf-8
from __future__ import unicode_literals
from io import StringIO, BytesIO
from pathlib import Path
import pytest
from .util import load_test_model
from ..tokens import Doc
from ..strings import StringStore
from .. import util
# These languages are used for generic tokenizer tests – only add a language
# here if it's using spaCy's tokenizer (not a different library)
# TODO: re-implement generic tokenizer tests
_languages = ['bn', 'da', 'de', 'en', 'es', 'fi', 'fr', 'ga', 'he', 'hu', 'id',
'it', 'nb', 'nl', 'pl', 'pt', 'ro', 'ru', 'sv', 'tr', 'ar', 'xx']
_models = {'en': ['en_core_web_sm'],
'de': ['de_core_news_sm'],
'fr': ['fr_core_news_sm'],
'xx': ['xx_ent_web_sm'],
'en_core_web_md': ['en_core_web_md'],
'es_core_news_md': ['es_core_news_md']}
# only used for tests that require loading the models
# in all other cases, use specific instances
@pytest.fixture(params=_models['en'])
def EN(request):
return load_test_model(request.param)
@pytest.fixture(params=_models['de'])
def DE(request):
return load_test_model(request.param)
@pytest.fixture(params=_models['fr'])
def FR(request):
return load_test_model(request.param)
@pytest.fixture()
def RU(request):
pymorphy = pytest.importorskip('pymorphy2')
return util.get_lang_class('ru')()
#@pytest.fixture(params=_languages)
#def tokenizer(request):
#lang = util.get_lang_class(request.param)
#return lang.Defaults.create_tokenizer()
@pytest.fixture
def tokenizer():
return util.get_lang_class('xx').Defaults.create_tokenizer()
@pytest.fixture
def en_tokenizer():
return util.get_lang_class('en').Defaults.create_tokenizer()
@pytest.fixture
def en_vocab():
return util.get_lang_class('en').Defaults.create_vocab()
@pytest.fixture
def en_parser(en_vocab):
nlp = util.get_lang_class('en')(en_vocab)
return nlp.create_pipe('parser')
@pytest.fixture
def es_tokenizer():
return util.get_lang_class('es').Defaults.create_tokenizer()
@pytest.fixture
def de_tokenizer():
return util.get_lang_class('de').Defaults.create_tokenizer()
@pytest.fixture
def fr_tokenizer():
return util.get_lang_class('fr').Defaults.create_tokenizer()
@pytest.fixture
def hu_tokenizer():
return util.get_lang_class('hu').Defaults.create_tokenizer()
@pytest.fixture
def fi_tokenizer():
return util.get_lang_class('fi').Defaults.create_tokenizer()
@pytest.fixture
def ro_tokenizer():
return util.get_lang_class('ro').Defaults.create_tokenizer()
@pytest.fixture
def id_tokenizer():
return util.get_lang_class('id').Defaults.create_tokenizer()
@pytest.fixture
def sv_tokenizer():
return util.get_lang_class('sv').Defaults.create_tokenizer()
@pytest.fixture
def bn_tokenizer():
return util.get_lang_class('bn').Defaults.create_tokenizer()
@pytest.fixture
def ga_tokenizer():
return util.get_lang_class('ga').Defaults.create_tokenizer()
@pytest.fixture
def he_tokenizer():
return util.get_lang_class('he').Defaults.create_tokenizer()
@pytest.fixture
def nb_tokenizer():
return util.get_lang_class('nb').Defaults.create_tokenizer()
@pytest.fixture
def da_tokenizer():
return util.get_lang_class('da').Defaults.create_tokenizer()
@pytest.fixture
def ja_tokenizer():
    mecab = pytest.importorskip("MeCab")
return util.get_lang_class('ja').Defaults.create_tokenizer()
@pytest.fixture
def th_tokenizer():
pythainlp = pytest.importorskip("pythainlp")
return util.get_lang_class('th').Defaults.create_tokenizer()
@pytest.fixture
def tr_tokenizer():
return util.get_lang_class('tr').Defaults.create_tokenizer()
@pytest.fixture
def ar_tokenizer():
return util.get_lang_class('ar').Defaults.create_tokenizer()
@pytest.fixture
def ru_tokenizer():
pymorphy = pytest.importorskip('pymorphy2')
return util.get_lang_class('ru').Defaults.create_tokenizer()
@pytest.fixture
def stringstore():
return StringStore()
@pytest.fixture
def en_entityrecognizer():
return util.get_lang_class('en').Defaults.create_entity()
@pytest.fixture
def text_file():
return StringIO()
@pytest.fixture
def text_file_b():
return BytesIO()
def pytest_addoption(parser):
parser.addoption("--models", action="store_true",
help="include tests that require full models")
parser.addoption("--vectors", action="store_true",
help="include word vectors tests")
parser.addoption("--slow", action="store_true",
help="include slow tests")
for lang in _languages + ['all']:
parser.addoption("--%s" % lang, action="store_true", help="Use %s models" % lang)
for model in _models:
if model not in _languages:
parser.addoption("--%s" % model, action="store_true", help="Use %s model" % model)
def pytest_runtest_setup(item):
for opt in ['models', 'vectors', 'slow']:
if opt in item.keywords and not item.config.getoption("--%s" % opt):
pytest.skip("need --%s option to run" % opt)
# Check if test is marked with models and has arguments set, i.e. specific
# language. If so, skip test if flag not set.
if item.get_marker('models'):
for arg in item.get_marker('models').args:
if not item.config.getoption("--%s" % arg) and not item.config.getoption("--all"):
pytest.skip("need --%s or --all option to run" % arg)
|
from loss.BCELoss import cal_bce_loss
from loss.HEL import cal_hel_loss
from loss.IOULoss import cal_iou_loss, cal_weighted_iou_loss
from loss.L12Loss import cal_mae_loss, cal_mse_loss
from loss.SSIM import cal_ssim_loss
supported_loss = dict(
bce=cal_bce_loss,
hel=cal_hel_loss,
iou=cal_iou_loss,
weighted_iou=cal_weighted_iou_loss,
mae=cal_mae_loss,
mse=cal_mse_loss,
ssim=cal_ssim_loss,
)
def get_loss_combination_with_cfg(loss_cfg: dict) -> dict:
loss_combination = {}
for loss_name, with_loss in loss_cfg.items():
if with_loss:
if loss_func := supported_loss.get(loss_name):
loss_combination[loss_name] = loss_func
else:
raise Exception(f"{loss_name} is not be supported!")
return loss_combination
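# A minimal usage sketch (illustrative config, assuming the loss modules
# above are importable): enable a subset of losses via a config dict; the
# commented summation line is a typical training-loop pattern.
if __name__ == "__main__":
    loss_combination = get_loss_combination_with_cfg(dict(bce=True, iou=True, ssim=False))
    print(sorted(loss_combination))  # ['bce', 'iou']
    # total_loss = sum(fn(pred, target) for fn in loss_combination.values())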
|
#!/usr/bin/env python3
#https://codeforces.com/group/H9K9zY8tcT/contest/297258/problem/B
#heap?
from queue import PriorityQueue
n = int(input())
g = {}  # node id -> [weight, unused, parent ids...]
c = {str(i): 0 for i in range(1, n + 1)}  # children count per node
for i in range(1, n + 1):
    k = str(i)
    g[k] = input().split()  # l[0]=weight; l[1]=no use; l[2:]=parents
    for p in g[k][2:]:
        c[p] += 1
# Seed the queue with childless nodes, lightest weight first.
q = PriorityQueue()
for k in c:
    if c[k] == 0:
        q.put((int(g[k][0]), k))
m = 0  # best (maximum) value of position + weight seen so far
i = n - 1  # positions are assigned from n-1 down to 0
while not q.empty():
    w, k = q.get()
    l = i + w
    i -= 1
    if l > m:
        m = l
    # Finishing this node may free its parents for processing.
    for p in g[k][2:]:
        c[p] -= 1
        if c[p] == 0:
            q.put((int(g[p][0]), p))
print(m)
|
from django.forms import ModelForm
from .models import MRIScan
class MRIScanForm(ModelForm):
class Meta:
model = MRIScan
fields = ['case_id', 't1', 't1ce', 't2', 'flair']
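# A minimal usage sketch (hypothetical view; the URL name and template path
# are assumptions, not part of this app):
#
#     def upload_scan(request):
#         form = MRIScanForm(request.POST or None, request.FILES or None)
#         if request.method == "POST" and form.is_valid():
#             form.save()
#             return redirect("scan_list")
#         return render(request, "mri/upload.html", {"form": form})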
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for task_set.tasks.fixed_text_rnn_classification."""
from absl.testing import parameterized
from task_set import registry
from task_set.tasks import family_test_utils
from task_set.tasks.fixed import fixed_text_rnn_classification # pylint: disable=unused-import
import tensorflow.compat.v1 as tf
class FixedTextRNNClassificationTest(family_test_utils.SingleTaskTestCase):
def test_right_number_of_tasks(self):
task_names = registry.task_registry.get_all_fixed_config_names()
self.assertLen(task_names, 12)
@parameterized.parameters(registry.task_registry.get_all_fixed_config_names())
def test_tasks(self, task_name):
self.task_test(registry.task_registry.get_instance(task_name))
if __name__ == "__main__":
tf.test.main()
|
# get hsv values using trackbar
import cv2
import numpy as np
# A required callback method that goes into the trackbar function.
def nothing(x):
pass
# Initializing the webcam feed.
cap = cv2.VideoCapture(0)
cap.set(3,1280)
cap.set(4,720)
# Create a window named trackbars.
cv2.namedWindow("Trackbars")
# Now create 6 trackbars that will control the lower and upper range of
# the H, S and V channels. The arguments are: trackbar name, window name,
# initial value, maximum value, and callback function. Hue ranges over
# 0-179; S and V range over 0-255.
cv2.createTrackbar("L - H", "Trackbars", 0, 179, nothing)
cv2.createTrackbar("L - S", "Trackbars", 0, 255, nothing)
cv2.createTrackbar("L - V", "Trackbars", 0, 255, nothing)
cv2.createTrackbar("U - H", "Trackbars", 179, 179, nothing)
cv2.createTrackbar("U - S", "Trackbars", 255, 255, nothing)
cv2.createTrackbar("U - V", "Trackbars", 255, 255, nothing)
while True:
# Start reading the webcam feed frame by frame.
ret, frame = cap.read()
if not ret:
break
    # Flip the frame horizontally (optional)
    frame = cv2.flip(frame, 1)
# Convert the BGR image to HSV image.
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# Get the new values of the trackbar in real time as the user changes
# them
l_h = cv2.getTrackbarPos("L - H", "Trackbars")
l_s = cv2.getTrackbarPos("L - S", "Trackbars")
l_v = cv2.getTrackbarPos("L - V", "Trackbars")
u_h = cv2.getTrackbarPos("U - H", "Trackbars")
u_s = cv2.getTrackbarPos("U - S", "Trackbars")
u_v = cv2.getTrackbarPos("U - V", "Trackbars")
# Set the lower and upper HSV range according to the value selected
# by the trackbar
lower_range = np.array([l_h, l_s, l_v])
upper_range = np.array([u_h, u_s, u_v])
# Filter the image and get the binary mask, where white represents
# your target color
mask = cv2.inRange(hsv, lower_range, upper_range)
# You can also visualize the real part of the target color (Optional)
res = cv2.bitwise_and(frame, frame, mask=mask)
    # Convert the binary mask to a 3-channel image so it can be
    # stacked with the other frames
    mask_3 = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
    # Stack the mask, the original frame and the filtered result
stacked = np.hstack((mask_3,frame,res))
# Show this stacked frame at 40% of the size.
cv2.imshow('Trackbars',cv2.resize(stacked,None,fx=0.4,fy=0.4))
# If the user presses ESC then exit the program
key = cv2.waitKey(1)
if key == 27:
break
# If the user presses `s` then print this array.
if key == ord('s'):
thearray = [[l_h,l_s,l_v],[u_h, u_s, u_v]]
print(thearray)
# Also save this array as penval.npy
np.save('penval',thearray)
break
# Release the camera & destroy the windows.
cap.release()
cv2.destroyAllWindows()
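# A follow-up sketch (hypothetical separate script): reload the calibrated
# HSV range saved above and apply it to a still image ('test.jpg' is an
# assumed filename).
#
#     lower, upper = np.load('penval.npy')
#     img = cv2.imread('test.jpg')
#     hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
#     mask = cv2.inRange(hsv_img, lower, upper)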
|
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-Direct3D10_1
GUID : 9b7e4c8f-342c-4106-a19f-4f2704f689f0
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=1, version=0)
class Microsoft_Windows_Direct3D10_1_1_0(Etw):
pattern = Struct(
"pObject" / Int64ul,
"CchOldDebugObjectName" / Int32ul,
"OldDebugObjectName" / Bytes(lambda this: this.CchOldDebugObjectName),
"CchNewDebugObjectName" / Int32ul,
"NewDebugObjectName" / Bytes(lambda this: this.CchNewDebugObjectName)
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=2, version=0)
class Microsoft_Windows_Direct3D10_1_2_0(Etw):
pattern = Struct(
"pObject" / Int64ul,
"CchDebugObjectName" / Int32ul,
"DebugObjectName" / Bytes(lambda this: this.CchDebugObjectName)
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=3, version=0)
class Microsoft_Windows_Direct3D10_1_3_0(Etw):
pattern = Struct(
"pID3D10_1Device" / Int64ul,
"pIDXGIDevice" / Int64ul,
"pIDXGIAdapter" / Int64ul,
"CreationFlags" / Int32ul,
"FeatureLevel" / Int32ul,
"hKMAdapter" / Int32ul,
"hUMAdapter" / Int64ul,
"UMAdapterVersion" / Int64ul,
"hKMDevice" / Int32ul,
"hUMDevice" / Int64ul,
"UMDeviceVersion" / Int64ul,
"UMDeviceFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=4, version=0)
class Microsoft_Windows_Direct3D10_1_4_0(Etw):
pattern = Struct(
"pID3D10_1Device" / Int64ul,
"pIDXGIDevice" / Int64ul,
"pIDXGIAdapter" / Int64ul,
"CreationFlags" / Int32ul,
"FeatureLevel" / Int32ul,
"hKMAdapter" / Int32ul,
"hUMAdapter" / Int64ul,
"UMAdapterVersion" / Int64ul,
"hKMDevice" / Int32ul,
"hUMDevice" / Int64ul,
"UMDeviceVersion" / Int64ul,
"UMDeviceFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=5, version=0)
class Microsoft_Windows_Direct3D10_1_5_0(Etw):
pattern = Struct(
"pID3D10_1Device" / Int64ul,
"pIDXGIDevice" / Int64ul,
"pIDXGIAdapter" / Int64ul,
"CreationFlags" / Int32ul,
"FeatureLevel" / Int32ul,
"hKMAdapter" / Int32ul,
"hUMAdapter" / Int64ul,
"UMAdapterVersion" / Int64ul,
"hKMDevice" / Int32ul,
"hUMDevice" / Int64ul,
"UMDeviceVersion" / Int64ul,
"UMDeviceFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=6, version=0)
class Microsoft_Windows_Direct3D10_1_6_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=7, version=0)
class Microsoft_Windows_Direct3D10_1_7_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=8, version=0)
class Microsoft_Windows_Direct3D10_1_8_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=9, version=0)
class Microsoft_Windows_Direct3D10_1_9_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=10, version=0)
class Microsoft_Windows_Direct3D10_1_10_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=11, version=0)
class Microsoft_Windows_Direct3D10_1_11_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=12, version=0)
class Microsoft_Windows_Direct3D10_1_12_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=13, version=0)
class Microsoft_Windows_Direct3D10_1_13_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=14, version=0)
class Microsoft_Windows_Direct3D10_1_14_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=15, version=0)
class Microsoft_Windows_Direct3D10_1_15_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=16, version=0)
class Microsoft_Windows_Direct3D10_1_16_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=17, version=0)
class Microsoft_Windows_Direct3D10_1_17_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=18, version=0)
class Microsoft_Windows_Direct3D10_1_18_0(Etw):
pattern = Struct(
"Resources" / Int32ul,
"pIDXGISurfaces" / Int64ul,
"hNewKMResources" / Int32ul
)
|
from __future__ import absolute_import
import sys
import os
import errno
import types
import gc
import signal
import traceback
from gevent.event import AsyncResult
from gevent.hub import get_hub, linkproxy, sleep, getcurrent
from gevent.fileobject import FileObject
from gevent.greenlet import Greenlet, joinall
spawn = Greenlet.spawn
import subprocess as __subprocess__
# Standard functions and classes that this module re-implements in a gevent-aware way.
__implements__ = ['Popen',
'call',
'check_call',
'check_output']
# Standard functions and classes that this module re-imports.
__imports__ = ['PIPE',
'STDOUT',
'CalledProcessError',
# Windows:
'CREATE_NEW_CONSOLE',
'CREATE_NEW_PROCESS_GROUP',
'STD_INPUT_HANDLE',
'STD_OUTPUT_HANDLE',
'STD_ERROR_HANDLE',
'SW_HIDE',
'STARTF_USESTDHANDLES',
'STARTF_USESHOWWINDOW']
__extra__ = ['MAXFD',
'_eintr_retry_call',
'STARTUPINFO',
'pywintypes',
'list2cmdline',
'_subprocess',
# Python 2.5 does not have _subprocess, so we don't use it
'WAIT_OBJECT_0',
'WaitForSingleObject',
'GetExitCodeProcess',
'GetStdHandle',
'CreatePipe',
'DuplicateHandle',
'GetCurrentProcess',
'DUPLICATE_SAME_ACCESS',
'GetModuleFileName',
'GetVersion',
'CreateProcess',
'INFINITE',
'TerminateProcess']
for name in __imports__[:]:
try:
value = getattr(__subprocess__, name)
globals()[name] = value
except AttributeError:
__imports__.remove(name)
__extra__.append(name)
if sys.version_info[:2] <= (2, 6):
__implements__.remove('check_output')
__extra__.append('check_output')
_subprocess = getattr(__subprocess__, '_subprocess', None)
_NONE = object()
for name in __extra__[:]:
if name in globals():
continue
value = _NONE
try:
value = getattr(__subprocess__, name)
except AttributeError:
if _subprocess is not None:
try:
value = getattr(_subprocess, name)
except AttributeError:
pass
if value is _NONE:
__extra__.remove(name)
else:
globals()[name] = value
__all__ = __implements__ + __imports__
mswindows = sys.platform == 'win32'
if mswindows:
import msvcrt
else:
import fcntl
import pickle
from gevent import monkey
fork = monkey.get_original('os', 'fork')
def call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
"""
return Popen(*popenargs, **kwargs).wait()
def check_call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete. If
the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call(["ls", "-l"])
"""
retcode = call(*popenargs, **kwargs)
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd)
return 0
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-1", "/dev/null"])
'/dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c", "echo hello world"], stderr=STDOUT)
'hello world\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = Popen(stdout=PIPE, *popenargs, **kwargs)
output = process.communicate()[0]
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
ex = CalledProcessError(retcode, cmd)
# on Python 2.6 and older CalledProcessError does not accept 'output' argument
ex.output = output
raise ex
return output
class Popen(object):
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0, threadpool=None):
"""Create new Popen instance."""
if not isinstance(bufsize, (int, long)):
raise TypeError("bufsize must be an integer")
hub = get_hub()
if mswindows:
if preexec_fn is not None:
raise ValueError("preexec_fn is not supported on Windows "
"platforms")
if close_fds and (stdin is not None or stdout is not None or
stderr is not None):
raise ValueError("close_fds is not supported on Windows "
"platforms if you redirect stdin/stdout/stderr")
if threadpool is None:
threadpool = hub.threadpool
self.threadpool = threadpool
self._waiting = False
else:
# POSIX
if startupinfo is not None:
raise ValueError("startupinfo is only supported on Windows "
"platforms")
if creationflags != 0:
raise ValueError("creationflags is only supported on Windows "
"platforms")
assert threadpool is None
self._loop = hub.loop
self.stdin = None
self.stdout = None
self.stderr = None
self.pid = None
self.returncode = None
self.universal_newlines = universal_newlines
self.result = AsyncResult()
# Input and output objects. The general principle is like
# this:
#
# Parent Child
# ------ -----
# p2cwrite ---stdin---> p2cread
# c2pread <--stdout--- c2pwrite
# errread <--stderr--- errwrite
#
# On POSIX, the child objects are file descriptors. On
# Windows, these are Windows file handles. The parent objects
# are file descriptors on both platforms. The parent objects
# are None when not using PIPEs. The child objects are None
# when not redirecting.
(p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite) = self._get_handles(stdin, stdout, stderr)
self._execute_child(args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
if mswindows:
if p2cwrite is not None:
p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
if c2pread is not None:
c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
if errread is not None:
errread = msvcrt.open_osfhandle(errread.Detach(), 0)
if p2cwrite is not None:
self.stdin = FileObject(p2cwrite, 'wb')
if c2pread is not None:
if universal_newlines:
self.stdout = FileObject(c2pread, 'rU')
else:
self.stdout = FileObject(c2pread, 'rb')
if errread is not None:
if universal_newlines:
self.stderr = FileObject(errread, 'rU')
else:
self.stderr = FileObject(errread, 'rb')
def __repr__(self):
return '<%s at 0x%x pid=%r returncode=%r>' % (self.__class__.__name__, id(self), self.pid, self.returncode)
def _on_child(self, watcher):
watcher.stop()
status = watcher.rstatus
if os.WIFSIGNALED(status):
self.returncode = -os.WTERMSIG(status)
else:
self.returncode = os.WEXITSTATUS(status)
self.result.set(self.returncode)
def communicate(self, input=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be a
string to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr)."""
greenlets = []
if self.stdin:
greenlets.append(spawn(write_and_close, self.stdin, input))
if self.stdout:
stdout = spawn(self.stdout.read)
greenlets.append(stdout)
else:
stdout = None
if self.stderr:
stderr = spawn(self.stderr.read)
greenlets.append(stderr)
else:
stderr = None
joinall(greenlets)
if self.stdout:
self.stdout.close()
if self.stderr:
self.stderr.close()
self.wait()
return (None if stdout is None else stdout.value or '',
None if stderr is None else stderr.value or '')
def poll(self):
return self._internal_poll()
if mswindows:
#
# Windows methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
if stdin is None and stdout is None and stderr is None:
return (None, None, None, None, None, None)
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
errread, errwrite = None, None
if stdin is None:
p2cread = GetStdHandle(STD_INPUT_HANDLE)
if p2cread is None:
p2cread, _ = CreatePipe(None, 0)
elif stdin == PIPE:
p2cread, p2cwrite = CreatePipe(None, 0)
elif isinstance(stdin, int):
p2cread = msvcrt.get_osfhandle(stdin)
else:
# Assuming file-like object
p2cread = msvcrt.get_osfhandle(stdin.fileno())
p2cread = self._make_inheritable(p2cread)
if stdout is None:
c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE)
if c2pwrite is None:
_, c2pwrite = CreatePipe(None, 0)
elif stdout == PIPE:
c2pread, c2pwrite = CreatePipe(None, 0)
elif isinstance(stdout, int):
c2pwrite = msvcrt.get_osfhandle(stdout)
else:
# Assuming file-like object
c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
c2pwrite = self._make_inheritable(c2pwrite)
if stderr is None:
errwrite = GetStdHandle(STD_ERROR_HANDLE)
if errwrite is None:
_, errwrite = CreatePipe(None, 0)
elif stderr == PIPE:
errread, errwrite = CreatePipe(None, 0)
elif stderr == STDOUT:
errwrite = c2pwrite
elif isinstance(stderr, int):
errwrite = msvcrt.get_osfhandle(stderr)
else:
# Assuming file-like object
errwrite = msvcrt.get_osfhandle(stderr.fileno())
errwrite = self._make_inheritable(errwrite)
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _make_inheritable(self, handle):
"""Return a duplicate of handle, which is inheritable"""
return DuplicateHandle(GetCurrentProcess(),
handle, GetCurrentProcess(), 0, 1,
DUPLICATE_SAME_ACCESS)
def _find_w9xpopen(self):
"""Find and return absolut path to w9xpopen.exe"""
w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
# Eeek - file-not-found - possibly an embedding
# situation - see if we can locate it in sys.exec_prefix
w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
raise RuntimeError("Cannot locate w9xpopen.exe, which is "
"needed for Popen to work with your "
"shell or platform.")
return w9xpopen
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
"""Execute program (MS Windows version)"""
if not isinstance(args, types.StringTypes):
args = list2cmdline(args)
# Process startup details
if startupinfo is None:
startupinfo = STARTUPINFO()
if None not in (p2cread, c2pwrite, errwrite):
startupinfo.dwFlags |= STARTF_USESTDHANDLES
startupinfo.hStdInput = p2cread
startupinfo.hStdOutput = c2pwrite
startupinfo.hStdError = errwrite
if shell:
startupinfo.dwFlags |= STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = '{} /c "{}"'.format(comspec, args)
if GetVersion() >= 0x80000000 or os.path.basename(comspec).lower() == "command.com":
# Win9x, or using command.com on NT. We need to
# use the w9xpopen intermediate program. For more
# information, see KB Q150956
# (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
w9xpopen = self._find_w9xpopen()
args = '"%s" %s' % (w9xpopen, args)
# Not passing CREATE_NEW_CONSOLE has been known to
# cause random failures on win9x. Specifically a
# dialog: "Your program accessed mem currently in
# use at xxx" and a hopeful warning about the
                    # stability of your system. The cost is that Ctrl+C
                    # won't kill children.
creationflags |= CREATE_NEW_CONSOLE
# Start the process
try:
hp, ht, pid, tid = CreateProcess(executable, args,
# no special security
None, None,
int(not close_fds),
creationflags,
env,
cwd,
startupinfo)
except pywintypes.error, e:
# Translate pywintypes.error to WindowsError, which is
# a subclass of OSError. FIXME: We should really
# translate errno using _sys_errlist (or similar), but
# how can this be done from Python?
raise WindowsError(*e.args)
finally:
# Child is launched. Close the parent's copy of those pipe
# handles that only the child should have open. You need
# to make sure that no handles to the write end of the
# output pipe are maintained in this process or else the
# pipe will not close when the child process exits and the
# ReadFile will hang.
if p2cread is not None:
p2cread.Close()
if c2pwrite is not None:
c2pwrite.Close()
if errwrite is not None:
errwrite.Close()
# Retain the process handle, but close the thread handle
self._handle = hp
self.pid = pid
ht.Close()
def _internal_poll(self):
"""Check if child process has terminated. Returns returncode
attribute.
"""
if self.returncode is None:
if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
self.returncode = GetExitCodeProcess(self._handle)
self.result.set(self.returncode)
return self.returncode
def rawlink(self, callback):
if not self.result.ready() and not self._waiting:
self._waiting = True
Greenlet.spawn(self._wait)
self.result.rawlink(linkproxy(callback, self))
# XXX unlink
def _blocking_wait(self):
WaitForSingleObject(self._handle, INFINITE)
self.returncode = GetExitCodeProcess(self._handle)
return self.returncode
def _wait(self):
self.threadpool.spawn(self._blocking_wait).rawlink(self.result)
def wait(self, timeout=None):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is None:
if not self._waiting:
self._waiting = True
self._wait()
return self.result.wait(timeout=timeout)
def send_signal(self, sig):
"""Send a signal to the process
"""
if sig == signal.SIGTERM:
self.terminate()
elif sig == signal.CTRL_C_EVENT:
os.kill(self.pid, signal.CTRL_C_EVENT)
elif sig == signal.CTRL_BREAK_EVENT:
os.kill(self.pid, signal.CTRL_BREAK_EVENT)
else:
raise ValueError("Unsupported signal: {}".format(sig))
def terminate(self):
"""Terminates the process
"""
TerminateProcess(self._handle, 1)
kill = terminate
else:
#
# POSIX methods
#
def rawlink(self, callback):
self.result.rawlink(linkproxy(callback, self))
# XXX unlink
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
errread, errwrite = None, None
if stdin is None:
pass
elif stdin == PIPE:
p2cread, p2cwrite = self.pipe_cloexec()
elif isinstance(stdin, int):
p2cread = stdin
else:
# Assuming file-like object
p2cread = stdin.fileno()
if stdout is None:
pass
elif stdout == PIPE:
c2pread, c2pwrite = self.pipe_cloexec()
elif isinstance(stdout, int):
c2pwrite = stdout
else:
# Assuming file-like object
c2pwrite = stdout.fileno()
if stderr is None:
pass
elif stderr == PIPE:
errread, errwrite = self.pipe_cloexec()
elif stderr == STDOUT:
errwrite = c2pwrite
elif isinstance(stderr, int):
errwrite = stderr
else:
# Assuming file-like object
errwrite = stderr.fileno()
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _set_cloexec_flag(self, fd, cloexec=True):
try:
cloexec_flag = fcntl.FD_CLOEXEC
except AttributeError:
cloexec_flag = 1
old = fcntl.fcntl(fd, fcntl.F_GETFD)
if cloexec:
fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
else:
fcntl.fcntl(fd, fcntl.F_SETFD, old & ~cloexec_flag)
def _remove_nonblock_flag(self, fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL) & (~os.O_NONBLOCK)
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
def pipe_cloexec(self):
"""Create a pipe with FDs set CLOEXEC."""
# Pipes' FDs are set CLOEXEC by default because we don't want them
# to be inherited by other subprocesses: the CLOEXEC flag is removed
# from the child's FDs by _dup2(), between fork() and exec().
# This is not atomic: we would need the pipe2() syscall for that.
r, w = os.pipe()
self._set_cloexec_flag(r)
self._set_cloexec_flag(w)
return r, w
def _close_fds(self, but):
if hasattr(os, 'closerange'):
os.closerange(3, but)
os.closerange(but + 1, MAXFD)
else:
for i in xrange(3, MAXFD):
if i == but:
continue
try:
os.close(i)
except:
pass
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
"""Execute program (POSIX version)"""
if isinstance(args, types.StringTypes):
args = [args]
else:
args = list(args)
if shell:
args = ["/bin/sh", "-c"] + args
if executable:
args[0] = executable
if executable is None:
executable = args[0]
self._loop.install_sigchld()
# For transferring possible exec failure from child to parent
# The first char specifies the exception type: 0 means
# OSError, 1 means some other error.
errpipe_read, errpipe_write = self.pipe_cloexec()
try:
try:
gc_was_enabled = gc.isenabled()
# Disable gc to avoid bug where gc -> file_dealloc ->
# write to stderr -> hang. http://bugs.python.org/issue1336
gc.disable()
try:
self.pid = fork()
except:
if gc_was_enabled:
gc.enable()
raise
if self.pid == 0:
# Child
try:
# Close parent's pipe ends
if p2cwrite is not None:
os.close(p2cwrite)
if c2pread is not None:
os.close(c2pread)
if errread is not None:
os.close(errread)
os.close(errpipe_read)
# When duping fds, if there arises a situation
# where one of the fds is either 0, 1 or 2, it
# is possible that it is overwritten (#12607).
if c2pwrite == 0:
c2pwrite = os.dup(c2pwrite)
if errwrite == 0 or errwrite == 1:
errwrite = os.dup(errwrite)
# Dup fds for child
def _dup2(a, b):
# dup2() removes the CLOEXEC flag but
# we must do it ourselves if dup2()
# would be a no-op (issue #10806).
if a == b:
self._set_cloexec_flag(a, False)
elif a is not None:
os.dup2(a, b)
self._remove_nonblock_flag(b)
_dup2(p2cread, 0)
_dup2(c2pwrite, 1)
_dup2(errwrite, 2)
# Close pipe fds. Make sure we don't close the
# same fd more than once, or standard fds.
closed = set([None])
for fd in [p2cread, c2pwrite, errwrite]:
if fd not in closed and fd > 2:
os.close(fd)
closed.add(fd)
# Close all other fds, if asked for
if close_fds:
self._close_fds(but=errpipe_write)
if cwd is not None:
os.chdir(cwd)
if preexec_fn:
preexec_fn()
if env is None:
os.execvp(executable, args)
else:
os.execvpe(executable, args, env)
except:
exc_type, exc_value, tb = sys.exc_info()
# Save the traceback and attach it to the exception object
exc_lines = traceback.format_exception(exc_type,
exc_value,
tb)
exc_value.child_traceback = ''.join(exc_lines)
os.write(errpipe_write, pickle.dumps(exc_value))
finally:
# Make sure that the process exits no matter what.
# The return code does not matter much as it won't be
# reported to the application
os._exit(1)
# Parent
self._watcher = self._loop.child(self.pid)
self._watcher.start(self._on_child, self._watcher)
if gc_was_enabled:
gc.enable()
finally:
# be sure the FD is closed no matter what
os.close(errpipe_write)
if p2cread is not None and p2cwrite is not None:
os.close(p2cread)
if c2pwrite is not None and c2pread is not None:
os.close(c2pwrite)
if errwrite is not None and errread is not None:
os.close(errwrite)
# Wait for exec to fail or succeed; possibly raising exception
errpipe_read = FileObject(errpipe_read, 'rb')
data = errpipe_read.read()
finally:
if hasattr(errpipe_read, 'close'):
errpipe_read.close()
else:
os.close(errpipe_read)
if data != "":
self.wait()
child_exception = pickle.loads(data)
for fd in (p2cwrite, c2pread, errread):
if fd is not None:
os.close(fd)
raise child_exception
def _handle_exitstatus(self, sts):
if os.WIFSIGNALED(sts):
self.returncode = -os.WTERMSIG(sts)
elif os.WIFEXITED(sts):
self.returncode = os.WEXITSTATUS(sts)
else:
# Should never happen
raise RuntimeError("Unknown child exit status!")
def _internal_poll(self):
"""Check if child process has terminated. Returns returncode
attribute.
"""
if self.returncode is None:
if get_hub() is not getcurrent():
sig_pending = getattr(self._loop, 'sig_pending', True)
if sig_pending:
sleep(0.00001)
return self.returncode
def wait(self, timeout=None):
"""Wait for child process to terminate. Returns returncode
attribute."""
return self.result.wait(timeout=timeout)
def send_signal(self, sig):
"""Send a signal to the process
"""
os.kill(self.pid, sig)
def terminate(self):
"""Terminate the process with SIGTERM
"""
self.send_signal(signal.SIGTERM)
def kill(self):
"""Kill the process with SIGKILL
"""
self.send_signal(signal.SIGKILL)
def write_and_close(fobj, data):
try:
if data:
fobj.write(data)
except (OSError, IOError), ex:
if ex.errno != errno.EPIPE and ex.errno != errno.EINVAL:
raise
finally:
try:
fobj.close()
except EnvironmentError:
pass
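# A minimal usage sketch (POSIX, Python 2, gevent installed; not part of the
# module's API): each call cooperates with the gevent hub instead of
# blocking the whole process.
if __name__ == '__main__':
    p = Popen(['echo', 'hello'], stdout=PIPE)
    out, _ = p.communicate()
    print out.strip()     # 'hello'
    print call(['true'])  # 0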
|
# Generated by Django 2.0.4 on 2018-04-24 01:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='post',
old_name='sub_Title',
new_name='sub_title',
),
migrations.RemoveField(
model_name='comment',
name='vote',
),
migrations.AddField(
model_name='comment',
name='text',
field=models.CharField(default=1, max_length=150),
preserve_default=False,
),
]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import mock
from exam import fixture
from sentry.interfaces.template import Template
from sentry.models import Event
from sentry.testutils import TestCase
class TemplateTest(TestCase):
@fixture
def interface(self):
return Template.to_python(dict(
filename='foo.html',
context_line='hello world',
lineno=1,
))
def test_serialize(self):
result = self.interface.to_json()
self.assertEquals(result['filename'], 'foo.html')
self.assertEquals(result['context_line'], 'hello world')
self.assertEquals(result['lineno'], 1)
def test_get_hash(self):
result = self.interface.get_hash()
self.assertEquals(result, ['foo.html', 'hello world'])
@mock.patch('sentry.interfaces.template.get_context')
@mock.patch('sentry.interfaces.template.Template.get_traceback')
def test_to_string_returns_traceback(self, get_traceback, get_context):
get_traceback.return_value = 'traceback'
event = mock.Mock(spec=Event)
result = self.interface.to_string(event)
get_traceback.assert_called_once_with(event, get_context.return_value)
self.assertEquals(result, 'Stacktrace (most recent call last):\n\ntraceback')
def test_serialize_unserialize_behavior(self):
result = type(self.interface).to_python(self.interface.to_json())
assert result.to_json() == self.interface.to_json()
|
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
import ConfigParser
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
TCT_CONFIG_FILE = "/opt/tools/TCT_CONFIG"
tct_parser = ConfigParser.ConfigParser()
tct_parser.read(TCT_CONFIG_FILE)
SRC_DIR = tct_parser.get('DEVICE', 'DEVICE_SUITE_TARGET_30')
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
def doCMD(cmd):
    # No need to handle timeouts in this short script; let the tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
        if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def updateCMD(cmd=None):
if "pkgcmd" in cmd:
cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
return cmd
def getUSERID():
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell id -u %s" % (
PARAMETERS.device, PARAMETERS.user)
else:
cmd = "ssh %s \"id -u %s\"" % (
PARAMETERS.device, PARAMETERS.user )
return doCMD(cmd)
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_pkg_id = None
for line in output:
if line.find("[" + pkg_name + "]") != -1:
pkgidIndex = line.split().index("pkgid")
test_pkg_id = line.split()[pkgidIndex+1].strip("[]")
break
return test_pkg_id
def doRemoteCMD(cmd=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
else:
cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
if PARAMETERS.mode == "SDB":
cmd_prefix = "sdb -s %s push" % PARAMETERS.device
cmd = "%s %s %s" % (cmd_prefix, src, dest)
else:
cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
(return_code, output) = doCMD(cmd)
doRemoteCMD("sync")
if return_code != 0:
return True
else:
return False
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".xpk"):
pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
if not pkg_id:
action_status = False
continue
(return_code, output) = doRemoteCMD(
"pkgcmd -u -t xpk -q -n %s" % pkg_id)
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD(
"rm -rf %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
return action_status
def instPKGs():
action_status = True
(return_code, output) = doRemoteCMD(
"mkdir -p %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".xpk"):
if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
action_status = False
(return_code, output) = doRemoteCMD(
"pkgcmd -i -t xpk -q -p %s/%s" % (SRC_DIR, file))
doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
for line in output:
if "Failure" in line:
action_status = False
break
# Do some special copy/delete... steps
'''
(return_code, output) = doRemoteCMD(
"mkdir -p %s/tests" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
action_status = False
'''
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
opts_parser.add_option(
"-a", dest="user", action="store", help="User name")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception, e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.user:
PARAMETERS.user = "owner"
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
user_info = getUSERID()
re_code = user_info[0]
    if re_code == 0:
global XW_ENV
userid = user_info[1][0]
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
else:
print "[Error] cmd commands error : %s"%str(user_info[1])
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
|
from django.shortcuts import render
def contrib_file(request):
return render(request, "dpaste/contribute.html")
|
from tkinter import messagebox
from ClientInsert import *
class ClientEdit(ClientInsert):
def __init__(self, db, id_cliente, master):
super().__init__(db, master)
self.title('Editar Cliente')
self.__id_cliente = id_cliente
self.__list = master
table_cliente = db.select("CLIENTE", ["*"],
['id_cliente'], [str(id_cliente)])[0]
table_municipio = db.select("MUNICIPIO",
["id_uf_municipio", "nome_municipio"],
["id_municipio"],
[str(table_cliente["id_municipio_cliente"])])[0]
table_uf = db.select("UF",
["nome_uf"],
["id_uf"],
[str(table_municipio["id_uf_municipio"])])[0]
table_telefone = db.select("TELEFONE",
["numero_telefone", "ddd_telefone"],
["id_cliente_telefone"],
[str(id_cliente)])
telefones = ""
for telefone in table_telefone:
if (telefone['ddd_telefone'] != 0 and
telefone['numero_telefone'] != 0):
telefones += str(telefone['ddd_telefone'])
telefones += str(telefone['numero_telefone'])
self._ClientForm__str_rsocial.set(
table_cliente['rsocial_cliente'])
self._ClientForm__str_nfantasia.set(
table_cliente['nfantasia_cliente'])
self._ClientForm__tracer_cnpj.set(
str(table_cliente['cnpj_cliente']))
self._ClientForm__tracer_iestadual.set(
str(table_cliente['iestadual_cliente']))
self._ClientForm__tracer_imunicipal.set(
str(table_cliente['imunicipal_cliente']))
self._ClientForm__str_logradouro.set(
table_cliente['logradouro_cliente'])
self._ClientForm__str_complemento.set(
table_cliente['complemento_cliente'])
self._ClientForm__tracer_cep.set(
str(table_cliente['cep_cliente']))
self._ClientForm__tracer_telefone.set(
telefones)
celular = str(table_cliente['ddd_cel_cliente'])
celular += str(table_cliente['ncel_cliente'])
self._ClientForm__tracer_ncel.set(
celular)
self._ClientForm__str_bairro.set(
table_cliente['bairro_cliente'])
self._ClientForm__str_email.set(
table_cliente['email_cliente'])
self._ClientForm__str_url.set(
table_cliente['url_cliente'])
self._ClientForm__str_municipio.set(table_municipio["nome_municipio"])
self._ClientForm__str_uf.set(table_uf["nome_uf"])
self._ClientForm__int_whatsapp.set(table_cliente["whatsapp_cliente"])
self._ClientForm__button_salvar.config(
command=self.__button_salvar_action)
for i in range(0, len(self._ClientInsert__list_ufs)):
if self._ClientInsert__list_ufs[i] == table_uf['nome_uf']:
self._ClientForm__combo_uf.current(i)
for i in range(0, len(self._ClientInsert__list_municipios)):
if self._ClientInsert__list_municipios[i] == table_municipio['nome_municipio']:
self._ClientForm__combo_municipio.current(i)
def __button_salvar_action(self):
        data = self._ClientInsert__data_validation()
        if data is None:
            return
        self.__database_update(data)
def __database_update(self, data):
rsocial = data[0]
nfantasia = data[1]
cnpj = data[2]
iestadual = data[3]
imunicipal = data[4]
logradouro = data[5]
complemento = data[6]
bairro = data[7]
municipio = data[8]
uf = data[9]
cep = data[10]
telefone = data[11]
ncel = data[12]
whatsapp = data[13]
email = data[14]
url = data[15]
uf_id = str(self._ClientInsert__db.select("UF",
['id_uf'], ['nome_uf'], [uf])[0]['id_uf'])
municipio_id = self._ClientInsert__db.select("MUNICIPIO",
['id_municipio'],
['nome_municipio', 'id_uf_municipio'],
[municipio, uf_id])
if len(municipio_id) == 0:
self._ClientInsert__db.insert("MUNICIPIO",
['nome_municipio', 'id_uf_municipio'],
[municipio, uf_id])
municipio_id = str(
self._ClientInsert__db.last_insert_id()[0]['LAST_INSERT_ID()'])
else:
municipio_id = str(municipio_id[0]['id_municipio'])
self._ClientInsert__db.update("CLIENTE",
['bairro_cliente',
'cep_cliente',
'rsocial_cliente',
'ncel_cliente',
'ddd_cel_cliente',
'nfantasia_cliente',
'whatsapp_cliente',
'cnpj_cliente',
'iestadual_cliente',
'imunicipal_cliente',
'logradouro_cliente',
'email_cliente',
'complemento_cliente',
'url_cliente',
'id_municipio_cliente'],
[bairro,
cep,
rsocial,
ncel[2:],
ncel[:2],
nfantasia,
whatsapp,
cnpj,
iestadual,
imunicipal,
logradouro,
email,
complemento,
url,
municipio_id],
['id_cliente'],
[str(self.__id_cliente)])
table_telefone_id = self._ClientInsert__db.select("TELEFONE",
['id_telefone'],
['id_cliente_telefone'],
[str(self.__id_cliente)])
self._ClientInsert__db.update("TELEFONE",
['ddd_telefone',
'numero_telefone'],
[self._ClientInsert__ddd_telefone,
self._ClientInsert__number_telefone],
['id_telefone'],
[str(table_telefone_id[0]['id_telefone'])])
self._ClientInsert__db.update("TELEFONE",
['ddd_telefone',
'numero_telefone'],
[self._ClientInsert__ddd_telefone2,
self._ClientInsert__number_telefone2],
['id_telefone'],
[str(table_telefone_id[1]['id_telefone'])])
self._ClientInsert__db.update("TELEFONE",
['ddd_telefone',
'numero_telefone'],
[self._ClientInsert__ddd_telefone3,
self._ClientInsert__number_telefone3],
['id_telefone'],
[str(table_telefone_id[2]['id_telefone'])])
messagebox.showinfo("Informação", "Dados alterados!", parent=self)
self.destroy()
self.__list.filter_client()
|
template = """<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Title of the document</title>
<script type="text/javascript" src="https://s3.tradingview.com/tv.js"></script>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/milligram/1.3.0/milligram.min.css">
<style>
.tradingview-widget-container {{
position: sticky;
top: 20px;
}}
.stocks-view {{
display: flex;
flex-wrap: nowrap;
}}
.stocks-listing {{
width: 780px;
flex-wrap: nowrap;
padding: 20px;
}}
.stocks-graph {{
flex-wrap: nowrap;
padding: 20px;
}}
th.sticky-header {{
position: sticky;
top: 0;
z-index: 10;
background-color: white;
}}
.positive-movement {{
color: green;
font-weight: bold;
}}
.negative-movement {{
color: red;
font-weight: bold;
}}
.blue-category {{
background-color: lightsteelblue;
}}
</style>
</head>
<body>
{}
<div class="stocks-view">
<div class="stocks-listing">
<table>
<thead>
<tr>
<th class="sticky-header">Symbol</th>
<th class="sticky-header">April 1 2019</th>
<th class="sticky-header">Dec 2 2019</th>
<th class="sticky-header">Today</th>
<th class="sticky-header">Movement since April 1 2019</th>
<th class="sticky-header">Movement since Dec 2 2019</th>
<th class="sticky-header">Bankruptcy probability</th>
</tr>
</thead>
<tbody>
{}
</tbody>
</table>
</div>
<div class="stocks-graph"
<!-- TradingView Widget BEGIN -->
<div class="tradingview-widget-container">
<div id="tradingview_63a66"></div>
<div class="tradingview-widget-copyright"><a href="https://www.tradingview.com/symbols/AAPL/" rel="noopener" target="_blank"><span class="blue-text">AAPL Chart</span></a> by TradingView</div>
</div>
<!-- TradingView Widget END -->
</div>
</div>
<script type="text/javascript">
function renderChart(symbol) {{
new TradingView.widget(
{{
"width": 750,
"height": 500,
"symbol": symbol,
"interval": "180",
"timezone": "Etc/UTC",
"theme": "light",
"style": "1",
"locale": "en",
"toolbar_bg": "#f1f3f6",
"enable_publishing": false,
"allow_symbol_change": true,
"container_id": "tradingview_63a66"
}}
);
}}
document.addEventListener('DOMContentLoaded', function(){{
renderChart('BA');
}}, false);
</script>
</body>
</html>"""
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
_log = logging.getLogger(__name__)
class Update(AbstractStep):
@classmethod
def options(cls):
return AbstractStep.options() + [
Options.non_interactive,
Options.update,
Options.quiet,
]
def run(self, state):
if not self._options.update:
return
_log.info("Updating working directory")
self._tool.executive.run_and_throw_if_fail(self._update_command(), quiet=self._options.quiet, cwd=self._tool.scm().checkout_root)
def _update_command(self):
update_command = self._tool.deprecated_port().update_webkit_command(self._options.non_interactive)
return update_command
|
# Daniel Mc Callion
# This program prints the summer months
months = ("January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December")
summer = months[4:7]
for month in summer:
print(month)
|
class FieldC():
def __init__(self, name, column_type, primary_key, default):
self.name = name
self.column_type = column_type
self.primary_key = primary_key
self.default = default
def __str__(self):
return '<%s, %s:%s>' % (self.__class__.__name__, self.column_type, self.name)
class StringFieldC(FieldC):
    def __init__(self, name=None, primary_key=False, default=None, ddl='varchar(255)'):
super().__init__(name, ddl, primary_key, default)
class TinyIntFieldC(FieldC):
def __init__(self, name=None, default=0):
super().__init__(name, 'tinyint', False, default)
class IntFieldC(FieldC):
def __init__(self, name=None, primary_key=False, default=0):
super().__init__(name, 'int', primary_key, default)
class BigIntFieldC(FieldC):
def __init__(self, name=None, primary_key=False, default=0):
super().__init__(name, 'bigint', primary_key, default)
class DoubleFieldC(FieldC):
def __init__(self, name=None, primary_key=False, default=0.0):
super().__init__(name, 'double', primary_key, default)
class TextFieldC(FieldC):
def __init__(self, name=None, default=None):
super().__init__(name, 'text', False, default)
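# --- illustrative sketch ------------------------------------------------------
# A minimal, hypothetical model definition combining the field classes above;
# the `User` class and its columns are assumptions, not part of the original.
if __name__ == '__main__':
    class User:
        id = IntFieldC(name='id', primary_key=True)
        email = StringFieldC(name='email')
        balance = DoubleFieldC(name='balance')
    for field in (User.id, User.email, User.balance):
        print(field)  # e.g. <IntFieldC, int:id>
# --- end sketch ---------------------------------------------------------------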
|
from django.contrib.auth import get_user_model
from django.test import TestCase
class UsersManagersTests(TestCase):
"""
Test user creation manager
"""
def test_create_user(self):
"""
Creates a new user with email as primary identifier instead of username
"""
User = get_user_model()
user = User.objects.create_user(email="normal@user.com", password="foo")
self.assertEqual(user.email, "normal@user.com")
self.assertTrue(user.is_active)
self.assertFalse(user.is_staff)
self.assertFalse(user.is_superuser)
try:
# username is None for the AbstractUser option
# username does not exist for the AbstractBaseUser option
self.assertIsNone(user.username)
except AttributeError:
pass
with self.assertRaises(TypeError):
User.objects.create_user()
with self.assertRaises(TypeError):
User.objects.create_user(email="")
with self.assertRaises(ValueError):
User.objects.create_user(email="", password="foo")
def test_create_superuser(self):
"""
Creates a superuser with the custom user model
"""
User = get_user_model()
admin_user = User.objects.create_superuser(email="super@user.com", password="foo")
self.assertEqual(admin_user.email, "super@user.com")
self.assertTrue(admin_user.is_active)
self.assertTrue(admin_user.is_staff)
self.assertTrue(admin_user.is_superuser)
try:
# username is None for the AbstractUser option
# username does not exist for the AbstractBaseUser option
self.assertIsNone(admin_user.username)
except AttributeError:
pass
with self.assertRaises(ValueError):
User.objects.create_superuser(email="super@user.com", password="foo", is_superuser=False)
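# --- illustrative sketch ------------------------------------------------------
# The tests above assume a custom user model whose manager creates users keyed
# by email. A minimal manager that would satisfy them might look like this;
# the names below are assumptions, not taken from the project under test.
from django.contrib.auth.base_user import BaseUserManager

class CustomUserManager(BaseUserManager):
    def create_user(self, email, password, **extra_fields):
        if not email:
            raise ValueError("The email must be set")
        user = self.model(email=self.normalize_email(email), **extra_fields)
        user.set_password(password)
        user.save()
        return user

    def create_superuser(self, email, password, **extra_fields):
        extra_fields.setdefault("is_staff", True)
        extra_fields.setdefault("is_superuser", True)
        if extra_fields.get("is_superuser") is not True:
            raise ValueError("Superuser must have is_superuser=True.")
        return self.create_user(email, password, **extra_fields)
# --- end sketch ---------------------------------------------------------------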
|
import pysam
from optparse import OptionParser
from x_gene_annotation import *
class mRNA_Transfer():
def call_transfer_mut(self, sf_rna, sf_dna_up, sf_dna_bottom, sf_candidate):
m_rna_vars = self.load_variants(sf_rna)
m_DNA_RNA_ovlp_vars = self.get_overlap_variants(sf_dna_bottom, m_rna_vars)
m_candidates = self.get_mut_exclusive_var(sf_dna_up, m_DNA_RNA_ovlp_vars)
self.get_sub_var(sf_rna, m_candidates, sf_candidate)
def load_variants(self, sf_vcfFile):
vcf = pysam.VariantFile(sf_vcfFile)
m_variants = {}
for unique_id, var in enumerate(vcf.fetch()):
chrm = var.chrom
start = var.pos
s_id = chrm + "~" + str(start) + "~" + str(var.ref) + "_" + str(var.alts[0])
m_variants[s_id] = 1
return m_variants
def get_overlap_variants(self, sf_vcfFile, m_existing_vars):
vcf = pysam.VariantFile(sf_vcfFile)
m_variants = {}
for unique_id, var in enumerate(vcf.fetch()):
chrm = var.chrom
start = var.pos
s_id = chrm + "~" + str(start) + "~" + str(var.ref) + "_" + str(var.alts[0])
if s_id in m_existing_vars:
m_variants[s_id] = 1
return m_variants
def get_mut_exclusive_var(self, sf_vcfFile, m_existing_vars):
vcf = pysam.VariantFile(sf_vcfFile)
m_variants = {}
for unique_id, var in enumerate(vcf.fetch()):
chrm = var.chrom
start = var.pos
s_id = chrm + "~" + str(start) + "~" + str(var.ref) + "_" + str(var.alts[0])
            m_variants[s_id] = 1
m_mut_exc_var = {}
for s_id in m_existing_vars:
if s_id not in m_variants:
m_mut_exc_var[s_id] = 1
return m_mut_exc_var
def get_sub_var(self, sf_vcfFile, m_existing_vars, sf_sub_vcf):
vcf = pysam.VariantFile(sf_vcfFile)
vcf_out = pysam.VariantFile(sf_sub_vcf, 'w', header=vcf.header)
for unique_id, var in enumerate(vcf.fetch()):
chrm = var.chrom
start = var.pos
s_id = chrm + "~" + str(start) + "~" + str(var.ref) + "_" + str(var.alts[0])
            if s_id in m_existing_vars:
                vcf_out.write(var)
        vcf_out.close()
####
####
##parse the options:
def parse_option():
parser = OptionParser()
parser.add_option("-p", "--path", dest="wfolder", type="string",
help="Working folder")
parser.add_option("--gene", dest="gene", default="",
help="Gene Annotation file", metavar="FILE")
parser.add_option("--rna", dest="rna",
help="RNA mutation vcf file ", metavar="FILE")
parser.add_option("--phase", dest="phase",
help="Mutation phasing", metavar="FILE")
parser.add_option("--dna_up", dest="dna_up",
help="DNA mutation file of scion", metavar="FILE")
parser.add_option("--dna_bottom", dest="dna_bottom",
help="DNA mutation file of root ", metavar="FILE")
parser.add_option("-c", dest="cutoff", type="int", default=0,
help="cutoff of minimum supporting reads")
parser.add_option("-o", "--output", dest="output",
help="candidate mutation file", metavar="FILE")
(options, args) = parser.parse_args()
return (options, args)
####
if __name__ == '__main__':
(options, args) = parse_option()
sf_rna_mut=options.rna
sf_dna_up=options.dna_up
sf_dna_bottom=options.dna_bottom
sf_candidates=options.output
rna_transfer=mRNA_Transfer()
rna_transfer.call_transfer_mut(sf_rna_mut, sf_dna_up, sf_dna_bottom, sf_candidates)
sf_gene_annotation = options.gene
UP_DOWN_GENE=1500
if sf_gene_annotation !="":
gff = GFF3(sf_gene_annotation)
iextnd = UP_DOWN_GENE
gff.load_gene_annotation_with_extnd(iextnd)
gff.index_gene_annotation_interval_tree()
gff.annotate_results(sf_candidates, sf_candidates+".with_gene_annotation")
####
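# Example invocation (sketch; the script name and file names are placeholders
# for the options defined in parse_option() above):
#   python transfer_mut.py --rna rna.vcf --dna_up scion.vcf \
#       --dna_bottom root.vcf --gene genes.gff3 -o candidates.vcf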
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import pytest
from ....testing import example_data
from ...niftyreg import get_custom_path
from ..asl import FitAsl
from ...niftyreg.tests.test_regutils import no_nifty_tool
@pytest.mark.skipif(
no_nifty_tool(cmd='fit_asl'), reason="niftyfit is not installed")
def test_fit_asl():
""" Testing FitAsl interface."""
# Create the test node
fit_asl = FitAsl()
# Check if the command is properly defined
cmd = get_custom_path('fit_asl', env_dir='NIFTYFIT_DIR')
assert fit_asl.cmd == cmd
# test raising error with mandatory args absent
with pytest.raises(ValueError):
fit_asl.run()
# Tests on the interface:
# Runs cbf fitting assuming all tissue is GM!
in_file = example_data('asl.nii.gz')
fit_asl.inputs.source_file = in_file
cmd_tmp = '{cmd} -source {in_file} -cbf {cbf} -error {error} -syn {syn}'
expected_cmd = cmd_tmp.format(
cmd=cmd,
in_file=in_file,
cbf='asl_cbf.nii.gz',
error='asl_error.nii.gz',
syn='asl_syn.nii.gz',
)
assert fit_asl.cmdline == expected_cmd
# Runs cbf fitting using IR/SR T1 data to estimate the local T1 and uses
# the segmentation data to fit tissue specific blood flow parameters
# (lambda,transit times,T1)
fit_asl2 = FitAsl(sig=True)
in_file = example_data('asl.nii.gz')
t1map = example_data('T1map.nii.gz')
seg = example_data('segmentation0.nii.gz')
fit_asl2.inputs.source_file = in_file
fit_asl2.inputs.t1map = t1map
fit_asl2.inputs.seg = seg
cmd_tmp = '{cmd} -source {in_file} -cbf {cbf} -error {error} \
-seg {seg} -sig -syn {syn} -t1map {t1map}'
expected_cmd = cmd_tmp.format(
cmd=cmd,
in_file=in_file,
t1map=t1map,
seg=seg,
cbf='asl_cbf.nii.gz',
error='asl_error.nii.gz',
syn='asl_syn.nii.gz',
)
assert fit_asl2.cmdline == expected_cmd
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class DimensionMetadata(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsDimensionMetadata(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = DimensionMetadata()
x.Init(buf, n + offset)
return x
@classmethod
def DimensionMetadataBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
# DimensionMetadata
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# DimensionMetadata
def Format(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# DimensionMetadata
def DenseSize(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# DimensionMetadata
def ArraySegmentsType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# DimensionMetadata
def ArraySegments(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
from flatbuffers.table import Table
obj = Table(bytearray(), 0)
self._tab.Union(obj, o)
return obj
return None
# DimensionMetadata
def ArrayIndicesType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# DimensionMetadata
def ArrayIndices(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
from flatbuffers.table import Table
obj = Table(bytearray(), 0)
self._tab.Union(obj, o)
return obj
return None
def DimensionMetadataStart(builder): builder.StartObject(6)
def DimensionMetadataAddFormat(builder, format): builder.PrependInt8Slot(0, format, 0)
def DimensionMetadataAddDenseSize(builder, denseSize): builder.PrependInt32Slot(1, denseSize, 0)
def DimensionMetadataAddArraySegmentsType(builder, arraySegmentsType): builder.PrependUint8Slot(2, arraySegmentsType, 0)
def DimensionMetadataAddArraySegments(builder, arraySegments): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(arraySegments), 0)
def DimensionMetadataAddArrayIndicesType(builder, arrayIndicesType): builder.PrependUint8Slot(4, arrayIndicesType, 0)
def DimensionMetadataAddArrayIndices(builder, arrayIndices): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(arrayIndices), 0)
def DimensionMetadataEnd(builder): return builder.EndObject()
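# --- illustrative sketch (hand-written; not emitted by flatc) -----------------
# Building and re-reading a dense DimensionMetadata with the helpers above.
# The format value 0 is assumed to correspond to DimensionType.DENSE in the
# tflite schema.
def _example_build_dimension_metadata():
    builder = flatbuffers.Builder(0)
    DimensionMetadataStart(builder)
    DimensionMetadataAddFormat(builder, 0)
    DimensionMetadataAddDenseSize(builder, 4)
    dim = DimensionMetadataEnd(builder)
    builder.Finish(dim)
    buf = builder.Output()
    return DimensionMetadata.GetRootAsDimensionMetadata(buf, 0)
# --- end sketch ---------------------------------------------------------------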
|
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy
from openslides.utils.personal_info import PersonalInfo
from .models import Item
class AgendaPersonalInfo(PersonalInfo):
"""
Class for personal info block for the agenda app.
"""
headline = ugettext_lazy('I am on the list of speakers of the following items')
default_weight = 10
def get_queryset(self):
return Item.objects.filter(
speaker__person=self.request.user,
speaker__begin_time=None)
|
def test():
from tensorflow.keras import datasets
assert model.get_layer("class_prediction").get_config()["units"]==43, "Check the number of output classes"
assert model.get_layer("class_prediction").get_config()["activation"]=="softmax", "Check your activation function"
    assert model.output[0].name== 'class_prediction/Identity:0', "What does the output look like?"
    assert model.output[1].name== 'x1_prediction/Identity:0', "What does the output look like?"
    assert model.output[2].name== 'y1_prediction/Identity:0', "What does the output look like?"
    assert model.output[3].name== 'x2_prediction/Identity:0', "What does the output look like?"
    assert model.output[4].name== 'y2_prediction/Identity:0', "What does the output look like?"
assert model.get_layer("y1_prediction").get_config()["units"]==1, "Check the number of outputs"
assert model.get_layer("x2_prediction").get_config()["units"]==1, "Check the number of outputs"
assert model.get_layer("y2_prediction").get_config()["units"]==1, "Check the number of outputs"
assert model.get_layer("x1_prediction").get_config()["units"]==1, "Check the number of outputs"
__msg__.good("WELL DONE!")
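# --- illustrative sketch ------------------------------------------------------
# A model head consistent with the assertions in test(): a 43-way softmax
# classifier plus four single-unit box regressors. The input shape and
# backbone are assumptions, not part of the original exercise.
def _example_model():
    from tensorflow.keras import layers, Model
    inputs = layers.Input(shape=(64, 64, 3))
    features = layers.Flatten()(inputs)
    outputs = [
        layers.Dense(43, activation="softmax", name="class_prediction")(features),
        layers.Dense(1, name="x1_prediction")(features),
        layers.Dense(1, name="y1_prediction")(features),
        layers.Dense(1, name="x2_prediction")(features),
        layers.Dense(1, name="y2_prediction")(features),
    ]
    return Model(inputs=inputs, outputs=outputs)
# --- end sketch ---------------------------------------------------------------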
|
from rest_framework import serializers
from django.contrib.auth import get_user_model
from .models import CustomUser
User = get_user_model()
class TokenSerializer(serializers.Serializer):
"""
    Serializes the access and refresh token pair.
"""
access = serializers.CharField(max_length=255)
refresh = serializers.CharField(max_length=255)
class UserSerializer(serializers.ModelSerializer):
"""
    Serializes the User model.
"""
class Meta:
model = User
fields = "__all__"
def create(self, validated_data):
user = User(email=validated_data["email"])
user.set_password(validated_data['password'])
user.save()
return user
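# --- illustrative sketch ------------------------------------------------------
# Typical use in a view or shell (values are placeholders):
#   serializer = UserSerializer(data={"email": "a@b.com", "password": "s3cret"})
#   if serializer.is_valid():
#       user = serializer.save()  # dispatches to create() above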
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from setuptools import setup, find_packages
version = None
with open('jaeger_client/__init__.py', 'r') as f:
for line in f:
m = re.match(r'^__version__\s*=\s*(["\'])([^"\']+)\1', line)
if m:
version = m.group(2)
break
assert version is not None, \
'Could not determine version number from jaeger_client/__init__.py'
setup(
name='jaeger-client',
version=version,
url='https://github.com/jaegertracing/jaeger-client-python',
description='Jaeger Python OpenTracing Tracer implementation',
author='Yuri Shkuro',
author_email='ys@uber.com',
packages=find_packages(exclude=['crossdock', 'tests', 'example', 'tests.*']),
include_package_data=True,
license='Apache License 2.0',
zip_safe=False,
keywords='jaeger, tracing, opentracing',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
install_requires=[
'threadloop>=1,<2',
'thrift',
'tornado>=4.3',
'opentracing>=2.1,<3.0',
],
# Uncomment below if need to test with unreleased version of opentracing
# dependency_links=[
# 'git+ssh://git@github.com/opentracing/opentracing-python.git@BRANCHNAME#egg=opentracing',
# ],
test_suite='tests',
extras_require={
':python_version<"3"': [
'futures',
],
'tests': [
'mock==1.0.1',
'pycurl>=7.43,<8',
# pinned to avoid RemovedInPytest4Warning
'pytest>=3.7.0,<3.8.0',
'pytest-cov==2.5.1',
'coverage<4.4', # can remove after https://bitbucket.org/ned/coveragepy/issues/581/44b1-44-breaking-in-ci
'pytest-timeout==1.3.1',
'pytest-tornado',
# pin <3.2 as otherwise it requires pytest>=3.8
'pytest-benchmark[histogram]>=3.0.0rc1,<3.2',
'pytest-localserver',
'flake8',
'flake8-quotes',
'codecov',
'tchannel>=0.27;python_version=="2.7"', # This is only used in python 2
'opentracing_instrumentation>=3,<4',
'prometheus_client==0.3.1',
]
},
)
|
# Copyright (C) 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Configuration for libvirt objects.
Classes to represent the configuration of various libvirt objects
and support conversion to/from XML. These classes are solely concerned
by providing direct Object <-> XML document conversions. No policy or
operational decisions should be made by code in these classes. Such
policy belongs in the 'designer.py' module which provides simplified
helpers for populating up config object instances.
"""
import time
from collections import OrderedDict
from lxml import etree
from oslo_utils import strutils
from oslo_utils import units
from nova import exception
from nova.i18n import _
from nova.pci import utils as pci_utils
from nova.virt import hardware
# Namespace to use for Nova specific metadata items in XML
NOVA_NS = "http://openstack.org/xmlns/libvirt/nova/1.0"
class LibvirtConfigObject(object):
def __init__(self, **kwargs):
super(LibvirtConfigObject, self).__init__()
self.root_name = kwargs.get("root_name")
self.ns_prefix = kwargs.get('ns_prefix')
self.ns_uri = kwargs.get('ns_uri')
def _new_node(self, node_name, **kwargs):
if self.ns_uri is None:
return etree.Element(node_name, **kwargs)
else:
return etree.Element("{" + self.ns_uri + "}" + node_name,
nsmap={self.ns_prefix: self.ns_uri},
**kwargs)
def _text_node(self, node_name, value, **kwargs):
child = self._new_node(node_name, **kwargs)
child.text = str(value)
return child
def format_dom(self):
return self._new_node(self.root_name)
def parse_str(self, xmlstr):
self.parse_dom(etree.fromstring(xmlstr))
def parse_dom(self, xmldoc):
if self.root_name != xmldoc.tag:
msg = (_("Root element name should be '%(name)s' not '%(tag)s'") %
{'name': self.root_name, 'tag': xmldoc.tag})
raise exception.InvalidInput(msg)
def to_xml(self, pretty_print=True):
root = self.format_dom()
xml_str = etree.tostring(root, encoding='unicode',
pretty_print=pretty_print)
return xml_str
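# --- illustrative sketch (not part of nova) ----------------------------------
# Minimal demonstration of the Object <-> XML round trip every subclass
# inherits from LibvirtConfigObject: format_dom()/to_xml() emit XML and
# parse_str()/parse_dom() consume it. The element name is hypothetical.
class _LibvirtConfigExample(LibvirtConfigObject):
    def __init__(self, **kwargs):
        super(_LibvirtConfigExample, self).__init__(root_name="example",
                                                    **kwargs)
        self.label = None

    def format_dom(self):
        root = super(_LibvirtConfigExample, self).format_dom()
        if self.label is not None:
            root.append(self._text_node("label", self.label))
        return root

    def parse_dom(self, xmldoc):
        super(_LibvirtConfigExample, self).parse_dom(xmldoc)
        for c in xmldoc:
            if c.tag == "label":
                self.label = c.text
# --- end sketch ---------------------------------------------------------------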
class LibvirtConfigCaps(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCaps, self).__init__(root_name="capabilities",
**kwargs)
self.host = None
self.guests = []
def parse_dom(self, xmldoc):
super(LibvirtConfigCaps, self).parse_dom(xmldoc)
for c in xmldoc:
if c.tag == "host":
host = LibvirtConfigCapsHost()
host.parse_dom(c)
self.host = host
elif c.tag == "guest":
guest = LibvirtConfigCapsGuest()
guest.parse_dom(c)
self.guests.append(guest)
def format_dom(self):
caps = super(LibvirtConfigCaps, self).format_dom()
if self.host:
caps.append(self.host.format_dom())
for g in self.guests:
caps.append(g.format_dom())
return caps
class LibvirtConfigDomainCaps(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigDomainCaps, self).__init__(
root_name="domainCapabilities", **kwargs)
self._features = None
self._machine = None
self._alias = None
self._devices = None
def parse_dom(self, xmldoc):
super(LibvirtConfigDomainCaps, self).parse_dom(xmldoc)
for c in xmldoc:
if c.tag == "features":
features = LibvirtConfigDomainCapsFeatures()
features.parse_dom(c)
self._features = features
elif c.tag == "machine":
self._machine = c.text
elif c.tag == "devices":
devices = LibvirtConfigDomainCapsDevices()
devices.parse_dom(c)
self._devices = devices
@property
def features(self):
if self._features is None:
return []
return self._features.features
@property
def machine_type(self):
if self._machine is None:
return ""
return self._machine
@property
def machine_type_alias(self):
if self._alias is None:
return self._machine
return self._alias
@machine_type_alias.setter
def machine_type_alias(self, alias):
self._alias = alias
@property
def devices(self):
if self._devices is None:
return []
return self._devices
class LibvirtConfigDomainCapsVideoModels(LibvirtConfigObject):
def __init__(self, **kwargs):
super().__init__(root_name='video', **kwargs)
self.supported = False
self.models = set()
def parse_dom(self, xmldoc):
super().parse_dom(xmldoc)
if xmldoc.get('supported') == 'yes':
self.supported = True
self.models = {str(node) for node in
xmldoc.xpath("//enum[@name='modelType']/value/text()")}
class LibvirtConfigDomainCapsDiskBuses(LibvirtConfigObject):
def __init__(self, **kwargs):
super().__init__(root_name='disk', **kwargs)
self.supported = False
self.buses = set()
def parse_dom(self, xmldoc):
super(LibvirtConfigDomainCapsDiskBuses, self).parse_dom(xmldoc)
if xmldoc.get('supported') == 'yes':
self.supported = True
self.buses = {str(node) for node in
xmldoc.xpath("//enum[@name='bus']/value/text()")}
class LibvirtConfigDomainCapsDevices(LibvirtConfigObject):
DEVICE_PARSERS = {
'video': LibvirtConfigDomainCapsVideoModels,
'disk': LibvirtConfigDomainCapsDiskBuses,
}
def __init__(self, **kwargs):
super().__init__(root_name='devices', **kwargs)
self.devices = set()
def parse_dom(self, xmldoc):
super().parse_dom(xmldoc)
for c in list(xmldoc):
device = self.DEVICE_PARSERS.get(c.tag)
if device:
device = device()
device.parse_dom(c)
self.devices.add(device)
def _get_device(self, device_type):
for device in self.devices:
if type(device) == self.DEVICE_PARSERS.get(device_type):
return device
return None
@property
def disk(self):
return self._get_device('disk')
@property
def video(self):
return self._get_device('video')
class LibvirtConfigDomainCapsFeatures(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigDomainCapsFeatures, self).__init__(
root_name="features", **kwargs)
self.features = []
def parse_dom(self, xmldoc):
super(LibvirtConfigDomainCapsFeatures, self).parse_dom(xmldoc)
for c in xmldoc:
feature = None
if c.tag == "sev":
feature = LibvirtConfigDomainCapsFeatureSev()
if feature:
feature.parse_dom(c)
self.features.append(feature)
# There are many other features and domain capabilities,
# but we don't need to regenerate the XML (it's read-only
# data provided by libvirtd), so there's no point parsing
# them until we actually need their values.
# For the same reason, we do not need a format_dom() method, but
# it's a bug if this ever gets called and we inherited one from
# the base class, so override that to watch out for accidental
# calls.
def format_dom(self):
raise RuntimeError(_('BUG: tried to generate domainCapabilities XML'))
class LibvirtConfigDomainCapsFeatureSev(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigDomainCapsFeatureSev, self).__init__(
root_name='sev', **kwargs)
self.supported = False
self.cbitpos = None
self.reduced_phys_bits = None
def parse_dom(self, xmldoc):
super(LibvirtConfigDomainCapsFeatureSev, self).parse_dom(xmldoc)
if xmldoc.get('supported') == 'yes':
self.supported = True
for c in list(xmldoc):
if c.tag == 'reducedPhysBits':
self.reduced_phys_bits = int(c.text)
elif c.tag == 'cbitpos':
self.cbitpos = int(c.text)
class LibvirtConfigCapsNUMATopology(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsNUMATopology, self).__init__(
root_name="topology",
**kwargs)
self.cells = []
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsNUMATopology, self).parse_dom(xmldoc)
xmlcells = xmldoc[0]
for xmlcell in xmlcells:
cell = LibvirtConfigCapsNUMACell()
cell.parse_dom(xmlcell)
self.cells.append(cell)
def format_dom(self):
topo = super(LibvirtConfigCapsNUMATopology, self).format_dom()
cells = etree.Element("cells")
cells.set("num", str(len(self.cells)))
topo.append(cells)
for cell in self.cells:
cells.append(cell.format_dom())
return topo
class LibvirtConfigCapsNUMACell(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsNUMACell, self).__init__(root_name="cell",
**kwargs)
self.id = None
self.memory = 0
self.mempages = []
self.cpus = []
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsNUMACell, self).parse_dom(xmldoc)
self.id = int(xmldoc.get("id"))
for c in xmldoc:
if c.tag == "memory":
self.memory = int(c.text)
elif c.tag == "pages":
pages = LibvirtConfigCapsNUMAPages()
pages.parse_dom(c)
self.mempages.append(pages)
elif c.tag == "cpus":
for c2 in c:
cpu = LibvirtConfigCapsNUMACPU()
cpu.parse_dom(c2)
self.cpus.append(cpu)
def format_dom(self):
cell = super(LibvirtConfigCapsNUMACell, self).format_dom()
cell.set("id", str(self.id))
mem = etree.Element("memory")
mem.set("unit", "KiB")
mem.text = str(self.memory)
cell.append(mem)
for pages in self.mempages:
cell.append(pages.format_dom())
cpus = etree.Element("cpus")
cpus.set("num", str(len(self.cpus)))
for cpu in self.cpus:
cpus.append(cpu.format_dom())
cell.append(cpus)
return cell
class LibvirtConfigCapsNUMACPU(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsNUMACPU, self).__init__(root_name="cpu",
**kwargs)
self.id = None
self.socket_id = None
self.core_id = None
self.siblings = None
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsNUMACPU, self).parse_dom(xmldoc)
self.id = int(xmldoc.get("id"))
if xmldoc.get("socket_id") is not None:
self.socket_id = int(xmldoc.get("socket_id"))
if xmldoc.get("core_id") is not None:
self.core_id = int(xmldoc.get("core_id"))
if xmldoc.get("siblings") is not None:
self.siblings = hardware.parse_cpu_spec(
xmldoc.get("siblings"))
def format_dom(self):
cpu = super(LibvirtConfigCapsNUMACPU, self).format_dom()
cpu.set("id", str(self.id))
if self.socket_id is not None:
cpu.set("socket_id", str(self.socket_id))
if self.core_id is not None:
cpu.set("core_id", str(self.core_id))
if self.siblings is not None:
cpu.set("siblings",
hardware.format_cpu_spec(self.siblings))
return cpu
class LibvirtConfigCapsNUMAPages(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsNUMAPages, self).__init__(
root_name="pages", **kwargs)
self.size = None
self.total = None
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsNUMAPages, self).parse_dom(xmldoc)
self.size = int(xmldoc.get("size"))
self.total = int(xmldoc.text)
def format_dom(self):
pages = super(LibvirtConfigCapsNUMAPages, self).format_dom()
pages.text = str(self.total)
pages.set("size", str(self.size))
pages.set("unit", "KiB")
return pages
class LibvirtConfigCapsHost(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsHost, self).__init__(root_name="host",
**kwargs)
self.cpu = None
self.uuid = None
self.topology = None
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsHost, self).parse_dom(xmldoc)
for c in xmldoc:
if c.tag == "cpu":
cpu = LibvirtConfigCPU()
cpu.parse_dom(c)
self.cpu = cpu
elif c.tag == "uuid":
self.uuid = c.text
elif c.tag == "topology":
self.topology = LibvirtConfigCapsNUMATopology()
self.topology.parse_dom(c)
def format_dom(self):
caps = super(LibvirtConfigCapsHost, self).format_dom()
if self.uuid:
caps.append(self._text_node("uuid", self.uuid))
if self.cpu:
caps.append(self.cpu.format_dom())
if self.topology:
caps.append(self.topology.format_dom())
return caps
class LibvirtConfigCapsGuest(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsGuest, self).__init__(root_name="guest",
**kwargs)
self.arch = None
self.ostype = None
# Map domain types such as 'qemu' and 'kvm' to
# LibvirtConfigCapsGuestDomain instances.
self.domains = OrderedDict()
self.default_domain = None
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsGuest, self).parse_dom(xmldoc)
for child in xmldoc:
if child.tag == "os_type":
self.ostype = child.text
elif child.tag == "arch":
self.parse_arch(child)
def parse_arch(self, xmldoc):
self.arch = xmldoc.get("name")
# NOTE(aspiers): The data relating to each <domain> element
# under <arch> (such as <emulator> and many <machine>
# elements) is structured in a slightly odd way. There is one
# "default" domain such as
#
# <domain type='qemu'/>
#
# which has no child elements, and all its data is provided in
# sibling elements. Then others such as
#
# <domain type='kvm'>
#
# will have their <emulator> and <machine> elements as
# children. So we need to handle the two cases separately.
self.default_domain = LibvirtConfigCapsGuestDomain()
for child in xmldoc:
if child.tag == "domain":
if list(child):
# This domain has children, so create a new instance,
# parse it, and register it in the dict of domains.
domain = LibvirtConfigCapsGuestDomain()
domain.parse_dom(child)
self.domains[domain.domtype] = domain
else:
# This is the childless <domain/> element for the
# default domain
self.default_domain.parse_domain(child)
self.domains[self.default_domain.domtype] = \
self.default_domain
else:
# Sibling element of the default domain
self.default_domain.parse_child(child)
def format_dom(self):
caps = super(LibvirtConfigCapsGuest, self).format_dom()
if self.ostype is not None:
caps.append(self._text_node("os_type", self.ostype))
if self.arch:
arch = self.format_arch()
caps.append(arch)
return caps
def format_arch(self):
arch = etree.Element("arch", name=self.arch)
for c in self.default_domain.format_dom():
arch.append(c)
arch.append(self._new_node("domain", type=self.default_domain.domtype))
for domtype, domain in self.domains.items():
if domtype == self.default_domain.domtype:
# We've already added this domain at the top level
continue
arch.append(domain.format_dom())
return arch
class LibvirtConfigCapsGuestDomain(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsGuestDomain, self).__init__(
root_name="domain", **kwargs)
self.domtype = None
# Track <emulator> values, which we need in order to be able
# to call virConnectGetDomainCapabilities() - typically
# something like '/usr/bin/qemu-system-i386'.
self.emulator = None
self.machines = {}
self.aliases = {}
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsGuestDomain, self).parse_dom(xmldoc)
self.parse_domain(xmldoc)
for c in xmldoc:
self.parse_child(c)
def parse_child(self, xmldoc):
if xmldoc.tag == "emulator":
self.emulator = xmldoc.text
elif xmldoc.tag == "machine":
self.parse_machine(xmldoc)
def parse_domain(self, xmldoc):
self.domtype = xmldoc.get("type")
if self.domtype is None:
            raise exception.InvalidInput(
                "Didn't find domain type in %s" %
                etree.tostring(xmldoc, encoding='unicode'))
def parse_machine(self, xmldoc):
if 'canonical' in xmldoc.attrib:
self.aliases[xmldoc.text] = xmldoc.attrib
else:
self.machines[xmldoc.text] = xmldoc.attrib
def format_dom(self):
domain = super(LibvirtConfigCapsGuestDomain, self).format_dom()
if self.domtype is not None:
domain.set("type", self.domtype)
if self.emulator is not None:
domain.append(self._text_node("emulator", self.emulator))
for mach_type, machine in self.machines.items():
domain.append(self._text_node("machine", mach_type, **machine))
for alias, machine in self.aliases.items():
domain.append(self._text_node("machine", alias, **machine))
return domain
class LibvirtConfigGuestTimer(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestTimer, self).__init__(root_name="timer",
**kwargs)
self.name = "platform"
self.track = None
self.tickpolicy = None
self.present = None
def format_dom(self):
tm = super(LibvirtConfigGuestTimer, self).format_dom()
tm.set("name", self.name)
if self.track is not None:
tm.set("track", self.track)
if self.tickpolicy is not None:
tm.set("tickpolicy", self.tickpolicy)
if self.present is not None:
if self.present:
tm.set("present", "yes")
else:
tm.set("present", "no")
return tm
class LibvirtConfigGuestClock(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestClock, self).__init__(root_name="clock",
**kwargs)
self.offset = "utc"
self.adjustment = None
self.timezone = None
self.timers = []
def format_dom(self):
clk = super(LibvirtConfigGuestClock, self).format_dom()
clk.set("offset", self.offset)
if self.adjustment:
clk.set("adjustment", self.adjustment)
elif self.timezone:
clk.set("timezone", self.timezone)
for tm in self.timers:
clk.append(tm.format_dom())
return clk
def add_timer(self, tm):
self.timers.append(tm)
class LibvirtConfigCPUFeature(LibvirtConfigObject):
def __init__(self, name=None, **kwargs):
super(LibvirtConfigCPUFeature, self).__init__(root_name='feature',
**kwargs)
self.name = name
self.policy = "require"
def parse_dom(self, xmldoc):
super(LibvirtConfigCPUFeature, self).parse_dom(xmldoc)
self.name = xmldoc.get("name")
self.policy = xmldoc.get("policy", "require")
def format_dom(self):
ft = super(LibvirtConfigCPUFeature, self).format_dom()
ft.set("name", self.name)
return ft
def __eq__(self, obj):
return obj.name == self.name
def __ne__(self, obj):
return obj.name != self.name
def __hash__(self):
return hash(self.name)
class LibvirtConfigCPU(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCPU, self).__init__(root_name='cpu',
**kwargs)
self.arch = None
self.vendor = None
self.model = None
self.sockets = None
self.cores = None
self.threads = None
self.features = set()
def parse_dom(self, xmldoc):
super(LibvirtConfigCPU, self).parse_dom(xmldoc)
for c in xmldoc:
if c.tag == "arch":
self.arch = c.text
elif c.tag == "model":
self.model = c.text
elif c.tag == "vendor":
self.vendor = c.text
elif c.tag == "topology":
self.sockets = int(c.get("sockets"))
self.cores = int(c.get("cores"))
self.threads = int(c.get("threads"))
elif c.tag == "feature":
f = LibvirtConfigCPUFeature()
f.parse_dom(c)
if f.policy != "disable":
self.add_feature(f)
def format_dom(self):
cpu = super(LibvirtConfigCPU, self).format_dom()
if self.arch is not None:
cpu.append(self._text_node("arch", self.arch))
if self.model is not None:
cpu.append(self._text_node("model", self.model))
if self.vendor is not None:
cpu.append(self._text_node("vendor", self.vendor))
if (self.sockets is not None and
self.cores is not None and
self.threads is not None):
top = etree.Element("topology")
top.set("sockets", str(self.sockets))
top.set("cores", str(self.cores))
top.set("threads", str(self.threads))
cpu.append(top)
# sorting the features to allow more predictable tests
for f in sorted(self.features, key=lambda x: x.name):
if f.policy != "disable":
cpu.append(f.format_dom())
return cpu
def add_feature(self, feat):
self.features.add(feat)
class LibvirtConfigGuestCPUFeature(LibvirtConfigCPUFeature):
def __init__(self, name=None, **kwargs):
super(LibvirtConfigGuestCPUFeature, self).__init__(name, **kwargs)
self.policy = "require"
def format_dom(self):
ft = super(LibvirtConfigGuestCPUFeature, self).format_dom()
ft.set("policy", self.policy)
return ft
class LibvirtConfigGuestCPUNUMACell(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUNUMACell, self).__init__(root_name="cell",
**kwargs)
self.id = None
self.cpus = None
self.memory = None
self.memAccess = None
def parse_dom(self, xmldoc):
if xmldoc.get("id") is not None:
self.id = int(xmldoc.get("id"))
if xmldoc.get("memory") is not None:
self.memory = int(xmldoc.get("memory"))
if xmldoc.get("cpus") is not None:
self.cpus = hardware.parse_cpu_spec(xmldoc.get("cpus"))
self.memAccess = xmldoc.get("memAccess")
def format_dom(self):
cell = super(LibvirtConfigGuestCPUNUMACell, self).format_dom()
if self.id is not None:
cell.set("id", str(self.id))
if self.cpus is not None:
cell.set("cpus",
hardware.format_cpu_spec(self.cpus))
if self.memory is not None:
cell.set("memory", str(self.memory))
if self.memAccess is not None:
cell.set("memAccess", self.memAccess)
return cell
class LibvirtConfigGuestCPUNUMA(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUNUMA, self).__init__(root_name="numa",
**kwargs)
self.cells = []
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestCPUNUMA, self).parse_dom(xmldoc)
for child in xmldoc:
if child.tag == "cell":
cell = LibvirtConfigGuestCPUNUMACell()
cell.parse_dom(child)
self.cells.append(cell)
def format_dom(self):
numa = super(LibvirtConfigGuestCPUNUMA, self).format_dom()
for cell in self.cells:
numa.append(cell.format_dom())
return numa
class LibvirtConfigGuestCPU(LibvirtConfigCPU):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPU, self).__init__(**kwargs)
self.mode = None
self.match = "exact"
self.numa = None
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestCPU, self).parse_dom(xmldoc)
self.mode = xmldoc.get('mode')
self.match = xmldoc.get('match')
for child in xmldoc:
if child.tag == "numa":
numa = LibvirtConfigGuestCPUNUMA()
numa.parse_dom(child)
self.numa = numa
def format_dom(self):
cpu = super(LibvirtConfigGuestCPU, self).format_dom()
if self.mode:
cpu.set("mode", self.mode)
cpu.set("match", self.match)
if self.numa is not None:
cpu.append(self.numa.format_dom())
return cpu
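# --- illustrative sketch (not part of nova) ----------------------------------
# Building a guest <cpu> element with the classes above; the values are
# placeholders chosen only to exercise format_dom().
def _example_guest_cpu_xml():
    cpu = LibvirtConfigGuestCPU()
    cpu.mode = "custom"
    cpu.model = "Haswell"
    cpu.sockets, cpu.cores, cpu.threads = 1, 2, 2
    cpu.add_feature(LibvirtConfigGuestCPUFeature("pdpe1gb"))
    return cpu.to_xml()
# --- end sketch ---------------------------------------------------------------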
class LibvirtConfigGuestSMBIOS(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSMBIOS, self).__init__(root_name="smbios",
**kwargs)
self.mode = "sysinfo"
def format_dom(self):
smbios = super(LibvirtConfigGuestSMBIOS, self).format_dom()
smbios.set("mode", self.mode)
return smbios
class LibvirtConfigGuestSysinfo(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSysinfo, self).__init__(root_name="sysinfo",
**kwargs)
self.type = "smbios"
self.bios_vendor = None
self.bios_version = None
self.system_manufacturer = None
self.system_product = None
self.system_version = None
self.system_serial = None
self.system_uuid = None
self.system_family = None
def format_dom(self):
sysinfo = super(LibvirtConfigGuestSysinfo, self).format_dom()
sysinfo.set("type", self.type)
bios = etree.Element("bios")
system = etree.Element("system")
if self.bios_vendor is not None:
bios.append(self._text_node("entry", self.bios_vendor,
name="vendor"))
if self.bios_version is not None:
bios.append(self._text_node("entry", self.bios_version,
name="version"))
if self.system_manufacturer is not None:
system.append(self._text_node("entry", self.system_manufacturer,
name="manufacturer"))
if self.system_product is not None:
system.append(self._text_node("entry", self.system_product,
name="product"))
if self.system_version is not None:
system.append(self._text_node("entry", self.system_version,
name="version"))
if self.system_serial is not None:
system.append(self._text_node("entry", self.system_serial,
name="serial"))
if self.system_uuid is not None:
system.append(self._text_node("entry", self.system_uuid,
name="uuid"))
if self.system_family is not None:
system.append(self._text_node("entry", self.system_family,
name="family"))
if len(list(bios)) > 0:
sysinfo.append(bios)
if len(list(system)) > 0:
sysinfo.append(system)
return sysinfo
class LibvirtConfigGuestDevice(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDevice, self).__init__(**kwargs)
@property
def uses_virtio(self):
return False
class LibvirtConfigGuestVTPM(LibvirtConfigGuestDevice):
def __init__(self, vtpm_config, vtpm_secret_uuid, **kwargs):
super(LibvirtConfigGuestVTPM, self).__init__(root_name="tpm", **kwargs)
self.version = vtpm_config.version
self.model = vtpm_config.model
self.secret_uuid = vtpm_secret_uuid
def format_dom(self):
# <tpm model='$model'>
dev = super(LibvirtConfigGuestVTPM, self).format_dom()
dev.set("model", self.model)
# <backend type='emulator' version='$version'>
back = etree.Element("backend")
back.set("type", "emulator")
back.set("version", self.version)
# <encryption secret='$secret_uuid'/>
enc = etree.Element("encryption")
enc.set("secret", self.secret_uuid)
back.append(enc)
dev.append(back)
return dev
class LibvirtConfigGuestDisk(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDisk, self).__init__(root_name="disk",
**kwargs)
self.source_type = "file"
self.source_device = "disk"
self.driver_name = None
self.driver_format = None
self.driver_cache = None
self.driver_discard = None
self.driver_io = None
self.driver_iommu = False
self.source_path = None
self.source_protocol = None
self.source_name = None
self.source_hosts = []
self.source_ports = []
self.target_dev = None
self.target_path = None
self.target_bus = None
self.auth_username = None
self.auth_secret_type = None
self.auth_secret_uuid = None
self.serial = None
self.disk_read_bytes_sec = None
self.disk_read_iops_sec = None
self.disk_write_bytes_sec = None
self.disk_write_iops_sec = None
self.disk_total_bytes_sec = None
self.disk_total_iops_sec = None
self.disk_read_bytes_sec_max = None
self.disk_write_bytes_sec_max = None
self.disk_total_bytes_sec_max = None
self.disk_read_iops_sec_max = None
self.disk_write_iops_sec_max = None
self.disk_total_iops_sec_max = None
self.disk_size_iops_sec = None
self.logical_block_size = None
self.physical_block_size = None
self.readonly = False
self.shareable = False
self.snapshot = None
self.backing_store = None
self.device_addr = None
self.boot_order = None
self.mirror = None
self.encryption = None
def _format_iotune(self, dev):
iotune = etree.Element("iotune")
if self.disk_read_bytes_sec is not None:
iotune.append(self._text_node("read_bytes_sec",
self.disk_read_bytes_sec))
if self.disk_read_iops_sec is not None:
iotune.append(self._text_node("read_iops_sec",
self.disk_read_iops_sec))
if self.disk_write_bytes_sec is not None:
iotune.append(self._text_node("write_bytes_sec",
self.disk_write_bytes_sec))
if self.disk_write_iops_sec is not None:
iotune.append(self._text_node("write_iops_sec",
self.disk_write_iops_sec))
if self.disk_total_bytes_sec is not None:
iotune.append(self._text_node("total_bytes_sec",
self.disk_total_bytes_sec))
if self.disk_total_iops_sec is not None:
iotune.append(self._text_node("total_iops_sec",
self.disk_total_iops_sec))
if self.disk_read_bytes_sec_max is not None:
iotune.append(self._text_node("read_bytes_sec_max",
self.disk_read_bytes_sec_max))
if self.disk_write_bytes_sec_max is not None:
iotune.append(self._text_node("write_bytes_sec_max",
self.disk_write_bytes_sec_max))
if self.disk_total_bytes_sec_max is not None:
iotune.append(self._text_node("total_bytes_sec_max",
self.disk_total_bytes_sec_max))
if self.disk_read_iops_sec_max is not None:
iotune.append(self._text_node("read_iops_sec_max",
self.disk_read_iops_sec_max))
if self.disk_write_iops_sec_max is not None:
iotune.append(self._text_node("write_iops_sec_max",
self.disk_write_iops_sec_max))
if self.disk_total_iops_sec_max is not None:
iotune.append(self._text_node("total_iops_sec_max",
self.disk_total_iops_sec_max))
if self.disk_size_iops_sec is not None:
iotune.append(self._text_node("size_iops_sec",
self.disk_size_iops_sec))
if len(iotune) > 0:
dev.append(iotune)
@property
def uses_virtio(self):
return 'virtio' == self.target_bus
def format_dom(self):
dev = super(LibvirtConfigGuestDisk, self).format_dom()
dev.set("type", self.source_type)
dev.set("device", self.source_device)
if any((self.driver_name, self.driver_format, self.driver_cache,
self.driver_discard, self.driver_iommu)):
drv = etree.Element("driver")
if self.driver_name is not None:
drv.set("name", self.driver_name)
if self.driver_format is not None:
drv.set("type", self.driver_format)
if self.driver_cache is not None:
drv.set("cache", self.driver_cache)
if self.driver_discard is not None:
drv.set("discard", self.driver_discard)
if self.driver_io is not None:
drv.set("io", self.driver_io)
if self.driver_iommu:
drv.set("iommu", "on")
dev.append(drv)
if self.source_type == "file":
dev.append(etree.Element("source", file=self.source_path))
elif self.source_type == "block":
dev.append(etree.Element("source", dev=self.source_path))
elif self.source_type == "mount":
dev.append(etree.Element("source", dir=self.source_path))
elif self.source_type == "network" and self.source_protocol:
source = etree.Element("source", protocol=self.source_protocol)
if self.source_name is not None:
source.set('name', self.source_name)
hosts_info = zip(self.source_hosts, self.source_ports)
for name, port in hosts_info:
host = etree.Element('host', name=name)
if port is not None:
host.set('port', port)
source.append(host)
dev.append(source)
if self.auth_secret_type is not None:
auth = etree.Element("auth")
auth.set("username", self.auth_username)
auth.append(etree.Element("secret", type=self.auth_secret_type,
uuid=self.auth_secret_uuid))
dev.append(auth)
if self.source_type == "mount":
dev.append(etree.Element("target", dir=self.target_path))
else:
dev.append(etree.Element("target", dev=self.target_dev,
bus=self.target_bus))
if self.serial is not None:
dev.append(self._text_node("serial", self.serial))
self._format_iotune(dev)
# Block size tuning
if (self.logical_block_size is not None or
self.physical_block_size is not None):
blockio = etree.Element("blockio")
if self.logical_block_size is not None:
blockio.set('logical_block_size', self.logical_block_size)
if self.physical_block_size is not None:
blockio.set('physical_block_size', self.physical_block_size)
dev.append(blockio)
if self.readonly:
dev.append(etree.Element("readonly"))
if self.shareable:
dev.append(etree.Element("shareable"))
if self.boot_order:
dev.append(etree.Element("boot", order=self.boot_order))
if self.device_addr:
dev.append(self.device_addr.format_dom())
if self.encryption:
dev.append(self.encryption.format_dom())
return dev
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestDisk, self).parse_dom(xmldoc)
self.source_type = xmldoc.get('type')
self.snapshot = xmldoc.get('snapshot')
for c in xmldoc:
if c.tag == 'driver':
self.driver_name = c.get('name')
self.driver_format = c.get('type')
self.driver_cache = c.get('cache')
self.driver_discard = c.get('discard')
self.driver_io = c.get('io')
self.driver_iommu = c.get('iommu', '') == "on"
elif c.tag == 'source':
if self.source_type == 'file':
self.source_path = c.get('file')
elif self.source_type == 'block':
self.source_path = c.get('dev')
elif self.source_type == 'mount':
self.source_path = c.get('dir')
elif self.source_type == 'network':
self.source_protocol = c.get('protocol')
self.source_name = c.get('name')
for sub in c:
if sub.tag == 'host':
self.source_hosts.append(sub.get('name'))
self.source_ports.append(sub.get('port'))
elif c.tag == 'serial':
self.serial = c.text
elif c.tag == 'target':
if self.source_type == 'mount':
self.target_path = c.get('dir')
else:
self.target_dev = c.get('dev')
self.target_bus = c.get('bus', None)
elif c.tag == 'backingStore':
b = LibvirtConfigGuestDiskBackingStore()
b.parse_dom(c)
self.backing_store = b
elif c.tag == 'readonly':
self.readonly = True
elif c.tag == 'shareable':
self.shareable = True
elif c.tag == 'address':
obj = LibvirtConfigGuestDeviceAddress.parse_dom(c)
self.device_addr = obj
elif c.tag == 'boot':
self.boot_order = c.get('order')
elif c.tag == 'mirror':
m = LibvirtConfigGuestDiskMirror()
m.parse_dom(c)
self.mirror = m
elif c.tag == 'encryption':
e = LibvirtConfigGuestDiskEncryption()
e.parse_dom(c)
self.encryption = e
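# --- illustrative sketch (not part of nova) ----------------------------------
# Emitting a file-backed virtio disk with LibvirtConfigGuestDisk; the image
# path is a placeholder.
def _example_guest_disk_xml():
    disk = LibvirtConfigGuestDisk()
    disk.source_type = "file"
    disk.source_path = "/var/lib/libvirt/images/example.qcow2"
    disk.driver_name = "qemu"
    disk.driver_format = "qcow2"
    disk.target_dev = "vda"
    disk.target_bus = "virtio"
    return disk.to_xml()
# --- end sketch ---------------------------------------------------------------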
class LibvirtConfigGuestDiskBackingStore(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDiskBackingStore, self).__init__(
root_name="backingStore", **kwargs)
self.index = None
self.source_type = None
self.source_file = None
self.source_protocol = None
self.source_name = None
self.source_hosts = []
self.source_ports = []
self.driver_name = None
self.driver_format = None
self.backing_store = None
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestDiskBackingStore, self).parse_dom(xmldoc)
self.source_type = xmldoc.get('type')
self.index = xmldoc.get('index')
for c in xmldoc:
if c.tag == 'driver':
self.driver_name = c.get('name')
self.driver_format = c.get('type')
elif c.tag == 'source':
self.source_file = c.get('file')
self.source_protocol = c.get('protocol')
self.source_name = c.get('name')
for d in c:
if d.tag == 'host':
self.source_hosts.append(d.get('name'))
self.source_ports.append(d.get('port'))
elif c.tag == 'backingStore':
if len(c):
self.backing_store = LibvirtConfigGuestDiskBackingStore()
self.backing_store.parse_dom(c)
class LibvirtConfigGuestSnapshotDisk(LibvirtConfigObject):
"""Disk class for handling disk information in snapshots.
Similar to LibvirtConfigGuestDisk, but used to represent
disk entities in <domainsnapshot> structures rather than
real devices. These typically have fewer members, and
different expectations for which fields are required.
"""
def __init__(self, **kwargs):
super(LibvirtConfigGuestSnapshotDisk, self).__init__(root_name="disk",
**kwargs)
self.source_type = None
self.source_device = None
self.name = None
self.snapshot = None
self.driver_name = None
self.driver_format = None
self.driver_cache = None
self.source_path = None
self.source_protocol = None
self.source_name = None
self.source_hosts = []
self.source_ports = []
self.target_dev = None
self.target_path = None
self.target_bus = None
self.auth_username = None
self.auth_secret_type = None
self.auth_secret_uuid = None
self.serial = None
def format_dom(self):
dev = super(LibvirtConfigGuestSnapshotDisk, self).format_dom()
if self.name:
dev.attrib['name'] = self.name
if self.snapshot:
dev.attrib['snapshot'] = self.snapshot
if self.source_type:
dev.set("type", self.source_type)
if self.source_device:
dev.set("device", self.source_device)
if (self.driver_name is not None or
self.driver_format is not None or
self.driver_cache is not None):
drv = etree.Element("driver")
if self.driver_name is not None:
drv.set("name", self.driver_name)
if self.driver_format is not None:
drv.set("type", self.driver_format)
if self.driver_cache is not None:
drv.set("cache", self.driver_cache)
dev.append(drv)
if self.source_type == "file":
dev.append(etree.Element("source", file=self.source_path))
elif self.source_type == "block":
dev.append(etree.Element("source", dev=self.source_path))
elif self.source_type == "mount":
dev.append(etree.Element("source", dir=self.source_path))
elif self.source_type == "network":
source = etree.Element("source", protocol=self.source_protocol)
if self.source_name is not None:
source.set('name', self.source_name)
hosts_info = zip(self.source_hosts, self.source_ports)
for name, port in hosts_info:
host = etree.Element('host', name=name)
if port is not None:
host.set('port', port)
source.append(host)
dev.append(source)
if self.auth_secret_type is not None:
auth = etree.Element("auth")
auth.set("username", self.auth_username)
auth.append(etree.Element("secret", type=self.auth_secret_type,
uuid=self.auth_secret_uuid))
dev.append(auth)
if self.source_type == "mount":
dev.append(etree.Element("target", dir=self.target_path))
else:
if self.target_bus and self.target_dev:
dev.append(etree.Element("target", dev=self.target_dev,
bus=self.target_bus))
return dev
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestSnapshotDisk, self).parse_dom(xmldoc)
self.source_type = xmldoc.get('type')
self.snapshot = xmldoc.get('snapshot')
for c in xmldoc:
if c.tag == 'driver':
self.driver_name = c.get('name')
self.driver_format = c.get('type')
self.driver_cache = c.get('cache')
elif c.tag == 'source':
if self.source_type == 'file':
self.source_path = c.get('file')
elif self.source_type == 'block':
self.source_path = c.get('dev')
elif self.source_type == 'mount':
self.source_path = c.get('dir')
elif self.source_type == 'network':
self.source_protocol = c.get('protocol')
self.source_name = c.get('name')
for sub in c:
if sub.tag == 'host':
self.source_hosts.append(sub.get('name'))
self.source_ports.append(sub.get('port'))
elif c.tag == 'serial':
self.serial = c.text
for c in xmldoc:
if c.tag == 'target':
if self.source_type == 'mount':
self.target_path = c.get('dir')
else:
self.target_dev = c.get('dev')
self.target_bus = c.get('bus', None)
class LibvirtConfigGuestFilesys(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestFilesys, self).__init__(root_name="filesystem",
**kwargs)
self.source_type = "mount"
self.source_dir = None
self.source_file = None
self.source_dev = None
self.target_dir = "/"
self.driver_type = "loop"
self.driver_format = "raw"
def format_dom(self):
dev = super(LibvirtConfigGuestFilesys, self).format_dom()
dev.set("type", self.source_type)
if self.source_type == "file":
dev.append(etree.Element("driver", type = self.driver_type,
format = self.driver_format))
dev.append(etree.Element("source", file=self.source_file))
elif self.source_type == "block":
dev.append(etree.Element("source", dev=self.source_dev))
else:
dev.append(etree.Element("source", dir=self.source_dir))
dev.append(etree.Element("target", dir=self.target_dir))
return dev
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestFilesys, self).parse_dom(xmldoc)
self.source_type = xmldoc.get('type')
for c in xmldoc:
if c.tag == 'driver':
if self.source_type == 'file':
self.driver_type = c.get('type')
self.driver_format = c.get('format')
elif c.tag == 'source':
if self.source_type == 'file':
self.source_file = c.get('file')
elif self.source_type == 'block':
self.source_dev = c.get('dev')
else:
self.source_dir = c.get('dir')
elif c.tag == 'target':
self.target_dir = c.get('dir')
class LibvirtConfigGuestDiskEncryptionSecret(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDiskEncryptionSecret, self).__init__(**kwargs)
self.type = None
self.uuid = None
def parse_dom(self, xmldoc):
self.type = xmldoc.get('type')
self.uuid = xmldoc.get('uuid')
def format_dom(self):
obj = etree.Element("secret")
obj.set("type", self.type)
obj.set("uuid", self.uuid)
return obj
class LibvirtConfigGuestDiskEncryption(LibvirtConfigObject):
"""https://libvirt.org/formatstorageencryption.html
"""
def __init__(self, **kwargs):
super(LibvirtConfigGuestDiskEncryption, self).__init__(**kwargs)
self.format = None
self.secret = None
def parse_dom(self, xmldoc):
self.format = xmldoc.get('format')
for c in xmldoc:
if c.tag == 'secret':
m = LibvirtConfigGuestDiskEncryptionSecret()
m.parse_dom(c)
self.secret = m
def format_dom(self):
obj = etree.Element("encryption")
obj.set("format", self.format)
obj.append(self.secret.format_dom())
return obj
class LibvirtConfigGuestDiskMirror(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDiskMirror, self).__init__(**kwargs)
self.ready = None
def parse_dom(self, xmldoc):
self.ready = xmldoc.get('ready')
class LibvirtConfigGuestIDMap(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestIDMap, self).__init__(**kwargs)
self.start = 0
self.target = 0
self.count = 10000
def parse_dom(self, xmldoc):
self.start = int(xmldoc.get('start'))
self.target = int(xmldoc.get('target'))
self.count = int(xmldoc.get('count'))
def format_dom(self):
obj = super(LibvirtConfigGuestIDMap, self).format_dom()
obj.set("start", str(self.start))
obj.set("target", str(self.target))
obj.set("count", str(self.count))
return obj
class LibvirtConfigGuestUIDMap(LibvirtConfigGuestIDMap):
def __init__(self, **kwargs):
super(LibvirtConfigGuestUIDMap, self).__init__(root_name="uid",
**kwargs)
class LibvirtConfigGuestGIDMap(LibvirtConfigGuestIDMap):
def __init__(self, **kwargs):
super(LibvirtConfigGuestGIDMap, self).__init__(root_name="gid",
**kwargs)
class LibvirtConfigGuestDeviceAddress(LibvirtConfigObject):
def __init__(self, type=None, **kwargs):
super(LibvirtConfigGuestDeviceAddress, self).__init__(
root_name='address', **kwargs)
self.type = type
def format_dom(self):
xml = super(LibvirtConfigGuestDeviceAddress, self).format_dom()
xml.set("type", self.type)
return xml
@staticmethod
def parse_dom(xmldoc):
addr_type = xmldoc.get('type')
if addr_type == 'pci':
obj = LibvirtConfigGuestDeviceAddressPCI()
elif addr_type == 'drive':
obj = LibvirtConfigGuestDeviceAddressDrive()
else:
return None
obj.parse_dom(xmldoc)
return obj
class LibvirtConfigGuestDeviceAddressDrive(LibvirtConfigGuestDeviceAddress):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDeviceAddressDrive, self).\
__init__(type='drive', **kwargs)
self.controller = None
self.bus = None
self.target = None
self.unit = None
def format_dom(self):
xml = super(LibvirtConfigGuestDeviceAddressDrive, self).format_dom()
if self.controller is not None:
xml.set("controller", str(self.controller))
if self.bus is not None:
xml.set("bus", str(self.bus))
if self.target is not None:
xml.set("target", str(self.target))
if self.unit is not None:
xml.set("unit", str(self.unit))
return xml
def parse_dom(self, xmldoc):
self.controller = xmldoc.get('controller')
self.bus = xmldoc.get('bus')
self.target = xmldoc.get('target')
self.unit = xmldoc.get('unit')
def format_address(self):
return None
class LibvirtConfigGuestDeviceAddressPCI(LibvirtConfigGuestDeviceAddress):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDeviceAddressPCI, self).\
__init__(type='pci', **kwargs)
self.domain = None
self.bus = None
self.slot = None
self.function = None
def format_dom(self):
xml = super(LibvirtConfigGuestDeviceAddressPCI, self).format_dom()
if self.domain is not None:
xml.set("domain", str(self.domain))
if self.bus is not None:
xml.set("bus", str(self.bus))
if self.slot is not None:
xml.set("slot", str(self.slot))
if self.function is not None:
xml.set("function", str(self.function))
return xml
def parse_dom(self, xmldoc):
self.domain = xmldoc.get('domain')
self.bus = xmldoc.get('bus')
self.slot = xmldoc.get('slot')
self.function = xmldoc.get('function')
def format_address(self):
if self.domain is not None:
return pci_utils.get_pci_address(self.domain[2:],
self.bus[2:],
self.slot[2:],
self.function[2:])
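# --- illustrative sketch (not part of nova) ----------------------------------
# Round trip for a PCI <address> element: parse_dom() dispatches on the
# `type` attribute, and format_address() strips the "0x" prefixes before
# handing the fields to nova's pci_utils helper.
def _example_pci_address():
    elem = etree.fromstring(
        '<address type="pci" domain="0x0000" bus="0x00" '
        'slot="0x05" function="0x0"/>')
    addr = LibvirtConfigGuestDeviceAddress.parse_dom(elem)
    return addr.format_address()  # "0000:00:05.0", assuming get_pci_address
# --- end sketch ---------------------------------------------------------------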
class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestInterface, self).__init__(
root_name="interface",
**kwargs)
self.net_type = None
self.target_dev = None
self.model = None
self.mac_addr = None
self.script = None
self.source_dev = None
self.source_mode = "private"
self.vporttype = None
self.vportparams = []
self.filtername = None
self.filterparams = []
self.driver_name = None
self.driver_iommu = False
self.vhostuser_mode = None
self.vhostuser_path = None
self.vhostuser_type = None
self.vhost_queues = None
self.vhost_rx_queue_size = None
self.vhost_tx_queue_size = None
self.vif_inbound_peak = None
self.vif_inbound_burst = None
self.vif_inbound_average = None
self.vif_outbound_peak = None
self.vif_outbound_burst = None
self.vif_outbound_average = None
self.vlan = None
self.device_addr = None
self.mtu = None
def __eq__(self, other):
if not isinstance(other, LibvirtConfigGuestInterface):
return False
        # NOTE(arches): Skip checking target_dev for the vhostuser
        # vif type; target_dev is not a valid value for vhostuser.
        # NOTE(gibi): For macvtap cases the domain has a target_dev
        # generated by libvirt. It is not set by the vif driver code,
        # so it is not present in the config returned by the vif driver
        # and we should not match on it.
return (
self.mac_addr == other.mac_addr and
self.net_type == other.net_type and
self.source_dev == other.source_dev and
(self.net_type == 'vhostuser' or not self.target_dev or
self.target_dev == other.target_dev) and
self.vhostuser_path == other.vhostuser_path)
@property
def uses_virtio(self):
return 'virtio' == self.model
def format_dom(self):
dev = super(LibvirtConfigGuestInterface, self).format_dom()
dev.set("type", self.net_type)
if self.net_type == "hostdev":
dev.set("managed", "yes")
dev.append(etree.Element("mac", address=self.mac_addr))
if self.model:
dev.append(etree.Element("model", type=self.model))
drv_elem = None
if (self.driver_name or
self.driver_iommu or
self.net_type == "vhostuser"):
drv_elem = etree.Element("driver")
if self.driver_name and self.net_type != "vhostuser":
# For vhostuser interface we should not set the driver name.
drv_elem.set("name", self.driver_name)
if self.driver_iommu:
drv_elem.set("iommu", "on")
if drv_elem is not None:
if self.vhost_queues is not None:
drv_elem.set('queues', str(self.vhost_queues))
if self.vhost_rx_queue_size is not None:
drv_elem.set('rx_queue_size', str(self.vhost_rx_queue_size))
if self.vhost_tx_queue_size is not None:
drv_elem.set('tx_queue_size', str(self.vhost_tx_queue_size))
if (drv_elem.get('name') or drv_elem.get('queues') or
drv_elem.get('rx_queue_size') or
drv_elem.get('tx_queue_size') or
drv_elem.get('iommu')):
# Append the driver element into the dom only if name
# or queues or tx/rx or iommu attributes are set.
dev.append(drv_elem)
if self.net_type == "ethernet":
if self.script is not None:
dev.append(etree.Element("script", path=self.script))
if self.mtu is not None:
dev.append(etree.Element("mtu", size=str(self.mtu)))
elif self.net_type == "direct":
dev.append(etree.Element("source", dev=self.source_dev,
mode=self.source_mode))
elif self.net_type == "hostdev":
source_elem = etree.Element("source")
domain, bus, slot, func = \
pci_utils.get_pci_address_fields(self.source_dev)
addr_elem = etree.Element("address", type='pci')
addr_elem.set("domain", "0x%s" % (domain))
addr_elem.set("bus", "0x%s" % (bus))
addr_elem.set("slot", "0x%s" % (slot))
addr_elem.set("function", "0x%s" % (func))
source_elem.append(addr_elem)
dev.append(source_elem)
elif self.net_type == "vhostuser":
dev.append(etree.Element("source", type=self.vhostuser_type,
mode=self.vhostuser_mode,
path=self.vhostuser_path))
elif self.net_type == "bridge":
dev.append(etree.Element("source", bridge=self.source_dev))
if self.script is not None:
dev.append(etree.Element("script", path=self.script))
if self.mtu is not None:
dev.append(etree.Element("mtu", size=str(self.mtu)))
else:
dev.append(etree.Element("source", bridge=self.source_dev))
if self.vlan and self.net_type in ("direct", "hostdev"):
vlan_elem = etree.Element("vlan")
tag_elem = etree.Element("tag", id=str(self.vlan))
vlan_elem.append(tag_elem)
dev.append(vlan_elem)
if self.target_dev is not None:
dev.append(etree.Element("target", dev=self.target_dev))
if self.vporttype is not None:
vport = etree.Element("virtualport", type=self.vporttype)
for p in self.vportparams:
param = etree.Element("parameters")
param.set(p['key'], p['value'])
vport.append(param)
dev.append(vport)
if self.filtername is not None:
filter = etree.Element("filterref", filter=self.filtername)
for p in self.filterparams:
filter.append(etree.Element("parameter",
name=p['key'],
value=p['value']))
dev.append(filter)
if self.vif_inbound_average or self.vif_outbound_average:
bandwidth = etree.Element("bandwidth")
if self.vif_inbound_average is not None:
vif_inbound = etree.Element("inbound",
average=str(self.vif_inbound_average))
if self.vif_inbound_peak is not None:
vif_inbound.set("peak", str(self.vif_inbound_peak))
if self.vif_inbound_burst is not None:
vif_inbound.set("burst", str(self.vif_inbound_burst))
bandwidth.append(vif_inbound)
if self.vif_outbound_average is not None:
vif_outbound = etree.Element("outbound",
average=str(self.vif_outbound_average))
if self.vif_outbound_peak is not None:
vif_outbound.set("peak", str(self.vif_outbound_peak))
if self.vif_outbound_burst is not None:
vif_outbound.set("burst", str(self.vif_outbound_burst))
bandwidth.append(vif_outbound)
dev.append(bandwidth)
return dev
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestInterface, self).parse_dom(xmldoc)
self.net_type = xmldoc.get('type')
for c in xmldoc:
if c.tag == 'mac':
self.mac_addr = c.get('address')
elif c.tag == 'model':
self.model = c.get('type')
elif c.tag == 'driver':
self.driver_name = c.get('name')
self.driver_iommu = (c.get('iommu', '') == 'on')
self.vhost_queues = c.get('queues')
self.vhost_rx_queue_size = c.get('rx_queue_size')
self.vhost_tx_queue_size = c.get('tx_queue_size')
elif c.tag == 'source':
if self.net_type == 'direct':
self.source_dev = c.get('dev')
self.source_mode = c.get('mode', 'private')
elif self.net_type == 'vhostuser':
self.vhostuser_type = c.get('type')
self.vhostuser_mode = c.get('mode')
self.vhostuser_path = c.get('path')
elif self.net_type == 'hostdev':
for sub in c:
if sub.tag == 'address' and sub.get('type') == 'pci':
# strip the 0x prefix on each attribute since
# format_dom puts them back on - note that
# LibvirtConfigGuestHostdevPCI does not do this...
self.source_dev = (
pci_utils.get_pci_address(
sub.get('domain')[2:],
sub.get('bus')[2:],
sub.get('slot')[2:],
sub.get('function')[2:]
)
)
else:
self.source_dev = c.get('bridge')
elif c.tag == 'target':
self.target_dev = c.get('dev')
elif c.tag == 'script':
self.script = c.get('path')
elif c.tag == 'vlan':
# NOTE(mriedem): The vlan element can have multiple tag
# sub-elements but we're currently only storing a single tag
# id in the vlan attribute.
for sub in c:
if sub.tag == 'tag' and sub.get('id'):
self.vlan = int(sub.get('id'))
break
elif c.tag == 'virtualport':
self.vporttype = c.get('type')
for sub in c:
if sub.tag == 'parameters':
for k, v in dict(sub.attrib).items():
self.add_vport_param(k, v)
elif c.tag == 'filterref':
self.filtername = c.get('filter')
for sub in c:
if sub.tag == 'parameter':
self.add_filter_param(sub.get('name'),
sub.get('value'))
elif c.tag == 'bandwidth':
for sub in c:
# Note that only average is mandatory, burst and peak are
# optional (and all are ints).
if sub.tag == 'inbound':
self.vif_inbound_average = int(sub.get('average'))
if sub.get('burst'):
self.vif_inbound_burst = int(sub.get('burst'))
if sub.get('peak'):
self.vif_inbound_peak = int(sub.get('peak'))
elif sub.tag == 'outbound':
self.vif_outbound_average = int(sub.get('average'))
if sub.get('burst'):
self.vif_outbound_burst = int(sub.get('burst'))
if sub.get('peak'):
self.vif_outbound_peak = int(sub.get('peak'))
elif c.tag == 'address':
obj = LibvirtConfigGuestDeviceAddress.parse_dom(c)
self.device_addr = obj
elif c.tag == 'mtu':
self.mtu = int(c.get('size'))
def add_filter_param(self, key, value):
self.filterparams.append({'key': key, 'value': value})
def add_vport_param(self, key, value):
self.vportparams.append({'key': key, 'value': value})
class LibvirtConfigGuestInput(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestInput, self).__init__(root_name="input",
**kwargs)
self.type = "tablet"
self.bus = "usb"
self.driver_iommu = False
def format_dom(self):
dev = super(LibvirtConfigGuestInput, self).format_dom()
dev.set("type", self.type)
dev.set("bus", self.bus)
if self.driver_iommu:
dev.append(etree.Element('driver', iommu="on"))
return dev
class LibvirtConfigGuestGraphics(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestGraphics, self).__init__(root_name="graphics",
**kwargs)
self.type = "vnc"
self.autoport = True
self.keymap = None
self.listen = None
def format_dom(self):
dev = super(LibvirtConfigGuestGraphics, self).format_dom()
dev.set("type", self.type)
if self.autoport:
dev.set("autoport", "yes")
else:
dev.set("autoport", "no")
if self.keymap:
dev.set("keymap", self.keymap)
if self.listen:
dev.set("listen", self.listen)
return dev
class LibvirtConfigSeclabel(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigSeclabel, self).__init__(root_name="seclabel",
**kwargs)
self.type = 'dynamic'
self.baselabel = None
def format_dom(self):
seclabel = super(LibvirtConfigSeclabel, self).format_dom()
seclabel.set('type', self.type)
if self.baselabel:
seclabel.append(self._text_node("baselabel", self.baselabel))
return seclabel
class LibvirtConfigGuestVideo(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestVideo, self).__init__(root_name="video",
**kwargs)
self.type = 'cirrus'
self.vram = None
self.heads = None
self.driver_iommu = False
@property
def uses_virtio(self):
return 'virtio' == self.type
def format_dom(self):
dev = super(LibvirtConfigGuestVideo, self).format_dom()
model = etree.Element("model")
model.set("type", self.type)
if self.vram:
model.set("vram", str(self.vram))
if self.heads:
model.set("heads", str(self.heads))
dev.append(model)
if self.driver_iommu:
dev.append(etree.Element("driver", iommu="on"))
return dev
class LibvirtConfigMemoryBalloon(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigMemoryBalloon, self).__init__(
root_name='memballoon',
**kwargs)
self.model = None
self.period = None
self.driver_iommu = False
@property
def uses_virtio(self):
return 'virtio' == self.model
def format_dom(self):
dev = super(LibvirtConfigMemoryBalloon, self).format_dom()
dev.set('model', str(self.model))
if self.period is not None:
dev.append(etree.Element('stats', period=str(self.period)))
if self.driver_iommu:
dev.append(etree.Element('driver', iommu='on'))
return dev
class LibvirtConfigGuestController(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestController,
self).__init__(root_name="controller", **kwargs)
self.type = None
self.index = None
self.model = None
self.driver_iommu = False
@property
def uses_virtio(self):
model_is_virtio = 'virtio-scsi' == self.model
type_is_virtio = 'virtio-serial' == self.type
return model_is_virtio or type_is_virtio
def format_dom(self):
controller = super(LibvirtConfigGuestController, self).format_dom()
controller.set("type", self.type)
if self.index is not None:
controller.set("index", str(self.index))
if self.model:
controller.set("model", str(self.model))
if self.driver_iommu:
controller.append(etree.Element("driver", iommu="on"))
return controller
class LibvirtConfigGuestUSBHostController(LibvirtConfigGuestController):
def __init__(self, **kwargs):
super(LibvirtConfigGuestUSBHostController, self).__init__(**kwargs)
self.type = 'usb'
class LibvirtConfigGuestPCIeRootController(LibvirtConfigGuestController):
def __init__(self, **kwargs):
super(LibvirtConfigGuestPCIeRootController, self).\
__init__(**kwargs)
self.type = 'pci'
self.model = 'pcie-root'
class LibvirtConfigGuestPCIeRootPortController(LibvirtConfigGuestController):
def __init__(self, **kwargs):
super(LibvirtConfigGuestPCIeRootPortController, self).\
__init__(**kwargs)
self.type = 'pci'
self.model = 'pcie-root-port'
class LibvirtConfigGuestHostdev(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestHostdev, self).\
__init__(root_name="hostdev", **kwargs)
self.mode = kwargs.get('mode')
self.type = kwargs.get('type')
        # The managed attribute is only used by PCI devices, but mediated
        # devices need to say managed='no'.
        self.managed = kwargs.get('managed', 'yes')
def format_dom(self):
dev = super(LibvirtConfigGuestHostdev, self).format_dom()
dev.set("mode", self.mode)
dev.set("type", self.type)
dev.set("managed", self.managed)
return dev
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestHostdev, self).parse_dom(xmldoc)
self.mode = xmldoc.get('mode')
self.type = xmldoc.get('type')
self.managed = xmldoc.get('managed')
return list(xmldoc)
class LibvirtConfigGuestHostdevPCI(LibvirtConfigGuestHostdev):
def __init__(self, **kwargs):
super(LibvirtConfigGuestHostdevPCI, self).\
__init__(mode='subsystem', type='pci',
**kwargs)
        # These are returned from libvirt as hexadecimal strings with a 0x
        # prefix, even though they have different meaningful ranges: domain
        # is 16 bit, bus 8 bit, slot 5 bit, and function 3 bit. Nova, on the
        # other hand, generates these values without the 0x prefix.
self.domain = None
self.bus = None
self.slot = None
self.function = None
def __eq__(self, other):
if not isinstance(other, LibvirtConfigGuestHostdevPCI):
return False
        # NOTE(gibi): nova generates hex strings without the 0x prefix but
        # libvirt uses that prefix when returning the config, so we need to
        # normalize the strings before comparison.
return (
int(self.domain, 16) == int(other.domain, 16) and
int(self.bus, 16) == int(other.bus, 16) and
int(self.slot, 16) == int(other.slot, 16) and
int(self.function, 16) == int(other.function, 16))
def format_dom(self):
dev = super(LibvirtConfigGuestHostdevPCI, self).format_dom()
address = etree.Element(
"address",
domain=self.domain if self.domain.startswith('0x')
else '0x' + self.domain,
bus=self.bus if self.bus.startswith('0x') else '0x' + self.bus,
slot=self.slot if self.slot.startswith('0x') else '0x' + self.slot,
function=self.function if self.function.startswith('0x')
else '0x' + self.function)
source = etree.Element("source")
source.append(address)
dev.append(source)
return dev
def parse_dom(self, xmldoc):
childs = super(LibvirtConfigGuestHostdevPCI, self).parse_dom(xmldoc)
for c in childs:
if c.tag == "source":
for sub in c:
if sub.tag == 'address':
self.domain = sub.get('domain')
self.bus = sub.get('bus')
self.slot = sub.get('slot')
self.function = sub.get('function')
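# Illustrative sketch (not part of the original module): __eq__ normalizes
# the 0x prefix, so a nova-generated address compares equal to the same
# address as parsed back from libvirt:
def _example_hostdev_pci_eq():
    ours = LibvirtConfigGuestHostdevPCI()
    ours.domain, ours.bus = '0000', '81'
    ours.slot, ours.function = '00', '1'
    theirs = LibvirtConfigGuestHostdevPCI()
    theirs.domain, theirs.bus = '0x0000', '0x81'
    theirs.slot, theirs.function = '0x00', '0x1'
    return ours == theirs  # True: both sides are compared via int(x, 16)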
class LibvirtConfigGuestHostdevMDEV(LibvirtConfigGuestHostdev):
def __init__(self, **kwargs):
super(LibvirtConfigGuestHostdevMDEV, self).__init__(
mode='subsystem', type='mdev', managed='no', **kwargs)
# model attribute is only supported by mediated devices
self.model = kwargs.get('model', 'vfio-pci')
self.uuid = None
def format_dom(self):
dev = super(LibvirtConfigGuestHostdevMDEV, self).format_dom()
if self.model:
dev.set("model", self.model)
address = etree.Element("address", uuid=self.uuid)
source = etree.Element("source")
source.append(address)
dev.append(source)
return dev
def parse_dom(self, xmldoc):
children = super(LibvirtConfigGuestHostdevMDEV, self).parse_dom(xmldoc)
if xmldoc.get('model'):
self.model = xmldoc.get('model')
for c in children:
if c.tag == "source":
for sub in c:
if sub.tag == 'address':
self.uuid = sub.get('uuid')
return
class LibvirtConfigGuestCharBase(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCharBase, self).__init__(**kwargs)
self.type = "pty"
self.source_path = None
self.listen_port = None
self.listen_host = None
self.log = None
def format_dom(self):
dev = super(LibvirtConfigGuestCharBase, self).format_dom()
dev.set("type", self.type)
if self.type == "file":
dev.append(etree.Element("source", path=self.source_path))
elif self.type == "unix":
dev.append(etree.Element("source", mode="bind",
path=self.source_path))
elif self.type == "tcp":
dev.append(etree.Element("source", mode="bind",
host=self.listen_host,
service=str(self.listen_port)))
if self.log:
dev.append(self.log.format_dom())
return dev
class LibvirtConfigGuestChar(LibvirtConfigGuestCharBase):
def __init__(self, **kwargs):
super(LibvirtConfigGuestChar, self).__init__(**kwargs)
self.target_port = None
self.target_type = None
def format_dom(self):
dev = super(LibvirtConfigGuestChar, self).format_dom()
if self.target_port is not None or self.target_type is not None:
target = etree.Element("target")
if self.target_port is not None:
target.set("port", str(self.target_port))
if self.target_type is not None:
target.set("type", self.target_type)
dev.append(target)
return dev
class LibvirtConfigGuestCharDeviceLog(LibvirtConfigObject):
"""Represents a sub-element to a character device."""
def __init__(self, **kwargs):
super(LibvirtConfigGuestCharDeviceLog, self).__init__(root_name="log",
**kwargs)
self.file = None
self.append = "off"
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestCharDeviceLog, self).parse_dom(xmldoc)
self.file = xmldoc.get("file")
self.append = xmldoc.get("append")
def format_dom(self):
log = super(LibvirtConfigGuestCharDeviceLog, self).format_dom()
log.set("file", self.file)
log.set("append", self.append)
return log
class LibvirtConfigGuestSerial(LibvirtConfigGuestChar):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSerial, self).__init__(root_name="serial",
**kwargs)
class LibvirtConfigGuestConsole(LibvirtConfigGuestChar):
def __init__(self, **kwargs):
super(LibvirtConfigGuestConsole, self).__init__(root_name="console",
**kwargs)
class LibvirtConfigGuestChannel(LibvirtConfigGuestCharBase):
def __init__(self, **kwargs):
super(LibvirtConfigGuestChannel, self).__init__(root_name="channel",
**kwargs)
self.target_type = "virtio"
self.target_name = None
def format_dom(self):
dev = super(LibvirtConfigGuestChannel, self).format_dom()
target = etree.Element("target", type=self.target_type)
if self.target_name is not None:
target.set("name", self.target_name)
dev.append(target)
return dev
class LibvirtConfigGuestWatchdog(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestWatchdog, self).__init__(root_name="watchdog",
**kwargs)
self.model = 'i6300esb'
self.action = 'reset'
def format_dom(self):
dev = super(LibvirtConfigGuestWatchdog, self).format_dom()
dev.set('model', self.model)
dev.set('action', self.action)
return dev
class LibvirtConfigGuestCPUTuneVCPUPin(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUTuneVCPUPin, self).__init__(
root_name="vcpupin",
**kwargs)
self.id = None
self.cpuset = None
def format_dom(self):
root = super(LibvirtConfigGuestCPUTuneVCPUPin, self).format_dom()
root.set("vcpu", str(self.id))
if self.cpuset is not None:
root.set("cpuset",
hardware.format_cpu_spec(self.cpuset))
return root
class LibvirtConfigGuestCPUTuneEmulatorPin(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUTuneEmulatorPin, self).__init__(
root_name="emulatorpin",
**kwargs)
self.cpuset = None
def format_dom(self):
root = super(LibvirtConfigGuestCPUTuneEmulatorPin, self).format_dom()
if self.cpuset is not None:
root.set("cpuset",
hardware.format_cpu_spec(self.cpuset))
return root
class LibvirtConfigGuestCPUTuneVCPUSched(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUTuneVCPUSched, self).__init__(
root_name="vcpusched",
**kwargs)
self.vcpus = None
self.scheduler = None
self.priority = None
def format_dom(self):
root = super(LibvirtConfigGuestCPUTuneVCPUSched, self).format_dom()
if self.vcpus is not None:
root.set("vcpus",
hardware.format_cpu_spec(self.vcpus))
if self.scheduler is not None:
root.set("scheduler", self.scheduler)
if self.priority is not None:
root.set("priority", str(self.priority))
return root
class LibvirtConfigGuestCPUTune(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUTune, self).__init__(root_name="cputune",
**kwargs)
self.shares = None
self.quota = None
self.period = None
self.vcpupin = []
self.emulatorpin = None
self.vcpusched = []
def format_dom(self):
root = super(LibvirtConfigGuestCPUTune, self).format_dom()
if self.shares is not None:
root.append(self._text_node("shares", str(self.shares)))
if self.quota is not None:
root.append(self._text_node("quota", str(self.quota)))
if self.period is not None:
root.append(self._text_node("period", str(self.period)))
if self.emulatorpin is not None:
root.append(self.emulatorpin.format_dom())
for vcpu in self.vcpupin:
root.append(vcpu.format_dom())
for sched in self.vcpusched:
root.append(sched.format_dom())
return root
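# Illustrative sketch (not part of the original module): a cputune element
# with CPU shares and a single vCPU pinning. hardware.format_cpu_spec
# renders the cpuset as a range string; to_xml is assumed to come from the
# LibvirtConfigObject base class defined earlier in this module.
def _example_cputune():
    tune = LibvirtConfigGuestCPUTune()
    tune.shares = 1024
    pin = LibvirtConfigGuestCPUTuneVCPUPin()
    pin.id = 0
    pin.cpuset = set([0, 1])
    tune.vcpupin.append(pin)
    return tune.to_xml()
    # e.g. <cputune><shares>1024</shares><vcpupin vcpu="0" cpuset="0-1"/></cputune>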
class LibvirtConfigGuestMemoryBacking(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestMemoryBacking, self).__init__(
root_name="memoryBacking", **kwargs)
self.hugepages = []
self.sharedpages = True
self.locked = False
self.filesource = False
self.sharedaccess = False
self.allocateimmediate = False
self.discard = False
def format_dom(self):
root = super(LibvirtConfigGuestMemoryBacking, self).format_dom()
if self.hugepages:
hugepages = etree.Element("hugepages")
for item in self.hugepages:
hugepages.append(item.format_dom())
root.append(hugepages)
if not self.sharedpages:
root.append(etree.Element("nosharepages"))
if self.locked:
root.append(etree.Element("locked"))
if self.filesource:
root.append(etree.Element("source", type="file"))
if self.sharedaccess:
root.append(etree.Element("access", mode="shared"))
if self.allocateimmediate:
root.append(etree.Element("allocation", mode="immediate"))
if self.discard:
root.append(etree.Element("discard"))
return root
class LibvirtConfigGuestMemoryBackingPage(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestMemoryBackingPage, self).__init__(
root_name="page", **kwargs)
self.size_kb = None
self.nodeset = None
def format_dom(self):
page = super(LibvirtConfigGuestMemoryBackingPage, self).format_dom()
page.set("size", str(self.size_kb))
page.set("nodeset", hardware.format_cpu_spec(self.nodeset))
page.set("unit", "KiB")
return page
class LibvirtConfigGuestMemoryTune(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestMemoryTune, self).__init__(
root_name="memtune", **kwargs)
self.hard_limit = None
self.soft_limit = None
self.swap_hard_limit = None
self.min_guarantee = None
def format_dom(self):
root = super(LibvirtConfigGuestMemoryTune, self).format_dom()
if self.hard_limit is not None:
root.append(self._text_node("hard_limit",
str(self.hard_limit),
unit="KiB"))
if self.soft_limit is not None:
root.append(self._text_node("soft_limit",
str(self.soft_limit),
unit="KiB"))
if self.swap_hard_limit is not None:
root.append(self._text_node("swap_hard_limit",
str(self.swap_hard_limit),
unit="KiB"))
if self.min_guarantee is not None:
root.append(self._text_node("min_guarantee",
str(self.min_guarantee),
unit="KiB"))
return root
class LibvirtConfigGuestNUMATuneMemory(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestNUMATuneMemory, self).__init__(
root_name="memory", **kwargs)
self.mode = "strict"
self.nodeset = []
def format_dom(self):
root = super(LibvirtConfigGuestNUMATuneMemory, self).format_dom()
root.set("mode", self.mode)
root.set("nodeset", hardware.format_cpu_spec(self.nodeset))
return root
class LibvirtConfigGuestNUMATuneMemNode(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestNUMATuneMemNode, self).__init__(
root_name="memnode", **kwargs)
self.cellid = 0
self.mode = "strict"
self.nodeset = []
def format_dom(self):
root = super(LibvirtConfigGuestNUMATuneMemNode, self).format_dom()
root.set("cellid", str(self.cellid))
root.set("mode", self.mode)
root.set("nodeset", hardware.format_cpu_spec(self.nodeset))
return root
class LibvirtConfigGuestNUMATune(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestNUMATune, self).__init__(
root_name="numatune", **kwargs)
self.memory = None
self.memnodes = []
def format_dom(self):
root = super(LibvirtConfigGuestNUMATune, self).format_dom()
if self.memory is not None:
root.append(self.memory.format_dom())
for node in self.memnodes:
root.append(node.format_dom())
return root
class LibvirtConfigGuestFeature(LibvirtConfigObject):
def __init__(self, name, **kwargs):
super(LibvirtConfigGuestFeature, self).__init__(root_name=name,
**kwargs)
class LibvirtConfigGuestFeatureACPI(LibvirtConfigGuestFeature):
def __init__(self, **kwargs):
super(LibvirtConfigGuestFeatureACPI, self).__init__("acpi",
**kwargs)
class LibvirtConfigGuestFeatureAPIC(LibvirtConfigGuestFeature):
def __init__(self, **kwargs):
super(LibvirtConfigGuestFeatureAPIC, self).__init__("apic",
**kwargs)
class LibvirtConfigGuestFeaturePAE(LibvirtConfigGuestFeature):
def __init__(self, **kwargs):
super(LibvirtConfigGuestFeaturePAE, self).__init__("pae",
**kwargs)
class LibvirtConfigGuestFeatureKvmHidden(LibvirtConfigGuestFeature):
def __init__(self, **kwargs):
super(LibvirtConfigGuestFeatureKvmHidden, self).__init__("kvm",
**kwargs)
def format_dom(self):
root = super(LibvirtConfigGuestFeatureKvmHidden, self).format_dom()
root.append(etree.Element("hidden", state="on"))
return root
class LibvirtConfigGuestFeaturePMU(LibvirtConfigGuestFeature):
def __init__(self, state, **kwargs):
super(LibvirtConfigGuestFeaturePMU, self).__init__("pmu", **kwargs)
        # NOTE(sean-k-mooney): bool_from_string is needed to handle the raw
        # flavor extra_spec value. bool_from_string internally checks if the
        # value is already a bool and returns it. As such it's safe to use
        # with the image metadata property too, so we call it unconditionally.
self.state = strutils.bool_from_string(state)
def format_dom(self):
root = super(LibvirtConfigGuestFeaturePMU, self).format_dom()
root.attrib['state'] = "on" if self.state else "off"
return root
class LibvirtConfigGuestFeatureHyperV(LibvirtConfigGuestFeature):
# QEMU requires at least this value to be set
MIN_SPINLOCK_RETRIES = 4095
# The spoofed vendor_id can be any alphanumeric string
SPOOFED_VENDOR_ID = "1234567890ab"
def __init__(self, **kwargs):
super(LibvirtConfigGuestFeatureHyperV, self).__init__("hyperv",
**kwargs)
self.relaxed = False
self.vapic = False
self.spinlocks = False
self.spinlock_retries = self.MIN_SPINLOCK_RETRIES
self.vendorid_spoof = False
self.vendorid = self.SPOOFED_VENDOR_ID
def format_dom(self):
root = super(LibvirtConfigGuestFeatureHyperV, self).format_dom()
if self.relaxed:
root.append(etree.Element("relaxed", state="on"))
if self.vapic:
root.append(etree.Element("vapic", state="on"))
if self.spinlocks:
root.append(etree.Element("spinlocks", state="on",
retries=str(self.spinlock_retries)))
if self.vendorid_spoof:
root.append(etree.Element("vendor_id", state="on",
value=self.vendorid))
return root
class LibvirtConfigGuestSEVLaunchSecurity(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSEVLaunchSecurity, self).__init__(
root_name='launchSecurity', **kwargs)
self.cbitpos = None
self.reduced_phys_bits = None
def format_dom(self):
root = super(LibvirtConfigGuestSEVLaunchSecurity, self).format_dom()
root.set('type', 'sev')
policy = etree.Element('policy')
policy.text = '0x0033' # hardcoded default according to the spec
root.append(policy)
cbitpos = etree.Element('cbitpos')
cbitpos.text = str(self.cbitpos)
root.append(cbitpos)
reducedPhysBits = etree.Element('reducedPhysBits')
reducedPhysBits.text = str(self.reduced_phys_bits)
root.append(reducedPhysBits)
return root
class LibvirtConfigGuest(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuest, self).__init__(root_name="domain",
**kwargs)
self.virt_type = None
self.uuid = None
self.name = None
self.memory = 500 * units.Mi
self.max_memory_size = None
self.max_memory_slots = 0
self.membacking = None
self.memtune = None
self.numatune = None
self.vcpus = 1
self.cpuset = None
self.cpu = None
self.cputune = None
self.features = []
self.clock = None
self.sysinfo = None
self.os_type = None
self.os_loader = None
self.os_loader_type = None
self.os_kernel = None
self.os_initrd = None
self.os_cmdline = None
self.os_init_env = {}
self.os_root = None
self.os_init_path = None
self.os_boot_dev = []
self.os_smbios = None
self.os_mach_type = None
self.os_bootmenu = False
self.devices = []
self.metadata = []
self.idmaps = []
self.perf_events = []
self.launch_security = None
def _format_basic_props(self, root):
root.append(self._text_node("uuid", self.uuid))
root.append(self._text_node("name", self.name))
root.append(self._text_node("memory", self.memory))
if self.max_memory_size is not None:
max_memory = self._text_node("maxMemory", self.max_memory_size)
max_memory.set("slots", str(self.max_memory_slots))
root.append(max_memory)
if self.membacking is not None:
root.append(self.membacking.format_dom())
if self.memtune is not None:
root.append(self.memtune.format_dom())
if self.numatune is not None:
root.append(self.numatune.format_dom())
if self.cpuset is not None:
vcpu = self._text_node("vcpu", self.vcpus)
vcpu.set("cpuset", hardware.format_cpu_spec(self.cpuset))
root.append(vcpu)
else:
root.append(self._text_node("vcpu", self.vcpus))
if len(self.metadata) > 0:
metadata = etree.Element("metadata")
for m in self.metadata:
metadata.append(m.format_dom())
root.append(metadata)
def _format_os(self, root):
os = etree.Element("os")
type_node = self._text_node("type", self.os_type)
if self.os_mach_type is not None:
type_node.set("machine", self.os_mach_type)
os.append(type_node)
if self.os_kernel is not None:
os.append(self._text_node("kernel", self.os_kernel))
if self.os_loader is not None:
# Generate XML nodes for UEFI boot.
if self.os_loader_type == "pflash":
loader = self._text_node("loader", self.os_loader)
loader.set("type", "pflash")
loader.set("readonly", "yes")
os.append(loader)
else:
os.append(self._text_node("loader", self.os_loader))
if self.os_initrd is not None:
os.append(self._text_node("initrd", self.os_initrd))
if self.os_cmdline is not None:
os.append(self._text_node("cmdline", self.os_cmdline))
if self.os_root is not None:
os.append(self._text_node("root", self.os_root))
if self.os_init_path is not None:
os.append(self._text_node("init", self.os_init_path))
for name, value in self.os_init_env.items():
initenv = self._text_node("initenv", value)
initenv.set("name", name)
os.append(initenv)
for boot_dev in self.os_boot_dev:
os.append(etree.Element("boot", dev=boot_dev))
if self.os_smbios is not None:
os.append(self.os_smbios.format_dom())
if self.os_bootmenu:
os.append(etree.Element("bootmenu", enable="yes"))
root.append(os)
def _format_features(self, root):
if len(self.features) > 0:
features = etree.Element("features")
for feat in self.features:
features.append(feat.format_dom())
root.append(features)
def _format_devices(self, root):
if len(self.devices) == 0:
return
devices = etree.Element("devices")
for dev in self.devices:
devices.append(dev.format_dom())
root.append(devices)
def _format_idmaps(self, root):
if len(self.idmaps) == 0:
return
idmaps = etree.Element("idmap")
for idmap in self.idmaps:
idmaps.append(idmap.format_dom())
root.append(idmaps)
def _format_perf_events(self, root):
if len(self.perf_events) == 0:
return
perfs = etree.Element("perf")
for pe in self.perf_events:
event = etree.Element("event", name=pe, enabled="yes")
perfs.append(event)
root.append(perfs)
def _format_sev(self, root):
if self.launch_security is not None:
root.append(self.launch_security.format_dom())
def format_dom(self):
root = super(LibvirtConfigGuest, self).format_dom()
root.set("type", self.virt_type)
self._format_basic_props(root)
if self.sysinfo is not None:
root.append(self.sysinfo.format_dom())
self._format_os(root)
self._format_features(root)
if self.cputune is not None:
root.append(self.cputune.format_dom())
if self.clock is not None:
root.append(self.clock.format_dom())
if self.cpu is not None:
root.append(self.cpu.format_dom())
self._format_devices(root)
self._format_idmaps(root)
self._format_perf_events(root)
self._format_sev(root)
return root
def _parse_basic_props(self, xmldoc):
        # membacking, memtune, numatune and metadata are skipped because the
        # corresponding config types do not implement the parse_dom method.
if xmldoc.tag == 'uuid':
self.uuid = xmldoc.text
elif xmldoc.tag == 'name':
self.name = xmldoc.text
elif xmldoc.tag == 'memory':
self.memory = int(xmldoc.text)
elif xmldoc.tag == 'vcpu':
self.vcpus = int(xmldoc.text)
if xmldoc.get('cpuset') is not None:
self.cpuset = hardware.parse_cpu_spec(xmldoc.get('cpuset'))
def _parse_os(self, xmldoc):
        # smbios is skipped because LibvirtConfigGuestSMBIOS does not
        # implement the parse_dom method.
for c in xmldoc:
if c.tag == 'type':
self.os_type = c.text
self.os_mach_type = c.get('machine')
elif c.tag == 'kernel':
self.os_kernel = c.text
elif c.tag == 'loader':
self.os_loader = c.text
if c.get('type') == 'pflash':
self.os_loader_type = 'pflash'
elif c.tag == 'initrd':
self.os_initrd = c.text
elif c.tag == 'cmdline':
self.os_cmdline = c.text
elif c.tag == 'root':
self.os_root = c.text
elif c.tag == 'init':
self.os_init_path = c.text
elif c.tag == 'boot':
self.os_boot_dev.append(c.get('dev'))
elif c.tag == 'bootmenu':
if c.get('enable') == 'yes':
self.os_bootmenu = True
elif c.tag == 'initenv':
self.os_init_env[c.get('name')] = c.text
def parse_dom(self, xmldoc):
self.virt_type = xmldoc.get('type')
        # Note: this covers only: LibvirtConfigGuestDisk
        #                         LibvirtConfigGuestFilesys
        #                         LibvirtConfigGuestHostdevPCI
        #                         LibvirtConfigGuestHostdevMDEV
        #                         LibvirtConfigGuestInterface
        #                         LibvirtConfigGuestUIDMap
        #                         LibvirtConfigGuestGIDMap
        #                         LibvirtConfigGuestCPU
        #                         LibvirtConfigGuestVPMEM
for c in xmldoc:
if c.tag == 'devices':
for d in c:
if d.tag == 'disk':
obj = LibvirtConfigGuestDisk()
obj.parse_dom(d)
self.devices.append(obj)
elif d.tag == 'filesystem':
obj = LibvirtConfigGuestFilesys()
obj.parse_dom(d)
self.devices.append(obj)
elif d.tag == 'hostdev' and d.get('type') == 'pci':
obj = LibvirtConfigGuestHostdevPCI()
obj.parse_dom(d)
self.devices.append(obj)
elif d.tag == 'hostdev' and d.get('type') == 'mdev':
obj = LibvirtConfigGuestHostdevMDEV()
obj.parse_dom(d)
self.devices.append(obj)
elif d.tag == 'interface':
obj = LibvirtConfigGuestInterface()
obj.parse_dom(d)
self.devices.append(obj)
elif d.tag == 'memory' and d.get('model') == 'nvdimm':
obj = LibvirtConfigGuestVPMEM()
obj.parse_dom(d)
self.devices.append(obj)
if c.tag == 'idmap':
for idmap in c:
obj = None
if idmap.tag == 'uid':
obj = LibvirtConfigGuestUIDMap()
elif idmap.tag == 'gid':
obj = LibvirtConfigGuestGIDMap()
if obj:
obj.parse_dom(idmap)
self.idmaps.append(obj)
elif c.tag == 'cpu':
obj = LibvirtConfigGuestCPU()
obj.parse_dom(c)
self.cpu = obj
elif c.tag == 'perf':
for p in c:
                    if p.get('enabled') == 'yes':
self.add_perf_event(p.get('name'))
elif c.tag == 'os':
self._parse_os(c)
else:
self._parse_basic_props(c)
def add_device(self, dev):
self.devices.append(dev)
def add_perf_event(self, event):
self.perf_events.append(event)
def set_clock(self, clk):
self.clock = clk
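# Illustrative sketch (not part of the original module): a minimal guest
# definition serialized to XML. to_xml is assumed to come from the
# LibvirtConfigObject base class; all values below are placeholders.
def _example_minimal_guest():
    guest = LibvirtConfigGuest()
    guest.virt_type = 'kvm'
    guest.uuid = '00000000-0000-0000-0000-000000000000'
    guest.name = 'example-instance'
    guest.memory = 1024 * units.Ki  # 1 GiB expressed in KiB
    guest.vcpus = 2
    guest.os_type = 'hvm'
    guest.os_boot_dev.append('hd')
    return guest.to_xml()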
class LibvirtConfigGuestSnapshot(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSnapshot, self).__init__(
root_name="domainsnapshot",
**kwargs)
self.name = None
self.disks = []
def format_dom(self):
ss = super(LibvirtConfigGuestSnapshot, self).format_dom()
if self.name:
ss.append(self._text_node("name", self.name))
disks = etree.Element('disks')
for disk in self.disks:
disks.append(disk.format_dom())
ss.append(disks)
return ss
def add_disk(self, disk):
self.disks.append(disk)
class LibvirtConfigNodeDevice(LibvirtConfigObject):
"""Libvirt Node Devices parser."""
def __init__(self, **kwargs):
super(LibvirtConfigNodeDevice, self).__init__(root_name="device",
**kwargs)
self.name = None
self.parent = None
self.pci_capability = None
self.mdev_information = None
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDevice, self).parse_dom(xmldoc)
for c in xmldoc:
if c.tag == "name":
self.name = c.text
elif c.tag == "parent":
self.parent = c.text
elif c.tag == "capability" and c.get("type") in ['pci', 'net']:
pcicap = LibvirtConfigNodeDevicePciCap()
pcicap.parse_dom(c)
self.pci_capability = pcicap
elif c.tag == "capability" and c.get("type") in ['mdev']:
mdev_info = LibvirtConfigNodeDeviceMdevInformation()
mdev_info.parse_dom(c)
self.mdev_information = mdev_info
class LibvirtConfigNodeDevicePciCap(LibvirtConfigObject):
"""Libvirt Node Devices pci capability parser."""
def __init__(self, **kwargs):
super(LibvirtConfigNodeDevicePciCap, self).__init__(
root_name="capability", **kwargs)
self.domain = None
self.bus = None
self.slot = None
self.function = None
self.product = None
self.product_id = None
self.vendor = None
self.vendor_id = None
self.numa_node = None
self.fun_capability = []
self.mdev_capability = []
self.interface = None
self.address = None
self.link_state = None
self.features = []
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDevicePciCap, self).parse_dom(xmldoc)
for c in xmldoc:
if c.tag == "domain":
self.domain = int(c.text)
elif c.tag == "slot":
self.slot = int(c.text)
elif c.tag == "bus":
self.bus = int(c.text)
elif c.tag == "function":
self.function = int(c.text)
elif c.tag == "product":
self.product = c.text
self.product_id = int(c.get('id'), 16)
elif c.tag == "vendor":
self.vendor = c.text
self.vendor_id = int(c.get('id'), 16)
elif c.tag == "numa":
self.numa_node = int(c.get('node'))
elif c.tag == "interface":
self.interface = c.text
elif c.tag == "address":
self.address = c.text
elif c.tag == "link":
self.link_state = c.get('state')
elif c.tag == "feature":
self.features.append(c.get('name'))
elif c.tag == "capability" and c.get('type') in \
('virt_functions', 'phys_function'):
funcap = LibvirtConfigNodeDevicePciSubFunctionCap()
funcap.parse_dom(c)
self.fun_capability.append(funcap)
elif c.tag == "capability" and c.get('type') in ('mdev_types',):
mdevcap = LibvirtConfigNodeDeviceMdevCapableSubFunctionCap()
mdevcap.parse_dom(c)
self.mdev_capability.append(mdevcap)
class LibvirtConfigNodeDevicePciSubFunctionCap(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigNodeDevicePciSubFunctionCap, self).__init__(
root_name="capability", **kwargs)
self.type = None
self.device_addrs = list() # list of tuple (domain,bus,slot,function)
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDevicePciSubFunctionCap, self).parse_dom(xmldoc)
self.type = xmldoc.get("type")
for c in xmldoc:
if c.tag == "address":
self.device_addrs.append((int(c.get('domain'), 16),
int(c.get('bus'), 16),
int(c.get('slot'), 16),
int(c.get('function'), 16)))
class LibvirtConfigNodeDeviceMdevCapableSubFunctionCap(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigNodeDeviceMdevCapableSubFunctionCap, self).__init__(
root_name="capability", **kwargs)
# mdev_types is a list of dictionaries where each item looks like:
# {'type': 'nvidia-11', 'name': 'GRID M60-0B', 'deviceAPI': 'vfio-pci',
# 'availableInstances': 16}
self.mdev_types = list()
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDeviceMdevCapableSubFunctionCap,
self).parse_dom(xmldoc)
for c in xmldoc:
if c.tag == "type":
mdev_type = {'type': c.get('id')}
for e in c:
mdev_type[e.tag] = (int(e.text)
if e.tag == 'availableInstances'
else e.text)
self.mdev_types.append(mdev_type)
class LibvirtConfigNodeDeviceMdevInformation(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigNodeDeviceMdevInformation, self).__init__(
root_name="capability", **kwargs)
self.type = None
self.iommu_group = None
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDeviceMdevInformation,
self).parse_dom(xmldoc)
for c in xmldoc:
if c.tag == "type":
self.type = c.get('id')
if c.tag == "iommuGroup":
self.iommu_group = int(c.get('number'))
class LibvirtConfigGuestRng(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestRng, self).__init__(root_name="rng",
**kwargs)
self.device_model = 'virtio'
self.model = 'random'
self.backend = None
self.rate_period = None
self.rate_bytes = None
self.driver_iommu = False
@property
def uses_virtio(self):
return 'virtio' == self.device_model
def format_dom(self):
dev = super(LibvirtConfigGuestRng, self).format_dom()
dev.set('model', self.device_model)
backend = etree.Element("backend")
backend.set("model", self.model)
backend.text = self.backend
if self.rate_period and self.rate_bytes:
rate = etree.Element("rate")
rate.set("period", str(self.rate_period))
rate.set("bytes", str(self.rate_bytes))
dev.append(rate)
dev.append(backend)
if self.driver_iommu:
dev.append(etree.Element('driver', iommu="on"))
return dev
class LibvirtConfigGuestMetaNovaInstance(LibvirtConfigObject):
def __init__(self):
super(LibvirtConfigGuestMetaNovaInstance,
self).__init__(root_name="instance",
ns_prefix="nova",
ns_uri=NOVA_NS)
self.package = None
self.flavor = None
self.name = None
self.creationTime = None
self.owner = None
self.roottype = None
self.rootid = None
def format_dom(self):
meta = super(LibvirtConfigGuestMetaNovaInstance, self).format_dom()
pkg = self._new_node("package")
pkg.set("version", self.package)
meta.append(pkg)
if self.name is not None:
meta.append(self._text_node("name", self.name))
if self.creationTime is not None:
timestr = time.strftime("%Y-%m-%d %H:%M:%S",
time.gmtime(self.creationTime))
meta.append(self._text_node("creationTime", timestr))
if self.flavor is not None:
meta.append(self.flavor.format_dom())
if self.owner is not None:
meta.append(self.owner.format_dom())
if self.roottype is not None and self.rootid is not None:
root = self._new_node("root")
root.set("type", self.roottype)
root.set("uuid", str(self.rootid))
meta.append(root)
return meta
class LibvirtConfigGuestMetaNovaFlavor(LibvirtConfigObject):
def __init__(self):
super(LibvirtConfigGuestMetaNovaFlavor,
self).__init__(root_name="flavor",
ns_prefix="nova",
ns_uri=NOVA_NS)
self.name = None
self.memory = None
self.disk = None
self.swap = None
self.ephemeral = None
self.vcpus = None
def format_dom(self):
meta = super(LibvirtConfigGuestMetaNovaFlavor, self).format_dom()
meta.set("name", self.name)
if self.memory is not None:
meta.append(self._text_node("memory", str(self.memory)))
if self.disk is not None:
meta.append(self._text_node("disk", str(self.disk)))
if self.swap is not None:
meta.append(self._text_node("swap", str(self.swap)))
if self.ephemeral is not None:
meta.append(self._text_node("ephemeral", str(self.ephemeral)))
if self.vcpus is not None:
meta.append(self._text_node("vcpus", str(self.vcpus)))
return meta
class LibvirtConfigGuestMetaNovaOwner(LibvirtConfigObject):
def __init__(self):
super(LibvirtConfigGuestMetaNovaOwner,
self).__init__(root_name="owner",
ns_prefix="nova",
ns_uri=NOVA_NS)
self.userid = None
self.username = None
self.projectid = None
self.projectname = None
def format_dom(self):
meta = super(LibvirtConfigGuestMetaNovaOwner, self).format_dom()
if self.userid is not None and self.username is not None:
user = self._text_node("user", self.username)
user.set("uuid", self.userid)
meta.append(user)
if self.projectid is not None and self.projectname is not None:
project = self._text_node("project", self.projectname)
project.set("uuid", self.projectid)
meta.append(project)
return meta
class LibvirtConfigSecret(LibvirtConfigObject):
def __init__(self):
super(LibvirtConfigSecret,
self).__init__(root_name="secret")
self.ephemeral = False
self.private = False
self.description = None
self.uuid = None
self.usage_type = None
self.usage_id = None
def get_yes_no_str(self, value):
if value:
return 'yes'
return 'no'
def format_dom(self):
root = super(LibvirtConfigSecret, self).format_dom()
root.set("ephemeral", self.get_yes_no_str(self.ephemeral))
root.set("private", self.get_yes_no_str(self.private))
if self.description is not None:
root.append(self._text_node("description", str(self.description)))
if self.uuid is not None:
root.append(self._text_node("uuid", str(self.uuid)))
usage = self._new_node("usage")
usage.set("type", self.usage_type)
if self.usage_type in ('ceph', 'vtpm'):
usage.append(self._text_node('name', str(self.usage_id)))
elif self.usage_type == 'iscsi':
usage.append(self._text_node('target', str(self.usage_id)))
elif self.usage_type == 'volume':
usage.append(self._text_node('volume', str(self.usage_id)))
root.append(usage)
return root
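# Illustrative sketch (not part of the original module): a ceph usage secret
# as it would be defined for libvirt. The UUID and usage name below are
# placeholders; to_xml is assumed from LibvirtConfigObject.
def _example_ceph_secret():
    secret = LibvirtConfigSecret()
    secret.ephemeral = False
    secret.private = False
    secret.uuid = '00000000-0000-0000-0000-000000000000'
    secret.usage_type = 'ceph'
    secret.usage_id = 'client.admin secret'
    return secret.to_xml()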
class LibvirtConfigGuestVPMEM(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestVPMEM, self).__init__(
root_name="memory", **kwargs)
self.model = "nvdimm"
self.access = "shared"
self.source_path = kwargs.get("devpath", "")
self.align_size = kwargs.get("align_kb", 0)
self.pmem = True
self.target_size = kwargs.get("size_kb", 0)
self.target_node = 0
self.label_size = 2 * units.Ki
def format_dom(self):
memory = super(LibvirtConfigGuestVPMEM, self).format_dom()
memory.set("model", self.model)
memory.set("access", self.access)
source = etree.Element("source")
source.append(self._text_node("path", self.source_path))
source.append(self._text_node("alignsize", self.align_size))
if self.pmem is True:
source.append(etree.Element("pmem"))
target = etree.Element("target")
target.append(self._text_node("size", self.target_size))
target.append(self._text_node("node", self.target_node))
label = etree.Element("label")
label.append(self._text_node("size", self.label_size))
target.append(label)
memory.append(source)
memory.append(target)
return memory
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestVPMEM, self).parse_dom(xmldoc)
self.model = xmldoc.get("model")
self.access = xmldoc.get("access")
for c in list(xmldoc):
if c.tag == "source":
for sub in list(c):
if sub.tag == "path":
self.source_path = sub.text
if sub.tag == "alignsize":
self.align_size = sub.text
elif c.tag == "target":
for sub in list(c):
if sub.tag == "size":
self.target_size = sub.text
class LibvirtConfigGuestSound(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSound, self).__init__(root_name="sound",
**kwargs)
self.model = "ich6"
self.codec_type = "micro"
#self.address_type = "pci"
#self.address_domain = "0x0000"
#self.address_bus = "0x00"
#self.address_slot = "0x04"
#self.address_function = "0x0"
def format_dom(self):
dev = super(LibvirtConfigGuestSound, self).format_dom()
dev.set("model", self.model)
drv_codec = etree.Element("codec")
drv_codec.set("type", self.codec_type)
#drv_address = etree.Element("address")
#drv_address.set("type", self.address_type)
#drv_address.set("domain", self.address_domain)
#drv_address.set("bus", self.address_bus)
#drv_address.set("slot", self.address_slot)
#drv_address.set("function", self.address_function)
dev.append(drv_codec)
return dev
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/server/plugins.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
from king_phisher import errors
from king_phisher import find
from king_phisher import plugins
class ServerPlugin(plugins.PluginBase):
"""
The base object to be inherited by plugins that are loaded into the King
Phisher server. This provides a convenient interface for interacting with
the runtime.
"""
_logging_prefix = 'KingPhisher.Plugins.Server.'
def __init__(self, root_config):
self.root_config = root_config
"""A reference to the main server instance :py:attr:`~king_phisher.server.server.KingPhisherServer.config`."""
self.server = None
"""A reference to the :py:class:`~king_phisher.server.server.KingPhisherServer` instance. Only available if the instance has been created."""
super(ServerPlugin, self).__init__()
for option in self.options:
if self.config[option.name] is None:
raise errors.KingPhisherPluginError(self.name, 'missing required option: ' + option.name)
@property
def config(self):
"""
A dictionary that can be used by this plugin to access it's
configuration. Any changes to this configuration will be lost with the
server restarts.
"""
config = self.root_config.get('server.plugins').get(self.name)
if config is None:
config = {}
self.root_config.get('server.plugins')[self.name] = config
return config
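# Illustrative sketch (not part of the original module): a minimal plugin
# that reads one of its options via self.config. The attribute names below
# follow King Phisher's PluginBase conventions, but the option name and
# metadata are assumptions for the purpose of this example.
class ExamplePlugin(ServerPlugin):
    authors = ['example author']
    title = 'Example'
    description = 'A minimal example server plugin.'
    version = '1.0'
    def initialize(self):
        greeting = self.config.get('greeting', 'hello')
        self.logger.info('example plugin loaded, greeting: ' + greeting)
        return True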
class ServerPluginManager(plugins.PluginManagerBase):
"""
The manager for plugins loaded into the King Phisher server application.
"""
_plugin_klass = ServerPlugin
def __init__(self, config):
self.config = config
path = self._get_path()
self._server = None
super(ServerPluginManager, self).__init__(path, (config,))
for plugin in config.get_if_exists('server.plugins', {}).keys():
# load the plugin
try:
self.load(plugin)
except Exception:
self.logger.critical('failed to load plugin: ' + plugin, exc_info=True)
raise errors.KingPhisherPluginError(plugin, 'failed to load')
# check compatibility
klass = self[plugin]
for req_type, req_value, req_met in klass.compatibility:
req_type = req_type.lower()
if req_met:
self.logger.debug("plugin {0} requirement {1} ({2}) met".format(plugin, req_type, req_value))
continue
self.logger.warning("plugin {0} unmet requirement {1} ({2})".format(plugin, req_type, req_value))
raise errors.KingPhisherPluginError(plugin, 'failed to meet requirement: ' + req_type)
# enable the plugin
try:
self.enable(plugin)
except errors.KingPhisherPluginError as error:
raise error
except Exception:
self.logger.critical('failed to enable plugin: ' + plugin, exc_info=True)
raise errors.KingPhisherPluginError(plugin, 'failed to enable')
def _get_path(self):
path = [find.find_data_directory('plugins')]
extra_dirs = self.config.get_if_exists('server.plugin_directories', [])
if isinstance(extra_dirs, str):
extra_dirs = [extra_dirs]
elif not isinstance(extra_dirs, list):
raise errors.KingPhisherInputValidationError('configuration setting server.plugin_directories must be a list')
for directory in extra_dirs:
if not os.path.isdir(directory):
continue
path.append(directory)
return path
@property
def server(self):
return self._server
@server.setter
def server(self, value):
self._server = value
for _, plugin in self:
plugin.server = value
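# For reference, a sketch of the server configuration consumed above
# (YAML; the directory, plugin, and option names are hypothetical):
#
#   server:
#     plugin_directories: /opt/king-phisher/plugins
#     plugins:
#       example:
#         greeting: hi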
|
import click
import sys
from web3 import Web3
from plasma.client.client import Client
from plasma.utils import utils
@click.command()
@click.option('--token_address', help="The ethereum address of the pdex token smart contract", required=True)
@click.option('--root_chain_address', help="The ethereum address of the root chain smart contract", required=True)
def main(token_address, root_chain_address):
client = Client(root_chain_address)
maker_address = '0x0af467F2f6c20e3543B8a2a453e70DF034714aEB'
make_order_hex = client.get_makeorder_txn(maker_address, token_address, Web3.toWei(10, 'ether'), Web3.toWei(1, 'ether'))
    if make_order_hex is None:
print("No valid utxos to create make order txn")
sys.exit(0)
make_order_hash = utils.hashPersonalMessage(make_order_hex)
signature = utils.sign(make_order_hash, bytes(bytearray.fromhex('46155f862a2249f0ee6d69122ead4ec56cf12a71049a3105a90b9708d7103f77')))
client.submit_signed_makeorder_txn(maker_address, token_address, Web3.toWei(10, 'ether'), Web3.toWei(1, 'ether'), make_order_hex, signature.hex())
if __name__ == '__main__':
main()
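# Illustrative usage (the script name and addresses are placeholders):
#   python make_order.py \
#       --token_address 0x0000000000000000000000000000000000000000 \
#       --root_chain_address 0x0000000000000000000000000000000000000000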
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from .feedback import Feedback
class SearchResultFeedback(Feedback):
"""
Database model representing feedback about search results (e.g. empty results).
"""
search_query = models.CharField(max_length=1000, verbose_name=_("search term"))
@property
def object_name(self):
"""
This property returns the name of the object this feedback comments on.
:return: The name of the object this feedback refers to
:rtype: str
"""
return _("Search results for {}").format(self.search_query)
@property
def object_url(self):
"""
This property returns the url to the object this feedback comments on.
:return: The url to the referred object
:rtype: str
"""
return ""
@property
def related_feedback(self):
"""
This property returns all feedback entries which relate to the same object and have the same is_technical value.
:return: The queryset of related feedback
:rtype: ~django.db.models.query.QuerySet [ ~integreat_cms.cms.models.feedback.search_result_feedback.SearchResultFeedback ]
"""
return SearchResultFeedback.objects.filter(
region=self.region,
language=self.language,
search_query=self.search_query,
is_technical=self.is_technical,
)
class Meta:
#: The verbose name of the model
verbose_name = _("search result feedback")
#: The plural verbose name of the model
verbose_name_plural = _("search result feedback")
#: The default permissions for this model
default_permissions = ()
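# Illustrative usage (not part of the model), e.g. from a Django shell:
#
#   feedback = SearchResultFeedback.objects.first()
#   if feedback is not None:
#       print(feedback.object_name, feedback.related_feedback.count())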
|
#!/usr/bin/env python
# coding: utf-8
import codecs
import sys
import sklearn as sk
import pandas as pd
import numpy as np
import math
from sklearn import preprocessing
from sklearn.decomposition import PCA
from src.pca.algoritmo_QR import eigenvectores_eigenvalores_QR_vf
from src.pca.metodo_potencia_deflation import power_iteration
from src.pca.metodo_potencia_deflation import power_deflation
def PCA_from_sklearn(X):
"""
componentes_principales(X): Función que devuelve las componentes principales.
Parámetros
----------
n_components: número de componentes.
svd_solver: str {‘auto’, ‘full’, ‘arpack’, ‘randomized’}
Se elige 'full', lo que significa que se ejecuta completamente SVD llamando al
solucionador estándar LAPACK a través de scipy.linalg.svd y se seleccionan los componentes mediante postprocessing.
Atributos
---------
varianza_explicada: porcentaje de varianza explicada por cada componente.
valores_singulares: valores singulares correspondientes a cada componente.
pca.components_: ejes principales que representan las direcciones de máxima varianza en los datos.
eigenvalues: son los valores propios utilizando la matriz de covarianza.
Método
---------
fit_transform: ajusta el modelo a los datos y aplica la reducción de dimensionalidad en los datos.
"""
X = pd.DataFrame(X)
n_components = len(X.columns)
pca_1 = PCA(n_components, svd_solver='full')
componentesprincipales_1 = pca_1.fit_transform(X)
pca_1.components_
var_exp = pca_1.explained_variance_ratio_
    ## The number of components is selected from the cumulative explained
    ## variance of the components, which must reach 80%.
var_acumulada = var_exp.cumsum()
conteo = (var_acumulada) < 0.8
n_componentes = conteo.sum() + 1
pca = PCA(n_componentes, svd_solver='full')
componentesprincipales = pca.fit_transform(X)
pca.components_
varianza_explicada = pca.explained_variance_ratio_
eigenvalues = pca.explained_variance_
val_sing = pca.singular_values_
return pca, varianza_explicada, componentesprincipales, val_sing, pca.components_, eigenvalues
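# Illustrative usage (synthetic data, not part of the original module):
#
#   rng = np.random.default_rng(0)
#   X = rng.normal(size=(100, 5))
#   pca, var_exp, Z, sing_vals, comps, eigvals = PCA_from_sklearn(X)
#   print(Z.shape, var_exp.cumsum())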
def PCA_from_SVD(A):
"""
Función para PCA a partir de la SVD de numpy
params: A matriz de datos
num_componentes número de componentes deseados
return: valores_singulares Los valores singulares de la descomposición SVD
componentes Los coeficientes para calcular los componentes principales
Z Los datos transformados (componentes principales)
varianza_explicada La varianza explicada por cada componente principal
"""
# Centrar los datos
A = np.array(A) # convertir los datos a un numpy array por si vienen de un DataFrame
A_centered = A - A.mean(axis=0)
# Calcular SVD
U, S, Vt = np.linalg.svd(A_centered, full_matrices=False)
# Los valores singulares
valores_singulares = S
# Los componentes (coeficientes)
componentes = ((Vt))
# Los datos transformados (componentes principales)
Z = A_centered@np.transpose(Vt)
# La varianza explicada
varianza_explicada = S**2/np.sum(S**2)
# Calcula número de componentes de manera automatica de acuerdo a la variana explicada
# Threshold de 60%
n = A.shape[1] #numero de columnas
varianza_acumulada = varianza_explicada.cumsum()
conteo = (varianza_acumulada) < 0.8
num_componentes = conteo.sum() + 1
# regresar 4 objetos
return valores_singulares[:num_componentes], componentes[:num_componentes], Z[:,:num_componentes], varianza_explicada[:num_componentes]
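# Sanity-check sketch: on the same data, the explained-variance ratios from
# PCA_from_SVD should match sklearn's (up to the number of retained
# components):
#
#   X = np.random.default_rng(1).normal(size=(50, 4))
#   _, _, _, var_exp = PCA_from_SVD(X)
#   var_exp_sk = PCA(svd_solver='full').fit(X).explained_variance_ratio_
#   assert np.allclose(var_exp, var_exp_sk[:len(var_exp)])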
def PCA_from_SVD_jacobi(A):
"""
Función para PCA a partir de la SVD
params: A matriz de datos
num_componentes número de componentes deseados
return: valores_singulares Los valores singulares de la descomposición SVD
componentes Los coeficientes para calcular los componentes principales
Z Los datos transformados (componentes principales)
varianza_explicada La varianza explicada por cada componente principal
"""
# Centrar los datos
A = np.array(A) # convertir los datos a un numpy array por si vienen de un DataFrame
A_centered = A - A.mean(axis=0)
# Modificar esta línea de código, mandar a llamar la función creada por el equipo
# Calcular SVD
U, S, Vt = svd_jacobi_aprox(A_centered,1e-12,500)
# Los valores singulares
valores_singulares = S
# Los componentes (coeficientes)
componentes = ((Vt))
# Los datos transformados (componentes principales)
Z = A_centered@np.transpose(Vt)
# La varianza explicada
varianza_explicada = S**2/np.sum(S**2)
# Calcula número de componentes de manera automatica de acuerdo a la variana explicada
# Threshold de 60%
n = A.shape[1] #numero de columnas
varianza_acumulada = varianza_explicada.cumsum()
conteo = (varianza_acumulada) < 0.8
num_componentes = conteo.sum() + 1
# regresar 4 objetos
return valores_singulares[:(num_componentes)], componentes[:(num_componentes)], Z[:,:(num_componentes)], varianza_explicada[:(num_componentes)]
def PCA_from_QR_vf(data,niter = 450):
"""
Función para PCA a partir de los eigenvectores
params: data: matriz de datos
niter: número de iteraciones máximas
return: componentes Los coeficientes para calcular los componentes principales (eigenvectores de la matriz de covarianzas)
Z Los datos transformados (componentes principales)
varianza_explicada La varianza explicada por cada componente principal
Depende de la función: eigenvectores_QR
"""
# convertir a array
A = np.array(data)
# Centrar los datos
mean_vec = np.mean(A, axis=0)
datos_centrados = (A - mean_vec)
# Matriz de Covarianzas
#C = (datos_centrados.T@datos_centrados)/(datos_centrados.shape[0]-1)
C = (A - mean_vec).T.dot((A - mean_vec)) / (A.shape[0]-1)
# Calcular algoritmo QR
E, Q = eigenvectores_eigenvalores_QR_vf(C,niter)
# Los componentes (coeficientes)
componentes = Q.T
# Los datos transformados (componentes principales)
# Aquí marcaba error al filtrar porque no se reconocia a Z como numpy array
Z = datos_centrados@Q
# La varianza explicada
varianza_explicada = E/np.sum(E)
# Calcula número de componentes de manera automatica de acuerdo a la variana explicada
# Threshold de 60%
n = data.shape[1] #numero de columnas
varianza_acumulada = varianza_explicada.cumsum()
conteo = (varianza_acumulada) < 0.8
num_componentes = conteo.sum() + 1
# regresar 4 objetos
return E[:num_componentes], componentes[:num_componentes], Z[:,:num_componentes], varianza_explicada[:num_componentes] #, varianza_acumulada, num_componentes
def PCA_from_potencia(X):
"""
Función que calcula PCA a partir del método de la potencia y deflation de Hotteling
params: A: matriz de datos
return: eigenvalues Numpy array con los eigenvectores de A
eigenvectors Numpy array con los correspondientes eigenvectores de A
"""
prop = 0 # Proporción de varianza explicada
comp = 1
cur_var = 0
comp_vecs = np.zeros([X.shape[1], X.shape[1]])
# convertir a array
A = np.array(X)
# Centrar los datos
mean_vec = np.mean(A, axis=0)
datos_centrados = (A - mean_vec)
#Calculamos la matriz de covarianzas
cov = np.dot(X.T, X)/X.shape[0]
#Aplicamos el método de la potencia
evalues_pow, evectors_pow = power_deflation(cov,2000)
# La varianza explicada
varianza_explicada = evalues_pow/np.sum(evalues_pow)
# Los datos transformados (componentes principales)
Z = datos_centrados@evectors_pow
# Calcula número de componentes de manera automatica de acuerdo a la variana explicada
# Threshold de 80%
n = X.shape[1] #numero de columnas
varianza_acumulada = varianza_explicada.cumsum()
conteo = (varianza_acumulada) < 0.8
num_componentes = conteo.sum() + 1
return evalues_pow[:num_componentes], evectors_pow.T[:num_componentes], Z[:,:num_componentes], varianza_explicada[:num_componentes]
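# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming the functions above are in scope, that runs the
# numpy-SVD variant on random data; the array shape and seed are arbitrary.
if __name__ == "__main__":
    import numpy as np
    rng = np.random.default_rng(0)
    X_demo = rng.normal(size=(100, 5))
    vals, comps, Z, var_exp = PCA_from_SVD(X_demo)
    # Z keeps only the components needed to explain ~80% of the variance
    print(Z.shape, var_exp)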
|
from tkinter import *
import random
import time
root = Tk()
root.geometry("1600x700+0+0")
root.title("Restaurant Management System")
Tops = Frame(root,bg="white",width = 1600,height=50,relief=SUNKEN)
Tops.pack(side=TOP)
f1 = Frame(root,width = 900,height=700,relief=SUNKEN)
f1.pack(side=LEFT)
f2 = Frame(root ,width = 400,height=700,relief=SUNKEN)
f2.pack(side=RIGHT)
#------------------TIME--------------
localtime=time.asctime(time.localtime(time.time()))
#-----------------INFO TOP------------
lblinfo = Label(Tops, font=( 'aria' ,30, 'bold' ),text="Restaurant Management System",fg="steel blue",bd=10,anchor='w')
lblinfo.grid(row=0,column=0)
lblinfo = Label(Tops, font=( 'aria' ,20, ),text=localtime,fg="steel blue",anchor=W)
lblinfo.grid(row=1,column=0)
#---------------Calculator------------------
text_Input=StringVar()
operator =""
txtdisplay = Entry(f2,font=('ariel' ,20,'bold'), textvariable=text_Input , bd=5 ,insertwidth=7 ,bg="white",justify='right')
txtdisplay.grid(columnspan=4)
def btnclick(numbers):
global operator
operator=operator + str(numbers)
text_Input.set(operator)
def clrdisplay():
global operator
operator=""
text_Input.set("")
def equals():
global operator
sumup=str(eval(operator))
text_Input.set(sumup)
operator = ""
def Ref():
x=random.randint(12980, 50876)
randomRef = str(x)
rand.set(randomRef)
cof =float(Fries.get())
colfries= float(Largefries.get())
cob= float(Burger.get())
cofi= float(Filet.get())
cochee= float(Cheese_burger.get())
codr= float(Drinks.get())
# unit prices per item
costoffries = cof * 25
costoflargefries = colfries * 40
costofburger = cob * 35
costoffilet = cofi * 50
costofcheeseburger = cochee * 50
costofdrinks = codr * 35
Totalcost = costoffries + costoflargefries + costofburger + costoffilet + costofcheeseburger + costofdrinks
costofmeal = "Rp. " + str('%.2f' % Totalcost)
PayTax = Totalcost * 0.33  # 33% tax
Ser_Charge = Totalcost / 99  # service charge
Service = "Rp. " + str('%.2f' % Ser_Charge)
OverAllCost = "Rp. " + str('%.2f' % (PayTax + Totalcost + Ser_Charge))
PaidTax = "Rp. " + str('%.2f' % PayTax)
Service_Charge.set(Service)
cost.set(costofmeal)
Tax.set(PaidTax)
Subtotal.set(costofmeal)
Total.set(OverAllCost)
def qexit():
root.destroy()
def reset():
rand.set("")
Fries.set("")
Largefries.set("")
Burger.set("")
Filet.set("")
Subtotal.set("")
Total.set("")
Service_Charge.set("")
Drinks.set("")
Tax.set("")
cost.set("")
Cheese_burger.set("")
btn7=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="7",bg="powder blue", command=lambda: btnclick(7) )
btn7.grid(row=2,column=0)
btn8=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="8",bg="powder blue", command=lambda: btnclick(8) )
btn8.grid(row=2,column=1)
btn9=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="9",bg="powder blue", command=lambda: btnclick(9) )
btn9.grid(row=2,column=2)
Addition=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="+",bg="powder blue", command=lambda: btnclick("+") )
Addition.grid(row=2,column=3)
#---------------------------------------------------------------------------------------------
btn4=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="4",bg="powder blue", command=lambda: btnclick(4) )
btn4.grid(row=3,column=0)
btn5=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="5",bg="powder blue", command=lambda: btnclick(5) )
btn5.grid(row=3,column=1)
btn6=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="6",bg="powder blue", command=lambda: btnclick(6) )
btn6.grid(row=3,column=2)
Subtraction=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="-",bg="powder blue", command=lambda: btnclick("-") )
Subtraction.grid(row=3,column=3)
#-----------------------------------------------------------------------------------------------
btn1=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="1",bg="powder blue", command=lambda: btnclick(1) )
btn1.grid(row=4,column=0)
btn2=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="2",bg="powder blue", command=lambda: btnclick(2) )
btn2.grid(row=4,column=1)
btn3=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="3",bg="powder blue", command=lambda: btnclick(3) )
btn3.grid(row=4,column=2)
multiply=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="*",bg="powder blue", command=lambda: btnclick("*") )
multiply.grid(row=4,column=3)
#------------------------------------------------------------------------------------------------
btn0=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="0",bg="powder blue", command=lambda: btnclick(0) )
btn0.grid(row=5,column=0)
btnc=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="c",bg="powder blue", command=clrdisplay)
btnc.grid(row=5,column=1)
btnequal=Button(f2,padx=16,pady=16,bd=4,width = 16, fg="black", font=('ariel', 20 ,'bold'),text="=",bg="powder blue",command=equals)
btnequal.grid(columnspan=4)
Decimal=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text=".",bg="powder blue", command=lambda: btnclick(".") )
Decimal.grid(row=5,column=2)
Division=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="/",bg="powder blue", command=lambda: btnclick("/") )
Division.grid(row=5,column=3)
status = Label(f2,font=('aria', 15, 'bold'),width = 16, text="clifter restaurant",bd=2,relief=SUNKEN)
status.grid(row=7,columnspan=3)
#---------------------------------------------------------------------------------------
rand = StringVar()
Fries = StringVar()
Largefries = StringVar()
Burger = StringVar()
Filet = StringVar()
Subtotal = StringVar()
Total = StringVar()
Service_Charge = StringVar()
Drinks = StringVar()
Tax = StringVar()
cost = StringVar()
Cheese_burger = StringVar()
lblreference = Label(f1, font=( 'aria' ,16, 'bold' ),text="Order No.",fg="steel blue",bd=10,anchor='w')
lblreference.grid(row=0,column=0)
txtreference = Entry(f1,font=('ariel' ,16,'bold'), textvariable=rand , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtreference.grid(row=0,column=1)
lblfries = Label(f1, font=( 'aria' ,16, 'bold' ),text="Fries Meal",fg="steel blue",bd=10,anchor='w')
lblfries.grid(row=1,column=0)
txtfries = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Fries , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtfries.grid(row=1,column=1)
lblLargefries = Label(f1, font=( 'aria' ,16, 'bold' ),text="Lunch Meal",fg="steel blue",bd=10,anchor='w')
lblLargefries.grid(row=2,column=0)
txtLargefries = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Largefries , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtLargefries.grid(row=2,column=1)
lblburger = Label(f1, font=( 'aria' ,16, 'bold' ),text="Burger Meal",fg="steel blue",bd=10,anchor='w')
lblburger.grid(row=3,column=0)
txtburger = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Burger , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtburger.grid(row=3,column=1)
lblFilet = Label(f1, font=( 'aria' ,16, 'bold' ),text="Pizza Meal",fg="steel blue",bd=10,anchor='w')
lblFilet.grid(row=4,column=0)
txtFilet = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Filet , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtFilet.grid(row=4,column=1)
lblCheese_burger = Label(f1, font=( 'aria' ,16, 'bold' ),text="Cheese burger",fg="steel blue",bd=10,anchor='w')
lblCheese_burger.grid(row=5,column=0)
txtCheese_burger = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Cheese_burger , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtCheese_burger.grid(row=5,column=1)
#--------------------------------------------------------------------------------------
lblDrinks = Label(f1, font=( 'aria' ,16, 'bold' ),text="Drinks",fg="steel blue",bd=10,anchor='w')
lblDrinks.grid(row=0,column=2)
txtDrinks = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Drinks , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtDrinks.grid(row=0,column=3)
lblcost = Label(f1, font=( 'aria' ,16, 'bold' ),text="cost",fg="steel blue",bd=10,anchor='w')
lblcost.grid(row=1,column=2)
txtcost = Entry(f1,font=('ariel' ,16,'bold'), textvariable=cost , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtcost.grid(row=1,column=3)
lblService_Charge = Label(f1, font=( 'aria' ,16, 'bold' ),text="Service Charge",fg="steel blue",bd=10,anchor='w')
lblService_Charge.grid(row=2,column=2)
txtService_Charge = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Service_Charge , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtService_Charge.grid(row=2,column=3)
lblTax = Label(f1, font=( 'aria' ,16, 'bold' ),text="Tax",fg="steel blue",bd=10,anchor='w')
lblTax.grid(row=3,column=2)
txtTax = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Tax , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtTax.grid(row=3,column=3)
lblSubtotal = Label(f1, font=( 'aria' ,16, 'bold' ),text="Subtotal",fg="steel blue",bd=10,anchor='w')
lblSubtotal.grid(row=4,column=2)
txtSubtotal = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Subtotal , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtSubtotal.grid(row=4,column=3)
lblTotal = Label(f1, font=( 'aria' ,16, 'bold' ),text="Total",fg="steel blue",bd=10,anchor='w')
lblTotal.grid(row=5,column=2)
txtTotal = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Total , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtTotal.grid(row=5,column=3)
#-----------------------------------------buttons------------------------------------------
lblTotal = Label(f1,text="---------------------",fg="white")
lblTotal.grid(row=6,columnspan=3)
btnTotal=Button(f1,padx=16,pady=8, bd=10 ,fg="black",font=('ariel' ,16,'bold'),width=10, text="TOTAL", bg="powder blue",command=Ref)
btnTotal.grid(row=7, column=1)
btnreset=Button(f1,padx=16,pady=8, bd=10 ,fg="black",font=('ariel' ,16,'bold'),width=10, text="RESET", bg="powder blue",command=reset)
btnreset.grid(row=7, column=2)
btnexit=Button(f1,padx=16,pady=8, bd=10 ,fg="black",font=('ariel' ,16,'bold'),width=10, text="EXIT", bg="powder blue",command=qexit)
btnexit.grid(row=7, column=3)
def price():
roo = Tk()
roo.geometry("600x220+0+0")
roo.title("Price List")
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="ITEM", fg="black", bd=5)
lblinfo.grid(row=0, column=0)
lblinfo = Label(roo, font=('aria', 15,'bold'), text="_____________", fg="white", anchor=W)
lblinfo.grid(row=0, column=2)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="PRICE", fg="black", anchor=W)
lblinfo.grid(row=0, column=3)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="Fries Meal", fg="steel blue", anchor=W)
lblinfo.grid(row=1, column=0)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="25", fg="steel blue", anchor=W)
lblinfo.grid(row=1, column=3)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="Lunch Meal", fg="steel blue", anchor=W)
lblinfo.grid(row=2, column=0)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="40", fg="steel blue", anchor=W)
lblinfo.grid(row=2, column=3)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="Burger Meal", fg="steel blue", anchor=W)
lblinfo.grid(row=3, column=0)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="35", fg="steel blue", anchor=W)
lblinfo.grid(row=3, column=3)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="Pizza Meal", fg="steel blue", anchor=W)
lblinfo.grid(row=4, column=0)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="50", fg="steel blue", anchor=W)
lblinfo.grid(row=4, column=3)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="Cheese Burger", fg="steel blue", anchor=W)
lblinfo.grid(row=5, column=0)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="50", fg="steel blue", anchor=W)
lblinfo.grid(row=5, column=3)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="Drinks", fg="steel blue", anchor=W)
lblinfo.grid(row=6, column=0)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="35", fg="steel blue", anchor=W)
lblinfo.grid(row=6, column=3)
roo.mainloop()
btnprice=Button(f1,padx=16,pady=8, bd=10 ,fg="black",font=('ariel' ,16,'bold'),width=10, text="PRICE", bg="powder blue",command=price)
btnprice.grid(row=7, column=0)
root.mainloop()
|
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Font(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout.scene.xaxis.title"
_path_str = "layout.scene.xaxis.title.font"
_valid_props = {"color", "family", "size"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Font object
Sets this axis' title font. Note that the title's font used to
be customized by the now deprecated `titlefont` attribute.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`new_plotly.graph_objs.layout.scene.x
axis.title.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the new_plotly.graph_objs.layout.scene.xaxis.title.Font
constructor must be a dict or
an instance of :class:`new_plotly.graph_objs.layout.scene.xaxis.title.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
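# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming this module is importable under its package
# path; in stock plotly the same class is normally reached through
# plotly.graph_objects figures rather than constructed directly.
# font = Font(color="steelblue", family="Open Sans, Arial", size=14)
# font.size = 16  # property setters validate values on assignment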
|
#!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Utility methods for compatibility between Python versions
:author: Thomas Calmant
:copyright: Copyright 2015, isandlaTech
:license: Apache License 2.0
:version: 0.2.6
..
Copyright 2015 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (0, 2, 6)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
import sys
# ------------------------------------------------------------------------------
if sys.version_info[0] < 3:
# Python 2
# pylint: disable=E1101
import types
try:
STRING_TYPES = (
types.StringType,
types.UnicodeType
)
except NameError:
# Python built without unicode support
STRING_TYPES = (types.StringType,)
NUMERIC_TYPES = (
types.IntType,
types.LongType,
types.FloatType
)
def to_bytes(string):
"""
Converts the given string into bytes
"""
# pylint: disable=E0602
if type(string) is unicode:
return str(string)
return string
def from_bytes(data):
"""
Converts the given bytes into a string
"""
if type(data) is str:
return data
return str(data)
else:
# Python 3
# pylint: disable=E1101
STRING_TYPES = (
bytes,
str
)
NUMERIC_TYPES = (
int,
float
)
def to_bytes(string):
"""
Converts the given string into bytes
"""
if type(string) is bytes:
return string
return bytes(string, "UTF-8")
def from_bytes(data):
"""
Converts the given bytes into a string
"""
if type(data) is str:
return data
return str(data, "UTF-8")
# ------------------------------------------------------------------------------
# Common
DictType = dict
ListType = list
TupleType = tuple
ITERABLE_TYPES = (
list,
set, frozenset,
tuple
)
VALUE_TYPES = (
bool,
type(None)
)
PRIMITIVE_TYPES = STRING_TYPES + NUMERIC_TYPES + VALUE_TYPES
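# --- Illustrative usage (not part of the original module) ---
# A minimal round-trip sketch: to_bytes/from_bytes are symmetric on both
# Python 2 and Python 3, and the *_TYPES tuples are meant for isinstance checks.
# payload = to_bytes("hello")            # b"hello" under Python 3
# assert from_bytes(payload) == "hello"
# assert isinstance(payload, STRING_TYPES)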
|
# Copyright 2014, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from django.conf import settings
from openstack_dashboard.api.rest import nova
from openstack_dashboard.test import helpers as test
class NovaRestTestCase(test.TestCase):
#
# Keypairs
#
@mock.patch.object(nova.api, 'nova')
def test_keypair_get(self, nc):
request = self.mock_rest_request()
nc.keypair_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'id': 'one'}}),
mock.Mock(**{'to_dict.return_value': {'id': 'two'}}),
]
response = nova.Keypairs().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.content,
'{"items": [{"id": "one"}, {"id": "two"}]}')
nc.keypair_list.assert_called_once_with(request)
@mock.patch.object(nova.api, 'nova')
def test_keypair_create(self, nc):
request = self.mock_rest_request(body='''{"name": "Ni!"}''')
new = nc.keypair_create.return_value
new.to_dict.return_value = {'name': 'Ni!', 'public_key': 'sekrit'}
new.name = 'Ni!'
with mock.patch.object(settings, 'DEBUG', True):
response = nova.Keypairs().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response.content,
'{"name": "Ni!", "public_key": "sekrit"}')
self.assertEqual(response['location'], '/api/nova/keypairs/Ni%21')
nc.keypair_create.assert_called_once_with(request, 'Ni!')
@mock.patch.object(nova.api, 'nova')
def test_keypair_import(self, nc):
request = self.mock_rest_request(body='''
{"name": "Ni!", "public_key": "hi"}
''')
new = nc.keypair_import.return_value
new.to_dict.return_value = {'name': 'Ni!', 'public_key': 'hi'}
new.name = 'Ni!'
with mock.patch.object(settings, 'DEBUG', True):
response = nova.Keypairs().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response.content,
'{"name": "Ni!", "public_key": "hi"}')
self.assertEqual(response['location'], '/api/nova/keypairs/Ni%21')
nc.keypair_import.assert_called_once_with(request, 'Ni!', 'hi')
#
# Availability Zones
#
def test_availzone_get_brief(self):
self._test_availzone_get(False)
def test_availzone_get_detailed(self):
self._test_availzone_get(True)
@mock.patch.object(nova.api, 'nova')
def _test_availzone_get(self, detail, nc):
if detail:
request = self.mock_rest_request(GET={'detailed': 'true'})
else:
request = self.mock_rest_request(GET={})
nc.availability_zone_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'id': 'one'}}),
mock.Mock(**{'to_dict.return_value': {'id': 'two'}}),
]
response = nova.AvailabilityZones().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.content,
'{"items": [{"id": "one"}, {"id": "two"}]}')
nc.availability_zone_list.assert_called_once_with(request, detail)
#
# Limits
#
def test_limits_get_not_reserved(self):
self._test_limits_get(False)
def test_limits_get_reserved(self):
self._test_limits_get(True)
@mock.patch.object(nova.api, 'nova')
def _test_limits_get(self, reserved, nc):
if reserved:
request = self.mock_rest_request(GET={'reserved': 'true'})
else:
request = self.mock_rest_request(GET={})
nc.tenant_absolute_limits.return_value = {'id': 'one'}
response = nova.Limits().get(request)
self.assertStatusCode(response, 200)
nc.tenant_absolute_limits.assert_called_once_with(request, reserved)
self.assertEqual(response.content, '{"id": "one"}')
#
# Servers
#
@mock.patch.object(nova.api, 'nova')
def test_server_create_missing(self, nc):
request = self.mock_rest_request(body='''{"name": "hi"}''')
response = nova.Servers().post(request)
self.assertStatusCode(response, 400)
self.assertEqual(response.content,
'"missing required parameter \'source_id\'"')
nc.server_create.assert_not_called()
@mock.patch.object(nova.api, 'nova')
def test_server_create_basic(self, nc):
request = self.mock_rest_request(body='''{"name": "Ni!",
"source_id": "image123", "flavor_id": "flavor123",
"key_name": "sekrit", "user_data": "base64 yes",
"security_groups": [{"name": "root"}]}
''')
new = nc.server_create.return_value
new.to_dict.return_value = {'id': 'server123'}
new.id = 'server123'
response = nova.Servers().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response.content, '{"id": "server123"}')
self.assertEqual(response['location'], '/api/nova/servers/server123')
nc.server_create.assert_called_once_with(
request, 'Ni!', 'image123', 'flavor123', 'sekrit', 'base64 yes',
[{'name': 'root'}]
)
@mock.patch.object(nova.api, 'nova')
def test_server_get_single(self, nc):
request = self.mock_rest_request()
nc.server_get.return_value.to_dict.return_value = {'name': '1'}
response = nova.Server().get(request, "1")
self.assertStatusCode(response, 200)
nc.server_get.assert_called_once_with(request, "1")
#
# Extensions
#
@mock.patch.object(nova.api, 'nova')
def _test_extension_list(self, nc):
request = self.mock_rest_request()
nc.list_extensions.return_value = [
mock.Mock(**{'to_dict.return_value': {'name': 'foo'}}),
mock.Mock(**{'to_dict.return_value': {'name': 'bar'}}),
]
response = nova.Extensions().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.content,
'{"items": [{"name": "foo"}, {"name": "bar"}]}')
nc.list_extensions.assert_called_once_with(request)
#
# Flavors
#
def test_get_extras_no(self):
self._test_flavor_get_single(get_extras=False)
def test_get_extras_yes(self):
self._test_flavor_get_single(get_extras=True)
def test_get_extras_default(self):
self._test_flavor_get_single(get_extras=None)
@mock.patch.object(nova.api, 'nova')
def _test_flavor_get_single(self, nc, get_extras):
if get_extras:
request = self.mock_rest_request(GET={'get_extras': 'tRuE'})
elif get_extras is None:
request = self.mock_rest_request()
get_extras = False
else:
request = self.mock_rest_request(GET={'get_extras': 'fAlsE'})
nc.flavor_get.return_value.to_dict.return_value = {'name': '1'}
response = nova.Flavor().get(request, "1")
self.assertStatusCode(response, 200)
if get_extras:
self.assertEqual(response.content, '{"extras": {}, "name": "1"}')
else:
self.assertEqual(response.content, '{"name": "1"}')
nc.flavor_get.assert_called_once_with(request, "1",
get_extras=get_extras)
@mock.patch.object(nova.api, 'nova')
def _test_flavor_list_public(self, nc, is_public=None):
if is_public:
request = self.mock_rest_request(GET={'is_public': 'tRuE'})
elif is_public is None:
request = self.mock_rest_request(GET={})
else:
request = self.mock_rest_request(GET={'is_public': 'fAlsE'})
nc.flavor_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'id': '1'}}),
mock.Mock(**{'to_dict.return_value': {'id': '2'}}),
]
response = nova.Flavors().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.content,
'{"items": [{"id": "1"}, {"id": "2"}]}')
nc.flavor_list.assert_called_once_with(request, is_public=is_public,
get_extras=False)
def test_flavor_list_private(self):
self._test_flavor_list_public(is_public=False)
def test_flavor_list_public(self):
self._test_flavor_list_public(is_public=True)
def test_flavor_list_public_none(self):
self._test_flavor_list_public(is_public=None)
@mock.patch.object(nova.api, 'nova')
def _test_flavor_list_extras(self, nc, get_extras=None):
if get_extras:
request = self.mock_rest_request(GET={'get_extras': 'tRuE'})
elif get_extras is None:
request = self.mock_rest_request(GET={})
get_extras = False
else:
request = self.mock_rest_request(GET={'get_extras': 'fAlsE'})
nc.flavor_list.return_value = [
mock.Mock(**{'extras': {}, 'to_dict.return_value': {'id': '1'}}),
mock.Mock(**{'extras': {}, 'to_dict.return_value': {'id': '2'}}),
]
response = nova.Flavors().get(request)
self.assertStatusCode(response, 200)
if get_extras:
self.assertEqual(response.content,
'{"items": [{"extras": {}, "id": "1"}, '
'{"extras": {}, "id": "2"}]}')
else:
self.assertEqual(response.content,
'{"items": [{"id": "1"}, {"id": "2"}]}')
nc.flavor_list.assert_called_once_with(request, is_public=None,
get_extras=get_extras)
def test_flavor_list_extras_no(self):
self._test_flavor_list_extras(get_extras=False)
def test_flavor_list_extras_yes(self):
self._test_flavor_list_extras(get_extras=True)
def test_flavor_list_extras_absent(self):
self._test_flavor_list_extras(get_extras=None)
@mock.patch.object(nova.api, 'nova')
def test_flavor_extra_specs(self, nc):
request = self.mock_rest_request()
nc.flavor_get_extras.return_value.to_dict.return_value = {'foo': '1'}
response = nova.FlavorExtraSpecs().get(request, "1")
self.assertStatusCode(response, 200)
nc.flavor_get_extras.assert_called_once_with(request, "1", raw=True)
|
import IoTSensor
import LORAGateway
class GatewayPlacement:
def __init__(self, sensor_list):
self._sensor_list = sensor_list
self._gateway_list = []
def add_gateway(self, gateway):
self._gateway_list.append(gateway)
def remove_gateway(self, gateway):
self._gateway_list.remove(gateway)
def sensors_covered(self):
curr_placement_coverage = []
for g in self._gateway_list:
curr_gateway_coverage = g.get_coverage(self._sensor_list)
for s in curr_gateway_coverage:
if s.get_id() not in curr_placement_coverage:
curr_placement_coverage.append(s.get_id())
covers = True
for s in self._sensor_list:
if s.get_id() not in curr_placement_coverage:
covers = False
break
return covers
def energy_consumption(self, time):
energy = 0.0
for s in self._sensor_list:
energy = energy + s.get_total_consumption(time, s.get_closest_gateway(self._gateway_list))
for g in self._gateway_list:
energy = energy + g.get_energy_consumption(time)
return energy
def get_gateways_number(self):
return len(self._gateway_list)
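# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of evaluating a placement. The constructor signatures of
# IoTSensor and LORAGateway below are assumptions for illustration only;
# the real classes live in the sibling modules imported above.
# sensors = [IoTSensor.IoTSensor(i) for i in range(10)]
# placement = GatewayPlacement(sensors)
# placement.add_gateway(LORAGateway.LORAGateway(0.0, 0.0))
# if placement.sensors_covered():
#     print(placement.energy_consumption(3600.0), "J over one hour")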
|
#! /usr/bin/python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
"""
Create and remove network tunnels to the target via the server
--------------------------------------------------------------
"""
from . import tc
from . import ttb_client
class tunnel(tc.target_extension_c):
"""
Extension to :py:class:`tcfl.tc.target_c` to create IP tunnels to
targets with IP connectivity.
Use by indicating a default IP address to use for interconnect
*ic* or explicitly indicating it in the :meth:`add` function:
>>> target.tunnel.ip_addr = target.addr_get(ic, "ipv4")
>>> target.tunnel.add(PORT)
>>> target.tunnel.remove(PORT)
>>> target.tunnel.list()
Note that for tunnels to work, the target has to be acquired and
IP has to be up on it, which might require it to be connected to
some IP network (it can be a TCF interconnect or any other
network).
"""
def __init__(self, target):
self.target = target
# Tunnels can always be added, even the target is not in an
# interconnect
self.ip_addr = None
def _ip_addr_get(self, ip_addr):
# FIXME: this shall validate the IP address using python-ipaddress
if ip_addr:
return ip_addr
if self.ip_addr:
return self.ip_addr
ip_addr = self.target.rt.get(
'ipv4_addr', self.target.rt.get('ipv6_addr', None))
if ip_addr:
return ip_addr
raise RuntimeError(
"Cannot identify any IPv4 or IPv6 address to use; "
"please set it in "
"`TARGET.tunnel.ip_addr = TARGET.addr_get(ic, \"ipv4\")` "
"or pass it explicitly")
def add(self, port, ip_addr = None, proto = None):
"""
Set up a TCP/UDP/SCTP v4 or v6 tunnel to the target
A local port of the given protocol in the server is forwarded
to the target's port. Teardown with :meth:`remove`.
If the tunnel already exists, it is not recreated, but the
port it uses is returned.
For example, to redirect the target's TCP4 port 3000 to a port in
the server that provides ``target`` (target.kws['server']):
>>> server_name = target.rtb.parsed_url.hostname
>>> server_port = target.tunnel.add(3000)
Now connecting to ``server_name:server_port`` takes you to the
target's port 3000.
:param int port: port to redirect to
:param str ip_addr: (optional) target's IP address to use (it
must be listed on the target's tags *ipv4_address* or
*ipv6_address*).
:param str proto: (optional) Protocol to tunnel:
{udp,sctp,tcp}[{4,6}] (defaults to v4 and to TCP)
:returns int local_port: port in the server to connect to
in order to access the target.
"""
if proto == None:
proto = 'tcp'
else:
assert isinstance(proto, str)
assert isinstance(port, int)
target = self.target
ip_addr = self._ip_addr_get(ip_addr)
r = target.rtb.rest_tb_target_ip_tunnel_add(
target.rt, ip_addr, port, proto, ticket = target.ticket)
self.target.report_info("%s tunnel added from %s:%d to %s:%d"
% (proto, target.rtb.parsed_url.hostname, r,
ip_addr, port))
return r
def remove(self, port, ip_addr = None, proto = None):
"""
Tear down a TCP/UDP/SCTP v4 or v6 tunnel to the target
previously created with :meth:`add`.
:param int port: port to redirect to
:param str ip_addr: (optional) target's IP address to use (it
must be listed on the target's tags *ipv4_address* or
*ipv6_address*).
:param str proto: (optional) Protocol to tunnel:
{udp,sctp,tcp}[{4,6}] (defaults to v4 and to TCP)
"""
if proto == None:
proto = 'tcp'
else:
assert isinstance(proto, str)
assert isinstance(port, int)
ip_addr = self._ip_addr_get(ip_addr)
target = self.target
target.rtb.rest_tb_target_ip_tunnel_remove(
target.rt, ip_addr, port, proto, ticket = target.ticket)
def list(self):
"""
List existing IP tunnels
:returns: list of tuples (protocol, target-ip-address, port,
port-in-server)
"""
target = self.target
return target.rtb.rest_tb_target_ip_tunnel_list(target.rt,
ticket = target.ticket)
# FIXME: work out tcf creating target_c instances, so it is easier to
# automate creating cmdline wrappers
def cmdline_tunnel_add(args):
rtb, rt = ttb_client._rest_target_find_by_id(args.target)
port = rtb.rest_tb_target_ip_tunnel_add(rt, args.ip_addr,
args.port, args.protocol,
ticket = args.ticket)
print("%s:%d" % (rtb.parsed_url.hostname, port))
def cmdline_tunnel_remove(args):
rtb, rt = ttb_client._rest_target_find_by_id(args.target)
rtb.rest_tb_target_ip_tunnel_remove(rt, args.ip_addr,
args.port, args.protocol,
ticket = args.ticket)
def cmdline_tunnel_list(args):
rtb, rt = ttb_client._rest_target_find_by_id(args.target)
tunnels = rtb.rest_tb_target_ip_tunnel_list(rt, ticket = args.ticket)
for tunnel in tunnels:
print("%s %s:%s %s:%s" % (tunnel[0],
rtb.parsed_url.hostname, tunnel[3],
tunnel[1], tunnel[2]))
def cmdline_setup(argsp):
ap = argsp.add_parser("tunnel-add", help = "create an IP tunnel")
ap.add_argument("target", metavar = "TARGET", action = "store", type = str,
default = None, help = "Target's name or URL")
ap.add_argument("port", metavar = "PORT", action = "store", type = int,
help = "Port to tunnel to")
ap.add_argument("protocol", metavar = "PROTOCOL", action = "store",
nargs = "?", default = None, type = str,
help = "Protocol to tunnel {tcp,udp,sctp}[{4,6}] "
"(defaults to tcp and to IPv4)")
ap.add_argument("ip_addr", metavar = "IP-ADDR", action = "store",
nargs = "?", default = None, type = str,
help = "target's IP address to tunnel to "
"(default is the first IP address the target declares)")
ap.set_defaults(func = cmdline_tunnel_add)
ap = argsp.add_parser("tunnel-remove",
help = "remove an existing IP tunnel")
ap.add_argument("target", metavar = "TARGET", action = "store", type = str,
default = None, help = "Target's name or URL")
ap.add_argument("port", metavar = "PORT", action = "store",
help = "Port to tunnel to")
ap.add_argument("protocol", metavar = "PROTOCOL", action = "store",
nargs = "?", default = None,
help = "Protocol to tunnel {tcp,udp,sctp}[{4,6}] "
"(defaults to tcp and to IPv4)")
ap.add_argument("ip_addr", metavar = "IP-ADDR", action = "store",
nargs = "?", default = None,
help = "target's IP address to tunnel to "
"(default is the first IP address the target declares)")
ap.set_defaults(func = cmdline_tunnel_remove)
ap = argsp.add_parser("tunnel-list", help = "List existing IP tunnels")
ap.add_argument("target", metavar = "TARGET", action = "store", type = str,
default = None, help = "Target's name or URL")
ap.set_defaults(func = cmdline_tunnel_list)
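# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of wiring these subcommands into an argparse CLI; in the
# real tool, tcf's own command dispatcher calls cmdline_setup() and also
# supplies extra attributes such as args.ticket, so this stand-alone parse
# is an approximation for illustration only.
# import argparse
# parser = argparse.ArgumentParser()
# cmdline_setup(parser.add_subparsers())
# args = parser.parse_args(["tunnel-list", "mytargetname"])
# args.func(args)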
|
# np_baseball is available
# Import numpy
import numpy as np
# Create np_height_in from np_baseball
np_height_in = np_baseball[:,0]
# Print out the mean of np_height_in
print(np.mean(np_height_in))
# Print out the median of np_height_in
print(np.median(np_height_in))
# np_baseball is available
# Import numpy
import numpy as np
# Print mean height (first column)
avg = np.mean(np_baseball[:,0])
print("Average: " + str(avg))
# Print median height
med = np.median(np_baseball[:,0])
print("Median: " + str(med))
# Print out the standard deviation on height
stddev = np.std(np_baseball[:,0])
print("Standard Deviation: " + str(stddev))
# Print out the correlation between the first and second column
corr = np.corrcoef(np_baseball[:,0], np_baseball[:,1])
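# np.corrcoef returns a 2x2 correlation matrix; the off-diagonal entries
# hold the correlation between the two columns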
print("Correlation: " + str(corr))
# heights and positions are available as lists
# Import numpy
import numpy as np
# Convert positions and heights to numpy arrays: np_positions, np_heights
np_positions = np.array(positions)
np_heights = np.array(heights)
# Heights of the goalkeepers: gk_heights
gk_heights = np_heights[np_positions == 'GK']
# Heights of the other players: other_heights
other_heights = np_heights[np_positions != 'GK']
# Print out the median height of goalkeepers
print("Median height of goalkeepers: " + str(np.median(gk_heights)))
# Print out the median height of other players
print("Median height of other players: " + str(np.median(other_heights)))
|
"""
Top-level URL lookup for InvenTree application.
Passes URL lookup downstream to each app as required.
"""
from django.conf.urls import url, include
from django.urls import path
from django.contrib import admin
from company.urls import company_urls
from company.urls import manufacturer_part_urls
from company.urls import supplier_part_urls
from common.urls import common_urls
from part.urls import part_urls
from stock.urls import stock_urls
from build.urls import build_urls
from order.urls import order_urls
from plugin.urls import get_plugin_urls
from barcodes.api import barcode_api_urls
from common.api import common_api_urls
from part.api import part_api_urls, bom_api_urls
from company.api import company_api_urls
from stock.api import stock_api_urls
from build.api import build_api_urls
from order.api import order_api_urls
from label.api import label_api_urls
from report.api import report_api_urls
from plugin.api import plugin_api_urls
from django.conf import settings
from django.conf.urls.static import static
from django.views.generic.base import RedirectView
from rest_framework.documentation import include_docs_urls
from .views import auth_request
from .views import IndexView, SearchView, DatabaseStatsView
from .views import SettingsView, EditUserView, SetPasswordView, CustomEmailView, CustomConnectionsView, CustomPasswordResetFromKeyView
from .views import CustomSessionDeleteView, CustomSessionDeleteOtherView
from .views import CurrencyRefreshView
from .views import AppearanceSelectView, SettingCategorySelectView
from .views import DynamicJsView
from .api import InfoView, NotFoundView
from .api import ActionPluginView
from users.api import user_urls
admin.site.site_header = "InvenTree Admin"
apipatterns = [
url(r'^barcode/', include(barcode_api_urls)),
url(r'^settings/', include(common_api_urls)),
url(r'^part/', include(part_api_urls)),
url(r'^bom/', include(bom_api_urls)),
url(r'^company/', include(company_api_urls)),
url(r'^stock/', include(stock_api_urls)),
url(r'^build/', include(build_api_urls)),
url(r'^order/', include(order_api_urls)),
url(r'^label/', include(label_api_urls)),
url(r'^report/', include(report_api_urls)),
url(r'^plugin/', include(plugin_api_urls)),
# User URLs
url(r'^user/', include(user_urls)),
# Plugin endpoints
url(r'^action/', ActionPluginView.as_view(), name='api-action-plugin'),
# InvenTree information endpoint
url(r'^$', InfoView.as_view(), name='api-inventree-info'),
# Unknown endpoint
url(r'^.*$', NotFoundView.as_view(), name='api-404'),
]
settings_urls = [
url(r'^i18n/?', include('django.conf.urls.i18n')),
url(r'^appearance/?', AppearanceSelectView.as_view(), name='settings-appearance'),
url(r'^currencies-refresh/', CurrencyRefreshView.as_view(), name='settings-currencies-refresh'),
url(r'^category/', SettingCategorySelectView.as_view(), name='settings-category'),
# Catch any other urls
url(r'^.*$', SettingsView.as_view(template_name='InvenTree/settings/settings.html'), name='settings'),
]
# These javascript files are served "dynamically" - i.e. rendered on demand
dynamic_javascript_urls = [
url(r'^calendar.js', DynamicJsView.as_view(template_name='js/dynamic/calendar.js'), name='calendar.js'),
url(r'^nav.js', DynamicJsView.as_view(template_name='js/dynamic/nav.js'), name='nav.js'),
url(r'^settings.js', DynamicJsView.as_view(template_name='js/dynamic/settings.js'), name='settings.js'),
]
# These javascript files are passed through the Django translation layer
translated_javascript_urls = [
url(r'^api.js', DynamicJsView.as_view(template_name='js/translated/api.js'), name='api.js'),
url(r'^attachment.js', DynamicJsView.as_view(template_name='js/translated/attachment.js'), name='attachment.js'),
url(r'^barcode.js', DynamicJsView.as_view(template_name='js/translated/barcode.js'), name='barcode.js'),
url(r'^bom.js', DynamicJsView.as_view(template_name='js/translated/bom.js'), name='bom.js'),
url(r'^build.js', DynamicJsView.as_view(template_name='js/translated/build.js'), name='build.js'),
url(r'^company.js', DynamicJsView.as_view(template_name='js/translated/company.js'), name='company.js'),
url(r'^filters.js', DynamicJsView.as_view(template_name='js/translated/filters.js'), name='filters.js'),
url(r'^forms.js', DynamicJsView.as_view(template_name='js/translated/forms.js'), name='forms.js'),
url(r'^helpers.js', DynamicJsView.as_view(template_name='js/translated/helpers.js'), name='helpers.js'),
url(r'^label.js', DynamicJsView.as_view(template_name='js/translated/label.js'), name='label.js'),
url(r'^model_renderers.js', DynamicJsView.as_view(template_name='js/translated/model_renderers.js'), name='model_renderers.js'),
url(r'^modals.js', DynamicJsView.as_view(template_name='js/translated/modals.js'), name='modals.js'),
url(r'^order.js', DynamicJsView.as_view(template_name='js/translated/order.js'), name='order.js'),
url(r'^part.js', DynamicJsView.as_view(template_name='js/translated/part.js'), name='part.js'),
url(r'^report.js', DynamicJsView.as_view(template_name='js/translated/report.js'), name='report.js'),
url(r'^stock.js', DynamicJsView.as_view(template_name='js/translated/stock.js'), name='stock.js'),
url(r'^plugin.js', DynamicJsView.as_view(template_name='js/translated/plugin.js'), name='plugin.js'),
url(r'^tables.js', DynamicJsView.as_view(template_name='js/translated/tables.js'), name='tables.js'),
url(r'^table_filters.js', DynamicJsView.as_view(template_name='js/translated/table_filters.js'), name='table_filters.js'),
]
backendpatterns = [
# "Dynamic" javascript files which are rendered using InvenTree templating.
url(r'^js/dynamic/', include(dynamic_javascript_urls)),
url(r'^js/i18n/', include(translated_javascript_urls)),
url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^auth/?', auth_request),
url(r'^api/', include(apipatterns)),
url(r'^api-doc/', include_docs_urls(title='InvenTree API')),
# 3rd party endpoints
url(r'^markdownx/', include('markdownx.urls')),
]
frontendpatterns = [
url(r'^part/', include(part_urls)),
url(r'^manufacturer-part/', include(manufacturer_part_urls)),
url(r'^supplier-part/', include(supplier_part_urls)),
url(r'^common/', include(common_urls)),
url(r'^stock/', include(stock_urls)),
url(r'^company/', include(company_urls)),
url(r'^order/', include(order_urls)),
url(r'^build/', include(build_urls)),
url(r'^settings/', include(settings_urls)),
url(r'^edit-user/', EditUserView.as_view(), name='edit-user'),
url(r'^set-password/', SetPasswordView.as_view(), name='set-password'),
url(r'^index/', IndexView.as_view(), name='index'),
url(r'^search/', SearchView.as_view(), name='search'),
url(r'^stats/', DatabaseStatsView.as_view(), name='stats'),
# plugin urls
get_plugin_urls(),  # appends the URLs of the currently loaded plugins
# admin sites
url(r'^admin/error_log/', include('error_report.urls')),
url(r'^admin/shell/', include('django_admin_shell.urls')),
url(r'^admin/', admin.site.urls, name='inventree-admin'),
# DB user sessions
url(r'^accounts/sessions/other/delete/$', view=CustomSessionDeleteOtherView.as_view(), name='session_delete_other', ),
url(r'^accounts/sessions/(?P<pk>\w+)/delete/$', view=CustomSessionDeleteView.as_view(), name='session_delete', ),
# Single Sign On / allauth
# overrides of urlpatterns
url(r'^accounts/email/', CustomEmailView.as_view(), name='account_email'),
url(r'^accounts/social/connections/', CustomConnectionsView.as_view(), name='socialaccount_connections'),
url(r"^accounts/password/reset/key/(?P<uidb36>[0-9A-Za-z]+)-(?P<key>.+)/$", CustomPasswordResetFromKeyView.as_view(), name="account_reset_password_from_key"),
url(r'^accounts/', include('allauth_2fa.urls')), # MFA support
url(r'^accounts/', include('allauth.urls')), # included urlpatterns
]
urlpatterns = [
url('', include(frontendpatterns)),
url('', include(backendpatterns)),
]
# Server running in "DEBUG" mode?
if settings.DEBUG:
# Static file access
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# Media file access
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Debug toolbar access (only allowed in DEBUG mode)
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
path('__debug/', include(debug_toolbar.urls)),
] + urlpatterns
# Send any unknown URLs to the parts page
urlpatterns += [url(r'^.*$', RedirectView.as_view(url='/index/', permanent=False), name='index')]
|
import os
import logging
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import al
from al.dataset import mnist
from al.model.model_zoo.simple_cnn import ConvModel
from al.model.mnist import MnistLearner
from al.dataset.mnist import MnistDataset
from al.train.active_train import ActiveTrain
from al.helpers.experiment import set_up_experiment, load_config
from al.experiments import set_up_learner
DATASET = 'mnist'
FOLDER_PATH = os.path.dirname(__file__)
OUTPUT_DIR, FIGURE_DIR, logger, logger_name = set_up_experiment(
__file__, FOLDER_PATH, logging_lvl=20)
logger.info('-------------------------')
logger.info('--LAUNCHING EXPERIMENTS--')
logger.info('-------------------------')
config = load_config(FOLDER_PATH, DATASET)
setupper = set_up_learner(DATASET)
config['active_learning']['output_dir'] = OUTPUT_DIR
config['experiment']['logger_name'] = logger_name
model_name = 'simple_cnn'
strategies = ['random_sampling', 'margin_sampling']
repeats = 1
score_data = {}
config['active_learning']['assets_per_query'] = 20
config['active_learning']['n_iter'] = 5
config['active_learning']['init_size'] = 100
config['train_parameters']['batch_size'] = 16
config['train_parameters']['iterations'] = 100
config['experiment']['n_classes'] = 2
raw_dataset, _ = setupper(config, OUTPUT_DIR, logger,
index_train=np.arange(60000))
full_train_dataset = raw_dataset.dataset
first_class = 1
second_class = 2
first_classes = []
second_classes = []
p = 0.1
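# p is the subsampling probability: only ~10% of the second class is kept,
# which creates the class imbalance this experiment studies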
for i in range(len(full_train_dataset)):
if full_train_dataset[i][1].numpy() == first_class:
first_classes.append(i)
elif full_train_dataset[i][1].numpy() == second_class and np.random.rand() < p:
second_classes.append(i)
train_indices = np.array(first_classes + second_classes)
# np.random.permutation returns a shuffled copy, so reassign the result
train_indices = np.random.permutation(train_indices)
for i in range(repeats):
logger.info('---------------------------')
logger.info(f'--------ROUND OF TRAININGS NUMBER #{i+1}--------')
logger.info('---------------------------')
for strategy in strategies:
dataset, learner = setupper(
config, OUTPUT_DIR, logger, index_train=train_indices)
logger.info('---------------------------')
logger.info(f'----STRATEGY : {strategy}----')
logger.info('---------------------------')
trainer = ActiveTrain(learner, dataset, strategy, logger_name)
scores = trainer.train(
config['train_parameters'], **config['active_learning'])
score_data[(strategy, i)] = scores
logger.info(f'----DONE----\n')
logger.info('---------------------------')
logger.info(f'--------DONE--------')
logger.info('---------------------------\n\n\n')
# data = []
# for (strategy, experiment_number), scores_experiment in score_data.items():
# for step_result in scores_experiment:
# val_step_result = step_result['val']
# step = step_result['step']
# data.append(
# {'strategy': strategy,
# 'experiment': experiment_number,
# 'step': step,
# **val_step_result})
# df = pd.DataFrame(data)
# plot_dir = os.path.join(os.path.dirname(__file__), 'figures')
# plt.figure(num=0, figsize=(12, 5))
# sns.lineplot(x='step', y='accuracy', hue='strategy', data=df)
# plt.ylabel('Accuracy')
# plt.show()
# plt.savefig(os.path.join(plot_dir, 'accuracy_imbalance.png'))
|
from __future__ import annotations
import asyncio
import bisect
import builtins
import concurrent.futures
import errno
import heapq
import logging
import os
import random
import sys
import threading
import warnings
import weakref
from collections import defaultdict, deque, namedtuple
from collections.abc import Hashable, Iterable, MutableMapping
from contextlib import suppress
from datetime import timedelta
from inspect import isawaitable
from pickle import PicklingError
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .client import Client
from tlz import first, keymap, merge, pluck # noqa: F401
from tornado.ioloop import IOLoop, PeriodicCallback
import dask
from dask.core import istask
from dask.system import CPU_COUNT
from dask.utils import (
apply,
format_bytes,
funcname,
parse_bytes,
parse_timedelta,
stringify,
typename,
)
from . import comm, preloading, profile, system, utils
from .batched import BatchedSend
from .comm import connect, get_address_host
from .comm.addressing import address_from_user_args, parse_address
from .comm.utils import OFFLOAD_THRESHOLD
from .core import (
CommClosedError,
Status,
coerce_to_address,
error_message,
pingpong,
send_recv,
)
from .diagnostics import nvml
from .diagnostics.plugin import _get_plugin_name
from .diskutils import WorkSpace
from .http import get_handlers
from .metrics import time
from .node import ServerNode
from .proctitle import setproctitle
from .protocol import pickle, to_serialize
from .pubsub import PubSubWorkerExtension
from .security import Security
from .sizeof import safe_sizeof as sizeof
from .threadpoolexecutor import ThreadPoolExecutor
from .threadpoolexecutor import secede as tpe_secede
from .utils import (
LRU,
TimeoutError,
_maybe_complex,
get_ip,
has_arg,
import_file,
iscoroutinefunction,
json_load_robust,
key_split,
log_errors,
offload,
parse_ports,
silence_logging,
thread_state,
warn_on_duration,
)
from .utils_comm import gather_from_workers, pack_data, retry_operation
from .utils_perf import ThrottledGC, disable_gc_diagnosis, enable_gc_diagnosis
from .versions import get_versions
logger = logging.getLogger(__name__)
LOG_PDB = dask.config.get("distributed.admin.pdb-on-err")
no_value = "--no-value-sentinel--"
IN_PLAY = ("waiting", "ready", "executing", "long-running")
PENDING = ("waiting", "ready", "constrained")
PROCESSING = ("waiting", "ready", "constrained", "executing", "long-running")
READY = ("ready", "constrained")
DEFAULT_EXTENSIONS = [PubSubWorkerExtension]
DEFAULT_METRICS = {}
DEFAULT_STARTUP_INFORMATION = {}
DEFAULT_DATA_SIZE = parse_bytes(
dask.config.get("distributed.scheduler.default-data-size")
)
SerializedTask = namedtuple("SerializedTask", ["function", "args", "kwargs", "task"])
class TaskState:
"""Holds volatile state relating to an individual Dask task
* **dependencies**: ``set(TaskState instances)``
The data needed by this key to run
* **dependents**: ``set(TaskState instances)``
The keys that use this dependency.
    * **duration**: ``float``
        Expected duration of the task
    * **priority**: ``tuple``
        The priority of this task, as assigned by the scheduler. Determines run order.
* **state**: ``str``
The current state of the task. One of ["waiting", "ready", "executing",
"fetch", "memory", "flight", "long-running", "rescheduled", "error"]
* **who_has**: ``set(worker)``
Workers that we believe have this data
    * **coming_from**: ``str``
        The worker from which the task's data is coming, if the task is in flight
* **waiting_for_data**: ``set(keys of dependencies)``
A dynamic version of dependencies. All dependencies that we still don't
have for a particular key.
* **resource_restrictions**: ``{str: number}``
Abstract resources required to run a task
* **exception**: ``str``
The exception caused by running a task if it erred
    * **traceback**: ``str``
        The traceback from running a task if it erred
* **type**: ``type``
The type of a particular piece of data
* **suspicious_count**: ``int``
The number of times a dependency has not been where we expected it
* **startstops**: ``[{startstop}]``
Log of transfer, load, and compute times for a task
* **start_time**: ``float``
Time at which task begins running
* **stop_time**: ``float``
Time at which task finishes running
* **metadata**: ``dict``
Metadata related to task. Stored metadata should be msgpack
serializable (e.g. int, string, list, dict).
* **nbytes**: ``int``
The size of a particular piece of data
* **annotations**: ``dict``
Task annotations
Parameters
----------
key: str
runspec: SerializedTask
A named tuple containing the ``function``, ``args``, ``kwargs`` and
``task`` associated with this `TaskState` instance. This defaults to
``None`` and can remain empty if it is a dependency that this worker
will receive from another worker.
"""
def __init__(self, key, runspec=None):
assert key is not None
self.key = key
self.runspec = runspec
self.dependencies = set()
self.dependents = set()
self.duration = None
self.priority = None
self.state = "new"
self.who_has = set()
self.coming_from = None
self.waiting_for_data = set()
self.resource_restrictions = None
self.exception = None
self.exception_text = ""
self.traceback = None
self.traceback_text = ""
self.type = None
self.suspicious_count = 0
self.startstops = list()
self.start_time = None
self.stop_time = None
self.metadata = {}
self.nbytes = None
self.annotations = None
self.scheduler_holds_ref = False
def __repr__(self):
return f"<Task {self.key!r} {self.state}>"
def get_nbytes(self) -> int:
nbytes = self.nbytes
return nbytes if nbytes is not None else DEFAULT_DATA_SIZE
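# Illustrative sketch (not part of the original module): how TaskState
# objects are linked when a task and one of its dependencies are registered.
# The keys and callable below are hypothetical; in practice Worker.add_task
# performs this bookkeeping.
def _taskstate_linking_sketch():  # pragma: no cover - documentation only
    parent = TaskState("sum-abc123", runspec=SerializedTask(sum, ([1, 2],), {}, None))
    dep = TaskState("data-def456")        # dependency expected from a peer
    parent.dependencies.add(dep)          # parent needs dep's data
    dep.dependents.add(parent)            # dep knows who is waiting on it
    parent.waiting_for_data.add(dep.key)  # dynamic view of still-missing deps
    return parent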
class Worker(ServerNode):
"""Worker node in a Dask distributed cluster
Workers perform two functions:
1. **Serve data** from a local dictionary
2. **Perform computation** on that data and on data from peers
Workers keep the scheduler informed of their data and use that scheduler to
gather data from other workers when necessary to perform a computation.
You can start a worker with the ``dask-worker`` command line application::
$ dask-worker scheduler-ip:port
Use the ``--help`` flag to see more options::
$ dask-worker --help
    The rest of this docstring is about the internal state that the worker uses
to manage and track internal computations.
**State**
**Informational State**
These attributes don't change significantly during execution.
* **nthreads:** ``int``:
        Number of threads used by this worker process
* **executors:** ``Dict[str, concurrent.futures.Executor]``:
Executors used to perform computation. Always contains the default
executor.
* **local_directory:** ``path``:
Path on local machine to store temporary files
* **scheduler:** ``rpc``:
Location of scheduler. See ``.ip/.port`` attributes.
* **name:** ``string``:
Alias
* **services:** ``{str: Server}``:
Auxiliary web servers running on this worker
    * **service_ports:** ``{str: port}``:
        Ports on which the services above are listening
* **total_out_connections**: ``int``
The maximum number of concurrent outgoing requests for data
* **total_in_connections**: ``int``
The maximum number of concurrent incoming requests for data
* **comm_threshold_bytes**: ``int``
As long as the total number of bytes in flight is below this threshold
        we will not limit the number of outgoing connections for a single
        task's dependency fetch.
* **batched_stream**: ``BatchedSend``
A batched stream along which we communicate to the scheduler
* **log**: ``[(message)]``
A structured and queryable log. See ``Worker.story``
**Volatile State**
These attributes track the progress of tasks that this worker is trying to
complete. In the descriptions below a ``key`` is the name of a task that
we want to compute and ``dep`` is the name of a piece of dependent data
that we want to collect from others.
* **tasks**: ``{key: TaskState}``
The tasks currently executing on this worker (and any dependencies of those tasks)
* **data:** ``{key: object}``:
Prefer using the **host** attribute instead of this, unless
memory_limit and at least one of memory_target_fraction or
        memory_spill_fraction values are defined; in that case, this attribute
        is a zict.Buffer, from which information on the LRU cache can be queried.
* **data.memory:** ``{key: object}``:
Dictionary mapping keys to actual values stored in memory. Only
available if condition for **data** being a zict.Buffer is met.
* **data.disk:** ``{key: object}``:
Dictionary mapping keys to actual values stored on disk. Only
available if condition for **data** being a zict.Buffer is met.
* **data_needed**: deque(keys)
The keys which still require data in order to execute, arranged in a deque
* **ready**: [keys]
Keys that are ready to run. Stored in a LIFO stack
* **constrained**: [keys]
Keys for which we have the data to run, but are waiting on abstract
resources like GPUs. Stored in a FIFO deque
* **executing_count**: ``int``
A count of tasks currently executing on this worker
    * **executed_count**: ``int``
        The number of tasks that this worker has run in its lifetime
* **long_running**: {keys}
A set of keys of tasks that are running and have started their own
long-running clients.
* **has_what**: ``{worker: {deps}}``
The data that we care about that we think a worker has
* **pending_data_per_worker**: ``{worker: [dep]}``
The data on each worker that we still want, prioritized as a deque
* **in_flight_tasks**: ``int``
A count of the number of tasks that are coming to us in current
peer-to-peer connections
* **in_flight_workers**: ``{worker: {task}}``
The workers from which we are currently gathering data and the
dependencies we expect from those connections
* **comm_bytes**: ``int``
The total number of bytes in flight
* **threads**: ``{key: int}``
The ID of the thread on which the task ran
* **active_threads**: ``{int: key}``
The keys currently running on active threads
* **waiting_for_data_count**: ``int``
A count of how many tasks are currently waiting for data
Parameters
----------
scheduler_ip: str
scheduler_port: int
ip: str, optional
data: MutableMapping, type, None
        The object to use for storage; builds a disk-backed LRU dict by default
nthreads: int, optional
loop: tornado.ioloop.IOLoop
local_directory: str, optional
Directory where we place local resources
name: str, optional
memory_limit: int, float, string
Number of bytes of memory that this worker should use.
Set to zero for no limit. Set to 'auto' to calculate
as system.MEMORY_LIMIT * min(1, nthreads / total_cores)
Use strings or numbers like 5GB or 5e9
memory_target_fraction: float
Fraction of memory to try to stay beneath
memory_spill_fraction: float
Fraction of memory at which we start spilling to disk
memory_pause_fraction: float
Fraction of memory at which we stop running new tasks
executor: concurrent.futures.Executor, dict[str, concurrent.futures.Executor], str
The executor(s) to use. Depending on the type, it has the following meanings:
- Executor instance: The default executor.
- Dict[str, Executor]: mapping names to Executor instances. If the
"default" key isn't in the dict, a "default" executor will be created
using ``ThreadPoolExecutor(nthreads)``.
        - Str: The string "offload", which refers to the same thread pool used for
offloading communications. This results in the same thread being used
for deserialization and computation.
resources: dict
Resources that this worker has like ``{'GPU': 2}``
nanny: str
Address on which to contact nanny, if it exists
lifetime: str
Amount of time like "1 hour" after which we gracefully shut down the worker.
This defaults to None, meaning no explicit shutdown time.
lifetime_stagger: str
Amount of time like "5 minutes" to stagger the lifetime value
The actual lifetime will be selected uniformly at random between
lifetime +/- lifetime_stagger
lifetime_restart: bool
Whether or not to restart a worker after it has reached its lifetime
Default False
Examples
--------
Use the command line to start a worker::
$ dask-scheduler
Start scheduler at 127.0.0.1:8786
$ dask-worker 127.0.0.1:8786
Start worker at: 127.0.0.1:1234
Registered with scheduler at: 127.0.0.1:8786
See Also
--------
distributed.scheduler.Scheduler
distributed.nanny.Nanny
"""
_instances = weakref.WeakSet()
_initialized_clients = weakref.WeakSet()
def __init__(
self,
scheduler_ip=None,
scheduler_port=None,
scheduler_file=None,
ncores=None,
nthreads=None,
loop=None,
local_dir=None,
local_directory=None,
services=None,
service_ports=None,
service_kwargs=None,
name=None,
reconnect=True,
memory_limit="auto",
executor=None,
resources=None,
silence_logs=None,
death_timeout=None,
preload=None,
preload_argv=None,
security=None,
contact_address=None,
memory_monitor_interval="200ms",
extensions=None,
metrics=DEFAULT_METRICS,
startup_information=DEFAULT_STARTUP_INFORMATION,
data=None,
interface=None,
host=None,
port=None,
protocol=None,
dashboard_address=None,
dashboard=False,
http_prefix="/",
nanny=None,
plugins=(),
low_level_profiler=dask.config.get("distributed.worker.profile.low-level"),
validate=None,
profile_cycle_interval=None,
lifetime=None,
lifetime_stagger=None,
lifetime_restart=None,
**kwargs,
):
self.tasks = dict()
self.waiting_for_data_count = 0
self.has_what = defaultdict(set)
self.pending_data_per_worker = defaultdict(deque)
self.nanny = nanny
self._lock = threading.Lock()
self.data_needed = deque() # TODO: replace with heap?
self.in_flight_tasks = 0
self.in_flight_workers = dict()
self.total_out_connections = dask.config.get(
"distributed.worker.connections.outgoing"
)
self.total_in_connections = dask.config.get(
"distributed.worker.connections.incoming"
)
self.comm_threshold_bytes = 10e6
self.comm_nbytes = 0
self._missing_dep_flight = set()
self.threads = dict()
self.active_threads_lock = threading.Lock()
self.active_threads = dict()
self.active_keys = set()
self.profile_keys = defaultdict(profile.create)
self.profile_keys_history = deque(maxlen=3600)
self.profile_recent = profile.create()
self.profile_history = deque(maxlen=3600)
self.generation = 0
self.ready = list()
self.constrained = deque()
self.executing_count = 0
self.executed_count = 0
self.long_running = set()
self.recent_messages_log = deque(
maxlen=dask.config.get("distributed.comm.recent-messages-log-length")
)
self.target_message_size = 50e6 # 50 MB
self.log = deque(maxlen=100000)
if validate is None:
validate = dask.config.get("distributed.scheduler.validate")
self.validate = validate
self._transitions = {
# Basic state transitions
("new", "waiting"): self.transition_new_waiting,
("new", "fetch"): self.transition_new_fetch,
("waiting", "ready"): self.transition_waiting_ready,
("fetch", "flight"): self.transition_fetch_flight,
("ready", "executing"): self.transition_ready_executing,
("executing", "memory"): self.transition_executing_done,
("flight", "memory"): self.transition_flight_memory,
("flight", "fetch"): self.transition_flight_fetch,
# Shouldn't be a valid transition but happens nonetheless
("ready", "memory"): self.transition_ready_memory,
# Scheduler intercession (re-assignment)
("fetch", "waiting"): self.transition_fetch_waiting,
("flight", "waiting"): self.transition_flight_waiting,
# Errors, long-running, constrained
("waiting", "error"): self.transition_waiting_done,
("constrained", "executing"): self.transition_constrained_executing,
("executing", "error"): self.transition_executing_done,
("executing", "rescheduled"): self.transition_executing_done,
("executing", "long-running"): self.transition_executing_long_running,
("long-running", "error"): self.transition_executing_done,
("long-running", "memory"): self.transition_executing_done,
("long-running", "rescheduled"): self.transition_executing_done,
}
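        # transition() below dispatches through this table: for example,
        # calling transition(ts, "ready") on a ts in state "waiting" invokes
        # self.transition_waiting_ready(ts); an unregistered pair raises
        # KeyError.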
self.incoming_transfer_log = deque(maxlen=100000)
self.incoming_count = 0
self.outgoing_transfer_log = deque(maxlen=100000)
self.outgoing_count = 0
self.outgoing_current_count = 0
self.repetitively_busy = 0
self.bandwidth = parse_bytes(dask.config.get("distributed.scheduler.bandwidth"))
self.bandwidth_workers = defaultdict(
lambda: (0, 0)
) # bw/count recent transfers
self.bandwidth_types = defaultdict(lambda: (0, 0)) # bw/count recent transfers
self.latency = 0.001
self._client = None
if profile_cycle_interval is None:
profile_cycle_interval = dask.config.get("distributed.worker.profile.cycle")
profile_cycle_interval = parse_timedelta(profile_cycle_interval, default="ms")
self._setup_logging(logger)
if local_dir is not None:
warnings.warn("The local_dir keyword has moved to local_directory")
local_directory = local_dir
if not local_directory:
local_directory = dask.config.get("temporary-directory") or os.getcwd()
os.makedirs(local_directory, exist_ok=True)
local_directory = os.path.join(local_directory, "dask-worker-space")
with warn_on_duration(
"1s",
"Creating scratch directories is taking a surprisingly long time. "
"This is often due to running workers on a network file system. "
"Consider specifying a local-directory to point workers to write "
"scratch data to a local disk.",
):
self._workspace = WorkSpace(os.path.abspath(local_directory))
self._workdir = self._workspace.new_work_dir(prefix="worker-")
self.local_directory = self._workdir.dir_path
if preload is None:
preload = dask.config.get("distributed.worker.preload")
if preload_argv is None:
preload_argv = dask.config.get("distributed.worker.preload-argv")
self.preloads = preloading.process_preloads(
self, preload, preload_argv, file_dir=self.local_directory
)
if scheduler_file:
cfg = json_load_robust(scheduler_file)
scheduler_addr = cfg["address"]
elif scheduler_ip is None and dask.config.get("scheduler-address", None):
scheduler_addr = dask.config.get("scheduler-address")
elif scheduler_port is None:
scheduler_addr = coerce_to_address(scheduler_ip)
else:
scheduler_addr = coerce_to_address((scheduler_ip, scheduler_port))
self.contact_address = contact_address
if protocol is None:
protocol_address = scheduler_addr.split("://")
if len(protocol_address) == 2:
protocol = protocol_address[0]
self._start_port = port
self._start_host = host
if host:
# Helpful error message if IPv6 specified incorrectly
_, host_address = parse_address(host)
if host_address.count(":") > 1 and not host_address.startswith("["):
raise ValueError(
"Host address with IPv6 must be bracketed like '[::1]'; "
f"got {host_address}"
)
self._interface = interface
self._protocol = protocol
if ncores is not None:
warnings.warn("the ncores= parameter has moved to nthreads=")
nthreads = ncores
self.nthreads = nthreads or CPU_COUNT
if resources is None:
resources = dask.config.get("distributed.worker.resources", None)
self.total_resources = resources or {}
self.available_resources = (resources or {}).copy()
self.death_timeout = parse_timedelta(death_timeout)
self.extensions = dict()
if silence_logs:
silence_logging(level=silence_logs)
if isinstance(security, dict):
security = Security(**security)
self.security = security or Security()
assert isinstance(self.security, Security)
self.connection_args = self.security.get_connection_args("worker")
self.memory_limit = parse_memory_limit(memory_limit, self.nthreads)
self.paused = False
if "memory_target_fraction" in kwargs:
self.memory_target_fraction = kwargs.pop("memory_target_fraction")
else:
self.memory_target_fraction = dask.config.get(
"distributed.worker.memory.target"
)
if "memory_spill_fraction" in kwargs:
self.memory_spill_fraction = kwargs.pop("memory_spill_fraction")
else:
self.memory_spill_fraction = dask.config.get(
"distributed.worker.memory.spill"
)
if "memory_pause_fraction" in kwargs:
self.memory_pause_fraction = kwargs.pop("memory_pause_fraction")
else:
self.memory_pause_fraction = dask.config.get(
"distributed.worker.memory.pause"
)
if isinstance(data, MutableMapping):
self.data = data
elif callable(data):
self.data = data()
elif isinstance(data, tuple):
self.data = data[0](**data[1])
elif self.memory_limit and (
self.memory_target_fraction or self.memory_spill_fraction
):
from .spill import SpillBuffer
self.data = SpillBuffer(
os.path.join(self.local_directory, "storage"),
target=int(
self.memory_limit
* (self.memory_target_fraction or self.memory_spill_fraction)
)
or sys.maxsize,
)
else:
self.data = dict()
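        # Accepted forms for ``data`` (illustrative): a mapping instance, a
        # callable/type such as ``dict``, or a ``(cls, kwargs)`` tuple like
        # ``(SomeStore, {"path": "/tmp/store"})``, which expands to
        # ``SomeStore(path="/tmp/store")``; SomeStore is a hypothetical class.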
self.actors = {}
self.loop = loop or IOLoop.current()
self.reconnect = reconnect
# Common executors always available
self.executors: dict[str, concurrent.futures.Executor] = {
"offload": utils._offload_executor,
"actor": ThreadPoolExecutor(1, thread_name_prefix="Dask-Actor-Threads"),
}
if nvml.device_get_count() > 0:
self.executors["gpu"] = ThreadPoolExecutor(
1, thread_name_prefix="Dask-GPU-Threads"
)
# Find the default executor
if executor == "offload":
self.executors["default"] = self.executors["offload"]
elif isinstance(executor, dict):
self.executors.update(executor)
elif executor is not None:
self.executors["default"] = executor
if "default" not in self.executors:
self.executors["default"] = ThreadPoolExecutor(
self.nthreads, thread_name_prefix="Dask-Default-Threads"
)
self.batched_stream = BatchedSend(interval="2ms", loop=self.loop)
self.name = name
self.scheduler_delay = 0
self.stream_comms = dict()
self.heartbeat_active = False
self._ipython_kernel = None
if self.local_directory not in sys.path:
sys.path.insert(0, self.local_directory)
self.services = {}
self.service_specs = services or {}
self._dashboard_address = dashboard_address
self._dashboard = dashboard
self._http_prefix = http_prefix
self.metrics = dict(metrics) if metrics else {}
self.startup_information = (
dict(startup_information) if startup_information else {}
)
self.low_level_profiler = low_level_profiler
handlers = {
"gather": self.gather,
"run": self.run,
"run_coroutine": self.run_coroutine,
"get_data": self.get_data,
"update_data": self.update_data,
"free_keys": self.handle_free_keys,
"terminate": self.close,
"ping": pingpong,
"upload_file": self.upload_file,
"start_ipython": self.start_ipython,
"call_stack": self.get_call_stack,
"profile": self.get_profile,
"profile_metadata": self.get_profile_metadata,
"get_logs": self.get_logs,
"keys": self.keys,
"versions": self.versions,
"actor_execute": self.actor_execute,
"actor_attribute": self.actor_attribute,
"plugin-add": self.plugin_add,
"plugin-remove": self.plugin_remove,
"get_monitor_info": self.get_monitor_info,
}
stream_handlers = {
"close": self.close,
"compute-task": self.add_task,
"cancel-compute": self.cancel_compute,
"free-keys": self.handle_free_keys,
"superfluous-data": self.handle_superfluous_data,
"steal-request": self.steal_request,
}
super().__init__(
handlers=handlers,
stream_handlers=stream_handlers,
io_loop=self.loop,
connection_args=self.connection_args,
**kwargs,
)
self.scheduler = self.rpc(scheduler_addr)
self.execution_state = {
"scheduler": self.scheduler.address,
"ioloop": self.loop,
"worker": self,
}
pc = PeriodicCallback(self.heartbeat, 1000)
self.periodic_callbacks["heartbeat"] = pc
pc = PeriodicCallback(
lambda: self.batched_stream.send({"op": "keep-alive"}), 60000
)
self.periodic_callbacks["keep-alive"] = pc
self._suspicious_count_limit = 10
self._address = contact_address
self.memory_monitor_interval = parse_timedelta(
memory_monitor_interval, default="ms"
)
if self.memory_limit:
self._memory_monitoring = False
pc = PeriodicCallback(
self.memory_monitor, self.memory_monitor_interval * 1000
)
self.periodic_callbacks["memory"] = pc
if extensions is None:
extensions = DEFAULT_EXTENSIONS
for ext in extensions:
ext(self)
self._throttled_gc = ThrottledGC(logger=logger)
setproctitle("dask-worker [not started]")
profile_trigger_interval = parse_timedelta(
dask.config.get("distributed.worker.profile.interval"), default="ms"
)
pc = PeriodicCallback(self.trigger_profile, profile_trigger_interval * 1000)
self.periodic_callbacks["profile"] = pc
pc = PeriodicCallback(self.cycle_profile, profile_cycle_interval * 1000)
self.periodic_callbacks["profile-cycle"] = pc
self.plugins = {}
self._pending_plugins = plugins
self.lifetime = lifetime or dask.config.get(
"distributed.worker.lifetime.duration"
)
lifetime_stagger = lifetime_stagger or dask.config.get(
"distributed.worker.lifetime.stagger"
)
self.lifetime_restart = lifetime_restart or dask.config.get(
"distributed.worker.lifetime.restart"
)
if isinstance(self.lifetime, str):
self.lifetime = parse_timedelta(self.lifetime)
if isinstance(lifetime_stagger, str):
lifetime_stagger = parse_timedelta(lifetime_stagger)
if self.lifetime:
self.lifetime += (random.random() * 2 - 1) * lifetime_stagger
self.io_loop.call_later(self.lifetime, self.close_gracefully)
Worker._instances.add(self)
##################
# Administrative #
##################
def __repr__(self):
return "<%s: %r, %s, %s, stored: %d, running: %d/%d, ready: %d, comm: %d, waiting: %d>" % (
self.__class__.__name__,
self.address,
self.name,
self.status,
len(self.data),
self.executing_count,
self.nthreads,
len(self.ready),
self.in_flight_tasks,
self.waiting_for_data_count,
)
@property
def logs(self):
return self._deque_handler.deque
def log_event(self, topic, msg):
self.batched_stream.send(
{
"op": "log-event",
"topic": topic,
"msg": msg,
}
)
@property
def worker_address(self):
"""For API compatibility with Nanny"""
return self.address
@property
def local_dir(self):
"""For API compatibility with Nanny"""
warnings.warn(
"The local_dir attribute has moved to local_directory", stacklevel=2
)
return self.local_directory
@property
def executor(self):
return self.executors["default"]
async def get_metrics(self):
out = dict(
executing=self.executing_count,
in_memory=len(self.data),
ready=len(self.ready),
in_flight=self.in_flight_tasks,
bandwidth={
"total": self.bandwidth,
"workers": dict(self.bandwidth_workers),
"types": keymap(typename, self.bandwidth_types),
},
spilled_nbytes=getattr(self.data, "spilled_total", 0),
)
out.update(self.monitor.recent())
for k, metric in self.metrics.items():
try:
result = metric(self)
if isawaitable(result):
result = await result
# In case of collision, prefer core metrics
out.setdefault(k, result)
except Exception: # TODO: log error once
pass
return out
async def get_startup_information(self):
result = {}
for k, f in self.startup_information.items():
try:
v = f(self)
if isawaitable(v):
v = await v
result[k] = v
except Exception: # TODO: log error once
pass
return result
def identity(self, comm=None):
return {
"type": type(self).__name__,
"id": self.id,
"scheduler": self.scheduler.address,
"nthreads": self.nthreads,
"ncores": self.nthreads, # backwards compatibility
"memory_limit": self.memory_limit,
}
#####################
# External Services #
#####################
async def _register_with_scheduler(self):
self.periodic_callbacks["keep-alive"].stop()
self.periodic_callbacks["heartbeat"].stop()
start = time()
if self.contact_address is None:
self.contact_address = self.address
logger.info("-" * 49)
while True:
try:
_start = time()
comm = await connect(self.scheduler.address, **self.connection_args)
comm.name = "Worker->Scheduler"
comm._server = weakref.ref(self)
await comm.write(
dict(
op="register-worker",
reply=False,
address=self.contact_address,
keys=list(self.data),
nthreads=self.nthreads,
name=self.name,
nbytes={
ts.key: ts.get_nbytes()
for ts in self.tasks.values()
# Only if the task is in memory this is a sensible
# result since otherwise it simply submits the
# default value
if ts.state == "memory"
},
types={k: typename(v) for k, v in self.data.items()},
now=time(),
resources=self.total_resources,
memory_limit=self.memory_limit,
local_directory=self.local_directory,
services=self.service_ports,
nanny=self.nanny,
pid=os.getpid(),
versions=get_versions(),
metrics=await self.get_metrics(),
extra=await self.get_startup_information(),
),
serializers=["msgpack"],
)
future = comm.read(deserializers=["msgpack"])
response = await future
if response.get("warning"):
logger.warning(response["warning"])
_end = time()
middle = (_start + _end) / 2
self._update_latency(_end - start)
self.scheduler_delay = response["time"] - middle
self.status = Status.running
break
except OSError:
logger.info("Waiting to connect to: %26s", self.scheduler.address)
await asyncio.sleep(0.1)
except TimeoutError:
logger.info("Timed out when connecting to scheduler")
if response["status"] != "OK":
raise ValueError(f"Unexpected response from register: {response!r}")
else:
await asyncio.gather(
*(
self.plugin_add(name=name, plugin=plugin)
for name, plugin in response["worker-plugins"].items()
)
)
logger.info(" Registered to: %26s", self.scheduler.address)
logger.info("-" * 49)
self.batched_stream.start(comm)
self.periodic_callbacks["keep-alive"].start()
self.periodic_callbacks["heartbeat"].start()
self.loop.add_callback(self.handle_scheduler, comm)
def _update_latency(self, latency):
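        # Exponentially weighted moving average: each new sample contributes
        # 5%, which smooths transient network spikes while still tracking
        # longer-term drift.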
self.latency = latency * 0.05 + self.latency * 0.95
if self.digests is not None:
self.digests["latency"].add(latency)
async def heartbeat(self):
if self.heartbeat_active:
logger.debug("Heartbeat skipped: channel busy")
return
self.heartbeat_active = True
logger.debug("Heartbeat: %s", self.address)
try:
start = time()
response = await retry_operation(
self.scheduler.heartbeat_worker,
address=self.contact_address,
now=start,
metrics=await self.get_metrics(),
executing={
key: start - self.tasks[key].start_time
for key in self.active_keys
if key in self.tasks
},
)
end = time()
middle = (start + end) / 2
self._update_latency(end - start)
if response["status"] == "missing":
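                # The scheduler has forgotten this worker. Poll briefly: if
                # the worker stays in the running state, the for/else fires
                # and we re-register; if it is shutting down, the loop breaks
                # and we just return.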
for i in range(10):
if self.status != Status.running:
break
else:
await asyncio.sleep(0.05)
else:
await self._register_with_scheduler()
return
self.scheduler_delay = response["time"] - middle
self.periodic_callbacks["heartbeat"].callback_time = (
response["heartbeat-interval"] * 1000
)
self.bandwidth_workers.clear()
self.bandwidth_types.clear()
except CommClosedError:
logger.warning("Heartbeat to scheduler failed", exc_info=True)
if not self.reconnect:
await self.close(report=False)
except OSError as e:
# Scheduler is gone. Respect distributed.comm.timeouts.connect
if "Timed out trying to connect" in str(e):
await self.close(report=False)
else:
raise e
finally:
self.heartbeat_active = False
async def handle_scheduler(self, comm):
try:
await self.handle_stream(
comm, every_cycle=[self.ensure_communicating, self.ensure_computing]
)
except Exception as e:
logger.exception(e)
raise
finally:
if self.reconnect and self.status == Status.running:
logger.info("Connection to scheduler broken. Reconnecting...")
self.loop.add_callback(self.heartbeat)
else:
await self.close(report=False)
def start_ipython(self, comm):
"""Start an IPython kernel
Returns Jupyter connection info dictionary.
"""
from ._ipython_utils import start_ipython
if self._ipython_kernel is None:
self._ipython_kernel = start_ipython(
ip=self.ip, ns={"worker": self}, log=logger
)
return self._ipython_kernel.get_connection_info()
async def upload_file(self, comm, filename=None, data=None, load=True):
out_filename = os.path.join(self.local_directory, filename)
def func(data):
if isinstance(data, str):
data = data.encode()
with open(out_filename, "wb") as f:
f.write(data)
f.flush()
return data
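        # Small payloads are written inline; larger ones are offloaded to a
        # separate thread so the event loop is not blocked on disk I/O.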
if len(data) < 10000:
data = func(data)
else:
data = await offload(func, data)
if load:
try:
import_file(out_filename)
cache_loads.data.clear()
except Exception as e:
logger.exception(e)
raise e
return {"status": "OK", "nbytes": len(data)}
def keys(self, comm=None):
return list(self.data)
async def gather(self, comm=None, who_has=None):
who_has = {
k: [coerce_to_address(addr) for addr in v]
for k, v in who_has.items()
if k not in self.data
}
result, missing_keys, missing_workers = await gather_from_workers(
who_has, rpc=self.rpc, who=self.address
)
self.update_data(data=result, report=False)
if missing_keys:
logger.warning(
"Could not find data: %s on workers: %s (who_has: %s)",
missing_keys,
missing_workers,
who_has,
)
return {"status": "partial-fail", "keys": missing_keys}
else:
return {"status": "OK"}
def get_monitor_info(self, comm=None, recent=False, start=0):
result = dict(
range_query=(
self.monitor.recent()
if recent
else self.monitor.range_query(start=start)
),
count=self.monitor.count,
last_time=self.monitor.last_time,
)
if nvml.device_get_count() > 0:
result["gpu_name"] = self.monitor.gpu_name
result["gpu_memory_total"] = self.monitor.gpu_memory_total
return result
#############
# Lifecycle #
#############
async def start(self):
if self.status and self.status in (
Status.closed,
Status.closing,
Status.closing_gracefully,
):
return
assert self.status is Status.undefined, self.status
await super().start()
enable_gc_diagnosis()
thread_state.on_event_loop_thread = True
ports = parse_ports(self._start_port)
for port in ports:
start_address = address_from_user_args(
host=self._start_host,
port=port,
interface=self._interface,
protocol=self._protocol,
security=self.security,
)
kwargs = self.security.get_listen_args("worker")
if self._protocol in ("tcp", "tls"):
kwargs = kwargs.copy()
kwargs["default_host"] = get_ip(
get_address_host(self.scheduler.address)
)
try:
await self.listen(start_address, **kwargs)
except OSError as e:
if len(ports) > 1 and e.errno == errno.EADDRINUSE:
continue
else:
raise
else:
self._start_address = start_address
break
else:
raise ValueError(
f"Could not start Worker on host {self._start_host}"
f"with port {self._start_port}"
)
# Start HTTP server associated with this Worker node
routes = get_handlers(
server=self,
modules=dask.config.get("distributed.worker.http.routes"),
prefix=self._http_prefix,
)
self.start_http_server(routes, self._dashboard_address)
if self._dashboard:
try:
import distributed.dashboard.worker
except ImportError:
logger.debug("To start diagnostics web server please install Bokeh")
else:
distributed.dashboard.worker.connect(
self.http_application,
self.http_server,
self,
prefix=self._http_prefix,
)
self.ip = get_address_host(self.address)
if self.name is None:
self.name = self.address
for preload in self.preloads:
await preload.start()
# Services listen on all addresses
# Note Nanny is not a "real" service, just some metadata
# passed in service_ports...
self.start_services(self.ip)
try:
listening_address = "%s%s:%d" % (self.listener.prefix, self.ip, self.port)
except Exception:
listening_address = f"{self.listener.prefix}{self.ip}"
logger.info(" Start worker at: %26s", self.address)
logger.info(" Listening to: %26s", listening_address)
for k, v in self.service_ports.items():
logger.info(" {:>16} at: {:>26}".format(k, self.ip + ":" + str(v)))
logger.info("Waiting to connect to: %26s", self.scheduler.address)
logger.info("-" * 49)
logger.info(" Threads: %26d", self.nthreads)
if self.memory_limit:
logger.info(" Memory: %26s", format_bytes(self.memory_limit))
logger.info(" Local Directory: %26s", self.local_directory)
setproctitle("dask-worker [%s]" % self.address)
await asyncio.gather(
*(self.plugin_add(plugin=plugin) for plugin in self._pending_plugins)
)
self._pending_plugins = ()
await self._register_with_scheduler()
self.start_periodic_callbacks()
return self
def _close(self, *args, **kwargs):
warnings.warn("Worker._close has moved to Worker.close", stacklevel=2)
return self.close(*args, **kwargs)
async def close(
self, report=True, timeout=30, nanny=True, executor_wait=True, safe=False
):
with log_errors():
if self.status in (Status.closed, Status.closing):
await self.finished()
return
self.reconnect = False
disable_gc_diagnosis()
try:
logger.info("Stopping worker at %s", self.address)
except ValueError: # address not available if already closed
logger.info("Stopping worker")
if self.status not in (Status.running, Status.closing_gracefully):
logger.info("Closed worker has not yet started: %s", self.status)
self.status = Status.closing
for preload in self.preloads:
await preload.teardown()
if nanny and self.nanny:
with self.rpc(self.nanny) as r:
await r.close_gracefully()
setproctitle("dask-worker [closing]")
teardowns = [
plugin.teardown(self)
for plugin in self.plugins.values()
if hasattr(plugin, "teardown")
]
await asyncio.gather(*(td for td in teardowns if isawaitable(td)))
for pc in self.periodic_callbacks.values():
pc.stop()
if self._client:
# If this worker is the last one alive, clean up the worker
# initialized clients
if not any(
w
for w in Worker._instances
if w != self and w.status == Status.running
):
for c in Worker._initialized_clients:
# Regardless of what the client was initialized with
# we'll require the result as a future. This is
                        # necessary since the heuristics for detecting an
                        # asynchronous context are not reliable and we might
                        # deadlock here
c._asynchronous = True
if c.asynchronous:
await c.close()
else:
                            # There is still the chance that, even with us
                            # telling the client to be async, it will decide
                            # otherwise
c.close()
with suppress(EnvironmentError, TimeoutError):
if report and self.contact_address is not None:
await asyncio.wait_for(
self.scheduler.unregister(
address=self.contact_address, safe=safe
),
timeout,
)
await self.scheduler.close_rpc()
self._workdir.release()
self.stop_services()
if (
self.batched_stream
and self.batched_stream.comm
and not self.batched_stream.comm.closed()
):
self.batched_stream.send({"op": "close-stream"})
if self.batched_stream:
with suppress(TimeoutError):
await self.batched_stream.close(timedelta(seconds=timeout))
for executor in self.executors.values():
if executor is utils._offload_executor:
continue # Never shutdown the offload executor
if isinstance(executor, ThreadPoolExecutor):
executor._work_queue.queue.clear()
executor.shutdown(wait=executor_wait, timeout=timeout)
else:
executor.shutdown(wait=executor_wait)
self.stop()
await self.rpc.close()
self.status = Status.closed
await super().close()
setproctitle("dask-worker [closed]")
return "OK"
async def close_gracefully(self, restart=None):
"""Gracefully shut down a worker
This first informs the scheduler that we're shutting down, and asks it
to move our data elsewhere. Afterwards, we close as normal
"""
if self.status in (Status.closing, Status.closing_gracefully):
await self.finished()
if self.status == Status.closed:
return
if restart is None:
restart = self.lifetime_restart
logger.info("Closing worker gracefully: %s", self.address)
self.status = Status.closing_gracefully
await self.scheduler.retire_workers(workers=[self.address], remove=False)
await self.close(safe=True, nanny=not restart)
async def terminate(self, comm=None, report=True, **kwargs):
await self.close(report=report, **kwargs)
return "OK"
async def wait_until_closed(self):
warnings.warn("wait_until_closed has moved to finished()")
await self.finished()
assert self.status == Status.closed
################
# Worker Peers #
################
def send_to_worker(self, address, msg):
if address not in self.stream_comms:
bcomm = BatchedSend(interval="1ms", loop=self.loop)
self.stream_comms[address] = bcomm
async def batched_send_connect():
comm = await connect(
address, **self.connection_args # TODO, serialization
)
comm.name = "Worker->Worker"
await comm.write({"op": "connection_stream"})
bcomm.start(comm)
self.loop.add_callback(batched_send_connect)
self.stream_comms[address].send(msg)
async def get_data(
self, comm, keys=None, who=None, serializers=None, max_connections=None
):
start = time()
if max_connections is None:
max_connections = self.total_in_connections
# Allow same-host connections more liberally
if (
max_connections
and comm
and get_address_host(comm.peer_address) == get_address_host(self.address)
):
max_connections = max_connections * 2
if self.paused:
max_connections = 1
throttle_msg = " Throttling outgoing connections because worker is paused."
else:
throttle_msg = ""
if (
max_connections is not False
and self.outgoing_current_count >= max_connections
):
logger.debug(
"Worker %s has too many open connections to respond to data request "
"from %s (%d/%d).%s",
self.address,
who,
self.outgoing_current_count,
max_connections,
throttle_msg,
)
return {"status": "busy"}
self.outgoing_current_count += 1
data = {k: self.data[k] for k in keys if k in self.data}
if len(data) < len(keys):
for k in set(keys) - set(data):
if k in self.actors:
from .actor import Actor
data[k] = Actor(type(self.actors[k]), self.address, k, worker=self)
msg = {"status": "OK", "data": {k: to_serialize(v) for k, v in data.items()}}
nbytes = {k: self.tasks[k].nbytes for k in data if k in self.tasks}
stop = time()
if self.digests is not None:
self.digests["get-data-load-duration"].add(stop - start)
start = time()
try:
compressed = await comm.write(msg, serializers=serializers)
response = await comm.read(deserializers=serializers)
assert response == "OK", response
except OSError:
logger.exception(
"failed during get data with %s -> %s", self.address, who, exc_info=True
)
comm.abort()
raise
finally:
self.outgoing_current_count -= 1
stop = time()
if self.digests is not None:
self.digests["get-data-send-duration"].add(stop - start)
total_bytes = sum(filter(None, nbytes.values()))
self.outgoing_count += 1
        duration = (stop - start) or 0.5  # time() can be too coarse on Windows and report 0
self.outgoing_transfer_log.append(
{
"start": start + self.scheduler_delay,
"stop": stop + self.scheduler_delay,
"middle": (start + stop) / 2,
"duration": duration,
"who": who,
"keys": nbytes,
"total": total_bytes,
"compressed": compressed,
"bandwidth": total_bytes / duration,
}
)
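        # The reply was already sent in-band via comm.write above, so tell
        # the RPC machinery not to send an additional response message.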
return Status.dont_reply
###################
# Local Execution #
###################
def update_data(self, comm=None, data=None, report=True, serializers=None):
for key, value in data.items():
ts = self.tasks.get(key)
if getattr(ts, "state", None) is not None:
self.transition(ts, "memory", value=value)
else:
self.tasks[key] = ts = TaskState(key)
self.put_key_in_memory(ts, value)
ts.priority = None
ts.duration = None
ts.scheduler_holds_ref = True
self.log.append((key, "receive-from-scatter"))
if report:
self.log.append(
("Notifying scheduler about in-memory in update-data", list(data))
)
self.batched_stream.send({"op": "add-keys", "keys": list(data)})
info = {"nbytes": {k: sizeof(v) for k, v in data.items()}, "status": "OK"}
return info
def handle_free_keys(self, comm=None, keys=None, reason=None):
"""
Handler to be called by the scheduler.
The given keys are no longer referred to and required by the scheduler.
The worker is now allowed to release the key, if applicable.
        This does not guarantee that the memory is released, since the worker
        may still decide to hold on to the data and task because they are
        required by an upstream dependency.
"""
self.log.append(("free-keys", keys, reason))
for key in keys:
ts = self.tasks.get(key)
if ts is not None:
ts.scheduler_holds_ref = False
self.release_key(key, report=False, reason=reason)
def handle_superfluous_data(self, keys=(), reason=None):
"""Stream handler notifying the worker that it might be holding unreferenced, superfluous data.
This should not actually happen during ordinary operations and is only
intended to correct any erroneous state. An example where this is
necessary is if a worker fetches data for a downstream task but that
task is released before the data arrives.
        In this case, the scheduler will notify the worker that it may be
        holding this unnecessary data, if the worker hasn't already released
        the data itself.
        This handler does not guarantee that the task or the data will
        actually be released; it only asks the worker to release the data on
        a best-effort basis. This protects against race conditions where the
        given keys may already have been rescheduled for compute, in which
        case the compute wins and this handler is ignored.
        For stronger guarantees, see the ``free_keys`` handler.
"""
self.log.append(("Handle superfluous data", keys, reason))
for key in list(keys):
ts = self.tasks.get(key)
if ts and not ts.scheduler_holds_ref:
self.release_key(key, reason=f"delete data: {reason}", report=False)
logger.debug("Worker %s -- Deleted %d keys", self.name, len(keys))
return "OK"
async def set_resources(self, **resources):
for r, quantity in resources.items():
if r in self.total_resources:
self.available_resources[r] += quantity - self.total_resources[r]
else:
self.available_resources[r] = quantity
self.total_resources[r] = quantity
await retry_operation(
self.scheduler.set_resources,
resources=self.total_resources,
worker=self.contact_address,
)
###################
# Task Management #
###################
def cancel_compute(self, key, reason):
"""
        Cancel a task on a best-effort basis. This is only possible while a task
is in state `waiting` or `ready`.
Nothing will happen otherwise.
"""
ts = self.tasks.get(key)
if ts and ts.state in ("waiting", "ready"):
self.log.append((key, "cancel-compute", reason))
ts.scheduler_holds_ref = False
            # No dependent of ts should be in state "processing" on the
            # scheduler side, and therefore none should have been assigned
            # to a worker yet.
assert not ts.dependents
self.release_key(key, reason=reason, report=False)
def add_task(
self,
key,
function=None,
args=None,
kwargs=None,
task=no_value,
who_has=None,
nbytes=None,
priority=None,
duration=None,
resource_restrictions=None,
actor=False,
annotations=None,
**kwargs2,
):
try:
runspec = SerializedTask(function, args, kwargs, task)
if key in self.tasks:
ts = self.tasks[key]
ts.scheduler_holds_ref = True
if ts.state == "memory":
assert key in self.data or key in self.actors
logger.debug(
"Asked to compute pre-existing result: %s: %s", key, ts.state
)
self.send_task_state_to_scheduler(ts)
return
if ts.state in IN_PLAY:
return
if ts.state == "error":
ts.exception = None
ts.exception_text = ""
ts.traceback = None
ts.traceback_text = ""
else:
# This is a scheduler re-assignment
# Either `fetch` -> `waiting` or `flight` -> `waiting`
self.log.append((ts.key, "re-adding key, new TaskState"))
self.transition(ts, "waiting", runspec=runspec)
else:
self.log.append((key, "new"))
self.tasks[key] = ts = TaskState(
key=key, runspec=SerializedTask(function, args, kwargs, task)
)
self.transition(ts, "waiting")
# TODO: move transition of `ts` to end of `add_task`
# This will require a chained recommendation transition system like
# the scheduler
if priority is not None:
priority = tuple(priority) + (self.generation,)
self.generation -= 1
if actor:
self.actors[ts.key] = None
ts.scheduler_holds_ref = True
ts.runspec = runspec
ts.priority = priority
ts.duration = duration
if resource_restrictions:
ts.resource_restrictions = resource_restrictions
ts.annotations = annotations
who_has = who_has or {}
for dependency, workers in who_has.items():
assert workers
if dependency not in self.tasks:
# initial state is "new"
# this dependency does not already exist on worker
self.tasks[dependency] = dep_ts = TaskState(key=dependency)
# link up to child / parents
ts.dependencies.add(dep_ts)
dep_ts.dependents.add(ts)
# check to ensure task wasn't already executed and partially released
                # TODO: make this less bad
state = "fetch" if dependency not in self.data else "memory"
# transition from new -> fetch handles adding dependency
# to waiting_for_data
discarded_self = False
if self.address in workers and state == "fetch":
discarded_self = True
workers = set(workers)
workers.discard(self.address)
who_has[dependency] = tuple(workers)
self.transition(dep_ts, state, who_has=workers)
self.log.append(
(
dependency,
"new-dep",
dep_ts.state,
f"requested by {ts.key}",
discarded_self,
)
)
else:
# task was already present on worker
dep_ts = self.tasks[dependency]
# link up to child / parents
ts.dependencies.add(dep_ts)
dep_ts.dependents.add(ts)
if dep_ts.state not in ("memory",):
ts.waiting_for_data.add(dep_ts.key)
self.update_who_has(who_has=who_has)
if nbytes is not None:
for key, value in nbytes.items():
self.tasks[key].nbytes = value
if ts.waiting_for_data:
self.data_needed.append(ts.key)
else:
self.transition(ts, "ready")
if self.validate:
for worker, keys in self.has_what.items():
for k in keys:
assert worker in self.tasks[k].who_has
if who_has:
assert all(self.tasks[dep] in ts.dependencies for dep in who_has)
assert all(self.tasks[dep.key] for dep in ts.dependencies)
for dependency in ts.dependencies:
self.validate_task(dependency)
self.validate_task(ts)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition(self, ts, finish, **kwargs):
if ts is None:
return
start = ts.state
if start == finish:
return
func = self._transitions[start, finish]
self.log.append((ts.key, start, finish))
state = func(ts, **kwargs)
if state and finish != state:
self.log.append((ts.key, start, finish, state))
ts.state = state or finish
if self.validate:
self.validate_task(ts)
self._notify_plugins("transition", ts.key, start, state or finish, **kwargs)
def transition_new_waiting(self, ts):
try:
if self.validate:
assert ts.state == "new"
assert ts.runspec is not None
assert not ts.who_has
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_new_fetch(self, ts, who_has):
try:
if self.validate:
assert ts.state == "new"
assert ts.runspec is None
assert who_has
for dependent in ts.dependents:
dependent.waiting_for_data.add(ts.key)
ts.who_has.update(who_has)
for w in who_has:
self.has_what[w].add(ts.key)
self.pending_data_per_worker[w].append(ts.key)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_fetch_waiting(self, ts, runspec):
"""This is a rescheduling transition that occurs after a worker failure.
A task was available from another worker but that worker died and the
scheduler reassigned the task for computation here.
"""
try:
if self.validate:
assert ts.state == "fetch"
assert ts.runspec is None
assert runspec is not None
ts.runspec = runspec
# remove any stale entries in `has_what`
for worker in self.has_what.keys():
self.has_what[worker].discard(ts.key)
# clear `who_has` of stale info
ts.who_has.clear()
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_flight_waiting(self, ts, runspec):
"""This is a rescheduling transition that occurs after
a worker failure. A task was in flight from another worker to this
worker when that worker died and the scheduler reassigned the task for
computation here.
"""
try:
if self.validate:
assert ts.state == "flight"
assert ts.runspec is None
assert runspec is not None
ts.runspec = runspec
# remove any stale entries in `has_what`
for worker in self.has_what.keys():
self.has_what[worker].discard(ts.key)
# clear `who_has` of stale info
ts.who_has.clear()
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_fetch_flight(self, ts, worker=None):
try:
if self.validate:
assert ts.state == "fetch"
assert ts.dependents
ts.coming_from = worker
self.in_flight_tasks += 1
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_flight_fetch(self, ts):
try:
if self.validate:
assert ts.state == "flight"
self.in_flight_tasks -= 1
ts.coming_from = None
ts.runspec = None
if not ts.who_has:
if ts.key not in self._missing_dep_flight:
self._missing_dep_flight.add(ts.key)
logger.info("Task %s does not know who has", ts)
self.loop.add_callback(self.handle_missing_dep, ts)
for w in ts.who_has:
self.pending_data_per_worker[w].append(ts.key)
for dependent in ts.dependents:
dependent.waiting_for_data.add(ts.key)
if dependent.state == "waiting":
self.data_needed.append(dependent.key)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_flight_memory(self, ts, value=None):
try:
if self.validate:
assert ts.state == "flight"
self.in_flight_tasks -= 1
ts.coming_from = None
self.put_key_in_memory(ts, value)
for dependent in ts.dependents:
try:
dependent.waiting_for_data.remove(ts.key)
self.waiting_for_data_count -= 1
except KeyError:
pass
self.log.append(("Notifying scheduler about in-memory", ts.key))
self.batched_stream.send({"op": "add-keys", "keys": [ts.key]})
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_waiting_ready(self, ts):
try:
if self.validate:
assert ts.state == "waiting"
assert not ts.waiting_for_data
assert all(
dep.key in self.data or dep.key in self.actors
for dep in ts.dependencies
)
assert all(dep.state == "memory" for dep in ts.dependencies)
assert ts.key not in self.ready
self.has_what[self.address].discard(ts.key)
if ts.resource_restrictions is not None:
self.constrained.append(ts.key)
return "constrained"
else:
heapq.heappush(self.ready, (ts.priority, ts.key))
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_waiting_done(self, ts, value=None):
try:
if self.validate:
assert ts.state == "waiting"
assert ts.key not in self.ready
self.waiting_for_data_count -= len(ts.waiting_for_data)
ts.waiting_for_data.clear()
if value is not None:
self.put_key_in_memory(ts, value)
self.send_task_state_to_scheduler(ts)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_ready_executing(self, ts):
try:
if self.validate:
assert not ts.waiting_for_data
assert ts.key not in self.data
assert ts.state in READY
assert ts.key not in self.ready
assert all(
dep.key in self.data or dep.key in self.actors
for dep in ts.dependencies
)
self.executing_count += 1
self.loop.add_callback(self.execute, ts.key)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_ready_error(self, ts):
if self.validate:
assert ts.exception is not None
assert ts.traceback is not None
assert ts.exception_text
assert ts.traceback_text
self.send_task_state_to_scheduler(ts)
def transition_ready_memory(self, ts, value=no_value):
if value is not no_value:
self.put_key_in_memory(ts, value=value)
self.send_task_state_to_scheduler(ts)
def transition_constrained_executing(self, ts):
self.transition_ready_executing(ts)
for resource, quantity in ts.resource_restrictions.items():
self.available_resources[resource] -= quantity
if self.validate:
assert all(v >= 0 for v in self.available_resources.values())
def transition_executing_done(self, ts, value=no_value, report=True):
try:
if self.validate:
assert ts.state == "executing" or ts.key in self.long_running
assert not ts.waiting_for_data
assert ts.key not in self.ready
out = None
if ts.resource_restrictions is not None:
for resource, quantity in ts.resource_restrictions.items():
self.available_resources[resource] += quantity
if ts.state == "executing":
self.executing_count -= 1
self.executed_count += 1
elif ts.state == "long-running":
self.long_running.remove(ts.key)
if value is not no_value:
try:
self.put_key_in_memory(ts, value, transition=False)
except Exception as e:
logger.info("Failed to put key in memory", exc_info=True)
msg = error_message(e)
ts.exception = msg["exception"]
ts.exception_text = msg["exception_text"]
ts.traceback = msg["traceback"]
ts.traceback_text = msg["traceback_text"]
ts.state = "error"
out = "error"
for d in ts.dependents:
d.waiting_for_data.add(ts.key)
if report and self.batched_stream and self.status == Status.running:
self.send_task_state_to_scheduler(ts)
else:
raise CommClosedError
return out
except OSError:
logger.info("Comm closed")
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_executing_long_running(self, ts, compute_duration=None):
try:
if self.validate:
assert ts.state == "executing"
self.executing_count -= 1
self.long_running.add(ts.key)
self.batched_stream.send(
{
"op": "long-running",
"key": ts.key,
"compute_duration": compute_duration,
}
)
self.ensure_computing()
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def maybe_transition_long_running(self, ts, compute_duration=None):
if ts.state == "executing":
self.transition(ts, "long-running", compute_duration=compute_duration)
def stateof(self, key):
ts = self.tasks[key]
return {
"executing": ts.state == "executing",
"waiting_for_data": bool(ts.waiting_for_data),
"heap": key in pluck(1, self.ready),
"data": key in self.data,
}
def story(self, *keys):
keys = [key.key if isinstance(key, TaskState) else key for key in keys]
return [
msg
for msg in self.log
if any(key in msg for key in keys)
or any(
key in c
for key in keys
for c in msg
if isinstance(c, (tuple, list, set))
)
]
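    # Example: worker.story("x") returns every structured log entry touching
    # key "x", e.g. [("x", "new"), ("x", "new", "waiting"), ...] (illustrative).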
def ensure_communicating(self):
changed = True
try:
while (
changed
and self.data_needed
and len(self.in_flight_workers) < self.total_out_connections
):
changed = False
logger.debug(
"Ensure communicating. Pending: %d. Connections: %d/%d",
len(self.data_needed),
len(self.in_flight_workers),
self.total_out_connections,
)
key = self.data_needed[0]
if key not in self.tasks:
self.data_needed.popleft()
changed = True
continue
ts = self.tasks[key]
if ts.state != "waiting":
self.log.append((key, "communication pass"))
self.data_needed.popleft()
changed = True
continue
dependencies = ts.dependencies
if self.validate:
assert all(dep.key in self.tasks for dep in dependencies)
dependencies_fetch = set()
dependencies_missing = set()
for dependency_ts in dependencies:
if dependency_ts.state == "fetch":
if not dependency_ts.who_has:
dependencies_missing.add(dependency_ts)
else:
dependencies_fetch.add(dependency_ts)
del dependencies, dependency_ts
if dependencies_missing:
missing_deps2 = {
dep
for dep in dependencies_missing
if dep.key not in self._missing_dep_flight
}
for dep in missing_deps2:
self._missing_dep_flight.add(dep.key)
if missing_deps2:
logger.info(
"Can't find dependencies %s for key %s",
missing_deps2.copy(),
key,
)
self.loop.add_callback(self.handle_missing_dep, *missing_deps2)
dependencies_fetch -= dependencies_missing
self.log.append(
("gather-dependencies", key, {d.key for d in dependencies_fetch})
)
in_flight = False
while dependencies_fetch and (
len(self.in_flight_workers) < self.total_out_connections
or self.comm_nbytes < self.comm_threshold_bytes
):
to_gather_ts = dependencies_fetch.pop()
workers = [
w
for w in to_gather_ts.who_has
if w not in self.in_flight_workers
]
if not workers:
in_flight = True
continue
host = get_address_host(self.address)
local = [w for w in workers if get_address_host(w) == host]
if local:
worker = random.choice(local)
else:
worker = random.choice(list(workers))
to_gather, total_nbytes = self.select_keys_for_gather(
worker, to_gather_ts.key
)
self.comm_nbytes += total_nbytes
self.in_flight_workers[worker] = to_gather
for d in to_gather:
dependencies_fetch.discard(self.tasks.get(d))
self.transition(self.tasks[d], "flight", worker=worker)
                    assert worker != self.address
self.loop.add_callback(
self.gather_dep,
worker=worker,
to_gather=to_gather,
total_nbytes=total_nbytes,
cause=ts,
)
changed = True
if not dependencies_fetch and not in_flight:
self.data_needed.popleft()
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def send_task_state_to_scheduler(self, ts):
if ts.key in self.data or self.actors.get(ts.key):
typ = ts.type
if ts.nbytes is None or typ is None:
try:
value = self.data[ts.key]
except KeyError:
value = self.actors[ts.key]
ts.nbytes = sizeof(value)
typ = ts.type = type(value)
del value
try:
typ_serialized = dumps_function(typ)
except PicklingError:
# Some types fail pickling (example: _thread.lock objects),
# send their name as a best effort.
typ_serialized = pickle.dumps(typ.__name__, protocol=4)
d = {
"op": "task-finished",
"status": "OK",
"key": ts.key,
"nbytes": ts.nbytes,
"thread": self.threads.get(ts.key),
"type": typ_serialized,
"typename": typename(typ),
"metadata": ts.metadata,
}
elif ts.exception is not None:
d = {
"op": "task-erred",
"status": "error",
"key": ts.key,
"thread": self.threads.get(ts.key),
"exception": ts.exception,
"traceback": ts.traceback,
"exception_text": ts.exception_text,
"traceback_text": ts.traceback_text,
}
else:
logger.error("Key not ready to send to worker, %s: %s", ts.key, ts.state)
return
if ts.startstops:
d["startstops"] = ts.startstops
self.batched_stream.send(d)
def put_key_in_memory(self, ts, value, transition=True):
if ts.key in self.data:
ts.state = "memory"
return
if ts.key in self.actors:
self.actors[ts.key] = value
else:
start = time()
self.data[ts.key] = value
ts.state = "memory"
stop = time()
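            # A store slower than 20 ms usually means the underlying buffer
            # spilled to disk, so record it as a "disk-write" start/stop.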
if stop - start > 0.020:
ts.startstops.append(
{"action": "disk-write", "start": start, "stop": stop}
)
if ts.nbytes is None:
ts.nbytes = sizeof(value)
ts.type = type(value)
for dep in ts.dependents:
try:
dep.waiting_for_data.remove(ts.key)
self.waiting_for_data_count -= 1
except KeyError:
pass
if not dep.waiting_for_data:
self.transition(dep, "ready")
self.log.append((ts.key, "put-in-memory"))
def select_keys_for_gather(self, worker, dep):
assert isinstance(dep, str)
deps = {dep}
total_bytes = self.tasks[dep].get_nbytes()
L = self.pending_data_per_worker[worker]
while L:
d = L.popleft()
ts = self.tasks.get(d)
if ts is None or ts.state != "fetch":
continue
if total_bytes + ts.get_nbytes() > self.target_message_size:
break
deps.add(d)
total_bytes += ts.get_nbytes()
return deps, total_bytes
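# Illustrative sketch (not part of the worker): how select_keys_for_gather
# piggybacks extra pending keys from the same worker onto one message until
# a byte budget is hit. The sizes and the 50 MB budget below are made up.
#
#     from collections import deque
#     pending = deque(["x", "y", "z"])
#     sizes = {"x": 10_000_000, "y": 30_000_000, "z": 40_000_000}
#     batch, total = {"seed"}, 5_000_000  # the seed key already costs 5 MB
#     while pending:
#         k = pending.popleft()
#         if total + sizes[k] > 50_000_000:  # target_message_size analogue
#             break
#         batch.add(k)
#         total += sizes[k]
#     # batch == {"seed", "x", "y"}; adding "z" would exceed the budget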
@property
def total_comm_bytes(self):
warnings.warn(
"The attribute `Worker.total_comm_bytes` has been renamed to `comm_threshold_bytes`. "
"Future versions will only support the new name.",
DeprecationWarning,
)
return self.comm_threshold_bytes
async def gather_dep(
self,
worker: str,
to_gather: Iterable[str],
total_nbytes: int,
cause: TaskState,
):
"""Gather dependencies for a task from a worker who has them
Parameters
----------
worker : str
Address of worker to gather dependencies from
to_gather : list
Keys of dependencies to gather from worker -- this is not
necessarily equivalent to the full list of dependencies of ``dep``
as some dependencies may already be present on this worker.
total_nbytes : int
Total number of bytes for all the dependencies in to_gather combined
cause : TaskState
Task we want to gather dependencies for
"""
if self.validate:
self.validate_state()
if self.status != Status.running:
return
with log_errors():
response = {}
to_gather_keys = set()
try:
if self.validate:
self.validate_state()
for dependency_key in to_gather:
dependency_ts = self.tasks.get(dependency_key)
if dependency_ts and dependency_ts.state == "flight":
to_gather_keys.add(dependency_key)
# Keep namespace clean since this func is long and has many
# dep*, *ts* variables
del to_gather, dependency_key, dependency_ts
self.log.append(("request-dep", cause.key, worker, to_gather_keys))
logger.debug(
"Request %d keys for task %s from %s",
len(to_gather_keys),
cause,
worker,
)
start = time()
response = await get_data_from_worker(
self.rpc, to_gather_keys, worker, who=self.address
)
stop = time()
if response["status"] == "busy":
self.log.append(("busy-gather", worker, to_gather_keys))
for key in to_gather_keys:
ts = self.tasks.get(key)
if ts and ts.state == "flight":
self.transition(ts, "fetch")
return
cause.startstops.append(
{
"action": "transfer",
"start": start + self.scheduler_delay,
"stop": stop + self.scheduler_delay,
"source": worker,
}
)
total_bytes = sum(
self.tasks[key].get_nbytes()
for key in response["data"]
if key in self.tasks
)
duration = (stop - start) or 0.010
bandwidth = total_bytes / duration
self.incoming_transfer_log.append(
{
"start": start + self.scheduler_delay,
"stop": stop + self.scheduler_delay,
"middle": (start + stop) / 2.0 + self.scheduler_delay,
"duration": duration,
"keys": {
key: self.tasks[key].nbytes
for key in response["data"]
if key in self.tasks
},
"total": total_bytes,
"bandwidth": bandwidth,
"who": worker,
}
)
if total_bytes > 1000000:
self.bandwidth = self.bandwidth * 0.95 + bandwidth * 0.05
bw, cnt = self.bandwidth_workers[worker]
self.bandwidth_workers[worker] = (bw + bandwidth, cnt + 1)
types = set(map(type, response["data"].values()))
if len(types) == 1:
[typ] = types
bw, cnt = self.bandwidth_types[typ]
self.bandwidth_types[typ] = (bw + bandwidth, cnt + 1)
if self.digests is not None:
self.digests["transfer-bandwidth"].add(total_bytes / duration)
self.digests["transfer-duration"].add(duration)
self.counters["transfer-count"].add(len(response["data"]))
self.incoming_count += 1
self.log.append(("receive-dep", worker, list(response["data"])))
except OSError:
logger.exception("Worker stream died during communication: %s", worker)
has_what = self.has_what.pop(worker)
self.pending_data_per_worker.pop(worker)
self.log.append(("receive-dep-failed", worker, has_what))
for d in has_what:
ts = self.tasks[d]
ts.who_has.remove(worker)
except Exception as e:
logger.exception(e)
if self.batched_stream and LOG_PDB:
import pdb
pdb.set_trace()
raise
finally:
self.comm_nbytes -= total_nbytes
busy = response.get("status", "") == "busy"
data = response.get("data", {})
# FIXME: We should not handle keys which were skipped by this coro. to_gather_keys is only a subset
assert set(to_gather_keys).issubset(
set(self.in_flight_workers.get(worker))
)
for d in self.in_flight_workers.pop(worker):
ts = self.tasks.get(d)
try:
if not busy and d in data:
self.transition(ts, "memory", value=data[d])
elif ts is None or ts.state == "executing":
self.log.append(("already-executing", d))
self.release_key(d, reason="already executing at gather")
elif ts.state == "flight" and not ts.dependents:
self.log.append(("flight no-dependents", d))
self.release_key(
d, reason="In-flight task no longer has dependents."
)
elif (
not busy
and d not in data
and ts.dependents
and ts.state != "memory"
):
ts.who_has.discard(worker)
self.has_what[worker].discard(ts.key)
self.log.append(("missing-dep", d))
self.batched_stream.send(
{
"op": "missing-data",
"errant_worker": worker,
"key": d,
}
)
self.transition(ts, "fetch")
elif ts.state not in ("ready", "memory"):
self.transition(ts, "fetch")
else:
logger.debug(
"Unexpected task state encountered for %r after gather_dep",
ts,
)
except Exception as exc:
emsg = error_message(exc)
assert ts is not None, ts
self.log.append(
(ts.key, "except-gather-dep-result", emsg, time())
)
# FIXME: We currently cannot release this task and its
# dependent safely
logger.debug(
"Exception occured while handling `gather_dep` response for %r",
ts,
exc_info=True,
)
if self.validate:
self.validate_state()
self.ensure_computing()
if not busy:
self.repetitively_busy = 0
self.ensure_communicating()
else:
# Exponential backoff to avoid hammering scheduler/worker
self.repetitively_busy += 1
await asyncio.sleep(0.100 * 1.5 ** self.repetitively_busy)
await self.query_who_has(*to_gather_keys)
self.ensure_communicating()
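# Sketch of the backoff schedule used above when the peer keeps answering
# "busy": sleep 0.100 * 1.5 ** repetitively_busy seconds, so the delay grows
# geometrically with each consecutive busy response.
#
#     delays = [0.100 * 1.5 ** n for n in range(1, 5)]
#     # [0.15, 0.225, 0.3375, 0.50625]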
def bad_dep(self, dep):
exc = ValueError(
"Could not find dependent %s. Check worker logs" % str(dep.key)
)
for ts in list(dep.dependents):
msg = error_message(exc)
ts.exception = msg["exception"]
ts.traceback = msg["traceback"]
ts.exception_text = msg["exception_text"]
ts.traceback_text = msg["traceback_text"]
self.transition(ts, "error")
self.release_key(dep.key, reason="bad dep")
async def handle_missing_dep(self, *deps, **kwargs):
self.log.append(("handle-missing", deps))
try:
deps = {dep for dep in deps if dep.dependents}
if not deps:
return
for dep in list(deps):
if (
self._suspicious_count_limit
and dep.suspicious_count > self._suspicious_count_limit
):
deps.remove(dep)
self.bad_dep(dep)
if not deps:
return
for dep in deps:
logger.info(
"Dependent not found: %s %s . Asking scheduler",
dep.key,
dep.suspicious_count,
)
who_has = await retry_operation(
self.scheduler.who_has, keys=list(dep.key for dep in deps)
)
who_has = {k: v for k, v in who_has.items() if v}
self.update_who_has(who_has)
still_missing = set()
for dep in deps:
dep.suspicious_count += 1
if not who_has.get(dep.key):
logger.info(
"No workers found for %s",
dep.key,
)
self.log.append((dep.key, "no workers found", dep.dependents))
self.release_key(dep.key, reason="Handle missing no workers")
elif self.address in who_has and dep.state != "memory":
still_missing.add(dep)
self.batched_stream.send(
{
"op": "release-worker-data",
"keys": [dep.key],
"worker": self.address,
}
)
else:
logger.debug("New workers found for %s", dep.key)
self.log.append((dep.key, "new workers found"))
for dependent in dep.dependents:
if dep.key in dependent.waiting_for_data:
self.data_needed.append(dependent.key)
if still_missing:
logger.debug(
"Found self referencing who has response from scheduler for keys %s.\n"
"Trying again handle_missing",
deps,
)
await self.handle_missing_dep(*deps)
except Exception:
logger.error("Handle missing dep failed, retrying", exc_info=True)
retries = kwargs.get("retries", 5)
self.log.append(("handle-missing-failed", retries, deps))
if retries > 0:
await self.handle_missing_dep(*deps, retries=retries - 1)
else:
raise
finally:
try:
for dep in deps:
self._missing_dep_flight.remove(dep.key)
except KeyError:
pass
self.ensure_communicating()
async def query_who_has(self, *deps):
with log_errors():
response = await retry_operation(self.scheduler.who_has, keys=deps)
self.update_who_has(response)
return response
def update_who_has(self, who_has):
try:
for dep, workers in who_has.items():
if not workers:
continue
if dep in self.tasks:
if self.address in workers and self.tasks[dep].state != "memory":
logger.debug(
"Scheduler claims worker %s holds data for task %s which is not true.",
self.name,
dep,
)
# Do not mutate the input dict. That's rude
workers = set(workers) - {self.address}
self.tasks[dep].who_has.update(workers)
for worker in workers:
self.has_what[worker].add(dep)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def steal_request(self, key):
# There may be a race condition between stealing and releasing a task.
# In that case the key has already been removed from self.tasks and the
# resulting `None` state will be registered as `already-computing` on the
# other end.
ts = self.tasks.get(key)
if key in self.tasks:
state = ts.state
else:
state = None
response = {"op": "steal-response", "key": key, "state": state}
self.batched_stream.send(response)
if state in ("ready", "waiting", "constrained"):
# If task is marked as "constrained" we haven't yet assigned it an
# `available_resources` to run on, that happens in
# `transition_constrained_executing`
ts.scheduler_holds_ref = False
self.release_key(ts.key, reason="stolen")
if self.validate:
assert ts.key not in self.tasks
def release_key(
self,
key: Hashable,
cause: TaskState | None = None,
reason: str | None = None,
report: bool = True,
):
try:
if self.validate:
assert not isinstance(key, TaskState)
ts = self.tasks.get(key, None)
# If the scheduler holds a reference, which is usually the case when it
# instructed the task to be computed here or when data was scattered, we
# must not release the task unless the scheduler allows us to. See also
# handle_delete_data.
if ts is None or ts.scheduler_holds_ref:
return
logger.debug(
"Release key %s", {"key": key, "cause": cause, "reason": reason}
)
if cause:
self.log.append((key, "release-key", {"cause": cause}, reason))
else:
self.log.append((key, "release-key", reason))
if key in self.data:
try:
del self.data[key]
except FileNotFoundError:
logger.error("Tried to delete %s but no file found", exc_info=True)
if key in self.actors:
del self.actors[key]
for worker in ts.who_has:
self.has_what[worker].discard(ts.key)
ts.who_has.clear()
if key in self.threads:
del self.threads[key]
if ts.state == "executing":
self.executing_count -= 1
if ts.resource_restrictions is not None:
if ts.state == "executing":
for resource, quantity in ts.resource_restrictions.items():
self.available_resources[resource] += quantity
for d in ts.dependencies:
d.dependents.discard(ts)
if not d.dependents and d.state in ("flight", "fetch"):
self.release_key(d.key, reason="Dependent released")
if report:
# Inform the scheduler of keys which will have gone missing
# We are releasing them before they have completed
if ts.state in PROCESSING:
# This path is only hit with work stealing
msg = {"op": "release", "key": key, "cause": cause}
else:
# This path is only hit when calling release_key manually
msg = {
"op": "release-worker-data",
"keys": [key],
"worker": self.address,
}
self.batched_stream.send(msg)
self._notify_plugins("release_key", key, ts.state, cause, reason, report)
del self.tasks[key]
except CommClosedError:
pass
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
################
# Execute Task #
################
def run(self, comm, function, args=(), wait=True, kwargs=None):
return run(self, comm, function=function, args=args, kwargs=kwargs, wait=wait)
def run_coroutine(self, comm, function, args=(), kwargs=None, wait=True):
return run(self, comm, function=function, args=args, kwargs=kwargs, wait=wait)
async def plugin_add(self, comm=None, plugin=None, name=None):
with log_errors(pdb=False):
if isinstance(plugin, bytes):
plugin = pickle.loads(plugin)
if name is None:
name = _get_plugin_name(plugin)
assert name
if name in self.plugins:
await self.plugin_remove(comm=comm, name=name)
self.plugins[name] = plugin
logger.info("Starting Worker plugin %s" % name)
if hasattr(plugin, "setup"):
try:
result = plugin.setup(worker=self)
if isawaitable(result):
result = await result
except Exception as e:
msg = error_message(e)
return msg
return {"status": "OK"}
async def plugin_remove(self, comm=None, name=None):
with log_errors(pdb=False):
logger.info(f"Removing Worker plugin {name}")
try:
plugin = self.plugins.pop(name)
if hasattr(plugin, "teardown"):
result = plugin.teardown(worker=self)
if isawaitable(result):
result = await result
except Exception as e:
msg = error_message(e)
return msg
return {"status": "OK"}
async def actor_execute(
self,
comm=None,
actor=None,
function=None,
args=(),
kwargs: dict | None = None,
):
kwargs = kwargs or {}
separate_thread = kwargs.pop("separate_thread", True)
key = actor
actor = self.actors[key]
func = getattr(actor, function)
name = key_split(key) + "." + function
try:
if iscoroutinefunction(func):
result = await func(*args, **kwargs)
elif separate_thread:
result = await self.loop.run_in_executor(
self.executors["actor"],
apply_function_actor,
func,
args,
kwargs,
self.execution_state,
name,
self.active_threads,
self.active_threads_lock,
)
else:
result = func(*args, **kwargs)
return {"status": "OK", "result": to_serialize(result)}
except Exception as ex:
return {"status": "error", "exception": to_serialize(ex)}
def actor_attribute(self, comm=None, actor=None, attribute=None):
try:
value = getattr(self.actors[actor], attribute)
return {"status": "OK", "result": to_serialize(value)}
except Exception as ex:
return {"status": "error", "exception": to_serialize(ex)}
def meets_resource_constraints(self, key: str) -> bool:
ts = self.tasks[key]
if not ts.resource_restrictions:
return True
for resource, needed in ts.resource_restrictions.items():
if self.available_resources[resource] < needed:
return False
return True
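# Worked example for the check above, with made-up numbers: a task whose
# resource_restrictions are {"GPU": 1} only fits when at least one GPU
# token is currently available.
#
#     available_resources = {"GPU": 0, "MEMORY": 4e9}
#     needed = {"GPU": 1}
#     fits = all(available_resources[r] >= q for r, q in needed.items())  # False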
async def _maybe_deserialize_task(self, ts):
if not isinstance(ts.runspec, SerializedTask):
return ts.runspec
try:
start = time()
# Offload deserializing large tasks
if sizeof(ts.runspec) > OFFLOAD_THRESHOLD:
function, args, kwargs = await offload(_deserialize, *ts.runspec)
else:
function, args, kwargs = _deserialize(*ts.runspec)
stop = time()
if stop - start > 0.010:
ts.startstops.append(
{"action": "deserialize", "start": start, "stop": stop}
)
return function, args, kwargs
except Exception:
logger.error("Could not deserialize task", exc_info=True)
self.log.append((ts.key, "deserialize-error"))
raise
def ensure_computing(self):
if self.paused:
return
try:
while self.constrained and self.executing_count < self.nthreads:
key = self.constrained[0]
ts = self.tasks.get(key, None)
if ts is None or ts.state != "constrained":
self.constrained.popleft()
continue
if self.meets_resource_constraints(key):
self.constrained.popleft()
self.transition(ts, "executing")
else:
break
while self.ready and self.executing_count < self.nthreads:
priority, key = heapq.heappop(self.ready)
ts = self.tasks.get(key)
if ts is None:
# It is possible for tasks to be released while still remaining on `ready`
# The scheduler might have re-routed to a new worker and told this worker
# to release. If the task has "disappeared" just continue through the heap
continue
elif ts.key in self.data:
self.transition(ts, "memory")
elif ts.state in READY:
self.transition(ts, "executing")
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
async def execute(self, key):
if self.status in (Status.closing, Status.closed, Status.closing_gracefully):
return
if key not in self.tasks:
return
ts = self.tasks[key]
if ts.state != "executing":
# This might happen if keys are canceled
logger.debug(
"Trying to execute task %s which is no longer in the executing state", ts
)
return
try:
if self.validate:
assert not ts.waiting_for_data
assert ts.state == "executing"
assert ts.runspec is not None
function, args, kwargs = await self._maybe_deserialize_task(ts)
args2, kwargs2 = self._prepare_args_for_execution(ts, args, kwargs)
if ts.annotations is not None and "executor" in ts.annotations:
executor = ts.annotations["executor"]
else:
executor = "default"
assert executor in self.executors
assert key == ts.key
self.active_keys.add(ts.key)
try:
e = self.executors[executor]
ts.start_time = time()
if iscoroutinefunction(function):
result = await apply_function_async(
function,
args2,
kwargs2,
self.scheduler_delay,
)
elif "ThreadPoolExecutor" in str(type(e)):
result = await self.loop.run_in_executor(
e,
apply_function,
function,
args2,
kwargs2,
self.execution_state,
ts.key,
self.active_threads,
self.active_threads_lock,
self.scheduler_delay,
)
else:
result = await self.loop.run_in_executor(
e,
apply_function_simple,
function,
args2,
kwargs2,
self.scheduler_delay,
)
finally:
self.active_keys.discard(ts.key)
# We'll need to check again for the task state since it may have
# changed since the execution was kicked off. In particular, it may
# have been canceled and released already in which case we'll have
# to drop the result immediately
if ts.key not in self.tasks:
logger.debug(
"Dropping result for %s since task has already been released.", ts.key
)
return
result["key"] = ts.key
value = result.pop("result", None)
ts.startstops.append(
{"action": "compute", "start": result["start"], "stop": result["stop"]}
)
self.threads[ts.key] = result["thread"]
if result["op"] == "task-finished":
ts.nbytes = result["nbytes"]
ts.type = result["type"]
self.transition(ts, "memory", value=value)
if self.digests is not None:
self.digests["task-duration"].add(result["stop"] - result["start"])
elif isinstance(result.pop("actual-exception"), Reschedule):
self.batched_stream.send({"op": "reschedule", "key": ts.key})
self.transition(ts, "rescheduled", report=False)
self.release_key(ts.key, report=False, reason="Reschedule")
else:
ts.exception = result["exception"]
ts.traceback = result["traceback"]
ts.exception_text = result["exception_text"]
ts.traceback_text = result["traceback_text"]
logger.warning(
"Compute Failed\n"
"Function: %s\n"
"args: %s\n"
"kwargs: %s\n"
"Exception: %r\n",
str(funcname(function))[:1000],
convert_args_to_str(args2, max_len=1000),
convert_kwargs_to_str(kwargs2, max_len=1000),
result["exception"].data,
)
self.transition(ts, "error")
logger.debug("Send compute response to scheduler: %s, %s", ts.key, result)
if self.validate:
assert ts.state != "executing"
assert not ts.waiting_for_data
except Exception as exc:
logger.error(
"Exception during execution of task %s.", ts.key, exc_info=True
)
emsg = error_message(exc)
ts.exception = emsg["exception"]
ts.traceback = emsg["traceback"]
ts.exception_text = emsg["exception_text"]
ts.traceback_text = emsg["traceback_text"]
self.transition(ts, "error")
finally:
self.ensure_computing()
self.ensure_communicating()
def _prepare_args_for_execution(self, ts, args, kwargs):
start = time()
data = {}
for dep in ts.dependencies:
k = dep.key
try:
data[k] = self.data[k]
except KeyError:
from .actor import Actor # TODO: create local actor
data[k] = Actor(type(self.actors[k]), self.address, k, self)
args2 = pack_data(args, data, key_types=(bytes, str))
kwargs2 = pack_data(kwargs, data, key_types=(bytes, str))
stop = time()
if stop - start > 0.005:
ts.startstops.append({"action": "disk-read", "start": start, "stop": stop})
if self.digests is not None:
self.digests["disk-load-duration"].add(stop - start)
return args2, kwargs2
##################
# Administrative #
##################
async def memory_monitor(self):
"""Track this process's memory usage and act accordingly
If we rise above 70% memory use, start dumping data to disk.
If we rise above 80% memory use, stop execution of new tasks.
"""
if self._memory_monitoring:
return
self._memory_monitoring = True
total = 0
proc = self.monitor.proc
memory = proc.memory_info().rss
frac = memory / self.memory_limit
def check_pause(memory):
frac = memory / self.memory_limit
# Pause worker threads if above 80% memory use
if self.memory_pause_fraction and frac > self.memory_pause_fraction:
# Try to free some memory while in paused state
self._throttled_gc.collect()
if not self.paused:
logger.warning(
"Worker is at %d%% memory usage. Pausing worker. "
"Process memory: %s -- Worker memory limit: %s",
int(frac * 100),
format_bytes(memory),
format_bytes(self.memory_limit)
if self.memory_limit is not None
else "None",
)
self.paused = True
elif self.paused:
logger.warning(
"Worker is at %d%% memory usage. Resuming worker. "
"Process memory: %s -- Worker memory limit: %s",
int(frac * 100),
format_bytes(memory),
format_bytes(self.memory_limit)
if self.memory_limit is not None
else "None",
)
self.paused = False
self.ensure_computing()
check_pause(memory)
# Dump data to disk if above 70%
if self.memory_spill_fraction and frac > self.memory_spill_fraction:
logger.debug(
"Worker is at %.0f%% memory usage. Start spilling data to disk.",
frac * 100,
)
start = time()
target = self.memory_limit * self.memory_target_fraction
count = 0
need = memory - target
while memory > target:
if not self.data.fast:
logger.warning(
"Unmanaged memory use is high. This may indicate a memory leak "
"or the memory may not be released to the OS; see "
"https://distributed.dask.org/en/latest/worker.html#memtrim "
"for more information. "
"-- Unmanaged memory: %s -- Worker memory limit: %s",
format_bytes(memory),
format_bytes(self.memory_limit),
)
break
k, v, weight = self.data.fast.evict()
del k, v
total += weight
count += 1
# If the current buffer is filled with a lot of small values,
# evicting one at a time is very slow and the worker might
# generate new data faster than it is able to evict. Therefore,
# only pass on control if we spent at least 0.5s evicting
if time() - start > 0.5:
await asyncio.sleep(0)
start = time()
memory = proc.memory_info().rss
if total > need and memory > target:
# Issue a GC to ensure that the evicted data is actually
# freed from memory and taken into account by the monitor
# before trying to evict even more data.
self._throttled_gc.collect()
memory = proc.memory_info().rss
check_pause(memory)
if count:
logger.debug(
"Moved %d tasks worth %s to disk",
count,
format_bytes(total),
)
self._memory_monitoring = False
return total
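# Illustrative numbers for the thresholds above, assuming a hypothetical
# 16 GB memory_limit with the 70% spill and 80% pause fractions from the
# docstring and an assumed 60% target fraction:
#
#     memory_limit = 16e9
#     spill_above = 0.70 * memory_limit    # ~11.2 GB: start evicting to disk
#     pause_above = 0.80 * memory_limit    # ~12.8 GB: pause new tasks
#     evict_down_to = 0.60 * memory_limit  # ~9.6 GB: eviction target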
def cycle_profile(self):
now = time() + self.scheduler_delay
prof, self.profile_recent = self.profile_recent, profile.create()
self.profile_history.append((now, prof))
self.profile_keys_history.append((now, dict(self.profile_keys)))
self.profile_keys.clear()
def trigger_profile(self):
"""
Get a frame from all actively computing threads
Merge these frames into existing profile counts
"""
if not self.active_threads: # hope that this is thread-atomic?
return
start = time()
with self.active_threads_lock:
active_threads = self.active_threads.copy()
frames = sys._current_frames()
frames = {ident: frames[ident] for ident in active_threads}
llframes = {}
if self.low_level_profiler:
llframes = {ident: profile.ll_get_stack(ident) for ident in active_threads}
for ident, frame in frames.items():
if frame is not None:
key = key_split(active_threads[ident])
llframe = llframes.get(ident)
state = profile.process(
frame, True, self.profile_recent, stop="distributed/worker.py"
)
profile.llprocess(llframe, None, state)
profile.process(
frame, True, self.profile_keys[key], stop="distributed/worker.py"
)
stop = time()
if self.digests is not None:
self.digests["profile-duration"].add(stop - start)
async def get_profile(
self, comm=None, start=None, stop=None, key=None, server=False
):
now = time() + self.scheduler_delay
if server:
history = self.io_loop.profile
elif key is None:
history = self.profile_history
else:
history = [(t, d[key]) for t, d in self.profile_keys_history if key in d]
if start is None:
istart = 0
else:
istart = bisect.bisect_left(history, (start,))
if stop is None:
istop = None
else:
istop = bisect.bisect_right(history, (stop,)) + 1
if istop >= len(history):
istop = None # include end
if istart == 0 and istop is None:
history = list(history)
else:
iistop = len(history) if istop is None else istop
history = [history[i] for i in range(istart, iistop)]
prof = profile.merge(*pluck(1, history))
if not history:
return profile.create()
if istop is None and (start is None or start < now):
if key is None:
recent = self.profile_recent
else:
recent = self.profile_keys[key]
prof = profile.merge(prof, recent)
return prof
async def get_profile_metadata(self, comm=None, start=0, stop=None):
add_recent = stop is None
now = time() + self.scheduler_delay
stop = stop or now
start = start or 0
result = {
"counts": [
(t, d["count"]) for t, d in self.profile_history if start < t < stop
],
"keys": [
(t, {k: d["count"] for k, d in v.items()})
for t, v in self.profile_keys_history
if start < t < stop
],
}
if add_recent:
result["counts"].append((now, self.profile_recent["count"]))
result["keys"].append(
(now, {k: v["count"] for k, v in self.profile_keys.items()})
)
return result
def get_call_stack(self, comm=None, keys=None):
with self.active_threads_lock:
frames = sys._current_frames()
active_threads = self.active_threads.copy()
frames = {k: frames[ident] for ident, k in active_threads.items()}
if keys is not None:
frames = {k: frame for k, frame in frames.items() if k in keys}
result = {k: profile.call_stack(frame) for k, frame in frames.items()}
return result
def _notify_plugins(self, method_name, *args, **kwargs):
for name, plugin in self.plugins.items():
if hasattr(plugin, method_name):
try:
getattr(plugin, method_name)(*args, **kwargs)
except Exception:
logger.info(
"Plugin '%s' failed with exception" % name, exc_info=True
)
##############
# Validation #
##############
def validate_task_memory(self, ts):
assert ts.key in self.data or ts.key in self.actors
assert isinstance(ts.nbytes, int)
assert not ts.waiting_for_data
assert ts.key not in self.ready
assert ts.state == "memory"
def validate_task_executing(self, ts):
assert ts.state == "executing"
assert ts.runspec is not None
assert ts.key not in self.data
assert not ts.waiting_for_data
assert all(
dep.key in self.data or dep.key in self.actors for dep in ts.dependencies
)
def validate_task_ready(self, ts):
assert ts.key in pluck(1, self.ready)
assert ts.key not in self.data
assert ts.state != "executing"
assert not ts.waiting_for_data
assert all(
dep.key in self.data or dep.key in self.actors for dep in ts.dependencies
)
def validate_task_waiting(self, ts):
assert ts.key not in self.data
assert ts.state == "waiting"
if ts.dependencies and ts.runspec:
assert not all(dep.key in self.data for dep in ts.dependencies)
def validate_task_flight(self, ts):
assert ts.key not in self.data
assert not any(dep.key in self.ready for dep in ts.dependents)
assert ts.coming_from
assert ts.coming_from in self.in_flight_workers
assert ts.key in self.in_flight_workers[ts.coming_from]
def validate_task_fetch(self, ts):
assert ts.runspec is None
assert ts.key not in self.data
assert self.address not in ts.who_has # !!!!!!!!
# FIXME This is currently not an invariant since upon comm failure we
# remove the erroneous worker from all who_has and correct the state
# upon the next ensure_communicate
# if not ts.who_has:
# # If we do not know who_has for a fetch task, it must be logged in
# # the missing dep. There should be a handle_missing_dep running for
# # all of these keys
# assert ts.key in self._missing_dep_flight, (
# ts.key,
# self.story(ts),
# self._missing_dep_flight.copy(),
# self.in_flight_workers.copy(),
# )
assert ts.dependents
for w in ts.who_has:
assert ts.key in self.has_what[w]
def validate_task(self, ts):
try:
if ts.state == "memory":
self.validate_task_memory(ts)
elif ts.state == "waiting":
self.validate_task_waiting(ts)
elif ts.state == "ready":
self.validate_task_ready(ts)
elif ts.state == "executing":
self.validate_task_executing(ts)
elif ts.state == "flight":
self.validate_task_flight(ts)
elif ts.state == "fetch":
self.validate_task_fetch(ts)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def validate_state(self):
if self.status != Status.running:
return
try:
for ts in self.tasks.values():
assert ts.state is not None
# check that worker has task
for worker in ts.who_has:
assert ts.key in self.has_what[worker]
# check that deps have a set state and that dependency<->dependent links are there
for dep in ts.dependencies:
# self.tasks was just a dict of tasks
# and this check was originally that the key was in `task_state`
# so we may have popped the key out of `self.tasks` but the
# dependency can still be in `memory` before GC grabs it...?
# Might need better bookkeeping
assert dep.state is not None
assert ts in dep.dependents, ts
for key in ts.waiting_for_data:
ts_wait = self.tasks[key]
assert (
ts_wait.state == "flight"
or ts_wait.state == "fetch"
or ts_wait.key in self._missing_dep_flight
or ts_wait.who_has.issubset(self.in_flight_workers)
)
if ts.state == "memory":
assert isinstance(ts.nbytes, int)
assert not ts.waiting_for_data
assert ts.key in self.data or ts.key in self.actors
for worker, keys in self.has_what.items():
for k in keys:
assert worker in self.tasks[k].who_has
for ts in self.tasks.values():
self.validate_task(ts)
except Exception as e:
self.loop.add_callback(self.close)
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
#######################################
# Worker Clients (advanced workloads) #
#######################################
@property
def client(self) -> Client:
with self._lock:
if self._client:
return self._client
else:
return self._get_client()
def _get_client(self, timeout=None) -> Client:
"""Get local client attached to this worker
If no such client exists, create one
See Also
--------
get_client
"""
if timeout is None:
timeout = dask.config.get("distributed.comm.timeouts.connect")
timeout = parse_timedelta(timeout, "s")
try:
from .client import default_client
client = default_client()
except ValueError: # no clients found, need to make a new one
pass
else:
# must be lazy import otherwise cyclic import
from distributed.deploy.cluster import Cluster
if (
client.scheduler
and client.scheduler.address == self.scheduler.address
# The below conditions should only happen in case a second
# cluster is alive, e.g. if a submitted task spawned its own
# LocalCluster, see gh4565
or (
isinstance(client._start_arg, str)
and client._start_arg == self.scheduler.address
or isinstance(client._start_arg, Cluster)
and client._start_arg.scheduler_address == self.scheduler.address
)
):
self._client = client
if not self._client:
from .client import Client
asynchronous = self.loop is IOLoop.current()
self._client = Client(
self.scheduler,
loop=self.loop,
security=self.security,
set_as_default=True,
asynchronous=asynchronous,
direct_to_workers=True,
name="worker",
timeout=timeout,
)
Worker._initialized_clients.add(self._client)
if not asynchronous:
assert self._client.status == "running"
return self._client
def get_current_task(self):
"""Get the key of the task we are currently running
This only makes sense to run within a task
Examples
--------
>>> from dask.distributed import get_worker
>>> def f():
... return get_worker().get_current_task()
>>> future = client.submit(f) # doctest: +SKIP
>>> future.result() # doctest: +SKIP
'f-1234'
See Also
--------
get_worker
"""
return self.active_threads[threading.get_ident()]
def get_worker() -> Worker:
"""Get the worker currently running this task
Examples
--------
>>> def f():
... worker = get_worker() # The worker on which this task is running
... return worker.address
>>> future = client.submit(f) # doctest: +SKIP
>>> future.result() # doctest: +SKIP
'tcp://127.0.0.1:47373'
See Also
--------
get_client
worker_client
"""
try:
return thread_state.execution_state["worker"]
except AttributeError:
try:
return first(w for w in Worker._instances if w.status == Status.running)
except StopIteration:
raise ValueError("No workers found")
def get_client(address=None, timeout=None, resolve_address=True) -> Client:
"""Get a client while within a task.
This client connects to the same scheduler to which the worker is connected
Parameters
----------
address : str, optional
The address of the scheduler to connect to. Defaults to the scheduler
the worker is connected to.
timeout : int or str
Timeout (in seconds) for getting the Client. Defaults to the
``distributed.comm.timeouts.connect`` configuration value.
resolve_address : bool, default True
Whether to resolve `address` to its canonical form.
Returns
-------
Client
Examples
--------
>>> def f():
... client = get_client(timeout="10s")
... futures = client.map(lambda x: x + 1, range(10)) # spawn many tasks
... results = client.gather(futures)
... return sum(results)
>>> future = client.submit(f) # doctest: +SKIP
>>> future.result() # doctest: +SKIP
55
See Also
--------
get_worker
worker_client
secede
"""
if timeout is None:
timeout = dask.config.get("distributed.comm.timeouts.connect")
timeout = parse_timedelta(timeout, "s")
if address and resolve_address:
address = comm.resolve_address(address)
try:
worker = get_worker()
except ValueError: # could not find worker
pass
else:
if not address or worker.scheduler.address == address:
return worker._get_client(timeout=timeout)
from .client import Client
try:
client = Client.current() # TODO: assumes the same scheduler
except ValueError:
client = None
if client and (not address or client.scheduler.address == address):
return client
elif address:
return Client(address, timeout=timeout)
else:
raise ValueError("No global client found and no address provided")
def secede():
"""
Have this task secede from the worker's thread pool
This opens up a new scheduling slot and a new thread for a new task. This
enables the client to schedule tasks on this node, which is
especially useful while waiting for other jobs to finish (e.g., with
``client.gather``).
Examples
--------
>>> def mytask(x):
... # do some work
... client = get_client()
... futures = client.map(...) # do some remote work
... secede() # while that work happens, remove ourself from the pool
... return client.gather(futures) # return gathered results
See Also
--------
get_client
get_worker
"""
worker = get_worker()
tpe_secede() # have this thread secede from the thread pool
duration = time() - thread_state.start_time
worker.loop.add_callback(
worker.maybe_transition_long_running,
worker.tasks[thread_state.key],
compute_duration=duration,
)
class Reschedule(Exception):
"""Reschedule this task
Raising this exception will stop the current execution of the task and ask
the scheduler to reschedule this task, possibly on a different machine.
This does not guarantee that the task will move onto a different machine.
The scheduler will proceed through its normal heuristics to determine the
optimal machine to accept this task. The machine will likely change if the
load across the cluster has significantly changed since first scheduling
the task.
"""
def parse_memory_limit(memory_limit, nthreads, total_cores=CPU_COUNT) -> int | None:
if memory_limit is None:
return None
if memory_limit == "auto":
memory_limit = int(system.MEMORY_LIMIT * min(1, nthreads / total_cores))
with suppress(ValueError, TypeError):
memory_limit = float(memory_limit)
if isinstance(memory_limit, float) and memory_limit <= 1:
memory_limit = int(memory_limit * system.MEMORY_LIMIT)
if isinstance(memory_limit, str):
memory_limit = parse_bytes(memory_limit)
else:
memory_limit = int(memory_limit)
return min(memory_limit, system.MEMORY_LIMIT)
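# Behaviour sketch for parse_memory_limit; concrete values depend on the
# host's system.MEMORY_LIMIT, so the comments are illustrative only.
#
#     parse_memory_limit(None, nthreads=4)     # -> None (no limit)
#     parse_memory_limit(0.5, nthreads=4)      # fraction of system memory
#     parse_memory_limit("4 GiB", nthreads=4)  # parsed via parse_bytes
#     parse_memory_limit("auto", nthreads=4)   # scaled by nthreads / total_cores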
async def get_data_from_worker(
rpc,
keys,
worker,
who=None,
max_connections=None,
serializers=None,
deserializers=None,
):
"""Get keys from worker
The worker has a two step handshake to acknowledge when data has been fully
delivered. This function implements that handshake.
See Also
--------
Worker.get_data
Worker.gather_deps
utils_comm.gather_data_from_workers
"""
if serializers is None:
serializers = rpc.serializers
if deserializers is None:
deserializers = rpc.deserializers
async def _get_data():
comm = await rpc.connect(worker)
comm.name = "Ephemeral Worker->Worker for gather"
try:
response = await send_recv(
comm,
serializers=serializers,
deserializers=deserializers,
op="get_data",
keys=keys,
who=who,
max_connections=max_connections,
)
try:
status = response["status"]
except KeyError:
raise ValueError("Unexpected response", response)
else:
if status == "OK":
await comm.write("OK")
return response
finally:
rpc.reuse(worker, comm)
return await retry_operation(_get_data, operation="get_data_from_worker")
job_counter = [0]
cache_loads = LRU(maxsize=100)
def loads_function(bytes_object):
"""Load a function from bytes, cache bytes"""
if len(bytes_object) < 100000:
try:
result = cache_loads[bytes_object]
except KeyError:
result = pickle.loads(bytes_object)
cache_loads[bytes_object] = result
return result
return pickle.loads(bytes_object)
def _deserialize(function=None, args=None, kwargs=None, task=no_value):
"""Deserialize task inputs and regularize to func, args, kwargs"""
if function is not None:
function = loads_function(function)
if args and isinstance(args, bytes):
args = pickle.loads(args)
if kwargs and isinstance(kwargs, bytes):
kwargs = pickle.loads(kwargs)
if task is not no_value:
assert not function and not args and not kwargs
function = execute_task
args = (task,)
return function, args or (), kwargs or {}
def execute_task(task):
"""Evaluate a nested task
>>> inc = lambda x: x + 1
>>> execute_task((inc, 1))
2
>>> execute_task((sum, [1, 2, (inc, 3)]))
7
"""
if istask(task):
func, args = task[0], task[1:]
return func(*map(execute_task, args))
elif isinstance(task, list):
return list(map(execute_task, task))
else:
return task
cache_dumps = LRU(maxsize=100)
_cache_lock = threading.Lock()
def dumps_function(func) -> bytes:
"""Dump a function to bytes, cache functions"""
try:
with _cache_lock:
result = cache_dumps[func]
except KeyError:
result = pickle.dumps(func, protocol=4)
if len(result) < 100000:
with _cache_lock:
cache_dumps[func] = result
except TypeError: # Unhashable function
result = pickle.dumps(func, protocol=4)
return result
def dumps_task(task):
"""Serialize a dask task
Returns a dict of bytestrings that can each be loaded with ``loads``
Examples
--------
Either returns a task as a function, args, kwargs dict
>>> from operator import add
>>> dumps_task((add, 1)) # doctest: +SKIP
{'function': b'\x80\x04\x95\x00\x8c\t_operator\x94\x8c\x03add\x94\x93\x94.'
'args': b'\x80\x04\x95\x07\x00\x00\x00K\x01K\x02\x86\x94.'}
Or as a single task blob if the task can't easily be decomposed. This
happens either if the task is highly nested, or if it isn't a task at all
>>> dumps_task(1) # doctest: +SKIP
{'task': b'\x80\x04\x95\x03\x00\x00\x00\x00\x00\x00\x00K\x01.'}
"""
if istask(task):
if task[0] is apply and not any(map(_maybe_complex, task[2:])):
d = {"function": dumps_function(task[1]), "args": warn_dumps(task[2])}
if len(task) == 4:
d["kwargs"] = warn_dumps(task[3])
return d
elif not any(map(_maybe_complex, task[1:])):
return {"function": dumps_function(task[0]), "args": warn_dumps(task[1:])}
return to_serialize(task)
_warn_dumps_warned = [False]
def warn_dumps(obj, dumps=pickle.dumps, limit=1e6):
"""Dump an object to bytes, warn if those bytes are large"""
b = dumps(obj, protocol=4)
if not _warn_dumps_warned[0] and len(b) > limit:
_warn_dumps_warned[0] = True
s = str(obj)
if len(s) > 70:
s = s[:50] + " ... " + s[-15:]
warnings.warn(
"Large object of size %s detected in task graph: \n"
" %s\n"
"Consider scattering large objects ahead of time\n"
"with client.scatter to reduce scheduler burden and \n"
"keep data on workers\n\n"
" future = client.submit(func, big_data) # bad\n\n"
" big_future = client.scatter(big_data) # good\n"
" future = client.submit(func, big_future) # good"
% (format_bytes(len(b)), s)
)
return b
def apply_function(
function,
args,
kwargs,
execution_state,
key,
active_threads,
active_threads_lock,
time_delay,
):
"""Run a function, collect information
Returns
-------
msg: dictionary with status, result/error, timings, etc..
"""
ident = threading.get_ident()
with active_threads_lock:
active_threads[ident] = key
thread_state.start_time = time()
thread_state.execution_state = execution_state
thread_state.key = key
msg = apply_function_simple(function, args, kwargs, time_delay)
with active_threads_lock:
del active_threads[ident]
return msg
def apply_function_simple(
function,
args,
kwargs,
time_delay,
):
"""Run a function, collect information
Returns
-------
msg: dictionary with status, result/error, timings, etc..
"""
ident = threading.get_ident()
start = time()
try:
result = function(*args, **kwargs)
except Exception as e:
msg = error_message(e)
msg["op"] = "task-erred"
msg["actual-exception"] = e
else:
msg = {
"op": "task-finished",
"status": "OK",
"result": result,
"nbytes": sizeof(result),
"type": type(result) if result is not None else None,
}
finally:
end = time()
msg["start"] = start + time_delay
msg["stop"] = end + time_delay
msg["thread"] = ident
return msg
async def apply_function_async(
function,
args,
kwargs,
time_delay,
):
"""Run a function, collect information
Returns
-------
msg: dictionary with status, result/error, timings, etc..
"""
ident = threading.get_ident()
start = time()
try:
result = await function(*args, **kwargs)
except Exception as e:
msg = error_message(e)
msg["op"] = "task-erred"
msg["actual-exception"] = e
else:
msg = {
"op": "task-finished",
"status": "OK",
"result": result,
"nbytes": sizeof(result),
"type": type(result) if result is not None else None,
}
finally:
end = time()
msg["start"] = start + time_delay
msg["stop"] = end + time_delay
msg["thread"] = ident
return msg
def apply_function_actor(
function, args, kwargs, execution_state, key, active_threads, active_threads_lock
):
"""Run a function, collect information
Returns
-------
msg: dictionary with status, result/error, timings, etc..
"""
ident = threading.get_ident()
with active_threads_lock:
active_threads[ident] = key
thread_state.execution_state = execution_state
thread_state.key = key
thread_state.actor = True
result = function(*args, **kwargs)
with active_threads_lock:
del active_threads[ident]
return result
def get_msg_safe_str(msg):
"""Make a worker msg, which contains args and kwargs, safe to cast to str:
allowing for some arguments to raise exceptions during conversion and
ignoring them.
"""
class Repr:
def __init__(self, f, val):
self._f = f
self._val = val
def __repr__(self):
return self._f(self._val)
msg = msg.copy()
if "args" in msg:
msg["args"] = Repr(convert_args_to_str, msg["args"])
if "kwargs" in msg:
msg["kwargs"] = Repr(convert_kwargs_to_str, msg["kwargs"])
return msg
def convert_args_to_str(args, max_len: int | None = None) -> str:
"""Convert args to a string, allowing for some arguments to raise
exceptions during conversion and ignoring them.
"""
length = 0
strs = ["" for i in range(len(args))]
for i, arg in enumerate(args):
try:
sarg = repr(arg)
except Exception:
sarg = "< could not convert arg to str >"
strs[i] = sarg
length += len(sarg) + 2
if max_len is not None and length > max_len:
return "({}".format(", ".join(strs[: i + 1]))[:max_len]
else:
return "({})".format(", ".join(strs))
def convert_kwargs_to_str(kwargs: dict, max_len: int | None = None) -> str:
"""Convert kwargs to a string, allowing for some arguments to raise
exceptions during conversion and ignoring them.
"""
length = 0
strs = ["" for i in range(len(kwargs))]
for i, (argname, arg) in enumerate(kwargs.items()):
try:
sarg = repr(arg)
except Exception:
sarg = "< could not convert arg to str >"
skwarg = repr(argname) + ": " + sarg
strs[i] = skwarg
length += len(skwarg) + 2
if max_len is not None and length > max_len:
return "{{{}".format(", ".join(strs[: i + 1]))[:max_len]
else:
return "{{{}}}".format(", ".join(strs))
async def run(server, comm, function, args=(), kwargs=None, is_coro=None, wait=True):
kwargs = kwargs or {}
function = pickle.loads(function)
if is_coro is None:
is_coro = iscoroutinefunction(function)
else:
warnings.warn(
"The is_coro= parameter is deprecated. "
"We now automatically detect coroutines/async functions"
)
assert wait or is_coro, "Combination not supported"
if args:
args = pickle.loads(args)
if kwargs:
kwargs = pickle.loads(kwargs)
if has_arg(function, "dask_worker"):
kwargs["dask_worker"] = server
if has_arg(function, "dask_scheduler"):
kwargs["dask_scheduler"] = server
logger.info("Run out-of-band function %r", funcname(function))
try:
if not is_coro:
result = function(*args, **kwargs)
else:
if wait:
result = await function(*args, **kwargs)
else:
server.loop.add_callback(function, *args, **kwargs)
result = None
except Exception as e:
logger.warning(
"Run Failed\nFunction: %s\nargs: %s\nkwargs: %s\n",
str(funcname(function))[:1000],
convert_args_to_str(args, max_len=1000),
convert_kwargs_to_str(kwargs, max_len=1000),
exc_info=True,
)
response = error_message(e)
else:
response = {"status": "OK", "result": to_serialize(result)}
return response
_global_workers = Worker._instances
try:
if nvml.device_get_count() < 1:
raise RuntimeError
except Exception:  # RuntimeError is already an Exception subclass
pass
else:
async def gpu_metric(worker):
result = await offload(nvml.real_time)
return result
DEFAULT_METRICS["gpu"] = gpu_metric
def gpu_startup(worker):
return nvml.one_time()
DEFAULT_STARTUP_INFORMATION["gpu"] = gpu_startup
def print(*args, **kwargs):
"""Dask print function
This prints both wherever this function is run, and also in the user's
client session
"""
try:
worker = get_worker()
except ValueError:
pass
else:
msg = {
"args": tuple(stringify(arg) for arg in args),
"kwargs": {k: stringify(v) for k, v in kwargs.items()},
}
worker.log_event("print", msg)
builtins.print(*args, **kwargs)
def warn(*args, **kwargs):
"""Dask warn function
This raises a warning both wherever this function is run, and also
in the user's client session
"""
try:
worker = get_worker()
except ValueError:
pass
else:
worker.log_event("warn", {"args": args, "kwargs": kwargs})
warnings.warn(*args, **kwargs)
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateUtilizationReport
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-vm-migration
# [START vmmigration_v1_generated_VmMigration_CreateUtilizationReport_async]
from google.cloud import vmmigration_v1
async def sample_create_utilization_report():
# Create a client
client = vmmigration_v1.VmMigrationAsyncClient()
# Initialize request argument(s)
request = vmmigration_v1.CreateUtilizationReportRequest(
parent="parent_value",
utilization_report_id="utilization_report_id_value",
)
# Make the request
operation = await client.create_utilization_report(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END vmmigration_v1_generated_VmMigration_CreateUtilizationReport_async]
|
"""Class to hold the tracks and cameras of a 3D scene.
This can be the output of either data association or of bundle adjustment.
Authors: Ayush Baid, John Lambert, Xiaolong Wu
"""
import itertools
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from gtsam import PinholeCameraCal3Bundler, Pose3, SfmTrack
import gtsfm.utils.graph as graph_utils
import gtsfm.utils.logger as logger_utils
import gtsfm.utils.reprojection as reproj_utils
logger = logger_utils.get_logger()
EQUALITY_TOLERANCE = 1e-5
PRINT_NUM_SIG_FIGS = 2
class GtsfmData:
"""Class containing cameras and tracks, essentially describing the complete 3D scene.
This class is needed over GTSAM's SfmData type because GTSAM's type does not allow for non-contiguous cameras.
The situation of non-contiguous cameras can exist because of failures in the front-end.
"""
def __init__(self, number_images: int) -> None:
"""Initializes the class.
Args:
number_images: number of images/cameras in the scene.
"""
self._cameras: Dict[int, PinholeCameraCal3Bundler] = {}
self._tracks: List[SfmTrack] = []
self._number_images = number_images
def __eq__(self, other: object) -> bool:
"""Checks equality with the other object."""
if not isinstance(other, GtsfmData):
return False
if self._number_images != other.number_images():
return False
for i, cam in self._cameras.items():
other_cam = other.get_camera(i)
if not cam.equals(other_cam, EQUALITY_TOLERANCE):
return False
for j in range(self.number_tracks()):
track = self.get_track(j)
other_track = other.get_track(j)
if track.number_measurements() != other_track.number_measurements():
return False
for k in range(track.number_measurements()):
i, uv = track.measurement(k)
other_i, other_uv = other_track.measurement(k)
if i != other_i:
return False
if not np.allclose(uv, other_uv):
return False
return True
def number_images(self) -> int:
"""Getter for the number of images.
Returns:
Number of images.
"""
return self._number_images
def number_tracks(self) -> int:
"""Getter for the number of tracks.
Returns:
Number of tracks.
"""
return len(self._tracks)
def get_valid_camera_indices(self) -> List[int]:
"""Getter for image indices where there is a valid (not None) camera.
Returns:
List of indices with a valid camera.
"""
return list(self._cameras.keys())
def get_camera(self, index: int) -> Optional[PinholeCameraCal3Bundler]:
"""Getter for camera.
Args:
index: the image index to fetch the camera for.
Returns:
The camera if it is a valid one, None otherwise.
"""
return self._cameras.get(index)
def get_camera_poses(self) -> List[Optional[Pose3]]:
"""Getter for camera poses wTi.
This function returns the pose for all cameras (equal to number_images in GtsfmData), even if they were not
computed by the pipeline.
Returns:
camera poses as a list, each representing wTi
"""
cameras = [self.get_camera(i) for i in range(self.number_images())]
poses = [camera.pose() if camera is not None else None for camera in cameras]
return poses
def get_track(self, index: int) -> SfmTrack:
"""Getter for the track.
Args:
index: track index to fetch.
Returns:
Requested track.
"""
return self._tracks[index]
def add_track(self, track: SfmTrack) -> bool:
"""Add a track, after checking if all the cameras in the track are already added.
Args:
track: track to add.
Returns:
Flag indicating the success of adding operation.
"""
# check if all cameras are already added
for j in range(track.number_measurements()):
i, _ = track.measurement(j)
if i not in self._cameras:
return False
self._tracks.append(track)
return True
def add_camera(self, index: int, camera: PinholeCameraCal3Bundler) -> None:
"""Adds a camera.
Args:
index: the index associated with this camera.
camera: camera object to add.
Raises:
ValueError: if the camera to be added is not a valid camera object.
"""
if camera is None:
raise ValueError("Camera cannot be None, should be a valid camera")
self._cameras[index] = camera
def get_track_length_statistics(self) -> Tuple[float, float]:
"""Compute mean and median lengths of all the tracks.
Returns:
Mean track length.
Median track length.
"""
if self.number_tracks() == 0:
return 0, 0
track_lengths = self.get_track_lengths()
return np.mean(track_lengths), np.median(track_lengths)
def get_track_lengths(self) -> np.ndarray:
"""Get an array containing the lengths of all tracks.
Returns:
Array containing all track lengths.
"""
if self.number_tracks() == 0:
return np.array([], dtype=np.uint32)
track_lengths = [self.get_track(j).number_measurements() for j in range(self.number_tracks())]
return np.array(track_lengths, dtype=np.uint32)
def select_largest_connected_component(self) -> "GtsfmData":
"""Selects the subset of data belonging to the largest connected component of the graph where the edges are
between cameras which feature in the same track.
Returns:
New GtsfmData object with the subset of tracks and cameras.
"""
camera_edges = []
for sfm_track in self._tracks:
cameras_in_use = []
for m_idx in range(sfm_track.number_measurements()):
i, _ = sfm_track.measurement(m_idx)
cameras_in_use.append(i)
# Recreate track connectivity from track information
# For example: a track has cameras [0, 2, 5]. In that case we will add pairs (0, 2), (0, 5), (2, 5)
camera_edges += list(itertools.combinations(cameras_in_use, 2))
if len(camera_edges) == 0:
return GtsfmData(self._number_images)
cameras_in_largest_cc = graph_utils.get_nodes_in_largest_connected_component(camera_edges)
logger.info(
"Largest connected component contains {} of {} cameras returned by front-end (of {} total imgs)".format(
len(cameras_in_largest_cc), len(self.get_valid_camera_indices()), self._number_images
)
)
return GtsfmData.from_selected_cameras(self, cameras_in_largest_cc)
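# Small illustration of the edge construction above: a track observed by
# cameras [0, 2, 5] contributes every unordered camera pair as an edge.
#
#     import itertools
#     list(itertools.combinations([0, 2, 5], 2))  # [(0, 2), (0, 5), (2, 5)]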
@classmethod
def from_selected_cameras(cls, gtsfm_data: "GtsfmData", camera_indices: List[int]) -> "GtsfmData":
"""Selects the cameras in the input list and the tracks associated with those cameras.
Args:
gtsfm_data: data to pick the cameras from.
camera_indices: camera indices to select and keep in the new data.
Returns:
New object with the selected cameras and associated tracks.
"""
new_data = cls(gtsfm_data.number_images())
for i in gtsfm_data.get_valid_camera_indices():
if i in camera_indices:
new_data.add_camera(i, gtsfm_data.get_camera(i))
new_camera_indices = new_data.get_valid_camera_indices()
# add tracks which have all the camera present in new data
for j in range(gtsfm_data.number_tracks()):
track = gtsfm_data.get_track(j)
is_valid = True
for k in range(track.number_measurements()):
i, _ = track.measurement(k)
if i not in new_camera_indices:
is_valid = False
break
if is_valid:
new_data.add_track(track)
return new_data
def get_scene_reprojection_errors(self) -> np.ndarray:
"""Get the scene reprojection errors for all 3D points and all associated measurements.
Returns:
Reprojection errors as a 1D numpy array.
"""
scene_reproj_errors: List[float] = []
for track in self._tracks:
track_errors, _ = reproj_utils.compute_track_reprojection_errors(self._cameras, track)
scene_reproj_errors.extend(track_errors)
return np.array(scene_reproj_errors)
def aggregate_metrics(self) -> Dict[str, Any]:
"""Aggregate metrics about the reprojection errors and 3d track lengths (summary stats).
Returns:
dictionary containing metrics of bundle adjustment result
"""
track_lengths_3d = self.get_track_lengths()
scene_reproj_errors = self.get_scene_reprojection_errors()
convert_to_rounded_float = lambda x: float(np.round(x, 3))
stats_dict = {}
stats_dict["number_tracks"] = self.number_tracks()
stats_dict["3d_track_lengths"] = {
"min": convert_to_rounded_float(track_lengths_3d.min()),
"mean": convert_to_rounded_float(np.mean(track_lengths_3d)),
"median": convert_to_rounded_float(np.median(track_lengths_3d)),
"max": convert_to_rounded_float(track_lengths_3d.max()),
}
stats_dict["reprojection_errors"] = {
"min": convert_to_rounded_float(np.min(scene_reproj_errors)),
"mean": convert_to_rounded_float(np.mean(scene_reproj_errors)),
"median": convert_to_rounded_float(np.median(scene_reproj_errors)),
"max": convert_to_rounded_float(np.max(scene_reproj_errors)),
}
return stats_dict
def get_avg_scene_reprojection_error(self) -> float:
"""Get average reprojection error for all 3d points in the entire scene
Returns:
Average of reprojection errors for every 3d point to its 2d measurements
"""
scene_reproj_errors = self.get_scene_reprojection_errors()
scene_avg_reproj_error = np.mean(scene_reproj_errors)
return scene_avg_reproj_error
def log_scene_reprojection_error_stats(self) -> None:
"""Logs reprojection error stats for all 3d points in the entire scene."""
scene_reproj_errors = self.get_scene_reprojection_errors()
logger.info("Min scene reproj error: %.3f", np.min(scene_reproj_errors))
logger.info("Avg scene reproj error: %.3f", np.mean(scene_reproj_errors))
logger.info("Median scene reproj error: %.3f", np.median(scene_reproj_errors))
logger.info("Max scene reproj error: %.3f", np.max(scene_reproj_errors))
def __validate_track(self, track: SfmTrack, reproj_err_thresh: float) -> bool:
"""Validates a track based on reprojection errors and cheirality checks.
Args:
track: track with 3D landmark and measurements.
reproj_err_thresh: reprojection err threshold for each measurement.
Returns:
validity of the track.
"""
        errors, _ = reproj_utils.compute_track_reprojection_errors(self._cameras, track)
        # NaN reprojection errors indicate failed cheirality checks (point behind a camera).
        cheirality_success = np.all(~np.isnan(errors))
        # The track is valid only if every measurement passes cheirality and falls below the threshold.
        return np.all(errors < reproj_err_thresh) and cheirality_success
def filter_landmarks(self, reproj_err_thresh: float = 5) -> "GtsfmData":
"""Filters out landmarks with high reprojection error
Args:
reproj_err_thresh: reprojection err threshold for each measurement.
"""
# TODO: move this function to utils or GTSAM
filtered_data = GtsfmData(self.number_images())
# add all the cameras
for i in self.get_valid_camera_indices():
filtered_data.add_camera(i, self.get_camera(i))
for j in range(self.number_tracks()):
track = self.get_track(j)
if self.__validate_track(track, reproj_err_thresh):
filtered_data.add_track(track)
return filtered_data
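    # A minimal filtering sketch (the threshold value is illustrative):
    #   filtered = data.filter_landmarks(reproj_err_thresh=3.0)
    # Cameras are always kept; only tracks whose every measurement reprojects
    # within the threshold (and passes the cheirality check) survive.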
|
# "99 Bottles of Beer" in Portuguese: the earlier while-loop draft is kept in
# the string literal below; the active for-loop version follows it.
'''n = 99
p = 'garrafas'
while n > 0:
if n == 1:
p = 'garrafa'
print(f'{n} {p} de cerveja no muro!')
print(f'{n} {p} no muro!')
print('Se uma garrafa cair no chão')
print('Quantas restarão?')
n -= 1
print('Fim da canção!')'''
p = 'garrafas'
for c in range(99, 0, -1):
if c == 1:
p = 'garrafa'
print(f'{c} {p} de cerveja no muro!')
print(f'{c} {p} no muro!')
print('Se uma garrafa cair no chão')
print('Quantas restarão?')
print('Fim da canção!')
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-02-17 21:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailcommerce_carts', '0004_cart_coupon'),
('wagtailcommerce_orders', '0014_auto_20180130_1050'),
]
operations = [
migrations.AddField(
model_name='order',
name='cart',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='orders', to='wagtailcommerce_carts.Cart', verbose_name='cart'),
),
]
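# After this migration each Order optionally references the Cart it came from.
# A hypothetical equivalent declaration on the Order model would read:
#   cart = models.ForeignKey(
#       'wagtailcommerce_carts.Cart', blank=True, null=True,
#       on_delete=models.SET_NULL, related_name='orders', verbose_name='cart',
#   )
# SET_NULL keeps historical orders intact when their originating cart is deleted.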
|
import os
import numpy as np
from pyhlm.model import WeakLimitHDPHLM, WeakLimitHDPHLMPython
from pyhlm.internals.hlm_states import WeakLimitHDPHLMStates
from pyhlm.word_model import LetterHSMM, LetterHSMMPython
import pyhsmm
import warnings
from tqdm import trange
warnings.filterwarnings('ignore')
import time
#%%
def load_datas(dataset_dir):
    """Load MFCC features plus delta and delta-delta coefficients for every file listed in files.txt."""
    data = []
    names = np.loadtxt(dataset_dir + "files.txt", dtype=str)
    for name in names:
        mfcc = np.loadtxt(dataset_dir + "DATA/" + name + ".txt")
        delta = np.loadtxt(dataset_dir + "DATA/" + name + "_d.txt")
        delta_delta = np.loadtxt(dataset_dir + "DATA/" + name + "_dd.txt")
        data.append(np.hstack((mfcc, delta, delta_delta)))
    return data
def unpack_durations(dur):
    """Expand a duration sequence into a binary boundary sequence (1 marks the last frame of each segment but the final one)."""
    unpacked = np.zeros(dur.sum())
    d = np.cumsum(dur[:-1])
    unpacked[d-1] = 1.0
    return unpacked
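# Worked example (illustrative, not part of the original script):
#   unpack_durations(np.array([2, 3])) -> array([0., 1., 0., 0., 0.])
# i.e. a 1 marks the last frame of each segment except the final one.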
def save_stateseq(model, dataset_dir):
    # Save the sampled state sequences.
names = np.loadtxt(dataset_dir + "files.txt", dtype=str)
for i, s in enumerate(model.states_list):
with open("results/" + names[i] + "_s.txt", "a") as f:
np.savetxt(f, s.stateseq)
with open("results/" + names[i] + "_l.txt", "a") as f:
np.savetxt(f, s.letter_stateseq)
with open("results/" + names[i] + "_d.txt", "a") as f:
np.savetxt(f, unpack_durations(s.durations_censored))
def save_params(itr_idx, model):
with open("parameters/ITR_{0:04d}.txt".format(itr_idx), "w") as f:
f.write(str(model.params))
def save_loglikelihood(model):
with open("summary_files/log_likelihood.txt", "a") as f:
f.write(str(model.log_likelihood()) + "\n")
def save_resample_times(resample_time):
with open("summary_files/resample_times.txt", "a") as f:
f.write(str(resample_time) + "\n")
#%%
if not os.path.exists('results'):
os.mkdir('results')
if not os.path.exists('parameters'):
os.mkdir('parameters')
if not os.path.exists('summary_files'):
os.mkdir('summary_files')
#%%
dataset_dir = "murakami_dataset/"
#%%
thread_num = 64
pre_train_iter = 1
train_iter = 100
trunc = 120
obs_dim = 9
letter_upper = 50
word_upper = 50
model_hypparams = {'num_states': word_upper, 'alpha': 10, 'gamma': 10, 'init_state_concentration': 10}
obs_hypparams = {
'mu_0':np.zeros(obs_dim),
'sigma_0':np.identity(obs_dim),
'kappa_0':0.01,
'nu_0':obs_dim+2
}
dur_hypparams = {
'alpha_0':200,
'beta_0':10
}
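# The obs_hypparams above define a Normal-inverse-Wishart prior over each Gaussian
# emission: mu_0 / sigma_0 are the prior mean and scale matrix, the small kappa_0
# keeps the mean weakly informative, and nu_0 = obs_dim + 2 is close to the minimum
# for a proper inverse-Wishart. dur_hypparams place a Gamma prior on each Poisson
# duration rate with prior mean alpha_0 / beta_0 = 200 / 10 = 20 frames.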
#%%
letter_obs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams) for state in range(letter_upper)]
letter_dur_distns = [pyhsmm.distributions.PoissonDuration(**dur_hypparams) for state in range(letter_upper)]
dur_distns = [pyhsmm.distributions.PoissonDuration(lmbda=20) for state in range(word_upper)]
length_distn = pyhsmm.distributions.PoissonDuration(alpha_0=30, beta_0=10, lmbda=3)
#%%
letter_hsmm = LetterHSMM(alpha=10, gamma=10, init_state_concentration=10, obs_distns=letter_obs_distns, dur_distns=letter_dur_distns)
model = WeakLimitHDPHLM(model_hypparams, letter_hsmm, dur_distns, length_distn)
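# The letter-level HSMM is nested inside the word-level model: each word state in
# the WeakLimitHDPHLM emits a sequence of letter states (Gaussian emissions with
# Poisson durations), while length_distn governs how many letters a word contains.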
#%%
files = np.loadtxt(dataset_dir + "files.txt", dtype=str)
datas = load_datas(dataset_dir)
#%% Pre training.
for d in datas:
letter_hsmm.add_data(d, trunc=trunc)
for t in trange(pre_train_iter):
letter_hsmm.resample_model(num_procs=1)
letter_hsmm.states_list = []
#%%
print("Add datas...")
for d in datas:
model.add_data(d, trunc=trunc, generate=False)
model.resample_states(num_procs=thread_num)
# # or
# for d in datas:
# model.add_data(d, trunc=trunc, initialize_from_prior=False)
print("Done!")
#%% Save initial parameters and hyperparameters
with open("parameters/hypparams.txt", "w") as f:
f.write(str(model.hypparams))
save_params(0, model)
save_loglikelihood(model)
#%%
for t in trange(train_iter):
st = time.time()
model.resample_model(num_procs=thread_num)
resample_model_time = time.time() - st
save_stateseq(model, dataset_dir)
save_loglikelihood(model)
save_params(t+1, model)
save_resample_times(resample_model_time)
print(model.word_list)
print(model.word_counts())
print("log_likelihood:{}".format(model.log_likelihood()))
print("resample_model:{}".format(resample_model_time))
|
#
# MIT License
#
# Copyright 2017 Launchpad project contributors (see COPYRIGHT.md)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
"""
This is a client implementation. This module is appended to a zip file with a Python interpreter and sent together
with a bootloader executable to be executed on a remote machine.
"""
|
from django.shortcuts import render
from django.views.generic import View
from cart.models import Order
class Dashboard(View):
    def get(self, *args, **kwargs):
        order_qs = Order.objects.filter(user=self.request.user, orderd=True)
        context = {
            'orders': order_qs,
        }
        return render(self.request, 'dashboard/index.html', context=context)
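# A hypothetical URL wiring for this view (illustrative, not part of this file):
#   from django.urls import path
#   from .views import Dashboard
#   urlpatterns = [
#       path('dashboard/', Dashboard.as_view(), name='dashboard'),
#   ]
# The view assumes an authenticated user; wrapping it with LoginRequiredMixin
# would guard against AnonymousUser lookups in the Order filter.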
|
import pytest
from testutils.factories import create_test_person
from django.contrib.auth.models import User, Permission
from openstates.data.models import Person, Organization
from people_admin.models import UnmatchedName, NameStatus, DeltaSet
from people_admin.views import MATCHER_PERM, EDIT_PERM, RETIRE_PERM
import json
@pytest.fixture
def admin_user():
u = User.objects.create(username="admin")
user_permissions = list(
Permission.objects.filter(
codename__in=[
p.split(".")[1] for p in (MATCHER_PERM, EDIT_PERM, RETIRE_PERM)
]
).values_list("id", flat=True)
)
u.user_permissions.set(user_permissions)
return u
@pytest.mark.django_db
def test_apply_match_matches(client, django_assert_num_queries, kansas, admin_user):
p = Person.objects.create(name="Samuel L. Jackson")
    # kansas is a test fixture; it has some fake data attached that we can use
session = kansas.legislative_sessions.get(identifier="2020")
UnmatchedName.objects.create(
id=1, session=session, name="Sam Jackson", sponsorships_count=5, votes_count=5
)
apply_data = {
"match_data": {"unmatchedId": 1, "button": "Match", "matchedId": p.id}
}
client.force_login(admin_user)
with django_assert_num_queries(6):
resp = client.post(
"/admin/people/matcher/update/",
json.dumps(apply_data),
content_type="application/json",
)
assert resp.status_code == 200
assert resp.json() == {"status": "success"}
# get refreshed object from database
matched = UnmatchedName.objects.get()
assert matched.status == NameStatus.MATCHED_PERSON
assert matched.matched_person_id == p.id
@pytest.mark.django_db
def test_apply_match_ignore(client, django_assert_num_queries, kansas, admin_user):
session = kansas.legislative_sessions.get(identifier="2020")
UnmatchedName.objects.create(
id=2, session=session, name="Eva Green", sponsorships_count=16, votes_count=7
)
match_data = {"match_data": {"unmatchedId": 2, "button": "Ignore", "matchedId": ""}}
client.force_login(admin_user)
with django_assert_num_queries(6):
# client can be used to mock GET/POST/etc.
resp = client.post(
"/admin/people/matcher/update/",
json.dumps(match_data),
content_type="application/json",
)
assert resp.status_code == 200
assert resp.json() == {"status": "success"}
# get refreshed object from database
matched = UnmatchedName.objects.get()
assert matched.status == NameStatus.IGNORED
@pytest.mark.django_db
def test_apply_match_source_error(
client, django_assert_num_queries, kansas, admin_user
):
session = kansas.legislative_sessions.get(identifier="2020")
UnmatchedName.objects.create(
id=3,
session=session,
name="David Tennant",
sponsorships_count=10,
votes_count=2,
)
match_data = {
"match_data": {"unmatchedId": 3, "button": "Source Error", "matchedId": ""}
}
client.force_login(admin_user)
with django_assert_num_queries(6):
resp = client.post(
"/admin/people/matcher/update/",
json.dumps(match_data),
content_type="application/json",
)
assert resp.status_code == 200
assert resp.json() == {"status": "success"}
# get refreshed object from database
matched = UnmatchedName.objects.get()
assert matched.status == NameStatus.SOURCE_ERROR
@pytest.mark.django_db
def test_apply_match_404(client, django_assert_num_queries, admin_user):
client.force_login(admin_user)
with django_assert_num_queries(5):
match_data = {
"match_data": {"unmatchedId": 9999, "button": "Match", "matchedId": "1"}
}
resp = client.post(
"/admin/people/matcher/update/",
json.dumps(match_data),
content_type="application/json",
)
assert resp.status_code == 404
@pytest.mark.django_db
def test_people_list(client, django_assert_num_queries, admin_user, kansas):
house = Organization.objects.get(name="Kansas House")
senate = Organization.objects.get(name="Kansas Senate")
sam = create_test_person("Sam Jackson", org=house, party="Democratic", district="1")
sam.identifiers.create(scheme="twitter", identifier="@SamuelLJackson")
sam.contact_details.create(
value="555-555-5555", type="voice", note="Capitol Office"
)
create_test_person("Bosephorous Fogg", org=house, party="Republican", district="2")
create_test_person("Cran Crumble", org=senate, party="Republican", district="A")
client.force_login(admin_user)
with django_assert_num_queries(7):
resp = client.get("/admin/people/ks/")
assert resp.status_code == 200
people = resp.context["context"]["current_people"]
assert len(people) == 3
sam_data = [p for p in people if p["name"] == "Sam Jackson"][0]
assert sam_data["district"] == "1"
assert sam_data["twitter"] == "@SamuelLJackson"
assert sam_data["capitol_voice"] == "555-555-5555"
@pytest.mark.django_db
def test_retire_person(client, django_assert_num_queries, admin_user, kansas):
house = Organization.objects.get(name="Kansas House")
sam = create_test_person("Sam Jackson", org=house, party="Democratic", district="1")
retire_data = {
"id": sam.id,
"name": sam.name,
"reason": "ran for new office",
"retirementDate": "2021-01-01",
"isDead": False,
"vacantSeat": True,
}
client.force_login(admin_user)
with django_assert_num_queries(6):
resp = client.post(
"/admin/people/retire/",
json.dumps(retire_data),
content_type="application/json",
)
assert resp.status_code == 200
ds = DeltaSet.objects.get()
assert "retire Sam Jackson" == ds.name
assert ds.person_retirements.all().count() == 1
retirement = ds.person_retirements.get()
assert retirement.person_id == sam.id
assert retirement.reason == "ran for new office"
assert retirement.date == "2021-01-01"
assert retirement.is_vacant
assert retirement.is_dead is False
|