#
"""
Texar defined exceptions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
__all__ = [
"TexarError"
]
class TexarError(Exception):
"""
Texar error.
"""
pass
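# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Shows the intended pattern: Texar code raises TexarError for library-specific
# failures so that callers can catch it separately from built-in exceptions.
if __name__ == "__main__":
    try:
        raise TexarError("example failure")  # hypothetical failure for the demo
    except TexarError as err:
        print("caught:", err)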
|
__author__ = 'patras'
from domain_exploreEnv import *
from timer import DURATION
from state import state, rv
DURATION.TIME = {
'survey': 5,
'monitor': 5,
'screen': 5,
'sample': 5,
'process': 5,
'fly': 3,
'deposit': 1,
'transferData': 1,
'take': 2,
'put': 2,
'move': 10,
'charge': 5,
'negotiate': 5,
'handleAlien': 5,
}
DURATION.COUNTER = {
'survey': 5,
'monitor': 5,
'screen': 5,
'sample': 5,
'process': 5,
'fly': 3,
'deposit': 1,
'transferData': 1,
'take': 2,
'put': 2,
'move': 10,
'charge': 5,
'negotiate': 5,
'handleAlien': 5,
}
rv.TYPE = {'e1': 'survey', 'e2': 'monitor', 'e3': 'screen', 'e4': 'sample', 'e5':'process'}
rv.EQUIPMENT = {'survey': 'e1', 'monitor': 'e2', 'screen': 'e3', 'sample': 'e4', 'process': 'e5'}
rv.EQUIPMENTTYPE = {'e1': 'survey', 'e2': 'monitor', 'e3': 'screen', 'e4': 'sample', 'e5':'process'}
rv.LOCATIONS = ['base', 'z1', 'z2', 'z3', 'z4']
rv.EDGES = {'base': {'z1': 20, 'z2': 50, 'z3': 20, 'z4': 50}, 'z1': {'base': 20, 'z2': 30, 'z4': 50}, 'z2': {'base': 50, 'z1': 30, 'z3': 30}, 'z3': {'base': 20, 'z2': 30, 'z4': 30}, 'z4': {'base': 50, 'z3': 30, 'z1': 50}}
def ResetState():
state.loc = {'r1': 'base', 'r2': 'base', 'UAV': 'base'}
state.charge = { 'UAV': 80, 'r1': 50, 'r2': 50}
state.data = { 'UAV': 1, 'r1': 3, 'r2': 1}
state.pos = {'c1': 'base', 'e1': 'r2', 'e2': 'base', 'e3': 'base', 'e4': 'base', 'e5': 'base', 'o1': 'UAV'}
state.load = {'r1': NIL, 'r2': 'e1', 'UAV': 'o1'}
state.storm = {'active': False}
tasks = {
3: [['doActivities', 'UAV', [['survey', 'z3'], ['survey', 'z4'], ['survey', 'base']]]],
5: [['handleEmergency', 'r2', 'z4']],
}
eventsEnv = {
5: [alienSpotted, ['z2']]
}
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .services.managed_identities_service import ManagedIdentitiesServiceClient
from .services.managed_identities_service import ManagedIdentitiesServiceAsyncClient
from .types.managed_identities_service import AttachTrustRequest
from .types.managed_identities_service import CreateMicrosoftAdDomainRequest
from .types.managed_identities_service import DeleteDomainRequest
from .types.managed_identities_service import DetachTrustRequest
from .types.managed_identities_service import GetDomainRequest
from .types.managed_identities_service import ListDomainsRequest
from .types.managed_identities_service import ListDomainsResponse
from .types.managed_identities_service import OpMetadata
from .types.managed_identities_service import ReconfigureTrustRequest
from .types.managed_identities_service import ResetAdminPasswordRequest
from .types.managed_identities_service import ResetAdminPasswordResponse
from .types.managed_identities_service import UpdateDomainRequest
from .types.managed_identities_service import ValidateTrustRequest
from .types.resource import Domain
from .types.resource import Trust
__all__ = (
'ManagedIdentitiesServiceAsyncClient',
'AttachTrustRequest',
'CreateMicrosoftAdDomainRequest',
'DeleteDomainRequest',
'DetachTrustRequest',
'Domain',
'GetDomainRequest',
'ListDomainsRequest',
'ListDomainsResponse',
'ManagedIdentitiesServiceClient',
'OpMetadata',
'ReconfigureTrustRequest',
'ResetAdminPasswordRequest',
'ResetAdminPasswordResponse',
'Trust',
'UpdateDomainRequest',
'ValidateTrustRequest',
)
|
import os
from collections import OrderedDict
from typing import Tuple, List, Callable
from fs_s3fs import S3FS
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
from skimage.exposure import match_histograms
from datetime import datetime
from eolearn.core import EOPatch
def augment(
lr: np.ndarray,
hr: np.ndarray,
flip: bool = True,
rotate: bool = True,
distribution_shift: bool = False,
distribution_scale: bool = False,
permute_timestamps: bool = True,
max_distribution_shift: float = 0.25,
max_distribution_scale_diff: float = 0.25,
proba_of_original: float = 0.67
) -> Tuple[np.ndarray, np.ndarray]:
"""
Performs a series of image augmentations with specified probability.
:param lr: array of low-resolution images, shape is `CxTxHxW`
:param hr: array of high-resolution images, shape is `CxHxW`
:param flip: whether to randomly flip height or width of arrays
:param rotate: whether to randomly rotate the arrays
:param distribution_shift: add an offset to the distribution
:param distribution_scale: scale the channels distribution
:param permute_timestamps: permute timestamps (not desired for HRN)
:param max_distribution_shift: set max distribution shift used in distribution shift augmentation
:param max_distribution_scale_diff: set max distribution scale used in distribution scale augmentation
    :param proba_of_original: set probability of not modifying original patch, e.g. 1 means no augmentations
:returns: augmented lr and hr arrays
"""
# Base probability which, after `n_aug_conditions`, reduces to `proba_of_original`
n_aug_conditions = sum(1. for aug_op in (flip, rotate, distribution_shift, distribution_scale, permute_timestamps)
if aug_op)
rng_threshold = proba_of_original ** (1. / n_aug_conditions)
if flip and np.random.random() > rng_threshold:
flip_axis = np.random.choice([-2, -1])
lr = np.flip(lr, axis=flip_axis)
hr = np.flip(hr, axis=flip_axis)
if rotate and np.random.random() > rng_threshold:
k = np.random.choice(np.arange(-2, 3))
lr = np.rot90(lr, k=k, axes=(-2, -1))
hr = np.rot90(hr, k=k, axes=(-2, -1))
if distribution_shift and np.random.random() > rng_threshold:
d_shift = (np.random.random() - 0.5) * max_distribution_shift
lr = lr + d_shift
hr = hr + d_shift
if distribution_scale and np.random.random() > rng_threshold:
d_scale = 1. + (np.random.random() - 0.5) * max_distribution_scale_diff
lr_mean = np.mean(lr, axis=(-2, -1))[..., None, None]
hr_mean = np.mean(hr, axis=(-2, -1))[..., None, None]
lr = (lr - lr_mean) * d_scale + lr_mean
hr = (hr - hr_mean) * d_scale + hr_mean
if permute_timestamps and np.random.random() > rng_threshold:
# expects lr in `CxTxHxW` shape
indices = np.random.permutation(lr.shape[1])
lr = lr[:, indices]
return lr, hr
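# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Calls `augment` on random arrays with the shapes its docstring expects:
# `lr` is CxTxHxW and `hr` is CxHxW. All sizes below are made up for the demo.
def _augment_demo() -> Tuple[np.ndarray, np.ndarray]:
    lr = np.random.rand(4, 8, 32, 32)   # 4 channels, 8 timestamps, 32x32 pixels
    hr = np.random.rand(4, 96, 96)      # 4 channels, 96x96 pixels
    return augment(lr, hr, proba_of_original=0.5)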
def pad_to_k(feat: np.ndarray, k: int = 16, pad_to_front: bool = True) -> np.ndarray:
""" Create an array with first dimension equal to k, filling with 0s in front or at back """
n_pad = k - len(feat)
if n_pad < 0:
raise ValueError(f'Can not pad when length of features: {len(feat)} is longer than k: {k}')
(_, h, w, c) = feat.shape
if pad_to_front:
feat = np.concatenate((np.zeros(shape=(n_pad, h, w, c)), feat))
else:
feat = np.concatenate((feat, np.zeros(shape=(n_pad, h, w, c))))
return feat
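# Hedged illustration of `pad_to_k` (shapes are made up): a 3-frame stack of
# HxWxC frames padded to k=5 gains two all-zero frames at the back.
def _pad_to_k_demo() -> np.ndarray:
    frames = np.ones((3, 8, 8, 4))
    padded = pad_to_k(frames, k=5, pad_to_front=False)
    assert padded.shape == (5, 8, 8, 4)
    return padded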
class ImageSet(OrderedDict):
"""
An OrderedDict derived class to group the assets of an imageset, with a pretty-print functionality.
"""
def __init__(self, *args, **kwargs):
super(ImageSet, self).__init__(*args, **kwargs)
def __repr__(self):
dict_info = f"{'name':>10} : {self['name']}"
for name, v in self.items():
if hasattr(v, 'shape'):
dict_info += f"\n{name:>10} : {v.shape} {v.__class__.__name__} ({v.dtype})"
else:
dict_info += f"\n{name:>10} : {v.__class__.__name__} ({v})"
return dict_info
def read_imageset(imset_file: str,
filesystem: S3FS = None,
normalize: bool = True,
country_norm_df: pd.DataFrame = None,
norm_deimos_npz: np.lib.npyio.NpzFile = None,
norm_s2_npz: np.lib.npyio.NpzFile = None,
n_views: int = 16,
padding: str = 'zeros',
histogram_matching: bool = False) -> ImageSet:
"""
Retrieves all assets from the given directory.
:param imset_file: name of npz file with sample imageset
:param filesystem: S3 filesystem to read files directly from bucket. Default reads from local disk
:param normalize: whether to normalize data or not
:param country_norm_df: S2 median/std normalization factors stored per country
:param norm_deimos_npz: 1st and 99th percentile normalization factors for DEIMOS
:param norm_s2_npz: 1st and 99th percentile normalization factors for S2
    :param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time
                    frames, the last `n_views` timeframes of the lrs sequence are taken
    :param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. Supported
                    options are `zeros`, where all-zero frames are appended to the features, or `repeat`, where the
                    last timeframe is repeated to fill the sequence
:param histogram_matching: whether to match the histogram between the HR and the corresponding LR image
"""
assert padding in ['zeros', 'repeat']
# Read asset names
npz = np.load(filesystem.openbin(imset_file), allow_pickle=True) if filesystem else np.load(imset_file,
allow_pickle=True)
features = npz['features']
hr = npz['labels']
if normalize:
country = npz['countries']
country_stats = country_norm_df[country_norm_df.country == str(country)]
norm_median = country_stats[['median_0', 'median_1', 'median_2', 'median_3']].values
norm_std = country_stats[['std_0', 'std_1', 'std_2', 'std_3']].values
features = (features - norm_median) / norm_std
deimos_p1 = norm_deimos_npz['p1']
deimos_p99 = norm_deimos_npz['p99']
s2_p1 = norm_s2_npz['p1']
s2_p99 = norm_s2_npz['p99']
hr = (hr - deimos_p1) / (deimos_p99 - deimos_p1)
features = (features - s2_p1) / (s2_p99 - s2_p1)
alphas = np.ones(n_views)
if histogram_matching:
hr = match_histograms(hr, features[-1], multichannel=True)
n_feature_timestamps = len(features)
if n_feature_timestamps < n_views:
if padding == 'zeros':
features = pad_to_k(features, n_views, pad_to_front=False)
alphas[n_feature_timestamps:] = 0
elif padding == 'repeat':
n_pad = n_views - n_feature_timestamps
padded = features[-1:].repeat(n_pad, axis=0)
features = np.concatenate((features, padded))
else:
features = features[-n_views:, ...]
# Tensor is `CxTxHxW`
features = np.moveaxis(features, -1, 0)
hr = np.moveaxis(hr, 2, 0)
imageset = ImageSet(name=os.path.basename(imset_file),
timestamp_deimos=str(npz['timetamps_deimos'].item()),
lr=features,
hr=hr,
alphas=alphas)
return imageset
class ImagesetDataset(Dataset):
""" Derived Dataset class for loading many imagesets from a list of directories.
:param imset_dir: name of directory containing files
:param imset_npz_files: list of filenames that constitute the dataset
    :param time_first: whether returned lrs sequence should have time dimension first or channels. Use `time_first=True`
                       if you are training the HRN model (`BxTxCxHxW`), `time_first=False` if you are training RAMS
                       (`BxCxTxHxW`)
:param filesystem: S3 filesystem to read files directly from bucket. Default reads from local disk
:param normalize: whether to normalize data or not
:param country_norm_df: S2 median/std normalization factors stored per country
:param norm_deimos_npz: 1st and 99th percentile normalization factors for DEIMOS
:param norm_s2_npz: 1st and 99th percentile normalization factors for S2
:param channels_feats: which channels (i.e. indices) are extracted from lrs sequence
:param channels_labels: which channels (i.e. indices) are extracted from hr image
    :param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time
                    frames, the last `n_views` timeframes of the lrs sequence are taken
    :param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. Supported
                    options are `zeros`, where all-zero frames are appended to the features, or `repeat`, where the
                    last timeframe is repeated to fill the sequence
:param transform: function executed on lr and hr arrays as augmentation
:param histogram_matching: whether to match the histogram between the HR and the corresponding LR image
"""
def __init__(
self,
imset_dir: str,
imset_npz_files: list,
time_first: bool,
filesystem: object = None,
normalize: bool = True,
country_norm_df: object = None,
norm_deimos_npz: np.ndarray = None,
norm_s2_npz: np.ndarray = None,
channels_feats: List[int] = [0, 1, 2, 3],
channels_labels: List[int] = [0, 1, 2, 3],
n_views: int = 16,
padding: str = 'zeros',
transform: Callable = None,
histogram_matching: bool = False
):
super().__init__()
self.imset_dir = imset_dir
self.filesystem = filesystem
self.imset_npz_files = imset_npz_files
self.time_first = time_first
self.normalize = normalize
self.country_norm_df = country_norm_df
self.norm_deimos_npz = norm_deimos_npz
self.norm_s2_npz = norm_s2_npz
self.channels_feats = channels_feats
self.channels_labels = channels_labels
self.n_views = n_views
self.padding = padding
self.transform = transform
self.histogram_matching = histogram_matching
def __len__(self):
return len(self.imset_npz_files)
def __getitem__(self, index: int) -> ImageSet:
""" Returns an ImageSet dict of all assets in the directory of the given index."""
if isinstance(index, int):
imset_file = os.path.join(self.imset_dir, self.imset_npz_files[index])
else:
raise KeyError('Index must be of type `int`.')
imset = read_imageset(
imset_file=imset_file,
filesystem=self.filesystem,
normalize=self.normalize,
country_norm_df=self.country_norm_df,
norm_deimos_npz=self.norm_deimos_npz,
norm_s2_npz=self.norm_s2_npz,
n_views=self.n_views,
padding=self.padding,
histogram_matching=self.histogram_matching
)
lr = imset['lr'][self.channels_feats]
hr = imset['hr'][self.channels_labels]
if self.transform is not None:
lr, hr = self.transform(lr, hr)
if self.time_first:
lr = np.swapaxes(lr, 0, 1)
imset['lr'] = torch.from_numpy(lr.copy())
imset['hr'] = torch.from_numpy(hr.copy())
imset['alphas'] = torch.from_numpy(imset['alphas'])
return imset
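# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Shows how ImagesetDataset is typically wired to a torch DataLoader. The
# directory name and file list below are hypothetical placeholders, and
# normalization is switched off so no normalization inputs are required.
def _imageset_dataset_demo():
    dataset = ImagesetDataset(
        imset_dir='/tmp/imagesets',        # hypothetical local directory
        imset_npz_files=['patch_0.npz'],   # hypothetical sample file
        time_first=True,                   # HRN-style `BxTxCxHxW` batches
        normalize=False,
        transform=augment,
    )
    # Files are only read lazily when the loader is iterated.
    return torch.utils.data.DataLoader(dataset, batch_size=1)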
def filter_cloudy_s2(eop, max_cc):
    """ Filter out timestamps whose cloud-mask coverage exceeds `max_cc`, keeping only valid-data frames """
idxs = []
for i, _ in enumerate(eop.timestamp):
        if (eop.mask['CLM'][i, ...].mean() <= max_cc) and (eop.mask['IS_DATA'][i, ...].mean() == 1):
idxs.append(i)
eop.data['BANDS'] = eop.data['BANDS'][idxs, ...]
eop.data['CLP'] = eop.data['CLP'][idxs, ...]
eop.mask['CLM'] = eop.mask['CLM'][idxs, ...]
eop.mask['IS_DATA'] = eop.mask['IS_DATA'][idxs, ...]
eop.timestamp = list(np.array(eop.timestamp)[idxs])
return eop
def timestamps_within_date(timestamps, start_date, end_date):
    """ Return indices of timestamps that fall within the `[start_date, end_date)` interval """
    timestamps = [ts.replace(tzinfo=None) for ts in timestamps]  # remove tzinfo that is present in batch results
return [i for i, ts in enumerate(timestamps) if ts >= start_date and ts < end_date]
def read_imageset_eopatch(imset_file: str,
start_date: datetime,
end_date: datetime,
country: str,
filesystem: S3FS = None,
normalize: bool = True,
country_norm_df: pd.DataFrame = None,
norm_s2_npz: np.lib.npyio.NpzFile = None,
n_views: int = 16,
padding: str = 'zeros', histogram_matching: bool = False) -> ImageSet:
"""
Retrieves all assets from the given directory.
:param imset_file: name of npz file with sample imageset
:param filesystem: S3 filesystem to read files directly from bucket. Default reads from local disk
:param start_date: specifies the start of the temporal range of the stack of images used for prediction
:param end_date: specifies the end of the temporal range of the stack of images used for prediction
:param country: specifies the name of the country so it can be matched with the country_norm_df
:param normalize: whether to normalize data or not
:param country_norm_df: S2 median/std normalization factors stored per country
:param norm_s2_npz: 1st and 99th percentile normalization factors for S2
    :param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time
                    frames, the last `n_views` timeframes of the lrs sequence are taken
    :param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. Supported
                    options are `zeros`, where all-zero frames are appended to the features, or `repeat`, where the
                    last timeframe is repeated to fill the sequence
"""
assert padding in ['zeros', 'repeat']
eopatch = EOPatch.load(imset_file, filesystem=filesystem, lazy_loading=True)
noncloudy = filter_cloudy_s2(eopatch, max_cc=0.1)
ts_idxs = timestamps_within_date(noncloudy.timestamp, start_date, end_date)
features = noncloudy.data['BANDS'][ts_idxs, ...] / 10000
filtered_ts = [eopatch.timestamp[tsi] for tsi in ts_idxs]
if normalize:
country_stats = country_norm_df[country_norm_df.country == str(country)]
norm_median = country_stats[['median_0', 'median_1', 'median_2', 'median_3']].values
norm_std = country_stats[['std_0', 'std_1', 'std_2', 'std_3']].values
features = (features - norm_median) / norm_std
s2_p1 = norm_s2_npz['p1']
s2_p99 = norm_s2_npz['p99']
features = (features - s2_p1) / (s2_p99 - s2_p1)
alphas = np.ones(n_views)
    if histogram_matching:
        # Note: there is no high-resolution reference available at prediction time,
        # so histogram matching cannot be applied here; fail with a clear error
        # instead of a NameError on the undefined `hr` array.
        raise ValueError('histogram_matching is not supported when reading EOPatches for prediction')
n_feature_timestamps = len(features)
if n_feature_timestamps < n_views:
if padding == 'zeros':
features = pad_to_k(features, n_views, pad_to_front=False)
alphas[n_feature_timestamps:] = 0
elif padding == 'repeat':
n_pad = n_views - n_feature_timestamps
padded = features[-1:].repeat(n_pad, axis=0)
features = np.concatenate((features, padded))
else:
features = features[-n_views:, ...]
# Tensor is `CxTxHxW`
features = np.moveaxis(features, -1, 0)
imageset = ImageSet(name=os.path.basename(imset_file),
lr=features,
alphas=alphas,
ts=filtered_ts[::-1])
return imageset
class EopatchPredictionDataset(Dataset):
""" Derived Dataset class for loading many imagesets from a list of directories.
:param imset_dir: name of directory containing files
:param imset_npz_files: list of filenames that constitute the dataset
    :param time_first: whether returned lrs sequence should have time dimension first or channels. Use `time_first=True`
                       if you are training the HRN model (`BxTxCxHxW`), `time_first=False` if you are training RAMS
                       (`BxCxTxHxW`)
:param filesystem: S3 filesystem to read files directly from bucket. Default reads from local disk
:param start_date: specifies the start of the temporal range of the stack of images used for prediction
:param end_date: specifies the end of the temporal range of the stack of images used for prediction
:param country: specifies the name of the country so it can be matched with the country_norm_df
:param normalize: whether to normalize data or not
:param country_norm_df: S2 median/std normalization factors stored per country
:param norm_deimos_npz: 1st and 99th percentile normalization factors for DEIMOS
:param norm_s2_npz: 1st and 99th percentile normalization factors for S2
:param channels_feats: which channels (i.e. indices) are extracted from lrs sequence
:param channels_labels: which channels (i.e. indices) are extracted from hr image
    :param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time
                    frames, the last `n_views` timeframes of the lrs sequence are taken
    :param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. Supported
                    options are `zeros`, where all-zero frames are appended to the features, or `repeat`, where the
                    last timeframe is repeated to fill the sequence
:param transform: function executed on lr and hr arrays as augmentation
"""
def __init__(
self,
imset_dir: str,
imset_npz_files: list,
time_first: bool,
start_date: datetime,
end_date: datetime,
country: str,
filesystem: object = None,
normalize: bool = True,
country_norm_df: object = None,
norm_deimos_npz: np.ndarray = None,
norm_s2_npz: np.ndarray = None,
channels_feats: List[int] = [0, 1, 2, 3],
n_views: int = 16,
padding: str = 'zeros',
histogram_matching: bool = False
):
super().__init__()
self.imset_dir = imset_dir
self.filesystem = filesystem
self.imset_npz_files = imset_npz_files
self.time_first = time_first
self.normalize = normalize
self.country_norm_df = country_norm_df
self.norm_deimos_npz = norm_deimos_npz
self.norm_s2_npz = norm_s2_npz
self.channels_feats = channels_feats
self.n_views = n_views
self.padding = padding
self.start_date = start_date
self.end_date = end_date
self.histogram_matching = histogram_matching
self.country = country
def __len__(self):
return len(self.imset_npz_files)
def __getitem__(self, index: int) -> ImageSet:
""" Returns an ImageSet dict of all assets in the directory of the given index."""
if isinstance(index, int):
imset_file = os.path.join(self.imset_dir, self.imset_npz_files[index])
else:
raise KeyError('Index must be of type `int`.')
imset = read_imageset_eopatch(
imset_file=imset_file,
filesystem=self.filesystem,
normalize=self.normalize,
country_norm_df=self.country_norm_df,
norm_deimos_npz=self.norm_deimos_npz,
norm_s2_npz=self.norm_s2_npz,
n_views=self.n_views,
padding=self.padding,
start_date=self.start_date,
end_date=self.end_date,
country=self.country,
histogram_matching=self.histogram_matching,
)
lr = imset['lr'][self.channels_feats]
if self.time_first:
lr = np.swapaxes(lr, 0, 1)
imset['lr'] = torch.from_numpy(lr.copy())
imset['alphas'] = torch.from_numpy(imset['alphas'])
return imset
|
from collections import defaultdict
from itertools import product
MULTITASK_PENALTY = 1
AUTHOR_PENALTY = 2
RELATION_COST = .05
DEFAULT_FLEXIBILITY = .1
OVERREQ_PENALTY = 0.5
def workload_diff(target, proposed):
"""
Helper for pitches_cost
:param target: <role, load>
:param proposed: <role, load>
:return: float
"""
total = 0
for role in target:
# flat penalty of -1 if no students are on a target role
diff = target[role] - (proposed[role] if role in proposed else -1)
        # a negative diff means too many students were assigned to the role
if diff < 0:
# the penalty for going over requirements can be softened
diff *= OVERREQ_PENALTY
        # the squared diff is added to the cost (so that greater discrepancies cost more)
total += diff ** 2
return total
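# Hedged illustration of `workload_diff` (roles and numbers are made up):
# 'dev' is matched exactly, 'design' is overstaffed by 1 (softened by
# OVERREQ_PENALTY) and 'qa' is left unstaffed (flat -1 penalty, then squared).
def _workload_diff_demo():
    target = {'dev': 2, 'design': 1, 'qa': 1}
    proposed = {'dev': 2, 'design': 2}
    return workload_diff(target, proposed)  # -> 0.25 + 4.0 = 4.25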
def author_tasks(pitches, wishes):
tasks = {}
for pitch in pitches:
author = pitches[pitch]["author"]
for wpitch, role in wishes[author]:
if wpitch == pitch:
tasks[(wpitch, role)] = author
return tasks
class Cost:
def __init__(self, pitches, wishes, relations=None, flexibility=DEFAULT_FLEXIBILITY):
"""
        :param pitches: <pitch, {"author": student, "workload": <role, load>}>
:param wishes: <student, [(pitch, role)]>
:param relations: <student, <student, cost>>
:param flexibility: float in [0, 1]
"""
self.pitches = pitches
self.wishes = wishes
self.relations = relations if relations else {}
self.flexibility = flexibility
self.author_tasks = author_tasks(pitches, wishes)
def __call__(self, solution):
return (
(1 - self.flexibility) * self.pitches_cost(solution) +
self.flexibility * (self.wishes_cost(solution) +
RELATION_COST*self.relations_cost(solution))
)
def author_constraint(self, solution):
"""
cost of the authors not getting their roles on their pitch
:param solution: [student, wish index]
:return: float
"""
# <(pitch, role), author>
tasks_solution = {task: None for task in self.author_tasks}
for student, i in solution:
pitch, role = self.wishes[student][i]
if (pitch, role) in self.author_tasks:
if student == self.author_tasks[(pitch, role)] or tasks_solution[(pitch, role)] is None:
tasks_solution[(pitch, role)] = student
author_cost = 0
for task, student in tasks_solution.items():
if student != self.author_tasks[task]:
author_cost += 1
return author_cost
def pitches_cost(self, solution):
"""
cost of the pitches workload not being respected
:param solution: [student, wish index]
:return: float
"""
tasks_per_students = defaultdict(int)
for student, _ in solution:
tasks_per_students[student] += 1
workloads = defaultdict(lambda: defaultdict(float))
for student, i in solution:
pitch, role = self.wishes[student][i]
workloads[pitch][role] += 1/tasks_per_students[student]
        # a penalty per additional task per student is added to avoid students multitasking too much
return (
# cost of workload diff between requirements and solution
sum(
workload_diff(self.pitches[pitch]
["workload"], workloads[pitch])
for pitch in self.pitches
if pitch in workloads
)
# cost of multitasking
+ MULTITASK_PENALTY * \
sum(tasks-1 for tasks in tasks_per_students.values())
# cost of author not having their roles
+ AUTHOR_PENALTY*self.author_constraint(solution)
)
def wishes_cost(self, solution):
"""
cost of the wishes not being respected
:param solution: [student, wish index]
:return: float
"""
return sum(
((i+1)/len(self.wishes[student]))**2
for student, i in solution
)
def relations_cost(self, solution):
"""
cost of the relations between students
:param solution: [student, wish index]
:return: float
"""
groups = defaultdict(list)
for student, i in solution:
pitch, role = self.wishes[student][i]
groups[pitch].append(student)
total = 0
for group in groups.values():
for student, other in product(filter(self.relations.__contains__, group), group):
if student != other:
if other not in self.relations[student]:
total += .5
elif self.relations[student][other] == -1:
total += 1
return total
def cost(pitches, wishes, solution, relations=None, flexibility=DEFAULT_FLEXIBILITY):
return Cost(pitches, wishes, relations, flexibility)(solution)
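# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A tiny made-up instance: two pitches, three students; each solution entry is
# (student, index into that student's wish list).
def _cost_demo():
    pitches = {
        'p1': {'author': 'alice', 'workload': {'dev': 1, 'design': 1}},
        'p2': {'author': 'bob', 'workload': {'dev': 1}},
    }
    wishes = {
        'alice': [('p1', 'dev')],
        'bob': [('p2', 'dev'), ('p1', 'design')],
        'carol': [('p1', 'design'), ('p2', 'dev')],
    }
    solution = [('alice', 0), ('bob', 0), ('carol', 0)]
    return cost(pitches, wishes, solution)  # -> 0.15 with the default flexibility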
|
# coding=utf-8
"""Maximum trade profit problem dynamic programming solution Python implementation."""
def mx_profit(prices):
n = len(prices)
profit = [0] * n
mxp = prices[n - 1]
for i in range(n - 2, -1, -1):
mxp = max(mxp, prices[i])
profit[i] = max(profit[i + 1], mxp - prices[i])
mnp = prices[0]
for i in range(1, n):
mnp = min(mnp, prices[i])
profit[i] = max(profit[i - 1], profit[i] + (prices[i] - mnp))
return profit[n - 1]
if __name__ == "__main__":
prices = [2, 30, 15, 10, 8, 25, 80]
print(mx_profit(prices))
|
import os
from datetime import datetime
import pytest
from .helpers import tasks
from .helpers import assertions
from .helpers.env import E2EEnv
DIR = os.path.dirname(__file__)
TAP_MARIADB_ID = 'mariadb_to_rs'
TAP_MARIADB_BUFFERED_STREAM_ID = 'mariadb_to_rs_buffered_stream'
TAP_POSTGRES_ID = 'postgres_to_rs'
TAP_S3_CSV_ID = 's3_csv_to_rs'
TARGET_ID = 'redshift'
# pylint: disable=attribute-defined-outside-init
class TestTargetRedshift:
"""
End to end tests for Target Redshift
"""
def setup_method(self):
"""Initialise test project by generating YAML files from
templates for all the configured connectors"""
self.project_dir = os.path.join(DIR, 'test-project')
# Init query runner methods
self.e2e = E2EEnv(self.project_dir)
self.run_query_tap_mysql = self.e2e.run_query_tap_mysql
self.run_query_tap_postgres = self.e2e.run_query_tap_postgres
self.run_query_target_redshift = self.e2e.run_query_target_redshift
def teardown_method(self):
"""Delete test directories and database objects"""
@pytest.mark.dependency(name='import_config')
def test_import_project(self):
"""Import the YAML project with taps and target and do discovery mode
to write the JSON files for singer connectors """
        # Skip every target_redshift related test if env vars not provided
if not self.e2e.env['TARGET_REDSHIFT']['is_configured']:
pytest.skip('Target Redshift environment variables are not provided')
# Setup and clean source and target databases
self.e2e.setup_tap_mysql()
self.e2e.setup_tap_postgres()
if self.e2e.env['TAP_S3_CSV']['is_configured']:
self.e2e.setup_tap_s3_csv()
self.e2e.setup_target_redshift()
# Import project
[return_code, stdout, stderr] = tasks.run_command(f'pipelinewise import_config --dir {self.project_dir}')
assertions.assert_command_success(return_code, stdout, stderr)
@pytest.mark.dependency(depends=['import_config'])
def test_replicate_mariadb_to_rs(self, tap_mariadb_id=TAP_MARIADB_ID):
"""Replicate data from Postgres to Redshift DWH"""
# 1. Run tap first time - both fastsync and a singer should be triggered
assertions.assert_run_tap_success(tap_mariadb_id, TARGET_ID, ['fastsync', 'singer'])
assertions.assert_row_counts_equal(self.run_query_tap_mysql, self.run_query_target_redshift)
#assertions.assert_all_columns_exist(self.run_query_tap_mysql, self.run_query_target_redshift,
# mysql_to_redshift.tap_type_to_target_type)
# 2. Make changes in MariaDB source database
# LOG_BASED
self.run_query_tap_mysql('UPDATE weight_unit SET isactive = 0 WHERE weight_unit_id IN (2, 3, 4)')
self.run_query_tap_mysql('UPDATE all_datatypes SET c_point = NULL')
# INCREMENTAL
self.run_query_tap_mysql('INSERT INTO address(isactive, street_number, date_created, date_updated,'
' supplier_supplier_id, zip_code_zip_code_id)'
'VALUES (1, 1234, NOW(), NOW(), 0, 1234)')
self.run_query_tap_mysql('UPDATE address SET street_number = 9999, date_updated = NOW()'
' WHERE address_id = 1')
# FULL_TABLE
self.run_query_tap_mysql('DELETE FROM no_pk_table WHERE id > 10')
# 3. Run tap second time - both fastsync and a singer should be triggered, there are some FULL_TABLE
assertions.assert_run_tap_success(tap_mariadb_id, TARGET_ID, ['fastsync', 'singer'])
assertions.assert_row_counts_equal(self.run_query_tap_mysql, self.run_query_target_redshift)
#assertions.assert_all_columns_exist(self.run_query_tap_mysql, self.run_query_target_redshift,
# mysql_to_redshift.tap_type_to_target_type)
@pytest.mark.dependency(depends=['import_config'])
def test_resync_mariadb_to_rs(self, tap_mariadb_id=TAP_MARIADB_ID):
"""Resync tables from MariaDB to Redshift DWH"""
assertions.assert_resync_tables_success(tap_mariadb_id, TARGET_ID)
assertions.assert_row_counts_equal(self.run_query_tap_mysql, self.run_query_target_redshift)
# assert_all_columns_exist currently not working on Redshift
#assertions.assert_all_columns_exist(self.run_query_tap_mysql, self.run_query_target_redshift,
# mysql_to_redshift.tap_type_to_target_type)
# pylint: disable=invalid-name
@pytest.mark.dependency(depends=['import_config'])
def test_replicate_mariadb_to_pg_with_custom_buffer_size(self):
"""Replicate data from MariaDB to Redshift DWH with custom buffer size
        Same test cases as test_replicate_mariadb_to_rs but using another tap with a custom stream buffer size"""
self.test_replicate_mariadb_to_rs(tap_mariadb_id=TAP_MARIADB_BUFFERED_STREAM_ID)
@pytest.mark.dependency(depends=['import_config'])
def test_replicate_pg_to_rs(self):
"""Replicate data from Postgres to Redshift DWH"""
# 1. Run tap first time - both fastsync and a singer should be triggered
assertions.assert_run_tap_success(TAP_POSTGRES_ID, TARGET_ID, ['fastsync', 'singer'])
assertions.assert_row_counts_equal(self.run_query_tap_postgres, self.run_query_target_redshift)
# assert_all_columns_exist currently not working on Redshift
#assertions.assert_all_columns_exist(self.run_query_tap_postgres, self.run_query_target_redshift)
assertions.assert_date_column_naive_in_target(self.run_query_target_redshift,
'updated_at',
'ppw_e2e_tap_postgres."table_with_space and uppercase"')
        # 2. Make changes in Postgres source database
# LOG_BASED
self.run_query_tap_postgres('insert into public."table_with_space and UPPERCase" (cvarchar, updated_at) values '
"('M', '2020-01-01 08:53:56.8+10'),"
"('N', '2020-12-31 12:59:00.148+00'),"
"('O', null),"
"('P', '2020-03-03 12:30:00');")
# INCREMENTAL
self.run_query_tap_postgres('INSERT INTO public.city (id, name, countrycode, district, population) '
"VALUES (4080, 'Bath', 'GBR', 'England', 88859)")
self.run_query_tap_postgres('UPDATE public.edgydata SET '
"cjson = json '{\"data\": 1234}', "
"cjsonb = jsonb '{\"data\": 2345}', "
"cvarchar = 'Liewe Maatjies UPDATED' WHERE cid = 23")
# FULL_TABLE
self.run_query_tap_postgres("DELETE FROM public.country WHERE code = 'UMI'")
# 3. Run tap second time - both fastsync and a singer should be triggered, there are some FULL_TABLE
assertions.assert_run_tap_success(TAP_POSTGRES_ID, TARGET_ID, ['fastsync', 'singer'])
assertions.assert_row_counts_equal(self.run_query_tap_postgres, self.run_query_target_redshift)
# assert_all_columns_exist currently not working on Redshift
#assertions.assert_all_columns_exist(self.run_query_tap_postgres, self.run_query_target_redshift)
assertions.assert_date_column_naive_in_target(self.run_query_target_redshift,
'updated_at',
'ppw_e2e_tap_postgres."table_with_space and uppercase"')
result = self.run_query_target_redshift(
'SELECT updated_at FROM ppw_e2e_tap_postgres."table_with_space and uppercase" where cvarchar=\'M\';')[0][0]
assert result == datetime(2019, 12, 31, 22, 53, 56, 800000)
@pytest.mark.dependency(depends=['import_config'])
def test_replicate_s3_to_rs(self):
"""Replicate csv files from s3 to Redshift, check if return code is zero and success log file created"""
# Skip tap_s3_csv related test if required env vars not provided
if not self.e2e.env['TAP_S3_CSV']['is_configured']:
pytest.skip('Tap S3 CSV environment variables are not provided')
def assert_columns_exist():
"""Helper inner function to test if every table and column exists in target snowflake"""
assertions.assert_cols_in_table(self.run_query_target_redshift, 'ppw_e2e_tap_s3_csv', 'countries',
['city', 'country', 'currency', 'id', 'language'])
assertions.assert_cols_in_table(self.run_query_target_redshift, 'ppw_e2e_tap_s3_csv', 'people',
['birth_date', 'email', 'first_name', 'gender', 'group', 'id',
'ip_address', 'is_pensioneer', 'last_name'])
# 1. Run tap first time - both fastsync and a singer should be triggered
assertions.assert_run_tap_success(TAP_S3_CSV_ID, TARGET_ID, ['fastsync', 'singer'])
# 2. Run tap second time - both fastsync and a singer should be triggered
assertions.assert_run_tap_success(TAP_S3_CSV_ID, TARGET_ID, ['fastsync', 'singer'])
assert_columns_exist()
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
'''
Download SQL file of MySQL database by phpMyAdmin
'''
import re
import os
import sys
import base64
import urllib
import urllib2
import traceback
from cookielib import CookieJar, DefaultCookiePolicy
from pprint import pprint
__author__ = 'furyu (furyutei@gmail.com)'
__version__ = '0.0.1e'
__copyright__ = 'Copyright (c) 2014 furyu'
__license__ = 'New BSD License'
def prn(message, linefeed = True): #{
if isinstance(message, unicode):
message = message.encode('utf-8', 'replace')
if linefeed:
print message
else:
print message,
#} // end of def prn()
def prn_error(message, linefeed = True): #{
if isinstance(message, unicode):
message = message.encode('utf-8', 'replace')
if linefeed:
print >> sys.stderr, message
else:
print >> sys.stderr, message,
#} // end of def prn_error()
class LoadSqlDump(object): #{
#{ class variables
DEFAULT_HEADER_DICT = {
'Accept-Charset': 'Shift_JIS,utf-8;q=0.7,*;q=0.7',
'Accept-Language': 'ja,en-us;q=0.7,en;q=0.3',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36',
}
DEFAULT_PMA_LOGIN_PARAM_DICT = dict(
server = u'1',
target = u'index.php',
)
DEFAULT_PARAM_DICT = dict(
server = u'1',
export_type = u'server',
export_method = u'quick',
quick_or_custom = u'custom',
output_format = u'sendit',
filename_template = u'@SERVER@',
remember_template = u'on',
charset_of_file = u'utf-8',
compression = u'none', # none, zip, gzip
what = u'sql',
codegen_structure_or_data = u'data',
codegen_format = u'0',
csv_separator = u'',
csv_enclosed = u'"',
csv_escaped = u'"',
csv_terminated = u'AUTO',
csv_null = u'NULL',
csv_structure_or_data = u'data',
excel_null = u'NULL',
excel_edition = u'win',
excel_structure_or_data = u'data',
htmlword_structure_or_data = u'structure_and_data',
htmlword_null = u'NULL',
json_structure_or_data = u'data',
latex_caption = u'something',
latex_structure_or_data = u'structure_and_data',
latex_structure_caption = u'テーブル @TABLE@ の構造',
latex_structure_continued_caption = u'テーブル @TABLE@ の構造 (続き)',
latex_structure_label = u'tab:@TABLE@-structure',
latex_comments = u'something',
latex_columns = u'something',
latex_data_caption = u'テーブル @TABLE@ の内容',
latex_data_continued_caption = u'テーブル @TABLE@ の内容 (続き)',
latex_data_label = u'tab:@TABLE@-data',
latex_null = u'\textit{NULL}',
mediawiki_structure_or_data = u'data',
ods_null = u'NULL',
ods_structure_or_data = u'data',
odt_structure_or_data = u'structure_and_data',
odt_comments = u'something',
odt_columns = u'something',
odt_null = u'NULL',
pdf_report_title = u'',
pdf_structure_or_data = u'data',
php_array_structure_or_data = u'data',
sql_include_comments = u'something',
sql_header_comment = u'',
sql_compatibility = u'NONE',
sql_structure_or_data = u'structure_and_data',
        sql_drop_table = u'something',  # check the "Add DROP TABLE / VIEW / PROCEDURE / FUNCTION statement" option
sql_procedure_function = u'something',
sql_create_table_statements = u'something',
sql_if_not_exists = u'something',
sql_auto_increment = u'something',
sql_backquotes = u'something',
sql_type = u'INSERT',
sql_insert_syntax = u'both',
sql_max_query_size = u'50000',
sql_hex_for_blob = u'something',
sql_utc_time = u'something',
texytext_structure_or_data = u'structure_and_data',
texytext_null = u'NULL',
yaml_structure_or_data = u'data',
knjenc = u'',
)
RE_TOKEN = re.compile(u"token\s*=[^\w]*(\w+)[^\w]")
KB = 1024
MB = 1024*1024
BUFSIZE = 256*1024
CODEC_LIST = ('utf_8', 'euc_jp', 'cp932',) # see http://docs.python.org/2.7/library/codecs.html#standard-encodings
    RE_ESC_SEQ = re.compile('\x1b(\$B|\$@|\(B|\(J| A)')
PMA_CODEC = 'utf-8'
#} // end of class variables
def __init__(self, url_phpmyadmin_top, user=None, passwd=None, pma_username=None, pma_password=None, tgt_dir=None, server_number=1, quiet=False, param_dict=None): #{
'''
url_phpmyadmin_top: URL of phpMyAdmin's toppage
user : user name for Basic Authentication
passwd : password for Basic Authentication
pma_username : user name for phpMyAdmin
pma_password : password for phpMyAdmin
tgt_dir : directory to save
server_number : MySQL server number
quiet : (True) quiet mode
param_dict : additional parameter's dictionary to export
'''
(src_codec, url_phpmyadmin_top) = self._str_decode(url_phpmyadmin_top)
url_phpmyadmin_top = re.sub(u'/index\.php(\?.*)?$', ur'', url_phpmyadmin_top)
if not re.search(u'/$', url_phpmyadmin_top): url_phpmyadmin_top += '/'
self.url_phpmyadmin_top = url_phpmyadmin_top
self.pma_username = pma_username
self.pma_password = pma_password
self.quiet = quiet
self.last_url = ''
try:
self.server_number = unicode(int(server_number))
except:
self.server_number = u'1'
self.header_dict = self.DEFAULT_HEADER_DICT.copy()
if user and passwd:
self.header_dict['Authorization'] = 'Basic %s' % (base64.b64encode('%s:%s' % (user, passwd)))
self.login_param_dict = self.DEFAULT_PMA_LOGIN_PARAM_DICT.copy()
self.login_param_dict['server'] = self.server_number
self.param_dict = self.DEFAULT_PARAM_DICT.copy()
if isinstance(param_dict, dict): self.param_dict.update(param_dict)
self.param_dict['server'] = self.server_number
self.url_opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(CookieJar(policy=DefaultCookiePolicy(rfc2965=True, netscape=True))))
self.filename_codec = sys.getfilesystemencoding()
if tgt_dir:
(src_codec, tgt_dir) = self._str_decode(tgt_dir)
else:
tgt_dir = '.'
tgt_dir_enc = tgt_dir.encode(self.filename_codec, 'ignore')
if not os.path.isdir(tgt_dir_enc):
try:
os.makedirs(tgt_dir_enc)
except:
prn_error( traceback.format_exc() )
prn_error( u'Error: cannot create "%s"' % (tgt_dir) )
tgt_dir = '.'
tgt_dir_enc = tgt_dir.encode(self.filename_codec, 'ignore')
self.tgt_dir = tgt_dir
self.tgt_dir_enc = tgt_dir_enc
if not self.quiet:
prn( u'phpMyAdmin: %s' % (self.url_phpmyadmin_top) )
prn( u'directory : %s' % (tgt_dir) )
#} // end of def __init__()
def do(self, db_name): #{
"""
download "<db_name>.sql" via phpMyAdmin
"""
(src_codec, db_name) = self._str_decode(db_name)
filename = u'%s.sql' % (db_name)
if not self.quiet:
prn( u'%s' % (filename) )
flg_success = False
while True:
token = self._get_token()
if not token: break
url = self.url_phpmyadmin_top + 'export.php'
self.param_dict.update(
token = token,
db_select = [db_name],
)
rsp = self._fetch(url, data=self._make_data(self.param_dict))
if rsp.code < 200 or 300 <= rsp.code:
prn_error( u'Error: %s => %d: %s' % (url, rsp.code, rsp.msg) )
break
filename_enc = os.path.join(self.tgt_dir_enc, filename.encode(self.filename_codec, 'ignore'))
fp = open(filename_enc, 'wb')
#info = rsp.info()
#for key in info.keys():
# prn( '%s=%s' % (key, info.getheader(key)) )
#fp.write(rsp.read())
size = 0
for buf in iter(lambda:rsp.read(self.BUFSIZE),''):
fp.write(buf)
size += len(buf)
if not self.quiet:
if size < self.MB:
prn( '\r%6d KB' % (size//self.KB), False )
else:
prn( '\r%6.2f MB' % (float(size)/self.MB), False )
sys.stdout.flush()
fp.close()
if not self.quiet: prn( '' )
flg_success = True
break
return flg_success
    #} // end of def do()
def _get_token(self): #{
def _get_token_from_rsp(rsp): #{
(token, content) = (None, None)
while True:
if rsp.code < 200 or 300 <= rsp.code:
prn_error( u'Error: %s => %d: %s' % (url, rsp.code, rsp.msg) )
break
(src_codec, content) = self._str_decode(rsp.read())
mrslt = self.RE_TOKEN.search(content)
if not mrslt:
prn_error( u'Error: token not found' )
break
token = mrslt.group(1)
break
return (token, content)
#} // end of def _get_token_from_rsp()
url = self.url_phpmyadmin_top
rsp = self._fetch(url)
(token, content) = _get_token_from_rsp(rsp)
while token:
if not re.search(u'name="pma_username"', content): break
(pma_username, pma_password) = (self.pma_username, self.pma_password)
if not pma_username or not pma_password:
prn_error( u'Error: both pma_username and pma_password required' )
token = None
break
param_dict = self.login_param_dict
param_dict.update(dict(
pma_username = pma_username,
pma_password = pma_password,
token = token,
))
rsp = self._fetch(url, data=self._make_data(param_dict))
(token, content) = _get_token_from_rsp(rsp)
if re.search(u'name="pma_username"', content):
prn_error( u'Error: incorrect pma_username or pma_password' )
token = None
break
break
self.token = token
return token
#} // end of def _get_token()
def _quote(self, param, charset='utf-8'): #{
return urllib.quote(param.encode(charset, 'ignore'), safe='~')
#} // end of def _quote()
def _make_data(self, param_dict): #{
query_list=[]
quote = lambda s: self._quote(s, self.PMA_CODEC)
for key in sorted(param_dict.keys()):
if isinstance(param_dict[key], list):
for param in param_dict[key]:
query_list.append('%s[]=%s' % (quote(key), quote(param)))
else:
query_list.append('%s=%s' % (quote(key), quote(param_dict[key])))
return '&'.join(query_list)
#} // end of def _make_data()
def _fetch(self, url, data=None, headers=None): #{
mrslt = re.search(u'^(https?://)([^/]+)(.*)$', url)
proto = mrslt.group(1).encode(self.PMA_CODEC,'ignore')
domain = mrslt.group(2).encode('idna')
path = mrslt.group(3).encode(self.PMA_CODEC, 'ignore')
url = proto + domain + path
if not headers:
headers = self.header_dict
headers['Referer'] = self.last_url
rsp = self.url_opener.open(urllib2.Request(url, data=data, headers=headers))
self.last_url = rsp.geturl()
return rsp
#} // end of def _fetch()
def _str_decode(self, src_str): #{
(src_codec, dec_str) = (None, src_str)
while True:
if not isinstance(src_str, basestring): break
if isinstance(src_str, unicode):
src_codec = 'unicode_internal'
break
try:
dec_str = src_str.decode('iso2022_jp')
src_codec = self.RE_ESC_SEQ.search(src_str) and 'iso2022_jp' or 'ascii'
break
except UnicodeDecodeError, s:
pass
for test_codec in self.CODEC_LIST:
try:
dec_str = src_str.decode(test_codec)
src_codec = test_codec
break
except UnicodeDecodeError, s:
pass
break
return (src_codec, dec_str)
#} // end of def _str_decode()
#} // end of class LoadSqlDump()
if __name__ == '__main__': #{
import optparse
usage = u"./%prog [options] <phpMyAdmin's URL> <database name> [<database name> ...]"
optparser = optparse.OptionParser(usage=usage, version=__version__)
optparser.add_option(
'-u', '--ba-user',
action = 'store',
metavar = '<BASIC-AUTH USER>',
help = u"user name for Basic Authentication",
dest = 'user'
)
optparser.add_option(
'-p', '--ba-passwd',
action = 'store',
metavar = '<BASIC-AUTH PASSWORD>',
help = u"password for Basic Authentication",
dest = 'passwd'
)
optparser.add_option(
'-n', '--pma-user',
action = 'store',
metavar = '<PMA-USER>',
help = u"user password for phpMyAdmin",
dest = 'pma_username'
)
optparser.add_option(
'-w', '--pma-passwd',
action = 'store',
metavar = '<PMA-PASSWORD>',
help = u"user name for phpMyAdmin",
dest = 'pma_password'
)
optparser.add_option(
'-s', '--server-number',
type= 'int',
#default = 1,
metavar = '<SERVER NUMBER>',
help = u'MySQL server number(default: 1)',
dest = 'server_number',
)
optparser.add_option(
'-d', '--directory',
action = 'store',
metavar = '<DIRECTORY>',
help = u"directory to save",
dest = 'tgt_dir'
)
optparser.add_option(
'-q','--quiet'
, action = 'store_true'
, help = u"quiet mode"
, dest = 'quiet'
)
optparser.add_option(
'-f', '--option-list-file',
action = 'store',
metavar = '<OPTION LIST FILE>',
help = u"option list file",
dest = 'option_file'
)
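    # Hedged example of an option list file passed via -f (format inferred from
    # the parsing loop below; one option per line, values are placeholders):
    #   -u basic_auth_user
    #   -p basic_auth_password
    #   -n pma_user
    #   -w pma_password
    #   -d ./dumps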
(options, args) = optparser.parse_args()
    # --- defaults
(user, passwd) = (None, None)
(pma_username, pma_password) = (None, None)
server_number = 1
tgt_dir = None
quiet = False
if options.option_file:
fp = open(options.option_file, 'rb')
_argv = []
for line in fp:
line = line.strip()
mrslt = re.search('^(-\w)\s+(.*)$', line)
if mrslt:
_argv.append(mrslt.group(1))
_argv.append(mrslt.group(2))
else:
_argv.append(line)
fp.close()
(_options, _args) = optparser.parse_args(_argv)
        # --- values specified in the option list file
if _options.user is not None: user = _options.user
if _options.passwd is not None: passwd = _options.passwd
if _options.pma_username is not None: pma_username = _options.pma_username
if _options.pma_password is not None: pma_password = _options.pma_password
if _options.server_number is not None: server_number = _options.server_number
if _options.tgt_dir is not None: tgt_dir = _options.tgt_dir
if _options.quiet is not None: quiet = _options.quiet
    # --- values specified on the command line
if options.user is not None: user = options.user
if options.passwd is not None: passwd = options.passwd
if options.pma_username is not None: pma_username = options.pma_username
if options.pma_password is not None: pma_password = options.pma_password
if options.server_number is not None: server_number = options.server_number
if options.tgt_dir is not None: tgt_dir = options.tgt_dir
if options.quiet is not None: quiet = options.quiet
if 1 < len(args):
exit_code = 0
url_phpmyadmin_top = args[0]
load_sqldump = LoadSqlDump(url_phpmyadmin_top, user=user, passwd=passwd, pma_username=pma_username, pma_password=pma_password, tgt_dir=tgt_dir, server_number=server_number, quiet=quiet)
for db_name in args[1:]:
if not load_sqldump.do(db_name):
exit_code += 1
exit(exit_code)
else:
optparser.print_help()
exit(255)
#} // end of __main__
# ■ end of file
|
"""
CAR CONFIG
This file is read by your car application's manage.py script to change the car
performance.
EXAMPLE
-----------
import dk
cfg = dk.load_config(config_path='~/d2/config.py')
print(cfg.CAMERA_RESOLUTION)
"""
import os
#pi information
PI_USERNAME = "pi"
PI_PASSWD = "raspberry"
PI_HOSTNAME = "raspberrypi.local"
PI_DONKEY_ROOT = "/home/pi/d2"
#PATHS
CAR_PATH = PACKAGE_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(CAR_PATH, 'data')
MODELS_PATH = os.path.join(CAR_PATH, 'models')
#VEHICLE
DRIVE_LOOP_HZ = 20
MAX_LOOPS = 100000
#CAMERA
CAMERA_TYPE = "PICAM" # (PICAM|WEBCAM|CVCAM)
IMAGE_W = 160
IMAGE_H = 120
IMAGE_DEPTH = 3 # default RGB=3, make 1 for mono
CAMERA_FRAMERATE = DRIVE_LOOP_HZ
#PCA9685, overrides only if needed, i.e. TX2..
PCA9685_I2C_ADDR = 0x40
PCA9685_I2C_BUSNUM = None
#drivetrain
DRIVE_TRAIN_TYPE = "SERVO_ESC" # SERVO_ESC|DC_STEER_THROTTLE|DC_TWO_WHEEL|SERVO_HBRIDGE_PWM
#STEERING
STEERING_CHANNEL = 1
STEERING_LEFT_PWM = 460
STEERING_RIGHT_PWM = 290
#THROTTLE
THROTTLE_CHANNEL = 0
THROTTLE_FORWARD_PWM = 500
THROTTLE_STOPPED_PWM = 370
THROTTLE_REVERSE_PWM = 220
#DC_STEER_THROTTLE with one motor as steering, one as drive
HBRIDGE_PIN_LEFT = 18
HBRIDGE_PIN_RIGHT = 16
HBRIDGE_PIN_FWD = 15
HBRIDGE_PIN_BWD = 13
#DC_TWO_WHEEL - with two wheels as drive, left and right.
HBRIDGE_PIN_LEFT_FWD = 18
HBRIDGE_PIN_LEFT_BWD = 16
HBRIDGE_PIN_RIGHT_FWD = 15
HBRIDGE_PIN_RIGHT_BWD = 13
#TRAINING
BATCH_SIZE = 128
TRAIN_TEST_SPLIT = 0.8
MAX_EPOCHS = 100
SHOW_PLOT = True
VEBOSE_TRAIN = True
USE_EARLY_STOP = True
EARLY_STOP_PATIENCE = 5
MIN_DELTA = .0005
PRINT_MODEL_SUMMARY = True #print layers and weights to stdout
OPTIMIZER = None #adam, sgd, rmsprop, etc.. None accepts default
LEARNING_RATE = 0.001 #only used when OPTIMIZER specified
LEARNING_RATE_DECAY = 0.0 #only used when OPTIMIZER specified
SEND_BEST_MODEL_TO_PI = False #change to true to automatically send best model during training
#model transfer options
FREEZE_LAYERS = False
NUM_LAST_LAYERS_TO_TRAIN = 7
#JOYSTICK
USE_JOYSTICK_AS_DEFAULT = True
JOYSTICK_MAX_THROTTLE = 0.3
JOYSTICK_STEERING_SCALE = 1.0
AUTO_RECORD_ON_THROTTLE = True
CONTROLLER_TYPE='ps3' #(ps3|ps4)
USE_NETWORKED_JS = False
NETWORK_JS_SERVER_IP = "192.168.0.1"
#RNN or 3D
SEQUENCE_LENGTH = 3
#IMU
HAVE_IMU = False
#LED
HAVE_RGB_LED = False
LED_INVERT = False #COMMON ANODE?
#board pin number for pwm outputs
LED_PIN_R = 12
LED_PIN_G = 10
LED_PIN_B = 16
#LED status color, 0-100
LED_R = 0
LED_G = 0
LED_B = 1
#BEHAVIORS
TRAIN_BEHAVIORS = False
BEHAVIOR_LIST = ['Left_Lane', "Right_Lane"]
BEHAVIOR_LED_COLORS =[ (0, 10, 0), (10, 0, 0) ] #RGB tuples 0-100 per channel
|
from typing import Any, Callable, Dict, Optional, Type, Union
from fugue.execution.execution_engine import ExecutionEngine, SQLEngine
from fugue.execution.native_execution_engine import NativeExecutionEngine
from triad.utils.convert import to_instance
from triad import assert_or_throw
class _ExecutionEngineFactory(object):
def __init__(self):
self._funcs: Dict[str, Callable] = {}
self._type_funcs: Dict[Type, Callable] = {}
self._sql_funcs: Dict[str, Callable] = {}
self.register_default(lambda conf, **kwargs: NativeExecutionEngine(conf=conf))
self.register_default_sql_engine(lambda engine, **kwargs: engine.sql_engine)
def register(
self, name_or_type: Union[str, Type], func: Callable, on_dup="overwrite"
) -> None:
if isinstance(name_or_type, str):
self._register(self._funcs, name=name_or_type, func=func, on_dup=on_dup)
else:
self._register(
self._type_funcs, name=name_or_type, func=func, on_dup=on_dup
)
def register_default(self, func: Callable, on_dup="overwrite") -> None:
self.register("", func, on_dup)
def register_sql_engine(
self, name: str, func: Callable, on_dup="overwrite"
) -> None:
self._register(self._sql_funcs, name=name, func=func, on_dup=on_dup)
def register_default_sql_engine(self, func: Callable, on_dup="overwrite") -> None:
self.register_sql_engine("", func, on_dup)
def make(
self, engine: Any = None, conf: Any = None, **kwargs: Any
) -> ExecutionEngine:
if isinstance(engine, tuple):
execution_engine = self.make_execution_engine(
engine[0], conf=conf, **kwargs
)
sql_engine = self.make_sql_engine(engine[1], execution_engine)
execution_engine.set_sql_engine(sql_engine)
return execution_engine
else:
return self.make((engine, None), conf=conf, **kwargs)
def make_execution_engine(
self, engine: Any = None, conf: Any = None, **kwargs: Any
) -> ExecutionEngine:
def make_engine(engine: Any) -> ExecutionEngine:
if isinstance(engine, str) and engine in self._funcs:
return self._funcs[engine](conf, **kwargs)
for k, f in self._type_funcs.items():
if isinstance(engine, k):
return f(engine, conf, **kwargs)
if isinstance(engine, ExecutionEngine):
if conf is not None:
engine.compile_conf.update(conf)
engine.compile_conf.update(kwargs)
return engine
return to_instance(
engine, ExecutionEngine, kwargs=dict(conf=conf, **kwargs)
)
result = make_engine(engine or "")
result.compile_conf.update(result.conf)
result.compile_conf.update(conf)
result.compile_conf.update(kwargs)
return result
def make_sql_engine(
self,
engine: Any = None,
execution_engine: Optional[ExecutionEngine] = None,
**kwargs: Any,
) -> SQLEngine:
if engine is None:
engine = ""
if isinstance(engine, str) and engine in self._sql_funcs:
return self._sql_funcs[engine](execution_engine, **kwargs)
if isinstance(engine, SQLEngine):
assert_or_throw(
execution_engine is None and len(kwargs) == 0,
lambda: ValueError(
f"{engine} is an instance, can't take arguments "
f"execution_engine={execution_engine}, kwargs={kwargs}"
),
)
return engine
return to_instance(
engine, SQLEngine, kwargs=dict(execution_engine=execution_engine, **kwargs)
)
def _register(
self,
callables: Dict[Any, Callable],
name: Any,
func: Callable,
on_dup="overwrite",
) -> None:
        if name not in callables:
            callables[name] = func
            return
if on_dup in ["raise", "throw"]:
raise KeyError(f"{name} is already registered")
if on_dup == "overwrite":
callables[name] = func
return
if on_dup == "ignore":
return
raise ValueError(on_dup)
_EXECUTION_ENGINE_FACTORY = _ExecutionEngineFactory()
def register_execution_engine(
name_or_type: Union[str, Type], func: Callable, on_dup="overwrite"
) -> None:
"""Register :class:`~fugue.execution.execution_engine.ExecutionEngine` with
a given name.
:param name_or_type: alias of the execution engine, or type of an object that
can be converted to an execution engine
:param func: a callable taking |ParamsLikeObject| and ``**kwargs`` and returning an
:class:`~fugue.execution.execution_engine.ExecutionEngine` instance
:param on_dup: action on duplicated ``name``. It can be "overwrite", "ignore"
(not overwriting) or "throw" (throw exception), defaults to "overwrite".
:raises KeyError: if ``on_dup`` is ``throw`` and the ``name`` already exists
.. admonition:: Examples
Alias registration examples:
.. code-block:: python
# create a new engine with name my (overwrites if existed)
register_execution_engine("my", lambda conf: MyExecutionEngine(conf))
# 0
make_execution_engine("my")
make_execution_engine("my", {"myconfig":"value})
# 1
with FugueWorkflow("my") as dag:
dag.create([[0]],"a:int").show()
# 2
dag = FugueWorkflow()
dag.create([[0]],"a:int").show()
dag.run("my", {"myconfig":"value})
# 3
fsql('''
CREATE [[0]] SCHEMA a:int
PRINT
''').run("my")
Type registration examples:
.. code-block:: python
from pyspark.sql import SparkSession
from fugue_spark import SparkExecutionEngine
from fugue_sql import fsql
register_execution_engine(
SparkSession,
lambda session, conf: SparkExecutionEngine(session, conf))
spark_session = SparkSession.builder.getOrCreate()
fsql('''
CREATE [[0]] SCHEMA a:int
PRINT
''').run(spark_session)
"""
_EXECUTION_ENGINE_FACTORY.register(name_or_type, func, on_dup)
def register_default_execution_engine(func: Callable, on_dup="overwrite") -> None:
"""Register :class:`~fugue.execution.execution_engine.ExecutionEngine` as the
default engine.
:param func: a callable taking |ParamsLikeObject| and ``**kwargs`` and returning an
:class:`~fugue.execution.execution_engine.ExecutionEngine` instance
:param on_dup: action on duplicated ``name``. It can be "overwrite", "ignore"
(not overwriting) or "throw" (throw exception), defaults to "overwrite".
:raises KeyError: if ``on_dup`` is ``throw`` and the ``name`` already exists
.. admonition:: Examples
.. code-block:: python
# create a new engine with name my (overwrites if existed)
register_default_execution_engine(lambda conf: MyExecutionEngine(conf))
# the following examples will use MyExecutionEngine
# 0
make_execution_engine()
        make_execution_engine(None, {"myconfig":"value"})
# 1
with FugueWorkflow() as dag:
dag.create([[0]],"a:int").show()
# 2
dag = FugueWorkflow()
dag.create([[0]],"a:int").show()
        dag.run(None, {"myconfig":"value"})
# 3
fsql('''
CREATE [[0]] SCHEMA a:int
PRINT
        ''').run("", {"myconfig":"value"})
"""
_EXECUTION_ENGINE_FACTORY.register_default(func, on_dup)
def register_sql_engine(name: str, func: Callable, on_dup="overwrite") -> None:
"""Register :class:`~fugue.execution.execution_engine.SQLEngine` with
a given name.
:param name: name of the SQL engine
:param func: a callable taking
:class:`~fugue.execution.execution_engine.ExecutionEngine`
and ``**kwargs`` and returning a
:class:`~fugue.execution.execution_engine.SQLEngine` instance
:param on_dup: action on duplicated ``name``. It can be "overwrite", "ignore"
(not overwriting) or "throw" (throw exception), defaults to "overwrite".
:raises KeyError: if ``on_dup`` is ``throw`` and the ``name`` already exists
.. admonition:: Examples
.. code-block:: python
# create a new engine with name my (overwrites if existed)
register_sql_engine("mysql", lambda engine: MySQLEngine(engine))
# create execution engine with MySQLEngine as the default
make_execution_engine(("", "mysql"))
# create DaskExecutionEngine with MySQLEngine as the default
make_execution_engine(("dask", "mysql"))
# default execution engine + MySQLEngine
with FugueWorkflow(("","mysql")) as dag:
dag.create([[0]],"a:int").show()
"""
_EXECUTION_ENGINE_FACTORY.register_sql_engine(name, func, on_dup)
def register_default_sql_engine(func: Callable, on_dup="overwrite") -> None:
"""Register :class:`~fugue.execution.execution_engine.SQLEngine` as the
default engine
:param func: a callable taking
:class:`~fugue.execution.execution_engine.ExecutionEngine`
and ``**kwargs`` and returning a
:class:`~fugue.execution.execution_engine.SQLEngine` instance
:param on_dup: action on duplicated ``name``. It can be "overwrite", "ignore"
(not overwriting) or "throw" (throw exception), defaults to "overwrite".
:raises KeyError: if ``on_dup`` is ``throw`` and the ``name`` already exists
.. note::
Be careful when using this function: once you set a custom SQL engine as the
default, every execution engine you create will use that SQL engine unless you
explicitly specify another one. For example, if you set the default SQL engine
to a Spark-specific one and then start a NativeExecutionEngine, it will try to
use the Spark SQL engine and throw exceptions.
It is therefore usually better to use ``register_sql_engine`` instead.
.. admonition:: Examples
.. code-block:: python
# register the default SQL engine (overwrites the existing default if any)
register_default_sql_engine(lambda engine: MySQLEngine(engine))
# create NativeExecutionEngine with MySQLEngine as the default
make_execution_engine()
# create SparkExecutionEngine with MySQLEngine instead of SparkSQLEngine
make_execution_engine("spark")
# NativeExecutionEngine with MySQLEngine
with FugueWorkflow() as dag:
dag.create([[0]],"a:int").show()
"""
_EXECUTION_ENGINE_FACTORY.register_default_sql_engine(func, on_dup)
def make_execution_engine(
engine: Any = None, conf: Any = None, **kwargs: Any
) -> ExecutionEngine:
"""Create :class:`~fugue.execution.execution_engine.ExecutionEngine`
with specified ``engine``
:param engine: it can be empty string or null (use the default execution
engine), a string (use the registered execution engine), an
:class:`~fugue.execution.execution_engine.ExecutionEngine` type, an
:class:`~fugue.execution.execution_engine.ExecutionEngine` instance,
or a tuple of two values where the first value represents the execution
engine and the second value represents the SQL engine (you can use ``None``
for either of them to use the default one), defaults to None
:param conf: |ParamsLikeObject|, defaults to None
:param kwargs: additional parameters to initialize the execution engine
:return: the :class:`~fugue.execution.execution_engine.ExecutionEngine`
instance
.. admonition:: Examples
.. code-block:: python
register_default_execution_engine(lambda conf: E1(conf))
register_execution_engine("e2", lambda conf, **kwargs: E2(conf, **kwargs))
register_sql_engine("s", lambda conf: S2(conf))
# E1 + E1.default_sql_engine
make_execution_engine()
# E2 + E2.default_sql_engine
make_execution_engine("e2")
# E1 + S2
make_execution_engine((None, "s"))
# E2(conf, a=1, b=2) + S2
make_execution_engine(("e2", "s"), conf, a=1, b=2)
# SparkExecutionEngine + SparkSQLEngine
make_execution_engine(SparkExecutionEngine)
make_execution_engine(SparkExecutionEngine(spark_session, conf))
# SparkExecutionEngine + S2
make_execution_engine((SparkExecutionEngine, "s"))
"""
return _EXECUTION_ENGINE_FACTORY.make(engine, conf, **kwargs)
def make_sql_engine(
engine: Any = None,
execution_engine: Optional[ExecutionEngine] = None,
**kwargs: Any,
) -> SQLEngine:
"""Create :class:`~fugue.execution.execution_engine.SQLEngine`
with specified ``engine``
:param engine: it can be empty string or null (use the default SQL
engine), a string (use the registered SQL engine), an
:class:`~fugue.execution.execution_engine.SQLEngine` type, or
the :class:`~fugue.execution.execution_engine.SQLEngine` instance
(you can use ``None`` to use the default one), defaults to None
:param execution_engine: the
:class:`~fugue.execution.execution_engine.ExecutionEngine` instance
to create
the :class:`~fugue.execution.execution_engine.SQLEngine`. Normally you
should always provide this value.
:param kwargs: additional parameters to initialize the sql engine
:return: the :class:`~fugue.execution.execution_engine.SQLEngine`
instance
.. note::
You normally don't need to call this function directly;
use ``make_execution_engine`` instead.
.. admonition:: Examples
.. code-block:: python
register_default_sql_engine(lambda conf: S1(conf))
register_sql_engine("s2", lambda conf: S2(conf))
engine = NativeExecutionEngine()
# S1(engine)
make_sql_engine(None, engine)
# S1(engine, a=1)
make_sql_engine(None, engine, a=1)
# S2(engine)
make_sql_engine("s2", engine)
# SqliteEngine(engine)
make_sql_engine(SqliteEngine)
"""
return _EXECUTION_ENGINE_FACTORY.make_sql_engine(engine, execution_engine, **kwargs)
|
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import numpy as np
import os
import sys
from pprint import pprint
from datetime import datetime
from datetime import timedelta
import pickle
import copy
from mpl_toolkits.basemap import Basemap
import matplotlib.colors
timezone = 1
endpointsPARIS = []
clusters = []
cluster_endpoints={}
for number in [1,2,3]:
cluster_endpoints[number]=[]
clusters.append(number)
mn_height = []
with open('C:/HYSPLIT_argh/Alert_march2016_working/CLUSLIST_3','r') as f:
for line in f:
newline = line.split()
cluster = int(newline[0])
file = newline[7]
tdump_file = open(file, 'r')
endpoints = []
data_start = False
i=0
for line in tdump_file:
newline = line.split()
if data_start == True:
lat = float(newline[9])
lon = float(newline[10])
height = float(newline[11])
if cluster == 1 and i == 48:
mn_height.append(height)
endpoint = [lat, lon,height]
endpoints.append(endpoint)
i+=1
if newline[1] == 'PRESSURE':
data_start = True
tdump_file.close()
cluster_endpoints[cluster].append(endpoints)
# plotting
###set up the basemap instance
lat_pt = 82.
lon_pt = -62.
m = Basemap(projection='nplaea',boundinglat=50,lon_0=270,resolution='l')
fig, axes = plt.subplots(2,3,figsize=(12, 4), facecolor='w', edgecolor='k')
fig.subplots_adjust(hspace = 0.1, wspace=0.25)
axs = axes.ravel()
for i in [-1,-2,-3]:
axes[-1, i].axis('off')
axes[-2, i].axis('off')
# #axes[-3, i].axis('off')
colors = ['b','orange','g','r','c','m','k','y','#DF7401','#585858','grey','#663300']
for cluster_no in clusters:
print(cluster_no, colors[cluster_no])
list = cluster_endpoints[cluster_no]
#subplot_num = 4,3,cluster_no
axs[cluster_no] = fig.add_subplot(1,3,cluster_no)
#m.drawmapboundary(fill_color='white')
m.bluemarble()
m.drawcoastlines()
#m.fillcontinents(color='#FFFFBF',lake_color='#ABD9E9',zorder=0)
m.drawcountries()
parallels = np.arange(0.,81,10.)
m.drawparallels(parallels,labels=[False,True,False,False])
meridians = np.arange(10.,351.,20.)
m.drawmeridians(meridians,labels=[False,False,False,True])
plt.text(0.1,0.8,str(cluster_no), fontsize=20, transform=axs[cluster_no].transAxes,color = 'white')
for row in list:
np_endpoints = np.array(row)
lats = np_endpoints[:,0]
lons = np_endpoints[:,1]
heights = np_endpoints[:,2]
x,y = m(lons,lats)
bt = m.plot(x,y,color=colors[cluster_no],linewidth=2)
#bt = m.scatter(x,y, c=heights, cmap=plt.get_cmap('jet'),edgecolors='none', marker = 'o')
#lim = bt.get_clim()
#print lim
#plt.clim(0,1000)
#cb = plt.colorbar()
#cb.set_label('height (m)', rotation=270)
dir = 'C:/Users/Sarah Hanna/Documents/Data/Alert Data/Alert-March 2016/'
os.chdir(dir)
plt.savefig('ALERT_cluster_all_trajs_from_HYSPLIT_240hr_backtrajectories-3clusters.png', bbox_inches='tight')
plt.show()
|
#there are many ways we can do random numbers
#1. import random
#used to produce pseudo-random numbers.
# They are called pseudo-random because they are not truly random and can be reproduced.
import random
a = random.random() #random float between 0 and 1
b = random.uniform(1,10) #random float between 1 and 10
c = random.randrange(1,10) #random integer between 1 and 10 (not including 10)
d = random.randint(1,10) #random integer between 1 and 10 (including 10)
e = random.choice(['a','b','c']) #random element from a list
#sample picks each element at most once (without replacement); choices may pick the same element multiple times (with replacement)
f = random.sample(range(1,10),3) #3 random elements from a list
g = random.choices(range(1,10),k=3) #3 random elements from a list
h = random.normalvariate(0,1) #random float from normal distribution with mean 0 and standard deviation 1
random.shuffle(['a','b','c']) #shuffle a list in place
random.seed(10) #set the seed for the random number generator to 10 (so that the same sequence of numbers will be generated)
import secrets #secrets: generate cryptographically secure random numbers for managing secrets
# https://docs.python.org/3/library/secrets.html
#But this is slower than the random module because it draws on the operating system's entropy source.
a = secrets.randbelow(10) #random integer between 0 and 9
b = secrets.randbits(10) #random integer between 0 and 2**10-1
c = secrets.choice(['a','b','c']) #random element from a list
d = secrets.SystemRandom().sample(range(1,10),3) #3 random elements without replacement (the secrets module has no sample(); use its SystemRandom class)
#2. import numpy
import numpy as np
#numpy has its own random number generator, with its own internal state and seed, separate from the random module
np.random.seed(10) #set the seed for the random number generator to 10 (so that the same sequence of numbers will be generated)
a = np.random.random() #random float between 0 and 1
b = np.random.uniform(1,10) #random float between 1 and 10
c = np.random.randint(1,10) #random integer between 1 and 9 (np.random has no randrange; unlike random.randint, the upper bound is excluded)
d = np.random.randint(1,11) #random integer between 1 and 10 (including 10)
e = np.random.choice(['a','b','c']) #random element from a list
f = np.random.randn(3) #array of 3 samples from the standard normal distribution
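#3. the newer numpy Generator API
#NumPy now recommends creating an explicit Generator instead of using the legacy
#np.random.* functions above. A minimal sketch (assuming numpy >= 1.17; the method
#names are from numpy.random.Generator):
rng = np.random.default_rng(10) #create a Generator seeded with 10 (reproducible)
a = rng.random() #random float between 0 and 1
b = rng.uniform(1,10) #random float between 1 and 10
c = rng.integers(1,10) #random integer between 1 and 9 (upper bound excluded by default)
d = rng.choice(['a','b','c']) #random element from a list
e = rng.standard_normal(3) #array of 3 samples from the standard normal distribution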
|
# Generated by Django 3.2.9 on 2021-11-16 11:37
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
|
# #!/usr/bin/env python
# """Tests for `aa_pbs_exporter` package."""
# from click.testing import CliRunner
# from aa_pbs_exporter.cli import aa_pbs_exporter_cli as cli
# def test_content(response):
# """Sample pytest test function with the pytest fixture as an argument."""
# # from bs4 import BeautifulSoup
# # assert 'GitHub' in BeautifulSoup(response.content).title.string
# def test_command_line_interface():
# """Test the CLI."""
# runner = CliRunner()
# result = runner.invoke(cli.main)
# assert result.exit_code == 0
# assert "Console script for aa_pbs_exporter" in result.output
# help_result = runner.invoke(cli.main, ["--help"])
# assert help_result.exit_code == 0
# assert "--help Show this message and exit." in help_result.output
# def test_hello():
# """Test the hello command."""
# runner = CliRunner()
# result = runner.invoke(cli.main, ["hello", "Foo"])
# assert result.exit_code == 0
# assert "Hello Foo" in result.output
# help_result = runner.invoke(cli.main, ["--help"])
# assert help_result.exit_code == 0
# assert "--help Show this message and exit." in help_result.output
|
# -*- coding: utf-8 -*-
import json
import os
import sys
import time
from echopy import Echo
from project import RESOURCES_DIR, BLOCK_RELEASE_INTERVAL
if "BASE_URL" not in os.environ:
BASE_URL = json.load(open(os.path.join(RESOURCES_DIR, "urls.json")))["BASE_URL"]
else:
BASE_URL = os.environ["BASE_URL"]
categories = [
# API SECTION
'api',
'login_api',
'asset_api',
'history_api',
'network_broadcast_api',
'registration_api',
'database_api',
'connection_to_apis',
# database_api section
'database_api_objects',
'database_api_subscriptions',
'database_api_blocks_transactions',
'database_api_globals',
'database_api_keys',
'database_api_accounts',
'database_api_contracts',
'database_api_balances',
'database_api_assets',
'database_api_committee_members',
'database_api_votes',
'database_api_authority_validation',
'database_api_proposed_transactions',
'database_api_sidechain_ethereum',
'database_api_sidechain_erc20',
'database_api_contract_fee_pool',
# OPERATIONS SECTION
'operations',
'account_management_operations',
'assert_conditions_operations',
'asset_management_operations',
'balance_object_operations',
'committee_members_operations',
'contract_operations',
'sidechain_operations',
'custom_extension_operations',
'assets_market_operations',
'proposal_operations',
'asset_transfer_operations',
'vesting_balances_operations',
'withdrawal_permissions_operations',
'sidechain',
'sidechain_ethereum',
'sidechain_erc20',
'scenarios',
]
types = [
# TEST TYPES
"main",
"positive",
"negative"
]
def process_filters(filters):
category_filters = []
type_filters = []
for pytests_filter in filters:
if pytests_filter in types:
type_filters.append(pytests_filter)
else:
category_filters.append(pytests_filter)
command = ""
if len(category_filters):
command = "{}-a ".format(command)
for category_filter in category_filters:
command = "{}{} ".format(command, category_filter)
if len(type_filters):
command = "{}-m ".format(command)
for type_filter in type_filters:
command = "{}{}:type ".format(command, type_filter)
return command
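# Illustrative example, derived from the logic above: with the environment
# variable PYTESTS_FILTERS set to "api:positive", the filters become
# ["api", "positive"]; "api" is not in `types` so it is treated as a category
# and "positive" as a type, giving the command string "-a api -m positive:type ".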
PYTESTS_FILTERS = "" if "PYTESTS_FILTERS" not in os.environ else os.environ["PYTESTS_FILTERS"].lower().split(":")
PYTESTS_FILTER_COMMAND = process_filters(PYTESTS_FILTERS)
def get_head_block_num(echo_connection):
return echo_connection.api.database.get_dynamic_global_properties()["head_block_number"]
def run(echo_connection, filter_command):
if get_head_block_num(echo_connection):
execution_status = os.system("if ! lcc run {}--exit-error-on-failure; then lcc report --failed; exit 1; fi"
.format(filter_command))
sys.exit(1 if execution_status > 1 else execution_status)
else:
time.sleep(BLOCK_RELEASE_INTERVAL)
run(echo_connection, filter_command)
echo = Echo()
echo.connect(BASE_URL)
run(echo, PYTESTS_FILTER_COMMAND)
|
#!/usr/bin/python
import sys
MIN_UDP_SPORT = 2048
if __name__ == "__main__":
N_SUBTABLES = int(sys.argv[1])
N_RULES = int(sys.argv[2])
assert N_RULES >= N_SUBTABLES
rules_per_subtable = N_RULES // N_SUBTABLES  # integer division so range() below gets an int
if rules_per_subtable > 100:
udp_port_range = 100
ip_dst_range = rules_per_subtable // 100
# assert rules_per_subtable % 100 == 0
else:
udp_port_range = 1
ip_dst_range = rules_per_subtable
for i in range (0, N_SUBTABLES):
flow = 'udp,nw_src='
flow = flow + str(i + 1) + '.0.0.0/' + str(8 + i) + ','
for j in range (0, ip_dst_range):
a = j // 256 // 256
b = (j // 256) % 256
c = j % 256
flow2 = flow + 'nw_dst=10.' + str(a) + '.' + str(b) + '.' + str(c) + '/32,tp_src=2048,'
for k in range (20, 20 + udp_port_range):
flow3 = flow2 + 'tp_dst=' + str(k) + ' '
flow3 = flow3 + 'actions=output:2'
print(flow3)
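# Illustrative usage (an assumption about how the output is consumed; the script
# name below is hypothetical): running "python gen_flows.py 2 200" prints lines like
# udp,nw_src=1.0.0.0/8,nw_dst=10.0.0.0/32,tp_src=2048,tp_dst=20 actions=output:2
# which can be redirected to a file and loaded into an Open vSwitch bridge,
# e.g. with "ovs-ofctl add-flows <bridge> <file>".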
|
# -*- coding: utf-8 -*-
"""
Romanization of Thai words based on machine-learnt engine ("thai2rom")
"""
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pythainlp.corpus import download, get_corpus_path
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class ThaiTransliterator:
def __init__(self):
"""
Transliteration of Thai words
Now supports Thai to Latin (romanization)
"""
# Download the model, if it's not on your machine.
self.__filemodel = get_corpus_path("thai2rom-pytorch-attn")
if not self.__filemodel:
download("thai2rom-pytorch-attn")
self.__filemodel = get_corpus_path("thai2rom-pytorch-attn")
loader = torch.load(self.__filemodel, map_location=device)
INPUT_DIM, E_EMB_DIM, E_HID_DIM, E_DROPOUT = loader["encoder_params"]
OUTPUT_DIM, D_EMB_DIM, D_HID_DIM, D_DROPOUT = loader["decoder_params"]
self._maxlength = 100
self._char_to_ix = loader["char_to_ix"]
self._ix_to_char = loader["ix_to_char"]
self._target_char_to_ix = loader["target_char_to_ix"]
self._ix_to_target_char = loader["ix_to_target_char"]
# encoder/ decoder
# Restore the model and construct the encoder and decoder.
self._encoder = Encoder(
INPUT_DIM, E_EMB_DIM, E_HID_DIM, E_DROPOUT)
self._decoder = AttentionDecoder(
OUTPUT_DIM, D_EMB_DIM, D_HID_DIM, D_DROPOUT
)
self._network = Seq2Seq(
self._encoder,
self._decoder,
self._target_char_to_ix["<start>"],
self._target_char_to_ix["<end>"],
self._maxlength,
).to(device)
self._network.load_state_dict(loader["model_state_dict"])
self._network.eval()
def _prepare_sequence_in(self, text: str):
"""
Prepare input sequence for PyTorch
"""
idxs = []
for ch in text:
if ch in self._char_to_ix:
idxs.append(self._char_to_ix[ch])
else:
idxs.append(self._char_to_ix["<UNK>"])
idxs.append(self._char_to_ix["<end>"])
tensor = torch.tensor(idxs, dtype=torch.long)
return tensor.to(device)
def romanize(self, text: str) -> str:
"""
:param str text: Thai text to be romanized
:return: English (more or less) text that spells out how the Thai text
should be pronounced.
"""
input_tensor = self._prepare_sequence_in(text).view(1, -1)
input_length = [len(text) + 1]
target_tensor_logits = self._network(input_tensor,
input_length,
None, 0)
# Seq2seq model returns <END> as the first token,
# As a result, target_tensor_logits.size() is torch.Size([0])
if target_tensor_logits.size(0) == 0:
target = ["<PAD>"]
else:
target_tensor = (
torch.argmax(
target_tensor_logits.squeeze(1),
1).cpu().numpy()
)
target = [self._ix_to_target_char[t] for t in target_tensor]
return "".join(target)
class Encoder(nn.Module):
def __init__(self, vocabulary_size, embedding_size,
hidden_size, dropout=0.5):
"""Constructor"""
super(Encoder, self).__init__()
self.hidden_size = hidden_size
self.character_embedding = nn.Embedding(vocabulary_size,
embedding_size)
self.rnn = nn.LSTM(
input_size=embedding_size,
hidden_size=hidden_size // 2,
bidirectional=True,
batch_first=True,
)
self.dropout = nn.Dropout(dropout)
def forward(self, sequences, sequences_lengths):
# sequences: (batch_size, sequence_length=MAX_LENGTH)
# sequences_lengths: (batch_size)
batch_size = sequences.size(0)
self.hidden = self.init_hidden(batch_size)
sequences_lengths = np.sort(sequences_lengths)[::-1]
index_sorted = np.argsort(
-sequences_lengths
) # negate so argsort gives descending order
index_unsort = np.argsort(index_sorted) # indices to restore the original order
index_sorted = torch.from_numpy(index_sorted)
sequences = sequences.index_select(0, index_sorted.to(device))
sequences = self.character_embedding(sequences)
sequences = self.dropout(sequences)
sequences_packed = nn.utils.rnn.pack_padded_sequence(
sequences, sequences_lengths.copy(), batch_first=True
)
sequences_output, self.hidden = self.rnn(sequences_packed,
self.hidden)
sequences_output, _ = nn.utils.rnn.pad_packed_sequence(
sequences_output, batch_first=True
)
index_unsort = torch.from_numpy(index_unsort).to(device)
sequences_output = sequences_output.index_select(
0, index_unsort.clone().detach()
)
return sequences_output, self.hidden
def init_hidden(self, batch_size):
h_0 = torch.zeros(
[2, batch_size, self.hidden_size // 2], requires_grad=True
).to(device)
c_0 = torch.zeros(
[2, batch_size, self.hidden_size // 2], requires_grad=True
).to(device)
return (h_0, c_0)
class Attn(nn.Module):
def __init__(self, method, hidden_size):
super(Attn, self).__init__()
self.method = method
self.hidden_size = hidden_size
if self.method == "general":
self.attn = nn.Linear(self.hidden_size, hidden_size)
elif self.method == "concat":
self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
self.other = nn.Parameter(torch.FloatTensor(1, hidden_size))
def forward(self, hidden, encoder_outputs, mask):
# Calculate energies for each encoder output
if self.method == "dot":
attn_energies = torch.bmm(encoder_outputs,
hidden.transpose(1, 2)).squeeze(2)
elif self.method == "general":
attn_energies = self.attn(
encoder_outputs.view(-1, encoder_outputs.size(-1))
) # (batch_size * sequence_len, hidden_size)
attn_energies = torch.bmm(
attn_energies.view(
*encoder_outputs.size()), hidden.transpose(1, 2)
).squeeze(2) # (batch_size, sequence_len)
elif self.method == "concat":
attn_energies = self.attn(
torch.cat((
hidden.expand(*encoder_outputs.size()),
encoder_outputs
), 2)
) # (batch_size, sequence_len, hidden_size)
attn_energies = torch.bmm(
attn_energies,
self.other.unsqueeze(0).expand(*hidden.size()).transpose(1, 2),
).squeeze(2)
attn_energies = attn_energies.masked_fill(mask == 0, -1e10)
# Normalize energies to weights in range 0 to 1
return F.softmax(attn_energies, 1)
class AttentionDecoder(nn.Module):
def __init__(self, vocabulary_size, embedding_size,
hidden_size, dropout=0.5):
"""Constructor"""
super(AttentionDecoder, self).__init__()
self.vocabulary_size = vocabulary_size
self.hidden_size = hidden_size
self.character_embedding = nn.Embedding(vocabulary_size,
embedding_size)
self.rnn = nn.LSTM(
input_size=embedding_size + self.hidden_size,
hidden_size=hidden_size,
bidirectional=False,
batch_first=True,
)
self.attn = Attn(method="general", hidden_size=self.hidden_size)
self.linear = nn.Linear(hidden_size, vocabulary_size)
self.dropout = nn.Dropout(dropout)
def forward(self, input, last_hidden, encoder_outputs, mask):
""""Defines the forward computation of the decoder"""
# input: (batch_size, 1)
# last_hidden: (batch_size, hidden_dim)
# encoder_outputs: (batch_size, sequence_len, hidden_dim)
# mask: (batch_size, sequence_len)
hidden = last_hidden.permute(1, 0, 2)
attn_weights = self.attn(hidden, encoder_outputs, mask)
context_vector = attn_weights.unsqueeze(1).bmm(encoder_outputs)
context_vector = torch.sum(context_vector, dim=1)
context_vector = context_vector.unsqueeze(1)
embedded = self.character_embedding(input)
embedded = self.dropout(embedded)
rnn_input = torch.cat((context_vector, embedded), -1)
output, hidden = self.rnn(rnn_input)
output = output.view(-1, output.size(2))
x = self.linear(output)
return x, hidden[0], attn_weights
class Seq2Seq(nn.Module):
def __init__(
self, encoder, decoder, target_start_token,
target_end_token, max_length
):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.pad_idx = 0
self.target_start_token = target_start_token
self.target_end_token = target_end_token
self.max_length = max_length
assert encoder.hidden_size == decoder.hidden_size
def create_mask(self, source_seq):
mask = source_seq != self.pad_idx
return mask
def forward(
self, source_seq, source_seq_len, target_seq, teacher_forcing_ratio=0.5
):
# source_seq: (batch_size, MAX_LENGTH)
# source_seq_len: (batch_size, 1)
# target_seq: (batch_size, MAX_LENGTH)
batch_size = source_seq.size(0)
start_token = self.target_start_token
end_token = self.target_end_token
max_len = self.max_length
target_vocab_size = self.decoder.vocabulary_size
outputs = torch.zeros(max_len,
batch_size,
target_vocab_size).to(device)
if target_seq is None:
assert teacher_forcing_ratio == 0, "Must be zero during inference"
inference = True
else:
inference = False
encoder_outputs, encoder_hidden = self.encoder(source_seq,
source_seq_len)
decoder_input = (
torch.tensor([[start_token] * batch_size]).view(batch_size,
1).to(device)
)
encoder_hidden_h_t = torch.cat(
[encoder_hidden[0][0], encoder_hidden[0][1]], dim=1
).unsqueeze(dim=0)
decoder_hidden = encoder_hidden_h_t
max_source_len = encoder_outputs.size(1)
mask = self.create_mask(source_seq[:, 0:max_source_len])
for di in range(max_len):
decoder_output, decoder_hidden, _ = self.decoder(
decoder_input, decoder_hidden, encoder_outputs, mask
)
topv, topi = decoder_output.topk(1)
outputs[di] = decoder_output.to(device)
teacher_force = random.random() < teacher_forcing_ratio
decoder_input = (
target_seq[:, di].reshape(batch_size, 1)
if teacher_force
else topi.detach()
)
if inference and decoder_input == end_token:
return outputs[:di]
return outputs
_THAI_TO_ROM = ThaiTransliterator()
def romanize(text: str) -> str:
return _THAI_TO_ROM.romanize(text)
|
# -*- coding: utf-8 -*-
import os
import re
import requests
import shutil
import time
import xml.etree.ElementTree as ET
import urllib.parse
from collections import namedtuple
from dateutil.parser import parse as parsedate
from docutils import nodes, utils
from sphinx.util.nodes import split_explicit_title
from sphinx.util.console import bold, standout
from sphinx import __version__ as sphinx_version
if sphinx_version >= '1.6.0':
from sphinx.util.logging import getLogger
from ..doxylink import __version__
from .parsing import normalise, ParseException
Entry = namedtuple('Entry', ['kind', 'file'])
def report_info(env, msg, docname=None, lineno=None):
'''Convenience function for logging an informational message
Args:
msg (str): Message to log
docname (str): Name of the document on which the error occurred
lineno (str): Line number in the document on which the error occurred
'''
if sphinx_version >= '1.6.0':
logger = getLogger(__name__)
if lineno is not None:
logger.info(msg, location=(docname, lineno))
else:
logger.info(msg, location=docname)
else:
env.info(docname, msg, lineno=lineno)
def report_warning(env, msg, docname=None, lineno=None):
'''Convenience function for logging a warning
Args:
msg (str): Message of the warning
docname (str): Name of the document on which the error occurred
lineno (str): Line number in the document on which the error occurred
'''
if sphinx_version >= '1.6.0':
logger = getLogger(__name__)
if lineno is not None:
logger.warning(msg, location=(docname, lineno))
else:
logger.warning(msg, location=docname)
else:
env.warn(docname, msg, lineno=lineno)
def is_url(str_to_validate):
''' Helper function to check if a string is a valid URL
Args:
str_to_validate (str): String to validate as URL
Returns:
bool: True if given string is a URL, False otherwise
'''
regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
return bool(re.match(regex, str_to_validate))
class FunctionList:
"""A FunctionList maps argument lists to specific entries"""
def __init__(self):
self.kind = 'function_list'
self._arglist = {} # type: MutableMapping[str, str]
def __getitem__(self, arglist: str) -> Entry:
# If the user has requested a specific function through specifying an arglist then get the right anchor
if arglist:
try:
filename = self._arglist[arglist]
except KeyError:
# TODO Offer fuzzy suggestion
raise LookupError('Argument list match not found')
else:
# Otherwise just return the first entry (if they don't care they get whatever comes first)
filename = list(self._arglist.values())[0]
return Entry(kind='function', file=filename)
def add_overload(self, arglist: str, file: str) -> None:
self._arglist[arglist] = file
class SymbolMap:
"""A SymbolMap maps symbols to Entries or FunctionLists"""
def __init__(self, xml_doc: ET.ElementTree) -> None:
self._mapping = parse_tag_file(xml_doc)
def _get_symbol_match(self, symbol: str) -> str:
if self._mapping.get(symbol):
return symbol
piecewise_list = match_piecewise(self._mapping.keys(), symbol)
# If there is only one match, return it.
if len(piecewise_list) == 1:
return list(piecewise_list)[0]
# If there is more than one item in piecewise_list then there is an ambiguity
# Often this is due to the symbol matching the name of the constructor as well as the class name itself
# We will prefer the class
classes_list = {s for s in piecewise_list if self._mapping[s].kind == 'class'}
# If there is only one by here we return it.
if len(classes_list) == 1:
return list(classes_list)[0]
# Now, to disambiguate between ``PolyVox::Array< 1, ElementType >::operator[]`` and ``PolyVox::Array::operator[]`` matching ``operator[]``,
# we will ignore templated (as in C++ templates) tag names by removing names containing ``<``
no_templates_list = {s for s in piecewise_list if '<' not in s}
if len(no_templates_list) == 1:
return list(no_templates_list)[0]
# If not found by now, return the shortest match, assuming that's the most specific
if no_templates_list:
# TODO return a warning here?
return min(no_templates_list, key=len)
# TODO Offer fuzzy suggestion
raise LookupError('Could not find a match')
def __getitem__(self, item: str) -> Entry:
symbol, normalised_arglist = normalise(item)
matched_symbol = self._get_symbol_match(symbol)
entry = self._mapping[matched_symbol]
if isinstance(entry, FunctionList):
entry = entry[normalised_arglist]
return entry
def parse_tag_file(doc: ET.ElementTree) -> dict:
"""
Takes in an XML tree from a Doxygen tag file and returns a dictionary that looks something like:
.. code-block:: python
{'PolyVox': Entry(...),
'PolyVox::Array': Entry(...),
'PolyVox::Array1DDouble': Entry(...),
'PolyVox::Array1DFloat': Entry(...),
'PolyVox::Array1DInt16': Entry(...),
'QScriptContext::throwError': FunctionList(...),
'QScriptContext::toString': FunctionList(...)
}
Note the different form for functions. This is required to allow for 'overloading by argument type'.
:Parameters:
doc : xml.etree.ElementTree
The XML DOM object
:return: a dictionary mapping fully qualified symbols to files
"""
mapping = {} # type: MutableMapping[str, Union[Entry, FunctionList]]
function_list = [] # This is a list of function to be parsed and inserted into mapping at the end of the function.
for compound in doc.findall('./compound'):
compound_kind = compound.get('kind')
if compound_kind not in {'namespace', 'class', 'struct', 'file', 'define', 'group', 'page'}:
continue
compound_name = compound.findtext('name')
compound_filename = compound.findtext('filename')
# TODO The following is a hack bug fix I think
# Doxygen doesn't seem to include the file extension to <compound kind="file"><filename> entries
# If it's a 'file' type, check if it _does_ have an extension, if not append '.html'
if compound_kind in ('file', 'page') and not os.path.splitext(compound_filename)[1]:
compound_filename = compound_filename + '.html'
# If it's a compound we can simply add it
mapping[compound_name] = Entry(kind=compound_kind, file=compound_filename)
for member in compound.findall('member'):
# If the member doesn't have an <anchorfile> element, use the parent compound's <filename> instead
# This is the way it is in the qt.tag and is perhaps an artefact of old Doxygen
anchorfile = member.findtext('anchorfile') or compound_filename
member_symbol = compound_name + '::' + member.findtext('name')
member_kind = member.get('kind')
arglist_text = member.findtext('./arglist') # If it has an <arglist> then we assume it's a function. Empty <arglist> returns '', not None. Things like typedefs and enums can have empty arglists
if arglist_text and member_kind not in {'variable', 'typedef', 'enumeration'}:
function_list.append((member_symbol, arglist_text, member_kind, join(anchorfile, '#', member.findtext('anchor'))))
else:
mapping[member_symbol] = Entry(kind=member.get('kind'), file=join(anchorfile, '#', member.findtext('anchor')))
for member_symbol, arglist, kind, anchor_link in function_list:
try:
normalised_arglist = normalise(member_symbol + arglist)[1]
except ParseException as e:
print('Skipping %s %s%s. Error reported from parser was: %s' % (kind, member_symbol, arglist, e))
else:
if mapping.get(member_symbol) and isinstance(mapping[member_symbol], FunctionList):
mapping[member_symbol].add_overload(normalised_arglist, anchor_link)
else:
mapping[member_symbol] = FunctionList()
mapping[member_symbol].add_overload(normalised_arglist, anchor_link)
return mapping
def match_piecewise(candidates: set, symbol: str, sep: str='::') -> set:
"""
Match the requested symbol reverse piecewise (split on ``::``) against the candidates.
This allows you to under-specify the base namespace so that ``"MyClass"`` can match ``my_namespace::MyClass``
Args:
candidates: set of possible matches for symbol
symbol: the symbol to match against
sep: the separator between identifier elements
Returns:
set of matches
"""
piecewise_list = set()
for item in candidates:
split_symbol = symbol.split(sep)
split_item = item.split(sep)
split_symbol.reverse()
split_item.reverse()
min_length = len(split_symbol)
split_item = split_item[:min_length]
if split_symbol == split_item:
piecewise_list.add(item)
return piecewise_list
def join(*args):
return ''.join(args)
def create_role(app, tag_filename, rootdir, cache_name, pdf=""):
# Tidy up the root directory path
if not rootdir.endswith(('/', '\\')):
rootdir = join(rootdir, os.sep)
try:
if is_url(tag_filename):
hresponse = requests.head(tag_filename, allow_redirects=True)
if hresponse.status_code != 200:
raise FileNotFoundError
try:
modification_time = parsedate(hresponse.headers['last-modified']).timestamp()
except KeyError: # no last-modified header from server
modification_time = time.time()
def _parse():
response = requests.get(tag_filename, allow_redirects=True)
if response.status_code != 200:
raise FileNotFoundError
return ET.fromstring(response.text)
else:
modification_time = os.path.getmtime(tag_filename)
def _parse():
return ET.parse(tag_filename)
report_info(app.env, bold('Checking tag file cache for %s: ' % cache_name))
if not hasattr(app.env, 'doxylink_cache'):
# no cache present at all, initialise it
report_info(app.env, 'No cache at all, rebuilding...')
mapping = SymbolMap(_parse())
app.env.doxylink_cache = {cache_name: {'mapping': mapping, 'mtime': modification_time}}
elif not app.env.doxylink_cache.get(cache_name):
# Main cache is there but the specific sub-cache for this tag file is not
report_info(app.env, 'Sub cache is missing, rebuilding...')
mapping = SymbolMap(_parse())
app.env.doxylink_cache[cache_name] = {'mapping': mapping, 'mtime': modification_time}
elif app.env.doxylink_cache[cache_name]['mtime'] < modification_time:
# tag file has been modified since sub-cache creation
report_info(app.env, 'Sub-cache is out of date, rebuilding...')
mapping = SymbolMap(_parse())
app.env.doxylink_cache[cache_name] = {'mapping': mapping, 'mtime': modification_time}
elif not app.env.doxylink_cache[cache_name].get('version') or app.env.doxylink_cache[cache_name].get('version') != __version__:
# sub-cache doesn't have a version or the version doesn't match
report_info(app.env, 'Sub-cache schema version doesn\'t match, rebuilding...')
mapping = SymbolMap(_parse())
app.env.doxylink_cache[cache_name] = {'mapping': mapping, 'mtime': modification_time}
else:
# The cache is up to date
report_info(app.env, 'Sub-cache is up-to-date')
except FileNotFoundError:
tag_file_found = False
report_warning(app.env, standout('Could not find tag file %s. Make sure your `doxylink` config variable is set correctly.' % tag_filename))
else:
tag_file_found = True
def find_doxygen_link(name, rawtext, text, lineno, inliner, options={}, content=[]):
# from :name:`title <part>`
has_explicit_title, title, part = split_explicit_title(text)
part = utils.unescape(part)
warning_messages = []
if not tag_file_found:
warning_messages.append('Could not find match for `%s` because tag file not found' % part)
return [nodes.inline(title, title)], []
try:
url = app.env.doxylink_cache[cache_name]['mapping'][part]
except LookupError as error:
inliner.reporter.warning('Could not find match for `%s` in `%s` tag file. Error reported was %s' % (part, tag_filename, error), line=lineno)
return [nodes.inline(title, title)], []
except ParseException as error:
inliner.reporter.warning('Error while parsing `%s`. It is not a well-formed C++ function call or symbol. '
'If this is not the case, it is a doxylink bug so please report it. '
'Error reported was: %s' % (part, error), line=lineno)
return [nodes.inline(title, title)], []
if pdf:
full_url = join(pdf, '#', url.file)
full_url = full_url.replace('.html#', '_') # for links to variables and functions
full_url = full_url.replace('.html', '') # for links to files
# If it's an absolute path then the link will work regardless of the document directory
# Also check if it is a URL (i.e. it has a 'scheme' like 'http' or 'file')
elif os.path.isabs(rootdir) or urllib.parse.urlparse(rootdir).scheme:
full_url = join(rootdir, url.file)
# But otherwise we need to add the relative path of the current document to the root source directory to the link
else:
relative_path_to_docsrc = os.path.relpath(app.env.srcdir, os.path.dirname(inliner.document.attributes['source']))
full_url = join(relative_path_to_docsrc, '/', rootdir, url.file) # We always use '/' here rather than os.sep since this is a web link; it avoids problems like documentation/.\../library/doc/ (mixed slashes)
if url.kind == 'function' and app.config.add_function_parentheses and normalise(title)[1] == '' and not has_explicit_title:
title = join(title, '()')
pnode = nodes.reference(title, title, internal=False, refuri=full_url)
return [pnode], []
return find_doxygen_link
def extract_configuration(values):
if len(values) == 3:
tag_filename, rootdir, pdf_filename = values
elif len(values) == 2:
tag_filename = values[0]
if values[1].endswith('.pdf'):
pdf_filename = values[1]
rootdir = ""
else:
rootdir = values[1]
pdf_filename = ""
else:
raise ValueError("Config variable `doxylink` is incorrectly configured. Expected a tuple with 2 to 3 "
"elements; got %s" % values)
return tag_filename, rootdir, pdf_filename
def fetch_file(app, source, output_path):
"""Fetches file and puts it in the desired location if it does not exist yet.
Local files will be copied and remote files will be downloaded.
Directories in the ``output_path`` get created if needed.
Args:
app: Sphinx' application instance
source (str): Path to local file or URL to remote file
output_path (str): Path with filename to copy/download the source to, relative to Sphinx' output directory
"""
if not os.path.isabs(output_path):
output_path = os.path.join(app.outdir, output_path)
if os.path.exists(output_path):
return
os.makedirs(os.path.dirname(output_path), exist_ok=True)
if is_url(source):
response = requests.get(source, allow_redirects=True)
if response.status_code != 200:
report_warning(app.env,
standout("Could not find file %r. Make sure your `doxylink_pdf_files` config variable is "
"set correctly." % source))
return
with open(output_path, 'wb') as file:
file.write(response.content)
else:
if not os.path.isabs(source):
source = os.path.join(app.outdir, source)
if os.path.exists(source):
shutil.copy(source, output_path)
else:
report_warning(app.env,
standout("Expected a URL or a path that exists as value for `doxylink_pdf_files` "
"config variable; got %r" % source))
def process_configuration(app, tag_filename, rootdir, pdf_filename):
"""Processes the configured values for ``doxylink`` and ``doxylink_pdf_files`` and warns about potential issues.
The type of builder decides which values shall be used.
Args:
app: Sphinx' application instance
tag_filename (str): Path to the Doxygen tag file
rootdir (str): Path to the root directory of Doxygen HTML documentation
pdf_filename (str): Path to the pdf file; may be empty when LaTeX builder is not used
"""
if app.builder.format == 'latex':
if not pdf_filename:
if is_url(rootdir):
report_warning(app.env,
"Linking from PDF to remote Doxygen html is not supported yet; got %r."
"Consider linking to a Doxygen pdf file instead as "
"third element of the tuple in the `doxylink` config variable." % rootdir)
else:
report_warning(app.env,
"Linking from PDF to local Doxygen html is not possible; got %r."
"Consider linking to a Doxygen pdf file instead as third element of the tuple in the "
"`doxylink` config variable." % rootdir)
elif pdf_filename in app.config.doxylink_pdf_files:
source = app.config.doxylink_pdf_files[pdf_filename]
fetch_file(app, source, pdf_filename)
elif pdf_filename and not rootdir:
report_warning(app.env,
"Linking from HTML to Doxygen pdf (%r) is not supported. Consider setting "
"the root directory of Doxygen's HTML output as value instead." % pdf_filename)
def setup_doxylink_roles(app):
for name, values in app.config.doxylink.items():
tag_filename, rootdir, pdf_filename = extract_configuration(values)
process_configuration(app, tag_filename, rootdir, pdf_filename)
app.add_role(name, create_role(app, tag_filename, rootdir, name, pdf=pdf_filename))
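# Illustrative configuration sketch (comments only; the extension path, role name
# and URL below are assumptions, not taken from this file). In a Sphinx conf.py,
# `doxylink` maps a role name to a (tag file, root dir) or (tag file, root dir, pdf)
# tuple, matching what extract_configuration() above expects:
#
# extensions = ['sphinxcontrib.doxylink']
# doxylink = {
#     'polyvox': ('PolyVox.tag', 'https://example.com/polyvox/docs/html/'),
# }
#
# After setup, a reference such as :polyvox:`PolyVox::Array` resolves against the
# entries parsed from the tag file.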
|
"""This lobe enables the integration of huggingface pretrained wav2vec2/hubert/wavlm models.
Reference: https://arxiv.org/abs/2006.11477
Reference: https://arxiv.org/abs/1904.05862
Reference: https://arxiv.org/abs/2110.13900
Transformer from HuggingFace needs to be installed:
https://huggingface.co/transformers/installation.html
Authors
* Titouan Parcollet 2021
* Boumadane Abdelmoumene 2021
"""
import os
import torch
import logging
import pathlib
import numpy as np
import torch.nn.functional as F
from torch import nn
from huggingface_hub import model_info
from speechbrain.pretrained.fetching import fetch
# We check if transformers is installed.
try:
import transformers
from transformers import Wav2Vec2Model, HubertModel, WavLMModel, Data2VecAudioModel
from transformers import Wav2Vec2Config, HubertConfig, WavLMConfig, Data2VecAudioConfig
from transformers import Wav2Vec2FeatureExtractor
from transformers import Wav2Vec2ForPreTraining
from transformers.models.wav2vec2.modeling_wav2vec2 import (
_compute_mask_indices,
)
except ImportError:
MSG = "Please install transformers from HuggingFace to use wav2vec2 / Hubert\n"
MSG += "E.G. run: pip install transformers"
raise ImportError(MSG)
logger = logging.getLogger(__name__)
HF_models = {
"wav2vec2": Wav2Vec2Model,
"hubert": HubertModel,
"wavlm": WavLMModel,
"data2vec": Data2VecAudioModel
}
HF_config = {
"wav2vec2": Wav2Vec2Config,
"hubert": HubertConfig,
"wavlm": WavLMConfig,
"data2vec": Data2VecAudioConfig
}
class HuggingFaceWav2Vec2(nn.Module):
"""This lobe enables the integration of HuggingFace and SpeechBrain
pretrained wav2vec2.0/Hubert models.
Source paper wav2vec2.0: https://arxiv.org/abs/2006.11477
Source paper Hubert: https://arxiv.org/abs/2106.07447
Transformer from HuggingFace needs to be installed:
https://huggingface.co/transformers/installation.html
The model can be used as a fixed feature extractor or can be finetuned. It
will download automatically the model from HuggingFace or use a local path.
Arguments
---------
source : str
HuggingFace hub name: e.g "facebook/wav2vec2-large-lv60"
save_path : str
Path (dir) of the downloaded model.
output_norm : bool (default: True)
If True, a layer_norm (affine) will be applied to the output obtained
from the wav2vec model.
freeze : bool (default: True)
If True, the model is frozen. If False, the model will be trained
alongside with the rest of the pipeline.
freeze_feature_extractor : bool (default: False)
When freeze = False and freeze_feature_extractor = True, the feature_extractor module of the model is frozen. If False,
the whole wav2vec model, including the feature_extractor module, will be trained.
apply_spec_augment : bool (default: False)
If True, the model will apply spec augment on the output of feature extractor
(inside huggingface Wav2VecModel() class).
If False, the model will not apply spec augment. We set this to false to prevent from doing it twice.
Example
-------
>>> inputs = torch.rand([10, 600])
>>> model_hub = "facebook/wav2vec2-base-960h"
>>> save_path = "savedir"
>>> model = HuggingFaceWav2Vec2(model_hub, save_path)
>>> outputs = model(inputs)
"""
def __init__(
self,
source,
save_path,
output_norm=True,
freeze=True,
freeze_feature_extractor=False,
apply_spec_augment=False,
load_pretrained_weights=True,
):
super().__init__()
# Download the extractor from HuggingFace.
# The extractor is only used to retrieve the normalisation information
self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
source, cache_dir=save_path
)
# Select specific self-supervised loader (eg. Wav2Vec2, Hubert)
if "hubert" in source:
config = HF_config.get("hubert")
model = HF_models.get("hubert")
elif "wavlm" in source:
config = HF_config.get("wavlm")
model = HF_models.get("wavlm")
elif "data2vec" in source:
config = HF_config.get("data2vec")
model = HF_models.get("data2vec")
else:
config = HF_config.get("wav2vec2")
model = HF_models.get("wav2vec2")
# Download and load the model
self._from_pretrained(
source, config=config, model=model, save_path=save_path, load_weights=load_pretrained_weights
)
# set apply_spec_augment
self.model.config.apply_spec_augment = apply_spec_augment
# We check if inputs need to be normalized w.r.t pretrained wav2vec2
self.normalize_wav = self.feature_extractor.do_normalize
self.freeze = freeze
self.freeze_feature_extractor = freeze_feature_extractor
self.output_norm = output_norm
if self.freeze:
logger.warning(
"speechbrain.lobes.models.huggingface_wav2vec - wav2vec 2.0 is frozen."
)
self.model.eval()
for param in self.model.parameters():
param.requires_grad = False
else:
self.model.train()
if self.freeze_feature_extractor:
self.model.feature_extractor._freeze_parameters()
def _from_pretrained(self, source, config, model, save_path, load_weights):
"""This function manages the source checking and loading of the params.
# 1. Is the model from HF or a local path
# 2. Is the model pretrained with HF or SpeechBrain
# 3. Download (if appropriate) and load with respect to 1. and 2.
"""
is_sb, ckpt_file = self._check_model_source(source)
if not load_weights:
config = config.from_pretrained(source, cache_dir=save_path)
self.model = model(config)
elif is_sb:
config = config.from_pretrained(source, cache_dir=save_path)
self.model = model(config)
self.model.gradient_checkpointing_disable() # Required by DDP
# fetch the checkpoint file
ckpt_full_path = fetch(
filename=ckpt_file, source=source, savedir=save_path
)
# We transfer the parameters from the checkpoint.
self._load_sb_pretrained_w2v2_parameters(ckpt_full_path)
else:
if load_weights:
self.model = model.from_pretrained(source, cache_dir=save_path)
else:
self.model = model()
def _load_sb_pretrained_w2v2_parameters(self, path):
"""Loads the parameter of a w2v2 model pretrained with SpeechBrain and the
HuggingFaceWav2Vec2Pretrain Object. It is necessary to perform a custom
loading because HuggingFace adds a level to the checkpoint when storing
the model breaking the compatibility between HuggingFaceWav2Vec2Pretrain
and HuggingFaceWav2Vec2.
In practice a typical HuggingFaceWav2Vec2 checkpoint for a given parameter
would be: model.conv.weight.data while for HuggingFaceWav2Vec2Pretrain it
is: model.wav2vec2.weight.data (wav2vec2 must be removed before loading).
"""
modified_state_dict = {}
orig_state_dict = torch.load(path, map_location="cpu")
# We remove the .wav2vec2 in the state dict.
for key, params in orig_state_dict.items():
if "wav2vec2." in key:
save_key = key.replace("model.wav2vec2.", "")
modified_state_dict[save_key] = params
incompatible_keys = self.model.load_state_dict(
modified_state_dict, strict=False
)
for missing_key in incompatible_keys.missing_keys:
logger.warning(
f"During parameter transfer to {self.model} loading from "
+ f"{path}, the transferred parameters did not have "
+ f"parameters for the key: {missing_key}"
)
for unexpected_key in incompatible_keys.unexpected_keys:
logger.warning(
f"The param with the key: {unexpected_key} is discarded as it "
+ "is useless for wav2vec 2.0 finetuning."
)
def _check_model_source(self, path):
"""Checks if the pretrained model has been trained with SpeechBrain and
is hosted locally or on a HuggingFace hub.
"""
checkpoint_filename = ""
source = pathlib.Path(path)
is_local = True
is_sb = True
# If path is a huggingface hub.
if not source.exists():
is_local = False
if is_local:
# Test for HuggingFace model
if any(File.endswith(".bin") for File in os.listdir(path)):
is_sb = False
return is_sb, checkpoint_filename
# Test for SpeechBrain model and get the filename.
for File in os.listdir(path):
if File.endswith(".ckpt"):
checkpoint_filename = os.path.join(path, File)
is_sb = True
return is_sb, checkpoint_filename
else:
files = model_info(
path
).siblings # get the list of files of the Hub
# Test if it's an HuggingFace model or a SB one
for File in files:
if File.rfilename.endswith(".ckpt"):
checkpoint_filename = File.rfilename
is_sb = True
return is_sb, checkpoint_filename
for File in files:
if File.rfilename.endswith(".bin"):
checkpoint_filename = File.rfilename
is_sb = False
return is_sb, checkpoint_filename
err_msg = f"{path} does not contain a .bin or .ckpt checkpoint !"
raise FileNotFoundError(err_msg)
def forward(self, wav):
"""Takes an input waveform and return its corresponding wav2vec encoding.
Arguments
---------
wav : torch.Tensor (signal)
A batch of audio signals to transform to features.
"""
# If we freeze, we simply remove all grads and features from the graph.
if self.freeze:
with torch.no_grad():
return self.extract_features(wav).detach()
return self.extract_features(wav)
def extract_features(self, wav):
"""Takes an input waveform and return its corresponding wav2vec encoding.
Arguments
---------
wav : torch.Tensor (signal)
A batch of audio signals to transform to features.
"""
if self.normalize_wav:
wav = F.layer_norm(wav, wav.shape)
# Extract wav2vec output
out = self.model(wav)[0]
# We normalize the output if required
if self.output_norm:
out = F.layer_norm(out, out.shape)
return out
class HuggingFaceWav2Vec2Pretrain(nn.Module):
"""This lobe enables the integration of HuggingFace
wav2vec2.0 models to be pretrained.
Source paper: https://arxiv.org/abs/2006.11477
Transformer from HuggingFace needs to be installed:
https://huggingface.co/transformers/installation.html
The return is an HuggingFace format and the mask indices that contains:
https://huggingface.co/transformers/model_doc/wav2vec2.html#wav2vec2forpretraining
For instance, it returns the loss that can be accessed with .loss
Arguments
---------
source : str
HuggingFace hub name: e.g "facebook/wav2vec2-large-lv60"
save_path : str
Path (dir) of the downloaded model.
mask_prob : float (default: 0.65)
Probability of masking a given frame. Default is taken from the paper.
mask_length : float (default: 10)
Length (i.e. number of consecutive masked frames). Default is taken from
the paper.
Example
-------
>>> inputs = torch.rand([10, 32000])
>>> model_hub = "facebook/wav2vec2-base-960h"
>>> save_path = "savedir"
>>> model = HuggingFaceWav2Vec2Pretrain(model_hub, save_path)
>>> outputs, _ = model(inputs)
"""
def __init__(
self,
source,
save_path,
mask_prob=0.65,
mask_length=10,
normalize_wav=True,
):
super().__init__()
self.mask_prob = mask_prob
self.mask_length = mask_length
self.normalize_wav = normalize_wav
# Download the config of the model from HuggingFace.
self.config = Wav2Vec2Config.from_pretrained(
source, cache_dir=save_path
)
self.config.output_hidden_states = (
True # We want the hidden states as well!
)
self.model = Wav2Vec2ForPreTraining(self.config)
self.model.gradient_checkpointing_disable() # Required by DDP
self.model.train()
# We check if inputs need to be normalized w.r.t pretrained wav2vec2
def forward(self, wav):
"""Takes an input waveform and return its corresponding wav2vec encoding.
Arguments
---------
wav : torch.Tensor (signal)
A batch of audio signals to transform to features.
"""
batch_size, raw_sequence_length = wav.shape
if self.normalize_wav:
wav = F.layer_norm(wav, wav.shape)
sequence_length = self.model._get_feat_extract_output_lengths(
raw_sequence_length
)
# 1. Compute the indices that will be masked
mask_time_indices = _compute_mask_indices(
(batch_size, sequence_length),
mask_prob=self.mask_prob,
mask_length=self.mask_length,
)
torch_mask_time_indices = torch.tensor(
mask_time_indices, device=wav.device, dtype=torch.long,
)
# 2. Sample the negative samples from the entire sequence.
# Fairseq does it only on the masked indices, but this only works if you
# have long sentences. For more versatility, we sample on the entire sequence.
full_sentence_indices = np.ones((batch_size, sequence_length))
# print(np.sum(mask_time_indices, axis=1))
negative_sample_indices = torch.tensor(
transformers.models.wav2vec2.modeling_wav2vec2._sample_negative_indices(
(batch_size, sequence_length),
num_negatives=self.config.num_negatives,
mask_time_indices=full_sentence_indices,
),
device=wav.device,
dtype=torch.long,
)
return (
self.model(
wav,
mask_time_indices=torch_mask_time_indices,
sampled_negative_indices=negative_sample_indices,
),
torch_mask_time_indices,
)
|
newchat_xpath= "//*[@id='side']/header/div[2]/div/span/div[2]"
search_xpath= "//*[@id='app']/div/div/div[2]/div[1]/span/div/span/div/div[1]/div/label/div/div[2]"
#user_xpath= "//span[@title='{}']"
message_xpath= "//*[@id='main']/footer/div[1]/div[2]/div/div[2]"
sendbutton_xpath= "//*[@id='main']/footer/div[1]/div[3]"
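#A minimal sketch of how these locators might be used with Selenium (assumptions:
#the XPaths above target WhatsApp Web and a logged-in `driver` already exists; the
#contact name below is hypothetical):
#
# from selenium.webdriver.common.by import By
# from selenium.webdriver.common.keys import Keys
#
# driver.find_element(By.XPATH, search_xpath).send_keys("Some Contact", Keys.ENTER)
# message_box = driver.find_element(By.XPATH, message_xpath)
# message_box.send_keys("hello")
# driver.find_element(By.XPATH, sendbutton_xpath).click()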
|
"""
VRChat API Documentation
The version of the OpenAPI document: 1.6.8
Contact: me@ruby.js.org
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from vrchatapi.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from vrchatapi.exceptions import ApiAttributeError
def lazy_import():
from vrchatapi.model.release_status import ReleaseStatus
from vrchatapi.model.tag import Tag
from vrchatapi.model.world_id import WorldID
globals()['ReleaseStatus'] = ReleaseStatus
globals()['Tag'] = Tag
globals()['WorldID'] = WorldID
class CreateWorldRequest(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
('asset_url',): {
'min_length': 1,
},
('image_url',): {
'min_length': 1,
},
('name',): {
'min_length': 1,
},
('asset_version',): {
'inclusive_minimum': 0,
},
('author_name',): {
'min_length': 1,
},
('capacity',): {
'inclusive_maximum': 40,
'inclusive_minimum': 0,
},
('unity_package_url',): {
'min_length': 1,
},
('unity_version',): {
'min_length': 1,
},
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'asset_url': (str,), # noqa: E501
'image_url': (str,), # noqa: E501
'name': (str,), # noqa: E501
'asset_version': (int,), # noqa: E501
'author_id': (str,), # noqa: E501
'author_name': (str,), # noqa: E501
'capacity': (int,), # noqa: E501
'description': (str,), # noqa: E501
'id': (WorldID,), # noqa: E501
'platform': (str,), # noqa: E501
'release_status': (ReleaseStatus,), # noqa: E501
'tags': ([Tag],), # noqa: E501
'unity_package_url': (str,), # noqa: E501
'unity_version': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'asset_url': 'assetUrl', # noqa: E501
'image_url': 'imageUrl', # noqa: E501
'name': 'name', # noqa: E501
'asset_version': 'assetVersion', # noqa: E501
'author_id': 'authorId', # noqa: E501
'author_name': 'authorName', # noqa: E501
'capacity': 'capacity', # noqa: E501
'description': 'description', # noqa: E501
'id': 'id', # noqa: E501
'platform': 'platform', # noqa: E501
'release_status': 'releaseStatus', # noqa: E501
'tags': 'tags', # noqa: E501
'unity_package_url': 'unityPackageUrl', # noqa: E501
'unity_version': 'unityVersion', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, asset_url, image_url, name, *args, **kwargs): # noqa: E501
"""CreateWorldRequest - a model defined in OpenAPI
Args:
asset_url (str):
image_url (str):
name (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
asset_version (int): [optional] # noqa: E501
            author_id (str): A user's unique ID, usually in the form of `usr_c1644b5b-3ca4-45b4-97c6-a2a0de70d469`. Legacy players can have old IDs in the form of `8JoV9XEdpo`. The ID can never be changed. [optional] # noqa: E501
author_name (str): [optional] # noqa: E501
capacity (int): [optional] # noqa: E501
description (str): [optional] # noqa: E501
id (WorldID): [optional] # noqa: E501
            platform (str): This can be `standalonewindows` or `android`, but can also pretty much be any random Unity version such as `2019.2.4-801-Release` or `2019.2.2-772-Release` or even `unknownplatform`. [optional] # noqa: E501
release_status (ReleaseStatus): [optional] # noqa: E501
tags ([Tag]): [optional] # noqa: E501
unity_package_url (str): [optional] # noqa: E501
unity_version (str): [optional] if omitted the server will use the default value of "5.3.4p1" # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.asset_url = asset_url
self.image_url = image_url
self.name = name
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, asset_url, image_url, name, *args, **kwargs): # noqa: E501
"""CreateWorldRequest - a model defined in OpenAPI
Args:
asset_url (str):
image_url (str):
name (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
asset_version (int): [optional] # noqa: E501
            author_id (str): A user's unique ID, usually in the form of `usr_c1644b5b-3ca4-45b4-97c6-a2a0de70d469`. Legacy players can have old IDs in the form of `8JoV9XEdpo`. The ID can never be changed. [optional] # noqa: E501
author_name (str): [optional] # noqa: E501
capacity (int): [optional] # noqa: E501
description (str): [optional] # noqa: E501
id (WorldID): [optional] # noqa: E501
            platform (str): This can be `standalonewindows` or `android`, but can also pretty much be any random Unity version such as `2019.2.4-801-Release` or `2019.2.2-772-Release` or even `unknownplatform`. [optional] # noqa: E501
release_status (ReleaseStatus): [optional] # noqa: E501
tags ([Tag]): [optional] # noqa: E501
unity_package_url (str): [optional] # noqa: E501
unity_version (str): [optional] if omitted the server will use the default value of "5.3.4p1" # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.asset_url = asset_url
self.image_url = image_url
self.name = name
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'Fetch' implementations
Classes for obtaining upstream sources for the
BitBake build tools.
"""
# Copyright (C) 2003, 2004 Chris Larson
# Copyright (C) 2012 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig
import os, re
import signal
import logging
import urllib.request, urllib.parse, urllib.error
if 'git' not in urllib.parse.uses_netloc:
urllib.parse.uses_netloc.append('git')
import operator
import collections
import subprocess
import pickle
import errno
import bb.persist_data, bb.utils
import bb.checksum
import bb.process
import bb.event
__version__ = "2"
_checksum_cache = bb.checksum.FileChecksumCache()
logger = logging.getLogger("BitBake.Fetcher")
class BBFetchException(Exception):
"""Class all fetch exceptions inherit from"""
def __init__(self, message):
self.msg = message
Exception.__init__(self, message)
def __str__(self):
return self.msg
class UntrustedUrl(BBFetchException):
"""Exception raised when encountering a host not listed in BB_ALLOWED_NETWORKS"""
def __init__(self, url, message=''):
if message:
msg = message
else:
msg = "The URL: '%s' is not trusted and cannot be used" % url
self.url = url
BBFetchException.__init__(self, msg)
self.args = (url,)
class MalformedUrl(BBFetchException):
"""Exception raised when encountering an invalid url"""
def __init__(self, url, message=''):
if message:
msg = message
else:
msg = "The URL: '%s' is invalid and cannot be interpreted" % url
self.url = url
BBFetchException.__init__(self, msg)
self.args = (url,)
class FetchError(BBFetchException):
"""General fetcher exception when something happens incorrectly"""
def __init__(self, message, url = None):
if url:
msg = "Fetcher failure for URL: '%s'. %s" % (url, message)
else:
msg = "Fetcher failure: %s" % message
self.url = url
BBFetchException.__init__(self, msg)
self.args = (message, url)
class ChecksumError(FetchError):
"""Exception when mismatched checksum encountered"""
def __init__(self, message, url = None, checksum = None):
self.checksum = checksum
FetchError.__init__(self, message, url)
class NoChecksumError(FetchError):
"""Exception when no checksum is specified, but BB_STRICT_CHECKSUM is set"""
class UnpackError(BBFetchException):
"""General fetcher exception when something happens incorrectly when unpacking"""
def __init__(self, message, url):
msg = "Unpack failure for URL: '%s'. %s" % (url, message)
self.url = url
BBFetchException.__init__(self, msg)
self.args = (message, url)
class NoMethodError(BBFetchException):
"""Exception raised when there is no method to obtain a supplied url or set of urls"""
def __init__(self, url):
msg = "Could not find a fetcher which supports the URL: '%s'" % url
self.url = url
BBFetchException.__init__(self, msg)
self.args = (url,)
class MissingParameterError(BBFetchException):
"""Exception raised when a fetch method is missing a critical parameter in the url"""
def __init__(self, missing, url):
msg = "URL: '%s' is missing the required parameter '%s'" % (url, missing)
self.url = url
self.missing = missing
BBFetchException.__init__(self, msg)
self.args = (missing, url)
class ParameterError(BBFetchException):
"""Exception raised when a url cannot be proccessed due to invalid parameters."""
def __init__(self, message, url):
msg = "URL: '%s' has invalid parameters. %s" % (url, message)
self.url = url
BBFetchException.__init__(self, msg)
self.args = (message, url)
class NetworkAccess(BBFetchException):
"""Exception raised when network access is disabled but it is required."""
def __init__(self, url, cmd):
msg = "Network access disabled through BB_NO_NETWORK (or set indirectly due to use of BB_FETCH_PREMIRRORONLY) but access requested with command %s (for url %s)" % (cmd, url)
self.url = url
self.cmd = cmd
BBFetchException.__init__(self, msg)
self.args = (url, cmd)
class NonLocalMethod(Exception):
def __init__(self):
Exception.__init__(self)
class MissingChecksumEvent(bb.event.Event):
def __init__(self, url, md5sum, sha256sum):
self.url = url
self.checksums = {'md5sum': md5sum,
'sha256sum': sha256sum}
bb.event.Event.__init__(self)
class URI(object):
"""
A class representing a generic URI, with methods for
accessing the URI components, and stringifies to the
URI.
It is constructed by calling it with a URI, or setting
the attributes manually:
uri = URI("http://example.com/")
uri = URI()
uri.scheme = 'http'
uri.hostname = 'example.com'
uri.path = '/'
It has the following attributes:
* scheme (read/write)
* userinfo (authentication information) (read/write)
* username (read/write)
* password (read/write)
Note, password is deprecated as of RFC 3986.
* hostname (read/write)
* port (read/write)
* hostport (read only)
"hostname:port", if both are set, otherwise just "hostname"
* path (read/write)
* path_quoted (read/write)
A URI quoted version of path
* params (dict) (read/write)
* query (dict) (read/write)
* relative (bool) (read only)
True if this is a "relative URI", (e.g. file:foo.diff)
It stringifies to the URI itself.
Some notes about relative URIs: while it's specified that
a URI beginning with <scheme>:// should either be directly
followed by a hostname or a /, the old URI handling of the
    fetch2 library did not conform to this. Therefore, this URI
    class has some kludges to make sure that URIs are parsed in
    a way conforming to bitbake's current usage. This URI class
supports the following:
file:relative/path.diff (IETF compliant)
git:relative/path.git (IETF compliant)
git:///absolute/path.git (IETF compliant)
file:///absolute/path.diff (IETF compliant)
file://relative/path.diff (not IETF compliant)
But it does not support the following:
file://hostname/absolute/path.diff (would be IETF compliant)
    Note that the last case only applies to a list of
    "whitelisted" schemes (currently only file://), whose URIs
    are required not to have a network location.
"""
_relative_schemes = ['file', 'git']
_netloc_forbidden = ['file']
def __init__(self, uri=None):
self.scheme = ''
self.userinfo = ''
self.hostname = ''
self.port = None
self._path = ''
self.params = {}
self.query = {}
self.relative = False
if not uri:
return
# We hijack the URL parameters, since the way bitbake uses
# them are not quite RFC compliant.
uri, param_str = (uri.split(";", 1) + [None])[:2]
urlp = urllib.parse.urlparse(uri)
self.scheme = urlp.scheme
reparse = 0
# Coerce urlparse to make URI scheme use netloc
        if self.scheme not in urllib.parse.uses_netloc:
            urllib.parse.uses_netloc.append(self.scheme)
reparse = 1
# Make urlparse happy(/ier) by converting local resources
# to RFC compliant URL format. E.g.:
# file://foo.diff -> file:foo.diff
if urlp.scheme in self._netloc_forbidden:
uri = re.sub("(?<=:)//(?!/)", "", uri, 1)
reparse = 1
if reparse:
urlp = urllib.parse.urlparse(uri)
# Identify if the URI is relative or not
if urlp.scheme in self._relative_schemes and \
re.compile("^\w+:(?!//)").match(uri):
self.relative = True
if not self.relative:
self.hostname = urlp.hostname or ''
self.port = urlp.port
self.userinfo += urlp.username or ''
if urlp.password:
self.userinfo += ':%s' % urlp.password
self.path = urllib.parse.unquote(urlp.path)
if param_str:
self.params = self._param_str_split(param_str, ";")
if urlp.query:
self.query = self._param_str_split(urlp.query, "&")
def __str__(self):
userinfo = self.userinfo
if userinfo:
userinfo += '@'
return "%s:%s%s%s%s%s%s" % (
self.scheme,
'' if self.relative else '//',
userinfo,
self.hostport,
self.path_quoted,
self._query_str(),
self._param_str())
def _param_str(self):
return (
''.join([';', self._param_str_join(self.params, ";")])
if self.params else '')
def _query_str(self):
return (
''.join(['?', self._param_str_join(self.query, "&")])
if self.query else '')
def _param_str_split(self, string, elmdelim, kvdelim="="):
ret = collections.OrderedDict()
for k, v in [x.split(kvdelim, 1) for x in string.split(elmdelim)]:
ret[k] = v
return ret
def _param_str_join(self, dict_, elmdelim, kvdelim="="):
return elmdelim.join([kvdelim.join([k, v]) for k, v in dict_.items()])
@property
def hostport(self):
if not self.port:
return self.hostname
return "%s:%d" % (self.hostname, self.port)
@property
def path_quoted(self):
return urllib.parse.quote(self.path)
@path_quoted.setter
def path_quoted(self, path):
self.path = urllib.parse.unquote(path)
@property
def path(self):
return self._path
@path.setter
def path(self, path):
self._path = path
if not path or re.compile("^/").match(path):
self.relative = False
else:
self.relative = True
@property
def username(self):
if self.userinfo:
return (self.userinfo.split(":", 1))[0]
return ''
@username.setter
def username(self, username):
password = self.password
self.userinfo = username
if password:
self.userinfo += ":%s" % password
@property
def password(self):
if self.userinfo and ":" in self.userinfo:
return (self.userinfo.split(":", 1))[1]
return ''
@password.setter
def password(self, password):
self.userinfo = "%s:%s" % (self.username, password)
def decodeurl(url):
"""Decodes an URL into the tokens (scheme, network location, path,
user, password, parameters).
"""
m = re.compile('(?P<type>[^:]*)://((?P<user>[^/;]+)@)?(?P<location>[^;]+)(;(?P<parm>.*))?').match(url)
if not m:
raise MalformedUrl(url)
type = m.group('type')
location = m.group('location')
if not location:
raise MalformedUrl(url)
user = m.group('user')
parm = m.group('parm')
locidx = location.find('/')
if locidx != -1 and type.lower() != 'file':
host = location[:locidx]
path = location[locidx:]
elif type.lower() == 'file':
host = ""
path = location
else:
host = location
path = ""
if user:
m = re.compile('(?P<user>[^:]+)(:?(?P<pswd>.*))').match(user)
if m:
user = m.group('user')
pswd = m.group('pswd')
else:
user = ''
pswd = ''
p = collections.OrderedDict()
if parm:
for s in parm.split(';'):
if s:
                if '=' not in s:
                    raise MalformedUrl(url, "The URL: '%s' is invalid: parameter %s does not specify a value (missing '=')" % (url, s))
                s1, s2 = s.split('=', 1)
p[s1] = s2
return type, host, urllib.parse.unquote(path), user, pswd, p
def encodeurl(decoded):
"""Encodes a URL from tokens (scheme, network location, path,
user, password, parameters).
"""
type, host, path, user, pswd, p = decoded
if not type:
raise MissingParameterError('type', "encoded from the data %s" % str(decoded))
url = '%s://' % type
if user and type != "file":
url += "%s" % user
if pswd:
url += ":%s" % pswd
url += "@"
if host and type != "file":
url += "%s" % host
if path:
# Standardise path to ensure comparisons work
while '//' in path:
path = path.replace("//", "/")
url += "%s" % urllib.parse.quote(path)
if p:
for parm in p:
url += ";%s=%s" % (parm, p[parm])
return url
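def _decode_encode_example():
    # Illustrative sketch (not part of the upstream module): round-tripping a
    # SRC_URI style entry through decodeurl()/encodeurl(). The URL is made up.
    url = "git://git.example.com/repo.git;protocol=https;branch=main"
    scheme, host, path, user, pswd, params = decodeurl(url)
    # scheme == "git", host == "git.example.com", path == "/repo.git",
    # params == {"protocol": "https", "branch": "main"}
    return encodeurl((scheme, host, path, user, pswd, params))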
def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
if not ud.url or not uri_find or not uri_replace:
logger.error("uri_replace: passed an undefined value, not replacing")
return None
uri_decoded = list(decodeurl(ud.url))
uri_find_decoded = list(decodeurl(uri_find))
uri_replace_decoded = list(decodeurl(uri_replace))
logger.debug(2, "For url %s comparing %s to %s" % (uri_decoded, uri_find_decoded, uri_replace_decoded))
result_decoded = ['', '', '', '', '', {}]
for loc, i in enumerate(uri_find_decoded):
result_decoded[loc] = uri_decoded[loc]
regexp = i
if loc == 0 and regexp and not regexp.endswith("$"):
# Leaving the type unanchored can mean "https" matching "file" can become "files"
# which is clearly undesirable.
regexp += "$"
if loc == 5:
# Handle URL parameters
if i:
# Any specified URL parameters must match
for k in uri_replace_decoded[loc]:
if uri_decoded[loc][k] != uri_replace_decoded[loc][k]:
return None
# Overwrite any specified replacement parameters
for k in uri_replace_decoded[loc]:
for l in replacements:
uri_replace_decoded[loc][k] = uri_replace_decoded[loc][k].replace(l, replacements[l])
result_decoded[loc][k] = uri_replace_decoded[loc][k]
elif (re.match(regexp, uri_decoded[loc])):
if not uri_replace_decoded[loc]:
result_decoded[loc] = ""
else:
for k in replacements:
uri_replace_decoded[loc] = uri_replace_decoded[loc].replace(k, replacements[k])
#bb.note("%s %s %s" % (regexp, uri_replace_decoded[loc], uri_decoded[loc]))
result_decoded[loc] = re.sub(regexp, uri_replace_decoded[loc], uri_decoded[loc], 1)
if loc == 2:
# Handle path manipulations
basename = None
if uri_decoded[0] != uri_replace_decoded[0] and mirrortarball:
# If the source and destination url types differ, must be a mirrortarball mapping
basename = os.path.basename(mirrortarball)
# Kill parameters, they make no sense for mirror tarballs
uri_decoded[5] = {}
elif ud.localpath and ud.method.supports_checksum(ud):
basename = os.path.basename(ud.localpath)
if basename and not result_decoded[loc].endswith(basename):
result_decoded[loc] = os.path.join(result_decoded[loc], basename)
else:
return None
result = encodeurl(result_decoded)
if result == ud.url:
return None
logger.debug(2, "For url %s returning %s" % (ud.url, result))
return result
methods = []
urldata_cache = {}
saved_headrevs = {}
def fetcher_init(d):
"""
Called to initialize the fetchers once the configuration data is known.
Calls before this must not hit the cache.
"""
# When to drop SCM head revisions controlled by user policy
srcrev_policy = d.getVar('BB_SRCREV_POLICY') or "clear"
if srcrev_policy == "cache":
logger.debug(1, "Keeping SRCREV cache due to cache policy of: %s", srcrev_policy)
elif srcrev_policy == "clear":
logger.debug(1, "Clearing SRCREV cache due to cache policy of: %s", srcrev_policy)
revs = bb.persist_data.persist('BB_URI_HEADREVS', d)
try:
bb.fetch2.saved_headrevs = revs.items()
except:
pass
revs.clear()
else:
raise FetchError("Invalid SRCREV cache policy of: %s" % srcrev_policy)
_checksum_cache.init_cache(d)
for m in methods:
if hasattr(m, "init"):
m.init(d)
def fetcher_parse_save():
_checksum_cache.save_extras()
def fetcher_parse_done():
_checksum_cache.save_merge()
def fetcher_compare_revisions(d):
    """
    Compare the revisions in the persistent cache with the values saved when
    fetcher_init() ran and return True/False on whether they've changed.
    """
    data = dict(bb.persist_data.persist('BB_URI_HEADREVS', d))
    data2 = dict(bb.fetch2.saved_headrevs)
    changed = False
    for key in data:
        if key not in data2 or data2[key] != data[key]:
            logger.debug(1, "%s changed", key)
            changed = True
        else:
            logger.debug(2, "%s did not change", key)
    return changed
def mirror_from_string(data):
mirrors = (data or "").replace('\\n',' ').split()
# Split into pairs
if len(mirrors) % 2 != 0:
bb.warn('Invalid mirror data %s, should have paired members.' % data)
return list(zip(*[iter(mirrors)]*2))
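def _mirror_from_string_example():
    # Illustrative sketch (not part of the upstream module): PREMIRRORS/MIRRORS
    # style data is whitespace-separated (find, replace) pairs. Values are made up.
    data = "git://.*/.* http://downloads.example.com/mirror/ " \
           "https://.*/.* file:///local/mirror/"
    return mirror_from_string(data)
    # -> [('git://.*/.*', 'http://downloads.example.com/mirror/'),
    #     ('https://.*/.*', 'file:///local/mirror/')]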
def verify_checksum(ud, d, precomputed={}):
"""
verify the MD5 and SHA256 checksum for downloaded src
Raises a FetchError if one or both of the SRC_URI checksums do not match
the downloaded file, or if BB_STRICT_CHECKSUM is set and there are no
checksums specified.
Returns a dict of checksums that can be stored in a done stamp file and
passed in as precomputed parameter in a later call to avoid re-computing
the checksums from the file. This allows verifying the checksums of the
file against those in the recipe each time, rather than only after
downloading. See https://bugzilla.yoctoproject.org/show_bug.cgi?id=5571.
"""
_MD5_KEY = "md5"
_SHA256_KEY = "sha256"
if ud.ignore_checksums or not ud.method.supports_checksum(ud):
return {}
if _MD5_KEY in precomputed:
md5data = precomputed[_MD5_KEY]
else:
md5data = bb.utils.md5_file(ud.localpath)
if _SHA256_KEY in precomputed:
sha256data = precomputed[_SHA256_KEY]
else:
sha256data = bb.utils.sha256_file(ud.localpath)
if ud.method.recommends_checksum(ud) and not ud.md5_expected and not ud.sha256_expected:
# If strict checking enabled and neither sum defined, raise error
strict = d.getVar("BB_STRICT_CHECKSUM") or "0"
if strict == "1":
logger.error('No checksum specified for %s, please add at least one to the recipe:\n'
'SRC_URI[%s] = "%s"\nSRC_URI[%s] = "%s"' %
(ud.localpath, ud.md5_name, md5data,
ud.sha256_name, sha256data))
raise NoChecksumError('Missing SRC_URI checksum', ud.url)
bb.event.fire(MissingChecksumEvent(ud.url, md5data, sha256data), d)
if strict == "ignore":
return {
_MD5_KEY: md5data,
_SHA256_KEY: sha256data
}
# Log missing sums so user can more easily add them
logger.warning('Missing md5 SRC_URI checksum for %s, consider adding to the recipe:\n'
'SRC_URI[%s] = "%s"',
ud.localpath, ud.md5_name, md5data)
logger.warning('Missing sha256 SRC_URI checksum for %s, consider adding to the recipe:\n'
'SRC_URI[%s] = "%s"',
ud.localpath, ud.sha256_name, sha256data)
# We want to alert the user if a checksum is defined in the recipe but
# it does not match.
msg = ""
mismatch = False
    if ud.md5_expected and ud.md5_expected != md5data:
        msg = msg + "\nFile: '%s' has %s checksum %s when %s was expected" % (ud.localpath, 'md5', md5data, ud.md5_expected)
        mismatch = True
    if ud.sha256_expected and ud.sha256_expected != sha256data:
        msg = msg + "\nFile: '%s' has %s checksum %s when %s was expected" % (ud.localpath, 'sha256', sha256data, ud.sha256_expected)
        mismatch = True
if mismatch:
msg = msg + '\nIf this change is expected (e.g. you have upgraded to a new version without updating the checksums) then you can use these lines within the recipe:\nSRC_URI[%s] = "%s"\nSRC_URI[%s] = "%s"\nOtherwise you should retry the download and/or check with upstream to determine if the file has become corrupted or otherwise unexpectedly modified.\n' % (ud.md5_name, md5data, ud.sha256_name, sha256data)
if len(msg):
raise ChecksumError('Checksum mismatch!%s' % msg, ud.url, md5data)
return {
_MD5_KEY: md5data,
_SHA256_KEY: sha256data
}
def verify_donestamp(ud, d, origud=None):
"""
Check whether the done stamp file has the right checksums (if the fetch
method supports them). If it doesn't, delete the done stamp and force
a re-download.
Returns True, if the donestamp exists and is valid, False otherwise. When
returning False, any existing done stamps are removed.
"""
if not ud.needdonestamp or (origud and not origud.needdonestamp):
return True
if not os.path.exists(ud.donestamp):
return False
if (not ud.method.supports_checksum(ud) or
(origud and not origud.method.supports_checksum(origud))):
# done stamp exists, checksums not supported; assume the local file is
# current
return True
if not os.path.exists(ud.localpath):
# done stamp exists, but the downloaded file does not; the done stamp
# must be incorrect, re-trigger the download
bb.utils.remove(ud.donestamp)
return False
precomputed_checksums = {}
# Only re-use the precomputed checksums if the donestamp is newer than the
# file. Do not rely on the mtime of directories, though. If ud.localpath is
# a directory, there will probably not be any checksums anyway.
if (os.path.isdir(ud.localpath) or
os.path.getmtime(ud.localpath) < os.path.getmtime(ud.donestamp)):
try:
with open(ud.donestamp, "rb") as cachefile:
pickled = pickle.Unpickler(cachefile)
precomputed_checksums.update(pickled.load())
except Exception as e:
            # Avoid the warnings on the upgrade path from empty done stamp
# files to those containing the checksums.
if not isinstance(e, EOFError):
# Ignore errors, they aren't fatal
logger.warning("Couldn't load checksums from donestamp %s: %s "
"(msg: %s)" % (ud.donestamp, type(e).__name__,
str(e)))
try:
checksums = verify_checksum(ud, d, precomputed_checksums)
# If the cache file did not have the checksums, compute and store them
# as an upgrade path from the previous done stamp file format.
if checksums != precomputed_checksums:
with open(ud.donestamp, "wb") as cachefile:
p = pickle.Pickler(cachefile, 2)
p.dump(checksums)
return True
except ChecksumError as e:
# Checksums failed to verify, trigger re-download and remove the
# incorrect stamp file.
logger.warning("Checksum mismatch for local file %s\n"
"Cleaning and trying again." % ud.localpath)
if os.path.exists(ud.localpath):
rename_bad_checksum(ud, e.checksum)
bb.utils.remove(ud.donestamp)
return False
def update_stamp(ud, d):
"""
    donestamp is a stamp file indicating that the whole fetch is done;
    this function updates the stamp after verifying the checksum
"""
if not ud.needdonestamp:
return
if os.path.exists(ud.donestamp):
# Touch the done stamp file to show active use of the download
try:
os.utime(ud.donestamp, None)
except:
# Errors aren't fatal here
pass
else:
try:
checksums = verify_checksum(ud, d)
# Store the checksums for later re-verification against the recipe
with open(ud.donestamp, "wb") as cachefile:
p = pickle.Pickler(cachefile, 2)
p.dump(checksums)
except ChecksumError as e:
# Checksums failed to verify, trigger re-download and remove the
# incorrect stamp file.
logger.warning("Checksum mismatch for local file %s\n"
"Cleaning and trying again." % ud.localpath)
if os.path.exists(ud.localpath):
rename_bad_checksum(ud, e.checksum)
bb.utils.remove(ud.donestamp)
raise
def subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
# SIGPIPE errors are known issues with gzip/bash
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def get_autorev(d):
    # Only avoid caching the srcrev in the autorev case
if d.getVar('BB_SRCREV_POLICY') != "cache":
d.setVar('BB_DONT_CACHE', '1')
return "AUTOINC"
def get_srcrev(d, method_name='sortable_revision'):
"""
Return the revision string, usually for use in the version string (PV) of the current package
Most packages usually only have one SCM so we just pass on the call.
In the multi SCM case, we build a value based on SRCREV_FORMAT which must
have been set.
The idea here is that we put the string "AUTOINC+" into return value if the revisions are not
incremental, other code is then responsible for turning that into an increasing value (if needed)
A method_name can be supplied to retrieve an alternatively formatted revision from a fetcher, if
that fetcher provides a method with the given name and the same signature as sortable_revision.
"""
scms = []
fetcher = Fetch(d.getVar('SRC_URI').split(), d)
urldata = fetcher.ud
for u in urldata:
if urldata[u].method.supports_srcrev():
scms.append(u)
if len(scms) == 0:
raise FetchError("SRCREV was used yet no valid SCM was found in SRC_URI")
if len(scms) == 1 and len(urldata[scms[0]].names) == 1:
autoinc, rev = getattr(urldata[scms[0]].method, method_name)(urldata[scms[0]], d, urldata[scms[0]].names[0])
if len(rev) > 10:
rev = rev[:10]
if autoinc:
return "AUTOINC+" + rev
return rev
#
    # Multiple SCMs are in SRC_URI so we resort to SRCREV_FORMAT
#
format = d.getVar('SRCREV_FORMAT')
if not format:
raise FetchError("The SRCREV_FORMAT variable must be set when multiple SCMs are used.")
name_to_rev = {}
seenautoinc = False
for scm in scms:
ud = urldata[scm]
for name in ud.names:
autoinc, rev = getattr(ud.method, method_name)(ud, d, name)
seenautoinc = seenautoinc or autoinc
if len(rev) > 10:
rev = rev[:10]
name_to_rev[name] = rev
# Replace names by revisions in the SRCREV_FORMAT string. The approach used
# here can handle names being prefixes of other names and names appearing
# as substrings in revisions (in which case the name should not be
# expanded). The '|' regular expression operator tries matches from left to
# right, so we need to sort the names with the longest ones first.
names_descending_len = sorted(name_to_rev, key=len, reverse=True)
name_to_rev_re = "|".join(re.escape(name) for name in names_descending_len)
format = re.sub(name_to_rev_re, lambda match: name_to_rev[match.group(0)], format)
if seenautoinc:
format = "AUTOINC+" + format
return format
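def _srcrev_format_example():
    # Illustrative sketch (not part of the upstream module): the name->revision
    # substitution used above for the multi-SCM SRCREV_FORMAT case. Names and
    # revisions are made up; sorting longest-first means "metadata" is matched
    # before its prefix "meta".
    name_to_rev = {"meta": "a1b2c3d4e5", "metadata": "0f9e8d7c6b"}
    fmt = "meta_metadata"
    names_descending_len = sorted(name_to_rev, key=len, reverse=True)
    name_to_rev_re = "|".join(re.escape(name) for name in names_descending_len)
    return re.sub(name_to_rev_re, lambda match: name_to_rev[match.group(0)], fmt)
    # -> "a1b2c3d4e5_0f9e8d7c6b"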
def localpath(url, d):
fetcher = bb.fetch2.Fetch([url], d)
return fetcher.localpath(url)
def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None):
"""
Run cmd returning the command output
Raise an error if interrupted or cmd fails
Optionally echo command output to stdout
Optionally remove the files/directories listed in cleanup upon failure
"""
# Need to export PATH as binary could be in metadata paths
# rather than host provided
# Also include some other variables.
    # FIXME: Should really include all export variables?
exportvars = ['HOME', 'PATH',
'HTTP_PROXY', 'http_proxy',
'HTTPS_PROXY', 'https_proxy',
'FTP_PROXY', 'ftp_proxy',
'FTPS_PROXY', 'ftps_proxy',
'NO_PROXY', 'no_proxy',
'ALL_PROXY', 'all_proxy',
'GIT_PROXY_COMMAND',
'GIT_SSL_CAINFO',
'GIT_SMART_HTTP',
'SSH_AUTH_SOCK', 'SSH_AGENT_PID',
'SOCKS5_USER', 'SOCKS5_PASSWD',
'DBUS_SESSION_BUS_ADDRESS',
'P4CONFIG']
if not cleanup:
cleanup = []
# If PATH contains WORKDIR which contains PV which contains SRCPV we
# can end up in circular recursion here so give the option of breaking it
# in a data store copy.
try:
d.getVar("PV")
except bb.data_smart.ExpansionError:
d = bb.data.createCopy(d)
d.setVar("PV", "fetcheravoidrecurse")
origenv = d.getVar("BB_ORIGENV", False)
for var in exportvars:
val = d.getVar(var) or (origenv and origenv.getVar(var))
if val:
cmd = 'export ' + var + '=\"%s\"; %s' % (val, cmd)
logger.debug(1, "Running %s", cmd)
success = False
error_message = ""
try:
(output, errors) = bb.process.run(cmd, log=log, shell=True, stderr=subprocess.PIPE, cwd=workdir)
success = True
except bb.process.NotFoundError as e:
error_message = "Fetch command %s" % (e.command)
except bb.process.ExecutionError as e:
if e.stdout:
output = "output:\n%s\n%s" % (e.stdout, e.stderr)
elif e.stderr:
output = "output:\n%s" % e.stderr
else:
output = "no output"
error_message = "Fetch command %s failed with exit code %s, %s" % (e.command, e.exitcode, output)
except bb.process.CmdError as e:
error_message = "Fetch command %s could not be run:\n%s" % (e.command, e.msg)
if not success:
for f in cleanup:
try:
bb.utils.remove(f, True)
except OSError:
pass
raise FetchError(error_message)
return output
def check_network_access(d, info, url):
"""
log remote network access, and error if BB_NO_NETWORK is set or the given
URI is untrusted
"""
if d.getVar("BB_NO_NETWORK") == "1":
raise NetworkAccess(url, info)
elif not trusted_network(d, url):
raise UntrustedUrl(url, info)
else:
logger.debug(1, "Fetcher accessed the network with the command %s" % info)
def build_mirroruris(origud, mirrors, ld):
uris = []
uds = []
replacements = {}
replacements["TYPE"] = origud.type
replacements["HOST"] = origud.host
replacements["PATH"] = origud.path
replacements["BASENAME"] = origud.path.split("/")[-1]
replacements["MIRRORNAME"] = origud.host.replace(':','.') + origud.path.replace('/', '.').replace('*', '.')
def adduri(ud, uris, uds, mirrors, tarballs):
for line in mirrors:
try:
(find, replace) = line
except ValueError:
continue
for tarball in tarballs:
newuri = uri_replace(ud, find, replace, replacements, ld, tarball)
if not newuri or newuri in uris or newuri == origud.url:
continue
if not trusted_network(ld, newuri):
logger.debug(1, "Mirror %s not in the list of trusted networks, skipping" % (newuri))
continue
# Create a local copy of the mirrors minus the current line
# this will prevent us from recursively processing the same line
# as well as indirect recursion A -> B -> C -> A
localmirrors = list(mirrors)
localmirrors.remove(line)
try:
newud = FetchData(newuri, ld)
newud.setup_localpath(ld)
except bb.fetch2.BBFetchException as e:
logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (newuri, origud.url))
logger.debug(1, str(e))
try:
# setup_localpath of file:// urls may fail, we should still see
# if mirrors of the url exist
adduri(newud, uris, uds, localmirrors, tarballs)
except UnboundLocalError:
pass
continue
uris.append(newuri)
uds.append(newud)
adduri(newud, uris, uds, localmirrors, tarballs)
adduri(origud, uris, uds, mirrors, origud.mirrortarballs or [None])
return uris, uds
def rename_bad_checksum(ud, suffix):
"""
Renames files to have suffix from parameter
"""
if ud.localpath is None:
return
new_localpath = "%s_bad-checksum_%s" % (ud.localpath, suffix)
bb.warn("Renaming %s to %s" % (ud.localpath, new_localpath))
bb.utils.movefile(ud.localpath, new_localpath)
def try_mirror_url(fetch, origud, ud, ld, check = False):
# Return of None or a value means we're finished
# False means try another url
if ud.lockfile and ud.lockfile != origud.lockfile:
lf = bb.utils.lockfile(ud.lockfile)
try:
if check:
found = ud.method.checkstatus(fetch, ud, ld)
if found:
return found
return False
if not verify_donestamp(ud, ld, origud) or ud.method.need_update(ud, ld):
ud.method.download(ud, ld)
if hasattr(ud.method,"build_mirror_data"):
ud.method.build_mirror_data(ud, ld)
if not ud.localpath or not os.path.exists(ud.localpath):
return False
if ud.localpath == origud.localpath:
return ud.localpath
# We may be obtaining a mirror tarball which needs further processing by the real fetcher
# If that tarball is a local file:// we need to provide a symlink to it
dldir = ld.getVar("DL_DIR")
if origud.mirrortarballs and os.path.basename(ud.localpath) in origud.mirrortarballs and os.path.basename(ud.localpath) != os.path.basename(origud.localpath):
# Create donestamp in old format to avoid triggering a re-download
if ud.donestamp:
bb.utils.mkdirhier(os.path.dirname(ud.donestamp))
open(ud.donestamp, 'w').close()
dest = os.path.join(dldir, os.path.basename(ud.localpath))
if not os.path.exists(dest):
# In case this is executing without any file locks held (as is
# the case for file:// URLs), two tasks may end up here at the
# same time, in which case we do not want the second task to
# fail when the link has already been created by the first task.
try:
os.symlink(ud.localpath, dest)
except FileExistsError:
pass
if not verify_donestamp(origud, ld) or origud.method.need_update(origud, ld):
origud.method.download(origud, ld)
if hasattr(origud.method, "build_mirror_data"):
origud.method.build_mirror_data(origud, ld)
return origud.localpath
# Otherwise the result is a local file:// and we symlink to it
if not os.path.exists(origud.localpath):
if os.path.islink(origud.localpath):
# Broken symbolic link
os.unlink(origud.localpath)
# As per above, in case two tasks end up here simultaneously.
try:
os.symlink(ud.localpath, origud.localpath)
except FileExistsError:
pass
update_stamp(origud, ld)
return ud.localpath
except bb.fetch2.NetworkAccess:
raise
except IOError as e:
        if e.errno in [errno.ESTALE]:
logger.warning("Stale Error Observed %s." % ud.url)
return False
raise
except bb.fetch2.BBFetchException as e:
if isinstance(e, ChecksumError):
logger.warning("Mirror checksum failure for url %s (original url: %s)\nCleaning and trying again." % (ud.url, origud.url))
logger.warning(str(e))
if os.path.exists(ud.localpath):
rename_bad_checksum(ud, e.checksum)
elif isinstance(e, NoChecksumError):
raise
else:
logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (ud.url, origud.url))
logger.debug(1, str(e))
try:
ud.method.clean(ud, ld)
except UnboundLocalError:
pass
return False
finally:
if ud.lockfile and ud.lockfile != origud.lockfile:
bb.utils.unlockfile(lf)
def try_mirrors(fetch, d, origud, mirrors, check = False):
"""
Try to use a mirrored version of the sources.
This method will be automatically called before the fetchers go.
    d is a bb.data instance
    origud is the FetchData of the original uri we're trying to download
    mirrors is the list of mirrors we're going to try
"""
ld = d.createCopy()
uris, uds = build_mirroruris(origud, mirrors, ld)
for index, uri in enumerate(uris):
ret = try_mirror_url(fetch, origud, uds[index], ld, check)
if ret != False:
return ret
return None
def trusted_network(d, url):
"""
Use a trusted url during download if networking is enabled and
BB_ALLOWED_NETWORKS is set globally or for a specific recipe.
Note: modifies SRC_URI & mirrors.
"""
if d.getVar('BB_NO_NETWORK') == "1":
return True
pkgname = d.expand(d.getVar('PN', False))
trusted_hosts = d.getVarFlag('BB_ALLOWED_NETWORKS', pkgname, False)
if not trusted_hosts:
trusted_hosts = d.getVar('BB_ALLOWED_NETWORKS')
# Not enabled.
if not trusted_hosts:
return True
scheme, network, path, user, passwd, param = decodeurl(url)
if not network:
return True
network = network.split(':')[0]
network = network.lower()
for host in trusted_hosts.split(" "):
host = host.lower()
if host.startswith("*.") and ("." + network).endswith(host[1:]):
return True
if host == network:
return True
return False
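def _trusted_network_example(d):
    # Illustrative sketch (not part of the upstream module): assumes `d` is a
    # populated datastore with BB_ALLOWED_NETWORKS = "*.example.com git.openembedded.org"
    # and BB_NO_NETWORK unset. "*." entries match any subdomain; comparisons are
    # case-insensitive and ignore any port.
    assert trusted_network(d, "https://git.example.com/repo.git;branch=main")
    assert trusted_network(d, "git://git.openembedded.org/bitbake")
    assert not trusted_network(d, "https://evil.example.org/payload.tar.gz")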
def srcrev_internal_helper(ud, d, name):
"""
Return:
a) a source revision if specified
b) latest revision if SRCREV="AUTOINC"
c) None if not specified
"""
srcrev = None
pn = d.getVar("PN")
attempts = []
if name != '' and pn:
attempts.append("SRCREV_%s_pn-%s" % (name, pn))
if name != '':
attempts.append("SRCREV_%s" % name)
if pn:
attempts.append("SRCREV_pn-%s" % pn)
attempts.append("SRCREV")
for a in attempts:
srcrev = d.getVar(a)
if srcrev and srcrev != "INVALID":
break
if 'rev' in ud.parm and 'tag' in ud.parm:
raise FetchError("Please specify a ;rev= parameter or a ;tag= parameter in the url %s but not both." % (ud.url))
if 'rev' in ud.parm or 'tag' in ud.parm:
if 'rev' in ud.parm:
parmrev = ud.parm['rev']
else:
parmrev = ud.parm['tag']
if srcrev == "INVALID" or not srcrev:
return parmrev
if srcrev != parmrev:
raise FetchError("Conflicting revisions (%s from SRCREV and %s from the url) found, please specify one valid value" % (srcrev, parmrev))
return parmrev
if srcrev == "INVALID" or not srcrev:
raise FetchError("Please set a valid SRCREV for url %s (possible key names are %s, or use a ;rev=X URL parameter)" % (str(attempts), ud.url), ud.url)
if srcrev == "AUTOINC":
srcrev = ud.method.latest_revision(ud, d, name)
return srcrev
def get_checksum_file_list(d):
""" Get a list of files checksum in SRC_URI
Returns the resolved local paths of all local file entries in
SRC_URI as a space-separated string
"""
fetch = Fetch([], d, cache = False, localonly = True)
dl_dir = d.getVar('DL_DIR')
filelist = []
for u in fetch.urls:
ud = fetch.ud[u]
if ud and isinstance(ud.method, local.Local):
paths = ud.method.localpaths(ud, d)
for f in paths:
pth = ud.decodedurl
if '*' in pth:
f = os.path.join(os.path.abspath(f), pth)
if f.startswith(dl_dir):
# The local fetcher's behaviour is to return a path under DL_DIR if it couldn't find the file anywhere else
if os.path.exists(f):
bb.warn("Getting checksum for %s SRC_URI entry %s: file not found except in DL_DIR" % (d.getVar('PN'), os.path.basename(f)))
else:
bb.warn("Unable to get checksum for %s SRC_URI entry %s: file could not be found" % (d.getVar('PN'), os.path.basename(f)))
filelist.append(f + ":" + str(os.path.exists(f)))
return " ".join(filelist)
def get_file_checksums(filelist, pn):
"""Get a list of the checksums for a list of local files
Returns the checksums for a list of local files, caching the results as
it proceeds
"""
return _checksum_cache.get_checksums(filelist, pn)
class FetchData(object):
"""
A class which represents the fetcher state for a given URI.
"""
def __init__(self, url, d, localonly = False):
# localpath is the location of a downloaded result. If not set, the file is local.
self.donestamp = None
self.needdonestamp = True
self.localfile = ""
self.localpath = None
self.lockfile = None
self.mirrortarballs = []
self.basename = None
self.basepath = None
(self.type, self.host, self.path, self.user, self.pswd, self.parm) = decodeurl(d.expand(url))
self.date = self.getSRCDate(d)
self.url = url
if not self.user and "user" in self.parm:
self.user = self.parm["user"]
if not self.pswd and "pswd" in self.parm:
self.pswd = self.parm["pswd"]
self.setup = False
if "name" in self.parm:
self.md5_name = "%s.md5sum" % self.parm["name"]
self.sha256_name = "%s.sha256sum" % self.parm["name"]
else:
self.md5_name = "md5sum"
self.sha256_name = "sha256sum"
if self.md5_name in self.parm:
self.md5_expected = self.parm[self.md5_name]
elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3"]:
self.md5_expected = None
else:
self.md5_expected = d.getVarFlag("SRC_URI", self.md5_name)
if self.sha256_name in self.parm:
self.sha256_expected = self.parm[self.sha256_name]
elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3"]:
self.sha256_expected = None
else:
self.sha256_expected = d.getVarFlag("SRC_URI", self.sha256_name)
self.ignore_checksums = False
self.names = self.parm.get("name",'default').split(',')
self.method = None
for m in methods:
if m.supports(self, d):
self.method = m
break
if not self.method:
raise NoMethodError(url)
if localonly and not isinstance(self.method, local.Local):
raise NonLocalMethod()
if self.parm.get("proto", None) and "protocol" not in self.parm:
logger.warning('Consider updating %s recipe to use "protocol" not "proto" in SRC_URI.', d.getVar('PN'))
self.parm["protocol"] = self.parm.get("proto", None)
if hasattr(self.method, "urldata_init"):
self.method.urldata_init(self, d)
if "localpath" in self.parm:
# if user sets localpath for file, use it instead.
self.localpath = self.parm["localpath"]
self.basename = os.path.basename(self.localpath)
elif self.localfile:
self.localpath = self.method.localpath(self, d)
dldir = d.getVar("DL_DIR")
if not self.needdonestamp:
return
# Note: .done and .lock files should always be in DL_DIR whereas localpath may not be.
if self.localpath and self.localpath.startswith(dldir):
basepath = self.localpath
elif self.localpath:
basepath = dldir + os.sep + os.path.basename(self.localpath)
elif self.basepath or self.basename:
basepath = dldir + os.sep + (self.basepath or self.basename)
else:
bb.fatal("Can't determine lock path for url %s" % url)
self.donestamp = basepath + '.done'
self.lockfile = basepath + '.lock'
def setup_revisions(self, d):
self.revisions = {}
for name in self.names:
self.revisions[name] = srcrev_internal_helper(self, d, name)
        # Add compatibility code for the case where no name is specified
if len(self.names) == 1:
self.revision = self.revisions[self.names[0]]
def setup_localpath(self, d):
if not self.localpath:
self.localpath = self.method.localpath(self, d)
def getSRCDate(self, d):
"""
Return the SRC Date for the component
d the bb.data module
"""
if "srcdate" in self.parm:
return self.parm['srcdate']
pn = d.getVar("PN")
if pn:
return d.getVar("SRCDATE_%s" % pn) or d.getVar("SRCDATE") or d.getVar("DATE")
return d.getVar("SRCDATE") or d.getVar("DATE")
class FetchMethod(object):
"""Base class for 'fetch'ing data"""
def __init__(self, urls=None):
self.urls = []
def supports(self, urldata, d):
"""
Check to see if this fetch class supports a given url.
"""
return 0
def localpath(self, urldata, d):
"""
Return the local filename of a given url assuming a successful fetch.
Can also setup variables in urldata for use in go (saving code duplication
and duplicate code execution)
"""
return os.path.join(d.getVar("DL_DIR"), urldata.localfile)
def supports_checksum(self, urldata):
"""
Is localpath something that can be represented by a checksum?
"""
# We cannot compute checksums for directories
        if os.path.isdir(urldata.localpath):
return False
if urldata.localpath.find("*") != -1:
return False
return True
def recommends_checksum(self, urldata):
"""
        Is this backend one where checksumming is recommended (should warnings
        be displayed if there is no checksum)?
"""
return False
def _strip_leading_slashes(self, relpath):
"""
Remove leading slash as os.path.join can't cope
"""
while os.path.isabs(relpath):
relpath = relpath[1:]
return relpath
def setUrls(self, urls):
self.__urls = urls
def getUrls(self):
return self.__urls
urls = property(getUrls, setUrls, None, "Urls property")
def need_update(self, ud, d):
"""
Force a fetch, even if localpath exists?
"""
if os.path.exists(ud.localpath):
return False
return True
def supports_srcrev(self):
"""
The fetcher supports auto source revisions (SRCREV)
"""
return False
def download(self, urldata, d):
"""
Fetch urls
Assumes localpath was called first
"""
        raise NoMethodError(urldata.url)
def unpack(self, urldata, rootdir, data):
iterate = False
file = urldata.localpath
        # Localpath can't deal with 'dir/*' entries, so it converts them to '.',
        # but this must be corrected back when copying local files
if urldata.basename == '*' and file.endswith('/.'):
file = '%s/%s' % (file.rstrip('/.'), urldata.path)
try:
unpack = bb.utils.to_boolean(urldata.parm.get('unpack'), True)
except ValueError as exc:
bb.fatal("Invalid value for 'unpack' parameter for %s: %s" %
(file, urldata.parm.get('unpack')))
base, ext = os.path.splitext(file)
if ext in ['.gz', '.bz2', '.Z', '.xz', '.lz']:
efile = os.path.join(rootdir, os.path.basename(base))
else:
efile = file
cmd = None
if unpack:
if file.endswith('.tar'):
cmd = 'tar x --no-same-owner -f %s' % file
elif file.endswith('.tgz') or file.endswith('.tar.gz') or file.endswith('.tar.Z'):
cmd = 'tar xz --no-same-owner -f %s' % file
elif file.endswith('.tbz') or file.endswith('.tbz2') or file.endswith('.tar.bz2'):
cmd = 'bzip2 -dc %s | tar x --no-same-owner -f -' % file
elif file.endswith('.gz') or file.endswith('.Z') or file.endswith('.z'):
cmd = 'gzip -dc %s > %s' % (file, efile)
elif file.endswith('.bz2'):
cmd = 'bzip2 -dc %s > %s' % (file, efile)
elif file.endswith('.tar.xz'):
cmd = 'xz -dc %s | tar x --no-same-owner -f -' % file
elif file.endswith('.xz'):
cmd = 'xz -dc %s > %s' % (file, efile)
elif file.endswith('.tar.lz'):
cmd = 'lzip -dc %s | tar x --no-same-owner -f -' % file
elif file.endswith('.lz'):
cmd = 'lzip -dc %s > %s' % (file, efile)
elif file.endswith('.tar.7z'):
cmd = '7z x -so %s | tar x --no-same-owner -f -' % file
elif file.endswith('.7z'):
cmd = '7za x -y %s 1>/dev/null' % file
elif file.endswith('.zip') or file.endswith('.jar'):
try:
dos = bb.utils.to_boolean(urldata.parm.get('dos'), False)
except ValueError as exc:
bb.fatal("Invalid value for 'dos' parameter for %s: %s" %
(file, urldata.parm.get('dos')))
cmd = 'unzip -q -o'
if dos:
cmd = '%s -a' % cmd
cmd = "%s '%s'" % (cmd, file)
elif file.endswith('.rpm') or file.endswith('.srpm'):
if 'extract' in urldata.parm:
unpack_file = urldata.parm.get('extract')
cmd = 'rpm2cpio.sh %s | cpio -id %s' % (file, unpack_file)
iterate = True
iterate_file = unpack_file
else:
cmd = 'rpm2cpio.sh %s | cpio -id' % (file)
elif file.endswith('.deb') or file.endswith('.ipk'):
output = subprocess.check_output('ar -t %s' % file, preexec_fn=subprocess_setup, shell=True)
datafile = None
if output:
for line in output.decode().splitlines():
if line.startswith('data.tar.'):
datafile = line
break
else:
raise UnpackError("Unable to unpack deb/ipk package - does not contain data.tar.* file", urldata.url)
else:
raise UnpackError("Unable to unpack deb/ipk package - could not list contents", urldata.url)
cmd = 'ar x %s %s && tar --no-same-owner -xpf %s && rm %s' % (file, datafile, datafile, datafile)
# If 'subdir' param exists, create a dir and use it as destination for unpack cmd
if 'subdir' in urldata.parm:
subdir = urldata.parm.get('subdir')
if os.path.isabs(subdir):
if not os.path.realpath(subdir).startswith(os.path.realpath(rootdir)):
raise UnpackError("subdir argument isn't a subdirectory of unpack root %s" % rootdir, urldata.url)
unpackdir = subdir
else:
unpackdir = os.path.join(rootdir, subdir)
bb.utils.mkdirhier(unpackdir)
else:
unpackdir = rootdir
if not unpack or not cmd:
# If file == dest, then avoid any copies, as we already put the file into dest!
dest = os.path.join(unpackdir, os.path.basename(file))
if file != dest and not (os.path.exists(dest) and os.path.samefile(file, dest)):
destdir = '.'
# For file:// entries all intermediate dirs in path must be created at destination
if urldata.type == "file":
                # A trailing '/' makes the copy go to the wrong place
                urlpath = urldata.path.rstrip('/')
                # We want files placed relative to the cwd, so strip any leading '/'
urlpath = urlpath.lstrip('/')
if urlpath.find("/") != -1:
destdir = urlpath.rsplit("/", 1)[0] + '/'
bb.utils.mkdirhier("%s/%s" % (unpackdir, destdir))
cmd = 'cp -fpPRH %s %s' % (file, destdir)
if not cmd:
return
path = data.getVar('PATH')
if path:
cmd = "PATH=\"%s\" %s" % (path, cmd)
bb.note("Unpacking %s to %s/" % (file, unpackdir))
ret = subprocess.call(cmd, preexec_fn=subprocess_setup, shell=True, cwd=unpackdir)
if ret != 0:
raise UnpackError("Unpack command %s failed with return value %s" % (cmd, ret), urldata.url)
if iterate is True:
iterate_urldata = urldata
iterate_urldata.localpath = "%s/%s" % (rootdir, iterate_file)
self.unpack(urldata, rootdir, data)
return
def clean(self, urldata, d):
"""
Clean any existing full or partial download
"""
bb.utils.remove(urldata.localpath)
def try_premirror(self, urldata, d):
"""
Should premirrors be used?
"""
return True
def checkstatus(self, fetch, urldata, d):
"""
Check the status of a URL
Assumes localpath was called first
"""
logger.info("URL %s could not be checked for status since no method exists.", url)
return True
def latest_revision(self, ud, d, name):
"""
Look in the cache for the latest revision, if not present ask the SCM.
"""
if not hasattr(self, "_latest_revision"):
raise ParameterError("The fetcher for this URL does not support _latest_revision", url)
revs = bb.persist_data.persist('BB_URI_HEADREVS', d)
key = self.generate_revision_key(ud, d, name)
try:
return revs[key]
except KeyError:
revs[key] = rev = self._latest_revision(ud, d, name)
return rev
def sortable_revision(self, ud, d, name):
latest_rev = self._build_revision(ud, d, name)
return True, str(latest_rev)
def generate_revision_key(self, ud, d, name):
key = self._revision_key(ud, d, name)
return "%s-%s" % (key, d.getVar("PN") or "")
def latest_versionstring(self, ud, d):
"""
        Compute the latest release name like "x.y.z" in "x.y.z+gitHASH"
by searching through the tags output of ls-remote, comparing
versions and returning the highest match as a (version, revision) pair.
"""
return ('', '')
class Fetch(object):
def __init__(self, urls, d, cache = True, localonly = False, connection_cache = None):
if localonly and cache:
raise Exception("bb.fetch2.Fetch.__init__: cannot set cache and localonly at same time")
if len(urls) == 0:
urls = d.getVar("SRC_URI").split()
self.urls = urls
self.d = d
self.ud = {}
self.connection_cache = connection_cache
fn = d.getVar('FILE')
mc = d.getVar('__BBMULTICONFIG') or ""
if cache and fn and mc + fn in urldata_cache:
self.ud = urldata_cache[mc + fn]
for url in urls:
if url not in self.ud:
try:
self.ud[url] = FetchData(url, d, localonly)
except NonLocalMethod:
if localonly:
self.ud[url] = None
pass
if fn and cache:
urldata_cache[mc + fn] = self.ud
def localpath(self, url):
if url not in self.urls:
self.ud[url] = FetchData(url, self.d)
self.ud[url].setup_localpath(self.d)
return self.d.expand(self.ud[url].localpath)
def localpaths(self):
"""
Return a list of the local filenames, assuming successful fetch
"""
local = []
for u in self.urls:
ud = self.ud[u]
ud.setup_localpath(self.d)
local.append(ud.localpath)
return local
def download(self, urls=None):
"""
Fetch all urls
"""
if not urls:
urls = self.urls
network = self.d.getVar("BB_NO_NETWORK")
premirroronly = (self.d.getVar("BB_FETCH_PREMIRRORONLY") == "1")
for u in urls:
ud = self.ud[u]
ud.setup_localpath(self.d)
m = ud.method
localpath = ""
if ud.lockfile:
lf = bb.utils.lockfile(ud.lockfile)
try:
self.d.setVar("BB_NO_NETWORK", network)
if verify_donestamp(ud, self.d) and not m.need_update(ud, self.d):
localpath = ud.localpath
elif m.try_premirror(ud, self.d):
logger.debug(1, "Trying PREMIRRORS")
mirrors = mirror_from_string(self.d.getVar('PREMIRRORS'))
localpath = try_mirrors(self, self.d, ud, mirrors, False)
if localpath:
try:
# early checksum verification so that if the checksum of the premirror
# contents mismatch the fetcher can still try upstream and mirrors
update_stamp(ud, self.d)
except ChecksumError as e:
logger.warning("Checksum failure encountered with premirror download of %s - will attempt other sources." % u)
logger.debug(1, str(e))
localpath = ""
if premirroronly:
self.d.setVar("BB_NO_NETWORK", "1")
firsterr = None
verified_stamp = verify_donestamp(ud, self.d)
if not localpath and (not verified_stamp or m.need_update(ud, self.d)):
try:
if not trusted_network(self.d, ud.url):
raise UntrustedUrl(ud.url)
logger.debug(1, "Trying Upstream")
m.download(ud, self.d)
if hasattr(m, "build_mirror_data"):
m.build_mirror_data(ud, self.d)
localpath = ud.localpath
                        # Early checksum verification, so that if the checksum mismatches
                        # the fetcher still has a chance to fetch from a mirror
update_stamp(ud, self.d)
except bb.fetch2.NetworkAccess:
raise
except BBFetchException as e:
if isinstance(e, ChecksumError):
logger.warning("Checksum failure encountered with download of %s - will attempt other sources if available" % u)
logger.debug(1, str(e))
if os.path.exists(ud.localpath):
rename_bad_checksum(ud, e.checksum)
elif isinstance(e, NoChecksumError):
raise
else:
logger.warning('Failed to fetch URL %s, attempting MIRRORS if available' % u)
logger.debug(1, str(e))
firsterr = e
# Remove any incomplete fetch
if not verified_stamp:
m.clean(ud, self.d)
logger.debug(1, "Trying MIRRORS")
mirrors = mirror_from_string(self.d.getVar('MIRRORS'))
localpath = try_mirrors(self, self.d, ud, mirrors)
if not localpath or ((not os.path.exists(localpath)) and localpath.find("*") == -1):
if firsterr:
logger.error(str(firsterr))
raise FetchError("Unable to fetch URL from any source.", u)
update_stamp(ud, self.d)
except IOError as e:
if e.errno in [os.errno.ESTALE]:
logger.error("Stale Error Observed %s." % u)
raise ChecksumError("Stale Error Detected")
except BBFetchException as e:
if isinstance(e, ChecksumError):
logger.error("Checksum failure fetching %s" % u)
raise
finally:
if ud.lockfile:
bb.utils.unlockfile(lf)
def checkstatus(self, urls=None):
"""
Check all urls exist upstream
"""
if not urls:
urls = self.urls
for u in urls:
ud = self.ud[u]
ud.setup_localpath(self.d)
m = ud.method
logger.debug(1, "Testing URL %s", u)
# First try checking uri, u, from PREMIRRORS
mirrors = mirror_from_string(self.d.getVar('PREMIRRORS'))
ret = try_mirrors(self, self.d, ud, mirrors, True)
if not ret:
# Next try checking from the original uri, u
ret = m.checkstatus(self, ud, self.d)
if not ret:
# Finally, try checking uri, u, from MIRRORS
mirrors = mirror_from_string(self.d.getVar('MIRRORS'))
ret = try_mirrors(self, self.d, ud, mirrors, True)
if not ret:
raise FetchError("URL %s doesn't work" % u, u)
def unpack(self, root, urls=None):
"""
Unpack urls to root
"""
if not urls:
urls = self.urls
for u in urls:
ud = self.ud[u]
ud.setup_localpath(self.d)
if ud.lockfile:
lf = bb.utils.lockfile(ud.lockfile)
ud.method.unpack(ud, root, self.d)
if ud.lockfile:
bb.utils.unlockfile(lf)
def clean(self, urls=None):
"""
Clean files that the fetcher gets or places
"""
if not urls:
urls = self.urls
for url in urls:
if url not in self.ud:
                self.ud[url] = FetchData(url, self.d)
ud = self.ud[url]
ud.setup_localpath(self.d)
if not ud.localfile and ud.localpath is None:
continue
if ud.lockfile:
lf = bb.utils.lockfile(ud.lockfile)
ud.method.clean(ud, self.d)
if ud.donestamp:
bb.utils.remove(ud.donestamp)
if ud.lockfile:
bb.utils.unlockfile(lf)
class FetchConnectionCache(object):
"""
    A class which represents a container for socket connections.
"""
def __init__(self):
self.cache = {}
def get_connection_name(self, host, port):
return host + ':' + str(port)
def add_connection(self, host, port, connection):
cn = self.get_connection_name(host, port)
if cn not in self.cache:
self.cache[cn] = connection
def get_connection(self, host, port):
connection = None
cn = self.get_connection_name(host, port)
if cn in self.cache:
connection = self.cache[cn]
return connection
def remove_connection(self, host, port):
cn = self.get_connection_name(host, port)
if cn in self.cache:
self.cache[cn].close()
del self.cache[cn]
def close_connections(self):
for cn in list(self.cache.keys()):
self.cache[cn].close()
del self.cache[cn]
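# Illustrative sketch (not part of the original module): how a caller might
# reuse connections through FetchConnectionCache. The host/port and the dummy
# connection object below are made-up placeholders; any object exposing a
# close() method can be cached. Guarded so nothing runs on import.
if __name__ == "__main__":
    class _DummyConnection:
        def close(self):
            print("connection closed")

    _cache = FetchConnectionCache()
    if _cache.get_connection("ftp.example.com", 21) is None:
        _cache.add_connection("ftp.example.com", 21, _DummyConnection())
    _conn = _cache.get_connection("ftp.example.com", 21)
    # ... use _conn for one or more transfers ...
    _cache.close_connections()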
from . import cvs
from . import git
from . import gitsm
from . import gitannex
from . import local
from . import svn
from . import wget
from . import ssh
from . import sftp
from . import s3
from . import perforce
from . import bzr
from . import hg
from . import osc
from . import repo
from . import clearcase
from . import npm
methods.append(local.Local())
methods.append(wget.Wget())
methods.append(svn.Svn())
methods.append(git.Git())
methods.append(gitsm.GitSM())
methods.append(gitannex.GitANNEX())
methods.append(cvs.Cvs())
methods.append(ssh.SSH())
methods.append(sftp.SFTP())
methods.append(s3.S3())
methods.append(perforce.Perforce())
methods.append(bzr.Bzr())
methods.append(hg.Hg())
methods.append(osc.Osc())
methods.append(repo.Repo())
methods.append(clearcase.ClearCase())
methods.append(npm.Npm())
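# Illustrative sketch (not part of the original module): typical use of the
# Fetch front-end defined above. The datastore ``d``, the work directory and
# the example URL are assumed to be supplied by BitBake; this helper is never
# called from the module itself.
def _example_fetch(d, workdir):
    fetcher = Fetch(["https://example.com/source-1.0.tar.gz"], d)
    fetcher.download()            # premirrors, then upstream, then MIRRORS
    paths = fetcher.localpaths()  # local filenames after a successful fetch
    fetcher.unpack(workdir)       # unpack the fetched sources into workdir
    return paths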
|
# -*- coding: utf-8 -*-
from os.path import dirname, abspath
import sys
sys.path.insert(0, dirname(dirname(abspath(__file__))))
import utils.config_loader as config
import utils.tools as tools
import torch
import shutil
versions = ['sl', 'alpha']
para_org = True
for vv in versions:
if config.meta_model_name.endswith(vv):
para_org = False
def sort_sid2score(sid2score):
sid_score_list = sorted(sid2score.items(), key=lambda item: item[1], reverse=True)
return sid_score_list
def get_rank_records(sid_score_list, sents=None, flat_sents=False):
"""
optional: display sentence in record
:param sid_score_list:
:param sents:
    :param flat_sents: if True, index sents directly by rank position; if False, use the sid to look up doc_idx and sent_idx.
:return:
"""
rank_records = []
for sid, score in sid_score_list:
items = [sid, str(score)]
if sents:
if flat_sents:
                sent = sents[len(rank_records)]  # index by current rank position
else:
doc_idx, sent_idx = tools.get_sent_info(sid)
sent = sents[doc_idx][sent_idx]
items.append(sent)
record = '\t'.join(items)
rank_records.append(record)
return rank_records
def dump_rank_records(rank_records, out_fp, with_rank_idx):
"""
each line is
ranking sid score
sid: config.SEP.join((doc_idx, para_idx, sent_idx))
    :param rank_records: list of tab-separated record strings
    :param out_fp: output file path (records are appended)
    :param with_rank_idx: if True, prefix each line with its rank index
    :return: number of lines written
"""
lines = rank_records
if with_rank_idx:
lines = ['\t'.join((str(rank), record)) for rank, record in enumerate(rank_records)]
with open(out_fp, mode='a', encoding='utf-8') as f:
f.write('\n'.join(lines))
return len(lines)
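# Illustrative sketch (not part of the original module): the record layout the
# helpers above produce, shown with toy scores. The '_'-separated sids are an
# assumed stand-in for config.SEP-joined ids; with flat_sents=True sentences
# are indexed by rank position, so no sid parsing is needed.
if __name__ == '__main__':
    toy_scores = {'0_0_0': 0.91, '0_0_1': 0.42, '1_0_0': 0.77}
    toy_sents = ['first sentence', 'second sentence', 'third sentence']
    ranked = sort_sid2score(toy_scores)
    for record in get_rank_records(ranked, sents=toy_sents, flat_sents=True):
        print(record)  # e.g. "0_0_0\t0.91\tfirst sentence"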
|
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .server import MarsWebAPI, MarsRequestHandler, register_web_handler, get_jinja_env
_jinja_env = get_jinja_env()
class DashboardHandler(MarsRequestHandler):
def get(self):
web_api = MarsWebAPI(self._scheduler)
scheduler_infos = web_api.get_schedulers_info()
worker_infos = web_api.get_workers_meta()
scheduler_summary = {
'count': len(scheduler_infos),
'cpu_used': sum(si['cpu_used'] for si in scheduler_infos.values()),
'cpu_total': sum(si['cpu_total'] for si in scheduler_infos.values()),
'memory_used': sum(si['memory_used'] for si in scheduler_infos.values()),
'memory_total': sum(si['memory_total'] for si in scheduler_infos.values()),
'git_branches': set(si['git_info'] for si in scheduler_infos.values()),
}
worker_summary = {
'count': len(worker_infos),
'cpu_used': sum(wi['hardware']['cpu_used'] for wi in worker_infos.values()),
'cpu_total': sum(wi['hardware']['cpu_total'] for wi in worker_infos.values()),
'memory_used': sum(wi['hardware']['memory_used'] for wi in worker_infos.values()),
'memory_total': sum(wi['hardware']['memory_total'] for wi in worker_infos.values()),
'git_branches': set(wi['details']['git_info'] for wi in worker_infos.values()),
}
template = _jinja_env.get_template('dashboard.html')
self.write_rendered(
template,
scheduler_summary=scheduler_summary,
worker_summary=worker_summary
)
register_web_handler('/', DashboardHandler)
|
"""
Our modification of the OpenAI Gym Continuous Mountain Car by Olivier Sigaud:
https://github.com/openai/gym/blob/master/gym/envs/classic_control/continuous_mountain_car.py
which was (ultimately) based on Sutton's implementation:
http://incompleteideas.net/sutton/MountainCar/MountainCar1.cp
"""
from pilco.errors import EnvironmentError
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
class MountainCar(gym.Env):
metadata = {'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 30}
def __init__(self):
# State and action bounds
self.min_action = -1.0
self.max_action = 1.0
self.min_position = - 3.0
self.max_position = 3.0
self.max_speed = 0.07
self.goal_position = 0.5
# Force per mass the car can output
self.power = 0.0015
self.low_state = np.array([self.min_position, -self.max_speed],
dtype=np.float32)
self.high_state = np.array([self.max_position, self.max_speed],
dtype=np.float32)
self.viewer = None
# Allowed action space
self.action_space = spaces.Box(low=self.min_action,
high=self.max_action,
shape=(1,),
dtype=np.float32)
self.seed()
# Temporary hack to work with rest of library
self.env = self
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
# Check if action is in permissible space
if not self.action_space.contains(action):
raise EnvironmentError(f'Expected action in the range of [-1., 1.] '
f'got action {action}.')
        # Unpack position and velocity
position, velocity = self.state
# Increment position by velocity
position_ = position + velocity
# Increment velocity by Euler rule and clip
velocity_ = velocity + action * self.power - 0.0025 * np.cos(3 * position)
velocity_ = np.clip(velocity_, - self.max_speed, self.max_speed)
self.state = np.array([position_, velocity_])
return self.state, None, False, {}
def reset(self):
self.state = np.array([-0.5, 0.])
return np.array(self.state)
def _height(self, xs):
return 0.55 + 0.45 * np.sin(3 * xs)
def render(self, mode='human'):
# Set picture size
screen_width = 600
screen_height = 400
world_width = self.max_position - self.min_position
scale = screen_width/world_width
# Set car size
carwidth = 40
carheight = 20
if self.viewer is None:
from gym.envs.classic_control import rendering
# Car constants
clearance = 10
# Overall viewer
self.viewer = rendering.Viewer(screen_width, screen_height)
# Track on which the car moves
xs = np.linspace(self.min_position, self.max_position, 200)
ys = self._height(xs)
xys = list(zip((xs - self.min_position) * scale, ys * scale))
# Add car
self.track = rendering.make_polyline(xys)
self.track.set_linewidth(4)
self.viewer.add_geom(self.track)
self.cartrans = rendering.Transform()
            # Car chassis
l, r, t, b = -carwidth / 2, carwidth / 2, carheight, 0
car = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
car.add_attr(rendering.Transform(translation=(0, clearance)))
car.add_attr(self.cartrans)
self.viewer.add_geom(car)
# Front wheel
frontwheel = rendering.make_circle(carheight / 2.5)
frontwheel.set_color(.5, .5, .5)
frontwheel.add_attr(rendering.Transform(translation=(carwidth / 4, clearance)))
frontwheel.add_attr(self.cartrans)
self.viewer.add_geom(frontwheel)
# Back wheel
backwheel = rendering.make_circle(carheight / 2.5)
backwheel.add_attr(rendering.Transform(translation=(-carwidth / 4, clearance)))
backwheel.add_attr(self.cartrans)
backwheel.set_color(.5, .5, .5)
self.viewer.add_geom(backwheel)
# Flagpole on mountain peak
flagx = scale * (0.5 - self.min_position)
flagy1 = scale * self._height(self.goal_position)
flagy2 = flagy1 + 50
flagpole = rendering.Line((flagx, flagy1),
(flagx, flagy2))
self.viewer.add_geom(flagpole)
# Flag on flagpole
flag = rendering.FilledPolygon([(flagx, flagy2),
(flagx, flagy2 - 10),
(flagx + 25, flagy2 - 5)])
flag.set_color(.8, .8, 0)
self.viewer.add_geom(flag)
# Translate and rotate car
self.cartrans.set_translation(scale * (self.state[0] - self.min_position),
scale * self._height(self.state[0]))
self.cartrans.set_rotation(np.cos(3 * self.state[0]))
return self.viewer.render(return_rgb_array=mode=='rgb_array')
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
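# Illustrative sketch (not part of the original module): rolling the modified
# environment forward with a constant push. Assumes gym and the pilco package
# are installed; rendering is skipped so it can run headless.
if __name__ == '__main__':
    env = MountainCar()
    state = env.reset()
    for _ in range(50):
        # Actions must be float32 arrays inside [-1, 1], otherwise step() raises
        state, _, _, _ = env.step(np.array([0.5], dtype=np.float32))
    print('final state:', state)
    env.close()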
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
__all__ = (
"mesh_linked_uv_islands",
"mesh_linked_triangles",
"edge_face_count_dict",
"edge_face_count",
"edge_loops_from_edges",
"ngon_tessellate",
"triangle_random_points",
)
def mesh_linked_uv_islands(mesh):
"""
Splits the mesh into connected polygons, use this for separating cubes from
other mesh elements within 1 mesh datablock.
:arg mesh: the mesh used to group with.
:type mesh: :class:`bpy.types.Mesh`
:return: lists of lists containing polygon indices
:rtype: list
"""
uv_loops = [luv.uv[:] for luv in mesh.uv_layers.active.data]
poly_loops = [poly.loop_indices for poly in mesh.polygons]
luv_hash = {}
luv_hash_get = luv_hash.get
luv_hash_ls = [None] * len(uv_loops)
for pi, poly_indices in enumerate(poly_loops):
for li in poly_indices:
uv = uv_loops[li]
uv_hub = luv_hash_get(uv)
if uv_hub is None:
uv_hub = luv_hash[uv] = [pi]
else:
uv_hub.append(pi)
luv_hash_ls[li] = uv_hub
poly_islands = []
# 0 = none, 1 = added, 2 = searched
poly_tag = [0] * len(poly_loops)
while True:
poly_index = -1
for i in range(len(poly_loops)):
if poly_tag[i] == 0:
poly_index = i
break
if poly_index != -1:
island = [poly_index]
poly_tag[poly_index] = 1
poly_islands.append(island)
else:
break # we're done
added = True
while added:
added = False
for poly_index in island[:]:
if poly_tag[poly_index] == 1:
for li in poly_loops[poly_index]:
for poly_index_shared in luv_hash_ls[li]:
if poly_tag[poly_index_shared] == 0:
added = True
poly_tag[poly_index_shared] = 1
island.append(poly_index_shared)
poly_tag[poly_index] = 2
return poly_islands
def mesh_linked_triangles(mesh):
"""
Splits the mesh into connected triangles, use this for separating cubes from
other mesh elements within 1 mesh datablock.
:arg mesh: the mesh used to group with.
:type mesh: :class:`bpy.types.Mesh`
:return: lists of lists containing triangles.
:rtype: list
"""
# Build vert face connectivity
vert_tris = [[] for i in range(len(mesh.vertices))]
for t in mesh.loop_triangles:
for v in t.vertices:
vert_tris[v].append(t)
# sort triangles into connectivity groups
tri_groups = [[t] for t in mesh.loop_triangles]
# map old, new tri location
tri_mapping = list(range(len(mesh.loop_triangles)))
# Now clump triangles iteratively
ok = True
while ok:
ok = False
for t in mesh.loop_triangles:
mapped_index = tri_mapping[t.index]
mapped_group = tri_groups[mapped_index]
for v in t.vertices:
for nxt_t in vert_tris[v]:
if nxt_t != t:
nxt_mapped_index = tri_mapping[nxt_t.index]
# We are not a part of the same group
if mapped_index != nxt_mapped_index:
ok = True
# Assign mapping to this group so they
# all map to this group
for grp_t in tri_groups[nxt_mapped_index]:
tri_mapping[grp_t.index] = mapped_index
# Move triangles into this group
mapped_group.extend(tri_groups[nxt_mapped_index])
# remove reference to the list
tri_groups[nxt_mapped_index] = None
# return all tri groups that are not null
# this is all the triangles that are connected in their own lists.
return [tg for tg in tri_groups if tg]
def edge_face_count_dict(mesh):
"""
:return: dict of edge keys with their value set to the number of
faces using each edge.
:rtype: dict
"""
face_edge_count = {}
loops = mesh.loops
edges = mesh.edges
for poly in mesh.polygons:
for i in poly.loop_indices:
key = edges[loops[i].edge_index].key
try:
face_edge_count[key] += 1
            except KeyError:
face_edge_count[key] = 1
return face_edge_count
def edge_face_count(mesh):
"""
:return: list face users for each item in mesh.edges.
:rtype: list
"""
edge_face_count = edge_face_count_dict(mesh)
get = dict.get
return [get(edge_face_count, ed.key, 0) for ed in mesh.edges]
def edge_loops_from_edges(mesh, edges=None):
"""
Edge loops defined by edges
Takes me.edges or a list of edges and returns the edge loops
return a list of vertex indices.
[ [1, 6, 7, 2], ...]
closed loops have matching start and end values.
"""
line_polys = []
# Get edges not used by a face
if edges is None:
edges = mesh.edges
if not hasattr(edges, "pop"):
edges = edges[:]
while edges:
current_edge = edges.pop()
vert_end, vert_start = current_edge.vertices[:]
line_poly = [vert_start, vert_end]
ok = True
while ok:
ok = False
# for i, ed in enumerate(edges):
i = len(edges)
while i:
i -= 1
ed = edges[i]
v1, v2 = ed.vertices
if v1 == vert_end:
line_poly.append(v2)
vert_end = line_poly[-1]
ok = 1
del edges[i]
# break
elif v2 == vert_end:
line_poly.append(v1)
vert_end = line_poly[-1]
ok = 1
del edges[i]
# break
elif v1 == vert_start:
line_poly.insert(0, v2)
vert_start = line_poly[0]
ok = 1
del edges[i]
# break
elif v2 == vert_start:
line_poly.insert(0, v1)
vert_start = line_poly[0]
ok = 1
del edges[i]
# break
line_polys.append(line_poly)
return line_polys
def ngon_tessellate(from_data, indices, fix_loops=True, debug_print=True):
"""
Takes a polyline of indices (ngon) and returns a list of face
index lists. Designed to be used for importers that need indices for an
ngon to create from existing verts.
:arg from_data: either a mesh, or a list/tuple of vectors.
:type from_data: list or :class:`bpy.types.Mesh`
:arg indices: a list of indices to use this list
is the ordered closed polyline
to fill, and can be a subset of the data given.
:type indices: list
:arg fix_loops: If this is enabled polylines
that use loops to make multiple
    polylines are dealt with correctly.
:type fix_loops: bool
"""
from mathutils.geometry import tessellate_polygon
from mathutils import Vector
vector_to_tuple = Vector.to_tuple
if not indices:
return []
def mlen(co):
        # Manhattan length of a vector, faster than length.
return abs(co[0]) + abs(co[1]) + abs(co[2])
def vert_treplet(v, i):
return v, vector_to_tuple(v, 6), i, mlen(v)
def ed_key_mlen(v1, v2):
if v1[3] > v2[3]:
return v2[1], v1[1]
else:
return v1[1], v2[1]
if not fix_loops:
# Normal single concave loop filling.
if type(from_data) in {tuple, list}:
verts = [Vector(from_data[i]) for ii, i in enumerate(indices)]
else:
verts = [from_data.vertices[i].co for ii, i in enumerate(indices)]
# same as reversed(range(1, len(verts))):
for i in range(len(verts) - 1, 0, -1):
if verts[i][1] == verts[i - 1][0]:
verts.pop(i - 1)
fill = tessellate_polygon([verts])
else:
        # Separate this loop into multiple loops by finding edges that are
        # used twice. This is used by LightWave LWO files a lot.
if type(from_data) in {tuple, list}:
verts = [
vert_treplet(Vector(from_data[i]), ii)
for ii, i in enumerate(indices)
]
else:
verts = [
vert_treplet(from_data.vertices[i].co, ii)
for ii, i in enumerate(indices)
]
edges = [(i, i - 1) for i in range(len(verts))]
if edges:
edges[0] = (0, len(verts) - 1)
if not verts:
return []
edges_used = set()
edges_doubles = set()
# We need to check if any edges are used twice location based.
for ed in edges:
edkey = ed_key_mlen(verts[ed[0]], verts[ed[1]])
if edkey in edges_used:
edges_doubles.add(edkey)
else:
edges_used.add(edkey)
# Store a list of unconnected loop segments split by double edges.
# will join later
loop_segments = []
v_prev = verts[0]
context_loop = [v_prev]
loop_segments = [context_loop]
for v in verts:
if v != v_prev:
# Are we crossing an edge we removed?
if ed_key_mlen(v, v_prev) in edges_doubles:
context_loop = [v]
loop_segments.append(context_loop)
else:
if context_loop and context_loop[-1][1] == v[1]:
pass
else:
context_loop.append(v)
v_prev = v
# Now join loop segments
def join_seg(s1, s2):
if s2[-1][1] == s1[0][1]:
s1, s2 = s2, s1
elif s1[-1][1] == s2[0][1]:
pass
else:
return False
# If were still here s1 and s2 are 2 segments in the same poly-line.
s1.pop() # remove the last vert from s1
s1.extend(s2) # add segment 2 to segment 1
if s1[0][1] == s1[-1][1]: # remove endpoints double
s1.pop()
del s2[:] # Empty this segment s2 so we don't use it again.
return True
joining_segments = True
while joining_segments:
joining_segments = False
segcount = len(loop_segments)
for j in range(segcount - 1, -1, -1): # reversed(range(segcount)):
seg_j = loop_segments[j]
if seg_j:
for k in range(j - 1, -1, -1): # reversed(range(j)):
if not seg_j:
break
seg_k = loop_segments[k]
if seg_k and join_seg(seg_j, seg_k):
joining_segments = True
loop_list = loop_segments
for verts in loop_list:
while verts and verts[0][1] == verts[-1][1]:
verts.pop()
loop_list = [verts for verts in loop_list if len(verts) > 2]
# DONE DEALING WITH LOOP FIXING
# vert mapping
vert_map = [None] * len(indices)
ii = 0
for verts in loop_list:
if len(verts) > 2:
for i, vert in enumerate(verts):
vert_map[i + ii] = vert[2]
ii += len(verts)
fill = tessellate_polygon([[v[0] for v in loop] for loop in loop_list])
# draw_loops(loop_list)
#raise Exception("done loop")
# map to original indices
fill = [[vert_map[i] for i in f] for f in fill]
if not fill:
if debug_print:
            print('Warning: cannot scanfill, falling back on a triangle fan.')
fill = [[0, i - 1, i] for i in range(2, len(indices))]
else:
# Use real scan-fill.
# See if its flipped the wrong way.
flip = None
for fi in fill:
if flip is not None:
break
for i, vi in enumerate(fi):
if vi == 0 and fi[i - 1] == 1:
flip = False
break
elif vi == 1 and fi[i - 1] == 0:
flip = True
break
if not flip:
for i, fi in enumerate(fill):
fill[i] = tuple([ii for ii in reversed(fi)])
return fill
def triangle_random_points(num_points, loop_triangles):
"""
Generates a list of random points over mesh loop triangles.
:arg num_points: the number of random points to generate on each triangle.
    :type num_points: int
:arg loop_triangles: list of the triangles to generate points on.
:type loop_triangles: :class:`bpy.types.MeshLoopTriangle`, sequence
:return: list of random points over all triangles.
:rtype: list
"""
from random import random
# For each triangle, generate the required number of random points
sampled_points = [None] * (num_points * len(loop_triangles))
for i, lt in enumerate(loop_triangles):
# Get triangle vertex coordinates
verts = lt.id_data.vertices
ltv = lt.vertices[:]
tv = (verts[ltv[0]].co, verts[ltv[1]].co, verts[ltv[2]].co)
for k in range(num_points):
u1 = random()
u2 = random()
u_tot = u1 + u2
if u_tot > 1:
u1 = 1.0 - u1
u2 = 1.0 - u2
side1 = tv[1] - tv[0]
side2 = tv[2] - tv[0]
p = tv[0] + u1 * side1 + u2 * side2
sampled_points[num_points * i + k] = p
return sampled_points
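# Illustrative sketch (not part of the original module): the same
# parallelogram-reflection trick used by triangle_random_points above, written
# with plain tuples so it can run outside Blender. The unit triangle below is
# an arbitrary example.
if __name__ == "__main__":
    from random import random

    tv = ((0.0, 0.0), (1.0, 0.0), (0.0, 1.0))
    for _ in range(5):
        u1, u2 = random(), random()
        if u1 + u2 > 1:
            # Reflect back into the triangular half of the parallelogram
            u1, u2 = 1.0 - u1, 1.0 - u2
        x = tv[0][0] + u1 * (tv[1][0] - tv[0][0]) + u2 * (tv[2][0] - tv[0][0])
        y = tv[0][1] + u1 * (tv[1][1] - tv[0][1]) + u2 * (tv[2][1] - tv[0][1])
        print((x, y))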
|
from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
import profiles.urls
import accounts.urls
from . import views
urlpatterns = [
url(r'^$', views.HomePage.as_view(), name='home'),
url(r'^about/$', views.AboutPage.as_view(), name='about'),
url(r'^users/', include(profiles.urls, namespace='profiles')),
url(r'^admin/', include(admin.site.urls)),
url(r'^', include(accounts.urls, namespace='accounts')),
url(r'^apis/', include('apis.urls', namespace='apis')),
url(r'^weather/', include('secondhome.urls', namespace='pets')),
]
# User-uploaded files like profile pics need to be served in development
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Include django debug toolbar if DEBUG is on
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2014-2021, Lars Asplund lars.anders.asplund@gmail.com
from pathlib import Path
from vunit.verilog import VUnit
ROOT = Path(__file__).parent
VU = VUnit.from_argv()
VU.add_library("lib").add_source_files(ROOT / "test" / "*.sv")
VU.set_sim_option("modelsim.vsim_flags.gui", ["-novopt"])
VU.main()
|
# encoding: utf-8
# module System.Drawing.Configuration calls itself Configuration
# from System.Drawing, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a
# by generator 1.145
# no doc
# no imports
# no functions
# classes
class SystemDrawingSection(ConfigurationSection):
""" SystemDrawingSection() """
BitmapSuffix = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: BitmapSuffix(self: SystemDrawingSection) -> str
Set: BitmapSuffix(self: SystemDrawingSection) = value
"""
ElementProperty = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the System.Configuration.ConfigurationElementProperty object that represents the System.Configuration.ConfigurationElement object itself.
"""
EvaluationContext = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the System.Configuration.ContextInformation object for the System.Configuration.ConfigurationElement object.
"""
HasContext = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
Properties = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
|
num1 = input("Enter the first number:\n ")
num2 = input("Enter the second number:\n ")
num3 = input("Enter the third number:\n ")
num4 = input("Enter the fourth number:\n ")
if (num1>num2) and (num2>num3):
print("The greatest number is:", num1)
elif (num2>num1) and (num1>num3):
print("The greatest nymber is:", num2)
else:
print("The greatest number is:", num3)
|
"""
IRN API v1
Allows users to extract, create, update and configure IRN data. # noqa: E501
The version of the OpenAPI document: 1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.IRNContacts.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.IRNContacts.exceptions import ApiAttributeError
class OperationType(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('value',): {
'0': 0,
'1': 1,
'2': 2,
'3': 3,
'4': 4,
'5': 5,
'6': 6,
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'value': (int,),
}
@cached_property
def discriminator():
return None
attribute_map = {}
read_only_vars = set()
_composed_schemas = None
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
"""OperationType - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] (int):, must be one of [0, 1, 2, 3, 4, 5, 6, ] # noqa: E501
Keyword Args:
value (int):, must be one of [0, 1, 2, 3, 4, 5, 6, ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
"""OperationType - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] (int):, must be one of [0, 1, 2, 3, 4, 5, 6, ] # noqa: E501
Keyword Args:
value (int):, must be one of [0, 1, 2, 3, 4, 5, 6, ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
self = super(OpenApiModel, cls).__new__(cls)
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
return self
|
from __future__ import unicode_literals
import copy
from funcy import merge
from schema import Optional
from contextlib import contextmanager
from dvc.external_repo import external_repo
from dvc.utils.compat import str
from .local import DependencyLOCAL
class DependencyREPO(DependencyLOCAL):
PARAM_REPO = "repo"
PARAM_URL = "url"
PARAM_REV = "rev"
PARAM_REV_LOCK = "rev_lock"
REPO_SCHEMA = {
Optional(PARAM_URL): str,
Optional(PARAM_REV): str,
Optional(PARAM_REV_LOCK): str,
}
def __init__(self, def_repo, stage, *args, **kwargs):
self.def_repo = def_repo
super(DependencyREPO, self).__init__(stage, *args, **kwargs)
def _parse_path(self, remote, path):
return None
@property
def is_in_repo(self):
return False
def __str__(self):
return "{} ({})".format(self.def_path, self.def_repo[self.PARAM_URL])
@contextmanager
def _make_repo(self, **overrides):
with external_repo(**merge(self.def_repo, overrides)) as repo:
yield repo
def status(self):
with self._make_repo() as repo:
current = repo.find_out_by_relpath(self.def_path).info
with self._make_repo(rev_lock=None) as repo:
updated = repo.find_out_by_relpath(self.def_path).info
if current != updated:
return {str(self): "update available"}
return {}
def save(self):
pass
def dumpd(self):
return {self.PARAM_PATH: self.def_path, self.PARAM_REPO: self.def_repo}
def download(self, to, resume=False):
with self._make_repo(
cache_dir=self.repo.cache.local.cache_dir
) as repo:
self.def_repo[self.PARAM_REV_LOCK] = repo.scm.get_rev()
out = repo.find_out_by_relpath(self.def_path)
repo.fetch(out.stage.path)
to.info = copy.copy(out.info)
to.checkout()
def update(self):
with self._make_repo(rev_lock=None) as repo:
self.def_repo[self.PARAM_REV_LOCK] = repo.scm.get_rev()
|
# encoding=utf8
# This is a temporary fix to import the module from the parent folder
# It will be removed when the package is published on PyPI
import sys
sys.path.append('../')
import numpy as np
from niapy.task import StoppingTask
from niapy.problems import Problem
from niapy.algorithms.basic import ParticleSwarmAlgorithm
class MyProblem(Problem):
def __init__(self, dimension, lower=-10, upper=10, *args, **kwargs):
super().__init__(dimension, lower, upper, *args, **kwargs)
def _evaluate(self, x):
return np.sum(x ** 2)
# we will run Particle Swarm Algorithm on custom problem
task = StoppingTask(problem=MyProblem(dimension=10), max_iters=1000)
algo = ParticleSwarmAlgorithm(population_size=40, c1=2.0, c2=2.0, w=0.7, min_velocity=-4, max_velocity=4)
best = algo.run(task=task)
print('%s -> %s ' % (best[0], best[1]))
|
from contextlib import contextmanager
from logging import getLogger
from django.conf import settings
from elasticsearch.helpers import bulk as es_bulk
from elasticsearch_dsl import analysis, Index
from elasticsearch_dsl.connections import connections
logger = getLogger(__name__)
# Normalises values to improve sorting (by keeping e, E, è, ê etc. together)
lowercase_asciifolding_normalizer = analysis.normalizer(
'lowercase_asciifolding_normalizer',
filter=('lowercase', 'asciifolding'),
)
# Trigram tokenizer enables us to support partial matching
trigram = analysis.tokenizer(
'trigram',
'nGram',
min_gram=3,
max_gram=3,
token_chars=('letter', 'digit'),
)
# Filters out "-" so that t-shirt and tshirt can be matched
special_chars = analysis.char_filter('special_chars', 'mapping', mappings=('-=>',))
trigram_analyzer = analysis.CustomAnalyzer(
'trigram_analyzer',
tokenizer=trigram,
char_filter=special_chars,
filter=('lowercase',),
)
space_remover = analysis.token_filter(
'space_remover',
type='pattern_replace',
pattern=' ',
replacement='',
)
AREA_REGEX = r'[a-z]{1,2}'
DISTRICT_REGEX = r'(?:[0-9][a-z]|[0-9]{1,2})'
SECTOR_REGEX = r'[0-9]'
UNIT_REGEX = r'[a-z]{2}'
postcode_filter = analysis.token_filter(
'postcode_filter',
type='pattern_capture',
# Index whole postcode (with space)
preserve_original=True,
patterns=[
# Index postcode area
# See the Royal Mail programmer's guide for the exact definitions
rf'^({AREA_REGEX}){DISTRICT_REGEX} {SECTOR_REGEX}{UNIT_REGEX}',
# Index postcode district (with sub-district code ignored)
# This is so `wc1` query would match `wc1ab` and `wc1a1ab`, but not `wc111ab`
# Area + one or two digits
rf'^(({AREA_REGEX}[0-9]) {SECTOR_REGEX}{UNIT_REGEX}|'
rf'({AREA_REGEX}[0-9]{{2}}) {SECTOR_REGEX}{UNIT_REGEX}|'
rf'({AREA_REGEX}[0-9])[a-z]? {SECTOR_REGEX}{UNIT_REGEX})',
# Index postcode district (including sub-district)
rf'^({AREA_REGEX}{DISTRICT_REGEX}) {SECTOR_REGEX}{UNIT_REGEX}',
# Index postcode sector
rf'^({AREA_REGEX}{DISTRICT_REGEX} {SECTOR_REGEX}){UNIT_REGEX}',
],
)
# Token filter that adds a space to well-formed UK postcodes that don't have one.
normalise_postcode_filter = analysis.token_filter(
'normalise_postcode_filter',
type='pattern_replace',
pattern=rf'^'
rf'(?<area>{AREA_REGEX})'
rf'(?<district>{DISTRICT_REGEX})'
rf'(?<sector>{SECTOR_REGEX})'
rf'(?<unit>{UNIT_REGEX})'
rf'$',
replacement=r'${area}${district} ${sector}${unit}',
)
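# Illustrative sketch (not part of this module's runtime path): what the
# normalise_postcode_filter above is meant to do, re-expressed with Python's
# re module. Python uses (?P<name>...) groups and \g<name> references instead
# of the Java-style syntax Elasticsearch accepts; the real normalisation
# happens inside Elasticsearch. Guarded so nothing runs on import.
if __name__ == '__main__':
    import re

    _pattern = re.compile(
        rf'^(?P<area>{AREA_REGEX})(?P<district>{DISTRICT_REGEX})'
        rf'(?P<sector>{SECTOR_REGEX})(?P<unit>{UNIT_REGEX})$',
    )
    # 'sw1a1aa' -> 'sw1a 1aa' (input is already lower-cased and space-stripped)
    print(_pattern.sub(r'\g<area>\g<district> \g<sector>\g<unit>', 'sw1a1aa'))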
postcode_analyzer = analysis.CustomAnalyzer(
'postcode_analyzer_v2',
type='custom',
tokenizer='keyword',
filter=(space_remover, 'lowercase', normalise_postcode_filter, postcode_filter),
)
postcode_search_analyzer = analysis.CustomAnalyzer(
'postcode_search_analyzer_v2',
type='custom',
tokenizer='keyword',
filter=('lowercase', normalise_postcode_filter),
)
english_possessive_stemmer = analysis.token_filter(
'english_possessive_stemmer',
type='stemmer',
language='possessive_english',
)
english_stemmer = analysis.token_filter(
'english_stemmer',
type='stemmer',
language='english',
)
english_stop = analysis.token_filter(
'english_stop',
type='stop',
stopwords='_english_',
)
english_analyzer = analysis.CustomAnalyzer(
'english_analyzer',
tokenizer='standard',
filter=[
english_possessive_stemmer,
'lowercase',
english_stop,
english_stemmer,
],
)
ANALYZERS = (
trigram_analyzer,
english_analyzer,
)
def configure_connection():
"""Configure Elasticsearch default connection."""
connections_default = {
'hosts': [settings.ES_URL],
'verify_certs': settings.ES_VERIFY_CERTS,
}
connections.configure(default=connections_default)
def get_client():
"""Gets an instance of the Elasticsearch client from the connection cache."""
return connections.get_connection()
def index_exists(index_name):
"""Checks if an index exists."""
client = get_client()
return client.indices.exists(index_name)
def create_index(index_name, mapping, alias_names=()):
"""
Creates an index, initialises it with a mapping, and optionally associates aliases with it.
Note: If you need to perform multiple alias operations atomically, you should use
start_alias_transaction() instead of specifying aliases when creating an index.
"""
index = Index(index_name)
for analyzer in ANALYZERS:
index.analyzer(analyzer)
index.settings(**settings.ES_INDEX_SETTINGS)
index.mapping(mapping)
# ES allows you to specify filter criteria for aliases but we don't make use of that –
# hence the empty dict for each alias
alias_mapping = {alias_name: {} for alias_name in alias_names}
index.aliases(**alias_mapping)
index.create()
def delete_index(index_name):
"""Deletes an index."""
logger.info(f'Deleting the {index_name} index...')
client = get_client()
client.indices.delete(index_name)
def get_indices_for_aliases(*alias_names):
"""Gets the indices referenced by one or more aliases."""
client = get_client()
alias_to_index_mapping = {alias_name: set() for alias_name in alias_names}
index_to_alias_mapping = client.indices.get_alias(name=alias_names)
for index_name, index_properties in index_to_alias_mapping.items():
for alias_name in index_properties['aliases']:
alias_to_index_mapping[alias_name].add(index_name)
return [alias_to_index_mapping[alias_name] for alias_name in alias_names]
def get_aliases_for_index(index_name):
"""Gets the aliases referencing an index."""
client = get_client()
alias_response = client.indices.get_alias(index=index_name)
return alias_response[index_name]['aliases'].keys()
def alias_exists(alias_name):
"""Checks if an alias exists."""
client = get_client()
return client.indices.exists_alias(name=alias_name)
def delete_alias(alias_name):
"""Deletes an alias entirely (dissociating it from all indices)."""
logger.info(f'Deleting the {alias_name} alias...')
client = get_client()
client.indices.delete_alias('_all', alias_name)
class _AliasUpdater:
"""Helper class for making multiple alias updates atomically."""
def __init__(self):
"""Initialises the instance with an empty list of pending operations."""
self.actions = []
def associate_indices_with_alias(self, alias_name, index_names):
"""Adds a pending operation to associate a new or existing alias with a set of indices."""
self.actions.append({
'add': {
'alias': alias_name,
'indices': list(index_names),
},
})
def dissociate_indices_from_alias(self, alias_name, index_names):
"""Adds a pending operation to dissociate an existing alias from a set of indices."""
self.actions.append({
'remove': {
'alias': alias_name,
'indices': list(index_names),
},
})
def commit(self):
"""Commits (flushes) pending operations."""
client = get_client()
client.indices.update_aliases(body={
'actions': self.actions,
})
self.actions = []
@contextmanager
def start_alias_transaction():
"""
Returns a context manager that can be used to create and update aliases atomically.
Changes are committed when the context manager exits.
Usage example:
with start_alias_transaction() as alias_transaction:
alias_transaction.dissociate_indices_from_alias(
'some-alias',
            ['an-index', 'another-index'],
)
alias_transaction.associate_indices_with_alias(
'another-alias',
            ['new-index'],
)
"""
alias_updater = _AliasUpdater()
yield alias_updater
alias_updater.commit()
def associate_index_with_alias(alias_name, index_name):
"""
Associates a new or existing alias with an index.
This is only intended to be a convenience function for simple operations. For more complex
operations, use start_alias_transaction().
"""
client = get_client()
client.indices.put_alias(index_name, alias_name)
def bulk(
actions=None,
chunk_size=500,
max_chunk_bytes=settings.ES_BULK_MAX_CHUNK_BYTES,
**kwargs,
):
"""Send data in bulk to Elasticsearch."""
return es_bulk(
get_client(),
actions=actions,
chunk_size=chunk_size,
max_chunk_bytes=max_chunk_bytes,
**kwargs,
)
|
from abc import ABC, abstractmethod
import numpy as np
import random
from typing import Callable, Dict, Optional, Tuple, Sequence
from .reward_spaces import Subtask
from ..lux.game import Game
class SubtaskSampler(ABC):
def __init__(self, subtask_constructors: Sequence[Callable[..., Subtask]]):
self.subtask_constructors = subtask_constructors
@abstractmethod
def sample(self, final_rewards: Optional[Tuple[float, float]]) -> Subtask:
pass
# noinspection PyMethodMayBeStatic
def get_info(self) -> Dict[str, np.ndarray]:
return {}
class RandomSampler(SubtaskSampler):
def sample(self, final_rewards: Optional[Tuple[float, float]]) -> Subtask:
return self.subtask_constructors[random.randrange(len(self.subtask_constructors))]()
class DifficultySampler(SubtaskSampler):
def __init__(self, subtask_constructors: Sequence[Callable[..., Subtask]]):
super(DifficultySampler, self).__init__(subtask_constructors)
self.active_subtask_idx = -1
self.summed_rewards = np.zeros(len(self.subtask_constructors))
self.n_trials = np.zeros(len(self.subtask_constructors))
def sample(self, final_rewards: Optional[Tuple[float, float]]) -> Subtask:
if final_rewards is not None:
self.n_trials[self.active_subtask_idx] += 1
self.summed_rewards[self.active_subtask_idx] += np.mean(final_rewards)
self.active_subtask_idx = np.random.choice(len(self.subtask_constructors), p=self.weights)
return self.subtask_constructors[self.active_subtask_idx]()
@property
def weights(self) -> np.ndarray:
weights = Subtask.get_reward_spec().reward_max - self.summed_rewards / np.maximum(self.n_trials, 1)
return weights / weights.sum()
def get_info(self) -> Dict[str, np.ndarray]:
return {
f"LOGGING_{subtask.__name__}_subtask_difficulty": self.weights[i]
for i, subtask in enumerate(self.subtask_constructors)
}
class MultiSubtask(Subtask):
def __init__(
self,
subtask_constructors: Sequence[Callable[..., Subtask]] = (),
subtask_sampler_constructor: Callable[..., SubtaskSampler] = RandomSampler,
**kwargs
):
super(MultiSubtask, self).__init__(**kwargs)
self.subtask_constructors = subtask_constructors
self.subtask_sampler = subtask_sampler_constructor(self.subtask_constructors)
self.active_subtask = self.subtask_sampler.sample(None)
self.info = {
f"LOGGING_{subtask.__name__}_subtask_reward": np.array([float("nan"), float("nan")])
for subtask in self.subtask_constructors
}
def compute_rewards_and_done(self, game_state: Game, done: bool) -> Tuple[Tuple[float, float], bool]:
reward, done = self.active_subtask.compute_rewards_and_done(game_state, done)
for subtask in self.subtask_constructors:
reward_key = f"LOGGING_{subtask.__name__}_subtask_reward"
if isinstance(self.active_subtask, subtask):
self.info[reward_key] = np.array(reward)
else:
self.info[reward_key] = np.array([float("nan"), float("nan")])
if done:
self.active_subtask = self.subtask_sampler.sample(reward)
return reward, done
def completed_task(self, game_state: Game) -> np.ndarray:
raise NotImplementedError
def get_info(self) -> Dict[str, np.ndarray]:
return dict(**self.info, **self.subtask_sampler.get_info())
def get_subtask_encoding(self, subtask_encoding_dict: dict) -> int:
return self.active_subtask.get_subtask_encoding(subtask_encoding_dict)
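# Illustrative sketch (not part of the original module): how DifficultySampler's
# weights shift toward harder subtasks. The reward_max of 1.0 is an assumed
# stand-in for Subtask.get_reward_spec().reward_max. Guarded so nothing runs
# on import.
if __name__ == "__main__":
    summed_rewards = np.array([9.0, 4.0, 0.5])  # reward totals over past trials
    n_trials = np.array([10, 10, 10])
    reward_max = 1.0
    weights = reward_max - summed_rewards / np.maximum(n_trials, 1)
    weights /= weights.sum()
    # The lowest-reward (hardest) subtask gets the largest sampling weight
    print(weights)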
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PublicIPAddressesOperations:
"""PublicIPAddressesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
public_ip_address_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
public_ip_address_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified public IP address.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
        :param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
public_ip_address_name=public_ip_address_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
async def get(
self,
resource_group_name: str,
public_ip_address_name: str,
expand: Optional[str] = None,
**kwargs
) -> "models.PublicIPAddress":
"""Gets the specified public IP address in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
        :param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PublicIPAddress, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_10_01.models.PublicIPAddress
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPAddress"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
public_ip_address_name: str,
parameters: "models.PublicIPAddress",
**kwargs
) -> "models.PublicIPAddress":
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPAddress"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PublicIPAddress')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
public_ip_address_name: str,
parameters: "models.PublicIPAddress",
**kwargs
) -> AsyncLROPoller["models.PublicIPAddress"]:
"""Creates or updates a static or dynamic public IP address.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:param parameters: Parameters supplied to the create or update public IP address operation.
:type parameters: ~azure.mgmt.network.v2017_10_01.models.PublicIPAddress
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PublicIPAddress or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2017_10_01.models.PublicIPAddress]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPAddress"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
public_ip_address_name=public_ip_address_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
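    # LRO usage sketch (illustrative): ``begin_create_or_update`` returns an
    # AsyncLROPoller, so the call is awaited once to start the operation and
    # ``poller.result()`` is awaited again for the finished resource. The
    # resource group, name, and parameter values below are assumptions for
    # illustration only.
    #
    #     from azure.mgmt.network.v2017_10_01.models import PublicIPAddress
    #
    #     params = PublicIPAddress(location="westus",
    #                              public_ip_allocation_method="Dynamic")
    #     poller = await client.public_ip_addresses.begin_create_or_update(
    #         "my-rg", "my-public-ip", params)
    #     public_ip = await poller.result()  # waits for provisioning to finish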
async def _update_tags_initial(
self,
resource_group_name: str,
public_ip_address_name: str,
parameters: "models.TagsObject",
**kwargs
) -> "models.PublicIPAddress":
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPAddress"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
public_ip_address_name: str,
parameters: "models.TagsObject",
**kwargs
) -> AsyncLROPoller["models.PublicIPAddress"]:
"""Updates public IP address tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:param parameters: Parameters supplied to update public IP address tags.
:type parameters: ~azure.mgmt.network.v2017_10_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PublicIPAddress or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2017_10_01.models.PublicIPAddress]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPAddress"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
public_ip_address_name=public_ip_address_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
def list_all(
self,
**kwargs
) -> AsyncIterable["models.PublicIPAddressListResult"]:
"""Gets all the public IP addresses in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_10_01.models.PublicIPAddressListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPAddressListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPAddresses'} # type: ignore
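    # Paging usage sketch (illustrative): the AsyncItemPaged returned by
    # ``list_all`` is consumed with ``async for``, which transparently follows
    # the ``next_link`` of each PublicIPAddressListResult page.
    #
    #     async for public_ip in client.public_ip_addresses.list_all():
    #         print(public_ip.name, public_ip.ip_address)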
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["models.PublicIPAddressListResult"]:
"""Gets all public IP addresses in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_10_01.models.PublicIPAddressListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPAddressListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses'} # type: ignore
def list_virtual_machine_scale_set_public_ip_addresses(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
**kwargs
) -> AsyncIterable["models.PublicIPAddressListResult"]:
"""Gets information about all public IP addresses on a virtual machine scale set level.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_10_01.models.PublicIPAddressListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPAddressListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_public_ip_addresses.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_public_ip_addresses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/publicipaddresses'} # type: ignore
def list_virtual_machine_scale_set_vm_public_ip_addresses(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
network_interface_name: str,
ip_configuration_name: str,
**kwargs
) -> AsyncIterable["models.PublicIPAddressListResult"]:
"""Gets information about all public IP addresses in a virtual machine IP configuration in a
virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The network interface name.
:type network_interface_name: str
:param ip_configuration_name: The IP configuration name.
:type ip_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_10_01.models.PublicIPAddressListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPAddressListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_vm_public_ip_addresses.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_vm_public_ip_addresses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipconfigurations/{ipConfigurationName}/publicipaddresses'} # type: ignore
async def get_virtual_machine_scale_set_public_ip_address(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
network_interface_name: str,
ip_configuration_name: str,
public_ip_address_name: str,
expand: Optional[str] = None,
**kwargs
) -> "models.PublicIPAddress":
"""Get the specified public IP address in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param ip_configuration_name: The name of the IP configuration.
:type ip_configuration_name: str
:param public_ip_address_name: The name of the public IP Address.
:type public_ip_address_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PublicIPAddress, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_10_01.models.PublicIPAddress
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPAddress"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json, text/json"
# Construct URL
url = self.get_virtual_machine_scale_set_public_ip_address.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_virtual_machine_scale_set_public_ip_address.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipconfigurations/{ipConfigurationName}/publicipaddresses/{publicIpAddressName}'} # type: ignore
|
#!/usr/bin/env python
"""Test of "New Hunt" wizard."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl import app
from selenium.webdriver.common import keys
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server import data_store
from grr_response_server import foreman
from grr_response_server import foreman_rules
from grr_response_server import hunt as lib_hunt
from grr_response_server.flows.general import file_finder
from grr_response_server.flows.general import transfer
from grr_response_server.gui import gui_test_lib
from grr_response_server.rdfvalues import flow_runner as rdf_flow_runner
from grr_response_server.rdfvalues import output_plugin as rdf_output_plugin
from grr.test_lib import test_lib
class TestNewHuntWizard(gui_test_lib.GRRSeleniumHuntTest):
"""Test the "new hunt wizard" GUI."""
@staticmethod
def FindForemanRules(hunt_urn, token):
rules = data_store.REL_DB.ReadAllForemanRules()
return [rule for rule in rules if rule.hunt_id == hunt_urn.Basename()]
def testNewHuntWizard(self):
# Open up and click on View Hunts.
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.WaitUntil(self.IsElementPresent, "css=a[grrtarget=hunts]")
self.Click("css=a[grrtarget=hunts]")
self.WaitUntil(self.IsElementPresent, "css=button[name=NewHunt]")
# Open up "New Hunt" wizard
self.Click("css=button[name=NewHunt]")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('What to run?')")
# Click on Filesystem item in flows list
self.WaitUntil(self.IsElementPresent, "css=#_Filesystem > i.jstree-icon")
self.Click("css=#_Filesystem > i.jstree-icon")
# Click on the FileFinder item in Filesystem flows list
self.Click("link=File Finder")
# Wait for flow configuration form to be rendered (just wait for first
# input field).
self.WaitUntil(self.IsElementPresent,
"css=grr-new-hunt-wizard-form label:contains('Paths')")
# Change "path" and "pathtype" values
self.Type(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-repeated-field:has(label:contains('Paths')) "
"input", "/tmp")
self.Select(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-single-field:has(label:contains('Pathtype')) "
"select", "TSK")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
# Click on "Back" button and check that all the values in the form
# remain intact.
self.Click("css=grr-new-hunt-wizard-form button.Back")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
self.Click("css=grr-new-hunt-wizard-form button.Back")
self.WaitUntil(self.IsElementPresent,
"css=grr-new-hunt-wizard-form label:contains('Paths')")
self.assertEqual(
"/tmp",
self.GetValue(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-repeated-field:has(label:contains('Paths')) input"))
self.assertEqual(
"TSK",
self.GetSelectedLabel(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-single-field:has(label:contains('Pathtype')) select"
))
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
# Configure the hunt to use dummy output plugin.
self.Click("css=grr-new-hunt-wizard-form button[name=Add]")
self.Select("css=grr-new-hunt-wizard-form select", "DummyOutputPlugin")
self.Type(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-single-field:has(label:contains('Filepath Regex')) "
"input", "some regex")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Where to run?')")
# Empty set of rules should be valid.
self.WaitUntil(self.IsElementPresent, "css=button.Next:not([disabled])")
# A note informs what an empty set of rules means.
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('No rules specified!')")
    # An alternative match mode, which matches a client if any of the rules
    # evaluates to true, can be selected.
self.Select(
"css=grr-configure-rules-page "
"label:contains('Match mode') ~ * select", "Match any")
# The note depends on the match mode.
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('No rules specified!')")
# Create 3 foreman rules. Note that "Add" button adds rules
# to the beginning of a list. So we always use :nth(0) selector.
self.Click("css=grr-configure-rules-page button[name=Add]")
self.Select("css=grr-configure-rules-page div.well:nth(0) select", "Regex")
rule = foreman_rules.ForemanRegexClientRule
label = rule.ForemanStringField.SYSTEM.description
self.Select(
"css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Field') ~ * select", label)
self.Type(
"css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Attribute regex') ~ * input", "Linux")
self.Click("css=grr-configure-rules-page button[name=Add]")
self.Select("css=grr-configure-rules-page div.well:nth(0) select",
"Integer")
rule = foreman_rules.ForemanIntegerClientRule
label = rule.ForemanIntegerField.CLIENT_CLOCK.description
self.Select(
"css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Field') ~ * select", label)
self.Select(
"css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Operator') ~ * select", "GREATER_THAN")
self.Type(
"css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Value') ~ * input", "1336650631137737")
self.Click("css=grr-configure-rules-page button[name=Add]")
self.Click("css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Os darwin') ~ * input[type=checkbox]")
# Click on "Back" button
self.Click("css=grr-new-hunt-wizard-form button.Back")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
# Click on "Next" button again and check that all the values that
# we've just entered remain intact.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Where to run?')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Review')")
# Check that the arguments summary is present.
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Paths')")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('/tmp')")
# Check that output plugins are shown.
self.assertTrue(
self.IsElementPresent(
"css=grr-wizard-form:contains('DummyOutputPlugin')"))
self.assertTrue(
self.IsElementPresent("css=grr-wizard-form:contains('some regex')"))
# Check that there's no deprecated rules summary.
self.assertFalse(
self.IsElementPresent("css=grr-wizard-form:contains('Regex rules')"))
self.assertFalse(
self.IsElementPresent("css=grr-wizard-form:contains('Integer rules')"))
# Check that rules summary is present.
self.assertTrue(
self.IsElementPresent(
"css=grr-wizard-form:contains('Client rule set')"))
# Click on "Run" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Created Hunt')")
# Close the window and check that the hunt was created.
self.Click("css=button.Next")
# Select newly created hunt.
self.Click("css=grr-hunts-list td:contains('gui_user')")
# Check that correct details are displayed in hunt details tab.
self.WaitUntil(self.IsElementPresent,
"css=grr-hunt-inspector:contains('GenericHunt')")
self.WaitUntil(self.IsElementPresent,
"css=grr-hunt-inspector:contains('Flow Arguments')")
self.assertTrue(
self.IsElementPresent("css=grr-hunt-inspector:contains('Paths')"))
self.assertTrue(
self.IsElementPresent("css=grr-hunt-inspector:contains('/tmp')"))
self.assertTrue(
self.IsElementPresent(
"css=grr-hunt-inspector:contains('DummyOutputPlugin')"))
self.assertTrue(
self.IsElementPresent("css=grr-hunt-inspector:contains('some regex')"))
# Check that there's no deprecated rules summary.
self.assertFalse(
self.IsElementPresent("css=grr-hunt-inspector:contains('Regex rules')"))
self.assertFalse(
self.IsElementPresent(
"css=grr-hunt-inspector:contains('Integer rules')"))
# Check that rules summary is present.
self.assertTrue(
self.IsElementPresent(
"css=grr-hunt-inspector:contains('Client Rule Set')"))
# Check that the hunt object was actually created
hunts_list = sorted(
data_store.REL_DB.ReadHuntObjects(offset=0, count=10),
key=lambda x: x.create_time)
self.assertLen(hunts_list, 1)
# Check that the hunt was created with a correct flow
hunt = hunts_list[0]
self.assertEqual(hunt.args.standard.flow_name,
file_finder.FileFinder.__name__)
self.assertEqual(hunt.args.standard.flow_args.paths[0], "/tmp")
self.assertEqual(hunt.args.standard.flow_args.pathtype,
rdf_paths.PathSpec.PathType.TSK)
# self.assertEqual(hunt.args.flow_args.ignore_errors, True)
self.assertEqual(hunt.output_plugins[0].plugin_name, "DummyOutputPlugin")
# Check that hunt was not started
self.assertEqual(hunt.hunt_state, hunt.HuntState.PAUSED)
lib_hunt.StartHunt(hunt.hunt_id)
hunt_rules = self.FindForemanRules(
rdfvalue.RDFURN("hunts").Add(hunt.hunt_id), token=self.token)
# Check that the hunt was created with correct rules
self.assertLen(hunt_rules, 1)
lifetime = hunt_rules[0].GetLifetime()
lifetime -= rdfvalue.DurationSeconds("2w")
self.assertLessEqual(lifetime, rdfvalue.DurationSeconds("1s"))
r = hunt_rules[0].client_rule_set
self.assertEqual(r.match_mode,
foreman_rules.ForemanClientRuleSet.MatchMode.MATCH_ANY)
self.assertLen(r.rules, 3)
self.assertEqual(r.rules[0].rule_type,
foreman_rules.ForemanClientRule.Type.OS)
self.assertEqual(r.rules[0].os.os_windows, False)
self.assertEqual(r.rules[0].os.os_linux, False)
self.assertEqual(r.rules[0].os.os_darwin, True)
self.assertEqual(r.rules[1].rule_type,
foreman_rules.ForemanClientRule.Type.INTEGER)
self.assertEqual(r.rules[1].integer.field, "CLIENT_CLOCK")
self.assertEqual(
r.rules[1].integer.operator,
foreman_rules.ForemanIntegerClientRule.Operator.GREATER_THAN)
self.assertEqual(r.rules[1].integer.value, 1336650631137737)
self.assertEqual(r.rules[2].rule_type,
foreman_rules.ForemanClientRule.Type.REGEX)
self.assertEqual(r.rules[2].regex.field, "SYSTEM")
self.assertEqual(r.rules[2].regex.attribute_regex, "Linux")
def testWizardStepCounterIsShownCorrectly(self):
# Open up and click on View Hunts.
self.Open("/#/hunts")
# Open up "New Hunt" wizard
self.Click("css=button[name=NewHunt]")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('What to run?')")
# Click on the FileFinder item in Filesystem flows list
self.WaitUntil(self.IsElementPresent, "css=#_Filesystem > i.jstree-icon")
self.Click("css=#_Filesystem > i.jstree-icon")
self.Click("link=File Finder")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Step 1 out of 6')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Step 2 out of 6')")
def testLiteralExpressionIsProcessedCorrectly(self):
"""Literals are raw bytes. Testing that raw bytes are processed right."""
# Open up and click on View Hunts.
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
# Open up "New Hunt" wizard
self.Click("css=button[name=NewHunt]")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('What to run?')")
# Click on Filesystem item in flows list
self.WaitUntil(self.IsElementPresent, "css=#_Filesystem > i.jstree-icon")
self.Click("css=#_Filesystem > i.jstree-icon")
# Click on the FileFinder item in Filesystem flows list
self.Click("link=File Finder")
self.Click("css=label:contains('Conditions') ~ * button")
self.Select("css=label:contains('Condition type') ~ * select",
"Contents literal match")
self.Type("css=label:contains('Literal') ~ * input", "foo\\x0d\\xc8bar")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Where to run?')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Review')")
# Check that the arguments summary is present.
self.WaitUntil(
self.IsElementPresent,
"css=grr-wizard-form:contains('%s')" % file_finder.FileFinder.__name__)
self.WaitUntil(self.IsTextPresent, b"foo\\x0d\\xc8bar")
# Click on "Run" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Created Hunt')")
# Close the window and check that the hunt was created.
self.Click("css=button.Next")
# Check that the hunt object was actually created
hunts_list = sorted(
data_store.REL_DB.ReadHuntObjects(offset=0, count=10),
key=lambda x: x.create_time)
self.assertLen(hunts_list, 1)
# Check that the hunt was created with a correct literal value.
hunt = hunts_list[0]
self.assertEqual(hunt.args.standard.flow_name,
file_finder.FileFinder.__name__)
self.assertEqual(
hunt.args.standard.flow_args.conditions[0].contents_literal_match
.literal, b"foo\x0d\xc8bar")
def testOutputPluginsListEmptyWhenNoDefaultOutputPluginSet(self):
self.Open("/#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
# Select "List Processes" flow.
self.Click("css=#_Processes > i.jstree-icon")
self.Click("link=ListProcesses")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
# There should be no dummy output plugin visible.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
self.WaitUntilNot(self.IsElementPresent,
"css=grr-wizard-form:contains('Dummy do do')")
def testDefaultOutputPluginIsCorrectlyAddedToThePluginsList(self):
with test_lib.ConfigOverrider(
{"AdminUI.new_hunt_wizard.default_output_plugin": "DummyOutputPlugin"}):
self.Open("/#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
# Select "List Processes" flow.
self.Click("css=#_Processes > i.jstree-icon")
self.Click("link=ListProcesses")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
# Dummy output plugin should be added by default.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('DummyOutputPlugin')")
def testLabelsHuntRuleDisplaysAvailableLabels(self):
client_id = self.SetupClient(0)
self.AddClientLabel(client_id, u"owner1", u"foo")
self.AddClientLabel(client_id, u"owner2", u"bar")
self.Open("/#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
# Select "List Processes" flow.
self.Click("css=#_Processes > i.jstree-icon")
self.Click("link=ListProcesses")
# Click 'Next' to go to hunt parameters page.
self.Click("css=grr-new-hunt-wizard-form button.Next")
# Click 'Next' to go to output plugins page.
self.Click("css=grr-new-hunt-wizard-form button.Next")
# Click 'Next' to go to hunt rules page.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.Click("css=grr-new-hunt-wizard-form button[name=Add]")
# Select 'Clients With Label' rule.
self.Select("css=grr-new-hunt-wizard-form div.well select", "Label")
    # Check that there's an option present for label 'bar' (this option
    # should be selected) and for label 'foo'.
self.WaitUntil(
self.IsElementPresent, "css=grr-new-hunt-wizard-form div.well "
".form-group:has(label:contains('Label')) "
"select option:selected[label=bar]")
self.WaitUntil(
self.IsElementPresent, "css=grr-new-hunt-wizard-form div.well "
".form-group:has(label:contains('Label')) "
"select option:not(:selected)[label=foo]")
def testLabelsHuntRuleMatchesCorrectClients(self):
client_ids = self.SetupClients(10)
self.AddClientLabel(client_ids[1], u"owner1", u"foo")
self.AddClientLabel(client_ids[1], u"owner2", u"bar")
self.AddClientLabel(client_ids[7], u"GRR", u"bar")
self.Open("/#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
# Select "List Processes" flow.
self.Click("css=#_Processes > i.jstree-icon")
self.Click("link=ListProcesses")
    # Click 'Next' to go to the hunt parameters page, then to the output
    # plugins page, and then to the hunt rules page.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Where to run?')")
# Select 'Clients With Label' rule.
self.Click("css=grr-configure-rules-page button[name=Add]")
self.Select("css=grr-new-hunt-wizard-form div.well select", "Label")
self.Select(
"css=grr-new-hunt-wizard-form div.well .form-group "
".form-group:has(label:contains('Label')):nth-last-of-type(1) "
"select", "foo")
self.Click("css=grr-new-hunt-wizard-form div.well .form-group "
".form-group:has(label:contains('Add label')) button")
self.Select(
"css=grr-new-hunt-wizard-form div.well .form-group "
".form-group:has(label:contains('Label')):nth-last-of-type(1) "
"select", "bar")
self.Select(
"css=grr-new-hunt-wizard-form div.well .form-group "
".form-group:has(label:contains('Match mode')) select", "Match any")
# Click 'Next' to go to hunt overview page. Then click 'Next' to go to
# submit the hunt and wait until it's created.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Created Hunt')")
hunts_list = sorted(
data_store.REL_DB.ReadHuntObjects(offset=0, count=10),
key=lambda x: x.create_time)
hunt = hunts_list[0]
lib_hunt.StartHunt(hunt.hunt_id)
foreman_obj = foreman.Foreman()
for client_id in client_ids:
tasks_assigned = foreman_obj.AssignTasksToClient(client_id)
if client_id in [client_ids[1], client_ids[7]]:
self.assertTrue(tasks_assigned)
else:
self.assertFalse(tasks_assigned)
def CreateSampleHunt(self, description, token=None):
self.StartHunt(
description=description,
flow_runner_args=rdf_flow_runner.FlowRunnerArgs(
flow_name=transfer.GetFile.__name__),
flow_args=transfer.GetFileArgs(
pathspec=rdf_paths.PathSpec(
path="/tmp/evil.txt",
pathtype=rdf_paths.PathSpec.PathType.TSK,
)),
client_rule_set=self._CreateForemanClientRuleSet(),
output_plugins=[
rdf_output_plugin.OutputPluginDescriptor(
plugin_name="DummyOutputPlugin",
plugin_args=gui_test_lib.DummyOutputPlugin.args_type(
filename_regex="blah!", fetch_binaries=True))
],
client_rate=60,
paused=True,
token=token)
def testPathAutocomplete(self):
# Open Hunts
self.Open("/#/hunts")
# Open "New Hunt" wizard
self.Click("css=button[name=NewHunt]")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('What to run?')")
# Click on Filesystem item in flows list
self.Click("css=#_Filesystem > i.jstree-icon")
# Click on the FileFinder item in Filesystem flows list
self.Click("link=File Finder")
input_selector = "css=grr-form-glob-expression input[uib-typeahead]"
# Change "path"
self.Type(input_selector, "/foo/%%path")
self.WaitUntil(self.IsElementPresent,
"css=[uib-typeahead-popup]:contains('%%environ_path%%')")
self.GetElement(input_selector).send_keys(keys.Keys.ENTER)
self.WaitUntilEqual("/foo/%%environ_path%%", self.GetValue,
input_selector + ":text")
if __name__ == "__main__":
app.run(test_lib.main)
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create / interact with a batch of updates / deletes.
Batches provide the ability to execute multiple operations
in a single request to the Cloud Datastore API.
See
https://cloud.google.com/datastore/docs/concepts/entities#Datastore_Batch_operations
"""
from gcloud.datastore import helpers
from gcloud.datastore.key import _dataset_ids_equal
from gcloud.datastore._generated import datastore_pb2 as _datastore_pb2
class Batch(object):
"""An abstraction representing a collected group of updates / deletes.
    Used to build up a bulk mutation.
For example, the following snippet of code will put the two ``save``
operations and the ``delete`` operation into the same mutation, and send
them to the server in a single API request::
>>> from gcloud.datastore.batch import Batch
>>> batch = Batch()
>>> batch.put(entity1)
>>> batch.put(entity2)
>>> batch.delete(key3)
>>> batch.commit()
You can also use a batch as a context manager, in which case
:meth:`commit` will be called automatically if its block exits without
raising an exception::
>>> with Batch() as batch:
... batch.put(entity1)
... batch.put(entity2)
... batch.delete(key3)
By default, no updates will be sent if the block exits with an error::
>>> with Batch() as batch:
... do_some_work(batch)
... raise Exception() # rolls back
:type client: :class:`gcloud.datastore.client.Client`
:param client: The client used to connect to datastore.
"""
_id = None # "protected" attribute, always None for non-transactions
def __init__(self, client):
self._client = client
self._commit_request = _datastore_pb2.CommitRequest()
self._partial_key_entities = []
def current(self):
"""Return the topmost batch / transaction, or None."""
return self._client.current_batch
@property
def dataset_id(self):
"""Getter for dataset ID in which the batch will run.
:rtype: :class:`str`
:returns: The dataset ID in which the batch will run.
"""
return self._client.dataset_id
@property
def namespace(self):
"""Getter for namespace in which the batch will run.
:rtype: :class:`str`
:returns: The namespace in which the batch will run.
"""
return self._client.namespace
@property
def connection(self):
"""Getter for connection over which the batch will run.
:rtype: :class:`gcloud.datastore.connection.Connection`
:returns: The connection over which the batch will run.
"""
return self._client.connection
def _add_partial_key_entity_pb(self):
"""Adds a new mutation for an entity with a partial key.
:rtype: :class:`gcloud.datastore._generated.entity_pb2.Entity`
:returns: The newly created entity protobuf that will be
updated and sent with a commit.
"""
return self.mutations.insert_auto_id.add()
def _add_complete_key_entity_pb(self):
"""Adds a new mutation for an entity with a completed key.
:rtype: :class:`gcloud.datastore._generated.entity_pb2.Entity`
:returns: The newly created entity protobuf that will be
updated and sent with a commit.
"""
# We use ``upsert`` for entities with completed keys, rather than
# ``insert`` or ``update``, in order not to create race conditions
# based on prior existence / removal of the entity.
return self.mutations.upsert.add()
def _add_delete_key_pb(self):
"""Adds a new mutation for a key to be deleted.
:rtype: :class:`gcloud.datastore._generated.entity_pb2.Key`
:returns: The newly created key protobuf that will be
deleted when sent with a commit.
"""
return self.mutations.delete.add()
@property
def mutations(self):
"""Getter for the changes accumulated by this batch.
Every batch is committed with a single commit request containing all
the work to be done as mutations. Inside a batch, calling :meth:`put`
with an entity, or :meth:`delete` with a key, builds up the request by
adding a new mutation. This getter returns the protobuf that has been
built-up so far.
:rtype: :class:`gcloud.datastore._generated.datastore_pb2.Mutation`
:returns: The Mutation protobuf to be sent in the commit request.
"""
return self._commit_request.mutation
def put(self, entity):
"""Remember an entity's state to be saved during :meth:`commit`.
.. note::
Any existing properties for the entity will be replaced by those
currently set on this instance. Already-stored properties which do
not correspond to keys set on this instance will be removed from
the datastore.
.. note::
Property values which are "text" ('unicode' in Python2, 'str' in
Python3) map to 'string_value' in the datastore; values which are
"bytes" ('str' in Python2, 'bytes' in Python3) map to 'blob_value'.
When an entity has a partial key, calling :meth:`commit` sends it as
an ``insert_auto_id`` mutation and the key is completed. On return,
the key for the ``entity`` passed in is updated to match the key ID
assigned by the server.
:type entity: :class:`gcloud.datastore.entity.Entity`
:param entity: the entity to be saved.
:raises: ValueError if entity has no key assigned, or if the key's
``dataset_id`` does not match ours.
"""
if entity.key is None:
raise ValueError("Entity must have a key")
if not _dataset_ids_equal(self.dataset_id, entity.key.dataset_id):
raise ValueError("Key must be from same dataset as batch")
if entity.key.is_partial:
entity_pb = self._add_partial_key_entity_pb()
self._partial_key_entities.append(entity)
else:
entity_pb = self._add_complete_key_entity_pb()
_assign_entity_to_pb(entity_pb, entity)
def delete(self, key):
"""Remember a key to be deleted during :meth:`commit`.
:type key: :class:`gcloud.datastore.key.Key`
:param key: the key to be deleted.
:raises: ValueError if key is not complete, or if the key's
``dataset_id`` does not match ours.
"""
if key.is_partial:
raise ValueError("Key must be complete")
if not _dataset_ids_equal(self.dataset_id, key.dataset_id):
raise ValueError("Key must be from same dataset as batch")
key_pb = helpers._prepare_key_for_request(key.to_protobuf())
self._add_delete_key_pb().CopyFrom(key_pb)
def begin(self):
"""No-op
Overridden by :class:`gcloud.datastore.transaction.Transaction`.
"""
def commit(self):
"""Commits the batch.
This is called automatically upon exiting a with statement,
however it can be called explicitly if you don't want to use a
context manager.
"""
_, updated_keys = self.connection.commit(
self.dataset_id, self._commit_request, self._id)
# If the back-end returns without error, we are guaranteed that
# :meth:`Connection.commit` will return keys that match (length and
# order) directly ``_partial_key_entities``.
for new_key_pb, entity in zip(updated_keys,
self._partial_key_entities):
new_id = new_key_pb.path_element[-1].id
entity.key = entity.key.completed_key(new_id)
def rollback(self):
"""No-op
Overridden by :class:`gcloud.datastore.transaction.Transaction`.
"""
pass
def __enter__(self):
self._client._push_batch(self)
self.begin()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
try:
if exc_type is None:
self.commit()
else:
self.rollback()
finally:
self._client._pop_batch()
def _assign_entity_to_pb(entity_pb, entity):
"""Copy ``entity`` into ``entity_pb``.
Helper method for ``Batch.put``.
:type entity_pb: :class:`gcloud.datastore._generated.entity_pb2.Entity`
:param entity_pb: The entity owned by a mutation.
:type entity: :class:`gcloud.datastore.entity.Entity`
:param entity: The entity being updated within the batch / transaction.
"""
bare_entity_pb = helpers.entity_to_protobuf(entity)
key_pb = helpers._prepare_key_for_request(bare_entity_pb.key)
bare_entity_pb.key.CopyFrom(key_pb)
entity_pb.CopyFrom(bare_entity_pb)
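# Usage sketch (illustrative, assuming a configured
# ``gcloud.datastore.client.Client``): batches are normally obtained from the
# client and used as context managers, so ``commit()`` runs automatically when
# the block exits cleanly.
#
#     from gcloud import datastore
#
#     client = datastore.Client(dataset_id='my-dataset')
#     with client.batch() as batch:
#         batch.put(entity1)       # entity1/entity2/key3 are placeholders
#         batch.put(entity2)
#         batch.delete(key3)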
|
#!/usr/bin/env python3
"""Visualise statistic by machine economic."""
from __future__ import annotations
import pandas as pd
from matplotlib import pyplot as plt
from typing import Dict
from .mechanic_report import MechReports
from .administration.logger_cfg import Logs
from .support_modules.custom_exceptions import MainMenu
from .support_modules.standart_functions import (
BasicFunctionsS
as BasF_S
)
LOGGER = Logs().give_logger(__name__)
class MechEconomic(MechReports):
"""Visualise statistic by machine economic."""
__slots__ = (
'mech_econ_path',
'mech_econ_data',
'mech_econ_file',
)
def __init__(self, user):
"""Load mech econom data."""
super().__init__(user)
self.mech_econ_data = {}
self.mech_econ_path = (
super().get_root_path() / 'data' / 'mech_ecomomic'
)
if self.mech_econ_path.exists():
self.mech_econ_file = super().load_data(
data_path=self.mech_econ_path,
user=user,
)
else:
self.mech_econ_file = pd.DataFrame(self.mech_econ_data, index=[0])
def _save_mech_econom(self):
"""Save mech econom and create log file."""
self.mech_econ_file = self.mech_econ_file.append(
self.mech_econ_data,
ignore_index=True
)
self._dump_mech_econ_data()
self._log_mech_econ_creation()
def _dump_mech_econ_data(self):
"""Dump salary data to file."""
super().dump_data(
data_path=self.mech_econ_path,
base_to_dump=self.mech_econ_file,
user=self.user,
)
def _log_mech_econ_creation(self):
"""Save log about salary creation."""
report_name = '{}-{}'.format(
self.mech_econ_data['year'],
self.mech_econ_data['month'],
)
LOGGER.warning(
f"User '{self.user.login}' create mechanic econom.: {report_name}"
)
def _visualise_one_day_cost(self):
"""Visualise cost of one day by each machine."""
year = self._chose_year()
data_by_year = super().give_dataframe_by_year(year)
data_for_plot = {
'mach': [],
'day_cost': [],
}
for mach in super().maint_dict['mach_name']:
            # Note: costs are summed over the whole economics file, while the
            # number of work days comes from the year chosen above.
            total_cost = sum(self.mech_econ_file[mach])
            number_of_wdays = sum(data_by_year.work)
            day_cost = round(total_cost / number_of_wdays, 0)
data_for_plot['mach'].append(mach)
data_for_plot['day_cost'].append(day_cost)
data_for_plot = pd.DataFrame(data_for_plot)
self._create_one_day_cost_plot(data_for_plot)
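    # Worked example (hypothetical numbers): if a machine cost 1_200_000 RUB in
    # total and the chosen year has 240 recorded work days, the plotted value is
    # round(1_200_000 / 240, 0) == 5000.0 RUB per day.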
def _input_machines_econ(self, mech_econ_date):
"""Input money, spent for machine in month."""
self.mech_econ_data['year'] = mech_econ_date['year']
self.mech_econ_data['month'] = mech_econ_date['month']
super().clear_screen()
print("Введите сумму для каждой техники:")
for mach in super().maint_dict['mach_name']:
self.mech_econ_data[mach] = float(input(f"{mach}: "))
save = input(
"\nДанные введены."
"\n[s] - сохранить данные: "
)
if save.lower() == 's':
self._save_mech_econom()
print("Данные сохранены.")
else:
print("Вы отменили сохранение.")
input("\n[ENTER] - выйти.")
def _visualise_statistic(self, year):
"""Visualise statistic."""
mech_econ_year = self.mech_econ_file.year == year
data_by_year = (
self.mech_econ_file[mech_econ_year]
.sort_values(by=['month'])
)
super().print_all_dataframe(data_by_year)
input("\n[ENTER] - выйти.")
def _chose_year(self):
"""Show statistic about drill instrument."""
print("[ENTER] - выход"
"\nВыберете год:")
year = super().choise_from_list(
sorted(set(self.mech_econ_file.year)),
none_option=True
)
if year:
return year
else:
raise MainMenu
@BasF_S.set_plotter_parametrs
def _create_one_day_cost_plot(self, dataframe):
"""Create one day cost plot."""
figure = plt.figure()
x_cost = list(range(len(super().maint_dict['mach_name'])))
axle = figure.add_subplot(111)
axle.bar(
x_cost, dataframe.day_cost, 0.3, alpha=0.4, color='r',
label='Коэффициент', tick_label=dataframe.mach
)
axle.tick_params(labelrotation=90)
axle.set_title(
"Коэффициент целесообразности содержания техники руб/час. ",
fontsize="x-large")
axle.set_ylabel('руб.')
axle.legend()
axle.grid(
True, linestyle='--', which='major',
color='grey', alpha=.25, axis='y'
)
figure.tight_layout()
plt.show()
def create_mech_econom(self):
"""Create mechanic econom data report."""
mech_econ_date = self.input_date()
check = super().check_date_in_dataframe(
self.mech_econ_file,
mech_econ_date
)
if check:
print("Данные за этот месяц уже внесены.")
input("\n[ENTER] - выйти.")
else:
self._input_machines_econ(mech_econ_date)
def show_econ_statistic(self, stat_variants: Dict):
"""Show machine economic statistic."""
stat_variants = {
'Целесообразность затрат на содержание техники.':
self._visualise_one_day_cost,
}
print("[ENTER] - выйти."
"\nВыберете вид отчета:")
stat = super().choise_from_list(stat_variants, none_option=True)
if stat:
stat_variants[stat]()
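# Hedged sketch (added, not part of the original class): the "one day cost"
# plotted above is the total money recorded for a machine divided by the number
# of worked days taken from the yearly work log, rounded to whole roubles.
def _example_day_cost(total_cost, worked_days):
    """Return the daily upkeep coefficient as computed in _visualise_one_day_cost."""
    return round(total_cost / worked_days, 0)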
|
import numpy as np
from multiagent.core import World, Agent, Landmark
from multiagent.scenario import BaseScenario
class Scenario(BaseScenario):
def make_world(self, dim_c=3):
world = World()
# set any world properties first
world.dim_c = dim_c
num_landmarks = 3
# add agents
world.agents = [Agent() for i in range(2)]
for i, agent in enumerate(world.agents):
agent.name = 'agent %d' % i
agent.collide = False
agent.size = 0.075
# speaker
world.agents[0].movable = False
# listener
world.agents[1].silent = True
# add landmarks
world.landmarks = [Landmark() for i in range(num_landmarks)]
for i, landmark in enumerate(world.landmarks):
landmark.name = 'landmark %d' % i
landmark.collide = False
landmark.movable = False
landmark.size = 0.04
# make initial conditions
self.reset_world(world)
return world
def reset_world(self, world):
# assign goals to agents
for agent in world.agents:
agent.goal_a = None
agent.goal_b = None
# want listener to go to the goal landmark
world.agents[0].goal_a = world.agents[1]
world.agents[0].goal_b = np.random.choice(world.landmarks)
# random properties for agents
for i, agent in enumerate(world.agents):
agent.color = np.array([0.25,0.25,0.25])
# random properties for landmarks
world.landmarks[0].color = np.array([0.65,0.15,0.15])
world.landmarks[1].color = np.array([0.15,0.65,0.15])
world.landmarks[2].color = np.array([0.15,0.15,0.65])
# special colors for goals
world.agents[0].goal_a.color = world.agents[0].goal_b.color + np.array([0.45, 0.45, 0.45])
# set random initial states
for agent in world.agents:
agent.state.p_pos = np.random.uniform(-1,+1, world.dim_p)
agent.state.p_vel = np.zeros(world.dim_p)
agent.state.c = np.zeros(world.dim_c)
for i, landmark in enumerate(world.landmarks):
landmark.state.p_pos = np.random.uniform(-1,+1, world.dim_p)
landmark.state.p_vel = np.zeros(world.dim_p)
def benchmark_data(self, agent, world):
# returns data for benchmarking purposes
return self.reward(agent, world)
def reward(self, agent, world):
# squared distance from listener to landmark
a = world.agents[0]
dist2 = np.sum(np.square(a.goal_a.state.p_pos - a.goal_b.state.p_pos))
return -dist2
def observation(self, agent, world):
# goal color
goal_color = np.zeros(world.dim_color)
if agent.goal_b is not None:
goal_color = agent.goal_b.color
# get positions of all entities in this agent's reference frame
entity_pos = []
for entity in world.landmarks:
entity_pos.append(entity.state.p_pos - agent.state.p_pos)
# communication of all other agents
comm = []
for other in world.agents:
if other is agent or (other.state.c is None): continue
comm.append(other.state.c)
# speaker
if not agent.movable:
return np.concatenate([goal_color])
# listener
if agent.silent:
return np.concatenate([agent.state.p_vel] + entity_pos + comm)
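# Hedged sketch (added): expected lengths of the observation vectors built above,
# assuming the multiagent-particle defaults dim_p = 2 and dim_color = 3, the
# dim_c = 3 set in make_world(), and the 3 landmarks created there.
def _example_obs_sizes(dim_p=2, dim_color=3, dim_c=3, num_landmarks=3):
    """Return (speaker_obs_len, listener_obs_len) for Scenario.observation."""
    speaker = dim_color                               # goal colour only
    listener = dim_p + num_landmarks * dim_p + dim_c  # velocity + landmarks + comm
    return speaker, listener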
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2022 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Enrico Faulhaber <enrico.faulhaber@frm2.tum.de>
#
# *****************************************************************************
"""Special widgets for the SANS1 statusmonitor."""
from nicos.core.status import BUSY, DISABLED, ERROR, NOTREACHED, OK, UNKNOWN, \
WARN
from nicos.guisupport.qt import QBrush, QColor, QLineF, QPainter, QPen, \
QRectF, QSize, Qt, QTextOption, QWidget
from nicos.guisupport.widget import NicosWidget, PropDef
_magenta = QBrush(QColor('#A12F86'))
_yellow = QBrush(QColor('yellow'))
_white = QBrush(QColor('white'))
_grey = QBrush(QColor('lightgrey'))
_black = QBrush(QColor('black'))
_blue = QBrush(QColor('blue'))
_red = QBrush(QColor('red'))
_olive = QBrush(QColor('olive'))
_orange = QBrush(QColor('#ffa500'))
statusbrush = {
BUSY: _yellow,
WARN: _orange,
ERROR: _red,
NOTREACHED: _red,
DISABLED: _white,
OK: _white,
UNKNOWN: _olive,
}
class Tube2(NicosWidget, QWidget):
"""Sans1Tube with two detectors..."""
designer_description = 'SANS-1 tube with two detectors'
def __init__(self, parent, designMode=False):
# det1pos, det1shift, det1tilt, det2pos
self._curval = [0, 0, 0, 0]
self._curstr = ['', '', '', '']
self._curstatus = [OK, OK, OK, OK]
QWidget.__init__(self, parent)
NicosWidget.__init__(self)
devices = PropDef('devices', 'QStringList', [], 'position, shift and '
'tilt of det1, position of det2')
height = PropDef('height', int, 10, 'Widget height in characters')
width = PropDef('width', int, 30, 'Widget width in characters')
name = PropDef('name', str, '', 'Display name')
posscale = PropDef('posscale', float, 20000, 'Length of the tube')
color = PropDef('color', 'QColor', _magenta.color(), 'Color of the tube')
def sizeHint(self):
return QSize(round(self.props['width'] * self._scale) + 10,
round(self.props['height'] * self._scale) +
round(self.props['name'] and self._scale * 2.5 or 0) + 40)
def registerKeys(self):
for dev in self.props['devices']:
self.registerDevice(str(dev))
def on_devValueChange(self, dev, value, strvalue, unitvalue, expired):
try:
idx = self.props['devices'].index(dev)
except ValueError:
return
self._curval[idx] = value
self._curstr[idx] = unitvalue
self.update()
def on_devStatusChange(self, dev, code, status, expired):
try:
idx = self.props['devices'].index(dev)
except ValueError:
return
self._curstatus[idx] = code
self.update()
def paintEvent(self, event):
painter = QPainter(self)
painter.setBrush(QBrush(self.color))
painter.setRenderHint(QPainter.Antialiasing)
fontscale = float(self._scale)
h = self.props['height'] * fontscale
w = self.props['width'] * fontscale
posscale = (w - 120) / self.props['posscale']
if self.props['name']:
painter.setFont(self.font())
painter.drawText(QRectF(5, 0, w, fontscale * 2.5),
self.props['name'], QTextOption(Qt.AlignCenter))
yoff = fontscale * 2.5
else:
yoff = 0
painter.setPen(self.color)
painter.drawEllipse(QRectF(5, 5 + yoff, 50, h))
painter.drawRect(QRectF(30, 5 + yoff, w - 50, h))
painter.setPen(QColor('black'))
painter.drawArc(QRectF(5, 5 + yoff, 50, h), 1440, 2880)
painter.drawLine(QLineF(30, 5 + yoff, w - 25, 5 + yoff))
painter.drawLine(QLineF(30, 5 + yoff + h, w - 25, 5 + yoff + h))
painter.drawEllipse(QRectF(w - 45, 5 + yoff, 50, h))
# draw Detector 1
minx = 0
pos_val = self._curval[0]
if pos_val is not None:
pos_status = self._curstatus[0]
pos_str = self._curstr[0]
shift_val = self._curval[1]
shift_status = self._curstatus[1]
shift_str = self._curstr[1]
if shift_val > 0:
shift_str += ' ↓'
elif shift_val < 0:
shift_str += ' ↑'
# Not used at the moment, prepared for later use
tilt_val = self._curval[2]
tilt_status = self._curstatus[2]
tilt_str = self._curstr[2]
if tilt_str.endswith('deg'):
tilt_str = tilt_str[:-3] + '°'
stat = max(pos_status, shift_status, tilt_status)
painter.setBrush(statusbrush[stat])
# tf = QTransform()
# tf.rotate(tilt_val)
painter.resetTransform()
painter.translate(60 + pos_val * posscale + fontscale / 2.,
15 + yoff + shift_val * posscale + (h - 20) / 2.)
painter.rotate(-tilt_val)
painter.drawRect(QRectF(-fontscale / 2., - (h - 20) / 2., fontscale,
h - 20)) # XXX tilt ???
painter.resetTransform()
painter.setFont(self.valueFont)
painter.drawText(QRectF(60 + pos_val * posscale - 10.5 * fontscale,
-5 + yoff + h - fontscale, # + (shift_val - 4) * posscale,
9.5 * fontscale, 2 * fontscale),
tilt_str, QTextOption(Qt.AlignRight))
painter.drawText(QRectF(60 + pos_val * posscale - 6.5 * fontscale,
yoff + fontscale, # + (shift_val - 4) * posscale,
9.5 * fontscale, 2 * fontscale),
shift_str, QTextOption(Qt.AlignLeft))
minx = max(minx, 60 + pos_val * posscale + 5 - 4 * fontscale)
painter.drawText(QRectF(minx, h + 10 + yoff, 8 * fontscale, 30),
pos_str, QTextOption(Qt.AlignCenter))
minx = minx + 8 * fontscale
# # draw Detector 2
# pos_val = self._curval[3]
# if pos_val is not None:
# pos_status = self._curstatus[3]
# pos_str = self._curstr[3]
#
# painter.setBrush(statusbrush[pos_status])
# painter.drawRect(60 + pos_val * posscale, 15 + yoff,
# fontscale, h - 20 - 5 * posscale)
# painter.setFont(self.valueFont)
# minx = max(minx, 65 + pos_val * posscale - 4 * fontscale)
# painter.drawText(minx, h + 10 + yoff,
# 8 * fontscale, 30, Qt.AlignCenter, pos_str)
# minx = minx + 8 * fontscale
class BeamOption(NicosWidget, QWidget):
designer_description = 'SANS-1 beam option'
def __init__(self, parent, designMode=False):
self._curstr = ''
self._curstatus = OK
self._fixed = ''
QWidget.__init__(self, parent)
NicosWidget.__init__(self)
dev = PropDef('dev', str, '', 'NICOS device name')
height = PropDef('height', int, 4, 'Widget height in characters')
width = PropDef('width', int, 10, 'Widget width in characters')
name = PropDef('name', str, '', 'Display name')
def sizeHint(self):
return QSize(round(self.props['width'] * self._scale),
round(self.props['height'] * self._scale) +
round(self.props['name'] and self._scale * 2.5 or 0))
def registerKeys(self):
self.registerDevice(self.props['dev'])
def on_devValueChange(self, dev, value, strvalue, unitvalue, expired):
self._curstr = unitvalue
self.update()
def on_devMetaChange(self, dev, fmtstr, unit, fixed):
self._fixed = fixed
self.update()
def on_devStatusChange(self, dev, code, status, expired):
self._curstatus = code
self.update()
def paintEvent(self, event):
painter = QPainter(self)
painter.setBrush(_magenta)
painter.setRenderHint(QPainter.Antialiasing)
w = self.props['width'] * self._scale
h = self.props['height'] * self._scale
if self.props['name']:
painter.setFont(self.font())
painter.drawText(QRectF(0, 0, w, self._scale * 2.5),
self.props['name'], QTextOption(Qt.AlignCenter))
yoff = self._scale * 2.5
else:
yoff = 0
painter.setBrush(statusbrush[self._curstatus])
painter.drawRect(QRectF(2, 2 + yoff, w - 4, h - 4))
painter.setFont(self.valueFont)
painter.drawText(QRectF(2, 2 + yoff, w - 4, h - 4),
self._curstr, QTextOption(Qt.AlignCenter))
class CollimatorTable(NicosWidget, QWidget):
"""Displays a list of 'beam options' as a vertical stack.
Options are displayed as vertical stack of named elements drawn on top
of a centered blue line ('the beam').
If the device value is in 'options', the corresponding element is drawn
on top of 'the beam' by moving the whole stack vertically.
If the device value is in 'disabled_options', the whole
stack of options is vertically shifted 'out of beam'.
Other values are ignored as they are considered temporary
(while moving an option).
If the device state happens to be in error, the name label is
displayed in red to indicate the error.
"""
designer_description = 'SANS-1 collimator table'
def __init__(self, parent, designMode=False):
self._curstr = ''
self._curstatus = OK
self._fixed = ''
self.shift = -1
QWidget.__init__(self, parent)
NicosWidget.__init__(self)
dev = PropDef('dev', str, '', 'NICOS device name of a switcher')
options = PropDef('options', 'QStringList', [], 'list of valid switcher-'
'values to display in top-down order (first element '
'will be displayed on top location)')
disabled_options = PropDef('disabled_options', 'QStringList', [],
'list of valid switcher values for which '
'all options are displayed out-of-beam')
height = PropDef('height', int, 4, 'Widget height in characters')
width = PropDef('width', int, 10, 'Widget width in characters')
name = PropDef('name', str, '', 'Display name')
def registerKeys(self):
self.registerDevice(self.props['dev'])
def sizeHint(self):
return QSize(round(self._scale * self.props['width']),
round(self._scale * 2.5 * self.props['height']) +
round(self.props['name'] and 2.5 * self._scale or 0))
def on_devValueChange(self, dev, value, strvalue, unitvalue, expired):
self._curstr = strvalue
self.update()
def on_devMetaChange(self, dev, fmtstr, unit, fixed):
self._fixed = fixed
self.update()
def on_devStatusChange(self, dev, code, status, expired):
self._curstatus = code
self.update()
def paintEvent(self, event):
painter = QPainter(self)
painter.setRenderHint(QPainter.Antialiasing)
h = self._scale * 2.5 * self.props['height']
w = self._scale * self.props['width']
# cache pen
pen = painter.pen()
if self.props['name']:
painter.setFont(self.font())
if self._curstatus != OK:
painter.fillRect(QRectF(0, 0, w, self._scale * 2.5),
statusbrush[self._curstatus])
if self._fixed:
painter.setPen(QPen(_blue.color()))
else:
painter.setPen(QPen(_black.color()))
painter.drawText(QRectF(0, 0, w, self._scale * 2.5),
self.props['name'], QTextOption(Qt.AlignCenter))
painter.setPen(pen)
yoff = self._scale * 2.5
else:
yoff = 0
painter.setPen(QPen(_blue.color()))
y = h * 0.5 + yoff
painter.drawLine(QLineF(0, y, w, y))
painter.drawLine(QLineF(0, y+1, w, y+1))
painter.drawLine(QLineF(0, y+2, w, y+2))
# reset pen
painter.setPen(pen)
painter.setBrush(statusbrush[self._curstatus])
if self._curstr in self.props['options']:
self.shift = self.props['options'].index(self._curstr)
if self._curstr in self.props['disabled_options']:
self.shift = len(self.props['options'])
painter.setFont(self.valueFont)
h0 = max(2 * self._scale, 2 * self._scale + 4)
painter.setClipRect(QRectF(0, yoff, w, h))
for i, t in enumerate(self.props['options']):
y = h * 0.5 + yoff + h0 * (self.shift - i - 0.45)
b = statusbrush[self._curstatus]
if t == self._curstr:
painter.setBrush(b)
else:
painter.setBrush(_grey if b == statusbrush[OK] else b)
painter.drawRect(QRectF(5, y + 2, w - 10, h0 - 4))
painter.drawText(QRectF(5, y + 2, w - 10, h0 - 4),
t, QTextOption(Qt.AlignCenter))
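# Hedged sketch (added): the vertical placement rule used in
# CollimatorTable.paintEvent above, expressed as a pure function. `shift` is the
# index of the currently selected option (or len(options) when a disabled option
# is selected), `h` the drawing height, `yoff` the title offset and `h0` the row
# height; option `i` sits on the beam line when i == shift.
def _example_option_y(h, yoff, h0, shift, i):
    """Return the y coordinate of the rectangle drawn for option ``i``."""
    return h * 0.5 + yoff + h0 * (shift - i - 0.45)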
|
#!/usr/bin/env python
"""Manager of worker subprocesses.
This module invokes the worker subprocesses that perform the cloud
security monitoring tasks. Each worker subprocess wraps around a cloud,
store, event, or alert plugin and executes the plugin in a separate
subprocess.
"""
import logging.config
import multiprocessing as mp
import time
import schedule
from cloudmarker import util, workers
# Define module-level logger.
_log = logging.getLogger(__name__)
def main():
"""Run the framework based on the schedule."""
args = util.parse_cli()
config = util.load_config(args.config)
logging.config.dictConfig(config['logger'])
# Run the audits according to the schedule set in the configuration if the
# 'force' flag is not set in the command line.
if args.force:
_log.info('Starting job now')
job(config)
else:
_log.info('Scheduled to run job every day at %s', config['schedule'])
schedule.every().day.at(config['schedule']).do(job, config)
while True:
schedule.run_pending()
time.sleep(60)
def job(config):
"""Run the audits.
Arguments:
config (dict): Configuration dictionary.
"""
# Create an audit object for each audit configured to be run.
audits = []
for audit_name in config['run']:
audits.append(Audit(audit_name, config))
# Start all audits.
for audit in audits:
audit.start()
# Wait for all audits to terminate.
for audit in audits:
audit.join()
class Audit:
"""Audit manager.
This class encapsulates a set of worker subprocesses and worker
input queues for a single audit configuration.
"""
def __init__(self, audit_name, config):
"""Create an instance of :class:`Audit` from configuration.
A single audit definition (from a list of audit definitions
under the ``audits`` key in the configuration) is instantiated.
Each audit definition contains lists of cloud plugins, store
plugins, event plugins, and alert plugins. These plugins are
instantiated and multiprocessing queues are set up to take
records from one plugin and feed them to another plugin as per
the audit workflow.
Arguments:
audit_name (str): Key name for an audit configuration. This
key is looked for in ``config['audits']``.
config (dict): Configuration dictionary. This is the
entire configuration dictionary that contains
top-level keys named ``clouds``, ``stores``, ``events``,
``alerts``, ``audits``, ``run``, etc.
"""
audit_config = config['audits'][audit_name]
# We keep all workers in these lists.
self._cloud_workers = []
self._store_workers = []
self._event_workers = []
self._alert_workers = []
# We keep all queues in these lists.
self._store_queues = []
self._event_queues = []
self._alert_queues = []
# Create alert workers and queues.
for name in audit_config['alerts']:
input_queue = mp.Queue()
args = (
audit_name + '-' + name,
util.load_plugin(config['alerts'][name]),
input_queue,
)
worker = mp.Process(target=workers.store_worker, args=args)
self._alert_workers.append(worker)
self._alert_queues.append(input_queue)
# Create event workers and queues.
for name in audit_config['events']:
input_queue = mp.Queue()
args = (
audit_name + '-' + name,
util.load_plugin(config['events'][name]),
input_queue,
self._alert_queues,
)
worker = mp.Process(target=workers.event_worker, args=args)
self._event_workers.append(worker)
self._event_queues.append(input_queue)
# Create store workers and queues.
for name in audit_config['stores']:
input_queue = mp.Queue()
args = (
audit_name + '-' + name,
util.load_plugin(config['stores'][name]),
input_queue,
)
worker = mp.Process(target=workers.store_worker, args=args)
self._store_workers.append(worker)
self._store_queues.append(input_queue)
# Create cloud workers.
for name in audit_config['clouds']:
args = (
audit_name + '-' + name,
util.load_plugin(config['clouds'][name]),
self._store_queues + self._event_queues
)
worker = mp.Process(target=workers.cloud_worker, args=args)
self._cloud_workers.append(worker)
def start(self):
"""Start audit by starting all workers."""
for w in (self._cloud_workers + self._store_workers +
self._event_workers + self._alert_workers):
w.start()
def join(self):
"""Wait until all workers terminate."""
# Wait for cloud workers to terminate.
for w in self._cloud_workers:
w.join()
# Stop store workers and event workers.
for q in self._store_queues + self._event_queues:
q.put(None)
# Wait for store workers and event workers to terminate.
for w in self._store_workers + self._event_workers:
w.join()
# Stop alert workers.
for q in self._alert_queues:
q.put(None)
# Wait for alert workers to terminate.
for w in self._alert_workers:
w.join()
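# Hedged sketch (added, illustrative names only): the configuration shape that
# Audit.__init__ above expects. The plugin definitions are placeholders for
# whatever util.load_plugin() accepts in this project; they are assumptions,
# not verified plugin paths.
_EXAMPLE_CONFIG = {
    'run': ['myaudit'],
    'audits': {
        'myaudit': {
            'clouds': ['mycloud'],
            'stores': ['mystore'],
            'events': ['myevent'],
            'alerts': ['myalert'],
        },
    },
    'clouds': {'mycloud': {}},   # each value is a plugin definition for util.load_plugin()
    'stores': {'mystore': {}},
    'events': {'myevent': {}},
    'alerts': {'myalert': {}},
}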
|
import os
import shutil
import pytest
from pyuplift.utils import retrieve_from_gz
data_home = os.path.join(os.sep.join(__file__.split(os.sep)[:-1]), 'data')
def test_retrieve_from_gz():
output_path = os.path.join(data_home, 'test.test')
archive_path = output_path + '.gz'
retrieve_from_gz(archive_path, output_path)
with open(output_path, 'r') as f:
text = f.read()
os.remove(output_path)
assert text == 'good'
|
#!/usr/bin/env python
# Recorder that records agent states as dataframes and also stores a carla recording, in synchronous mode
# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import glob
import os
import sys
import pandas as pd
from tqdm import tqdm
import math
CARLA_VERSION = "0.9.11"
try:
# sys.path.append("./libs/carla-0.9.9-py3.7-linux-x86_64.egg")
if CARLA_VERSION == "0.9.9":
sys.path.append("./libs/carla-0.9.9-py3.7-linux-x86_64.egg")
elif CARLA_VERSION == "0.9.11":
sys.path.append("./libs/carla-0.9.11-py3.7-linux-x86_64.egg")
except IndexError:
pass
import carla
import argparse
import random
import time
import logging
import click
import pathlib
import spawn
current_dir = pathlib.Path(__file__).parent.absolute()
SEED = 27
random.seed(SEED)
def get_metadata(actor, frame_id):
type_id = actor.type_id
def splitCarlaVec(vect):
return vect.x, vect.y, vect.z
id = actor.id
# clsname = ClientSideBoundingBoxes.get_class_name(actor)
tf = actor.get_transform()
roll, pitch, yaw = tf.rotation.roll, tf.rotation.pitch, tf.rotation.yaw
loc = actor.get_location()
pos_x, pos_y, pos_z = splitCarlaVec(loc)
try:
bbox3d = actor.bounding_box
bbox3d_offset_x, bbox3d_offset_y, bbox3d_offset_z = splitCarlaVec(
bbox3d.location
)
bbox3d_extent_x, bbox3d_extent_y, bbox3d_extent_z = splitCarlaVec(bbox3d.extent)
except AttributeError:
bbox3d_offset_x, bbox3d_offset_y, bbox3d_offset_z = None, None, None
bbox3d_extent_x, bbox3d_extent_y, bbox3d_extent_z = None, None, None
velocity_x, velocity_y, velocity_z = splitCarlaVec(actor.get_velocity())
acc_x, acc_y, acc_z = splitCarlaVec(actor.get_acceleration())
angular_vel_x, angular_vel_y, angular_vel_z = splitCarlaVec(
actor.get_angular_velocity()
)
try:
# need to do this because Carla's Actor object doesn't support getattr probing
traffic_light_state = actor.state.name
except AttributeError:
traffic_light_state = None
return (
frame_id,
id,
type_id,
pos_x,
pos_y,
pos_z,
roll,
pitch,
yaw,
velocity_x,
velocity_y,
velocity_z,
acc_x,
acc_y,
acc_z,
angular_vel_x,
angular_vel_y,
angular_vel_z,
bbox3d_offset_x,
bbox3d_offset_y,
bbox3d_offset_z,
bbox3d_extent_x,
bbox3d_extent_y,
bbox3d_extent_z,
traffic_light_state,
)
global_collision = False
def collision_detect_callback(event):
actor_we_collide_against = event.other_actor
impulse = event.normal_impulse
intensity = math.sqrt(impulse.x ** 2 + impulse.y ** 2 + impulse.z ** 2)
if "vehicle." in actor_we_collide_against.type_id:
global global_collision
global_collision = True
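# Note (added comment): the callback above flips the module-level
# `global_collision` flag. run() polls that flag every tick and, once a
# vehicle-to-vehicle impact is seen, shortens the session to a few more
# frames before writing the CSV.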
def attach_collision_sensor(actor, world):
blueprint_library = world.get_blueprint_library()
collision_sensor = world.spawn_actor(
blueprint_library.find("sensor.other.collision"),
carla.Transform(),
attach_to=actor,
)
collision_sensor.listen(lambda event: collision_detect_callback(event))
return collision_sensor
def run(
client,
round_name,
recording_dir,
speed_violation_prob=60,
tl_violation_prob=70,
perc_speed_diff=-30,
num_vehicles=25,
SESSION_DURATION=60,
):
safe = True # avoid spawning vehicles whose geometry is not ideal for carla
actor_list = []
sensors = []
vehicles_list = []
synchronous_master = False  # make sure the `finally` cleanup below never hits an unbound name
logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)
try:
FPS = 5
DELTA_T = 1 / FPS
world = client.get_world()
blueprints = world.get_blueprint_library().filter("vehicle.*")
traffic_manager = client.get_trafficmanager()
traffic_manager.set_global_distance_to_leading_vehicle(2.0)
if CARLA_VERSION == "0.9.11":
print("Using deterministic Traffic Manager")
traffic_manager.set_random_device_seed(SEED)
settings = client.get_world().get_settings()
if not settings.synchronous_mode:
traffic_manager.set_synchronous_mode(True)
synchronous_master = True
settings.synchronous_mode = True
settings.fixed_delta_seconds = DELTA_T
client.get_world().apply_settings(settings)
else:
synchronous_master = False
recording_dir_path = pathlib.Path(recording_dir)
recording_dir_path.mkdir(exist_ok=True)
session_recording = str(recording_dir_path / f"{round_name}.csv")
carla_session_recording = str(
recording_dir_path.absolute() / f"{round_name}_carla_recording"
)
print("Recording on file: %s" % client.start_recorder(carla_session_recording))
vehicles_list, walkers_list, all_actors = spawn.spawn(
client, world, num_vehicles, 0, safe
)
world.tick()
print("spawned %d vehicles, press Ctrl+C to exit." % len(actor_list))
# fmt: off
df_columns = [
"frame_id", "id", "type_id", "pos_x", "pos_y", "pos_z", "roll", "pitch", "yaw",
"velocity_x", "velocity_y", "velocity_z", "acc_x", "acc_y", "acc_z",
"angular_vel_x", "angular_vel_y", "angular_vel_z",
"bbox3d_offset_x", "bbox3d_offset_y", "bbox3d_offset_z",
"bbox3d_extent_x", "bbox3d_extent_y", "bbox3d_extent_z", "traffic_light_color",
]
# fmt: on
# get all non vehicle agents
global global_collision
global_collision = False
actors = world.get_actors()
for actor in actors:
if "vehicle." in actor.type_id:
sensors.append(attach_collision_sensor(actor, world))
non_vehicles = [
x
for x in actors
if ("vehicle" not in x.type_id and "traffic_light" not in x.type_id)
] # signs, traffic lights etc
frame_id = 0
df_arr = []
non_vehicle_arr = [get_metadata(actor, frame_id) for actor in non_vehicles]
df_arr += non_vehicle_arr
pbar = tqdm(total=FPS * SESSION_DURATION)
max_frames = FPS * SESSION_DURATION
collision_detected_once = False
while frame_id < max_frames:
if global_collision and not collision_detected_once:
# Todo, if detected, start a countdown of N frames and break only after N iterations
print("detected collision, exiting!")
collision_detected_once = True
max_frames = frame_id + 5
# continue
actors = world.get_actors()
for actor in actors:
if "vehicle." in actor.type_id:
# print(actor.type_id)
tm_port = traffic_manager.get_port()
actor.set_autopilot(True, tm_port)
traffic_manager.ignore_lights_percentage(actor, tl_violation_prob)
traffic_manager.distance_to_leading_vehicle(actor, 3)
if random.random() * 100 < speed_violation_prob:
traffic_manager.vehicle_percentage_speed_difference(
actor, perc_speed_diff
)
vehicles_and_lights = [
x
for x in actors
if "vehicle" in x.type_id or "traffic_light" in x.type_id
]
metadata_arr = [
get_metadata(actor, frame_id) for actor in vehicles_and_lights
]
df_arr += metadata_arr
frame_id += 1
pbar.update(1)
world.tick()
df = pd.DataFrame(df_arr, columns=df_columns)
pbar.close()
print(f"Saving CSV({len(df.frame_id.unique())} frames)")
# df.to_parquet(f"session_data.parquet")
df.to_csv(session_recording, index=False)
world.tick()
# if args.recorder_time > 0:
# time.sleep(args.recorder_time)
# else:
# while True:
# world.wait_for_tick()
# # time.sleep(0.1)
finally:
if synchronous_master:
settings = world.get_settings()
settings.synchronous_mode = False
settings.fixed_delta_seconds = None
world.apply_settings(settings)
print("\ndestroying %d actors" % (len(sensors) + len(vehicles_list)))
# all_agents = sensors + vehicles_list
for s in sensors:
s.destroy()
client.apply_batch_sync([carla.command.DestroyActor(x) for x in vehicles_list])
print("Stop recording")
client.stop_recorder()
@click.command()
@click.option(
"-s",
"--scenario_type",
type=click.Choice(["tl_sl", "nominal"], case_sensitive=False),
required=True,
)
@click.option("-n", "--num_rounds", default=100)
@click.option("--test", is_flag=True)
def main(scenario_type, num_rounds, test):
# print(scenario_type, test, num_rounds)
if test:
random.seed(72)
if scenario_type.lower() == "tl_sl":
SPEED_VIOLATION_PROB = 60
TL_VIOLATION_PROB = 70
PERC_SPEED_DIFF = -30
SCENARIO_NAME = "tl_sl"
# NUM_ROUNDS = 100
elif scenario_type.lower() == "nominal":
SPEED_VIOLATION_PROB = 0
TL_VIOLATION_PROB = 0
PERC_SPEED_DIFF = 0
SCENARIO_NAME = "nominal"
# NUM_ROUNDS = 200
NUM_ROUNDS = num_rounds
print(f"Recording {SCENARIO_NAME} data")
try:
host = "127.0.0.1" # IP of the host server (default: 127.0.0.1)
port = 2000 # TCP port to listen to (default: 2000)",
client = carla.Client(host, port)
if test:
scenario_dir = f"test_{SCENARIO_NAME}_recordings"
else:
scenario_dir = f"{SCENARIO_NAME}_recordings"
round_names = []
for i in range(NUM_ROUNDS):
run(
client,
f"{scenario_type}_round_{i}",
scenario_dir,
SPEED_VIOLATION_PROB,
TL_VIOLATION_PROB,
PERC_SPEED_DIFF,
)
round_names.append(f"{scenario_type}_round_{i}")
# client.reload_world()
except KeyboardInterrupt:
pass
finally:
print("\ndone.")
if __name__ == "__main__":
main()
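# Hedged usage note (added): with the click options defined above, a typical
# invocation looks like (the script filename is an assumption):
#
#   python record_rounds.py --scenario_type nominal --num_rounds 10 --test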
|
from system.core.model import Model
from flask import jsonify
class Lead(Model):
def __init__(self):
super(Lead, self).__init__()
def get_leads(self, name, early, late, page, sort, order):
query = 'SELECT * FROM leads'
data = {}
prev = False
if name != '':
query += ' WHERE CONCAT(first_name, " ", last_name) LIKE :name'
prev = True
data['name'] = '%' + name + '%'
if early != '':
if prev:
query += ' AND'
else: query += ' WHERE'
query += ' registered_datetime > :start'
prev = True
data['start'] = early
if late != '':
if prev:
query += ' AND'
else: query += ' WHERE'
query += ' registered_datetime < :stop'
data['stop'] = late
if sort != '':
sort = self.check_sort(sort)
query += ' ORDER BY ' + sort
if order != '':
if order == 'DESC':
query += ' DESC'
elif order == 'ASC':
query += ' ASC'
pages = self.db.query_db(query, data)
query += ' LIMIT :offset, 10'
data['offset'] = int(page)*10-10
results = self.db.query_db(query, data)
return jsonify({'people': results, 'pages': pages})
def check_sort(self, sort):
legal_vals = ['id', 'first_name', 'last_name', 'registered_datetime', 'email']
if sort not in legal_vals:
sort = 'registered_datetime'
return sort
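# Hedged illustration (added comment): with a name filter, both date bounds and
# a sort, get_leads() above assembles roughly
#
#   SELECT * FROM leads
#   WHERE CONCAT(first_name, " ", last_name) LIKE :name
#     AND registered_datetime > :start AND registered_datetime < :stop
#   ORDER BY first_name DESC
#   LIMIT :offset, 10
#
# with data = {'name': '%<name>%', 'start': ..., 'stop': ..., 'offset': (page-1)*10}.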
|
from django.conf import settings
from django.contrib.auth import get_user, views as auth_views
from django.contrib.auth.decorators import login_required
from django.core.files import File
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from django.utils import timezone
from .forms import ScoreForm
from .models import Score, Result
from . import voiceleading
from music21 import converter
import os.path
# Create your views here.
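# Hedged summary (added comment): index() below takes an uploaded score, parses
# it with music21.converter, runs every musical test configured on the Score via
# the voiceleading module, stores a pass/fail Result per test, writes an
# annotated "*_checked.xml" copy next to the upload and redirects to the
# results page.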
def index(request):
user = get_user(request)
if request.method == 'POST':
score_form = ScoreForm(request.POST, request.FILES)
new_score = score_form.save()
if user.is_authenticated:
new_score.user = user
new_score.score_display_name = os.path.basename(new_score.score.name)
new_score.save()
fname = str.format('{0}/{1}', settings.MEDIA_ROOT, new_score.score.url)
stream = converter.parse(fname)
end_height = 1
for musical_test in new_score.musical_tests.all():
musical_test_failures = getattr(voiceleading, musical_test.func)(
stream,
chordified_stream=stream.chordify(),
)
r = Result(score=new_score,musical_test=musical_test)
r.passed = (len(musical_test_failures) == 0)
r.save()
stream, end_height = voiceleading.annotate_stream(musical_test_failures, stream, end_height)
output_path = "{}_checked.xml".format(fname[:-4])
stream.write(
"musicxml", output_path
)
with open(output_path) as fp:
contents = File(fp)
new_score.checked_score.save(output_path, contents)
new_score.checked_score_display_name = f"{new_score.score_display_name[:-4]}_checked.xml"
new_score.save()
return HttpResponseRedirect(
reverse('harmony_checker:checked', args=(new_score.id,))
)
else:
score_form = ScoreForm()
return render(
request,
'harmony_checker/index.html',
{'score_form': score_form, 'user': user, 'title': "Check Harmony"}
)
def checked(request, score_id):
user = get_user(request)
score = get_object_or_404(Score, pk=score_id)
results = Result.objects.filter(score=score_id)
#generate checked score display name
return render(
request,
'harmony_checker/checked.html',
{
'score': score,
'results': results,
'user': user,
'title': 'Results'
}
)
def checked_score(request, score_id):
score = get_object_or_404(Score, pk=score_id)
response = HttpResponse(score.checked_score, content_type='application/xml')
response['Content-Disposition'] = f"attachment; filename={score.checked_score_display_name}"
return response
def score(request, score_id):
score = get_object_or_404(Score, pk=score_id)
response = HttpResponse(score.score, content_type='application/xml')
response['Content-Disposition'] = f"attachment; filename={score.score_display_name}"
return response
@login_required
def profile(request):
user = get_user(request)
scores = Score.objects.filter(user=user).order_by('-upload_date')
return render(
request,
'harmony_checker/profile.html',
{
'user': user,
'scores': scores,
'title': "User Profile"
}
)
|
import re
def test_repr(tracer, rpc_stub):
class A:
pass
tracer.start()
match = re.match("foo", "foobar")
a = A()
tracer.stop()
from utils import return_GetFrame
frame_proto = return_GetFrame(rpc_stub, "test_repr")
binding_match_event = frame_proto.events[0]
assert (
binding_match_event.binding.repr
== "<re.Match object; span=(0, 3), match='foo'>"
)
assert (
binding_match_event.binding.value
== '{"repr": "<re.Match object; span=(0, 3), match=\'foo\'>"}'
)
binding_a_event = frame_proto.events[2]
assert binding_a_event.binding.repr == "<test_to_json.test_repr.<locals>.A object>"
assert binding_a_event.binding.value == "{}"
|
# -*- coding: utf-8 -*-
#
# ======================================================================================================================
# Copyright (©) 2015-2019 LCS
# Laboratoire Catalyse et Spectrochimie, Caen, France.
# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT
# See full LICENSE agreement in the root directory
# ======================================================================================================================
"""
This module implements the |NDDataset| class.
"""
__all__ = ['NDDataset']
import textwrap
import warnings
import sys
import numpy as np
from traitlets import HasTraits, Instance, Bool, Float, validate, default, Dict, Union
from traittypes import Array
from spectrochempy.core.project.baseproject import AbstractProject
from spectrochempy.core.dataset.ndarray import NDArray, DEFAULT_DIM_NAME
from spectrochempy.core.dataset.ndcomplex import NDComplexArray
from spectrochempy.core.dataset.coord import Coord, LinearCoord
from spectrochempy.core.dataset.coordset import CoordSet
from spectrochempy.core.dataset.ndmath import NDMath, _set_ufuncs, _set_operators
from spectrochempy.core.dataset.ndio import NDIO
from spectrochempy.core.dataset.ndplot import NDPlot
from spectrochempy.core import error_, warning_
from spectrochempy.utils import (colored_output, SpectroChemPyException, SpectroChemPyWarning, MaskedConstant)
HAS_XARRAY = False
try:
import xarray as xr
HAS_XARRAY = True # pragma: no cover
except ImportError:
xr = None # pragma: no cover
# ======================================================================================================================
# NDDataset class definition
# ======================================================================================================================
class NDDataset(NDIO, NDPlot, NDMath, NDComplexArray):
# coordinates
_coordset = Instance(CoordSet, allow_none=True)
# model data (e.g., for fit)
_modeldata = Array(Float(), allow_none=True)
# some setting for NDDataset
_copy = Bool(False)
_labels_allowed = Bool(False) # no labels for NDDataset
# dataset can be members of a project.
# we use the abstract class to avoid circular imports.
_parent = Instance(AbstractProject, allow_none=True)
# For the GUI interface
# parameters state
_state = Dict()
# processed data (for GUI)
_processeddata = Array(Float(), allow_none=True)
# processed mask (for GUI)
_processedmask = Union((Bool(), Array(Bool()), Instance(MaskedConstant)))
# baseline data (for GUI)
_baselinedata = Array(Float(), allow_none=True)
# reference data (for GUI)
_referencedata = Array(Float(), allow_none=True)
# ------------------------------------------------------------------------------------------------------------------
# initialisation
# ------------------------------------------------------------------------------------------------------------------
# ..................................................................................................................
def __init__(self, data=None, coordset=None, coordunits=None, coordtitles=None, **kwargs):
"""
The main N-dimensional dataset class used by |scpy|.
The NDDataset is the main object used by SpectroChemPy. Like numpy ndarrays, NDDatasets can be
sliced, sorted and subjected to mathematical operations. In addition, an NDDataset may have units,
can be masked,
and each dimension can have coordinates, themselves with units. This makes NDDataset aware of unit compatibility,
e.g.,
for binary operations such as addition or subtraction, or during the application of mathematical operations.
Instead of, or in addition to, numerical data for coordinates, an NDDataset can also have labeled coordinates,
where labels can be different kinds of objects (strings, datetimes, numpy ndarrays or other NDDatasets, etc.).
Parameters
----------
data : array of floats
Data array contained in the object. The data can be a list, a tuple, a |ndarray|, a ndarray-like,
a |NDArray| or any subclass of |NDArray|. Any size or shape of data is accepted. If not given, an empty
|NDArray| will be created.
At initialisation the provided data will be cast to a numpy ndarray if necessary.
If a subclass of |NDArray| is passed which already contains some mask, labels, or units, these elements
will
be used to accordingly set those of the created object. If possible, the provided data will not be copied
for `data` input, but will be passed by reference, so you should make a copy of the `data` before passing
them if that's the desired behavior or set the `copy` argument to True.
coordset : An instance of |CoordSet|, optional
`coords` contains the coordinates for the different dimensions of the `data`. If `coords` is provided,
it must specify the `coord` and `labels` for all dimensions of the `data`.
Multiple `coord`'s can be specified in an |CoordSet| instance for each dimension.
coordunits : list, optional
A list of units corresponding to the dimensions in the order of the coordset.
coordtitles : list, optional
A list of titles corresponding to the dimensions in the order of the coordset.
**kwargs : dict
See other parameters.
Other Parameters
----------------
dtype : str or dtype, optional, default=np.float64
If specified, the data will be cast to this dtype, else the data will be cast to float64 or complex128.
dims : list of chars, optional
If specified, the list must have a length equal to the number of data dimensions (ndim) and the chars
must be
taken among x, y, z, u, v, w or t. If not specified, the dimension names are automatically attributed in
this order.
name : str, optional
A user friendly name for this object. If not given, the automatic `id` given at the object creation will be
used as a name.
labels : array of objects, optional
Labels for the `data`. Labels can be used only for 1D-datasets.
The labels array may have an additional dimension, meaning several series of labels for the same data.
The given array can be a list, a tuple, a |ndarray|, a ndarray-like, a |NDArray| or any subclass of
|NDArray|.
mask : array of bool or `NOMASK`, optional
Mask for the data. The mask array must have the same shape as the data. The given array can be a list,
a tuple, or a |ndarray|. Each value in the array must be `False` where the data are *valid* and True when
they are not (like in numpy masked arrays). If `data` is already a :class:`~numpy.ma.MaskedArray`, or any
array object (such as a |NDArray| or subclass of it), providing a `mask` here will cause the mask from the
masked array to be ignored.
units : |Unit| instance or str, optional
Units of the data. If data is a |Quantity| then `units` is set to the unit of the `data`; if a unit is also
explicitly provided an error is raised. Handling of units use the `pint <https://pint.readthedocs.org/>`_
package.
title : str, optional
The title of the dimension. It will later be used for instance for labelling plots of the data.
It is optional but recommended to give a title to each ndarray.
dlabel : str, optional
Alias of `title`.
meta : dict-like object, optional
Additional metadata for this object. Must be dict-like but no
further restriction is placed on meta.
author : str, optional
Name(s) of the author(s) of this dataset. By default, the name of the computer node where this dataset is
created.
description : str, optional
An optional description of the nd-dataset. A shorter alias is `desc`.
history : str, optional
A string to add to the object history.
copy : bool, optional
Perform a copy of the passed object. Default is False.
See Also
--------
Coord : Explicit coordinates object.
LinearCoord : Implicit coordinates objet.
CoordSet : Set of coordinates.
Notes
-----
The underlying array in a |NDDataset| object can be accessed through the `data` attribute, which will return
a conventional |ndarray|.
Examples
--------
Usage by an end-user
>>> from spectrochempy import *
>>> x = NDDataset([1, 2, 3])
>>> print(x.data) # doctest: +NORMALIZE_WHITESPACE
[ 1 2 3]
"""
super().__init__(data, **kwargs)
self._parent = None
# eventually set the coordinates with optional units and title
if isinstance(coordset, CoordSet):
self.set_coordset(**coordset)
else:
if coordset is None:
coordset = [None] * self.ndim
if coordunits is None:
coordunits = [None] * self.ndim
if coordtitles is None:
coordtitles = [None] * self.ndim
_coordset = []
for c, u, t in zip(coordset, coordunits, coordtitles):
if not isinstance(c, CoordSet):
if isinstance(c, LinearCoord):
coord = LinearCoord(c)
else:
coord = Coord(c)
if u is not None:
coord.units = u
if t is not None:
coord.title = t
else:
if u: # pragma: no cover
warning_('units have been set for a CoordSet, but this will be ignored '
'(units are only defined at the coordinate level)')
if t: # pragma: no cover
warning_('titles will be ignored as they are only defined at the coordinate level')
coord = c
_coordset.append(coord)
if _coordset and set(_coordset) != {Coord()}: # if they are no coordinates do nothing
self.set_coordset(*_coordset)
# ------------------------------------------------------------------------------------------------------------------
# special methods
# ------------------------------------------------------------------------------------------------------------------
# ..................................................................................................................
def __dir__(self):
# WARNING: be careful to keep the present order of the first three elements! Needed for save/load operations
return ['dims', 'coordset', 'data', 'name', 'title', 'mask', 'units', 'meta', 'preferences',
'author', 'description', 'history', 'date', 'modified', 'origin', 'roi', 'offset', 'transposed',
'modeldata', 'processeddata', 'baselinedata', 'referencedata', 'state'] + NDIO().__dir__()
# ..................................................................................................................
def __getitem__(self, items):
saveditems = items
# coordinate selection to test first
if isinstance(items, str):
try:
return self._coordset[items]
except Exception:
pass
# slicing
new, items = super().__getitem__(items, return_index=True)
if new is None:
return None
if self._coordset is not None:
names = self._coordset.names # all names of the current coordinates
new_coords = [None] * len(names)
for i, item in enumerate(items):
# get the corresponding dimension name in the dims list
name = self.dims[i]
# get the corresponding index in the coordinate's names list
idx = names.index(name)
if self._coordset[idx].is_empty:
new_coords[idx] = Coord(None, name=name)
elif isinstance(item, slice):
# add the slice on the corresponding coordinates on the dim to the new list of coordinates
if not isinstance(self._coordset[idx], CoordSet):
new_coords[idx] = self._coordset[idx][item]
else:
# we must slice all internal coordinates
newc = []
for c in self._coordset[idx]:
newc.append(c[item])
new_coords[idx] = CoordSet(*newc[::-1], name=name) # we reverse to be sure
# the order will be # kept for internal coordinates
new_coords[idx]._default = self._coordset[idx]._default # set the same default coord
new_coords[idx]._is_same_dim = self._coordset[idx]._is_same_dim
elif isinstance(item, (np.ndarray, list)):
new_coords[idx] = self._coordset[idx][item]
new.set_coordset(*new_coords, keepnames=True)
new.history = f'Slice extracted: ({saveditems})'
return new
# ..................................................................................................................
def __getattr__(self, item):
# when the attribute was not found
if item in ["__numpy_ufunc__", "interface", '_pytestfixturefunction', '__dataclass_fields__',
'_ipython_canary_method_should_not_exist_', '_baseclass', '_fill_value', '_ax_lines', '_axcb',
'clevels', '__wrapped__', 'coords', '__await__',
'__aiter__'] or '_validate' in item or '_changed' in item:
# raise an error so that traits, ipython operation and more ... will be handled correctly
raise AttributeError
# syntax such as ds.x, ds.y, etc...
if item[0] in self.dims or self._coordset:
# look also properties
attribute = None
index = 0
# print(item)
if len(item) > 2 and item[1] == '_':
attribute = item[1:]
item = item[0]
index = self.dims.index(item)
if self._coordset:
try:
c = self._coordset[item]
if isinstance(c, str) and c in self.dims:
# probably a reference to another coordinate name
c = self._coordset[c]
if c.name in self.dims or c._parent_dim in self.dims:
if attribute is not None:
# get the attribute
return getattr(c, attribute)
else:
return c
else:
raise AttributeError
except Exception as err:
if item in self.dims:
return None
else:
raise err
elif attribute is not None:
if attribute == 'size':
# we want the size but there is no coords, get it from the data shape
return self.shape[index]
else:
raise AttributeError(f'Can not find `{attribute}` when no coordinate is defined')
return None
raise AttributeError
def __setattr__(self, key, value):
if key in DEFAULT_DIM_NAME: # syntax such as ds.x, ds.y, etc...
# Note the above test is important to avoid errors with traitlets
# even if it looks redundant with the following
if key in self.dims:
if self._coordset is None:
# we need to create a coordset first
self.set_coordset(dict((self.dims[i], None) for i in range(self.ndim)))
idx = self._coordset.names.index(key)
_coordset = self._coordset
listcoord = False
if isinstance(value, list):
listcoord = all([isinstance(item, Coord) for item in value])
if listcoord:
_coordset[idx] = list(CoordSet(value).to_dict().values())[0]
_coordset[idx].name = key
_coordset[idx]._is_same_dim = True
elif isinstance(value, CoordSet):
if len(value) > 1:
value = CoordSet(value)
_coordset[idx] = list(value.to_dict().values())[0]
_coordset[idx].name = key
_coordset[idx]._is_same_dim = True
elif isinstance(value, (Coord, LinearCoord)):
value.name = key
_coordset[idx] = value
else:
_coordset[idx] = Coord(value, name=key)
_coordset = self._valid_coordset(_coordset)
self._coordset.set(_coordset)
else:
raise AttributeError(f'Coordinate `{key}` is not used.')
else:
super().__setattr__(key, value)
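# Hedged illustration (added comment): the attribute syntax implemented by
# __getattr__/__setattr__ above, assuming a dataset whose dims include 'x':
#
#   ds.x = Coord(np.arange(3), title='wavenumber')   # routed through set_coordset
#   ds.x          # returns that Coord
#   ds.x_size     # returns the coordinate's `size` via the `attribute` branch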
# ..................................................................................................................
def __eq__(self, other, attrs=None):
attrs = self.__dir__()
for attr in (
'filename', 'preferences', 'name', 'description', 'history', 'date', 'modified', 'origin',
'show_datapoints', 'roi', 'offset', 'modeldata', 'processeddata', 'baselinedata', 'referencedata',
'state'):
# these attributes are not used for comparison (comparison is based on data and units!)
try:
attrs.remove(attr)
except ValueError:
pass
return super().__eq__(other, attrs)
# ..................................................................................................................
def __hash__(self):
# all instances of this class use the same hashing scheme, so they can be compared
return super().__hash__() + hash(self._coordset)
# ------------------------------------------------------------------------------------------------------------------
# Default values
# ------------------------------------------------------------------------------------------------------------------
# ..................................................................................................................
@default('_coordset')
def _coordset_default(self):
return None
# ..................................................................................................................
@default('_modeldata')
def _modeldata_default(self):
return None
# ..................................................................................................................
@default('_processeddata')
def _processeddata_default(self):
return None
# ..................................................................................................................
@default('_baselinedata')
def _baselinedata_default(self):
return None
# ..................................................................................................................
@default('_referencedata')
def _referencedata_default(self):
return None
# ------------------------------------------------------------------------------------------------------------------
# GUI options
# ------------------------------------------------------------------------------------------------------------------
# TODO: refactor the spectrochempy preference system to have a common basis
@property
def state(self):
# state of the controller window for this dataset
return self._state
@state.setter
def state(self, val):
self._state = val
@property
def processeddata(self):
return self._processeddata
@processeddata.setter
def processeddata(self, val):
self._processeddata = val
@property
def processedmask(self):
return self._processedmask
@processedmask.setter
def processedmask(self, val):
self._processedmask = val
@property
def baselinedata(self):
return self._baselinedata
@baselinedata.setter
def baselinedata(self, val):
self._baselinedata = val
@property
def referencedata(self):
return self._referencedata
@referencedata.setter
def referencedata(self, val):
self._referencedata = val
# ------------------------------------------------------------------------------------------------------------------
# Validators
# ------------------------------------------------------------------------------------------------------------------
# ..................................................................................................................
@validate('_coordset')
def _coordset_validate(self, proposal):
coords = proposal['value']
return self._valid_coordset(coords)
def _valid_coordset(self, coords):
# used in _coordset_validate and __setattr__
if coords is None:
return
for k, coord in enumerate(coords):
if coord is not None and not isinstance(coord, CoordSet) and coord.data is None:
continue
# For coord to be acceptable, we require at least a NDArray, a NDArray subclass or a CoordSet
if not isinstance(coord, (LinearCoord, Coord, CoordSet)):
if isinstance(coord, NDArray):
coord = coords[k] = Coord(coord)
else:
raise TypeError('Coordinates must be an instance or a subclass of Coord class or NDArray, or of '
f' CoordSet class, but an instance of {type(coord)} has been passed')
if self.dims and coord.name in self.dims:
# check the validity of the given coordinates in terms of size (if it corresponds to one of the dims)
size = coord.size
if self.implements('NDDataset'):
idx = self._get_dims_index(coord.name)[0] # idx in self.dims
if size != self._data.shape[idx]:
raise ValueError(f'the size of a coordinates array must be None or be equal'
f' to that of the respective `{coord.name}`'
f' data dimension but coordinate size={size} != data shape[{idx}]='
f'{self._data.shape[idx]}')
else:
pass # bypass this checking for any other derived type (should be done in the subclass)
coords._parent = self
return coords
# ..................................................................................................................
@property
def _dict_dims(self):
_dict = {}
for index, dim in enumerate(self.dims):
if dim not in _dict:
_dict[dim] = {'size': self.shape[index], 'coord': getattr(self, dim)}
return _dict
# ------------------------------------------------------------------------------------------------------------------
# public methods
# ------------------------------------------------------------------------------------------------------------------
# ..................................................................................................................
def add_coordset(self, *coords, dims=None, **kwargs):
"""
Add one or a set of coordinates from a dataset.
Parameters
----------
*coords : iterable
Coordinates object(s).
dims : list
Name of the coordinates.
**kwargs : dict
Keywords passed to the coordset.
"""
if not coords and not kwargs:
# reset coordinates
self._coordset = None
return
if self._coordset is None:
# make the whole coordset at once
self._coordset = CoordSet(*coords, dims=dims, **kwargs)
else:
# add one coordinate
self._coordset._append(*coords, **kwargs)
if self._coordset:
# set a notifier to the updated traits of the CoordSet instance
HasTraits.observe(self._coordset, self._dims_update, '_updated')
# force it one time after this initialization
self._coordset._updated = True
# ..................................................................................................................
def coord(self, dim='x'):
"""
Return the coordinates along the given dimension.
Parameters
----------
dim : int or str
A dimension index or name, default index = `x`.
If an integer is provided, it is equivalent to the `axis` parameter for numpy array.
Returns
-------
|Coord|
Coordinates along the given axis.
"""
idx = self._get_dims_index(dim)[0] # should generate an error if the
# dimension name is not recognized
if idx is None:
return None
if self._coordset is None:
return None
# idx is not necessarily the position of the coordinates in the CoordSet
# indeed, transposition may have taken place. So we need to retrieve the coordinates by its name
name = self.dims[idx]
if name in self._coordset.names:
idx = self._coordset.names.index(name)
return self._coordset[idx]
else:
error_(f'could not find this dimension name: `{name}`')
return None
# ..................................................................................................................
@property
def coordset(self):
"""
|CoordSet| instance.
Contains the coordinates of the various dimensions of the dataset.
It is a readonly property. Use set_coordset to change one or more coordinates at once.
"""
if self._coordset and all(c.is_empty for c in self._coordset):
# all coordinates are empty, this is equivalent to None for the coordset
return None
return self._coordset
# ..................................................................................................................
@coordset.setter
def coordset(self, coords):
if isinstance(coords, CoordSet):
self.set_coordset(**coords)
else:
self.set_coordset(coords)
# ..................................................................................................................
@property
def coordnames(self):
"""
List of the |Coord| names.
Read only property.
"""
if self._coordset is not None:
return self._coordset.names
# ..................................................................................................................
@property
def coordtitles(self):
"""
List of the |Coord| titles.
Read only property. Use set_coordtitles to set titles if needed.
"""
if self._coordset is not None:
return self._coordset.titles
# ..................................................................................................................
@property
def coordunits(self):
"""
List of the |Coord| units.
Read only property. Use set_coordunits to set units if needed.
"""
if self._coordset is not None:
return self._coordset.units
# ..................................................................................................................
@property
def data(self):
"""
The ``data`` array.
If there is no data but labels, then the labels are returned instead of data.
"""
return super().data
# ..................................................................................................................
@data.setter
def data(self, data):
# as we can't write super().data = data, we call _set_data
# see comment in the data.setter of NDArray
super()._set_data(data)
# ..................................................................................................................
def delete_coordset(self):
"""
Delete all coordinate settings.
"""
self._coordset = None
# ..................................................................................................................
def implements(self, name=None):
"""
Check if the current object implements `NDDataset`.
Rather than isinstance(obj, NDDataset) use object.implements('NDDataset').
This is useful for checking the type without importing the module.
Parameters
----------
name : str
Name of the object class. If None, the function returns the class name.
If name is given, it checks whether it corresponds to the current class name.
Returns
-------
str or bool
If name is given, a bool is returned
If name is None, the classname is returned
Examples
--------
>>> from spectrochempy import NDDataset, Coord
>>> co = Coord([1., 2., 3.])
>>> co.implements('NDDataset')
False
>>> co.implements('Coord')
True
>>> ds = NDDataset([1., 2., 3.])
>>> ds.implements()
'NDDataset'
"""
if name is None:
return 'NDDataset'
else:
return name == 'NDDataset'
# ..................................................................................................................
@property
def labels(self):
# not valid for NDDataset
# There is no label for nd-dataset
raise NotImplementedError # pragma: no cover
# ..................................................................................................................
@property
def modeldata(self):
"""
|ndarray| - models data.
Data possibly generated by modelling of the data.
"""
return self._modeldata
# ..................................................................................................................
@modeldata.setter
def modeldata(self, data):
self._modeldata = data
# ..................................................................................................................
@property
def parent(self):
"""
|Project| instance
The parent project of the dataset.
"""
return self._parent
# ..................................................................................................................
@parent.setter
def parent(self, value):
if self._parent is not None:
# A parent project already exists for this dataset, but the
# entered value gives a different parent. This is not allowed,
# as it can produce unpredictable results. We will first remove the dataset
# from the current project.
self._parent.remove_dataset(self.name)
self._parent = value
# ..................................................................................................................
def set_coordset(self, *args, **kwargs):
"""
Set one or more coordinates at once.
Warnings
--------
This method replaces all existing coordinates.
See Also
--------
add_coordset, set_coordtitles, set_coordunits
"""
self._coordset = None
self.add_coordset(*args, dims=self.dims, **kwargs)
# ..................................................................................................................
def set_coordtitles(self, *args, **kwargs):
"""
Set the titles of one or more coordinates.
"""
self._coordset.set_titles(*args, **kwargs)
# ..................................................................................................................
def set_coordunits(self, *args, **kwargs):
"""
Set the units of one or more coordinates.
"""
self._coordset.set_units(*args, **kwargs)
# ..................................................................................................................
def sort(self, **kwargs):
"""
Return the dataset sorted along a given dimension
(by default, the last dimension [axis=-1]) using the numeric or label values.
Parameters
----------
dim : str or int, optional, default=-1
Dimension index or name along which to sort.
pos : int, optional
If labels are multidimensional, allows sorting on a given
row of labels: labels[pos]. Experimental: not yet checked.
by : str among ['value', 'label'], optional, default=``value``
Indicates whether the sorting follows the order of labels or
of the numeric coordinate values.
descend : `bool`, optional, default=`False`
If True, the dataset is sorted in descending order. Default is False, except if coordinates
are reversed.
inplace : bool, optional, default=`False`
Flag to say that the method returns a new object (default)
or not (inplace=True).
Returns
-------
sorted_dataset
"""
inplace = kwargs.get('inplace', False)
if not inplace:
new = self.copy()
else:
new = self
# parameter for selecting the level of labels (default None or 0)
pos = kwargs.pop('pos', None)
# parameter to say if selection is done by values or by labels
by = kwargs.pop('by', 'value')
# determine which axis is sorted (dims or axis can be passed in kwargs)
# it will return a tuple with axis and dim
axis, dim = self.get_axis(**kwargs)
if axis is None:
axis, dim = self.get_axis(axis=0)
# get the corresponding coordinates (remember that their order can be different from the order
# of dimensions in dims, so we cannot just take the coord from the index)
coord = getattr(self, dim) # get the coordinate using the syntax such as self.x
descend = kwargs.pop('descend', None)
if descend is None:
# when not specified, default is False (except for reversed coordinates)
descend = coord.reversed
# import warnings
# warnings.simplefilter("error")
indexes = []
for i in range(self.ndim):
if i == axis:
if not coord.has_data:
# sometimes we have only labels for Coord objects;
# in this case, we sort on labels if they exist!
if coord.is_labeled:
by = 'label'
else:
# nothing to do for sorting
# return self itself
return self
args = coord._argsort(by=by, pos=pos, descend=descend)
setattr(new, dim, coord[args])
indexes.append(args)
else:
indexes.append(slice(None))
new._data = new._data[tuple(indexes)]
if new.is_masked:
new._mask = new._mask[tuple(indexes)]
return new
# ..................................................................................................................
def squeeze(self, *dims, inplace=False):
"""
Remove single-dimensional entries from the shape of a NDDataset.
Parameters
----------
dim : None or int or tuple of ints, optional
Selects a subset of the single-dimensional entries in the
shape. If a dimension (dim) is selected with shape entry greater than
one, an error is raised.
inplace : bool, optional, default=`False`
Flag to say that the method return a new object (default)
or not (inplace=True).
Returns
-------
squeezed
The input array, but with all or a subset of the
dimensions of length 1 removed.
Raises
------
ValueError
If `dim` is not `None`, and the dimension being squeezed is not
of length 1.
"""
# make a copy of the original dims
old = self.dims[:]
# squeeze the data and determine which axis must be squeezed
new, axis = super().squeeze(*dims, inplace=inplace, return_axis=True)
if axis is not None and new._coordset is not None:
# if there are coordinates they have to be squeezed as well (remove
# coordinate for the squeezed axis)
for i in axis:
dim = old[i]
del new._coordset[dim]
return new
def expand_dims(self, dim=None):
"""
Expand the shape of an array.
Insert a new axis that will appear at the `axis` position in the expanded array shape.
Parameters
----------
dim : int or str
Position in the expanded axes where the new axis (or axes) is placed.
Returns
-------
result : NDDataset
View of the dataset with the number of dimensions increased.
See Also
--------
squeeze : The inverse operation, removing singleton dimensions
""" # TODO
# ..................................................................................................................
def swapdims(self, dim1, dim2, inplace=False):
"""
Interchange two dimensions of a NDDataset.
Parameters
----------
dim1 : int
First axis.
dim2 : int
Second axis.
inplace : bool, optional, default=`False`
Flag to say that the method return a new object (default)
or not (inplace=True).
Returns
-------
swapped_dataset
See Also
--------
transpose
"""
new = super().swapdims(dim1, dim2, inplace=inplace)
new.history = f'Data swapped between dims {dim1} and {dim2}'
return new
# ..................................................................................................................
@property
def T(self):
"""
Transposed |NDDataset|.
The same object is returned if `ndim` is less than 2.
"""
return self.transpose()
# ..................................................................................................................
def take(self, indices, **kwargs):
"""
Take elements from the dataset along an axis.
Parameters
----------
indices : array_like
The indices of the values to extract.
**kwargs
Optional keyword arguments used to select the dimension (e.g. `dim` or `axis`).
Returns
-------
taken
A new dataset containing the selected elements.
"""
# handle the various syntax to pass the axis
dims = self._get_dims_from_args(**kwargs)
axis = self._get_dims_index(dims)
axis = axis[0] if axis else None
# indices = indices.tolist()
if axis is None:
# just do a fancy indexing
return self[indices]
if axis < 0:
axis = self.ndim + axis
index = tuple([...] + [indices] + [slice(None) for i in range(self.ndim - 1 - axis)])
new = self[index]
return new
def to_array(self):
"""
Return a numpy masked array (i.e., other NDDataset attributes are lost).
Examples
--------
>>> import spectrochempy as scp
>>> dataset = scp.read('wodger.spg')
>>> a = scp.to_array(dataset)
equivalent to:
>>> a = np.ma.array(dataset)
or
>>> a = dataset.masked_data
"""
return np.ma.array(self)
# ..................................................................................................................
def to_xarray(self, **kwargs):
"""
Convert an NDDataset instance to an `~xarray.DataArray` object
(the xarray library must be available).
Returns
-------
object : xarray.DataArray
The converted object.
"""
# Information about DataArray from the DataArray docstring
#
# Attributes
# ----------
# dims: tuple
# Dimension names associated with this array.
# values: np.ndarray
# Access or modify DataArray values as a numpy array.
# coords: dict-like
# Dictionary of DataArray objects that label values along each dimension.
# name: str or None
# Name of this array.
# attrs: OrderedDict
# Dictionary for holding arbitrary metadata.
# Init docstring
#
# Parameters
# ----------
# data: array_like
# Values for this array. Must be an ``numpy.ndarray``, ndarray like,
# or castable to an ``ndarray``.
# coords: sequence or dict of array_like objects, optional
# Coordinates (tick labels) to use for indexing along each dimension.
# If dict-like, should be a mapping from dimension names to the
# corresponding coordinates. If sequence-like, should be a sequence
# of tuples where the first element is the dimension name and the
# second element is the corresponding coordinate array_like object.
# dims: str or sequence of str, optional
# Name(s) of the data dimension(s). Must be either a string (only
# for 1D data) or a sequence of strings with length equal to the
# number of dimensions. If this argument is omitted, dimension names
# are taken from ``coords`` (if possible) and otherwise default to
# ``['dim_0', ... 'dim_n']``.
# name: str or None, optional
# Name of this array.
# attrs: dict_like or None, optional
# Attributes to assign to the new instance. By default, an empty
# attribute dictionary is initialized.
# encoding: dict_like or None, optional
# Dictionary specifying how to encode this array's data into a
# serialized format like netCDF4. Currently used keys (for netCDF)
# include '_FillValue', 'scale_factor', 'add_offset', 'dtype',
# 'units' and 'calendar' (the later two only for datetime arrays).
# Unrecognized keys are ignored.
if not HAS_XARRAY:
warnings.warn('Xarray is not available! This function cannot be used', SpectroChemPyWarning)
return None
x, y = self.x, self.y
tx = x.title
if y:
ty = y.title
da = xr.DataArray(np.array(self.data, dtype=np.float64), coords=[(ty, y.data), (tx, x.data)], )
da.attrs['units'] = self.units
else:
da = xr.DataArray(np.array(self.data, dtype=np.float64), coords=[(tx, x.data)], )
da.attrs['units'] = self.units
da.attrs['title'] = self.title
return da
# ..................................................................................................................
def transpose(self, *dims, inplace=False):
"""
Permute the dimensions of a NDDataset.
Parameters
----------
dims : sequence of dimension indexes or names, optional
By default, reverse the dimensions, otherwise permute the dimensions
according to the values given.
inplace : bool, optional, default=`False`
Flag to say that the method return a new object (default)
or not (inplace=True).
Returns
-------
transposed_array
See Also
--------
swapdims : Interchange two dimensions of a NDDataset.
"""
new = super().transpose(*dims, inplace=inplace)
new.history = f'Data transposed between dims: {dims}' if dims else ''
return new
# ------------------------------------------------------------------------------------------------------------------
# private methods
# ------------------------------------------------------------------------------------------------------------------
# ..................................................................................................................
def _cstr(self):
# Display the metadata of the object and partially the data
out = ''
out += ' name: {}\n'.format(self.name)
out += ' author: {}\n'.format(self.author)
out += ' created: {}\n'.format(self._date)
# out += ' modified: {}\n'.format(self._modified) if (self.modified - self.date).seconds > 1 else ''
wrapper1 = textwrap.TextWrapper(initial_indent='', subsequent_indent=' ' * 15, replace_whitespace=True,
width=self._text_width)
pars = self.description.strip().splitlines()
if pars:
out += ' description: '
desc = ''
if pars:
desc += '{}\n'.format(wrapper1.fill(pars[0]))
for par in pars[1:]:
desc += '{}\n'.format(textwrap.indent(par, ' ' * 15))
# the three escaped null characters are here to facilitate
# the generation of html outputs
desc = '\0\0\0{}\0\0\0\n'.format(desc.rstrip())
out += desc
if self._history:
pars = self.history
out += ' history: '
hist = ''
if pars:
hist += '{}\n'.format(wrapper1.fill(pars[0]))
for par in pars[1:]:
hist += '{}\n'.format(textwrap.indent(par, ' ' * 15))
# the three escaped null characters are here to facilitate
# the generation of html outputs
hist = '\0\0\0{}\0\0\0\n'.format(hist.rstrip())
out += hist
out += '{}\n'.format(self._str_value().rstrip())
out += '{}\n'.format(self._str_shape().rstrip()) if self._str_shape() else ''
out += '{}\n'.format(self._str_dims().rstrip())
if not out.endswith('\n'):
out += '\n'
out += '\n'
if not self._html_output:
return colored_output(out.rstrip())
else:
return out.rstrip()
# ..................................................................................................................
def _loc2index(self, loc, dim=-1):
# Return the index of a location (label or coordinates) along the dim
# This can work only if `coords` exists.
if self._coordset is None:
raise SpectroChemPyException('No coords have been defined. Slicing or selection'
' by location ({}) needs coords definition.'.format(loc))
coord = self.coord(dim)
return coord._loc2index(loc)
# ..................................................................................................................
def _str_dims(self):
if self.is_empty:
return ''
if len(self.dims) < 1 or not hasattr(self, "_coordset"):
return ''
if not self._coordset or len(self._coordset) < 1:
return ''
self._coordset._html_output = self._html_output # transfer the html flag if necessary: false by default
txt = self._coordset._cstr()
txt = txt.rstrip() # remove the trailing '\n'
return txt
_repr_dims = _str_dims
# ------------------------------------------------------------------------------------------------------------------
# events
# ------------------------------------------------------------------------------------------------------------------
def _dims_update(self, change=None):
# when notified that a coordinate's name has been updated
_ = self.dims # fire an update
# ..................................................................................................................
# ======================================================================================================================
# module function
# ======================================================================================================================
# make some NDDataset operations accessible from the spectrochempy API
thismodule = sys.modules[__name__]
api_funcs = ['sort', 'copy', 'squeeze', 'swapdims', 'transpose', 'to_array', 'to_xarray', 'take', 'set_complex',
'set_quaternion', 'set_hypercomplex', 'component', 'to', 'to_base_units', 'to_reduced_units', 'ito',
'ito_base_units', 'ito_reduced_units', 'is_units_compatible', 'remove_masks']
# todo: check the fact that some functions are also defined in ndmath
for funcname in api_funcs:
setattr(thismodule, funcname, getattr(NDDataset, funcname))
thismodule.__all__.append(funcname)
# load one method from NDIO
load = NDDataset.load
__all__ += ['load']
# ======================================================================================================================
# Set the operators
# ======================================================================================================================
_set_operators(NDDataset, priority=100000)
_set_ufuncs(NDDataset)
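# ----------------------------------------------------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the library): a minimal, hedged example of the coordinate,
# sorting and transposition API documented above. It assumes that `NDDataset` and `Coord` can be imported
# from an installed spectrochempy package, as in the doctest examples of `implements` and `to_array`.
if __name__ == '__main__':
    import numpy as np
    from spectrochempy import NDDataset, Coord

    nd = NDDataset(np.random.rand(3, 4))
    # set both coordinates at once (this replaces any existing coordset)
    nd.set_coordset(y=Coord(np.arange(3.), title='time'), x=Coord(np.arange(4.), title='wavenumber'))
    # sort along x in descending order, then transpose the result
    sorted_nd = nd.sort(dim='x', descend=True)
    print(sorted_nd.T.coordnames)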
|
# coding: utf-8
"""
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: beta
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
class ProcessLineage(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'operation_logic': 'str',
'pk': 'ProcessLineagePK'
}
attribute_map = {
'name': 'name',
'operation_logic': 'operationLogic',
'pk': 'pk'
}
def __init__(self, name=None, operation_logic=None, pk=None):
"""
ProcessLineage - a model defined in Swagger
"""
self._name = None
self._operation_logic = None
self._pk = None
self.name = name
if operation_logic is not None:
self.operation_logic = operation_logic
self.pk = pk
@property
def name(self):
"""
Gets the name of this ProcessLineage.
:return: The name of this ProcessLineage.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this ProcessLineage.
:param name: The name of this ProcessLineage.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
@property
def operation_logic(self):
"""
Gets the operation_logic of this ProcessLineage.
data update operation logic, e.g. 'REPLACE', 'UPDATE', 'APPEND'. Default is 'REPLACE'.
:return: The operation_logic of this ProcessLineage.
:rtype: str
"""
return self._operation_logic
@operation_logic.setter
def operation_logic(self, operation_logic):
"""
Sets the operation_logic of this ProcessLineage.
data update operation logic, e.g. 'REPLACE', 'UPDATE', 'APPEND'. Default is 'REPLACE'.
:param operation_logic: The operation_logic of this ProcessLineage.
:type: str
"""
self._operation_logic = operation_logic
@property
def pk(self):
"""
Gets the pk of this ProcessLineage.
:return: The pk of this ProcessLineage.
:rtype: ProcessLineagePK
"""
return self._pk
@pk.setter
def pk(self, pk):
"""
Sets the pk of this ProcessLineage.
:param pk: The pk of this ProcessLineage.
:type: ProcessLineagePK
"""
if pk is None:
raise ValueError("Invalid value for `pk`, must not be `None`")
self._pk = pk
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ProcessLineage):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
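# Usage sketch (illustrative only): constructing and serializing the generated model. `ProcessLineagePK`
# is another generated model in this Swagger package; the `_FakePK` stand-in below (and its 'processId'
# field) is purely hypothetical and only used here so the example stays self-contained.
if __name__ == '__main__':
    class _FakePK(object):
        def to_dict(self):
            return {'processId': 'p-123'}

    lineage = ProcessLineage(name='daily_ingest', operation_logic='APPEND', pk=_FakePK())
    print(lineage.to_str())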
|
import collections
import itertools
import math
import unittest
import aoc_utils.geometry
from aoc_utils import char_map, data
class TestCoordinatesUtils(unittest.TestCase):
def test_solve_tie(self):
self.assertEqual(None, solve_tie([]))
self.assertEqual((12, 34), solve_tie([(12, 34)]))
self.assertEqual((1, 1), solve_tie([(1, 1), (2, 2)]))
self.assertEqual((1, 1), solve_tie([(2, 2), (1, 1)]))
self.assertEqual((2, 1), solve_tie([(1, 2), (2, 1)]))
self.assertEqual((2, 1), solve_tie([(2, 1), (1, 2)]))
def solve_tie(options):
if len(options):
return sorted_by_priority(options)[0]
def sorted_by_priority(options):
return sorted(options, key=reverse_coordinates)
def reverse_coordinates(coordinates):
return tuple(i for i in reversed(coordinates))
class FindAllClosestRules(char_map.ProgressRules):
def __init__(self, targets, allowed_values):
super(FindAllClosestRules, self).__init__(allowed_values)
self._targets = targets
self._found_one = False
self.results = []
def stop_progressing(self):
return self._found_one
def examine(self, coordinates):
if coordinates in self._targets:
self._found_one = True
self.results.append(coordinates)
return False
return True
def solve_tie(self, coordinate_options):
return solve_tie(coordinate_options)
class TestCaves(unittest.TestCase):
def make_default_caves(self):
caves = Caves([
"#######",
"#E..G.#",
"#...#.#",
"#.G.#G#",
"#######",
])
return caves
def test_init_fighters(self):
caves = self.make_default_caves()
fighters = caves.fighters
self.assertSetEqual({'E', 'G'}, set(fighters.keys()))
self.assertEqual({(1, 1): 200}, fighters['E'])
self.assertEqual({(4, 1): 200, (2, 3): 200, (5, 3): 200}, fighters['G'])
def test_get_targets(self):
caves = self.make_default_caves()
self.assertListEqual([(4, 1), (2, 3), (5, 3)], list(caves.get_targets("E")))
self.assertListEqual([(1, 1)], list(caves.get_targets("G")))
def test_get_in_range(self):
caves = self.make_default_caves()
self.assertListEqual([(3, 1), (5, 1), (2, 2), (5, 2), (1, 3), (3, 3)],
list(caves.get_in_range("E")))
self.assertListEqual([(2, 1), (1, 2)],
list(caves.get_in_range("G")))
def test_get_coordinates_around(self):
caves = self.make_default_caves()
self.assertListEqual([(2, 1), (1, 2)], list(caves.get_coordinates_around((1, 1))))
self.assertListEqual([(3, 1), (5, 1)], list(caves.get_coordinates_around((4, 1))))
self.assertListEqual([(2, 2), (1, 3), (3, 3)], list(caves.get_coordinates_around((2, 3))))
self.assertListEqual([(5, 2)], list(caves.get_coordinates_around((5, 3))))
def test_find_all_closest_rules(self):
caves = Caves([
"#######",
"#E#.G.#",
"#...#.#",
"#.G.#G#",
"#######",
])
finder = char_map.MapExplorer(caves._caves)
rules = FindAllClosestRules(
targets=[(3, 1), (5, 1), (2, 2), (5, 2), (1, 3), (3, 3)],
allowed_values=[EMPTY_VALUE]
)
finder.explore(start_point=(1, 1), rules=rules)
self.assertListEqual([(2, 2), (1, 3)], list(rules.results))
def test_iterate_units(self):
caves = self.make_default_caves()
self.assertListEqual([(1, 1), (4, 1), (2, 3), (5, 3)], caves.iterate_units())
def test_get_attack_target(self):
caves_2 = Caves([
"#######",
"#..EG.#",
"#...#.#",
"#.G.#G#",
"#######",
])
self.assertEqual((4, 1), caves_2.get_attack_target((3, 1), 'E'))
self.assertEqual((3, 1), caves_2.get_attack_target((4, 1), 'G'))
self.assertEqual(None, caves_2.get_attack_target((2, 3), 'G'))
self.assertEqual(None, caves_2.get_attack_target((5, 3), 'G'))
def test_find_next_step(self):
caves = self.make_default_caves()
self.assertEqual((2, 1), caves.find_next_step((1, 1), 'E'))
self.assertEqual((3, 1), caves.find_next_step((4, 1), 'G'))
self.assertEqual((2, 2), caves.find_next_step((2, 3), 'G'))
self.assertEqual(None, caves.find_next_step((5, 3), 'G'))
def test_play_unit(self):
caves = self.make_default_caves()
fighters = caves.fighters
caves.play_unit((1, 1), 'E')
self.assertEqual({(2, 1): 200}, fighters['E'])
self.assertEqual({(4, 1): 200, (2, 3): 200, (5, 3): 200}, fighters['G'])
caves.play_unit((2, 1), 'E')
self.assertEqual({(3, 1): 200}, fighters['E'])
self.assertEqual({(4, 1): 197, (2, 3): 200, (5, 3): 200}, fighters['G'])
for _ in range(65):
caves.play_unit((3, 1), 'E')
self.assertEqual({(3, 1): 200}, fighters['E'])
self.assertEqual({(4, 1): 2, (2, 3): 200, (5, 3): 200}, fighters['G'])
caves.play_unit((3, 1), 'E')
self.assertEqual({(3, 1): 200}, fighters['E'])
self.assertEqual({(2, 3): 200, (5, 3): 200}, fighters['G'])
def test_play_round(self):
caves = self.make_default_caves()
fighters = caves.fighters
self.assertFalse(caves.play_round())
self.assertEqual({(2, 1): 194}, fighters['E'])
self.assertEqual({(3, 1): 200, (2, 2): 200, (5, 3): 200}, fighters['G'])
self.assertTrue(caves.play_round())
self.assertEqual({(2, 1): 188}, fighters['E'])
self.assertEqual({(3, 1): 197, (2, 2): 200, (5, 3): 200}, fighters['G'])
for _ in range(31):
self.assertTrue(caves.play_round())
self.assertEqual({(2, 1): 2}, fighters['E'])
self.assertEqual({(3, 1): 104, (2, 2): 200, (5, 3): 200}, fighters['G'])
self.assertRaises(FightIsOver, caves.play_round)
self.assertEqual({}, fighters['E'])
self.assertEqual({(3, 1): 101, (2, 2): 200, (5, 3): 200}, fighters['G'])
def test_play(self):
caves = self.make_default_caves()
fighters = caves.fighters
self.assertEqual(16533, caves.play())
self.assertEqual({}, fighters['E'])
self.assertEqual({(3, 1): 101, (2, 2): 200, (5, 3): 200}, fighters['G'])
def test_play_examples(self):
def check(expected_outcome, cave_lines, echo=False):
caves = Caves(cave_lines)
outcome = caves.play()
if echo:
caves.echo()
self.assertEqual(expected_outcome, outcome)
check(27730, [
'#######',
'#.G...#',
'#...EG#',
'#.#.#G#',
'#..G#E#',
'#.....#',
'#######',
])
check(36334, [
'#######',
'#G..#E#',
'#E#E.E#',
'#G.##.#',
'#...#E#',
'#...E.#',
'#######',
])
check(39514, [
'#######',
'#E..EG#',
'#.#G.E#',
'#E.##E#',
'#G..#.#',
'#..E#.#',
'#######',
])
check(27755, [
'#######',
'#E.G#.#',
'#.#G..#',
'#G.#.G#',
'#G..#.#',
'#...E.#',
'#######',
])
check(28944, [
'#######',
'#.E...#',
'#.#..G#',
'#.###.#',
'#E#G#G#',
'#...#G#',
'#######',
])
check(18740, [
'#########',
'#G......#',
'#.E.#...#',
'#..##..G#',
'#...##..#',
'#...#...#',
'#.G...G.#',
'#.....G.#',
'#########',
])
def test_play_mine(self):
caves_lines = data.data_lines(2018, "day_15_mine.txt")
caves = Caves(caves_lines)
outcome = caves.play()
self.assertEqual(201123, outcome)
def test_find_minimum_elves_strength(self):
for elf_strength in range(13, 20):
strengths = {'E': elf_strength, 'G': 3}
caves_lines = data.data_lines(2018, "day_15_mine.txt")
caves = Caves(caves_lines, teams_strength=strengths)
num_elves = len(caves.fighters['E'])
outcome = caves.play()
if len(caves.fighters['E']) == num_elves:
break
self.assertEqual(14, elf_strength)
self.assertEqual(54188, outcome)
TEAMS_STRENGTH = {'E': 3, 'G': 3}
EMPTY_VALUE = '.'
WALL_VALUE = '#'
class FightIsOver(Exception):
pass
class Caves:
def __init__(self, initial_map, teams_strength=TEAMS_STRENGTH):
self._caves = char_map.CharMap(input_lines=initial_map)
self.strength = teams_strength
self.fighters = {team: {} for team in teams_strength}
for position, entry in self._caves.items():
if entry in teams_strength:
self.fighters[entry][position] = 200
def play(self):
rounds = 0
while True:
try:
nobody_moved = self.play_round()
rounds += 1
except FightIsOver:
break
if nobody_moved:
rounds += self.play_frozen_situation()
remaining_hit_points = sum(hp for team in self.fighters.values() for hp in team.values())
return rounds * remaining_hit_points
def play_round(self):
nobody_moved = True
for unit in self.iterate_units():
if not self.game_on():
raise FightIsOver
team = self._caves[unit]
if team == EMPTY_VALUE:
continue
nobody_moved = self.play_unit(unit, team) and nobody_moved
return nobody_moved
def play_frozen_situation(self):
attackers = collections.defaultdict(lambda: 0)
for unit in self.iterate_units():
team = self._caves[unit]
target = self.get_attack_target(unit, team)
attackers[target] += self.strength[team]
rounds = min(
math.floor(self.fighters[self._caves[unit]][unit] / attackers[unit])
for unit in self.iterate_units()
if attackers[unit] > 0
)
for unit in self.iterate_units():
team = self._caves[unit]
self.fighters[team][unit] -= rounds * attackers[unit]
return rounds
def game_on(self):
return all(team for team in self.fighters.values())
def play_unit(self, unit, team):
attack_target = self.get_attack_target(unit, team)
if attack_target:
return self.attack(attack_target, self.strength[team])
new_position = self.find_next_step(unit, team)
if new_position:
self.move_unit(team, unit, new_position)
attack_target = self.get_attack_target(new_position, team)
if attack_target:
return self.attack(attack_target, self.strength[team])
return False
return True
def attack(self, unit, strength):
target_team = self._caves[unit]
self.fighters[target_team][unit] -= strength
if self.fighters[target_team][unit] <= 0:
del self.fighters[target_team][unit]
self._caves[unit] = EMPTY_VALUE
return False
return True
def move_unit(self, team, from_coordinates, to_coordinates):
self._caves[to_coordinates] = team
self._caves[from_coordinates] = EMPTY_VALUE
self.fighters[team][to_coordinates] = self.fighters[team][from_coordinates]
del self.fighters[team][from_coordinates]
def get_attack_target(self, unit, team):
adjacents = []
min_hp = None
for adjacent in self.get_coordinates_around(unit):
opponent = self._caves[adjacent]
if opponent in [EMPTY_VALUE, team]:
continue
hp = self.fighters[opponent][adjacent]
if min_hp is None or hp < min_hp:
min_hp = hp
adjacents = [adjacent]
elif hp == min_hp:
adjacents.append(adjacent)
return solve_tie(adjacents)
def find_next_step(self, unit, team):
in_range = self.get_in_range(team)
if not in_range:
return None
finder = char_map.MapExplorer(self._caves)
rules = FindAllClosestRules(targets=in_range, allowed_values=[EMPTY_VALUE])
finder.explore(unit, rules)
closest = solve_tie(rules.results)
if not closest:
return None
path = finder.shortest_path(start_point=unit, end_point=closest, rules=rules)
return path[1]
def iterate_units(self):
all_units = itertools.chain.from_iterable(team.keys() for team in self.fighters.values())
return sorted_by_priority(all_units)
def get_coordinates_around(self, coordinates):
for delta in char_map.ADJACENT_COORDINATES_DELTAS:
adjacent = aoc_utils.geometry.add_coordinates(coordinates, delta)
if adjacent in self._caves and self._caves[adjacent] != WALL_VALUE:
yield adjacent
def get_in_range(self, opponent):
in_range = []
for target in self.get_targets(opponent):
for coordinates in self.get_coordinates_around(target):
if self._caves[coordinates] == EMPTY_VALUE:
in_range.append(coordinates)
return sorted(in_range, key=lambda tup: (tup[1], tup[0]))
def get_targets(self, opponent):
for coordinates, entry in self._caves.items():
if entry not in [WALL_VALUE, EMPTY_VALUE, opponent]:
yield coordinates
def echo(self):
all_fighters = {unit: hp for team in self.fighters.values() for unit, hp in team.items()}
for y, line in enumerate(self._caves.lines()):
line += " "
line_units = sorted_by_priority(unit for unit in all_fighters if unit[1] == y)
line += " ".join(str(all_fighters[unit]) for unit in line_units)
print(line)
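# Usage sketch (illustrative only): the tie-breaking helpers above implement "reading order"
# (top-to-bottom, then left-to-right). Coordinates are (x, y) tuples and are sorted on the
# reversed (y, x) key.
if __name__ == '__main__':
    print(solve_tie([(1, 2), (2, 1)]))                    # -> (2, 1): the point on the upper row wins
    print(sorted_by_priority([(1, 1), (0, 2), (3, 0)]))   # -> [(3, 0), (1, 1), (0, 2)]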
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_uploads import UploadSet, configure_uploads, IMAGES
from flask_mail import Mail
from flask_bootstrap import Bootstrap
from config import config_options
db = SQLAlchemy()
login_manager = LoginManager()
login_manager.session_protection = "strong"
login_manager.login_view = "auth.login"
photos = UploadSet("photos", IMAGES)
mail = Mail()
bootstrap = Bootstrap()
def create_app(config_name):
app = Flask(__name__)
# Creating the app configurations
app.config.from_object(config_options[config_name])
# Initializing flask extensions
db.init_app(app)
login_manager.init_app(app)
mail.init_app(app)
bootstrap.init_app(app)
# Registering the main app Blueprint
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
# Registering auth blueprint
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix = "/authenticate")
# Configure UploadSet
configure_uploads(app, photos)
return app
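# Usage sketch (illustrative only): a minimal development entry point for the factory above.
# The 'development' configuration name is an assumption about the keys of `config_options`;
# use whichever configuration names the project actually defines.
if __name__ == '__main__':
    app = create_app('development')
    app.run(debug=True)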
|
from osf.models.metaschema import RegistrationSchemaBlock, RegistrationSchema, FileMetadataSchema # noqa
from osf.models.base import Guid, BlackListGuid # noqa
from osf.models.user import OSFUser, Email # noqa
from osf.models.contributor import Contributor, RecentlyAddedContributor, PreprintContributor, DraftRegistrationContributor # noqa
from osf.models.session import Session # noqa
from osf.models.institution import Institution # noqa
from osf.models.collection import CollectionSubmission, Collection # noqa
from osf.models.draft_node import DraftNode # noqa
from osf.models.node import AbstractNode, Node # noqa
from osf.models.sanctions import Sanction, Embargo, Retraction, RegistrationApproval, DraftRegistrationApproval, EmbargoTerminationApproval # noqa
from osf.models.registrations import Registration, DraftRegistrationLog, DraftRegistration # noqa
from osf.models.nodelog import NodeLog # noqa
from osf.models.preprintlog import PreprintLog # noqa
from osf.models.tag import Tag # noqa
from osf.models.comment import Comment # noqa
from osf.models.conference import Conference, MailRecord # noqa
from osf.models.citation import CitationStyle # noqa
from osf.models.archive import ArchiveJob, ArchiveTarget # noqa
from osf.models.queued_mail import QueuedMail # noqa
from osf.models.external import ExternalAccount, ExternalProvider # noqa
from osf.models.oauth import ApiOAuth2Application, ApiOAuth2PersonalToken, ApiOAuth2Scope # noqa
from osf.models.osf_group import OSFGroup # noqa
from osf.models.osf_grouplog import OSFGroupLog # noqa
from osf.models.licenses import NodeLicense, NodeLicenseRecord # noqa
from osf.models.private_link import PrivateLink # noqa
from osf.models.notifications import NotificationDigest, NotificationSubscription # noqa
from osf.models.spam import SpamStatus, SpamMixin # noqa
from osf.models.subject import Subject # noqa
from osf.models.provider import AbstractProvider, CollectionProvider, PreprintProvider, WhitelistedSHAREPreprintProvider, RegistrationProvider # noqa
from osf.models.preprint import Preprint # noqa
from osf.models.request import NodeRequest, PreprintRequest # noqa
from osf.models.identifiers import Identifier # noqa
from osf.models.files import ( # noqa
BaseFileNode,
BaseFileVersionsThrough,
File, Folder, # noqa
FileVersion, TrashedFile, TrashedFileNode, TrashedFolder, FileVersionUserMetadata, # noqa
) # noqa
from osf.models.metadata import FileMetadataRecord # noqa
from osf.models.node_relation import NodeRelation # noqa
from osf.models.analytics import UserActivityCounter, PageCounter # noqa
from osf.models.admin_profile import AdminProfile # noqa
from osf.models.admin_log_entry import AdminLogEntry # noqa
from osf.models.maintenance_state import MaintenanceState # noqa
from osf.models.banner import ScheduledBanner # noqa
from osf.models.quickfiles import QuickFilesNode # noqa
from osf.models.dismissed_alerts import DismissedAlert # noqa
from osf.models.action import ReviewAction # noqa
from osf.models.action import NodeRequestAction, PreprintRequestAction, ReviewAction, RegistrationAction, SchemaResponseAction, BaseAction # noqa
from osf.models.storage import ProviderAssetFile # noqa
from osf.models.chronos import ChronosJournal, ChronosSubmission # noqa
from osf.models.notable_email_domain import NotableEmailDomain # noqa
from osf.models.brand import Brand # noqa
from osf.models.schema_response import SchemaResponse # noqa
from osf.models.schema_response_block import SchemaResponseBlock # noqa
from osf.models.registration_bulk_upload_job import RegistrationBulkUploadJob # noqa
from osf.models.registration_bulk_upload_row import RegistrationBulkUploadRow # noqa
|
# RT - Twitter
from typing import TYPE_CHECKING, Union, Dict, Tuple, List
from discord.ext import commands
import discord
from tweepy.asynchronous import AsyncStream
from tweepy import API, OAuthHandler
from tweepy.errors import NotFound
from tweepy.models import Status
from jishaku.functools import executor_function
from asyncio import Event
if TYPE_CHECKING:
from asyncio import AbstractEventLoop
from tweepy.models import Status
from aiomysql import Pool
from rtlib import Backend
class DataManager:
TABLE = "TwitterNotification"
DEFAULT_MAX = 5
def __init__(self, loop: "AbstractEventLoop", pool: "Pool"):
self.pool = pool
loop.create_task(self._prepare_table())
async def _prepare_table(self):
# Prepare the table.
async with self.pool.acquire() as conn:
async with conn.cursor() as cursor:
await cursor.execute(
f"""CREATE TABLE IF NOT EXISTS {self.TABLE} (
GuildID BIGINT, ChannelID BIGINT, UserName TEXT
);"""
)
await self._update_users(cursor)
self.ready.set()
async def _read(self, cursor, channel, username):
await cursor.execute(
f"SELECT * FROM {self.TABLE} WHERE ChannelID = %s AND UserName = %s;",
(channel.id, username)
)
return await cursor.fetchone()
async def write(self, channel: discord.TextChannel, username: str) -> None:
"設定を保存します。"
async with self.pool.acquire() as conn:
async with conn.cursor() as cursor:
assert not await self._read(cursor, channel, username), "既に設定されています。"
await cursor.execute(
f"SELECT * FROM {self.TABLE} WHERE GuildID = %s;",
(channel.guild.id,)
)
assert len(await cursor.fetchall()) <= self.DEFAULT_MAX, "追加しすぎです。"
await cursor.execute(
f"INSERT INTO {self.TABLE} VALUES (%s, %s, %s);",
(channel.guild.id, channel.id, username)
)
async def delete(self, channel: discord.TextChannel, username: str) -> None:
"設定を削除します。"
async with self.pool.acquire() as conn:
async with conn.cursor() as cursor:
assert await self._read(cursor, channel, username), "その設定はありません。"
await cursor.execute(
f"DELETE FROM {self.TABLE} WHERE ChannelID = %s AND UserName = %s;",
(channel.id, username)
)
async def _update_users(self, cursor):
await cursor.execute(
f"SELECT ChannelID, UserName FROM {self.TABLE};"
)
self.users = {
username: channel_id
for channel_id, username in await cursor.fetchall()
}
async def update_users(self) -> List[Tuple[int, str]]:
"設定のキャッシュを更新します。"
async with self.pool.acquire() as conn:
async with conn.cursor() as cursor:
await self._update_users(cursor)
class TwitterNotification(commands.Cog, DataManager, AsyncStream):
TWITTERID_HEADERS = {
"authority": "tweeterid.com",
"sec-ch-ua": "^\\^Microsoft",
"accept": "*/*",
"content-type": "application/x-www-form-urlencoded; charset=UTF-8",
"x-requested-with": "XMLHttpRequest",
"sec-ch-ua-mobile": "?0",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36 Edg/93.0.961.38",
"sec-ch-ua-platform": "^\\^Windows^\\^",
"origin": "https://tweeterid.com",
"sec-fetch-site": "same-origin",
"sec-fetch-mode": "cors",
"sec-fetch-dest": "empty",
"referer": "https://tweeterid.com/",
"accept-language": "ja,en;q=0.9,en-GB;q=0.8,en-US;q=0.7",
}
BASE_URL = "https://twitter.com/{}/status/{}"
def __init__(self, bot: "Backend"):
self.bot = bot
self.users: Dict[str, int] = {}
self.ready = Event()
oauth = OAuthHandler(
self.bot.secret["twitter"]["consumer_key"],
self.bot.secret["twitter"]["consumer_secret"]
)
oauth.set_access_token(
self.bot.secret["twitter"]["access_token"],
self.bot.secret["twitter"]["access_token_secret"]
)
self.api = API(oauth)
super(commands.Cog, self).__init__(self.bot.loop, self.bot.mysql.pool)
super(DataManager, self).__init__(**self.bot.secret["twitter"])
self.connected = False
self.cache: Dict[str, str] = {}
self.bot.loop.create_task(self.start_stream())
def filter(self, *args, **kwargs):
# Overridden so that the connected flag can be tracked.
self.connected = True
super().filter(*args, **kwargs)
def disconnect(self, *args, **kwargs):
# Overridden so that the connected flag can be tracked.
self.connected = False
super().disconnect(*args, **kwargs)
def get_url(self, status: Union[Status, Tuple[str, int]]) -> str:
"渡されたStatusからツイートのURLを取得します。"
return self.BASE_URL.format(
status.user.screen_name, status.id_str
) if isinstance(status, Status) else self.BASE_URL.format(*status)
async def on_status(self, status: "Status"):
# Called when a tweet is received.
if status.user.screen_name in self.users:
# Send a notification if the tweet comes from a watched user.
if not (channel := self.bot.get_channel(
self.users[status.user.screen_name]
)):
# If the notification channel can no longer be found, delete the setting.
return await self.delete(
self.users[status.user.screen_name], status.user.screen_name
)
# Add a link button that jumps to the tweet.
view = discord.ui.View(timeout=1)
view.add_item(discord.ui.Button(
label="Tweetを見る", url=self.get_url(status)
))
# Adjust the message text.
if hasattr(status, "retweeted_status") and status.retweeted_status:
# Retweet
status.text = status.text.replace(
"RT @", "🔁 Retweeted @", 1
)
elif hasattr(status, "quoted_status") and status.quoted_status:
# Quote retweet
status.text = "🔁 Retweeted [Original]({})\n{}".format(
self.get_url(status.quoted_status), status.text
)
elif (hasattr(status, "in_reply_to_status_id")
and status.in_reply_to_status_id):
# Reply
status.text = "⤴ Replied [Original]({})\n{}".format(
self.get_url((
status.in_reply_to_screen_name,
status.in_reply_to_status_id
)), status.text
)
# Replace @ with a full-width @ so that no mentions are triggered.
status.text = status.text.replace("@", "@")
try:
# Send the notification.
await channel.webhook_send(
content=status.text,
username=status.user.screen_name + \
("✅" if status.user.verified else "") \
+ " - RT Twitter Notification",
avatar_url=(
"" if status.user.default_profile_image
else status.user.profile_image_url_https
), view=view
)
except discord.Forbidden:
await channel.send(
"Twitter通知をしようとしましたが権限がないため通知に失敗しました。\n" \
"チャンネルのWebhookを管理できるように権限を付与してください。\n" \
"またRTにはたくさんの機能があり全てを動かすのなら管理者権限を付与する方が手っ取り早いです。"
)
except Exception as e:
await channel.send(
f"Twitter通知をしようとしましたが失敗しました。\nエラーコード:`{e}`"
)
@executor_function
def get_user_id(self, username: str) -> str:
"ユーザー名からユーザーのIDを取得します。※これは子ルーチン関数です。"
return self.api.get_user(screen_name=username).id_str
async def start_stream(self, disconnect: bool = False) -> None:
"Twitterのストリームを開始します。"
if disconnect and self.connected:
self.disconnect()
if hasattr(self, "ready"):
await self.ready.wait()
del self.ready
if self.users:
follow = []
for username in self.users:
try:
follow.append(await self.get_user_id(username))
except NotFound:
channel = self.bot.get_channel(self.users[username])
await self.delete(channel, username)
del self.users[username]
await channel.send(
"Twitter通知をしようとしましたがエラーが発生しました。\n" \
+ f"{username.replace('@', '@')}のユーザーが見つかりませんでした。"
)
self.filter(follow=follow)
def cog_unload(self):
if self.connected:
self.disconnect()
@commands.group(
aliases=["ツイッター", "tw"], extras={
"headding": {"ja": "Twitter通知", "en": "Twitter Notification"},
"parent": "ServerUseful"
}
)
async def twitter(self, ctx):
"""!lang ja
--------
Twitterの指定したユーザーのツイートを指定したチャンネルに通知させます。
Aliases
-------
tw, ツイッター
!lang en
--------
Notify the specified channel of tweets from the specified user on Twitter.
Aliases
-------
tw"""
if not ctx.invoked_subcommand:
await ctx.reply("使用方法が違います。 / It is used in different ways.")
@twitter.command("set", aliases=["s", "設定"])
@commands.has_permissions(manage_channels=True, manage_webhooks=True)
@commands.cooldown(1, 60, commands.BucketType.channel)
async def set_(self, ctx, onoff: bool, *, username):
"""!lang ja
--------
Twitterの通知を設定します。
このコマンドを実行したチャンネルに指定したユーザーのツイートの通知が来るようになります。
Parameters
----------
onoff : bool
onまたはoffで通知を有効にするか無効にするかです。
username : str
通知する対象のユーザーの名前です。
`@`から始まるものです。
Examples
--------
`rt!twitter set on tasuren1`
RTの開発者のtasurenのTwitterの通知を有効にします。
Aliases
-------
s, 設定
!lang en
--------
Sets up Twitter notifications.
The channel where this command is executed will receive notifications of tweets from the specified user.
Parameters
----------
onoff : bool
Enables or disables notifications with on or off.
username : str
The name of the user to be notified.
It must start with `@`.
Examples
--------
`rt!twitter set on tasuren1`
Enables Twitter notifications for the RT developer tasuren.
Aliases
-------
s"""
await ctx.trigger_typing()
try:
if onoff:
await self.get_user_id(username)
await self.write(ctx.channel, username)
else:
await self.delete(ctx.channel, username)
except AssertionError:
await ctx.reply(
{"ja": "既に設定されています。\nまたは設定しすぎです。",
"en": "The username is already set.\nOr it is set too high."} \
if onoff else {
"ja": "設定されていません。",
"en": "The username is not set yet."
}
)
except NotFound:
await ctx.reply(
{"ja": "そのユーザーが見つかりませんでした。",
"en": "The user is not found."}
)
else:
await self.update_users()
await self.start_stream(True)
await ctx.reply("Ok")
@twitter.command("list", aliases=["l", "一覧"])
async def list_(self, ctx):
"""!lang ja
--------
設定しているTwitter通知のリストを表示します。
Aliases
-------
l, 一覧
!lang en
--------
Displays twitter notification settings
Aliases
-------
l"""
await ctx.reply(
embed=discord.Embed(
title="Twitter",
description="\n".join(
f"<#{channel_id}>:{username}"
for username, channel_id in self.users.items()
)
)
)
def setup(bot):
bot.add_cog(TwitterNotification(bot))
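# Usage sketch (illustrative only): like any discord.py cog, this module is attached through its
# `setup` function, typically with `bot.load_extension("cogs.twitter")` -- the module path is an
# assumption. The snippet below only demonstrates the tweet-URL template used by `get_url`;
# the status id is a hypothetical placeholder.
if __name__ == '__main__':
    print(TwitterNotification.BASE_URL.format("tasuren1", "1234567890123456789"))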
|
import copy
import pprint
import unittest
import requests
from wikibaseintegrator import wbi_core, wbi_fastrun, wbi_functions, wbi_datatype
from wikibaseintegrator.wbi_core import MWApiError
__author__ = 'Sebastian Burgstaller-Muehlbacher'
__license__ = 'AGPLv3'
class TestMediawikiApiCall(unittest.TestCase):
def test_all(self):
with self.assertRaises(MWApiError):
wbi_functions.mediawiki_api_call_helper(data={'format': 'json', 'action': 'wbgetentities', 'ids': 'Q42'}, mediawiki_api_url="https://www.wikidataaaaaaa.org",
max_retries=3, retry_after=1, allow_anonymous=True)
with self.assertRaises(requests.HTTPError):
wbi_functions.mediawiki_api_call_helper(data=None, mediawiki_api_url="https://httpbin.org/status/400", max_retries=3, retry_after=1, allow_anonymous=True)
wbi_functions.mediawiki_api_call_helper(data={'format': 'json', 'action': 'wbgetentities', 'ids': 'Q42'}, max_retries=3, retry_after=1, allow_anonymous=True)
with self.assertRaises(MWApiError):
wbi_functions.mediawiki_api_call_helper(data=None, mediawiki_api_url="https://httpbin.org/status/502", max_retries=3, retry_after=1, allow_anonymous=True)
class TestDataType(unittest.TestCase):
def test_quantity(self):
dt = wbi_datatype.Quantity(quantity='34.5', prop_nr='P43')
dt_json = dt.get_json_representation()
if not dt_json['mainsnak']['datatype'] == 'quantity':
raise
value = dt_json['mainsnak']['datavalue']
if not value['value']['amount'] == '+34.5':
raise
if not value['value']['unit'] == '1':
raise
dt2 = wbi_datatype.Quantity(quantity='34.5', prop_nr='P43', upper_bound='35.3', lower_bound='33.7', unit="Q11573")
value = dt2.get_json_representation()['mainsnak']['datavalue']
if not value['value']['amount'] == '+34.5':
raise
if not value['value']['unit'] == 'http://www.wikidata.org/entity/Q11573':
raise
if not value['value']['upperBound'] == '+35.3':
raise
if not value['value']['lowerBound'] == '+33.7':
raise
def test_geoshape(self):
dt = wbi_datatype.GeoShape(value='Data:Inner_West_Light_Rail_stops.map', prop_nr='P43')
dt_json = dt.get_json_representation()
if not dt_json['mainsnak']['datatype'] == 'geo-shape':
raise
value = dt_json['mainsnak']['datavalue']
if not value['value'] == 'Data:Inner_West_Light_Rail_stops.map':
raise
if not value['type'] == 'string':
raise
def test_live_item(self):
"""
Test an item against Wikidata
"""
item = wbi_core.ItemEngine(item_id='Q423111')
mass_statement = [x for x in item.statements if x.get_prop_nr() == 'P2067'].pop()
pprint.pprint(mass_statement.get_json_representation())
if not mass_statement:
raise
# TODO: get json directly from the API and compare part to ItemEngine
class TestFastRun(unittest.TestCase):
"""
some basic tests for fastrun mode
"""
def test_fast_run(self):
statements = [
wbi_datatype.ExternalID(value='P40095', prop_nr='P352'),
wbi_datatype.ExternalID(value='YER158C', prop_nr='P705')
]
frc = wbi_fastrun.FastRunContainer(base_filter={'P352': '', 'P703': 'Q27510868'},
base_data_type=wbi_datatype.BaseDataType, engine=wbi_core.ItemEngine)
fast_run_result = frc.write_required(data=statements)
if fast_run_result:
message = 'fastrun failed'
else:
message = 'successful fastrun'
print(fast_run_result, message)
# here, fastrun should succeed, if not, test failed
# if fast_run_result:
# raise ValueError
def test_fastrun_label(self):
# tests fastrun label, description and aliases, and label in another language
data = [wbi_datatype.ExternalID('/m/02j71', 'P646')]
fast_run_base_filter = {'P361': 'Q18589965'}
item = wbi_core.ItemEngine(item_id="Q2", data=data, fast_run=True, fast_run_base_filter=fast_run_base_filter)
frc = wbi_core.ItemEngine.fast_run_store[0]
frc.debug = True
assert item.get_label('en') == "Earth"
descr = item.get_description('en')
assert len(descr) > 3
aliases = item.get_aliases()
assert "the Earth" in aliases
assert list(item.fast_run_container.get_language_data("Q2", 'en', 'label'))[0] == "Earth"
assert item.fast_run_container.check_language_data("Q2", ['not the Earth'], 'en', 'label')
assert "the Earth" in item.get_aliases()
assert "planet" in item.get_description()
assert item.get_label("es") == "Tierra"
item.set_description(descr)
item.set_description("fghjkl")
assert item.json_representation['descriptions']['en'] == {'language': 'en', 'value': 'fghjkl'}
item.set_label("Earth")
item.set_label("xfgfdsg")
assert item.json_representation['labels']['en'] == {'language': 'en', 'value': 'xfgfdsg'}
item.set_aliases(["fake alias"], if_exists='APPEND')
assert {'language': 'en', 'value': 'fake alias'} in item.json_representation['aliases']['en']
# something that's empty (for now; it can change, so this just makes sure no exception is thrown)
frc.check_language_data("Q2", ['Ewiase'], 'ak', 'label')
frc.check_language_data("Q2", ['not Ewiase'], 'ak', 'label')
frc.check_language_data("Q2", [''], 'ak', 'description')
frc.check_language_data("Q2", [], 'ak', 'aliases')
frc.check_language_data("Q2", ['sdf', 'sdd'], 'ak', 'aliases')
item.get_label("ak")
item.get_description("ak")
item.get_aliases("ak")
item.set_label("label", lang="ak")
item.set_description("d", lang="ak")
item.set_aliases(["a"], lang="ak", if_exists='APPEND')
def test_sitelinks():
data = [wbi_datatype.ItemID(value='Q12136', prop_nr='P31')]
item = wbi_core.ItemEngine(item_id='Q622901', data=data)
item.get_sitelink("enwiki")
assert "enwiki" not in item.json_representation['sitelinks']
item.set_sitelink("enwiki", "something")
assert item.get_sitelink("enwiki")['title'] == "something"
assert "enwiki" in item.json_representation['sitelinks']
def test_nositelinks():
# this item doesn't and probably won't ever have any sitelinks (but who knows?? maybe one day..)
data = [wbi_datatype.ItemID(value='Q5', prop_nr='P31')]
item = wbi_core.ItemEngine(item_id='Q27869338', data=data)
item.get_sitelink("enwiki")
assert "enwiki" not in item.json_representation['sitelinks']
item.set_sitelink("enwiki", "something")
assert item.get_sitelink("enwiki")['title'] == "something"
assert "enwiki" in item.json_representation['sitelinks']
####
# tests for statement equality, with and without refs
####
def test_ref_equals():
# statements are identical
oldref = [wbi_datatype.ExternalID(value='P58742', prop_nr='P352', is_reference=True),
wbi_datatype.ItemID(value='Q24784025', prop_nr='P527', is_reference=True),
wbi_datatype.Time(time='+2001-12-31T12:01:13Z', prop_nr='P813', is_reference=True)]
olditem = wbi_datatype.ItemID("Q123", "P123", references=[oldref])
newitem = copy.deepcopy(olditem)
assert olditem.equals(newitem, include_ref=False)
assert olditem.equals(newitem, include_ref=True)
# dates are a month apart
newitem = copy.deepcopy(olditem)
newitem.references[0][2] = wbi_datatype.Time(time='+2002-01-31T12:01:13Z', prop_nr='P813')
assert olditem.equals(newitem, include_ref=False)
assert not olditem.equals(newitem, include_ref=True)
# multiple refs
newitem = copy.deepcopy(olditem)
newitem.references.append([wbi_datatype.ExternalID(value='99999', prop_nr='P352')])
assert olditem.equals(newitem, include_ref=False)
assert not olditem.equals(newitem, include_ref=True)
olditem.references.append([wbi_datatype.ExternalID(value='99999', prop_nr='P352')])
assert olditem.equals(newitem, include_ref=True)
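# Usage sketch (illustrative only), mirroring test_quantity above: build a Quantity statement offline
# and inspect its JSON representation, without touching a live Wikibase instance.
if __name__ == '__main__':
    qty = wbi_datatype.Quantity(quantity='34.5', prop_nr='P43')
    pprint.pprint(qty.get_json_representation())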
|
import sys
import os
import re
import collections
import itertools
import bcolz
import pickle
import numpy as np
import pandas as pd
import gc
import random
import smart_open
import h5py
import csv
import tensorflow as tf
import gensim
import datetime as dt
from tqdm import tqdm_notebook as tqdm
# import multiprocessing as mp
# from itertools import repeat, product
# from functools import partial
# to be able to pickle class methods for multi processing
# https://stackoverflow.com/questions/27318290/why-can-i-pass-an-instance-method-to-multiprocessing-process-but-not-a-multipro
def _instance_method_alias(obj, arg):
"""
Alias for instance method that allows the method to be called in a
multiprocessing pool
"""
return obj.convertSent2WordIds(arg)
def get_embeddings_from_ft(fasttext_vec_file, dim, vocab_words):
"""
Convert a fastText .vec file to a numpy array.
The rows of the created embedding follow the order of words in vocab_words.
"""
# gathering words from fasttext vec file--------------------
ft_lines = None
with open(fasttext_vec_file, "r") as f:
ft_lines = f.readlines()
ft_shape = tuple([int(i.strip()) for i in ft_lines[0].split()])
ft_vocab_size = ft_shape[0]
ft_wvs_dict = {}
for i, line in enumerate(ft_lines[1:]):
str_list = line.split()
word = str(str_list[0].strip())
vec = np.array([float(f) for f in str_list[1:]])
assert dim == len(vec), "fastText vector dimension mismatch: "+str(dim)+" != "+str(len(vec))
ft_wvs_dict[word] = vec
assert ft_vocab_size == len(ft_wvs_dict), "fastText vectors file read issue: "+str(ft_vocab_size)+" != "+str(len(ft_wvs_dict))
# creating embedding matrix from the file --------------------
wvs_embedding = np.random.randn(len(vocab_words), dim)
for i,word in enumerate(vocab_words):
if word in ft_wvs_dict:
wvs_embedding[i] = ft_wvs_dict[word]
return wvs_embedding
#=============================================================
# DOCUMENT PREPROCESSING
#=============================================================
CHAR_ALPHABETS = "abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{}\n "
char_start_tag_idx = len(CHAR_ALPHABETS) + 0
char_end_tag_idx = len(CHAR_ALPHABETS) + 1
char_unknown_tag_idx = len(CHAR_ALPHABETS) + 2
# when sentences are converted to characters
# these are appended to signal end of sentences
char_sent_start_tag_idx = len(CHAR_ALPHABETS) + 3
char_sent_end_tag_idx = len(CHAR_ALPHABETS) + 4
CHAR_ALPHABETS_LEN = len(CHAR_ALPHABETS) + 4
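# Quick sketch (illustrative only) of the character-id scheme defined above: each character is encoded
# by its index in CHAR_ALPHABETS, and the special start/end/unknown and sentence-boundary tags are the
# indices appended after the alphabet. Note that str.find returns -1 for characters outside the alphabet.
if __name__ == '__main__':
    word = "gene"
    print([CHAR_ALPHABETS.find(c) for c in word])
    print(char_start_tag_idx, char_end_tag_idx, char_unknown_tag_idx, CHAR_ALPHABETS_LEN)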
class GenerateDataset(object):
"""
This class takes in a preprocessed data frame and
generates datasets as necessary
"""
def __init__(self, data_frame, vocab_idx):
self.data_frame = data_frame
self.vocab_idx = vocab_idx
self.vocab_size = len(vocab_idx)
# constants ================================================================================
self.sentence_start_tag_idx = self.vocab_idx["<SOSent>"]
self.sentence_end_tag_idx = self.vocab_idx["<EOSent>"]
self.word_unknown_tag_idx = self.vocab_idx["<UNK>"]
self.default_unit_dict = {
"gene_unit" : "words",
"variation_unit" : "words",
"doc_unit" : "words",
"doc_form" : "text",
"doc_cntx_dir" : "forward",
"divide_document": "single_unit"
}
def convertSent2WordIds(self, sentence, add_start_end_tag=False):
"""
sentence is a list of words.
It is converted to a list of ids based on vocab_idx.
"""
sent2id = []
if add_start_end_tag:
sent2id = [self.sentence_start_tag_idx]
try:
sent2id = sent2id + [self.vocab_idx[word] if self.vocab_idx[word]<self.vocab_size else self.word_unknown_tag_idx for word in sentence]
except KeyError as e:
print(e)
print (sentence)
raise ValueError('Fix this issue dude')
if add_start_end_tag:
sent2id = sent2id + [self.sentence_end_tag_idx]
return sent2id
def convertDoc2Sent2WordIds(self, document, add_start_end_tag=False):
"""
        document is a list of sentences.
        sentence is a list of words.
        so the given document will be converted to a list of lists of ids based on vocab_idx
"""
return [self.convertSent2WordIds(sentence, add_start_end_tag) for sentence in document]
def convertWord2Char2Ids(self, word, add_start_end_tag=False):
"""
word is a char sequence or list of characters,
return list of ids in word or char sequence
"""
char2id = []
if add_start_end_tag:
char2id = [char_start_tag_idx]
        # map characters outside CHAR_ALPHABETS to the unknown-char id (str.find would return -1)
        char2id = char2id + [CHAR_ALPHABETS.find(char) if char in CHAR_ALPHABETS else char_unknown_tag_idx for char in word]
if add_start_end_tag:
char2id = char2id + [char_end_tag_idx]
return char2id
def convertSent2Word2Char2Ids(self, sentence, add_start_end_tag=False, unit="chars"):
"""
sentence is list of words
word is list of characters
returns list of list of ids
"""
sent2words2char2id = []
if unit == "chars":
"""
all the words are grouped as list of chars with pre-post added tags
"""
if add_start_end_tag:
sent2words2char2id = [[char_sent_start_tag_idx]]
sent2words2char2id = sent2words2char2id + [self.convertWord2Char2Ids(word, add_start_end_tag) if self.vocab_idx[word] < self.vocab_size else [char_unknown_tag_idx] for word in sentence]
if add_start_end_tag:
sent2words2char2id = sent2words2char2id + [[char_sent_end_tag_idx]]
elif unit == "raw_chars":
"""
just a stream of characters
"""
if add_start_end_tag:
sent2words2char2id = [char_sent_start_tag_idx]
for word in sentence:
if self.vocab_idx[word] < self.vocab_size:
sent2words2char2id += [charid for charid in self.convertWord2Char2Ids(word, add_start_end_tag)]
else:
sent2words2char2id += [char_unknown_tag_idx]
if add_start_end_tag:
sent2words2char2id = sent2words2char2id + [char_sent_end_tag_idx]
else:
assert False, "give valid doc_unit argument"
return sent2words2char2id
def convertDoc2Sent2Word2Char2Ids(self, document, doc_form="sentences", add_start_end_tag=False, unit="chars"):
"""
        document is a list of sentences.
        sentence is a list of words.
        the given document is converted to nested lists of character ids
returns list of list if doc_form == "text"
returns list of list of list if doc_form == "sentences"
"""
doc2word2char2ids = []
if doc_form == "sentences":
doc2word2char2ids = [self.convertSent2Word2Char2Ids(sentence, add_start_end_tag, unit) for sentence in document]
        elif doc_form == "text":
            # flatten the whole document into a single stream of char-id units
            doc2word2char2ids = [list_or_charid for sentence in document for list_or_charid in self.convertSent2Word2Char2Ids(sentence, add_start_end_tag, unit)]
else:
assert False, "give valid doc_form argument"
return doc2word2char2ids
def generate_data(self, unit_dict=None, has_class=False, add_start_end_tag=False):
"""
dataframe expects to have Sentences, Variations, Genes, Class(has_class)
Sentences Text attribute converted to list of sentences which in turn converted to list of words
Variations just one sentence which is a list of words
Genes just one sentence which is a list of words
returns information based on request
        unit_dict contains the following keys:
            gene_unit can be ["words", "chars", "raw_chars"]
            variation_unit can be ["words", "chars", "raw_chars"]
            doc_unit can be ["words", "word_list", "chars", "raw_chars"]
doc_form can be ["sentences", "text"]
doc_cntx_dir can be ["forward", "backward"]
divide_document can be ["single_unit", "multiple_units"]
"""
if not unit_dict:
unit_dict = self.default_unit_dict
try:
unit_dict["doc_cntx_dir"]
except KeyError as e:
unit_dict["doc_cntx_dir"] = "forward"
ids_document = []
ids_labels = []
ids_genes = []
ids_variations = []
# since sometimes the data will be shuffled in the frame
# during train test split
for index in self.data_frame.index:
document = self.data_frame.Sentences[index]
if unit_dict["divide_document"] == "single_unit": #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~`
# doc units --------------------------------------------------------------
if unit_dict["doc_unit"] == "words" or unit_dict["doc_unit"] == "word_list":
if unit_dict["doc_form"] == "sentences":
ids_document.append(self.convertDoc2Sent2WordIds(document, add_start_end_tag))
else: # unit_dict["doc_form"] == "text"
# using multiprocess to process each sentence in document and concatenate them to a single sentence
# get_wordid_list = lambda d, setag : [wid for s in d for wid in self.convertSent2WordIds(s, setag)]
# text_word_list = []
# with mp.Pool(processes = 5) as pool:
# # text_word_list = pool.starmap(get_wordid_list, product(document, [add_start_end_tag]*len(document)))
# # text_word_list = pool.starmap(get_wordid_list, zip(document, repeat(add_start_end_tag)))
# text_word_list = pool.map(partial(get_wordid_list, setag=add_start_end_tag), document)
# without multiprocessing
if unit_dict["doc_unit"] == "words":
text_word_list = [word_id for sentence in document for word_id in self.convertSent2WordIds(sentence, add_start_end_tag)]
if unit_dict["doc_cntx_dir"] == "backward":
text_word_list = text_word_list[::-1]
else: # unit_dict["doc_unit"] == "word_list": sentence form a list
text_word_list = [self.convertSent2WordIds(sentence, add_start_end_tag) for sentence in document]
if unit_dict["doc_cntx_dir"] == "backward":
text_word_list = [self.convertSent2WordIds(sentence, add_start_end_tag)[::-1] for sentence in document]
ids_document.append(text_word_list)
elif unit_dict["doc_unit"] == "chars" or unit_dict["doc_unit"] == "raw_chars":
if unit_dict["doc_form"] == "sentences":
                        # convertDoc2Sent2Word2Char2Ids already walks over every sentence,
                        # so the document is converted and appended exactly once
                        ids_document.append(self.convertDoc2Sent2Word2Char2Ids(document,
                            doc_form=unit_dict["doc_form"], unit=unit_dict["doc_unit"], add_start_end_tag=add_start_end_tag))
else: # unit_dict["doc_form"] == "text"
text_char_list = [word_as_char_list_id for sentence in document for word_as_char_list_id in self.convertSent2Word2Char2Ids(sentence, add_start_end_tag, unit=unit_dict["doc_unit"])]
ids_document.append(text_char_list)
else:
assert False, "give valid doc_unit key-value"
# others --------------------------------------------------------------
if has_class:
ids_labels.append(self.data_frame.Class[index])
if unit_dict["gene_unit"] == "words":
ids_genes.append(self.convertSent2WordIds(self.data_frame.Gene[index], add_start_end_tag))
else:
ids_genes.append(self.convertSent2Word2Char2Ids(self.data_frame.Gene[index],
                        add_start_end_tag, unit=unit_dict["gene_unit"]))
if unit_dict["variation_unit"] == "words":
ids_variations.append(self.convertSent2WordIds(self.data_frame.Variation[index], add_start_end_tag))
else:
ids_variations.append(self.convertSent2Word2Char2Ids(self.data_frame.Variation[index],
                        add_start_end_tag, unit=unit_dict["variation_unit"]))
else: # unit_dict["divide_document"] == "multiple_unit" #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~`
for sentence in document:
# doc units --------------------------------------------------------------
if unit_dict["doc_unit"] == "words":
                        # doesn't matter whether
                        # unit_dict["doc_form"] == "sentences"
                        # unit_dict["doc_form"] == "text"
                        try:
                            sentence_list = self.convertSent2WordIds(sentence, add_start_end_tag)
                            if unit_dict["doc_cntx_dir"] == "backward":
                                # reverse the sentence itself so the backward direction actually takes effect
                                sentence_list = sentence_list[::-1]
                            ids_document.append(sentence_list)
except ValueError as e:
print(e)
print (index)
                            raise ValueError("could not convert sentence at document index " + str(index))
elif unit_dict["doc_unit"] == "chars" or unit_dict["doc_unit"] == "raw_chars":
                        # doesn't matter whether
# unit_dict["doc_form"] == "sentences"
# unit_dict["doc_form"] == "text"
ids_document.append(self.convertSent2Word2Char2Ids(sentence, add_start_end_tag,
unit=unit_dict["doc_unit"]))
# others --------------------------------------------------------------
if has_class:
ids_labels.append(self.data_frame.Class[index])
if unit_dict["gene_unit"] == "words":
ids_genes.append(self.convertSent2WordIds(self.data_frame.Gene[index], add_start_end_tag))
else:
ids_genes.append(self.convertSent2Word2Char2Ids(self.data_frame.Gene[index],
add_start_end_tag, unit=unit_dict["gene_unit"]))
if unit_dict["variation_unit"] == "words":
ids_variations.append(self.convertSent2WordIds(self.data_frame.Variation[index], add_start_end_tag))
else:
ids_variations.append(self.convertSent2Word2Char2Ids(self.data_frame.Variation[index],
add_start_end_tag, unit=unit_dict["variation_unit"]))
return ids_document, ids_genes, ids_variations, ids_labels
def placeholder_function(self, unit_dict=None, limit_dict=None,
has_class=False, add_start_end_tag=False):
"""
dataframe expects to have Sentences, Variations, Genes, Class(has_class)
Sentences Text attribute converted to list of sentences which in turn converted to list of words
Variations just one sentence which is a list of words
Genes just one sentence which is a list of words
returns information based on request
unit_dict contains these 5 keys that can have
gene_unit can be ["words", "chars"]
variation_unit can be ["words", "chars"]
doc_unit can be ["words", "chars"]
doc_form can be ["sentences", "text"]
divide_document can be ["single_unit", "multiple_units"]
limit_dict contains max sequence len to form valid matrices
Text attribute options
max_text_seq_len => maximum number of words in a text
max_text_document_len => maximum number of sentences in a document
max_text_sentence_len => maximum number of words in a sentence
max_text_word_len => maximum number of chars in a word
Gene Attribute options
max_gene_sentence_len => maximum number of words in a sentence
max_gene_word_len => maximum number of chars in a word
Variation Attribute options
max_variation_sentence_len => maximum number of words in a sentence
max_variation_word_len => maximum number of chars in a word
"""
        ids_document, ids_genes, ids_variations, ids_labels = self.generate_data(unit_dict, has_class, add_start_end_tag)
        # NOTE: padding/clipping based on limit_dict is not implemented yet; return the raw id lists for now
        return ids_document, ids_genes, ids_variations, ids_labels
# testing ======================================================================================
def test_class():
document = [
['beautiful', 'is', 'better', 'than', 'ugly.'],
['explicit', 'is', 'better', 'than', 'implicit.'],
['simple', 'is', 'better', 'than', 'complex.'],
['complex', 'is', 'better', 'than', 'complicated.'],
['flat', 'is', 'better', 'than', 'nested.'],
# ['sparse', 'is', 'better', 'than', 'dense.'],
# ['readability', 'counts.'],
# ['special', 'cases', "aren't", 'special', 'enough', 'to', 'break', 'the', 'rules.'],
# ['although', 'practicality', 'beats', 'purity.'],
# ['errors', 'should', 'never', 'pass', 'silently.'],
# ['unless', 'explicitly', 'silenced.'],
# ['in', 'the', 'face', 'of', 'ambiguity,', 'refuse', 'the', 'temptation', 'to', 'guess.'],
# ['there', 'should', 'be', 'one--', 'and', 'preferably', 'only', 'one', '--obvious', 'way', 'to', 'do', 'it.'],
# ['although', 'that', 'way', 'may', 'not', 'be', 'obvious', 'at', 'first', 'unless', "you're", 'Dutch.'],
# ['now', 'is', 'better', 'than', 'never.'], ['Although', 'never', 'is', 'often', 'better', 'than', '*right*', 'now.'],
# ['if', 'the', 'implementation', 'is', 'hard', 'to', 'explain,', "it's", 'a', 'bad', 'idea.'],
# ['if', 'the', 'implementation', 'is', 'easy', 'to', 'explain,', 'it', 'may', 'be', 'a', 'good', 'idea.'],
# ['namespaces', 'are', 'one', 'honking', 'great', 'idea', '--', "let's", 'do', 'more', 'of', 'those!'],
]
data_dict = {
"ID" : 0,
"Gene" : [["beautiful"]],
"Variation" : [["complex", "simple"]],
"Class" : 0,
"Sentences" : [document[:]]
}
custom_unit_dict = {
"gene_unit" : "raw_chars",
"variation_unit" : "raw_chars",
# text transformed to sentences attribute
"doc_unit" : "raw_chars",
"doc_form" : "sentences",
# "doc_cntx_dir" : "forward",
"divide_document" : "single_unit"
}
df = pd.DataFrame(data=data_dict)
corpus = sorted(list(set([word for sentence in document for word in sentence])))
corpus_wordidx = {word:i for i,word in enumerate(corpus)}
corpus_wordidx["<SOSent>"] = len(corpus)
corpus_wordidx["<EOSent>"] = len(corpus) + 1
corpus_wordidx["<UNK>"] = len(corpus) + 2
gen_data = GenerateDataset(df, corpus_wordidx)
x_T, x_G, x_V, x_C = gen_data.generate_data(custom_unit_dict, has_class=True, add_start_end_tag=True)
print("data", df.Sentences[0], "\n")
print(corpus_wordidx)
index = 0
print("text",np.array(x_T).shape, x_T[index])
print("gene",np.array(x_G).shape, x_G[index])
print("variation",np.array(x_V).shape, x_V[index])
print("classes",np.array(x_C).shape, x_C[index])
if __name__ == "__main__":
test_class()
|
import sys, re, os, selenium, time, argparse
from time import sleep
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.by import By
from urllib.request import urlopen, urlretrieve
class YoutubeDownloader(object):
def __init__(self):
self.driver = webdriver.Chrome()
def download_video(self, directory, query):
driver = self.driver
download_link = "http://www.ssyoutube.com/watch?v=" + query.split("?v=")[1]
driver.get(download_link)
sleep(10)
html = driver.page_source
soup = BeautifulSoup(html, "lxml")
for a in soup.find_all('a'):
if "videoplayback" in a['href']:
name = a['href'].split('=')[-1].replace("+", " ").replace("%28", "(").replace("%29", ")")
urlretrieve(a['href'], directory + "/" + name + ".mp4")
break
driver.close()
def parse_links(self, query):
driver = self.driver
driver.get(query)
sleep(10)
html = driver.page_source
soup = BeautifulSoup(html, "lxml")
title = soup.select('yt-formatted-string.title > a:nth-child(1)')[0].text
links = list()
for a in soup.find_all('a'):
if "index=" in a['href']:
links.append(a['href'].split('v=')[-1])
return title, links
def download_playlist(self, links, list_dir, number):
driver = self.driver
num = 0
for link in links:
if(num == number):
break
num = num + 1
download_link = "http://www.ssyoutube.com/watch?v=" + link
driver.get(download_link)
time.sleep(15)
html = driver.page_source
soup = BeautifulSoup(html, "lxml")
for a in soup.find_all('a'):
if "videoplayback" in a['href']:
name = a['href'].split('=')[-1].replace("+", " ").replace("%28", "(").replace("%29", ")")
urlretrieve(a['href'], list_dir + "/" + name + ".mp4")
break
driver.close()
def create_base_directory(self, directory):
direct = os.path.dirname(directory)
if not os.path.exists(direct):
os.makedirs(direct)
else:
direct = os.path.dirname(directory)
return direct
def create_list_directory(self, directory, title):
direct = os.path.dirname(os.path.join(directory, title))
if not os.path.exists(direct):
os.makedirs(direct)
else:
            direct = os.path.dirname(os.path.join(directory, title))
return direct
def download(self, query, crawl_type, number, directory):
direct = self.create_base_directory(directory)
if(crawl_type == 'video'):
self.download_video(direct, query)
elif(crawl_type == 'playlist'):
title, links = self.parse_links(query)
list_dir = self.create_list_directory(direct, title)
self.download_playlist(links, list_dir, number)
def main():
parser = argparse.ArgumentParser(description='Youtube Downloader')
parser.add_argument('-q', '--query', type=str, help='Link of video or playlist')
parser.add_argument('-t', '--crawl_type', type=str, default='video', help="Options: 'video' | 'playlist'")
    parser.add_argument('-n', '--number', type=int, default=-1, help='Number of videos to download from playlist: integer, -1 to download all')
parser.add_argument('-d', '--directory', type=str, default='./Videos/', help='Directory to save results')
# parser.add_argument('-l', '--headless', action='store_true', help='If set, script will be run headless')
args = parser.parse_args()
downloader = YoutubeDownloader()
downloader.download(query=args.query,
crawl_type=args.crawl_type,
number=args.number,
directory=args.directory)
if __name__ == "__main__":
main()
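# Example invocations (hypothetical script name and link; assumes chromedriver is on PATH):
#   python youtube_downloader.py -q "https://www.youtube.com/watch?v=<VIDEO_ID>" -t video -d ./Videos/
#   python youtube_downloader.py -q "<playlist URL>" -t playlist -n 5 -d ./Videos/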
|
import pytest
from os.path import join
import mackinac
@pytest.mark.usefixtures('download_data')
class TestReconstruct:
def test_reconstruct_features(self, universal_folder, bacteria_folder, b_theta_features,
b_theta_summary, b_theta_id):
template = mackinac.create_template_model(
universal_folder,
bacteria_folder,
'bacteria',
'Bacteria template')
model = mackinac.reconstruct_model_from_features(
b_theta_features,
template,
b_theta_id,
'negbio',
gc_content=b_theta_summary['gc_content'] / 100.0
)
assert model.id == b_theta_id
assert len(model.reactions) == 923 # Value can change if genome annotation changes
assert len(model.metabolites) == 999 # Value can change if genome annotation changes
assert len(model.compartments) == 2
def test_reconstruct_likelihoods(self, universal_folder, bacteria_folder, b_theta_features,
b_theta_summary, b_theta_id, search_program_path,
search_db_path, fid_role_path, work_folder):
template = mackinac.create_template_model(
universal_folder,
bacteria_folder,
'bacteria',
'Bacteria template')
likelihoods = mackinac.calculate_likelihoods(
b_theta_id,
b_theta_features,
template,
search_program_path=search_program_path,
search_db_path=search_db_path,
fid_role_path=fid_role_path,
work_folder=work_folder)
assert len(likelihoods.reaction_values) == 5652
assert likelihoods.reaction_values['rxn00006']['likelihood'] == 0.0
        assert likelihoods.reaction_values['rxn14380']['likelihood'] == pytest.approx(0.9594912486067599)
model = mackinac.reconstruct_model_from_likelihoods(
likelihoods,
template,
b_theta_id,
'negbio',
gc_content=b_theta_summary['gc_content'] / 100.0
)
assert model.id == b_theta_id
assert len(model.reactions) == 1164 # Value can change if genome annotation changes
assert len(model.metabolites) == 1260 # Value can change if genome annotation changes
assert len(model.compartments) == 2
|
#!/usr/bin/env python
# Author: Alex Tereschenko <alext.mkrs@gmail.com>
# Copyright (c) 2016 Alex Tereschenko.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import mraa as m
import unittest as u
from spi_checks_shared import *
class SpiChecksLsbmode(u.TestCase):
def setUp(self):
self.spi = m.Spi(MRAA_SPI_BUS_NUM)
def tearDown(self):
del self.spi
def test_spi_set_lsbmode_false(self):
TEST_LSBMODE = False
self.assertEqual(self.spi.lsbmode(TEST_LSBMODE),
m.SUCCESS,
"Setting LSB mode to %s did not return success" %TEST_LSBMODE)
def test_spi_set_lsbmode_true(self):
TEST_LSBMODE = True
self.assertEqual(self.spi.lsbmode(TEST_LSBMODE),
m.SUCCESS,
"Setting LSB mode to %s did not return success" %TEST_LSBMODE)
def test_spi_set_lsbmode_invalid(self):
TEST_LSBMODE = 10
self.assertRaises(TypeError, self.spi.lsbmode, TEST_LSBMODE)
if __name__ == "__main__":
u.main()
|
class ThemeSpaceListGeneric:
list = None
list_text = None
list_text_hi = None
list_title = None
|
from mirage.libs import io
class Loader:
'''
This class permits to dynamically load the modules.
'''
def __init__(self):
'''
This constructor generates the modules list.
'''
import mirage.modules as modules
self.modulesList = {}
for moduleName,module in modules.__modules__.items():
            current = module  # __import__("modules."+module, fromlist=module)
moduleClass = getattr(current,moduleName)
self.modulesList[moduleName] = moduleClass
def getModulesNames(self):
'''
This method returns a list of existing modules' names.
:return: list of modules' name
:rtype: list of str
'''
return list(self.modulesList.keys())
def load(self,moduleName):
'''
This method returns an instance of a specific module according to the name provided as parameter.
:param moduleName: name of a module
:type moduleName: str
:return: an instance of the module
:rtype: core.module.Module
'''
if moduleName in self.modulesList:
return self.modulesList[moduleName]()
else:
return None
def list(self,pattern=""):
'''
Display the list of module, filtered by the string provided as ``pattern``.
:param pattern: filter
:type pattern: str
'''
displayDict = {}
for module in self.modulesList:
info = self.modulesList[module]().info()
technology = (info["technology"]).upper()
if (
pattern in info["description"] or
pattern in info["name"] or
pattern in info["technology"] or
pattern in info["type"]
):
if not technology in displayDict:
displayDict[technology] = []
displayDict[technology].append([info["name"], info["type"], info["description"]])
for module in sorted(displayDict):
if displayDict[module]:
io.chart(["Name", "Type","Description"], sorted(displayDict[module]), "{} Modules".format(module))
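# Usage sketch (module names below are illustrative, not guaranteed to exist):
#
#   loader = Loader()
#   print(loader.getModulesNames())     # every module registered in mirage.modules
#   scan = loader.load("ble_scan")      # instance of the module, or None if unknown
#   loader.list(pattern="ble")          # chart of modules whose info() matches "ble"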
|
#!/usr/bin/env python
import boto3
import json
import sys
client = boto3.client('ecs')
data = json.load(sys.stdin)
family_prefix = data['family_prefix']
task_def = client.list_task_definitions(familyPrefix=family_prefix,
status="ACTIVE", sort="DESC", maxResults=1)
task_arn = task_def["taskDefinitionArns"]
sys.stdout.write(json.dumps({"task_arn": "%s" % task_arn[0]}))
sys.exit(0)
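# Example run (values are illustrative); the script reads JSON on stdin and prints JSON on stdout:
#   echo '{"family_prefix": "web"}' | python latest_task_definition.py
#   {"task_arn": "arn:aws:ecs:us-east-1:123456789012:task-definition/web:42"}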
|
import itertools
import os
import numpy as np
import pandas as pd
from utils.Recording import Recording
import utils.settings as settings
def load_opportunity_dataset(opportunity_dataset_path: str) -> "list[Recording]":
"""
Returns a list of Recordings from the opportunity dataset
"""
print("Will read the opportunity dataset")
opportunity_dataset_path += "/dataset"
subject_ids = range(1, 5)
recording_ids = range(1, 6)
# see loader/opportunity_col_names to make your selection
selected_feature_names = [
"IMU-BACK-accX",
"IMU-BACK-accY",
"IMU-BACK-accZ",
"IMU-BACK-Quaternion1",
"IMU-BACK-Quaternion2",
"IMU-BACK-Quaternion3",
"IMU-BACK-Quaternion4",
"IMU-RLA-accX",
"IMU-RLA-accY",
"IMU-RLA-accZ",
"IMU-RLA-Quaternion1",
"IMU-RLA-Quaternion2",
"IMU-RLA-Quaternion3",
"IMU-RLA-Quaternion4",
"IMU-LLA-accX",
"IMU-LLA-accY",
"IMU-LLA-accZ",
"IMU-LLA-Quaternion1",
"IMU-LLA-Quaternion2",
"IMU-LLA-Quaternion3",
"IMU-LLA-Quaternion4",
"IMU-L-SHOE-EuX",
"IMU-L-SHOE-EuY",
"IMU-L-SHOE-EuZ",
"IMU-L-SHOE-Nav_Ax",
"IMU-L-SHOE-Nav_Ay",
"IMU-L-SHOE-Nav_Az",
"IMU-L-SHOE-Body_Ax",
"IMU-L-SHOE-Body_Ay",
"IMU-L-SHOE-Body_Az",
"IMU-L-SHOE-AngVelBodyFrameX",
"IMU-L-SHOE-AngVelBodyFrameY",
"IMU-L-SHOE-AngVelBodyFrameZ",
"IMU-L-SHOE-AngVelNavFrameX",
"IMU-L-SHOE-AngVelNavFrameY",
"IMU-L-SHOE-AngVelNavFrameZ",
"IMU-R-SHOE-EuX",
"IMU-R-SHOE-EuY",
"IMU-R-SHOE-EuZ",
"IMU-R-SHOE-Nav_Ax",
"IMU-R-SHOE-Nav_Ay",
"IMU-R-SHOE-Nav_Az",
"IMU-R-SHOE-Body_Ax",
"IMU-R-SHOE-Body_Ay",
"IMU-R-SHOE-Body_Az",
"IMU-R-SHOE-AngVelBodyFrameX",
"IMU-R-SHOE-AngVelBodyFrameY",
"IMU-R-SHOE-AngVelBodyFrameZ",
"IMU-R-SHOE-AngVelNavFrameX",
"IMU-R-SHOE-AngVelNavFrameY",
"IMU-R-SHOE-AngVelNavFrameZ",
]
print(f"Selected features (n_features: {len(selected_feature_names)}):\n", "\n".join(["\t" + str(feature_name) for feature_name in selected_feature_names]))
# Get column names
col_names = []
with open("src/loader/opportunity_col_names", "r") as file:
lines = file.read().splitlines()
for line in lines:
col_names.append(line)
recordings = []
for sub, rec in itertools.product(subject_ids, recording_ids):
file_name = f"S{sub}-ADL{rec}.dat"
file_path = os.path.join(opportunity_dataset_path, file_name)
print(f"Reading {file_path} ...")
file_df = pd.read_csv(file_path, delimiter=" ", header=None)
file_df.columns = col_names # give them the real column names
recordings.append(Recording(
sensor_frame = file_df.loc[:, selected_feature_names],
time_frame = file_df.loc[:, 'MILLISEC'],
activities = file_df.loc[:, 'HL_Activity'].map(
lambda label: settings.DATA_CONFIG.raw_label_to_activity_idx(label)
), # Use `[0]` to get only one activity | maps 0, 101, 102, 103, 104, 105 to 0, 1, 2, 3, 4, 5
subject=int(sub),
recording_index=int(rec)
))
print(f"\n => Total {len(recordings)} recordings read")
return recordings
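# Usage sketch (the dataset path is illustrative; settings.DATA_CONFIG must already be
# initialised by the surrounding project for the label mapping above to work):
#   recordings = load_opportunity_dataset("data/opportunity-dataset")
#   print(len(recordings))    # 4 subjects x 5 recordings = 20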
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetProtectedItemResult',
'AwaitableGetProtectedItemResult',
'get_protected_item',
]
@pulumi.output_type
class GetProtectedItemResult:
"""
Base class for backup items.
"""
def __init__(__self__, e_tag=None, location=None, name=None, properties=None, tags=None, type=None):
if e_tag and not isinstance(e_tag, str):
raise TypeError("Expected argument 'e_tag' to be a str")
pulumi.set(__self__, "e_tag", e_tag)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> Optional[str]:
"""
Optional ETag.
"""
return pulumi.get(self, "e_tag")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name associated with the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> Any:
"""
ProtectedItemResource properties
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type represents the complete path of the form Namespace/ResourceType/ResourceType/...
"""
return pulumi.get(self, "type")
class AwaitableGetProtectedItemResult(GetProtectedItemResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetProtectedItemResult(
e_tag=self.e_tag,
location=self.location,
name=self.name,
properties=self.properties,
tags=self.tags,
type=self.type)
def get_protected_item(container_name: Optional[str] = None,
fabric_name: Optional[str] = None,
filter: Optional[str] = None,
protected_item_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
vault_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetProtectedItemResult:
"""
Use this data source to access information about an existing resource.
:param str container_name: Container name associated with the backed up item.
:param str fabric_name: Fabric name associated with the backed up item.
:param str filter: OData filter options.
:param str protected_item_name: Backed up item name whose details are to be fetched.
:param str resource_group_name: The name of the resource group where the recovery services vault is present.
:param str vault_name: The name of the recovery services vault.
"""
__args__ = dict()
__args__['containerName'] = container_name
__args__['fabricName'] = fabric_name
__args__['filter'] = filter
__args__['protectedItemName'] = protected_item_name
__args__['resourceGroupName'] = resource_group_name
__args__['vaultName'] = vault_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:recoveryservices/v20190513:getProtectedItem', __args__, opts=opts, typ=GetProtectedItemResult).value
return AwaitableGetProtectedItemResult(
e_tag=__ret__.e_tag,
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
tags=__ret__.tags,
type=__ret__.type)
|
from estudent.school import School
def run_example():
school = School.create_school_with_students("Hogwart")
print(school)
    print(f"The school can hold at most {school.MAX_STUDENTS_NUMBER} students")
if __name__ == '__main__':
run_example()
|
from urllib.request import urlopen
import torch
from torch import nn
import numpy as np
from skimage.morphology import label
import os
from HD_BET.paths import folder_with_parameter_files
def get_params_fname(fold):
return os.path.join(folder_with_parameter_files, "%d.model" % fold)
def maybe_download_parameters(fold=0, force_overwrite=False):
"""
Downloads the parameters for some fold if it is not present yet.
:param fold:
:param force_overwrite: if True the old parameter file will be deleted (if present) prior to download
:return:
"""
assert 0 <= fold <= 4, "fold must be between 0 and 4"
if not os.path.isdir(folder_with_parameter_files):
maybe_mkdir_p(folder_with_parameter_files)
out_filename = get_params_fname(fold)
if force_overwrite and os.path.isfile(out_filename):
os.remove(out_filename)
if not os.path.isfile(out_filename):
url = "https://zenodo.org/record/2540695/files/%d.model?download=1" % fold
print("Downloading", url, "...")
data = urlopen(url).read()
with open(out_filename, 'wb') as f:
f.write(data)
def init_weights(module):
    if isinstance(module, nn.Conv3d):
        # use the in-place initialisers; the non-underscore variants are deprecated
        nn.init.kaiming_normal_(module.weight, a=1e-2)
        if module.bias is not None:
            nn.init.constant_(module.bias, 0)
def softmax_helper(x):
rpt = [1 for _ in range(len(x.size()))]
rpt[1] = x.size(1)
x_max = x.max(1, keepdim=True)[0].repeat(*rpt)
e_x = torch.exp(x - x_max)
return e_x / e_x.sum(1, keepdim=True).repeat(*rpt)
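# softmax_helper is a numerically stable softmax over dim 1 (the channel axis).
# Quick sanity check (sketch) against the built-in:
#   x = torch.randn(2, 4, 8, 8, 8)
#   assert torch.allclose(softmax_helper(x), torch.nn.functional.softmax(x, dim=1), atol=1e-6)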
class SetNetworkToVal(object):
def __init__(self, use_dropout_sampling=False, norm_use_average=True):
self.norm_use_average = norm_use_average
self.use_dropout_sampling = use_dropout_sampling
def __call__(self, module):
if isinstance(module, nn.Dropout3d) or isinstance(module, nn.Dropout2d) or isinstance(module, nn.Dropout):
module.train(self.use_dropout_sampling)
elif isinstance(module, nn.InstanceNorm3d) or isinstance(module, nn.InstanceNorm2d) or \
isinstance(module, nn.InstanceNorm1d) \
or isinstance(module, nn.BatchNorm2d) or isinstance(module, nn.BatchNorm3d) or \
isinstance(module, nn.BatchNorm1d):
module.train(not self.norm_use_average)
def postprocess_prediction(seg):
# basically look for connected components and choose the largest one, delete everything else
print("running postprocessing... ")
mask = seg != 0
lbls = label(mask, connectivity=mask.ndim)
lbls_sizes = [np.sum(lbls == i) for i in np.unique(lbls)]
largest_region = np.argmax(lbls_sizes[1:]) + 1
seg[lbls != largest_region] = 0
return seg
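# Sketch: a toy segmentation with two blobs keeps only the larger one.
#   seg = np.zeros((1, 10, 10), dtype=np.uint8)
#   seg[0, :2, :2] = 1            # 4-voxel blob
#   seg[0, 5:, 5:] = 1            # 25-voxel blob
#   cleaned = postprocess_prediction(seg.copy())
#   assert cleaned[0, 0, 0] == 0 and cleaned[0, 7, 7] == 1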
def subdirs(folder, join=True, prefix=None, suffix=None, sort=True):
if join:
l = os.path.join
else:
l = lambda x, y: y
res = [l(folder, i) for i in os.listdir(folder) if os.path.isdir(os.path.join(folder, i))
and (prefix is None or i.startswith(prefix))
and (suffix is None or i.endswith(suffix))]
if sort:
res.sort()
return res
def subfiles(folder, join=True, prefix=None, suffix=None, sort=True):
if join:
l = os.path.join
else:
l = lambda x, y: y
res = [l(folder, i) for i in os.listdir(folder) if os.path.isfile(os.path.join(folder, i))
and (prefix is None or i.startswith(prefix))
and (suffix is None or i.endswith(suffix))]
if sort:
res.sort()
return res
subfolders = subdirs # I am tired of confusing those
def maybe_mkdir_p(directory):
splits = directory.split("/")[1:]
for i in range(0, len(splits)):
if not os.path.isdir(os.path.join("/", *splits[:i+1])):
os.mkdir(os.path.join("/", *splits[:i+1]))
|
""" Azure Automation assets module to be used with Azure Automation during offline development """
#!/usr/bin/env python2
# ----------------------------------------------------------------------------------
#
# MIT License
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ----------------------------------------------------------------------------------
# Constant keys for extracting items from automation assets.
_KEY_NAME = "Name"
_KEY_VALUE = "Value"
_KEY_USERNAME = "Username"
_KEY_PASSWORD = "Password"
_KEY_CERTPATH = "CertPath"
_KEY_CONNECTION_FIELDS = "ValueFields"
# Assets supported in Azure automation within python scripts
_KEY_VARIABLE = "Variable"
_KEY_CERTIFICATE = "Certificate"
_KEY_CREDENTIAL = "Credential"
_KEY_CONNECTION = "Connection"
# Get Azure Automation asset json file
def _get_automation_asset_file():
import os
if os.environ.get('AUTOMATION_ASSET_FILE') is not None:
return os.environ.get('AUTOMATION_ASSET_FILE')
return os.path.join(os.path.dirname(__file__), "localassets.json")
# Helper function to find an asset of a specific type and name in the asset file
def _get_asset_value(asset_file, asset_type, asset_name):
import json
json_data = open(asset_file)
json_string = json_data.read()
local_assets = json.loads(json_string)
return_value = None
for asset, asset_values in local_assets.iteritems():
if asset == asset_type:
for value in asset_values:
if value[_KEY_NAME] == asset_name:
return_value = value
break
if return_value != None:
# Found the value so break out of loop
break
return return_value
# Returns an asset from the assets file
def _get_asset(asset_type, asset_name):
local_assets_file = _get_automation_asset_file()
# Look in assets file for value
return_value = _get_asset_value(local_assets_file, asset_type, asset_name)
if return_value is None:
raise LookupError("asset:" + asset_name + " not found")
return return_value
# Helper function to set an asset of a specific type and name in the assetFile
def _set_asset_value(asset_file, asset_type, asset_name, asset_value):
import json
json_data = open(asset_file)
json_string = json_data.read()
local_assets = json.loads(json_string)
item_found = False
for asset, asset_values in local_assets.iteritems():
if asset == asset_type:
for value in asset_values:
if value[_KEY_NAME] == asset_name:
value[_KEY_VALUE] = asset_value
with open(asset_file, 'w') as asset_file_content:
asset_file_content.write(json.dumps(local_assets, indent=4))
item_found = True
break
if item_found:
break
return item_found
# Updates an asset in the assets file
def _set_asset(asset_type, asset_name, asset_value):
local_assets_file = _get_automation_asset_file()
# Check assets file for value.
item_found = _set_asset_value(local_assets_file,
asset_type, asset_name, asset_value)
if item_found is False:
raise LookupError("asset:" + asset_name + " not found")
# Below are the 5 supported calls that can be made to automation assets from within
# a python script
def get_automation_variable(name):
""" Returns an automation variable """
variable = _get_asset(_KEY_VARIABLE, name)
return variable[_KEY_VALUE]
def set_automation_variable(name, value):
""" Sets an automation variable """
_set_asset(_KEY_VARIABLE, name, value)
def get_automation_credential(name):
    """ Returns an automation credential as a dictionary with username and password as keys """
credential = _get_asset(_KEY_CREDENTIAL, name)
# Return a dictionary of the credential asset
credential_dictionary = {}
credential_dictionary['username'] = credential['Username']
credential_dictionary['password'] = credential['Password']
return credential_dictionary
def get_automation_connection(name):
""" Returns an automation connection dictionary """
connection = _get_asset(_KEY_CONNECTION, name)
return connection[_KEY_CONNECTION_FIELDS]
def get_automation_certificate(name):
""" Returns an automation certificate in PKCS12 bytes """
from OpenSSL import crypto
certificate = _get_asset(_KEY_CERTIFICATE, name)
pks12_cert = crypto.load_pkcs12(open(certificate[_KEY_CERTPATH], 'rb').read(),
certificate[_KEY_PASSWORD])
return crypto.PKCS12.export(pks12_cert)
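# Sketch of the expected localassets.json layout (field names come from the constant
# keys above; the concrete values are illustrative only):
#
# {
#     "Variable":    [{"Name": "myVar",  "Value": 42}],
#     "Credential":  [{"Name": "myCred", "Username": "user", "Password": "secret"}],
#     "Connection":  [{"Name": "myConn", "ValueFields": {"SubscriptionId": "..."}}],
#     "Certificate": [{"Name": "myCert", "CertPath": "cert.pfx", "Password": "secret"}]
# }
#
# With such a file in place, get_automation_variable("myVar") returns 42 and
# set_automation_variable("myVar", 43) rewrites the file with the new value.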
|
# Generated by Django 3.1.7 on 2021-03-23 19:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('transactions', '0007_auto_20210323_1910'),
]
operations = [
migrations.AlterField(
model_name='customer',
name='customer_id',
field=models.BigIntegerField(default='791425045985934', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='supermarket',
name='supermarket_id',
field=models.BigIntegerField(default='874067270903651', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='transaction',
name='transaction_id',
field=models.BigIntegerField(default='363109663162057', primary_key=True, serialize=False),
),
]
|
#!/usr/bin/env python3
import sys
import csv
"""
Field Name Full name Format Example
1 SEQ Sequence number Int (6) 86415
2 KM_REF Kilometre reference Char (6) ST5265
3 DEF_NAM Definitive name Char (60) Felton
4 TILE_REF Tile reference Char (4) ST46
5 LAT_DEG Latitude degrees Int (2) 51
6 LAT_MIN Latitude minutes Float (3.1) 23.1
7 LONG_DEG Longitude degrees Int (2) 2
8 LONG_MIN Longitude minutes Float (3.1) 41
9 NORTH Northings Int (7) 165500
10 EAST Eastings Int (7) 352500
11 GMT Greenwich Meridian Char (1) W
12 CO_CODE County code Char (2) NS
13 COUNTY County name Char (20) N Som
14 FULL_COUNTY Full county name Char (60) North Somerset
15 F_CODE Feature code Char (3) O
16 E_DATE Edit date Char (11) 01-MAR-1993
17 UPDATE_CO Update code Char (1) l
18 SHEET_1 Primary sheet no Int (3) 172
19 SHEET_2 Second sheet no Int (3) 182
20 SHEET_3 Third sheet no Int (3) 0
"""
county = {}
for row in csv.reader(sys.stdin, delimiter=':', quoting=csv.QUOTE_NONE):
county[row[11]] = {
'gaz50k': row[11],
'county': row[12],
'name': row[13],
}
fields = [ 'gaz50k', 'county', 'name' ]
print("\t".join(fields))
for code in county:
print("\t".join([county[code][field] or "" for field in fields]))
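# Example run (file name is illustrative): the 1:50 000 gazetteer uses ':' as its field
# separator, so the raw file is piped in on stdin and a TSV of county codes comes out:
#   ./extract_counties.py < 50kgaz.txt > counties.tsv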
|
#!/usr/bin/env python3
# python setup.py sdist --format=zip,gztar
from setuptools import setup
import os
import sys
import platform
import imp
import argparse
version = imp.load_source('version', 'lib/version.py')
def readhere(path):
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, path), 'r') as fd:
return fd.read()
def readreqs(path):
return [req for req in
[line.strip() for line in readhere(path).split('\n')]
if req and not req.startswith(('#', '-r'))]
install_requires = readreqs('requirements.txt')
tests_requires = install_requires + readreqs('requirements_travis.txt')
if sys.version_info[:3] < (3, 4, 0):
sys.exit("Error: Electrum requires Python version >= 3.4.0...")
data_files = []
if platform.system() in ['Linux', 'FreeBSD', 'DragonFly']:
parser = argparse.ArgumentParser()
parser.add_argument('--root=', dest='root_path', metavar='dir', default='/')
opts, _ = parser.parse_known_args(sys.argv[1:])
usr_share = os.path.join(sys.prefix, "share")
if not os.access(opts.root_path + usr_share, os.W_OK) and \
not os.access(opts.root_path, os.W_OK):
if 'XDG_DATA_HOME' in os.environ.keys():
usr_share = os.environ['XDG_DATA_HOME']
else:
usr_share = os.path.expanduser('~/.local/share')
data_files += [
(os.path.join(usr_share, 'applications/'), ['electrum.desktop']),
(os.path.join(usr_share, 'pixmaps/'), ['icons/electrum.png'])
]
setup(
name="Electrum-ZCL",
version=version.ELECTRUM_VERSION,
install_requires=install_requires,
tests_require=tests_requires,
packages=[
'electrum',
'electrum_gui',
'electrum_gui.qt',
'electrum_plugins',
'electrum_plugins.audio_modem',
'electrum_plugins.cosigner_pool',
'electrum_plugins.email_requests',
'electrum_plugins.greenaddress_instant',
'electrum_plugins.hw_wallet',
'electrum_plugins.keepkey',
'electrum_plugins.labels',
'electrum_plugins.ledger',
'electrum_plugins.trezor',
'electrum_plugins.digitalbitbox',
'electrum_plugins.trustedcoin',
'electrum_plugins.virtualkeyboard',
],
package_dir={
'electrum': 'lib',
'electrum_gui': 'gui',
'electrum_plugins': 'plugins',
},
package_data={
'electrum': [
'servers.json',
'servers_testnet.json',
'currencies.json',
'checkpoints.json',
'checkpoints_testnet.json',
'www/index.html',
'wordlist/*.txt',
'locale/*/LC_MESSAGES/electrum.mo',
]
},
scripts=['electrum-zcl'],
data_files=data_files,
description="Lightweight Zclassic Wallet",
author="Zclassic CE",
author_email="team@zclassic-ce.org",
license="MIT Licence",
url="https://zclassic-ce.org",
long_description="""Lightweight Zclassic Wallet"""
)
|
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 16 22:30:11 2020
@author: Easin
"""
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow.keras import Model, layers
import numpy as np
import matplotlib.pyplot as plt
# MNIST dataset parameters.
num_classes = 10 # total classes (0-9 digits).
num_features = 784 # data features (img shape: 28*28).
# Training parameters.
learning_rate = 0.1
training_steps = 2000
batch_size = 256
display_step = 100
# Network parameters.
n_hidden_1 = 128 # 1st layer number of neurons.
n_hidden_2 = 256 # 2nd layer number of neurons.
# Prepare MNIST data.
from tensorflow.keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Convert to float32.
x_train, x_test = np.array(x_train, np.float32), np.array(x_test, np.float32)
# Flatten images to 1-D vector of 784 features (28*28).
x_train, x_test = x_train.reshape([-1, num_features]), x_test.reshape([-1, num_features])
# Normalize images value from [0, 255] to [0, 1].
x_train, x_test = x_train / 255., x_test / 255.
# Use tf.data API to shuffle and batch data.
train_data = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_data = train_data.repeat().shuffle(5000).batch(batch_size).prefetch(1)
# Create TF Model.
class NeuralNet(Model):
# Set layers.
def __init__(self):
super(NeuralNet, self).__init__()
# First fully-connected hidden layer.
self.fc1 = layers.Dense(n_hidden_1, activation=tf.nn.relu)
        # Second fully-connected hidden layer.
self.fc2 = layers.Dense(n_hidden_2, activation=tf.nn.relu)
        # Output layer (fully connected).
self.out = layers.Dense(num_classes)
# Set forward pass.
def call(self, x, is_training=False):
x = self.fc1(x)
x = self.fc2(x)
x = self.out(x)
if not is_training:
            # tf cross-entropy expects logits without softmax, so only
# apply softmax when not training.
x = tf.nn.softmax(x)
return x
# Build neural network model.
neural_net = NeuralNet()
# Cross-Entropy Loss.
# Note that this will apply 'softmax' to the logits.
def cross_entropy_loss(x, y):
# Convert labels to int 64 for tf cross-entropy function.
y = tf.cast(y, tf.int64)
# Apply softmax to logits and compute cross-entropy.
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=x)
# Average loss across the batch.
return tf.reduce_mean(loss)
# Accuracy metric.
def accuracy(y_pred, y_true):
# Predicted class is the index of highest score in prediction vector (i.e. argmax).
correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))
return tf.reduce_mean(tf.cast(correct_prediction, tf.float32), axis=-1)
# Stochastic gradient descent optimizer.
optimizer = tf.optimizers.SGD(learning_rate)
# Optimization process.
def run_optimization(x, y):
# Wrap computation inside a GradientTape for automatic differentiation.
with tf.GradientTape() as g:
# Forward pass.
pred = neural_net(x, is_training=True)
# Compute loss.
loss = cross_entropy_loss(pred, y)
# Variables to update, i.e. trainable variables.
trainable_variables = neural_net.trainable_variables
# Compute gradients.
gradients = g.gradient(loss, trainable_variables)
# Update W and b following gradients.
optimizer.apply_gradients(zip(gradients, trainable_variables))
# Run training for the given number of steps.
for step, (batch_x, batch_y) in enumerate(train_data.take(training_steps), 1):
# Run the optimization to update W and b values.
run_optimization(batch_x, batch_y)
if step % display_step == 0:
pred = neural_net(batch_x, is_training=True)
loss = cross_entropy_loss(pred, batch_y)
acc = accuracy(pred, batch_y)
print("step: %i, loss: %f, accuracy: %f" % (step, loss, acc))
# Test model on validation set.
pred = neural_net(x_test, is_training=False)
print("Test Accuracy: %f" % accuracy(pred, y_test))
# Predict 5 images from validation set.
n_images = 5
test_images = x_test[:n_images]
predictions = neural_net(test_images)
# Display image and model prediction.
for i in range(n_images):
plt.imshow(np.reshape(test_images[i], [28, 28]), cmap='gray')
plt.show()
print("Model prediction: %i" % np.argmax(predictions.numpy()[i]))
|
# Copyright 2017-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
'''
Description:
Check whether the ElasticSearch Domains are in VPC and not as a public endpoint.
Trigger:
Periodic
Reports on:
AWS::Elasticsearch::Domain
Feature:
In order to: to protect my data for exposure
As: a Security Officer
I want: To ensure that all my ElasticSearch Domains to be in VPC and not as a public endpoint.
Rule Parameters:
None
Scenarios:
Scenario 1:
Given: No ElasticSearch Domain is present
Then: Return NOT_APPLICABLE on AWS::::Account
Scenario 2:
Given: At least one ElasticSearch Domain is present
And: No 'VPCOptions' key is present in the list of "DomainName" on DescribeElasticsearchDomains API
Then: Return NON_COMPLIANT on this Domain
Scenario 3:
Given: At least one ElasticSearch Domain is present
And: The 'VPCOptions' key is present in the list of "DomainName" on DescribeElasticsearchDomains API
Then: Return COMPLIANT on this Domain
'''
import json
import sys
import time
import datetime
import boto3
import botocore
try:
import liblogging
except ImportError:
pass
##############
# Parameters #
##############
# Define the default resource to report to Config Rules
DEFAULT_RESOURCE_TYPE = 'AWS::Elasticsearch::Domain'
# Set to True to get the lambda to assume the Role attached on the Config Service (useful for cross-account).
ASSUME_ROLE_MODE = True
# Other parameters (no change needed)
CONFIG_ROLE_TIMEOUT_SECONDS = 900
PAUSE_TO_AVOID_THROTTLE_SECONDS = 4
#############
# Main Code #
#############
def get_all_domain_details(es_client, es_domains):
es_domain_list_details = []
es_domains_names_only = []
for es_domain in es_domains:
es_domains_names_only.append(es_domain['DomainName'])
while es_domains_names_only:
time.sleep(PAUSE_TO_AVOID_THROTTLE_SECONDS)
domain_details = es_client.describe_elasticsearch_domains(DomainNames=es_domains_names_only[:5])['DomainStatusList']
es_domain_list_details += domain_details
del es_domains_names_only[:5]
return es_domain_list_details
def evaluate_compliance(event, configuration_item, valid_rule_parameters):
es_client = get_client('es', event)
es_domain_list = es_client.list_domain_names()['DomainNames']
if not es_domain_list:
return build_evaluation(event['accountId'], 'NOT_APPLICABLE', event, resource_type='AWS::::Account')
es_domain_list_details = get_all_domain_details(es_client, es_domain_list)
evaluation_list = []
for es_domain_details in es_domain_list_details:
if 'VPCOptions' not in es_domain_details:
compliance_type = 'NON_COMPLIANT'
else:
compliance_type = 'COMPLIANT'
evaluation_list.append(build_evaluation(es_domain_details['DomainName'], compliance_type, event))
if evaluation_list:
return evaluation_list
return build_evaluation(event['accountId'], 'NOT_APPLICABLE', event, resource_type='AWS::::Account')
def evaluate_parameters(rule_parameters):
"""Evaluate the rule parameters dictionary validity. Raise a ValueError for invalid parameters.
Return:
anything suitable for the evaluate_compliance()
Keyword arguments:
rule_parameters -- the Key/Value dictionary of the Config Rules parameters
"""
valid_rule_parameters = rule_parameters
return valid_rule_parameters
####################
# Helper Functions #
####################
# Build an error to be displayed in the logs when the parameter is invalid.
def build_parameters_value_error_response(ex):
"""Return an error dictionary when the evaluate_parameters() raises a ValueError.
Keyword arguments:
ex -- Exception text
"""
return build_error_response(internal_error_message="Parameter value is invalid",
internal_error_details="An ValueError was raised during the validation of the Parameter value",
customer_error_code="InvalidParameterValueException",
customer_error_message=str(ex))
# This gets the client after assuming the Config service role
# either in the same AWS account or cross-account.
def get_client(service, event):
"""Return the service boto client. It should be used instead of directly calling the client.
Keyword arguments:
service -- the service name used for calling the boto.client()
event -- the event variable given in the lambda handler
"""
if not ASSUME_ROLE_MODE:
return boto3.client(service)
credentials = get_assume_role_credentials(event["executionRoleArn"])
return boto3.client(service, aws_access_key_id=credentials['AccessKeyId'],
aws_secret_access_key=credentials['SecretAccessKey'],
aws_session_token=credentials['SessionToken']
)
# This generate an evaluation for config
def build_evaluation(resource_id, compliance_type, event, resource_type=DEFAULT_RESOURCE_TYPE, annotation=None):
"""Form an evaluation as a dictionary. Usually suited to report on scheduled rules.
Keyword arguments:
resource_id -- the unique id of the resource to report
compliance_type -- either COMPLIANT, NON_COMPLIANT or NOT_APPLICABLE
event -- the event variable given in the lambda handler
resource_type -- the CloudFormation resource type (or AWS::::Account) to report on the rule (default DEFAULT_RESOURCE_TYPE)
annotation -- an annotation to be added to the evaluation (default None)
"""
eval_cc = {}
if annotation:
eval_cc['Annotation'] = annotation
eval_cc['ComplianceResourceType'] = resource_type
eval_cc['ComplianceResourceId'] = resource_id
eval_cc['ComplianceType'] = compliance_type
eval_cc['OrderingTimestamp'] = str(json.loads(event['invokingEvent'])['notificationCreationTime'])
return eval_cc
def build_evaluation_from_config_item(configuration_item, compliance_type, annotation=None):
"""Form an evaluation as a dictionary. Usually suited to report on configuration change rules.
Keyword arguments:
configuration_item -- the configurationItem dictionary in the invokingEvent
compliance_type -- either COMPLIANT, NON_COMPLIANT or NOT_APPLICABLE
annotation -- an annotation to be added to the evaluation (default None)
"""
eval_ci = {}
if annotation:
eval_ci['Annotation'] = annotation
eval_ci['ComplianceResourceType'] = configuration_item['resourceType']
eval_ci['ComplianceResourceId'] = configuration_item['resourceId']
eval_ci['ComplianceType'] = compliance_type
eval_ci['OrderingTimestamp'] = configuration_item['configurationItemCaptureTime']
return eval_ci
####################
# Boilerplate Code #
####################
# Helper function used to validate input
def check_defined(reference, reference_name):
if not reference:
raise Exception('Error: ', reference_name, 'is not defined')
return reference
# Check whether the message is OversizedConfigurationItemChangeNotification or not
def is_oversized_changed_notification(message_type):
check_defined(message_type, 'messageType')
return message_type == 'OversizedConfigurationItemChangeNotification'
# Check whether the message is a ScheduledNotification or not.
def is_scheduled_notification(message_type):
check_defined(message_type, 'messageType')
return message_type == 'ScheduledNotification'
# Get configurationItem using getResourceConfigHistory API
# in case of OversizedConfigurationItemChangeNotification
def get_configuration(resource_type, resource_id, configuration_capture_time):
result = AWS_CONFIG_CLIENT.get_resource_config_history(
resourceType=resource_type,
resourceId=resource_id,
laterTime=configuration_capture_time,
limit=1)
configuration_item = result['configurationItems'][0]
return convert_api_configuration(configuration_item)
# Convert from the API model to the original invocation model
def convert_api_configuration(configuration_item):
for k, v in configuration_item.items():
if isinstance(v, datetime.datetime):
configuration_item[k] = str(v)
configuration_item['awsAccountId'] = configuration_item['accountId']
configuration_item['ARN'] = configuration_item['arn']
configuration_item['configurationStateMd5Hash'] = configuration_item['configurationItemMD5Hash']
configuration_item['configurationItemVersion'] = configuration_item['version']
configuration_item['configuration'] = json.loads(configuration_item['configuration'])
if 'relationships' in configuration_item:
for i in range(len(configuration_item['relationships'])):
configuration_item['relationships'][i]['name'] = configuration_item['relationships'][i]['relationshipName']
return configuration_item
# Based on the type of message get the configuration item
# either from configurationItem in the invoking event
# or using the getResourceConfigHistiry API in getConfiguration function.
def get_configuration_item(invoking_event):
check_defined(invoking_event, 'invokingEvent')
if is_oversized_changed_notification(invoking_event['messageType']):
configuration_item_summary = check_defined(invoking_event['configuration_item_summary'], 'configurationItemSummary')
return get_configuration(configuration_item_summary['resourceType'], configuration_item_summary['resourceId'], configuration_item_summary['configurationItemCaptureTime'])
if is_scheduled_notification(invoking_event['messageType']):
return None
return check_defined(invoking_event['configurationItem'], 'configurationItem')
# Check whether the resource has been deleted. If it has, then the evaluation is unnecessary.
def is_applicable(configuration_item, event):
try:
check_defined(configuration_item, 'configurationItem')
check_defined(event, 'event')
except:
return True
status = configuration_item['configurationItemStatus']
event_left_scope = event['eventLeftScope']
if status == 'ResourceDeleted':
print("Resource Deleted, setting Compliance Status to NOT_APPLICABLE.")
return status in ('OK', 'ResourceDiscovered') and not event_left_scope
def get_assume_role_credentials(role_arn):
sts_client = boto3.client('sts')
try:
assume_role_response = sts_client.assume_role(RoleArn=role_arn,
RoleSessionName="configLambdaExecution",
DurationSeconds=CONFIG_ROLE_TIMEOUT_SECONDS)
if 'liblogging' in sys.modules:
liblogging.logSession(role_arn, assume_role_response)
return assume_role_response['Credentials']
except botocore.exceptions.ClientError as ex:
# Scrub error message for any internal account info leaks
print(str(ex))
if 'AccessDenied' in ex.response['Error']['Code']:
ex.response['Error']['Message'] = "AWS Config does not have permission to assume the IAM role."
else:
ex.response['Error']['Message'] = "InternalError"
ex.response['Error']['Code'] = "InternalError"
raise ex
# This removes older evaluation (usually useful for periodic rule not reporting on AWS::::Account).
def clean_up_old_evaluations(latest_evaluations, event):
cleaned_evaluations = []
old_eval = AWS_CONFIG_CLIENT.get_compliance_details_by_config_rule(
ConfigRuleName=event['configRuleName'],
ComplianceTypes=['COMPLIANT', 'NON_COMPLIANT'],
Limit=100)
old_eval_list = []
while True:
for old_result in old_eval['EvaluationResults']:
old_eval_list.append(old_result)
if 'NextToken' in old_eval:
next_token = old_eval['NextToken']
old_eval = AWS_CONFIG_CLIENT.get_compliance_details_by_config_rule(
ConfigRuleName=event['configRuleName'],
ComplianceTypes=['COMPLIANT', 'NON_COMPLIANT'],
Limit=100,
NextToken=next_token)
else:
break
for old_eval in old_eval_list:
old_resource_id = old_eval['EvaluationResultIdentifier']['EvaluationResultQualifier']['ResourceId']
        newer_found = False
        for latest_eval in latest_evaluations:
            if old_resource_id == latest_eval['ComplianceResourceId']:
                newer_found = True
                break
        if not newer_found:
cleaned_evaluations.append(build_evaluation(old_resource_id, "NOT_APPLICABLE", event))
return cleaned_evaluations + latest_evaluations
def lambda_handler(event, context):
if 'liblogging' in sys.modules:
liblogging.logEvent(event)
global AWS_CONFIG_CLIENT
#print(event)
check_defined(event, 'event')
invoking_event = json.loads(event['invokingEvent'])
rule_parameters = {}
if 'ruleParameters' in event:
rule_parameters = json.loads(event['ruleParameters'])
try:
valid_rule_parameters = evaluate_parameters(rule_parameters)
except ValueError as ex:
return build_parameters_value_error_response(ex)
try:
AWS_CONFIG_CLIENT = get_client('config', event)
if invoking_event['messageType'] in ['ConfigurationItemChangeNotification', 'ScheduledNotification', 'OversizedConfigurationItemChangeNotification']:
configuration_item = get_configuration_item(invoking_event)
if is_applicable(configuration_item, event):
compliance_result = evaluate_compliance(event, configuration_item, valid_rule_parameters)
else:
compliance_result = "NOT_APPLICABLE"
else:
return build_internal_error_response('Unexpected message type', str(invoking_event))
except botocore.exceptions.ClientError as ex:
if is_internal_error(ex):
return build_internal_error_response("Unexpected error while completing API request", str(ex))
return build_error_response("Customer error while making API request", str(ex), ex.response['Error']['Code'], ex.response['Error']['Message'])
except ValueError as ex:
return build_internal_error_response(str(ex), str(ex))
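    # compliance_result may be empty/None, a single string, one evaluation dict, or a list of evaluation
    # dicts; build the evaluations to report accordingly.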
evaluations = []
latest_evaluations = []
if not compliance_result:
latest_evaluations.append(build_evaluation(event['accountId'], "NOT_APPLICABLE", event, resource_type='AWS::::Account'))
evaluations = clean_up_old_evaluations(latest_evaluations, event)
elif isinstance(compliance_result, str):
if configuration_item:
evaluations.append(build_evaluation_from_config_item(configuration_item, compliance_result))
else:
evaluations.append(build_evaluation(event['accountId'], compliance_result, event, resource_type=DEFAULT_RESOURCE_TYPE))
elif isinstance(compliance_result, list):
for evaluation in compliance_result:
missing_fields = False
for field in ('ComplianceResourceType', 'ComplianceResourceId', 'ComplianceType', 'OrderingTimestamp'):
if field not in evaluation:
print("Missing " + field + " from custom evaluation.")
missing_fields = True
if not missing_fields:
latest_evaluations.append(evaluation)
evaluations = clean_up_old_evaluations(latest_evaluations, event)
elif isinstance(compliance_result, dict):
missing_fields = False
for field in ('ComplianceResourceType', 'ComplianceResourceId', 'ComplianceType', 'OrderingTimestamp'):
if field not in compliance_result:
print("Missing " + field + " from custom evaluation.")
missing_fields = True
if not missing_fields:
evaluations.append(compliance_result)
else:
evaluations.append(build_evaluation_from_config_item(configuration_item, 'NOT_APPLICABLE'))
# Put together the request that reports the evaluation status
result_token = event['resultToken']
test_mode = False
if result_token == 'TESTMODE':
# Used solely for RDK test to skip actual put_evaluation API call
test_mode = True
# Invoke the Config API to report the result of the evaluation
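    # put_evaluations accepts at most 100 evaluations per call, so send them in batches of 100.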
evaluation_copy = []
evaluation_copy = evaluations[:]
while evaluation_copy:
AWS_CONFIG_CLIENT.put_evaluations(Evaluations=evaluation_copy[:100], ResultToken=result_token, TestMode=test_mode)
del evaluation_copy[:100]
# Used solely for RDK test to be able to test Lambda function
return evaluations
def is_internal_error(exception):
return ((not isinstance(exception, botocore.exceptions.ClientError)) or exception.response['Error']['Code'].startswith('5')
or 'InternalError' in exception.response['Error']['Code'] or 'ServiceError' in exception.response['Error']['Code'])
def build_internal_error_response(internal_error_message, internal_error_details=None):
return build_error_response(internal_error_message, internal_error_details, 'InternalError', 'InternalError')
def build_error_response(internal_error_message, internal_error_details=None, customer_error_code=None, customer_error_message=None):
error_response = {
'internalErrorMessage': internal_error_message,
'internalErrorDetails': internal_error_details,
'customerErrorMessage': customer_error_message,
'customerErrorCode': customer_error_code
}
print(error_response)
return error_response
|
"""
(C) Copyright 2021 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on June 30, 2021
"""
import pathlib
import shutil
import tempfile
import unittest
import os
from fuse.utils.file_io.file_io import create_dir
import wget
from fuse_examples.classification.knight.eval.eval import eval
from fuse_examples.classification.knight.make_targets_file import make_targets_file
import fuse_examples.classification.knight.baseline.fuse_baseline as baseline
class KnightTestTestCase(unittest.TestCase):
def setUp(self):
self.root = tempfile.mkdtemp()
def test_eval(self):
dir_path = pathlib.Path(__file__).parent.resolve()
target_filename = os.path.join(dir_path, "../classification/knight/eval/example/example_targets.csv")
task1_prediction_filename = os.path.join(dir_path, "../classification/knight/eval/example/example_task1_predictions.csv")
task2_prediction_filename = os.path.join(dir_path, "../classification/knight/eval/example/example_task2_predictions.csv")
eval(target_filename=target_filename, task1_prediction_filename=task1_prediction_filename, task2_prediction_filename=task2_prediction_filename, output_dir=self.root)
def test_make_targets(self):
dir_path = pathlib.Path(__file__).parent.resolve()
data_path = os.path.join(self.root, "data")
cache_path = os.path.join(self.root, "cache")
split = os.path.join(dir_path, "../classification/knight/baseline/splits_final.pkl")
output_filename = os.path.join(self.root, "output/validation_targets.csv")
create_dir(os.path.join(data_path, "knight", "data"))
create_dir(os.path.dirname(output_filename))
wget.download("https://raw.github.com/neheller/KNIGHT/main/knight/data/knight.json", os.path.join(data_path, "knight", "data"))
make_targets_file(data_path=data_path, cache_path=cache_path, split=split, output_filename=output_filename)
@unittest.skip("Not ready yet")
    # TODOs: set up KNIGHT data
    # 1. Set 'KNIGHT_DATA' ahead of time (and not in the test)
    # 2. Add code that skips the test if this variable wasn't set
    # 3. Modify main() to support overriding the arguments and override the number of epochs to 2 (and maybe the number of samples)
    # 4. Use and test make predictions (inference script)
def test_train(self):
os.environ['KNIGHT_DATA'] = "/projects/msieve/MedicalSieve/PatientData/KNIGHT"
os.environ['KNIGHT_CACHE'] = os.path.join(self.root, "train", "cache")
os.environ['KNIGHT_RESULTS'] = os.path.join(self.root, "train", "results")
baseline.main()
def tearDown(self):
# Delete temporary directories
shutil.rmtree(self.root)
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
"""
Created on Sep 20, 2012
@author: moloch
Copyright 2012 Root the Box
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=unused-wildcard-import,unused-variable
import os
import logging
import sys
import getpass
import codecs
from builtins import object
from libs.ConsoleColors import *
try:
from urllib.parse import quote, quote_plus
except ImportError:
from urllib import quote, quote_plus
from sqlalchemy import create_engine
from tornado.options import options
class DatabaseConnection(object):
def __init__(
self,
database,
hostname="",
port="",
username="",
password="",
dialect="",
ssl_ca="",
):
self.database = database
self.hostname = hostname
self.port = port
self.username = username
self.password = password
self.dialect = dialect
self.ssl_ca = ssl_ca
def __str__(self):
""" Construct the database connection string """
if self.dialect == "sqlite":
db_conn = self._sqlite()
elif self.dialect.startswith("postgres"):
db_conn = self._postgresql()
elif self.dialect == "mysql":
db_conn = self._mysql()
else:
raise ValueError("Database dialect not supported")
self._test_connection(db_conn)
return db_conn
def _postgresql(self):
"""
        Configured to use PostgreSQL. There is no built-in support
        for PostgreSQL, so make sure we can import the 3rd party
        Python lib 'pypostgresql'.
"""
logging.debug("Configured to use Postgresql for a database")
try:
import pypostgresql
except ImportError:
print(WARN + "You must install 'pypostgresql'")
os._exit(1)
db_host, db_name, db_user, db_password = self._db_credentials()
postgres = "postgresql+pypostgresql://%s:%s@%s/%s" % (
db_user,
db_password,
db_host,
db_name,
)
if self._test_connection(postgres):
return postgres
else:
logging.fatal("Cannot connect to database with any available driver")
os._exit(1)
def _sqlite(self):
"""
SQLite connection string, always save db file to cwd, or in-memory
"""
logging.debug("Configured to use SQLite for a database")
db_name = self.database
if not len(db_name):
db_name = "rtb"
if not db_name.endswith(".db"):
db_name = "%s.db" % db_name
return "sqlite:///%s" % db_name
def _mysql(self):
""" Configure db_connection for MySQL """
logging.debug("Configured to use MySQL for a database")
db_server, db_name, db_user, db_password = self._db_credentials()
db_charset = "utf8mb4"
db_connection = "%s:%s@%s/%s?charset=%s" % (
db_user,
db_password,
db_server,
db_name,
db_charset,
)
if self.ssl_ca != "":
db_connection = db_connection + "&ssl_ca=" + self.ssl_ca
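        # Python has no 'utf8mb4' codec, so alias lookups for 'utf8mb4' to the built-in 'utf8' codec.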
codecs.register(
lambda name: codecs.lookup("utf8") if name == "utf8mb4" else None
)
__mysql = "mysql://%s" % db_connection
__mysqlclient = "mysql+mysqldb://%s" % db_connection
__pymysql = "mysql+pymysql://%s" % db_connection
__mysqlconnector = "mysql+mysqlconnector://%s" % db_connection
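        # Try the available MySQL drivers in order of preference and return the first connection string that works.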
if self._test_connection(__mysql):
return __mysql
elif self._test_connection(__mysqlclient):
return __mysqlclient
elif self._test_connection(__pymysql):
return __pymysql
elif self._test_connection(__mysqlconnector):
return __mysqlconnector
else:
            logging.fatal(
                "Cannot connect to database with any available driver. Verify correct username & password in rootthebox.cfg and db dependencies."
            )
os._exit(1)
def _test_connection(self, connection_string):
"""
Test the connection string to see if we can connect to the database
"""
try:
engine = create_engine(connection_string)
connection = engine.connect()
connection.close()
return True
except Exception as e:
if options.debug:
logging.exception("Database connection failed: %s" % e)
return False
def _db_credentials(self):
""" Pull db creds and return them url encoded """
if self.password == "" or self.password == "RUNTIME":
sys.stdout.write(PROMPT + "Database password: ")
sys.stdout.flush()
self.password = getpass.getpass()
elif self.password == "ENV":
self.password = os.environ["sql_password"]
db_host = quote(self.hostname)
db_name = quote(self.database)
db_user = quote(self.username)
db_password = quote_plus(self.password)
if "@" in db_password:
logging.warning(
"%sWARNING:%s Using the '@' symbol in your database password can cause login issues with SQL Alchemy.%s"
% (WARN + bold + R, W, WARN)
)
return db_host, db_name, db_user, db_password
|
from datetime import timedelta
from django.contrib.auth import get_user_model
from django.utils import timezone
from churches.models import Church
from schedules.forms import EventForm, AttendantAdminForm, AttendantForm
from schedules.models import Event, Attendant
from schedules.tests._setup import EventSetupTestCase
class EventFormTests(EventSetupTestCase):
def test_init(self):
user = get_user_model().objects.get(username='test_user')
form = EventForm(user=user)
self.assertEqual(form.fields['church'].queryset.count(), 1)
def test_invalid_clean_end(self):
user = get_user_model().objects.get(username='test_user')
church = Church.objects.get(name='Church 1')
data = {'church': church, 'start': timezone.now(), 'end': timezone.now() - timedelta(days=1),
'title': 'Test', 'visibility': 'public', 'attendance_limit': 0}
form = EventForm(user=user, data=data)
self.assertFalse(form.is_valid())
def test_valid_clean_end(self):
user = get_user_model().objects.get(username='test_user')
church = Church.objects.get(name='Church 1')
data = {'church': church, 'start': timezone.now(), 'end': timezone.now() + timedelta(days=1),
'title': 'Test', 'visibility': 'public', 'attendance_limit': 0}
form = EventForm(user=user, data=data)
self.assertTrue(form.is_valid())
class AttendantAdminFormTests(EventSetupTestCase):
def test_valid_clean_amount(self):
e = Event.objects.get(title='Title 5')
a = Attendant.objects.get(full_name='Attendant 1')
data = {'event': e, 'full_name': 'Attendant 5', 'amount': 2}
form = AttendantAdminForm(instance=a, data=data)
self.assertTrue(form.is_valid())
def test_invalid_clean_amount(self):
e = Event.objects.get(title='Title 5')
a = Attendant.objects.get(full_name='Attendant 1')
data = {'event': e, 'full_name': 'Attendant 5', 'amount': 4}
form = AttendantAdminForm(instance=a, data=data)
self.assertFalse(form.is_valid())
class AttendantFormTests(EventSetupTestCase):
def test_valid_clean_amount(self):
e = Event.objects.get(title='Title 5')
data = {'event': e, 'full_name': 'Attendant 5', 'amount': 1}
form = AttendantForm(data=data)
self.assertTrue(form.is_valid())
def test_invalid_clean_amount(self):
e = Event.objects.get(title='Title 5')
data = {'event': e, 'full_name': 'Attendant 5', 'amount': 2}
form = AttendantForm(data=data)
self.assertFalse(form.is_valid())
|
""" Module containing a class for encapsulating the settings of the tree search
"""
import os
import yaml
from aizynthfinder.utils.logging import logger
from aizynthfinder.utils.paths import data_path
from aizynthfinder.mcts.policy import Policy
from aizynthfinder.mcts.stock import Stock, MongoDbInchiKeyQuery
class Configuration:
"""
Encapsulating the settings of the tree search, including the policy,
the stock and various parameters.
All the parameters can be retrieved as attributes of the Configuration
object, e.g.
.. code-block::
        config.max_transforms  # The maximum number of transforms
config.iteration_limit # The maximum number of iterations
On instantiation it will read default parameters from a config.yml
file located in the `data` folder of the package.
"""
def __init__(self):
self._properties = {}
filename = os.path.join(data_path(), "config.yml")
with open(filename, "r") as fileobj:
_config = yaml.load(fileobj.read(), Loader=yaml.SafeLoader)
self._update_from_config(_config)
self.stock = Stock()
self.policy = Policy(self)
self._logger = logger()
def __eq__(self, other):
return self._properties == other._properties
@classmethod
def from_file(cls, filename):
"""
Loads a configuration from a yaml file.
The parameters not set in the yaml file are taken from the default values.
The policies and stocks specified in the yaml file are directly loaded.
:param filename: the path to a yaml file
:type filename: str
:return: a Configuration object with settings from the yaml file
:rtype: Configuration
"""
config_obj = Configuration()
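        # Illustrative yaml layout for the keys read below (file names are placeholders, not real files):
        #
        #   policy:
        #     files:
        #       my_policy: [my_model.hdf5, my_templates.csv]
        #   stock:
        #     files:
        #       my_stock: my_stock.hdf5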
with open(filename, "r") as fileobj:
_config = yaml.load(fileobj.read(), Loader=yaml.SafeLoader)
config_obj._update_from_config(_config)
for key, policy_spec in _config.get("policy", {}).get("files", {}).items():
modelfile, templatefile = policy_spec
config_obj.policy.load_policy(modelfile, templatefile, key)
for key, stockfile in _config.get("stock", {}).get("files", {}).items():
config_obj.stock.load_stock(stockfile, key)
if "mongodb" in _config.get("stock", {}):
query_obj = MongoDbInchiKeyQuery(**(_config["stock"]["mongodb"] or {}))
config_obj.stock.load_stock(query_obj, "mongodb_stock")
return config_obj
def update(self, **settings):
""" Update the configuration using dictionary of parameters
"""
for setting, value in settings.items():
setattr(self, setting, value)
self._logger.info(f"Setting {setting.replace('_', ' ')} to {value}")
def _update_from_config(self, config):
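        # Later updates take precedence: top-level 'properties' override 'policy' properties, which override 'finder' properties.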
self._properties.update(config.get("finder", {}).get("properties", {}))
self._properties.update(config.get("policy", {}).get("properties", {}))
self._properties.update(config.get("properties", {}))
self.__dict__.update(self._properties)
|
__all__ = ('GUI_STATE_CANCELLED', 'GUI_STATE_CANCELLING', 'GUI_STATE_READY', 'GUI_STATE_SWITCHING_CTX',
'GUI_STATE_SWITCHING_PAGE', 'PaginationBase')
from ...backend.futures import Task, CancelledError
from ...discord.core import KOKORO
from ...discord.exceptions import DiscordException, ERROR_CODES
GUI_STATE_READY = 0
GUI_STATE_SWITCHING_PAGE = 1
GUI_STATE_CANCELLING = 2
GUI_STATE_CANCELLED = 3
GUI_STATE_SWITCHING_CTX = 4
GUI_STATE_VALUE_TO_NAME = {
GUI_STATE_READY : 'ready',
GUI_STATE_SWITCHING_PAGE : 'switching_page',
GUI_STATE_CANCELLING : 'cancelling',
GUI_STATE_CANCELLED : 'cancelled',
GUI_STATE_SWITCHING_CTX : 'switching_context',
}
class PaginationBase:
"""
Base class for pagination like objects.
Attributes
----------
_canceller : `None` or `Function`
        The function called when the ``Pagination`` is cancelled or when it expires. It is used only once; after it
        has been used, it is set to `None`.
_task_flag : `int`
A flag to store the state of the ``Pagination``.
Possible values:
+---------------------------+-------+-----------------------------------------------------------------------+
| Respective name | Value | Description |
+===========================+=======+=======================================================================+
| GUI_STATE_READY | 0 | The Pagination does nothing, is ready to be used. |
+---------------------------+-------+-----------------------------------------------------------------------+
| GUI_STATE_SWITCHING_PAGE | 1 | The Pagination is currently changing it's page. |
+---------------------------+-------+-----------------------------------------------------------------------+
| GUI_STATE_CANCELLING | 2 | The pagination is currently changing it's page, but it was cancelled |
| | | meanwhile. |
+---------------------------+-------+-----------------------------------------------------------------------+
| GUI_STATE_CANCELLED | 3 | The pagination is, or is being cancelled right now. |
+---------------------------+-------+-----------------------------------------------------------------------+
| GUI_STATE_SWITCHING_CTX | 4 | The Pagination is switching context. Not used by the default class, |
| | | but expected. |
+---------------------------+-------+-----------------------------------------------------------------------+
_timeouter : `None` or ``Timeouter``
Executes the timing out feature on the ``Pagination``.
channel : ``ChannelTextBase`` instance
The channel where the ``Pagination`` is executed.
client : ``Client`` of ``Embed`` (or any compatible)
The client who executes the ``Pagination``.
message : `None` or ``Message``
The message on what the ``Pagination`` is executed.
"""
__slots__ = ('_canceller', '_task_flag', '_timeouter', 'channel', 'client', 'message')
async def __new__(cls, client, channel):
"""
        Pagination instances should have an asynchronous constructor.
Parameters
----------
Raises
------
NotImplementedError
"""
raise NotImplementedError
async def __call__(self, client, event):
"""
Called when a reaction is added or removed from the respective message.
This method is a coroutine.
Parameters
----------
client : ``Client``
The client who executes the ``Pagination``.
event : ``ReactionAddEvent``, ``ReactionDeleteEvent``
The received event.
"""
pass
async def _canceller_function(self, exception):
"""
Used when the ``Pagination`` is cancelled.
        First it removes the pagination from the waitfors, so it will not wait for reaction events, then it sets its
        ``._task_flag`` to `GUI_STATE_CANCELLED`.
If `exception` is given as `TimeoutError`, then removes the ``Pagination``'s reactions from the respective
message.
This method is a coroutine.
Parameters
----------
exception : `None` or ``BaseException`` instance
Exception to cancel the ``Pagination`` with.
"""
client = self.client
message = self.message
client.events.reaction_add.remove(message, self)
client.events.reaction_delete.remove(message, self)
if self._task_flag == GUI_STATE_SWITCHING_CTX:
            # the message is not ours, we should not do anything with it.
return
self._task_flag = GUI_STATE_CANCELLED
if not await self._handle_close_exception(exception):
await client.events.error(client, f'{self!r}._canceller_function', exception)
async def _handle_close_exception(self, exception):
"""
Handles close exception if any.
This method is a coroutine.
Parameters
----------
exception : `None` or `BaseException`
The close exception to handle.
Returns
-------
exception_handled : `bool`
Whether the exception was handled.
"""
if exception is None:
return True
client = self.client
message = self.message
if isinstance(exception, CancelledError):
try:
await client.message_delete(message)
except BaseException as err:
if isinstance(err, ConnectionError):
# no internet
return True
if isinstance(err, DiscordException):
if err.code in (
ERROR_CODES.unknown_channel, # channel deleted
ERROR_CODES.unknown_message, # message deleted
ERROR_CODES.missing_access, # client removed
):
return True
await client.events.error(client, f'{self!r}._handle_close_exception', err)
return True
if isinstance(exception, TimeoutError):
if self.channel.cached_permissions_for(client).can_manage_messages:
try:
await client.reaction_clear(message)
except BaseException as err:
if isinstance(err, ConnectionError):
# no internet
return True
if isinstance(err, DiscordException):
if err.code in (
ERROR_CODES.unknown_message, # message deleted
ERROR_CODES.unknown_channel, # channel deleted
ERROR_CODES.missing_access, # client removed
ERROR_CODES.missing_permissions, # permissions changed meanwhile
):
return True
await client.events.error(client, f'{self!r}._handle_close_exception', err)
return True
if isinstance(exception, PermissionError):
return True
return False
def cancel(self, exception=None):
"""
Cancels the pagination, if it is not cancelled yet.
Parameters
----------
exception : `None` or ``BaseException`` instance, Optional
Exception to cancel the pagination with. Defaults to `None`
Returns
-------
canceller_task : `None` or ``Task``
"""
if self._task_flag in (GUI_STATE_READY, GUI_STATE_SWITCHING_PAGE, GUI_STATE_CANCELLING):
self._task_flag = GUI_STATE_CANCELLED
canceller = self._canceller
if canceller is None:
return
self._canceller = None
timeouter = self._timeouter
if (timeouter is not None):
timeouter.cancel()
return Task(canceller(self, exception), KOKORO)
def __repr__(self):
"""Returns the pagination instance's representation."""
repr_parts = [
'<', self.__class__.__name__,
' client=', repr(self.client),
', channel=', repr(self.channel),
', state='
]
task_flag = self._task_flag
repr_parts.append(repr(task_flag))
repr_parts.append(' (')
task_flag_name = GUI_STATE_VALUE_TO_NAME[task_flag]
repr_parts.append(task_flag_name)
repr_parts.append(')')
# Third party things go here
repr_parts.append('>')
return ''.join(repr_parts)
def is_active(self):
"""
Returns whether the menu is still active.
Returns
-------
is_active : `bool`
"""
return (self._task_flag in (GUI_STATE_READY, GUI_STATE_SWITCHING_PAGE))
|
import pytest
from problems.problem_0242 import Solution
@pytest.mark.parametrize('test_input, expected', (
(('anagram', 'nagaram'), True),
(('rat', 'car'), False),
))
def test_is_anagram(test_input, expected):
assert Solution.isAnagram(*test_input) == expected
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
import numpy as np
import tensorflow as tf
import oneflow as flow
from collections import OrderedDict
import oneflow.typing as oft
import test_global_storage
from test_util import (
GenArgDict,
GenArgList,
type_name_to_flow_type,
type_name_to_np_type,
)
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
def RunOneflowBinaryOp(device_type, flow_op, x, y, data_type):
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
flow_type = type_name_to_flow_type[data_type]
@flow.global_function(type="train", function_config=func_config)
def FlowJob(
x: oft.Numpy.Placeholder(x.shape, dtype=flow_type),
y: oft.Numpy.Placeholder(y.shape, dtype=flow_type),
):
with flow.scope.placement(device_type, "0:0"):
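            # Add zero-initialized trainable variables to the inputs so that gradients w.r.t. x and y
            # can be recorded with flow.watch_diff below.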
x += flow.get_variable(
name="x",
shape=x.shape,
dtype=flow_type,
initializer=flow.zeros_initializer(),
trainable=True,
)
y += flow.get_variable(
name="y",
shape=y.shape,
dtype=flow_type,
initializer=flow.zeros_initializer(),
trainable=True,
)
loss = flow_op(x, y)
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
).minimize(loss)
flow.watch_diff(x, test_global_storage.Setter("x_diff"))
flow.watch_diff(y, test_global_storage.Setter("y_diff"))
return loss
# Oneflow
out = FlowJob(x, y).get().numpy()
x_diff = test_global_storage.Get("x_diff")
y_diff = test_global_storage.Get("y_diff")
return out, x_diff, y_diff
def RunTensorFlowBinaryOp(tf_op, x, y):
# TensorFlow
with tf.GradientTape(persistent=True) as tape:
x = tf.Variable(x)
y = tf.Variable(y)
out = tf_op(x, y)
x_diff = tape.gradient(out, x)
y_diff = tape.gradient(out, y)
return out.numpy(), x_diff, y_diff
def compare_with_tensorflow(
test_case,
device_type,
flow_op,
tf_op,
x_shape,
y_shape,
data_type,
x_minval=-10,
x_maxval=10,
y_minval=-10,
y_maxval=10,
compare_grad=True,
out_rtol=1e-5,
out_atol=1e-5,
diff_rtol=1e-5,
diff_atol=1e-5,
):
test_case.assertTrue(device_type in ["gpu", "cpu"])
np_type = type_name_to_np_type[data_type]
x = np.random.uniform(low=x_minval, high=x_maxval, size=x_shape).astype(np_type)
y = np.random.uniform(low=y_minval, high=y_maxval, size=y_shape).astype(np_type)
of_out, of_x_diff, of_y_diff, = RunOneflowBinaryOp(
device_type, flow_op, x, y, data_type
)
tf_out, tf_x_diff, tf_y_diff = RunTensorFlowBinaryOp(tf_op, x, y)
test_case.assertTrue(
np.allclose(of_out, tf_out, rtol=out_rtol, atol=out_atol, equal_nan=True)
)
if compare_grad:
test_case.assertTrue(
np.allclose(
of_x_diff,
tf_x_diff.numpy(),
rtol=diff_rtol,
atol=diff_atol,
equal_nan=True,
)
)
test_case.assertTrue(
np.allclose(
of_y_diff,
tf_y_diff.numpy(),
rtol=diff_rtol,
atol=diff_atol,
equal_nan=True,
)
)
flow.clear_default_session()
@flow.unittest.skip_unless_1n1d()
class TestBinaryElementwiseOps(flow.unittest.TestCase):
def test_floordiv(test_case):
arg_dict = OrderedDict()
arg_dict["test_case"] = [test_case]
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["flow_op"] = [flow.math.floordiv]
arg_dict["tf_op"] = [tf.math.floordiv]
arg_dict["x_shape"] = [(5, 5,)]
arg_dict["y_shape"] = [(5, 5,)]
arg_dict["data_type"] = ["float32", "double"]
arg_dict["x_minval"] = [-10]
arg_dict["x_maxval"] = [10]
arg_dict["y_minval"] = [1]
arg_dict["y_maxval"] = [10]
arg_dict["compare_grad"] = [False]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
def test_pow(test_case):
arg_dict = OrderedDict()
arg_dict["test_case"] = [test_case]
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["flow_op"] = [flow.math.pow]
arg_dict["tf_op"] = [tf.math.pow]
arg_dict["x_shape"] = [(5, 5,)]
arg_dict["y_shape"] = [(5, 5,)]
arg_dict["data_type"] = ["float32", "double"]
arg_dict["x_minval"] = [1]
arg_dict["x_maxval"] = [5]
arg_dict["y_minval"] = [1]
arg_dict["y_maxval"] = [5]
arg_dict["compare_grad"] = [True]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
def test_xdivy(test_case):
arg_dict = OrderedDict()
arg_dict["test_case"] = [test_case]
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["flow_op"] = [flow.math.xdivy]
arg_dict["tf_op"] = [tf.math.xdivy]
arg_dict["x_shape"] = [(5, 5,)]
arg_dict["y_shape"] = [(5, 5,)]
arg_dict["data_type"] = ["float32", "double"]
arg_dict["x_minval"] = [1]
arg_dict["x_maxval"] = [100]
arg_dict["y_minval"] = [1]
arg_dict["y_maxval"] = [10]
arg_dict["compare_grad"] = [True]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
def test_xlogy(test_case):
arg_dict = OrderedDict()
arg_dict["test_case"] = [test_case]
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["flow_op"] = [flow.math.xlogy]
arg_dict["tf_op"] = [tf.math.xlogy]
arg_dict["x_shape"] = [(5, 5,)]
arg_dict["y_shape"] = [(5, 5,)]
arg_dict["data_type"] = ["float32", "double"]
arg_dict["x_minval"] = [1]
arg_dict["x_maxval"] = [5]
arg_dict["y_minval"] = [1]
arg_dict["y_maxval"] = [5]
arg_dict["compare_grad"] = [True]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
def test_atan2(test_case):
arg_dict = OrderedDict()
arg_dict["test_case"] = [test_case]
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["flow_op"] = [flow.math.atan2]
arg_dict["tf_op"] = [tf.math.atan2]
arg_dict["x_shape"] = [(5, 5,)]
arg_dict["y_shape"] = [(5, 5,)]
arg_dict["data_type"] = ["float32", "double"]
arg_dict["x_minval"] = [1]
arg_dict["x_maxval"] = [5]
arg_dict["y_minval"] = [1]
arg_dict["y_maxval"] = [5]
arg_dict["compare_grad"] = [True]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
if __name__ == "__main__":
unittest.main()
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListDeployments
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflowcx
# [START dialogflow_generated_dialogflowcx_v3_Deployments_ListDeployments_async]
from google.cloud import dialogflowcx_v3
async def sample_list_deployments():
# Create a client
client = dialogflowcx_v3.DeploymentsAsyncClient()
# Initialize request argument(s)
request = dialogflowcx_v3.ListDeploymentsRequest(
parent="parent_value",
)
# Make the request
    page_result = await client.list_deployments(request=request)
async for response in page_result:
print(response)
# [END dialogflow_generated_dialogflowcx_v3_Deployments_ListDeployments_async]
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import string
import struct
from core.domain import obj_services
from core.domain import rte_component_registry
from core.tests import test_utils
import feconf
import schema_utils
import schema_utils_test
import utils
# File names ending in any of these suffixes will be ignored when checking for
# RTE component validity.
IGNORED_FILE_SUFFIXES = ['.pyc', '.DS_Store']
RTE_THUMBNAIL_HEIGHT_PX = 16
RTE_THUMBNAIL_WIDTH_PX = 16
_COMPONENT_CONFIG_SCHEMA = [
('backend_id', basestring), ('category', basestring),
('description', basestring), ('frontend_id', basestring),
('tooltip', basestring), ('icon_data_url', basestring),
('preview_url_template', basestring), ('is_complex', bool),
('requires_fs', bool), ('is_block_element', bool),
('customization_arg_specs', list)]
class RteComponentUnitTests(test_utils.GenericTestBase):
    """Tests that all the default RTE components are valid."""
def _is_camel_cased(self, name):
"""Check whether a name is in CamelCase."""
return name and (name[0] in string.ascii_uppercase)
    def _is_alphanumeric_string(self, input_string):
        """Check whether a string contains only letters, digits and underscores."""
return bool(re.compile("^[a-zA-Z0-9_]+$").match(input_string))
def _validate_customization_arg_specs(self, customization_arg_specs):
for ca_spec in customization_arg_specs:
self.assertEqual(set(ca_spec.keys()), set([
'name', 'description', 'schema', 'default_value']))
self.assertTrue(isinstance(ca_spec['name'], basestring))
self.assertTrue(self._is_alphanumeric_string(ca_spec['name']))
self.assertTrue(isinstance(ca_spec['description'], basestring))
self.assertGreater(len(ca_spec['description']), 0)
# The default value might not pass validation checks (e.g. the
# Image component has a required field whose default value is
# empty). Thus, when checking the default value schema, we don't
# apply the custom validators.
schema_utils_test.validate_schema(ca_spec['schema'])
self.assertEqual(
ca_spec['default_value'],
schema_utils.normalize_against_schema(
ca_spec['default_value'], ca_spec['schema'],
apply_custom_validators=False))
if ca_spec['schema']['type'] == 'custom':
obj_class = obj_services.Registry.get_object_class_by_type(
ca_spec['schema']['obj_type'])
self.assertIsNotNone(obj_class.edit_html_filename)
self.assertIsNotNone(obj_class.edit_js_filename)
self.assertEqual(
ca_spec['default_value'],
obj_class.normalize(ca_spec['default_value']))
def _listdir_omit_ignored(self, directory):
"""List all files and directories within 'directory', omitting the ones
whose name ends in one of the IGNORED_FILE_SUFFIXES.
"""
names = os.listdir(directory)
for suffix in IGNORED_FILE_SUFFIXES:
names = [name for name in names if not name.endswith(suffix)]
return names
def test_image_thumbnails_for_rte_components(self):
"""Test the thumbnails for the RTE component icons."""
rte_components = (
rte_component_registry.Registry.get_all_rte_components())
for (component_name, component_specs) in rte_components.iteritems():
generated_image_filepath = os.path.join(
os.getcwd(), feconf.RTE_EXTENSIONS_DIR,
component_name, '%s.png' % component_name)
relative_icon_data_url = component_specs['icon_data_url'][1:]
defined_image_filepath = os.path.join(
os.getcwd(), feconf.EXTENSIONS_DIR_PREFIX,
'extensions', relative_icon_data_url)
self.assertEqual(generated_image_filepath, defined_image_filepath)
with open(generated_image_filepath, 'rb') as f:
img_data = f.read()
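                # PNG stores the image width and height as big-endian 32-bit integers at byte offsets 16-23 (IHDR chunk).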
width, height = struct.unpack('>LL', img_data[16:24])
self.assertEqual(int(width), RTE_THUMBNAIL_WIDTH_PX)
self.assertEqual(int(height), RTE_THUMBNAIL_HEIGHT_PX)
def test_rte_components_are_valid(self):
"""Test that the default RTE components are valid."""
rte_components = (
rte_component_registry.Registry.get_all_rte_components())
for (component_id, component_specs) in rte_components.iteritems():
# Check that the component id is valid.
self.assertTrue(self._is_camel_cased(component_id))
# Check that the component directory exists.
component_dir = os.path.join(
feconf.RTE_EXTENSIONS_DIR, component_id)
self.assertTrue(os.path.isdir(component_dir))
            # In this directory there should be a /directives directory, an
            # icon .png file, a protractor.js file, and an optional
            # preview .png file.
            # The /directives directory should contain an HTML file and a JS
            # file; there could be multiple JS and HTML files.
dir_contents = self._listdir_omit_ignored(component_dir)
self.assertLessEqual(len(dir_contents), 4)
directives_dir = os.path.join(component_dir, 'directives')
png_file = os.path.join(component_dir, '%s.png' % component_id)
preview_file = os.path.join(
component_dir, '%sPreview.png' % component_id)
protractor_file = os.path.join(component_dir, 'protractor.js')
self.assertTrue(os.path.isdir(directives_dir))
self.assertTrue(os.path.isfile(png_file))
self.assertTrue(os.path.isfile(protractor_file))
if len(dir_contents) == 5:
self.assertTrue(os.path.isfile(preview_file))
main_js_file = os.path.join(
directives_dir, '%sDirective.js' % component_id)
main_html_file = os.path.join(
directives_dir, '%s_directive.html' % component_id.lower())
self.assertTrue(os.path.isfile(main_js_file))
self.assertTrue(os.path.isfile(main_html_file))
js_file_content = utils.get_file_contents(main_js_file)
self.assertIn(
'oppiaNoninteractive%s' % component_id, js_file_content)
self.assertNotIn('<script>', js_file_content)
self.assertNotIn('</script>', js_file_content)
# Check that the configuration file contains the correct
# top-level keys, and that these keys have the correct types.
for item, item_type in _COMPONENT_CONFIG_SCHEMA:
self.assertTrue(isinstance(
component_specs[item], item_type))
# The string attributes should be non-empty.
if item_type == basestring:
self.assertTrue(component_specs[item])
self._validate_customization_arg_specs(
component_specs['customization_arg_specs']) # pylint: disable=protected-access
def test_html_contains_all_imports(self):
"""Test that the rich_text_components.html file contains script-imports
for all directives of all RTE components.
"""
js_files_paths = []
for component_id in feconf.ALLOWED_RTE_EXTENSIONS:
component_dir = os.path.join(
feconf.RTE_EXTENSIONS_DIR, component_id)
directives_dir = os.path.join(component_dir, 'directives')
directive_filenames = os.listdir(directives_dir)
js_files_paths.extend(
os.path.join(directives_dir, filename) for filename
in directive_filenames if filename.endswith('.js'))
js_files_paths.sort()
prefix = '<script src="{{ASSET_DIR_PREFIX}}/'
suffix = '"></script>'
html_script_tags = [
'%s%s%s' % (prefix, path, suffix) for path in js_files_paths]
generated_html = '\n'.join(html_script_tags)
rtc_html_file = os.path.join(
feconf.FRONTEND_TEMPLATES_DIR, 'components',
'rich_text_components.html')
with open(rtc_html_file, 'r') as f:
rtc_html_file_contents = f.read()
self.assertEqual(generated_html, rtc_html_file_contents.strip())
|
"""WOInfo Plugin for WordOps"""
from cement.core.controller import CementBaseController, expose
from cement.core import handler, hook
from wo.core.variables import WOVariables
from pynginxconfig import NginxConfig
from wo.core.aptget import WOAptGet
from wo.core.shellexec import WOShellExec
from wo.core.logging import Log
import os
import configparser
def wo_info_hook(app):
pass
class WOInfoController(CementBaseController):
class Meta:
label = 'info'
stacked_on = 'base'
stacked_type = 'nested'
description = ('Display configuration information related to Nginx,'
' PHP and MySQL')
arguments = [
(['--mysql'],
dict(help='Get MySQL configuration information',
action='store_true')),
(['--php'],
dict(help='Get PHP configuration information',
action='store_true')),
(['--php72'],
dict(help='Get PHP 7.2 configuration information',
action='store_true')),
(['--nginx'],
dict(help='Get Nginx configuration information',
action='store_true')),
]
usage = "wo info [options]"
@expose(hide=True)
def info_nginx(self):
"""Display Nginx information"""
version = os.popen("nginx -v 2>&1 | cut -d':' -f2 | cut -d' ' -f2 | "
"cut -d'/' -f2 | tr -d '\n'").read()
allow = os.popen("grep ^allow /etc/nginx/common/acl.conf | "
"cut -d' ' -f2 | cut -d';' -f1 | tr '\n' ' '").read()
nc = NginxConfig()
nc.loadf('/etc/nginx/nginx.conf')
user = nc.get('user')[1]
worker_processes = nc.get('worker_processes')[1]
worker_connections = nc.get([('events',), 'worker_connections'])[1]
keepalive_timeout = nc.get([('http',), 'keepalive_timeout'])[1]
fastcgi_read_timeout = nc.get([('http',),
'fastcgi_read_timeout'])[1]
client_max_body_size = nc.get([('http',),
'client_max_body_size'])[1]
data = dict(version=version, allow=allow, user=user,
worker_processes=worker_processes,
keepalive_timeout=keepalive_timeout,
worker_connections=worker_connections,
fastcgi_read_timeout=fastcgi_read_timeout,
client_max_body_size=client_max_body_size)
self.app.render((data), 'info_nginx.mustache')
@expose(hide=True)
def info_php(self):
"""Display PHP information"""
        version = os.popen("{0} -v 2>/dev/null | head -n1 | cut -d' ' -f2 |".format("php5.6" if (WOVariables.wo_platform_codename == 'trusty' or WOVariables.wo_platform_codename == 'xenial' or WOVariables.wo_platform_codename == 'bionic') else "php") +
                           " cut -d'+' -f1 | tr -d '\n'").read()
config = configparser.ConfigParser()
config.read('/etc/{0}/fpm/php.ini'.format("php/5.6" if (WOVariables.wo_platform_codename == 'trusty' or WOVariables.wo_platform_codename == 'xenial' or WOVariables.wo_platform_codename == 'bionic') else "php5"))
expose_php = config['PHP']['expose_php']
memory_limit = config['PHP']['memory_limit']
post_max_size = config['PHP']['post_max_size']
upload_max_filesize = config['PHP']['upload_max_filesize']
max_execution_time = config['PHP']['max_execution_time']
config.read('/etc/{0}/fpm/pool.d/www.conf'.format("php/5.6" if (WOVariables.wo_platform_codename == 'trusty' or WOVariables.wo_platform_codename == 'xenial' or WOVariables.wo_platform_codename == 'bionic') else "php5"))
www_listen = config['www']['listen']
www_ping_path = config['www']['ping.path']
www_pm_status_path = config['www']['pm.status_path']
www_pm = config['www']['pm']
www_pm_max_requests = config['www']['pm.max_requests']
www_pm_max_children = config['www']['pm.max_children']
www_pm_start_servers = config['www']['pm.start_servers']
www_pm_min_spare_servers = config['www']['pm.min_spare_servers']
www_pm_max_spare_servers = config['www']['pm.max_spare_servers']
www_request_terminate_time = (config['www']
['request_terminate_timeout'])
try:
www_xdebug = (config['www']['php_admin_flag[xdebug.profiler_enable'
'_trigger]'])
except Exception as e:
www_xdebug = 'off'
config.read('/etc/{0}/fpm/pool.d/debug.conf'.format("php/5.6" if (WOVariables.wo_platform_codename == 'trusty' or WOVariables.wo_platform_codename == 'xenial' or WOVariables.wo_platform_codename == 'bionic') else "php5"))
debug_listen = config['debug']['listen']
debug_ping_path = config['debug']['ping.path']
debug_pm_status_path = config['debug']['pm.status_path']
debug_pm = config['debug']['pm']
debug_pm_max_requests = config['debug']['pm.max_requests']
debug_pm_max_children = config['debug']['pm.max_children']
debug_pm_start_servers = config['debug']['pm.start_servers']
debug_pm_min_spare_servers = config['debug']['pm.min_spare_servers']
debug_pm_max_spare_servers = config['debug']['pm.max_spare_servers']
debug_request_terminate = (config['debug']
['request_terminate_timeout'])
try:
debug_xdebug = (config['debug']['php_admin_flag[xdebug.profiler_'
'enable_trigger]'])
except Exception as e:
debug_xdebug = 'off'
data = dict(version=version, expose_php=expose_php,
memory_limit=memory_limit, post_max_size=post_max_size,
upload_max_filesize=upload_max_filesize,
max_execution_time=max_execution_time,
www_listen=www_listen, www_ping_path=www_ping_path,
www_pm_status_path=www_pm_status_path, www_pm=www_pm,
www_pm_max_requests=www_pm_max_requests,
www_pm_max_children=www_pm_max_children,
www_pm_start_servers=www_pm_start_servers,
www_pm_min_spare_servers=www_pm_min_spare_servers,
www_pm_max_spare_servers=www_pm_max_spare_servers,
www_request_terminate_timeout=www_request_terminate_time,
www_xdebug_profiler_enable_trigger=www_xdebug,
debug_listen=debug_listen, debug_ping_path=debug_ping_path,
debug_pm_status_path=debug_pm_status_path,
debug_pm=debug_pm,
debug_pm_max_requests=debug_pm_max_requests,
debug_pm_max_children=debug_pm_max_children,
debug_pm_start_servers=debug_pm_start_servers,
debug_pm_min_spare_servers=debug_pm_min_spare_servers,
debug_pm_max_spare_servers=debug_pm_max_spare_servers,
debug_request_terminate_timeout=debug_request_terminate,
debug_xdebug_profiler_enable_trigger=debug_xdebug)
self.app.render((data), 'info_php.mustache')
@expose(hide=True)
def info_php72(self):
"""Display PHP information"""
        version = os.popen("php7.2 -v 2>/dev/null | head -n1 | cut -d' ' -f2 |"
                           " cut -d'+' -f1 | tr -d '\n'").read()
config = configparser.ConfigParser()
config.read('/etc/php/7.2/fpm/php.ini')
expose_php = config['PHP']['expose_php']
memory_limit = config['PHP']['memory_limit']
post_max_size = config['PHP']['post_max_size']
upload_max_filesize = config['PHP']['upload_max_filesize']
max_execution_time = config['PHP']['max_execution_time']
config.read('/etc/php/7.2/fpm/pool.d/www.conf')
www_listen = config['www']['listen']
www_ping_path = config['www']['ping.path']
www_pm_status_path = config['www']['pm.status_path']
www_pm = config['www']['pm']
www_pm_max_requests = config['www']['pm.max_requests']
www_pm_max_children = config['www']['pm.max_children']
www_pm_start_servers = config['www']['pm.start_servers']
www_pm_min_spare_servers = config['www']['pm.min_spare_servers']
www_pm_max_spare_servers = config['www']['pm.max_spare_servers']
www_request_terminate_time = (config['www']
['request_terminate_timeout'])
try:
www_xdebug = (config['www']['php_admin_flag[xdebug.profiler_enable'
'_trigger]'])
except Exception as e:
www_xdebug = 'off'
config.read('/etc/php/7.2/fpm/pool.d/debug.conf')
debug_listen = config['debug']['listen']
debug_ping_path = config['debug']['ping.path']
debug_pm_status_path = config['debug']['pm.status_path']
debug_pm = config['debug']['pm']
debug_pm_max_requests = config['debug']['pm.max_requests']
debug_pm_max_children = config['debug']['pm.max_children']
debug_pm_start_servers = config['debug']['pm.start_servers']
debug_pm_min_spare_servers = config['debug']['pm.min_spare_servers']
debug_pm_max_spare_servers = config['debug']['pm.max_spare_servers']
debug_request_terminate = (config['debug']
['request_terminate_timeout'])
try:
debug_xdebug = (config['debug']['php_admin_flag[xdebug.profiler_'
'enable_trigger]'])
except Exception as e:
debug_xdebug = 'off'
data = dict(version=version, expose_php=expose_php,
memory_limit=memory_limit, post_max_size=post_max_size,
upload_max_filesize=upload_max_filesize,
max_execution_time=max_execution_time,
www_listen=www_listen, www_ping_path=www_ping_path,
www_pm_status_path=www_pm_status_path, www_pm=www_pm,
www_pm_max_requests=www_pm_max_requests,
www_pm_max_children=www_pm_max_children,
www_pm_start_servers=www_pm_start_servers,
www_pm_min_spare_servers=www_pm_min_spare_servers,
www_pm_max_spare_servers=www_pm_max_spare_servers,
www_request_terminate_timeout=www_request_terminate_time,
www_xdebug_profiler_enable_trigger=www_xdebug,
debug_listen=debug_listen, debug_ping_path=debug_ping_path,
debug_pm_status_path=debug_pm_status_path,
debug_pm=debug_pm,
debug_pm_max_requests=debug_pm_max_requests,
debug_pm_max_children=debug_pm_max_children,
debug_pm_start_servers=debug_pm_start_servers,
debug_pm_min_spare_servers=debug_pm_min_spare_servers,
debug_pm_max_spare_servers=debug_pm_max_spare_servers,
debug_request_terminate_timeout=debug_request_terminate,
debug_xdebug_profiler_enable_trigger=debug_xdebug)
self.app.render((data), 'info_php.mustache')
@expose(hide=True)
def info_mysql(self):
"""Display MySQL information"""
version = os.popen("mysql -V | awk '{print($5)}' | cut -d ',' "
"-f1 | tr -d '\n'").read()
host = "localhost"
port = os.popen("mysql -e \"show variables\" | grep ^port | awk "
"'{print($2)}' | tr -d '\n'").read()
wait_timeout = os.popen("mysql -e \"show variables\" | grep "
"^wait_timeout | awk '{print($2)}' | "
"tr -d '\n'").read()
interactive_timeout = os.popen("mysql -e \"show variables\" | grep "
"^interactive_timeout | awk "
"'{print($2)}' | tr -d '\n'").read()
max_used_connections = os.popen("mysql -e \"show global status\" | "
"grep Max_used_connections | awk "
"'{print($2)}' | tr -d '\n'").read()
datadir = os.popen("mysql -e \"show variables\" | grep datadir | awk"
" '{print($2)}' | tr -d '\n'").read()
socket = os.popen("mysql -e \"show variables\" | grep \"^socket\" | "
"awk '{print($2)}' | tr -d '\n'").read()
data = dict(version=version, host=host, port=port,
wait_timeout=wait_timeout,
interactive_timeout=interactive_timeout,
max_used_connections=max_used_connections,
datadir=datadir, socket=socket)
self.app.render((data), 'info_mysql.mustache')
@expose(hide=True)
def default(self):
"""default function for info"""
if (not self.app.pargs.nginx and not self.app.pargs.php
and not self.app.pargs.mysql and not self.app.pargs.php72):
self.app.pargs.nginx = True
self.app.pargs.php = True
self.app.pargs.mysql = True
if WOAptGet.is_installed(self, 'php7.2-fpm'):
self.app.pargs.php = True
if self.app.pargs.nginx:
if WOAptGet.is_installed(self, 'nginx-custom') or WOAptGet.is_installed(self, 'nginx-common'):
self.info_nginx()
else:
Log.error(self, "Nginx is not installed")
if self.app.pargs.php:
if (WOVariables.wo_platform_distro == 'debian' or WOVariables.wo_platform_codename == 'precise'):
if WOAptGet.is_installed(self, 'php5-fpm'):
self.info_php()
else:
Log.error(self, "PHP5 is not installed")
else:
if WOAptGet.is_installed(self, 'php5.6-fpm'):
self.info_php()
else:
Log.error(self, "PHP5.6 is not installed")
if self.app.pargs.php72:
if WOAptGet.is_installed(self, 'php7.2-fpm'):
self.info_php72()
else:
Log.error(self, "PHP 7.2 is not installed")
if self.app.pargs.mysql:
if WOShellExec.cmd_exec(self, "mysqladmin ping"):
self.info_mysql()
else:
Log.error(self, "MySQL is not installed")
def load(app):
# register the plugin class.. this only happens if the plugin is enabled
handler.register(WOInfoController)
# register a hook (function) to run after arguments are parsed.
hook.register('post_argument_parsing', wo_info_hook)
|
# ledtheatre is Licensed under the MIT License
# Copyright 2017 Andrew Alcock
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from setuptools import setup
def readme():
with open('README.rst') as f:
return f.read()
setup(name='ledtheatre',
version='0.1.3',
description='Control LED lights on the Adafruit PCA9685 PWM card',
long_description=readme(),
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Topic :: System :: Hardware',
],
url='https://github.com/aalcock/ledtheatre',
author='Andrew Alcock',
author_email='andrew@alcock.sg',
license='MIT',
packages=['ledtheatre'],
install_requires=[
'Adafruit_PCA9685>=1.0.1'
],
zip_safe=True)
|
from .single_level import *
from .multi_roi_extractor import *
|
from decouple import config
ADDRESS = config("ADDRESS")
PORT = config("PORT")
TS_USER = config("TS_USER")
PASSWORD = config("PASSWORD")
TOKEN = config("TOKEN")
PERSISTENCE_FILE = config("PERSISTENCE_FILE")
|
from datetime import datetime
from datetime import date
class MainParent(object):
pass
class SubParent1(MainParent):
def foo(self):
pass
pass
class SubParent2(MainParent):
pass
class Child(SubParent1, SubParent2):
def spam(self):
pass
pass
class NoParentsAllowed(datetime, object):
def foo(self):
pass
pass
class NoMembers(object):
pass
class BadMro(MainParent, object, SubParent1, SubParent2):
pass
class HugeChild(SubParent1, date): #SubParent1 is disabled
__metaclass__ = None # Anyway, this field should be ignored and processed separately as "metaclass", not "class field"
def __init__(self):
self.instance_field_1 = 42
self.instance_field_2 = 100500
CLASS_FIELD = 42
(CLASS_FIELD_A,CLASS_FIELD_B) = (42,100500) #We do not support tuples in class assignments for now (see ClassFieldsManager)
def _set(self, val): # Should not be treated as method (part of property)
pass
def _get(self): # Should not be treated as method (part of property)
return None
name = property(fget=_get, fset=_set)
@property
def some_property(self): # Should not be treated as method (part of property)
return None
@some_property.setter
def some_property(self, val): # Should not be treated as method (part of property)
pass
def foo(self): #should be disabled
self.some_property = 12
def bar(self):
pass
@classmethod
def static_1(cls): # Could be abstract in Py3K
pass
@staticmethod
def static_2(): # Could be abstract in Py3K
pass
@staticmethod
    def bad_method(): #Code has errors, so the method should not be marked as static
pass
class Bar(object):
C = 1
class Foo(Bar):
def __init__(self):
self.foo = 12
class ParentWithConflicts(Bar):
CLASS_FIELD = 42
def __init__(self):
self.instance_field = 12
def my_func(self):
pass
class ChildWithConflicts(ParentWithConflicts, Bar): # Bar -> conflict
CLASS_FIELD = 42 # Conflict
GOOD_FIELD = 32
def __init__(self):
self.instance_field = 12 # Conflict
self.good_instance_field = "egg"
def my_func(self): # Conflict
pass
|
"""Test code for relu activation"""
import os
import numpy as np
import tvm
import topi
from topi.util import get_const_tuple
def verify_relu(m, n):
A = tvm.placeholder((m, n), name='A')
B = topi.nn.relu(A)
a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
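    # Reference ReLU computed with numpy: keep positive values, zero out the rest.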
b_np = a_np * (a_np > 0)
def check_device(device):
if not tvm.module.enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
s = topi.generic.schedule_elemwise(B)
ctx = tvm.context(device, 0)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)
foo = tvm.build(s, [A, B], device, name="relu")
foo(a, b)
np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)
for device in ['cuda', 'opencl', 'metal', 'rocm']:
check_device(device)
def verify_leaky_relu(m, alpha):
A = tvm.placeholder((m,), name='A')
B = topi.nn.leaky_relu(A, alpha)
s = tvm.create_schedule([B.op])
a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
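    # Reference leaky ReLU computed with numpy: keep positive values, scale negative values by alpha.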
b_np = a_np * (a_np > 0) + a_np * (a_np < 0) * alpha
ctx = tvm.cpu(0)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)
foo = tvm.build(s, [A, B], "llvm", name="leaky_relu")
foo(a, b)
np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)
def test_relu():
verify_relu(10, 128)
def test_leaky_relu():
verify_leaky_relu(100, 0.1)
if __name__ == "__main__":
test_relu()
test_leaky_relu()
|
import threading
import sys
is_py2 = sys.version[0] == '2'
if is_py2:
import Queue as queue
else:
import queue as queue
def isScalar(x):
return not isinstance(x, (list, tuple))
def isList(x):
return isinstance(x, (list))
def asString(x):
return str(x)
def makeDict():
return {'a': 1.0, 'c': 3.0, 'b': 2.0}
def makeTuple():
return (1.0, 2.0, 3.0)
def makeIterator(x):
return iter(x)
def makeGenerator(n):
i = 0
while i < n:
yield i
i += 1
def iterateOnThread(iter):
results = []
def iteration_worker():
for i in iter:
results.append(i)
thread = threading.Thread(target = iteration_worker)
thread.start()
    while thread.is_alive():
thread.join(0.1)
return results
def invokeOnThread(f, *args, **kwargs):
result = []
def invoke_worker():
result.append(f(*args, **kwargs))
thread = threading.Thread(target = invoke_worker)
thread.start()
    while thread.is_alive():
thread.join(0.1)
return result[0]
def reflect(x):
return x
def callFunc(f, *args, **kwargs):
return f(*args, **kwargs)
def testThrowError():
throwError()
def throwError():
raise ValueError('A very specific bad thing happened')
class PythonClass(object):
FOO = 1
BAR = 2
@classmethod
def class_method(cls):
return cls.FOO
class PythonCallable(object):
FOO = 1
BAR = 2
""" Call a callable
Args:
arg1: First argument.
"""
def __call__(self, arg1):
return arg1
def create_callable():
return PythonCallable()
dict_with_callable = dict(callable = create_callable())
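# Hypothetical usage sketch (not part of the original module): exercises the helpers
# defined above; the expected outputs are noted in the trailing comments.
if __name__ == "__main__":
    print(iterateOnThread(makeGenerator(5)))             # [0, 1, 2, 3, 4]
    print(invokeOnThread(callFunc, reflect, 42))         # 42
    print(PythonCallable()(PythonClass.class_method()))  # 1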
|
import inspect
import itertools
import logging
import time
from typing import (
Any,
Callable,
List,
Iterator,
Iterable,
Generic,
Union,
Optional,
TYPE_CHECKING,
)
import ray
from ray.data.context import DatasetContext
from ray.data.dataset import Dataset, T, U
from ray.data.impl.pipeline_executor import (
PipelineExecutor,
PipelineSplitExecutorCoordinator,
)
from ray.data.block import Block
from ray.data.row import TableRow
from ray.data.impl import progress_bar
from ray.data.impl.block_batching import batch_blocks, BatchType
from ray.data.impl.block_list import BlockList
from ray.data.impl.plan import ExecutionPlan
from ray.data.impl.stats import DatasetPipelineStats, DatasetStats
from ray.util.annotations import PublicAPI, DeveloperAPI
if TYPE_CHECKING:
import pyarrow
logger = logging.getLogger(__name__)
# Operations that can be naively applied per dataset row in the pipeline.
_PER_DATASET_OPS = ["map", "map_batches", "add_column", "flat_map", "filter"]
# Operations that apply to each dataset holistically in the pipeline.
_HOLISTIC_PER_DATASET_OPS = ["repartition", "random_shuffle", "sort"]
# Similar to above but we should force evaluation immediately.
_PER_DATASET_OUTPUT_OPS = [
"write_json",
"write_csv",
"write_parquet",
"write_datasource",
]
# Operations that operate over the stream of output batches from the pipeline.
_OUTPUT_ITER_OPS = ["take", "take_all", "show", "to_tf", "to_torch"]
@PublicAPI
class DatasetPipeline(Generic[T]):
"""Implements a pipeline of Datasets.
Unlike Datasets, which execute all transformations synchronously,
DatasetPipelines implement pipelined execution. This allows for the
overlapped execution of data input (e.g., reading files), computation
(e.g. feature preprocessing), and output (e.g., distributed ML training).
A DatasetPipeline can be created by either repeating a Dataset
(``ds.repeat(times=None)``), by turning a single Dataset into a pipeline
(``ds.window(blocks_per_window=10)``), or defined explicitly using
``DatasetPipeline.from_iterable()``.
    DatasetPipeline supports all the per-record transforms of Datasets
(e.g., map, flat_map, filter), holistic transforms (e.g., repartition),
and output methods (e.g., iter_rows, to_tf, to_torch, write_datasource).
"""
def __init__(
self,
base_iterable: Iterable[Callable[[], Dataset[T]]],
stages: List[Callable[[Dataset[Any]], Dataset[Any]]] = None,
length: int = None,
progress_bars: bool = progress_bar._enabled,
_executed: List[bool] = None,
):
"""Construct a DatasetPipeline (internal API).
The constructor is not part of the DatasetPipeline API. Use the
``Dataset.repeat()``, ``Dataset.window()``, or
``DatasetPipeline.from_iterable()`` methods to construct a pipeline.
"""
self._base_iterable = base_iterable
self._stages = stages or []
self._optimized_stages = None
self._length = length
self._progress_bars = progress_bars
self._uuid = None # For testing only.
# Whether the pipeline execution has started.
# This variable is shared across all pipelines descending from this.
self._executed = _executed or [False]
self._dataset_iter = None
self._first_dataset = None
self._schema = None
self._stats = DatasetPipelineStats()
def iter_rows(self, *, prefetch_blocks: int = 0) -> Iterator[Union[T, TableRow]]:
"""Return a local row iterator over the data in the pipeline.
If the dataset is a tabular dataset (Arrow/Pandas blocks), dict-like mappings
:py:class:`~ray.data.row.TableRow` are yielded for each row by the iterator.
If the dataset is not tabular, the raw row is yielded.
Examples:
>>> import ray
>>> for i in ray.data.range(1000000).repeat(5).iter_rows(): # doctest: +SKIP
... print(i) # doctest: +SKIP
Time complexity: O(1)
Args:
prefetch_blocks: The number of blocks to prefetch ahead of the
current block during the scan.
Returns:
A local iterator over the records in the pipeline.
"""
def gen_rows() -> Iterator[Union[T, TableRow]]:
time_start = time.perf_counter()
for ds in self.iter_datasets():
wait_start = time.perf_counter()
for row in ds.iter_rows(prefetch_blocks=prefetch_blocks):
self._stats.iter_wait_s.add(time.perf_counter() - wait_start)
with self._stats.iter_user_s.timer():
yield row
wait_start = time.perf_counter()
self._stats.iter_total_s.add(time.perf_counter() - time_start)
return gen_rows()
def iter_batches(
self,
*,
prefetch_blocks: int = 0,
batch_size: int = None,
batch_format: str = "native",
drop_last: bool = False,
) -> Iterator[BatchType]:
"""Return a local batched iterator over the data in the pipeline.
Examples:
>>> import ray
>>> ds = ray.data.range(1000000).repeat(5) # doctest: +SKIP
>>> for pandas_df in ds.iter_batches(): # doctest: +SKIP
... print(pandas_df) # doctest: +SKIP
Time complexity: O(1)
Args:
prefetch_blocks: The number of blocks to prefetch ahead of the
current block during the scan.
batch_size: Record batch size, or None to let the system pick.
batch_format: The format in which to return each batch.
Specify "native" to use the current block format (promoting
Arrow to pandas automatically), "pandas" to
select ``pandas.DataFrame`` or "pyarrow" to select
``pyarrow.Table``. Default is "native".
drop_last: Whether to drop the last batch if it's incomplete.
Returns:
An iterator over record batches.
"""
time_start = time.perf_counter()
yield from batch_blocks(
self._iter_blocks(),
self._stats,
prefetch_blocks=prefetch_blocks,
batch_size=batch_size,
batch_format=batch_format,
drop_last=drop_last,
)
self._stats.iter_total_s.add(time.perf_counter() - time_start)
def _iter_blocks(self) -> Iterator[Block]:
ds_wait_start = time.perf_counter()
for ds in self.iter_datasets():
self._stats.iter_ds_wait_s.add(time.perf_counter() - ds_wait_start)
yield from ds._plan.execute().iter_blocks()
ds_wait_start = time.perf_counter()
def split(
self, n: int, *, equal: bool = False, locality_hints: List[Any] = None
) -> List["DatasetPipeline[T]"]:
"""Split the pipeline into ``n`` disjoint pipeline shards.
This returns a list of sub-pipelines that can be passed to Ray tasks
and actors and used to read the pipeline records in parallel.
Examples:
>>> import ray
>>> pipe = ray.data.range(10).repeat(50) # doctest: +SKIP
>>> workers = ... # doctest: +SKIP
>>> # Split up a pipeline to process over `n` worker actors.
>>> shards = pipe.split( # doctest: +SKIP
... len(workers), locality_hints=workers)
>>> for shard, worker in zip(shards, workers): # doctest: +SKIP
... worker.consume.remote(shard) # doctest: +SKIP
Time complexity: O(1)
Implementation detail: this launches a coordinator actor that is used
to execute the pipeline and push data blocks to each pipeline shard.
Reading from an individual shard will be blocked if other shards are
falling behind. A warning will be printed if a shard has been blocked
on read for more than 10 seconds.
Args:
n: Number of child pipelines to return.
equal: Whether to guarantee each split has an equal
number of records. This may drop records if they cannot be
divided equally among the splits.
locality_hints: A list of Ray actor handles of size ``n``. The
system will try to co-locate the blocks of the ith pipeline
shard with the ith actor to maximize data locality.
Returns:
A list of ``n`` disjoint pipeline splits.
"""
return self._split(
n,
lambda ds, equal=equal: ds.split(
n, equal=equal, locality_hints=locality_hints
),
)
def split_at_indices(self, indices: List[int]) -> List["DatasetPipeline[T]"]:
"""Split the datasets within the pipeline at the given indices
(like np.split).
This will split each dataset contained within this pipeline, thereby
producing len(indices) + 1 pipelines with the first pipeline containing
the [0, indices[0]) slice from each dataset, the second pipeline
containing the [indices[0], indices[1]) slice from each dataset, and so
        on, with the final pipeline containing the
[indices[-1], self.count()) slice from each dataset.
Examples:
>>> import ray
>>> p1, p2, p3 = ray.data.range( # doctest: +SKIP
... 8).repeat(2).split_at_indices([2, 5]) # doctest: +SKIP
>>> p1.take() # doctest: +SKIP
[0, 1, 0, 1]
>>> p2.take() # doctest: +SKIP
[2, 3, 4, 2, 3, 4]
>>> p3.take() # doctest: +SKIP
[5, 6, 7, 5, 6, 7]
Time complexity: O(num splits)
See also: ``DatasetPipeline.split``
Args:
indices: List of sorted integers which indicate where the pipeline
will be split. If an index exceeds the length of the pipeline,
an empty pipeline will be returned.
Returns:
The pipeline splits.
"""
if len(indices) < 1:
raise ValueError("indices must be at least of length 1")
if sorted(indices) != indices:
raise ValueError("indices must be sorted")
if indices[0] < 0:
raise ValueError("indices must be positive")
return self._split(len(indices) + 1, lambda ds: ds.split_at_indices(indices))
def _split(self, n: int, splitter: Callable[[Dataset], "DatasetPipeline[T]"]):
resources = {}
if not ray.util.client.ray.is_connected():
# Pin the coordinator (and any child actors) to the local node to avoid
# errors during node failures. If the local node dies, then the driver
# will fate-share with the coordinator anyway.
resources["node:{}".format(ray.util.get_node_ip_address())] = 0.0001
coordinator = PipelineSplitExecutorCoordinator.options(
resources=resources,
placement_group=None,
).remote(self, n, splitter, DatasetContext.get_current())
if self._executed[0]:
raise RuntimeError("Pipeline cannot be read multiple times.")
self._executed[0] = True
class SplitIterator:
def __init__(self, split_index, coordinator):
self.split_index = split_index
self.coordinator = coordinator
self.warn_threshold = 100
self.wait_delay_s = 0.1
def __iter__(self):
return self
def __next__(self):
ds = None
tries = 0
while ds is None:
ds = ray.get(
self.coordinator.next_dataset_if_ready.remote(self.split_index)
)
# Wait for other shards to catch up reading.
if not ds:
time.sleep(self.wait_delay_s)
tries += 1
if tries > self.warn_threshold:
print(
"Warning: reader on shard {} of the pipeline "
"has been blocked more than {}s waiting for "
"other readers to catch up. All pipeline shards "
"must be read from concurrently.".format(
self.split_index,
self.wait_delay_s * self.warn_threshold,
)
)
self.warn_threshold *= 2
return lambda: ds
return [
# Disable progress bars for the split readers since they would
# overwhelm the console.
DatasetPipeline(
SplitIterator(idx, coordinator),
length=self._length,
progress_bars=False,
)
for idx in range(n)
]
def rewindow(
self, *, blocks_per_window: int, preserve_epoch: bool = True
) -> "DatasetPipeline[T]":
"""Change the windowing (blocks per dataset) of this pipeline.
Changes the windowing of this pipeline to the specified size. For
example, if the current pipeline has two blocks per dataset, and
`.rewindow(blocks_per_window=4)` is requested, adjacent datasets will
be merged until each dataset is 4 blocks. If
        a smaller ``blocks_per_window`` is requested, the datasets will instead
        be split into smaller windows.
Args:
blocks_per_window: The new target blocks per window.
preserve_epoch: Whether to preserve epoch boundaries. If set to
False, then windows can contain data from two adjacent epochs.
"""
class WindowIterator:
def __init__(self, original_iter):
self._original_iter = original_iter
self._buffer: Optional[Dataset[T]] = None
def __next__(self) -> Dataset[T]:
try:
# Merge windows until we meet the requested window size.
if self._buffer is None:
self._buffer = next(self._original_iter)
while self._buffer.num_blocks() < blocks_per_window:
next_ds = next(self._original_iter)
if (
preserve_epoch
and self._buffer._get_epoch() != next_ds._get_epoch()
):
partial_window = self._buffer
self._buffer = next_ds
return lambda: partial_window
else:
self._buffer = self._buffer.union(next_ds)
# Slice off the left-most chunk and return it.
res, self._buffer = self._buffer._divide(blocks_per_window)
assert res.num_blocks() <= blocks_per_window, res
if self._buffer.num_blocks() == 0:
self._buffer = None
return lambda: res
except StopIteration:
# Return the left-over data as a single window.
if self._buffer and self._buffer.num_blocks() > 0:
res = self._buffer
assert res.num_blocks() <= blocks_per_window, res
self._buffer = None
return lambda: res
else:
raise
class WindowIterable:
def __init__(self, original_iter):
self._original_iter = original_iter
def __iter__(self):
return WindowIterator(self._original_iter)
if self._length == float("inf"):
length = float("inf")
else:
length = None
return DatasetPipeline(WindowIterable(self.iter_datasets()), length=length)
def repeat(self, times: int = None) -> "DatasetPipeline[T]":
"""Repeat this pipeline a given number or times, or indefinitely.
This operation is only allowed for pipelines of a finite length. An
error will be raised for pipelines of infinite length.
Note that every repeat of the pipeline is considered an "epoch" for
the purposes of ``iter_epochs()``. If there are multiple repeat calls,
the latest repeat takes precedence for the purpose of defining epochs.
Args:
times: The number of times to loop over this pipeline, or None
to repeat indefinitely.
"""
if self._length == float("inf"):
raise ValueError("Cannot repeat a pipeline of infinite length.")
class RepeatIterator:
def __init__(self, original_iter):
self._original_iter = original_iter
# Holds results to repeat.
self._results = []
# Incrementing cursor over results.
self._i = 0
# This is calculated later.
self._max_i = None
def __next__(self) -> Dataset[T]:
# Still going through the original pipeline.
if self._original_iter:
try:
make_ds = next(self._original_iter)
self._results.append(make_ds)
def gen():
res = make_ds()
res._set_epoch(0)
return res
return gen
except StopIteration:
self._original_iter = None
# Calculate the cursor limit.
if times:
self._max_i = len(self._results) * (times - 1)
else:
self._max_i = float("inf")
# Going through a repeat of the pipeline.
if self._i < self._max_i:
make_ds = self._results[self._i % len(self._results)]
epoch = 1 + self._i // len(self._results)
def gen():
res = make_ds()
res._set_epoch(epoch)
return res
self._i += 1
return gen
else:
raise StopIteration
class RepeatIterable:
def __init__(self, original_iter):
self._original_iter = original_iter
def __iter__(self):
return RepeatIterator(self._original_iter)
if not times:
length = float("inf")
elif times and self._length:
length = times * self._length
else:
length = None
return DatasetPipeline(
RepeatIterable(iter(self._base_iterable)),
stages=self._stages.copy(),
length=length,
)
def schema(
self, fetch_if_missing: bool = False
) -> Union[type, "pyarrow.lib.Schema"]:
"""Return the schema of the dataset pipeline.
For datasets of Arrow records, this will return the Arrow schema.
For dataset of Python objects, this returns their Python type.
        Note: This is intended as a way to peek at the schema before the
        DatasetPipeline executes. If execution has already started,
it will simply return the cached schema from the previous call.
Time complexity: O(1)
Args:
fetch_if_missing: If True, synchronously fetch the schema if it's
not known. Default is False, where None is returned if the
schema is not known.
Returns:
The Python type or Arrow schema of the records, or None if the
schema is not known.
"""
if not self._executed[0]:
self._schema = self._peek().schema(fetch_if_missing)
return self._schema
def count(self) -> int:
"""Count the number of records in the dataset pipeline.
This blocks until the entire pipeline is fully executed.
Time complexity: O(dataset size / parallelism)
Returns:
The number of records in the dataset pipeline.
"""
if self._length == float("inf"):
raise ValueError("Cannot count a pipeline of infinite length.")
pipe = self.map_batches(lambda batch: [len(batch)])
total = 0
for elem in pipe.iter_rows():
total += elem
return total
def sum(self) -> int:
"""Sum the records in the dataset pipeline.
This blocks until the entire pipeline is fully executed.
Time complexity: O(dataset size / parallelism)
Returns:
The sum of the records in the dataset pipeline.
"""
if self._length == float("inf"):
raise ValueError("Cannot sum a pipeline of infinite length.")
pipe = self.map_batches(lambda batch: [batch.sum()[0]], batch_format="pandas")
total = 0
for elem in pipe.iter_rows():
total += elem
return total
def show_windows(self, limit_per_dataset: int = 10) -> None:
"""Print up to the given number of records from each window/dataset.
This is helpful as a debugging tool for understanding the structure of
dataset pipelines.
Args:
limit_per_dataset: Rows to print per window/dataset.
"""
epoch = None
for i, ds in enumerate(self.iter_datasets()):
if ds._get_epoch() != epoch:
epoch = ds._get_epoch()
print("------ Epoch {} ------".format(epoch))
print("=== Window {} ===".format(i))
ds.show(limit_per_dataset)
def iter_epochs(self) -> Iterator["DatasetPipeline[T]"]:
"""Split this pipeline up by epoch.
This allows reading of data per-epoch for repeated Datasets, which is
useful for ML training. For example, ``ray.data.range(10).repeat(50)``
generates a pipeline with 500 rows total split across 50 epochs. This
method allows iterating over the data individually per epoch
(repetition) of the original data.
Examples:
>>> import ray
>>> epochs = ray.data.range(10).repeat(50).iter_epochs() # doctest: +SKIP
>>> for i, epoch in enumerate(epochs): # doctest: +SKIP
... print("Epoch", i) # doctest: +SKIP
... for row in epoch.iter_rows(): # doctest: +SKIP
... print(row) # doctest: +SKIP
Returns:
Iterator over epoch objects, where each epoch is a DatasetPipeline
containing data from that epoch only.
"""
class Peekable:
def __init__(self, base_iter: Iterator[T]):
self._iter = base_iter
self._buffer = None
def _fill_buffer_if_possible(self):
if self._buffer is None:
try:
self._buffer = next(self._iter)
assert self._buffer is not None
except StopIteration:
pass
def peek(self) -> T:
self._fill_buffer_if_possible()
if self._buffer is None:
raise StopIteration
return self._buffer
def __next__(self) -> T:
self._fill_buffer_if_possible()
if self._buffer is None:
raise StopIteration
item = self._buffer
self._buffer = None
return item
class SingleEpochIterator:
def __init__(self, peekable_iter: Iterator[Dataset[T]], epoch: int):
self._iter = peekable_iter
self._epoch = epoch
def __next__(self) -> Dataset[T]:
if self._iter.peek()._get_epoch() > self._epoch:
raise StopIteration
ds = next(self._iter)
return lambda: ds
def __iter__(self):
return self
class EpochDelimitedIterator:
def __init__(self, pipe):
self._iter = Peekable(pipe.iter_datasets())
self._cur_epoch = None
def __next__(self) -> "DatasetPipeline[T]":
if self._cur_epoch is None:
self._cur_epoch = self._iter.peek()._get_epoch()
else:
self._cur_epoch += 1
warned = False
while self._iter.peek()._get_epoch() < self._cur_epoch:
if not warned:
warned = True
                    logger.warning(
"Data from epoch {} was not fully read, "
"skipping to next epoch.".format(self._cur_epoch - 1)
)
next(self._iter)
epoch_pipe = DatasetPipeline.from_iterable(
SingleEpochIterator(self._iter, epoch=self._cur_epoch)
)
return epoch_pipe
def __iter__(self):
return self
return EpochDelimitedIterator(self)
@DeveloperAPI
def iter_datasets(self) -> Iterator[Dataset[T]]:
"""Iterate over the output datasets of this pipeline.
Returns:
Iterator over the datasets outputted from this pipeline.
"""
if self._executed[0]:
raise RuntimeError("Pipeline cannot be read multiple times.")
self._executed[0] = True
if self._first_dataset is None:
self._peek()
iter = itertools.chain([self._first_dataset], self._dataset_iter)
self._first_dataset = None
self._dataset_iter = None
return iter
@DeveloperAPI
def foreach_window(
self, fn: Callable[[Dataset[T]], Dataset[U]]
) -> "DatasetPipeline[U]":
"""Apply a transform to each dataset/window in this pipeline.
Args:
fn: The function to transform each dataset with.
Returns:
The transformed DatasetPipeline.
"""
if self._executed[0]:
raise RuntimeError("Pipeline cannot be read multiple times.")
return DatasetPipeline(
self._base_iterable,
self._stages + [fn],
self._length,
self._progress_bars,
_executed=self._executed,
)
def stats(self, exclude_first_window: bool = True) -> str:
"""Returns a string containing execution timing information.
Args:
exclude_first_window: Whether to exclude the first window from
the pipeline time breakdown. This is generally a good idea
since there is always a stall waiting for the first window to
be initially computed, which can be misleading in the stats.
"""
return self._stats.summary_string(exclude_first_window)
@staticmethod
def from_iterable(
iterable: Iterable[Callable[[], Dataset[T]]],
) -> "DatasetPipeline[T]":
"""Create a pipeline from an sequence of Dataset producing functions.
Args:
iterable: A finite or infinite-length sequence of functions that
each produce a Dataset when called.
"""
if hasattr(iterable, "__len__"):
length = len(iterable)
else:
length = None
return DatasetPipeline(iterable, length=length)
def __repr__(self) -> str:
return "DatasetPipeline(num_windows={}, num_stages={})".format(
self._length, 1 + len(self._stages)
)
def __str__(self) -> str:
return repr(self)
def _get_uuid(self) -> str:
return self._uuid
def _set_uuid(self, uuid: str) -> None:
self._uuid = uuid
def _optimize_stages(self):
"""Optimize this pipeline, fusing stages together as possible."""
context = DatasetContext.get_current()
if not context.optimize_fuse_stages:
self._optimized_stages = self._stages
return
# This dummy dataset will be used to get a set of optimized stages.
dummy_ds = Dataset(
ExecutionPlan(BlockList([], []), DatasetStats(stages={}, parent=None)),
0,
True,
)
# Apply all pipeline operations to the dummy dataset.
for stage in self._stages:
dummy_ds = stage(dummy_ds)
# Get the optimized stages.
_, _, stages = dummy_ds._plan._optimize()
# Apply these optimized stages to the datasets underlying the pipeline.
# These optimized stages will be executed by the PipelineExecutor.
optimized_stages = []
for stage in stages:
optimized_stages.append(
lambda ds, stage=stage: Dataset(
ds._plan.with_stage(stage), ds._epoch, True
)
)
self._optimized_stages = optimized_stages
def _peek(self) -> Dataset[T]:
if self._first_dataset is None:
self._optimize_stages()
self._dataset_iter = PipelineExecutor(self)
self._first_dataset = next(self._dataset_iter)
return self._first_dataset
for method in _PER_DATASET_OPS:
def make_impl(method):
delegate = getattr(Dataset, method)
def impl(self, *args, **kwargs) -> "DatasetPipeline[U]":
return self.foreach_window(lambda ds: getattr(ds, method)(*args, **kwargs))
impl.__name__ = delegate.__name__
impl.__doc__ = """
Apply ``Dataset.{method}`` to each dataset/window in this pipeline.
""".format(
method=method
)
setattr(
impl,
"__signature__",
inspect.signature(delegate).replace(return_annotation="DatasetPipeline[U]"),
)
return impl
setattr(DatasetPipeline, method, make_impl(method))
for method in _HOLISTIC_PER_DATASET_OPS:
def make_impl(method):
delegate = getattr(Dataset, method)
def impl(self, *args, **kwargs) -> "DatasetPipeline[U]":
return self.foreach_window(lambda ds: getattr(ds, method)(*args, **kwargs))
impl.__name__ = delegate.__name__
impl.__doc__ = """
Apply ``Dataset.{method}`` to each dataset/window in this pipeline.
""".format(
method=method
)
setattr(
impl,
"__signature__",
inspect.signature(delegate).replace(return_annotation="DatasetPipeline[U]"),
)
return impl
def deprecation_warning(method: str):
def impl(*a, **kw):
raise DeprecationWarning(
"`{}` has been renamed to `{}_each_window`.".format(method, method)
)
return impl
setattr(DatasetPipeline, method, deprecation_warning(method))
setattr(DatasetPipeline, method + "_each_window", make_impl(method))
for method in _PER_DATASET_OUTPUT_OPS:
def make_impl(method):
delegate = getattr(Dataset, method)
def impl(self, *args, **kwargs):
uuid = None
for i, ds in enumerate(self.iter_datasets()):
if uuid is None:
uuid = self._get_uuid() or ds._get_uuid()
ds._set_uuid(f"{uuid}_{i:06}")
getattr(ds, method)(*args, **kwargs)
impl.__name__ = delegate.__name__
impl.__doc__ = """
Call ``Dataset.{method}`` on each output dataset of this pipeline.
""".format(
method=method
)
setattr(impl, "__signature__", inspect.signature(delegate))
return impl
setattr(DatasetPipeline, method, make_impl(method))
for method in _OUTPUT_ITER_OPS:
def make_impl(method):
delegate = getattr(Dataset, method)
def impl(self, *args, **kwargs):
return delegate(self, *args, **kwargs)
impl.__name__ = delegate.__name__
impl.__doc__ = """
Call ``Dataset.{method}`` over the stream of output batches from the pipeline.
""".format(
method=method
)
setattr(impl, "__signature__", inspect.signature(delegate))
return impl
setattr(DatasetPipeline, method, make_impl(method))
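# Hypothetical usage sketch (not part of the original module); it assumes a local Ray
# runtime is available and mirrors the docstring examples above.
if __name__ == "__main__":
    pipe = ray.data.range(10).repeat(2)  # a two-epoch pipeline over the integers 0..9
    print(pipe)                          # DatasetPipeline(num_windows=2, num_stages=1)
    print(pipe.count())                  # 20 records in total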
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MaitainShopProduct import MaitainShopProduct
class AlipayEcoMycarMaintainServiceproductUpdateModel(object):
def __init__(self):
self._operation_type = None
self._out_product_id = None
self._shop_product = None
@property
def operation_type(self):
return self._operation_type
@operation_type.setter
def operation_type(self, value):
self._operation_type = value
@property
def out_product_id(self):
return self._out_product_id
@out_product_id.setter
def out_product_id(self, value):
self._out_product_id = value
@property
def shop_product(self):
return self._shop_product
@shop_product.setter
def shop_product(self, value):
if isinstance(value, MaitainShopProduct):
self._shop_product = value
else:
self._shop_product = MaitainShopProduct.from_alipay_dict(value)
def to_alipay_dict(self):
params = dict()
if self.operation_type:
if hasattr(self.operation_type, 'to_alipay_dict'):
params['operation_type'] = self.operation_type.to_alipay_dict()
else:
params['operation_type'] = self.operation_type
if self.out_product_id:
if hasattr(self.out_product_id, 'to_alipay_dict'):
params['out_product_id'] = self.out_product_id.to_alipay_dict()
else:
params['out_product_id'] = self.out_product_id
if self.shop_product:
if hasattr(self.shop_product, 'to_alipay_dict'):
params['shop_product'] = self.shop_product.to_alipay_dict()
else:
params['shop_product'] = self.shop_product
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayEcoMycarMaintainServiceproductUpdateModel()
if 'operation_type' in d:
o.operation_type = d['operation_type']
if 'out_product_id' in d:
o.out_product_id = d['out_product_id']
if 'shop_product' in d:
o.shop_product = d['shop_product']
return o
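# Hypothetical usage sketch (not part of the original module): round-trips the model
# through its alipay-dict representation; the field values below are made up.
if __name__ == "__main__":
    model = AlipayEcoMycarMaintainServiceproductUpdateModel()
    model.operation_type = "UPDATE"
    model.out_product_id = "demo-product-001"
    as_dict = model.to_alipay_dict()
    print(as_dict)
    print(AlipayEcoMycarMaintainServiceproductUpdateModel.from_alipay_dict(as_dict).out_product_id)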
|
from __future__ import annotations
from pathlib import _PosixFlavour, _WindowsFlavour
from typing import Optional, Callable, Awaitable, Dict, List, TYPE_CHECKING
from errno import EINVAL
import os
import sys
from aiopath.wrap import func_to_async_func as wrap_async
try:
from pathlib import _getfinalpathname
_async_getfinalpathname = wrap_async(_getfinalpathname)
except ImportError:
def _getfinalpathname(*args, **kwargs):
raise ImportError("_getfinalpathname() requires a Windows/NT platform")
async def _async_getfinalpathname(*args, **kwargs):
raise ImportError("_getfinalpathname() requires a Windows/NT platform")
if TYPE_CHECKING: # keep mypy quiet
from ._base import AsyncSyncPath, _AsyncSyncAccessor
class _AsyncSyncPosixFlavour(_PosixFlavour):
def gethomedir(self, username: str) -> str:
return super().gethomedir(username)
async def async_gethomedir(self, username: str) -> str:
gethomedir: Callable[[str], Awaitable[str]] = wrap_async(super().gethomedir)
return await gethomedir(username)
def resolve(self, path: AsyncSyncPath, strict: bool = False) -> Optional[str]:
sep: str = self.sep
accessor: '_AsyncSyncAccessor' = path._accessor
seen: Dict[str, Optional[str]] = {}
def _resolve(path: str, rest: str) -> str:
if rest.startswith(sep): path = ''
for name in rest.split(sep):
if not name or name == '.': continue
if name == '..':
# parent dir
path, _, _ = path.rpartition(sep)
continue
newpath = path + name if path.endswith(sep) else path + sep + name
if newpath in seen:
# Already seen this path
path = seen[newpath]
if path is not None: continue
# The symlink is not resolved, so we must have a symlink loop.
raise RuntimeError(f"Symlink loop from {newpath}")
# Resolve the symbolic link
try: target = accessor.readlink(newpath)
except OSError as e:
if e.errno != EINVAL and strict: raise
# Not a symlink, or non-strict mode. We just leave the path
# untouched.
path = newpath
else:
seen[newpath] = None # not resolved symlink
path = _resolve(path, target)
seen[newpath] = path # resolved symlink
return path
# NOTE: according to POSIX, getcwd() cannot contain path components
# which are symlinks.
base = '' if path.is_absolute() else os.getcwd()
result = _resolve(base, str(path))
return result or sep
async def async_resolve(self, path: AsyncSyncPath, strict: bool = False) -> Optional[str]:
sep: str = self.sep
accessor: '_AsyncSyncAccessor' = path._accessor
seen: Dict[str, Optional[str]] = {}
async def _resolve(path: str, rest: str) -> str:
if rest.startswith(sep): path = ''
for name in rest.split(sep):
if not name or name == '.': continue
if name == '..':
# parent dir
path, _, _ = path.rpartition(sep)
continue
newpath = path + name if path.endswith(sep) else path + sep + name
if newpath in seen:
# Already seen this path
path = seen[newpath]
if path is not None: continue
# The symlink is not resolved, so we must have a symlink loop.
raise RuntimeError(f"Symlink loop from {newpath}")
# Resolve the symbolic link
try: target = await accessor.async_readlink(newpath)
except OSError as e:
if e.errno != EINVAL and strict: raise
# Not a symlink, or non-strict mode. We just leave the path
# untouched.
path = newpath
else:
seen[newpath] = None # not resolved symlink
path = await _resolve(path, target)
seen[newpath] = path # resolved symlink
return path
# NOTE: according to POSIX, getcwd() cannot contain path components
# which are symlinks.
base = '' if path.is_absolute() else os.getcwd()
result = await _resolve(base, str(path))
return result or sep
class _AsyncSyncWindowsFlavour(_WindowsFlavour):
def gethomedir(self, username: str) -> str:
return super().gethomedir(username)
async def async_gethomedir(self, username: str) -> str:
gethomedir: Callable[[str], Awaitable[str]] = wrap_async(super().gethomedir)
return await gethomedir(username)
def resolve(self, path: 'AsyncSyncPath', strict: bool = False) -> Optional[str]:
s = str(path)
if not s: return os.getcwd()
previous_s: Optional[str] = None
if _getfinalpathname is not None:
if strict: return self._ext_to_normal(_getfinalpathname(s))
else:
tail_parts: List[str] = [] # End of the path after the first one not found
while True:
try: s = self._ext_to_normal(_getfinalpathname(s))
except FileNotFoundError:
previous_s = s
s, tail = os.path.split(s)
tail_parts.append(tail)
if previous_s == s: return path
else: return os.path.join(s, *reversed(tail_parts))
return None
async def async_resolve(self, path: 'AsyncSyncPath', strict: bool = False) -> Optional[str]:
s = str(path)
if not s: return os.getcwd()
previous_s: Optional[str] = None
if _async_getfinalpathname is not None:
if strict: return self._ext_to_normal(await _async_getfinalpathname(s))
else:
tail_parts: List[str] = [] # End of the path after the first one not found
while True:
try: s = self._ext_to_normal(await _async_getfinalpathname(s))
except FileNotFoundError:
previous_s = s
s, tail = os.path.split(s)
tail_parts.append(tail)
if previous_s == s: return path
else: return os.path.join(s, *reversed(tail_parts))
return None
_async_sync_windows_flavour = _AsyncSyncWindowsFlavour()
_async_sync_posix_flavour = _AsyncSyncPosixFlavour()
|
import argparse
import json
import os
import pickle
import numpy as np
from pocovidnet.evaluate_genesis import GenesisEvaluator
from pocovidnet.evaluate_video import VideoEvaluator
from tensorflow.keras import backend as K
from pocovidnet.videoto3d import Videoto3D
def main():
parser = argparse.ArgumentParser(description='Evaluate genesis and cam')
parser.add_argument('--json', type=str, default="../data/cross_val.json")
parser.add_argument(
'--genesis_weights', type=str, default='video_genesis_lr1e4'
)
parser.add_argument(
'--cam_weights', type=str, default='trained_models_cam'
)
parser.add_argument(
'--videos', type=str, default='../data/pocus_videos/convex'
)
args = parser.parse_args()
with open(args.json, "r") as infile:
cross_val_split = json.load(infile)
VIDEO_DIR = args.videos
all_genesis_preds = []
all_frame_preds = []
for i in range(5):
gen_eval = GenesisEvaluator(
weights_dir=args.genesis_weights, ensemble=False, split=i
)
K.set_image_data_format("channels_last")
normal_eval = VideoEvaluator(
weights_dir=args.cam_weights,
ensemble=False,
split=i,
model_id="vgg_cam",
num_classes=4
)
files = cross_val_split[str(i)]["test"][0]
# print(files)
for f in files:
print("evaluate", f)
# TEST if the video is working
vid3d = Videoto3D("", 64, 64, 5, 5)
vid3d.max_vid = {"cov": 20, "pne": 20, "reg": 20}
X_test, _, fn = vid3d.video3d(
[os.path.join(VIDEO_DIR, f)], ["cov"]
)
if len(np.unique(fn)) != 1:
print("ERROR: WRONG FILE!")
print(fn)
print(X_test.shape)
continue
# run genesis model
K.set_image_data_format("channels_first")
preds = gen_eval(os.path.join(VIDEO_DIR, f))
vid_pred_genesis = np.argmax(np.mean(preds, axis=(0, 1)))
all_genesis_preds.append(preds)
# run cam model
K.set_image_data_format("channels_last")
preds_framebased = normal_eval(os.path.join(VIDEO_DIR, f))
frame_pred = np.argmax(np.mean(preds_framebased, axis=0))
all_frame_preds.append(preds_framebased)
print(preds.shape, preds_framebased.shape)
print(
"genesis pred", vid_pred_genesis, "frame based pred",
frame_pred
)
print("-------------")
with open("evaluation_outputs.dat", "wb") as outfile:
pickle.dump((all_genesis_preds, all_frame_preds), outfile)
if __name__ == '__main__':
main()
|
from rest_framework import serializers
from .models import Skill, Task, Days
class SkillSerializer(serializers.ModelSerializer):
class Meta:
model = Skill
fields = ('name', 'user')
class TaskSerializer(serializers.ModelSerializer):
class Meta:
model = Task
fields = ('name', 'completion_time', 'skill')
class DaysSerializer(serializers.ModelSerializer):
class Meta:
model = Days
fields = ('monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday', 'user')
|
#!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test using named arguments for RPCs."""
from test_framework.test_framework import AltcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class NamedArgumentTest(AltcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
node = self.nodes[0]
h = node.help(command='getblockchaininfo')
assert(h.startswith('getblockchaininfo\n'))
assert_raises_rpc_error(-8, 'Unknown named parameter', node.help, random='getblockchaininfo')
h = node.getblockhash(height=0)
node.getblock(blockhash=h)
assert_equal(node.echo(), [])
assert_equal(node.echo(arg0=0,arg9=9), [0] + [None]*8 + [9])
assert_equal(node.echo(arg1=1), [None, 1])
assert_equal(node.echo(arg9=None), [None]*10)
assert_equal(node.echo(arg0=0,arg3=3,arg9=9), [0] + [None]*2 + [3] + [None]*5 + [9])
if __name__ == '__main__':
NamedArgumentTest().main()
|
from typing import Tuple
import pytest
import kornia as kornia
import kornia.testing as utils # test utils
import torch
from torch.testing import assert_allclose
from torch.autograd import gradcheck
class TestBoundingBoxInferring:
def test_bounding_boxes_dim_inferring(self, device, dtype):
boxes = torch.tensor([[
[1., 1.],
[3., 1.],
[3., 2.],
[1., 2.],
]], device=device, dtype=dtype)
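        # Corners are (x, y) pairs in clockwise order starting at the top-left; the test expects end-inclusive extents, i.e. a box 3 wide and 2 tall.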
h, w = kornia.geometry.transform.crop.infer_box_shape(boxes)
assert (h, w) == (2, 3)
def test_bounding_boxes_dim_inferring_batch(self, device, dtype):
boxes = torch.tensor([[
[1., 1.],
[3., 1.],
[3., 2.],
[1., 2.],
], [
[2., 2.],
[4., 2.],
[4., 3.],
[2., 3.],
]], device=device, dtype=dtype)
h, w = kornia.geometry.transform.crop.infer_box_shape(boxes)
assert (h.unique().item(), w.unique().item()) == (2, 3)
def test_gradcheck(self, device, dtype):
boxes = torch.tensor([[
[1., 1.],
[3., 1.],
[3., 2.],
[1., 2.],
]], device=device, dtype=dtype)
boxes = utils.tensor_to_gradcheck_var(boxes)
        assert gradcheck(kornia.geometry.transform.crop.infer_box_shape,
(boxes,), raise_exception=True)
def test_jit(self, device, dtype):
# Define script
op = kornia.geometry.transform.crop.infer_box_shape
op_script = torch.jit.script(op)
# Define input
boxes = torch.tensor([[
[1., 1.],
[3., 1.],
[3., 2.],
[1., 2.],
]], device=device, dtype=dtype)
actual = op_script(boxes)
expected = op(boxes)
assert_allclose(actual, expected)
class TestCropAndResize:
def test_align_corners_true(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
height, width = 2, 3
expected = torch.tensor(
[[[[6.0000, 6.5000, 7.0000],
[10.0000, 10.5000, 11.0000]]]], device=device, dtype=dtype)
boxes = torch.tensor([[
[1., 1.],
[2., 1.],
[2., 2.],
[1., 2.],
]], device=device, dtype=dtype) # 1x4x2
        # default should use align_corners=True
patches = kornia.crop_and_resize(inp, boxes, (height, width))
assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)
def test_align_corners_false(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
height, width = 2, 3
expected = torch.tensor(
[[[[6.7222, 7.1667, 7.6111],
[9.3889, 9.8333, 10.2778]]]], device=device, dtype=dtype)
boxes = torch.tensor([[
[1., 1.],
[2., 1.],
[2., 2.],
[1., 2.],
]], device=device, dtype=dtype) # 1x4x2
patches = kornia.crop_and_resize(inp, boxes, (height, width), align_corners=False)
assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)
def test_crop_batch(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]], [[
[1., 5., 9., 13.],
[2., 6., 10., 14.],
[3., 7., 11., 15.],
[4., 8., 12., 16.],
]]], device=device, dtype=dtype)
expected = torch.tensor([[[
[6., 7.],
[10., 11.],
]], [[
[7., 15.],
[8., 16.],
]]], device=device, dtype=dtype)
boxes = torch.tensor([[
[1., 1.],
[2., 1.],
[2., 2.],
[1., 2.],
], [
[1., 2.],
[3., 2.],
[3., 3.],
[1., 3.],
]], device=device, dtype=dtype) # 2x4x2
patches = kornia.crop_and_resize(inp, boxes, (2, 2))
assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)
def test_crop_batch_broadcast(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]], [[
[1., 5., 9., 13.],
[2., 6., 10., 14.],
[3., 7., 11., 15.],
[4., 8., 12., 16.],
]]], device=device, dtype=dtype)
expected = torch.tensor([[[
[6., 7.],
[10., 11.],
]], [[
[6., 10.],
[7., 11.],
]]], device=device, dtype=dtype)
boxes = torch.tensor([[
[1., 1.],
[2., 1.],
[2., 2.],
[1., 2.],
]], device=device, dtype=dtype) # 1x4x2
patches = kornia.crop_and_resize(inp, boxes, (2, 2))
assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)
def test_gradcheck(self, device, dtype):
img = torch.rand(1, 2, 5, 4, device=device, dtype=dtype)
img = utils.tensor_to_gradcheck_var(img) # to var
boxes = torch.tensor([[
[1., 1.],
[2., 1.],
[2., 2.],
[1., 2.],
]], device=device, dtype=dtype) # 1x4x2
boxes = utils.tensor_to_gradcheck_var(boxes, requires_grad=False) # to var
assert gradcheck(kornia.crop_and_resize,
(img, boxes, (4, 2),),
raise_exception=True)
def test_jit(self, device, dtype):
# Define script
op = kornia.crop_and_resize
op_script = torch.jit.script(op)
# Define input
img = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
boxes = torch.tensor([[
[1., 1.],
[2., 1.],
[2., 2.],
[1., 2.],
]], device=device, dtype=dtype) # 1x4x2
crop_height, crop_width = 4, 2
actual = op_script(img, boxes, (crop_height, crop_width))
expected = op(img, boxes, (crop_height, crop_width))
assert_allclose(actual, expected, rtol=1e-4, atol=1e-4)
class TestCenterCrop:
def test_center_crop_h2_w4(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
expected = torch.tensor([[[
[5., 6., 7., 8.],
[9., 10., 11., 12.],
]]], device=device, dtype=dtype)
out_crop = kornia.center_crop(inp, (2, 4))
assert_allclose(out_crop, expected, rtol=1e-4, atol=1e-4)
def test_center_crop_h4_w2(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
height, width = 4, 2
expected = torch.tensor([[[
[2., 3.],
[6., 7.],
[10., 11.],
[14., 15.],
]]], device=device, dtype=dtype)
out_crop = kornia.center_crop(inp, (height, width))
assert_allclose(out_crop, expected, rtol=1e-4, atol=1e-4)
def test_center_crop_h4_w2_batch(self, device, dtype):
inp = torch.tensor([
[[[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.]]],
[[[1., 5., 9., 13.],
[2., 6., 10., 14.],
[3., 7., 11., 15.],
[4., 8., 12., 16.]]]
], device=device, dtype=dtype)
expected = torch.tensor([[[
[2., 3.],
[6., 7.],
[10., 11.],
[14., 15.],
]], [[
[5., 9.],
[6., 10.],
[7., 11.],
[8., 12.],
]]], device=device, dtype=dtype)
out_crop = kornia.center_crop(inp, (4, 2))
assert_allclose(out_crop, expected, rtol=1e-4, atol=1e-4)
def test_gradcheck(self, device, dtype):
img = torch.rand(1, 2, 5, 4, device=device, dtype=dtype)
img = utils.tensor_to_gradcheck_var(img) # to var
assert gradcheck(kornia.center_crop, (img, (4, 2),), raise_exception=True)
def test_jit(self, device, dtype):
# Define script
op = kornia.center_crop
op_script = torch.jit.script(op)
# Define input
img = torch.ones(1, 2, 5, 4, device=device, dtype=dtype)
actual = op_script(img, (4, 2))
expected = op(img, (4, 2))
assert_allclose(actual, expected, rtol=1e-4, atol=1e-4)
def test_jit_trace(self, device, dtype):
# Define script
op = kornia.center_crop
op_script = torch.jit.script(op)
# Define input
img = torch.ones(2, 1, 6, 3, device=device, dtype=dtype)
op_trace = torch.jit.trace(op_script, (img, (torch.tensor(2), torch.tensor(3))))
img = torch.ones(2, 1, 6, 3, device=device, dtype=dtype)
# Run
actual = op_trace(img, (torch.tensor(2), torch.tensor(3)))
expected = op(img, (2, 3))
assert_allclose(actual, expected, rtol=1e-4, atol=1e-4)
class TestCropByBoxes:
def test_crop_by_boxes_no_resizing(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
src = torch.tensor([[
[1., 1.],
[2., 1.],
[2., 2.],
[1., 2.],
]], device=device, dtype=dtype) # 1x4x2
dst = torch.tensor([[
[0., 0.],
[1., 0.],
[1., 1.],
[0., 1.],
]], device=device, dtype=dtype) # 1x4x2
expected = torch.tensor([[[
[6., 7.],
[10., 11.],
]]], device=device, dtype=dtype)
patches = kornia.geometry.transform.crop.crop_by_boxes(inp, src, dst)
assert_allclose(patches, expected)
def test_crop_by_boxes_resizing(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
src = torch.tensor([[
[1., 1.],
[2., 1.],
[2., 2.],
[1., 2.],
]], device=device, dtype=dtype) # 1x4x2
dst = torch.tensor([[
[0., 0.],
[2., 0.],
[2., 1.],
[0., 1.],
]], device=device, dtype=dtype) # 1x4x2
expected = torch.tensor([[[
[6., 6.5, 7.],
[10., 10.5, 11.],
]]], device=device, dtype=dtype)
patches = kornia.geometry.transform.crop.crop_by_boxes(inp, src, dst)
assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)
def test_gradcheck(self, device, dtype):
inp = torch.randn((1, 1, 3, 3), device=device, dtype=dtype)
src = torch.tensor([[
[1., 0.],
[2., 0.],
[2., 1.],
[1., 1.]]], device=device, dtype=dtype)
dst = torch.tensor([[
[0., 0.],
[1., 0.],
[1., 1.],
[0., 1.]]], device=device, dtype=dtype)
inp = utils.tensor_to_gradcheck_var(inp, requires_grad=True) # to var
assert gradcheck(kornia.geometry.transform.crop.crop_by_boxes,
(inp, src, dst,),
raise_exception=True)
class TestCropByTransform:
def test_crop_by_transform_no_resizing(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
transform = torch.tensor([[
[1., 0., -1.],
[0., 1., -1.],
[0., 0., 1.],
]], device=device, dtype=dtype) # 1x3x3
expected = torch.tensor([[[
[6., 7.],
[10., 11.],
]]], device=device, dtype=dtype)
patches = kornia.geometry.transform.crop.crop_by_transform_mat(inp, transform, (2, 2))
assert_allclose(patches, expected)
def test_crop_by_boxes_resizing(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
transform = torch.tensor([[
[2., 0., -2.],
[0., 1., -1.],
[0., 0., 1.],
]], device=device, dtype=dtype) # 1x3x3
expected = torch.tensor([[[
[6., 6.5, 7.],
[10., 10.5, 11.],
]]], device=device, dtype=dtype)
patches = kornia.geometry.transform.crop.crop_by_transform_mat(inp, transform, (2, 3))
assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)
def test_gradcheck(self, device, dtype):
inp = torch.randn((1, 1, 3, 3), device=device, dtype=dtype)
transform = torch.tensor([[
[2., 0., -2.],
[0., 1., -1.],
[0., 0., 1.],
]], device=device, dtype=dtype) # 1x3x3
inp = utils.tensor_to_gradcheck_var(inp, requires_grad=True) # to var
assert gradcheck(kornia.geometry.transform.crop.crop_by_transform_mat,
(inp, transform, (2, 2),),
raise_exception=True)
|
class Solution:
def rob(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
        if len(nums) == 1: return nums[0]  # The loop below doesn't work for a single house; run two DP passes, one starting at the first house and one ending at the second-to-last.
last, now = 0, 0
last1, now1 = 0, 0
for i, n in enumerate(nums):
if i<len(nums)-1:
last, now = now, max(n+last,now)
print(now, last)
if i>0:
last1, now1 = now1, max(n+last1,now1)
return max(now,now1)
class Solution:
def rob(self, nums):
def rob(nums):
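            # Linear House Robber DP: "now" is the best loot up to the current house, "prev" up to the previous one.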
now = prev = 0
for n in nums:
now, prev = max(now, prev + n), now
return now
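        # On a circular street the first and last house are adjacent, so rob either nums[1:] or nums[:-1]; the len(nums) != 1 guard keeps a lone house in the first slice.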
return max(rob(nums[len(nums) != 1:]), rob(nums[:-1]))
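# Hypothetical quick check (not part of the original snippet); the houses form a
# circle, so the first and last house can never both be robbed.
if __name__ == "__main__":
    print(Solution().rob([2, 3, 2]))     # 3
    print(Solution().rob([1, 2, 3, 1]))  # 4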
|
# Copyright (c) 2012-2013, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from aws import Action as BaseAction
from aws import BaseARN
service_name = 'Amazon Managed Service for Prometheus'
prefix = 'aps'
class Action(BaseAction):
def __init__(self, action=None):
sup = super(Action, self)
sup.__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource='', region='', account=''):
sup = super(ARN, self)
sup.__init__(service=prefix, resource=resource, region=region,
account=account)
CreateWorkspace = Action('CreateWorkspace')
DeleteWorkspace = Action('DeleteWorkspace')
DescribeWorkspace = Action('DescribeWorkspace')
GetLabels = Action('GetLabels')
GetMetricMetadata = Action('GetMetricMetadata')
GetSeries = Action('GetSeries')
ListWorkspaces = Action('ListWorkspaces')
QueryMetrics = Action('QueryMetrics')
RemoteWrite = Action('RemoteWrite')
UpdateWorkspaceAlias = Action('UpdateWorkspaceAlias')
|
import numpy as np
import struct
LAYER_DENSE = 1
LAYER_CONVOLUTION2D = 2
LAYER_FLATTEN = 3
LAYER_ELU = 4
LAYER_ACTIVATION = 5
LAYER_MAXPOOLING2D = 6
LAYER_LSTM = 7
LAYER_EMBEDDING = 8
ACTIVATION_LINEAR = 1
ACTIVATION_RELU = 2
ACTIVATION_SOFTPLUS = 3
ACTIVATION_SIGMOID = 4
ACTIVATION_TANH = 5
ACTIVATION_HARD_SIGMOID = 6
def write_floats(file, floats):
'''
    Writes floats to the file in chunks of 1024; this prevents a memory explosion
    when writing very large arrays to disk with struct.pack().
'''
step = 1024
written = 0
for i in np.arange(0, len(floats), step):
remaining = min(len(floats) - i, step)
written += remaining
file.write(struct.pack('=%sf' % remaining, *floats[i:i+remaining]))
assert written == len(floats)
def export_model(model, filename):
with open(filename, 'wb') as f:
def write_activation(activation):
if activation == 'linear':
f.write(struct.pack('I', ACTIVATION_LINEAR))
elif activation == 'relu':
f.write(struct.pack('I', ACTIVATION_RELU))
elif activation == 'softplus':
f.write(struct.pack('I', ACTIVATION_SOFTPLUS))
elif activation == 'tanh':
f.write(struct.pack('I', ACTIVATION_TANH))
elif activation == 'sigmoid':
f.write(struct.pack('I', ACTIVATION_SIGMOID))
elif activation == 'hard_sigmoid':
f.write(struct.pack('I', ACTIVATION_HARD_SIGMOID))
else:
assert False, "Unsupported activation type: %s" % activation
model_layers = [l for l in model.layers if type(l).__name__ not in ['Dropout']]
num_layers = len(model_layers)
f.write(struct.pack('I', num_layers))
for layer in model_layers:
layer_type = type(layer).__name__
if layer_type == 'Dense':
weights = layer.get_weights()[0]
biases = layer.get_weights()[1]
activation = layer.get_config()['activation']
f.write(struct.pack('I', LAYER_DENSE))
f.write(struct.pack('I', weights.shape[0]))
f.write(struct.pack('I', weights.shape[1]))
f.write(struct.pack('I', biases.shape[0]))
weights = weights.flatten()
biases = biases.flatten()
write_floats(f, weights)
write_floats(f, biases)
write_activation(activation)
elif layer_type == 'Convolution2D':
assert layer.border_mode == 'valid', "Only border_mode=valid is implemented"
weights = layer.get_weights()[0]
biases = layer.get_weights()[1]
activation = layer.get_config()['activation']
# The kernel is accessed in reverse order. To simplify the C side we'll
# flip the weight matrix for each kernel.
weights = weights[:,:,::-1,::-1]
f.write(struct.pack('I', LAYER_CONVOLUTION2D))
f.write(struct.pack('I', weights.shape[0]))
f.write(struct.pack('I', weights.shape[1]))
f.write(struct.pack('I', weights.shape[2]))
f.write(struct.pack('I', weights.shape[3]))
f.write(struct.pack('I', biases.shape[0]))
weights = weights.flatten()
biases = biases.flatten()
write_floats(f, weights)
write_floats(f, biases)
write_activation(activation)
elif layer_type == 'Flatten':
f.write(struct.pack('I', LAYER_FLATTEN))
elif layer_type == 'ELU':
f.write(struct.pack('I', LAYER_ELU))
f.write(struct.pack('f', layer.alpha))
elif layer_type == 'Activation':
activation = layer.get_config()['activation']
f.write(struct.pack('I', LAYER_ACTIVATION))
write_activation(activation)
elif layer_type == 'MaxPooling2D':
assert layer.border_mode == 'valid', "Only border_mode=valid is implemented"
pool_size = layer.get_config()['pool_size']
f.write(struct.pack('I', LAYER_MAXPOOLING2D))
f.write(struct.pack('I', pool_size[0]))
f.write(struct.pack('I', pool_size[1]))
elif layer_type == 'LSTM':
inner_activation = layer.get_config()['inner_activation']
activation = layer.get_config()['activation']
return_sequences = int(layer.get_config()['return_sequences'])
weights = layer.get_weights()
W_i = weights[0]
U_i = weights[1]
b_i = weights[2]
W_c = weights[3]
U_c = weights[4]
b_c = weights[5]
W_f = weights[6]
U_f = weights[7]
b_f = weights[8]
W_o = weights[9]
U_o = weights[10]
b_o = weights[11]
f.write(struct.pack('I', LAYER_LSTM))
f.write(struct.pack('I', W_i.shape[0]))
f.write(struct.pack('I', W_i.shape[1]))
f.write(struct.pack('I', U_i.shape[0]))
f.write(struct.pack('I', U_i.shape[1]))
f.write(struct.pack('I', b_i.shape[0]))
f.write(struct.pack('I', W_f.shape[0]))
f.write(struct.pack('I', W_f.shape[1]))
f.write(struct.pack('I', U_f.shape[0]))
f.write(struct.pack('I', U_f.shape[1]))
f.write(struct.pack('I', b_f.shape[0]))
f.write(struct.pack('I', W_c.shape[0]))
f.write(struct.pack('I', W_c.shape[1]))
f.write(struct.pack('I', U_c.shape[0]))
f.write(struct.pack('I', U_c.shape[1]))
f.write(struct.pack('I', b_c.shape[0]))
f.write(struct.pack('I', W_o.shape[0]))
f.write(struct.pack('I', W_o.shape[1]))
f.write(struct.pack('I', U_o.shape[0]))
f.write(struct.pack('I', U_o.shape[1]))
f.write(struct.pack('I', b_o.shape[0]))
W_i = W_i.flatten()
U_i = U_i.flatten()
b_i = b_i.flatten()
W_f = W_f.flatten()
U_f = U_f.flatten()
b_f = b_f.flatten()
W_c = W_c.flatten()
U_c = U_c.flatten()
b_c = b_c.flatten()
W_o = W_o.flatten()
U_o = U_o.flatten()
b_o = b_o.flatten()
write_floats(f, W_i)
write_floats(f, U_i)
write_floats(f, b_i)
write_floats(f, W_f)
write_floats(f, U_f)
write_floats(f, b_f)
write_floats(f, W_c)
write_floats(f, U_c)
write_floats(f, b_c)
write_floats(f, W_o)
write_floats(f, U_o)
write_floats(f, b_o)
write_activation(inner_activation)
write_activation(activation)
f.write(struct.pack('I', return_sequences))
elif layer_type == 'Embedding':
weights = layer.get_weights()[0]
f.write(struct.pack('I', LAYER_EMBEDDING))
f.write(struct.pack('I', weights.shape[0]))
f.write(struct.pack('I', weights.shape[1]))
weights = weights.flatten()
write_floats(f, weights)
else:
assert False, "Unsupported layer type: %s" % layer_type
|
import sqlalchemy
from functools import partial
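# Wraps a synchronous SQLAlchemy engine so that connections handed out by its pool
# also behave as async context managers backed by a driver-specific asyncio pool
# (asyncpg, aioodbc, or aiomysql).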
async def create_engine(*args, **kwargs):
engine = sqlalchemy.create_engine(*args, **kwargs)
if engine.driver == "psycopg2":
import asyncpg
p = await asyncpg.create_pool(str(engine.url))
elif engine.driver == "pyodbc":
import aioodbc
p = await aioodbc.create_pool(**engine.url.translate_connect_args())
elif engine.driver == "mysqldb":
import aiomysql
        p = await aiomysql.create_pool(**engine.url.translate_connect_args())
else:
p = engine.pool
old_creator = engine.pool._creator
def creator(*a, **kw):
result = old_creator(*a, **kw)
async def aenter(self):
self._async_conn = p.acquire()
return await self._async_conn.__aenter__()
        async def aexit(self, *exc_info):
            return await self._async_conn.__aexit__(*exc_info)
        result.__aenter__ = partial(aenter, result)
        result.__aexit__ = partial(aexit, result)
        return result
    engine.pool._creator = creator
return engine
|
from cfn_datadog import Timeboard, Graph, TemplateVariable, Definition, Request
from troposphere import Parameter, Template, Join, ImportValue, Sub
t = Template()
datadog_lambda_stackname = t.add_parameter(Parameter(
"DatadogLambdaStackname",
Type="String",
Description="Stack name of cfn-datadog"
))
time_board_arn = ImportValue(Sub("${DatadogLambdaStackname}-TimeboardDatadogLambdaArn"))
t.add_resource(Timeboard(
'ExampleTimeBoard',
ServiceToken=time_board_arn,
TimeboardTitle="Automated Datadog Test Board",
description="Automated Datadog timeboard created through Cloudformation",
graphs=[Graph(
GraphTitle="Example graph",
definition=Definition(
events=[],
requests=[Request(
q="avg:system.mem.free{*}"
)],
viz="timeseries"
)
), Graph(
GraphTitle="Example graph 2",
definition=Definition(
events=[],
requests=[Request(
q="avg:system.mem.free{*}"
)],
viz="timeseries"
)
),
],
template_variables=[TemplateVariable(
name="host1",
prefix="host",
default="host:my-host"
)],
read_only=True
))
print(t.to_json())
|
import logging
from PyQt5.QtWidgets import *
import envi.qt.memory as e_mem_qt
import envi.qt.memcanvas as e_mem_canvas
import vqt.hotkeys as vq_hotkey
import vivisect.base as viv_base
import vivisect.renderers as viv_rend
import vivisect.qt.views as viv_q_views
import vivisect.qt.ctxmenu as viv_q_ctxmenu
from vqt.main import *
from vivisect.const import *
logger = logging.getLogger(__name__)
# FIXME HACK where do these really live?
qt_horizontal = 1
qt_vertical = 2
class VivCanvasBase(vq_hotkey.HotKeyMixin, e_mem_canvas.VQMemoryCanvas):
def __init__(self, *args, **kwargs):
e_mem_canvas.VQMemoryCanvas.__init__(self, *args, **kwargs)
vq_hotkey.HotKeyMixin.__init__(self)
self.vw = self.mem
self._last_sname = None
self.addHotKey('c', 'viv:make:code')
self.addHotKey('f', 'viv:make:function')
self.addHotKey('s', 'viv:make:string')
self.addHotKey('p', 'viv:make:pointer')
self.addHotKey('u', 'viv:make:unicode')
self.addHotKey('n', 'viv:setname')
self.addHotKey('g', 'viv:getlocation')
self.addHotKey(';', 'viv:comment')
self.addHotKey('S', 'viv:make:struct')
self.addHotKey('ctrl+S', 'viv:make:struct:again')
self.addHotKey('ctrl+meta+S', 'viv:make:struct:multi')
self.addHotKey('U', 'viv:undefine')
self.addHotKey('ctrl+p', 'viv:preview:instr')
self.addHotKey('B', 'viv:bookmark')
self.addHotKey('ctrl+1', 'viv:make:number:one')
self.addHotKey('ctrl+2', 'viv:make:number:two')
self.addHotKey('ctrl+4', 'viv:make:number:four')
self.addHotKey('ctrl+6', 'viv:make:number:sixteen')
self.addHotKey('ctrl+8', 'viv:make:number:eight')
self.addHotKey('down', 'viv:nav:nextva')
self.addHotKey('up', 'viv:nav:prevva')
self.addHotKey('ctrl+down', 'viv:nav:nextundef')
self.addHotKey('ctrl+up', 'viv:nav:prevundef')
self.loadHotKeys(self.vw._viv_gui._vq_settings)
# All extenders must implement vivColorMap
vqtconnect(self.vivColorMap, 'viv:colormap')
def vivColorMap(self, event, einfo):
self._applyColorMap(einfo)
def _applyColorMap(self, cmap):
page = self.page()
inner = ''
for va, color in cmap.items():
inner += '.envi-va-0x%.8x { color: #000000; background-color: %s }\n' % (va, color)
js = 'var node = document.querySelector("#cmapstyle"); node.innerHTML = `%s`;' % inner
page.runJavaScript(js)
@vq_hotkey.hotkey('viv:nav:nextva')
def _hotkey_nav_nextva(self):
if self._canv_curva is None:
return
loc = self.vw.getLocation(self._canv_curva)
if loc is None:
loc = (self._canv_curva, 1, None, None)
nextva = loc[0] + loc[1]
self._selectVa(nextva)
@vq_hotkey.hotkey('viv:nav:prevva')
def _hotkey_nav_prevva(self):
if self._canv_curva is None:
return
loc = self.vw.getPrevLocation(self._canv_curva)
if loc is None:
loc = (self._canv_curva - 1, 1, None, None)
self._selectVa(loc[0])
@vq_hotkey.hotkey('viv:nav:nextundef')
def _hotkey_nav_nextundef(self):
if self._canv_curva is None:
return
vw = self.vw
va = self._canv_curva
loc = vw.getLocation(va)
if loc is None:
# find next defined location
while loc is None and vw.isValidPointer(va):
va += 1
loc = vw.getLocation(va)
va -= 1
lastloc = (va, 1, 0, 0)
else:
# find next undefined location
while loc is not None:
va = loc[0]
lastloc = loc
loc = vw.getLocation(va + loc[1])
# if we didn't fall off the map
if vw.isValidPointer(va+lastloc[1]):
va += lastloc[1]
self._navExpression(hex(va))
self._selectVa(va)
@vq_hotkey.hotkey('viv:nav:prevundef')
def _hotkey_nav_prevundef(self):
if self._canv_curva is None:
return
vw = self.vw
va = self._canv_curva
loc = vw.getLocation(va)
if loc is None:
# find previous defined location
while loc is None and vw.isValidPointer(va):
va -= 1
loc = vw.getLocation(va)
if loc is not None:
va = loc[0]
else:
# find previous undefined location
while loc is not None:
va = loc[0]
loc = vw.getLocation(va-1)
# if we fell off the end of a map
if vw.isValidPointer(va-1):
va -= 1
self._navExpression(hex(va))
self._selectVa(va)
@vq_hotkey.hotkey('viv:make:code')
def _hotkey_make_code(self):
if self._canv_curva is not None:
self.vw.makeCode(self._canv_curva)
@vq_hotkey.hotkey('viv:make:function')
def _hotkey_make_function(self):
if self._canv_curva is not None:
logger.debug('new function (manual): 0x%x', self._canv_curva)
self.vw.makeFunction(self._canv_curva)
@vq_hotkey.hotkey('viv:make:string')
def _hotkey_make_string(self):
if self._canv_curva is not None:
self.vw.makeString(self._canv_curva)
@vq_hotkey.hotkey('viv:make:pointer')
def _hotkey_make_pointer(self):
if self._canv_curva is not None:
self.vw.makePointer(self._canv_curva)
@vq_hotkey.hotkey('viv:make:unicode')
def _hotkey_make_unicode(self):
if self._canv_curva is not None:
self.vw.makeUnicode(self._canv_curva)
@vq_hotkey.hotkey('viv:undefine')
def _hotkey_undefine(self):
if self._canv_curva is not None:
self.vw.delLocation(self._canv_curva)
@vq_hotkey.hotkey('viv:getlocation')
def _hotkey_getlocation(self):
if self._canv_curva is not None:
self.vw.getVivGui().getLocation(self._canv_curva)
@vq_hotkey.hotkey('viv:setname')
def _hotkey_setname(self):
if self._canv_curva is not None:
self.vw.getVivGui().setVaName(self._canv_curva, parent=self)
@vq_hotkey.hotkey('viv:bookmark')
def _hotkey_bookmark(self):
if self._canv_curva is not None:
self.vw.getVivGui().addBookmark(self._canv_curva, parent=self)
@vq_hotkey.hotkey('viv:comment')
def _hotkey_comment(self):
if self._canv_curva is not None:
self.vw.getVivGui().setVaComment(self._canv_curva, parent=self)
@vq_hotkey.hotkey('viv:make:struct')
def _hotkey_make_struct(self):
if self._canv_curva is not None:
sname = self.vw.getVivGui().makeStruct(self._canv_curva)
if sname is not None:
self._last_sname = sname
@vq_hotkey.hotkey('viv:make:struct:again')
def _hotkey_make_struct_again(self):
if self._canv_curva is not None:
if self._last_sname is not None:
self.vw.makeStructure(self._canv_curva, self._last_sname)
@vq_hotkey.hotkey('viv:make:struct:multi')
def _hotkey_make_struct_multi(self, parent=None):
if self._canv_curva is not None:
if self._last_sname is not None:
number, ok = QInputDialog.getText(parent, 'Make Multiple Consecutive Structs', 'Number of Structures')
if ok:
curva = self._canv_curva
number = int(str(number), 0)
for count in range(number):
vs = self.vw.makeStructure(curva, self._last_sname)
curva += len(vs)
def makeStructAgainMulti(self, va, parent=None):
if parent is None:
parent = self
curcomment = self.vw.getComment(va)
if curcomment is None:
curcomment = ''
comment, ok = QInputDialog.getText(parent, 'Enter...', 'Comment', text=curcomment)
if ok:
self.vw.setComment(va, str(comment))
@vq_hotkey.hotkey('viv:make:number:one')
def _hotkey_make_number_one(self):
if self._canv_curva is not None:
self.vw.makeNumber(self._canv_curva, 1)
@vq_hotkey.hotkey('viv:make:number:two')
def _hotkey_make_number_two(self):
if self._canv_curva is not None:
self.vw.makeNumber(self._canv_curva, 2)
@vq_hotkey.hotkey('viv:make:number:four')
def _hotkey_make_number_four(self):
if self._canv_curva is not None:
self.vw.makeNumber(self._canv_curva, 4)
@vq_hotkey.hotkey('viv:make:number:eight')
def _hotkey_make_number_eight(self):
if self._canv_curva is not None:
self.vw.makeNumber(self._canv_curva, 8)
@vq_hotkey.hotkey('viv:make:number:sixteen')
def _hotkey_make_number_sixteen(self):
if self._canv_curva is not None:
self.vw.makeNumber(self._canv_curva, 16)
@vq_hotkey.hotkey('viv:preview:instr')
def _hotkey_preview_instr(self):
if self._canv_curva is not None:
self.vw.previewCode(self._canv_curva)
def getVaTag(self, va):
loc = self.mem.getLocation(va)
if loc is not None:
va = loc[L_VA]
return e_mem_canvas.VQMemoryCanvas.getVaTag(self, va)
class VQVivMemoryCanvas(VivCanvasBase):
def _wheelEventCallback(self, data):
'''
Ugh. Yes. I know this sucks.
        But we have to do this because QtWebEngine doesn't natively let you get the max scroll size.
You *have* to go through javascript to get those elements, and the only way to be sure of
the function finishing (and being able to get a value outta js) is via this callback
mechanism they set up.
'''
smin = data[0]
spos = data[1]
smax = data[2]
if not len(self._canv_rendvas):
pass
elif spos >= smax:
lastva, lastsize = self._canv_rendvas[-1]
mapva, mapsize, mperm, mfname = self.vw.getMemoryMap(lastva)
sizeremain = (mapva + mapsize) - (lastva + lastsize)
if sizeremain:
self.renderMemoryAppend(min(sizeremain, 128))
elif spos == smin:
firstva, firstsize = self._canv_rendvas[0]
mapva, mapsize, mperm, mfname = self.vw.getMemoryMap(firstva)
sizeremain = firstva - mapva
if sizeremain:
self.renderMemoryPrepend(min(sizeremain, 128))
def wheelEvent(self, event):
page = self.page()
page.runJavaScript('''
var pcur = window.innerHeight + window.pageYOffset
var scrollMaxY = Math.max(
document.body.scrollHeight, document.documentElement.scrollHeight,
document.body.offsetHeight, document.documentElement.offsetHeight,
document.body.clientHeight, document.documentElement.clientHeight,
);
[window.innerHeight, pcur, scrollMaxY];
''', self._wheelEventCallback)
return e_mem_canvas.VQMemoryCanvas.wheelEvent(self, event)
def _clearColorMap(self):
page = self.page()
page.runJavaScript('var node = document.querySelector("#cmapstyle"); node.innerHTML = "";')
def _navExpression(self, expr):
if self._canv_navcallback:
self._canv_navcallback(expr)
def initMemWindowMenu(self, va, menu):
nav = self.parent() # our parent is always a VQVivMemoryWindow (nav target)
viv_q_ctxmenu.buildContextMenu(self.vw, va=va, menu=menu, nav=nav)
def _loc_helper(self, va):
'''
we assume we're being handed a valid va since renderMemory checks for valid MemoryMap
'''
nloc = self.mem.getLocation(va)
if nloc is None:
return va, 0
nva, nvsz, nvt, nvti = nloc
return (nva, va-nva)
class VQVivMemoryView(e_mem_qt.VQMemoryWindow, viv_base.VivEventCore):
def __init__(self, vw, vwqgui):
self.vw = vw
self.vwqgui = vwqgui
self._leading = False
self._following = None
self._follow_menu = None # init'd in handler below
e_mem_qt.VQMemoryWindow.__init__(self, vw, syms=vw, parent=vwqgui, mwname='viv')
viv_base.VivEventCore.__init__(self, vw)
vwqgui.addEventCore(self)
self.mem_canvas._canv_rend_middle = True
self.addHotKeyTarget('viv:xrefsto', self._viv_xrefsto)
self.addHotKey('x', 'viv:xrefsto')
def getRendToolsMenu(self):
menu = e_mem_qt.VQMemoryWindow.getRendToolsMenu(self)
if self.vw.server:
leadact = QAction('lead', menu, checkable=True)
def leadToggle():
self._leading = not self._leading
# We can only follow if not leading... (deep huh? ;) )
self._follow_menu.setEnabled(not self._leading)
if self._leading:
self._following = None
self.vw.iAmLeader(self.mwname)
self.updateMemWindowTitle()
def clearFollow():
self._following = None
self.updateMemWindowTitle()
leadact.toggled.connect(leadToggle)
menu.addAction(leadact)
self._follow_menu = menu.addMenu('Follow..')
self._follow_menu.addAction('(disable)', clearFollow)
return menu
def getExprTitle(self):
title = str(self.addr_entry.text())
try:
va = self.vw.parseExpression(title)
name = self.vw.getName(va)
if name is not None:
title = name
except Exception:
title = 'expr error'
if self._leading:
title += ' (leading)'
if self._following is not None:
user, window = self._following
title += ' (following %s %s)' % (user, window)
return title
def _getRenderVaSize(self):
'''
Vivisect steps in and attempts to map to locations when they exist.
since we have a location database, let's use that to make sure we get a
real location if it exists. otherwise, we end up in no-man's land,
since we rely on labels, which only exist for the base of a location.
'''
addr, size = e_mem_qt.VQMemoryWindow._getRenderVaSize(self)
if addr is None:
return addr, size
loc = self.vw.getLocation(addr)
if loc is None:
return addr, size
return loc[L_VA], size
def initMemoryCanvas(self, memobj, syms=None):
return VQVivMemoryCanvas(memobj, syms=syms, parent=self)
def _viv_xrefsto(self):
if self.mem_canvas._canv_curva is not None:
xrefs = self.vw.getXrefsTo(self.mem_canvas._canv_curva)
if len(xrefs) == 0:
self.vw.vprint('No xrefs found!')
return
title = 'Xrefs To: 0x%.8x' % self.mem_canvas._canv_curva
view = viv_q_views.VQXrefView(self.vw, self.vwqgui, xrefs=xrefs, title=title)
dock = self.vwqgui.vqDockWidget(view, floating=True)
dock.resize(800, 600)
def loadDefaultRenderers(self):
import envi.memcanvas.renderers as e_render
# FIXME check endianness
self.mem_canvas.addRenderer("bytes", e_render.ByteRend())
self.mem_canvas.addRenderer("u_int_16", e_render.ShortRend())
self.mem_canvas.addRenderer("u_int_32", e_render.LongRend())
self.mem_canvas.addRenderer("u_int_64", e_render.QuadRend())
vivrend = viv_rend.WorkspaceRenderer(self.vw)
self.mem_canvas.addRenderer('Viv', vivrend)
self.mem_canvas.setRenderer('Viv')
def _updateFunction(self, fva):
for cbva, cbsize, cbfva in self.vw.getFunctionBlocks(fva):
self.mem_canvas.renderMemoryUpdate(cbva, cbsize)
def VWE_SYMHINT(self, vw, event, einfo):
va, idx, hint = einfo
self.mem_canvas.renderMemoryUpdate(va, 1)
def VWE_ADDLOCATION(self, vw, event, einfo):
va, size, ltype, tinfo = einfo
self.mem_canvas.renderMemoryUpdate(va, size)
def VWE_DELLOCATION(self, vw, event, einfo):
va, size, ltype, tinfo = einfo
self.mem_canvas.renderMemoryUpdate(va, size)
def VWE_ADDFUNCTION(self, vw, event, einfo):
va, meta = einfo
self.mem_canvas.renderMemoryUpdate(va, 1)
def VWE_SETFUNCMETA(self, vw, event, einfo):
fva, key, val = einfo
self._updateFunction(fva)
def VWE_SETFUNCARGS(self, vw, event, einfo):
fva, fargs = einfo
self._updateFunction(fva)
def VWE_COMMENT(self, vw, event, einfo):
va, cmnt = einfo
self.mem_canvas.renderMemoryUpdate(va, 1)
@idlethread
def VWE_SETNAME(self, vw, event, einfo):
va, name = einfo
self.mem_canvas.renderMemoryUpdate(va, 1)
for fromva, tova, rtype, rflag in self.vw.getXrefsTo(va):
self.mem_canvas.renderMemoryUpdate(fromva, 1)
@idlethread
def VTE_IAMLEADER(self, vw, event, einfo):
user, fname = einfo
def setFollow():
self._following = einfo
self.updateMemWindowTitle()
self._follow_menu.addAction('%s - %s' % (user, fname), setFollow)
@idlethread
def VTE_FOLLOWME(self, vw, event, einfo):
user, fname, expr = einfo
if self._following != (user, fname):
return
self.enviNavGoto(expr)
@idlethread
def enviNavGoto(self, expr, sizeexpr='256', rend=''):
if self._leading:
self.vw.followTheLeader(str(self.mwname), str(expr))
return e_mem_qt.VQMemoryWindow.enviNavGoto(self, expr, sizeexpr=sizeexpr, rend=rend)
|