#!/usr/bin/env python3
import sys
import yaml
def main():
args = sys.argv[1:]
    # Open the file argument if given; otherwise read the YAML from stdin.
    stream = open(args[0]) if args else sys.stdin
    data = yaml.safe_load(stream)
join_args = data['Fn::Join']
contents = join_args[0].join(join_args[1])
print(contents, end='')
if __name__ == '__main__':
sys.exit(main())
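# Usage sketch (illustrative; the file name is made up): given join.yml with
#   Fn::Join: ["-", ["a", "b", "c"]]
# running `python fn_join.py join.yml` prints "a-b-c" without a trailing
# newline; with no argument the YAML document is read from stdin instead.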
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# class for windows getch
class _GetchWindows:
def __init__(self):
import msvcrt
def __call__(self):
import msvcrt
return msvcrt.getch()
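# Illustrative POSIX counterpart (an addition, not part of the original
# snippet): the classic recipe reads one raw character via tty/termios.
# Note that msvcrt.getch() above returns a bytes object under Python 3.
class _GetchUnix:
    def __call__(self):
        import sys, tty, termios
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch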
getch = _GetchWindows()
# print instruction
print ("Please enter something: ")
# read user input and save in into x
x = getch()
# print user input saved in x
print(x)
|
import os
from django.conf import settings
from main.tests.test_base import MainTestCase
from odk_viewer.models import ParsedInstance
from odk_viewer.management.commands.remongo import Command
from django.core.management import call_command
from common_tags import USERFORM_ID
class TestRemongo(MainTestCase):
def test_remongo_in_batches(self):
self._publish_transportation_form()
# submit 4 instances
self._make_submissions()
self.assertEqual(ParsedInstance.objects.count(), 4)
# clear mongo
settings.MONGO_DB.instances.drop()
c = Command()
c.handle(batchsize=3)
        # mongo db should now have all 4 records restored
count = settings.MONGO_DB.instances.count()
self.assertEqual(count, 4)
def test_remongo_with_username_id_string(self):
self._publish_transportation_form()
        # submit 1 instance
s = self.surveys[0]
self._make_submission(os.path.join(self.this_directory, 'fixtures',
'transportation', 'instances', s, s + '.xml'))
# publish and submit for a different user
self._logout()
self._create_user_and_login("harry", "harry")
self._publish_transportation_form()
s = self.surveys[1]
self._make_submission(os.path.join(self.this_directory, 'fixtures',
'transportation', 'instances', s, s + '.xml'))
self.assertEqual(ParsedInstance.objects.count(), 2)
# clear mongo
settings.MONGO_DB.instances.drop()
c = Command()
c.handle(batchsize=3, username=self.user.username,
id_string=self.xform.id_string)
        # mongo should now have only this user's 1 record
count = settings.MONGO_DB.instances.count()
self.assertEqual(count, 1)
def test_indexes_exist(self):
"""
Make sure the required indexes are set, _userform_id as of now
"""
call_command('remongo')
        # the required indexes should already exist after running remongo
# list of indexes to check for
index_list = [USERFORM_ID]
# get index info
index_info = settings.MONGO_DB.instances.index_information()
# index_info looks like this - {u'_id_': {u'key': [(u'_id', 1)], u'v': 1}, u'_userform_id_1': {u'key': [(u'_userform_id', 1)], u'v': 1}}
        # let's make a list of the existing indexes
existing_indexes = [v['key'][0][0] for v in index_info.itervalues() if v['key'][0][1] == 1]
all_indexes_found = True
for index_item in index_list:
if index_item not in existing_indexes:
all_indexes_found = False
break
self.assertTrue(all_indexes_found)
def test_sync_mongo_with_all_option_deletes_existing_records(self):
self._publish_transportation_form()
userform_id = "%s_%s" % (self.user.username, self.xform.id_string)
initial_mongo_count = settings.MONGO_DB.instances.find(
{USERFORM_ID: userform_id}).count()
for i in range(len(self.surveys)):
self._submit_transport_instance(i)
mongo_count = settings.MONGO_DB.instances.find(
{USERFORM_ID: userform_id}).count()
# check our mongo count
self.assertEqual(mongo_count, initial_mongo_count + len(self.surveys))
# add dummy instance
settings.MONGO_DB.instances.save(
{"_id": 12345, "_userform_id": userform_id})
# make sure the dummy is returned as part of the forms mongo instances
mongo_count = settings.MONGO_DB.instances.find(
{USERFORM_ID: userform_id}).count()
self.assertEqual(mongo_count,
initial_mongo_count + len(self.surveys) + 1)
# call sync_mongo WITHOUT the all option
call_command("sync_mongo", remongo=True)
mongo_count = settings.MONGO_DB.instances.find(
{USERFORM_ID: userform_id}).count()
self.assertEqual(mongo_count,
initial_mongo_count + len(self.surveys) + 1)
# call sync_mongo WITH the all option
call_command("sync_mongo", remongo=True, update_all=True)
# check that we are back to just the submitted set
mongo_count = settings.MONGO_DB.instances.find(
{USERFORM_ID: userform_id}).count()
self.assertEqual(mongo_count,
initial_mongo_count + len(self.surveys))
|
from selenium import webdriver
link = "http://selenium1py.pythonanywhere.com/"
class TestMainPage1():
@classmethod
def setup_class(self):
print("\nstart browser for test suite..")
self.browser = webdriver.Chrome()
@classmethod
def teardown_class(self):
print("quit browser for test suite..")
self.browser.quit()
def test_guest_should_see_login_link(self):
self.browser.get(link)
self.browser.find_element_by_css_selector("#login_link")
def test_guest_should_see_basket_link_on_the_main_page(self):
self.browser.get(link)
self.browser.find_element_by_css_selector(
".basket-mini .btn-group > a")
class TestMainPage2():
def setup_method(self):
print("start browser for test..")
self.browser = webdriver.Chrome()
def teardown_method(self):
print("quit browser for test..")
self.browser.quit()
def test_guest_should_see_login_link(self):
self.browser.get(link)
self.browser.find_element_by_css_selector("#login_link")
def test_guest_should_see_basket_link_on_the_main_page(self):
self.browser.get(link)
self.browser.find_element_by_css_selector(
".basket-mini .btn-group > a")
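# Note (added; not part of the original tests): Selenium 4 removed the
# find_element_by_* helpers used above. The equivalent call is
#     from selenium.webdriver.common.by import By
#     browser.find_element(By.CSS_SELECTOR, "#login_link")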
|
import pytest
import numpy as np
import sklearn.linear_model
import sklearn.model_selection
import scipy.linalg
from himalaya.backend import set_backend
from himalaya.backend import ALL_BACKENDS
from himalaya.utils import assert_array_almost_equal
from himalaya.scoring import r2_score
from himalaya.kernel_ridge import solve_multiple_kernel_ridge_random_search
def _create_dataset(backend, n_targets=4):
n_featuress = (100, 200)
n_samples = 80
n_gammas = 3
Xs = [
backend.asarray(backend.randn(n_samples, n_features), backend.float64)
for n_features in n_featuress
]
Ks = backend.stack([X @ X.T for X in Xs])
ws = [
backend.asarray(backend.randn(n_features, n_targets), backend.float64)
for n_features in n_featuress
]
Ys = backend.stack([X @ w for X, w in zip(Xs, ws)])
Y = Ys.sum(0)
gammas = backend.asarray(backend.rand(n_gammas, Ks.shape[0]),
backend.float64)
gammas /= gammas.sum(1)[:, None]
return Ks, Y, gammas, Xs
@pytest.mark.parametrize('local_alpha', [True, False])
@pytest.mark.parametrize('backend', ALL_BACKENDS)
def test_solve_multiple_kernel_ridge_random_search_local_alpha(
backend, local_alpha):
_test_solve_multiple_kernel_ridge_random_search(backend=backend,
local_alpha=local_alpha)
@pytest.mark.parametrize('n_targets_batch', [None, 3])
@pytest.mark.parametrize('backend', ALL_BACKENDS)
def test_solve_multiple_kernel_ridge_random_search_n_targets_batch(
backend, n_targets_batch):
_test_solve_multiple_kernel_ridge_random_search(
backend=backend, n_targets_batch=n_targets_batch)
@pytest.mark.parametrize('n_alphas_batch', [None, 2])
@pytest.mark.parametrize('backend', ALL_BACKENDS)
def test_solve_multiple_kernel_ridge_random_search_n_alphas_batch(
backend, n_alphas_batch):
_test_solve_multiple_kernel_ridge_random_search(
backend=backend, n_alphas_batch=n_alphas_batch)
@pytest.mark.parametrize('return_weights', ['primal', 'dual'])
@pytest.mark.parametrize('backend', ALL_BACKENDS)
def test_solve_multiple_kernel_ridge_random_search_return_weights(
backend, return_weights):
_test_solve_multiple_kernel_ridge_random_search(
backend=backend, return_weights=return_weights)
@pytest.mark.parametrize('diagonalize_method', ['eigh', 'svd'])
@pytest.mark.parametrize('backend', ALL_BACKENDS)
def test_solve_multiple_kernel_ridge_random_search_diagonalize_method(
backend, diagonalize_method):
_test_solve_multiple_kernel_ridge_random_search(
backend=backend, diagonalize_method=diagonalize_method)
def _test_solve_multiple_kernel_ridge_random_search(
backend, n_targets_batch=None, n_alphas_batch=None,
return_weights="dual", diagonalize_method="eigh", local_alpha=True):
backend = set_backend(backend)
Ks, Y, gammas, Xs = _create_dataset(backend)
alphas = backend.asarray_like(backend.logspace(-3, 5, 9), Ks)
n_targets = Y.shape[1]
cv = sklearn.model_selection.check_cv(10)
############
# run solver
results = solve_multiple_kernel_ridge_random_search(
Ks, Y, n_iter=gammas, alphas=alphas, score_func=r2_score, cv=cv,
n_targets_batch=n_targets_batch, Xs=Xs, progress_bar=False,
return_weights=return_weights, n_alphas_batch=n_alphas_batch,
diagonalize_method=diagonalize_method, local_alpha=local_alpha)
best_deltas, refit_weights, cv_scores = results
#########################################
# compare with sklearn.linear_model.Ridge
if local_alpha: # only compare when each target optimizes alpha
test_scores = []
for gamma in backend.sqrt(gammas):
X = backend.concatenate([x * g for x, g in zip(Xs, gamma)], 1)
for train, test in cv.split(X):
for alpha in alphas:
model = sklearn.linear_model.Ridge(
alpha=backend.to_numpy(alpha), fit_intercept=False)
model = model.fit(backend.to_numpy(X[train]),
backend.to_numpy(Y[train]))
predictions = backend.asarray_like(
model.predict(backend.to_numpy(X[test])), Y)
test_scores.append(r2_score(Y[test], predictions))
test_scores = backend.stack(test_scores)
test_scores = test_scores.reshape(len(gammas), cv.get_n_splits(),
len(alphas), n_targets)
test_scores_mean = backend.max(test_scores.mean(1), 1)
assert_array_almost_equal(cv_scores, test_scores_mean, decimal=5)
######################
    # test refit_weights
for tt in range(n_targets):
gamma = backend.exp(best_deltas[:, tt])
alpha = 1.0
if return_weights == 'primal':
# compare primal weights with sklearn.linear_model.Ridge
X = backend.concatenate(
[X * backend.sqrt(g) for X, g in zip(Xs, gamma)], 1)
model = sklearn.linear_model.Ridge(fit_intercept=False,
alpha=backend.to_numpy(alpha))
w1 = model.fit(backend.to_numpy(X),
backend.to_numpy(Y[:, tt])).coef_
w1 = np.split(w1, np.cumsum([X.shape[1] for X in Xs][:-1]), axis=0)
w1 = [backend.asarray(w) for w in w1]
w1_scaled = backend.concatenate(
[w * backend.sqrt(g) for w, g, in zip(w1, gamma)])
assert_array_almost_equal(w1_scaled, refit_weights[:, tt],
decimal=5)
elif return_weights == 'dual':
# compare dual weights with scipy.linalg.solve
Ks_64 = backend.asarray(Ks, dtype=backend.float64)
gamma_64 = backend.asarray(gamma, dtype=backend.float64)
K = backend.matmul(Ks_64.T, gamma_64).T
reg = backend.asarray_like(np.eye(K.shape[0]), K) * alpha
Y_64 = backend.asarray(Y, dtype=backend.float64)
c1 = scipy.linalg.solve(backend.to_numpy(K + reg),
backend.to_numpy(Y_64[:, tt]))
c1 = backend.asarray_like(c1, K)
assert_array_almost_equal(c1, refit_weights[:, tt], decimal=5)
@pytest.mark.parametrize('backend', ALL_BACKENDS)
def test_solve_multiple_kernel_ridge_random_search_single_alpha_numpy(backend):
backend = set_backend(backend)
# just a smoke test, so make it minimal
Ks, Y, gammas, Xs = _create_dataset(backend)
alphas = 1.0
# make Y a numpy array
Y = backend.to_numpy(Y)
results = solve_multiple_kernel_ridge_random_search(
Ks, Y, n_iter=gammas, alphas=alphas
)
@pytest.mark.parametrize('backend', ALL_BACKENDS)
@pytest.mark.parametrize('n_kernels', [1, 2])
def test_solve_multiple_kernel_ridge_random_search_global_alpha(backend, n_kernels):
backend = set_backend(backend)
# add more targets to make sure we get some variability
Ks, Y, gammas, Xs = _create_dataset(backend, n_targets=20)
alphas = backend.asarray_like(backend.logspace(-3, 5, 9), Ks)
cv = sklearn.model_selection.check_cv(5)
deltas, *_, best_alphas = solve_multiple_kernel_ridge_random_search(
Ks[:n_kernels],
Y,
n_iter=50,
progress_bar=False,
alphas=alphas,
cv=cv,
local_alpha=False,
return_alphas=True
)
# test that we return a single combination of deltas
deltas = backend.to_numpy(deltas)
if deltas.ndim == 1:
assert np.allclose(deltas[0], deltas)
else:
for dd in deltas:
assert np.allclose(dd[0], dd)
# test that we return a single alpha
best_alphas = backend.to_numpy(best_alphas)
assert np.allclose(best_alphas[0], best_alphas)
|
# -*- coding: utf-8 -*-
"""Helper functions for getting resources."""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional
from urllib.request import urlretrieve
logger = logging.getLogger(__name__)
HERE = os.path.abspath(os.path.dirname(__file__))
DEFAULT_DIRECTORY = os.path.abspath(os.path.join(HERE, os.pardir, os.pardir, 'data'))
DATA_DIRECTORY = os.environ.get('REPOSITIONING_COMPARISON_DIRECTORY', DEFAULT_DIRECTORY)
# URLs from dhimmel/integrate
NODE_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/nodes.tsv'
EDGE_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/edges.sif.gz'
PERMUTATION1_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-1.json.bz2'
PERMUTATION2_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-2.json.bz2'
PERMUTATION3_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-3.json.bz2'
PERMUTATION4_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-4.json.bz2'
PERMUTATION5_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-5.json.bz2'
PERMUTATION_DATA_FILE_FMT = 'hetnet_perm-{}.json.bz2'
PERMUTATION_DATA_URL_FMT = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-{}.json.bz2'
# URLs from dhimmel/learn
TRANSFORMED_FEATURES_URL = 'https://github.com/dhimmel/learn/blob/master/prediction/features/features.tsv.bz2?raw=true'
VALIDATE_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/learn/master/validate/validation-statuses.tsv'
SYMPTOMATIC_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/learn/master/prediction/predictions/probabilities.tsv'
REPURPOSE_DATA_URL = 'https://raw.githubusercontent.com/drugrelink/drugrelink/master/notebooks/repurpose_overlap.json'
REPO_DATA_URL = 'https://raw.githubusercontent.com/drugrelink/drugrelink/master/notebooks/repo_data.csv'
@dataclass
class DataPaths:
"""Container for the paths for training."""
node_data_path: str
edge_data_path: str
transformed_features_path: str
validate_data_path: str
symptomatic_data_path: str
permutation_paths: List[str]
data_edge2vec_path: str
repurpose_data_path: str
repo_data_path: str
def get_data_paths(directory: Optional[str] = None) -> DataPaths:
"""Ensure Himmelstein's data files are downloaded."""
if directory is None:
directory = DATA_DIRECTORY
os.makedirs(directory, exist_ok=True)
node_data_path = os.path.join(directory, 'nodes.tsv')
if not os.path.exists(node_data_path):
logger.info(f'downloading {NODE_DATA_URL}')
urlretrieve(NODE_DATA_URL, node_data_path)
edge_data_path = os.path.join(directory, 'edges.sif.gz')
if not os.path.exists(edge_data_path):
logger.info(f'downloading {EDGE_DATA_URL}')
urlretrieve(EDGE_DATA_URL, edge_data_path)
transformed_features_path = os.path.join(directory, 'transformed-features.tsv.bz2')
if not os.path.exists(transformed_features_path):
logger.info(f'downloading {TRANSFORMED_FEATURES_URL}')
urlretrieve(TRANSFORMED_FEATURES_URL, transformed_features_path)
validate_data_path = os.path.join(directory, 'validation-statuses.tsv')
if not os.path.exists(validate_data_path):
logger.info(f'downloading {VALIDATE_DATA_URL}')
urlretrieve(VALIDATE_DATA_URL, validate_data_path)
symptomatic_data_path = os.path.join(directory, 'probabilities.tsv')
if not os.path.exists(symptomatic_data_path):
logger.info(f'downloading {SYMPTOMATIC_DATA_URL}')
urlretrieve(SYMPTOMATIC_DATA_URL, symptomatic_data_path)
repurpose_data_path = os.path.join(directory,'repurpose_overlap.json')
if not os.path.exists(repurpose_data_path):
logger.info(f'downloading {REPURPOSE_DATA_URL}')
urlretrieve(REPURPOSE_DATA_URL, repurpose_data_path)
repo_data_path = os.path.join(directory, 'repo_data.csv')
if not os.path.exists(repo_data_path):
logger.info(f'downloading {REPO_DATA_URL}')
urlretrieve(REPO_DATA_URL, repo_data_path)
permutation_directory = os.path.join(directory, "permutations")
os.makedirs(permutation_directory, exist_ok=True)
permutation_paths = []
for i in range(5):
permutation_data_path = os.path.join(permutation_directory, PERMUTATION_DATA_FILE_FMT.format(i + 1))
if not os.path.exists(permutation_data_path):
url = PERMUTATION_DATA_URL_FMT.format(i + 1)
logger.info(f'downloading {url}')
urlretrieve(url, permutation_data_path)
permutation_paths.append(permutation_data_path)
data_edge2vec_path = os.path.join(directory, 'data_edge2vec')
return DataPaths(
node_data_path=node_data_path,
edge_data_path=edge_data_path,
transformed_features_path=transformed_features_path,
validate_data_path=validate_data_path,
symptomatic_data_path=symptomatic_data_path,
permutation_paths=permutation_paths,
data_edge2vec_path=data_edge2vec_path,
repurpose_data_path = repurpose_data_path,
repo_data_path = repo_data_path
)
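# Illustrative usage (an addition, not in the original module): resolve every
# path, downloading any file that is not already cached under the directory.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    _paths = get_data_paths()
    logger.info('node data at %s', _paths.node_data_path)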
|
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic linux scsi subsystem and Multipath utilities.
Note, this is not iSCSI.
"""
import os
import re
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from cinder.brick import exception
from cinder.brick import executor
from cinder.i18n import _, _LW, _LE
from cinder.openstack.common import loopingcall
LOG = logging.getLogger(__name__)
MULTIPATH_ERROR_REGEX = re.compile(r"\w{3} \d+ \d\d:\d\d:\d\d \|.*$")
MULTIPATH_WWID_REGEX = re.compile(r"\((?P<wwid>.+)\)")
class LinuxSCSI(executor.Executor):
def __init__(self, root_helper, execute=putils.execute,
*args, **kwargs):
super(LinuxSCSI, self).__init__(root_helper, execute,
*args, **kwargs)
def echo_scsi_command(self, path, content):
"""Used to echo strings to scsi subsystem."""
args = ["-a", path]
kwargs = dict(process_input=content,
run_as_root=True,
root_helper=self._root_helper)
self._execute('tee', *args, **kwargs)
def get_name_from_path(self, path):
"""Translates /dev/disk/by-path/ entry to /dev/sdX."""
name = os.path.realpath(path)
if name.startswith("/dev/"):
return name
else:
return None
def remove_scsi_device(self, device):
"""Removes a scsi device based upon /dev/sdX name."""
path = "/sys/block/%s/device/delete" % device.replace("/dev/", "")
if os.path.exists(path):
# flush any outstanding IO first
self.flush_device_io(device)
LOG.debug("Remove SCSI device(%s) with %s" % (device, path))
self.echo_scsi_command(path, "1")
def wait_for_volume_removal(self, volume_path):
"""This is used to ensure that volumes are gone."""
def _wait_for_volume_removal(volume_path):
LOG.debug("Waiting for SCSI mount point %s to be removed.",
volume_path)
if os.path.exists(volume_path):
if self.tries >= self.scan_attempts:
msg = _LE("Exceeded the number of attempts to detect "
"volume removal.")
LOG.error(msg)
raise exception.VolumePathNotRemoved(
volume_path=volume_path)
LOG.debug("%(path)s still exists, rescanning. Try number: "
"%(tries)s",
{'path': volume_path, 'tries': self.tries})
self.tries = self.tries + 1
else:
LOG.debug("SCSI mount point %s has been removed.", volume_path)
raise loopingcall.LoopingCallDone()
# Setup a loop here to give the kernel time
# to remove the volume from /dev/disk/by-path/
self.tries = 0
self.scan_attempts = 3
timer = loopingcall.FixedIntervalLoopingCall(
_wait_for_volume_removal, volume_path)
timer.start(interval=2).wait()
def get_device_info(self, device):
(out, _err) = self._execute('sg_scan', device, run_as_root=True,
root_helper=self._root_helper)
dev_info = {'device': device, 'host': None,
'channel': None, 'id': None, 'lun': None}
if out:
line = out.strip()
line = line.replace(device + ": ", "")
info = line.split(" ")
for item in info:
if '=' in item:
pair = item.split('=')
dev_info[pair[0]] = pair[1]
elif 'scsi' in item:
dev_info['host'] = item.replace('scsi', '')
return dev_info
def remove_multipath_device(self, multipath_name):
"""This removes LUNs associated with a multipath device
and the multipath device itself.
"""
LOG.debug("remove multipath device %s" % multipath_name)
mpath_dev = self.find_multipath_device(multipath_name)
if mpath_dev:
devices = mpath_dev['devices']
LOG.debug("multipath LUNs to remove %s" % devices)
for device in devices:
self.remove_scsi_device(device['device'])
self.flush_multipath_device(mpath_dev['id'])
def flush_device_io(self, device):
"""This is used to flush any remaining IO in the buffers."""
try:
LOG.debug("Flushing IO for device %s" % device)
self._execute('blockdev', '--flushbufs', device, run_as_root=True,
root_helper=self._root_helper)
except putils.ProcessExecutionError as exc:
msg = _("Failed to flush IO buffers prior to removing"
" device: (%(code)s)") % {'code': exc.exit_code}
LOG.warn(msg)
def flush_multipath_device(self, device):
try:
LOG.debug("Flush multipath device %s" % device)
self._execute('multipath', '-f', device, run_as_root=True,
root_helper=self._root_helper)
except putils.ProcessExecutionError as exc:
LOG.warn(_LW("multipath call failed exit (%(code)s)")
% {'code': exc.exit_code})
def flush_multipath_devices(self):
try:
self._execute('multipath', '-F', run_as_root=True,
root_helper=self._root_helper)
except putils.ProcessExecutionError as exc:
LOG.warn(_LW("multipath call failed exit (%(code)s)")
% {'code': exc.exit_code})
def find_multipath_device(self, device):
"""Find a multipath device associated with a LUN device name.
device can be either a /dev/sdX entry or a multipath id.
"""
mdev = None
devices = []
out = None
try:
(out, _err) = self._execute('multipath', '-l', device,
run_as_root=True,
root_helper=self._root_helper)
except putils.ProcessExecutionError as exc:
LOG.warn(_LW("multipath call failed exit (%(code)s)")
% {'code': exc.exit_code})
return None
if out:
lines = out.strip()
lines = lines.split("\n")
lines = [line for line in lines
if not re.match(MULTIPATH_ERROR_REGEX, line)]
if lines:
# Use the device name, be it the WWID, mpathN or custom alias
# of a device to build the device path. This should be the
# first item on the first line of output from `multipath -l
                # ${path}` or `multipath -l ${wwid}`.
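                # Illustrative `multipath -l` output this parsing assumes
                # (an assumed example, not from the original source):
                #   mpatha (36005076801810523100000000000007a) dm-2 IBM,2145
                #   size=1.0G features='0' hwhandler='0' wp=rw
                #   `-+- policy='round-robin 0' prio=0 status=active
                #     |- 6:0:0:0 sdb 8:16 active undef running
                #     `- 6:0:0:1 sdc 8:32 active undef running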
mdev_name = lines[0].split(" ")[0]
mdev = '/dev/mapper/%s' % mdev_name
# Find the WWID for the LUN if we are using mpathN or aliases.
wwid_search = MULTIPATH_WWID_REGEX.search(lines[0])
if wwid_search is not None:
mdev_id = wwid_search.group('wwid')
else:
mdev_id = mdev_name
# Confirm that the device is present.
try:
os.stat(mdev)
except OSError:
LOG.warn(_LW("Couldn't find multipath device %s"), mdev)
return None
LOG.debug("Found multipath device = %(mdev)s"
% {'mdev': mdev})
device_lines = lines[3:]
for dev_line in device_lines:
if dev_line.find("policy") != -1:
continue
dev_line = dev_line.lstrip(' |-`')
dev_info = dev_line.split()
address = dev_info[0].split(":")
dev = {'device': '/dev/%s' % dev_info[1],
'host': address[0], 'channel': address[1],
'id': address[2], 'lun': address[3]
}
devices.append(dev)
if mdev is not None:
info = {"device": mdev,
"id": mdev_id,
"name": mdev_name,
"devices": devices}
return info
return None
|
import pytest
from django.conf import settings
from django.contrib import messages
from proposals.models import TalkProposal, TutorialProposal
pytestmark = pytest.mark.skipif(
not settings.PROPOSALS_WITHDRAWABLE,
reason='proposal withdrawal disabled',
)
def test_talk_proposal_cancel_login(client):
response = client.get('/en-us/proposals/talk/42/cancel/', follow=True)
assert response.redirect_chain == [
('/en-us/accounts/login/?next=/en-us/proposals/talk/42/cancel/', 302),
]
def test_tutorial_proposal_cancel_login(client):
response = client.get('/en-us/proposals/tutorial/42/cancel/', follow=True)
assert response.redirect_chain == [
('/en-us/accounts/login/?next=/en-us/proposals/tutorial/42/cancel/',
302),
]
@pytest.mark.parametrize('method', ['get', 'post'])
def test_talk_proposal_cancel_denied(bare_user_client, method):
response = getattr(bare_user_client, method)(
'/en-us/proposals/talk/42/cancel/',
)
assert response.status_code == 403
@pytest.mark.parametrize('method', ['get', 'post'])
def test_tutorial_proposal_cancel_denied(bare_user_client, method):
response = getattr(bare_user_client, method)(
'/en-us/proposals/tutorial/42/cancel/',
)
assert response.status_code == 403
def test_talk_proposal_cancel_get(agreed_user_client, talk_proposal):
"""The cancel view should not allow GET, only POST.
"""
response = agreed_user_client.get('/en-us/proposals/talk/42/cancel/')
assert response.status_code == 405
def test_tutorial_proposal_cancel_get(agreed_user_client, tutorial_proposal):
"""The cancel view should not allow GET, only POST.
"""
response = agreed_user_client.get('/en-us/proposals/tutorial/42/cancel/')
assert response.status_code == 405
def test_talk_proposal_cancel_not_owned(another_agreed_user_client, talk_proposal):
response = another_agreed_user_client.post('/en-us/proposals/talk/42/cancel/')
assert response.status_code == 404
def test_tutorial_proposal_cancel_not_owned(
another_agreed_user_client, tutorial_proposal):
response = another_agreed_user_client.post('/en-us/proposals/tutorial/42/cancel/')
assert response.status_code == 404
def test_talk_proposal_cancel(agreed_user_client, talk_proposal):
assert not talk_proposal.cancelled
response = agreed_user_client.post('/en-us/proposals/talk/42/cancel/', {
'cancelled': True,
}, follow=True)
assert response.redirect_chain == [('/en-us/dashboard/', 302)], (
response.context['form'].errors
)
assert TalkProposal.objects.get(pk=42).cancelled
msgs = [(m.level, m.message) for m in response.context['messages']]
assert msgs == [
(messages.INFO,
'Talk proposal '
'<strong>Beyond the Style Guides<br></strong> withdrawn.'),
]
def test_talk_proposal_reactivate(agreed_user_client, cancelled_talk_proposal):
assert cancelled_talk_proposal.cancelled
response = agreed_user_client.post('/en-us/proposals/talk/42/cancel/', {
'cancelled': '',
}, follow=True)
assert response.redirect_chain == [('/en-us/dashboard/', 302)], (
response.context['form'].errors
)
assert not TalkProposal.objects.get(pk=42).cancelled
msgs = [(m.level, m.message) for m in response.context['messages']]
assert msgs == [
(messages.SUCCESS,
'Talk proposal '
'<strong>Beyond the Style Guides<br></strong> reactivated.'),
]
def test_tutorial_proposal_cancel(agreed_user_client, tutorial_proposal):
assert not tutorial_proposal.cancelled
response = agreed_user_client.post('/en-us/proposals/tutorial/42/cancel/', {
'cancelled': True,
}, follow=True)
assert response.redirect_chain == [('/en-us/dashboard/', 302)], (
response.context['form'].errors
)
assert TutorialProposal.objects.get(pk=42).cancelled
msgs = [(m.level, m.message) for m in response.context['messages']]
assert msgs == [
(messages.INFO,
'Tutorial proposal '
'<strong>Beyond the Style Guides<br></strong> withdrawn.'),
]
def test_tutorial_proposal_reactivate(
agreed_user_client, cancelled_tutorial_proposal):
assert cancelled_tutorial_proposal.cancelled
response = agreed_user_client.post('/en-us/proposals/tutorial/42/cancel/', {
'cancelled': '',
}, follow=True)
assert response.redirect_chain == [('/en-us/dashboard/', 302)], (
response.context['form'].errors
)
assert not TutorialProposal.objects.get(pk=42).cancelled
msgs = [(m.level, m.message) for m in response.context['messages']]
assert msgs == [
(messages.SUCCESS,
'Tutorial proposal '
'<strong>Beyond the Style Guides<br></strong> reactivated.'),
]
|
"""Deals with making images (np arrays). It provides drawing
methods that are difficult to do with the existing Python libraries.
"""
import numpy as np
def blit(im1, im2, pos=None, mask=None):
"""Blit an image over another.
Blits ``im1`` on ``im2`` as position ``pos=(x,y)``, using the
``mask`` if provided.
"""
if pos is None:
pos = (0, 0) # pragma: no cover
else:
# Cast to tuple in case pos is not subscriptable.
pos = tuple(pos)
im2.paste(im1, pos, mask)
return im2
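# Note (added for clarity): despite the module docstring talking about np
# arrays, blit() as written expects PIL Image objects, since it relies on
# the Image.paste() method of ``im2``.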
def color_gradient(
size,
p1,
p2=None,
vector=None,
radius=None,
color_1=0.0,
color_2=1.0,
shape="linear",
offset=0,
):
"""Draw a linear, bilinear, or radial gradient.
The result is a picture of size ``size``, whose color varies
gradually from color `color_1` in position ``p1`` to color ``color_2``
in position ``p2``.
    If it is an RGB picture the result must be transformed into a 'uint8'
    array to be displayed normally.
Parameters
----------
size : tuple or list
Size (width, height) in pixels of the final image array.
p1 : tuple or list
Position for the first coordinate of the gradient in pixels (x, y).
The color 'before' ``p1`` is ``color_1`` and it gradually changes in
the direction of ``p2`` until it is ``color_2`` when it reaches ``p2``.
p2 : tuple or list, optional
Position for the second coordinate of the gradient in pixels (x, y).
Coordinates (x, y) of the limit point for ``color_1``
and ``color_2``.
vector : tuple or list, optional
A vector (x, y) in pixels that can be provided instead of ``p2``.
``p2`` is then defined as (p1 + vector).
color_1 : tuple or list, optional
Starting color for the gradient. As default, black. Either floats
between 0 and 1 (for gradients used in masks) or [R, G, B] arrays
(for colored gradients).
color_2 : tuple or list, optional
Color for the second point in the gradient. As default, white. Either
floats between 0 and 1 (for gradients used in masks) or [R, G, B]
arrays (for colored gradients).
shape : str, optional
        Shape of the gradient. Can be either ``"linear"``, ``"bilinear"`` or
        ``"radial"``. In a linear gradient the color varies in one direction,
        from point ``p1`` to point ``p2``. In a bilinear gradient it also
        varies symmetrically from ``p1`` in the other direction. In a radial
        gradient it goes from ``color_1`` to ``color_2`` in all directions.
radius : float, optional
If ``shape="radial"``, the radius of the gradient is defined with the
parameter ``radius``, in pixels.
offset : float, optional
Real number between 0 and 1 indicating the fraction of the vector
at which the gradient actually starts. For instance if ``offset``
is 0.9 in a gradient going from p1 to p2, then the gradient will
only occur near p2 (before that everything is of color ``color_1``)
If the offset is 0.9 in a radial gradient, the gradient will
occur in the region located between 90% and 100% of the radius,
this creates a blurry disc of radius ``d(p1, p2)``.
Returns
-------
image
An Numpy array of dimensions (width, height, n_colors) of type float
representing the image of the gradient.
Examples
--------
>>> color_gradient((10, 1), (0, 0), p2=(10, 0)) # from white to black
[[1. 0.9 0.8 0.7 0.6 0.5 0.4 0.3 0.2 0.1]]
>>>
>>> color_gradient( # from red to green
... (10, 1), # size
... (0, 0), # p1
... p2=(10, 0),
... color_1=(255, 0, 0), # red
... color_2=(0, 255, 0), # green
... )
[[[ 0. 255. 0. ]
[ 25.5 229.5 0. ]
[ 51. 204. 0. ]
[ 76.5 178.5 0. ]
[102. 153. 0. ]
[127.5 127.5 0. ]
[153. 102. 0. ]
[178.5 76.5 0. ]
[204. 51. 0. ]
[229.5 25.5 0. ]]]
"""
# np-arrayize and change x,y coordinates to y,x
w, h = size
color_1 = np.array(color_1).astype(float)
color_2 = np.array(color_2).astype(float)
if shape == "bilinear":
if vector is None:
if p2 is None:
raise ValueError("You must provide either 'p2' or 'vector'")
vector = np.array(p2) - np.array(p1)
m1, m2 = [
color_gradient(
size,
p1,
vector=v,
color_1=1.0,
color_2=0.0,
shape="linear",
offset=offset,
)
for v in [vector, [-v for v in vector]]
]
arr = np.maximum(m1, m2)
if color_1.size > 1:
arr = np.dstack(3 * [arr])
return arr * color_1 + (1 - arr) * color_2
p1 = np.array(p1[::-1]).astype(float)
M = np.dstack(np.meshgrid(range(w), range(h))[::-1]).astype(float)
if shape == "linear":
if vector is None:
if p2 is not None:
vector = np.array(p2[::-1]) - p1
else:
raise ValueError("You must provide either 'p2' or 'vector'")
else:
vector = np.array(vector[::-1])
norm = np.linalg.norm(vector)
n_vec = vector / norm ** 2 # norm 1/norm(vector)
p1 = p1 + offset * vector
arr = (M - p1).dot(n_vec) / (1 - offset)
arr = np.minimum(1, np.maximum(0, arr))
if color_1.size > 1:
arr = np.dstack(3 * [arr])
return arr * color_1 + (1 - arr) * color_2
elif shape == "radial":
if (radius or 0) == 0:
arr = np.ones((h, w))
else:
arr = (np.sqrt(((M - p1) ** 2).sum(axis=2))) - offset * radius
arr = arr / ((1 - offset) * radius)
arr = np.minimum(1.0, np.maximum(0, arr))
if color_1.size > 1:
arr = np.dstack(3 * [arr])
return (1 - arr) * color_1 + arr * color_2
raise ValueError("Invalid shape, should be either 'radial', 'linear' or 'bilinear'")
def color_split(
size,
x=None,
y=None,
p1=None,
p2=None,
vector=None,
color_1=0,
color_2=1.0,
gradient_width=0,
):
"""Make an image split in 2 colored regions.
Returns an array of size ``size`` divided in two regions called 1 and
2 in what follows, and which will have colors color_1 and color_2
respectively.
Parameters
----------
x : int, optional
If provided, the image is split horizontally in x, the left
region being region 1.
y : int, optional
If provided, the image is split vertically in y, the top region
being region 1.
p1, p2: tuple or list, optional
Positions (x1, y1), (x2, y2) in pixels, where the numbers can be
floats. Region 1 is defined as the whole region on the left when
going from ``p1`` to ``p2``.
p1, vector: tuple or list, optional
``p1`` is (x1,y1) and vector (v1,v2), where the numbers can be
floats. Region 1 is then the region on the left when starting
in position ``p1`` and going in the direction given by ``vector``.
gradient_width : float, optional
If not zero, the split is not sharp, but gradual over a region of
width ``gradient_width`` (in pixels). This is preferable in many
situations (for instance for antialiasing).
Examples
--------
>>> size = [200, 200]
>>>
>>> # an image with all pixels with x<50 =0, the others =1
>>> color_split(size, x=50, color_1=0, color_2=1)
>>>
>>> # an image with all pixels with y<50 red, the others green
>>> color_split(size, x=50, color_1=[255, 0, 0], color_2=[0, 255, 0])
>>>
>>> # An image split along an arbitrary line (see below)
    >>> color_split(size, p1=[20, 50], p2=[25, 70], color_1=0, color_2=1)
"""
if gradient_width or ((x is None) and (y is None)):
if p2 is not None:
vector = np.array(p2) - np.array(p1)
elif x is not None:
vector = np.array([0, -1.0])
p1 = np.array([x, 0])
elif y is not None:
vector = np.array([1.0, 0.0])
p1 = np.array([0, y])
x, y = vector
vector = np.array([y, -x]).astype("float")
norm = np.linalg.norm(vector)
vector = max(0.1, gradient_width) * vector / norm
return color_gradient(
size, p1, vector=vector, color_1=color_1, color_2=color_2, shape="linear"
)
else:
w, h = size
shape = (h, w) if np.isscalar(color_1) else (h, w, len(color_1))
arr = np.zeros(shape)
if x:
arr[:, :x] = color_1
arr[:, x:] = color_2
elif y:
arr[:y] = color_1
arr[y:] = color_2
return arr
def circle(screensize, center, radius, color=1.0, bg_color=0, blur=1):
"""Draw an image with a circle.
Draws a circle of color ``color``, on a background of color ``bg_color``,
on a screen of size ``screensize`` at the position ``center=(x, y)``,
with a radius ``radius`` but slightly blurred on the border by ``blur``
pixels.
Parameters
----------
screensize : tuple or list
Size of the canvas.
center : tuple or list
Center of the circle.
radius : float
Radius of the circle, in pixels.
bg_color : tuple or float, optional
Color for the background of the canvas. As default, black.
blur : float, optional
Blur for the border of the circle.
Examples
--------
>>> from moviepy.video.tools.drawing import circle
>>>
>>> circle(
... (5, 5), # size
... (2, 2), # center
... 2, # radius
... )
array([[0. , 0. , 0. , 0. , 0. ],
[0. , 0.58578644, 1. , 0.58578644, 0. ],
[0. , 1. , 1. , 1. , 0. ],
[0. , 0.58578644, 1. , 0.58578644, 0. ],
[0. , 0. , 0. , 0. , 0. ]])
"""
offset = 1.0 * (radius - blur) / radius if radius else 0
return color_gradient(
screensize,
p1=center,
radius=radius,
color_1=color,
color_2=bg_color,
shape="radial",
offset=offset,
)
|
# -*- coding: utf-8 -*-
import h5py
import pyre
from ..Base import Base
from .Identification import Identification
class SLC(Base, family='nisar.productreader.slc'):
'''
Class for parsing NISAR SLC products into isce structures.
'''
productValidationType = pyre.properties.str(default='SLC')
productValidationType.doc = 'Validation tag to ensure correct product type'
def __init__(self, **kwds):
'''
Constructor to initialize product with HDF5 file.
'''
###Read base product information like Identification
super().__init__(**kwds)
def populateIdentification(self):
'''
Read in the Identification information and assert identity.
'''
with h5py.File(self.filename, 'r', libver='latest', swmr=True) as f:
h5grp = f[self.IdentificationPath]
self.identification = Identification(h5grp)
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
#'sphinx.ext.intersphinx',
'stevedore.sphinxext',
'oslosphinx'
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tacker'
copyright = u'2013, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['tacker.']
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
|
#!/usr/bin/python
import json
import sys
from collections import defaultdict
from unidecode import unidecode
from HTMLParser import HTMLParser
h = HTMLParser()
_schema_keys_to_yml = {
'description' : 'notes',
'image' : 'photo',
'recipeCuisine' : 'notes',
'recipeInstructions' : 'directions',
'recipeCategory' : 'categories',
'name' : 'name',
'author' : 'source',
'ingredients' : 'ingredients',
'recipeYield' : 'servings',
'prepTime' : 'prep_time',
'cookTime' : 'cook_time',
'source_url' : 'source_url',
'notes' : 'notes',
}
_yml_keys = [
'name',
'servings',
'prep_time',
'cook_time',
'on_favorites',
'categories',
'ingredients',
'directions',
'photo',
'source_url',
'source',
'notes',
]
def remove_non_ascii(input):
''' borrowed from http://stackoverflow.com/a/35492167
'''
return unidecode(unicode(input, encoding = "utf-8"))
def load_json(filename):
''' load the file into a python obj
'''
with open(filename) as fi:
contents = json.load(fi)
return contents
def default_yml_recipe():
'''returns a default empty recipe dict in yml
'''
default_recipe = {}
for key in _yml_keys:
default_recipe[key] = ''
return default_recipe
def translate_json_to_yml_prep(recipes):
    ''' takes a list of raw schema.org recipe dicts and translates them to paprika yml
'''
all_ymls = []
for recipe in recipes:
yml_recipe = defaultdict(str)
for key in _schema_keys_to_yml:
if key in recipe:
handle_key(recipe[key], yml_recipe, key)
all_ymls.append(yml_recipe)
return all_ymls
def handle_key(incomingData, yml_out, key):
'''different handling for each type of key
'''
try:
if key in (
'description',
'recipeCuisine',
'recipeCategory',
'recipeYield',
'prepTime',
'cookTime',
'source_url',
'name',
'notes',
):
if yml_out[_schema_keys_to_yml[key]]:
yml_out[_schema_keys_to_yml[key]] += '\n'
yml_out[_schema_keys_to_yml[key]] += incomingData
elif key in ('recipeInstructions','ingredients'):
yml_out[_schema_keys_to_yml[key]] = '\n'.join(incomingData)
#elif key == 'notes':
# yml_out[_schema_keys_to_yml[key]] = '\n'.join(incomingData)
elif key == 'author':
yml_out[_schema_keys_to_yml[key]] = incomingData['name']
elif key == 'image':
yml_out[_schema_keys_to_yml[key]] = ''
except:
pass
#yml example:
#- name:NAME
# servings:
# etc.
def yml_prep_to_yml(list_of_yml_dicts):
'''translates the yml_prepped dicts into yml string
'''
strList = []
for item in list_of_yml_dicts:
strList.append('- ')
for key in _yml_keys:
if not item[key]:
continue
line = item[key].replace('<br>','\n').replace('<br/>','\n').replace('\n','\r\n ')
line = line.replace('<span class="fn">','').replace('</span>','')
line = line.replace('<span class ="fn">','').replace('<span class = "fn">','').replace('<span class= "fn">','')
line = h.unescape(line)
line = remove_non_ascii(line.encode('UTF-8'))
line = line.replace('\n','')
strList.append(key)
strList.append(': ')
if key in ('ingredients', 'directions','notes'):
strList.append('|\r\n ')
strList.append(line)
strList.append('\r\n ')
del(strList[-1])
strList.append('\r\n')
print u''.join(strList).encode('utf-8').strip()
if __name__ == '__main__':
json_data = load_json(sys.argv[1])
yml_prep = translate_json_to_yml_prep(json_data)
yml_prep_to_yml(yml_prep)
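# Illustrative input shape (assumed, not from the original source): the JSON
# file is expected to hold a list of schema.org/Recipe-like dicts, e.g.
#   [{"name": "Pancakes", "author": {"name": "Jane Doe"},
#     "ingredients": ["1 cup flour", "1 egg"],
#     "recipeInstructions": ["Mix.", "Fry."],
#     "recipeYield": "4", "source_url": "http://example.com/pancakes"}]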
|
from __future__ import print_function
from __future__ import division
import os
import sys
import time
import datetime
import os.path as osp
import numpy as np
import warnings
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from args import argument_parser, image_dataset_kwargs, optimizer_kwargs, lr_scheduler_kwargs
from torchreid.data_manager import ImageDataManager
from torchreid import models
from torchreid.losses import CrossEntropyLoss, DeepSupervision
from torchreid.utils.iotools import check_isfile
from torchreid.utils.avgmeter import AverageMeter
from torchreid.utils.loggers import Logger, RankLogger
from torchreid.utils.torchtools import count_num_param, open_all_layers, open_specified_layers, accuracy, \
load_pretrained_weights, save_checkpoint, resume_from_checkpoint
from torchreid.utils.reidtools import visualize_ranked_results
from torchreid.utils.generaltools import set_random_seed
from torchreid.eval_metrics import evaluate
from torchreid.optimizers import init_optimizer
from torchreid.lr_schedulers import init_lr_scheduler
os.environ['TORCH_HOME'] = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '.torch'))
testloader_dict = trainloader = criterion = None
use_gpu = False
# global variables
parser = argument_parser()
args = parser.parse_args()
def corr_metric(W: 'K x N'):
G = W.permute(1, 0) @ W
return torch.trace(G) / abs(G).sum()
def replace_weight(layer):
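    # Interpretation (comment added; not from the original source): with the
    # SVD A = U S V^T, W = A @ V = U S has mutually orthogonal columns that
    # span the same space as A's columns. Each original column of A is then
    # matched greedily to its most-correlated remaining column of W
    # (sign-aligned) to form the replacement weight, as used by the
    # restraint/relaxation (RRI) training loop below.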
with torch.no_grad():
# NECESSARY! The weight of Linear layer has been transposed!
A = layer.weight.t()
M, N = A.size()
M: 2048
N: 1024
U, S, V = torch.svd(A, some=False)
W = A @ V
W: '2048 x 1024 = M x N'
NW = torch.zeros_like(A)
for i in range(N):
curr_N = W.size(1)
W_norm = torch.norm(W, p=2, dim=0)
W_norm: 'curr_N'
index = i
vec_i = A[:, i]
vec_i_norm = torch.norm(vec_i)
co = (A[:, i].view(M, 1).t() @ W).view(curr_N)
co: 'curr_N'
co = co / vec_i_norm
absco = abs(co / W_norm)
maxco_index = torch.max(absco, 0)[1].item()
NW[:, index] = W[:, maxco_index] * torch.sign(co[maxco_index])
# Remove selected column vector from W
W = W[:, sorted({x for x in range(curr_N) if x != maxco_index})]
layer.weight.copy_(NW.t())
print(layer.weight)
return layer
def main():
global args, criterion, testloader_dict, trainloader, use_gpu
set_random_seed(args.seed)
if not args.use_avai_gpus:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
use_gpu = torch.cuda.is_available()
if args.use_cpu:
use_gpu = False
log_name = 'test.log' if args.evaluate else 'train.log'
sys.stdout = Logger(osp.join(args.save_dir, log_name))
print('==========\nArgs:{}\n=========='.format(args))
if use_gpu:
print('Currently using GPU {}'.format(args.gpu_devices))
cudnn.benchmark = True
else:
warnings.warn('Currently using CPU, however, GPU is highly recommended')
print('Initializing image data manager')
dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))
trainloader, testloader_dict = dm.return_dataloaders()
print('Initializing model: {}'.format(args.arch))
model = models.init_model(name=args.arch, num_classes=dm.num_train_pids, loss={'xent'}, pretrained=not args.no_pretrained, use_gpu=use_gpu)
print('Model size: {:.3f} M'.format(count_num_param(model)))
if args.load_weights and check_isfile(args.load_weights):
load_pretrained_weights(model, args.load_weights)
model = nn.DataParallel(model).cuda() if use_gpu else model
criterion = CrossEntropyLoss(num_classes=dm.num_train_pids, use_gpu=use_gpu, label_smooth=args.label_smooth)
if args.resume and check_isfile(args.resume):
args.start_epoch = resume_from_checkpoint(args.resume, model, optimizer=None)
resumed = True
else:
resumed = False
if args.evaluate:
print('Evaluate only')
for name in args.target_names:
print('Evaluating {} ...'.format(name))
queryloader = testloader_dict[name]['query']
galleryloader = testloader_dict[name]['gallery']
distmat = test(model, queryloader, galleryloader, use_gpu, return_distmat=True)
if args.visualize_ranks:
visualize_ranked_results(
distmat, dm.return_testdataset_by_name(name),
save_dir=osp.join(args.save_dir, 'ranked_results', name),
topk=20
)
return
time_start = time.time()
# ranklogger = RankLogger(args.source_names, args.target_names)
print('=> Start training')
if not resumed:
train_base(model)
train_RRI(model, 7)
elapsed = round(time.time() - time_start)
elapsed = str(datetime.timedelta(seconds=elapsed))
print('Elapsed {}'.format(elapsed))
# ranklogger.show_summary()
def train(epoch, model, criterion, optimizer, trainloader, use_gpu, fixbase=False):
losses = AverageMeter()
accs = AverageMeter()
batch_time = AverageMeter()
data_time = AverageMeter()
model.train()
# if fixbase or args.always_fixbase:
# open_specified_layers(model, args.open_layers)
# else:
# open_all_layers(model)
end = time.time()
for batch_idx, (imgs, pids, _, _) in enumerate(trainloader):
data_time.update(time.time() - end)
if use_gpu:
imgs, pids = imgs.cuda(), pids.cuda()
outputs = model(imgs)
loss = sum(criterion(x, pids) for x in outputs) / len(outputs)
# if isinstance(outputs, (tuple, list)):
# loss = DeepSupervision(criterion, outputs, pids)
# else:
# loss = criterion(outputs, pids)
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
losses.update(loss.item(), pids.size(0))
accs.update(accuracy(outputs, pids)[0])
if (batch_idx + 1) % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc {acc.val:.2f} ({acc.avg:.2f})\t'.format(
epoch + 1, batch_idx + 1, len(trainloader),
batch_time=batch_time,
data_time=data_time,
loss=losses,
acc=accs
))
end = time.time()
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20], return_distmat=False):
batch_time = AverageMeter()
model.eval()
with torch.no_grad():
qf, q_pids, q_camids = [], [], []
for batch_idx, (imgs, pids, camids, _) in enumerate(queryloader):
if use_gpu:
imgs = imgs.cuda()
end = time.time()
features = model(imgs)
batch_time.update(time.time() - end)
features = features.data.cpu()
qf.append(features)
q_pids.extend(pids)
q_camids.extend(camids)
qf = torch.cat(qf, 0)
q_pids = np.asarray(q_pids)
q_camids = np.asarray(q_camids)
print('Extracted features for query set, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1)))
gf, g_pids, g_camids = [], [], []
end = time.time()
for batch_idx, (imgs, pids, camids, _) in enumerate(galleryloader):
if use_gpu:
imgs = imgs.cuda()
end = time.time()
features = model(imgs)
batch_time.update(time.time() - end)
features = features.data.cpu()
gf.append(features)
g_pids.extend(pids)
g_camids.extend(camids)
gf = torch.cat(gf, 0)
g_pids = np.asarray(g_pids)
g_camids = np.asarray(g_camids)
print('Extracted features for gallery set, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1)))
print('=> BatchTime(s)/BatchSize(img): {:.3f}/{}'.format(batch_time.avg, args.test_batch_size))
m, n = qf.size(0), gf.size(0)
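    # Comment added for clarity: squared Euclidean distances,
    # ||q||^2 + ||g||^2 - 2 * q @ g.T. The addmm_(1, -2, qf, gf.t()) call uses
    # the legacy positional signature (beta, alpha, mat1, mat2) of older torch
    # releases, i.e. distmat = 1 * distmat - 2 * qf @ gf.t().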
distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
distmat.addmm_(1, -2, qf, gf.t())
distmat = distmat.numpy()
print('Computing CMC and mAP')
cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, use_metric_cuhk03=args.use_metric_cuhk03)
print('Results ----------')
print('mAP: {:.1%}'.format(mAP))
print('CMC curve')
for r in ranks:
print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))
print('------------------')
if return_distmat:
return distmat
return cmc[0]
def get_base_optimizer(model):
kwargs = {
'weight_decay': 5e-4,
'lr': 0.0003,
'betas': (0.9, 0.999),
}
param_groups = model.parameters()
optimizer = torch.optim.Adam(param_groups, **kwargs)
scheduler = init_lr_scheduler(optimizer, stepsize=[20, 40], gamma=0.1)
return optimizer, scheduler
def get_base_sgd_optimizer(model):
kwargs = {
'weight_decay': 5e-4,
'lr': 0.001,
'momentum': 0.9,
}
param_groups = model.parameters()
optimizer = torch.optim.SGD(param_groups, **kwargs)
scheduler = init_lr_scheduler(optimizer, stepsize=[25, 50], gamma=0.1)
return optimizer, scheduler
def get_RRI_optimizer(
model,
lr
):
kwargs = {
'weight_decay': 5e-4,
'lr': lr,
'momentum': 0.9,
}
param_groups = model.parameters()
optimizer = torch.optim.SGD(param_groups, **kwargs)
scheduler = init_lr_scheduler(optimizer, stepsize=[12], gamma=0.1)
return optimizer, scheduler
def train_R(model, lr, T, fix_eigen_layer: bool=False):
eigen_layers = model.module.get_fcs()
if fix_eigen_layer:
for eigen_layer in eigen_layers:
eigen_layer.eval()
for p in eigen_layer.parameters():
p.requires_grad = False
stage_name = 'restraint'
else:
model.train()
for p in model.parameters():
p.requires_grad = True
stage_name = 'relaxation'
prefix = '{}_{}_'.format(T, stage_name)
optimizer, scheduler = get_RRI_optimizer(model, lr)
for epoch in range(20):
train(epoch, model, criterion, optimizer, trainloader, use_gpu=use_gpu)
scheduler.step()
print('=> Test')
if (epoch + 1) % args.eval_freq == 0:
for name in args.target_names:
print('Evaluating {} ...'.format(name))
queryloader = testloader_dict[name]['query']
galleryloader = testloader_dict[name]['gallery']
rank1 = test(model, queryloader, galleryloader, use_gpu)
save_checkpoint({
'state_dict': model.state_dict(),
'rank1': rank1,
'epoch': 0,
'arch': args.arch,
'optimizer': (),
}, args.save_dir, prefix=prefix)
def train_base(model):
use_sgd = os.environ.get('sgd') is not None
optimizer_getter = get_base_sgd_optimizer if use_sgd else get_base_optimizer
optimizer, scheduler = get_base_optimizer(model)
model.train()
print('=== train base ===')
if True:
open_layers = ['fc', 'classifier1', 'classifier2_1', 'classifier2_2', 'fc2_1', 'fc2_2', 'reduction', 'classifier']
print('Train {} for {} epochs while keeping other layers frozen'.format(open_layers, 10))
for epoch in range(10):
open_specified_layers(model, open_layers)
train(epoch, model, criterion, optimizer, trainloader, use_gpu, fixbase=True)
print('Done. All layers are open to train for {} epochs'.format(60))
open_all_layers(model)
optimizer, scheduler = optimizer_getter(model)
for epoch in range(60):
train(epoch, model, criterion, optimizer, trainloader, use_gpu=use_gpu)
scheduler.step()
print('=> Test')
if (epoch + 1) % args.eval_freq == 0:
for name in args.target_names:
print('Evaluating {} ...'.format(name))
queryloader = testloader_dict[name]['query']
galleryloader = testloader_dict[name]['gallery']
rank1 = test(model, queryloader, galleryloader, use_gpu)
save_checkpoint({
'state_dict': model.state_dict(),
'rank1': rank1,
'epoch': 0,
'arch': args.arch,
'optimizer': optimizer.state_dict(),
}, args.save_dir, prefix='base_')
def train_RRI(model, Ts: int=7):
base_lrs = [0.001] * 3 + [0.0001] * 10
for T in range(Ts):
print('=== T = {} ==='.format(T))
print('Replacing eigen layer weight...')
for eigen_layer in model.module.get_fcs():
replace_weight(eigen_layer)
print('Replaced.')
print('--- Restraint ({}) ---'.format(T))
train_R(model, base_lrs[T], T, fix_eigen_layer=True)
print('--- Relaxation ({}) ---'.format(T))
train_R(model, base_lrs[T], T, fix_eigen_layer=False)
for name in args.target_names:
print('Evaluating {} ...'.format(name))
queryloader = testloader_dict[name]['query']
galleryloader = testloader_dict[name]['gallery']
rank1 = test(model, queryloader, galleryloader, use_gpu)
save_checkpoint({
'state_dict': model.state_dict(),
'rank1': rank1,
'epoch': 0,
'arch': args.arch,
'optimizer': (),
}, args.save_dir, prefix='final_')
if __name__ == '__main__':
main()
|
# Copyright (c) 2018, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
The renewals command is intended to be run as part of an automated script
run at least once a day. It will
- recognize revenue for past periods (see :doc:`ledger <ledger>`).
- extend active subscriptions
- create charges for new periods
- trigger expiration notices
Every function in the renewals script is explicitly written to be
idempotent. Calling the script multiple times for the same timestamp
(i.e. with the ``--at-time`` command line argument) will generate the
appropriate ``Transaction`` and ``Charge`` only once.
**Example cron setup**:
.. code-block:: bash
$ cat /etc/cron.daily/renewals
#!/bin/sh
cd /var/*mysite* && python manage.py renewals
"""
import logging, time
from django.core.management.base import BaseCommand
from ...models import get_broker
from ...renewals import (create_charges_for_balance, complete_charges,
extend_subscriptions, recognize_income, trigger_expiration_notices)
from ...utils import datetime_or_now
from ... import settings
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
help = """Recognized backlog, extends subscription and charge due balance
on credit cards"""
def add_arguments(self, parser):
parser.add_argument('--dry-run', action='store_true',
dest='dry_run', default=False,
help='Do not commit transactions nor submit charges to processor')
parser.add_argument('--no-charges', action='store_true',
dest='no_charges', default=False,
help='Do not submit charges to processor')
parser.add_argument('--at-time', action='store',
dest='at_time', default=None,
help='Specifies the time at which the command runs')
def handle(self, *args, **options):
#pylint:disable=broad-except
dry_run = options['dry_run']
no_charges = options['no_charges']
end_period = datetime_or_now(options['at_time'])
if dry_run:
LOGGER.warning("dry_run: no changes will be committed.")
if no_charges:
LOGGER.warning("no_charges: no charges will be submitted.")
try:
recognize_income(end_period, dry_run=dry_run)
except Exception as err:
LOGGER.exception("recognize_income: %s", err)
try:
extend_subscriptions(end_period, dry_run=dry_run)
except Exception as err:
LOGGER.exception("extend_subscriptions: %s", err)
try:
create_charges_for_balance(
end_period, dry_run=dry_run or no_charges)
except Exception as err:
LOGGER.exception(
"Unable to create charges for balance on broker '%s'",
get_broker())
if not (dry_run or no_charges):
# Let's complete the in flight charges after we have given
# them time to settle.
time.sleep(30)
complete_charges()
# Trigger 'expires soon' notifications
expiration_periods = settings.EXPIRE_NOTICE_DAYS
for period in expiration_periods:
trigger_expiration_notices(
end_period, nb_days=period, dry_run=dry_run)
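# Illustrative usage (not part of the original module): assuming this file lives under a
# Django app's management/commands/ directory, the command could be exercised with the
# flags defined in add_arguments() above, e.g.
#
#   python manage.py renewals --dry-run
#   python manage.py renewals --at-time 2018-01-01T00:00:00Z
#
# or, hypothetically, from a test:
#
#   from django.core.management import call_command
#   call_command('renewals', dry_run=True, at_time='2018-01-01T00:00:00Z')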
|
from __future__ import print_function
import datetime
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/calendar']
def main():
"""Shows basic usage of the Google Calendar API.
Prints the start and name of the next 10 events on the user's calendar.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
print(flow)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('calendar', 'v3', credentials=creds)
# Call the Calendar API
#now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
#print('Getting the upcoming 10 events')
#events_result = service.events().list(calendarId='primary', timeMin=now,
# maxResults=10, singleEvents=True,
# orderBy='startTime').execute()
#events = events_result.get('items', [])
#if not events:
# print('No upcoming events found.')
#for event in events:
# start = event['start'].get('dateTime', event['start'].get('date'))
# print(start, event['summary'])
print("Creating events")
# Refer to the Python quickstart on how to setup the environment:
# https://developers.google.com/calendar/quickstart/python
# Change the scope to 'https://www.googleapis.com/auth/calendar' and delete any
# stored credentials.
event = {
'summary': 'Google I/O 2019',
'location': '800 Howard St., San Francisco, CA 94103',
'description': 'A chance to hear more about Google\'s developer products.',
'start': {
'dateTime': '2019-08-28T09:00:00-07:00',
'timeZone': 'America/Los_Angeles',
},
'end': {
'dateTime': '2019-09-01T17:00:00-07:00',
'timeZone': 'America/Los_Angeles',
},
'recurrence': [
'RRULE:FREQ=DAILY;COUNT=2'
],
'attendees': [
{'email': 'lpage@example.com'},
{'email': 'sbrin@example.com'},
],
'reminders': {
'useDefault': False,
'overrides': [
{'method': 'email', 'minutes': 24 * 60},
{'method': 'popup', 'minutes': 10},
],
},
}
event = service.events().insert(calendarId='primary', body=event).execute()
    print('Event created: %s' % event.get('htmlLink'))
if __name__ == '__main__':
main()
|
import database
def load_shard_from_db(conf):
#TODO: load shard from cache if exists
shards = database.load_shard(conf)
return shards
def get_shard(shards, url):
"""
    Hash function for the sharding scheme
returns a dict with hostname and table name
Eg: s = { 'hostname': 'node1', 'table_name': 'url_s1'}
"""
if not shards:
return {}
else:
        shard_key = str(url['hostname']) + str(url['port']) + str(url['path'])
        return shards[hash(shard_key) % len(shards)]
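# Minimal demonstration sketch (not part of the original module): build a fake shard
# list by hand instead of calling database.load_shard(), then route a URL to a shard.
if __name__ == '__main__':
    demo_shards = [
        {'hostname': 'node1', 'table_name': 'url_s1'},
        {'hostname': 'node2', 'table_name': 'url_s2'},
    ]
    demo_url = {'hostname': 'example.com', 'port': 80, 'path': '/index.html'}
    # Prints whichever of the two demo shards the hash maps this URL to.
    print(get_shard(demo_shards, demo_url))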
|
from nonebot import on_command, CommandSession
@on_command('help', aliases=('h', '帮助'), only_to_me=False)
async def manual(session: CommandSession):
    await session.send('[CQ:image,file=/admin/manual.png]')
@manual.args_parser
async def _(session: CommandSession):
# do nothing
return
|
# Generated by Django 2.2.13 on 2020-06-30 06:51
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('pipeline', '0004_hospital'),
]
operations = [
migrations.RemoveField(
model_name='hospital',
name='sv_name',
),
]
|
"""
Definition of urls for polls viewing and voting.
"""
from django.conf.urls import url
from app.models import Poll
import app.views
urlpatterns = [
url(r'^$',
app.views.PollListView.as_view(
queryset=Poll.objects.order_by('-pub_date')[:5],
context_object_name='latest_poll_list',
template_name='app/index.html',),
name='home'),
url(r'^(?P<pk>\d+)/$',
app.views.PollDetailView.as_view(
template_name='app/details.html'),
name='detail'),
url(r'^(?P<pk>\d+)/results/$',
app.views.PollResultsView.as_view(
template_name='app/results.html'),
name='results'),
url(r'^(?P<poll_id>\d+)/vote/$', app.views.vote, name='vote'),
]
|
# Copyright (C) 2019 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Request Body validating middleware.
"""
import functools
import webob
from tacker._i18n import _  # translation helper used by the error messages below
from tacker.api.validation import validators
from tacker.common import exceptions
def schema(request_body_schema):
"""Register a schema to validate request body.
    The registered schema will be used to validate the request body just before
    the API method executes.
:param dict request_body_schema: a schema to validate request body
"""
def add_validator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
schema_validator = validators._SchemaValidator(
request_body_schema)
try:
schema_validator.validate(kwargs['body'])
except KeyError:
raise webob.exc.HTTPBadRequest(
explanation=_("Malformed request body"))
return func(*args, **kwargs)
return wrapper
return add_validator
def query_schema(query_params_schema):
"""Register a schema to validate request query parameters.
    The registered schema will be used to validate the request query parameters
    just before the API method executes.
:param query_params_schema: A dict, the JSON-Schema for validating the
query parameters.
"""
def add_validator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
# NOTE(tpatil): The second argument of the method
# calling this method should always be 'request'.
if 'request' in kwargs:
req = kwargs['request']
else:
req = args[1]
try:
req.GET.dict_of_lists()
except UnicodeDecodeError:
msg = _('Query string is not UTF-8 encoded')
raise exceptions.ValidationError(msg)
query_opts = {}
query_opts.update(req.GET)
schema_validator = validators._SchemaValidator(
query_params_schema)
schema_validator.validate(query_opts)
return func(*args, **kwargs)
return wrapper
return add_validator
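# Illustrative sketch (not part of the original module): how a controller method could
# register schemas with the decorators above. The schema dicts and the SampleController
# class below are hypothetical placeholders, not real Tacker APIs.
#
#   _create_body_schema = {
#       'type': 'object',
#       'properties': {'name': {'type': 'string'}},
#       'required': ['name'],
#       'additionalProperties': False,
#   }
#
#   class SampleController(object):
#       @schema(_create_body_schema)
#       def create(self, request, body):
#           ...  # body has already been validated against _create_body_schema
#
#       @query_schema({'type': 'object', 'properties': {'limit': {'type': 'string'}}})
#       def index(self, request):
#           ...  # request.GET has been validated before this point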
|
#!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import config
import thread_cert
from pktverify.consts import (
    MLE_ADVERTISEMENT, MLE_PARENT_REQUEST, MLE_PARENT_RESPONSE, MLE_CHILD_UPDATE_RESPONSE,
    MLE_CHILD_ID_REQUEST, MLE_CHILD_ID_RESPONSE, MLE_LINK_REQUEST, MLE_LINK_ACCEPT_AND_REQUEST,
    ADDR_SOL_URI, SOURCE_ADDRESS_TLV, MODE_TLV, TIMEOUT_TLV, CHALLENGE_TLV, RESPONSE_TLV,
    LINK_LAYER_FRAME_COUNTER_TLV, MLE_FRAME_COUNTER_TLV, ROUTE64_TLV, ADDRESS16_TLV,
    LEADER_DATA_TLV, NETWORK_DATA_TLV, TLV_REQUEST_TLV, SCAN_MASK_TLV, CONNECTIVITY_TLV,
    LINK_MARGIN_TLV, VERSION_TLV, ADDRESS_REGISTRATION_TLV, ACTIVE_TIMESTAMP_TLV)
from pktverify.packet_verifier import PacketVerifier
from pktverify.null_field import nullField
LEADER = 1
ROUTER = 2
ED = 3
class Cert_5_5_2_LeaderReboot(thread_cert.TestCase):
TOPOLOGY = {
LEADER: {
'name': 'LEADER',
'mode': 'rsdn',
'panid': 0xface,
'router_selection_jitter': 1,
'whitelist': [ROUTER]
},
ROUTER: {
'name': 'ROUTER',
'mode': 'rsdn',
'panid': 0xface,
'router_selection_jitter': 1,
'whitelist': [LEADER, ED]
},
ED: {
'name': 'MED',
'is_mtd': True,
'mode': 'rsn',
'panid': 0xface,
'whitelist': [ROUTER]
},
}
def _setUpLeader(self):
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[LEADER].set_router_selection_jitter(1)
def test(self):
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ROUTER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
self.nodes[ED].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ED].get_state(), 'child')
self.nodes[LEADER].reset()
self._setUpLeader()
self.simulator.go(140)
self.assertEqual(self.nodes[ROUTER].get_state(), 'leader')
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'router')
addrs = self.nodes[ED].get_addrs()
for addr in addrs:
self.assertTrue(self.nodes[ROUTER].ping(addr))
def verify(self, pv):
pkts = pv.pkts
pv.summary.show()
LEADER = pv.vars['LEADER']
ROUTER = pv.vars['ROUTER']
MED = pv.vars['MED']
leader_pkts = pkts.filter_wpan_src64(LEADER)
_rpkts = pkts.filter_wpan_src64(ROUTER)
# Step 2: The DUT MUST send properly formatted MLE Advertisements
_rpkts.filter_mle_cmd(MLE_CHILD_ID_RESPONSE).must_next()
_lpkts = leader_pkts.range(_rpkts.index)
_lpkts.filter_mle_cmd(MLE_ADVERTISEMENT).must_next().must_verify(
lambda p: {SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, ROUTE64_TLV} == set(p.mle.tlv.type))
_rpkts.filter_mle_cmd(MLE_ADVERTISEMENT).must_next().must_verify(
lambda p: {SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, ROUTE64_TLV} == set(p.mle.tlv.type))
# Step 4: Router_1 MUST attempt to reattach to its original partition by
# sending MLE Parent Requests to the All-Routers multicast
# address (FFxx::xx) with a hop limit of 255.
_rpkts.filter_mle_cmd(MLE_PARENT_REQUEST).must_next().must_verify(
lambda p: {MODE_TLV, CHALLENGE_TLV, SCAN_MASK_TLV, VERSION_TLV} == set(p.mle.tlv.type))
lreset_start = _rpkts.index
# Step 5: Leader MUST NOT respond to the MLE Parent Requests
_lpkts.filter_mle_cmd(MLE_PARENT_RESPONSE).must_not_next()
# Step 6:Router_1 MUST attempt to attach to any other Partition
# within range by sending a MLE Parent Request.
_rpkts.filter_mle_cmd(MLE_PARENT_REQUEST).must_next().must_verify(
lambda p: {MODE_TLV, CHALLENGE_TLV, SCAN_MASK_TLV, VERSION_TLV} == set(p.mle.tlv.type))
lreset_stop = _rpkts.index
# Step 3: The Leader MUST stop sending MLE advertisements.
leader_pkts.range(lreset_start, lreset_stop).filter_mle_cmd(MLE_ADVERTISEMENT).must_not_next()
# Step 7: Take over leader role of a new Partition and
# begin transmitting MLE Advertisements
with _rpkts.save_index():
_rpkts.filter_mle_cmd(MLE_ADVERTISEMENT).must_next().must_verify(
lambda p: {SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, ROUTE64_TLV} == set(p.mle.tlv.type))
# Step 8: Router_1 MUST respond with an MLE Child Update Response,
# with the updated TLVs of the new partition
_rpkts.filter_mle_cmd(MLE_CHILD_UPDATE_RESPONSE).must_next().must_verify(
lambda p: {SOURCE_ADDRESS_TLV, MODE_TLV, LEADER_DATA_TLV, ADDRESS_REGISTRATION_TLV} < set(p.mle.tlv.type))
# Step 9: The Leader MUST send properly formatted MLE Parent
# Requests to the All-Routers multicast address
_lpkts.filter_mle_cmd(MLE_PARENT_REQUEST).must_next().must_verify(
lambda p: {MODE_TLV, CHALLENGE_TLV, SCAN_MASK_TLV, VERSION_TLV} == set(p.mle.tlv.type))
# Step 10: Router_1 MUST send an MLE Parent Response
_rpkts.filter_mle_cmd(MLE_PARENT_RESPONSE).must_next().must_verify(
lambda p: {
SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, LINK_LAYER_FRAME_COUNTER_TLV, RESPONSE_TLV, CHALLENGE_TLV,
LINK_MARGIN_TLV, CONNECTIVITY_TLV, VERSION_TLV
} < set(p.mle.tlv.type))
# Step 11: Leader send MLE Child ID Request
_lpkts.filter_mle_cmd(MLE_CHILD_ID_REQUEST).must_next().must_verify(
lambda p: {
RESPONSE_TLV, LINK_LAYER_FRAME_COUNTER_TLV, MODE_TLV, TIMEOUT_TLV, VERSION_TLV, TLV_REQUEST_TLV,
ADDRESS16_TLV, NETWORK_DATA_TLV, ROUTE64_TLV, ACTIVE_TIMESTAMP_TLV
} < set(p.mle.tlv.type))
#Step 12: Router_1 send MLE Child ID Response
_rpkts.filter_mle_cmd(MLE_CHILD_ID_RESPONSE).must_next().must_verify(
lambda p: {SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, ADDRESS16_TLV, NETWORK_DATA_TLV, ROUTE64_TLV} < set(
p.mle.tlv.type))
#Step 13: Leader send an Address Solicit Request
_lpkts.filter_coap_request(ADDR_SOL_URI).must_next().must_verify(
lambda p: p.coap.tlv.ext_mac_addr and p.coap.tlv.rloc16 is not nullField and p.coap.tlv.status != 0)
#Step 14: Router_1 send an Address Solicit Response
_rpkts.filter_coap_ack(
ADDR_SOL_URI).must_next().must_verify(lambda p: p.coap.tlv.router_mask_assigned and p.coap.tlv.rloc16 is
not nullField and p.coap.tlv.status == 0)
#Step 15: Leader Send a Multicast Link Request
_lpkts.filter_mle_cmd(MLE_LINK_REQUEST).must_next().must_verify(
lambda p: {VERSION_TLV, TLV_REQUEST_TLV, SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, CHALLENGE_TLV} < set(
p.mle.tlv.type))
#Step 16: Router_1 send a Unicast Link Accept
_rpkts.filter_mle_cmd(MLE_LINK_ACCEPT_AND_REQUEST).must_next().must_verify(lambda p: {
VERSION_TLV, SOURCE_ADDRESS_TLV, RESPONSE_TLV, MLE_FRAME_COUNTER_TLV, LINK_MARGIN_TLV, LEADER_DATA_TLV
} < set(p.mle.tlv.type))
#Step 17: Router_1 MUST respond with an ICMPv6 Echo Reply
_rpkts.filter_ping_request().filter_wpan_dst64(MED).must_next()
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python3
#
# Araboly 2000 Advanced Server SP4 -- everyone's favourite board game... with IRC support and fancy colours!
# Copyright (c) 2018 Lucio Andrés Illanes Albornoz <lucio@lucioillanes.de>
# This project is licensed under the terms of the MIT licence.
#
from ArabolyGenerals import ArabolyGenerals
from ArabolyMonad import ArabolyDecorator
from ArabolyTypeClass import ArabolyTypeClass
from ArabolyState import ArabolyGameState, ArabolyOutputLevel, ArabolyStringType
from ArabolyTrade import ArabolyTrade
import copy, os, sys, yaml
@ArabolyDecorator()
class ArabolyFree(ArabolyTypeClass):
"""XXX"""
# {{{ dispatch_board(args, channel, context, output, src, status): XXX
@staticmethod
def dispatch_board(args, channel, context, output, src, status):
if context.state != ArabolyGameState.AUCTION \
and context.state != ArabolyGameState.GAME \
and context.state != ArabolyGameState.PROPERTY:
status = False
elif len(args) \
or src not in context.players["byName"]:
status = False
else:
output = ArabolyGenerals._board(channel, context, output, src)
return args, channel, context, output, src, status
# }}}
# {{{ dispatch_bugcheck(channel, context, srcFull, status): XXX
@staticmethod
def dispatch_bugcheck(channel, context, srcFull, status):
if not ArabolyGenerals._authorised(channel, context, srcFull):
status = False
else:
snapshotPath = os.path.join("assets", "savefiles", "snapshot.dmp.{}".format(context.clientParams["hostname"]))
print("Saving game snapshot to {}!".format(os.path.join("assets", "savefiles", snapshotPath)))
with open(snapshotPath, "w+") as fileObject:
yaml.dump(context, fileObject)
sys.exit(1)
return channel, context, srcFull, status
# }}}
# {{{ dispatch_help(channel, context): XXX
@staticmethod
def dispatch_help(channel, context, output):
for helpLine in context.graphics["help"]:
output = ArabolyGenerals._push_output(channel, context, output, helpLine, outputLevel=ArabolyOutputLevel.LEVEL_GRAPHICS)
return channel, context, output
# }}}
# {{{ dispatch_join(args, channel, context, output, src, status): XXX
@staticmethod
def dispatch_join(args, channel, context, output, src, status):
if context.state != ArabolyGameState.GAME \
and context.state != ArabolyGameState.SETUP:
status = False
elif src in context.players["byName"] \
or len(args):
status = False
else:
newNum = None
for otherNum in range(len(context.players["numMap"])):
if context.players["numMap"][otherNum] == None:
newNum = otherNum; break;
if newNum == None:
status = False
else:
context.players["byName"][src] = {"field":0, "name":src, "num":newNum, "properties":[], "wallet":1500}
context.players["numMap"][newNum] = src
output = ArabolyGenerals._push_output(channel, context, output, "Player {src} joins Araboly game!".format(**locals()))
return args, channel, context, output, src, status
# }}}
# {{{ dispatch_kick(args, channel, context, output, srcFull, status): XXX
@staticmethod
def dispatch_kick(args, channel, context, output, srcFull, status):
if context.state == ArabolyGameState.GAME \
or context.state == ArabolyGameState.SETUP:
if len(args) != 1 or len(args[0]) < 1 \
or args[0] not in context.players["byName"]:
status = False
elif ArabolyGenerals._authorised(channel, context, srcFull):
otherPlayers = [args[0]]
output = ArabolyGenerals._push_output(channel, context, output, "Kicking {args[0]} from current Araboly game!".format(**locals()))
context, output = ArabolyGenerals._remove_players(channel, context, output, otherPlayers)
else:
status = False
return args, channel, context, output, srcFull, status
# }}}
# {{{ dispatch_melp(channel, context, output): XXX
@staticmethod
def dispatch_melp(channel, context, output):
for explosionLine in context.graphics["explosion"]:
output = ArabolyGenerals._push_output(channel, context, output, explosionLine, outputLevel=ArabolyOutputLevel.LEVEL_GRAPHICS)
output = ArabolyGenerals._push_output(channel, context, output, "\u0001ACTION explodes.\u0001", outputLevel=ArabolyOutputLevel.LEVEL_GRAPHICS)
return channel, context, output
# }}}
# {{{ dispatch_part(args, channel, context, output, src, status): XXX
@staticmethod
def dispatch_part(args, channel, context, output, src, status):
if context.state == ArabolyGameState.GAME \
or context.state == ArabolyGameState.SETUP:
if len(args) > 0 \
or src not in context.players["byName"]:
status = False
else:
otherPlayers = [src]
output = ArabolyGenerals._push_output(channel, context, output, "Player {src} parts Araboly game!".format(**locals()))
context, output = ArabolyGenerals._remove_players(channel, context, output, otherPlayers)
else:
status = False
return args, channel, context, output, src, status
# }}}
# {{{ dispatch_save(args, channel, context, output, srcFull, status): XXX
    @staticmethod
    def dispatch_save(args, channel, context, output, srcFull, status):
if context.state != ArabolyGameState.AUCTION \
and context.state != ArabolyGameState.BANKRUPTCY \
and context.state != ArabolyGameState.GAME \
and context.state != ArabolyGameState.PROPERTY:
status = False
elif len(args) != 1 \
or not ArabolyGenerals._authorised(channel, context, srcFull):
status = False
else:
snapshotPath = os.path.join("assets", "savefiles", os.path.basename(args[0]))
output = ArabolyGenerals._push_output(channel, context, output, "Saving snapshot to {snapshotPath}!".format(**locals()))
with open(snapshotPath, "w") as fileObject:
gameSnapshot = copy.deepcopy(context)
delattr(gameSnapshot, "clientParams")
delattr(gameSnapshot, "graphics")
delattr(gameSnapshot, "kades")
yaml.dump(gameSnapshot, fileObject)
output = ArabolyGenerals._push_output(channel, context, output, "Saved snapshot to {snapshotPath}!".format(**locals()))
return args, channel, context, output, srcFull, status
# }}}
# {{{ dispatch_status(args, channel, context, output, src, status): XXX
    @staticmethod
    def dispatch_status(args, channel, context, output, src, status):
if context.state != ArabolyGameState.AUCTION \
and context.state != ArabolyGameState.BANKRUPTCY \
and context.state != ArabolyGameState.GAME \
and context.state != ArabolyGameState.PROPERTY:
status = False
elif len(args) == 0:
statusPlayer = src
elif len(args) == 1:
statusPlayer = args[0]
else:
status = False
if status:
            if statusPlayer not in context.players["byName"]:
status = False
else:
playerField = context.board[context.players["byName"][statusPlayer]["field"]]
playerProps = context.players["byName"][statusPlayer]["properties"]
playerWallet = context.players["byName"][statusPlayer]["wallet"]
output = ArabolyGenerals._push_output(channel, context, output, "Araboly status for player {statusPlayer}:".format(**locals()), outputLevel=ArabolyOutputLevel.LEVEL_NODELAY)
output = ArabolyGenerals._push_output(channel, context, output, "Field....: {playerField[title]}".format(**locals()), outputLevel=ArabolyOutputLevel.LEVEL_NODELAY)
output = ArabolyGenerals._push_output(channel, context, output, "Wallet...: ${playerWallet}".format(**locals()), outputLevel=ArabolyOutputLevel.LEVEL_NODELAY)
if len(playerProps):
output = ArabolyGenerals._push_output(channel, context, output, "Properties owned:", outputLevel=ArabolyOutputLevel.LEVEL_NODELAY)
for playerPropNum in playerProps:
playerProp = context.board[playerPropNum]
mortgagedString = " (\u001fMORTGAGED\u001f)" if playerProp["mortgaged"] else ""
developmentsList = []
for levelNum in range(playerProp["level"] + 1):
developmentsList += playerProp["strings"][ArabolyStringType.NAME][levelNum]
developmentsString = ", level {}, developments: {}".format(playerProp["level"], ", ".join(developmentsList))
output = ArabolyGenerals._push_output(channel, context, output, "\u0003{:02d}${}{} (#{}) -- {}{}".format(playerProp["colourMiRC"], playerProp["price"], mortgagedString, playerProp["field"], playerProp["title"], developmentsString), outputLevel=ArabolyOutputLevel.LEVEL_NODELAY)
output = ArabolyTrade._status(channel, context, output, statusPlayer)
output = ArabolyGenerals._push_output(channel, context, output, "Current turn: {}".format(context.players["numMap"][context.players["curNum"]]), outputLevel=ArabolyOutputLevel.LEVEL_NODELAY)
return args, channel, context, output, src, status
# }}}
# {{{ dispatch_stop(args, channel, context, output, src, srcFull, status): XXX
@staticmethod
def dispatch_stop(args, channel, context, output, src, srcFull, status):
if context.state == ArabolyGameState.AUCTION \
or context.state == ArabolyGameState.BANKRUPTCY \
or context.state == ArabolyGameState.GAME \
or context.state == ArabolyGameState.PROPERTY \
or context.state == ArabolyGameState.SETUP:
if len(args) > 0:
status = False
elif ArabolyGenerals._authorised(channel, context, srcFull):
otherPlayers = list(context.players["byName"].keys())
context, output = ArabolyGenerals._remove_players(channel, context, output, otherPlayers)
else:
status = False
return args, channel, context, output, src, srcFull, status
# }}}
# vim:expandtab foldmethod=marker sw=4 ts=4 tw=0
|
import setuptools
setuptools.setup(
name="video_to_ascii",
version="1.0.6",
author="Joel Ibaceta",
author_email="mail@joelibaceta.com",
description="A simple tool to play a video using ascii characters",
url="https://github.com/joelibaceta/video-to-ascii",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
scripts=['bin/video-to-ascii'],
)
|
import unittest
from SDWLE.agents.trade.possible_play import PossiblePlays
from SDWLE.cards import Wisp, WarGolem, BloodfenRaptor, RiverCrocolisk, AbusiveSergeant, ArgentSquire
from testsSDW.agents.trade.test_helpers import TestHelpers
from testsSDW.agents.trade.test_case_mixin import TestCaseMixin
class TestTradeAgent(TestCaseMixin, unittest.TestCase):
def test_setup_smoke(self):
game = TestHelpers().make_game()
self.add_minions(game, 0, Wisp(), WarGolem())
self.add_minions(game, 1, BloodfenRaptor())
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(1, len(game.players[1].minions))
def test_basic_trade(self):
game = TestHelpers().make_game()
self.add_minions(game, 1, Wisp(), WarGolem())
self.add_minions(game, 0, BloodfenRaptor())
self.make_all_active(game)
game.play_single_turn()
self.assert_minions(game.players[1], "War Golem")
self.assert_minions(game.players[0], "Bloodfen Raptor")
def test_buff_target(self):
game = TestHelpers().make_game()
self.add_minions(game, 0, BloodfenRaptor(), RiverCrocolisk())
self.make_all_active(game)
game.players[0].agent.player = game.players[0]
self.add_minions(game, 0, AbusiveSergeant())
game.play_single_turn()
def test_hero_power(self):
game = self.make_game()
cards = self.make_cards(game.current_player, ArgentSquire())
possible_plays = PossiblePlays(cards, 10, allow_hero_power=True)
self.assertEqual(1, len(possible_plays.plays()))
|
import numpy as np
from numpy.linalg import norm
from ._jit import jit
@jit
def J2_perturbation(t0, state, k, J2, R):
r"""Calculates J2_perturbation acceleration (km/s2)
.. math::
\vec{p} = \frac{3}{2}\frac{J_{2}\mu R^{2}}{r^{4}}\left [\frac{x}{r}\left ( 5\frac{z^{2}}{r^{2}}-1 \right )\vec{i} + \frac{y}{r}\left ( 5\frac{z^{2}}{r^{2}}-1 \right )\vec{j} + \frac{z}{r}\left ( 5\frac{z^{2}}{r^{2}}-3 \right )\vec{k}\right]
.. versionadded:: 0.9.0
Parameters
----------
t0 : float
Current time (s)
state : numpy.ndarray
Six component state vector [x, y, z, vx, vy, vz] (km, km/s).
k : float
gravitational constant, (km^3/s^2)
J2: float
oblateness factor
R: float
attractor radius
Note
----
The J2 accounts for the oblateness of the attractor. The formula is given in
Howard Curtis, (12.30)
"""
r_vec = state[:3]
r = norm(r_vec)
factor = (3.0 / 2.0) * k * J2 * (R ** 2) / (r ** 5)
a_x = 5.0 * r_vec[2] ** 2 / r ** 2 - 1
a_y = 5.0 * r_vec[2] ** 2 / r ** 2 - 1
a_z = 5.0 * r_vec[2] ** 2 / r ** 2 - 3
return np.array([a_x, a_y, a_z]) * r_vec * factor
@jit
def J3_perturbation(t0, state, k, J3, R):
r"""Calculates J3_perturbation acceleration (km/s2)
Parameters
----------
t0 : float
Current time (s)
state : numpy.ndarray
Six component state vector [x, y, z, vx, vy, vz] (km, km/s).
k : float
gravitational constant, (km^3/s^2)
J3: float
oblateness factor
R: float
attractor radius
Note
----
The J3 accounts for the oblateness of the attractor. The formula is given in
Howard Curtis, problem 12.8
This perturbation has not been fully validated, see https://github.com/poliastro/poliastro/pull/398
"""
r_vec = state[:3]
r = norm(r_vec)
factor = (1.0 / 2.0) * k * J3 * (R ** 3) / (r ** 5)
cos_phi = r_vec[2] / r
a_x = 5.0 * r_vec[0] / r * (7.0 * cos_phi ** 3 - 3.0 * cos_phi)
a_y = 5.0 * r_vec[1] / r * (7.0 * cos_phi ** 3 - 3.0 * cos_phi)
a_z = 3.0 * (35.0 / 3.0 * cos_phi ** 4 - 10.0 * cos_phi ** 2 + 1)
return np.array([a_x, a_y, a_z]) * factor
@jit
def atmospheric_drag(t0, state, k, R, C_D, A, m, H0, rho0):
r"""Calculates atmospheric drag acceleration (km/s2)
.. math::
\vec{p} = -\frac{1}{2}\rho v_{rel}\left ( \frac{C_{d}A}{m} \right )\vec{v_{rel}}
.. versionadded:: 0.9.0
Parameters
----------
t0 : float
Current time (s)
state : numpy.ndarray
Six component state vector [x, y, z, vx, vy, vz] (km, km/s).
k : float
gravitational constant, (km^3/s^2)
R : float
radius of the attractor (km)
C_D: float
dimensionless drag coefficient ()
A: float
frontal area of the spacecraft (km^2)
m: float
mass of the spacecraft (kg)
H0 : float
atmospheric scale height, (km)
rho0: float
the exponent density pre-factor, (kg / m^3)
Note
----
This function provides the acceleration due to atmospheric drag. We follow
Howard Curtis, section 12.4
the atmospheric density model is rho(H) = rho0 x exp(-H / H0)
"""
H = norm(state[:3])
v_vec = state[3:]
v = norm(v_vec)
B = C_D * A / m
rho = rho0 * np.exp(-(H - R) / H0)
return -(1.0 / 2.0) * rho * B * v * v_vec
@jit
def shadow_function(r_sat, r_sun, R):
r"""Determines whether the satellite is in attractor's shadow, uses algorithm 12.3 from Howard Curtis
Parameters
----------
r_sat : numpy.ndarray
position of the satellite in the frame of attractor (km)
r_sun : numpy.ndarray
position of star in the frame of attractor (km)
R : float
radius of body (attractor) that creates shadow (km)
"""
r_sat_norm = np.sqrt(np.sum(r_sat ** 2))
r_sun_norm = np.sqrt(np.sum(r_sun ** 2))
theta = np.arccos(np.dot(r_sat, r_sun) / r_sat_norm / r_sun_norm)
theta_1 = np.arccos(R / r_sat_norm)
theta_2 = np.arccos(R / r_sun_norm)
return theta < theta_1 + theta_2
def third_body(t0, state, k, k_third, third_body):
r"""Calculates 3rd body acceleration (km/s2)
.. math::
\vec{p} = \mu_{m}\left ( \frac{\vec{r_{m/s}}}{r_{m/s}^3} - \frac{\vec{r_{m}}}{r_{m}^3} \right )
Parameters
----------
t0 : float
Current time (s)
state : numpy.ndarray
Six component state vector [x, y, z, vx, vy, vz] (km, km/s).
    k : float
        gravitational constant, (km^3/s^2)
    k_third : float
        gravitational constant of the third body, (km^3/s^2)
    third_body: a callable object returning the position of 3rd body
        third body that causes the perturbation
Note
----
This formula is taken from Howard Curtis, section 12.10. As an example, a third body could be
the gravity from the Moon acting on a small satellite.
"""
body_r = third_body(t0)
delta_r = body_r - state[:3]
return k_third * delta_r / norm(delta_r) ** 3 - k_third * body_r / norm(body_r) ** 3
def radiation_pressure(t0, state, k, R, C_R, A, m, Wdivc_s, star):
r"""Calculates radiation pressure acceleration (km/s2)
.. math::
\vec{p} = -\nu \frac{S}{c} \left ( \frac{C_{r}A}{m} \right )\frac{\vec{r}}{r}
Parameters
----------
t0 : float
Current time (s)
state : numpy.ndarray
Six component state vector [x, y, z, vx, vy, vz] (km, km/s).
k : float
gravitational constant, (km^3/s^2)
R : float
radius of the attractor
C_R: float
dimensionless radiation pressure coefficient, 1 < C_R < 2 ()
A: float
effective spacecraft area (km^2)
m: float
mass of the spacecraft (kg)
Wdivc_s : float
total star emitted power divided by the speed of light (W * s / km)
star: a callable object returning the position of star in attractor frame
star position
Note
----
This function provides the acceleration due to star light pressure. We follow
Howard Curtis, section 12.9
"""
r_star = star(t0)
r_sat = state[:3]
P_s = Wdivc_s / (norm(r_star) ** 2)
nu = float(shadow_function(r_sat, r_star, R))
return -nu * P_s * (C_R * A / m) * r_star / norm(r_star)
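# Minimal usage sketch (illustrative only, not part of the original module). The numbers
# below are rough Earth-like constants picked purely for demonstration; t0 is unused by
# J2_perturbation itself.
#
#   k_earth = 398600.0        # gravitational parameter, km^3 / s^2
#   J2_earth = 1.08263e-3     # dimensionless oblateness factor
#   R_earth = 6378.137        # attractor radius, km
#   state = np.array([7000.0, 0.0, 200.0, 0.0, 7.5, 0.1])   # [x, y, z, vx, vy, vz]
#   a_j2 = J2_perturbation(0.0, state, k_earth, J2_earth, R_earth)   # km / s^2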
|
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union, Any
from datetime import datetime
class PurchasableState(Enum):
"""
Whether or not the in-skill product is purchasable by customers. A product that is not purchasable will prevent new customers from being prompted to purchase the product. Customers who already own the product will see no effect and continue to have access to the product features.
Allowed enum values: [PURCHASABLE, NOT_PURCHASABLE]
"""
PURCHASABLE = "PURCHASABLE"
NOT_PURCHASABLE = "NOT_PURCHASABLE"
def to_dict(self):
# type: () -> Dict[str, Any]
"""Returns the model properties as a dict"""
result = {self.name: self.value}
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.value)
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (Any) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, PurchasableState):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (Any) -> bool
"""Returns true if both objects are not equal"""
return not self == other
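# Illustrative sketch (not part of the original module):
#
#   state = PurchasableState.PURCHASABLE
#   state.to_dict()   # -> {'PURCHASABLE': 'PURCHASABLE'}
#   state.to_str()    # -> "'PURCHASABLE'"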
|
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
import cohesity_management_sdk.models.exchange_database_copy_info
import cohesity_management_sdk.models.exchange_database_info
class ApplicationServerInfo(object):
"""Implementation of the 'ApplicationServerInfo' model.
Specifies the Information about the Exchange Server Node.
Attributes:
database_copy_info_list (list of ExchangeDatabaseCopyInfo): Specifies
the list of all the copies of the Exchange databases(that are part
of DAG) that are present on this Exchange Node.
database_info_list (list of ExchangeDatabaseInfo): Specifies the list
of all the databases available on the standalone Exchange server
            node. This is populated for the Standalone Exchange Servers.
fqdn (string): Specifies the fully qualified domain name of the
Exchange Server.
guid (string): Specifies the Guid of the Exchange Application Server.
name (string): Specifies the display name of the Exchange
Application Server.
total_size_bytes (int): Specifies the total size of all Exchange
database copies in all the Exchange Application Servers that are
part of the DAG.
"""
# Create a mapping from Model property names to API property names
_names = {
"database_copy_info_list": 'databaseCopyInfoList',
"database_info_list":'databaseInfoList',
"fqdn": 'fqdn',
"guid": 'guid',
"name": 'name',
"total_size_bytes":'totalSizeBytes'
}
def __init__(self,
database_copy_info_list=None,
database_info_list=None,
fqdn=None,
guid=None,
name=None,
total_size_bytes=None):
"""Constructor for the ApplicationServerInfo class"""
# Initialize members of the class
self.database_copy_info_list = database_copy_info_list
self.database_info_list = database_info_list
self.fqdn = fqdn
self.guid = guid
self.name = name
self.total_size_bytes = total_size_bytes
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
database_copy_info_list = None
if dictionary.get('databaseCopyInfoList') != None:
database_copy_info_list = list()
for structure in dictionary.get('databaseCopyInfoList'):
database_copy_info_list.append(cohesity_management_sdk.models.exchange_database_copy_info.ExchangeDatabaseCopyInfo.from_dictionary(structure))
database_info_list = None
if dictionary.get('databaseInfoList') != None:
database_info_list = list()
for structure in dictionary.get('databaseInfoList'):
database_info_list.append(cohesity_management_sdk.models.exchange_database_info.ExchangeDatabaseInfo.from_dictionary(structure))
fqdn = dictionary.get('fqdn')
guid = dictionary.get('guid')
name = dictionary.get('name')
total_size_bytes = dictionary.get('totalSizeBytes')
# Return an object of this model
return cls(database_copy_info_list,
database_info_list,
fqdn,
guid,
name,
total_size_bytes)
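# Illustrative sketch (not part of the original module): round-tripping a minimal,
# made-up server description through from_dictionary().
#
#   payload = {'fqdn': 'exch01.example.com', 'guid': 'abc-123', 'name': 'exch01',
#              'totalSizeBytes': 1024}
#   info = ApplicationServerInfo.from_dictionary(payload)
#   assert info.fqdn == 'exch01.example.com' and info.total_size_bytes == 1024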
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'main_page_behavior',
'dependencies': [
'../animation/compiled_resources2.gyp:animation',
'../compiled_resources2.gyp:route',
'settings_section',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:assert',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:util',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'settings_animated_pages',
'dependencies': [
'../compiled_resources2.gyp:route',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:assert',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:load_time_data',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'settings_page_visibility',
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'settings_section',
'dependencies': [
'../animation/compiled_resources2.gyp:animation',
'<(EXTERNS_GYP):web_animations',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'settings_subpage',
'dependencies': [
'../compiled_resources2.gyp:route',
'settings_subpage_search',
'<(DEPTH)/third_party/polymer/v1_0/components-chromium/iron-resizable-behavior/compiled_resources2.gyp:iron-resizable-behavior-extracted',
'<(DEPTH)/third_party/polymer/v1_0/components-chromium/neon-animation/compiled_resources2.gyp:neon-animatable-behavior-extracted',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:assert',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'settings_subpage_search',
'dependencies': [
'<(DEPTH)/third_party/polymer/v1_0/components-chromium/paper-icon-button/compiled_resources2.gyp:paper-icon-button-extracted',
'<(DEPTH)/third_party/polymer/v1_0/components-chromium/paper-input/compiled_resources2.gyp:paper-input-container-extracted',
'<(DEPTH)/ui/webui/resources/cr_elements/cr_search_field/compiled_resources2.gyp:cr_search_field_behavior',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:assert',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
],
}
|
#
# coding=utf-8
import unittest
import sys
import os
from io import open
import openpyxl as xl
from pptx_template.xlsx_model import _build_tsv, _format_cell_value, generate_whole_model
class Cell:
def __init__(self, value, number_format):
self.value = value
self.number_format = number_format
def _to_cells(list_of_list):
    return [[Cell(value, '') for value in row] for row in list_of_list]
class MyTest(unittest.TestCase):
def test_build_tsv(self):
tsv = _build_tsv([_to_cells([["Year","A","B"],["2016",100,200]])])
self.assertEqual([["Year","A","B"],["2016",100,200]], tsv)
    def test_build_tsv_transpose(self):
tsv = _build_tsv([_to_cells([["Year","A","B"],["2016",100,200]])], transpose=True)
self.assertEqual([["Year","2016"],["A",100],["B",200]], tsv)
def test_build_tsv_side_by_side(self):
tsv = _build_tsv([_to_cells([["Year","A"],["2016",100]]), _to_cells([["B"],[200]])], side_by_side=True)
self.assertEqual([["Year","A","B"],["2016",100,200]], tsv)
def test_format_cell_value(self):
self.assertEqual(123.45678, _format_cell_value(Cell(123.45678, '')))
self.assertEqual("123", _format_cell_value(Cell(123.45678, '0')))
self.assertEqual("123.46", _format_cell_value(Cell(123.45678, '0.00')))
self.assertEqual("123.5", _format_cell_value(Cell(123.45678, '0.0_')))
self.assertEqual("12345.7%", _format_cell_value(Cell(123.45678, '0.0%_')))
self.assertEqual("12345%", _format_cell_value(Cell(123.45678, '0%_')))
def test_generate_whole_model(self):
def read_expect(name):
file_name = os.path.join(os.path.dirname(__file__), 'data2', name)
f = open(file_name, mode = 'r', encoding = 'utf-8')
result = f.read()
f.close()
return result
xls_file = os.path.join(os.path.dirname(__file__), 'data2', 'in.xlsx')
slides = generate_whole_model(xls_file, {})
self.assertEqual(u'Hello!', slides['p01']['greeting']['en'])
self.assertEqual(u'こんにちは!', slides['p01']['greeting']['ja'])
self.assertEqual([
['Season', u'売り上げ', u'利益', u'利益率'],
[u'春', 100, 50, 0.5],
[u'夏', 110, 60, 0.5],
[u'秋', 120, 70, 0.5],
[u'冬', 130, 0, 0.6],
], slides['p02']['array'])
self.assertEqual(read_expect('p02-normal.tsv'), slides['p02']['normal']['tsv_body'])
self.assertEqual(read_expect('p02-transpose.tsv'), slides['p02']['transpose']['tsv_body'])
self.assertEqual(read_expect('p02-sidebyside.tsv'), slides['p02']['sidebyside']['tsv_body'])
if __name__ == '__main__':
unittest.main()
|
import base64
import copy
import hashlib
import json
from botocore.exceptions import ClientError
import pytest
from ..test_utils import import_lambda
sdk_analysis = import_lambda(
"sdk_analysis",
mock_imports=[
"pulse3D.plate_recording",
"pulse3D.constants",
"pulse3D.excel_writer",
"pymysql",
"pandas",
],
)
TEST_BUCKET_NAME = "test_name"
TEST_OBJECT_KEY = "customer_id/username/test_key"
TEST_RECORD = {"s3": {"bucket": {"name": TEST_BUCKET_NAME}, "object": {"key": TEST_OBJECT_KEY}}}
TEST_FILENAME = TEST_OBJECT_KEY.rsplit("/", 1)[1]
@pytest.fixture(scope="function", name="mocked_boto3_client")
def fixture_mocked_boto3_client(mocker):
mocked_sqs_client = mocker.Mock()
mocked_ssm_client = mocker.Mock()
mocked_s3_client = mocker.Mock()
mocked_ec2_client = mocker.Mock()
mocked_s3_client.head_object.return_value = {"Metadata": {"upload-id": "test-id"}}
mocked_dynamodb_client = mocker.Mock()
def se(client_type):
if client_type == "sqs":
return mocked_sqs_client
if client_type == "s3":
return mocked_s3_client
if client_type == "dynamodb":
return mocked_dynamodb_client
if client_type == "secretsmanager":
return mocked_ssm_client
if client_type == "ec2":
return mocked_ec2_client
mocker.patch.object(sdk_analysis.boto3, "client", autospec=True, side_effect=se)
yield {
"sqs": mocked_sqs_client,
"s3": mocked_s3_client,
"dynamodb": mocked_dynamodb_client,
"secretsmanager": mocked_ssm_client,
"ec2": mocked_ec2_client,
}
def test_sdk_analysis__logs_exception_when_receiving_message_from_sqs_fails(mocker, mocked_boto3_client):
mocked_sqs_client = mocked_boto3_client["sqs"]
expected_error = ClientError({}, "")
mocked_sqs_client.receive_message.side_effect = expected_error
spied_logger_exception = mocker.spy(sdk_analysis.logger, "exception")
sdk_analysis.handler(max_num_loops=1)
spied_logger_exception.assert_called_once_with(f"receive_message failed. Error: {expected_error}")
def test_sdk_analysis__sleeps_after_each_loop_but_not_in_final_loop(mocker, mocked_boto3_client):
mocked_sqs_client = mocked_boto3_client["sqs"]
mocked_sleep = mocker.patch.object(sdk_analysis, "sleep", autospec=True)
# Tanner (9/23/21): mocking receive_message to have error raised here in order to avoid mocking multiple other objects
mocked_sqs_client.receive_message.side_effect = ClientError({}, "")
sdk_analysis.handler(max_num_loops=2)
mocked_sleep.assert_called_once_with(5)
def test_sdk_analysis__gets_messages_from_sqs_queue_correctly(mocker, mocked_boto3_client):
mocked_sqs_client = mocked_boto3_client["sqs"]
mocked_sqs_client.receive_message.return_value = {}
expected_sqs_url = "test_url"
mocker.patch.object(sdk_analysis, "SQS_URL", expected_sqs_url)
sdk_analysis.handler(max_num_loops=1)
mocked_sqs_client.receive_message.assert_called_once_with(
QueueUrl=expected_sqs_url, MaxNumberOfMessages=1, WaitTimeSeconds=10
)
def test_sdk_analysis__deletes_messages_from_sqs_queue_after_processing_them(mocker, mocked_boto3_client):
mocked_sqs_client = mocked_boto3_client["sqs"]
expected_sqs_url = "test_url"
mocker.patch.object(sdk_analysis, "SQS_URL", expected_sqs_url)
test_message = {"ReceiptHandle": "rh"}
test_message_list = [test_message] * 3
mocked_sqs_client.receive_message.return_value = {"Messages": test_message_list}
sdk_analysis.handler(max_num_loops=1)
assert mocked_sqs_client.delete_message.call_count == len(test_message_list)
    mocked_sqs_client.delete_message.assert_called_with(
QueueUrl=expected_sqs_url, ReceiptHandle=test_message["ReceiptHandle"]
)
@pytest.mark.parametrize(
"test_message",
[
{},
{"Body": json.dumps({})},
{"Body": json.dumps({"other_key": "val"})},
{"Body": json.dumps({"Records": []})},
{"Body": json.dumps({"Records": [{}]})},
{"Body": json.dumps({"Records": [{"eventSource": "aws:s3"}]})},
{"Body": json.dumps({"Records": [{"eventName": "ObjectCreated:Post"}]})},
],
)
def test_sdk_analysis__does_not_process_message_or_record_from_sqs_queue_that_is_not_formatted_correctly(
test_message, mocker, mocked_boto3_client
):
mocked_sqs_client = mocked_boto3_client["sqs"]
test_message.update({"ReceiptHandle": "rh"})
mocked_sqs_client.receive_message.return_value = {"Messages": [test_message]}
spied_process_record = mocker.spy(sdk_analysis, "process_record")
sdk_analysis.handler(max_num_loops=1)
spied_process_record.assert_not_called()
def test_sdk_analysis__processes_each_record_of_each_record_of_each_message_from_sqs_queue(
mocker, mocked_boto3_client
):
mocked_sqs_client = mocked_boto3_client["sqs"]
mocked_s3_client = mocked_boto3_client["s3"]
mocked_dynamodb_client = mocked_boto3_client["dynamodb"]
test_num_records = 5
test_records = [
{"eventSource": "aws:s3", "eventName": "ObjectCreated:Post", "num": i}
for i in range(test_num_records)
]
test_messages = [
{"Body": json.dumps({"Records": records}), "ReceiptHandle": "rh"}
for records in (test_records[:2], test_records[2:])
]
mocked_sqs_client.receive_message.return_value = {"Messages": test_messages}
mocked_process_record = mocker.patch.object(sdk_analysis, "process_record")
sdk_analysis.handler(max_num_loops=1)
assert mocked_process_record.call_count == test_num_records
for record in test_records:
mocked_process_record.assert_any_call(record, mocked_s3_client, mocked_dynamodb_client)
def test_sdk_analysis__handles_info_logging_pertaining_to_sqs_queue(mocker, mocked_boto3_client):
mocked_sqs_client = mocked_boto3_client["sqs"]
test_message_list = []
mocked_sqs_client.receive_message.return_value = {"Messages": test_message_list}
expected_sqs_url = "test_url"
mocker.patch.object(sdk_analysis, "SQS_URL", expected_sqs_url)
spied_logger_info = mocker.spy(sdk_analysis.logger, "info")
sdk_analysis.handler(max_num_loops=1)
spied_logger_info.assert_any_call(f"Receiving messages on {expected_sqs_url}")
spied_logger_info.assert_any_call(f"Received: {len(test_message_list)}")
spied_logger_info.assert_any_call("Received: 0")
def test_process_record__retrieves_metadata_of_file_correctly(mocked_boto3_client):
mocked_s3_client = mocked_boto3_client["s3"]
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
mocked_s3_client.head_object.assert_called_once_with(Bucket=TEST_BUCKET_NAME, Key=TEST_OBJECT_KEY)
def test_process_record__logs_error_when_one_is_raised_while_retrieving_metadata_from_s3_and_does_not_attempt_to_download_the_file(
mocker, mocked_boto3_client
):
mocked_s3_client = mocked_boto3_client["s3"]
expected_error = ClientError({}, "")
mocked_s3_client.head_object.side_effect = expected_error
spied_logger_error = mocker.spy(sdk_analysis.logger, "error")
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
spied_logger_error.assert_called_once_with(
f"Error occurred while retrieving head object of {TEST_BUCKET_NAME}/{TEST_OBJECT_KEY}: {expected_error}"
)
mocked_s3_client.download_file.assert_not_called()
def test_process_record__correctly_downloads_file_to_temporary_directory(mocker, mocked_boto3_client):
mocked_s3_client = mocked_boto3_client["s3"]
spied_temporary_dir = mocker.spy(sdk_analysis.tempfile, "TemporaryDirectory")
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
spied_temporary_dir.assert_called_once_with(dir="/tmp")
mocked_s3_client.download_file.assert_called_once_with(
TEST_BUCKET_NAME, TEST_OBJECT_KEY, f"{spied_temporary_dir.spy_return.name}/{TEST_FILENAME}"
)
def test_process_record__handles_error_raised_while_downloading_file_from_s3(mocker, mocked_boto3_client):
mocked_s3_client = mocked_boto3_client["s3"]
expected_upload_id = mocked_s3_client.head_object.return_value["Metadata"]["upload-id"]
expected_error = ClientError({}, "")
mocked_s3_client.download_file.side_effect = expected_error
spied_logger_error = mocker.spy(sdk_analysis.logger, "error")
spied_update_status = mocker.spy(sdk_analysis, "update_sdk_status")
spied_pr_from_dir = mocker.spy(sdk_analysis.PlateRecording, "from_directory")
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
spied_logger_error.assert_called_once_with(
f"Failed to download {TEST_BUCKET_NAME}/{TEST_OBJECT_KEY}: {expected_error}"
)
spied_update_status.assert_called_once_with(
mocked_boto3_client["dynamodb"], expected_upload_id, "error accessing file"
)
spied_pr_from_dir.assert_not_called()
def test_process_record__sets_file_status_to_analysis_running_then_runs_sdk_analysis_on_file(
mocker, mocked_boto3_client
):
mocked_s3_client = mocked_boto3_client["s3"]
expected_upload_id = mocked_s3_client.head_object.return_value["Metadata"]["upload-id"]
spied_temporary_dir = mocker.spy(sdk_analysis.tempfile, "TemporaryDirectory")
mocked_pr_from_dir = mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True)
pr = mocked_pr_from_dir.return_value.__next__()
error_tracker = {"funcs_called_out_of_order": False}
def se(*args):
if args[-1] == "analysis running":
error_tracker["funcs_called_out_of_order"] = mocked_pr_from_dir.call_count != 0
mocked_update_status = mocker.patch.object(
sdk_analysis, "update_sdk_status", autospec=True, side_effect=se
)
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
assert error_tracker["funcs_called_out_of_order"] is False
assert mocked_update_status.call_args_list[0] == mocker.call(
mocked_boto3_client["dynamodb"], expected_upload_id, "analysis running"
)
mocked_pr_from_dir.assert_called_once_with(spied_temporary_dir.spy_return)
sdk_analysis.write_xlsx.assert_called_with(pr, name=f"{TEST_FILENAME}.xlsx")
def test_process_record__handles_error_raised_while_running_sdk_analysis(mocker, mocked_boto3_client):
expected_upload_id = mocked_boto3_client["s3"].head_object.return_value["Metadata"]["upload-id"]
expected_error = Exception("test_exception")
mocker.patch.object(
sdk_analysis.PlateRecording, "from_directory", autospec=True, side_effect=expected_error
)
spied_logger_error = mocker.spy(sdk_analysis.logger, "error")
mocked_update_status = mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True)
sdk_analysis.process_record(
copy.deepcopy(TEST_RECORD), mocked_boto3_client["s3"], mocked_boto3_client["dynamodb"]
)
spied_logger_error.assert_called_once_with(f"SDK analysis failed: {expected_error}")
mocked_update_status.assert_called_with(
mocked_boto3_client["dynamodb"], expected_upload_id, "error during analysis"
)
def test_process_record__uploads_file_created_by_sdk_analysis_to_s3_bucket_correctly_and_sets_file_status_to_analysis_complete(
mocker, mocked_boto3_client
):
mocked_s3_client = mocked_boto3_client["s3"]
mocked_dynamo_client = mocked_boto3_client["dynamodb"]
expected_upload_id = mocked_s3_client.head_object.return_value["Metadata"]["upload-id"]
expected_upload_bucket = "test_url"
mocker.patch.object(hashlib, "md5")
mocked_base64 = mocker.patch.object(base64, "b64encode")
expected_md5 = mocked_base64().decode()
mocker.patch.object(sdk_analysis, "S3_UPLOAD_BUCKET", expected_upload_bucket)
mocked_open = mocker.patch("builtins.open", autospec=True)
mocked_update_status = mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True)
mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True)
mocker.patch.object(sdk_analysis.main, "handle_db_metadata_insertions", autospec=True)
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
mocked_open.assert_called_with(f"{TEST_FILENAME}.xlsx", "rb")
mocked_s3_client.put_object.assert_called_once_with(
Body=mocked_open.return_value.__enter__(),
Bucket=expected_upload_bucket,
Key=f"{TEST_OBJECT_KEY}.xlsx",
ContentMD5=expected_md5,
)
assert mocked_update_status.call_args_list[1] == mocker.call(
mocked_dynamo_client, expected_upload_id, "analysis complete"
)
def test_process_record__handles_error_raised_while_uploading_file_to_s3(mocker, mocked_boto3_client):
mocked_s3_client = mocked_boto3_client["s3"]
expected_upload_id = mocked_s3_client.head_object.return_value["Metadata"]["upload-id"]
mocker.patch.object(hashlib, "md5")
mocker.patch.object(base64, "b64encode")
expected_error = Exception("test_exception")
mocked_s3_client.put_object.side_effect = expected_error
expected_upload_bucket = "test_url"
mocker.patch.object(sdk_analysis, "S3_UPLOAD_BUCKET", expected_upload_bucket)
mocker.patch("builtins.open", autospec=True)
mocked_update_status = mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True)
mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True)
spied_logger_error = mocker.spy(sdk_analysis.logger, "error")
mocked_db_handling = mocker.patch.object(
sdk_analysis.main, "handle_db_metadata_insertions", autospec=True
)
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
expected_file_name = f"{TEST_FILENAME}.xlsx"
spied_logger_error.assert_called_with(
f"S3 Upload failed for {expected_file_name} to {expected_upload_bucket}/{TEST_OBJECT_KEY}.xlsx: {expected_error}"
)
mocked_update_status.assert_called_with(
mocked_boto3_client["dynamodb"], expected_upload_id, "error during upload of analyzed file"
)
mocked_db_handling.assert_not_called()
def test_process_record__after_successful_upload_logger_handles_failed_aurora_db_insertion(
mocker, mocked_boto3_client
):
spied_logger_error = mocker.spy(sdk_analysis.logger, "error")
mocked_s3_client = mocked_boto3_client["s3"]
expected_upload_id = mocked_s3_client.head_object.return_value["Metadata"]["upload-id"]
mocker.patch.object(hashlib, "md5")
mocker.patch.object(base64, "b64encode")
expected_upload_bucket = "test_url"
mocker.patch.object(sdk_analysis, "S3_UPLOAD_BUCKET", expected_upload_bucket)
mocker.spy(sdk_analysis.tempfile, "TemporaryDirectory")
mocker.patch("builtins.open", autospec=True)
mocked_update_status = mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True)
mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True)
# mocker.patch.object(sdk_analysis, "write_xslx", autospec=True)
mocker.patch.object(sdk_analysis.main, "handle_db_metadata_insertions", side_effect=Exception("ERROR"))
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
mocked_update_status.assert_called_with(
mocked_boto3_client["dynamodb"], expected_upload_id, "error inserting analysis to database"
)
spied_logger_error.assert_called_with("Recording metadata failed to store in aurora database: ERROR")
def test_process_record__after_successful_upload_logger_handles_successful_aurora_db_insertion(
mocker, mocked_boto3_client
):
spied_logger_info = mocker.spy(sdk_analysis.logger, "info")
mocked_s3_client = mocked_boto3_client["s3"]
expected_upload_id = mocked_s3_client.head_object.return_value["Metadata"]["upload-id"]
expected_upload_bucket = "test_bucket"
expected_db_cluster_endpoint = "test_host"
expected_file_name = f"{TEST_OBJECT_KEY}.xlsx"
mocker.patch.object(sdk_analysis, "S3_UPLOAD_BUCKET", expected_upload_bucket)
mocker.patch.object(sdk_analysis, "DB_CLUSTER_ENDPOINT", expected_db_cluster_endpoint)
mocker.patch.object(hashlib, "md5")
mocked_base64 = mocker.patch.object(base64, "b64encode")
expected_md5 = mocked_base64().decode()
mocked_open = mocker.patch("builtins.open", autospec=True)
mocked_update_status = mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True)
mocked_PR_instance = mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True)
mocked_db_handling = mocker.patch.object(
sdk_analysis.main, "handle_db_metadata_insertions", autospec=True
)
mocker.patch.object(mocked_s3_client, "put_object")
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
mocked_update_status.assert_any_call(
mocked_boto3_client["dynamodb"], expected_upload_id, "analysis successfully inserted into database"
)
spied_logger_info.assert_any_call(f"Inserting {TEST_FILENAME}.xlsx metadata into aurora database")
test_args = [
mocked_open.return_value.__enter__(),
mocked_PR_instance.return_value.__next__(),
expected_md5,
]
mocked_db_handling.assert_called_with(
expected_upload_bucket, expected_file_name, expected_db_cluster_endpoint, test_args
)
def test_set_info_dict__correctly_retrieves_aws_credentials(mocker, mocked_boto3_client):
mocked_s3_client = mocked_boto3_client["s3"]
expected_upload_bucket = "test_url"
mocker.patch.object(sdk_analysis, "S3_UPLOAD_BUCKET", expected_upload_bucket)
mocker.patch.object(hashlib, "md5")
mocker.patch.object(base64, "b64encode")
mocker.patch.object(sdk_analysis.main, "get_ssm_secrets", return_value=("test_username", "test_password"))
mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True)
mocker.patch("builtins.open", autospec=True)
mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True)
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
expected_info_dict = {
"db_name": "mantarray_recordings",
"db_password": "test_password",
"db_username": "test_username",
}
assert sdk_analysis.main.INFO_DICT == expected_info_dict
def test_load_data_into_dataframe__successfully_gets_called_after_successful_db_connection(
mocker, mocked_boto3_client
):
mocked_s3_client = mocked_boto3_client["s3"]
mocker.patch.object(hashlib, "md5")
mocker.patch.object(base64, "b64encode")
mocker.patch.object(sdk_analysis.main, "get_ssm_secrets", return_value=("test_username", "test_password"))
expected_db_cluster_endpoint = "test_host"
expected_upload_bucket = "test_url"
mocker.patch.object(sdk_analysis, "S3_UPLOAD_BUCKET", expected_upload_bucket)
mocker.patch.object(sdk_analysis, "DB_CLUSTER_ENDPOINT", expected_db_cluster_endpoint)
mocker.patch.object(sdk_analysis.main.pymysql, "connect")
format_spy = mocker.patch.object(sdk_analysis.main, "load_data_to_dataframe")
mocked_open = mocker.patch("builtins.open", autospec=True)
mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True)
mocker.patch.object(mocked_s3_client, "put_object", autospec=True)
mocked_PR_instance = mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True)
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
format_spy.assert_any_call(
mocked_open.return_value.__enter__(), mocked_PR_instance.return_value.__next__()
)
def test_process_record__handles_info_logging(mocker, mocked_boto3_client):
spied_logger_info = mocker.spy(sdk_analysis.logger, "info")
spied_temporary_dir = mocker.spy(sdk_analysis.tempfile, "TemporaryDirectory")
sdk_analysis.process_record(
copy.deepcopy(TEST_RECORD), mocked_boto3_client["s3"], mocked_boto3_client["dynamodb"]
)
spied_logger_info.assert_any_call(f"Retrieving Head Object of {TEST_BUCKET_NAME}/{TEST_OBJECT_KEY}")
spied_logger_info.assert_any_call(
f"Download {TEST_BUCKET_NAME}/{TEST_OBJECT_KEY} to {spied_temporary_dir.spy_return.name}/{TEST_FILENAME}"
)
def test_update_sdk_status__updates_item_correctly(mocker, mocked_boto3_client):
mocked_dynamodb_client = mocked_boto3_client["dynamodb"]
expected_table_name = "test_table"
mocker.patch.object(sdk_analysis, "SDK_STATUS_TABLE", expected_table_name)
test_upload_id = "test_id"
test_status = "test_status"
sdk_analysis.update_sdk_status(mocked_dynamodb_client, test_upload_id, test_status)
mocked_dynamodb_client.update_item.assert_called_once_with(
TableName=expected_table_name,
Key={"upload_id": {"S": test_upload_id}},
UpdateExpression="SET sdk_status = :val",
ExpressionAttributeValues={":val": {"S": test_status}},
ConditionExpression="attribute_exists(upload_id)",
)
def test_update_sdk_status__handles_conditional_check_failed_exceptions_raised_from_updating_item(
mocker, mocked_boto3_client
):
mocked_dynamodb_client = mocked_boto3_client["dynamodb"]
expected_error = ClientError({"Error": {"Code": "ConditionalCheckFailedException"}}, "")
mocked_dynamodb_client.update_item.side_effect = expected_error
expected_table_name = "test_table"
mocker.patch.object(sdk_analysis, "SDK_STATUS_TABLE", expected_table_name)
spied_logger_error = mocker.spy(sdk_analysis.logger, "error")
test_upload_id = "test_id"
test_status = "test_status"
sdk_analysis.update_sdk_status(mocked_dynamodb_client, test_upload_id, test_status)
spied_logger_error.assert_any_call(f"Error: {expected_error}")
spied_logger_error.assert_any_call(
f"Upload ID: {test_upload_id} was not found in table {expected_table_name}"
)
mocked_dynamodb_client.put_item.assert_called_once_with(
TableName=expected_table_name,
Item={"upload_id": {"S": test_upload_id}, "sdk_status": {"S": test_status}},
)
def test_update_sdk_status__logs_other_aws_errors_raised_from_updating_item(mocker, mocked_boto3_client):
mocked_dynamodb_client = mocked_boto3_client["dynamodb"]
expected_error = ClientError({"Error": {"Code": "SomeOtherException"}}, "")
mocked_dynamodb_client.update_item.side_effect = expected_error
expected_table_name = "test_table"
mocker.patch.object(sdk_analysis, "SDK_STATUS_TABLE", expected_table_name)
spied_logger_error = mocker.spy(sdk_analysis.logger, "error")
test_upload_id = "test_id"
test_status = "test_status"
sdk_analysis.update_sdk_status(mocked_dynamodb_client, test_upload_id, test_status)
spied_logger_error.assert_called_once_with(f"Error: {expected_error}")
mocked_dynamodb_client.put_item.assert_not_called()
|
from typing import TypeVar, AsyncIterator, Sequence
from chris.common.types import PluginUrl
from chris.common.client import AuthenticatedClient
from chris.common.search import get_paginated, to_sequence
import chris.common.decorator as http
from chris.cube.types import ComputeResourceName, PfconUrl
from chris.cube.deserialization import CubeCollectionLinks, CubePlugin, ComputeResource
_T = TypeVar("_T")
class CubeClient(AuthenticatedClient[CubeCollectionLinks, CubePlugin, "CubeClient"]):
@http.post("/chris-admin/api/v1/")
async def register_plugin(
self, plugin_store_url: PluginUrl, compute_name: ComputeResourceName
) -> CubePlugin:
...
@http.post("/chris-admin/api/v1/computeresources/")
async def create_compute_resource(
self,
name: ComputeResourceName,
compute_url: PfconUrl,
compute_user: str,
compute_password: str,
description: str = "",
) -> ComputeResource:
...
def get_compute_resources_of(
self, plugin: CubePlugin
) -> AsyncIterator[ComputeResource]:
return get_paginated(
session=self.s, url=plugin.compute_resources, element_type=ComputeResource
)
def search_compute_resources(
self, max_requests=100, **query
) -> AsyncIterator[ComputeResource]:
return self.search(
url=self.collection_links.compute_resources,
query=query,
element_type=ComputeResource,
max_requests=max_requests,
)
async def get_all_compute_resources(self) -> Sequence[ComputeResource]:
return await to_sequence(self.search_compute_resources())
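# Illustrative usage sketch (not part of the client; assumes an already
# authenticated ``client: CubeClient`` obtained elsewhere through the
# AuthenticatedClient machinery, which is not shown in this module):
#
#     async for cr in client.search_compute_resources(name="host"):
#         print(cr)
#     everything = await client.get_all_compute_resources()
#
# ``search_compute_resources`` returns a lazy AsyncIterator backed by
# paginated requests, while ``get_all_compute_resources`` eagerly collects
# the same search into a Sequence.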
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2020-04-07 17:42
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('crawl', '0015_remove_article_news_source'),
]
operations = [
migrations.RenameField(
model_name='article',
old_name='source',
new_name='news_source',
),
]
|
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Skylib module containing functions that operate on dictionaries."""
def _add(*dictionaries):
"""Returns a new `dict` that has all the entries of the given dictionaries.
If the same key is present in more than one of the input dictionaries, the
last of them in the argument list overrides any earlier ones.
This function is designed to take zero or one arguments as well as multiple
dictionaries, so that it follows arithmetic identities and callers can avoid
special cases for their inputs: the sum of zero dictionaries is the empty
dictionary, and the sum of a single dictionary is a copy of itself.
Args:
*dictionaries: Zero or more dictionaries to be added.
Returns:
A new `dict` that has all the entries of the given dictionaries.
"""
result = {}
for d in dictionaries:
result.update(d)
return result
dicts = struct(
add = _add,
)
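# Illustrative example (not part of the module): later entries win on key
# collisions, and zero or one arguments behave like arithmetic identities:
#   dicts.add()                            == {}
#   dicts.add({"a": 1})                    == {"a": 1}
#   dicts.add({"a": 1}, {"a": 2, "b": 3})  == {"a": 2, "b": 3}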
|
"""
artificial measure
------------------
Creation of artificial measure
"""
import numpy as np
############################### Create measure ################################
###############################################################################
def create_artificial_measure_array(n_k, n_vals_i, n_feats):
"""Create artificial random measure in the array form.
Parameters
----------
n_k: int
the number of perturbations
n_vals_i: int
the number of indices of the output measure.
n_feats: int
the number of features.
Returns
-------
measure: np.ndarray
the transformed measure computed by the whole spatial descriptor model.
"""
measure = np.random.random((n_vals_i, n_feats, n_k))
return measure
def create_artificial_measure_append(n_k, n_vals_i, n_feats):
"""Create artificial random measure in the list form.
Parameters
----------
n_k: int
the number of perturbations
n_vals_i: int
the number of indices of the output measure.
n_feats: int
the number of features.
Returns
-------
measure: list
the transformed measure computed by the whole spatial descriptor model.
"""
rounds = np.random.randint(1, 40)
measure = create_empty_append(n_k, n_vals_i, n_feats)
for i in range(rounds):
n_iss = np.random.randint(1, 10)
vals_i = create_vals_i(n_iss, n_vals_i, n_k)
x_i = create_features_i_dict(n_feats, n_iss, n_k)
for k in range(len(vals_i)):
for i in range(len(vals_i[k])):
measure[k][vals_i[k][i]].append(x_i[k][i])
return measure
def create_artificial_measure_replacelist(n_k, n_vals_i, n_feats,
unique_=False):
"""Create artificial random measure in the replacelist form.
Parameters
----------
n_k: int
the number of perturbations
n_vals_i: int
the number of indices of the output measure.
n_feats: int
the number of features.
unique_: boolean (default=False)
        if True, the generated indices never collapse (each round draws a
        fresh, non-repeating block of indices).
Returns
-------
measure: list
the transformed measure computed by the whole spatial descriptor model.
"""
last = 0
rounds = np.random.randint(1, 40)
measure = create_empty_replacelist(n_k, n_vals_i, n_feats)
for i in range(rounds):
n_iss = np.random.randint(1, 10)
if unique_:
vals_i = np.array([last+np.arange(n_iss)]*n_k)
last += n_iss
else:
vals_i = create_vals_i(n_iss, n_vals_i, n_k)
x_i = create_features_i_dict(n_feats, n_iss, n_k)
for k in range(len(vals_i)):
measure[k][0].append(x_i[k])
measure[k][1].append(vals_i[k])
return measure
############################### Empty measure #################################
###############################################################################
def create_empty_array(n_k, n_vals_i, n_feats):
"""Create null measure in the array form.
Parameters
----------
n_k: int
the number of perturbations
n_vals_i: int
the number of indices of the output measure.
n_feats: int
the number of features.
Returns
-------
measure: np.ndarray
        the null measure to be filled by the computation of the spatial
descriptor model.
"""
return np.zeros((n_vals_i, n_feats, n_k))
def create_empty_append(n_k, n_iss, n_feats):
"""Create null measure in the list form.
Parameters
----------
n_k: int
the number of perturbations
    n_iss: int
        the number of indices of the output measure.
    n_feats: int
        the number of features.
    Returns
    -------
    measure: list
        the null measure to be filled by the computation of the spatial
        descriptor model.
    """
    # Build independent inner lists: ``[[[]]*n_iss]*n_k`` would alias a single
    # empty list across every slot, so later appends would leak between them.
    return [[[] for _ in range(n_iss)] for _ in range(n_k)]
def create_empty_replacelist(n_k, n_iss, n_feats):
"""Create null measure in the replacelist form.
Parameters
----------
n_k: int
the number of perturbations
    n_iss: int
        the number of indices of the output measure.
    n_feats: int
        the number of features.
    Returns
    -------
    measure: list
        the null measure to be filled by the computation of the spatial
        descriptor model.
    """
    # One independent [values, indices] pair per perturbation: multiplying a
    # single nested list by n_k would make every k share the same two lists.
    return [[[], []] for _ in range(n_k)]
############################### Vals_i creation ###############################
###############################################################################
def create_vals_i(n_iss, nvals, n_k):
"""
Parameters
----------
n_k: int
the number of perturbations
n_vals_i: int
the number of indices of the output measure.
n_feats: int
the number of features.
Returns
-------
vals_i: np.ndarray
the associated stored indices for the element indices.
"""
return np.random.randint(1, nvals, n_iss*n_k).reshape((n_k, n_iss))
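# Illustrative sketch (hypothetical sizes): create_vals_i(n_iss=3, nvals=10, n_k=2)
# returns an integer array of shape (n_k, n_iss) == (2, 3) with values drawn
# uniformly from the interval [1, 10).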
############################### Empty features ################################
###############################################################################
def create_empty_features_array(n_feats, n_iss, n_k):
"""Create null features for different iss in an array-form.
Parameters
----------
n_feats: int
the number of features.
n_iss: int
the number of the elements to create their features.
n_k: int
the number of perturbations.
Returns
-------
features: np.ndarray
the null features we want to compute.
"""
return np.zeros((n_k, n_iss, n_feats))
def create_empty_features_dict(n_feats, n_iss, n_k):
"""Create null features for different iss in an listdict-form.
Parameters
----------
n_feats: int
the number of features.
n_iss: int
the number of the elements to create their features.
n_k: int
the number of perturbations.
Returns
-------
features: list
the null features we want to compute.
"""
    # Independent empty dicts per (k, iss) slot, avoiding shared references.
    return [[{} for _ in range(n_iss)] for _ in range(n_k)]
################################ X_i features #################################
###############################################################################
def create_features_i_array(n_feats, n_iss, n_k):
"""Create null features for different iss in an array-form.
Parameters
----------
n_feats: int
the number of features.
n_iss: int
the number of the elements to create their features.
n_k: int
the number of perturbations.
Returns
-------
features: np.ndarray
        the random features we want to compute.
"""
x_i = np.random.random((n_k, n_iss, n_feats))
return x_i
def create_features_i_dict(n_feats, n_iss, n_k):
"""Create null features for different iss in an listdict-form.
Parameters
----------
n_feats: int
the number of features.
n_iss: int
the number of the elements to create their features.
n_k: int
the number of perturbations.
Returns
-------
features: list
        the random features we want to compute.
"""
x_i = []
for k in range(n_k):
x_i_k = []
for i in range(n_iss):
keys = np.unique(np.random.randint(1, n_feats, n_feats))
keys = [str(e) for e in keys]
values = np.random.random(len(keys))
x_i_k.append(dict(zip(keys, values)))
x_i.append(x_i_k)
return x_i
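# Illustrative sketch (hypothetical sizes): create_features_i_dict(n_feats=3,
# n_iss=2, n_k=1) returns a length-1 list (one entry per perturbation), whose
# single element is a length-2 list of dicts mapping feature-index strings to
# random floats, e.g. [[{'1': 0.42, '2': 0.07}, {'2': 0.91}]].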
|
from .prettifier import prettify
from .prettifier.common import assert_prettifier_works
import pytoml
def test_prettifying_against_humanly_verified_sample():
toml_source = open('sample.toml').read()
expected = open('sample-prettified.toml').read()
assert_prettifier_works(toml_source, expected, prettify)
assert pytoml.loads(toml_source) == pytoml.loads(expected)
|
# coding: utf-8
"""
Mux API
Mux is how developers build online video. This API encompasses both Mux Video and Mux Data functionality to help you build your video-related projects better and faster than ever before. # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import inspect
import pprint
import re # noqa: F401
import six
from mux_python.configuration import Configuration
class SignalLiveStreamCompleteResponse(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'data': 'object'
}
attribute_map = {
'data': 'data'
}
def __init__(self, data=None, local_vars_configuration=None): # noqa: E501
"""SignalLiveStreamCompleteResponse - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._data = None
self.discriminator = None
if data is not None:
self.data = data
@property
def data(self):
"""Gets the data of this SignalLiveStreamCompleteResponse. # noqa: E501
:return: The data of this SignalLiveStreamCompleteResponse. # noqa: E501
:rtype: object
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this SignalLiveStreamCompleteResponse.
:param data: The data of this SignalLiveStreamCompleteResponse. # noqa: E501
:type data: object
"""
self._data = data
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = inspect.getargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SignalLiveStreamCompleteResponse):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, SignalLiveStreamCompleteResponse):
return True
return self.to_dict() != other.to_dict()
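# Illustrative sketch (hypothetical payload, not taken from the Mux API):
#   resp = SignalLiveStreamCompleteResponse(data={"status": "ok"})
#   resp.to_dict()  ->  {'data': {'status': 'ok'}}
#   resp == SignalLiveStreamCompleteResponse(data={"status": "ok"})  ->  True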
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440-pre"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "keras_ocr/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands, ))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
["describe", "--tags", "--dirty", "--always", "--long", "--match",
"%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date")
}
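# Illustrative sketch (hypothetical pieces, not taken from a real checkout):
# for pieces = {"closest-tag": "1.2", "distance": 3, "short": "abc1234",
# "dirty": False, "long": "<full sha>", "error": None, "date": None},
# render(pieces, "pep440")["version"] == "1.2+3.gabc1234" and
# render(pieces, "pep440-pre")["version"] == "1.2.post.dev3"; with
# distance == 0 on a clean tree both styles collapse to just "1.2".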
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None
}
|
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from datetime import date, datetime
from vistrails.core.system import strftime, time_strptime
class XMLDAO:
def __init__(self):
pass
def getAttribute(self, node, attr):
try:
attribute = node.attributes.get(attr)
if attribute is not None:
return attribute.value
except KeyError:
pass
return None
def convertFromStr(self, value, type):
if value is not None:
if type == 'str':
return str(value)
elif value.strip() != '':
if type == 'long':
return long(value)
elif type == 'float':
return float(value)
elif type == 'int':
return int(value)
elif type == 'date':
return date(*time_strptime(value, '%Y-%m-%d')[0:3])
elif type == 'datetime':
return datetime(*time_strptime(value, '%Y-%m-%d %H:%M:%S')[0:6])
return None
def convertToStr(self, value, type):
if value is not None:
if type == 'date':
return value.isoformat()
elif type == 'datetime':
return strftime(value, '%Y-%m-%d %H:%M:%S')
else:
return str(value)
return ''
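    # Illustrative round-trip sketch (hypothetical values, not VisTrails code):
    #   dao = XMLDAO()
    #   dao.convertToStr(date(2014, 1, 31), 'date')   ->  '2014-01-31'
    #   dao.convertFromStr('2014-01-31', 'date')      ->  date(2014, 1, 31)
    # The 'datetime' type round-trips through the '%Y-%m-%d %H:%M:%S' format.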
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module helps emulate Visual Studio 2008 behavior on top of other
build systems, primarily ninja.
"""
import collections
import os
import pickle
import re
import subprocess
import sys
import time
import hashlib
from gyp.common import OrderedSet
import gyp.MSVSUtil
import gyp.MSVSVersion
from gyp import DebugOutput, DEBUG_GENERAL
try:
import sys
reload(sys)
sys.setdefaultencoding('utf8')
except:
pass
try:
basestring = basestring
except NameError:
basestring = str
windows_quoter_regex = re.compile(r'(\\*)"')
def QuoteForRspFile(arg):
"""Quote a command line argument so that it appears as one argument when
processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for
Windows programs)."""
# See http://goo.gl/cuFbX and http://goo.gl/dhPnp including the comment
# threads. This is actually the quoting rules for CommandLineToArgvW, not
# for the shell, because the shell doesn't do anything in Windows. This
# works more or less because most programs (including the compiler, etc.)
# use that function to handle command line arguments.
# Use a heuristic to try to find args that are paths, and normalize them
if arg.find('/') > 0 or arg.count('/') > 1:
arg = os.path.normpath(arg)
# For a literal quote, CommandLineToArgvW requires 2n+1 backslashes
# preceding it, and results in n backslashes + the quote. So we substitute
# in 2* what we match, +1 more, plus the quote.
arg = windows_quoter_regex.sub(lambda mo: 2 * mo.group(1) + '\\"', arg)
# %'s also need to be doubled otherwise they're interpreted as batch
# positional arguments. Also make sure to escape the % so that they're
# passed literally through escaping so they can be singled to just the
# original %. Otherwise, trying to pass the literal representation that
# looks like an environment variable to the shell (e.g. %PATH%) would fail.
arg = arg.replace('%', '%%')
# These commands are used in rsp files, so no escaping for the shell (via ^)
# is necessary.
# Finally, wrap the whole thing in quotes so that the above quote rule
# applies and whitespace isn't a word break.
return '"' + arg + '"'
def EncodeRspFileList(args):
"""Process a list of arguments using QuoteCmdExeArgument."""
# Note that the first argument is assumed to be the command. Don't add
# quotes around it because then built-ins like 'echo', etc. won't work.
# Take care to normpath only the path in the case of 'call ../x.bat' because
# otherwise the whole thing is incorrectly interpreted as a path and not
# normalized correctly.
if not args: return ''
if args[0].startswith('call '):
call, program = args[0].split(' ', 1)
program = call + ' ' + os.path.normpath(program)
else:
program = os.path.normpath(args[0])
return program + ' ' + ' '.join(QuoteForRspFile(arg) for arg in args[1:])
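# Illustrative sketches (assumed inputs; results follow from the rules above):
#   QuoteForRspFile('a b')    ->  '"a b"'
#   QuoteForRspFile('100%')   ->  '"100%%"'
#   EncodeRspFileList(['cl.exe', '/Fo out\\x.obj'])
#       ->  'cl.exe "/Fo out\\x.obj"'   (the leading command stays unquoted)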
def _GenericRetrieve(root, default, path):
"""Given a list of dictionary keys |path| and a tree of dicts |root|, find
value at path, or return |default| if any of the path doesn't exist."""
if not root:
return default
if not path:
return root
return _GenericRetrieve(root.get(path[0]), default, path[1:])
def _AddPrefix(element, prefix):
"""Add |prefix| to |element| or each subelement if element is iterable."""
if element is None:
return element
if (isinstance(element, collections.Iterable) and
not isinstance(element, basestring)):
return [prefix + e for e in element]
else:
return prefix + element
def _DoRemapping(element, map):
"""If |element| then remap it through |map|. If |element| is iterable then
each item will be remapped. Any elements not found will be removed."""
if map is not None and element is not None:
if not callable(map):
map = map.get # Assume it's a dict, otherwise a callable to do the remap.
if (isinstance(element, collections.Iterable) and
not isinstance(element, basestring)):
element = filter(None, [map(elem) for elem in element])
else:
element = map(element)
return element
def _AppendOrReturn(append, element):
"""If |append| is None, simply return |element|. If |append| is not None,
then add |element| to it, adding each item in |element| if it's a list or
tuple."""
if append is not None and element is not None:
if (isinstance(element, collections.Iterable) and
not isinstance(element, basestring)):
append.extend(element)
else:
append.append(element)
else:
return element
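# Illustrative sketches of the helpers above (hypothetical values):
#   _GenericRetrieve({'a': {'b': 3}}, 0, ['a', 'b'])  ->  3
#   _GenericRetrieve({'a': {}}, 0, ['a', 'b'])        ->  0   (missing -> default)
#   _AddPrefix(['4018', '4244'], '/wd')               ->  ['/wd4018', '/wd4244']
#   _DoRemapping('true', {'true': '/GL'})             ->  '/GL'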
def _FindDirectXInstallation():
"""Try to find an installation location for the DirectX SDK. Check for the
standard environment variable, and if that doesn't exist, try to find
via the registry. May return None if not found in either location."""
# Return previously calculated value, if there is one
if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
return _FindDirectXInstallation.dxsdk_dir
dxsdk_dir = os.environ.get('DXSDK_DIR')
if not dxsdk_dir:
# Setup params to pass to and attempt to launch reg.exe.
cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in p.communicate()[0].splitlines():
if isinstance(line, bytes):
line = line.decode()
if 'InstallPath' in line:
dxsdk_dir = line.split(' ')[3] + "\\"
# Cache return value
_FindDirectXInstallation.dxsdk_dir = dxsdk_dir
return dxsdk_dir
def GetGlobalVSMacroEnv(vs_version):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents. Returns all variables that are independent of the target."""
env = {}
# '$(VSInstallDir)' and '$(VCInstallDir)' are available when and only when
# Visual Studio is actually installed.
if vs_version.Path():
env['$(VSInstallDir)'] = vs_version.Path()
env['$(VCInstallDir)'] = os.path.join(vs_version.Path().decode(), 'VC') + '\\'
# Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be
# set. This happens when the SDK is sync'd via src-internal, rather than
# by typical end-user installation of the SDK. If it's not set, we don't
# want to leave the unexpanded variable in the path, so simply strip it.
dxsdk_dir = _FindDirectXInstallation()
env['$(DXSDK_DIR)'] = dxsdk_dir if dxsdk_dir else ''
# Try to find an installation location for the Windows DDK by checking
# the WDK_DIR environment variable, may be None.
env['$(WDK_DIR)'] = os.environ.get('WDK_DIR', '')
return env
def ExtractSharedMSVSSystemIncludes(configs, generator_flags):
"""Finds msvs_system_include_dirs that are common to all targets, removes
them from all targets, and returns an OrderedSet containing them."""
all_system_includes = OrderedSet(
configs[0].get('msvs_system_include_dirs', []))
for config in configs[1:]:
system_includes = config.get('msvs_system_include_dirs', [])
all_system_includes = all_system_includes & OrderedSet(system_includes)
if not all_system_includes:
return None
# Expand macros in all_system_includes.
env = GetGlobalVSMacroEnv(GetVSVersion(generator_flags))
expanded_system_includes = OrderedSet([ExpandMacros(include, env)
for include in all_system_includes])
if any(['$' in include for include in expanded_system_includes]):
# Some path relies on target-specific variables, bail.
return None
# Remove system includes shared by all targets from the targets.
for config in configs:
includes = config.get('msvs_system_include_dirs', [])
if includes: # Don't insert a msvs_system_include_dirs key if not needed.
# This must check the unexpanded includes list:
new_includes = [i for i in includes if i not in all_system_includes]
config['msvs_system_include_dirs'] = new_includes
return expanded_system_includes
class MsvsSettings(object):
"""A class that understands the gyp 'msvs_...' values (especially the
  msvs_settings field). They largely correspond to the VS2008 IDE DOM. This
class helps map those settings to command line options."""
def __init__(self, spec, generator_flags):
self.spec = spec
self.vs_version = GetVSVersion(generator_flags)
supported_fields = [
('msvs_configuration_attributes', dict),
('msvs_settings', dict),
('msvs_system_include_dirs', list),
('msvs_disabled_warnings', list),
('msvs_precompiled_header', str),
('msvs_precompiled_source', str),
('msvs_configuration_platform', str),
('msvs_target_platform', str),
]
configs = spec['configurations']
for field, default in supported_fields:
setattr(self, field, {})
for configname, config in configs.items():
getattr(self, field)[configname] = config.get(field, default())
self.msvs_cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])
unsupported_fields = [
'msvs_prebuild',
'msvs_postbuild',
]
unsupported = []
for field in unsupported_fields:
for config in configs.values():
if field in config:
unsupported += ["%s not supported (target %s)." %
(field, spec['target_name'])]
if unsupported:
raise Exception('\n'.join(unsupported))
def GetExtension(self):
"""Returns the extension for the target, with no leading dot.
Uses 'product_extension' if specified, otherwise uses MSVS defaults based on
the target type.
"""
ext = self.spec.get('product_extension', None)
if ext:
return ext
return gyp.MSVSUtil.TARGET_TYPE_EXT.get(self.spec['type'], '')
def GetVSMacroEnv(self, base_to_build=None, config=None):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents."""
target_platform = 'Win32' if self.GetArch(config) == 'x86' else 'x64'
target_name = self.spec.get('product_prefix', '') + \
self.spec.get('product_name', self.spec['target_name'])
target_dir = base_to_build + '\\' if base_to_build else ''
target_ext = '.' + self.GetExtension()
target_file_name = target_name + target_ext
replacements = {
'$(InputName)': '${root}',
'$(InputPath)': '${source}',
'$(IntDir)': '$!INTERMEDIATE_DIR',
'$(OutDir)\\': target_dir,
'$(PlatformName)': target_platform,
'$(ProjectDir)\\': '',
'$(ProjectName)': self.spec['target_name'],
'$(TargetDir)\\': target_dir,
'$(TargetExt)': target_ext,
'$(TargetFileName)': target_file_name,
'$(TargetName)': target_name,
'$(TargetPath)': os.path.join(target_dir, target_file_name),
}
replacements.update(GetGlobalVSMacroEnv(self.vs_version))
return replacements
def ConvertVSMacros(self, s, base_to_build=None, config=None):
"""Convert from VS macro names to something equivalent."""
env = self.GetVSMacroEnv(base_to_build, config=config)
return ExpandMacros(s, env)
def AdjustLibraries(self, libraries):
"""Strip -l from library if it's specified with that."""
libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries]
return [lib + '.lib' if not lib.lower().endswith('.lib') else lib
for lib in libs]
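  # Illustrative sketch (hypothetical libraries list): for an MsvsSettings
  # instance ``msvs``, msvs.AdjustLibraries(['-lwinmm', 'kernel32.lib'])
  # returns ['winmm.lib', 'kernel32.lib'] -- the '-l' prefix is stripped and
  # a '.lib' suffix is added where missing.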
def _GetAndMunge(self, field, path, default, prefix, append, map):
"""Retrieve a value from |field| at |path| or return |default|. If
|append| is specified, and the item is found, it will be appended to that
object instead of returned. If |map| is specified, results will be
remapped through |map| before being returned or appended."""
result = _GenericRetrieve(field, default, path)
result = _DoRemapping(result, map)
result = _AddPrefix(result, prefix)
return _AppendOrReturn(append, result)
class _GetWrapper(object):
def __init__(self, parent, field, base_path, append=None):
self.parent = parent
self.field = field
self.base_path = [base_path]
self.append = append
def __call__(self, name, map=None, prefix='', default=None):
return self.parent._GetAndMunge(self.field, self.base_path + [name],
default=default, prefix=prefix, append=self.append, map=map)
def GetArch(self, config):
"""Get architecture based on msvs_configuration_platform and
msvs_target_platform. Returns either 'x86' or 'x64'."""
configuration_platform = self.msvs_configuration_platform.get(config, '')
platform = self.msvs_target_platform.get(config, '')
if not platform: # If no specific override, use the configuration's.
platform = configuration_platform
# Map from platform to architecture.
return {'Win32': 'x86', 'x64': 'x64'}.get(platform, 'x86')
def _TargetConfig(self, config):
"""Returns the target-specific configuration."""
# There's two levels of architecture/platform specification in VS. The
# first level is globally for the configuration (this is what we consider
# "the" config at the gyp level, which will be something like 'Debug' or
# 'Release'), VS2015 and later only use this level
if int(self.vs_version.short_name) >= 2015:
return config
# and a second target-specific configuration, which is an
# override for the global one. |config| is remapped here to take into
# account the local target-specific overrides to the global configuration.
#arch = self.GetArch(config)
#if arch == 'x64' and not config.endswith('_x64'):
# config += '_x64'
#if arch == 'x86' and config.endswith('_x64'):
# config = config.rsplit('_', 1)[0]
return config
def _Setting(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_settings."""
return self._GetAndMunge(
self.msvs_settings[config], path, default, prefix, append, map)
def _ConfigAttrib(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_configuration_attributes."""
return self._GetAndMunge(
self.msvs_configuration_attributes[config],
path, default, prefix, append, map)
def AdjustIncludeDirs(self, include_dirs, config):
"""Updates include_dirs to expand VS specific paths, and adds the system
include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def AdjustMidlIncludeDirs(self, midl_include_dirs, config):
"""Updates midl_include_dirs to expand VS specific paths, and adds the
system include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = midl_include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCMIDLTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def GetComputedDefines(self, config):
"""Returns the set of defines that are injected to the defines list based
on other VS settings."""
config = self._TargetConfig(config)
defines = []
if self._ConfigAttrib(['CharacterSet'], config) == '1':
defines.extend(('_UNICODE', 'UNICODE'))
if self._ConfigAttrib(['CharacterSet'], config) == '2':
defines.append('_MBCS')
defines.extend(self._Setting(
('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[]))
return defines
def GetCompilerPdbName(self, config, expand_special):
"""Get the pdb file name that should be used for compiler invocations, or
None if there's no explicit name specified."""
config = self._TargetConfig(config)
pdbname = self._Setting(
('VCCLCompilerTool', 'ProgramDataBaseFileName'), config)
if pdbname:
pdbname = expand_special(self.ConvertVSMacros(pdbname))
return pdbname
def GetMapFileName(self, config, expand_special):
"""Gets the explicitly overriden map file name for a target or returns None
if it's not set."""
config = self._TargetConfig(config)
map_file = self._Setting(('VCLinkerTool', 'MapFileName'), config)
if map_file:
map_file = expand_special(self.ConvertVSMacros(map_file, config=config))
return map_file
def GetOutputName(self, config, expand_special):
"""Gets the explicitly overridden output name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
type = self.spec['type']
root = 'VCLibrarianTool' if type == 'static_library' else 'VCLinkerTool'
# TODO(scottmg): Handle OutputDirectory without OutputFile.
output_file = self._Setting((root, 'OutputFile'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetPDBName(self, config, expand_special, default):
"""Gets the explicitly overridden pdb name for a target or returns
default if it's not overridden, or if no pdb will be generated."""
config = self._TargetConfig(config)
output_file = self._Setting(('VCLinkerTool', 'ProgramDatabaseFile'), config)
generate_debug_info = self._Setting(
('VCLinkerTool', 'GenerateDebugInformation'), config)
if generate_debug_info == 'true':
if output_file:
return expand_special(self.ConvertVSMacros(output_file, config=config))
else:
return default
else:
return None
def GetNoImportLibrary(self, config):
"""If NoImportLibrary: true, ninja will not expect the output to include
an import library."""
config = self._TargetConfig(config)
noimplib = self._Setting(('NoImportLibrary',), config)
return noimplib == 'true'
def GetAsmflags(self, config):
"""Returns the flags that need to be added to ml invocations."""
config = self._TargetConfig(config)
asmflags = []
safeseh = self._Setting(('MASM', 'UseSafeExceptionHandlers'), config)
if safeseh == 'true':
asmflags.append('/safeseh')
return asmflags
def GetCflags(self, config):
"""Returns the flags that need to be added to .c and .cc compilations."""
config = self._TargetConfig(config)
cflags = []
cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]])
cl = self._GetWrapper(self, self.msvs_settings[config],
'VCCLCompilerTool', append=cflags)
cl('Optimization',
map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O', default='2')
cl('InlineFunctionExpansion', prefix='/Ob')
cl('DisableSpecificWarnings', prefix='/wd')
cl('StringPooling', map={'true': '/GF'})
cl('EnableFiberSafeOptimizations', map={'true': '/GT'})
cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy')
cl('EnableIntrinsicFunctions', map={'false': '-', 'true': ''}, prefix='/Oi')
cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O')
cl('FloatingPointModel',
map={'0': 'precise', '1': 'strict', '2': 'fast'}, prefix='/fp:',
default='0')
cl('CompileAsManaged', map={'false': '', 'true': '/clr'})
cl('WholeProgramOptimization', map={'true': '/GL'})
cl('WarningLevel', prefix='/W')
cl('WarnAsError', map={'true': '/WX'})
cl('CallingConvention',
map={'0': 'd', '1': 'r', '2': 'z', '3': 'v'}, prefix='/G')
cl('DebugInformationFormat',
map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z')
cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'})
cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'})
cl('MinimalRebuild', map={'true': '/Gm'})
cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'})
cl('BasicRuntimeChecks', map={'1': 's', '2': 'u', '3': '1'}, prefix='/RTC')
cl('RuntimeLibrary',
map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M')
cl('ExceptionHandling', map={'1': 'sc','2': 'a'}, prefix='/EH')
cl('DefaultCharIsUnsigned', map={'true': '/J'})
cl('TreatWChar_tAsBuiltInType',
map={'false': '-', 'true': ''}, prefix='/Zc:wchar_t')
cl('EnablePREfast', map={'true': '/analyze'})
cl('AdditionalOptions', prefix='')
cl('EnableEnhancedInstructionSet',
map={'1': 'SSE', '2': 'SSE2', '3': 'AVX', '4': 'IA32', '5': 'AVX2'},
prefix='/arch:')
cflags.extend(['/FI' + f for f in self._Setting(
('VCCLCompilerTool', 'ForcedIncludeFiles'), config, default=[])])
if float(self.vs_version.project_version) >= 12.0:
# New flag introduced in VS2013 (project version 12.0) Forces writes to
# the program database (PDB) to be serialized through MSPDBSRV.EXE.
# https://msdn.microsoft.com/en-us/library/dn502518.aspx
cflags.append('/FS')
# ninja handles parallelism by itself, don't have the compiler do it too.
cflags = [x for x in cflags if not x.startswith('/MP')]
return cflags
def _GetPchFlags(self, config, extension):
"""Get the flags to be added to the cflags for precompiled header support.
"""
config = self._TargetConfig(config)
# The PCH is only built once by a particular source file. Usage of PCH must
# only be for the same language (i.e. C vs. C++), so only include the pch
# flags when the language matches.
if self.msvs_precompiled_header[config]:
source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1]
if _LanguageMatchesForPch(source_ext, extension):
pch = self.msvs_precompiled_header[config]
pchbase = os.path.split(pch)[1]
return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pchbase + '.pch']
return []
def GetCflagsC(self, config):
"""Returns the flags that need to be added to .c compilations."""
config = self._TargetConfig(config)
return self._GetPchFlags(config, '.c')
def GetCflagsCC(self, config):
"""Returns the flags that need to be added to .cc compilations."""
config = self._TargetConfig(config)
return ['/TP'] + self._GetPchFlags(config, '.cc')
def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):
"""Get and normalize the list of paths in AdditionalLibraryDirectories
setting."""
config = self._TargetConfig(config)
libpaths = self._Setting((root, 'AdditionalLibraryDirectories'),
config, default=[])
libpaths = [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(p, config=config)))
for p in libpaths]
return ['/LIBPATH:"' + p + '"' for p in libpaths]
def GetLibFlags(self, config, gyp_to_build_path):
"""Returns the flags that need to be added to lib commands."""
config = self._TargetConfig(config)
libflags = []
lib = self._GetWrapper(self, self.msvs_settings[config],
'VCLibrarianTool', append=libflags)
libflags.extend(self._GetAdditionalLibraryDirectories(
'VCLibrarianTool', config, gyp_to_build_path))
lib('LinkTimeCodeGeneration', map={'true': '/LTCG'})
lib('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
lib('AdditionalOptions')
return libflags
def GetDefFile(self, gyp_to_build_path):
"""Returns the .def file from sources, if any. Otherwise returns None."""
spec = self.spec
if spec['type'] in ('shared_library', 'loadable_module', 'executable'):
def_files = [s for s in spec.get('sources', [])
if s.lower().endswith('.def')]
if len(def_files) == 1:
return gyp_to_build_path(def_files[0])
elif len(def_files) > 1:
raise Exception("Multiple .def files")
return None
def _GetDefFileAsLdflags(self, ldflags, gyp_to_build_path):
""".def files get implicitly converted to a ModuleDefinitionFile for the
linker in the VS generator. Emulate that behaviour here."""
def_file = self.GetDefFile(gyp_to_build_path)
if def_file:
ldflags.append('/DEF:"%s"' % def_file)
def GetPGDName(self, config, expand_special):
"""Gets the explicitly overridden pgd name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
output_file = self._Setting(
('VCLinkerTool', 'ProfileGuidedDatabase'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetLdflags(self, config, gyp_to_build_path, expand_special,
manifest_base_name, output_name, is_executable, build_dir):
"""Returns the flags that need to be added to link commands, and the
manifest files."""
config = self._TargetConfig(config)
ldflags = []
ld = self._GetWrapper(self, self.msvs_settings[config],
'VCLinkerTool', append=ldflags)
self._GetDefFileAsLdflags(ldflags, gyp_to_build_path)
ld('GenerateDebugInformation', map={'true': '/DEBUG'})
ld('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
ldflags.extend(self._GetAdditionalLibraryDirectories(
'VCLinkerTool', config, gyp_to_build_path))
ld('DelayLoadDLLs', prefix='/DELAYLOAD:')
ld('TreatLinkerWarningAsErrors', prefix='/WX',
map={'true': '', 'false': ':NO'})
out = self.GetOutputName(config, expand_special)
if out:
ldflags.append('/OUT:' + out)
pdb = self.GetPDBName(config, expand_special, output_name + '.pdb')
if pdb:
ldflags.append('/PDB:' + pdb)
pgd = self.GetPGDName(config, expand_special)
if pgd:
ldflags.append('/PGD:' + pgd)
map_file = self.GetMapFileName(config, expand_special)
ld('GenerateMapFile', map={'true': '/MAP:' + map_file if map_file
else '/MAP'})
ld('MapExports', map={'true': '/MAPINFO:EXPORTS'})
ld('AdditionalOptions', prefix='')
minimum_required_version = self._Setting(
('VCLinkerTool', 'MinimumRequiredVersion'), config, default='')
if minimum_required_version:
minimum_required_version = ',' + minimum_required_version
ld('SubSystem',
map={'1': 'CONSOLE%s' % minimum_required_version,
'2': 'WINDOWS%s' % minimum_required_version},
prefix='/SUBSYSTEM:')
stack_reserve_size = self._Setting(
('VCLinkerTool', 'StackReserveSize'), config, default='')
if stack_reserve_size:
stack_commit_size = self._Setting(
('VCLinkerTool', 'StackCommitSize'), config, default='')
if stack_commit_size:
stack_commit_size = ',' + stack_commit_size
ldflags.append('/STACK:%s%s' % (stack_reserve_size, stack_commit_size))
ld('TerminalServerAware', map={'1': ':NO', '2': ''}, prefix='/TSAWARE')
ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL')
ld('BaseAddress', prefix='/BASE:')
ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED')
ld('RandomizedBaseAddress',
map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE')
ld('DataExecutionPrevention',
map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT')
ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:')
ld('ForceSymbolReferences', prefix='/INCLUDE:')
ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:')
ld('LinkTimeCodeGeneration',
map={'1': '', '2': ':PGINSTRUMENT', '3': ':PGOPTIMIZE',
'4': ':PGUPDATE'},
prefix='/LTCG')
ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:')
ld('ResourceOnlyDLL', map={'true': '/NOENTRY'})
ld('EntryPointSymbol', prefix='/ENTRY:')
ld('Profile', map={'true': '/PROFILE'})
ld('LargeAddressAware',
map={'1': ':NO', '2': ''}, prefix='/LARGEADDRESSAWARE')
# TODO(scottmg): This should sort of be somewhere else (not really a flag).
ld('AdditionalDependencies', prefix='')
if self.GetArch(config) == 'x86':
safeseh_default = 'true'
else:
safeseh_default = None
ld('ImageHasSafeExceptionHandlers',
map={'false': ':NO', 'true': ''}, prefix='/SAFESEH',
default=safeseh_default)
# If the base address is not specifically controlled, DYNAMICBASE should
# be on by default.
if not any('DYNAMICBASE' in flag or flag == '/FIXED' for flag in ldflags):
ldflags.append('/DYNAMICBASE')
# If the NXCOMPAT flag has not been specified, default to on. Despite the
# documentation that says this only defaults to on when the subsystem is
# Vista or greater (which applies to the linker), the IDE defaults it on
# unless it's explicitly off.
if not any('NXCOMPAT' in flag for flag in ldflags):
ldflags.append('/NXCOMPAT')
have_def_file = any(flag.startswith('/DEF:') for flag in ldflags)
manifest_flags, intermediate_manifest, manifest_files = \
self._GetLdManifestFlags(config, manifest_base_name, gyp_to_build_path,
is_executable and not have_def_file, build_dir)
ldflags.extend(manifest_flags)
return ldflags, intermediate_manifest, manifest_files
def _GetLdManifestFlags(self, config, name, gyp_to_build_path,
allow_isolation, build_dir):
"""Returns a 3-tuple:
- the set of flags that need to be added to the link to generate
a default manifest
- the intermediate manifest that the linker will generate that should be
used to assert it doesn't add anything to the merged one.
- the list of all the manifest files to be merged by the manifest tool and
included into the link."""
generate_manifest = self._Setting(('VCLinkerTool', 'GenerateManifest'),
config,
default='true')
if generate_manifest != 'true':
# This means not only that the linker should not generate the intermediate
# manifest but also that the manifest tool should do nothing even when
# additional manifests are specified.
return ['/MANIFEST:NO'], [], []
output_name = name + '.intermediate.manifest'
flags = [
'/MANIFEST',
'/ManifestFile:' + output_name,
]
# Instead of using the MANIFESTUAC flags, we generate a .manifest to
# include into the list of manifests. This allows us to avoid the need to
# do two passes during linking. The /MANIFEST flag and /ManifestFile are
# still used, and the intermediate manifest is used to assert that the
# final manifest we get from merging all the additional manifest files
# (plus the one we generate here) isn't modified by merging the
# intermediate into it.
# Always NO, because we generate a manifest file that has what we want.
flags.append('/MANIFESTUAC:NO')
config = self._TargetConfig(config)
enable_uac = self._Setting(('VCLinkerTool', 'EnableUAC'), config,
default='true')
manifest_files = []
generated_manifest_outer = \
"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>" \
"<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s" \
"</assembly>"
if enable_uac == 'true':
execution_level = self._Setting(('VCLinkerTool', 'UACExecutionLevel'),
config, default='0')
execution_level_map = {
'0': 'asInvoker',
'1': 'highestAvailable',
'2': 'requireAdministrator'
}
ui_access = self._Setting(('VCLinkerTool', 'UACUIAccess'), config,
default='false')
inner = '''
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level='%s' uiAccess='%s' />
</requestedPrivileges>
</security>
</trustInfo>''' % (execution_level_map[execution_level], ui_access)
else:
inner = ''
generated_manifest_contents = generated_manifest_outer % inner
generated_name = name + '.generated.manifest'
# Need to join with the build_dir here as we're writing it during
# generation time, but we return the un-joined version because the build
# will occur in that directory. We only write the file if the contents
# have changed so that simply regenerating the project files doesn't
# cause a relink.
build_dir_generated_name = os.path.join(build_dir, generated_name)
gyp.common.EnsureDirExists(build_dir_generated_name)
f = gyp.common.WriteOnDiff(build_dir_generated_name)
f.write(generated_manifest_contents)
f.close()
manifest_files = [generated_name]
if allow_isolation:
flags.append('/ALLOWISOLATION')
manifest_files += self._GetAdditionalManifestFiles(config,
gyp_to_build_path)
return flags, output_name, manifest_files
def _GetAdditionalManifestFiles(self, config, gyp_to_build_path):
"""Gets additional manifest files that are added to the default one
generated by the linker."""
files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config,
default=[])
if isinstance(files, str):
files = files.split(';')
return [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(f, config=config)))
for f in files]
def IsUseLibraryDependencyInputs(self, config):
"""Returns whether the target should be linked via Use Library Dependency
Inputs (using component .objs of a given .lib)."""
config = self._TargetConfig(config)
uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config)
return uldi == 'true'
def IsEmbedManifest(self, config):
"""Returns whether manifest should be linked into binary."""
config = self._TargetConfig(config)
embed = self._Setting(('VCManifestTool', 'EmbedManifest'), config,
default='true')
return embed == 'true'
def IsLinkIncremental(self, config):
"""Returns whether the target should be linked incrementally."""
config = self._TargetConfig(config)
link_inc = self._Setting(('VCLinkerTool', 'LinkIncremental'), config)
return link_inc != '1'
def GetRcflags(self, config, gyp_to_ninja_path):
"""Returns the flags that need to be added to invocations of the resource
compiler."""
config = self._TargetConfig(config)
rcflags = []
rc = self._GetWrapper(self, self.msvs_settings[config],
'VCResourceCompilerTool', append=rcflags)
rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I')
rcflags.append('/I' + gyp_to_ninja_path('.'))
rc('PreprocessorDefinitions', prefix='/d')
# /l arg must be in hex without leading '0x'
rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:])
return rcflags
def BuildCygwinBashCommandLine(self, args, path_to_base):
"""Build a command line that runs args via cygwin bash. We assume that all
incoming paths are in Windows normpath'd form, so they need to be
converted to posix style for the part of the command line that's passed to
bash. We also have to do some Visual Studio macro emulation here because
various rules use magic VS names for things. Also note that rules that
contain ninja variables cannot be fixed here (for example ${source}), so
the outer generator needs to make sure that the paths that are written out
are in posix style, if the command line will be used here."""
cygwin_dir = os.path.normpath(
os.path.join(path_to_base, self.msvs_cygwin_dirs[0]))
cd = ('cd %s' % path_to_base).replace('\\', '/')
args = [a.replace('\\', '/').replace('"', '\\"') for a in args]
args = ["'%s'" % a.replace("'", "'\\''") for a in args]
bash_cmd = ' '.join(args)
cmd = (
'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir +
'bash -c "%s ; %s"' % (cd, bash_cmd))
return cmd
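# Example (hypothetical inputs, assuming msvs_cygwin_dirs[0] == 'third_party/cygwin'):
# BuildCygwinBashCommandLine(['echo', 'out\\file.txt'], '..\\..') returns roughly
# call "..\..\third_party\cygwin\setup_env.bat" && set CYGWIN=nontsec && bash -c "cd ../.. ; 'echo' 'out/file.txt'"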
def IsRuleRunUnderCygwin(self, rule):
"""Determine if an action should be run under cygwin. If the variable is
unset, or set to 1 we use cygwin."""
return int(rule.get('msvs_cygwin_shell',
self.spec.get('msvs_cygwin_shell', 1))) != 0
def _HasExplicitRuleForExtension(self, spec, extension):
"""Determine if there's an explicit rule for a particular extension."""
for rule in spec.get('rules', []):
if rule['extension'] == extension:
return True
return False
def _HasExplicitIdlActions(self, spec):
"""Determine if an action should not run midl for .idl files."""
return any([action.get('explicit_idl_action', 0)
for action in spec.get('actions', [])])
def HasExplicitIdlRulesOrActions(self, spec):
"""Determine if there's an explicit rule or action for idl files. When
there isn't we need to generate implicit rules to build MIDL .idl files."""
return (self._HasExplicitRuleForExtension(spec, 'idl') or
self._HasExplicitIdlActions(spec))
def HasExplicitAsmRules(self, spec):
"""Determine if there's an explicit rule for asm files. When there isn't we
need to generate implicit rules to assemble .asm files."""
return self._HasExplicitRuleForExtension(spec, 'asm')
def GetIdlBuildData(self, source, config):
"""Determine the implicit outputs for an idl file. Returns output
directory, outputs, and variables and flags that are required."""
config = self._TargetConfig(config)
midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool')
def midl(name, default=None):
return self.ConvertVSMacros(midl_get(name, default=default),
config=config)
tlb = midl('TypeLibraryName', default='${root}.tlb')
header = midl('HeaderFileName', default='${root}.h')
dlldata = midl('DLLDataFileName', default='dlldata.c')
iid = midl('InterfaceIdentifierFileName', default='${root}_i.c')
proxy = midl('ProxyFileName', default='${root}_p.c')
# Note that .tlb is not included in the outputs as it is not always
# generated depending on the content of the input idl file.
outdir = midl('OutputDirectory', default='')
output = [header, dlldata, iid, proxy]
variables = [('tlb', tlb),
('h', header),
('dlldata', dlldata),
('iid', iid),
('proxy', proxy)]
# TODO(scottmg): Are there configuration settings to set these flags?
target_platform = 'win32' if self.GetArch(config) == 'x86' else 'x64'
flags = ['/char', 'signed', '/env', target_platform, '/Oicf']
return outdir, output, variables, flags
def _LanguageMatchesForPch(source_ext, pch_source_ext):
c_exts = ('.c',)
cc_exts = ('.cc', '.cxx', '.cpp')
return ((source_ext in c_exts and pch_source_ext in c_exts) or
(source_ext in cc_exts and pch_source_ext in cc_exts))
class PrecompiledHeader(object):
"""Helper to generate dependencies and build rules to handle generation of
precompiled headers. Interface matches the GCH handler in xcode_emulation.py.
"""
def __init__(
self, settings, config, gyp_to_build_path, gyp_to_unique_output, obj_ext):
self.settings = settings
self.config = config
pch_source = self.settings.msvs_precompiled_source[self.config]
self.pch_source = gyp_to_build_path(pch_source)
filename, _ = os.path.splitext(pch_source)
self.output_obj = gyp_to_unique_output(filename + obj_ext).lower()
def _PchHeader(self):
"""Get the header that will appear in an #include line for all source
files."""
return self.settings.msvs_precompiled_header[self.config]
def GetObjDependencies(self, sources, objs, arch):
"""Given a list of sources files and the corresponding object files,
returns a list of the pch files that should be depended upon. The
additional wrapping in the return value is for interface compatibility
with make.py on Mac, and xcode_emulation.py."""
assert arch is None
if not self._PchHeader():
return []
pch_ext = os.path.splitext(self.pch_source)[1]
for source in sources:
if _LanguageMatchesForPch(os.path.splitext(source)[1], pch_ext):
return [(None, None, self.output_obj)]
return []
def GetPchBuildCommands(self, arch):
"""Not used on Windows as there are no additional build steps required
(instead, existing steps are modified in GetFlagsModifications below)."""
return []
def GetFlagsModifications(self, input, output, implicit, command,
cflags_c, cflags_cc, expand_special):
"""Get the modified cflags and implicit dependencies that should be used
for the pch compilation step."""
if input == self.pch_source:
pch_output = ['/Yc' + self._PchHeader()]
if command == 'cxx':
return ([('cflags_cc', map(expand_special, cflags_cc + pch_output))],
self.output_obj, [])
elif command == 'cc':
return ([('cflags_c', map(expand_special, cflags_c + pch_output))],
self.output_obj, [])
return [], output, implicit
vs_version = None
def GetVSVersion(generator_flags):
global vs_version
if not vs_version:
vs_version = gyp.MSVSVersion.SelectVisualStudioVersion(
generator_flags.get('msvs_version', 'auto'),
allow_fallback=False)
return vs_version
def _GetVsvarsSetupArgs(generator_flags, arch):
vs = GetVSVersion(generator_flags)
return vs.SetupScript()
def ExpandMacros(string, expansions):
"""Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv
for the canonical way to retrieve a suitable dict."""
if '$' in string:
for old, new in expansions.items():
if isinstance(new, bytes):
new = new.decode()
assert '$(' not in new, new
string = string.replace(old, new)
return string
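# Example (hypothetical values): with expansions {'$(OutDir)': 'out\\Release'},
# ExpandMacros('$(OutDir)\\foo.dll', expansions) returns 'out\\Release\\foo.dll'.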
def _ExtractImportantEnvironment(output_of_set, arch):
"""Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command."""
envvars_to_save = (
'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
'include',
'lib',
'libpath',
'path',
'pathext',
'systemroot',
'temp',
'tmp',
)
env = {}
# This occasionally happens and leads to misleading SYSTEMROOT error messages
# if not caught here.
cl_find = 'cl.exe'
if 'Visual Studio 2017'.encode('utf-8') in output_of_set:
cl_find = arch + '.' + cl_find
if output_of_set.count('='.encode('utf-8')) == 0:
raise Exception('Invalid output_of_set. Value is:\n%s' % output_of_set)
for line in output_of_set.splitlines():
if re.search(cl_find.encode(), line, re.I):
env['GYP_CL_PATH'] = line
continue
for envvar in envvars_to_save:
if re.match((envvar + '=').encode(), line, re.I):
var, setting = line.split('='.encode(), 1)
if envvar == 'path':
# Our own rules (for running gyp-win-tool) and other actions in
# Chromium rely on python being in the path. Add the path to this
# python here so that if it's not in the path when ninja is run
# later, python will still be found.
setting = os.path.dirname(sys.executable) + os.pathsep + setting.decode()
env[var.upper()] = setting
break
for required in (b'SYSTEMROOT', b'TEMP', b'TMP'):
if required not in env:
raise Exception('Environment variable "%s" '
'required to be set to valid path' % required)
return env
def _FormatAsEnvironmentBlock(envvar_dict):
"""Format as an 'environment block' directly suitable for CreateProcess.
Briefly this is a list of key=value\0, terminated by an additional \0. See
CreateProcess documentation for more details."""
block = ''
nul = '\0'
for key, value in envvar_dict.items():
# Decode bytes keys/values explicitly rather than relying on a bare except.
if isinstance(key, bytes):
key = key.decode()
if isinstance(value, bytes):
value = value.decode()
block += key + '=' + value + nul
block += nul
return block
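# Example: _FormatAsEnvironmentBlock({'PATH': 'C:\\bin', 'TEMP': 'C:\\tmp'})
# returns 'PATH=C:\\bin\x00TEMP=C:\\tmp\x00\x00' (key order follows the dict).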
def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags,
system_includes, open_out):
"""It's not sufficient to have the absolute path to the compiler, linker,
etc. on Windows, as those tools rely on .dlls being in the PATH. We also
need to support both x86 and x64 compilers within the same build (to support
msvs_target_platform hackery). Different architectures require a different
compiler binary, and different supporting environment variables (INCLUDE,
LIB, LIBPATH). So, we extract the environment here, wrap all invocations
of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which
sets up the environment, and then we do not prefix the compiler with
an absolute path, instead preferring something like "cl.exe" in the rule
which will then run whichever the environment setup has put in the path.
When the following procedure for generating environment files does not
meet your requirements (e.g. for custom toolchains), you can pass
"-G ninja_use_custom_environment_files" to gyp to suppress file
generation and use custom environment files that you prepare yourself."""
archs = ('x86', 'x64')
if generator_flags.get('ninja_use_custom_environment_files', 0):
cl_paths = {}
for arch in archs:
cl_paths[arch] = 'cl.exe'
return cl_paths
vs = GetVSVersion(generator_flags)
cl_paths = {}
for arch in archs:
env = _GetEnvironment(arch, vs, open_out)
# Inject system includes from gyp files into INCLUDE.
if system_includes:
system_includes = system_includes | OrderedSet(
env.get('INCLUDE', '').split(';'))
env['INCLUDE'] = ';'.join(system_includes)
env_block = _FormatAsEnvironmentBlock(env)
f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'w')
f.write(env_block)
f.close()
cl_paths[arch] = env['GYP_CL_PATH']
return cl_paths
def _GetEnvironment(arch, vs, open_out):
"""
This function runs the VC environment setup script and retrieves the
environment variables, as well as the path to cl.exe.
It then tries to cache the values to disk; on the next run it will try to
look up the cache. The cache key is the path to the setup script (which is
embedded within each installed Visual Studio instance) plus its args.
Even after a cache hit we do some validation of the cached values, since
parts of the tool-set can be upgraded within the install's lifecycle, so
paths and version numbers may change.
Args:
arch: {string} target architecture
vs: VisualStudioVersion
open_out: file open wrapper
Returns: {dict} the important environment variables VC needs to run
"""
env = {}
args = vs.SetupScript(arch)
args.extend(('&&', 'set', '&&', 'where', 'cl.exe'))
cache_key = hashlib.md5(''.join(args).encode('utf-8')).hexdigest()
# The default value for %TEMP% will make all cache lookups safely miss
appdata_dir = os.environ.get('TEMP', '')
cache_path = os.path.join(appdata_dir, '.gyp-cache')
cache_keyed_file = os.path.join(cache_path, cache_key)
if os.path.exists(cache_keyed_file):
try:
with open(cache_keyed_file, 'rb') as f:  # binary mode for pickle
env = pickle.load(f)
except Exception:
pass
cl_path = env.get('GYP_CL_PATH', '')
if os.path.exists(cl_path):
return env
else:
# cache has become invalid (probably from a tool-set update)
os.remove(cache_keyed_file)
start_time = time.time()  # time.clock() was removed in Python 3.8
# Extract environment variables for subprocesses.
popen = subprocess.Popen(
args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
std_out, _ = popen.communicate()
if popen.returncode != 0:
raise Exception('"%s" failed with error %d' % (args, popen.returncode))
end_time = time.time()
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL, "vcvars %s time: %f" %
(' '.join(args), end_time - start_time))
env = _ExtractImportantEnvironment(std_out, arch)
if os.path.exists(appdata_dir):
try:
with open_out(cache_keyed_file) as f:
pickle.dump(env, f)
except Exception as e:
print(e)
return env
def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja):
"""Emulate behavior of msvs_error_on_missing_sources present in the msvs
generator: Check that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation when building via
VS, and we want this check to match for people/bots that build using ninja,
so they're not surprised when the VS build fails."""
if int(generator_flags.get('msvs_error_on_missing_sources', 0)):
no_specials = filter(lambda x: '$' not in x, sources)
relative = [os.path.join(build_dir, gyp_to_ninja(s)) for s in no_specials]
missing = [x for x in relative if not os.path.exists(x)]
if missing:
# They'll look like out\Release\..\..\stuff\things.cc, so normalize the
# path for a slightly less crazy looking output.
cleaned_up = [os.path.normpath(x) for x in missing]
raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up))
# Sets some values in default_variables, which are required for many
# generators, run on Windows.
def CalculateCommonVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
# Set a variable so conditions can be based on msvs_version.
msvs_version = gyp.msvs_emulation.GetVSVersion(generator_flags)
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
# process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
# contains the actual word size of the system when running thru WOW64).
if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
|
# Copyright 2015 Hewlett-Packard
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
from oslo_log import log
from keystone.auth import core
from keystone.common import provider_api
import keystone.conf
from keystone import exception
from keystone.federation import constants as federation_constants
from keystone.federation import utils
from keystone.i18n import _
CONF = keystone.conf.CONF
LOG = log.getLogger(__name__)
class TokenlessAuthHelper(provider_api.ProviderAPIMixin, object):
def __init__(self, env):
"""A init class for TokenlessAuthHelper.
:param env: The HTTP request environment that should contain
client certificate attributes. These attributes should match
with what the mapping defines. Or a user cannot be mapped and
results un-authenticated. The following examples are for the
attributes that reference to the client certificate's Subject's
Common Name and Organization:
SSL_CLIENT_S_DN_CN, SSL_CLIENT_S_DN_O
:type env: dict
"""
self.env = env
def _build_scope_info(self):
"""Build the token request scope based on the headers.
:returns: scope data
:rtype: dict
"""
project_id = self.env.get('HTTP_X_PROJECT_ID')
project_name = self.env.get('HTTP_X_PROJECT_NAME')
project_domain_id = self.env.get('HTTP_X_PROJECT_DOMAIN_ID')
project_domain_name = self.env.get('HTTP_X_PROJECT_DOMAIN_NAME')
domain_id = self.env.get('HTTP_X_DOMAIN_ID')
domain_name = self.env.get('HTTP_X_DOMAIN_NAME')
scope = {}
if project_id:
scope['project'] = {'id': project_id}
elif project_name:
scope['project'] = {'name': project_name}
if project_domain_id:
scope['project']['domain'] = {'id': project_domain_id}
elif project_domain_name:
scope['project']['domain'] = {'name': project_domain_name}
else:
msg = _('Neither Project Domain ID nor Project Domain Name '
'was provided.')
raise exception.ValidationError(msg)
elif domain_id:
scope['domain'] = {'id': domain_id}
elif domain_name:
scope['domain'] = {'name': domain_name}
else:
raise exception.ValidationError(
attribute='project or domain',
target='scope')
return scope
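# Example (hypothetical headers): an environment containing
# HTTP_X_PROJECT_NAME='demo' and HTTP_X_PROJECT_DOMAIN_NAME='Default'
# yields {'project': {'name': 'demo', 'domain': {'name': 'Default'}}}.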
def get_scope(self):
auth = {}
# NOTE(chioleong): Auth methods here are insignificant because
# we only care about using auth.controllers.AuthInfo
# to validate the scope information. Therefore,
# we don't provide any identity.
auth['scope'] = self._build_scope_info()
# NOTE(chioleong): We'll let AuthInfo validate the scope for us
auth_info = core.AuthInfo.create(auth, scope_only=True)
return auth_info.get_scope()
def get_mapped_user(self, project_id=None, domain_id=None):
"""Map client certificate to an existing user.
If user is ephemeral, there is no validation on the user himself;
however it will be mapped to a corresponding group(s) and the scope
of this ephemeral user is the same as what is assigned to the group.
:param project_id: Project scope of the mapped user.
:param domain_id: Domain scope of the mapped user.
:returns: A dictionary that contains the keys, such as
user_id, user_name, domain_id, domain_name
:rtype: dict
"""
idp_id = self._build_idp_id()
LOG.debug('The IdP Id %s and protocol Id %s are used to look up '
'the mapping.', idp_id, CONF.tokenless_auth.protocol)
mapped_properties, mapping_id = self.federation_api.evaluate(
idp_id, CONF.tokenless_auth.protocol, self.env)
user = mapped_properties.get('user', {})
user_id = user.get('id')
user_name = user.get('name')
user_type = user.get('type')
if user.get('domain') is not None:
user_domain_id = user.get('domain').get('id')
user_domain_name = user.get('domain').get('name')
else:
user_domain_id = None
user_domain_name = None
# if user is ephemeral type, we don't care if the user exists
# or not, but just care if the mapped group(s) is valid.
if user_type == utils.UserType.EPHEMERAL:
user_ref = {'type': utils.UserType.EPHEMERAL}
group_ids = mapped_properties['group_ids']
utils.validate_mapped_group_ids(group_ids,
mapping_id,
self.identity_api)
group_ids.extend(
utils.transform_to_group_ids(
mapped_properties['group_names'], mapping_id,
self.identity_api, self.assignment_api))
roles = self.assignment_api.get_roles_for_groups(group_ids,
project_id,
domain_id)
if roles is not None:
role_names = [role['name'] for role in roles]
user_ref['roles'] = role_names
user_ref['group_ids'] = list(group_ids)
user_ref[federation_constants.IDENTITY_PROVIDER] = idp_id
user_ref[federation_constants.PROTOCOL] = (
CONF.tokenless_auth.protocol)
return user_ref
if user_id:
user_ref = self.identity_api.get_user(user_id)
elif user_name and (user_domain_name or user_domain_id):
if user_domain_name:
user_domain = self.resource_api.get_domain_by_name(
user_domain_name)
self.resource_api.assert_domain_enabled(user_domain['id'],
user_domain)
user_domain_id = user_domain['id']
user_ref = self.identity_api.get_user_by_name(user_name,
user_domain_id)
else:
msg = _('User auth cannot be built due to missing either '
'user id, or user name with domain id, or user name '
'with domain name.')
raise exception.ValidationError(msg)
self.identity_api.assert_user_enabled(
user_id=user_ref['id'],
user=user_ref)
user_ref['type'] = utils.UserType.LOCAL
return user_ref
def _build_idp_id(self):
"""Build the IdP name from the given config option issuer_attribute.
The default issuer attribute SSL_CLIENT_I_DN in the environment is
built with the following formula -
base64_idp = sha1(env['SSL_CLIENT_I_DN'])
:returns: base64_idp like the above example
:rtype: str
"""
idp = self.env.get(CONF.tokenless_auth.issuer_attribute)
if idp is None:
raise exception.TokenlessAuthConfigError(
issuer_attribute=CONF.tokenless_auth.issuer_attribute)
hashed_idp = hashlib.sha256(idp.encode('utf-8'))
return hashed_idp.hexdigest()
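# Example (hypothetical issuer DN): if env['SSL_CLIENT_I_DN'] is
# 'CN=myca,O=example', the IdP id is
# hashlib.sha256(b'CN=myca,O=example').hexdigest().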
|
# coding=utf-8
from common.BNFParser import *
from common.Grammar import Grammar
# Compute the set of nullable variables of grammar G
# The algorithm depends only on G's set of productions P
def algo_6_3(P):
"""
Test data comes from Chapter 6, Exercise 12(2)
>>> from common.production import Production
>>> p1 = Production(['S'], [['A', 'B', 'D', 'C']])
>>> p2 = Production(['A'], [['B', 'D'], ['\\"a\\"', '\\"a\\"'], ['\\"ε\\"']])
>>> p3 = Production(['B'], [['\\"a\\"', 'B'], ['\\"a\\"']])
>>> p4 = Production(['C'], [['D','C'], ['\\"c\\"'], ['\\"ε\\"']])
>>> p5 = Production(['D'], [['\\"ε\\"']])
>>> p = [p1, p2, p3, p4, p5]
>>> u = algo_6_3(p)
>>> set(u) == set(['A', 'C', 'D'])
True
"""
simple_plist = []
for p in P:
simple_plist.extend(Production.toSimpleProduction(p))
old_u = set()
new_u = set()
for p in simple_plist:
if Production.isDirectEmpty(p):
new_u.add(p.left[0])
while new_u != old_u:
old_u = new_u
for p in simple_plist:
if set(p.right[0]) <= old_u:
new_u.add(p.left[0])
return new_u
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def HostFileSystemVolume(vim, *args, **kwargs):
'''Detailed information about a file system. This is a base type for derived
types that have more specific details about specific filesystem types.
Typically a FileSystem is exposed as a datastore.
See also: DatastoreInfo, HostVmfsVolume, HostNasVolume,
HostLocalFileSystemVolume, HostVfatVolume.'''
obj = vim.client.factory.create('{urn:vim25}HostFileSystemVolume')
# do some validation checking...
if (len(args) + len(kwargs)) < 3:
raise IndexError('Expected at least 3 arguments, got: %d' % (len(args) + len(kwargs)))
required = [ 'capacity', 'name', 'type' ]
optional = [ 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
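# Example (hypothetical pyvisdk client instance `vim` and values):
# vol = HostFileSystemVolume(vim, capacity=1024 ** 3, name='datastore1', type='VMFS')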
|
#!../bin/python3
# -*- coding:utf-8 -*-
"""
Copyright 2021 Jerome DE LUCCHI
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import json
from env import _SERVER_DIR
sys.path.insert(0, _SERVER_DIR)
from api import db
__DATAMODEL_DIR = os.path.join(os.path.abspath('..'), 'datamodel')
__DATAMODEL_NODE_MODE_FILE = os.path.join(__DATAMODEL_DIR, 'node_mode.template.mapping')
__ES_ADDR = db.ES_PROTOCOL + """://""" + str(db.ES_HOSTNAME) + """:""" + str(db.ES_PORT)
__CREATE_INDEX_TEMPLATE = """curl -s -XPUT -H \"Content-Type: Application/Json\" """ + __ES_ADDR + """/_template/blast_node_mode -d@""" + __DATAMODEL_NODE_MODE_FILE
__NODE_MODES = [
{"name": "maintenance"},
{"name": "pause"},
{"name": "running"}
]
def defineIndexTemplate():
try:
if json.load(os.popen(__CREATE_INDEX_TEMPLATE))["acknowledged"]:
return True
except KeyError:
return False
def provisionDefault():
try:
for mode in __NODE_MODES:
__ES_PROVISION_DEFAULT = """curl -s -XPOST -H \"Content-Type: Application/Json\" """ + __ES_ADDR + """/blast_node_mode/_doc -d \'""" + json.dumps(mode) + """\'"""
if not json.load(os.popen(__ES_PROVISION_DEFAULT))["result"] == "created":
return False
return True
except KeyError:
return False
def main():
if defineIndexTemplate():
if provisionDefault():
sys.exit(0)
if __name__ == "__main__":
main()
|
# ext/declarative/__init__.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .api import declarative_base, synonym_for, comparable_using, \
instrument_declarative, ConcreteBase, AbstractConcreteBase, \
DeclarativeMeta, DeferredReflection, has_inherited_table,\
declared_attr, as_declarative
__all__ = ['declarative_base', 'synonym_for', 'has_inherited_table',
'comparable_using', 'instrument_declarative', 'declared_attr',
'as_declarative',
'ConcreteBase', 'AbstractConcreteBase', 'DeclarativeMeta',
'DeferredReflection']
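# Minimal usage sketch (hypothetical model, not part of this package):
# from sqlalchemy import Column, Integer
# Base = declarative_base()
# class User(Base):
#     __tablename__ = 'users'
#     id = Column(Integer, primary_key=True)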
|
# Author: Christian Brodbeck <christianbrodbeck@nyu.edu>
import pickle
from nose.tools import eq_
import numpy as np
from numpy.testing import assert_array_equal
from eelbrain import datasets
from eelbrain._stats.spm import LM, LMGroup
def test_lm():
ds = datasets.get_uts()
model = ds.eval("A*B*Y")
coeffs = ds['uts'].ols(model)
lm = LM('uts', 'A*B*Y', ds, 'effect')
eq_(repr(lm), "<LM: uts ~ A + B + A x B + Y + A x Y + B x Y + A x B x Y>")
for i, effect in enumerate(model.effects):
assert_array_equal(lm.coefficient(effect.name).x, coeffs.x[i])
def test_random_lm():
# dummy coding
ds = datasets.get_uts()
lms = []
for i in range(5):
ds['uts'].x += np.random.normal(0, 2, ds['uts'].shape)
lms.append(LM('uts', 'A*B*Y', ds))
rlm = LMGroup(lms)
eq_(repr(rlm), '<LMGroup: uts ~ A + B + A x B + Y + A x Y + B x Y + A x B x Y, n=5>')
# coefficients
ds = rlm.coefficients_dataset(('A', 'A x B'))
eq_(ds['term'].cells, ('A', 'A x B'))
# tests
res = rlm.column_ttest('A x B', samples=100, pmin=0.05, mintime=0.025)
eq_(res.clusters.n_cases, 1)
# effect coding
ds = datasets.get_uts()
lms = []
for i in range(5):
ds['uts'].x += np.random.normal(0, 2, ds['uts'].shape)
lms.append(LM('uts', 'A*B*Y', ds, 'effect'))
rlm = LMGroup(lms)
res = rlm.column_ttest('A x B', samples=100, pmin=0.05, mintime=0.025)
eq_(res.clusters.n_cases, 6)
# persistence
rlm_p = pickle.loads(pickle.dumps(rlm, pickle.HIGHEST_PROTOCOL))
eq_(rlm_p.dims, rlm.dims)
|
# Copyright 2017 The Armada Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from armada.exceptions import base_exception
class LintException(base_exception.ArmadaBaseException):
'''Base class for linting exceptions and errors.'''
message = 'An unknown linting error occurred.'
class InvalidManifestException(LintException):
'''
Exception for invalid manifests.
**Troubleshoot:**
*Coming Soon*
'''
message = 'Armada manifest invalid.'
class InvalidChartNameException(LintException):
'''Exception that occurs when an invalid filename is encountered.'''
message = 'Chart name must be a string.'
class InvalidChartDefinitionException(LintException):
'''Exception when invalid chart definition is encountered.'''
message = 'Invalid chart definition. Chart definition must be array.'
class InvalidReleaseException(LintException):
'''Exception that occurs when a release is invalid.'''
message = 'Release needs to be a string.'
class InvalidArmadaObjectException(LintException):
'''
Exception that occurs when an Armada object is not declared.
**Troubleshoot:**
*Coming Soon*
'''
message = 'An Armada object was not declared.'
|
import numpy as np
import tensorflow as tf
from common.shared_functions import dot_or_lookup, glorot_variance, make_tf_variable, make_tf_bias
from encoders.message_gcns.message_gcn import MessageGcn
class BasisGcn(MessageGcn):
def parse_settings(self):
self.dropout_keep_probability = float(self.settings['DropoutKeepProbability'])
self.n_coefficients = int(self.settings['NumberOfBasisFunctions'])
def local_initialize_train(self):
vertex_feature_dimension = self.entity_count if self.onehot_input else self.shape[0]
type_matrix_shape = (self.relation_count, self.n_coefficients)
vertex_matrix_shape = (vertex_feature_dimension, self.n_coefficients, self.shape[1])
self_matrix_shape = (vertex_feature_dimension, self.shape[1])
glorot_var_combined = glorot_variance([vertex_matrix_shape[0], vertex_matrix_shape[2]])
self.W_forward = make_tf_variable(0, glorot_var_combined, vertex_matrix_shape)
self.W_backward = make_tf_variable(0, glorot_var_combined, vertex_matrix_shape)
self.W_self = make_tf_variable(0, glorot_var_combined, self_matrix_shape)
type_init_var = 1
self.C_forward = make_tf_variable(0, type_init_var, type_matrix_shape)
self.C_backward = make_tf_variable(0, type_init_var, type_matrix_shape)
self.b = make_tf_bias(self.shape[1])
def local_get_weights(self):
return [self.W_forward, self.W_backward,
self.C_forward, self.C_backward,
self.W_self,
self.b]
def compute_messages(self, sender_features, receiver_features):
backward_type_scaling, forward_type_scaling = self.compute_coefficients()
receiver_terms, sender_terms = self.compute_basis_functions(receiver_features, sender_features)
forward_messages = tf.reduce_sum(input_tensor=sender_terms * tf.expand_dims(forward_type_scaling,-1), axis=1)
backward_messages = tf.reduce_sum(input_tensor=receiver_terms * tf.expand_dims(backward_type_scaling, -1), axis=1)
return forward_messages, backward_messages
def compute_coefficients(self):
message_types = self.get_graph().get_type_indices()
forward_type_scaling = tf.nn.embedding_lookup(params=self.C_forward, ids=message_types)
backward_type_scaling = tf.nn.embedding_lookup(params=self.C_backward, ids=message_types)
return backward_type_scaling, forward_type_scaling
def compute_basis_functions(self, receiver_features, sender_features):
sender_terms = self.dot_or_tensor_mul(sender_features, self.W_forward)
receiver_terms = self.dot_or_tensor_mul(receiver_features, self.W_backward)
return receiver_terms, sender_terms
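# dot_or_tensor_mul flattens the (input_dim, n_coefficients, output_dim)
# weight tensor to 2-D so a single matmul / embedding lookup can be applied,
# then restores the coefficient dimension in the result.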
def dot_or_tensor_mul(self, features, tensor):
tensor_shape = tf.shape(input=tensor)
flat_shape = [tensor_shape[0], tensor_shape[1] * tensor_shape[2]]
flattened_tensor = tf.reshape(tensor, flat_shape)
result_tensor = dot_or_lookup(features, flattened_tensor, onehot_input=self.onehot_input)
result_tensor = tf.reshape(result_tensor, [-1, tensor_shape[1], tensor_shape[2]])
return result_tensor
def compute_self_loop_messages(self, vertex_features):
return dot_or_lookup(vertex_features, self.W_self, onehot_input=self.onehot_input)
def combine_messages(self, forward_messages, backward_messages, self_loop_messages, previous_code, mode='train'):
mtr_f = self.get_graph().forward_incidence_matrix(normalization=('global', 'recalculated'))
mtr_b = self.get_graph().backward_incidence_matrix(normalization=('global', 'recalculated'))
collected_messages_f = tf.sparse.sparse_dense_matmul(mtr_f, forward_messages)
collected_messages_b = tf.sparse.sparse_dense_matmul(mtr_b, backward_messages)
updated_vertex_embeddings = collected_messages_f + collected_messages_b
if self.use_nonlinearity:
activated = tf.nn.relu(updated_vertex_embeddings + self_loop_messages)
else:
activated = updated_vertex_embeddings + self_loop_messages
return activated
def local_get_regularization(self):
regularization = tf.reduce_mean(input_tensor=tf.square(self.W_forward))
regularization += tf.reduce_mean(input_tensor=tf.square(self.W_backward))
regularization += tf.reduce_mean(input_tensor=tf.square(self.W_self))
return 0.0 * regularization
|
import hashlib
import datetime
import json
import uuid
from hashlib import sha256
from sys import version_info as pyVersion
from binascii import hexlify, unhexlify
from wallet import *
from func.send_message import send_message
from func.send_coin import send_coin
from func.node_connection import *
from lib.mixlib import *
import pickle
from blockchain.blockchain_main import get_blockchain , create_blockchain, sendme_full_chain
from lib.settings import the_settings
def show_menu():
print(banner_maker(sc_name="Mix Blockchain Network",description="This is an open source blockchain network project. It exists for people to build and use their own blockchain networks, or to join networks created by others.",author="Onur Atakan ULUSOY",email="atadogan06@gmail.com") + \
menu_space() + \
menu_maker(menu_number="cbc",menu_text="Create Blockchain")+ \
menu_maker(menu_number="cw",menu_text="Create Wallet")+ \
menu_space() + \
menu_maker(menu_number="sm",menu_text="Send Message")+ \
menu_maker(menu_number="sc",menu_text="Send Coin")+ \
menu_space() + \
menu_maker(menu_number="gb",menu_text="Get Balance")+ \
menu_space() + \
menu_maker(menu_number="ndstart",menu_text="Node Start")+ \
menu_maker(menu_number="ndstop",menu_text="Node Stop")+ \
menu_maker(menu_number="ndconnect",menu_text="Node Connect")+ \
menu_maker(menu_number="ndconnectmix_blockchain_network",menu_text="Node Connect from mix_blockchain_network-DB")+ \
menu_space() + \
menu_maker(menu_number="testmodeon",menu_text="Test mode ON")+ \
menu_maker(menu_number="testmodeoff",menu_text="Test mode OF")+ \
menu_maker(menu_number="debugmodeon",menu_text="Debug mode ON")+ \
menu_maker(menu_number="debugmodeoff",menu_text="Debug mode OF")+ \
menu_space() + \
menu_maker(menu_number="getfullnodelist",menu_text="Get Full Node List")+ \
menu_maker(menu_number="getfullchain",menu_text="Get Full Chain")+ \
quit_menu_maker(mode="main")
)
def menu():
while True:
show_menu()
choices_input = question_maker(mode="main")
if choices_input == "cbc":
create_blockchain()
if choices_input == "cw":
Wallet_Create()
if choices_input == "sm":
send_message(input("Message: "),input("Please write receiver address: "))
if choices_input == "sc":
send_coin(input("Coin Amount: "),input("Please write receiver address: "))
if choices_input == "gb":
print(get_blockchain().getBalance(Wallet_Import(0,0)))
if choices_input == "help":
show_menu()
if choices_input == "ndstart":
ndstart(int(input("port: ")))
if choices_input == "ndstop":
ndstop()
if choices_input == "ndconnect":
ndconnect(str(input("node ip: ")),int(input("node port: ")))
if choices_input == "ndconnectmix_blockchain_network":
ndconnectmix_blockchain_network()
if choices_input == "testmodeon":
the_settings().test_mode(True)
if choices_input == "testmodeoff":
the_settings().test_mode(False)
if choices_input == "debugmodeon":
the_settings().debug_mode(True)
if choices_input == "debugmodeoff":
the_settings().debug_mode(False)
if choices_input == "getfullnodelist":
sendme_full_node_list()
if choices_input == "getfullchain":
sendme_full_chain()
if choices_input == "0":
exit()
def start():
menu()
if __name__ == '__main__':
start()
|
import numpy as np
import cv2
def make_colorwheel():
'''
Generates a color wheel for optical flow visualization as presented in:
Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007)
URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf
According to the C++ source code of Daniel Scharstein
According to the Matlab source code of Deqing Sun
'''
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros((ncols, 3))
col = 0
# RY
colorwheel[0:RY, 0] = 255
colorwheel[0:RY, 1] = np.floor(255 * np.arange(0, RY) / RY)
col = col + RY
# YG
colorwheel[col:col + YG, 0] = 255 - np.floor(255 * np.arange(0, YG) / YG)
colorwheel[col:col + YG, 1] = 255
col = col + YG
# GC
colorwheel[col:col + GC, 1] = 255
colorwheel[col:col + GC, 2] = np.floor(255 * np.arange(0, GC) / GC)
col = col + GC
# CB
colorwheel[col:col + CB, 1] = 255 - np.floor(255 * np.arange(CB) / CB)
colorwheel[col:col + CB, 2] = 255
col = col + CB
# BM
colorwheel[col:col + BM, 2] = 255
colorwheel[col:col + BM, 0] = np.floor(255 * np.arange(0, BM) / BM)
col = col + BM
# MR
colorwheel[col:col + MR, 2] = 255 - np.floor(255 * np.arange(MR) / MR)
colorwheel[col:col + MR, 0] = 255
return colorwheel
def flow_compute_color(u, v, convert_to_bgr=False):
'''
Applies the flow color wheel to (possibly clipped) flow components u and v.
According to the C++ source code of Daniel Scharstein
According to the Matlab source code of Deqing Sun
:param u: np.ndarray, input horizontal flow
:param v: np.ndarray, input vertical flow
:param convert_to_bgr: bool, whether to change ordering and output BGR instead of RGB
:return:
'''
flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8)
colorwheel = make_colorwheel() # shape [55x3]
ncols = colorwheel.shape[0]
rad = np.sqrt(np.square(u) + np.square(v))
a = np.arctan2(-v, -u) / np.pi
fk = (a + 1) / 2 * (ncols - 1) + 1
k0 = np.floor(fk).astype(np.int32)
k0[k0 > 53] = 53
k1 = k0 + 1
k1[k1 == ncols] = 1
f = fk - k0
for i in range(colorwheel.shape[1]):
tmp = colorwheel[:, i]
col0 = tmp[k0] / 255.0
col1 = tmp[k1] / 255.0
col = (1 - f) * col0 + f * col1
idx = (rad <= 1)
col[idx] = 1 - rad[idx] * (1 - col[idx])
col[~idx] = col[~idx] * 0.75 # out of range?
# Note the 2-i => BGR instead of RGB
ch_idx = 2 - i if convert_to_bgr else i
flow_image[:, :, ch_idx] = np.floor(255 * col)
return flow_image
def flow_to_color(flow_uv, clip_flow=None, convert_to_bgr=False):
'''
Expects a two dimensional flow image of shape [H,W,2]
According to the C++ source code of Daniel Scharstein
According to the Matlab source code of Deqing Sun
:param flow_uv: np.ndarray of shape [H,W,2]
:param clip_flow: float, maximum clipping value for flow
:return:
'''
assert flow_uv.ndim == 3, 'input flow must have three dimensions'
assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]'
if clip_flow is not None:
flow_uv = np.clip(flow_uv, 0, clip_flow)
u = flow_uv[:, :, 0]
v = flow_uv[:, :, 1]
rad = np.sqrt(np.square(u) + np.square(v))
rad_max = np.max(rad)
epsilon = 1e-5
u = u / (rad_max + epsilon)
v = v / (rad_max + epsilon)
return flow_compute_color(u, v, convert_to_bgr)
def readFlow(name):
f = open(name, 'rb')
header = f.read(4)
if header.decode("utf-8") != 'PIEH':
raise Exception('Flow file header does not contain PIEH')
width = np.fromfile(f, np.int32, 1).squeeze()
height = np.fromfile(f, np.int32, 1).squeeze()
flow = np.fromfile(f, np.float32, width * height * 2).reshape((height,
width, 2))
f.close()
return flow.astype(np.float32)
def get_warp_label(flow1, flow2, label1, th=50, value=0):
label2 = np.ones_like(label1, dtype=label1.dtype) * value
height = flow1.shape[0]
width = flow1.shape[1]
flow_t = np.zeros_like(flow1, dtype=flow1.dtype)
grid = np.indices((height, width)).swapaxes(0, 1).swapaxes(1, 2)
dx = grid[:, :, 0] + flow2[:, :, 1]
dy = grid[:, :, 1] + flow2[:, :, 0]
sx = np.floor(dx).astype(int)
sy = np.floor(dy).astype(int)
valid = (sx >= 0) & (sx < height - 1) & (sy >= 0) & (sy < width - 1)
sx_mat = np.dstack((sx, sx + 1, sx, sx + 1)).clip(0, height - 1)
sy_mat = np.dstack((sy, sy, sy + 1, sy + 1)).clip(0, width - 1)
sxsy_mat = np.abs((1 - np.abs(sx_mat - dx[:, :, np.newaxis])) *
(1 - np.abs(sy_mat - dy[:, :, np.newaxis])))
for i in range(4):
flow_t = flow_t + sxsy_mat[:, :, i][:, :, np.
newaxis] * flow1[sx_mat[:, :, i],
sy_mat[:, :, i], :]
valid = valid & (np.linalg.norm(
flow_t[:, :, [1, 0]] + np.dstack((dx, dy)) - grid, axis=2) < th)
flow_t = (flow2 - flow_t) / 2.0
dx = grid[:, :, 0] + flow_t[:, :, 1]
dy = grid[:, :, 1] + flow_t[:, :, 0]
valid = valid & (dx >= 0) & (dx < height - 1) & (dy >= 0) & (dy < width - 1)
label2[valid, :] = label1[dx[valid].round().astype(int), dy[valid].round()
.astype(int), :]
return label2
def flow_tf(flow, size):
flow_shape = flow.shape
flow_resized = cv2.resize(flow, (size[1], size[0]))
flow_resized[:, :, 0] *= (float(size[1]) / float(flow_shape[1]))
flow_resized[:, :, 1] *= (float(size[0]) / float(flow_shape[0]))
return flow_resized
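# Minimal usage sketch (assumes a Middlebury .flo file at a hypothetical path):
# flow = readFlow('example.flo')                  # shape [H, W, 2]
# vis = flow_to_color(flow, convert_to_bgr=True)  # uint8 color image
# cv2.imwrite('flow_vis.png', vis)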
|
r"""
`torch.distributed.launch` is a module that spawns up multiple distributed
training processes on each of the training nodes.
The utility can be used for single-node distributed training, in which one or
more processes per node will be spawned. The utility can be used for either
CPU training or GPU training. If the utility is used for GPU training,
each distributed process will be operating on a single GPU. This can achieve
well-improved single-node training performance. It can also be used in
multi-node distributed training, by spawning up multiple processes on each node
for well-improved multi-node distributed training performance as well.
This will be especially beneficial for systems with multiple Infiniband
interfaces that have direct-GPU support, since all of them can be utilized for
aggregated communication bandwidth.
In both cases of single-node distributed training or multi-node distributed
training, this utility will launch the given number of processes per node
(``--nproc_per_node``). If used for GPU training, this number needs to be less
than or equal to the number of GPUs on the current system (``nproc_per_node``),
and each process will be operating on a single GPU from *GPU 0 to
GPU (nproc_per_node - 1)*.
**How to use this module:**
1. Single-Node multi-process distributed training
::
>>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 and all other
arguments of your training script)
2. Multi-Node multi-process distributed training: (e.g. two nodes)
Node 1: *(IP: 192.168.1.1, and has a free port: 1234)*
::
>>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
--nnodes=2 --node_rank=0 --master_addr="192.168.1.1"
--master_port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3
and all other arguments of your training script)
Node 2:
::
>>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
--nnodes=2 --node_rank=1 --master_addr="192.168.1.1"
--master_port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3
and all other arguments of your training script)
3. To look up what optional arguments this module offers:
::
>>> python -m torch.distributed.launch --help
**Important Notices:**
1. This utility and multi-process distributed (single-node or
multi-node) GPU training currently only achieves the best performance using
the NCCL distributed backend. Thus NCCL backend is the recommended backend to
use for GPU training.
2. In your training program, you must parse the command-line argument:
``--local_rank=LOCAL_PROCESS_RANK``, which will be provided by this module.
If your training program uses GPUs, you should ensure that your code only
runs on the GPU device of LOCAL_PROCESS_RANK. This can be done by:
Parsing the local_rank argument
::
>>> import argparse
>>> parser = argparse.ArgumentParser()
>>> parser.add_argument("--local_rank", type=int)
>>> args = parser.parse_args()
Set your device to local rank using either
::
>>> torch.cuda.set_device(arg.local_rank) # before your code runs
or
::
>>> with torch.cuda.device(arg.local_rank):
>>> # your code to run
3. In your training program, you are supposed to call the following function
at the beginning to start the distributed backend. You need to make sure that
the init_method uses ``env://``, which is the only ``init_method`` supported
by this module.
::
torch.distributed.init_process_group(backend='YOUR BACKEND',
init_method='env://')
4. In your training program, you can either use regular distributed functions
or use :func:`torch.nn.parallel.DistributedDataParallel` module. If your
training program uses GPUs for training and you would like to use
:func:`torch.nn.parallel.DistributedDataParallel` module,
here is how to configure it.
::
model = torch.nn.parallel.DistributedDataParallel(model,
device_ids=[arg.local_rank],
output_device=arg.local_rank)
Please ensure that the ``device_ids`` argument is set to the only GPU device id
that your code will be operating on. This is generally the local rank of the
process. In other words, ``device_ids`` needs to be ``[args.local_rank]``,
and ``output_device`` needs to be ``args.local_rank`` in order to use this
utility.
5. Another way to pass ``local_rank`` to the subprocesses is via the environment variable
``LOCAL_RANK``. This behavior is enabled when you launch the script with
``--use_env=True``. You must adjust the subprocess example above to replace
``args.local_rank`` with ``os.environ['LOCAL_RANK']``; the launcher
will not pass ``--local_rank`` when you specify this flag.
.. warning::
``local_rank`` is NOT globally unique: it is only unique per process
on a machine. Thus, don't use it to decide if you should, e.g.,
write to a networked filesystem. See
https://github.com/pytorch/pytorch/issues/12042 for an example of
how things can go wrong if you don't do this correctly.
"""
import sys
import subprocess
import os
from argparse import ArgumentParser, REMAINDER
def parse_args():
"""
Helper function parsing the command line options
@retval ArgumentParser
"""
parser = ArgumentParser(description="PyTorch distributed training launch "
"helper utilty that will spawn up "
"multiple distributed processes")
# Optional arguments for the launch helper
parser.add_argument("--nnodes", type=int, default=1,
help="The number of nodes to use for distributed "
"training")
parser.add_argument("--node_rank", type=int, default=0,
help="The rank of the node for multi-node distributed "
"training")
parser.add_argument("--nproc_per_node", type=int, default=1,
help="The number of processes to launch on each node, "
"for GPU training, this is recommended to be set "
"to the number of GPUs in your system so that "
"each process can be bound to a single GPU.")
parser.add_argument("--master_addr", default="127.0.0.1", type=str,
help="Master node (rank 0)'s address, should be either "
"the IP address or the hostname of node 0, for "
"single node multi-proc training, the "
"--master_addr can simply be 127.0.0.1")
parser.add_argument("--master_port", default=29500, type=int,
help="Master node (rank 0)'s free port that needs to "
"be used for communciation during distributed "
"training")
parser.add_argument("--use_env", default=False, action="store_true",
help="Use environment variable to pass "
"'local rank'. For legacy reasons, the default value is False. "
"If set to True, the script will not pass "
"--local_rank as argument, and will instead set LOCAL_RANK.")
# positional
parser.add_argument("training_script", type=str,
help="The full path to the single GPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script")
# rest from the training program
parser.add_argument('training_script_args', nargs=REMAINDER)
return parser.parse_args()
def main():
args = parse_args()
# world size in terms of number of processes
dist_world_size = args.nproc_per_node * args.nnodes
# set PyTorch distributed related environmental variables
current_env = os.environ.copy()
current_env["MASTER_ADDR"] = args.master_addr
current_env["MASTER_PORT"] = str(args.master_port)
current_env["WORLD_SIZE"] = str(dist_world_size)
processes = []
for local_rank in range(0, args.nproc_per_node):
# each process's rank
dist_rank = args.nproc_per_node * args.node_rank + local_rank
current_env["RANK"] = str(dist_rank)
current_env["LOCAL_RANK"] = str(local_rank)
# spawn the processes
if args.use_env:
cmd = [sys.executable, "-u",
args.training_script] + args.training_script_args
else:
cmd = [sys.executable,
"-u",
args.training_script,
"--local_rank={}".format(local_rank)] + args.training_script_args
process = subprocess.Popen(cmd, env=current_env)
processes.append(process)
for process in processes:
process.wait()
if process.returncode != 0:
raise subprocess.CalledProcessError(returncode=process.returncode,
cmd=cmd)
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
from bamboo_engine.builder import * # noqa
from bamboo_engine.engine import Engine
from pipeline.eri.runtime import BambooDjangoRuntime
from ..utils import * # noqa
def test_retry_subprocess():
subproc_start = EmptyStartEvent()
subproc_act = ServiceActivity(component_code="debug_node")
subproc_end = EmptyEndEvent()
subproc_start.extend(subproc_act).extend(subproc_end)
params = Params({"${raise_var}": Var(type=Var.LAZY, custom_type="raise_variable", value="")})
start = EmptyStartEvent()
subproc = SubProcess(start=subproc_start, params=params)
end = EmptyEndEvent()
start.extend(subproc).extend(end)
pipeline = build_tree(start)
    runtime = BambooDjangoRuntime()
    engine = Engine(runtime)
engine.run_pipeline(pipeline=pipeline, root_pipeline_data={})
sleep(1)
old_state = runtime.get_state(subproc.id)
assert old_state.name == states.FAILED
engine.retry_subprocess(subproc.id)
sleep(1)
state = runtime.get_state(subproc.id)
assert state.name == states.FAILED
assert state.version != old_state.version
histories = runtime.get_histories(subproc.id)
assert len(histories) == 1
assert histories[0].node_id == subproc.id
assert histories[0].loop == 1
assert histories[0].retry == 0
assert histories[0].skip is False
assert histories[0].started_time is not None
assert histories[0].archived_time is not None
assert histories[0].inputs == {}
assert len(histories[0].outputs) == 1
assert "ex_data" in histories[0].outputs
assert histories[0].version == old_state.version
|
"""
Cross-validation with blocks.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "3-clause BSD"
__maintainer__ = "Steven Kearnes"
from theano.compat.six.moves import xrange
from pylearn2.blocks import StackedBlocks
class StackedBlocksCV(object):
"""
Multi-layer transforms using cross-validation models.
Parameters
----------
layers : iterable (list of lists)
Cross-validation models for each layer. Should be a list of lists,
where the first index is for the layer and the second index is for
the cross-validation fold.
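    Examples
    --------
    A minimal sketch; ``block_ij`` here stand for hypothetical trained blocks
    for layer ``i`` and cross-validation fold ``j``:

    >>> layers = [[block_00, block_01],
    ...           [block_10, block_11]]
    >>> stacked_cv = StackedBlocksCV(layers)
    >>> fold_0 = stacked_cv.select_fold(0)  # StackedBlocks for fold 0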
"""
def __init__(self, layers):
stacked_blocks = []
n_folds = len(layers[0])
assert all([len(layer) == n_folds for layer in layers])
# stack the k-th block from each layer
for k in xrange(n_folds):
this_blocks = []
for i, layer in enumerate(layers):
this_blocks.append(layer[k])
this_stacked_blocks = StackedBlocks(this_blocks)
stacked_blocks.append(this_stacked_blocks)
# _folds contains a StackedBlocks instance for each CV fold
self._folds = stacked_blocks
def select_fold(self, k):
"""
Choose a single cross-validation fold to represent.
Parameters
----------
k : int
Index of selected fold.
"""
return self._folds[k]
def get_input_space(self):
"""Get input space."""
return self._folds[0][0].get_input_space()
def get_output_space(self):
"""Get output space."""
return self._folds[0][-1].get_output_space()
def set_input_space(self, space):
"""
Set input space.
Parameters
----------
        space : Space
Input space.
"""
for fold in self._folds:
this_space = space
for layer in fold._layers:
layer.set_input_space(this_space)
this_space = layer.get_output_space()
|
# Generated by Django 2.2.1 on 2019-05-12 08:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('leads', '0004_lead_documentstagecode'),
]
operations = [
migrations.AlterField(
model_name='lead',
name='email',
field=models.CharField(blank=True, max_length=100),
),
migrations.AlterField(
model_name='lead',
name='name',
field=models.CharField(blank=True, max_length=100),
),
]
|
import asyncio
import json
import logging
import socket
import time
import traceback
from pathlib import Path
from typing import Callable, Dict, List, Optional, Set, Tuple, Union
from blspy import PrivateKey
from chia.consensus.block_record import BlockRecord
from chia.consensus.blockchain_interface import BlockchainInterface
from chia.consensus.constants import ConsensusConstants
from chia.consensus.multiprocess_validation import PreValidationResult
from chia.daemon.keychain_proxy import (
KeychainProxy,
KeychainProxyConnectionFailure,
KeyringIsEmpty,
KeyringIsLocked,
connect_to_keychain_and_validate,
wrap_local_keychain,
)
from chia.pools.pool_puzzles import SINGLETON_LAUNCHER_HASH
from chia.protocols import wallet_protocol
from chia.protocols.full_node_protocol import RequestProofOfWeight, RespondProofOfWeight
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.protocols.wallet_protocol import (
RejectAdditionsRequest,
RejectRemovalsRequest,
RequestAdditions,
RequestHeaderBlocks,
RespondAdditions,
RespondBlockHeader,
RespondHeaderBlocks,
RespondRemovals,
)
from chia.server.node_discovery import WalletPeers
from chia.server.outbound_message import Message, NodeType, make_msg
from chia.server.server import ChiaServer
from chia.server.ws_connection import WSChiaConnection
from chia.types.blockchain_format.coin import Coin, hash_coin_list
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.coin_spend import CoinSpend
from chia.types.header_block import HeaderBlock
from chia.types.mempool_inclusion_status import MempoolInclusionStatus
from chia.types.peer_info import PeerInfo
from chia.util.byte_types import hexstr_to_bytes
from chia.util.check_fork_next_block import check_fork_next_block
from chia.util.errors import Err, ValidationError
from chia.util.ints import uint32, uint128
from chia.util.keychain import Keychain
from chia.util.lru_cache import LRUCache
from chia.util.merkle_set import MerkleSet, confirm_included_already_hashed, confirm_not_included_already_hashed
from chia.util.path import mkdir, path_from_root
from chia.wallet.block_record import HeaderBlockRecord
from chia.wallet.derivation_record import DerivationRecord
from chia.wallet.settings.settings_objects import BackupInitialized
from chia.wallet.transaction_record import TransactionRecord
from chia.wallet.util.backup_utils import open_backup_file
from chia.wallet.util.wallet_types import WalletType
from chia.wallet.wallet_action import WalletAction
from chia.wallet.wallet_blockchain import ReceiveBlockResult
from chia.wallet.wallet_state_manager import WalletStateManager
from chia.util.profiler import profile_task
class WalletNode:
key_config: Dict
config: Dict
constants: ConsensusConstants
keychain_proxy: Optional[KeychainProxy]
local_keychain: Optional[Keychain] # For testing only. KeychainProxy is used in normal cases
server: Optional[ChiaServer]
log: logging.Logger
wallet_peers: WalletPeers
# Maintains the state of the wallet (blockchain and transactions), handles DB connections
wallet_state_manager: Optional[WalletStateManager]
# How far away from LCA we must be to perform a full sync. Before then, do a short sync,
# which is consecutive requests for the previous block
short_sync_threshold: int
_shut_down: bool
root_path: Path
state_changed_callback: Optional[Callable]
syncing: bool
full_node_peer: Optional[PeerInfo]
peer_task: Optional[asyncio.Task]
logged_in: bool
wallet_peers_initialized: bool
def __init__(
self,
config: Dict,
root_path: Path,
consensus_constants: ConsensusConstants,
name: str = None,
local_keychain: Optional[Keychain] = None,
):
self.config = config
self.constants = consensus_constants
self.keychain_proxy = None
self.local_keychain = local_keychain
self.root_path = root_path
self.log = logging.getLogger(name if name else __name__)
# Normal operation data
self.cached_blocks: Dict = {}
self.future_block_hashes: Dict = {}
# Sync data
self._shut_down = False
self.proof_hashes: List = []
self.header_hashes: List = []
self.header_hashes_error = False
self.short_sync_threshold = 15 # Change the test when changing this
self.potential_blocks_received: Dict = {}
self.potential_header_hashes: Dict = {}
self.state_changed_callback = None
self.wallet_state_manager = None
self.backup_initialized = False # Delay first launch sync after user imports backup info or decides to skip
self.server = None
self.wsm_close_task = None
self.sync_task: Optional[asyncio.Task] = None
self.logged_in_fingerprint: Optional[int] = None
self.peer_task = None
self.logged_in = False
self.wallet_peers_initialized = False
self.last_new_peak_messages = LRUCache(5)
async def ensure_keychain_proxy(self) -> KeychainProxy:
if not self.keychain_proxy:
if self.local_keychain:
self.keychain_proxy = wrap_local_keychain(self.local_keychain, log=self.log)
else:
self.keychain_proxy = await connect_to_keychain_and_validate(self.root_path, self.log)
if not self.keychain_proxy:
raise KeychainProxyConnectionFailure("Failed to connect to keychain service")
return self.keychain_proxy
async def get_key_for_fingerprint(self, fingerprint: Optional[int]) -> Optional[PrivateKey]:
        key: Optional[PrivateKey] = None
try:
keychain_proxy = await self.ensure_keychain_proxy()
key = await keychain_proxy.get_key_for_fingerprint(fingerprint)
except KeyringIsEmpty:
self.log.warning("No keys present. Create keys with the UI, or with the 'sit keys' program.")
return None
except KeyringIsLocked:
self.log.warning("Keyring is locked")
return None
except KeychainProxyConnectionFailure as e:
tb = traceback.format_exc()
self.log.error(f"Missing keychain_proxy: {e} {tb}")
raise e # Re-raise so that the caller can decide whether to continue or abort
return key
async def _start(
self,
fingerprint: Optional[int] = None,
new_wallet: bool = False,
backup_file: Optional[Path] = None,
skip_backup_import: bool = False,
) -> bool:
try:
private_key = await self.get_key_for_fingerprint(fingerprint)
except KeychainProxyConnectionFailure:
self.log.error("Failed to connect to keychain service")
return False
if private_key is None:
self.logged_in = False
return False
if self.config.get("enable_profiler", False):
asyncio.create_task(profile_task(self.root_path, "wallet", self.log))
db_path_key_suffix = str(private_key.get_g1().get_fingerprint())
db_path_replaced: str = (
self.config["database_path"]
.replace("CHALLENGE", self.config["selected_network"])
.replace("KEY", db_path_key_suffix)
)
path = path_from_root(self.root_path, db_path_replaced)
mkdir(path.parent)
self.new_peak_lock = asyncio.Lock()
assert self.server is not None
self.wallet_state_manager = await WalletStateManager.create(
private_key, self.config, path, self.constants, self.server, self.root_path
)
self.wsm_close_task = None
assert self.wallet_state_manager is not None
backup_settings: BackupInitialized = self.wallet_state_manager.user_settings.get_backup_settings()
if backup_settings.user_initialized is False:
if new_wallet is True:
await self.wallet_state_manager.user_settings.user_created_new_wallet()
self.wallet_state_manager.new_wallet = True
elif skip_backup_import is True:
await self.wallet_state_manager.user_settings.user_skipped_backup_import()
elif backup_file is not None:
await self.wallet_state_manager.import_backup_info(backup_file)
else:
self.backup_initialized = False
await self.wallet_state_manager.close_all_stores()
self.wallet_state_manager = None
self.logged_in = False
return False
self.backup_initialized = True
# Start peers here after the backup initialization has finished
# We only want to do this once per instantiation
# However, doing it earlier before backup initialization causes
# the wallet to spam the introducer
if self.wallet_peers_initialized is False:
asyncio.create_task(self.wallet_peers.start())
self.wallet_peers_initialized = True
if backup_file is not None:
json_dict = open_backup_file(backup_file, self.wallet_state_manager.private_key)
if "start_height" in json_dict["data"]:
start_height = json_dict["data"]["start_height"]
self.config["starting_height"] = max(0, start_height - self.config["start_height_buffer"])
else:
self.config["starting_height"] = 0
else:
self.config["starting_height"] = 0
if self.state_changed_callback is not None:
self.wallet_state_manager.set_callback(self.state_changed_callback)
self.wallet_state_manager.set_pending_callback(self._pending_tx_handler)
self._shut_down = False
self.peer_task = asyncio.create_task(self._periodically_check_full_node())
self.sync_event = asyncio.Event()
self.sync_task = asyncio.create_task(self.sync_job())
self.logged_in_fingerprint = fingerprint
self.logged_in = True
return True
def _close(self):
self.log.info("self._close")
self.logged_in_fingerprint = None
self._shut_down = True
async def _await_closed(self):
self.log.info("self._await_closed")
await self.server.close_all_connections()
asyncio.create_task(self.wallet_peers.ensure_is_closed())
if self.wallet_state_manager is not None:
await self.wallet_state_manager.close_all_stores()
self.wallet_state_manager = None
if self.sync_task is not None:
self.sync_task.cancel()
self.sync_task = None
if self.peer_task is not None:
self.peer_task.cancel()
self.peer_task = None
self.logged_in = False
def _set_state_changed_callback(self, callback: Callable):
self.state_changed_callback = callback
if self.wallet_state_manager is not None:
self.wallet_state_manager.set_callback(self.state_changed_callback)
self.wallet_state_manager.set_pending_callback(self._pending_tx_handler)
def _pending_tx_handler(self):
if self.wallet_state_manager is None or self.backup_initialized is False:
return None
asyncio.create_task(self._resend_queue())
async def _action_messages(self) -> List[Message]:
if self.wallet_state_manager is None or self.backup_initialized is False:
return []
actions: List[WalletAction] = await self.wallet_state_manager.action_store.get_all_pending_actions()
result: List[Message] = []
for action in actions:
data = json.loads(action.data)
action_data = data["data"]["action_data"]
if action.name == "request_puzzle_solution":
coin_name = bytes32(hexstr_to_bytes(action_data["coin_name"]))
height = uint32(action_data["height"])
msg = make_msg(
ProtocolMessageTypes.request_puzzle_solution,
wallet_protocol.RequestPuzzleSolution(coin_name, height),
)
result.append(msg)
return result
async def _resend_queue(self):
if (
self._shut_down
or self.server is None
or self.wallet_state_manager is None
            or self.backup_initialized is False
):
return None
for msg, sent_peers in await self._messages_to_resend():
if (
self._shut_down
or self.server is None
or self.wallet_state_manager is None
                or self.backup_initialized is False
):
return None
full_nodes = self.server.get_full_node_connections()
for peer in full_nodes:
if peer.peer_node_id in sent_peers:
continue
await peer.send_message(msg)
for msg in await self._action_messages():
if (
self._shut_down
or self.server is None
or self.wallet_state_manager is None
                or self.backup_initialized is False
):
return None
await self.server.send_to_all([msg], NodeType.FULL_NODE)
async def _messages_to_resend(self) -> List[Tuple[Message, Set[bytes32]]]:
if self.wallet_state_manager is None or self.backup_initialized is False or self._shut_down:
return []
messages: List[Tuple[Message, Set[bytes32]]] = []
records: List[TransactionRecord] = await self.wallet_state_manager.tx_store.get_not_sent()
for record in records:
if record.spend_bundle is None:
continue
msg = make_msg(
ProtocolMessageTypes.send_transaction,
wallet_protocol.SendTransaction(record.spend_bundle),
)
already_sent = set()
for peer, status, _ in record.sent_to:
if status == MempoolInclusionStatus.SUCCESS.value:
already_sent.add(hexstr_to_bytes(peer))
messages.append((msg, already_sent))
return messages
def set_server(self, server: ChiaServer):
self.server = server
DNS_SERVERS_EMPTY: list = []
# TODO: Perhaps use a different set of DNS seeders for wallets, to split the traffic.
self.wallet_peers = WalletPeers(
self.server,
self.root_path,
self.config["target_peer_count"],
self.config["wallet_peers_path"],
self.config["introducer_peer"],
DNS_SERVERS_EMPTY,
self.config["peer_connect_interval"],
self.config["selected_network"],
None,
self.log,
)
async def on_connect(self, peer: WSChiaConnection):
if self.wallet_state_manager is None or self.backup_initialized is False:
return None
messages_peer_ids = await self._messages_to_resend()
self.wallet_state_manager.state_changed("add_connection")
for msg, peer_ids in messages_peer_ids:
if peer.peer_node_id in peer_ids:
continue
await peer.send_message(msg)
if not self.has_full_node() and self.wallet_peers is not None:
asyncio.create_task(self.wallet_peers.on_connect(peer))
async def _periodically_check_full_node(self) -> None:
tries = 0
while not self._shut_down and tries < 5:
if self.has_full_node():
await self.wallet_peers.ensure_is_closed()
if self.wallet_state_manager is not None:
self.wallet_state_manager.state_changed("add_connection")
break
tries += 1
await asyncio.sleep(self.config["peer_connect_interval"])
def has_full_node(self) -> bool:
if self.server is None:
return False
if "full_node_peer" in self.config:
full_node_peer = PeerInfo(
self.config["full_node_peer"]["host"],
self.config["full_node_peer"]["port"],
)
peers = [c.get_peer_info() for c in self.server.get_full_node_connections()]
full_node_resolved = PeerInfo(socket.gethostbyname(full_node_peer.host), full_node_peer.port)
if full_node_peer in peers or full_node_resolved in peers:
self.log.info(f"Will not attempt to connect to other nodes, already connected to {full_node_peer}")
for connection in self.server.get_full_node_connections():
if (
connection.get_peer_info() != full_node_peer
and connection.get_peer_info() != full_node_resolved
):
self.log.info(f"Closing unnecessary connection to {connection.get_peer_logging()}.")
asyncio.create_task(connection.close())
return True
return False
async def complete_blocks(self, header_blocks: List[HeaderBlock], peer: WSChiaConnection):
if self.wallet_state_manager is None:
return None
header_block_records: List[HeaderBlockRecord] = []
assert self.server
trusted = self.server.is_trusted_peer(peer, self.config["trusted_peers"])
async with self.wallet_state_manager.blockchain.lock:
for block in header_blocks:
if block.is_transaction_block:
# Find additions and removals
(additions, removals,) = await self.wallet_state_manager.get_filter_additions_removals(
block, block.transactions_filter, None
)
# Get Additions
added_coins = await self.get_additions(peer, block, additions)
if added_coins is None:
raise ValueError("Failed to fetch additions")
# Get removals
removed_coins = await self.get_removals(peer, block, added_coins, removals)
if removed_coins is None:
raise ValueError("Failed to fetch removals")
                    # If a launcher was created, or a singleton was spent, fetch the required solutions
additional_coin_spends: List[CoinSpend] = await self.get_additional_coin_spends(
peer, block, added_coins, removed_coins
)
hbr = HeaderBlockRecord(block, added_coins, removed_coins)
else:
hbr = HeaderBlockRecord(block, [], [])
header_block_records.append(hbr)
additional_coin_spends = []
(result, error, fork_h,) = await self.wallet_state_manager.blockchain.receive_block(
hbr, trusted=trusted, additional_coin_spends=additional_coin_spends
)
if result == ReceiveBlockResult.NEW_PEAK:
if not self.wallet_state_manager.sync_mode:
self.wallet_state_manager.blockchain.clean_block_records()
self.wallet_state_manager.state_changed("new_block")
self.wallet_state_manager.state_changed("sync_changed")
await self.wallet_state_manager.new_peak()
elif result == ReceiveBlockResult.INVALID_BLOCK:
self.log.info(f"Invalid block from peer: {peer.get_peer_logging()} {error}")
await peer.close()
return
else:
self.log.debug(f"Result: {result}")
async def new_peak_wallet(self, peak: wallet_protocol.NewPeakWallet, peer: WSChiaConnection):
if self.wallet_state_manager is None:
return
if self.wallet_state_manager.blockchain.contains_block(peak.header_hash):
self.log.debug(f"known peak {peak.header_hash}")
return
if self.wallet_state_manager.sync_mode:
self.last_new_peak_messages.put(peer, peak)
return
async with self.new_peak_lock:
curr_peak = self.wallet_state_manager.blockchain.get_peak()
if curr_peak is not None and curr_peak.weight >= peak.weight:
return
request = wallet_protocol.RequestBlockHeader(peak.height)
response: Optional[RespondBlockHeader] = await peer.request_block_header(request)
if response is None or not isinstance(response, RespondBlockHeader) or response.header_block is None:
self.log.warning(f"bad peak response from peer {response}")
return
header_block = response.header_block
curr_peak_height = 0 if curr_peak is None else curr_peak.height
if (curr_peak_height == 0 and peak.height < self.constants.WEIGHT_PROOF_RECENT_BLOCKS) or (
curr_peak_height > peak.height - 200
):
if peak.height <= curr_peak_height + self.config["short_sync_blocks_behind_threshold"]:
await self.wallet_short_sync_backtrack(header_block, peer)
else:
await self.batch_sync_to_peak(curr_peak_height, peak)
elif peak.height >= self.constants.WEIGHT_PROOF_RECENT_BLOCKS:
# Request weight proof
# Sync if PoW validates
weight_request = RequestProofOfWeight(peak.height, peak.header_hash)
weight_proof_response: RespondProofOfWeight = await peer.request_proof_of_weight(
weight_request, timeout=360
)
if weight_proof_response is None:
return
weight_proof = weight_proof_response.wp
if self.wallet_state_manager is None:
return
if self.server is not None and self.server.is_trusted_peer(peer, self.config["trusted_peers"]):
valid, fork_point = self.wallet_state_manager.weight_proof_handler.get_fork_point_no_validations(
weight_proof
)
else:
valid, fork_point, _ = await self.wallet_state_manager.weight_proof_handler.validate_weight_proof(
weight_proof
)
if not valid:
self.log.error(
f"invalid weight proof, num of epochs {len(weight_proof.sub_epochs)}"
f" recent blocks num ,{len(weight_proof.recent_chain_data)}"
)
self.log.debug(f"{weight_proof}")
return
self.log.info(f"Validated, fork point is {fork_point}")
self.wallet_state_manager.sync_store.add_potential_fork_point(
header_block.header_hash, uint32(fork_point)
)
self.wallet_state_manager.sync_store.add_potential_peak(header_block)
self.start_sync()
async def wallet_short_sync_backtrack(self, header_block, peer):
top = header_block
blocks = [top]
# Fetch blocks backwards until we hit the one that we have,
# then complete them with additions / removals going forward
while not self.wallet_state_manager.blockchain.contains_block(top.prev_header_hash) and top.height > 0:
request_prev = wallet_protocol.RequestBlockHeader(top.height - 1)
response_prev: Optional[RespondBlockHeader] = await peer.request_block_header(request_prev)
if response_prev is None or not isinstance(response_prev, RespondBlockHeader):
raise RuntimeError("bad block header response from peer while syncing")
prev_head = response_prev.header_block
blocks.append(prev_head)
top = prev_head
blocks.reverse()
await self.complete_blocks(blocks, peer)
await self.wallet_state_manager.create_more_puzzle_hashes()
async def batch_sync_to_peak(self, fork_height, peak):
advanced_peak = False
batch_size = self.constants.MAX_BLOCK_COUNT_PER_REQUESTS
for i in range(max(0, fork_height - 1), peak.height, batch_size):
start_height = i
end_height = min(peak.height, start_height + batch_size)
peers = self.server.get_full_node_connections()
added = False
for peer in peers:
try:
added, advanced_peak = await self.fetch_blocks_and_validate(
peer, uint32(start_height), uint32(end_height), None if advanced_peak else fork_height
)
if added:
break
except Exception as e:
await peer.close()
exc = traceback.format_exc()
self.log.error(f"Error while trying to fetch from peer:{e} {exc}")
if not added:
raise RuntimeError(f"Was not able to add blocks {start_height}-{end_height}")
curr_peak = self.wallet_state_manager.blockchain.get_peak()
assert peak is not None
self.wallet_state_manager.blockchain.clean_block_record(
min(end_height, curr_peak.height) - self.constants.BLOCKS_CACHE_SIZE
)
def start_sync(self) -> None:
self.log.info("self.sync_event.set()")
self.sync_event.set()
async def check_new_peak(self) -> None:
if self.wallet_state_manager is None:
return None
current_peak: Optional[BlockRecord] = self.wallet_state_manager.blockchain.get_peak()
if current_peak is None:
return None
potential_peaks: List[
Tuple[bytes32, HeaderBlock]
] = self.wallet_state_manager.sync_store.get_potential_peaks_tuples()
for _, block in potential_peaks:
if current_peak.weight < block.weight:
await asyncio.sleep(5)
self.start_sync()
return None
async def sync_job(self) -> None:
while True:
self.log.info("Loop start in sync job")
if self._shut_down is True:
break
asyncio.create_task(self.check_new_peak())
await self.sync_event.wait()
self.last_new_peak_messages = LRUCache(5)
self.sync_event.clear()
if self._shut_down is True:
break
try:
assert self.wallet_state_manager is not None
self.wallet_state_manager.set_sync_mode(True)
await self._sync()
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Loop exception in sync {e}. {tb}")
finally:
if self.wallet_state_manager is not None:
self.wallet_state_manager.set_sync_mode(False)
for peer, peak in self.last_new_peak_messages.cache.items():
asyncio.create_task(self.new_peak_wallet(peak, peer))
self.log.info("Loop end in sync job")
async def _sync(self) -> None:
"""
Wallet has fallen far behind (or is starting up for the first time), and must be synced
up to the LCA of the blockchain.
"""
if self.wallet_state_manager is None or self.backup_initialized is False or self.server is None:
return None
highest_weight: uint128 = uint128(0)
peak_height: uint32 = uint32(0)
peak: Optional[HeaderBlock] = None
potential_peaks: List[
Tuple[bytes32, HeaderBlock]
] = self.wallet_state_manager.sync_store.get_potential_peaks_tuples()
self.log.info(f"Have collected {len(potential_peaks)} potential peaks")
for header_hash, potential_peak_block in potential_peaks:
if potential_peak_block.weight > highest_weight:
highest_weight = potential_peak_block.weight
peak_height = potential_peak_block.height
peak = potential_peak_block
if peak_height is None or peak_height == 0:
return None
if self.wallet_state_manager.peak is not None and highest_weight <= self.wallet_state_manager.peak.weight:
self.log.info("Not performing sync, already caught up.")
return None
peers: List[WSChiaConnection] = self.server.get_full_node_connections()
if len(peers) == 0:
self.log.info("No peers to sync to")
return None
async with self.wallet_state_manager.blockchain.lock:
fork_height = None
if peak is not None:
fork_height = self.wallet_state_manager.sync_store.get_potential_fork_point(peak.header_hash)
assert fork_height is not None
# This is the fork point in SES in the case where no fork was detected
peers = self.server.get_full_node_connections()
fork_height = await check_fork_next_block(
self.wallet_state_manager.blockchain, fork_height, peers, wallet_next_block_check
)
if fork_height is None:
fork_height = uint32(0)
await self.wallet_state_manager.blockchain.warmup(fork_height)
await self.batch_sync_to_peak(fork_height, peak)
async def fetch_blocks_and_validate(
self,
peer: WSChiaConnection,
height_start: uint32,
height_end: uint32,
fork_point_with_peak: Optional[uint32],
) -> Tuple[bool, bool]:
"""
Returns whether the blocks validated, and whether the peak was advanced
"""
if self.wallet_state_manager is None:
return False, False
self.log.info(f"Requesting blocks {height_start}-{height_end}")
request = RequestHeaderBlocks(uint32(height_start), uint32(height_end))
res: Optional[RespondHeaderBlocks] = await peer.request_header_blocks(request)
if res is None or not isinstance(res, RespondHeaderBlocks):
raise ValueError("Peer returned no response")
header_blocks: List[HeaderBlock] = res.header_blocks
advanced_peak = False
if header_blocks is None:
raise ValueError(f"No response from peer {peer}")
assert self.server
trusted = self.server.is_trusted_peer(peer, self.config["trusted_peers"])
pre_validation_results: Optional[List[PreValidationResult]] = None
if not trusted:
pre_validation_results = await self.wallet_state_manager.blockchain.pre_validate_blocks_multiprocessing(
header_blocks
)
if pre_validation_results is None:
return False, advanced_peak
assert len(header_blocks) == len(pre_validation_results)
for i in range(len(header_blocks)):
header_block = header_blocks[i]
if not trusted and pre_validation_results is not None and pre_validation_results[i].error is not None:
raise ValidationError(Err(pre_validation_results[i].error))
fork_point_with_old_peak = None if advanced_peak else fork_point_with_peak
if header_block.is_transaction_block:
# Find additions and removals
(additions, removals,) = await self.wallet_state_manager.get_filter_additions_removals(
header_block, header_block.transactions_filter, fork_point_with_old_peak
)
# Get Additions
added_coins = await self.get_additions(peer, header_block, additions)
if added_coins is None:
raise ValueError("Failed to fetch additions")
# Get removals
removed_coins = await self.get_removals(peer, header_block, added_coins, removals)
if removed_coins is None:
raise ValueError("Failed to fetch removals")
                # If a launcher was created, or a singleton was spent, fetch the required solutions
additional_coin_spends: List[CoinSpend] = await self.get_additional_coin_spends(
peer, header_block, added_coins, removed_coins
)
header_block_record = HeaderBlockRecord(header_block, added_coins, removed_coins)
else:
header_block_record = HeaderBlockRecord(header_block, [], [])
additional_coin_spends = []
start_t = time.time()
if trusted:
(result, error, fork_h,) = await self.wallet_state_manager.blockchain.receive_block(
header_block_record,
None,
trusted,
fork_point_with_old_peak,
additional_coin_spends=additional_coin_spends,
)
else:
assert pre_validation_results is not None
(result, error, fork_h,) = await self.wallet_state_manager.blockchain.receive_block(
header_block_record,
pre_validation_results[i],
trusted,
fork_point_with_old_peak,
additional_coin_spends=additional_coin_spends,
)
self.log.debug(
f"Time taken to validate {header_block.height} with fork "
f"{fork_point_with_old_peak}: {time.time() - start_t}"
)
if result == ReceiveBlockResult.NEW_PEAK:
advanced_peak = True
self.wallet_state_manager.state_changed("new_block")
elif result == ReceiveBlockResult.INVALID_BLOCK:
raise ValueError("Value error peer sent us invalid block")
if advanced_peak:
await self.wallet_state_manager.create_more_puzzle_hashes()
return True, advanced_peak
def validate_additions(
self,
coins: List[Tuple[bytes32, List[Coin]]],
proofs: Optional[List[Tuple[bytes32, bytes, Optional[bytes]]]],
root,
):
if proofs is None:
# Verify root
additions_merkle_set = MerkleSet()
# Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash
for puzzle_hash, coins_l in coins:
additions_merkle_set.add_already_hashed(puzzle_hash)
additions_merkle_set.add_already_hashed(hash_coin_list(coins_l))
additions_root = additions_merkle_set.get_root()
if root != additions_root:
return False
else:
for i in range(len(coins)):
assert coins[i][0] == proofs[i][0]
coin_list_1: List[Coin] = coins[i][1]
puzzle_hash_proof: bytes32 = proofs[i][1]
coin_list_proof: Optional[bytes32] = proofs[i][2]
if len(coin_list_1) == 0:
# Verify exclusion proof for puzzle hash
not_included = confirm_not_included_already_hashed(
root,
coins[i][0],
puzzle_hash_proof,
)
if not_included is False:
return False
else:
try:
# Verify inclusion proof for coin list
included = confirm_included_already_hashed(
root,
hash_coin_list(coin_list_1),
coin_list_proof,
)
if included is False:
return False
except AssertionError:
return False
try:
# Verify inclusion proof for puzzle hash
included = confirm_included_already_hashed(
root,
coins[i][0],
puzzle_hash_proof,
)
if included is False:
return False
except AssertionError:
return False
return True
def validate_removals(self, coins, proofs, root):
if proofs is None:
# If there are no proofs, it means all removals were returned in the response.
# we must find the ones relevant to our wallets.
# Verify removals root
removals_merkle_set = MerkleSet()
for name_coin in coins:
# TODO review all verification
name, coin = name_coin
if coin is not None:
removals_merkle_set.add_already_hashed(coin.name())
removals_root = removals_merkle_set.get_root()
if root != removals_root:
return False
else:
# This means the full node has responded only with the relevant removals
# for our wallet. Each merkle proof must be verified.
if len(coins) != len(proofs):
return False
for i in range(len(coins)):
# Coins are in the same order as proofs
if coins[i][0] != proofs[i][0]:
return False
coin = coins[i][1]
if coin is None:
# Verifies merkle proof of exclusion
not_included = confirm_not_included_already_hashed(
root,
coins[i][0],
proofs[i][1],
)
if not_included is False:
return False
else:
# Verifies merkle proof of inclusion of coin name
if coins[i][0] != coin.name():
return False
included = confirm_included_already_hashed(
root,
coin.name(),
proofs[i][1],
)
if included is False:
return False
return True
async def fetch_puzzle_solution(self, peer, height: uint32, coin: Coin) -> CoinSpend:
solution_response = await peer.request_puzzle_solution(
wallet_protocol.RequestPuzzleSolution(coin.name(), height)
)
if solution_response is None or not isinstance(solution_response, wallet_protocol.RespondPuzzleSolution):
raise ValueError(f"Was not able to obtain solution {solution_response}")
return CoinSpend(coin, solution_response.response.puzzle, solution_response.response.solution)
async def get_additional_coin_spends(
self, peer, block, added_coins: List[Coin], removed_coins: List[Coin]
) -> List[CoinSpend]:
assert self.wallet_state_manager is not None
additional_coin_spends: List[CoinSpend] = []
if len(removed_coins) > 0:
removed_coin_ids = set([coin.name() for coin in removed_coins])
all_added_coins = await self.get_additions(peer, block, [], get_all_additions=True)
assert all_added_coins is not None
if all_added_coins is not None:
for coin in all_added_coins:
# This searches specifically for a launcher being created, and adds the solution of the launcher
if coin.puzzle_hash == SINGLETON_LAUNCHER_HASH and coin.parent_coin_info in removed_coin_ids:
cs: CoinSpend = await self.fetch_puzzle_solution(peer, block.height, coin)
additional_coin_spends.append(cs)
# Apply this coin solution, which might add things to interested list
await self.wallet_state_manager.get_next_interesting_coin_ids(cs, False)
all_removed_coins: Optional[List[Coin]] = await self.get_removals(
peer, block, added_coins, removed_coins, request_all_removals=True
)
assert all_removed_coins is not None
all_removed_coins_dict: Dict[bytes32, Coin] = {coin.name(): coin for coin in all_removed_coins}
keep_searching = True
while keep_searching:
                # This keeps fetching solutions for coins in our interested list, in this block,
                # until there are no more coins of interest to fetch
keep_searching = False
interested_ids: List[
bytes32
] = await self.wallet_state_manager.interested_store.get_interested_coin_ids()
for coin_id in interested_ids:
if coin_id in all_removed_coins_dict:
coin = all_removed_coins_dict[coin_id]
cs = await self.fetch_puzzle_solution(peer, block.height, coin)
# Apply this coin solution, which might add things to interested list
await self.wallet_state_manager.get_next_interesting_coin_ids(cs, False)
additional_coin_spends.append(cs)
keep_searching = True
all_removed_coins_dict.pop(coin_id)
break
return additional_coin_spends
async def get_additions(
self, peer: WSChiaConnection, block_i, additions: Optional[List[bytes32]], get_all_additions: bool = False
) -> Optional[List[Coin]]:
if (additions is not None and len(additions) > 0) or get_all_additions:
if get_all_additions:
additions = None
additions_request = RequestAdditions(block_i.height, block_i.header_hash, additions)
additions_res: Optional[Union[RespondAdditions, RejectAdditionsRequest]] = await peer.request_additions(
additions_request
)
if additions_res is None:
await peer.close()
return None
elif isinstance(additions_res, RespondAdditions):
validated = self.validate_additions(
additions_res.coins,
additions_res.proofs,
block_i.foliage_transaction_block.additions_root,
)
if not validated:
await peer.close()
return None
added_coins = []
for ph_coins in additions_res.coins:
ph, coins = ph_coins
added_coins.extend(coins)
return added_coins
            elif isinstance(additions_res, RejectAdditionsRequest):
await peer.close()
return None
return None
else:
return [] # No added coins
async def get_removals(
self, peer: WSChiaConnection, block_i, additions, removals, request_all_removals=False
) -> Optional[List[Coin]]:
assert self.wallet_state_manager is not None
# Check if we need all removals
for coin in additions:
puzzle_store = self.wallet_state_manager.puzzle_store
record_info: Optional[DerivationRecord] = await puzzle_store.get_derivation_record_for_puzzle_hash(
coin.puzzle_hash.hex()
)
if record_info is not None and record_info.wallet_type == WalletType.COLOURED_COIN:
# TODO why ?
request_all_removals = True
break
if record_info is not None and record_info.wallet_type == WalletType.DISTRIBUTED_ID:
request_all_removals = True
break
if len(removals) > 0 or request_all_removals:
if request_all_removals:
removals_request = wallet_protocol.RequestRemovals(block_i.height, block_i.header_hash, None)
else:
removals_request = wallet_protocol.RequestRemovals(block_i.height, block_i.header_hash, removals)
removals_res: Optional[Union[RespondRemovals, RejectRemovalsRequest]] = await peer.request_removals(
removals_request
)
if removals_res is None:
return None
elif isinstance(removals_res, RespondRemovals):
validated = self.validate_removals(
removals_res.coins,
removals_res.proofs,
block_i.foliage_transaction_block.removals_root,
)
if validated is False:
await peer.close()
return None
removed_coins = []
for _, coins_l in removals_res.coins:
if coins_l is not None:
removed_coins.append(coins_l)
return removed_coins
elif isinstance(removals_res, RejectRemovalsRequest):
return None
else:
return None
else:
return []
async def wallet_next_block_check(
peer: WSChiaConnection, potential_peek: uint32, blockchain: BlockchainInterface
) -> bool:
block_response = await peer.request_header_blocks(
wallet_protocol.RequestHeaderBlocks(potential_peek, potential_peek)
)
if block_response is not None and isinstance(block_response, wallet_protocol.RespondHeaderBlocks):
our_peak = blockchain.get_peak()
if our_peak is not None and block_response.header_blocks[0].prev_header_hash == our_peak.header_hash:
return True
return False
|
from phiqnet.train.train import train_main
if __name__ == '__main__':
args = {}
args['multi_gpu'] = 0
args['gpu'] = 0
args['result_folder'] = r'..\databases\experiments\koniq_small'
args['n_quality_levels'] = 1
args['train_folders'] = [#r'..\databases\train\koniq_normal',
r'..\databases\train\koniq_small',]
# r'..\databases\train\live']
args['val_folders'] = [#r'..\databases\val\koniq_normal',
r'..\databases\val\koniq_small',]
# r'..\databases\val\live']
args['koniq_mos_file'] = r'..\databases\koniq10k_images_scores.csv'
args['live_mos_file'] = r'..\databases\live_mos.csv'
args['naive_backbone'] = False
args['backbone'] = 'resnet50'
args['model_weights'] = r'..\databases\experiments\koniq_small\resnet50_mos_attention_fpn\44_0.0094_0.0473.h5'
args['initial_epoch'] = 0
args['lr_base'] = 1e-6
args['lr_schedule'] = True
args['batch_size'] = 8
args['epochs'] = 120
args['fpn_type'] = 'fpn'
args['attention_module'] = True
args['image_aug'] = True
train_main(args)
|
import anchor
name = 'anchor'
|
# web_app/__init__.py
from flask import Flask
from web_app.models import db, migrate
from web_app.routes.home_routes import home_routes
from web_app.routes.book_routes import book_routes
DATABASE_URI = "sqlite:///twitoff_class.db" # using relative filepath
#DATABASE_URI = "sqlite:////Users/Username/Desktop/your-repo-name/web_app_99.db" # using absolute filepath on Mac (recommended)
#DATABASE_URI = "sqlite:///C:\\Users\\Username\\Desktop\\your-repo-name\\web_app_99.db" # using absolute filepath on Windows (recommended) h/t: https://stackoverflow.com/a/19262231/670433
def create_app():
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = DATABASE_URI
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db.init_app(app)
migrate.init_app(app, db)
app.register_blueprint(home_routes)
app.register_blueprint(book_routes)
return app
if __name__ == "__main__":
my_app = create_app()
my_app.run(debug=True)
|
from flask import jsonify, request, url_for, abort
from app import db
from app.api import bp
from app.api.auth import token_auth
from app.api.errors import bad_request
from app.models import User
@bp.route('/users/<int:id>', methods=['GET'])
@token_auth.login_required
def get_user(id):
return jsonify(User.query.get_or_404(id).to_dict())
@bp.route('/users', methods=['GET'])
@token_auth.login_required
def get_users():
page = request.args.get('page', 1, type=int)
per_page = min(request.args.get('per_page', 10, type=int), 100)
data = User.to_collection_dict(User.query, page, per_page, 'api.get_users')
return jsonify(data)
@bp.route('/users/<int:id>/followers', methods=['GET'])
@token_auth.login_required
def get_followers(id):
user = User.query.get_or_404(id)
page = request.args.get('page', 1, type=int)
per_page = min(request.args.get('per_page', 10, type=int), 100)
data = User.to_collection_dict(user.followers, page, per_page,
'api.get_followers', id=id)
return jsonify(data)
@bp.route('/users/<int:id>/followed', methods=['GET'])
@token_auth.login_required
def get_followed(id):
user = User.query.get_or_404(id)
page = request.args.get('page', 1, type=int)
per_page = min(request.args.get('per_page', 10, type=int), 100)
data = User.to_collection_dict(user.followed, page, per_page,
'api.get_followed', id=id)
return jsonify(data)
@bp.route('/users', methods=['POST'])
def create_user():
data = request.get_json() or {}
if 'username' not in data or 'email' not in data or 'password' not in data:
return bad_request('Request must include username, email and password')
if User.query.filter_by(username=data['username']).first():
return bad_request('Please use a different username')
if User.query.filter_by(email=data['email']).first():
return bad_request('Please use a different email')
user = User()
user.from_dict(data, new_user=True)
db.session.add(user)
db.session.commit()
response = jsonify(user.to_dict())
response.status_code = 201
response.headers['Location'] = url_for('api.get_user', id=user.id)
return response
@bp.route('/users/<int:id>', methods=['PUT'])
@token_auth.login_required
def update_user(id):
if token_auth.current_user().id != id:
abort(403)
user = User.query.get_or_404(id)
data = request.get_json() or {}
if 'username' in data and data['username'] != user.username and \
User.query.filter_by(username=data['username']).first():
return bad_request('Please use a different username')
if 'email' in data and data['email'] != user.email and \
User.query.filter_by(email=data['email']).first():
return bad_request('Please use a different email')
user.from_dict(data, new_user=False)
db.session.commit()
return jsonify(user.to_dict())
|
#!/usr/bin/env python
from __future__ import print_function
from optparse import OptionParser
import os
import sys
class ReadsSplitter:
def __init__(self):
self.options = None
self.files_to_split = []
self.getOptions()
def go(self):
for fn in self.files_to_split:
self.splitFile(fn)
def getOptions(self):
parser = OptionParser()
parser.add_option("-u", "--unaligned", dest="unaligned_dir", \
help="Unaligned read directory", metavar="DIR")
parser.add_option("-o", "--output", dest="output_dir",\
help="Directory for output", metavar="DIR",\
default="data/output/breakpoints/reads")
(options, args) = parser.parse_args()
self.options = options
if options.unaligned_dir:
for file_name in os.listdir(options.unaligned_dir):
if 'unaligned' in file_name:
self.files_to_split.append(options.unaligned_dir + file_name)
def splitFile(self, fn):
if not os.path.isfile(fn):
warning("%s DOES NOT EXIST" %(fn))
exit(1)
read_split_output_dir = self.options.output_dir
ensure_dir(read_split_output_dir)
read_split_output_1 = read_split_output_dir + os.path.split(fn)[1] + ".1"
read_split_output_2 = read_split_output_dir + os.path.split(fn)[1] + ".2"
read_file = open(fn, 'r')
r_o_1 = open(read_split_output_1, 'w')
r_o_2 = open(read_split_output_2, 'w')
for read in self.read_read(read_file):
h1 = read[0].strip()
read_contents = read[1].strip()
h2 = read[2].strip()
read_quality = read[3].strip()
#
l = len(read_contents)
l_1 = int(l / 3)
l_2 = int(l - l_1)
# left
h1_1 = h1 + "/1\n"
read_contents_1 = read_contents[0:l_1] + "\n"
h2_1 = h2 + "/1\n"
read_quality_1 = read_quality[0:l_1] + "\n"
# right
h1_2 = h1 + "/2\n"
read_contents_2 = read_contents[l_2:]+ "\n"
h2_2 = h2 + "/2\n"
read_quality_2 = read_quality[l_2:] + "\n"
r_o_1.write(h1_1)
r_o_1.write(read_contents_1)
r_o_1.write(h2_1)
r_o_1.write(read_quality_1)
r_o_2.write(h1_2)
r_o_2.write(read_contents_2)
r_o_2.write(h2_2)
r_o_2.write(read_quality_2)
r_o_1.close()
r_o_2.close()
read_file.close()
def read_read(self, fp):
while True:
read_bundle = []
for i in range(4):
read_bundle.append(fp.readline())
if not read_bundle[0]:
break
else:
yield read_bundle
def ensure_dir(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
def warning(*objs):
print("\tINFO: ",*objs, file=sys.stderr)
def main():
'''
    Splits read files for breakpoint detection.
'''
splitter = ReadsSplitter()
splitter.go()
if __name__=='__main__':
main()
|
# -*- coding: utf-8 -*-
'''
Copyright (c) 2019 Colin Curtain
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Author: Colin Curtain (ccbogel)
https://github.com/ccbogel/QualCoder
https://qualcoder.wordpress.com/
'''
from PyQt5 import QtWidgets, QtCore
import os
import sys
import logging
import traceback
from GUI.ui_dialog_information import Ui_Dialog_information
path = os.path.abspath(os.path.dirname(__file__))
logger = logging.getLogger(__name__)
def exception_handler(exception_type, value, tb_obj):
""" Global exception handler useful in GUIs.
tb_obj: exception.__traceback__ """
tb = '\n'.join(traceback.format_tb(tb_obj))
text = 'Traceback (most recent call last):\n' + tb + '\n' + exception_type.__name__ + ': ' + str(value)
print(text)
logger.error(_("Uncaught exception: ") + text)
QtWidgets.QMessageBox.critical(None, _('Uncaught Exception'), text)
class DialogInformation(QtWidgets.QDialog):
"""
Dialog to display about information from html and text files for PyQDA development,
version and license.
The html is coded below because it avoids potential data file import errors with pyinstaller.
Called from:
qualcoder.MainWindow.about
view_graph_original.ViewGraphOriginal.list_graph.TextGraphicsItem
view_graph_original.ViewGraphOriginal.circular_graph.TextGraphicsItem
"""
title = ""
text = ""
def __init__(self, app, title, html="", parent=None):
"""Display information text in dialog.
If no html is given, fill with About html. """
sys.excepthook = exception_handler
QtWidgets.QDialog.__init__(self)
self.ui = Ui_Dialog_information()
self.ui.setupUi(self)
self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)
font = 'font: ' + str(app.settings['fontsize']) + 'pt '
font += '"' + app.settings['font'] + '";'
self.setStyleSheet(font)
self.setWindowTitle(title)
if html == "":
self.setHtml(a)
else:
self.setHtml(html)
def setHtml(self, html):
"""This method is used to populate the textEdit.
Usually called from a View_graph TextGraphicsItem via a context menu. """
self.text = html
self.ui.textEdit.setHtml(self.text)
def accepted(self):
""" Accepted button overridden method """
self.information = self.ui.textEdit.toPlainText()
self.ui.Dialog_information.accept()
a = '<h1 class="western">About QualCoder</h1>\
<h2 class="western">Version:</h2>\
<p>QualCoder 1.9 2020 March 11</p>\
<p>Depends on python 3.x, pyqt5 lxml Pillow ebooklib ply chardet pdfminer.six openpyxl</p>\
<p>VLC should also be installed.</p>\
<p>Tested on: Linux Mint 18.04, Ubuntu 19.04, Lubuntu 18.04, mostly tested on Windows 10, partly tested on Mac OS.</p>\
<p></p>\
<h2 class="western">Acknowledgements</h2>\
<p>Ronggui Huang and Zhang Gehao for creating RQDA, which inspired this software.</p>\
<p>Mike MacCana for the source code for the docx module.</p>\
<p>User: bit4 on stackoverflow who presented the source code to convert html to text.</p>\
<p>ebooklib: Aleksandar Erkalović (<a href="https://github.com/aerkalov">https://github.com/aerkalov</a>)</p>\
<p>The VideoLAN team for the bindings to VLC</p>\
<p>To various members on github for supporting this project.</p>\
<h2 class="western">Other details</h2\
<p>The qda data folder contains folders for imported documents, \
images, audio and video. It also contains the sqlite database, named data.qda, to store coding data.</p>\
<p>QualCoder creates a .qualcoder folder inside your home directory. \
This contains QualCoder.log, config.ini (for settings) and \
recent_project.txt. The config file contains the name of the current coder, \
default working directory and selected font.</p>\
<p>QualCoder is written in python 3 using Qt5 for the graphical interface.</p>\
<p>The REFI-QDA Project import and export are experimental and should not be relied upon. </p>\
<h2 class="western">License</h2>\
<p>MIT License</p>\
<p>Copyright (c) 2020 Colin Curtain</p>\
<p>Permission is hereby granted, free of charge, to any person<br />\
obtaining a copy of this software and associated documentation files<br />\
(the "Software"), to deal in the Software without<br />\
restriction, including without limitation the rights to use, copy,<br />\
modify, merge, publish, distribute, sublicense, and/or sell copies of<br />\
the Software, and to permit persons to whom the Software is furnished<br />\
to do so, subject to the following conditions:</p>\
<p>The above copyright notice and this permission notice shall be <br />\
included in all copies or substantial portions of the Software.</p>\
<p>THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF<br />\
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE<br />\
WARRANTIES OF MERCHANTABILITY,</p>\
<p>FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT<br />\
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,<br />\
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR<br />\
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR<br />\
THE USE OR OTHER DEALINGS IN THE SOFTWARE.</p>'
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
ui = DialogInformation(None, "a title", "")
ui.show()
sys.exit(app.exec_())
|
# coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.2342
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ResourceListOfPortfolio(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'values': 'list[Portfolio]',
'href': 'str',
'links': 'list[Link]'
}
attribute_map = {
'values': 'values',
'href': 'href',
'links': 'links'
}
required_map = {
'values': 'required',
'href': 'optional',
'links': 'optional'
}
def __init__(self, values=None, href=None, links=None): # noqa: E501
"""
ResourceListOfPortfolio - a model defined in OpenAPI
:param values: (required)
:type values: list[lusid.Portfolio]
:param href:
:type href: str
:param links:
:type links: list[lusid.Link]
""" # noqa: E501
self._values = None
self._href = None
self._links = None
self.discriminator = None
self.values = values
self.href = href
self.links = links
@property
def values(self):
"""Gets the values of this ResourceListOfPortfolio. # noqa: E501
:return: The values of this ResourceListOfPortfolio. # noqa: E501
:rtype: list[Portfolio]
"""
return self._values
@values.setter
def values(self, values):
"""Sets the values of this ResourceListOfPortfolio.
:param values: The values of this ResourceListOfPortfolio. # noqa: E501
:type: list[Portfolio]
"""
if values is None:
raise ValueError("Invalid value for `values`, must not be `None`") # noqa: E501
self._values = values
@property
def href(self):
"""Gets the href of this ResourceListOfPortfolio. # noqa: E501
:return: The href of this ResourceListOfPortfolio. # noqa: E501
:rtype: str
"""
return self._href
@href.setter
def href(self, href):
"""Sets the href of this ResourceListOfPortfolio.
:param href: The href of this ResourceListOfPortfolio. # noqa: E501
:type: str
"""
self._href = href
@property
def links(self):
"""Gets the links of this ResourceListOfPortfolio. # noqa: E501
:return: The links of this ResourceListOfPortfolio. # noqa: E501
:rtype: list[Link]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this ResourceListOfPortfolio.
:param links: The links of this ResourceListOfPortfolio. # noqa: E501
:type: list[Link]
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResourceListOfPortfolio):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
# (C) British Crown Copyright 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
Tests for the UTM coordinate system.
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np
from numpy.testing import assert_almost_equal
import pytest
import cartopy.crs as ccrs
def check_proj4_params(crs, other_args):
expected = other_args | {'proj=utm', 'no_defs', 'units=m'}
    proj4_params = set(crs.proj4_init.lstrip('+').split(' +'))
    assert expected == proj4_params
@pytest.mark.parametrize('south', [False, True])
def test_default(south):
zone = 1 # Limits are fixed, so don't bother checking other zones.
utm = ccrs.UTM(zone, southern_hemisphere=south)
other_args = {'ellps=WGS84', 'zone={}'.format(zone)}
if south:
other_args |= {'south'}
check_proj4_params(utm, other_args)
assert_almost_equal(np.array(utm.x_limits),
[-250000, 1250000])
assert_almost_equal(np.array(utm.y_limits),
[-10000000, 25000000])
def test_ellipsoid_transform():
# USGS Professional Paper 1395, pp 269 - 271
globe = ccrs.Globe(ellipse='clrk66')
utm = ccrs.UTM(zone=18, globe=globe)
geodetic = utm.as_geodetic()
other_args = {'ellps=clrk66', 'zone=18'}
check_proj4_params(utm, other_args)
assert_almost_equal(np.array(utm.x_limits),
[-250000, 1250000])
assert_almost_equal(np.array(utm.y_limits),
[-10000000, 25000000])
result = utm.transform_point(-73.5, 40.5, geodetic)
assert_almost_equal(result, np.array([127106.5 + 500000, 4484124.4]),
decimal=1)
inverse_result = geodetic.transform_point(result[0], result[1], utm)
assert_almost_equal(inverse_result, [-73.5, 40.5])
|
from Calculator import make_root


def main():
    # The original body was missing; this is a minimal completion assuming
    # make_root() constructs the calculator's root object. Anything beyond
    # building it is unspecified here.
    make_root()


if __name__ == '__main__':
    main()
|
import numpy as np
import pyqtgraph as pg
from datetime import datetime, timedelta
from vnpy.trader.constant import Interval, Direction, Offset
from vnpy.trader.engine import MainEngine
from vnpy.trader.ui import QtCore, QtWidgets, QtGui
from vnpy.trader.ui.widget import BaseMonitor, BaseCell, DirectionCell, EnumCell
from vnpy.trader.ui.editor import CodeEditor
from vnpy.event import Event, EventEngine
from vnpy.chart import ChartWidget, CandleItem, VolumeItem
from vnpy.trader.utility import load_json, save_json
from ..engine import (
APP_NAME,
EVENT_BACKTESTER_LOG,
EVENT_BACKTESTER_BACKTESTING_FINISHED,
EVENT_BACKTESTER_OPTIMIZATION_FINISHED,
OptimizationSetting
)
class BacktesterManager(QtWidgets.QWidget):
""""""
setting_filename = "cta_backtester_setting.json"
signal_log = QtCore.pyqtSignal(Event)
signal_backtesting_finished = QtCore.pyqtSignal(Event)
signal_optimization_finished = QtCore.pyqtSignal(Event)
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super().__init__()
self.main_engine = main_engine
self.event_engine = event_engine
self.backtester_engine = main_engine.get_engine(APP_NAME)
self.class_names = []
self.settings = {}
self.target_display = ""
self.init_ui()
self.register_event()
self.backtester_engine.init_engine()
self.init_strategy_settings()
def init_strategy_settings(self):
""""""
self.class_names = self.backtester_engine.get_strategy_class_names()
for class_name in self.class_names:
setting = self.backtester_engine.get_default_setting(class_name)
self.settings[class_name] = setting
self.class_combo.addItems(self.class_names)
def init_ui(self):
""""""
self.setWindowTitle("CTA回测")
# Setting Part
self.class_combo = QtWidgets.QComboBox()
self.symbol_line = QtWidgets.QLineEdit("IF88.CFFEX")
self.interval_combo = QtWidgets.QComboBox()
        for interval in Interval:
            self.interval_combo.addItem(interval.value)
end_dt = datetime.now()
start_dt = end_dt - timedelta(days=3 * 365)
self.start_date_edit = QtWidgets.QDateEdit(
QtCore.QDate(
start_dt.year,
start_dt.month,
start_dt.day
)
)
self.end_date_edit = QtWidgets.QDateEdit(
QtCore.QDate.currentDate()
)
self.rate_line = QtWidgets.QLineEdit("0.000025")
self.slippage_line = QtWidgets.QLineEdit("0.2")
self.size_line = QtWidgets.QLineEdit("300")
self.pricetick_line = QtWidgets.QLineEdit("0.2")
self.capital_line = QtWidgets.QLineEdit("1000000")
self.inverse_combo = QtWidgets.QComboBox()
self.inverse_combo.addItems(["正向", "反向"])
backtesting_button = QtWidgets.QPushButton("开始回测")
backtesting_button.clicked.connect(self.start_backtesting)
optimization_button = QtWidgets.QPushButton("参数优化")
optimization_button.clicked.connect(self.start_optimization)
self.result_button = QtWidgets.QPushButton("优化结果")
self.result_button.clicked.connect(self.show_optimization_result)
self.result_button.setEnabled(False)
downloading_button = QtWidgets.QPushButton("下载数据")
downloading_button.clicked.connect(self.start_downloading)
self.order_button = QtWidgets.QPushButton("委托记录")
self.order_button.clicked.connect(self.show_backtesting_orders)
self.order_button.setEnabled(False)
self.trade_button = QtWidgets.QPushButton("成交记录")
self.trade_button.clicked.connect(self.show_backtesting_trades)
self.trade_button.setEnabled(False)
self.daily_button = QtWidgets.QPushButton("每日盈亏")
self.daily_button.clicked.connect(self.show_daily_results)
self.daily_button.setEnabled(False)
self.candle_button = QtWidgets.QPushButton("K线图表")
self.candle_button.clicked.connect(self.show_candle_chart)
self.candle_button.setEnabled(False)
edit_button = QtWidgets.QPushButton("代码编辑")
edit_button.clicked.connect(self.edit_strategy_code)
reload_button = QtWidgets.QPushButton("策略重载")
reload_button.clicked.connect(self.reload_strategy_class)
for button in [
backtesting_button,
optimization_button,
downloading_button,
self.result_button,
self.order_button,
self.trade_button,
self.daily_button,
self.candle_button,
edit_button,
reload_button
]:
button.setFixedHeight(button.sizeHint().height() * 2)
form = QtWidgets.QFormLayout()
form.addRow("交易策略", self.class_combo)
form.addRow("本地代码", self.symbol_line)
form.addRow("K线周期", self.interval_combo)
form.addRow("开始日期", self.start_date_edit)
form.addRow("结束日期", self.end_date_edit)
form.addRow("手续费率", self.rate_line)
form.addRow("交易滑点", self.slippage_line)
form.addRow("合约乘数", self.size_line)
form.addRow("价格跳动", self.pricetick_line)
form.addRow("回测资金", self.capital_line)
form.addRow("合约模式", self.inverse_combo)
result_grid = QtWidgets.QGridLayout()
result_grid.addWidget(self.trade_button, 0, 0)
result_grid.addWidget(self.order_button, 0, 1)
result_grid.addWidget(self.daily_button, 1, 0)
result_grid.addWidget(self.candle_button, 1, 1)
left_vbox = QtWidgets.QVBoxLayout()
left_vbox.addLayout(form)
left_vbox.addWidget(backtesting_button)
left_vbox.addWidget(downloading_button)
left_vbox.addStretch()
left_vbox.addLayout(result_grid)
left_vbox.addStretch()
left_vbox.addWidget(optimization_button)
left_vbox.addWidget(self.result_button)
left_vbox.addStretch()
left_vbox.addWidget(edit_button)
left_vbox.addWidget(reload_button)
# Result part
self.statistics_monitor = StatisticsMonitor()
self.log_monitor = QtWidgets.QTextEdit()
self.log_monitor.setMaximumHeight(400)
self.chart = BacktesterChart()
self.chart.setMinimumWidth(1000)
self.trade_dialog = BacktestingResultDialog(
self.main_engine,
self.event_engine,
"回测成交记录",
BacktestingTradeMonitor
)
self.order_dialog = BacktestingResultDialog(
self.main_engine,
self.event_engine,
"回测委托记录",
BacktestingOrderMonitor
)
self.daily_dialog = BacktestingResultDialog(
self.main_engine,
self.event_engine,
"回测每日盈亏",
DailyResultMonitor
)
# Candle Chart
self.candle_dialog = CandleChartDialog()
# Layout
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(self.statistics_monitor)
vbox.addWidget(self.log_monitor)
hbox = QtWidgets.QHBoxLayout()
hbox.addLayout(left_vbox)
hbox.addLayout(vbox)
hbox.addWidget(self.chart)
self.setLayout(hbox)
# Code Editor
self.editor = CodeEditor(self.main_engine, self.event_engine)
# Load setting
setting = load_json(self.setting_filename)
if not setting:
return
self.class_combo.setCurrentIndex(
self.class_combo.findText(setting["class_name"])
)
self.symbol_line.setText(setting["vt_symbol"])
self.interval_combo.setCurrentIndex(
self.interval_combo.findText(setting["interval"])
)
self.rate_line.setText(str(setting["rate"]))
self.slippage_line.setText(str(setting["slippage"]))
self.size_line.setText(str(setting["size"]))
self.pricetick_line.setText(str(setting["pricetick"]))
self.capital_line.setText(str(setting["capital"]))
if not setting["inverse"]:
self.inverse_combo.setCurrentIndex(0)
else:
self.inverse_combo.setCurrentIndex(1)
def register_event(self):
""""""
self.signal_log.connect(self.process_log_event)
self.signal_backtesting_finished.connect(
self.process_backtesting_finished_event)
self.signal_optimization_finished.connect(
self.process_optimization_finished_event)
self.event_engine.register(EVENT_BACKTESTER_LOG, self.signal_log.emit)
self.event_engine.register(
EVENT_BACKTESTER_BACKTESTING_FINISHED, self.signal_backtesting_finished.emit)
self.event_engine.register(
EVENT_BACKTESTER_OPTIMIZATION_FINISHED, self.signal_optimization_finished.emit)
def process_log_event(self, event: Event):
""""""
msg = event.data
self.write_log(msg)
def write_log(self, msg):
""""""
timestamp = datetime.now().strftime("%H:%M:%S")
msg = f"{timestamp}\t{msg}"
self.log_monitor.append(msg)
def process_backtesting_finished_event(self, event: Event):
""""""
statistics = self.backtester_engine.get_result_statistics()
self.statistics_monitor.set_data(statistics)
df = self.backtester_engine.get_result_df()
self.chart.set_data(df)
self.trade_button.setEnabled(True)
self.order_button.setEnabled(True)
self.daily_button.setEnabled(True)
self.candle_button.setEnabled(True)
def process_optimization_finished_event(self, event: Event):
""""""
self.write_log("请点击[优化结果]按钮查看")
self.result_button.setEnabled(True)
def start_backtesting(self):
""""""
class_name = self.class_combo.currentText()
vt_symbol = self.symbol_line.text()
interval = self.interval_combo.currentText()
start = self.start_date_edit.date().toPyDate()
end = self.end_date_edit.date().toPyDate()
rate = float(self.rate_line.text())
slippage = float(self.slippage_line.text())
size = float(self.size_line.text())
pricetick = float(self.pricetick_line.text())
capital = float(self.capital_line.text())
if self.inverse_combo.currentText() == "正向":
inverse = False
else:
inverse = True
# Save backtesting parameters
backtesting_setting = {
"class_name": class_name,
"vt_symbol": vt_symbol,
"interval": interval,
"rate": rate,
"slippage": slippage,
"size": size,
"pricetick": pricetick,
"capital": capital,
"inverse": inverse,
}
save_json(self.setting_filename, backtesting_setting)
# Get strategy setting
old_setting = self.settings[class_name]
dialog = BacktestingSettingEditor(class_name, old_setting)
i = dialog.exec()
if i != dialog.Accepted:
return
new_setting = dialog.get_setting()
self.settings[class_name] = new_setting
result = self.backtester_engine.start_backtesting(
class_name,
vt_symbol,
interval,
start,
end,
rate,
slippage,
size,
pricetick,
capital,
inverse,
new_setting
)
if result:
self.statistics_monitor.clear_data()
self.chart.clear_data()
self.trade_button.setEnabled(False)
self.order_button.setEnabled(False)
self.daily_button.setEnabled(False)
self.candle_button.setEnabled(False)
self.trade_dialog.clear_data()
self.order_dialog.clear_data()
self.daily_dialog.clear_data()
self.candle_dialog.clear_data()
def start_optimization(self):
""""""
class_name = self.class_combo.currentText()
vt_symbol = self.symbol_line.text()
interval = self.interval_combo.currentText()
start = self.start_date_edit.date().toPyDate()
end = self.end_date_edit.date().toPyDate()
rate = float(self.rate_line.text())
slippage = float(self.slippage_line.text())
size = float(self.size_line.text())
pricetick = float(self.pricetick_line.text())
capital = float(self.capital_line.text())
if self.inverse_combo.currentText() == "正向":
inverse = False
else:
inverse = True
parameters = self.settings[class_name]
dialog = OptimizationSettingEditor(class_name, parameters)
i = dialog.exec()
if i != dialog.Accepted:
return
optimization_setting, use_ga = dialog.get_setting()
self.target_display = dialog.target_display
self.backtester_engine.start_optimization(
class_name,
vt_symbol,
interval,
start,
end,
rate,
slippage,
size,
pricetick,
capital,
inverse,
optimization_setting,
use_ga
)
self.result_button.setEnabled(False)
def start_downloading(self):
""""""
vt_symbol = self.symbol_line.text()
interval = self.interval_combo.currentText()
start_date = self.start_date_edit.date()
end_date = self.end_date_edit.date()
start = datetime(start_date.year(), start_date.month(), start_date.day())
end = datetime(end_date.year(), end_date.month(), end_date.day(), 23, 59, 59)
self.backtester_engine.start_downloading(
vt_symbol,
interval,
start,
end
)
def show_optimization_result(self):
""""""
result_values = self.backtester_engine.get_result_values()
dialog = OptimizationResultMonitor(
result_values,
self.target_display
)
dialog.exec_()
def show_backtesting_trades(self):
""""""
if not self.trade_dialog.is_updated():
trades = self.backtester_engine.get_all_trades()
self.trade_dialog.update_data(trades)
self.trade_dialog.exec_()
def show_backtesting_orders(self):
""""""
if not self.order_dialog.is_updated():
orders = self.backtester_engine.get_all_orders()
self.order_dialog.update_data(orders)
self.order_dialog.exec_()
def show_daily_results(self):
""""""
if not self.daily_dialog.is_updated():
results = self.backtester_engine.get_all_daily_results()
self.daily_dialog.update_data(results)
self.daily_dialog.exec_()
def show_candle_chart(self):
""""""
if not self.candle_dialog.is_updated():
history = self.backtester_engine.get_history_data()
self.candle_dialog.update_history(history)
trades = self.backtester_engine.get_all_trades()
self.candle_dialog.update_trades(trades)
self.candle_dialog.exec_()
def edit_strategy_code(self):
""""""
class_name = self.class_combo.currentText()
file_path = self.backtester_engine.get_strategy_class_file(class_name)
self.editor.open_editor(file_path)
self.editor.show()
def reload_strategy_class(self):
""""""
self.backtester_engine.reload_strategy_class()
self.class_combo.clear()
self.init_strategy_settings()
def show(self):
""""""
self.showMaximized()
class StatisticsMonitor(QtWidgets.QTableWidget):
""""""
KEY_NAME_MAP = {
"start_date": "首个交易日",
"end_date": "最后交易日",
"total_days": "总交易日",
"profit_days": "盈利交易日",
"loss_days": "亏损交易日",
"capital": "起始资金",
"end_balance": "结束资金",
"total_return": "总收益率",
"annual_return": "年化收益",
"max_drawdown": "最大回撤",
"max_ddpercent": "百分比最大回撤",
"total_net_pnl": "总盈亏",
"total_commission": "总手续费",
"total_slippage": "总滑点",
"total_turnover": "总成交额",
"total_trade_count": "总成交笔数",
"daily_net_pnl": "日均盈亏",
"daily_commission": "日均手续费",
"daily_slippage": "日均滑点",
"daily_turnover": "日均成交额",
"daily_trade_count": "日均成交笔数",
"daily_return": "日均收益率",
"return_std": "收益标准差",
"sharpe_ratio": "夏普比率",
"return_drawdown_ratio": "收益回撤比"
}
def __init__(self):
""""""
super().__init__()
self.cells = {}
self.init_ui()
def init_ui(self):
""""""
self.setRowCount(len(self.KEY_NAME_MAP))
self.setVerticalHeaderLabels(list(self.KEY_NAME_MAP.values()))
self.setColumnCount(1)
self.horizontalHeader().setVisible(False)
self.horizontalHeader().setSectionResizeMode(
QtWidgets.QHeaderView.Stretch
)
self.setEditTriggers(self.NoEditTriggers)
for row, key in enumerate(self.KEY_NAME_MAP.keys()):
cell = QtWidgets.QTableWidgetItem()
self.setItem(row, 0, cell)
self.cells[key] = cell
def clear_data(self):
""""""
for cell in self.cells.values():
cell.setText("")
def set_data(self, data: dict):
""""""
data["capital"] = f"{data['capital']:,.2f}"
data["end_balance"] = f"{data['end_balance']:,.2f}"
data["total_return"] = f"{data['total_return']:,.2f}%"
data["annual_return"] = f"{data['annual_return']:,.2f}%"
data["max_drawdown"] = f"{data['max_drawdown']:,.2f}"
data["max_ddpercent"] = f"{data['max_ddpercent']:,.2f}%"
data["total_net_pnl"] = f"{data['total_net_pnl']:,.2f}"
data["total_commission"] = f"{data['total_commission']:,.2f}"
data["total_slippage"] = f"{data['total_slippage']:,.2f}"
data["total_turnover"] = f"{data['total_turnover']:,.2f}"
data["daily_net_pnl"] = f"{data['daily_net_pnl']:,.2f}"
data["daily_commission"] = f"{data['daily_commission']:,.2f}"
data["daily_slippage"] = f"{data['daily_slippage']:,.2f}"
data["daily_turnover"] = f"{data['daily_turnover']:,.2f}"
data["daily_return"] = f"{data['daily_return']:,.2f}%"
data["return_std"] = f"{data['return_std']:,.2f}%"
data["sharpe_ratio"] = f"{data['sharpe_ratio']:,.2f}"
data["return_drawdown_ratio"] = f"{data['return_drawdown_ratio']:,.2f}"
for key, cell in self.cells.items():
value = data.get(key, "")
cell.setText(str(value))
class BacktestingSettingEditor(QtWidgets.QDialog):
"""
    For creating a new strategy and editing strategy parameters.
"""
def __init__(
self, class_name: str, parameters: dict
):
""""""
super(BacktestingSettingEditor, self).__init__()
self.class_name = class_name
self.parameters = parameters
self.edits = {}
self.init_ui()
def init_ui(self):
""""""
form = QtWidgets.QFormLayout()
        # Add vt_symbol and name edits when adding a new strategy
self.setWindowTitle(f"策略参数配置:{self.class_name}")
button_text = "确定"
parameters = self.parameters
for name, value in parameters.items():
type_ = type(value)
edit = QtWidgets.QLineEdit(str(value))
if type_ is int:
validator = QtGui.QIntValidator()
edit.setValidator(validator)
elif type_ is float:
validator = QtGui.QDoubleValidator()
edit.setValidator(validator)
form.addRow(f"{name} {type_}", edit)
self.edits[name] = (edit, type_)
button = QtWidgets.QPushButton(button_text)
button.clicked.connect(self.accept)
form.addRow(button)
self.setLayout(form)
def get_setting(self):
""""""
setting = {}
for name, tp in self.edits.items():
edit, type_ = tp
value_text = edit.text()
if type_ == bool:
if value_text == "True":
value = True
else:
value = False
else:
value = type_(value_text)
setting[name] = value
return setting
class BacktesterChart(pg.GraphicsWindow):
""""""
def __init__(self):
""""""
super().__init__(title="Backtester Chart")
self.dates = {}
self.init_ui()
def init_ui(self):
""""""
pg.setConfigOptions(antialias=True)
# Create plot widgets
self.balance_plot = self.addPlot(
title="账户净值",
axisItems={"bottom": DateAxis(self.dates, orientation="bottom")}
)
self.nextRow()
self.drawdown_plot = self.addPlot(
title="净值回撤",
axisItems={"bottom": DateAxis(self.dates, orientation="bottom")}
)
self.nextRow()
self.pnl_plot = self.addPlot(
title="每日盈亏",
axisItems={"bottom": DateAxis(self.dates, orientation="bottom")}
)
self.nextRow()
self.distribution_plot = self.addPlot(title="盈亏分布")
# Add curves and bars on plot widgets
self.balance_curve = self.balance_plot.plot(
pen=pg.mkPen("#ffc107", width=3)
)
dd_color = "#303f9f"
self.drawdown_curve = self.drawdown_plot.plot(
fillLevel=-0.3, brush=dd_color, pen=dd_color
)
profit_color = 'r'
loss_color = 'g'
self.profit_pnl_bar = pg.BarGraphItem(
x=[], height=[], width=0.3, brush=profit_color, pen=profit_color
)
self.loss_pnl_bar = pg.BarGraphItem(
x=[], height=[], width=0.3, brush=loss_color, pen=loss_color
)
self.pnl_plot.addItem(self.profit_pnl_bar)
self.pnl_plot.addItem(self.loss_pnl_bar)
distribution_color = "#6d4c41"
self.distribution_curve = self.distribution_plot.plot(
fillLevel=-0.3, brush=distribution_color, pen=distribution_color
)
def clear_data(self):
""""""
self.balance_curve.setData([], [])
self.drawdown_curve.setData([], [])
self.profit_pnl_bar.setOpts(x=[], height=[])
self.loss_pnl_bar.setOpts(x=[], height=[])
self.distribution_curve.setData([], [])
def set_data(self, df):
""""""
if df is None:
return
count = len(df)
self.dates.clear()
for n, date in enumerate(df.index):
self.dates[n] = date
# Set data for curve of balance and drawdown
self.balance_curve.setData(df["balance"])
self.drawdown_curve.setData(df["drawdown"])
# Set data for daily pnl bar
profit_pnl_x = []
profit_pnl_height = []
loss_pnl_x = []
loss_pnl_height = []
for count, pnl in enumerate(df["net_pnl"]):
if pnl >= 0:
profit_pnl_height.append(pnl)
profit_pnl_x.append(count)
else:
loss_pnl_height.append(pnl)
loss_pnl_x.append(count)
self.profit_pnl_bar.setOpts(x=profit_pnl_x, height=profit_pnl_height)
self.loss_pnl_bar.setOpts(x=loss_pnl_x, height=loss_pnl_height)
# Set data for pnl distribution
hist, x = np.histogram(df["net_pnl"], bins="auto")
x = x[:-1]
self.distribution_curve.setData(x, hist)
class DateAxis(pg.AxisItem):
"""Axis for showing date data"""
def __init__(self, dates: dict, *args, **kwargs):
""""""
super().__init__(*args, **kwargs)
self.dates = dates
def tickStrings(self, values, scale, spacing):
""""""
strings = []
for v in values:
dt = self.dates.get(v, "")
strings.append(str(dt))
return strings
class OptimizationSettingEditor(QtWidgets.QDialog):
"""
For setting up parameters for optimization.
"""
DISPLAY_NAME_MAP = {
"总收益率": "total_return",
"夏普比率": "sharpe_ratio",
"收益回撤比": "return_drawdown_ratio",
"日均盈亏": "daily_net_pnl"
}
def __init__(
self, class_name: str, parameters: dict
):
""""""
super().__init__()
self.class_name = class_name
self.parameters = parameters
self.edits = {}
self.optimization_setting = None
self.use_ga = False
self.init_ui()
def init_ui(self):
""""""
QLabel = QtWidgets.QLabel
self.target_combo = QtWidgets.QComboBox()
self.target_combo.addItems(list(self.DISPLAY_NAME_MAP.keys()))
grid = QtWidgets.QGridLayout()
grid.addWidget(QLabel("目标"), 0, 0)
grid.addWidget(self.target_combo, 0, 1, 1, 3)
grid.addWidget(QLabel("参数"), 1, 0)
grid.addWidget(QLabel("开始"), 1, 1)
grid.addWidget(QLabel("步进"), 1, 2)
grid.addWidget(QLabel("结束"), 1, 3)
        # Add vt_symbol and name edits when adding a new strategy
self.setWindowTitle(f"优化参数配置:{self.class_name}")
validator = QtGui.QDoubleValidator()
row = 2
for name, value in self.parameters.items():
type_ = type(value)
if type_ not in [int, float]:
continue
start_edit = QtWidgets.QLineEdit(str(value))
step_edit = QtWidgets.QLineEdit(str(1))
end_edit = QtWidgets.QLineEdit(str(value))
for edit in [start_edit, step_edit, end_edit]:
edit.setValidator(validator)
grid.addWidget(QLabel(name), row, 0)
grid.addWidget(start_edit, row, 1)
grid.addWidget(step_edit, row, 2)
grid.addWidget(end_edit, row, 3)
self.edits[name] = {
"type": type_,
"start": start_edit,
"step": step_edit,
"end": end_edit
}
row += 1
parallel_button = QtWidgets.QPushButton("多进程优化")
parallel_button.clicked.connect(self.generate_parallel_setting)
grid.addWidget(parallel_button, row, 0, 1, 4)
row += 1
ga_button = QtWidgets.QPushButton("遗传算法优化")
ga_button.clicked.connect(self.generate_ga_setting)
grid.addWidget(ga_button, row, 0, 1, 4)
self.setLayout(grid)
def generate_ga_setting(self):
""""""
self.use_ga = True
self.generate_setting()
def generate_parallel_setting(self):
""""""
self.use_ga = False
self.generate_setting()
def generate_setting(self):
""""""
self.optimization_setting = OptimizationSetting()
self.target_display = self.target_combo.currentText()
target_name = self.DISPLAY_NAME_MAP[self.target_display]
self.optimization_setting.set_target(target_name)
for name, d in self.edits.items():
type_ = d["type"]
start_value = type_(d["start"].text())
step_value = type_(d["step"].text())
end_value = type_(d["end"].text())
if start_value == end_value:
self.optimization_setting.add_parameter(name, start_value)
else:
self.optimization_setting.add_parameter(
name,
start_value,
end_value,
step_value
)
self.accept()
def get_setting(self):
""""""
return self.optimization_setting, self.use_ga
class OptimizationResultMonitor(QtWidgets.QDialog):
"""
For viewing optimization result.
"""
def __init__(
self, result_values: list, target_display: str
):
""""""
super().__init__()
self.result_values = result_values
self.target_display = target_display
self.init_ui()
def init_ui(self):
""""""
self.setWindowTitle("参数优化结果")
self.resize(1100, 500)
table = QtWidgets.QTableWidget()
table.setColumnCount(2)
table.setRowCount(len(self.result_values))
table.setHorizontalHeaderLabels(["参数", self.target_display])
table.setEditTriggers(table.NoEditTriggers)
table.verticalHeader().setVisible(False)
table.horizontalHeader().setSectionResizeMode(
0, QtWidgets.QHeaderView.ResizeToContents
)
table.horizontalHeader().setSectionResizeMode(
1, QtWidgets.QHeaderView.Stretch
)
for n, tp in enumerate(self.result_values):
setting, target_value, _ = tp
setting_cell = QtWidgets.QTableWidgetItem(str(setting))
target_cell = QtWidgets.QTableWidgetItem(str(target_value))
setting_cell.setTextAlignment(QtCore.Qt.AlignCenter)
target_cell.setTextAlignment(QtCore.Qt.AlignCenter)
table.setItem(n, 0, setting_cell)
table.setItem(n, 1, target_cell)
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(table)
self.setLayout(vbox)
class BacktestingTradeMonitor(BaseMonitor):
"""
Monitor for backtesting trade data.
"""
headers = {
"tradeid": {"display": "成交号 ", "cell": BaseCell, "update": False},
"orderid": {"display": "委托号", "cell": BaseCell, "update": False},
"symbol": {"display": "代码", "cell": BaseCell, "update": False},
"exchange": {"display": "交易所", "cell": EnumCell, "update": False},
"direction": {"display": "方向", "cell": DirectionCell, "update": False},
"offset": {"display": "开平", "cell": EnumCell, "update": False},
"price": {"display": "价格", "cell": BaseCell, "update": False},
"volume": {"display": "数量", "cell": BaseCell, "update": False},
"datetime": {"display": "时间", "cell": BaseCell, "update": False},
"gateway_name": {"display": "接口", "cell": BaseCell, "update": False},
}
class BacktestingOrderMonitor(BaseMonitor):
"""
Monitor for backtesting order data.
"""
headers = {
"orderid": {"display": "委托号", "cell": BaseCell, "update": False},
"symbol": {"display": "代码", "cell": BaseCell, "update": False},
"exchange": {"display": "交易所", "cell": EnumCell, "update": False},
"type": {"display": "类型", "cell": EnumCell, "update": False},
"direction": {"display": "方向", "cell": DirectionCell, "update": False},
"offset": {"display": "开平", "cell": EnumCell, "update": False},
"price": {"display": "价格", "cell": BaseCell, "update": False},
"volume": {"display": "总数量", "cell": BaseCell, "update": False},
"traded": {"display": "已成交", "cell": BaseCell, "update": False},
"status": {"display": "状态", "cell": EnumCell, "update": False},
"datetime": {"display": "时间", "cell": BaseCell, "update": False},
"gateway_name": {"display": "接口", "cell": BaseCell, "update": False},
}
class DailyResultMonitor(BaseMonitor):
"""
Monitor for backtesting daily result.
"""
headers = {
"date": {"display": "日期", "cell": BaseCell, "update": False},
"trade_count": {"display": "成交笔数", "cell": BaseCell, "update": False},
"start_pos": {"display": "开盘持仓", "cell": BaseCell, "update": False},
"end_pos": {"display": "收盘持仓", "cell": BaseCell, "update": False},
"turnover": {"display": "成交额", "cell": BaseCell, "update": False},
"commission": {"display": "手续费", "cell": BaseCell, "update": False},
"slippage": {"display": "滑点", "cell": BaseCell, "update": False},
"trading_pnl": {"display": "交易盈亏", "cell": BaseCell, "update": False},
"holding_pnl": {"display": "持仓盈亏", "cell": BaseCell, "update": False},
"total_pnl": {"display": "总盈亏", "cell": BaseCell, "update": False},
"net_pnl": {"display": "净盈亏", "cell": BaseCell, "update": False},
}
class BacktestingResultDialog(QtWidgets.QDialog):
"""
"""
def __init__(
self,
main_engine: MainEngine,
event_engine: EventEngine,
title: str,
table_class: QtWidgets.QTableWidget
):
""""""
super().__init__()
self.main_engine = main_engine
self.event_engine = event_engine
self.title = title
self.table_class = table_class
self.updated = False
self.init_ui()
def init_ui(self):
""""""
self.setWindowTitle(self.title)
self.resize(1100, 600)
self.table = self.table_class(self.main_engine, self.event_engine)
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(self.table)
self.setLayout(vbox)
def clear_data(self):
""""""
self.updated = False
self.table.setRowCount(0)
def update_data(self, data: list):
""""""
self.updated = True
data.reverse()
for obj in data:
self.table.insert_new_row(obj)
def is_updated(self):
""""""
return self.updated
class CandleChartDialog(QtWidgets.QDialog):
"""
"""
def __init__(self):
""""""
super().__init__()
self.dt_ix_map = {}
self.updated = False
self.init_ui()
def init_ui(self):
""""""
self.setWindowTitle("回测K线图表")
self.resize(1400, 800)
# Create chart widget
self.chart = ChartWidget()
self.chart.add_plot("candle", hide_x_axis=True)
self.chart.add_plot("volume", maximum_height=200)
self.chart.add_item(CandleItem, "candle", "candle")
self.chart.add_item(VolumeItem, "volume", "volume")
self.chart.add_cursor()
        # Add scatter item for showing trades
self.trade_scatter = pg.ScatterPlotItem()
candle_plot = self.chart.get_plot("candle")
candle_plot.addItem(self.trade_scatter)
# Set layout
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(self.chart)
self.setLayout(vbox)
def update_history(self, history: list):
""""""
self.updated = True
self.chart.update_history(history)
for ix, bar in enumerate(history):
self.dt_ix_map[bar.datetime] = ix
def update_trades(self, trades: list):
""""""
trade_data = []
for trade in trades:
ix = self.dt_ix_map[trade.datetime]
scatter = {
"pos": (ix, trade.price),
"data": 1,
"size": 14,
"pen": pg.mkPen((255, 255, 255))
}
if trade.direction == Direction.LONG:
scatter_symbol = "t1" # Up arrow
else:
scatter_symbol = "t" # Down arrow
if trade.offset == Offset.OPEN:
scatter_brush = pg.mkBrush((255, 255, 0)) # Yellow
else:
scatter_brush = pg.mkBrush((0, 0, 255)) # Blue
scatter["symbol"] = scatter_symbol
scatter["brush"] = scatter_brush
trade_data.append(scatter)
self.trade_scatter.setData(trade_data)
def clear_data(self):
""""""
self.updated = False
self.chart.clear_all()
self.dt_ix_map.clear()
self.trade_scatter.clear()
def is_updated(self):
""""""
return self.updated
|
#
# @lc app=leetcode id=677 lang=python3
#
# [677] Map Sum Pairs
# https://leetcode.com/problems/map-sum-pairs/
# This problem uses a trie: each node stores the sum of the values of all
# keys that pass through it. Re-inserting an existing key overrides its
# previous value (handled via the value difference in insert()).
#
import unittest
from typing import Dict
# @lc code=start
class Node:
def __init__(self, val: int = 0):
self.value = val
self.children: Dict[str, Node] = {}
class MapSum:
def __init__(self) -> None:
"""
Initialize your data structure here.
"""
self.root_node = Node()
self.keys: Dict[str, int] = {}
def insert(self, key: str, val: int) -> None:
# override if key already exists
val_diff = val - self.keys.get(key, 0)
self.keys[key] = val
# track count of prefix characters
node = self.root_node
for c in key:
if c not in node.children:
node.children[c] = Node()
node = node.children[c]
node.value += val_diff
def sum(self, prefix: str) -> int:
node = self.root_node
for c in prefix:
# return 0 if prefix doesn't exist
if c not in node.children:
return 0
node = node.children[c]
return node.value
# Your MapSum object will be instantiated and called as such:
# obj = MapSum()
# obj.insert(key,val)
# param_2 = obj.sum(prefix)
# @lc code=end
class TestSolution(unittest.TestCase):
def test_given(self) -> None:
x = MapSum()
x.insert("apple", 3)
self.assertEqual(x.sum("ap"), 3)
x.insert("app", 2)
self.assertEqual(x.sum("ap"), 5)
def test_override(self) -> None:
x = MapSum()
x.insert("apple", 3)
x.insert("app", 2)
x.insert("apple", 8)
self.assertEqual(x.sum("ap"), 10)
if __name__ == "__main__":
unittest.main()
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetSecurityContactResult',
'AwaitableGetSecurityContactResult',
'get_security_contact',
]
@pulumi.output_type
class GetSecurityContactResult:
"""
Contact details for security issues
"""
def __init__(__self__, alert_notifications=None, alerts_to_admins=None, email=None, id=None, name=None, phone=None, type=None):
if alert_notifications and not isinstance(alert_notifications, str):
raise TypeError("Expected argument 'alert_notifications' to be a str")
pulumi.set(__self__, "alert_notifications", alert_notifications)
if alerts_to_admins and not isinstance(alerts_to_admins, str):
raise TypeError("Expected argument 'alerts_to_admins' to be a str")
pulumi.set(__self__, "alerts_to_admins", alerts_to_admins)
if email and not isinstance(email, str):
raise TypeError("Expected argument 'email' to be a str")
pulumi.set(__self__, "email", email)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if phone and not isinstance(phone, str):
raise TypeError("Expected argument 'phone' to be a str")
pulumi.set(__self__, "phone", phone)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="alertNotifications")
def alert_notifications(self) -> str:
"""
Whether to send security alerts notifications to the security contact
"""
return pulumi.get(self, "alert_notifications")
@property
@pulumi.getter(name="alertsToAdmins")
def alerts_to_admins(self) -> str:
"""
Whether to send security alerts notifications to subscription admins
"""
return pulumi.get(self, "alerts_to_admins")
@property
@pulumi.getter
def email(self) -> str:
"""
The email of this security contact
"""
return pulumi.get(self, "email")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def phone(self) -> Optional[str]:
"""
The phone number of this security contact
"""
return pulumi.get(self, "phone")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
class AwaitableGetSecurityContactResult(GetSecurityContactResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSecurityContactResult(
alert_notifications=self.alert_notifications,
alerts_to_admins=self.alerts_to_admins,
email=self.email,
id=self.id,
name=self.name,
phone=self.phone,
type=self.type)
def get_security_contact(security_contact_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSecurityContactResult:
"""
Contact details for security issues
:param str security_contact_name: Name of the security contact object
"""
__args__ = dict()
__args__['securityContactName'] = security_contact_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:security/v20170801preview:getSecurityContact', __args__, opts=opts, typ=GetSecurityContactResult).value
return AwaitableGetSecurityContactResult(
alert_notifications=__ret__.alert_notifications,
alerts_to_admins=__ret__.alerts_to_admins,
email=__ret__.email,
id=__ret__.id,
name=__ret__.name,
phone=__ret__.phone,
type=__ret__.type)
|
__author__ = 'Niklas Rosenstein <rosensteinniklas@gmail.com>'
__version__ = '1.7.6'
import copy
import glob
import os
import pkgutil
import sys
import traceback
import typing as t
import zipfile
if t.TYPE_CHECKING:
from sys import _MetaPathFinder
def is_local(filename: str, pathlist: t.List[str]) -> bool:
''' Returns True if *filename* is a subpath of any of the paths in *pathlist*. '''
filename = os.path.abspath(filename)
for path_name in pathlist:
path_name = os.path.abspath(path_name)
if is_subpath(filename, path_name):
return True
return False
def is_subpath(path: str, parent: str) -> bool:
''' Returns True if *path* points to the same or a subpath of *parent*. '''
try:
relpath = os.path.relpath(path, parent)
except ValueError:
return False # happens on Windows if drive letters don't match
return relpath == os.curdir or not relpath.startswith(os.pardir)
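
# --- Illustration (not part of the original module; assumes POSIX-style paths).
# A couple of hedged examples of the two helpers above:
#   is_subpath('/opt/app/lib/mod.py', '/opt/app')     -> True
#   is_subpath('/usr/lib/python3/os.py', '/opt/app')  -> False
#   is_local('/opt/app/lib/mod.py', ['/opt/app'])     -> True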
def eval_pth(
filename: str,
sitedir: str,
dest: t.Optional[t.List[str]] = None,
imports: t.Optional[t.List[t.Tuple[str, int, str]]] = None,
) -> t.List[str]:
''' Evaluates a `.pth` file (including support for `import` statements), and appends the result to the list
*dest*. If *dest* is #None, it will fall back to `sys.path`.
  If *imports* is specified, it must be a list. `import` statements will not be executed but instead appended to
that list in tuples of (*filename*, *line*, *stmt*).
'''
if dest is None:
dest = sys.path
if not os.path.isfile(filename):
return []
with open(filename, 'r') as fp:
for index, line in enumerate(fp):
if line.startswith('import'):
if imports is None:
exec_pth_import(filename, index+1, line)
else:
imports.append((filename, index+1, line))
else:
index = line.find('#')
if index > 0: line = line[:index]
line = line.strip()
if not os.path.isabs(line):
line = os.path.join(os.path.dirname(filename), line)
line = os.path.normpath(line)
if line and line not in dest:
dest.insert(0, line)
return dest
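
# --- Usage sketch (illustrative only; the .pth file name and its contents
# below are assumptions, not part of the original module).
# Given a hypothetical file `site-packages/extra.pth` whose single line is
# `../vendor`, the call
#   paths = eval_pth('site-packages/extra.pth', 'site-packages', dest=[])
# resolves the entry relative to the .pth file's directory, normalises it to
# `vendor` and prepends it to the destination list; a missing file yields [].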
def exec_pth_import(filename: str, lineno: int, line: str) -> None:
line = '\n' * (lineno - 1) + line.strip()
try:
exec(compile(line, filename, 'exec'))
except BaseException:
traceback.print_exc()
def extend_path(pth: t.List[str], name: str) -> t.List[str]:
''' Better implementation of #pkgutil.extend_path() which adds support for zipped Python eggs. The original
#pkgutil.extend_path() gets mocked by this function inside the #localimport context.
'''
def zip_isfile(z, name):
    name = name.rstrip('/')  # rstrip returns a new string; assign the result
return name in z.namelist()
pname = os.path.join(*name.split('.'))
zname = '/'.join(name.split('.'))
init_py = '__init__' + os.extsep + 'py'
init_pyc = '__init__' + os.extsep + 'pyc'
init_pyo = '__init__' + os.extsep + 'pyo'
mod_path = list(pth)
for path in sys.path:
if zipfile.is_zipfile(path):
try:
egg = zipfile.ZipFile(path, 'r')
addpath = (
zip_isfile(egg, zname + '/__init__.py') or
zip_isfile(egg, zname + '/__init__.pyc') or
zip_isfile(egg, zname + '/__init__.pyo'))
fpath = os.path.join(path, path, zname)
if addpath and fpath not in mod_path:
mod_path.append(fpath)
except (zipfile.BadZipfile, zipfile.LargeZipFile):
pass # xxx: Show a warning at least?
else:
path = os.path.join(path, pname)
if os.path.isdir(path) and path not in mod_path:
addpath = (
os.path.isfile(os.path.join(path, init_py)) or
os.path.isfile(os.path.join(path, init_pyc)) or
os.path.isfile(os.path.join(path, init_pyo)))
if addpath and path not in mod_path:
mod_path.append(path)
return [os.path.normpath(x) for x in mod_path]
class localimport:
def __init__(
self,
path: t.Union[t.List[str], str],
parent_dir: t.Optional[str] = None,
do_eggs: bool = True,
do_pth: bool = True,
do_autodisable: bool = True,
) -> None:
if not parent_dir:
frame = sys._getframe(1).f_globals
if '__file__' in frame:
parent_dir = os.path.dirname(os.path.abspath(frame['__file__']))
# Convert relative paths to absolute paths with parent_dir and
# evaluate .egg files in the specified directories.
self.path = []
if isinstance(path, str):
path = [path]
for path_name in path:
if not os.path.isabs(path_name):
if not parent_dir:
raise ValueError('relative path but no parent_dir')
path_name = os.path.join(parent_dir, path_name)
path_name = os.path.normpath(path_name)
self.path.append(path_name)
if do_eggs:
self.path.extend(glob.glob(os.path.join(path_name, '*.egg')))
self.meta_path: t.List[_MetaPathFinder] = []
self.modules: t.Dict[str, t.Any] = {}
self.do_pth = do_pth
self.in_context = False
self.do_autodisable = do_autodisable
self.pth_imports: t.List[t.Tuple[str, int, str]] = []
if self.do_pth:
seen = set()
for path_name in self.path:
for fn in glob.glob(os.path.join(path_name, '*.pth')):
if fn in seen: continue
seen.add(fn)
eval_pth(fn, path_name, dest=self.path, imports=self.pth_imports)
def __enter__(self) -> 'localimport':
# pkg_resources comes with setuptools.
try:
import pkg_resources
nsdict = copy.deepcopy(pkg_resources._namespace_packages) # type: ignore
declare_namespace = pkg_resources.declare_namespace
pkg_resources.declare_namespace = self._declare_namespace # type: ignore
except ImportError:
nsdict = None
declare_namespace = None
# Save the global importer state.
self.state = {
'nsdict': nsdict,
'declare_namespace': declare_namespace,
'nspaths': {},
'path': sys.path[:],
'meta_path': sys.meta_path[:],
'disables': {},
'pkgutil.extend_path': pkgutil.extend_path,
}
# Update the systems meta path and apply function mocks.
sys.path[:] = self.path
sys.meta_path[:] = self.meta_path + sys.meta_path
pkgutil.extend_path = extend_path # type: ignore
# If this function is called not the first time, we need to
# restore the modules that have been imported with it and
# temporarily disable the ones that would be shadowed.
for key, mod in list(self.modules.items()):
try: self.state['disables'][key] = sys.modules.pop(key)
except KeyError: pass
sys.modules[key] = mod
# Evaluate imports from the .pth files, if any.
for fn, lineno, stmt in self.pth_imports:
exec_pth_import(fn, lineno, stmt)
# Add the original path to sys.path.
sys.path += self.state['path']
# Update the __path__ of all namespace modules.
for key, mod in list(sys.modules.items()):
if mod is None:
        # Relative imports could have led to None-entries in
# sys.modules. Get rid of them so they can be re-evaluated.
prefix = key.rpartition('.')[0]
if hasattr(sys.modules.get(prefix), '__path__'):
del sys.modules[key]
elif hasattr(mod, '__path__'):
self.state['nspaths'][key] = copy.copy(mod.__path__)
mod.__path__ = pkgutil.extend_path(mod.__path__, mod.__name__)
self.in_context = True
if self.do_autodisable:
self.autodisable()
return self
def __exit__(self, *__) -> None:
if not self.in_context:
raise RuntimeError('context not entered')
# Figure the difference of the original sys.path and the
# current path. The list of paths will be used to determine
# what modules are local and what not.
local_paths = []
for path in sys.path:
if path not in self.state['path']:
local_paths.append(path)
for path in self.path:
if path not in local_paths:
local_paths.append(path)
# Move all meta path objects to self.meta_path that have not
# been there before and have not been in the list before.
for meta in sys.meta_path:
if meta is not self and meta not in self.state['meta_path']:
if meta not in self.meta_path:
self.meta_path.append(meta)
# Move all modules that shadow modules of the original system
# state or modules that are from any of the localimport context
# paths away.
modules = sys.modules.copy()
for key, mod in modules.items():
force_pop = False
filename = getattr(mod, '__file__', None)
if not filename and key not in sys.builtin_module_names:
parent = key.rsplit('.', 1)[0]
if parent in modules:
filename = getattr(modules[parent], '__file__', None)
else:
force_pop = True
if force_pop or (filename and is_local(filename, local_paths)):
self.modules[key] = sys.modules.pop(key)
# Restore the disabled modules.
sys.modules.update(self.state['disables'])
for key, mod in self.state['disables'].items():
try: parent_name = key.split('.')[-2]
except IndexError: parent_name = None
if parent_name and parent_name in sys.modules:
parent_module = sys.modules[parent_name]
setattr(parent_module, key.split('.')[-1], mod)
# Restore the original __path__ value of namespace packages.
for key, path_list in self.state['nspaths'].items():
try: sys.modules[key].__path__ = path_list
except KeyError: pass
# Restore the original state of the global importer.
sys.path[:] = self.state['path']
sys.meta_path[:] = self.state['meta_path']
pkgutil.extend_path = self.state['pkgutil.extend_path']
try:
import pkg_resources
pkg_resources.declare_namespace = self.state['declare_namespace']
pkg_resources._namespace_packages.clear() # type: ignore
pkg_resources._namespace_packages.update(self.state['nsdict']) # type: ignore
except ImportError: pass
self.in_context = False
del self.state
def _declare_namespace(self, package_name: str) -> None:
'''
Mock for #pkg_resources.declare_namespace() which calls
#pkgutil.extend_path() afterwards as the original implementation doesn't
seem to properly find all available namespace paths.
'''
self.state['declare_namespace'](package_name)
mod = sys.modules[package_name]
mod.__path__ = pkgutil.extend_path(mod.__path__, package_name) # type: ignore
def discover(self) -> t.Iterable[pkgutil.ModuleInfo]:
return pkgutil.iter_modules(self.path)
def disable(self, module: t.Union[t.List[str], str]) -> None:
if not isinstance(module, str):
for module_name in module:
self.disable(module_name)
return
sub_prefix = module + '.'
modules = {}
for key, mod in sys.modules.items():
if key == module or key.startswith(sub_prefix):
try: parent_name = '.'.join(key.split('.')[:-1])
except IndexError: parent_name = None
# Delete the child module reference from the parent module.
modules[key] = mod
if parent_name and parent_name in sys.modules:
parent = sys.modules[parent_name]
try:
delattr(parent, key.split('.')[-1])
except AttributeError:
pass
# Pop all the modules we found from sys.modules
for key, mod in modules.items():
del sys.modules[key]
self.state['disables'][key] = mod
def autodisable(self) -> None:
for loader, name, ispkg in self.discover():
self.disable(name)
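

# --- Usage sketch (not part of the original module; names are illustrative).
# localimport is designed as a context manager that temporarily prepends a
# vendored directory to sys.path and restores the importer state (sys.path,
# sys.meta_path, sys.modules) on exit. `vendor` and `somepackage` are
# assumptions used only for this sketch.
if __name__ == '__main__':
  with localimport('vendor') as _importer:
    try:
      import somepackage  # hypothetical vendored dependency
    except ImportError:
      pass  # the vendor directory need not exist for this sketch
  # Outside the block, the global import machinery is back to its prior state.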
|
import numpy as np
import h5py
import pandas as pd
from svhn_io import load_svhn
from keras_uncertainty.utils import classifier_calibration_curve, classifier_calibration_error
EPSILON = 1e-10
def load_hdf5_data(filename):
inp = h5py.File(filename, "r")
preds = inp["preds"][...]
inp.close()
return preds
NUM_ENSEMBLES = 15
NUM_BINS = 7
#IOD_FILE_PATTERN = "cnn_svhn-num_ens-{}-preds.hdf5"
#OUTPUT_PATTERN = "svhn-calibration-sub-deepensembles_1_num-ens-{}_cnn_svhn.csv"
IOD_FILE_PATTERN = "deepensembles-cnn_svhn-num_ens-{}-preds.hdf5"
OUTPUT_PATTERN = "svhn-calibration-deepensembles-num-ens-{}_cnn_svhn.csv"
if __name__ == "__main__":
for num_ens in range(1, NUM_ENSEMBLES + 1):
(_, __), (___, y_true) = load_svhn()
y_true = y_true.flatten()
y_probs = load_hdf5_data(IOD_FILE_PATTERN.format(num_ens))
y_confs = np.max(y_probs, axis=1)
y_pred = np.argmax(y_probs, axis=1)
curve_conf, curve_acc = classifier_calibration_curve(y_pred, y_true, y_confs, num_bins=NUM_BINS)
error = classifier_calibration_error(y_pred, y_true, y_confs, num_bins=NUM_BINS)
print("Processing calibration curve for {} ensembles. Error: {}".format(num_ens, error))
output_df = pd.DataFrame(data={"conf": curve_conf, "acc": curve_acc})
output_df.to_csv(OUTPUT_PATTERN.format(num_ens), sep=';', index=False)
|
from unittest import TestCase
from rockaway.models import Track
class TestTrackBasics(TestCase):
def test_track_create_no_args(self):
track = Track()
self.assertFalse(track.hasDbEntry())
self.assertFalse(track.hasFile())
def test_track_create(self):
args = {"Title": "Rockaway Beach",
"Artist": "The Ramones", # FIXME--This and album will not just be strings
"Album": "Rocket to Russia",
"Year": 1977,
"Genre": "Punk Rock",
"Time": 126000}
track = Track(**args)
self.assertEqual(track.Title, args["Title"])
self.assertEqual(track.Year, 1977)
# Alternate ways of looking up attributes
self.assertEqual(track.genre, track.Genre)
self.assertEqual(track.Time, track["Time"])
|
"""
Implementation of functions in the Numpy package.
"""
import math
import sys
import itertools
from collections import namedtuple
from llvmlite.llvmpy import core as lc
import numpy as np
import operator
from . import builtins, callconv, ufunc_db, arrayobj
from .imputils import Registry, impl_ret_new_ref, force_error_model
from .. import typing, types, cgutils, numpy_support, utils
from ..numpy_support import ufunc_find_matching_loop, select_array_wrapper, from_dtype
from ..typing import npydecl
from ..extending import overload, intrinsic
from .. import errors
registry = Registry()
lower = registry.lower
########################################################################
# In the way we generate code, ufuncs work with scalar as well as
# with array arguments. The following helper classes help dealing
# with scalar and array arguments in a regular way.
#
# In short, the classes provide a uniform interface. The interface
# handles the indexing of as many dimensions as the array may have.
# For scalars, all indexing is ignored and when the value is read,
# the scalar is returned. For arrays code for actual indexing is
# generated and reading performs the appropriate indirection.
class _ScalarIndexingHelper(object):
def update_indices(self, loop_indices, name):
pass
def as_values(self):
pass
class _ScalarHelper(object):
"""Helper class to handle scalar arguments (and result).
Note that store_data is only used when generating code for
a scalar ufunc and to write the output value.
For loading, the value is directly used without having any
kind of indexing nor memory backing it up. This is the use
for input arguments.
For storing, a variable is created in the stack where the
value will be written.
    Note that reading back a stored value is not supported (it is
    unneeded for our current use-cases). This class
will always "load" the original value it got at its creation.
"""
def __init__(self, ctxt, bld, val, ty):
self.context = ctxt
self.builder = bld
self.val = val
self.base_type = ty
intpty = ctxt.get_value_type(types.intp)
self.shape = [lc.Constant.int(intpty, 1)]
lty = ctxt.get_data_type(ty) if ty != types.boolean else lc.Type.int(1)
self._ptr = cgutils.alloca_once(bld, lty)
def create_iter_indices(self):
return _ScalarIndexingHelper()
def load_data(self, indices):
return self.val
def store_data(self, indices, val):
self.builder.store(val, self._ptr)
@property
def return_val(self):
return self.builder.load(self._ptr)
class _ArrayIndexingHelper(namedtuple('_ArrayIndexingHelper',
('array', 'indices'))):
def update_indices(self, loop_indices, name):
bld = self.array.builder
intpty = self.array.context.get_value_type(types.intp)
ONE = lc.Constant.int(lc.Type.int(intpty.width), 1)
# we are only interested in as many inner dimensions as dimensions
# the indexed array has (the outer dimensions are broadcast, so
        # ignoring the outer indices produces the desired result).
indices = loop_indices[len(loop_indices) - len(self.indices):]
for src, dst, dim in zip(indices, self.indices, self.array.shape):
cond = bld.icmp(lc.ICMP_UGT, dim, ONE)
with bld.if_then(cond):
bld.store(src, dst)
def as_values(self):
"""
The indexing helper is built using alloca for each value, so it
actually contains pointers to the actual indices to load. Note
that update_indices assumes the same. This method returns the
indices as values
"""
bld = self.array.builder
return [bld.load(index) for index in self.indices]
class _ArrayHelper(namedtuple('_ArrayHelper', ('context', 'builder',
'shape', 'strides', 'data',
'layout', 'base_type', 'ndim',
'return_val'))):
"""Helper class to handle array arguments/result.
It provides methods to generate code loading/storing specific
items as well as support code for handling indices.
"""
def create_iter_indices(self):
intpty = self.context.get_value_type(types.intp)
ZERO = lc.Constant.int(lc.Type.int(intpty.width), 0)
indices = []
for i in range(self.ndim):
x = cgutils.alloca_once(self.builder, lc.Type.int(intpty.width))
self.builder.store(ZERO, x)
indices.append(x)
return _ArrayIndexingHelper(self, indices)
def _load_effective_address(self, indices):
return cgutils.get_item_pointer2(self.context,
self.builder,
data=self.data,
shape=self.shape,
strides=self.strides,
layout=self.layout,
inds=indices)
def load_data(self, indices):
model = self.context.data_model_manager[self.base_type]
ptr = self._load_effective_address(indices)
return model.load_from_data_pointer(self.builder, ptr)
def store_data(self, indices, value):
ctx = self.context
bld = self.builder
store_value = ctx.get_value_as_data(bld, self.base_type, value)
assert ctx.get_data_type(self.base_type) == store_value.type
bld.store(store_value, self._load_effective_address(indices))
def _prepare_argument(ctxt, bld, inp, tyinp, where='input operand'):
"""returns an instance of the appropriate Helper (either
_ScalarHelper or _ArrayHelper) class to handle the argument.
using the polymorphic interface of the Helper classes, scalar
and array cases can be handled with the same code"""
# first un-Optional Optionals
if isinstance(tyinp, types.Optional):
oty = tyinp
tyinp = tyinp.type
inp = ctxt.cast(bld, inp, oty, tyinp)
# then prepare the arg for a concrete instance
if isinstance(tyinp, types.ArrayCompatible):
ary = ctxt.make_array(tyinp)(ctxt, bld, inp)
shape = cgutils.unpack_tuple(bld, ary.shape, tyinp.ndim)
strides = cgutils.unpack_tuple(bld, ary.strides, tyinp.ndim)
return _ArrayHelper(ctxt, bld, shape, strides, ary.data,
tyinp.layout, tyinp.dtype, tyinp.ndim, inp)
elif types.unliteral(tyinp) in types.number_domain | set([types.boolean]):
return _ScalarHelper(ctxt, bld, inp, tyinp)
else:
raise NotImplementedError('unsupported type for {0}: {1}'.format(where, str(tyinp)))
_broadcast_onto_sig = types.intp(types.intp, types.CPointer(types.intp),
types.intp, types.CPointer(types.intp))
def _broadcast_onto(src_ndim, src_shape, dest_ndim, dest_shape):
'''Low-level utility function used in calculating a shape for
an implicit output array. This function assumes that the
destination shape is an LLVM pointer to a C-style array that was
already initialized to a size of one along all axes.
Returns an integer value:
>= 1 : Succeeded. Return value should equal the number of dimensions in
the destination shape.
0 : Failed to broadcast because source shape is larger than the
destination shape (this case should be weeded out at type
checking).
< 0 : Failed to broadcast onto destination axis, at axis number ==
-(return_value + 1).
'''
if src_ndim > dest_ndim:
# This check should have been done during type checking, but
# let's be defensive anyway...
return 0
else:
src_index = 0
dest_index = dest_ndim - src_ndim
while src_index < src_ndim:
src_dim_size = src_shape[src_index]
dest_dim_size = dest_shape[dest_index]
# Check to see if we've already mutated the destination
# shape along this axis.
if dest_dim_size != 1:
# If we have mutated the destination shape already,
# then the source axis size must either be one,
# or the destination axis size.
if src_dim_size != dest_dim_size and src_dim_size != 1:
return -(dest_index + 1)
elif src_dim_size != 1:
                    # If the destination size is still its initial value of
                    # one, adopt the source dimension size.
dest_shape[dest_index] = src_dim_size
src_index += 1
dest_index += 1
return dest_index
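
# --- Illustration (plain-Python trace of _broadcast_onto; in generated code
# it is compiled and called with C pointers; the values are assumptions).
#   dest = [1, 1, 1]
#   _broadcast_onto(2, [4, 1], 3, dest)        # returns 3; dest -> [1, 4, 1]
#   _broadcast_onto(2, [4, 3], 3, [1, 2, 1])   # returns -2; axis 1 is already
#                                              # fixed to 2, source wants 4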
def _build_array(context, builder, array_ty, input_types, inputs):
"""Utility function to handle allocation of an implicit output array
given the target context, builder, output array type, and a list of
_ArrayHelper instances.
"""
intp_ty = context.get_value_type(types.intp)
def make_intp_const(val):
return context.get_constant(types.intp, val)
ZERO = make_intp_const(0)
ONE = make_intp_const(1)
src_shape = cgutils.alloca_once(builder, intp_ty, array_ty.ndim,
"src_shape")
dest_ndim = make_intp_const(array_ty.ndim)
dest_shape = cgutils.alloca_once(builder, intp_ty, array_ty.ndim,
"dest_shape")
dest_shape_addrs = tuple(cgutils.gep_inbounds(builder, dest_shape, index)
for index in range(array_ty.ndim))
# Initialize the destination shape with all ones.
for dest_shape_addr in dest_shape_addrs:
builder.store(ONE, dest_shape_addr)
# For each argument, try to broadcast onto the destination shape,
# mutating along any axis where the argument shape is not one and
# the destination shape is one.
for arg_number, arg in enumerate(inputs):
if not hasattr(arg, "ndim"): # Skip scalar arguments
continue
arg_ndim = make_intp_const(arg.ndim)
for index in range(arg.ndim):
builder.store(arg.shape[index],
cgutils.gep_inbounds(builder, src_shape, index))
arg_result = context.compile_internal(
builder, _broadcast_onto, _broadcast_onto_sig,
[arg_ndim, src_shape, dest_ndim, dest_shape])
with cgutils.if_unlikely(builder,
builder.icmp(lc.ICMP_SLT, arg_result, ONE)):
msg = "unable to broadcast argument %d to output array" % (
arg_number,)
loc = errors.loc_info.get('loc', None)
if loc is not None:
msg += '\nFile "%s", line %d, ' % (loc.filename, loc.line)
context.call_conv.return_user_exc(builder, ValueError, (msg,))
real_array_ty = array_ty.as_array
dest_shape_tup = tuple(builder.load(dest_shape_addr)
for dest_shape_addr in dest_shape_addrs)
array_val = arrayobj._empty_nd_impl(context, builder, real_array_ty,
dest_shape_tup)
# Get the best argument to call __array_wrap__ on
array_wrapper_index = select_array_wrapper(input_types)
array_wrapper_ty = input_types[array_wrapper_index]
try:
# __array_wrap__(source wrapped array, out array) -> out wrapped array
array_wrap = context.get_function('__array_wrap__',
array_ty(array_wrapper_ty, real_array_ty))
except NotImplementedError:
# If it's the same priority as a regular array, assume we
# should use the allocated array unchanged.
if array_wrapper_ty.array_priority != types.Array.array_priority:
raise
out_val = array_val._getvalue()
else:
wrap_args = (inputs[array_wrapper_index].return_val, array_val._getvalue())
out_val = array_wrap(builder, wrap_args)
ndim = array_ty.ndim
shape = cgutils.unpack_tuple(builder, array_val.shape, ndim)
strides = cgutils.unpack_tuple(builder, array_val.strides, ndim)
return _ArrayHelper(context, builder, shape, strides, array_val.data,
array_ty.layout, array_ty.dtype, ndim,
out_val)
def numpy_ufunc_kernel(context, builder, sig, args, kernel_class,
explicit_output=True):
# This is the code generator that builds all the looping needed
# to execute a NumPy function over several dimensions (including
# scalar cases).
#
# context - the code generation context
# builder - the code emitter
# sig - signature of the ufunc
# args - the args to the ufunc
# kernel_class - a code generating subclass of _Kernel that provides
#                the per-element 'generate' method
# explicit_output - if the output was explicit in the call
#                   (i.e. np.add(x, y, r))
arguments = [_prepare_argument(context, builder, arg, tyarg)
for arg, tyarg in zip(args, sig.args)]
if not explicit_output:
ret_ty = sig.return_type
if isinstance(ret_ty, types.ArrayCompatible):
output = _build_array(context, builder, ret_ty, sig.args, arguments)
else:
output = _prepare_argument(
context, builder,
lc.Constant.null(context.get_value_type(ret_ty)), ret_ty)
arguments.append(output)
elif context.enable_nrt:
# Incref the output
context.nrt.incref(builder, sig.return_type, args[-1])
inputs = arguments[0:-1]
output = arguments[-1]
outer_sig = [a.base_type for a in arguments]
#signature expects return type first, while we have it last:
outer_sig = outer_sig[-1:] + outer_sig[:-1]
outer_sig = typing.signature(*outer_sig)
kernel = kernel_class(context, builder, outer_sig)
intpty = context.get_value_type(types.intp)
indices = [inp.create_iter_indices() for inp in inputs]
loopshape = output.shape
with cgutils.loop_nest(builder, loopshape, intp=intpty) as loop_indices:
vals_in = []
for i, (index, arg) in enumerate(zip(indices, inputs)):
index.update_indices(loop_indices, i)
vals_in.append(arg.load_data(index.as_values()))
val_out = kernel.generate(*vals_in)
output.store_data(loop_indices, val_out)
out = arguments[-1].return_val
return impl_ret_new_ref(context, builder, sig.return_type, out)
# Kernels are the code to be executed inside the multidimensional loop.
class _Kernel(object):
def __init__(self, context, builder, outer_sig):
self.context = context
self.builder = builder
self.outer_sig = outer_sig
def cast(self, val, fromty, toty):
"""Numpy uses cast semantics that are different from standard Python
(for example, it does allow casting from complex to float).
This method acts as a patch to context.cast so that it allows
complex to real/int casts.
"""
if (isinstance(fromty, types.Complex) and
not isinstance(toty, types.Complex)):
# attempt conversion of the real part to the specified type.
# note that NumPy issues a warning for this kind of conversion
newty = fromty.underlying_float
attr = self.context.get_getattr(fromty, 'real')
val = attr(self.context, self.builder, fromty, val, 'real')
fromty = newty
# let the regular cast do the rest...
return self.context.cast(self.builder, val, fromty, toty)
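# Hedged illustration of the NumPy cast semantics mentioned in the docstring
# above (for documentation only; not used by the lowering code): NumPy keeps
# the real part when converting complex to real, emitting a ComplexWarning,
# whereas Python's float(1 + 2j) raises a TypeError.
def _complex_to_real_cast_demo():
    import warnings
    import numpy as _np
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")  # silence the ComplexWarning for the demo
        real_part = _np.asarray([1 + 2j]).astype(_np.float64)[0]
    return real_part  # 1.0 -- the imaginary part is discarded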
def _ufunc_db_function(ufunc):
"""Use the ufunc loop type information to select the code generation
function from the table provided by the dict_of_kernels. The dict
of kernels maps the loop identifier to a function with the
following signature: (context, builder, signature, args).
The loop type information has the form 'AB->C'. The letters to the
left of '->' are the input types (specified as NumPy letter
types). The letters to the right of '->' are the output
types. There must be 'ufunc.nin' letters to the left of '->', and
'ufunc.nout' letters to the right.
For example, a binary float loop resulting in a float, will have
the following signature: 'ff->f'.
A given ufunc implements many loops. The list of loops implemented
for a given ufunc can be accessed using the 'types' attribute in
the ufunc object. The NumPy machinery selects the first loop that
fits a given calling signature (in our case, what we call the
outer_sig). This logic is mimicked by 'ufunc_find_matching_loop'.
"""
class _KernelImpl(_Kernel):
def __init__(self, context, builder, outer_sig):
super(_KernelImpl, self).__init__(context, builder, outer_sig)
loop = ufunc_find_matching_loop(
ufunc, outer_sig.args + (outer_sig.return_type,))
self.fn = ufunc_db.get_ufunc_info(ufunc).get(loop.ufunc_sig)
self.inner_sig = typing.signature(
*(loop.outputs + loop.inputs))
if self.fn is None:
msg = "Don't know how to lower ufunc '{0}' for loop '{1}'"
raise NotImplementedError(msg.format(ufunc.__name__, loop))
def generate(self, *args):
isig = self.inner_sig
osig = self.outer_sig
cast_args = [self.cast(val, inty, outty)
for val, inty, outty in zip(args, osig.args,
isig.args)]
with force_error_model(self.context, 'numpy'):
res = self.fn(self.context, self.builder, isig, cast_args)
dmm = self.context.data_model_manager
res = dmm[isig.return_type].from_return(self.builder, res)
return self.cast(res, isig.return_type, osig.return_type)
return _KernelImpl
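# Hedged sketch of the 'AB->C' loop notation described in the docstring above
# (illustration only; not used by the registration code): every NumPy ufunc
# lists its implemented loops in its .types attribute, e.g. np.add.types
# contains entries such as 'ff->f' (float32 binary loop) and 'dd->d'
# (float64 binary loop).
def _show_ufunc_loops_demo(a_ufunc=None):
    import numpy as _np
    a_ufunc = _np.add if a_ufunc is None else a_ufunc
    return list(a_ufunc.types)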
################################################################################
# Helper functions that register the ufuncs
_kernels = {} # Temporary map from ufunc's to their kernel implementation class
def register_unary_ufunc_kernel(ufunc, kernel):
def unary_ufunc(context, builder, sig, args):
return numpy_ufunc_kernel(context, builder, sig, args, kernel)
def unary_ufunc_no_explicit_output(context, builder, sig, args):
return numpy_ufunc_kernel(context, builder, sig, args, kernel,
explicit_output=False)
_any = types.Any
# (array or scalar, out=array)
lower(ufunc, _any, types.Array)(unary_ufunc)
# (array or scalar)
lower(ufunc, _any)(unary_ufunc_no_explicit_output)
_kernels[ufunc] = kernel
def register_binary_ufunc_kernel(ufunc, kernel):
def binary_ufunc(context, builder, sig, args):
return numpy_ufunc_kernel(context, builder, sig, args, kernel)
def binary_ufunc_no_explicit_output(context, builder, sig, args):
return numpy_ufunc_kernel(context, builder, sig, args, kernel,
explicit_output=False)
_any = types.Any
# (array or scalar, array or scalar, out=array)
lower(ufunc, _any, _any, types.Array)(binary_ufunc)
# (scalar, scalar)
lower(ufunc, _any, _any)(binary_ufunc_no_explicit_output)
_kernels[ufunc] = kernel
def register_unary_operator_kernel(operator, kernel, inplace=False):
assert not inplace # are there any inplace unary operators?
def lower_unary_operator(context, builder, sig, args):
return numpy_ufunc_kernel(context, builder, sig, args, kernel,
explicit_output=False)
_arr_kind = types.Array
lower(operator, _arr_kind)(lower_unary_operator)
def register_binary_operator_kernel(op, kernel, inplace=False):
def lower_binary_operator(context, builder, sig, args):
return numpy_ufunc_kernel(context, builder, sig, args, kernel,
explicit_output=False)
def lower_inplace_operator(context, builder, sig, args):
# The visible signature is (A, B) -> A
# The implementation's signature (with explicit output)
# is (A, B, A) -> A
args = tuple(args) + (args[0],)
sig = typing.signature(sig.return_type, *sig.args + (sig.args[0],))
return numpy_ufunc_kernel(context, builder, sig, args, kernel,
explicit_output=True)
_any = types.Any
_arr_kind = types.Array
formal_sigs = [(_arr_kind, _arr_kind), (_any, _arr_kind), (_arr_kind, _any)]
for sig in formal_sigs:
if not inplace:
lower(op, *sig)(lower_binary_operator)
else:
lower(op, *sig)(lower_inplace_operator)
################################################################################
# Use the contents of ufunc_db to initialize the supported ufuncs
for ufunc in ufunc_db.get_ufuncs():
if ufunc.nin == 1:
register_unary_ufunc_kernel(ufunc, _ufunc_db_function(ufunc))
elif ufunc.nin == 2:
register_binary_ufunc_kernel(ufunc, _ufunc_db_function(ufunc))
else:
raise RuntimeError("Don't know how to register ufuncs from ufunc_db with arity > 2")
@lower(operator.pos, types.Array)
def array_positive_impl(context, builder, sig, args):
'''Lowering function for +(array) expressions. Defined here
(numba.targets.npyimpl) since the remaining array-operator
lowering functions are also registered in this module.
'''
class _UnaryPositiveKernel(_Kernel):
def generate(self, *args):
[val] = args
return val
return numpy_ufunc_kernel(context, builder, sig, args,
_UnaryPositiveKernel, explicit_output=False)
for _op_map in (npydecl.NumpyRulesUnaryArrayOperator._op_map,
npydecl.NumpyRulesArrayOperator._op_map,
):
for operator, ufunc_name in _op_map.items():
ufunc = getattr(np, ufunc_name)
kernel = _kernels[ufunc]
if ufunc.nin == 1:
register_unary_operator_kernel(operator, kernel)
elif ufunc.nin == 2:
register_binary_operator_kernel(operator, kernel)
else:
raise RuntimeError("There shouldn't be any non-unary or binary operators")
for _op_map in (npydecl.NumpyRulesInplaceArrayOperator._op_map,
):
for operator, ufunc_name in _op_map.items():
ufunc = getattr(np, ufunc_name)
kernel = _kernels[ufunc]
if ufunc.nin == 1:
register_unary_operator_kernel(operator, kernel, inplace=True)
elif ufunc.nin == 2:
register_binary_operator_kernel(operator, kernel, inplace=True)
else:
raise RuntimeError("There shouldn't be any non-unary or binary operators")
del _kernels
@intrinsic
def _make_dtype_object(typingctx, desc):
"""Given a string or NumberClass description *desc*, returns the dtype object.
"""
def from_nb_type(nb_type):
return_type = types.DType(nb_type)
sig = return_type(desc)
def codegen(context, builder, signature, args):
# All dtype objects are dummy values in LLVM.
# They only exist in the type level.
return context.get_dummy_value()
return sig, codegen
if isinstance(desc, types.Literal):
# Convert the str description into np.dtype then to numba type.
nb_type = from_dtype(np.dtype(desc.literal_value))
return from_nb_type(nb_type)
elif isinstance(desc, types.functions.NumberClass):
thestr = str(desc.dtype)
# Convert the str description into np.dtype then to numba type.
nb_type = from_dtype(np.dtype(thestr))
return from_nb_type(nb_type)
@overload(np.dtype)
def numpy_dtype(desc):
"""Provide an implementation so that numpy.dtype function can be lowered.
"""
if isinstance(desc, (types.Literal, types.functions.NumberClass)):
def imp(desc):
return _make_dtype_object(desc)
return imp
else:
raise TypeError('unknown dtype descriptor: {}'.format(desc))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os
import re
def get_version(package):
"""
Return package version as listed in `__version__` in `__init__.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
version = get_version('oauth_api')
setup(
name="django-oauth-api",
version=version,
description="OAuth API for Django using Django Rest Framework",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Framework :: Django",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.6",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='django djangorestframework oauth oauth2 oauthlib',
author='Tomi Pajunen',
author_email='tomi@madlab.fi',
url='https://github.com/eofs/django-oauth-api',
license='BSD',
packages=find_packages(),
include_package_data=True,
test_suite='runtests',
install_requires=[
'django>=1.11',
'oauthlib==2.0.7',
],
zip_safe=False,
)
|
from django.contrib import admin
from events.models import Place, Event, Attendance
# Register your models here.
class EventAdmin(admin.ModelAdmin):
filter_horizontal = ('expected_members', )
class AttendanceAdmin(admin.ModelAdmin):
list_display = ('event__name', 'member', 'attendance', 'proxy_to', 'accepted',)
list_filter = ('event__name',)
def event__name(self, obj):
return str(obj.event)
admin.site.register(Place)
admin.site.register(Attendance, AttendanceAdmin)
admin.site.register(Event, EventAdmin)
|
# -*- coding: utf-8 -*-
# Created at 03/09/2020
__author__ = 'raniys'
import math
import pytest
from factorial_example import factorial_function
@pytest.mark.sample
def test_factorial_functionality():
print("Inside test_factorial_functionality")
assert factorial_function(0) == 1
assert factorial_function(4) == 24
@pytest.mark.sample
def test_standard_library():
print("Inside test_standard_library")
for i in range(5):
# verify that the factorial is calculated correctly
# by checking the result against the standard
# library - math.factorial()
assert math.factorial(i) == factorial_function(i)
@pytest.mark.sample
def test_negative_number():
print("Inside test_negative_number")
# This test case would pass if Assertion Error
# is raised. In this case, the input number is negative
# hence, the test case passes
with pytest.raises(AssertionError):
factorial_function(-10)
|
import requests
import json
# Get Current Patch
def getCurrentVersion():
versionResponse = requests.get("https://ddragon.leagueoflegends.com/api/versions.json")
version_patch_RawData = versionResponse.json()
currentVersion = version_patch_RawData[0]
print(currentVersion)
return currentVersion
#champions, items, summoner_spells, spells
def GetDDragonData_Champions():
version = getCurrentVersion()
#Champions Data
response = requests.get("http://ddragon.leagueoflegends.com/cdn/"+version+"/data/en_US/champion.json")
allChampionRawData = json.loads(response.text)
ChampionIdToName = {}
for key,champion in allChampionRawData['data'].items():
ChampionIdToName[int(champion['key'])] = champion['name']
print(ChampionIdToName)
return ChampionIdToName
def GetDDragonData_Items():
version = getCurrentVersion()
response = requests.get("http://ddragon.leagueoflegends.com/cdn/"+version+"/data/en_US/item.json")
allItemsRawData = json.loads(response.text)
QuickPrinter(allItemsRawData)
#Items Data
ItemIdToName = {}
for key,item in allItemsRawData['data'].items():
ItemIdToName[int(key)] = item['name']
print(ItemIdToName)
return ItemIdToName
def QuickPrinter(String_to_Print):
print(json.dumps(String_to_Print, indent=4, sort_keys=True))
#main()
version = getCurrentVersion()
GetDDragonData_Champions()
GetDDragonData_Items()
|
import numpy as np
import nibabel as nib
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../')
from my_functions.matrix_stuff import *
def manual_rigid_body(fname = 'example_brain.nii.gz',
outmat = 'transformation.mat',
outimg = 'example_brain_transformed.nii.gz',
theta = np.radians([0,0,0]),
translation_vec = [0,0,0],
type = 'rotation',
flip_coordinates = [True, False, False]):
"""
Function to perform a rigid body transformation based on manually determined parameters.
Args:
- fname (str): filepath to input nifti image (.nii.gz)
- outmat (str): filepath of output 4x4 transformation matrix (.mat)
- outimg (str): filepath of transformed output image (.nii.gz)
- theta (np.array): vector of rotation angles in x,y,z dimension (in radians)
- translation_vec (np.array): vector for translation in x,y,z (in image coordinates)
- type (str): can be 'rotation' or 'translation' or 'rotation_translation'
- flip_coordinates (boolean vector): indicates for which axis the sign of the offset needs to be flipped
Returns:
- M (np.array): output 4x4 transformation matrix
- M is written to outmat
- the output image (outimg) is written out
Note on flip_coordinates:
Voxel coordinates in the image are expected to increase in the following directions
(it's similar to determining the reorient-command):
- first dimension: left -> right
- second dimension: posterior -> anterior
- third dimension: inferior -> superior
if they go the other way, change input variable accordingly, e.g.:
flip_coordinates = [True, False, False]
"""
# get sform from image to determine offset of coordinate-system
img = nib.load(fname)
aff = img.get_affine()
offset = aff[0:3,3]
# which type of manipulation is requested
if type == 'rotation':
print('do rotation only')
M = rotation(theta, offset, flip_coordinates)
elif type == 'translation':
print('do translation only')
M = vector_to_translation_matrix(translation_vec)
elif type == 'rotation_translation':
print('do combined rotation and translation')
M = rotation_translation(theta, translation_vec, offset, flip_coordinates)
# save output matrix
print('output matrix: ', M)
print('save in: ', outmat)
save_matrix4x4(M, outmat)
# apply transformation to input image
applywarp_command = "applywarp -i " + fname + " -r " + fname + " --premat=" + outmat + " --interp=nn -o " + outimg
print('run applywarp: ', applywarp_command)
os.system(applywarp_command)
return M
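if __name__ == '__main__':
    # Hedged usage sketch: the input file below matches the function's default
    # and may not exist locally, and FSL's applywarp must be on the PATH for
    # the final resampling step. The guard keeps this a no-op otherwise.
    example_input = 'example_brain.nii.gz'
    if os.path.exists(example_input):
        manual_rigid_body(fname=example_input,
                          outmat='transformation.mat',
                          outimg='example_brain_transformed.nii.gz',
                          theta=np.radians([10, 0, 0]),
                          type='rotation',
                          flip_coordinates=[True, False, False])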
|
from sys import maxsize
class Contact:
def __init__(self, first_name=None, middle_name=None, last_name=None, nickname=None,
photo_path=None, photo_delete=False, title=None, company=None, address=None,
telephones_all=None, telephone_home=None,
telephone_mobile=None, telephone_work=None, telephone_fax=None, emails_all=None,
email=None, email2=None, email3=None,
homepage=None, birthday_day=None, birthday_month=None, birthday_year=None, anniversary_day=None,
anniversary_month=None, anniversary_year=None, group=None, secondary_address=None,
secondary_telephone_home=None, secondary_notes=None, id_contact=None):
self.first_name = first_name
self.middle_name = middle_name
self.last_name = last_name
self.nickname = nickname
self.photo_path = photo_path
self.photo_delete = photo_delete
self.title = title
self.company = company
self.address = address
self.telephones_all = telephones_all
self.telephone_home = telephone_home
self.telephone_mobile = telephone_mobile
self.telephone_work = telephone_work
self.telephone_fax = telephone_fax
self.emails_all = emails_all
self.email = email
self.email2 = email2
self.email3 = email3
self.homepage = homepage
self.birthday_day = birthday_day
self.birthday_month = birthday_month
self.birthday_year = birthday_year
self.anniversary_day = anniversary_day
self.anniversary_month = anniversary_month
self.anniversary_year = anniversary_year
self.group = group
self.secondary_address = secondary_address
self.secondary_telephone_home = secondary_telephone_home
self.secondary_notes = secondary_notes
self.id = id_contact
def __repr__(self):
return "%s: %s %s, %s" % (self.id, self.first_name, self.last_name, self.address)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and \
self.check_for_none(self.first_name, other.first_name) and \
self.check_for_none(self.last_name, other.last_name) and \
self.check_for_none(self.address, other.address)
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
def check_for_none(self, first, second):
return first == second or (first is None and second == "") or (first == "" and second is None)
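# Hedged usage sketch (sample data is illustrative only): id_or_max() makes
# contacts without a database id sort after those that have one, which is
# handy when comparing an application list against a database list.
if __name__ == "__main__":
    contacts = [Contact(first_name="Anna", id_contact="2"),
                Contact(first_name="Boris"),
                Contact(first_name="Clara", id_contact="1")]
    ordered = sorted(contacts, key=Contact.id_or_max)
    print([c.first_name for c in ordered])  # ['Clara', 'Anna', 'Boris']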
|
import os
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from .base import Base
class MapperBase():
user = os.getenv("MYSQL_USER")
key = os.getenv("MYSQL_KEY")
host = os.getenv("MYSQL_HOST")
port = os.getenv("MYSQL_PORT")
def __init__(self, database):
self.db = database
if database == 'test':
self.url = 'sqlite:///:memory:'
else:
self.url = \
'mysql+mysqlconnector://{}:{}@{}:{}/{}'.format(
self.user,
self.key,
self.host,
self.port,
self.db,
)
self.engine = create_engine(
self.url,
connect_args={'use_pure': True}
)
self.session = sessionmaker(bind=self.engine)
self.base = Base
def get_base(self):
return self.base
def get_engine(self):
return self.engine
def get_session(self):
return self.session()
|
"""
Contains the Miq model definition. Based on MNIST.
The model in this file is a simple convolutional network with two
convolutional layers, two pooling layers, followed by two fully connected
layers. A single dropout layer is used between the two fully connected layers.
"""
import logging
import os
import pkg_resources
import tensorflow
import tensorflow.contrib.slim
import urllib
import microscopeimagequality.constants as constants
DEFAULT_MODEL_DIRECTORY = pkg_resources.resource_filename(__name__, "data")
DEFAULT_MODEL_PATH = DEFAULT_MODEL_DIRECTORY + "/" + os.path.basename(constants.REMOTE_MODEL_CHECKPOINT_PATH)
def download_model(source_path=constants.REMOTE_MODEL_CHECKPOINT_PATH, output_path=DEFAULT_MODEL_DIRECTORY):
print "Downloading model from %s to %s." % (source_path, output_path)
if not os.path.isdir(output_path):
os.mkdir(output_path)
file_extensions = [".index", ".meta", ".data-00000-of-00001"]
for extension in file_extensions:
remote_path = constants.REMOTE_MODEL_CHECKPOINT_PATH + extension
local_path = os.path.join(output_path, os.path.basename(remote_path))
urllib.urlretrieve(remote_path, local_path)
print "Downloaded %d files to %s." % (len(file_extensions), output_path)
print "Default model path is %s." % DEFAULT_MODEL_PATH
def add_loss(logits, one_hot_labels, use_rank_loss=False):
"""Add loss function to tf.losses.
Args:
logits: Tensor of logits of shape [batch_size, num_classes]
one_hot_labels: A `Tensor` of size [batch_size, num_classes], where
each row has a single element set to one and the rest set to zeros.
use_rank_loss: Boolean, whether to use rank probability score loss instead
of cross entropy.
"""
if not use_rank_loss:
tensorflow.contrib.slim.losses.softmax_cross_entropy(logits, one_hot_labels)
else:
rank_loss = ranked_probability_score(
tensorflow.nn.softmax(logits), one_hot_labels, dim=1)
tensorflow.losses.add_loss(tensorflow.reduce_mean(rank_loss))
def miq_model(images, num_classes=2, is_training=False, model_id=0):
"""Creates the convolutional model.
Note that since the output is a set of 'logits', the values fall in the
interval of (-infinity, infinity). Consequently, to convert the outputs to a
probability distribution over the characters, one will need to convert them
using the softmax function:
logits = miq.Miq(images, is_training=False)
probabilities = tf.nn.softmax(logits)
predictions = tf.argmax(logits, 1)
Args:
images: the input patches, a tensor of size [batch_size, patch_width,
patch_width, 1].
num_classes: the number of classes in the dataset.
is_training: specifies whether or not we're currently training the model.
This variable will determine the behaviour of the dropout layer.
model_id: Integer, model ID.
Returns:
the output logits, a tensor of size [batch_size, num_classes].
Raises:
ValueError: If an invalid model ID is specified.
"""
logging.info('Using model_id = %d.', model_id)
if model_id == 0:
return model_v0(images, num_classes, is_training)
elif model_id == 1:
return model_v1(images, num_classes, is_training)
else:
raise ValueError('Unsupported model %d' % model_id)
def model_v1(images, num_classes, is_training):
"""Dilated convolution."""
return model(images, num_classes, is_training, rate=2)
def model_v0(images, num_classes, is_training):
"""Original model."""
return model(images, num_classes, is_training, rate=1)
def model(images, num_classes, is_training, rate):
"""Generic model.
Args:
images: the input patches, a tensor of size [batch_size, patch_width,
patch_width, 1].
num_classes: the number of classes in the dataset.
is_training: specifies whether or not we're currently training the model.
This variable will determine the behaviour of the dropout layer.
rate: Integer, convolution rate. 1 for standard convolution, > 1 for dilated
convolutions.
Returns:
the output logits, a tensor of size [batch_size, num_classes].
"""
# Adds a convolutional layer with 32 filters of size [5x5], followed by
# the default (implicit) Relu activation.
net = tensorflow.contrib.slim.conv2d(images, 32, [5, 5], padding='SAME', scope='conv1')
# Adds a [2x2] pooling layer with a stride of 2.
net = tensorflow.contrib.slim.max_pool2d(net, [2, 2], 2, scope='pool1')
# Adds a convolutional layer with 64 filters of size [5x5], followed by
# the default (implicit) Relu activation.
net = tensorflow.contrib.slim.conv2d(net, 64, [5, 5], padding='SAME', scope='conv2', rate=rate)
# Adds a [2x2] pooling layer with a stride of 2.
net = tensorflow.contrib.slim.max_pool2d(net, [2, 2], 2, scope='pool2')
# Reshapes the hidden units such that instead of 2D maps, they are 1D vectors:
net = tensorflow.contrib.slim.flatten(net)
# Adds a fully-connected layer with 1024 hidden units, followed by the default
# Relu activation.
net = tensorflow.contrib.slim.fully_connected(net, 1024, scope='fc3')
# Adds a dropout layer during training.
net = tensorflow.contrib.slim.dropout(net, 0.5, is_training=is_training, scope='dropout3')
# Adds a fully connected layer with 'num_classes' outputs. Note
# that the default Relu activation has been overridden to use no activation.
net = tensorflow.contrib.slim.fully_connected(net, num_classes, activation_fn=None, scope='fc4')
return net
def ranked_probability_score(predictions, targets, dim, name=None):
r"""Calculate the Ranked Probability Score (RPS).
RPS is given by the formula
sum_{k=1}^K (CDF_{prediction,k} - CDF_{target,k}) ^ 2
where CDF denotes the empirical CDF and each value of `k` denotes a different
class, in rank order. The range of possible RPS values is `[0, K - 1]`, where
`K` is the total number of classes. Perfect predictions have a score of zero.
This is a better metric than cross-entropy for probabilistic classification of
ranked targets, because it penalizes wrong guesses more harshly if they
predict a target that is further away. For deterministic predictions (zero
or one) ranked probability score is equal to absolute error in the number of
classes.
Importantly (like cross entropy), it is a strictly proper score rule: the
highest expected reward is obtained by predicting the true probability
distribution.
For these reasons, it is widely used for evaluating weather forecasts, which
are a prototypical use case for probabilistic regression.
References:
Murphy AH. A Note on the Ranked Probability Score. J. Appl. Meteorol. 1971,
10:155-156.
http://dx.doi.org/10.1175/1520-0450(1971)010<0155:ANOTRP>2.0.CO;2
Args:
predictions: tf.Tensor with probabilities for each class.
targets: tf.Tensor with one-hot encoded targets.
dim: integer dimension which corresponds to different classes in both
``predictions`` and ``targets``.
name: optional string name for the operation.
Returns:
tf.Tensor with the ranked probability score.
Raises:
ValueError: if predictions and targets do not have the same shape.
"""
with tensorflow.name_scope(name, 'ranked_probability_score', [predictions,
targets]) as scope:
predictions = tensorflow.convert_to_tensor(predictions, name='predictions')
targets = tensorflow.convert_to_tensor(targets, name='targets')
if not predictions.get_shape().is_compatible_with(targets.get_shape()):
raise ValueError('predictions and targets must have compatible shapes')
if predictions.dtype.is_floating and targets.dtype.is_integer:
# it's safe to coerce integer targets to float dtype
targets = tensorflow.cast(targets, dtype=predictions.dtype)
cdf_pred = tensorflow.cumsum(predictions, dim)
cdf_target = tensorflow.cumsum(targets, dim)
values = (cdf_pred - cdf_target) ** 2
# If desired, we could add arbitrary weighting in this sum along dim.
# That would still be a proper scoring rule (it's equivalent to rescaling
# the discretization):
# https://www.stat.washington.edu/research/reports/2008/tr533.pdf
rps = tensorflow.reduce_sum(values, dim, name=scope)
return rps
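# Hedged worked example of the RPS formula above (NumPy only, no TensorFlow
# session needed; the numbers are illustrative): for a 3-class problem with
# prediction [0.2, 0.5, 0.3] and true class 1 (one-hot [0, 1, 0]), the
# prediction CDF is [0.2, 0.7, 1.0] and the target CDF is [0.0, 1.0, 1.0],
# so RPS = (0.2-0.0)^2 + (0.7-1.0)^2 + (1.0-1.0)^2 = 0.04 + 0.09 + 0.0 = 0.13.
def _rps_demo():
    import numpy as np
    predictions = np.array([0.2, 0.5, 0.3])
    targets = np.array([0.0, 1.0, 0.0])
    values = (np.cumsum(predictions) - np.cumsum(targets)) ** 2
    return values.sum()  # 0.13 (up to floating point rounding)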
|
import numpy as np
import matplotlib.pyplot as plt
import argparse
def extract_name(word: str):
return word.split('=')[-1]
def extract_info(filename: str):
filename_splitted = filename.split('_')
assert len(filename_splitted) == 7
p = float(extract_name(filename_splitted[1]))
iterations = int(extract_name(filename_splitted[2]))
size = int(extract_name(filename_splitted[3]))
G = int(extract_name(filename_splitted[4]))
return p, iterations, size, G
def load_metrics(filename: str) -> list:
with open(filename, 'r') as f:
return [float(line.strip()) for line in f]
def plot_metrics(filename: str, metrics: list, output_path: str = None):
p, iterations, size, G = extract_info(filename)
x = np.linspace(0, iterations, len(metrics))
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.figure(figsize=(8, 5))
plt.grid(True, alpha=0.3)
plt.plot(x, metrics, label=f'p = {p}, N = {size}, G = {G}')
plt.ylabel(r'$\rho$', fontsize=14)
plt.xlabel('$t$', fontsize=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.legend(fontsize=13)
if output_path is not None:
plt.savefig(output_path, bbox_inches='tight')
else:
plt.show()
def main():
parser = argparse.ArgumentParser(description='Plot positive edge density (rho)')
parser.add_argument('--metrics-file', type=str, required=True, help='Path to calculated positive edge density')
parser.add_argument('--output-figure', type=str, required=False, default=None, help='Where to save output figure')
args = parser.parse_args()
metrics = load_metrics(args.metrics_file)
plot_metrics(args.metrics_file, metrics, args.output_figure)
if __name__ == '__main__':
main()
|
from decimal import Decimal
from urllib.parse import urlparse
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _, pgettext_lazy
from django_scopes.forms import SafeModelMultipleChoiceField
from pretix.api.models import WebHook
from pretix.api.webhooks import get_all_webhook_events
from pretix.base.forms import I18nModelForm, SettingsForm
from pretix.base.forms.widgets import SplitDateTimePickerWidget
from pretix.base.models import (
Device, EventMetaProperty, Gate, GiftCard, Organizer, Team,
)
from pretix.control.forms import ExtFileField, SplitDateTimeField
from pretix.control.forms.event import SafeEventMultipleChoiceField
from pretix.multidomain.models import KnownDomain
class OrganizerForm(I18nModelForm):
error_messages = {
'duplicate_slug': _("This slug is already in use. Please choose a different one."),
}
class Meta:
model = Organizer
fields = ['name', 'slug']
def clean_slug(self):
slug = self.cleaned_data['slug']
if Organizer.objects.filter(slug__iexact=slug).exists():
raise forms.ValidationError(
self.error_messages['duplicate_slug'],
code='duplicate_slug',
)
return slug
class OrganizerDeleteForm(forms.Form):
error_messages = {
'slug_wrong': _("The slug you entered was not correct."),
}
slug = forms.CharField(
max_length=255,
label=_("Event slug"),
)
def __init__(self, *args, **kwargs):
self.organizer = kwargs.pop('organizer')
super().__init__(*args, **kwargs)
def clean_slug(self):
slug = self.cleaned_data.get('slug')
if slug != self.organizer.slug:
raise forms.ValidationError(
self.error_messages['slug_wrong'],
code='slug_wrong',
)
return slug
class OrganizerUpdateForm(OrganizerForm):
def __init__(self, *args, **kwargs):
self.domain = kwargs.pop('domain', False)
self.change_slug = kwargs.pop('change_slug', False)
kwargs.setdefault('initial', {})
self.instance = kwargs['instance']
if self.domain and self.instance:
initial_domain = self.instance.domains.first()
if initial_domain:
kwargs['initial'].setdefault('domain', initial_domain.domainname)
super().__init__(*args, **kwargs)
if not self.change_slug:
self.fields['slug'].widget.attrs['readonly'] = 'readonly'
if self.domain:
self.fields['domain'] = forms.CharField(
max_length=255,
label=_('Custom domain'),
required=False,
help_text=_('You need to configure the custom domain in the webserver beforehand.')
)
def clean_domain(self):
d = self.cleaned_data['domain']
if d:
if d == urlparse(settings.SITE_URL).hostname:
raise ValidationError(
_('You cannot choose the base domain of this installation.')
)
if KnownDomain.objects.filter(domainname=d).exclude(organizer=self.instance.pk,
event__isnull=True).exists():
raise ValidationError(
_('This domain is already in use for a different event or organizer.')
)
return d
def clean_slug(self):
if self.change_slug:
return self.cleaned_data['slug']
return self.instance.slug
def save(self, commit=True):
instance = super().save(commit)
if self.domain:
current_domain = instance.domains.first()
if self.cleaned_data['domain']:
if current_domain and current_domain.domainname != self.cleaned_data['domain']:
current_domain.delete()
KnownDomain.objects.create(organizer=instance, domainname=self.cleaned_data['domain'])
elif not current_domain:
KnownDomain.objects.create(organizer=instance, domainname=self.cleaned_data['domain'])
elif current_domain:
current_domain.delete()
instance.cache.clear()
for ev in instance.events.all():
ev.cache.clear()
return instance
class EventMetaPropertyForm(forms.ModelForm):
class Meta:
model = EventMetaProperty
fields = ['name', 'default', 'required', 'protected', 'allowed_values']
widgets = {
'default': forms.TextInput()
}
class TeamForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
organizer = kwargs.pop('organizer')
super().__init__(*args, **kwargs)
self.fields['limit_events'].queryset = organizer.events.all().order_by(
'-has_subevents', '-date_from'
)
class Meta:
model = Team
fields = ['name', 'all_events', 'limit_events', 'can_create_events',
'can_change_teams', 'can_change_organizer_settings',
'can_manage_gift_cards',
'can_change_event_settings', 'can_change_items',
'can_view_orders', 'can_change_orders',
'can_view_vouchers', 'can_change_vouchers']
widgets = {
'limit_events': forms.CheckboxSelectMultiple(attrs={
'data-inverse-dependency': '#id_all_events',
'class': 'scrolling-multiple-choice scrolling-multiple-choice-large',
}),
}
field_classes = {
'limit_events': SafeEventMultipleChoiceField
}
def clean(self):
data = super().clean()
if self.instance.pk and not data['can_change_teams']:
if not self.instance.organizer.teams.exclude(pk=self.instance.pk).filter(
can_change_teams=True, members__isnull=False
).exists():
raise ValidationError(_('The changes could not be saved because there would be no remaining team with '
'the permission to change teams and permissions.'))
return data
class GateForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
kwargs.pop('organizer')
super().__init__(*args, **kwargs)
class Meta:
model = Gate
fields = ['name', 'identifier']
class DeviceForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
organizer = kwargs.pop('organizer')
super().__init__(*args, **kwargs)
self.fields['limit_events'].queryset = organizer.events.all().order_by(
'-has_subevents', '-date_from'
)
self.fields['gate'].queryset = organizer.gates.all()
def clean(self):
d = super().clean()
if not d['all_events'] and not d['limit_events']:
raise ValidationError(_('Your device will not have access to anything, please select some events.'))
return d
class Meta:
model = Device
fields = ['name', 'all_events', 'limit_events', 'security_profile', 'gate']
widgets = {
'limit_events': forms.CheckboxSelectMultiple(attrs={
'data-inverse-dependency': '#id_all_events',
'class': 'scrolling-multiple-choice scrolling-multiple-choice-large',
}),
}
field_classes = {
'limit_events': SafeEventMultipleChoiceField
}
class OrganizerSettingsForm(SettingsForm):
auto_fields = [
'contact_mail',
'imprint_url',
'organizer_info_text',
'event_list_type',
'event_list_availability',
'organizer_homepage_text',
'organizer_link_back',
'organizer_logo_image_large',
'giftcard_length',
'giftcard_expiry_years',
'locales',
'region',
'event_team_provisioning',
'primary_color',
'theme_color_success',
'theme_color_danger',
'theme_color_background',
'theme_round_borders',
'primary_font'
]
organizer_logo_image = ExtFileField(
label=_('Header image'),
ext_whitelist=(".png", ".jpg", ".gif", ".jpeg"),
max_size=10 * 1024 * 1024,
required=False,
help_text=_('If you provide a logo image, we will by default not show your organization name '
'in the page header. By default, we show your logo with a size of up to 1140x120 pixels. You '
'can increase the size with the setting below. We recommend not using small details on the picture '
'as it will be resized on smaller screens.')
)
favicon = ExtFileField(
label=_('Favicon'),
ext_whitelist=(".ico", ".png", ".jpg", ".gif", ".jpeg"),
required=False,
max_size=1 * 1024 * 1024,
help_text=_('If you provide a favicon, we will show it instead of the default pretix icon. '
'We recommend a size of at least 200x200px to accommodate most devices.')
)
class WebHookForm(forms.ModelForm):
events = forms.MultipleChoiceField(
widget=forms.CheckboxSelectMultiple,
label=pgettext_lazy('webhooks', 'Event types')
)
def __init__(self, *args, **kwargs):
organizer = kwargs.pop('organizer')
super().__init__(*args, **kwargs)
self.fields['limit_events'].queryset = organizer.events.all()
self.fields['events'].choices = [
(
a.action_type,
mark_safe('{} – <code>{}</code>'.format(a.verbose_name, a.action_type))
) for a in get_all_webhook_events().values()
]
if self.instance:
self.fields['events'].initial = list(self.instance.listeners.values_list('action_type', flat=True))
class Meta:
model = WebHook
fields = ['target_url', 'enabled', 'all_events', 'limit_events']
widgets = {
'limit_events': forms.CheckboxSelectMultiple(attrs={
'data-inverse-dependency': '#id_all_events'
}),
}
field_classes = {
'limit_events': SafeModelMultipleChoiceField
}
class GiftCardCreateForm(forms.ModelForm):
value = forms.DecimalField(
label=_('Gift card value'),
min_value=Decimal('0.00')
)
def __init__(self, *args, **kwargs):
self.organizer = kwargs.pop('organizer')
initial = kwargs.pop('initial', {})
initial['expires'] = self.organizer.default_gift_card_expiry
kwargs['initial'] = initial
super().__init__(*args, **kwargs)
def clean_secret(self):
s = self.cleaned_data['secret']
if GiftCard.objects.filter(
secret__iexact=s
).filter(
Q(issuer=self.organizer) | Q(issuer__gift_card_collector_acceptance__collector=self.organizer)
).exists():
raise ValidationError(
_('A gift card with the same secret already exists in your or an affiliated organizer account.')
)
return s
class Meta:
model = GiftCard
fields = ['secret', 'currency', 'testmode', 'expires', 'conditions']
field_classes = {
'expires': SplitDateTimeField
}
widgets = {
'expires': SplitDateTimePickerWidget,
'conditions': forms.Textarea(attrs={"rows": 2})
}
class GiftCardUpdateForm(forms.ModelForm):
class Meta:
model = GiftCard
fields = ['expires', 'conditions']
field_classes = {
'expires': SplitDateTimeField
}
widgets = {
'expires': SplitDateTimePickerWidget,
'conditions': forms.Textarea(attrs={"rows": 2})
}
|
from setuptools import setup
from setuptools import find_packages
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAJOR_VERSION = '0'
MINOR_VERSION = '11'
MICRO_VERSION = '214'
VERSION = "{}.{}.{}".format(MAJOR_VERSION, MINOR_VERSION, MICRO_VERSION)
setup(name='yagmail',
version=VERSION,
description='Yet Another GMAIL client',
long_description=LONG_DESCRIPTION,
url='https://github.com/kootenpv/yagmail',
author='Pascal van Kooten',
author_email='kootenpv@gmail.com',
license='MIT',
extras_require={
"all": ["keyring"]
},
keywords='email mime automatic html attachment',
entry_points={
'console_scripts': ['yagmail = yagmail.__main__:main']
},
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Customer Service',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: Microsoft',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Unix',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Communications :: Email',
'Topic :: Communications :: Email :: Email Clients (MUA)',
'Topic :: Software Development',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Debuggers',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Software Distribution',
'Topic :: System :: Systems Administration',
'Topic :: Utilities'
],
packages=find_packages(),
zip_safe=False,
platforms='any')
|
#
# Loop transformation submodule that enables pragma directive insertions.
#
import sys
import module.loop.submodule.submodule, transformator
#---------------------------------------------------------------------
class Pragma(module.loop.submodule.submodule.SubModule):
'''The pragma directive insertion submodule'''
def __init__(self, perf_params = None, transf_args = None, stmt = None):
'''To instantiate a pragma insertion submodule'''
module.loop.submodule.submodule.SubModule.__init__(self, perf_params, transf_args, stmt)
#-----------------------------------------------------------------
def readTransfArgs(self, perf_params, transf_args):
'''Process the given transformation arguments'''
# all expected argument names
PRAGMAS = 'pragmas'
# all expected transformation arguments
pragmas = []
# iterate over all transformation arguments
for aname, rhs, line_no in transf_args:
# evaluate the RHS expression
try:
rhs = eval(rhs, perf_params)
except Exception, e:
print 'error:%s: failed to evaluate the argument expression: %s' % (line_no, rhs)
print ' --> %s: %s' % (e.__class__.__name__, e)
sys.exit(1)
# pragma directives
if aname == PRAGMAS:
pragmas = (rhs, line_no)
# unknown argument name
else:
print 'error:%s: unrecognized transformation argument: "%s"' % (line_no, aname)
sys.exit(1)
# check semantics of the transformation arguments
pragmas, = self.checkTransfArgs(pragmas)
# return information about the transformation arguments
return (pragmas, )
#-----------------------------------------------------------------
def checkTransfArgs(self, pragmas):
'''Check the semantics of the given transformation arguments'''
# evaluate the pragma directives
rhs, line_no = pragmas
if isinstance(rhs, str):
pragmas = [rhs]
else:
if ((not isinstance(rhs, list) and not isinstance(rhs, tuple)) or
not reduce(lambda x,y: x and y, map(lambda x: isinstance(x, str), rhs), True)):
print ('error:%s: pragma directives must be a list/tuple of strings: %s' %
(line_no, rhs))
sys.exit(1)
pragmas = rhs
# return information about the transformation arguments
return (pragmas, )
#-----------------------------------------------------------------
def insertPragmas(self, pragmas, stmt):
'''To apply pragma directive insertion'''
# perform the pragma directive insertion
t = transformator.Transformator(pragmas, stmt)
transformed_stmt = t.transform()
# return the transformed statement
return transformed_stmt
#-----------------------------------------------------------------
def transform(self):
'''To perform code transformations'''
# read all transformation arguments
pragmas, = self.readTransfArgs(self.perf_params, self.transf_args)
# perform the pragma directive insertion
transformed_stmt = self.insertPragmas(pragmas, self.stmt)
# return the transformed statement
return transformed_stmt
|
from jinja2 import Template
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.sessions import SessionMiddleware
from starlette.responses import PlainTextResponse, HTMLResponse
from starlette_wtf import StarletteForm, CSRFProtectMiddleware, csrf_protect
from wtforms import StringField
from wtforms.validators import DataRequired
class MyForm(StarletteForm):
name = StringField('name', validators=[DataRequired()])
template = Template('''
<html>
<body>
<form method="post" novalidate>
{{ form.csrf_token }}
<div>
{{ form.name(placeholder='Name') }}
{% if form.name.errors -%}
<span>{{ form.name.errors[0] }}</span>
{%- endif %}
</div>
<button type="submit">Submit</button>
</form>
</body>
</html>
''')
app = Starlette(middleware=[
Middleware(SessionMiddleware, secret_key='***REPLACEME1***'),
Middleware(CSRFProtectMiddleware, csrf_secret='***REPLACEME2***')
])
@app.route('/', methods=['GET', 'POST'])
@csrf_protect
async def index(request):
"""GET|POST /: form handler
"""
form = await MyForm.from_formdata(request)
if form.validate_on_submit():
return PlainTextResponse('SUCCESS')
html = template.render(form=form)
return HTMLResponse(html)
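if __name__ == '__main__':
    # Hedged usage sketch (assumes uvicorn is installed; the module name used
    # on a command line would be this file's name, which is not specified here).
    import uvicorn
    uvicorn.run(app, host='127.0.0.1', port=8000)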
|
# import the libraries
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel,QVBoxLayout,QHBoxLayout, QMessageBox, QRadioButton
# create the application and the main window
app=QApplication([])
main_win =QWidget()
main_win.setWindowTitle('Конкурс от Crazy People')
question =QLabel("В каком году канал получил золотую кнопку от YouTube?")
btn_answer1 =QRadioButton('2005')
btn_answer2 =QRadioButton('2010')
btn_answer3 =QRadioButton('2015')
btn_answer4 =QRadioButton('2020')
layout_main=QVBoxLayout()
h1=QHBoxLayout()
h2=QHBoxLayout()
h3=QHBoxLayout()
h1.addWidget(question,alignment =Qt.AlignCenter)
h2.addWidget(btn_answer1,alignment =Qt.AlignCenter)
h2.addWidget(btn_answer2,alignment =Qt.AlignCenter)
h3.addWidget(btn_answer3,alignment =Qt.AlignCenter)
h3.addWidget(btn_answer4,alignment =Qt.AlignCenter)
layout_main.addLayout(h1)
layout_main.addLayout(h2)
layout_main.addLayout(h3)
main_win.setLayout(layout_main)
def win ():
win =QMessageBox()
win.setText('Верно!')
win.exec_()
def lose():
lose =QMessageBox()
lose.setText('Нет, в 2015 году. Вы выиграли фирменный плакат')
lose.exec_()
btn_answer1.clicked.connect(lose)
btn_answer2.clicked.connect(lose)
btn_answer3.clicked.connect(win)
btn_answer4.clicked.connect(lose)
main_win.show()
app.exec_()
|
from action_class import Action
place = 'place'
upgrade = 'upgrade'
target = 'target'
top = 'upgrade 1'
middle = 'upgrade 2'
bottom = 'upgrade 3'
ouch_script = [
Action(place, name='sub1', action='sub', position=(708, 540)), # Sub
Action(place, name='sub2', action='sub', position=(984, 545)), # Sub2
Action('start', action='start', cost=0),
Action(place, name='dart1', action='dart', position=(303, 671)), # Dart
Action(place, name='Psi', action='Hero', position=(546, 309)), # Psi
Action(target, name = 'Psi', action='Strong'), # Psi Strong
Action(upgrade, name='sub1', action=bottom), # 001
Action(upgrade, name='sub2', action=bottom), # 001
Action(upgrade, name='sub1', action=middle), # 011
Action(upgrade, name='sub2', action=top), # 101
Action(upgrade, name='sub1', action=middle), # 021
Action(upgrade, name='sub2', action=top), # 201
Action(upgrade, name='sub1', action=bottom), # 022
Action(upgrade, name='sub2', action=bottom), # 202
Action(place, name='alch1', action='alch', position=(1009, 411)), # Alchemist
Action(target, name = 'alch1', action='Strong'), # Strong
Action(upgrade, name='alch1', action=top), # 100
Action(upgrade, name='alch1', action=top), # 200
Action(upgrade, name='sub2', action=bottom), # 203 Sub2
Action(place, name='ace1', action='ace', position=(845, 310)), # Ace
Action(upgrade, name='ace1', action= bottom), # 001
Action(upgrade, name='ace1', action=bottom), # 002
Action(upgrade, name='ace1', action=bottom), # 003
Action(place, name='village1', action='Village', position=(990, 295)), # Village
Action(upgrade, name='village1', action= middle), # 010
Action(upgrade, name='village1', action= middle), # 020
Action(upgrade, name='ace1', action=top), # 103 Ace
Action(upgrade, name='ace1', action=top), # 203
Action(upgrade, name='sub2', action=bottom), # 204 Sub2
Action(upgrade, name='sub1', action=middle), # 023 Sub2
Action(upgrade, name='alch1', action=top), # 300 Alch
Action(upgrade, name='alch1', action=top), # 400
Action(upgrade, name='alch1', action=bottom), # 401
Action(upgrade, name='ace1', action=bottom), # 204 Ace
Action(place, name='sniper1', action='sniper', position=(85, 676)), # Sniper
Action(upgrade, name='sniper1', action= top), # 100
Action(target, name = 'sniper1', action='Strong'),
Action(upgrade, name='sniper1', action=top), # 200
Action(upgrade, name='sniper1', action=top), # 300
Action(upgrade, name='sniper1', action=top), # 400
Action(upgrade, name='sniper1', action=bottom), # 401
Action(upgrade, name='sniper1', action=bottom), # 402
Action('finish', action='finish', cost=0)
]
|
def run():
my_list = [1, "Hello", True, 4.5]
my_dict = {"firstname":"Facundo", "lastname":"Garcia"}
superList = [
{"firstname":"Facundo", "lastname":"Garcia"},
{"firstname":"Miguel", "lastname":"Torres"},
{"firstname":"José", "lastname":"Rodelo"},
{"firstname":"Susana", "lastname":"Martinez"},
{"firstname":"Luis", "lastname":"Cruz"}
]
superDict = {
"naturalNums": [1,2,3,4,5],
"integerNums": [-1,-2,0,1,2],
"floatingNums": [1.1, 4.5, 6.43]
}
for k, v in superDict.items():
print(k, "-", v)
for innerDict in superList:
for k, v in innerDict.items():
print(k, "-", v)
if __name__ == '__main__':
run()
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
#Key:
# 1=sandstone 2=c_siltstone 3=f_siltstone
# 4=marine_silt_shale 5=mudstone 6=wackestone 7=dolomite
# 8=packstone 9=bafflestone
facies_labels = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS',
'WS', 'D','PS', 'BS']
facies_colors = ['#F4D03F', '#F5B041','#DC7633','#6E2C00', '#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D']
#facies_color_map is a dictionary that maps facies labels
#to their respective colors
facies_color_map = {}
for ind, label in enumerate(facies_labels):
facies_color_map[label] = facies_colors[ind]
def label_facies(row, labels):
return labels[ row['Facies'] -1]
def make_facies_log_plot(logs, facies_colors):
#make sure logs are sorted by depth
logs = logs.sort_values(by='Depth')
cmap_facies = colors.ListedColormap(
facies_colors[0:len(facies_colors)], 'indexed')
ztop=logs.Depth.min(); zbot=logs.Depth.max()
cluster=np.repeat(np.expand_dims(logs['Facies'].values,1), 100, 1)
f, ax = plt.subplots(nrows=1, ncols=6, figsize=(8, 12))
ax[0].plot(logs.GR, logs.Depth, '-g')
ax[1].plot(logs.ILD_log10, logs.Depth, '-')
ax[2].plot(logs.DeltaPHI, logs.Depth, '-', color='0.5')
ax[3].plot(logs.PHIND, logs.Depth, '-', color='r')
ax[4].plot(logs.PE, logs.Depth, '-', color='black')
im=ax[5].imshow(cluster, interpolation='none', aspect='auto',
cmap=cmap_facies,vmin=1,vmax=9)
divider = make_axes_locatable(ax[5])
cax = divider.append_axes("right", size="20%", pad=0.05)
cbar=plt.colorbar(im, cax=cax)
cbar.set_label((17*' ').join([' SS ', 'CSiS', 'FSiS',
'SiSh', ' MS ', ' WS ', ' D ',
' PS ', ' BS ']))
cbar.set_ticks(range(0,1)); cbar.set_ticklabels('')
for i in range(len(ax)-1):
ax[i].set_ylim(ztop,zbot)
ax[i].invert_yaxis()
ax[i].grid()
ax[i].locator_params(axis='x', nbins=3)
ax[0].set_xlabel("GR")
ax[0].set_xlim(logs.GR.min(),logs.GR.max())
ax[1].set_xlabel("ILD_log10")
ax[1].set_xlim(logs.ILD_log10.min(),logs.ILD_log10.max())
ax[2].set_xlabel("DeltaPHI")
ax[2].set_xlim(logs.DeltaPHI.min(),logs.DeltaPHI.max())
ax[3].set_xlabel("PHIND")
ax[3].set_xlim(logs.PHIND.min(),logs.PHIND.max())
ax[4].set_xlabel("PE")
ax[4].set_xlim(logs.PE.min(),logs.PE.max())
ax[5].set_xlabel('Facies')
ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([])
ax[4].set_yticklabels([]); ax[5].set_yticklabels([])
ax[5].set_xticklabels([])
f.suptitle('Well: %s'%logs.iloc[0]['WellName'], fontsize=14,y=0.94)
def compare_facies_plot(logs, compadre, facies_colors):
"""plot the facies plot as a function of depth for both the prediction
and the actual lithofacies labels.
"""
#make sure logs are sorted by depth
logs = logs.sort_values(by='Depth')
cmap_facies = colors.ListedColormap(
facies_colors[0:len(facies_colors)], 'indexed')
ztop=logs.Depth.min(); zbot=logs.Depth.max()
cluster1 = np.repeat(np.expand_dims(logs['Facies'].values,1), 100, 1)
cluster2 = np.repeat(np.expand_dims(logs[compadre].values,1), 100, 1)
f, ax = plt.subplots(nrows=1, ncols=7, figsize=(9, 12))
ax[0].plot(logs.GR, logs.Depth, '-g')
ax[1].plot(logs.ILD_log10, logs.Depth, '-')
ax[2].plot(logs.DeltaPHI, logs.Depth, '-', color='0.5')
ax[3].plot(logs.PHIND, logs.Depth, '-', color='r')
ax[4].plot(logs.PE, logs.Depth, '-', color='black')
im1 = ax[5].imshow(cluster1, interpolation='none', aspect='auto',
cmap=cmap_facies,vmin=1,vmax=9)
im2 = ax[6].imshow(cluster2, interpolation='none', aspect='auto',
cmap=cmap_facies,vmin=1,vmax=9)
divider = make_axes_locatable(ax[6])
cax = divider.append_axes("right", size="20%", pad=0.05)
cbar=plt.colorbar(im2, cax=cax)
cbar.set_label((17*' ').join([' SS ', 'CSiS', 'FSiS',
'SiSh', ' MS ', ' WS ', ' D ',
' PS ', ' BS ']))
cbar.set_ticks(range(0,1)); cbar.set_ticklabels('')
for i in range(len(ax)-2):
ax[i].set_ylim(ztop,zbot)
ax[i].invert_yaxis()
ax[i].grid()
ax[i].locator_params(axis='x', nbins=3)
ax[0].set_xlabel("GR")
ax[0].set_xlim(logs.GR.min(),logs.GR.max())
ax[1].set_xlabel("ILD_log10")
ax[1].set_xlim(logs.ILD_log10.min(),logs.ILD_log10.max())
ax[2].set_xlabel("DeltaPHI")
ax[2].set_xlim(logs.DeltaPHI.min(),logs.DeltaPHI.max())
ax[3].set_xlabel("PHIND")
ax[3].set_xlim(logs.PHIND.min(),logs.PHIND.max())
ax[4].set_xlabel("PE")
ax[4].set_xlim(logs.PE.min(),logs.PE.max())
ax[5].set_xlabel('Facies')
ax[6].set_xlabel(compadre)
ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([])
ax[4].set_yticklabels([]); ax[5].set_yticklabels([])
ax[5].set_xticklabels([])
ax[6].set_xticklabels([])
f.suptitle('Well: %s'%logs.iloc[0]['WellName'], fontsize=14,y=0.94)
|
import wx
import numpy as np
from imagepy.core.engine import Tool, Filter
import scipy.ndimage as nimg
class ScaleTool(Tool):
def __init__(self, plg):
self.plg = plg
self.para = plg.para
self.moving = False
def snap(self, x, y, lim):
plg = self.plg
if abs(x-plg.lt)<lim and abs(y-(plg.tp+plg.bm)/2)<lim:return 'l'
if abs(x-plg.rt)<lim and abs(y-(plg.tp+plg.bm)/2)<lim:return 'r'
if abs(x-(plg.lt+plg.rt)/2)<lim and abs(y-plg.tp)<lim:return 't'
if abs(x-(plg.lt+plg.rt)/2)<lim and abs(y-plg.bm)<lim:return 'b'
if abs(x-plg.lt)<lim and abs(y-plg.tp)<lim:return 'lt'
if abs(x-plg.rt)<lim and abs(y-plg.bm)<lim:return 'rb'
if abs(x-plg.rt)<lim and abs(y-plg.tp)<lim:return 'rt'
if abs(x-plg.lt)<lim and abs(y-plg.bm)<lim:return 'lb'
if (x-plg.lt)*(x-plg.rt)<0 and (y-plg.tp)*(y-plg.bm)<0:
self.ox, self.oy = x, y
return True
return False
def mouse_down(self, ips, x, y, btn, **key):
lim = 5.0/key['canvas'].get_scale()
self.moving = self.snap(x, y, lim)
print(self.moving)
def mouse_up(self, ips, x, y, btn, **key):
if self.moving : self.plg.preview(ips, self.para)
def mouse_move(self, ips, x, y, btn, **key):
lim = 5.0/key['canvas'].get_scale()
if btn==None:
self.cursor = wx.CURSOR_CROSS
if isinstance(self.snap(x, y, lim), str):
self.cursor = wx.CURSOR_HAND
elif self.moving==True:
self.plg.lt+=x-self.ox
self.plg.rt+=x-self.ox
self.plg.bm+=y-self.oy
self.plg.tp+=y-self.oy
self.ox, self.oy = x, y
self.plg.count()
self.plg.dialog.reset()
ips.update = True
elif self.moving != False:
print("scale_tol.ScaleTool.mouse_move")
if 'l' in self.moving:self.plg.lt = x
if 'r' in self.moving:self.plg.rt = x
if 't' in self.moving:self.plg.tp = y
if 'b' in self.moving:self.plg.bm = y
self.plg.count()
self.plg.dialog.reset()
ips.update = True
class Plugin(Filter):
modal = False
title = 'Scale'
note = ['all', 'auto_msk', 'auto_snap', 'preview']
para = {'kx': 1, 'ky':1, 'ox':0, 'oy':0, 'img':True, 'msk':False}
view = [(float, (-100,100), 3, 'KX', 'kx', ''),
(float, (-100,100), 3, 'KY', 'ky', ''),
(int, (-10000,10000), 0, 'OffX', 'ox', 'pix'),
(int, (-10000,10000), 0, 'OffY', 'oy', 'pix'),
(bool, 'scale image', 'img'),
(bool, 'scale mask', 'msk')]
def draw(self, dc, f, **key):
body = [(self.lt,self.bm),(self.rt,self.bm),
(self.rt,self.tp),(self.lt,self.tp),(self.lt,self.bm)]
dc.SetPen(wx.Pen((0,255,0), width=1, style=wx.SOLID))
dc.DrawLines([f(*i) for i in body])
for i in body:dc.DrawCircle(f(*i),2)
dc.DrawCircle(f(self.lt, (self.tp+self.bm)/2),2)
dc.DrawCircle(f(self.rt, (self.tp+self.bm)/2),2)
dc.DrawCircle(f((self.lt+self.rt)/2, self.tp),2)
dc.DrawCircle(f((self.lt+self.rt)/2, self.bm),2)
def load(self, ips):
self.bufroi = ips.roi
self.lt, self.tp, self.rt, self.bm = 0, 0, ips.size[1], ips.size[0]
if ips.roi!=None:
box = ips.roi.get_box()
if box[0]!=box[2] and box[1]!=box[3]:
self.lt, self.tp, self.rt, self.bm = box
self.orio = ((self.lt+self.rt)/2,(self.tp+self.bm)/2)
self.oriw, self.orih = self.rt - self.lt, self.tp - self.bm
self.para['ox'] = (self.lt+self.rt)/2
self.para['oy'] = (self.tp+self.bm)/2
self.para['kx'] = self.para['ky'] = 1
ips.mark = self
ips.update = True
ips.tool = ScaleTool(self)
return True
def count(self, dir=True):
if dir:
self.para['ox'] = int((self.lt+self.rt)/2)
self.para['oy'] = int((self.tp+self.bm)/2)
self.para['kx'] = (self.rt-self.lt)*1.0/self.oriw
self.para['ky'] = (self.tp-self.bm)*1.0/self.orih
else:
self.lt = self.para['ox']-self.oriw*self.para['kx']/2
self.rt = self.para['ox']+self.oriw*self.para['kx']/2
self.bm = self.para['oy']-self.orih*self.para['ky']/2
self.tp = self.para['oy']+self.orih*self.para['ky']/2
def ok(self, ips, para=None):
Filter.ok(self, ips, para)
ips.mark = None
ips.tool = None
def cancel(self, ips):
Filter.cancel(self, ips)
ips.roi = self.bufroi
ips.mark = None
ips.tool = None
ips.update = 'pix'
def run(self, ips, img, buf, para = None):
if para == None: para = self.para
self.count(False)
trans = np.array([[1/self.para['ky'],0],[0,1/self.para['kx']]])
o = np.array([self.para['oy'], self.para['ox']])
offset = self.orio[::-1]-trans.dot(o)
if self.para['img']:
nimg.affine_transform(img, trans, output=buf, offset=offset)
trans = np.array([[self.para['kx'],0],[0, self.para['ky']]])
offset = o[::-1]-trans.dot(self.orio)
if self.para['msk'] and self.bufroi!=None:ips.roi = self.bufroi.affine(trans, offset)
if self.para['img'] and not ips.get_msk('out') is None:
buf[ips.get_msk('out')] = img[ips.get_msk('out')]
ips.update = True
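# A minimal, self-contained sketch (not part of the plugin) of how scipy.ndimage.affine_transform
# is driven in run() above: each OUTPUT coordinate o is sampled from the INPUT at matrix @ o + offset,
# so scaling by a factor k about a centre c uses matrix = diag(1/k) and offset = c - matrix @ c.
# The test image and numbers below are illustrative assumptions only.
if __name__ == '__main__':
    import numpy as np
    import scipy.ndimage as nimg
    img = np.zeros((8, 8)); img[2:6, 2:6] = 1.0   # small test image with a bright square
    k = 2.0                                       # assumed scale factor about the centre
    c = np.array([3.5, 3.5])                      # image centre
    trans = np.diag([1.0 / k, 1.0 / k])           # output -> input scaling matrix
    offset = c - trans.dot(c)                     # keeps the centre fixed
    out = nimg.affine_transform(img, trans, offset=offset)
    print(out.round(2))                           # the square appears roughly twice as large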
|
class std_logic():
"""
class to represent a digital bit, supporting the same 9 values as an IEEE 1164 std_logic signal.
====== ===============
Value Interpretation
------ ---------------
U Uninitialized
X Unknown
0 Strong 0
1 Strong 1
Z High Impedance
W Weak unknown logic
L Weak logic 0
H Weak logic 1
- Don't care
====== ===============
Refer to https://en.wikipedia.org/wiki/IEEE_1164 for more details
"""
def __init__(self,initialvalue='U'):
"""
:param initialvalue: value to be loaded into the bit
:type initialvalue: int, bool, str
"""
self._value = 'U'
self.set(value=initialvalue)
def __str__(self):
return self._value
def __repr__(self):
base_repr = super().__repr__()
return base_repr[:-2] + ':%s>'%self._value
def __eq__(self, other):
if issubclass(other.__class__,std_logic):
return self._value == other._value
else:
return NotImplemented
def __and__(self,other):
return_value = NotImplemented
if issubclass(other.__class__,std_logic):
"""
truth table from std_logic_1164-body.vhdl
----------------------------------------------------
| U X 0 1 Z W L H - | |
----------------------------------------------------
( 'U', 'U', '0', 'U', 'U', 'U', '0', 'U', 'U' ), -- | U |
( 'U', 'X', '0', 'X', 'X', 'X', '0', 'X', 'X' ), -- | X |
( '0', '0', '0', '0', '0', '0', '0', '0', '0' ), -- | 0 |
( 'U', 'X', '0', '1', 'X', 'X', '0', '1', 'X' ), -- | 1 |
( 'U', 'X', '0', 'X', 'X', 'X', '0', 'X', 'X' ), -- | Z |
( 'U', 'X', '0', 'X', 'X', 'X', '0', 'X', 'X' ), -- | W |
( '0', '0', '0', '0', '0', '0', '0', '0', '0' ), -- | L |
( 'U', 'X', '0', '1', 'X', 'X', '0', '1', 'X' ), -- | H |
( 'U', 'X', '0', 'X', 'X', 'X', '0', 'X', 'X' ) -- | - |
"""
if self == std_logic('U'):
if other == std_logic('0') or other == std_logic('L'):
return_value = std_logic(0)
else:
return_value = std_logic('U')
elif self == std_logic('X') or self == std_logic('-') or self == std_logic('W') or self == std_logic('Z'):
if other == std_logic('U'):
return_value = std_logic('U')
elif other == std_logic('0') or other == std_logic('L'):
return_value = std_logic(0)
else:
return_value = std_logic('X')
elif self == std_logic('0') or self == std_logic('L'):
return_value = std_logic(0)
elif self == std_logic('1') or self == std_logic('H'):
if other == std_logic('U'):
return_value = std_logic('U')
elif other == std_logic('0') or other == std_logic('L'):
return_value = std_logic(0)
elif other == std_logic('1') or other == std_logic('H'):
return_value = std_logic(1)
else:
return_value = std_logic('X')
else:
raise TypeError('can not perform operation on classes')
return return_value
def __xor__(self, other):
"""
performs a bitwise xor operation
:param other:
:return: self ^ other
"""
return_value = NotImplemented
if issubclass(other.__class__,std_logic):
"""
truth table from std_logic_1164-body.vhdl
----------------------------------------------------
| U X 0 1 Z W L H - | |
----------------------------------------------------
('U', 'U', 'U', 'U', 'U', 'U', 'U', 'U', 'U'), -- | U |
('U', 'X', 'X', 'X', 'X', 'X', 'X', 'X', 'X'), -- | X |
('U', 'X', '0', '1', 'X', 'X', '0', '1', 'X'), -- | 0 |
('U', 'X', '1', '0', 'X', 'X', '1', '0', 'X'), -- | 1 |
('U', 'X', 'X', 'X', 'X', 'X', 'X', 'X', 'X'), -- | Z |
('U', 'X', 'X', 'X', 'X', 'X', 'X', 'X', 'X'), -- | W |
('U', 'X', '0', '1', 'X', 'X', '0', '1', 'X'), -- | L |
('U', 'X', '1', '0', 'X', 'X', '1', '0', 'X'), -- | H |
('U', 'X', 'X', 'X', 'X', 'X', 'X', 'X', 'X') -- | - |
);
"""
if self == std_logic('U'):
return_value = std_logic('U')
elif self == std_logic('X') or self == std_logic('-') or self == std_logic('W') or self == std_logic('Z'):
if other == std_logic('U'):
return_value = std_logic('U')
else:
return_value = std_logic('X')
elif self == std_logic('1') or self == std_logic('H'):
if other == std_logic('U'):
return_value = std_logic('U')
elif other == std_logic('0') or other == std_logic('L'):
return_value = std_logic(1)
elif other == std_logic('1') or other == std_logic('H'):
return_value = std_logic(0)
else:
return_value = std_logic('X')
elif self == std_logic('0') or self == std_logic('L'):
if other == std_logic('U'):
return_value = std_logic('U')
elif other == std_logic('0') or other == std_logic('L'):
return_value = std_logic(0)
elif other == std_logic('1') or other == std_logic('H'):
return_value = std_logic(1)
else:
return_value = std_logic('X')
else:
raise TypeError('can not perform operation on classes')
return return_value
def __or__(self,other):
return_value = NotImplemented
if issubclass(other.__class__,std_logic):
"""
truth table from std_logic_1164-body.vhdl
----------------------------------------------------
| U X 0 1 Z W L H - | |
----------------------------------------------------
('U', 'U', 'U', '1', 'U', 'U', 'U', '1', 'U'), -- | U |
('U', 'X', 'X', '1', 'X', 'X', 'X', '1', 'X'), -- | X |
('U', 'X', '0', '1', 'X', 'X', '0', '1', 'X'), -- | 0 |
('1', '1', '1', '1', '1', '1', '1', '1', '1'), -- | 1 |
('U', 'X', 'X', '1', 'X', 'X', 'X', '1', 'X'), -- | Z |
('U', 'X', 'X', '1', 'X', 'X', 'X', '1', 'X'), -- | W |
('U', 'X', '0', '1', 'X', 'X', '0', '1', 'X'), -- | L |
('1', '1', '1', '1', '1', '1', '1', '1', '1'), -- | H |
('U', 'X', 'X', '1', 'X', 'X', 'X', '1', 'X') -- | - |
)
"""
if self == std_logic('U'):
if other == std_logic('1') or other == std_logic('H'):
return_value = std_logic(1)
else:
return_value = std_logic('U')
elif self == std_logic('X') or self == std_logic('-') or self == std_logic('W') or self == std_logic('Z'):
if other == std_logic('U'):
return_value = std_logic('U')
elif other == std_logic('1') or other == std_logic('H'):
return_value = std_logic(1)
else:
return_value = std_logic('X')
elif self == std_logic('1') or self == std_logic('H'):
return_value = std_logic(1)
elif self == std_logic('0') or self == std_logic('L'):
if other == std_logic('U'):
return_value = std_logic('U')
elif other == std_logic('0') or other == std_logic('L'):
return_value = std_logic(0)
elif other == std_logic('1') or other == std_logic('H'):
return_value = std_logic(1)
else:
return_value = std_logic('X')
else:
raise TypeError('can not perform operation on classes')
return return_value
def __invert__(self):
"""
truth table from std_logic_1164-body.vhdl
-------------------------------------------------
| U X 0 1 Z W L H - |
-------------------------------------------------
('U', 'X', '1', '0', 'X', 'X', '1', '0', 'X')
"""
if self == std_logic('U'):
return_value = std_logic('U')
elif self == std_logic('X') or self == std_logic('-') or self == std_logic('W') or self == std_logic('Z'):
return_value = std_logic('X')
elif self == std_logic('0') or self == std_logic('L'):
return_value = std_logic(1)
elif self == std_logic('1') or self == std_logic('H'):
return_value = std_logic(0)
return return_value
def set(self,value):
"""
in place value set
:param value: value to be loaded into the bit
:type value: int, bool, str
"""
if isinstance(value,str):
if len(value) != 1:
raise ValueError('length is not 1')
if ((value == 'U') or
(value == 'X') or
(value == '0') or
(value == '1') or
(value == 'Z') or
(value == 'W') or
(value == 'L') or
(value == 'H') or
(value == '-')):
self._value = value
else:
raise ValueError('Unsupported value, only U,X,0,1,Z,W,L,H or - is permitted')
elif isinstance(value,bool):
if value is False:
self._value = '0'
elif value is True:
self._value = '1'
else:
raise ValueError('Illegal boolean value')
elif isinstance(value,int):
if (value == 0) or (value == 1):
self._value = str(value)
assert (self._value == '1') or (self._value == '0')
else:
raise ValueError('Unsupported integer value, only 0 or 1 is permitted')
else:
raise ValueError('Unsupported type')
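# A minimal usage sketch (not part of the original module): it simply exercises the
# IEEE 1164 truth tables implemented by the operators above.
if __name__ == '__main__':
    a = std_logic('1')
    b = std_logic('H')            # weak 1 behaves like a driven 1 in the tables
    print(a & b)                  # 1
    print(a ^ b)                  # 0
    print(std_logic('U') | b)     # 1 (anything OR-ed with a driven 1 is 1)
    print(~std_logic('L'))        # 1 (weak 0 inverts to strong 1)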
|
#!/usr/bin/env python
"""
Predefined bluesky scan plans
"""
import numpy as np
import bluesky.plans as bp
import bluesky.preprocessors as bpp
import bluesky.plan_stubs as bps
from .utility import load_config
#@bpp.run_decorator()
def collect_white_field(experiment, cfg_tomo, atfront=True):
"""
Collect white/flat field images by moving the sample out of the FOV
"""
# unpack devices
det = experiment.det
tomostage = experiment.tomostage
# move sample out of the way
_x = cfg_tomo['fronte_white_ksamX'] if atfront else cfg_tomo['back_white_ksamX']
_z = cfg_tomo['fronte_white_ksamZ'] if atfront else cfg_tomo['back_white_ksamZ']
yield from bps.mv(tomostage.ksamX, _x)
yield from bps.mv(tomostage.ksamZ, _z)
# setup detector
yield from bps.mv(det.hdf1.nd_array_port, 'PROC1')
yield from bps.mv(det.tiff1.nd_array_port, 'PROC1')
yield from bps.mv(det.proc1.enable, 1)
yield from bps.mv(det.proc1.reset_filter, 1)
yield from bps.mv(det.proc1.num_filter, cfg_tomo['n_frames'])
yield from bps.mv(det.cam.trigger_mode, "Internal")
yield from bps.mv(det.cam.image_mode, "Multiple")
yield from bps.mv(det.cam.num_images, cfg_tomo['n_frames']*cfg_tomo['n_white'])
yield from bps.trigger_and_read([det])
# move sample back to FOV
# NOTE:
# not sure if this will work or not...
yield from bps.mv(tomostage.ksamX, cfg_tomo['initial_ksamX'])
yield from bps.mv(tomostage.ksamZ, cfg_tomo['initial_ksamZ'])
#@bpp.run_decorator()
def collect_dark_field(experiment, cfg_tomo):
"""
Collect dark field images by closing the shutter
"""
det = experiment.det
yield from bps.mv(det.hdf1.nd_array_port, 'PROC1')
yield from bps.mv(det.tiff1.nd_array_port, 'PROC1')
yield from bps.mv(det.proc1.enable, 1)
yield from bps.mv(det.proc1.reset_filter, 1)
yield from bps.mv(det.proc1.num_filter, cfg_tomo['n_frames'])
yield from bps.mv(det.cam.trigger_mode, "Internal")
yield from bps.mv(det.cam.image_mode, "Multiple")
yield from bps.mv(det.cam.num_images, cfg_tomo['n_frames']*cfg_tomo['n_dark'])
yield from bps.trigger_and_read([det])
#@bpp.run_decorator()
def step_scan(experiment, cfg_tomo):
"""
Collect projections with step motion
"""
# unpack devices
det = experiment.det
tomostage = experiment.tomostage
yield from bps.mv(det.hdf1.nd_array_port, 'PROC1')
yield from bps.mv(det.tiff1.nd_array_port, 'PROC1')
yield from bps.mv(det.proc1.enable, 1)
yield from bps.mv(det.proc1.reset_filter, 1)
yield from bps.mv(det.proc1.num_filter, cfg_tomo['n_frames'])
angs = np.arange(
cfg_tomo['omega_start'],
cfg_tomo['omega_end']+cfg_tomo['omega_step']/2,
cfg_tomo['omega_step'],
)
for ang in angs:
yield from bps.checkpoint()
yield from bps.mv(tomostage.preci, ang)
yield from bps.trigger_and_read([det])
#@bpp.run_decorator()
def fly_scan(experiment, cfg_tomo):
"""
Collect projections with fly motion
"""
det = experiment.det
psofly = experiment.psofly
yield from bps.mv(det.hdf1.nd_array_port, 'PG1')
yield from bps.mv(det.tiff1.nd_array_port, 'PG1')
# we are assuming that the global psofly is available
yield from bps.mv(
psofly.start, cfg_tomo['omega_start'],
psofly.end, cfg_tomo['omega_end'],
psofly.scan_delta, abs(cfg_tomo['omega_step']),
psofly.slew_speed, cfg_tomo['slew_speed'],
)
# taxi
yield from bps.mv(psofly.taxi, "Taxi")
yield from bps.mv(
det.cam.num_images, cfg_tomo['n_projections'],
det.cam.trigger_mode, "Overlapped",
)
# start the fly scan
yield from bps.trigger(det, group='fly')
yield from bps.abs_set(psofly.fly, "Fly", group='fly')
yield from bps.wait(group='fly')
def tomo_scan(experiment, cfg):
"""
Tomography scan plan based on given configuration
"""
# unpack devices
det = experiment.det
tomostage = experiment.tomostage
shutter = experiment.shutter
shutter_suspender = experiment.suspend_shutter
cfg = load_config(cfg) if not isinstance(cfg, dict) else cfg
# update the cached motor position in the dict in case exp goes wrong
_cached_position = experiment.cache_motor_position()
# step 0: preparation
acquire_time = cfg['tomo']['acquire_time']
n_white = cfg['tomo']['n_white']
n_dark = cfg['tomo']['n_dark']
angs = np.arange(
cfg['tomo']['omega_start'],
cfg['tomo']['omega_end']+cfg['tomo']['omega_step']/2,
cfg['tomo']['omega_step'],
)
n_projections = len(angs)
cfg['tomo']['n_projections'] = n_projections
total_images = n_white + n_projections + n_white + n_dark
fp = cfg['output']['filepath']
fn = cfg['output']['fileprefix']
# calculate slew speed for fly scan
# https://github.com/decarlof/tomo2bm/blob/master/flir/libs/aps2bm_lib.py
# TODO: considering blue pixels, use 2BM code as ref
if cfg['tomo']['type'].lower() == 'fly':
scan_time = (acquire_time+cfg['tomo']['readout_time'])*n_projections
slew_speed = (angs.max() - angs.min())/scan_time
cfg['tomo']['slew_speed'] = slew_speed
# need to make sure that the sample out position is the same for both front and back
x0, z0 = tomostage.ksamX.position, tomostage.ksamZ.position
dfx, dfz = cfg['tomo']['sample_out_position']['samX'], cfg['tomo']['sample_out_position']['samZ']
rotang = np.radians(cfg['tomo']['omega_end']-cfg['tomo']['omega_start'])
rotm = np.array([[ np.cos(rotang), np.sin(rotang)],
[-np.sin(rotang), np.cos(rotang)]])
dbxz = np.dot(rotm, np.array([dfx, dfz]))
dbx = dbxz[0] if abs(dbxz[0]) > 1e-8 else 0.0
dbz = dbxz[1] if abs(dbxz[1]) > 1e-8 else 0.0
# now put the value to dict
cfg['tomo']['initial_ksamX'] = x0
cfg['tomo']['initial_ksamZ'] = z0
cfg['tomo']['fronte_white_ksamX'] = x0 + dfx
cfg['tomo']['fronte_white_ksamZ'] = z0 + dfz
cfg['tomo']['back_white_ksamX'] = x0 + dbx
cfg['tomo']['back_white_ksamZ'] = z0 + dbz
@bpp.run_decorator()
@bpp.stage_decorator([det])
def scan_closure():
# open shutter for beam
yield from bps.mv(shutter, 'open')
yield from bps.install_suspender(shutter_suspender)
# config output
for me in [det.tiff1, det.hdf1]:
yield from bps.mv(me.file_path, fp)
yield from bps.mv(me.file_name, fn)
yield from bps.mv(me.file_write_mode, 2)
yield from bps.mv(me.num_capture, total_images)
yield from bps.mv(me.file_template, ".".join([r"%s%s_%06d",cfg['output']['type'].lower()]))
if cfg['output']['type'] in ['tif', 'tiff']:
yield from bps.mv(det.tiff1.enable, 1)
yield from bps.mv(det.tiff1.capture, 1)
yield from bps.mv(det.hdf1.enable, 0)
elif cfg['output']['type'] in ['hdf', 'hdf1', 'hdf5']:
yield from bps.mv(det.tiff1.enable, 0)
yield from bps.mv(det.hdf1.enable, 1)
yield from bps.mv(det.hdf1.capture, 1)
else:
raise ValueError(f"Unsupported output type {cfg['output']['type']}")
# collect front white field
yield from bps.mv(det.cam.frame_type, 0) # for HDF5 dxchange data structure
yield from collect_white_field(experiment, cfg['tomo'], atfront=True)
# collect projections
yield from bps.mv(det.cam.frame_type, 1) # for HDF5 dxchange data structure
if cfg['tomo']['type'].lower() == 'step':
yield from step_scan(experiment, cfg['tomo'])
elif cfg['tomo']['type'].lower() == 'fly':
yield from fly_scan(experiment, cfg['tomo'])
else:
raise ValueError(f"Unsupported scan type: {cfg['tomo']['type']}")
# collect back white field
yield from bps.mv(det.cam.frame_type, 2) # for HDF5 dxchange data structure
yield from collect_white_field(experiment, cfg['tomo'], atfront=False)
# collect back dark field
yield from bps.mv(det.cam.frame_type, 3) # for HDF5 dxchange data structure
yield from bps.remove_suspender(shutter_suspender)
yield from bps.mv(shutter, "close")
yield from collect_dark_field(experiment, cfg['tomo'])
return (yield from scan_closure())
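# A minimal, hardware-free sketch of the fly-scan bookkeeping performed in tomo_scan above:
# the projection count follows from the omega range and step, and the slew speed is the
# angular range divided by the total exposure-plus-readout time. All numbers are
# illustrative assumptions.
if __name__ == '__main__':
    import numpy as np
    omega_start, omega_end, omega_step = 0.0, 180.0, 0.25
    acquire_time, readout_time = 0.05, 0.01      # seconds per frame (assumed)
    angs = np.arange(omega_start, omega_end + omega_step / 2, omega_step)
    n_projections = len(angs)                    # 721 projections, both endpoints included
    scan_time = (acquire_time + readout_time) * n_projections
    slew_speed = (angs.max() - angs.min()) / scan_time
    print(n_projections, round(slew_speed, 3))   # 721, ~4.16 deg/s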
|
from pymongo import *
from flask import *
from flask_restful import *
import datetime
mongodb_url = "mongodb://Ranuga:ranuga2008@cluster0-shard-00-00.6n3dg.mongodb.net:27017,cluster0-shard-00-01.6n3dg.mongodb.net:27017,cluster0-shard-00-02.6n3dg.mongodb.net:27017/myFirstDatabase?ssl=true&replicaSet=atlas-uo9rgq-shard-0&authSource=admin&retryWrites=true&w=majority"
app = Flask(__name__)
app.debug = True
app.secret_key = "development"
cluster = MongoClient(mongodb_url)
from server.routes import *
|
"""
Classes for GP models with Stan that perform transfer optimization.
"""
from argparse import Namespace
import numpy as np
import copy
from .gp_stan import StanGp
from .regression.transfer_regression import TransferRegression
from ..util.misc_util import dict_to_namespace
class StanTransferGp(StanGp):
"""
GP model with transferred prior mean based on a regression model.
"""
def __init__(self, params=None, data=None, verbose=None):
self.set_params(params)
self.set_verbose(verbose)
self.set_model(data)
def set_params(self, params):
"""Set self.params, the parameters for this model."""
super().set_params(params)
params = dict_to_namespace(params)
assert hasattr(params, 'transfer_config')
self.params.transfer_config = params.transfer_config
def set_model(self, data):
"""Set GP Stan model and regression model."""
self.model = self.get_model()
self.regressor = self.get_regressor(data)
#self.regressor = self.get_proxy_regressor(data) # TODO
def get_regressor(self, data):
"""Return transfer (prior mean) regressor."""
# Define regressor
regressor = TransferRegression(self.params.transfer_config)
if len(data.x) < 1:
regressor = None
else:
mean_errors = []
# TODO: remove extra files such as .DS_STORE (or ignore files that break)
for i, reg in enumerate(regressor.model_fnames):
try:
val_acc = regressor.evaluate_model(reg, data.x)
error = np.mean((data.y - val_acc) ** 2)
mean_errors.append((error, i))
except Exception:
print(f'Transfer model file in tarball did not load: {reg}')
mean_errors.sort()
if mean_errors[0][0] > self.params.transfer_config.get('metric_threshold', 0.6):
regressor.set_best_model(-1)
else:
regressor.set_best_model(mean_errors[0][1])
return regressor
def get_proxy_regressor(self, data):
if not data:
regressor = None
else:
def regressor(x): return np.linalg.norm(x)
return regressor
def transform_data_y(self):
"""Transform data.y using PriorMeanDataTransformer."""
self.dt = PriorMeanDataTransformer(self.data, self.regressor, False)
y_trans = self.dt.transform_y_data()
self.data = Namespace(x=self.data.x, y=y_trans)
def gen_list(self, x_list, z, s, nsamp):
"""
Draw nsamp samples from generative process, given list of inputs
x_list, posterior sample z, and seed s.
Parameters
----------
x_list : list
List of numpy ndarrays each with shape=(self.params.ndimx,)
z : Namespace
Namespace of GP hyperparameters.
s : int
The seed, a positive integer.
nsamp : int
The number of samples to draw from generative process.
Returns
-------
list
A list with len=len(x_list) of numpy ndarrays, each with
shape=(nsamp,).
"""
x_list = self.transform_xin_list(x_list)
pred_list = self.sample_gp_pred(nsamp, x_list)
pred_list = [
self.dt.inv_transform_y_data(pr, x) for pr, x in zip(pred_list, x_list)
]
return pred_list
def postgen_list(self, x_list, s, nsamp):
"""
Draw nsamp samples from posterior predictive distribution, given list
of inputs x_list and seed s.
Parameters
----------
x_list : list
List of numpy ndarrays each with shape=(self.params.ndimx,).
s : int
The seed, a positive integer.
nsamp : int
The number of samples to draw from the posterior predictive
distribution.
Returns
-------
list
A list with len=len(x_list) of numpy ndarrays, each with
shape=(nsamp,).
"""
x_list = self.transform_xin_list(x_list)
pred_list = self.sample_gp_post_pred(
nsamp, x_list, full_cov=True, nloop=np.min([50, nsamp])
)
pred_list = [
self.dt.inv_transform_y_data(pr, x) for pr, x in zip(pred_list, x_list)
]
return pred_list
def __str__(self):
return f'StanTransferGp with params={self.params}'
class PriorMeanDataTransformer:
"""
A class to transform (and inverse transform) data, based on a prior mean regression.
"""
def __init__(self, data, prior_mean_f, verbose=True):
"""
Parameters
----------
data : Namespace
Namespace containing data.
prior_mean_f : function
Prior mean function.
verbose : bool
If True, print description string.
"""
self._set_data(data)
self._set_prior_mean_f(prior_mean_f)
self._set_verbose(verbose)
def _set_data(self, data):
"""Set self.data"""
self.data = data
def _set_prior_mean_f(self, prior_mean_f):
"""Set self.prior_mean_f."""
if prior_mean_f is None:
# Default prior mean function is constant 0 function
def prior_mean_f(x): return 0.
self.prior_mean_f = prior_mean_f
def _set_verbose(self, verbose):
"""Set verbose options."""
self.verbose = verbose
if self.verbose:
self._print_str()
def transform_y_data(self, y_data=None, x_data=None):
"""Transform and return self.data.y"""
# Transform self.data.y into new list
y_trans = [y - self.prior_mean_f(x) for x, y in zip(self.data.x, self.data.y)]
return y_trans
def inv_transform_y_data(self, y_arr, x_single_arr):
"""Return inverse transform of y_arr."""
# Compute prior mean val for the single input
prior_mean_val = self.prior_mean_f(x_single_arr)
# Inverse transform y_arr into list
y_inv_trans_list = [y + prior_mean_val for y in list(y_arr)]
# Transform back to array and return
y_inv_trans = np.array(y_inv_trans_list).reshape(-1)
return y_inv_trans
def _print_str(self):
"""Print a description string."""
print('*PriorMeanDataTransformer')
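# A minimal round-trip sketch for PriorMeanDataTransformer (data and prior mean are
# illustrative assumptions): y values are de-trended by the prior mean and restored
# exactly by the inverse transform.
if __name__ == '__main__':
    import numpy as np
    from argparse import Namespace
    def _prior(x): return 2.0 * float(x[0])          # assumed prior mean: 2 * x
    _data = Namespace(x=[np.array([0.0]), np.array([1.0])], y=[1.5, 3.5])
    _dt = PriorMeanDataTransformer(_data, _prior, verbose=False)
    _y_trans = _dt.transform_y_data()                # [1.5, 1.5]
    _restored = _dt.inv_transform_y_data(np.array(_y_trans[1:]), _data.x[1])
    print(_y_trans, _restored)                       # [1.5, 1.5] [3.5]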
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetManagementLockAtResourceLevelResult',
'AwaitableGetManagementLockAtResourceLevelResult',
'get_management_lock_at_resource_level',
]
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:authorization:getManagementLockAtResourceLevel'.""", DeprecationWarning)
@pulumi.output_type
class GetManagementLockAtResourceLevelResult:
"""
The lock information.
"""
def __init__(__self__, id=None, level=None, name=None, notes=None, owners=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if level and not isinstance(level, str):
raise TypeError("Expected argument 'level' to be a str")
pulumi.set(__self__, "level", level)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if notes and not isinstance(notes, str):
raise TypeError("Expected argument 'notes' to be a str")
pulumi.set(__self__, "notes", notes)
if owners and not isinstance(owners, list):
raise TypeError("Expected argument 'owners' to be a list")
pulumi.set(__self__, "owners", owners)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
The resource ID of the lock.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def level(self) -> str:
"""
The level of the lock. Possible values are: NotSpecified, CanNotDelete, ReadOnly. CanNotDelete means authorized users are able to read and modify the resources, but not delete. ReadOnly means authorized users can only read from a resource, but they can't modify or delete it.
"""
return pulumi.get(self, "level")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the lock.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def notes(self) -> Optional[str]:
"""
Notes about the lock. Maximum of 512 characters.
"""
return pulumi.get(self, "notes")
@property
@pulumi.getter
def owners(self) -> Optional[Sequence['outputs.ManagementLockOwnerResponse']]:
"""
The owners of the lock.
"""
return pulumi.get(self, "owners")
@property
@pulumi.getter
def type(self) -> str:
"""
The resource type of the lock - Microsoft.Authorization/locks.
"""
return pulumi.get(self, "type")
class AwaitableGetManagementLockAtResourceLevelResult(GetManagementLockAtResourceLevelResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetManagementLockAtResourceLevelResult(
id=self.id,
level=self.level,
name=self.name,
notes=self.notes,
owners=self.owners,
type=self.type)
def get_management_lock_at_resource_level(lock_name: Optional[str] = None,
parent_resource_path: Optional[str] = None,
resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
resource_provider_namespace: Optional[str] = None,
resource_type: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetManagementLockAtResourceLevelResult:
"""
The lock information.
Latest API Version: 2016-09-01.
:param str lock_name: The name of lock.
:param str parent_resource_path: An extra path parameter needed in some services, like SQL Databases.
:param str resource_group_name: The name of the resource group.
:param str resource_name: The name of the resource.
:param str resource_provider_namespace: The namespace of the resource provider.
:param str resource_type: The type of the resource.
"""
pulumi.log.warn("""get_management_lock_at_resource_level is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:authorization:getManagementLockAtResourceLevel'.""")
__args__ = dict()
__args__['lockName'] = lock_name
__args__['parentResourcePath'] = parent_resource_path
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
__args__['resourceProviderNamespace'] = resource_provider_namespace
__args__['resourceType'] = resource_type
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:authorization/latest:getManagementLockAtResourceLevel', __args__, opts=opts, typ=GetManagementLockAtResourceLevelResult).value
return AwaitableGetManagementLockAtResourceLevelResult(
id=__ret__.id,
level=__ret__.level,
name=__ret__.name,
notes=__ret__.notes,
owners=__ret__.owners,
type=__ret__.type)
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mvn.utils.img import to_numpy, to_torch
from mvn.utils import multiview
def integrate_tensor_2d(heatmaps, softmax=True):
"""Applies softmax to heatmaps and integrates them to get their's "center of masses"
Args:
heatmaps torch tensor of shape (batch_size, n_heatmaps, h, w): input heatmaps
Returns:
coordinates torch tensor of shape (batch_size, n_heatmaps, 2): coordinates of center of masses of all heatmaps
"""
batch_size, n_heatmaps, h, w = heatmaps.shape
heatmaps = heatmaps.reshape((batch_size, n_heatmaps, -1))
if softmax:
heatmaps = nn.functional.softmax(heatmaps, dim=2)
else:
heatmaps = nn.functional.relu(heatmaps)
heatmaps = heatmaps.reshape((batch_size, n_heatmaps, h, w))
mass_x = heatmaps.sum(dim=2)
mass_y = heatmaps.sum(dim=3)
mass_times_coord_x = mass_x * torch.arange(w).type(torch.float).to(mass_x.device)
mass_times_coord_y = mass_y * torch.arange(h).type(torch.float).to(mass_y.device)
x = mass_times_coord_x.sum(dim=2, keepdim=True)
y = mass_times_coord_y.sum(dim=2, keepdim=True)
if not softmax:
x = x / mass_x.sum(dim=2, keepdim=True)
y = y / mass_y.sum(dim=2, keepdim=True)
coordinates = torch.cat((x, y), dim=2)
coordinates = coordinates.reshape((batch_size, n_heatmaps, 2))
return coordinates
def integrate_tensor_3d(volumes, softmax=True):
batch_size, n_volumes, x_size, y_size, z_size = volumes.shape
volumes = volumes.reshape((batch_size, n_volumes, -1))
if softmax:
volumes = nn.functional.softmax(volumes, dim=2)
else:
volumes = nn.functional.relu(volumes)
volumes = volumes.reshape((batch_size, n_volumes, x_size, y_size, z_size))
mass_x = volumes.sum(dim=3).sum(dim=3)
mass_y = volumes.sum(dim=2).sum(dim=3)
mass_z = volumes.sum(dim=2).sum(dim=2)
mass_times_coord_x = mass_x * torch.arange(x_size).type(torch.float).to(mass_x.device)
mass_times_coord_y = mass_y * torch.arange(y_size).type(torch.float).to(mass_y.device)
mass_times_coord_z = mass_z * torch.arange(z_size).type(torch.float).to(mass_z.device)
x = mass_times_coord_x.sum(dim=2, keepdim=True)
y = mass_times_coord_y.sum(dim=2, keepdim=True)
z = mass_times_coord_z.sum(dim=2, keepdim=True)
if not softmax:
x = x / mass_x.sum(dim=2, keepdim=True)
y = y / mass_y.sum(dim=2, keepdim=True)
z = z / mass_z.sum(dim=2, keepdim=True)
coordinates = torch.cat((x, y, z), dim=2)
coordinates = coordinates.reshape((batch_size, n_volumes, 3))
return coordinates, volumes
def integrate_tensor_3d_with_coordinates(volumes, coord_volumes, softmax=True):
batch_size, n_volumes, x_size, y_size, z_size = volumes.shape
volumes = volumes.reshape((batch_size, n_volumes, -1))
if softmax:
volumes = nn.functional.softmax(volumes, dim=2)
else:
volumes = nn.functional.relu(volumes)
volumes = volumes.reshape((batch_size, n_volumes, x_size, y_size, z_size))
coordinates = torch.einsum("bnxyz, bxyzc -> bnc", volumes, coord_volumes)
return coordinates #, volumes
def unproject_heatmaps(heatmaps, proj_matricies, coord_volumes, volume_aggregation_method='sum', vol_confidences=None):
device = heatmaps.device
batch_size, n_views, n_joints, heatmap_shape = heatmaps.shape[0], heatmaps.shape[1], heatmaps.shape[2], tuple(heatmaps.shape[3:]) # 1,4,32,96x96
volume_shape = coord_volumes.shape[1:4] #64x64x64
volume_batch = torch.zeros(batch_size, n_joints, *volume_shape, device=device) # tensor of shape 1x32x64x64x64
# TODO: speed up this this loop
for batch_i in range(batch_size):
coord_volume = coord_volumes[batch_i] # Bx64x64x64x3 -> 64x64x64x3
grid_coord = coord_volume.reshape((-1, 3)) # 262144x3
volume_batch_to_aggregate = torch.zeros(n_views, n_joints, *volume_shape, device=device) # 4x32x64x64x64
for view_i in range(n_views):
heatmap = heatmaps[batch_i, view_i] # 1x4x32x96x96 -> 32x96x96
heatmap = heatmap.unsqueeze(0) # 1x32x96x96 (add a leading batch dimension)
grid_coord_proj = multiview.project_3d_points_to_image_plane_without_distortion( # 262144x3
proj_matricies[batch_i, view_i], grid_coord, convert_back_to_euclidean=False
)
invalid_mask = grid_coord_proj[:, 2] <= 0.0 # depth must be larger than 0.0; can happen when the subject gets too close to the camera
grid_coord_proj[grid_coord_proj[:, 2] == 0.0, 2] = 1.0 # not to divide by zero
grid_coord_proj = multiview.homogeneous_to_euclidean(grid_coord_proj)
# transform to [-1.0, 1.0] range
grid_coord_proj_transformed = torch.zeros_like(grid_coord_proj) # 262144x2
grid_coord_proj_transformed[:, 0] = 2 * (grid_coord_proj[:, 0] / heatmap_shape[0] - 0.5) # map (0,0)->(96,96) pixel coordinates to normalized coordinates with center (0,0), top-left (-1,-1), bottom-right (1,1)
grid_coord_proj_transformed[:, 1] = 2 * (grid_coord_proj[:, 1] / heatmap_shape[1] - 0.5)
grid_coord_proj = grid_coord_proj_transformed
# prepare to F.grid_sample
grid_coord_proj = grid_coord_proj.unsqueeze(1).unsqueeze(0) # unsqueeze inserts a dimension at the given position: 1x262144x1x2; heatmap is 1x32x96x96
try:
current_volume = F.grid_sample(heatmap, grid_coord_proj, align_corners=True) # 1x32x262144x1 = Heatmap(1x32x96x96), grid_coord_proj(1x262144x1x2)
except TypeError: # old PyTorch
current_volume = F.grid_sample(heatmap, grid_coord_proj)
# zero out non-valid points
current_volume = current_volume.view(n_joints, -1) #32x262144
current_volume[:, invalid_mask] = 0.0
# reshape back to volume
current_volume = current_volume.view(n_joints, *volume_shape) #32x64x64x64
# collect
volume_batch_to_aggregate[view_i] = current_volume
# aggregate resulting volume
if volume_aggregation_method.startswith('conf'):
volume_batch[batch_i] = (volume_batch_to_aggregate * vol_confidences[batch_i].view(n_views, n_joints, 1, 1, 1)).sum(0)
elif volume_aggregation_method == 'sum':
volume_batch[batch_i] = volume_batch_to_aggregate.sum(0)
elif volume_aggregation_method == 'max':
volume_batch[batch_i] = volume_batch_to_aggregate.max(0)[0]
elif volume_aggregation_method == 'softmax':
volume_batch_to_aggregate_softmin = volume_batch_to_aggregate.clone() # 2x32x64x64x64(n_views, n_joints, *volume_shape)
volume_batch_to_aggregate_softmin = volume_batch_to_aggregate_softmin.view(n_views, -1) # reshape
volume_batch_to_aggregate_softmin = nn.functional.softmax(volume_batch_to_aggregate_softmin, dim=0)
volume_batch_to_aggregate_softmin = volume_batch_to_aggregate_softmin.view(n_views, n_joints, *volume_shape) #reshape back
volume_batch[batch_i] = (volume_batch_to_aggregate * volume_batch_to_aggregate_softmin).sum(0)
else:
raise ValueError("Unknown volume_aggregation_method: {}".format(volume_aggregation_method))
return volume_batch
def gaussian_2d_pdf(coords, means, sigmas, normalize=True):
normalization = 1.0
if normalize:
normalization = (2 * np.pi * sigmas[:, 0] * sigmas[:, 0])
exp = torch.exp(-((coords[:, 0] - means[:, 0]) ** 2 / sigmas[:, 0] ** 2 + (coords[:, 1] - means[:, 1]) ** 2 / sigmas[:, 1] ** 2) / 2)
return exp / normalization
def render_points_as_2d_gaussians(points, sigmas, image_shape, normalize=True):
device = points.device
n_points = points.shape[0]
yy, xx = torch.meshgrid(torch.arange(image_shape[0]).to(device), torch.arange(image_shape[1]).to(device))
grid = torch.stack([xx, yy], dim=-1).type(torch.float32)
grid = grid.unsqueeze(0).repeat(n_points, 1, 1, 1) # (n_points, h, w, 2)
grid = grid.reshape((-1, 2))
points = points.unsqueeze(1).unsqueeze(1).repeat(1, image_shape[0], image_shape[1], 1)
points = points.reshape(-1, 2)
sigmas = sigmas.unsqueeze(1).unsqueeze(1).repeat(1, image_shape[0], image_shape[1], 1)
sigmas = sigmas.reshape(-1, 2)
images = gaussian_2d_pdf(grid, points, sigmas, normalize=normalize)
images = images.reshape(n_points, *image_shape)
return images
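# A minimal sanity-check sketch for integrate_tensor_2d above: a heatmap with one sharp
# peak should integrate to (approximately) the peak's (x, y) position. Shapes and values
# are illustrative assumptions.
if __name__ == '__main__':
    import torch
    hm = torch.full((1, 1, 16, 16), -20.0)   # batch=1, one 16x16 heatmap of logits
    hm[0, 0, 5, 9] = 20.0                    # peak at row y=5, column x=9
    print(integrate_tensor_2d(hm))           # ~tensor([[[9., 5.]]]) after softmax weighting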
|
from calendar import timegm
from datetime import date, datetime, time
import sqlite3
from typing import Callable
import julian # type: ignore
def store_time(time_type: str, time_format: str = "") -> None:
if time_type == "seconds":
sqlite3.register_adapter(time, time_to_seconds)
elif time_type == "text":
sqlite3.register_adapter(time, time_to_text(time_format))
else:
raise ValueError(f"Unknown time adapter: '{time_type}'")
def store_date(date_type: str, date_format: str = "") -> None:
if date_type == "julian":
sqlite3.register_adapter(date, date_to_julian)
elif date_type == "posix":
sqlite3.register_adapter(date, date_to_posix)
elif date_type == "text":
sqlite3.register_adapter(date, date_to_text(date_format))
else:
raise ValueError(f"Unknown date adapter: '{date_type}'")
def store_datetime(datetime_type: str, datetime_format: str = "") -> None:
if datetime_type == "julian":
sqlite3.register_adapter(datetime, datetime_to_julian)
elif datetime_type == "posix":
sqlite3.register_adapter(datetime, datetime_to_posix)
elif datetime_type == "text":
sqlite3.register_adapter(datetime, datetime_to_text(datetime_format))
else:
raise ValueError(f"Unknown datetime adapter: '{datetime_type}'")
def time_to_seconds(t: time) -> float:
return (60 * 60 * t.hour) + (60 * t.minute) + t.second + (t.microsecond / 1_000_000)
def time_to_text(format: str) -> Callable[[time], str]:
def _time_to_text(t: time) -> str:
return t.strftime(format)
return _time_to_text
def date_to_posix(d: date) -> int:
return datetime_to_posix(datetime(d.year, d.month, d.day))
def date_to_julian(d: date) -> float:
return datetime_to_julian(datetime(d.year, d.month, d.day))
def date_to_text(format: str) -> Callable[[date], str]:
def _date_to_text(d: date) -> str:
return d.strftime(format)
return _date_to_text
def datetime_to_posix(dt: datetime) -> int:
return timegm(dt.utctimetuple())
def datetime_to_julian(dt: datetime) -> float:
return float(julian.to_jd(dt))
def datetime_to_text(format: str) -> Callable[[datetime], str]:
def _datetime_to_text(dt: datetime) -> str:
return dt.strftime(format)
return _datetime_to_text
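# A minimal usage sketch (illustrative only): register the POSIX datetime adapter defined
# above and let sqlite3 convert a datetime on insert into an in-memory database.
if __name__ == "__main__":
    store_datetime("posix")
    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE t (ts INTEGER)")
    conn.execute("INSERT INTO t VALUES (?)", (datetime(2020, 1, 1),))
    print(conn.execute("SELECT ts FROM t").fetchone())   # (1577836800,) -- POSIX seconds
    conn.close()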
|
import click
import logging
from .constants import WELCOME_TEXT
from .api import run_server
from .logger import OrigamiLogger
logger = OrigamiLogger(
file_log_level=logging.DEBUG, console_log_level=logging.DEBUG)
@click.group(invoke_without_command=True)
@click.pass_context
def main(ctx):
"""
Origami daemon is an application which deploys and manages demos on
CloudCV servers.
"""
if not ctx.invoked_subcommand:
click.echo(WELCOME_TEXT)
main.add_command(run_server)
|
from __future__ import annotations
import itertools
import math
from dataclasses import dataclass
from typing import Any
@dataclass
class TreeZipper:
inner: Any
path: list[int]
def up(self):
if self.path:
return TreeZipper(self.inner, self.path[:-1]), self.path[-1]
return None
def get(self):
v = self.inner
for p in self.path:
v = v[p]
return v
def set(self, x):
v = self.inner
for p in self.path[:-1]:
v = v[p]
v[self.path[-1]] = x
def try_left(self):
v = self.get()
if isinstance(v, list):
return TreeZipper(self.inner, self.path + [0])
return None
def try_right(self):
v = self.get()
if isinstance(v, list):
return TreeZipper(self.inner, self.path + [1])
return None
class Whoop(Exception):
pass
def do_reduce_exp(v: TreeZipper, depth):
if depth == 4 and isinstance(v.get(), list):
# print("exploding")
l, r = v.get()
v.set(0)
l_v = v
came_from_left = False
dont_go = False
while True:
# print("left", l_v, l_v.get())
if (l_v_n := l_v.try_left()) != None and not came_from_left:
l_v = l_v_n
break
elif (l_v_n_v := l_v.up()) != None:
# otherwise go up, remembering whether we came from the left child
l_v = l_v_n_v[0]
came_from_left = l_v_n_v[1] == 0
else:
dont_go = True
# if we did nothing, we have to have reached the top and we were already from the left
break
if not dont_go:
while True:
if (l_v_n := l_v.try_right()) != None:
l_v = l_v_n
# try to go down and to the left
if isinstance(l_v.get(), int):
# if it's an int, add and quit
l_v.set(l_v.get() + l)
break
l_v = v
came_from_right = False
dont_go = False
while True:
# print("right", l_v, l_v.get())
if (l_v_n := l_v.try_right()) != None and not came_from_right:
l_v = l_v_n
break
elif (l_v_n_v := l_v.up()) != None:
# otherwise go up, remembering whether we came from the right child
l_v = l_v_n_v[0]
came_from_right = l_v_n_v[1] == 1
else:
# if we did nothing, we have to have reached the top, bail
dont_go = True
break
if not dont_go:
while True:
if (l_v_n := l_v.try_left()) != None:
l_v = l_v_n
# try to go down and to the left
if isinstance(l_v.get(), int):
# if it's an int, add and quit
l_v.set(l_v.get() + r)
break
raise Whoop()
if (l_v := v.try_left()) != None:
do_reduce_exp(l_v, depth + 1)
if (r_v := v.try_right()) != None:
do_reduce_exp(r_v, depth + 1)
def do_reduce_splt(v: TreeZipper):
n_v = v.get()
if isinstance(n_v, int):
if n_v >= 10:
# print("splitting")
l_v = math.floor(n_v / 2)
r_v = math.ceil(n_v / 2)
v.set([l_v, r_v])
raise Whoop()
# otherwise, go and reduce both sides
if (l_v := v.try_left()) != None:
do_reduce_splt(l_v)
if (r_v := v.try_right()) != None:
do_reduce_splt(r_v)
def iter_red(l):
# print("doing", l)
while True:
t = TreeZipper(l, [])
try:
# print(l)
do_reduce_exp(t, 0)
do_reduce_splt(t)
except Whoop:
pass
else:
print("did nothing")
return
def do_mag(v: TreeZipper):
if isinstance(v.get(), int):
return v.get()
return 3 * do_mag(v.try_left()) + 2 * do_mag(v.try_right())
inp = [
[[[[7,1],[0,0]],[6,[8,2]]],[8,[3,8]]],
[[[3,6],[9,4]],[[[5,9],5],[8,0]]],
[[[2,2],2],[1,[[1,6],7]]],
[[[[0,9],7],[[3,2],8]],[6,[7,9]]],
[[[[4,1],6],[[7,6],[2,2]]],[[[1,1],9],4]],
[[[8,[3,7]],3],[[4,4],[[9,1],[3,5]]]],
[[4,[8,2]],[1,[0,5]]],
[8,[8,7]],
[[[[2,2],7],[3,[4,5]]],[[4,6],[[2,5],4]]],
[[[5,5],[[5,1],3]],[[2,[8,2]],[[6,9],[1,5]]]],
[0,7],
[[[[5,1],3],[8,[5,3]]],7],
[[5,[2,[0,6]]],[[[5,5],2],[9,[8,0]]]],
[[[[3,4],2],0],4],
[[[[5,3],[2,7]],6],[[4,0],[9,[7,2]]]],
[[[3,[2,5]],[3,3]],7],
[[[[5,1],1],[4,8]],[[5,[8,3]],2]],
[[4,[[8,1],[8,5]]],[[[4,1],0],6]],
[[[5,5],[5,9]],[0,[[6,8],[0,1]]]],
[4,[[[7,9],4],0]],
[[[[0,1],7],[[3,6],5]],[8,[5,[6,1]]]],
[[[7,7],[8,0]],[6,[8,[7,9]]]],
[[[9,2],1],6],
[[[4,4],[2,[5,0]]],[[[2,6],6],[5,[4,3]]]],
[[2,[[4,7],5]],1],
[[8,7],[[[2,0],7],[1,[0,3]]]],
[[9,[[9,3],[9,5]]],[[8,7],[[4,1],[6,5]]]],
[[3,4],[[9,4],5]],
[[5,[[8,3],5]],1],
[[0,[[9,0],[3,2]]],[2,[7,[5,1]]]],
[[9,[[9,5],[8,6]]],[[4,4],[[3,8],[1,6]]]],
[[[1,[5,2]],9],[[4,6],[3,[8,0]]]],
[[1,7],[[1,7],9]],
[[[[3,4],3],[[7,5],[9,1]]],[[[5,0],[3,0]],[[7,9],6]]],
[[[7,2],[[1,0],[5,6]]],[[[3,7],[8,9]],6]],
[[[[1,1],1],[[8,6],[9,8]]],[[[1,8],4],[8,9]]],
[[[8,9],0],3],
[[[1,7],[1,[3,9]]],[6,[0,[8,5]]]],
[[0,5],[6,5]],
[[[[6,8],[4,5]],[[7,4],6]],[[3,6],5]],
[[8,[[0,9],8]],[9,[7,[7,9]]]],
[0,[[[7,1],2],[[0,4],4]]],
[[0,[[9,1],5]],[1,4]],
[3,4],
[[[9,3],[1,3]],[[[4,8],3],[[1,3],[9,0]]]],
[[[[5,1],7],[[9,2],8]],[[[6,8],[5,4]],[0,1]]],
[8,[[1,[3,0]],[[7,9],4]]],
[[[6,4],[[2,9],[9,0]]],[7,[[0,0],3]]],
[[3,[[9,6],6]],2],
[[5,[[3,1],[7,5]]],[[[6,7],9],[[4,6],[5,2]]]],
[[[4,[6,5]],8],[[6,[8,0]],[[9,3],3]]],
[[[[4,9],[2,8]],9],[[[5,0],0],[[3,4],[2,8]]]],
[[3,[7,1]],[9,[[1,8],7]]],
[[9,1],[0,[[0,7],[7,1]]]],
[[7,[0,[7,6]]],[[[5,3],1],[6,[4,5]]]],
[8,[[[2,1],[6,9]],[[3,3],[4,6]]]],
[0,[7,[3,0]]],
[[[[1,6],3],[5,[8,0]]],[[[6,6],7],1]],
[[[7,[8,3]],3],[[[2,8],5],[0,[9,5]]]],
[[[[5,1],4],[[1,2],1]],7],
[[[3,[7,5]],7],3],
[[9,[6,[1,1]]],[[[4,1],[2,2]],[[9,5],[7,7]]]],
[2,7],
[[[9,[8,6]],[[9,0],[6,5]]],[[[6,7],5],[[7,7],[2,3]]]],
[[[0,[6,4]],2],[4,[7,[7,5]]]],
[[[[6,1],[9,1]],[[6,1],9]],[[2,6],0]],
[[0,[[1,8],[3,5]]],[4,[[8,2],[4,2]]]],
[[[[9,3],[4,2]],2],[[[2,1],[7,1]],[4,8]]],
[[[3,[0,2]],3],8],
[[[4,[4,9]],9],[[[4,4],5],9]],
[[[[8,2],7],9],[[[1,0],[3,8]],[[7,7],0]]],
[[[3,2],[9,7]],[[9,[8,2]],[[5,5],3]]],
[[[7,[3,1]],[[8,3],1]],[[[8,6],[7,0]],4]],
[[9,[[9,1],5]],[[4,[1,1]],2]],
[[[[7,4],[0,3]],7],[8,[6,[3,3]]]],
[5,5],
[[6,7],[1,[7,[8,1]]]],
[[1,[0,4]],7],
[[[4,0],[[0,1],[2,2]]],[9,[[9,9],[3,0]]]],
[[[6,0],[[8,6],3]],[[5,1],[[8,1],[2,7]]]],
[[[[8,3],7],5],[9,[[5,1],8]]],
[[[[4,0],[5,2]],[[0,0],7]],2],
[[[[0,1],6],2],[[8,2],6]],
[[[[2,4],1],[[6,7],9]],[[[1,6],9],3]],
[[5,5],[[8,[7,7]],[5,8]]],
[[6,[[9,2],[9,7]]],[[[8,5],[4,4]],7]],
[[[9,[7,7]],[6,0]],[7,[[8,7],[1,2]]]],
[[7,[6,2]],[[9,[5,2]],[1,4]]],
[[[7,[5,9]],[[3,9],[4,5]]],[0,6]],
[[9,[8,[2,2]]],[[9,7],[1,1]]],
[[[[2,3],4],[[4,8],9]],[[9,[8,6]],[[0,9],0]]],
[[0,[[9,3],0]],[8,8]],
[[[[2,9],6],[[2,8],9]],[[[0,5],6],[[6,1],7]]],
[[9,[[8,3],[5,8]]],[[7,[3,0]],3]],
[[[4,[4,2]],0],1],
[[[[9,6],[5,8]],[6,2]],[[[8,0],[7,0]],[[5,6],4]]],
[[[8,0],[[4,3],[7,4]]],[[3,[7,9]],[[7,3],6]]],
[[3,[5,[0,3]]],[5,4]],
[[[[1,2],[6,3]],1],[[7,[5,2]],[[8,8],7]]],
[[4,[[8,0],[7,1]]],[[8,[8,0]],[[1,5],3]]]
]
inp = [
[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]],
[[[5,[2,8]],4],[5,[[9,9],0]]],
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]],
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]],
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]],
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]],
[[[[5,4],[7,7]],8],[[8,3],8]],
[[9,3],[[9,9],[6,[4,9]]]],
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]],
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]
]
# inp = [
# [[[[[7,0],[7,7]],[[7,7],[7,8]]],[[[7,7],[8,8]],[[7,7],[8,7]]]],[7,[5,[[3,8],[1,4]]]]]
# ]
def do_add(l):
it = iter(l)
x = next(it)
iter_red(x)
for y in it:
x = [x, y]
iter_red(x)
return x
out = do_add(inp)
print(out)
print(do_mag(TreeZipper(out, [])))
import copy
inp = [
[[[[7,1],[0,0]],[6,[8,2]]],[8,[3,8]]],
[[[3,6],[9,4]],[[[5,9],5],[8,0]]],
[[[2,2],2],[1,[[1,6],7]]],
[[[[0,9],7],[[3,2],8]],[6,[7,9]]],
[[[[4,1],6],[[7,6],[2,2]]],[[[1,1],9],4]],
[[[8,[3,7]],3],[[4,4],[[9,1],[3,5]]]],
[[4,[8,2]],[1,[0,5]]],
[8,[8,7]],
[[[[2,2],7],[3,[4,5]]],[[4,6],[[2,5],4]]],
[[[5,5],[[5,1],3]],[[2,[8,2]],[[6,9],[1,5]]]],
[0,7],
[[[[5,1],3],[8,[5,3]]],7],
[[5,[2,[0,6]]],[[[5,5],2],[9,[8,0]]]],
[[[[3,4],2],0],4],
[[[[5,3],[2,7]],6],[[4,0],[9,[7,2]]]],
[[[3,[2,5]],[3,3]],7],
[[[[5,1],1],[4,8]],[[5,[8,3]],2]],
[[4,[[8,1],[8,5]]],[[[4,1],0],6]],
[[[5,5],[5,9]],[0,[[6,8],[0,1]]]],
[4,[[[7,9],4],0]],
[[[[0,1],7],[[3,6],5]],[8,[5,[6,1]]]],
[[[7,7],[8,0]],[6,[8,[7,9]]]],
[[[9,2],1],6],
[[[4,4],[2,[5,0]]],[[[2,6],6],[5,[4,3]]]],
[[2,[[4,7],5]],1],
[[8,7],[[[2,0],7],[1,[0,3]]]],
[[9,[[9,3],[9,5]]],[[8,7],[[4,1],[6,5]]]],
[[3,4],[[9,4],5]],
[[5,[[8,3],5]],1],
[[0,[[9,0],[3,2]]],[2,[7,[5,1]]]],
[[9,[[9,5],[8,6]]],[[4,4],[[3,8],[1,6]]]],
[[[1,[5,2]],9],[[4,6],[3,[8,0]]]],
[[1,7],[[1,7],9]],
[[[[3,4],3],[[7,5],[9,1]]],[[[5,0],[3,0]],[[7,9],6]]],
[[[7,2],[[1,0],[5,6]]],[[[3,7],[8,9]],6]],
[[[[1,1],1],[[8,6],[9,8]]],[[[1,8],4],[8,9]]],
[[[8,9],0],3],
[[[1,7],[1,[3,9]]],[6,[0,[8,5]]]],
[[0,5],[6,5]],
[[[[6,8],[4,5]],[[7,4],6]],[[3,6],5]],
[[8,[[0,9],8]],[9,[7,[7,9]]]],
[0,[[[7,1],2],[[0,4],4]]],
[[0,[[9,1],5]],[1,4]],
[3,4],
[[[9,3],[1,3]],[[[4,8],3],[[1,3],[9,0]]]],
[[[[5,1],7],[[9,2],8]],[[[6,8],[5,4]],[0,1]]],
[8,[[1,[3,0]],[[7,9],4]]],
[[[6,4],[[2,9],[9,0]]],[7,[[0,0],3]]],
[[3,[[9,6],6]],2],
[[5,[[3,1],[7,5]]],[[[6,7],9],[[4,6],[5,2]]]],
[[[4,[6,5]],8],[[6,[8,0]],[[9,3],3]]],
[[[[4,9],[2,8]],9],[[[5,0],0],[[3,4],[2,8]]]],
[[3,[7,1]],[9,[[1,8],7]]],
[[9,1],[0,[[0,7],[7,1]]]],
[[7,[0,[7,6]]],[[[5,3],1],[6,[4,5]]]],
[8,[[[2,1],[6,9]],[[3,3],[4,6]]]],
[0,[7,[3,0]]],
[[[[1,6],3],[5,[8,0]]],[[[6,6],7],1]],
[[[7,[8,3]],3],[[[2,8],5],[0,[9,5]]]],
[[[[5,1],4],[[1,2],1]],7],
[[[3,[7,5]],7],3],
[[9,[6,[1,1]]],[[[4,1],[2,2]],[[9,5],[7,7]]]],
[2,7],
[[[9,[8,6]],[[9,0],[6,5]]],[[[6,7],5],[[7,7],[2,3]]]],
[[[0,[6,4]],2],[4,[7,[7,5]]]],
[[[[6,1],[9,1]],[[6,1],9]],[[2,6],0]],
[[0,[[1,8],[3,5]]],[4,[[8,2],[4,2]]]],
[[[[9,3],[4,2]],2],[[[2,1],[7,1]],[4,8]]],
[[[3,[0,2]],3],8],
[[[4,[4,9]],9],[[[4,4],5],9]],
[[[[8,2],7],9],[[[1,0],[3,8]],[[7,7],0]]],
[[[3,2],[9,7]],[[9,[8,2]],[[5,5],3]]],
[[[7,[3,1]],[[8,3],1]],[[[8,6],[7,0]],4]],
[[9,[[9,1],5]],[[4,[1,1]],2]],
[[[[7,4],[0,3]],7],[8,[6,[3,3]]]],
[5,5],
[[6,7],[1,[7,[8,1]]]],
[[1,[0,4]],7],
[[[4,0],[[0,1],[2,2]]],[9,[[9,9],[3,0]]]],
[[[6,0],[[8,6],3]],[[5,1],[[8,1],[2,7]]]],
[[[[8,3],7],5],[9,[[5,1],8]]],
[[[[4,0],[5,2]],[[0,0],7]],2],
[[[[0,1],6],2],[[8,2],6]],
[[[[2,4],1],[[6,7],9]],[[[1,6],9],3]],
[[5,5],[[8,[7,7]],[5,8]]],
[[6,[[9,2],[9,7]]],[[[8,5],[4,4]],7]],
[[[9,[7,7]],[6,0]],[7,[[8,7],[1,2]]]],
[[7,[6,2]],[[9,[5,2]],[1,4]]],
[[[7,[5,9]],[[3,9],[4,5]]],[0,6]],
[[9,[8,[2,2]]],[[9,7],[1,1]]],
[[[[2,3],4],[[4,8],9]],[[9,[8,6]],[[0,9],0]]],
[[0,[[9,3],0]],[8,8]],
[[[[2,9],6],[[2,8],9]],[[[0,5],6],[[6,1],7]]],
[[9,[[8,3],[5,8]]],[[7,[3,0]],3]],
[[[4,[4,2]],0],1],
[[[[9,6],[5,8]],[6,2]],[[[8,0],[7,0]],[[5,6],4]]],
[[[8,0],[[4,3],[7,4]]],[[3,[7,9]],[[7,3],6]]],
[[3,[5,[0,3]]],[5,4]],
[[[[1,2],[6,3]],1],[[7,[5,2]],[[8,8],7]]],
[[4,[[8,0],[7,1]]],[[8,[8,0]],[[1,5],3]]]
]
m_v = 0
for l, r in itertools.permutations(inp, 2):
l = copy.deepcopy(l)
r = copy.deepcopy(r)
v = [l, r]
print(f"{l=} {r=}")
v = do_add(v)  # keep the returned tree: do_add builds a new top-level list, so v itself is not fully reduced
m_v = max(do_mag(TreeZipper(v, [])), m_v)
print(m_v)
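# A small illustrative check of the reduction rules implemented above: the leftmost pair
# nested four levels deep explodes, its right value is added to the next regular number to
# the right, and its left value is discarded because nothing lies to its left.
example = [[[[[9, 8], 1], 2], 3], 4]
iter_red(example)        # prints "did nothing" once the number is fully reduced
print(example)           # [[[[0, 9], 2], 3], 4]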
|
import json
from unittest import TestCase
from unittest.mock import Mock
from utils import protocols
from api.ontology import OntologyAPI
from utils.protocols import ONTOLOGY_3PRIME_PARENT, ONTOLOGY_5PRIME_PARENT, ONTOLOGY_CITESEQ
class TestProtocols(TestCase):
def setUp(self) -> None:
self.ontology_api = Mock()
def test_is_10x__when_equal_3prime_parent__returns_true(self):
# given
lib_prep_protocol = {
'content': {
'library_construction_method': {
'ontology': ONTOLOGY_3PRIME_PARENT
}
}
}
# when
is10x = protocols.is_10x(OntologyAPI(), lib_prep_protocol)
# then
self.assertTrue(is10x)
def test_is_10x__when_equal_5prime_parent__returns_true(self):
# given
lib_prep_protocol = {
'content': {
'library_construction_method': {
'ontology': ONTOLOGY_5PRIME_PARENT
}
}
}
# when
is10x = protocols.is_10x(self.ontology_api, lib_prep_protocol)
# then
self.assertTrue(is10x)
def test_is_10x__when_equal_citeseq__returns_true(self):
# given
lib_prep_protocol = {
'content': {
'library_construction_method': {
'ontology': ONTOLOGY_CITESEQ
}
}
}
# when
is10x = protocols.is_10x(self.ontology_api, lib_prep_protocol)
# then
self.assertTrue(is10x)
def test_is_10x__when_not_descendant__returns_false(self):
lib_prep_protocol = {
"content": {
"library_construction_method": {
"ontology": "EFO:0000000",
}
}
}
self.ontology_api.is_equal_or_descendant = Mock(return_value=False)
is10x = protocols.is_10x(self.ontology_api, lib_prep_protocol)
self.assertFalse(is10x)
def test_map_bam_schema__when_equals_citeseq__returns_10xV2(self):
# given
lib_prep_protocol = {
"content": {
"library_construction_method": {
"ontology": ONTOLOGY_CITESEQ,
}
}
}
# when
bam_schema = protocols.map_10x_bam_schema(self.ontology_api, lib_prep_protocol)
# then
self.assertEqual(bam_schema, '10xV2')
def test_map_bam_schema__when_not_leaf_term__returns_none(self):
# given
lib_prep_protocol = {
"content": {
"library_construction_method": {
"ontology": "EFO:0000000",
}
}
}
self.ontology_api.get_descendants = Mock(return_value=['descendant']) # not leaf term
self.ontology_api.search = Mock(return_value={'ontology_name': 'name', 'iri': 'iri', 'label': "10x 5' v2"})
# when
bam_schema = protocols.map_10x_bam_schema(self.ontology_api, lib_prep_protocol)
# then
self.assertEqual(bam_schema, None)
def test_map_bam_schema__when_leaf_term__returns_correct_bam_schema(self):
# given
lib_prep_protocol = {
"content": {
"library_construction_method": {
"ontology": "EFO:0000000",
}
}
}
self.ontology_api.get_descendants = Mock(return_value=[]) # leaf term
self.ontology_api.search = Mock(return_value={'ontology_name': 'name', 'iri': 'iri', 'label': "10x 5' v2"})
# when
bam_schema = protocols.map_10x_bam_schema(self.ontology_api, lib_prep_protocol)
# then
self.assertEqual(bam_schema, '10xV2')
def test_version_10x_by_label__given_v2_label__return_version(self):
# given
lib_prep_protocol = {
"content": {
"library_construction_method": {
"ontology": "EFO:0009294",
}
}
}
self.ontology_api.search = Mock(return_value={'label': "10x 5' v2"})
# when
bam_schema = protocols.version_10x_by_label(self.ontology_api, lib_prep_protocol)
# then
self.assertEqual(bam_schema, 'V2')
def test_version_10x_by_label__given_v3_label__return_version(self):
# given
lib_prep_protocol = {
"content": {
"library_construction_method": {
"ontology": "EFO:0009294",
}
}
}
self.ontology_api.search = Mock(return_value={'label': "10x 3' v3"})
# when
bam_schema = protocols.version_10x_by_label(self.ontology_api, lib_prep_protocol)
# then
self.assertEqual(bam_schema, 'V3')
|
from sumy.parsers.plaintext import PlaintextParser #We're choosing a plaintext parser here, other parsers available for HTML etc.
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lex_rank import LexRankSummarizer #We're choosing Lexrank, other algorithms are also built in
def get_summary(text):
# file = "plain_text.txt" #name of the plain-text file
# parser = PlaintextParser.from_file(file, Tokenizer("english"))
parser = PlaintextParser.from_string(text, Tokenizer("english"))
summarizer = LexRankSummarizer()
summary = summarizer(parser.document, 5) #Summarize the document with 5 sentences
# for sentence in summary:
# print(sentence)
return summary
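# A minimal usage sketch (the sample text is an illustrative assumption; NLTK tokenizer
# data must be available): LexRank returns the most central sentences as Sentence objects.
if __name__ == "__main__":
    sample = (
        "Text summarization condenses a document into a few key sentences. "
        "Extractive methods such as LexRank rank the original sentences by centrality. "
        "The highest ranked sentences are then returned in document order. "
        "Abstractive methods instead generate new sentences. "
        "This sketch only exercises the extractive path wrapped by get_summary above. "
        "Short inputs may yield fewer than five sentences."
    )
    for sentence in get_summary(sample):
        print(sentence)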
|
import pandas as pd
import cv2
import numpy as np
dataset_path = 'fer2013/fer2013/fer2013.csv'
image_size=(48,48)
def load_fer2013():
data = pd.read_csv(dataset_path)
pixels = data['pixels'].tolist()
width, height = 48, 48
faces = []
for pixel_sequence in pixels:
face = [int(pixel) for pixel in pixel_sequence.split(' ')]
face = np.asarray(face).reshape(width, height)
face = cv2.resize(face.astype('uint8'),image_size)
faces.append(face.astype('float32'))
faces = np.asarray(faces)
faces = np.expand_dims(faces, -1)
emotions = pd.get_dummies(data['emotion']).to_numpy()  # .as_matrix() was removed in recent pandas versions
return faces, emotions
def preprocess_input(x, v2=True):
x = x.astype('float32')
x = x / 255.0
if v2:
x = x - 0.5
x = x * 2.0
return x
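# A minimal check of the scaling performed by preprocess_input above: pixel values in
# [0, 255] map to [0, 1], and with v2=True they are re-centred to [-1, 1]. The sample
# array is an illustrative assumption.
if __name__ == '__main__':
    import numpy as np
    sample = np.array([0.0, 127.5, 255.0])
    print(preprocess_input(sample, v2=False))   # [0.  0.5 1. ]
    print(preprocess_input(sample, v2=True))    # [-1.  0.  1.]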
|