import logging
import os
import uuid
from distutils import util
from pathlib import Path
import pytest
import test_infra.utils as infra_utils
from test_infra import assisted_service_api, consts, utils
qe_env = False
def is_qe_env():
return os.environ.get('NODE_ENV') == 'QE_VM'
def _get_cluster_name():
cluster_name = utils.get_env('CLUSTER_NAME', f'{consts.CLUSTER_PREFIX}')
if cluster_name == consts.CLUSTER_PREFIX:
cluster_name = cluster_name + '-' + str(uuid.uuid4())[:8]
return cluster_name
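# Example behaviour (a sketch, assuming CLUSTER_NAME is unset in the environment):
#   _get_cluster_name()  ->  f"{consts.CLUSTER_PREFIX}-1a2b3c4d"   # random 8-char suffix
# With CLUSTER_NAME=my-cluster exported, the value is returned unchanged: "my-cluster".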
# TODO: change this
if is_qe_env():
from test_infra.controllers.node_controllers.qe_vm_controler import \
QeVmController as nodeController
qe_env = True
else:
from test_infra.controllers.node_controllers.terraform_controller import \
TerraformController as nodeController
private_ssh_key_path_default = os.path.join(os.getcwd(), "ssh_key/key") if not qe_env else \
os.path.join(str(Path.home()), ".ssh/id_rsa")
env_variables = {"ssh_public_key": utils.get_env('SSH_PUB_KEY'),
"remote_service_url": utils.get_env('REMOTE_SERVICE_URL'),
"pull_secret": utils.get_env('PULL_SECRET'),
"offline_token": utils.get_env('OFFLINE_TOKEN'),
"openshift_version": utils.get_openshift_version(),
"base_domain": utils.get_env('BASE_DOMAIN', "redhat.com"),
"num_masters": int(utils.get_env('NUM_MASTERS', consts.NUMBER_OF_MASTERS)),
"num_workers": max(2, int(utils.get_env('NUM_WORKERS', 0))),
"vip_dhcp_allocation": bool(util.strtobool(utils.get_env('VIP_DHCP_ALLOCATION'))),
"worker_memory": int(utils.get_env('WORKER_MEMORY', '8892')),
"master_memory": int(utils.get_env('MASTER_MEMORY', '16984')),
"network_mtu": utils.get_env('NETWORK_MTU', '1500'),
"worker_disk": int(utils.get_env('WORKER_DISK', '21474836480')),
"master_disk": int(utils.get_env('MASTER_DISK', '128849018880')),
"storage_pool_path": utils.get_env('STORAGE_POOL_PATH', os.path.join(os.getcwd(), "storage_pool")),
"cluster_name": _get_cluster_name(),
"private_ssh_key_path": utils.get_env('PRIVATE_KEY_PATH', private_ssh_key_path_default),
"kubeconfig_path": utils.get_env('KUBECONFIG', ''),
"log_folder": utils.get_env('LOG_FOLDER', consts.LOG_FOLDER),
"service_cidr": utils.get_env('SERVICE_CIDR', '172.30.0.0/16'),
"cluster_cidr": utils.get_env('CLUSTER_CIDR', '10.128.0.0/14'),
"host_prefix": int(utils.get_env('HOST_PREFIX', '23')),
"iso_image_type": utils.get_env('ISO_IMAGE_TYPE', consts.ImageType.FULL_ISO),
"worker_vcpu": utils.get_env('WORKER_CPU', consts.WORKER_CPU),
"master_vcpu": utils.get_env('MASTER_CPU', consts.MASTER_CPU),
"test_teardown": bool(util.strtobool(utils.get_env('TEST_TEARDOWN', 'true'))),
"namespace": utils.get_env('NAMESPACE', consts.DEFAULT_NAMESPACE),
"olm_operators": utils.get_env('OLM_OPERATORS', []),
}
cluster_mid_name = infra_utils.get_random_name()
# Tests running on Terraform in parallel must each have a unique ISO file
if not qe_env:
image = utils.get_env('ISO',
os.path.join(consts.IMAGE_FOLDER, f'{env_variables["cluster_name"]}-{cluster_mid_name}-'
f'installer-image.iso')).strip()
env_variables["kubeconfig_path"] = f'/tmp/test_kubeconfig_{cluster_mid_name}'
else:
image = utils.get_env('ISO',
os.path.join(consts.IMAGE_FOLDER, f'{env_variables["cluster_name"]}-installer-image.iso')). \
strip()
env_variables["iso_download_path"] = image
env_variables["num_nodes"] = env_variables["num_workers"] + env_variables["num_masters"]
@pytest.fixture(scope="session")
def api_client():
logging.info('--- SETUP --- api_client\n')
yield get_api_client()
def get_api_client(offline_token=env_variables['offline_token'], **kwargs):
url = env_variables['remote_service_url']
if not url:
url = utils.get_local_assisted_service_url(
utils.get_env('PROFILE'), env_variables['namespace'], 'assisted-service', utils.get_env('DEPLOY_TARGET'))
return assisted_service_api.create_client(url, offline_token, **kwargs)
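# Illustrative usage (a sketch, not part of the original conftest): a test can
# depend on the session-scoped fixture and talk to the assisted-service API, e.g.
#
#     def test_list_clusters(api_client):
#         clusters = api_client.clusters_list()   # hypothetical client method,
#         assert isinstance(clusters, list)       # shown only for illustration
#
# get_api_client() can also be called directly with an explicit offline token:
#     client = get_api_client(offline_token="...")  # extra kwargs are forwarded to create_client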
@pytest.fixture(scope="session")
def setup_node_controller():
logging.info('--- SETUP --- node controller\n')
yield nodeController
logging.info('--- TEARDOWN --- node controller\n')
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
outcome = yield
result = outcome.get_result()
setattr(item, "result_" + result.when, result)
|
# coding: utf-8
"""
This is part of the MSS Python module.
Source: https://github.com/BoboTiG/python-mss
"""
import platform
from .exception import ScreenShotError
def mss(**kwargs):
# type: (**str) -> MSS
""" Factory returning a proper MSS class instance.
It detects the platform we are running on
and chooses the most suitable MSS class to take
screenshots.
It then proxies its arguments to the class for
instantiation.
"""
operating_system = platform.system().lower()
if operating_system == "darwin":
from .darwin import MSS
elif operating_system == "linux":
from .linux import MSS
elif operating_system == "windows":
from .windows import MSS
else:
raise ScreenShotError(
"System {!r} not (yet?) implemented.".format(operating_system)
)
return MSS(**kwargs)
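# Illustrative usage (a sketch, not part of the original module): the factory hides
# the platform-specific class behind a single call, so callers only do
#
#     from mss import mss          # assuming the package re-exports this factory
#     sct = mss()                  # returns the darwin/linux/windows MSS instance
#     # ...then call the screenshot methods of the returned instance; their exact
#     # names depend on the MSS version in use.
#
# On an unsupported platform the call raises ScreenShotError instead.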
|
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import argparse
import numpy as np
import tensorflow as tf
from tensorflow import keras
parser = argparse.ArgumentParser(description='''Predicts the detectability of input peptides using a one-dimensional
Convolutional Neural Network, based on TensorFlow 1.13.1.
Requirements: TensorFlow 1.13.1''')
parser.add_argument('infile', metavar='F', type=str, nargs='+',
help='File containing the peptides to be predicted, one per line (max length = 81)')
args = parser.parse_args()
def load_pep_and_codify(file, max_len):
aa_dict={'A':1,'R':2,'N':3,'D':4,'C':5,'Q':6,'E':7,'G':8,'H':9,'I':10,'L':11,'K':12,'M':13,'F':14,
'P':15,'O':16,'S':17,'U':18,'T':19,'W':20,'Y':21,'V':22}
with open(file, 'r') as inf:
lines = inf.read().splitlines()
pep_codes=[]
long_pep_counter = 0
newLines = []
for pep in lines:
if len(pep) <= max_len:
current_pep=[]
for aa in pep:
current_pep.append(aa_dict[aa])
pep_codes.append(current_pep)
newLines.append(pep)
else:
long_pep_counter += 1
predict_data = keras.preprocessing.sequence.pad_sequences(pep_codes, value=0, padding='post', maxlen=max_len)
return predict_data, long_pep_counter, newLines
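# Worked example (a sketch, not part of the original script): with the aa_dict above,
# a file containing the single peptide "ACD" is codified as [1, 5, 4] and then
# right-padded with zeros to max_len, i.e.
#   predict_data[0] == [1, 5, 4, 0, 0, ..., 0]   # length 81
#   long_pep_counter == 0, newLines == ["ACD"]
# Peptides longer than max_len are counted in long_pep_counter and dropped.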
print('Loading model...')
model_2_1D = keras.models.load_model('model_2_1D.h5')
print('Loading input peptides')
predict_data, skipped, lines = load_pep_and_codify(args.infile[0], 81)
print('Successfully loaded {0} peptides and skipped {1}'.format(len(lines), skipped))
print('Making predictions')
model_2_1D_pred = model_2_1D.predict(predict_data)
model_2_1D_pred = np.hstack((np.array(lines).reshape(len(lines), 1),model_2_1D_pred)).tolist()
# The reported probability is 1 minus the raw model score; peptides whose
# reported probability is at least 0.5 are labelled detectable ('1').
Pred_output = []
for pred in model_2_1D_pred:
    prob_detectable = 1 - float(pred[1])
    label = '1' if prob_detectable >= 0.5 else '0'
    Pred_output.append([pred[0], str(prob_detectable), label])
outFile = '{0}_Predictions.txt'.format(args.infile[0].split('.')[0])
print('Saving predictions to file {}'.format(outFile))
with open(outFile, 'w') as outf:
outf.write('Peptide\tProb\tDetectability\n')
outf.writelines('\t'.join(i) + '\n' for i in Pred_output)
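# Example of the resulting output file (a sketch; the numbers are made up):
#   Peptide    Prob                 Detectability
#   ACDEFGHIK  0.9273458123207092   1
#   LMNPQRSTV  0.1284031420946121   0
# Columns are tab-separated; Prob is 1 minus the raw model score and
# Detectability is 1 when that probability is at least 0.5.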
|
"""
py-ctrl script
1. generate problem PD file
1.1 save PD file in /inputfiles
2. solve convex hull
2.1 save hull information in /output
2.2 show figure for 10 sec
2.3 save figure in /output
"""
import os
import subprocess
import argparse
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description = 'Plot tradeoff')
# parser.add_argument('-S', '--Solver', type = int, choices = [0, 1], default = 0, help = "0: Cplex\n1: Gurobi")
parser.add_argument('-P', '--Problem', type = int, choices = [1, 2, 3], default = 1, help=" 1: Coded Caching\n 2: Private Information Retrieval\n 3: Symmetric Private Information Retrieval")
parser.add_argument('-N1', '--N1', type = int, choices = range(2, 10), default = 2, help = "number of files in coded caching")
parser.add_argument('-K1', '--K1', type = int, choices = range(2, 10), default = 3, help = "number of users in coded caching")
parser.add_argument('-N2', '--N2', type = int, choices = range(1, 10), default = 2, help = "number of servers in private information retrieval")
parser.add_argument('-K2', '--K2', type = int, choices = range(1, 10), default = 2, help = "number of files in private information retrieval")
parser.add_argument('-N3', '--N3', type = int, choices = range(1, 10), default = 2, help = "number of servers in symmetric private information retrieval")
parser.add_argument('-K3', '--K3', type = int, choices = range(1, 10), default = 2, help = "number of files in symmetric private information retrieval")
parser.add_argument('-IP', '--InPt', type = str, help = "list of achievable points, e.g. \"(1,1);(1.25,0.85)\"", default=None)
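# Example invocations (a sketch; the script filename is whatever this file is saved as):
#   python plot_tradeoff.py -P 1 -N1 2 -K1 3
#       -> coded caching with 2 files and 3 users (the defaults shown above)
#   python plot_tradeoff.py -P 2 -N2 2 -K2 2 -IP "(1,1);(1.25,0.85)"
#       -> PIR outer bound plus two user-supplied achievable points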
if __name__ == "__main__":
# directory of CAI repository
cai = os.path.dirname(os.path.abspath(__file__)) + "/../../"
#### HERE
# You might need to change these lines:
# 1. directory of solver
SOLVER = cai + "CplexCompute/cplexcompute.out"
# 2. duration of the convex hull figure pausing in sec
PAUSE = 10
# read args
args = parser.parse_args()
# generate PD file
print("Genearte PD file")
if args.Problem == 1:
from gen_pd_cache import gen_pd_cache
fn = gen_pd_cache(args.N1, args.K1)
title = "Coded Caching with {} files and {} users".format(args.N1, args.K1)
xlabel = "Storage"
ylabel = "Download"
name = "cache{}x{}".format(args.N1, args.K1)
elif args.Problem == 2:
from gen_pd_pir import gen_pd_pir
fn = gen_pd_pir(args.N2, args.K2)
xlabel = "Storage"
ylabel = "Download"
title = "Private Information Retrieval with {} servers and {} files".format(args.N2, args.K2)
name = "PIR{}x{}".format(args.N2, args.K2)
elif args.Problem == 3:
from gen_pd_spir import gen_pd_spir
fn = gen_pd_spir(args.N3, args.K3)
xlabel = "Storage"
ylabel = "Download"
title = "Symmetric Private Information Retrieval with {} servers and {} files".format(args.N3, args.K3)
name = "SPIR{}x{}".format(args.N3, args.K3)
# Solve PD
print()
print("Solve the convex hull")
if not os.path.exists(cai + 'PlotTradeoff/output'):
os.makedirs(cai + 'PlotTradeoff/output')
print('Open ' + cai + 'PlotTradeoff/output/Hull_' + name + '.txt for details')
# Re-run the solver unless both output files already exist and the user declines to overwrite them.
solve = True
if os.path.exists(cai + 'PlotTradeoff/output/Hull_' + name + '.txt') and os.path.exists(cai + 'PlotTradeoff/output/Fig_' + name + '.eps'):
    print("file " + cai + "PlotTradeoff/output/Hull_" + name + ".txt already exists")
    print("Overwrite [y/n]: ", end="")
    solve = input().strip().lower() == "y"
if solve:
    with open(cai + 'PlotTradeoff/output/Hull_' + name + '.txt', 'w') as fout:
        subprocess.run([SOLVER, fn, "hull"], stdout=fout, text=True)
"""
if args.Solver == 0:
subprocess.run([cai + "CplexCompute/cplexcompute.out", fn, "hull"], stdout=fout, text=True)
else:
subprocess.run([cai + "GurobiCompute/gurobicompute.out", fn, "hull"], stdout=fout, text=True)
"""
with open(cai + 'PlotTradeoff/output/Hull_' + name + '.txt', 'r') as fout:
res = fout.read()
# capture the points on the hull
res = res[res.find("List of found points on the hull:\n"):-1].split("\n")[1: -1]
points = []
for p in res:
points.append(tuple(map(float, p[1: -2].split(', '))))
# plot region
points = sorted(points, key=lambda x: x[0])
width = points[0][1] - points[-1][1]
plt.plot(*zip(*points), label = "Outer Bounds")
if args.InPt != None:
InPt = []
for p in args.InPt.split(";"):
InPt.append(tuple(map(float, p[1: -1].split(','))))
plt.plot(*zip(*InPt), 'o', label = "Achievable Points")
plt.ylim(points[-1][1]- 0.01 * width, points[0][1] + 0.01 * width)
plt.ylabel(ylabel)
plt.xlabel(xlabel)
plt.title(title)
plt.legend()
plt.savefig(cai + 'PlotTradeoff/output/Fig_' + name + '.eps', format='eps')
plt.show(block=False)
plt.pause(PAUSE)
plt.close()
print("Figure " + cai + "PlotTradeoff/output/Fig_" + name + '.eps')
|
import asyncio
import errno
import inspect
import io
import os
import socket
import ssl
import threading
import warnings
from distutils.version import StrictVersion
from itertools import chain
from typing import (
Any,
Iterable,
List,
Mapping,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
)
from urllib.parse import ParseResult, parse_qs, unquote, urlparse
import async_timeout
from .compat import Protocol, TypedDict
from .exceptions import (
AuthenticationError,
AuthenticationWrongNumberOfArgsError,
BusyLoadingError,
ChildDeadlockedError,
ConnectionError,
DataError,
ExecAbortError,
InvalidResponse,
ModuleError,
NoPermissionError,
NoScriptError,
ReadOnlyError,
RedisError,
ResponseError,
TimeoutError,
)
from .utils import str_if_bytes
NONBLOCKING_EXCEPTION_ERROR_NUMBERS = {
BlockingIOError: errno.EWOULDBLOCK,
ssl.SSLWantReadError: 2,
ssl.SSLWantWriteError: 2,
ssl.SSLError: 2,
}
NONBLOCKING_EXCEPTIONS = tuple(NONBLOCKING_EXCEPTION_ERROR_NUMBERS.keys())
try:
import hiredis
except ImportError:
HIREDIS_AVAILABLE = False
else:
HIREDIS_AVAILABLE = True
hiredis_version = StrictVersion(hiredis.__version__)
if hiredis_version < StrictVersion("1.0.0"):
warnings.warn(
"aioredis supports hiredis @ 1.0.0 or higher. "
f"You have hiredis @ {hiredis.__version__}. "
"Pure-python parser will be used instead."
)
HIREDIS_AVAILABLE = False
SYM_STAR = b"*"
SYM_DOLLAR = b"$"
SYM_CRLF = b"\r\n"
SYM_LF = b"\n"
SYM_EMPTY = b""
SERVER_CLOSED_CONNECTION_ERROR = "Connection closed by server."
SENTINEL = object()
MODULE_LOAD_ERROR = "Error loading the extension. Please check the server logs."
NO_SUCH_MODULE_ERROR = "Error unloading module: no such module with that name"
MODULE_UNLOAD_NOT_POSSIBLE_ERROR = "Error unloading module: operation not possible."
MODULE_EXPORTS_DATA_TYPES_ERROR = (
"Error unloading module: the module "
"exports one or more module-side data "
"types, can't unload"
)
EncodedT = Union[bytes, memoryview]
DecodedT = Union[str, int, float]
EncodableT = Union[EncodedT, DecodedT]
class Encoder:
"""Encode strings to bytes-like and decode bytes-like to strings"""
__slots__ = "encoding", "encoding_errors", "decode_responses"
def __init__(self, encoding: str, encoding_errors: str, decode_responses: bool):
self.encoding = encoding
self.encoding_errors = encoding_errors
self.decode_responses = decode_responses
def encode(self, value: EncodableT) -> EncodedT:
"""Return a bytestring or bytes-like representation of the value"""
if isinstance(value, (bytes, memoryview)):
return value
if isinstance(value, bool):
# special case bool since it is a subclass of int
raise DataError(
"Invalid input of type: 'bool'. "
"Convert to a bytes, string, int or float first."
)
if isinstance(value, (int, float)):
return repr(value).encode()
if not isinstance(value, str):
# a value we don't know how to deal with. throw an error
typename = value.__class__.__name__
raise DataError(
f"Invalid input of type: {typename!r}. "
"Convert to a bytes, string, int or float first."
)
if isinstance(value, str):
return value.encode(self.encoding, self.encoding_errors)
return value
def decode(self, value: EncodableT, force=False) -> EncodableT:
"""Return a unicode string from the bytes-like representation"""
if self.decode_responses or force:
if isinstance(value, memoryview):
return value.tobytes().decode(self.encoding, self.encoding_errors)
if isinstance(value, bytes):
return value.decode(self.encoding, self.encoding_errors)
return value
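# Illustrative behaviour of Encoder (a sketch, not part of the original module):
#   enc = Encoder(encoding="utf-8", encoding_errors="strict", decode_responses=True)
#   enc.encode("hello")   -> b"hello"
#   enc.encode(15)        -> b"15"        (ints/floats go through repr())
#   enc.encode(True)      -> raises DataError (bool is rejected explicitly)
#   enc.decode(b"hello")  -> "hello"      (only because decode_responses=True)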
ExceptionMappingT = Mapping[str, Union[Type[Exception], Mapping[str, Type[Exception]]]]
class BaseParser:
"""Plain Python parsing class"""
__slots__ = "_stream", "_buffer", "_read_size"
EXCEPTION_CLASSES: ExceptionMappingT = {
"ERR": {
"max number of clients reached": ConnectionError,
"Client sent AUTH, but no password is set": AuthenticationError,
"invalid password": AuthenticationError,
# some Redis server versions report invalid command syntax
# in lowercase
"wrong number of arguments for 'auth' command": AuthenticationWrongNumberOfArgsError,
# some Redis server versions report invalid command syntax
# in uppercase
"wrong number of arguments for 'AUTH' command": AuthenticationWrongNumberOfArgsError,
MODULE_LOAD_ERROR: ModuleError,
MODULE_EXPORTS_DATA_TYPES_ERROR: ModuleError,
NO_SUCH_MODULE_ERROR: ModuleError,
MODULE_UNLOAD_NOT_POSSIBLE_ERROR: ModuleError,
},
"EXECABORT": ExecAbortError,
"LOADING": BusyLoadingError,
"NOSCRIPT": NoScriptError,
"READONLY": ReadOnlyError,
"NOAUTH": AuthenticationError,
"NOPERM": NoPermissionError,
}
def __init__(self, socket_read_size: int):
self._stream: Optional[asyncio.StreamReader] = None
self._buffer: Optional[SocketBuffer] = None
self._read_size = socket_read_size
def __del__(self):
try:
self.on_disconnect()
except Exception:
pass
def parse_error(self, response: str) -> ResponseError:
"""Parse an error response"""
error_code = response.split(" ")[0]
if error_code in self.EXCEPTION_CLASSES:
response = response[len(error_code) + 1 :]
exception_class = self.EXCEPTION_CLASSES[error_code]
if isinstance(exception_class, dict):
exception_class = exception_class.get(response, ResponseError)
return exception_class(response)
return ResponseError(response)
def on_disconnect(self):
raise NotImplementedError()
def on_connect(self, connection: "Connection"):
raise NotImplementedError()
async def can_read(self, timeout: float) -> bool:
raise NotImplementedError()
async def read_response(self) -> Union[EncodableT, ResponseError, None]:
raise NotImplementedError()
class SocketBuffer:
"""Async-friendly re-impl of redis-py's SocketBuffer.
TODO: We're currently passing through two buffers,
the asyncio.StreamReader and this. I imagine we can reduce the layers here
while maintaining compliance with prior art.
"""
def __init__(
self,
stream_reader: asyncio.StreamReader,
socket_read_size: int,
socket_timeout: float,
):
self._stream = stream_reader
self.socket_read_size = socket_read_size
self.socket_timeout = socket_timeout
self._buffer = io.BytesIO()
# number of bytes written to the buffer from the socket
self.bytes_written = 0
# number of bytes read from the buffer
self.bytes_read = 0
@property
def length(self):
return self.bytes_written - self.bytes_read
async def _read_from_socket(
self,
length: int = None,
timeout: Optional[float] = SENTINEL, # type: ignore
raise_on_timeout: bool = True,
) -> bool:
buf = self._buffer
buf.seek(self.bytes_written)
marker = 0
timeout = timeout if timeout is not SENTINEL else self.socket_timeout
try:
while True:
async with async_timeout.timeout(timeout):
data = await self._stream.read(self.socket_read_size)
# an empty byte string indicates the server shut down the socket
if isinstance(data, bytes) and len(data) == 0:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
buf.write(data)
data_length = len(data)
self.bytes_written += data_length
marker += data_length
if length is not None and length > marker:
continue
return True
except (socket.timeout, asyncio.TimeoutError):
if raise_on_timeout:
raise TimeoutError("Timeout reading from socket")
return False
except NONBLOCKING_EXCEPTIONS as ex:
# if we're in nonblocking mode and the recv raises a
# blocking error, simply return False indicating that
# there's no data to be read. otherwise raise the
# original exception.
allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1)
if not raise_on_timeout and ex.errno == allowed:
return False
raise ConnectionError(f"Error while reading from socket: {ex.args}")
async def can_read(self, timeout: float) -> bool:
return bool(self.length) or await self._read_from_socket(
timeout=timeout, raise_on_timeout=False
)
async def read(self, length: int) -> bytes:
length += 2
# make sure we've read enough data from the socket
if length > self.length:
await self._read_from_socket(length - self.length)
self._buffer.seek(self.bytes_read)
data = self._buffer.read(length)
self.bytes_read += len(data)
# purge the buffer when we've consumed it all so it doesn't
# grow forever
if self.bytes_read == self.bytes_written:
self.purge()
return data[:-2]
async def readline(self) -> bytes:
buf = self._buffer
buf.seek(self.bytes_read)
data = buf.readline()
while not data.endswith(SYM_CRLF):
# there's more data in the socket that we need
await self._read_from_socket()
buf.seek(self.bytes_read)
data = buf.readline()
self.bytes_read += len(data)
# purge the buffer when we've consumed it all so it doesn't
# grow forever
if self.bytes_read == self.bytes_written:
self.purge()
return data[:-2]
def purge(self):
self._buffer.seek(0)
self._buffer.truncate()
self.bytes_written = 0
self.bytes_read = 0
def close(self):
try:
self.purge()
self._buffer.close()
except Exception:
# issue #633 suggests the purge/close somehow raised a
# BadFileDescriptor error. Perhaps the client ran out of
# memory or something else? It's probably OK to ignore
# any error being raised from purge/close since we're
# removing the reference to the instance below.
pass
self._buffer = None
self._stream = None
class PythonParser(BaseParser):
"""Plain Python parsing class"""
__slots__ = BaseParser.__slots__ + ("encoder",)
def __init__(self, socket_read_size: int):
super().__init__(socket_read_size)
self.encoder: Optional[Encoder] = None
def on_connect(self, connection: "Connection"):
"""Called when the stream connects"""
self._stream = connection._reader
self._buffer = SocketBuffer(
self._stream, self._read_size, connection.socket_timeout
)
self.encoder = connection.encoder
def on_disconnect(self):
"""Called when the stream disconnects"""
if self._stream is not None:
self._stream = None
if self._buffer is not None:
self._buffer.close()
self._buffer = None
self.encoder = None
async def can_read(self, timeout: float):
return self._buffer and bool(await self._buffer.can_read(timeout))
async def read_response(self) -> Union[EncodableT, ResponseError, None]:
if not self._buffer:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
raw = await self._buffer.readline()
if not raw:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
response: Any
byte, response = raw[:1], raw[1:]
if byte not in (b"-", b"+", b":", b"$", b"*"):
raise InvalidResponse(f"Protocol Error: {raw!r}")
# server returned an error
if byte == b"-":
response = response.decode("utf-8", errors="replace")
error = self.parse_error(response)
# if the error is a ConnectionError, raise immediately so the user
# is notified
if isinstance(error, ConnectionError):
raise error
# otherwise, we're dealing with a ResponseError that might belong
# inside a pipeline response. the connection's read_response()
# and/or the pipeline's execute() will raise this error if
# necessary, so just return the exception instance here.
return error
# single value
elif byte == b"+":
pass
# int value
elif byte == b":":
response = int(response)
# bulk response
elif byte == b"$":
length = int(response)
if length == -1:
return None
response = await self._buffer.read(length)
# multi-bulk response
elif byte == b"*":
length = int(response)
if length == -1:
return None
response = [(await self.read_response()) for _ in range(length)]
if isinstance(response, bytes):
response = self.encoder.decode(response)
return response
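# How the RESP type prefixes above map to parsed values (a sketch, assuming
# decode_responses is off so bytes are returned unchanged):
#   b"+OK\r\n"                          -> b"OK"            (simple string)
#   b"-ERR unknown command\r\n"         -> ResponseError    (returned, not raised)
#   b":42\r\n"                          -> 42               (integer)
#   b"$3\r\nfoo\r\n"                    -> b"foo"           (bulk string)
#   b"*2\r\n$3\r\nfoo\r\n$3\r\nbar\r\n" -> [b"foo", b"bar"] (multi-bulk)
# A "$-1" or "*-1" length denotes a null reply and is returned as None.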
class HiredisParser(BaseParser):
"""Parser class for connections using Hiredis"""
__slots__ = BaseParser.__slots__ + ("_next_response", "_reader", "_socket_timeout")
def __init__(self, socket_read_size: int):
if not HIREDIS_AVAILABLE:
raise RedisError("Hiredis is not available.")
super().__init__(socket_read_size=socket_read_size)
self._next_response = ...
self._reader: Optional[hiredis.Reader] = None
self._socket_timeout: Optional[float] = None
def on_connect(self, connection: "Connection"):
self._stream = connection._reader
kwargs = {
"protocolError": InvalidResponse,
"replyError": self.parse_error,
}
if connection.encoder.decode_responses:
kwargs.update(
encoding=connection.encoder.encoding,
errors=connection.encoder.encoding_errors,
)
self._reader = hiredis.Reader(**kwargs)
self._next_response = False
self._socket_timeout = connection.socket_timeout
def on_disconnect(self):
self._stream = None
self._reader = None
self._next_response = False
async def can_read(self, timeout: float):
if not self._reader:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
if self._next_response is False:
self._next_response = self._reader.gets()
if self._next_response is False:
return await self.read_from_socket(timeout=timeout, raise_on_timeout=False)
return True
async def read_from_socket(
self, timeout: Optional[float] = SENTINEL, raise_on_timeout: bool = True
):
timeout = self._socket_timeout if timeout is SENTINEL else timeout
try:
async with async_timeout.timeout(timeout):
buffer = await self._stream.read(self._read_size)
if not isinstance(buffer, bytes) or len(buffer) == 0:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None
self._reader.feed(buffer)
# data was read from the socket and added to the buffer.
# return True to indicate that data was read.
return True
except asyncio.CancelledError:
raise
except (socket.timeout, asyncio.TimeoutError):
if raise_on_timeout:
raise TimeoutError("Timeout reading from socket") from None
return False
except NONBLOCKING_EXCEPTIONS as ex:
# if we're in nonblocking mode and the recv raises a
# blocking error, simply return False indicating that
# there's no data to be read. otherwise raise the
# original exception.
allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1)
if not raise_on_timeout and ex.errno == allowed:
return False
raise ConnectionError(f"Error while reading from socket: {ex.args}")
async def read_response(self) -> EncodableT:
if not self._stream or not self._reader:
self.on_disconnect()
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None
# _next_response might be cached from a can_read() call
if self._next_response is not False:
response = self._next_response
self._next_response = False
return response
response = self._reader.gets()
while response is False:
await self.read_from_socket()
response = self._reader.gets()
# if the response is a ConnectionError or the response is a list and
# the first item is a ConnectionError, raise it as something bad
# happened
if isinstance(response, ConnectionError):
raise response
elif (
isinstance(response, list)
and response
and isinstance(response[0], ConnectionError)
):
raise response[0]
return response
DefaultParser: Type[Union[PythonParser, HiredisParser]]
DefaultParser = HiredisParser if HIREDIS_AVAILABLE else PythonParser
class ConnectCallbackProtocol(Protocol):
def __call__(self, connection: "Connection"):
...
class AsyncConnectCallbackProtocol(Protocol):
async def __call__(self, connection: "Connection"):
...
ConnectCallbackT = Union[ConnectCallbackProtocol, AsyncConnectCallbackProtocol]
class Connection:
"""Manages TCP communication to and from a Redis server"""
__slots__ = (
"pid",
"host",
"port",
"db",
"username",
"client_name",
"password",
"socket_timeout",
"socket_connect_timeout",
"socket_keepalive",
"socket_keepalive_options",
"socket_type",
"retry_on_timeout",
"health_check_interval",
"next_health_check",
"last_active_at",
"encoder",
"ssl_context",
"_reader",
"_writer",
"_parser",
"_connect_callbacks",
"_buffer_cutoff",
"__dict__",
)
def __init__(
self,
*,
host: str = "localhost",
port: Union[str, int] = 6379,
db: Union[str, int] = 0,
password: str = None,
socket_timeout: float = None,
socket_connect_timeout: float = None,
socket_keepalive: bool = False,
socket_keepalive_options: dict = None,
socket_type: int = 0,
retry_on_timeout: bool = False,
encoding: str = "utf-8",
encoding_errors: str = "strict",
decode_responses: bool = False,
parser_class: Type[BaseParser] = DefaultParser,
socket_read_size: int = 65536,
health_check_interval: int = 0,
client_name: str = None,
username: str = None,
encoder_class: Type[Encoder] = Encoder,
):
self.pid = os.getpid()
self.host = host
self.port = int(port)
self.db = db
self.username = username
self.client_name = client_name
self.password = password
self.socket_timeout = socket_timeout
self.socket_connect_timeout = socket_connect_timeout or socket_timeout or None
self.socket_keepalive = socket_keepalive
self.socket_keepalive_options = socket_keepalive_options or {}
self.socket_type = socket_type
self.retry_on_timeout = retry_on_timeout
self.health_check_interval = health_check_interval
self.next_health_check = -1
self.ssl_context: Optional[RedisSSLContext] = None
self.encoder = encoder_class(encoding, encoding_errors, decode_responses)
self._reader: Optional[asyncio.StreamReader] = None
self._writer: Optional[asyncio.StreamWriter] = None
self._parser = parser_class(
socket_read_size=socket_read_size,
)
self._connect_callbacks: List[ConnectCallbackT] = []
self._buffer_cutoff = 6000
def __repr__(self):
repr_args = ",".join((f"{k}={v}" for k, v in self.repr_pieces()))
return f"{self.__class__.__name__}<{repr_args}>"
def repr_pieces(self):
pieces = [("host", self.host), ("port", self.port), ("db", self.db)]
if self.client_name:
pieces.append(("client_name", self.client_name))
return pieces
def __del__(self):
try:
if self.is_connected:
loop = asyncio.get_event_loop()
coro = self.disconnect()
if loop.is_running():
loop.create_task(coro)
else:
loop.run_until_complete(coro)
except Exception:
pass
@property
def is_connected(self):
return bool(self._reader and self._writer)
def register_connect_callback(self, callback):
self._connect_callbacks.append(callback)
def clear_connect_callbacks(self):
self._connect_callbacks = []
async def connect(self):
"""Connects to the Redis server if not already connected"""
if self.is_connected:
return
try:
await self._connect()
except asyncio.CancelledError:
raise
except (socket.timeout, asyncio.TimeoutError):
raise TimeoutError("Timeout connecting to server")
except OSError as e:
raise ConnectionError(self._error_message(e))
except Exception as exc:
raise ConnectionError(exc) from exc
try:
await self.on_connect()
except RedisError:
# clean up after any error in on_connect
await self.disconnect()
raise
# run any user callbacks. right now the only internal callback
# is for pubsub channel/pattern resubscription
for callback in self._connect_callbacks:
task = callback(self)
if task and inspect.isawaitable(task):
await task
async def _connect(self):
"""Create a TCP socket connection"""
async with async_timeout.timeout(self.socket_connect_timeout):
reader, writer = await asyncio.open_connection(
host=self.host,
port=self.port,
ssl=self.ssl_context.get() if self.ssl_context else None,
)
self._reader = reader
self._writer = writer
sock = writer.transport.get_extra_info("socket")
if sock is not None:
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
try:
# TCP_KEEPALIVE
if self.socket_keepalive:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
for k, v in self.socket_keepalive_options.items():
sock.setsockopt(socket.SOL_TCP, k, v)
except (OSError, TypeError):
# `socket_keepalive_options` might contain invalid options
# causing an error. Do not leave the connection open.
writer.close()
raise
def _error_message(self, exception):
# args for socket.error can either be (errno, "message")
# or just "message"
if len(exception.args) == 1:
return f"Error connecting to {self.host}:{self.port}. {exception.args[0]}."
else:
return (
f"Error {exception.args[0]} connecting to {self.host}:{self.port}. "
f"{exception.args[0]}."
)
async def on_connect(self):
"""Initialize the connection, authenticate and select a database"""
self._parser.on_connect(self)
# if username and/or password are set, authenticate
if self.username or self.password:
if self.username:
auth_args = (self.username, self.password or "")
else:
auth_args = (self.password,)
# avoid checking health here -- PING will fail if we try
# to check the health prior to the AUTH
await self.send_command("AUTH", *auth_args, check_health=False)
try:
auth_response = await self.read_response()
except AuthenticationWrongNumberOfArgsError:
# a username and password were specified but the Redis
# server seems to be < 6.0.0 which expects a single password
# arg. retry auth with just the password.
# https://github.com/andymccurdy/redis-py/issues/1274
await self.send_command("AUTH", self.password, check_health=False)
auth_response = await self.read_response()
if str_if_bytes(auth_response) != "OK":
raise AuthenticationError("Invalid Username or Password")
# if a client_name is given, set it
if self.client_name:
await self.send_command("CLIENT", "SETNAME", self.client_name)
if str_if_bytes(await self.read_response()) != "OK":
raise ConnectionError("Error setting client name")
# if a database is specified, switch to it
if self.db:
await self.send_command("SELECT", self.db)
if str_if_bytes(await self.read_response()) != "OK":
raise ConnectionError("Invalid Database")
async def disconnect(self):
"""Disconnects from the Redis server"""
try:
async with async_timeout.timeout(self.socket_connect_timeout):
self._parser.on_disconnect()
if not self.is_connected:
return
try:
if os.getpid() == self.pid:
self._writer.close()
# py3.6 doesn't have this method
if hasattr(self._writer, "wait_closed"):
await self._writer.wait_closed()
except OSError:
pass
self._reader = None
self._writer = None
except asyncio.TimeoutError:
raise TimeoutError(
f"Timed out closing connection after {self.socket_connect_timeout}"
) from None
async def check_health(self):
"""Check the health of the connection with a PING/PONG"""
if (
not self.health_check_interval
or asyncio.get_event_loop().time() <= self.next_health_check
):
return
try:
await self.send_command("PING", check_health=False)
if str_if_bytes(await self.read_response()) != "PONG":
raise ConnectionError("Bad response from PING health check")
except (ConnectionError, TimeoutError) as err:
await self.disconnect()
try:
await self.send_command("PING", check_health=False)
if str_if_bytes(await self.read_response()) != "PONG":
raise ConnectionError(
"Bad response from PING health check"
) from None
except BaseException as err2:
raise err2 from err
async def _send_packed_command(
self, command: Union[bytes, str, Iterable[Union[bytes, str]]]
):
self._writer.writelines(command)
await self._writer.drain()
async def send_packed_command(
self,
command: Union[bytes, str, Iterable[Union[bytes, str]]],
check_health: bool = True,
):
"""Send an already packed command to the Redis server"""
if not self._writer:
await self.connect()
# guard against health check recursion
if check_health:
await self.check_health()
try:
if isinstance(command, str):
command = command.encode()
if isinstance(command, bytes):
command = [command]
await asyncio.wait_for(
self._send_packed_command(command),
self.socket_timeout,
)
except asyncio.TimeoutError:
await self.disconnect()
raise TimeoutError("Timeout writing to socket") from None
except OSError as e:
await self.disconnect()
if len(e.args) == 1:
err_no, errmsg = "UNKNOWN", e.args[0]
else:
err_no = e.args[0]
errmsg = e.args[1]
raise ConnectionError(
f"Error {err_no} while writing to socket. {errmsg}."
) from e
except BaseException:
await self.disconnect()
raise
async def send_command(self, *args, **kwargs):
"""Pack and send a command to the Redis server"""
if not self.is_connected:
await self.connect()
await self.send_packed_command(
self.pack_command(*args), check_health=kwargs.get("check_health", True)
)
async def can_read(self, timeout: float = 0):
"""Poll the socket to see if there's data that can be read."""
if not self.is_connected:
await self.connect()
return await self._parser.can_read(timeout)
async def read_response(self):
"""Read the response from a previously sent command"""
try:
async with async_timeout.timeout(self.socket_timeout):
response = await self._parser.read_response()
except asyncio.TimeoutError:
await self.disconnect()
raise TimeoutError(f"Timeout reading from {self.host}:{self.port}")
except BaseException:
await self.disconnect()
raise
if self.health_check_interval:
self.next_health_check = (
asyncio.get_event_loop().time() + self.health_check_interval
)
if isinstance(response, ResponseError):
raise response from None
return response
def pack_command(self, *args: EncodableT) -> List[bytes]:
"""Pack a series of arguments into the Redis protocol"""
output = []
# the client might have included 1 or more literal arguments in
# the command name, e.g., 'CONFIG GET'. The Redis server expects these
# arguments to be sent separately, so split the first argument
# manually. These arguments should be bytestrings so that they are
# not encoded.
if isinstance(args[0], str):
args = tuple(args[0].encode().split()) + args[1:]
elif b" " in args[0]:
args = tuple(args[0].split()) + args[1:]
buff = SYM_EMPTY.join((SYM_STAR, str(len(args)).encode(), SYM_CRLF))
buffer_cutoff = self._buffer_cutoff
for arg in map(self.encoder.encode, args):
# to avoid large string mallocs, chunk the command into the
# output list if we're sending large values or memoryviews
arg_length = len(arg)
if (
len(buff) > buffer_cutoff
or arg_length > buffer_cutoff
or isinstance(arg, memoryview)
):
buff = SYM_EMPTY.join(
(buff, SYM_DOLLAR, str(arg_length).encode(), SYM_CRLF)
)
output.append(buff)
output.append(arg)
buff = SYM_CRLF
else:
buff = SYM_EMPTY.join(
(
buff,
SYM_DOLLAR,
str(arg_length).encode(),
SYM_CRLF,
arg,
SYM_CRLF,
)
)
output.append(buff)
return output
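# Example of the wire format produced by pack_command (a sketch):
#   pack_command("SET", "foo", "bar")
#     -> [b"*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n"]
# Small arguments are coalesced into one buffer; values larger than
# _buffer_cutoff (or memoryviews) are appended as separate chunks so they are
# never copied into the joined buffer.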
def pack_commands(self, commands: Iterable[Iterable[EncodableT]]) -> List[bytes]:
"""Pack multiple commands into the Redis protocol"""
output: List[bytes] = []
pieces: List[bytes] = []
buffer_length = 0
buffer_cutoff = self._buffer_cutoff
for cmd in commands:
for chunk in self.pack_command(*cmd):
chunklen = len(chunk)
if (
buffer_length > buffer_cutoff
or chunklen > buffer_cutoff
or isinstance(chunk, memoryview)
):
output.append(SYM_EMPTY.join(pieces))
buffer_length = 0
pieces = []
if chunklen > buffer_cutoff or isinstance(chunk, memoryview):
output.append(chunk)
else:
pieces.append(chunk)
buffer_length += chunklen
if pieces:
output.append(SYM_EMPTY.join(pieces))
return output
class SSLConnection(Connection):
def __init__(
self,
ssl_keyfile: str = None,
ssl_certfile: str = None,
ssl_cert_reqs: str = "required",
ssl_ca_certs: str = None,
ssl_check_hostname: bool = False,
**kwargs,
):
super().__init__(**kwargs)
self.ssl_context = RedisSSLContext(
keyfile=ssl_keyfile,
certfile=ssl_certfile,
cert_reqs=ssl_cert_reqs,
ca_certs=ssl_ca_certs,
check_hostname=ssl_check_hostname,
)
@property
def keyfile(self):
return self.ssl_context.keyfile
@property
def certfile(self):
return self.ssl_context.certfile
@property
def cert_reqs(self):
return self.ssl_context.cert_reqs
@property
def ca_certs(self):
return self.ssl_context.ca_certs
@property
def check_hostname(self):
return self.ssl_context.check_hostname
class RedisSSLContext:
__slots__ = (
"keyfile",
"certfile",
"cert_reqs",
"ca_certs",
"context",
"check_hostname",
)
def __init__(
self,
keyfile: str = None,
certfile: str = None,
cert_reqs: str = None,
ca_certs: str = None,
check_hostname: bool = False,
):
self.keyfile = keyfile
self.certfile = certfile
if cert_reqs is None:
self.cert_reqs = ssl.CERT_NONE
elif isinstance(cert_reqs, str):
CERT_REQS = {
"none": ssl.CERT_NONE,
"optional": ssl.CERT_OPTIONAL,
"required": ssl.CERT_REQUIRED,
}
if cert_reqs not in CERT_REQS:
raise RedisError(
f"Invalid SSL Certificate Requirements Flag: {cert_reqs}"
)
self.cert_reqs = CERT_REQS[cert_reqs]
self.ca_certs = ca_certs
self.check_hostname = check_hostname
self.context = None
def get(self) -> ssl.SSLContext:
if not self.context:
context = ssl.create_default_context()
context.check_hostname = self.check_hostname
context.verify_mode = self.cert_reqs
if self.certfile and self.keyfile:
context.load_cert_chain(certfile=self.certfile, keyfile=self.keyfile)
if self.ca_certs:
context.load_verify_locations(self.ca_certs)
self.context = context
return self.context
class UnixDomainSocketConnection(Connection): # lgtm [py/missing-call-to-init]
def __init__(
self,
*,
path: str = "",
db: Union[str, int] = 0,
username: str = None,
password: str = None,
socket_timeout: float = None,
socket_connect_timeout: float = None,
encoding: str = "utf-8",
encoding_errors: str = "strict",
decode_responses: bool = False,
retry_on_timeout: bool = False,
parser_class: Type[BaseParser] = DefaultParser,
socket_read_size: int = 65536,
health_check_interval: float = 0.0,
client_name=None,
):
self.pid = os.getpid()
self.path = path
self.db = db
self.username = username
self.client_name = client_name
self.password = password
self.socket_timeout = socket_timeout
self.socket_connect_timeout = socket_connect_timeout or socket_timeout or None
self.retry_on_timeout = retry_on_timeout
self.health_check_interval = health_check_interval
self.next_health_check = -1
self.encoder = Encoder(encoding, encoding_errors, decode_responses)
self._sock = None
self._reader = None
self._writer = None
self._parser = parser_class(socket_read_size=socket_read_size)
self._connect_callbacks = []
self._buffer_cutoff = 6000
def repr_pieces(self) -> Iterable[Tuple[str, Union[str, int]]]:
pieces = [
("path", self.path),
("db", self.db),
]
if self.client_name:
pieces.append(("client_name", self.client_name))
return pieces
async def _connect(self):
async with async_timeout.timeout(self.socket_connect_timeout):
reader, writer = await asyncio.open_unix_connection(path=self.path)
self._reader = reader
self._writer = writer
await self.on_connect()
def _error_message(self, exception):
# args for socket.error can either be (errno, "message")
# or just "message"
if len(exception.args) == 1:
return f"Error connecting to unix socket: {self.path}. {exception.args[0]}."
else:
return (
f"Error {exception.args[0]} connecting to unix socket: "
f"{self.path}. {exception.args[1]}."
)
FALSE_STRINGS = ("0", "F", "FALSE", "N", "NO")
def to_bool(value) -> Optional[bool]:
if value is None or value == "":
return None
if isinstance(value, str) and value.upper() in FALSE_STRINGS:
return False
return bool(value)
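# Illustrative mapping (a sketch, not part of the original module):
#   to_bool(None) -> None      to_bool("")      -> None
#   to_bool("0")  -> False     to_bool("FALSE") -> False
#   to_bool("1")  -> True      to_bool("yes")   -> True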
URL_QUERY_ARGUMENT_PARSERS = {
"db": int,
"socket_timeout": float,
"socket_connect_timeout": float,
"socket_keepalive": to_bool,
"retry_on_timeout": to_bool,
"max_connections": int,
"health_check_interval": int,
"ssl_check_hostname": to_bool,
}
class ConnectKwargs(TypedDict, total=False):
username: str
password: str
connection_class: Type[Connection]
host: str
port: int
db: int
def parse_url(url: str) -> ConnectKwargs:
parsed: ParseResult = urlparse(url)
kwargs: ConnectKwargs = {}
for name, value in parse_qs(parsed.query).items():
if value and len(value) > 0:
value = unquote(value[0])
parser = URL_QUERY_ARGUMENT_PARSERS.get(name)
if parser:
try:
kwargs[name] = parser(value)
except (TypeError, ValueError):
raise ValueError(f"Invalid value for `{name}` in connection URL.")
else:
kwargs[name] = value
if parsed.username:
kwargs["username"] = unquote(parsed.username)
if parsed.password:
kwargs["password"] = unquote(parsed.password)
# We only support redis://, rediss:// and unix:// schemes.
if parsed.scheme == "unix":
if parsed.path:
kwargs["path"] = unquote(parsed.path)
kwargs["connection_class"] = UnixDomainSocketConnection
elif parsed.scheme in ("redis", "rediss"):
if parsed.hostname:
kwargs["host"] = unquote(parsed.hostname)
if parsed.port:
kwargs["port"] = int(parsed.port)
# If there's a path argument, use it as the db argument if a
# querystring value wasn't specified
if parsed.path and "db" not in kwargs:
try:
kwargs["db"] = int(unquote(parsed.path).replace("/", ""))
except (AttributeError, ValueError):
pass
if parsed.scheme == "rediss":
kwargs["connection_class"] = SSLConnection
else:
valid_schemes = "redis://, rediss://, unix://"
raise ValueError(
f"Redis URL must specify one of the following schemes ({valid_schemes})"
)
return kwargs
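# Example of what parse_url produces (a sketch):
#   parse_url("redis://user:secret@localhost:6379/0?socket_timeout=5")
#     -> {"username": "user", "password": "secret", "host": "localhost",
#         "port": 6379, "db": 0, "socket_timeout": 5.0}
#   parse_url("unix:///tmp/redis.sock?db=2")
#     -> {"path": "/tmp/redis.sock", "db": 2,
#         "connection_class": UnixDomainSocketConnection}
#   parse_url("rediss://localhost") additionally sets
#   "connection_class": SSLConnection.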
_CP = TypeVar("_CP")
class ConnectionPool:
"""
Create a connection pool. If ``max_connections`` is set, then this
object raises :py:class:`~redis.ConnectionError` when the pool's
limit is reached.
By default, TCP connections are created unless ``connection_class``
is specified. Use :py:class:`~redis.UnixDomainSocketConnection` for
unix sockets.
Any additional keyword arguments are passed to the constructor of
``connection_class``.
"""
@classmethod
def from_url(cls: Type[_CP], url: str, **kwargs) -> _CP:
"""
Return a connection pool configured from the given URL.
For example::
redis://[[username]:[password]]@localhost:6379/0
rediss://[[username]:[password]]@localhost:6379/0
unix://[[username]:[password]]@/path/to/socket.sock?db=0
Three URL schemes are supported:
- `redis://` creates a TCP socket connection. See more at:
<https://www.iana.org/assignments/uri-schemes/prov/redis>
- `rediss://` creates an SSL-wrapped TCP socket connection. See more at:
<https://www.iana.org/assignments/uri-schemes/prov/rediss>
- ``unix://``: creates a Unix Domain Socket connection.
The username, password, hostname, path and all querystring values
are passed through urllib.parse.unquote in order to replace any
percent-encoded values with their corresponding characters.
There are several ways to specify a database number. The first value
found will be used:
1. A ``db`` querystring option, e.g. redis://localhost?db=0
2. If using the redis:// or rediss:// schemes, the path argument
of the url, e.g. redis://localhost/0
3. A ``db`` keyword argument to this function.
If none of these options are specified, the default db=0 is used.
All querystring options are cast to their appropriate Python types.
Boolean arguments can be specified with string values "True"/"False"
or "Yes"/"No". Values that cannot be properly cast cause a
``ValueError`` to be raised. Once parsed, the querystring arguments
and keyword arguments are passed to the ``ConnectionPool``'s
class initializer. In the case of conflicting arguments, querystring
arguments always win.
"""
url_options = parse_url(url)
kwargs.update(url_options)
return cls(**kwargs)
def __init__(
self,
connection_class: Type[Connection] = Connection,
max_connections: int = None,
**connection_kwargs,
):
max_connections = max_connections or 2 ** 31
if not isinstance(max_connections, int) or max_connections < 0:
raise ValueError('"max_connections" must be a positive integer')
self.connection_class = connection_class
self.connection_kwargs = connection_kwargs
self.max_connections = max_connections
# a lock to protect the critical section in _checkpid().
# this lock is acquired when the process id changes, such as
# after a fork. during this time, multiple threads in the child
# process could attempt to acquire this lock. the first thread
# to acquire the lock will reset the data structures and lock
# object of this pool. subsequent threads acquiring this lock
# will notice the first thread already did the work and simply
# release the lock.
self._fork_lock = threading.Lock()
self._lock = asyncio.Lock()
self._created_connections: int
self._available_connections: List[Connection]
self._in_use_connections: Set[Connection]
self.reset() # lgtm [py/init-calls-subclass]
self.encoder_class = self.connection_kwargs.get("encoder_class", Encoder)
def __repr__(self):
return (
f"{self.__class__.__name__}"
f"<{self.connection_class(**self.connection_kwargs)!r}>"
)
def reset(self):
self._lock = asyncio.Lock()
self._created_connections = 0
self._available_connections = []
self._in_use_connections = set()
# this must be the last operation in this method. while reset() is
# called when holding _fork_lock, other threads in this process
# can call _checkpid() which compares self.pid and os.getpid() without
# holding any lock (for performance reasons). keeping this assignment
# as the last operation ensures that those other threads will also
# notice a pid difference and block waiting for the first thread to
# release _fork_lock. when each of these threads eventually acquire
# _fork_lock, they will notice that another thread already called
# reset() and they will immediately release _fork_lock and continue on.
self.pid = os.getpid()
def _checkpid(self):
# _checkpid() attempts to keep ConnectionPool fork-safe on modern
# systems. this is called by all ConnectionPool methods that
# manipulate the pool's state such as get_connection() and release().
#
# _checkpid() determines whether the process has forked by comparing
# the current process id to the process id saved on the ConnectionPool
# instance. if these values are the same, _checkpid() simply returns.
#
# when the process ids differ, _checkpid() assumes that the process
# has forked and that we're now running in the child process. the child
# process cannot use the parent's file descriptors (e.g., sockets).
# therefore, when _checkpid() sees the process id change, it calls
# reset() in order to reinitialize the child's ConnectionPool. this
# will cause the child to make all new connection objects.
#
# _checkpid() is protected by self._fork_lock to ensure that multiple
# threads in the child process do not call reset() multiple times.
#
# there is an extremely small chance this could fail in the following
# scenario:
# 1. process A calls _checkpid() for the first time and acquires
# self._fork_lock.
# 2. while holding self._fork_lock, process A forks (the fork()
# could happen in a different thread owned by process A)
# 3. process B (the forked child process) inherits the
# ConnectionPool's state from the parent. that state includes
# a locked _fork_lock. process B will not be notified when
# process A releases the _fork_lock and will thus never be
# able to acquire the _fork_lock.
#
# to mitigate this possible deadlock, _checkpid() will only wait 5
# seconds to acquire _fork_lock. if _fork_lock cannot be acquired in
# that time it is assumed that the child is deadlocked and a
# redis.ChildDeadlockedError error is raised.
if self.pid != os.getpid():
acquired = self._fork_lock.acquire(timeout=5)
if not acquired:
raise ChildDeadlockedError
# reset() the instance for the new process if another thread
# hasn't already done so
try:
if self.pid != os.getpid():
self.reset()
finally:
self._fork_lock.release()
async def get_connection(self, command_name, *keys, **options):
"""Get a connection from the pool"""
self._checkpid()
async with self._lock:
try:
connection = self._available_connections.pop()
except IndexError:
connection = self.make_connection()
self._in_use_connections.add(connection)
try:
# ensure this connection is connected to Redis
await connection.connect()
# connections that the pool provides should be ready to send
# a command. if not, the connection was either returned to the
# pool before all data has been read or the socket has been
# closed. either way, reconnect and verify everything is good.
try:
if await connection.can_read():
raise ConnectionError("Connection has data") from None
except ConnectionError:
await connection.disconnect()
await connection.connect()
if await connection.can_read():
raise ConnectionError("Connection not ready") from None
except BaseException:
# release the connection back to the pool so that we don't
# leak it
await self.release(connection)
raise
return connection
def get_encoder(self):
"""Return an encoder based on encoding settings"""
kwargs = self.connection_kwargs
return self.encoder_class(
encoding=kwargs.get("encoding", "utf-8"),
encoding_errors=kwargs.get("encoding_errors", "strict"),
decode_responses=kwargs.get("decode_responses", False),
)
def make_connection(self):
"""Create a new connection"""
if self._created_connections >= self.max_connections:
raise ConnectionError("Too many connections")
self._created_connections += 1
return self.connection_class(**self.connection_kwargs)
async def release(self, connection: Connection):
"""Releases the connection back to the pool"""
self._checkpid()
async with self._lock:
try:
self._in_use_connections.remove(connection)
except KeyError:
# Gracefully fail when a connection is returned to this pool
# that the pool doesn't actually own
pass
if self.owns_connection(connection):
self._available_connections.append(connection)
else:
# pool doesn't own this connection. do not add it back
# to the pool and decrement the count so that another
# connection can take its place if needed
self._created_connections -= 1
await connection.disconnect()
return
def owns_connection(self, connection: Connection):
return connection.pid == self.pid
async def disconnect(self, inuse_connections: bool = True):
"""
Disconnects connections in the pool
If ``inuse_connections`` is True, disconnect connections that are
current in use, potentially by other tasks. Otherwise only disconnect
connections that are idle in the pool.
"""
self._checkpid()
async with self._lock:
if inuse_connections:
connections = chain(
self._available_connections, self._in_use_connections
)
else:
connections = self._available_connections
resp = await asyncio.gather(
*(connection.disconnect() for connection in connections),
return_exceptions=True,
)
exc = next((r for r in resp if isinstance(r, BaseException)), None)
if exc:
raise exc
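# Illustrative pool usage (a sketch, not part of the original module; must run
# inside an async function):
#   pool = ConnectionPool.from_url("redis://localhost:6379/0")
#   conn = await pool.get_connection("PING")
#   await conn.send_command("PING")
#   assert await conn.read_response() == b"PONG"
#   await pool.release(conn)
#   await pool.disconnect()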
class BlockingConnectionPool(ConnectionPool):
"""
Thread-safe blocking connection pool::
>>> from aioredis.client import Redis
>>> client = Redis(connection_pool=BlockingConnectionPool())
It performs the same function as the default
:py:class:`~redis.ConnectionPool` implementation, in that,
it maintains a pool of reusable connections that can be shared by
multiple redis clients (safely across threads if required).
The difference is that, in the event that a client tries to get a
connection from the pool when all of the connections are in use, rather than
raising a :py:class:`~redis.ConnectionError` (as the default
:py:class:`~redis.ConnectionPool` implementation does), it
makes the client wait ("blocks") for a specified number of seconds until
a connection becomes available.
Use ``max_connections`` to increase / decrease the pool size::
>>> pool = BlockingConnectionPool(max_connections=10)
Use ``timeout`` to tell it either how many seconds to wait for a connection
to become available, or to block forever:
>>> # Block forever.
>>> pool = BlockingConnectionPool(timeout=None)
>>> # Raise a ``ConnectionError`` after five seconds if a connection is
>>> # not available.
>>> pool = BlockingConnectionPool(timeout=5)
"""
def __init__(
self,
max_connections: int = 50,
timeout: Optional[int] = 20,
connection_class: Type[Connection] = Connection,
queue_class: Type[asyncio.Queue] = asyncio.LifoQueue,
**connection_kwargs,
):
self.queue_class = queue_class
self.timeout = timeout
self._connections: List[Connection]
super().__init__(
connection_class=connection_class,
max_connections=max_connections,
**connection_kwargs,
)
def reset(self):
# Create and fill up a thread safe queue with ``None`` values.
self.pool = self.queue_class(self.max_connections)
while True:
try:
self.pool.put_nowait(None)
except asyncio.QueueFull:
break
# Keep a list of actual connection instances so that we can
# disconnect them later.
self._connections = []
# this must be the last operation in this method. while reset() is
# called when holding _fork_lock, other threads in this process
# can call _checkpid() which compares self.pid and os.getpid() without
# holding any lock (for performance reasons). keeping this assignment
# as the last operation ensures that those other threads will also
# notice a pid difference and block waiting for the first thread to
# release _fork_lock. when each of these threads eventually acquire
# _fork_lock, they will notice that another thread already called
# reset() and they will immediately release _fork_lock and continue on.
self.pid = os.getpid()
def make_connection(self):
"""Make a fresh connection."""
connection = self.connection_class(**self.connection_kwargs)
self._connections.append(connection)
return connection
async def get_connection(self, command_name, *keys, **options):
"""
Get a connection, blocking for ``self.timeout`` until a connection
is available from the pool.
If the connection returned is ``None``, a new connection is created.
Because we use a last-in first-out queue, the existing connections
(having been returned to the pool after the initial ``None`` values
were added) will be returned before ``None`` values. This means we only
create new connections when we need to, i.e.: the actual number of
connections will only increase in response to demand.
"""
# Make sure we haven't changed process.
self._checkpid()
# Try and get a connection from the pool. If one isn't available within
# self.timeout then raise a ``ConnectionError``.
connection = None
try:
async with async_timeout.timeout(self.timeout):
connection = await self.pool.get()
except (asyncio.QueueEmpty, asyncio.TimeoutError):
# Note that this is not caught by the redis client and will be
# raised unless handled by application code. If you never want to
# hit this, construct the pool with ``timeout=None`` so ``get()``
# blocks until a connection becomes available.
raise ConnectionError("No connection available.")
# If the ``connection`` is actually ``None`` then that's a cue to make
# a new connection to add to the pool.
if connection is None:
connection = self.make_connection()
try:
# ensure this connection is connected to Redis
await connection.connect()
# connections that the pool provides should be ready to send
# a command. if not, the connection was either returned to the
# pool before all data has been read or the socket has been
# closed. either way, reconnect and verify everything is good.
try:
if await connection.can_read():
raise ConnectionError("Connection has data") from None
except ConnectionError:
await connection.disconnect()
await connection.connect()
if await connection.can_read():
raise ConnectionError("Connection not ready") from None
except BaseException:
# release the connection back to the pool so that we don't leak it
await self.release(connection)
raise
return connection
async def release(self, connection: Connection):
"""Releases the connection back to the pool."""
# Make sure we haven't changed process.
self._checkpid()
if not self.owns_connection(connection):
# pool doesn't own this connection. do not add it back
# to the pool. instead add a None value which is a placeholder
# that will cause the pool to recreate the connection if
# its needed.
await connection.disconnect()
self.pool.put_nowait(None)
return
# Put the connection back into the pool.
try:
self.pool.put_nowait(connection)
except asyncio.QueueFull:
# perhaps the pool has been reset() after a fork? regardless,
# we don't want this connection
pass
async def disconnect(self, inuse_connections: bool = True):
"""Disconnects all connections in the pool."""
self._checkpid()
async with self._lock:
resp = await asyncio.gather(
*(connection.disconnect() for connection in self._connections),
return_exceptions=True,
)
exc = next((r for r in resp if isinstance(r, BaseException)), None)
if exc:
raise exc
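# --- Usage sketch (not part of the original module) --------------------------
# A hedged example of how this pool is typically wired into the asyncio Redis
# client. The import path and client class name are assumptions and may differ
# depending on the package layout of this particular fork.
#
#     from redis.asyncio import BlockingConnectionPool, Redis
#
#     async def example():
#         pool = BlockingConnectionPool(max_connections=10, timeout=5)
#         client = Redis(connection_pool=pool)
#         try:
#             await client.ping()
#         finally:
#             await pool.disconnect()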
|
# BPlusTree with Python https://github.com/Nero5023/bplustree/tree/main/bplus_tree
import pandas as pd
import bisect
import math
def flatten(l):
return [y for x in l for y in x]
class Leaf:
def __init__(self, previous_leaf, next_leaf, parent, b_factor):
self.previous = previous_leaf
self.next = next_leaf
self.parent = parent
self.b_factor = b_factor
self.a_factor = math.ceil(b_factor/2)
self.keys = []
self.children = []
@property
def is_root(self):
return self.parent is None
def insert(self, key, value):
index = bisect.bisect_left(self.keys, key)
if index < len(self.keys) and self.keys[index] == key:
self.children[index].append(value)
else:
self.keys.insert(index, key)
self.children.insert(index, [value])
if len(self.keys) > self.b_factor:
split_index = math.ceil(self.b_factor/2)
self.split(split_index)
def get(self, key):
index = bisect.bisect_left(self.keys, key)
if index < len(self.keys) and self.keys[index] == key:
return self.children[index]
else:
return None
def split(self, index):
new_leaf_node = Leaf(self, self.next, self.parent, self.b_factor)
new_leaf_node.keys = self.keys[index:]
new_leaf_node.children = self.children[index:]
self.keys = self.keys[:index]
self.children = self.children[:index]
if self.next is not None:
self.next.previous = new_leaf_node
self.next = new_leaf_node
if self.is_root:
self.parent = Node(None, None, [new_leaf_node.keys[0]], [self, self.next], b_factor=self.b_factor, parent=None)
self.next.parent = self.parent
else:
self.parent.add_child(self.next.keys[0], self.next)
def find_left(self, key, include_key=True):
items = []
index = bisect.bisect_right(self.keys, key) - 1
if index == -1:
items = []
else:
if include_key:
items = self.children[:index+1]
else:
if key == self.keys[index]:
index -= 1
items = self.children[:index+1]
return self.left_items() + flatten(items)
def find_right(self, key, include_key=True):
items = []
index = bisect.bisect_left(self.keys, key)
if index == len(self.keys):
items = []
else:
if include_key:
items = self.children[index:]
else:
if key == self.keys[index]:
index += 1
items = self.children[index:]
return flatten(items) + self.right_items()
def left_items(self):
items = []
node = self
while node.previous is not None:
node = node.previous
while node != self:
for elem in node.children:
if type(elem) == list:
items.extend(elem)
else:
items.append(elem)
node = node.next
return items
def right_items(self):
items = []
node = self.next
while node is not None:
for elem in node.children:
if type(elem) == list:
items.extend(elem)
else:
items.append(elem)
node = node.next
return items
def items(self):
return zip(self.keys, self.children)
# Node in BTree
class Node:
def __init__(self, previous_node, next_node, keys, children, b_factor, parent=None):
self.previous = previous_node
self.next = next_node
self.keys = keys
self.children = children
self.b_factor = b_factor
self.a_factor = math.ceil(b_factor / 2)
self.parent = parent
@property
def degree(self):
return len(self.children)
@property
def is_root(self):
return self.parent is None
def insert(self, key, value):
index = bisect.bisect_right(self.keys, key)
node = self.children[index]
node.insert(key, value)
def get(self, key):
index = bisect.bisect_right(self.keys, key)
return self.children[index].get(key)
def find_left(self, key, include_key=True):
index = bisect.bisect_right(self.keys, key)
return self.children[index].find_left(key, include_key)
def find_right(self, key, include_key=True):
index = bisect.bisect_right(self.keys, key)
return self.children[index].find_right(key, include_key)
def add_child(self, key, child):
index = bisect.bisect_right(self.keys, key)
self.keys.insert(index, key)
self.children.insert(index+1, child)
if self.degree > self.b_factor:
split_index = math.floor(self.b_factor / 2)
self.split(split_index)
def split(self, index):
split_key = self.keys[index]
new_node = Node(self, self.next, self.keys[index+1:], self.children[index+1:], self.b_factor, self.parent)
for node in self.children[index+1:]:
node.parent = new_node
self.keys = self.keys[:index]
self.children = self.children[:index+1]
if self.next is not None:
self.next.previous = new_node
self.next = new_node
if self.is_root:
self.parent = Node(None, None, [split_key], [self, self.next], b_factor=self.b_factor, parent=None)
self.next.parent = self.parent
else:
self.parent.add_child(split_key, self.next)
# BPlusTree Class
class BPlusTree:
def __init__(self, b_factor=32):
self.b_factor = b_factor
self.root = Leaf(None, None, None, b_factor)
self.size = 0
def get(self, key):
return self.root.get(key)
def __getitem__(self, key):
return self.get(key)
def __len__(self):
return self.size
def build(self, keys, values):
if len(keys) != len(values):
return
for ind in range(len(keys)):
# print(Item(keys[ind]))
# print(values[ind])
self.insert(keys[ind], values[ind])
def predict(self, key):
search_result = self.get(key)
return search_result
def insert(self, key, value):
self.root.insert(key, value)
self.size += 1
if self.root.parent is not None:
self.root = self.root.parent
def range_search(self, notation, cmp_key):
notation = notation.strip()
if notation not in [">", "<", ">=", "<="]:
            raise Exception("Unsupported notation: {}. Only '>' '<' '>=' '<=' are supported".format(notation))
if notation == '>':
return self.root.find_right(cmp_key, False)
if notation == '>=':
return self.root.find_right(cmp_key, True)
if notation == '<':
return self.root.find_left(cmp_key, False)
if notation == '<=':
return self.root.find_left(cmp_key, True)
def search(self, notation, cmp_key):
notation = notation.strip()
if notation not in [">", "<", ">=", "<=", "==", "!="]:
            raise Exception("Unsupported notation: {}. Only '>' '<' '>=' '<=' '==' '!=' are supported".format(notation))
if notation == '==':
res = self.get(cmp_key)
if res is None:
return []
else:
return res
if notation == '!=':
return self.root.find_left(cmp_key, False) + self.root.find_right(cmp_key, False)
return self.range_search(notation, cmp_key)
def show(self):
layer = 0
node = self.root
while node is not None:
print("Layer: {}".format(layer))
inner_node = node
while inner_node is not None:
print(inner_node.keys, end=' ')
inner_node = inner_node.next
print('')
node = node.children[0]
layer += 1
if type(node) != Leaf and type(node) != Node:
break
def leftmost_leaf(self):
leaf = self.root
while type(leaf) != Leaf:
leaf = leaf.children[0]
return leaf
def items(self):
leaf = self.leftmost_leaf()
items = []
while leaf is not None:
pairs = list(leaf.items())
items.extend(pairs)
leaf = leaf.next
return items
def keys(self):
leaf = self.leftmost_leaf()
ks = []
while leaf is not None:
ks.extend(leaf.keys)
leaf = leaf.next
return ks
def values(self):
leaf = self.leftmost_leaf()
vals = []
while leaf is not None:
for elem in leaf.children:
if type(elem) == list:
vals.extend(elem)
else:
vals.append(elem)
leaf = leaf.next
return vals
def height(self):
node = self.root
height = 0
while type(node) != Leaf:
height += 1
node = node.children[0]
return height
# Value in Node
class Item():
def __init__(self, k, v):
self.k = k
self.v = v
def __gt__(self, other):
if self.k > other.k:
return True
else:
return False
def __ge__(self, other):
if self.k >= other.k:
return True
else:
return False
def __eq__(self, other):
if self.k == other.k:
return True
else:
return False
def __le__(self, other):
if self.k <= other.k:
return True
else:
return False
def __lt__(self, other):
if self.k < other.k:
return True
else:
return False
# For Test
def b_plus_tree_main():
t = BPlusTree(32)
nums = [55,44,65,16,80,74,14,19,95,36,2,90,74,94,27,89,85]
for x in nums:
t.insert(x, x)
print(t.items())
for ni in t.items():
print(ni)
if ni is None:
continue
item = {"key": ni[0], "value": ni[1][0]}
print(item)
if __name__ == '__main__':
b_plus_tree_main()
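# Range-query sketch (not part of the original module): shows how the
# ``search`` notations behave on a small tree. All numbers are illustrative.
def range_search_demo():
    t = BPlusTree(4)
    for x in [10, 20, 30, 40, 50]:
        t.insert(x, x)
    # values with key >= 30, i.e. [30, 40, 50]
    ge_30 = t.search('>=', 30)
    # values with key < 30, i.e. [10, 20]
    lt_30 = t.search('<', 30)
    # everything except key == 30, i.e. [10, 20, 40, 50]
    ne_30 = t.search('!=', 30)
    return ge_30, lt_30, ne_30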
|
from .utils import Atom, Residue, ActiveSite
import matplotlib.pyplot as plt
import numpy as np
from .helpers import *
from Bio import pairwise2
import rmsd
from sklearn.decomposition import PCA
import networkx as nx
from networkx.drawing.nx_agraph import graphviz_layout
def compute_similarity(site_a, site_b):
"""
Compute the similarity between two given ActiveSite instances.
Input: two ActiveSite instances
Output: the similarity between them (a floating point number)
"""
# Get strings of single letter aa residues
s_a = output_aa_string(site_a.residues)
s_b = output_aa_string(site_b.residues)
# Align strings using local alignment algorithm which relies
# on dynamic programming to compute all possible alignments and
# returns the highest scoring alignment.
# Local alignment aims to find the max alignment for substrings
# of two larger strings.
# Matches = +1
# Mismatches, gaps = +0
alignments = pairwise2.align.localxx(s_a, s_b) # perform alignment
if len(alignments) == 0: return float("inf") # return INF if no alignment found
align_a, align_b, s = alignments[0][:3] # extract first alignment
    # Output indices where residues in the alignment match
inds_a, inds_b = match(align_a, align_b)
if len(inds_a) < 2: return float("inf")
# Create matrix of coordinates for atom CA
V = create_coord_matrix(site_a, inds_a)
W = create_coord_matrix(site_b, inds_b)
# Center and rotate Ca matrices then calculate Root-Mean-Square-Deviation (RMSD)
# It measures the average distance between backbone atoms of two
# superimposed proteins.
# The greater the RMSD, the less similar the proteins are.
# A RMSD equal to 0 represents identical proteins.
# Each protein is a matrix containing x, y, and z coordinates for each CA atom
# The rows of the two matrices are matching residues obtained from the alignment
# To minimize RMSD you must first center the coordinates on the origin so the
# two vectors can be near each other.
V -= rmsd.centroid(V)
W -= rmsd.centroid(W)
# Then find the optimal rotation for matrix W that aligns it best with V
    # This is the Kabsch algorithm, which works by calculating a covariance matrix
# and then finding the singular value decomposition (SVD) of the cov. matrix
# Last, find the optimal rotation matrix which is the dot product of V and W
# optimized by lowest RMSD
return rmsd.kabsch_rmsd(V,W)
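# Toy example (not part of the original module): the centering/Kabsch steps
# used above applied to two tiny coordinate sets, one being a rotated copy of
# the other. Relies on the same ``rmsd`` package already imported; the
# coordinates are arbitrary illustrations.
def _kabsch_rmsd_toy_example():
    V = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    # W is V rotated 90 degrees about the z axis
    W = np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [-1.0, 0.0, 0.0]])
    V = V - rmsd.centroid(V)
    W = W - rmsd.centroid(W)
    # Should be ~0 because the point sets differ only by a rotation.
    return rmsd.kabsch_rmsd(V, W)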
def output_similarity_matrix(active_sites):
"""
Calculate RMSD for all pairwise active sites. This distance measure
is converted into a similarity metric by dividing by the max element and
subtracting 1
Input: list of active sites from PDB files
Output: similarity matrix for active sites
"""
# Create empty pairwise matrix
mat = np.empty([len(active_sites), len(active_sites)])
# For every pair calculate the RMSD
for (x,y), value in np.ndenumerate(mat):
mat[x][y] = compute_similarity(active_sites[x], active_sites[y])
    # Infinite values mean the proteins had fewer than two matching residues; set them to None
mat[np.isinf(mat)] = None
# Find max value in array for normalization
max_val = np.nanmax(mat)
# Make none values max value
mat[np.isnan(mat)] = max_val
# Get normalized dissimilarity matrix
norm_mat = mat/max_val
# Convert dissimilarity matrix to similarity by subtracting 1
norm_mat_sim = 1 - norm_mat
return norm_mat_sim
def cluster_by_partitioning(active_sites,k):
"""
Cluster a given set of ActiveSite instances using a partitioning method.
    Input: a list of ActiveSite instances and the number of clusters k
Output: a clustering of ActiveSite instances
(this is really a list of clusters, each of which is list of
ActiveSite instances)
"""
cost_max = float("-inf")
mat = output_similarity_matrix(active_sites)
# randomly choose k medoids
centers = initialize_k_mediods(mat, k)
# assign elements to cluster medoid with max similarity
clusters = assign_k_clusters(mat, centers)
# calculate cost of clustering (sum of similarity of points to cluster)
cost = calculate_cost(mat, centers, clusters)
# iterate until cost does not increase
while cost_max < cost:
cost_max = cost
# Loop through medoids and all elements not in medoids
for i in range(0, len(centers)):
m = centers[i]
for o in range(len(active_sites)):
if o != m:
# replace medoid with element and re-calculate clusters
# and cost
centers[i] = o
clusters_temp = assign_k_clusters(mat, centers)
cost_swap = calculate_cost(mat, centers, clusters_temp)
# if cost increases then replace clusters
if cost_swap > cost:
cost = cost_swap
clusters = clusters_temp
# if cost decreases or stays the same leave center
else: centers[i] = m
return output_cluster_list(active_sites, clusters)
def cluster_hierarchically(active_sites,k):
"""
    Cluster the given set of ActiveSite instances using a hierarchical algorithm.
    Input: a list of ActiveSite instances and the target number of clusters k
Output: a list of clusterings
(each clustering is a list of lists of Sequence objects)
"""
# Create similarity matrix
    mat_original = output_similarity_matrix(active_sites)
    mat = mat_original.copy()
# Fill diagonals with -infinity
np.fill_diagonal(mat, float("-inf"))
# Create cluster array to keep track of number of clusters
vals = [np.array([v]) for v in range(len(active_sites))]
keys = np.arange(0,len(active_sites))
clusters = dict(zip(keys, vals))
all_clusters = []
all_clusters.append(output_cluster_list(active_sites, clusters.values()))
    # Merge the most similar clusters until only k clusters remain
while len(clusters) > k:
# Get most similar clusters
i,j = np.unravel_index(mat.argmax(), mat.shape)
# Get two clusters
c_i = clusters.get(i)
c_j = clusters.get(j)
# Add new combined cluster
c_new = list(clusters.keys())[-1]+1
clusters[c_new] = np.append(c_i, c_j)
# Add new row/column to similarity matrix
new_dist = dist_HC(active_sites, clusters,c_new, mat_original)
new_col = np.append(new_dist, float("-inf"))
mat = np.vstack([mat, new_dist])
mat = np.column_stack([mat, new_col])
        # Overwrite the rows/columns of the two merged elements with negative
        # infinity so they are never selected again
mat[i], mat[j] = float("-inf"), float("-inf")
mat[:,j], mat[:,i] = float("-inf"), float("-inf")
# Drop most similar elements from cluster
clusters.pop(i)
clusters.pop(j)
all_clusters.append(output_cluster_list(active_sites, clusters.values()))
return all_clusters
|
# -*- coding: utf-8 -*-
#
# Read the Docs Template documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 26 14:19:49 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# If this is not inserted at the beginning of the list, our 'jira' ticket
# extension is hidden by the system 'jira' API, if it exists.
sys.path.insert(0, os.path.abspath('extensions'))
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'src', 'condor_tests'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'ticket',
'macro',
'macro-def',
'jira',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'HTCondor Manual'
copyright = u'1990-2020, Center for High Throughput Computing, Computer \
Sciences Department, University of Wisconsin-Madison, Madison, WI, US. \
Licensed under the Apache License, Version 2.0.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '9.3'
# The full version, including alpha/beta/rc tags.
release = '9.3.0'
rst_epilog = """
.. |release_date| replace:: Month Day, 2021
"""
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'extensions', 'utils']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'colorful'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('man-pages/bosco_cluster', 'bosco_cluster', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/bosco_findplatform', 'bosco_findplatform', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/bosco_install', 'bosco_install', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/bosco_ssh_start', 'bosco_ssh_start', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/bosco_start', 'bosco_start', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/bosco_stop', 'bosco_stop', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/bosco_uninstall', 'bosco_uninstall', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/classad_eval', 'classad_eval', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/classads', 'classads', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_adstash', 'condor_adstash', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_advertise', 'condor_advertise', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_annex', 'condor_annex', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_check_password', 'condor_check_password', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_check_userlogs', 'condor_check_userlogs', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_chirp', 'condor_chirp', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_configure', 'condor_configure', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_config_val', 'condor_config_val', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_continue', 'condor_continue', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_dagman', 'condor_dagman', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_drain', 'condor_drain', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_evicted_files', 'condor_evicted_files', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_fetchlog', 'condor_fetchlog', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_findhost', 'condor_findhost', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_gather_info', 'condor_gather_info', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_gpu_discovery', 'condor_gpu_discovery', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_history', 'condor_history', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_hold', 'condor_hold', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_install', 'condor_install', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_job_router_info', 'condor_job_router_info', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_master', 'condor_master', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_now', 'condor_now', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_off', 'condor_off', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_on', 'condor_on', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_ping', 'condor_ping', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_pool_job_report', 'condor_pool_job_report', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_power', 'condor_power', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_preen', 'condor_preen', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_prio', 'condor_prio', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_procd', 'condor_procd', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_q', 'condor_q', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_qedit', 'condor_qedit', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_qsub', 'condor_qsub', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_reconfig', 'condor_reconfig', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_release', 'condor_release', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_reschedule', 'condor_reschedule', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_restart', 'condor_restart', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_rm', 'condor_rm', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_rmdir', 'condor_rmdir', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_router_history', 'condor_router_history', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_router_q', 'condor_router_q', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_router_rm', 'condor_router_rm', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_run', 'condor_run', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_set_shutdown', 'condor_set_shutdown', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_sos', 'condor_sos', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_ssh_to_job', 'condor_ssh_to_job', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_stats', 'condor_stats', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_status', 'condor_status', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_store_cred', 'condor_store_cred', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_submit', 'condor_submit', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_submit_dag', 'condor_submit_dag', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_suspend', 'condor_suspend', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_tail', 'condor_tail', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_token_create', 'condor_token_create', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_token_fetch', 'condor_token_fetch', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_token_list', 'condor_token_list', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_token_request', 'condor_token_request', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_token_request_approve', 'condor_token_request_approve', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_token_request_auto_approve', 'condor_token_request_auto_approve', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_token_request_list', 'condor_token_request_list', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_top', 'condor_top', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_transfer_data', 'condor_transfer_data', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_transform_ads', 'condor_transform_ads', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_update_machine_ad', 'condor_update_machine_ad', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_updates_stats', 'condor_updates_stats', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_urlfetch', 'condor_urlfetch', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_userlog', 'condor_userlog', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_userprio', 'condor_userprio', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_vacate', 'condor_vacate', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_vacate_job', 'condor_vacate_job', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_version', 'condor_version', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_wait', 'condor_wait', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_watch_q', 'condor_watch_q', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/condor_who', 'condor_who', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/get_htcondor', 'get_htcondor', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/gidd_alloc', 'gidd_alloc', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/htcondor', 'htcondor', u'HTCondor Manual', [u'HTCondor Team'], 1),
('man-pages/procd_ctl', 'procd_ctl', u'HTCondor Manual', [u'HTCondor Team'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- conf.py contains common configuration and man pages configuration
# -- full_conf.py contains configuration for the whole manual
sys.path.append(os.path.dirname(__file__))
MANPAGES = os.environ.get('MANPAGES') == 'True'
if not MANPAGES:
from full_conf import *
|
"""
Transforms of aocd raw input text to something more useful for speed-solving.
Every function here needs to accept one positional argument and return the
'massaged' data.
"""
__all__ = ["lines", "numbers"]
def lines(data):
return data.splitlines()
def numbers(data):
return [int(n) for n in data.splitlines()]
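# Examples (illustrative):
#     lines("1721\n979\n366")   -> ["1721", "979", "366"]
#     numbers("1721\n979\n366") -> [1721, 979, 366]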
|
from django.conf.urls import url
from . import views
from . import views_book
from . import views_sysinfo
urlpatterns = [
    ######################### Book information #####################
# url(r'test', views.test),
url(r'books', views_book.query),
url(r'book/edit', views_book.edit),
    # URL parameter style 1
#url(r'^book/(\d+)$', views_book.queryOneBook),
    # URL parameter style 2: the named group bookId must match the view function's parameter name
url(r'^book/(?P<bookId>\d+)$', views_book.queryOneBook),
url(r'book/del/(?P<bookId>\d+)$', views_book.delBook),
url(r'book/add', views_book.addBook),
    ######################### System monitoring #########################
url(r'sys/cntByCountry', views_sysinfo.listContries),
url(r'sys/cntByCategory', views_sysinfo.countBookByCategory),
    # Default (catch-all) match
url(r'', views.index),
]
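# Example (illustrative): a request to /book/12 matches the named-group pattern
# above and calls views_book.queryOneBook(request, bookId='12'); /book/del/12
# likewise reaches views_book.delBook(request, bookId='12').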
|
# -*- coding: utf-8 -*-
#
# osc2rtmidi/device.py
#
"""MIDI device abstraction classes."""
import logging
import time
from rtmidi.midiutil import open_midioutput
__all__ = ("RtMidiDevice",)
log = logging.getLogger(__name__)
class RtMidiDevice(object):
"""Provides a common API for different MIDI driver implementations."""
def __init__(self, name="RtMidiDevice", port=None, portname=None):
self.name = name
self.port = port
self.portname = portname
self._output = None
def __str__(self):
return self.portname
def open_output(self):
self._output, self.portname = open_midioutput(self.port, interactive=False,
client_name=self.name, use_virtual=True)
def close_output(self):
if self._output is not None:
self._output.close_port()
def send(self, events):
if self._output:
for ev in events:
self._output.send_message(ev)
def send_sysex(self, msg):
if self._output:
self._output.send_message([ord(c) for c in msg])
@classmethod
def time(cls):
return time.time() / 1000.
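# Usage sketch (not part of the original module): send a note-on/note-off pair
# through a virtual output port opened by this wrapper. Channel, note and
# timing values are arbitrary examples.
#
#     dev = RtMidiDevice(name="osc2rtmidi-example")
#     dev.open_output()
#     dev.send([[0x90, 60, 100]])   # note on, middle C, velocity 100
#     time.sleep(0.5)
#     dev.send([[0x80, 60, 0]])     # note off
#     dev.close_output()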
|
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2009 Daniel Bates (dbates@intudata.com). All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import errno
import signal
import subprocess
import sys
import time
# Since we execute this script directly as part of the unit tests, we need to ensure
# that Tools/Scripts is in sys.path for the next imports to work correctly.
script_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
if script_dir not in sys.path:
sys.path.append(script_dir)
third_party_py = os.path.join(script_dir, "webkitpy", "thirdparty", "autoinstalled")
if third_party_py not in sys.path:
sys.path.append(third_party_py)
import unittest2 as unittest
from webkitpy.common.system.executive import Executive, ScriptError
from webkitpy.common.system.filesystem_mock import MockFileSystem
class ScriptErrorTest(unittest.TestCase):
def test_message_with_output(self):
error = ScriptError('My custom message!', '', -1)
self.assertEqual(error.message_with_output(), 'My custom message!')
error = ScriptError('My custom message!', '', -1, 'My output.')
self.assertEqual(error.message_with_output(), 'My custom message!\n\nMy output.')
error = ScriptError('', 'my_command!', -1, 'My output.', '/Users/username/blah')
self.assertEqual(error.message_with_output(), 'Failed to run "\'my_command!\'" exit_code: -1 cwd: /Users/username/blah\n\nMy output.')
error = ScriptError('', 'my_command!', -1, 'ab' + '1' * 499)
self.assertEqual(error.message_with_output(), 'Failed to run "\'my_command!\'" exit_code: -1\n\nLast 500 characters of output:\nb' + '1' * 499)
def test_message_with_tuple(self):
error = ScriptError('', ('my', 'command'), -1, 'My output.', '/Users/username/blah')
self.assertEqual(error.message_with_output(), 'Failed to run "(\'my\', \'command\')" exit_code: -1 cwd: /Users/username/blah\n\nMy output.')
def never_ending_command():
"""Arguments for a command that will never end (useful for testing process
killing). It should be a process that is unlikely to already be running
because all instances will be killed."""
if sys.platform == 'win32':
return ['wmic']
return ['yes']
def command_line(cmd, *args):
return [sys.executable, __file__, '--' + cmd] + list(args)
class ExecutiveTest(unittest.TestCase):
    def assert_interpreter_for_content(self, interpreter, content):
fs = MockFileSystem()
tempfile, temp_name = fs.open_binary_tempfile('')
tempfile.write(content)
tempfile.close()
file_interpreter = Executive.interpreter_for_script(temp_name, fs)
        self.assertEqual(file_interpreter, interpreter)
def test_interpreter_for_script(self):
self.assert_interpreter_for_content(None, '')
self.assert_interpreter_for_content(None, 'abcd\nefgh\nijklm')
self.assert_interpreter_for_content(None, '##/usr/bin/perl')
self.assert_interpreter_for_content('perl', '#!/usr/bin/env perl')
self.assert_interpreter_for_content('perl', '#!/usr/bin/env perl\nfirst\nsecond')
self.assert_interpreter_for_content('perl', '#!/usr/bin/perl')
self.assert_interpreter_for_content('perl', '#!/usr/bin/perl -w')
self.assert_interpreter_for_content(sys.executable, '#!/usr/bin/env python')
self.assert_interpreter_for_content(sys.executable, '#!/usr/bin/env python\nfirst\nsecond')
self.assert_interpreter_for_content(sys.executable, '#!/usr/bin/python')
self.assert_interpreter_for_content('ruby', '#!/usr/bin/env ruby')
self.assert_interpreter_for_content('ruby', '#!/usr/bin/env ruby\nfirst\nsecond')
self.assert_interpreter_for_content('ruby', '#!/usr/bin/ruby')
def test_run_command_with_bad_command(self):
def run_bad_command():
Executive().run_command(["foo_bar_command_blah"], error_handler=Executive.ignore_error, return_exit_code=True)
self.assertRaises(OSError, run_bad_command)
def test_run_command_args_type(self):
executive = Executive()
self.assertRaises(AssertionError, executive.run_command, "echo")
self.assertRaises(AssertionError, executive.run_command, u"echo")
executive.run_command(command_line('echo', 'foo'))
executive.run_command(tuple(command_line('echo', 'foo')))
def test_auto_stringify_args(self):
executive = Executive()
executive.run_command(command_line('echo', 1))
executive.popen(command_line('echo', 1), stdout=executive.PIPE).wait()
self.assertEqual('echo 1', executive.command_for_printing(['echo', 1]))
def test_popen_args(self):
executive = Executive()
        # Explicitly naming the 'args' argument should not throw an exception.
executive.popen(args=command_line('echo', 1), stdout=executive.PIPE).wait()
def test_run_command_with_unicode(self):
"""Validate that it is safe to pass unicode() objects
to Executive.run* methods, and they will return unicode()
objects by default unless decode_output=False"""
unicode_tor_input = u"WebKit \u2661 Tor Arne Vestb\u00F8!"
if sys.platform == 'win32':
encoding = 'mbcs'
else:
encoding = 'utf-8'
encoded_tor = unicode_tor_input.encode(encoding)
# On Windows, we expect the unicode->mbcs->unicode roundtrip to be
# lossy. On other platforms, we expect a lossless roundtrip.
if sys.platform == 'win32':
unicode_tor_output = encoded_tor.decode(encoding)
else:
unicode_tor_output = unicode_tor_input
executive = Executive()
output = executive.run_command(command_line('cat'), input=unicode_tor_input)
self.assertEqual(output, unicode_tor_output)
output = executive.run_command(command_line('echo', unicode_tor_input))
self.assertEqual(output, unicode_tor_output)
output = executive.run_command(command_line('echo', unicode_tor_input), decode_output=False)
self.assertEqual(output, encoded_tor)
# Make sure that str() input also works.
output = executive.run_command(command_line('cat'), input=encoded_tor, decode_output=False)
self.assertEqual(output, encoded_tor)
# FIXME: We should only have one run* method to test
output = executive.run_and_throw_if_fail(command_line('echo', unicode_tor_input), quiet=True)
self.assertEqual(output, unicode_tor_output)
output = executive.run_and_throw_if_fail(command_line('echo', unicode_tor_input), quiet=True, decode_output=False)
self.assertEqual(output, encoded_tor)
def serial_test_kill_process(self):
executive = Executive()
process = subprocess.Popen(never_ending_command(), stdout=subprocess.PIPE)
self.assertEqual(process.poll(), None) # Process is running
executive.kill_process(process.pid)
# Note: Can't use a ternary since signal.SIGKILL is undefined for sys.platform == "win32"
if sys.platform == "win32":
# FIXME: https://bugs.webkit.org/show_bug.cgi?id=54790
# We seem to get either 0 or 1 here for some reason.
self.assertIn(process.wait(), (0, 1))
elif sys.platform == "cygwin":
# FIXME: https://bugs.webkit.org/show_bug.cgi?id=98196
# cygwin seems to give us either SIGABRT or SIGKILL
self.assertIn(process.wait(), (-signal.SIGABRT, -signal.SIGKILL))
else:
expected_exit_code = -signal.SIGKILL
self.assertEqual(process.wait(), expected_exit_code)
# Killing again should fail silently.
executive.kill_process(process.pid)
def serial_test_kill_all(self):
executive = Executive()
process = subprocess.Popen(never_ending_command(), stdout=subprocess.PIPE)
self.assertIsNone(process.poll()) # Process is running
executive.kill_all(never_ending_command()[0])
# Note: Can't use a ternary since signal.SIGTERM is undefined for sys.platform == "win32"
if sys.platform == "cygwin":
expected_exit_code = 0 # os.kill results in exit(0) for this process.
self.assertEqual(process.wait(), expected_exit_code)
elif sys.platform == "win32":
# FIXME: https://bugs.webkit.org/show_bug.cgi?id=54790
# We seem to get either 0 or 1 here for some reason.
self.assertIn(process.wait(), (0, 1))
else:
expected_exit_code = -signal.SIGTERM
self.assertEqual(process.wait(), expected_exit_code)
# Killing again should fail silently.
executive.kill_all(never_ending_command()[0])
def _assert_windows_image_name(self, name, expected_windows_name):
executive = Executive()
windows_name = executive._windows_image_name(name)
self.assertEqual(windows_name, expected_windows_name)
def test_windows_image_name(self):
self._assert_windows_image_name("foo", "foo.exe")
self._assert_windows_image_name("foo.exe", "foo.exe")
self._assert_windows_image_name("foo.com", "foo.com")
# If the name looks like an extension, even if it isn't
# supposed to, we have no choice but to return the original name.
self._assert_windows_image_name("foo.baz", "foo.baz")
self._assert_windows_image_name("foo.baz.exe", "foo.baz.exe")
def serial_test_check_running_pid(self):
executive = Executive()
self.assertTrue(executive.check_running_pid(os.getpid()))
# Maximum pid number on Linux is 32768 by default
self.assertFalse(executive.check_running_pid(100000))
def serial_test_running_pids(self):
if sys.platform in ("win32", "cygwin"):
return # This function isn't implemented on Windows yet.
executive = Executive()
pids = executive.running_pids()
self.assertIn(os.getpid(), pids)
def serial_test_run_in_parallel(self):
# We run this test serially to avoid overloading the machine and throwing off the timing.
if sys.platform in ("win32", "cygwin"):
return # This function isn't implemented properly on windows yet.
import multiprocessing
NUM_PROCESSES = 4
DELAY_SECS = 0.25
cmd_line = [sys.executable, '-c', 'import time; time.sleep(%f); print "hello"' % DELAY_SECS]
cwd = os.getcwd()
commands = [tuple([cmd_line, cwd])] * NUM_PROCESSES
start = time.time()
command_outputs = Executive().run_in_parallel(commands, processes=NUM_PROCESSES)
done = time.time()
self.assertTrue(done - start < NUM_PROCESSES * DELAY_SECS)
self.assertEqual([output[1] for output in command_outputs], ["hello\n"] * NUM_PROCESSES)
self.assertEqual([], multiprocessing.active_children())
def test_run_in_parallel_assert_nonempty(self):
self.assertRaises(AssertionError, Executive().run_in_parallel, [])
def main(platform, stdin, stdout, cmd, args):
if platform == 'win32' and hasattr(stdout, 'fileno'):
import msvcrt
msvcrt.setmode(stdout.fileno(), os.O_BINARY)
if cmd == '--cat':
stdout.write(stdin.read())
elif cmd == '--echo':
stdout.write(' '.join(args))
return 0
if __name__ == '__main__' and len(sys.argv) > 1 and sys.argv[1] in ('--cat', '--echo'):
sys.exit(main(sys.platform, sys.stdin, sys.stdout, sys.argv[1], sys.argv[2:]))
|
# coding: utf-8
"""
Flat API
    The Flat API allows you to easily extend the abilities of the [Flat Platform](https://flat.io), with a wide range of use cases including the following: * Creating and importing new music scores using MusicXML, MIDI, Guitar Pro (GP3, GP4, GP5, GPX, GP), PowerTab, TuxGuitar and MuseScore files * Browsing, updating, copying, exporting the user's scores (for example in MP3, WAV or MIDI) * Managing educational resources with Flat for Education: creating & updating the organization accounts, the classes, rosters and assignments. The Flat API is built on HTTP. Our API is RESTful. It has predictable resource URLs. It returns HTTP response codes to indicate errors. It also accepts and returns JSON in the HTTP body. The [schema](/swagger.yaml) of this API follows the [OpenAPI Initiative (OAI) specification](https://www.openapis.org/), you can use and work with [compatible Swagger tools](http://swagger.io/open-source-integrations/). This API features Cross-Origin Resource Sharing (CORS) implemented in compliance with [W3C spec](https://www.w3.org/TR/cors/). You can use your favorite HTTP/REST library for your programming language to use Flat's API. This specification and reference is [available on Github](https://github.com/FlatIO/api-reference). Getting Started and learn more: * [API Overview and introduction](https://flat.io/developers/docs/api/) * [Authentication (Personal Access Tokens or OAuth2)](https://flat.io/developers/docs/api/authentication.html) * [SDKs](https://flat.io/developers/docs/api/sdks.html) * [Rate Limits](https://flat.io/developers/docs/api/rate-limits.html) * [Changelog](https://flat.io/developers/docs/api/changelog.html) # noqa: E501
OpenAPI spec version: 2.7.0
Contact: developers@flat.io
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class CollectionType(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
allowed enum values
"""
ROOT = "root"
REGULAR = "regular"
SHAREDWITHME = "sharedWithMe"
SHAREDWITHGROUP = "sharedWithGroup"
TRASH = "trash"
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""CollectionType - a model defined in OpenAPI""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CollectionType):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
import django
from django import template
from django_countries.fields import Country, countries
register = template.Library()
simple_tag = register.simple_tag
@simple_tag
def get_country(code):
return Country(code=code)
@simple_tag
def get_countries():
return list(countries)
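# Template usage sketch (illustrative; the {% load %} name depends on this
# module's filename inside the templatetags package):
#     {% load <this tag library> %}
#     {% get_country "NZ" as country %}        -> a Country instance
#     {% get_countries as all_countries %}     -> list of (code, name) pairs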
|
"""
This python script demonstrates the creation of all parametric reactors available
in the paramak tool
"""
import paramak
def main():
all_reactors = []
my_reactor = paramak.BallReactor(
inner_bore_radial_thickness=50,
inboard_tf_leg_radial_thickness=50,
center_column_shield_radial_thickness=50,
divertor_radial_thickness=100,
inner_plasma_gap_radial_thickness=50,
plasma_radial_thickness=200,
outer_plasma_gap_radial_thickness=50,
firstwall_radial_thickness=50,
blanket_radial_thickness=100,
blanket_rear_wall_radial_thickness=50,
elongation=2,
triangularity=0.55,
number_of_tf_coils=16,
rotation_angle=180,
)
my_reactor.name = "BallReactor"
all_reactors.append(my_reactor)
my_reactor = paramak.BallReactor(
inner_bore_radial_thickness=50,
inboard_tf_leg_radial_thickness=50,
center_column_shield_radial_thickness=50,
divertor_radial_thickness=100,
inner_plasma_gap_radial_thickness=50,
plasma_radial_thickness=200,
outer_plasma_gap_radial_thickness=50,
firstwall_radial_thickness=50,
blanket_radial_thickness=100,
blanket_rear_wall_radial_thickness=50,
elongation=2,
triangularity=0.55,
number_of_tf_coils=16,
rotation_angle=180,
pf_coil_radial_thicknesses=[50, 50, 50, 50],
pf_coil_vertical_thicknesses=[50, 50, 50, 50],
pf_coil_to_rear_blanket_radial_gap=50,
pf_coil_to_tf_coil_radial_gap=50,
outboard_tf_coil_radial_thickness=100,
outboard_tf_coil_poloidal_thickness=50,
)
my_reactor.name = "BallReactor_with_pf_tf_coils"
all_reactors.append(my_reactor)
my_reactor = paramak.SingleNullBallReactor(
inner_bore_radial_thickness=50,
inboard_tf_leg_radial_thickness=50,
center_column_shield_radial_thickness=50,
divertor_radial_thickness=100,
inner_plasma_gap_radial_thickness=50,
plasma_radial_thickness=200,
outer_plasma_gap_radial_thickness=50,
firstwall_radial_thickness=50,
blanket_radial_thickness=100,
blanket_rear_wall_radial_thickness=50,
elongation=2,
triangularity=0.55,
number_of_tf_coils=16,
rotation_angle=180,
pf_coil_radial_thicknesses=[50, 50, 50, 50],
pf_coil_vertical_thicknesses=[50, 50, 50, 50],
pf_coil_to_rear_blanket_radial_gap=50,
pf_coil_to_tf_coil_radial_gap=50,
outboard_tf_coil_radial_thickness=100,
outboard_tf_coil_poloidal_thickness=50,
divertor_position="lower"
)
my_reactor.name = "SingleNullBallReactor_with_pf_tf_coils"
all_reactors.append(my_reactor)
my_reactor = paramak.SubmersionTokamak(
inner_bore_radial_thickness=25,
inboard_tf_leg_radial_thickness=50,
center_column_shield_radial_thickness=50,
inboard_blanket_radial_thickness=50,
firstwall_radial_thickness=50,
inner_plasma_gap_radial_thickness=70,
plasma_radial_thickness=300,
outer_plasma_gap_radial_thickness=70,
outboard_blanket_radial_thickness=200,
blanket_rear_wall_radial_thickness=50,
divertor_radial_thickness=50,
plasma_high_point=(50 + 50 + 50 + 100 + 100, 350),
rotation_angle=180,
support_radial_thickness=150,
outboard_tf_coil_radial_thickness=50,
)
my_reactor.name = "SubmersionTokamak"
all_reactors.append(my_reactor)
my_reactor = paramak.SubmersionTokamak(
inner_bore_radial_thickness=25,
inboard_tf_leg_radial_thickness=50,
center_column_shield_radial_thickness=50,
inboard_blanket_radial_thickness=50,
firstwall_radial_thickness=50,
inner_plasma_gap_radial_thickness=70,
plasma_radial_thickness=300,
outer_plasma_gap_radial_thickness=70,
outboard_blanket_radial_thickness=200,
blanket_rear_wall_radial_thickness=50,
divertor_radial_thickness=50,
plasma_high_point=(50 + 50 + 50 + 100 + 100, 350),
rotation_angle=180,
support_radial_thickness=150,
outboard_tf_coil_radial_thickness=50,
tf_coil_to_rear_blanket_radial_gap=50,
outboard_tf_coil_poloidal_thickness=70,
pf_coil_vertical_thicknesses=[50, 50, 50, 50, 50],
pf_coil_radial_thicknesses=[40, 40, 40, 40, 40],
pf_coil_to_tf_coil_radial_gap=50,
number_of_tf_coils=16,
)
my_reactor.name = "SubmersionTokamak_with_pf_tf_coils"
all_reactors.append(my_reactor)
my_reactor = paramak.SingleNullSubmersionTokamak(
inner_bore_radial_thickness=10,
inboard_tf_leg_radial_thickness=30,
center_column_shield_radial_thickness=60,
divertor_radial_thickness=50,
inner_plasma_gap_radial_thickness=30,
plasma_radial_thickness=300,
outer_plasma_gap_radial_thickness=30,
firstwall_radial_thickness=30,
blanket_rear_wall_radial_thickness=30,
number_of_tf_coils=16,
rotation_angle=180,
support_radial_thickness=20,
inboard_blanket_radial_thickness=20,
outboard_blanket_radial_thickness=20,
plasma_high_point=(200, 200),
divertor_position="upper",
support_position="upper"
)
my_reactor.name = "SingleNullSubmersionTokamak"
all_reactors.append(my_reactor)
my_reactor = paramak.SingleNullSubmersionTokamak(
inner_bore_radial_thickness=10,
inboard_tf_leg_radial_thickness=30,
center_column_shield_radial_thickness=60,
divertor_radial_thickness=50,
inner_plasma_gap_radial_thickness=30,
plasma_radial_thickness=300,
outer_plasma_gap_radial_thickness=30,
firstwall_radial_thickness=30,
blanket_rear_wall_radial_thickness=30,
number_of_tf_coils=16,
rotation_angle=180,
support_radial_thickness=20,
inboard_blanket_radial_thickness=20,
outboard_blanket_radial_thickness=20,
plasma_high_point=(200, 200),
pf_coil_radial_thicknesses=[50, 50, 50, 50],
pf_coil_vertical_thicknesses=[50, 50, 50, 50],
pf_coil_to_tf_coil_radial_gap=50,
outboard_tf_coil_radial_thickness=100,
outboard_tf_coil_poloidal_thickness=50,
tf_coil_to_rear_blanket_radial_gap=20,
divertor_position="upper",
support_position="upper"
)
my_reactor.name = "SingleNullSubmersionTokamak_with_pf_tf_coils"
all_reactors.append(my_reactor)
return all_reactors
if __name__ == "__main__":
all_reactors = main()
for reactors in all_reactors:
reactors.export_stp(reactors.name)
reactors.export_stl(reactors.name)
reactors.export_neutronics_description()
|
import numpy as np
class LowLevelController:
"""Low level controller of a point mass robot with dynamics:
x_{k+1} = x_k + v_k * Ts * cos(psi_k)
y_{k+1} = y_k + v_k * Ts * sin(psi_k)
v_{k+1} = v_k + Ts * a_k
psi_{k+1} = psi_k + Ts * omega_k
omega_{k+1} = omega_k + Ts * epsilon_k
Where a_k and epsilon_k are the inputs and are the translational and rotational
accelerations respectively.
    For now we assume that it is a perfect controller, able to produce the
    exact commanded outputs whenever they are reachable under the provided
    input constraints.
"""
def __init__(self,
params):
"""Initializes a LowLevelController."""
self._init_from_params(params)
def get_inputs(self, state, cmd_vel):
"""produces control inputs based on the actual state and the commanded
velocities in cmd_vel = np.array([v_des, omega_des])"""
v_des = cmd_vel[0]
omega_des = cmd_vel[1]
v_k = state[2]
omega_k = state[4]
# translational acceleration:
a_k = (v_des - v_k) / self._Ts
if a_k > self._acc_max:
a_k = self._acc_max
elif a_k < self._acc_min:
a_k = self._acc_min
# angular acceleration:
epsilon_k = (omega_des - omega_k) / self._Ts
if epsilon_k > self._epsilon_max:
            epsilon_k = self._epsilon_max
elif epsilon_k < self._epsilon_min:
epsilon_k = self._epsilon_min
return np.array([a_k, epsilon_k])
def _init_from_params(self, params):
"""Initializes some variables from the params."""
self._Ts = params["general"]["Ts"]
self._acc_min = params["LowLevelController"]["acc_min"]
self._acc_max = params["LowLevelController"]["acc_max"]
self._epsilon_min = params["LowLevelController"]["epsilon_min"]
self._epsilon_max = params["LowLevelController"]["epsilon_max"]
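# Usage sketch (not part of the original module): one control step with an
# illustrative parameter set. The state layout [x, y, v, psi, omega] follows
# the dynamics in the class docstring; all numbers are arbitrary examples.
def _example_step():
    params = {
        "general": {"Ts": 0.1},
        "LowLevelController": {
            "acc_min": -2.0, "acc_max": 2.0,
            "epsilon_min": -1.0, "epsilon_max": 1.0,
        },
    }
    controller = LowLevelController(params)
    state = np.array([0.0, 0.0, 1.0, 0.0, 0.0])   # x, y, v, psi, omega
    cmd_vel = np.array([1.5, 0.2])                # desired v and omega
    # Requested accelerations are clipped to the configured limits.
    return controller.get_inputs(state, cmd_vel)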
|
import json
import threading
import time
import os
import stat
from decimal import Decimal
from typing import Union, Optional
from numbers import Real
from copy import deepcopy
from . import util
from .util import (user_dir, make_dir,
NoDynamicFeeEstimates, format_fee_satoshis, quantize_feerate)
from .i18n import _
from .logging import get_logger, Logger
FEE_ETA_TARGETS = [25, 10, 5, 2]
FEE_DEPTH_TARGETS = [10000000, 5000000, 2000000, 1000000, 500000, 200000, 100000]
# satoshi per kbyte
FEERATE_MAX_DYNAMIC = 10000000000
FEERATE_WARNING_HIGH_FEE = 1000000000
FEERATE_FALLBACK_STATIC_FEE = 20000000
FEERATE_DEFAULT_RELAY = 10000000
FEERATE_STATIC_VALUES = [10000000, 20000000, 50000000, 100000000, 200000000, 500000000, 1000000000, 2000000000, 5000000000, 10000000000]
config = None
_logger = get_logger(__name__)
def get_config():
global config
return config
def set_config(c):
global config
config = c
FINAL_CONFIG_VERSION = 3
class SimpleConfig(Logger):
"""
The SimpleConfig class is responsible for handling operations involving
configuration files.
There are two different sources of possible configuration values:
1. Command line options.
2. User configuration (in the user's config directory)
They are taken in order (1. overrides config options set in 2.)
"""
def __init__(self, options=None, read_user_config_function=None,
read_user_dir_function=None):
if options is None:
options = {}
Logger.__init__(self)
# This lock needs to be acquired for updating and reading the config in
# a thread-safe way.
self.lock = threading.RLock()
self.mempool_fees = {}
self.fee_estimates = {}
self.fee_estimates_last_updated = {}
self.last_time_fee_estimates_requested = 0 # zero ensures immediate fees
# The following two functions are there for dependency injection when
# testing.
if read_user_config_function is None:
read_user_config_function = read_user_config
if read_user_dir_function is None:
self.user_dir = user_dir
else:
self.user_dir = read_user_dir_function
# The command line options
self.cmdline_options = deepcopy(options)
# don't allow to be set on CLI:
self.cmdline_options.pop('config_version', None)
# Set self.path and read the user config
self.user_config = {} # for self.get in electrum_path()
self.path = self.electrum_path()
self.user_config = read_user_config_function(self.path)
if not self.user_config:
# avoid new config getting upgraded
self.user_config = {'config_version': FINAL_CONFIG_VERSION}
# config "upgrade" - CLI options
self.rename_config_keys(
self.cmdline_options, {'auto_cycle': 'auto_connect'}, True)
# config upgrade - user config
if self.requires_upgrade():
self.upgrade()
# Make a singleton instance of 'self'
set_config(self)
def electrum_path(self):
# Read electrum_path from command line
# Otherwise use the user's default data directory.
path = self.get('electrum_path')
if path is None:
path = self.user_dir()
make_dir(path, allow_symlink=False)
if self.get('testnet'):
path = os.path.join(path, 'testnet')
make_dir(path, allow_symlink=False)
elif self.get('regtest'):
path = os.path.join(path, 'regtest')
make_dir(path, allow_symlink=False)
elif self.get('simnet'):
path = os.path.join(path, 'simnet')
make_dir(path, allow_symlink=False)
self.logger.info(f"electrum directory {path}")
return path
def rename_config_keys(self, config, keypairs, deprecation_warning=False):
"""Migrate old key names to new ones"""
updated = False
for old_key, new_key in keypairs.items():
if old_key in config:
if new_key not in config:
config[new_key] = config[old_key]
if deprecation_warning:
self.logger.warning('Note that the {} variable has been deprecated. '
'You should use {} instead.'.format(old_key, new_key))
del config[old_key]
updated = True
return updated
def set_key(self, key, value, save=True):
if not self.is_modifiable(key):
self.logger.warning(f"not changing config key '{key}' set on the command line")
return
try:
json.dumps(key)
json.dumps(value)
except:
self.logger.info(f"json error: cannot save {repr(key)} ({repr(value)})")
return
self._set_key_in_user_config(key, value, save)
def _set_key_in_user_config(self, key, value, save=True):
with self.lock:
if value is not None:
self.user_config[key] = value
else:
self.user_config.pop(key, None)
if save:
self.save_user_config()
def get(self, key, default=None):
with self.lock:
out = self.cmdline_options.get(key)
if out is None:
out = self.user_config.get(key, default)
return out
def requires_upgrade(self):
return self.get_config_version() < FINAL_CONFIG_VERSION
def upgrade(self):
with self.lock:
self.logger.info('upgrading config')
self.convert_version_2()
self.convert_version_3()
self.set_key('config_version', FINAL_CONFIG_VERSION, save=True)
def convert_version_2(self):
if not self._is_upgrade_method_needed(1, 1):
return
self.rename_config_keys(self.user_config, {'auto_cycle': 'auto_connect'})
try:
# change server string FROM host:port:proto TO host:port:s
server_str = self.user_config.get('server')
host, port, protocol = str(server_str).rsplit(':', 2)
assert protocol in ('s', 't')
int(port) # Throw if cannot be converted to int
server_str = '{}:{}:s'.format(host, port)
self._set_key_in_user_config('server', server_str)
except BaseException:
self._set_key_in_user_config('server', None)
self.set_key('config_version', 2)
def convert_version_3(self):
if not self._is_upgrade_method_needed(2, 2):
return
base_unit = self.user_config.get('base_unit')
if isinstance(base_unit, str):
self._set_key_in_user_config('base_unit', None)
map_ = {'btc':8, 'mbtc':5, 'ubtc':2, 'bits':2, 'sat':0}
decimal_point = map_.get(base_unit.lower())
self._set_key_in_user_config('decimal_point', decimal_point)
self.set_key('config_version', 3)
def _is_upgrade_method_needed(self, min_version, max_version):
cur_version = self.get_config_version()
if cur_version > max_version:
return False
elif cur_version < min_version:
raise Exception(
('config upgrade: unexpected version %d (should be %d-%d)'
% (cur_version, min_version, max_version)))
else:
return True
def get_config_version(self):
config_version = self.get('config_version', 1)
if config_version > FINAL_CONFIG_VERSION:
self.logger.warning('config version ({}) is higher than latest ({})'
.format(config_version, FINAL_CONFIG_VERSION))
return config_version
def is_modifiable(self, key):
return key not in self.cmdline_options
def save_user_config(self):
if not self.path:
return
path = os.path.join(self.path, "config")
s = json.dumps(self.user_config, indent=4, sort_keys=True)
try:
with open(path, "w", encoding='utf-8') as f:
f.write(s)
os.chmod(path, stat.S_IREAD | stat.S_IWRITE)
except FileNotFoundError:
# datadir probably deleted while running...
if os.path.exists(self.path): # or maybe not?
raise
def get_wallet_path(self):
"""Set the path of the wallet."""
# command line -w option
if self.get('wallet_path'):
return os.path.join(self.get('cwd', ''), self.get('wallet_path'))
# path in config file
path = self.get('default_wallet_path')
if path and os.path.exists(path):
return path
# default path
util.assert_datadir_available(self.path)
dirpath = os.path.join(self.path, "wallets")
make_dir(dirpath, allow_symlink=False)
new_path = os.path.join(self.path, "wallets", "default_wallet")
# default path in pre 1.9 versions
old_path = os.path.join(self.path, "electrum.dat")
if os.path.exists(old_path) and not os.path.exists(new_path):
os.rename(old_path, new_path)
return new_path
def remove_from_recently_open(self, filename):
recent = self.get('recently_open', [])
if filename in recent:
recent.remove(filename)
self.set_key('recently_open', recent)
def set_session_timeout(self, seconds):
self.logger.info(f"session timeout -> {seconds} seconds")
self.set_key('session_timeout', seconds)
def get_session_timeout(self):
return self.get('session_timeout', 300)
def open_last_wallet(self):
if self.get('wallet_path') is None:
last_wallet = self.get('gui_last_wallet')
if last_wallet is not None and os.path.exists(last_wallet):
self.cmdline_options['default_wallet_path'] = last_wallet
def save_last_wallet(self, wallet):
if self.get('wallet_path') is None:
path = wallet.storage.path
self.set_key('gui_last_wallet', path)
def impose_hard_limits_on_fee(func):
def get_fee_within_limits(self, *args, **kwargs):
fee = func(self, *args, **kwargs)
if fee is None:
return fee
fee = min(FEERATE_MAX_DYNAMIC, fee)
fee = max(FEERATE_DEFAULT_RELAY, fee)
return fee
return get_fee_within_limits
def eta_to_fee(self, slider_pos) -> Optional[int]:
"""Returns fee in sat/kbyte."""
slider_pos = max(slider_pos, 0)
slider_pos = min(slider_pos, len(FEE_ETA_TARGETS))
if slider_pos < len(FEE_ETA_TARGETS):
num_blocks = FEE_ETA_TARGETS[slider_pos]
fee = self.eta_target_to_fee(num_blocks)
else:
fee = self.eta_target_to_fee(1)
return fee
@impose_hard_limits_on_fee
def eta_target_to_fee(self, num_blocks: int) -> Optional[int]:
"""Returns fee in sat/kbyte."""
if num_blocks == 1:
fee = self.fee_estimates.get(2)
if fee is not None:
fee += fee / 2
fee = int(fee)
else:
fee = self.fee_estimates.get(num_blocks)
return fee
def fee_to_depth(self, target_fee: Real) -> int:
"""For a given sat/vbyte fee, returns an estimate of how deep
it would be in the current mempool in vbytes.
Pessimistic == overestimates the depth.
"""
depth = 0
for fee, s in self.mempool_fees:
depth += s
if fee <= target_fee:
break
return depth
def depth_to_fee(self, slider_pos) -> int:
"""Returns fee in sat/kbyte."""
target = self.depth_target(slider_pos)
return self.depth_target_to_fee(target)
@impose_hard_limits_on_fee
def depth_target_to_fee(self, target: int) -> int:
"""Returns fee in sat/kbyte.
target: desired mempool depth in vbytes
"""
depth = 0
for fee, s in self.mempool_fees:
depth += s
if depth > target:
break
else:
return 0
# add one sat/byte as currently that is
# the max precision of the histogram
fee += 1
# convert to sat/kbyte
return fee * 1000
def depth_target(self, slider_pos):
slider_pos = max(slider_pos, 0)
slider_pos = min(slider_pos, len(FEE_DEPTH_TARGETS)-1)
return FEE_DEPTH_TARGETS[slider_pos]
def eta_target(self, i):
if i == len(FEE_ETA_TARGETS):
return 1
return FEE_ETA_TARGETS[i]
def fee_to_eta(self, fee_per_kb):
import operator
l = list(self.fee_estimates.items()) + [(1, self.eta_to_fee(4))]
dist = map(lambda x: (x[0], abs(x[1] - fee_per_kb)), l)
min_target, min_value = min(dist, key=operator.itemgetter(1))
if fee_per_kb < self.fee_estimates.get(25)/2:
min_target = -1
return min_target
def depth_tooltip(self, depth):
return "%.1f MB from tip"%(depth/1000000)
def eta_tooltip(self, x):
if x < 0:
return _('Low fee')
elif x == 1:
return _('In the next block')
else:
return _('Within {} blocks').format(x)
def get_fee_status(self):
dyn = self.is_dynfee()
mempool = self.use_mempool_fees()
pos = self.get_depth_level() if mempool else self.get_fee_level()
fee_rate = self.fee_per_kb()
target, tooltip = self.get_fee_text(pos, dyn, mempool, fee_rate)
return tooltip + ' [%s]'%target if dyn else target + ' [Static]'
def get_fee_text(self, pos, dyn, mempool, fee_rate):
"""Returns (text, tooltip) where
text is what we target: static fee / num blocks to confirm in / mempool depth
tooltip is the corresponding estimate (e.g. num blocks for a static fee)
fee_rate is in sat/kbyte
"""
if fee_rate is None:
rate_str = 'unknown'
else:
fee_rate = fee_rate/1000
rate_str = format_fee_satoshis(fee_rate) + ' sat/byte'
if dyn:
if mempool:
depth = self.depth_target(pos)
text = self.depth_tooltip(depth)
else:
eta = self.eta_target(pos)
text = self.eta_tooltip(eta)
tooltip = rate_str
else:
text = rate_str
if mempool and self.has_fee_mempool():
depth = self.fee_to_depth(fee_rate)
tooltip = self.depth_tooltip(depth)
elif not mempool and self.has_fee_etas():
eta = self.fee_to_eta(fee_rate)
tooltip = self.eta_tooltip(eta)
else:
tooltip = ''
return text, tooltip
def get_depth_level(self):
maxp = len(FEE_DEPTH_TARGETS) - 1
return min(maxp, self.get('depth_level', 2))
def get_fee_level(self):
maxp = len(FEE_ETA_TARGETS) # not (-1) to have "next block"
return min(maxp, self.get('fee_level', 2))
def get_fee_slider(self, dyn, mempool):
if dyn:
if mempool:
pos = self.get_depth_level()
maxp = len(FEE_DEPTH_TARGETS) - 1
fee_rate = self.depth_to_fee(pos)
else:
pos = self.get_fee_level()
maxp = len(FEE_ETA_TARGETS) # not (-1) to have "next block"
fee_rate = self.eta_to_fee(pos)
else:
fee_rate = self.fee_per_kb(dyn=False)
pos = self.static_fee_index(fee_rate)
maxp = len(FEERATE_STATIC_VALUES) - 1
return maxp, pos, fee_rate
def static_fee(self, i):
return FEERATE_STATIC_VALUES[i]
def static_fee_index(self, value):
if value is None:
raise TypeError('static fee cannot be None')
dist = list(map(lambda x: abs(x - value), FEERATE_STATIC_VALUES))
return min(range(len(dist)), key=dist.__getitem__)
def has_fee_etas(self):
return len(self.fee_estimates) == 4
def has_fee_mempool(self):
return bool(self.mempool_fees)
def has_dynamic_fees_ready(self):
if self.use_mempool_fees():
return self.has_fee_mempool()
else:
return self.has_fee_etas()
def is_dynfee(self):
return bool(self.get('dynamic_fees', False))
def use_mempool_fees(self):
return bool(self.get('mempool_fees', False))
def _feerate_from_fractional_slider_position(self, fee_level: float, dyn: bool,
mempool: bool) -> Union[int, None]:
fee_level = max(fee_level, 0)
fee_level = min(fee_level, 1)
if dyn:
max_pos = (len(FEE_DEPTH_TARGETS) - 1) if mempool else len(FEE_ETA_TARGETS)
slider_pos = round(fee_level * max_pos)
fee_rate = self.depth_to_fee(slider_pos) if mempool else self.eta_to_fee(slider_pos)
else:
max_pos = len(FEERATE_STATIC_VALUES) - 1
slider_pos = round(fee_level * max_pos)
fee_rate = FEERATE_STATIC_VALUES[slider_pos]
return fee_rate
def fee_per_kb(self, dyn: bool=None, mempool: bool=None, fee_level: float=None) -> Union[int, None]:
"""Returns sat/kvB fee to pay for a txn.
Note: might return None.
fee_level: float between 0.0 and 1.0, representing fee slider position
"""
if dyn is None:
dyn = self.is_dynfee()
if mempool is None:
mempool = self.use_mempool_fees()
if fee_level is not None:
return self._feerate_from_fractional_slider_position(fee_level, dyn, mempool)
# there is no fee_level specified; will use config.
# note: 'depth_level' and 'fee_level' in config are integer slider positions,
# unlike fee_level here, which (when given) is a float in [0.0, 1.0]
if dyn:
if mempool:
fee_rate = self.depth_to_fee(self.get_depth_level())
else:
fee_rate = self.eta_to_fee(self.get_fee_level())
else:
fee_rate = self.get('fee_per_kb', FEERATE_FALLBACK_STATIC_FEE)
return fee_rate
def fee_per_byte(self):
"""Returns sat/vB fee to pay for a txn.
Note: might return None.
"""
fee_per_kb = self.fee_per_kb()
return fee_per_kb / 1000 if fee_per_kb is not None else None
def estimate_fee(self, size: Union[int, float, Decimal]) -> int:
fee_per_kb = self.fee_per_kb()
if fee_per_kb is None:
raise NoDynamicFeeEstimates()
return self.estimate_fee_for_feerate(fee_per_kb, size)
@classmethod
def estimate_fee_for_feerate(cls, fee_per_kb: Union[int, float, Decimal],
size: Union[int, float, Decimal]) -> int:
size = Decimal(size)
fee_per_kb = Decimal(fee_per_kb)
fee_per_byte = fee_per_kb / 1000
# to be consistent with what is displayed in the GUI,
# the calculation needs to use the same precision:
fee_per_byte = quantize_feerate(fee_per_byte)
return round(fee_per_byte * size)
def update_fee_estimates(self, key, value):
self.fee_estimates[key] = value
self.fee_estimates_last_updated[key] = time.time()
def is_fee_estimates_update_required(self):
"""Checks time since last requested and updated fee estimates.
Returns True if an update should be requested.
"""
now = time.time()
return now - self.last_time_fee_estimates_requested > 60
def requested_fee_estimates(self):
self.last_time_fee_estimates_requested = time.time()
def get_video_device(self):
device = self.get("video_device", "default")
if device == 'default':
device = ''
return device
def read_user_config(path):
"""Parse and store the user config settings in electrum.conf into user_config[]."""
if not path:
return {}
config_path = os.path.join(path, "config")
if not os.path.exists(config_path):
return {}
try:
with open(config_path, "r", encoding='utf-8') as f:
data = f.read()
result = json.loads(data)
except:
_logger.warning(f"Cannot read config file. {config_path}")
return {}
    if not isinstance(result, dict):
return {}
return result
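# --- Hedged sketch (not part of the original module) ---
# A standalone illustration of the arithmetic performed by
# estimate_fee_for_feerate above: a sat/kvB rate is converted to sat/vB,
# rounded to a fixed precision, and multiplied by the size in vbytes. The
# 0.1 sat/vB precision is an assumption standing in for the quantize_feerate()
# helper defined elsewhere in Electrum.
from decimal import Decimal, ROUND_HALF_EVEN

def _example_estimate_fee(fee_per_kb, tx_size_vbytes):
    """e.g. 5000 sat/kvB on a 225 vbyte tx -> 5.0 sat/vB * 225 = 1125 sat."""
    fee_per_byte = Decimal(fee_per_kb) / 1000
    # stand-in for quantize_feerate(): keep one decimal place of sat/vB
    fee_per_byte = fee_per_byte.quantize(Decimal('0.1'), rounding=ROUND_HALF_EVEN)
    return round(fee_per_byte * tx_size_vbytes)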
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import unittest
from unittest.mock import patch
from collections import namedtuple
from airflow import configuration
from airflow.models import Connection
from airflow.contrib.hooks.azure_container_instance_hook import AzureContainerInstanceHook
from airflow.utils import db
from azure.mgmt.containerinstance.models import (Container,
ContainerGroup,
ContainerState,
Event,
Logs,
ResourceRequests,
ResourceRequirements)
class TestAzureContainerInstanceHook(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
db.merge_conn(
Connection(
conn_id='azure_container_instance_test',
conn_type='azure_container_instances',
login='login',
password='key',
extra=json.dumps({'tenantId': 'tenant_id',
'subscriptionId': 'subscription_id'})
)
)
self.resources = ResourceRequirements(requests=ResourceRequests(
memory_in_gb='4',
cpu='1'))
with patch('azure.common.credentials.ServicePrincipalCredentials.__init__',
autospec=True, return_value=None):
with patch('azure.mgmt.containerinstance.ContainerInstanceManagementClient'):
self.testHook = AzureContainerInstanceHook(conn_id='azure_container_instance_test')
@patch('azure.mgmt.containerinstance.models.ContainerGroup')
@patch('azure.mgmt.containerinstance.operations.ContainerGroupsOperations.create_or_update')
def test_create_or_update(self, create_or_update_mock, container_group_mock):
self.testHook.create_or_update('resource_group', 'aci-test', container_group_mock)
create_or_update_mock.assert_called_with('resource_group', 'aci-test', container_group_mock)
@patch('airflow.contrib.hooks.azure_container_instance_hook'
'.AzureContainerInstanceHook._get_instance_view')
def test_get_state_exitcode_details(self, get_instance_view_mock):
expected_state = ContainerState(state='testing', exit_code=1, detail_status='details')
instance_view = {"current_state": expected_state}
named_instance = namedtuple("InstanceView", instance_view.keys())(*instance_view.values())
get_instance_view_mock.return_value = named_instance
state, exit_code, details = self.testHook.get_state_exitcode_details('resource-group', 'test')
self.assertEqual(state, expected_state.state)
self.assertEqual(exit_code, expected_state.exit_code)
self.assertEqual(details, expected_state.detail_status)
@patch('airflow.contrib.hooks.azure_container_instance_hook'
'.AzureContainerInstanceHook._get_instance_view')
def test_get_messages(self, get_instance_view_mock):
expected_messages = ['test1', 'test2']
events = [Event(message=m) for m in expected_messages]
instance_view = {"events": events}
named_instance = namedtuple("Events", instance_view.keys())(*instance_view.values())
get_instance_view_mock.return_value = named_instance
messages = self.testHook.get_messages('resource-group', 'test')
self.assertSequenceEqual(messages, expected_messages)
@patch('azure.mgmt.containerinstance.operations.ContainerOperations.list_logs')
def test_get_logs(self, list_logs_mock):
expected_messages = ['log line 1\n', 'log line 2\n', 'log line 3\n']
logs = Logs(content=''.join(expected_messages))
list_logs_mock.return_value = logs
logs = self.testHook.get_logs('resource_group', 'name', 'name')
self.assertSequenceEqual(logs, expected_messages)
@patch('azure.mgmt.containerinstance.operations.ContainerGroupsOperations.delete')
def test_delete(self, delete_mock):
self.testHook.delete('resource_group', 'aci-test')
delete_mock.assert_called_with('resource_group', 'aci-test')
@patch('azure.mgmt.containerinstance.operations.ContainerGroupsOperations.list_by_resource_group')
def test_exists_with_existing(self, list_mock):
list_mock.return_value = [ContainerGroup(os_type='Linux',
containers=[Container(name='test1',
image='hello-world',
resources=self.resources)])]
self.assertFalse(self.testHook.exists('test', 'test1'))
@patch('azure.mgmt.containerinstance.operations.ContainerGroupsOperations.list_by_resource_group')
def test_exists_with_not_existing(self, list_mock):
list_mock.return_value = [ContainerGroup(os_type='Linux',
containers=[Container(name='test1',
image='hello-world',
resources=self.resources)])]
self.assertFalse(self.testHook.exists('test', 'not found'))
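# --- Hedged usage sketch (not part of the original tests) ---
# Shows how a ContainerGroup payload for create_or_update() could be
# assembled, reusing the azure-mgmt classes imported at the top of this
# module. The names and resource figures are assumptions for illustration
# only; nothing here talks to Azure.
def _example_container_group():
    resources = ResourceRequirements(
        requests=ResourceRequests(memory_in_gb='4', cpu='1'))
    container = Container(name='aci-example',
                          image='hello-world',
                          resources=resources)
    return ContainerGroup(os_type='Linux', containers=[container])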
|
from pathlib import Path
from sepal_ui import sepalwidgets as sw
from component import parameter as cp
from component.message import cm
class FolderSelect(sw.FileInput):
def __init__(self):
super().__init__([''], label=cm.widget.folder.label, folder=cp.down_dir)
def _on_file_select(self, change):
"""Dispatch the behaviour between file selection and folder change"""
if not change['new']:
return self
new_value = Path(change['new'])
        # keep the default behaviour, but also write the value for each directory since no file will ever be selected
if new_value.is_dir():
self.folder = new_value
self.file = str(new_value)
self._change_folder()
return self
def is_valid_ts(self):
"""Check if the current folder is a SEPAL generated time series folder"""
# clean the errors
self.selected_file.error_messages = None
# avoid bug at start
if not self.v_model:
return True
folder = Path(self.v_model)
valid = True
dirs = [d for d in folder.glob('*/')]
if len(dirs) == 0:
valid = False
else:
for d in dirs:
                try:
                    int(d.stem)  # sub-folders of a SEPAL time series are numbered
                except ValueError:
                    valid = False
                    break
# write an error message
self.selected_file.error_messages = None if valid else cm.widget.folder.no_ts.format('')
return valid
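# --- Hedged sketch (not part of the widget) ---
# The same rule as is_valid_ts above, expressed as a standalone helper: a
# SEPAL time-series folder contains at least one sub-directory and every
# sub-directory name is an integer index. Handy for tests that do not want
# to instantiate the GUI widget.
def _looks_like_sepal_ts(folder):
    dirs = [d for d in Path(folder).iterdir() if d.is_dir()]
    return bool(dirs) and all(d.name.isdigit() for d in dirs)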
|
from pathlib import Path
from typing import Tuple, List, Dict
import pandas as pd
import numpy as np
from tsfresh.utilities.dataframe_functions import roll_time_series
def get_path(df: pd.DataFrame) -> np.ndarray:
out = []
for index, row in df.iterrows():
out.append((row["Latitude"], row["Longitude"]))
return np.array(out)
def write_to_csv(path: str, data: Dict[str, List[pd.DataFrame]]) -> None:
full_path: Path
for k, v in data.items():
full_path = Path(path).joinpath(k[: k.find("-")], k[k.find("-") + 1 :])
full_path.mkdir(parents=True, exist_ok=True)
for index, df in enumerate(v):
df.to_csv(full_path.joinpath("timeseries-" + str(index) + ".csv").open("w"))
def to_tsfresh(data_path: str) -> Tuple[pd.DataFrame, pd.Series, pd.Series]:
df = pd.DataFrame()
    weight_series = pd.Series(dtype=float)
    drivers_series = pd.Series(dtype=int)
temp_df: pd.DataFrame
# ident: str = ""
i: int = 0
for placement in {"deck", "stem"}:
for driver_number in {"single", "double"}:
for ds in Path(data_path).joinpath(placement, driver_number).iterdir():
temp_df = pd.read_csv(str(ds))
weight = temp_df["Weight"][0]
# ident = placement + "_" + driver_number + "_" + temp_df["Driver"][0]
temp_df = temp_df.assign(id=i)
temp_df = temp_df.drop(
["Unnamed: 0", "Driver", "Weight", "Placement"], axis=1
)
                df = pd.concat([df, temp_df])
weight_series.loc[i] = weight
drivers_series.loc[i] = 0 if driver_number == "single" else 1
i += 1
return df.fillna(0), weight_series, drivers_series
def window_df(df: pd.DataFrame):
return roll_time_series(
df, column_id="id", column_sort="Timestamp", column_kind=None
)
def align(signal_1: np.ndarray, signal_2: np.ndarray):
# Standardization
signal_1 = (signal_1 - np.mean(signal_1)) / np.std(signal_1)
signal_2 = (signal_2 - np.mean(signal_2)) / np.std(signal_2)
# Cross-Correlation
correlation = np.correlate(signal_1, signal_2, "full")
    center = len(correlation) - min(len(signal_1), len(signal_2))
max_position = correlation.argmax()
phase = np.abs(center - max_position)
if phase == 0:
reversed_correlation_signal = correlation[::-1]
max_position_reversed = reversed_correlation_signal.argmax()
phase_reversed = np.abs(center - max_position_reversed)
phase = np.max([phase, phase_reversed])
return signal_1, signal_2[phase:]
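# --- Hedged usage sketch (not part of the original module) ---
# Demonstrates align() on two synthetic, phase-shifted sine waves. After
# alignment the second signal is trimmed by the detected lag so both start
# roughly in phase. The sample count and shift are arbitrary assumptions.
if __name__ == "__main__":
    t = np.linspace(0, 4 * np.pi, 400)
    a = np.sin(t)
    b = np.sin(t + 0.5)  # phase-shifted copy of the same signal
    a_std, b_trimmed = align(a, b)
    print(len(a_std), len(b_trimmed))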
|
from rest_framework.permissions import BasePermission
class IsCreator(BasePermission):
def has_object_permission(self, request, view, obj):
user = request.user
creator = obj.created_by
return user == creator
class HasChangePermissions(BasePermission):
def has_object_permission(self, request, view, obj):
user = request.user
collection = obj.collection
return collection.has_permission(
user, 'change_collection_sampling_events')
class HasViewPermissions(BasePermission):
def has_object_permission(self, request, view, obj):
user = request.user
collection = obj.collection
return collection.has_permission(
user, 'view_collection_sampling_events')
class IsCollectionAdmin(BasePermission):
def has_object_permission(self, request, view, obj):
user = request.user
collection = obj.collection
return collection.is_admin(user)
class IsCollectionTypeAdmin(BasePermission):
def has_object_permission(self, request, view, obj):
user = request.user
collection = obj.collection
collection_type = collection.collection_type
return collection_type.is_admin(user)
class HasViewItemsPermissions(BasePermission):
def has_object_permission(self, request, view, obj):
user = request.user
collection = obj.collection
return collection.has_permission(
user, 'view_collection_item')
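# --- Hedged usage sketch (not part of the original module) ---
# These classes are meant to be plugged into DRF views via
# `permission_classes`. The viewset, model and serializer names below are
# hypothetical and shown only to illustrate the wiring; DRF >= 3.9 allows
# composing permissions with | and &.
#
#   class SamplingEventViewSet(viewsets.ModelViewSet):
#       queryset = SamplingEvent.objects.all()
#       serializer_class = SamplingEventSerializer
#       permission_classes = [IsCreator | IsCollectionAdmin | HasChangePermissions]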
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""gcloud dns managed-zones list command."""
from apitools.base.py import list_pager
from googlecloudsdk.calliope import base
from googlecloudsdk.core import apis
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
class List(base.ListCommand):
"""View the list of all your managed-zones.
This command displays the list of your managed-zones.
## EXAMPLES
To see the list of all managed-zones, run:
$ {command}
To see the list of first 10 managed-zones, run:
$ {command} --limit=10
"""
def Collection(self):
return 'dns.managedZones'
def GetUriFunc(self):
def _GetUri(resource):
return resources.REGISTRY.Create(
self.Collection(), managedZone=resource.name).SelfLink()
return _GetUri
def Run(self, args):
dns_client = apis.GetClientInstance('dns', 'v1')
dns_messages = apis.GetMessagesModule('dns', 'v1')
project_id = properties.VALUES.core.project.Get(required=True)
return list_pager.YieldFromList(
dns_client.managedZones,
dns_messages.DnsManagedZonesListRequest(project=project_id),
limit=args.limit, field='managedZones')
|
# Copyright (C) 2019-2021 Ruhr West University of Applied Sciences, Bottrop, Germany
# AND Elektronische Fahrwerksysteme GmbH, Gaimersheim Germany
#
# This Source Code Form is subject to the terms of the Apache License 2.0
# If a copy of the APL2 was not distributed with this
# file, You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.txt.
from collections import OrderedDict
from typing import Union
import numpy as np
import torch
import torch.distributions.constraints as constraints
import pyro
import pyro.distributions as dist
from netcal.scaling import AbstractLogisticRegression
class LogisticCalibration(AbstractLogisticRegression):
"""
On classification, apply the logistic calibration method aka Platt scaling to obtain a
calibration mapping. This method is originally proposed by [1]_.
For the multiclass case, we use the Vector scaling proposed in [2]_.
On detection mode, this calibration method uses multiple independent normal distributions to obtain a
calibration mapping by means of the confidence as well as additional features [3]_. This calibration scheme
assumes independence between all variables.
On detection, it is necessary to provide all data in input parameter ``X`` as an NumPy array
of shape ``(n_samples, n_features)``,
whereas the confidence must be the first feature given in the input array. The ground-truth samples ``y``
must be an array of shape ``(n_samples,)`` consisting of binary labels :math:`y \\in \\{0, 1\\}`. Those
labels indicate if the according sample has matched a ground truth box :math:`\\text{m}=1` or is a false
prediction :math:`\\text{m}=0`.
**Mathematical background:** For confidence calibration in classification tasks, a
confidence mapping :math:`g` is applied on top of a miscalibrated scoring classifier :math:`\\hat{p} = h(x)` to
deliver a calibrated confidence score :math:`\\hat{q} = g(h(x))`.
For detection calibration, we can also use the additional box regression output which we denote as
:math:`\\hat{r} \\in [0, 1]^J` with :math:`J` as the number of dimensions used for the box encoding (e.g.
:math:`J=4` for x position, y position, width and height).
Therefore, the calibration map is not only a function of the confidence score, but also of :math:`\\hat{r}`.
To define a general calibration map for binary problems, we use the logistic function and the combined
input :math:`s = (\\hat{p}, \\hat{r})` of size K by
.. math::
g(s) = \\frac{1}{1 + \\exp(-z(s))} ,
According to [1]_, we can interpret the logit :math:`z` as the logarithm of the posterior odds
.. math::
z(s) = \\log \\frac{f(\\text{m}=1 | s)}{f(\\text{m}=0 | s)} \\approx
    \\log \\frac{f(s | \\text{m}=1)}{f(s | \\text{m}=0)} = \\ell r(s)
If we assume independence of all variables given in :math:`s`, we can use multiple univariate probability
density distributions with the same variance to obtain a calibration mapping. Using this formulation, we can
simply extend the scaling factor (from classification logistic calibration) to a scaling
vector :math:`w \\in \\mathbb{R}^K`.
However, instead of using the uncalibrated confidence estimate :math:`\\hat{p}`, we use the logit of the
network as part of :math:`s` to be conform with the original formulation in [1]_ and [2]_. Thus,
the log-likelihood ratio can be expressed as
.. math::
\\ell r(s) = s^T w + c,
with bias :math:`c \\in \\mathbb{R}`.
We utilize standard optimization methods to determine the calibration mapping :math:`g(s)`.
Parameters
----------
temperature_only : bool, default: False
If True, use Temperature Scaling instead of Platt/Vector Scaling.
method : str, default: "mle"
Method that is used to obtain a calibration mapping:
- 'mle': Maximum likelihood estimate without uncertainty using a convex optimizer.
- 'momentum': MLE estimate using Momentum optimizer for non-convex optimization.
- 'variational': Variational Inference with uncertainty.
- 'mcmc': Markov-Chain Monte-Carlo sampling with uncertainty.
momentum_epochs : int, optional, default: 1000
Number of epochs used by momentum optimizer.
mcmc_steps : int, optional, default: 20
Number of weight samples obtained by MCMC sampling.
mcmc_chains : int, optional, default: 1
Number of Markov-chains used in parallel for MCMC sampling (this will result
in mcmc_steps * mcmc_chains samples).
mcmc_warmup_steps : int, optional, default: 100
Warmup steps used for MCMC sampling.
vi_epochs : int, optional, default: 1000
Number of epochs used for ELBO optimization.
detection : bool, default: False
If False, the input array 'X' is treated as multi-class confidence input (softmax)
with shape (n_samples, [n_classes]).
If True, the input array 'X' is treated as a box predictions with several box features (at least
box confidence must be present) with shape (n_samples, [n_box_features]).
independent_probabilities : bool, optional, default: False
Boolean for multi class probabilities.
If set to True, the probability estimates for each
class are treated as independent of each other (sigmoid).
use_cuda : str or bool, optional, default: False
Specify if CUDA should be used. If str, you can also specify the device
number like 'cuda:0', etc.
References
----------
.. [1] Platt, John:
"Probabilistic outputs for support vector machines and comparisons to regularized likelihood methods."
Advances in large margin classifiers 10.3: 61-74, 1999
`Get source online <https://www.researchgate.net/profile/John_Platt/publication/2594015_Probabilistic_Outputs_for_Support_Vector_Machines_and_Comparisons_to_Regularized_Likelihood_Methods/links/004635154cff5262d6000000.pdf>`_
.. [2] Chuan Guo, Geoff Pleiss, Yu Sun and Kilian Q. Weinberger:
"On Calibration of Modern Neural Networks."
Proceedings of the 34th International Conference on Machine Learning-Volume 70. JMLR. org, 2017.
`Get source online <https://arxiv.org/abs/1706.04599>`_
.. [3] Fabian Küppers, Jan Kronenberger, Amirhossein Shantia and Anselm Haselhoff:
"Multivariate Confidence Calibration for Object Detection."
The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops.
.. [4] Fabian Küppers, Jan Kronenberger, Jonas Schneider and Anselm Haselhoff:
"Bayesian Confidence Calibration for Epistemic Uncertainty Modelling."
2021 IEEE Intelligent Vehicles Symposium (IV), 2021
"""
def __init__(self, *args, temperature_only: bool = False, **kwargs):
""" Create an instance of `LogisticCalibration`. Detailed parameter description given in class docs. """
super().__init__(*args, **kwargs)
self.temperature_only = temperature_only
# -------------------------------------------------
@property
def intercept(self) -> Union[np.ndarray, float]:
""" Getter for intercept of logistic calibration. """
if self._sites is None:
raise ValueError("Intercept is None. You have to call the method 'fit' first.")
if self.temperature_only:
raise ValueError("There is no intercept for temperature scaling.")
return self._sites['bias']['values']
@property
def weights(self) -> Union[np.ndarray, float]:
""" Getter for weights of logistic calibration. """
if self._sites is None:
raise ValueError("Weights is None. You have to call the method 'fit' first.")
return self._sites['weights']['values']
# -------------------------------------------------
def prepare(self, X: np.ndarray) -> torch.Tensor:
"""
        Preprocessing of the input data; called at the beginning of the fit function.
Parameters
----------
X : np.ndarray, shape=(n_samples, [n_classes]) or (n_samples, [n_box_features])
NumPy array with confidence values for each prediction on classification with shapes
1-D for binary classification, 2-D for multi class (softmax).
On detection, this array must have 2 dimensions with number of additional box features in last dim.
Returns
-------
torch.Tensor
Prepared data vector X as torch tensor.
"""
if len(X.shape) == 1:
X = np.reshape(X, (-1, 1))
# on detection mode, convert confidence to sigmoid and append the remaining features
if self.detection:
data_input = np.concatenate((self._inverse_sigmoid(X[:, 0]).reshape(-1, 1), X[:, 1:]), axis=1)
# on binary classification, simply convert the confidences to logits
elif self._is_binary_classification():
data_input = self._inverse_sigmoid(X)
# on multiclass classification, use inverse softmax instead
else:
data_input = self._inverse_softmax(X)
return torch.Tensor(data_input)
def prior(self):
"""
Prior definition of the weights used for log regression. This function has to set the
variables 'self.weight_prior_dist', 'self.weight_mean_init' and 'self.weight_stddev_init'.
"""
self._sites = OrderedDict()
# on temperature scaling, we only have one single weight for all classes
if self.temperature_only:
self._sites['weights'] = {
'values': None,
'constraint': constraints.real,
'init': {
'mean': torch.ones(1),
'scale': torch.ones(1)
},
'prior': dist.Normal(torch.ones(1), 10 * torch.ones(1), validate_args=True)
}
else:
# on detection mode or binary classification, we have a weight for each given feature (one for binary
# classification) and bias
if self.detection or self._is_binary_classification():
num_bias = 1
num_weights = self.num_features
# on multiclass classification, we have one weight and one bias for each class separately
else:
num_bias = self.num_classes
num_weights = self.num_classes
# set properties for "weights"
self._sites['weights'] = {
'values': None,
'constraint': constraints.real,
'init': {
'mean': torch.ones(num_weights),
'scale': torch.ones(num_weights)
},
'prior': dist.Normal(torch.ones(num_weights), 10 * torch.ones(num_weights), validate_args=True),
}
# set properties for "bias"
self._sites['bias'] = {
'values': None,
'constraint': constraints.real,
'init': {
'mean': torch.zeros(num_bias),
'scale': torch.ones(num_bias)
},
'prior': dist.Normal(torch.zeros(num_bias), 10 * torch.ones(num_bias), validate_args=True),
}
def model(self, X: torch.Tensor = None, y: torch.Tensor = None) -> torch.Tensor:
"""
Definition of the log regression model.
Parameters
----------
X : torch.Tensor, shape=(n_samples, n_log_regression_features)
Input data that has been prepared by "self.prepare" function call.
y : torch.Tensor, shape=(n_samples, [n_classes])
Torch tensor with ground truth labels.
Either as label vector (1-D) or as one-hot encoded ground truth array (2-D) (for multiclass MLE only).
Returns
-------
torch.Tensor, shape=(n_samples, [n_classes])
Logit of the log regression model.
"""
# sample from prior - on MLE, this weight will be set as conditional
weights = pyro.sample("weights", self._sites["weights"]["prior"])
if self.temperature_only:
bias = 0.
else:
bias = pyro.sample("bias", self._sites["bias"]["prior"])
# on detection or binary classification, use dot product to sum up all given features to one logit
if self.detection or self._is_binary_classification():
            # we need squeeze to remove the last (unnecessary) dim to avoid side-effects
            # temperature scaling: single scalar
if self.temperature_only:
def logit_op(x, w, b): return torch.squeeze(torch.sum(torch.mul(x, w), dim=1))
# platt scaling: one weight for each feature given
else:
weights = torch.reshape(weights, (-1, 1))
def logit_op(x, w, b): return torch.squeeze(torch.matmul(x, w) + b)
# define as probabilistic output the sigmoid and a bernoulli distribution
prob_op = torch.sigmoid
dist_op = dist.Bernoulli
else:
# the op for calculating the logit is an element-wise multiplication
# for vector scaling and to keep multinomial output
def logit_op(x, w, b): return torch.mul(x, w) + b
# define as probabilistic output the softmax and a categorical distribution
def prob_op(logit): return torch.softmax(logit, dim=1)
dist_op = dist.Categorical
# the first dimension of the given input data is the "independent" sample dimension
with pyro.plate("data", X.shape[0]):
# calculate logit
logit = logit_op(X, weights, bias)
# if MLE, (slow) sampling is not necessary. However, this is needed for 'variational' and 'mcmc'
if self.method in ['variational', 'mcmc']:
probs = prob_op(logit)
pyro.sample("obs", dist_op(probs=probs, validate_args=True), obs=y)
return logit
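# --- Hedged usage sketch (not part of the original module) ---
# Fits a Platt-scaling map on synthetic binary confidences and rescales them.
# The random data is purely illustrative, and fit()/transform() are assumed
# to be provided by the AbstractLogisticRegression base class.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    confidences = rng.uniform(0.05, 0.95, size=1000)           # uncalibrated scores
    ground_truth = (rng.uniform(size=1000) < confidences).astype(int)
    platt = LogisticCalibration()
    platt.fit(confidences, ground_truth)
    print(platt.transform(confidences)[:5])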
|
#!/usr/bin/env python
import argparse
import math
import matplotlib.pyplot as plt
def file_count(shape, chunkXY, chunkZ=1, chunkT=1, chunkC=1):
t, c, z, y, x = shape
return (
math.ceil(x / chunkXY)
* math.ceil(y / chunkXY)
* math.ceil(z / chunkZ)
* math.ceil(t / chunkT)
* math.ceil(c / chunkC)
)
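# --- Hedged worked example (not part of the original script) ---
# For the 2D case plotted below, shape = (1, 8, 1, 2**16, 2**16); a 256x256
# XY chunk with every other chunk size left at 1 gives
#   ceil(65536/256)**2 * ceil(8/1) = 256 * 256 * 8 = 524288 chunks,
# which is the point annotated as the chosen chunk size in plot().
def _example_chunk_count():
    return file_count((1, 8, 1, 2 ** 16, 2 ** 16), 256)  # -> 524288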
def plot(ax, twoD=True, font=16):
if twoD:
shape = (1, 8, 1, 2 ** 16, 2 ** 16)
chunkSizesXY = [32, 1024]
chunkSizesOther = (1, 2, 4, 8)
else:
shape = (100, 1, 1024, 1024, 1024)
chunkSizesXY = (16, 32, 64, 128)
chunkSizesOther = (1, 10, 100)
ax.set_ylabel("Number of chunks")
ax.set_yscale("log")
ax.set_xscale("log")
ax.set(xlim=(10, 2 * 10 ** 3), ylim=(10, 10 ** 8))
if twoD:
ax.set_xlabel("Chunk size (X and Y)")
ax.set_title("XYZCT: (64k, 64k, 1, 8, 1)")
chunkDim = "C"
annTitle = "Chosen chunk size:\n(256, 256, 1, 1, 1)"
xy = ((256), file_count(shape, 256))
else:
ax.set_xlabel("Chunk size (XYZ)")
ax.set_title("XYZCT: (1k, 1k, 1k, 1, 100)")
chunkDim = "T"
annTitle = "Chosen chunk size:\n(32, 32, 32, 1, 1)"
xy = ((32), file_count(shape, 32, chunkZ=32))
for item in (
[ax.title, ax.xaxis.label, ax.yaxis.label]
+ ax.get_xticklabels()
+ ax.get_yticklabels()
):
item.set_fontsize(font)
styles = ["solid", "dashed", "dashdot", "dotted"]
for whichChunk, chunkOther in enumerate(chunkSizesOther):
numFiles = []
fileSize = []
for i in chunkSizesXY:
if twoD:
count = file_count(shape, i, **{f"chunk{chunkDim}": chunkOther})
else:
# Could be simpler
count = file_count(
shape, i, chunkZ=i, **{f"chunk{chunkDim}": chunkOther}
)
numFiles.append(count)
fileSize.append(i)
ax.plot(
fileSize,
numFiles,
linewidth=0.5,
label=f"{chunkOther}",
linestyle=styles.pop(0),
)
ax.annotate(
annTitle,
xy=xy,
xycoords="data",
xytext=(0, 40),
textcoords="offset points",
arrowprops=dict(facecolor="black", shrink=0.05),
horizontalalignment="left",
verticalalignment="center",
fontsize=font - 4,
)
leg = ax.legend(
loc="lower left",
title=f"Chunk size ({chunkDim})",
frameon=False,
prop={"size": font},
)
for legobj in leg.legendHandles:
legobj.set_linewidth(0.5)
for axis in ["top", "bottom", "left", "right"]:
ax.spines[axis].set_linewidth(0.5)
    return ax
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("filename")
ns = parser.parse_args()
# fig = plt.figure()
# ax2D = fig.add_subplot(2, 1, 1)
# ax3D = fig.add_subplot(2, 1, 2)
fig, ax = plt.subplots(1, 2, figsize=(12, 5))
plot(ax[1], False)
plot(ax[0], True)
plt.savefig(ns.filename)
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Audio model specification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import csv
import io
import os
import tempfile
import tensorflow as tf
from tensorflow_examples.lite.model_maker.core.api.api_util import mm_export
from tensorflow_examples.lite.model_maker.core.task import model_util
import tensorflow_hub as hub
try:
from tflite_support.metadata_writers import audio_classifier as md_writer # pylint: disable=g-import-not-at-top
from tflite_support.metadata_writers import metadata_info as md_info # pylint: disable=g-import-not-at-top
from tflite_support.metadata_writers import writer_utils # pylint: disable=g-import-not-at-top
ENABLE_METADATA = True
except ImportError:
ENABLE_METADATA = False
class MetadataWriter:
"""Helper class to populate Audio Metadata, to be used in `with` statement.
Simple usage for model with two classification heads.
with MetadataWriter(tflite_path) as writer:
writer.add_input(sample_rate=16000, channels=1)
writer.add_output(name='animal_sound', labels=['dog', 'cat'])
writer.add_output(name='speech_command', labels=['yes', 'no'])
writer.save(tflite_path, json_filepath)
`add_output` can also take an ordered dict for multiple locales, example:
writer.add_output(name='animal_sound', labels=collections.OrderedDict([
('en', ['bird', 'cat']),
('fr', ['oiseau', 'chat'])
]))
"""
def __init__(self, tflite_filepath, **kwargs):
self._model = writer_utils.load_file(tflite_filepath)
self._general_md = md_info.GeneralMd(**kwargs)
self._inputs = []
self._outputs = []
def __enter__(self):
self._temp_folder = tempfile.TemporaryDirectory()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._temp_folder.cleanup()
# Delete the attribute so that it errors out if not in `with` statement.
delattr(self, '_temp_folder')
def add_input(self, **kwargs):
"""Add metadta for the input tensor."""
self._inputs.append(md_info.InputAudioTensorMd(**kwargs))
def add_output(self, name, labels, **kwargs):
"""Add metadata for output tensor in order."""
if isinstance(labels, list):
default_locale = None
labels = collections.OrderedDict([(default_locale, labels)])
return self.add_output(name, labels, **kwargs)
label_files = []
if isinstance(labels, collections.OrderedDict):
for locale, label_list in labels.items():
full_path = os.path.join(
self._temp_folder.name,
'{}_labels_{}.txt'.format(name, locale or 'default'))
model_util.export_labels(full_path, label_list)
label_files.append(
md_info.LabelFileMd(file_path=full_path, locale=locale))
else:
raise ValueError(
'`labels` should be either a list of labels or an ordered dict mapping `locale` -> list of labels. got: {}'
.format(labels))
idx = len(self._outputs)
self._outputs.append(
md_info.ClassificationTensorMd(
name=name,
label_files=label_files,
tensor_type=writer_utils.get_output_tensor_types(self._model)[idx],
**kwargs))
def save(self, tflite_filepath=None, json_filepath=None):
"""Persist model with metadata."""
if len(self._inputs) > 1:
raise ValueError('Only supports single input, got {}'.format(
len(self._inputs)))
input_md = self._inputs[0]
writer = md_writer.MetadataWriter.create_from_metadata_info_for_multihead(
model_buffer=self._model,
general_md=self._general_md,
input_md=input_md,
output_md_list=self._outputs)
if tflite_filepath:
writer_utils.save_file(writer.populate(), tflite_filepath, mode='wb')
if json_filepath:
writer_utils.save_file(
writer.get_metadata_json(), json_filepath, mode='wt')
def _ensure_tf25(version):
if version < '2.5':
raise RuntimeError(
'Audio Tasks requires TF2.5 or later. For example, you can run the '
'following command to install TF2.5.0rc2:\n\n'
'pip3 install tensorflow==2.5.0rc2\n\n')
def _get_tf_version():
return tf.__version__
class BaseSpec(abc.ABC):
"""Base model spec for audio classification."""
def __init__(self, model_dir=None, strategy=None):
_ensure_tf25(_get_tf_version())
self.model_dir = model_dir
if not model_dir:
self.model_dir = tempfile.mkdtemp()
tf.compat.v1.logging.info('Checkpoints are stored in %s', self.model_dir)
self.strategy = strategy or tf.distribute.get_strategy()
@abc.abstractproperty
def target_sample_rate(self):
pass
@abc.abstractmethod
def create_model(self, num_classes, train_whole_model=False):
pass
@abc.abstractmethod
def run_classifier(self, model, epochs, train_ds, validation_ds, **kwargs):
pass
def preprocess_ds(self, ds, is_training=False, cache_fn=None):
"""Returns a preprocessed dataset."""
_ = is_training
_ = cache_fn
return ds
def get_default_quantization_config(self):
"""Gets the default quantization configuration."""
return None
def _remove_suffix_if_possible(text, suffix):
return text.rsplit(suffix, 1)[0]
TFJS_MODEL_ROOT = 'https://storage.googleapis.com/tfjs-models/tfjs'
def _load_browser_fft_preprocess_model():
"""Load a model replicating WebAudio's AnalyzerNode.getFloatFrequencyData."""
model_name = 'sc_preproc_model'
file_extension = '.tar.gz'
filename = model_name + file_extension
# Load the preprocessing model, which transforms audio waveform into
# spectrograms (2D image-like representation of sound).
# This model replicates WebAudio's AnalyzerNode.getFloatFrequencyData
# (https://developer.mozilla.org/en-US/docs/Web/API/AnalyserNode/getFloatFrequencyData).
  # It performs short-time Fourier transform (STFT) using a length-2048 Blackman
  # window. It operates on mono audio at the 44100-Hz sample rate.
filepath = tf.keras.utils.get_file(
filename,
f'{TFJS_MODEL_ROOT}/speech-commands/conversion/{filename}',
cache_subdir='model_maker',
extract=True)
model_path = _remove_suffix_if_possible(filepath, file_extension)
return tf.keras.models.load_model(model_path)
def _load_tfjs_speech_command_model():
"""Download TFJS speech command model for fine-tune."""
origin_root = f'{TFJS_MODEL_ROOT}/speech-commands/v0.3/browser_fft/18w'
files_to_download = [
'metadata.json', 'model.json', 'group1-shard1of2', 'group1-shard2of2'
]
for filename in files_to_download:
filepath = tf.keras.utils.get_file(
filename,
f'{origin_root}/{filename}',
cache_subdir='model_maker/tfjs-sc-model')
model_path = os.path.join(os.path.dirname(filepath), 'model.json')
return model_util.load_tfjs_keras_model(model_path)
@mm_export('audio_classifier.BrowserFftSpec')
class BrowserFFTSpec(BaseSpec):
"""Model good at detecting speech commands, using Browser FFT spectrum."""
EXPECTED_WAVEFORM_LENGTH = 44032
# Information used to populate TFLite metadata.
_MODEL_NAME = 'AudioClassifier'
_MODEL_DESCRIPTION = ('Identify the most prominent type in the audio clip '
'from a known set of categories.')
_MODEL_VERSION = 'v1'
_MODEL_AUTHOR = 'TensorFlow Lite Model Maker'
_MODEL_LICENSES = ('Apache License. Version 2.0 '
'http://www.apache.org/licenses/LICENSE-2.0.')
_SAMPLE_RATE = 44100
_CHANNELS = 1
_INPUT_NAME = 'audio_clip'
_INPUT_DESCRIPTION = 'Input audio clip to be classified.'
_OUTPUT_NAME = 'probability'
_OUTPUT_DESCRIPTION = 'Scores of the labels respectively.'
def __init__(self, model_dir=None, strategy=None):
"""Initialize a new instance for BrowserFFT spec.
Args:
model_dir: The location to save the model checkpoint files.
strategy: An instance of TF distribute strategy. If none, it will use the
default strategy (either SingleDeviceStrategy or the current scoped
        strategy).
"""
super(BrowserFFTSpec, self).__init__(model_dir, strategy)
self._preprocess_model = _load_browser_fft_preprocess_model()
self._tfjs_sc_model = _load_tfjs_speech_command_model()
@property
def target_sample_rate(self):
return 44100
@tf.function(input_signature=[
tf.TensorSpec(shape=[None], dtype=tf.float32),
tf.TensorSpec([], dtype=tf.int32)
])
def _ensure_length(self, wav, unused_label):
return len(wav) >= self.EXPECTED_WAVEFORM_LENGTH
@tf.function(input_signature=[
tf.TensorSpec(shape=[None], dtype=tf.float32),
tf.TensorSpec([], dtype=tf.int32)
])
def _split(self, wav, label):
"""Split the long audio samples into multiple trunks."""
# wav shape: (audio_samples, )
chunks = tf.math.floordiv(len(wav), self.EXPECTED_WAVEFORM_LENGTH)
unused = tf.math.floormod(len(wav), self.EXPECTED_WAVEFORM_LENGTH)
# Drop unused data
wav = wav[:len(wav) - unused]
# Split the audio sample into multiple chunks
wav = tf.reshape(wav, (chunks, 1, self.EXPECTED_WAVEFORM_LENGTH))
return wav, tf.repeat(tf.expand_dims(label, 0), len(wav))
@tf.function(input_signature=[
tf.TensorSpec(shape=[1, EXPECTED_WAVEFORM_LENGTH], dtype=tf.float32),
tf.TensorSpec([], dtype=tf.int32)
])
def _preprocess(self, x, label):
"""Preprocess the dataset to extract the spectrum."""
# x has shape (1, EXPECTED_WAVEFORM_LENGTH)
spectrum = self._preprocess_model(x)
# y has shape (1, embedding_len)
spectrum = tf.squeeze(spectrum, axis=0)
# y has shape (embedding_len,)
return spectrum, label
def preprocess_ds(self, ds, is_training=False, cache_fn=None):
del is_training
autotune = tf.data.AUTOTUNE
ds = ds.filter(self._ensure_length)
ds = ds.map(self._split, num_parallel_calls=autotune).unbatch()
ds = ds.map(self._preprocess, num_parallel_calls=autotune)
if cache_fn:
ds = cache_fn(ds)
return ds
def create_model(self, num_classes, train_whole_model=False):
if num_classes <= 1:
raise ValueError(
'AudioClassifier expects `num_classes` to be greater than 1')
model = tf.keras.Sequential()
for layer in self._tfjs_sc_model.layers[:-1]:
model.add(layer)
model.add(
tf.keras.layers.Dense(
name='classification_head', units=num_classes,
activation='softmax'))
if not train_whole_model:
# Freeze all but the last layer of the model. The last layer will be
# fine-tuned during transfer learning.
for layer in model.layers[:-1]:
layer.trainable = False
return model
def run_classifier(self, model, epochs, train_ds, validation_ds, **kwargs):
model.compile(
optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
hist = model.fit(
train_ds, validation_data=validation_ds, epochs=epochs, **kwargs)
return hist
def create_serving_model(self, training_model):
"""Create a model for serving."""
combined = tf.keras.Sequential()
combined.add(self._preprocess_model)
combined.add(training_model)
# Build the model.
combined.build([None, self.EXPECTED_WAVEFORM_LENGTH])
return combined
def _export_metadata(self, tflite_filepath, index_to_label,
export_metadata_json_file):
"""Export TFLite metadata."""
with MetadataWriter(
tflite_filepath,
name=self._MODEL_NAME,
description=self._MODEL_DESCRIPTION,
version=self._MODEL_VERSION,
author=self._MODEL_AUTHOR,
licenses=self._MODEL_LICENSES) as writer:
writer.add_input(
name=self._INPUT_NAME,
description=self._INPUT_DESCRIPTION,
sample_rate=self._SAMPLE_RATE,
channels=self._CHANNELS)
writer.add_output(
labels=index_to_label,
name=self._OUTPUT_NAME,
description=self._OUTPUT_DESCRIPTION)
json_filepath = (os.path.splitext(tflite_filepath)[0] +
'.json') if export_metadata_json_file else None
writer.save(tflite_filepath, json_filepath)
def export_tflite(self,
model,
tflite_filepath,
with_metadata=True,
export_metadata_json_file=True,
index_to_label=None):
"""Converts the retrained model to tflite format and saves it.
This method overrides the default `CustomModel._export_tflite` method, and
    includes the pre-processing in the exported TFLite model since the support
    library can't handle audio tasks yet.
Args:
model: An instance of the keras classification model to be exported.
tflite_filepath: File path to save tflite model.
with_metadata: Whether the output tflite model contains metadata.
export_metadata_json_file: Whether to export metadata in json file. If
        True, export the metadata in the same directory as tflite model. Used
only if `with_metadata` is True.
index_to_label: A list that map from index to label class name.
"""
combined = self.create_serving_model(model)
# Sets batch size from None to 1 when converting to tflite.
model_util.set_batch_size(model, batch_size=1)
model_util.export_tflite(
combined, tflite_filepath, quantization_config=None)
# Sets batch size back to None to support retraining later.
model_util.set_batch_size(model, batch_size=None)
if with_metadata:
if not ENABLE_METADATA:
        print('Writing Metadata is not supported in the installed tflite-support '
'version. Please use tflite-support >= 0.2.*')
else:
self._export_metadata(tflite_filepath, index_to_label,
export_metadata_json_file)
@mm_export('audio_classifier.YamNetSpec')
class YAMNetSpec(BaseSpec):
"""Model good at detecting environmental sounds, using YAMNet embedding."""
EXPECTED_WAVEFORM_LENGTH = 15600 # effectively 0.975s
EMBEDDING_SIZE = 1024
# Information used to populate TFLite metadata.
_MODEL_NAME = 'yamnet/classification'
_MODEL_DESCRIPTION = 'Recognizes sound events'
_MODEL_VERSION = 'v1'
_MODEL_AUTHOR = 'TensorFlow Lite Model Maker'
_MODEL_LICENSES = ('Apache License. Version 2.0 '
'http://www.apache.org/licenses/LICENSE-2.0.')
_SAMPLE_RATE = 16000
_CHANNELS = 1
_INPUT_NAME = 'audio_clip'
_INPUT_DESCRIPTION = 'Input audio clip to be classified.'
_YAMNET_OUTPUT_NAME = 'yamnet'
_YAMNET_OUTPUT_DESCRIPTION = ('Scores in range 0..1.0 for each of the 521 '
'output classes.')
_CUSTOM_OUTPUT_NAME = 'custom'
_CUSTOM_OUTPUT_DESCRIPTION = (
'Scores in range 0..1.0 for each output classes.')
def __init__(
self,
model_dir: None = None,
strategy: None = None,
yamnet_model_handle='https://tfhub.dev/google/yamnet/1',
frame_length=EXPECTED_WAVEFORM_LENGTH, # Window size 0.975 s
frame_step=EXPECTED_WAVEFORM_LENGTH // 2, # Hop of 0.975 /2 s
keep_yamnet_and_custom_heads=True):
"""Initialize a new instance for YAMNet spec.
Args:
model_dir: The location to save the model checkpoint files.
strategy: An instance of TF distribute strategy. If none, it will use the
default strategy (either SingleDeviceStrategy or the current scoped
        strategy).
      yamnet_model_handle: Path of the TFHub model for retraining.
frame_length: The number of samples in each audio frame. If the audio file
is shorter than `frame_length`, then the audio file will be ignored.
frame_step: The number of samples between two audio frames. This value
        should not be larger than `frame_length`; the default uses a hop of half a frame.
keep_yamnet_and_custom_heads: Boolean, decides if the final TFLite model
contains both YAMNet and custom trained classification heads. When set
to False, only the trained custom head will be preserved.
"""
super(YAMNetSpec, self).__init__(model_dir, strategy)
self._yamnet_model_handle = yamnet_model_handle
self._yamnet_model = hub.load(yamnet_model_handle)
self._frame_length = frame_length
self._frame_step = frame_step
self._keep_yamnet_and_custom_heads = keep_yamnet_and_custom_heads
@property
def target_sample_rate(self):
return self._SAMPLE_RATE
def create_model(self, num_classes, train_whole_model=False):
model = tf.keras.Sequential([
tf.keras.layers.InputLayer(
input_shape=(YAMNetSpec.EMBEDDING_SIZE),
dtype=tf.float32,
name='embedding'),
tf.keras.layers.Dense(
num_classes, name='classification_head', activation='softmax')
])
return model
def run_classifier(self, model, epochs, train_ds, validation_ds, **kwargs):
model.compile(
optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
hist = model.fit(
train_ds, validation_data=validation_ds, epochs=epochs, **kwargs)
return hist
# Annotate the TF function with input_signature to avoid re-tracing. Otherwise
  # the TF function gets retraced every time the input shape is changed.
# Check https://www.tensorflow.org/api_docs/python/tf/function#args_1 for more
# information.
@tf.function(input_signature=[
tf.TensorSpec(shape=[None], dtype=tf.float32),
tf.TensorSpec([], dtype=tf.int32)
])
def _frame(self, wav, label):
clips = tf.signal.frame(
wav, frame_length=self._frame_length, frame_step=self._frame_step)
batch_labels = tf.repeat(tf.expand_dims(label, 0), len(clips))
return clips, batch_labels
@tf.function(input_signature=[
tf.TensorSpec(shape=[None], dtype=tf.float32),
tf.TensorSpec([], dtype=tf.int32)
])
def _extract_embedding(self, wav, label):
_, embeddings, _ = self._yamnet_model(wav) # (chunks, EMBEDDING_SIZE)
embedding = tf.reduce_mean(embeddings, axis=0)
return embedding, label
@tf.function(input_signature=[
tf.TensorSpec(shape=[EMBEDDING_SIZE], dtype=tf.float32),
tf.TensorSpec([], dtype=tf.int32)
])
def _add_noise(self, embedding, label):
noise = tf.random.normal(
embedding.shape, mean=0.0, stddev=.2, dtype=tf.dtypes.float32)
return noise + embedding, label
def preprocess_ds(self, ds, is_training=False, cache_fn=None):
autotune = tf.data.AUTOTUNE
ds = ds.map(self._frame, num_parallel_calls=autotune).unbatch()
ds = ds.map(self._extract_embedding, num_parallel_calls=autotune)
# Cache intermediate results right before data augmentation.
if cache_fn:
ds = cache_fn(ds)
if is_training:
ds = ds.map(self._add_noise, num_parallel_calls=autotune)
return ds
def _yamnet_labels(self):
class_map_path = self._yamnet_model.class_map_path().numpy()
class_map_csv_text = tf.io.read_file(class_map_path).numpy().decode('utf-8')
class_map_csv = io.StringIO(class_map_csv_text)
class_names = [
display_name for (class_index, mid,
display_name) in csv.reader(class_map_csv)
]
class_names = class_names[1:] # Skip CSV header
return class_names
def _export_metadata(self, tflite_filepath, index_to_label,
export_metadata_json_file):
"""Export TFLite metadata."""
with MetadataWriter(
tflite_filepath,
name=self._MODEL_NAME,
description=self._MODEL_DESCRIPTION,
version=self._MODEL_VERSION,
author=self._MODEL_AUTHOR,
licenses=self._MODEL_LICENSES) as writer:
writer.add_input(
name=self._INPUT_NAME,
description=self._INPUT_DESCRIPTION,
sample_rate=self._SAMPLE_RATE,
channels=self._CHANNELS)
if self._keep_yamnet_and_custom_heads:
writer.add_output(
labels=self._yamnet_labels(),
name=self._YAMNET_OUTPUT_NAME,
description=self._YAMNET_OUTPUT_DESCRIPTION)
writer.add_output(
labels=index_to_label,
name=self._CUSTOM_OUTPUT_NAME,
description=self._CUSTOM_OUTPUT_DESCRIPTION)
json_filepath = (os.path.splitext(tflite_filepath)[0] +
'.json') if export_metadata_json_file else None
writer.save(tflite_filepath, json_filepath)
def create_serving_model(self, training_model):
"""Create a model for serving."""
embedding_extraction_layer = hub.KerasLayer(
self._yamnet_model_handle, trainable=False)
keras_input = tf.keras.Input(
shape=(YAMNetSpec.EXPECTED_WAVEFORM_LENGTH,),
dtype=tf.float32,
name='audio') # (1, wav)
reshaped_input = tf.reshape(keras_input,
(YAMNetSpec.EXPECTED_WAVEFORM_LENGTH,)) # (wav)
scores, embeddings, _ = embedding_extraction_layer(reshaped_input)
serving_outputs = training_model(embeddings)
if self._keep_yamnet_and_custom_heads:
serving_model = tf.keras.Model(keras_input, [scores, serving_outputs])
else:
serving_model = tf.keras.Model(keras_input, serving_outputs)
return serving_model
def export_tflite(self,
model,
tflite_filepath,
with_metadata=True,
export_metadata_json_file=True,
index_to_label=None):
"""Converts the retrained model to tflite format and saves it.
This method overrides the default `CustomModel._export_tflite` method, and
    includes the spectrum extraction in the model.
    The exported model has input shape (1, number of wav samples).
Args:
model: An instance of the keras classification model to be exported.
tflite_filepath: File path to save tflite model.
with_metadata: Whether the output tflite model contains metadata.
export_metadata_json_file: Whether to export metadata in json file. If
True, export the metadata in the same directory as tflite model. Used
only if `with_metadata` is True.
index_to_label: A list that map from index to label class name.
"""
serving_model = self.create_serving_model(model)
# TODO(b/164229433): Remove SELECT_TF_OPS once changes in the bug are
# released.
model_util.export_tflite(
serving_model, tflite_filepath, quantization_config=None)
if with_metadata:
if not ENABLE_METADATA:
print('Writing Metadata is not supported in the current tflite-support '
'version. Please use tflite-support >= 0.2.*')
else:
self._export_metadata(tflite_filepath, index_to_label,
export_metadata_json_file)
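# A minimal usage sketch (illustrative only): it assumes this method lives on a
# YAMNetSpec instance, that `model` is the retrained Keras classification head,
# and the file path and label list below are hypothetical.
#
#   spec = YAMNetSpec()
#   spec.export_tflite(model, '/tmp/audio_classifier.tflite',
#                      index_to_label=['background', 'speech'])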
|
#!/usr/bin/python
import datetime
import socket
LISTENPORT=3141
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('0.0.0.0', LISTENPORT))
while True:
data, addr = sock.recvfrom(1024)
message = data.decode('utf-8', errors='replace')
print('%s: %s' % (datetime.datetime.now(), message))
with open('temperature.log', 'a') as f:
f.write('%s,%s\n' % (datetime.datetime.now(), message))
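# A minimal sender sketch for testing this listener from another shell
# (the host and the payload below are illustrative assumptions):
#
#   import socket
#   sender = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   sender.sendto(b'21.5', ('127.0.0.1', 3141))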
|
"""
The roseguarden project
Copyright (C) 2018-2020 Marcus Drobisch,
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
__authors__ = ["Marcus Drobisch"]
__contact__ = "roseguarden@fabba.space"
__credits__ = []
__license__ = "GPLv3"
from enum import Enum
class SpaceAccessType(Enum):
NO_ACCESS = "No access"
UNLIMITED = "Unlimited"
USER_BUDGET = "User Budget (days)"
GROUP_BUDGET = "Group Budget (days)"
AUTO_CHARGED_MONTHLY_BUDGET = "Auto-charged budget (monthly)"
AUTO_CHARGED_WEEKLY_BUDGET = "Auto-charged budget (weekly)"
def getAccessSpacesOfNode(node):
return []
def checkUserAccessToSpace(user, space):
return False
def setDefaultAccessTypeUserProperties(user, access_type):
pass
|
#!/usr/bin/env python3 -W all
"""
ner-frog.py: perform named entity recognition for Dutch
usage: ner-frog.py < text
notes:
* adapted from: https://www.tutorialspoint.com/python/python_networking.htm
* requires frog running and listening on localhost port 8080
* output lines with format: token SPACE postag SPACE nertag
* outputs empty line between sentences
20180604 erikt(at)xs4all.nl
"""
from pynlpl.clients.frogclient import FrogClient
import sys
COMMAND = sys.argv.pop(0)
HOST = "localhost"
PORT = 8080
NOFROGCONTACTMSG = "no Frog found on port "+str(PORT)+"! is it running?"
NOFROGOUTPUTMSG = "no data received from Frog! is it running?"
NERID = 4
POSID = 3
TOKENID = 0
def error(string):
sys.exit(COMMAND+": error: "+string)
def tokenInfoIsComplete(row):
return(row[TOKENID] != None and len(row) >= NERID+1)
def tokenInfoIsIncomplete(row):
return(not tokenInfoIsComplete(row) and len(row) > 0 and row[TOKENID] != None)
def printTokenInfo(row):
print(row[TOKENID],row[POSID],row[NERID])
def printEndOfSentence():
print("")
def prettyPrint(data):
for row in data:
if tokenInfoIsComplete(row): printTokenInfo(row)
elif tokenInfoIsIncomplete(row): error("incomplete token: "+str(row))
else: printEndOfSentence()
printEndOfSentence()
def connectToFrog():
try: frogClient = FrogClient(HOST,PORT,returnall=True)
except Exception as e: error(NOFROGCONTACTMSG+" "+str(e))
return(frogClient)
def processWithFrog(frogClient,text):
try: frogOutput = frogClient.process(text)
except Exception as e: error(NOFROGOUTPUTMSG+" "+str(e))
return(frogOutput)
def main(argv):
frogClient = connectToFrog()
for line in sys.stdin:
frogOutput = processWithFrog(frogClient,line)
prettyPrint(frogOutput)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
import shutil
import time
import exifread
import os
if __name__ == '__main__':
photo_directory = input("Please input image directory: ")
filenames = os.listdir(photo_directory)
print(filenames, "\nDirectory contains files above, are you sure to process? (Y/N)")
answer = input("")
while answer != "Y" and answer != "N":
print("Invalid input, Y for yes and N for no: ")
answer = input("")
count = 0
if answer == "Y":
print("Processing...")
for filename in filenames:
file_path = os.path.join(photo_directory, filename)
# Ignore directories
if not os.path.isdir(file_path):
# Ignore .ini files
if not filename.endswith(".ini"):
with open(file_path, "rb") as f:
exif_info = exifread.process_file(f, stop_tag="EXIF DateTimeOriginal")
# Has exif
if exif_info.get("EXIF DateTimeOriginal", None) is not None:
taken_time_string = str(exif_info["EXIF DateTimeOriginal"])
taken_time_struct = time.strptime(taken_time_string, "%Y:%m:%d %H:%M:%S")
# Doesn't have exif
else:
create_timestamp = os.path.getctime(file_path)
modified_timestamp = os.path.getmtime(file_path)
taken_timestamp = modified_timestamp if create_timestamp > modified_timestamp else create_timestamp
taken_time_struct = time.localtime(taken_timestamp)
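# Copy the photo into <photo_directory>/<YYYY>/<MM>/ based on the taken time derived above.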
year_path = os.path.join(photo_directory, str(taken_time_struct.tm_year))
mon_path = os.path.join(year_path, str(taken_time_struct.tm_mon).zfill(2))
if not os.path.exists(year_path):
os.mkdir(year_path)
if not os.path.exists(mon_path):
os.mkdir(mon_path)
shutil.copy(file_path, mon_path)
count += 1
print("Finished (%d in total)." % count)
|
#!/usr/bin/env python
# coding: utf-8
# # Loading data
import pandas as pd
import plotly.express as px
from tqdm import tqdm
import functools
import numpy as np
from difflib import SequenceMatcher
from oauthlib.oauth2 import BackendApplicationClient
from requests_oauthlib import OAuth2Session
from datetime import datetime, timedelta
import pprint
import requests
import os
import getpass
import json
from queue import Queue
from threading import Thread
from time import time
import logging
import os
# Caching results in case of multiple calls.
@functools.lru_cache(maxsize=128)
def get_tiles(municipalityId: int) -> pd.DataFrame:
"""Fetches tile information for a municipality id.
Args:
municipalityId: id of the municipality as defined in by the federal office of statistics,
https://www.bfs.admin.ch/bfs/fr/home/bases-statistiques/repertoire-officiel-communes-suisse.assetdetail.11467406.html
Return:
A dataframe containing the following columns:
[tileID, ll_lat, ll_lon, ur_lat, ur_lon]
tileID: corresponds to a unique ID as defined in the Swisscom FAQ page.
ll_lon: longitude coordinate of the lower left corner of the tile.
ll_lat: latitude coordinate of the lower left corner of the tile.
ur_lon: longitude coordinate of the upper right corner of the tile.
ur_lat: latitude coordinate of the upper right corner of the tile.
If municipalityId is invalid, an error message is printed and an empty DataFrame is returned.
"""
api_request = (
BASE_URL
+ f'/grids/municipalities/{municipalityId}'
)
data = oauth.get(api_request, headers=headers).json()
if(data.get('status') == None):
tileID = [t['tileId'] for t in data['tiles']]
ll_lon = [t['ll']['x'] for t in data['tiles']]
ll_lat= [t['ll']['y'] for t in data['tiles']]
ur_lon = [t['ur']['x'] for t in data['tiles']]
ur_lat = [t['ur']['y'] for t in data['tiles']]
else:
print(f'get_tiles: failed with status code {data.get("status")}. {data.get("message")}')
return pd.DataFrame(data={'tileID': [], 'll_lat': [], 'll_lon': [], 'ur_lat': [], 'ur_lon': []})
return pd.DataFrame(data={'tileID': tileID, 'll_lat': ll_lat, 'll_lon': ll_lon, 'ur_lat': ur_lat, 'ur_lon': ur_lon})
def get_municipalityID(name: str) -> np.array(int):
"""Converts a municipality name to ID
Args:
name: Name of the municipality.
Returns:
An array containing all the municipality IDs corresponding to the name.
If the name is invalid, an empty array is returned.
"""
return commune.loc[commune.GDENAME == name].GDENR.to_numpy()
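# Illustrative usage (mirrors fetch_data_city below; "Bern" is an arbitrary example):
#   bern_tiles = get_tiles(get_municipalityID("Bern")[0])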
def visualize_coordinates(df: pd.DataFrame, latitude: str, longitude: str) -> None :
"""Visualizes coordinates in dataframe on map
Retrieves the columns named latitude and longitude and visualizes them on a map.
Args:
df: A dataframe containing the coordinates.
latitude: String key of the column in the dataframe containing the latitude.
longitude: String key of the column in the dataframe containing the longitude.
"""
fig = px.scatter_mapbox(df, lat=latitude, lon=longitude,
color_continuous_scale=px.colors.cyclical.IceFire, size_max=15, zoom=10,
mapbox_style="carto-positron")
fig.show()
def get_all_tiles_switzerland() -> pd.DataFrame:
"""Fetches the tile information for all the tiles in Switzerland.
Returns:
A DataFrame containing the tile information for every tile in Switzerland.
The format of the DataFrame is the same as the return of get_tiles()
"""
tiles = get_tiles(commune.GDENR.unique()[0])
for c in tqdm(commune.GDENR.unique().tolist()):
tiles = tiles.append(get_tiles(c))
return tiles
def get_daily_demographics(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0) ):
"""Fetches daily demographics
Fetches the daily demographics, age distribution, of the tiles.
Args:
tiles: Array of tile IDs for which demographic data will be queried.
day: Date of the data to be fetched.
Returns:
A dataframe indexed by tileID with the columns ageDistribution and maleProportion
+----------+-----------------------+---------------------+
| | ageDistribution | maleProportion |
+----------+-----------------------+---------------------+
| 44554639 | NaN | 0.49828359484672546 |
+----------+-----------------------+---------------------+
| 44271906 | [0.21413850784301758, | 0.493218 |
| | 0.27691012620925903, | |
| | 0.37422287464141846, | |
| | 0.13472850620746613] | |
+----------+-----------------------+---------------------+
In the example above tile 44554639 does not have any age distribution data.
The data is k-anonymized. Therefore, if a tile is missing data, it
means that the data is not available. To find out more about demographics visit the Heatmap FAQ.
"""
dates = [(day + timedelta(hours=delta)) for delta in range(24)]
date2score = dict()
for tiles_subset in [tiles[i:i + MAX_NB_TILES_REQUEST] for i in range(0, len(tiles), MAX_NB_TILES_REQUEST)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-demographics/daily/{day.isoformat().split("T")[0]}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
data = oauth.get(api_request, headers=headers).json()
for t in data.get("tiles", []):
if date2score.get(t['tileId']) == None:
date2score[t['tileId']] = dict()
date2score[t['tileId']] = {"ageDistribution": t.get("ageDistribution"),"maleProportion": t.get("maleProportion")}
return pd.DataFrame.from_dict(date2score).transpose()
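# Illustrative usage (hypothetical variable names):
#   daily_dem = get_daily_demographics(bern_tiles['tileID'].to_numpy())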
def get_hourly_demographics_dataframe(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0)):
"""Fetches hourly demographics of age categories for 24 hours
Fetches the hourly demographics, age distribution, of the tiles.
Age categories are the following: 0-19, 20-39, 40-64, >64.
Args:
tiles: Array of tile IDs for which demographic data will be queried.
day: Date of the data to be fetched.
Returns:
DataFrame containing the demographics. The names
of the columns are:
[age_cat, age_distribution, male_proportion]
+----------+---------------------+---------+------------------+-----------------+
| | | age_cat | age_distribution | male_proportion |
+----------+---------------------+---------+------------------+-----------------+
| tileID | time | | | |
+----------+---------------------+---------+------------------+-----------------+
| 44394309 | 2020-01-27T00:00:00 | NaN | NaN | 0.474876 |
+----------+---------------------+---------+------------------+-----------------+
| | 2020-01-27T01:00:00 | NaN | NaN | 0.483166 |
+----------+---------------------+---------+------------------+-----------------+
| | ... | | | |
+----------+---------------------+---------+------------------+-----------------+
| 44290729 | 2020-01-27T06:00:00 | 0.0 | 0.192352 | 0.497038 |
+----------+---------------------+---------+------------------+-----------------+
| | 2020-01-27T06:00:00 | 1.0 | 0.269984 | 0.497038 |
+----------+---------------------+---------+------------------+-----------------+
| | 2020-01-27T06:00:00 | 2.0 | 0.363481 | 0.497038 |
+----------+---------------------+---------+------------------+-----------------+
| | 2020-01-27T06:00:00 | 3.0 | 0.174183 | 0.497038 |
+----------+---------------------+---------+------------------+-----------------+
The data is k-anonymized. Therefore, if some tiles are not present in the output dataframe, it
means that the data is not available. To find out more about demographics visit the Heatmap FAQ.
"""
def get_hourly_demographics(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0) ):
"""Fetches hourly male proportion and age categories for 24 hours
Args:
tiles: Array of tile IDs for which demographic data will be queried.
day: Date of the data to be fetched.
Returns:
Returns a dictionary keyed by tileID whose values are objects of the following form:
{tileID: {dateTime:{ "ageDistribution": [0-19, 20-39, 40-64, 64+], "maleProportion": value},
{dateTime2: ...}}}
26994514: {'2020-01-27T00:00:00': {'ageDistribution': [0.1925136297941208,
0.2758632302284241,
0.362215131521225,
0.16940800845623016],
'maleProportion': 0.4727686941623688},
'2020-01-27T01:00:00': {'ageDistribution': None,
'maleProportion': 0.4896690547466278},
'2020-01-27T02:00:00': {'ageDistribution': None,
'maleProportion': 0.48882684111595154},
The data is k-anonymized. Therefore, if some values are None, it means that no data was available.
To find out more about demographics visit the Heatmap FAQ.
"""
dates = [(day + timedelta(hours=delta)) for delta in range(24)]
date2score = dict()
for dt in tqdm(dates, desc="get_hourly_demographics: hours", leave=True):
for tiles_subset in [tiles[i:i + 100] for i in range(0, len(tiles), 100)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-demographics/hourly/{dt.isoformat()}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
data = oauth.get(api_request, headers=headers).json()
for t in data.get("tiles", []):
if date2score.get(t['tileId']) == None:
date2score[t['tileId']] = dict()
date2score.get(t['tileId'])[dt.isoformat()] = {"ageDistribution": t.get("ageDistribution"),"maleProportion": t.get("maleProportion")}
return date2score
data = get_hourly_demographics(tiles, day)
tile_id = []
time_data = []
age_distribution = []
age_cat = []
male_proportion = []
for i in data:
for time in data[i]:
if data[i][time].get("ageDistribution") != None:
for (idx,a) in enumerate(data[i][time].get("ageDistribution", [])):
age_cat.append(idx)
age_distribution.append(a)
tile_id.append(i)
time_data.append(time)
male_proportion.append(data[i][time].get("maleProportion"))
else:
tile_id.append(i)
time_data.append(time)
age_distribution.append(None)
male_proportion.append(data[i][time].get("maleProportion"))
age_cat.append(None)
return pd.DataFrame(data={'tileID': tile_id, "age_cat": age_cat, 'age_distribution':age_distribution, "male_proportion": male_proportion, 'time': time_data}).set_index(['tileID', 'time'])
def get_daily_density(tiles: np.array(int), day=datetime(year=2020, month=1, day=27)) -> pd.DataFrame:
"""Fetches the daily density of tiles.
Fetches the daily density of the tiles and creates a dataframe of the fetched data.
Args:
tiles: Array of tile IDs for which daily density data needs to be fetched.
day: Day to fetch the density data for.
Returns:
DataFrame containing the tileID and the score. The name of the column is:
[score]
The identifier of the row is based on the tileID
+----------+-------+
| | score |
+----------+-------+
| tileID | |
+----------+-------+
| 44394309 | 1351 |
+----------+-------+
| 44394315 | 1103 |
+----------+-------+
| 44460297 | 875 |
+----------+-------+
| 44488589 | 1387 |
+----------+-------+
| 44498028 | 678 |
+----------+-------+
Tile with k-anonymized dwell density score. If tile not present Swisscom is
unable to provide a value due to k-anonymization. To find out more on density
scores read the Heatmap FAQ.
"""
tileID = []
score = []
for tiles_subset in [tiles[i:i + MAX_NB_TILES_REQUEST] for i in range(0, len(tiles), MAX_NB_TILES_REQUEST)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-density/daily/{day.isoformat().split("T")[0]}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
data = oauth.get(api_request, headers=headers).json()
if data.get("tiles") != None:
for t in data["tiles"]:
tileID.append(t['tileId'])
score.append(t["score"])
return pd.DataFrame(data={'tileID': tileID, 'score':score}).set_index("tileID")
def get_hourly_density_dataframe(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0)):
"""Fetches the hourly density of tiles for 24 hours.
Fetches the hourly density of the tiles and creates a dataframe of the fetched data.
Args:
tiles: Array of tile IDs for which hourly density data needs to be fetched.
day: Day to fetch the density data for.
Returns:
DataFrame containing the tileID and the score. The name of the column is:
[score]
The identifier of the row is based on [tileID, time]
+----------+---------------------+-------+
| | | score |
+----------+---------------------+-------+
| tileID | time | |
+----------+---------------------+-------+
| 44394309 | 2020-01-27T00:00:00 | 52 |
| +---------------------+-------+
| | 2020-01-27T01:00:00 | 68 |
| +---------------------+-------+
| | 2020-01-27T02:00:00 | 69 |
| +---------------------+-------+
| | 2020-01-27T03:00:00 | 69 |
| +---------------------+-------+
| | 2020-01-27T04:00:00 | 69 |
+----------+---------------------+-------+
Tile with k-anonymized dwell density score. If tile not present Swisscom is
unable to provide a value due to k-anonymization. To find out more on density
scores read the Heatmap FAQ.
"""
def get_hourly_density(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0)):
dates = [(day + timedelta(hours=delta)) for delta in range(24)]
date2score = dict()
print("getHourlyDensity")
for dt in tqdm(dates, desc="get_hourly_density: hours", leave=True):
for tiles_subset in [tiles[i:i + 100] for i in range(0, len(tiles), 100)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-density/hourly/{dt.isoformat()}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
for t in oauth.get(api_request, headers=headers).json().get("tiles",[]):
if date2score.get(t['tileId']) == None:
date2score[t['tileId']] = dict()
date2score.get(t['tileId'])[dt.isoformat()] = t['score']
return date2score
tiles_data = []
time_data = []
score = []
data = get_hourly_density(tiles, day)
for t in data:
for time in data[t]:
time_data.append(time)
tiles_data.append(t)
score.append(data[t][time])
return pd.DataFrame(data={'tileID': tiles_data, 'score':score, 'time': time_data}).set_index(['tileID', 'time'])
def fetch_data_city(city: str) -> None:
"""Fetches the data for a city if the data is not yet cashed on the computer.
"""
compression = ".xz"
folder = os.path.join(".","data")
def file_path(file_name: str) -> str:
return os.path.join(folder, file_name)
if not(os.path.exists(folder)):
os.mkdir(folder)
tiles_path = file_path(f'{city}Tiles.pkl{compression}')
hourly_dem_path = file_path(f'{city}HourlyDemographics.pkl{compression}')
hourly_density_path = file_path(f'{city}HourlyDensity.pkl{compression}')
daily_density_path = file_path(f'{city}DensityDaily.pkl{compression}')
daily_demographics_path = file_path(f'{city}DemographicsDaily.pkl{compression}')
if not(os.path.isfile(tiles_path)):
tiles = get_tiles(get_municipalityID(city)[0])
tiles.to_pickle(tiles_path)
else:
tiles = pd.read_pickle(tiles_path)
if not(os.path.isfile(hourly_dem_path)):
hourly_dem = get_hourly_demographics_dataframe(tiles['tileID'].to_numpy())
hourly_dem.to_pickle(hourly_dem_path)
if not(os.path.isfile(hourly_density_path)):
hourly_dens = get_hourly_density_dataframe(tiles['tileID'].to_numpy())
hourly_dens.to_pickle(hourly_density_path)
if not(os.path.isfile(daily_density_path)):
get_daily_density(tiles['tileID'].to_numpy()).to_pickle(daily_density_path)
if not(os.path.isfile(daily_demographics_path)):
get_daily_demographics(tiles['tileID'].to_numpy()).to_pickle(daily_demographics_path)
def clean_cities_list(cities: [str]) -> [str]:
"""Cleans the list of cities by removing all the cities that are not found in the
official list of cities provided by the Federal Statistics Office.
Args:
List of cities to check and clean.
Return:
List containing a subset of the input list such that all elements are valid.
"""
invalid_cities = []
# Validate that the city names exist in the official commune list.
for c in cities:
if len(commune.loc[commune.GDENAME == c].GDENR.to_numpy()) == 0:
city = []
sim_value = []
for f in commune.GDENAME:
r = SequenceMatcher(None, c, f).ratio()
if r > 0.5:
city.append(f)
sim_value.append(r)
d = pd.DataFrame(data={"city": city, "value": sim_value})
potential_cities = d.sort_values("value", ascending=False).head(5).city.to_numpy()
print(f"City nammed: {c} cannot be found in official records. Did you mean: {potential_cities} ? {c} will be ignored.")
invalid_cities.append(c)
return [c for c in cities if not(c in invalid_cities)]
# Multithread fetch implementation
class DownloadWorker(Thread):
def __init__(self, queue):
Thread.__init__(self)
self.queue = queue
def run(self):
while True:
# Get the work from the queue and expand the tuple
city = self.queue.get()
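# -1 is a shutdown sentinel: put it back so sibling workers also see it and exit.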
if city == -1:
self.queue.put(-1)
break
try:
fetch_data_city(city)
finally:
self.queue.task_done()
def download_commune_excel() -> None:
'''
Downloads the excel spreadsheet from the Swiss Federal Statistical Office that maps the town name to unique ID
'''
print('Beginning commune file download with requests')
folder = os.path.join(".","data")
if not(os.path.exists(folder)):
os.mkdir(folder)
url = 'https://www.bfs.admin.ch/bfsstatic/dam/assets/11467406/master'
r = requests.get(url)
with open(os.path.join(".", "data", 'commune.xlsx'), 'wb') as f:
f.write(r.content)
print("End of commune file download")
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
BASE_URL = "https://api.swisscom.com/layer/heatmaps/demo"
TOKEN_URL = "https://consent.swisscom.com/o/oauth2/token"
MAX_NB_TILES_REQUEST = 100
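# Tile queries are sent in chunks of at most MAX_NB_TILES_REQUEST IDs per API request (see the batching loops above).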
headers = {"scs-version": "2"}
client_id = "" # customer key in the Swisscom digital market place
client_secret = "" # customer secret in the Swisscom digital market place
if client_id == "":
client_id = os.environ.get("CLIENT_ID", "")
if client_id == "":
client_id = input("Enter MIP Client ID: ")
os.environ["CLIENT_ID"] = client_id
if client_secret == "":
client_secret = os.environ.get("CLIENT_SECRET", "")
if client_secret == "":
client_secret = getpass.getpass('Enter MIP client secret:')
os.environ["CLIENT_SECRET"] = client_secret
# Fetch an access token
client = BackendApplicationClient(client_id=client_id)
oauth = OAuth2Session(client=client)
oauth.fetch_token(token_url=TOKEN_URL, client_id=client_id,
client_secret=client_secret)
def main():
ts = time()
if not(os.path.exists(os.path.join(".", "data", 'commune.xlsx'))):
download_commune_excel()
global commune
commune = pd.read_excel(os.path.join(".", "data", 'commune.xlsx'), sheet_name='GDE')
cities = ["Saas-Fee", "Arosa", "Bulle", "Laax","Belp" ,"Saanen","Adelboden", "Andermatt", "Davos", "Bulle", "Bern", "Genève", "Lausanne", "Zürich", "Neuchâtel", "Sion", "St. Gallen", "Appenzell", "Solothurn", "Zug", "Fribourg", "Luzern", "Ecublens (VD)", "Kloten", "Le Grand-Saconnex", "Nyon", "Zermatt", "Lugano"]
cities = clean_cities_list(cities)
queue = Queue()
for x in range(2):
worker = DownloadWorker(queue)
worker.daemon = True
worker.start()
for c in cities:
logger.info('Queueing {}'.format(c))
queue.put(c)
queue.join()
queue.put(-1)
logger.info('Took %s', time() - ts)
list_of_cities_path = os.path.join(".", "data","CityList.json")
cityList=[]
if os.path.isfile(list_of_cities_path):
with open(list_of_cities_path, "r") as filehandle:
cityList = json.load(filehandle)
with open(list_of_cities_path, "w") as filehandle:
for city in cities:
if not(city in cityList):
cityList.append(city)
json.dump(cityList, filehandle)
if __name__ == "__main__":
main()
# Other functions not currently used
def get_daily_demographics_male(tiles: np.array(int), day=datetime(year=2020, month=1, day=27)) -> pd.DataFrame:
"""Fetches Daily demographics.
Fetches the daily male proportion of the tiles and creates a dataframe of the fetched data.
Args:
tiles: Array of tile IDs for which demographic data will be queried.
day: Date of the data to be fetched.
Returns:
DataFrame containing the tileID and the male proportion. The names of the columns are:
[tileID, maleProportion]
The data is k-anonymized. Therefore, if some tiles are not present in the output dataframe, it
means that the data is not available. To find out more about demographics visit the Heatmap FAQ.
"""
tileID = []
maleProportion = []
for tiles_subset in [tiles[i:i + MAX_NB_TILES_REQUEST] for i in range(0, len(tiles), MAX_NB_TILES_REQUEST)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-demographics/daily/{day.isoformat().split("T")[0]}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
data = oauth.get(api_request, headers=headers).json()
if data.get("tiles") != None:
for t in data["tiles"]:
if t.get("maleProportion") != None:
tileID.append(t['tileId'])
maleProportion.append(t["maleProportion"])
return pd.DataFrame(data={'tileID': tileID, 'maleProportion':maleProportion})
def get_daily_demographics_age(tiles: np.array(int), day=datetime(year=2020, month=1, day=27)) -> pd.DataFrame:
"""Fetches daily demographics of age categories
Fetches the daily demographics, age distribution, of the tiles and creates a dataframe of the fetched data.
Args:
tiles: Array of tile IDs for which demographic data will be queried.
day: Date of the data to be fetched.
Returns:
DataFrame containing the tileID and an array of values corresponding to the age distribution. The names
of the columns are:
[tileID, ageDistribution]
The data is k-anonymized. Therefore, if some tiles are not present in the output dataframe, it
means that the data is not available. To find out more about demographics visit the Heatmap FAQ.
"""
tileID = []
ageDistribution = []
for tiles_subset in [tiles[i:i + MAX_NB_TILES_REQUEST] for i in range(0, len(tiles), MAX_NB_TILES_REQUEST)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-demographics/daily/{day.isoformat().split("T")[0]}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
data = oauth.get(api_request, headers=headers).json()
for t in data.get("tiles", []):
if t.get("ageDistribution") != None:
tileID.append(t['tileId'])
ageDistribution.append(t["ageDistribution"])
return pd.DataFrame(data={'tileID': tileID, 'ageDistribution':ageDistribution})
|
from __future__ import absolute_import, division, print_function
import os
import time
import pandas as pd
import numpy as np
import seaborn as sns
from collections import Counter
import matplotlib.pyplot as plt
from sklearn.externals import joblib
from sklearn.preprocessing import Normalizer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import QuantileTransformer
from sklearn.decomposition import PCA
from src.meteoro_skills import CategoricalScores
from src.meteoro_skills import ContinuousScores
import tensorflow as tf
from tensorflow import keras
from keras import backend as K
from tensorflow.keras import layers
from keras.layers import GaussianNoise
from keras.layers import GaussianDropout
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
#from keras.models import model_from_yaml
from keras.models import load_model
print('TF version '+tf.__version__)
# ------------------------------------------------------------------------------
def tic():
global _start_time
_start_time = time.time()
def tac():
t_sec = round(time.time() - _start_time)
(t_min, t_sec) = divmod(t_sec, 60)
(t_hour, t_min) = divmod(t_min, 60)
print('Time passed: {}hour:{}min:{}sec'.format(t_hour, t_min, t_sec))
def mean_squared_error(y_test, y_pred):
return K.mean(K.square(y_pred - y_test), axis=-1)
# ------------------------------------------------------------------------------
class Training:
"""
This module is intended to automate the TensorFlow Neural Network training.
"""
PCA = PCA()
seed = 0
run_prefix = ''
version = ''
vernick = ''
file = ''
path = ''
fig_title = ''
path_fig = ''
mod_out_pth = ''
mod_out_name = ''
def __init__(self, random_seed=0,
run_prefix='',
version='',
version_nickname='',
csv_entry='',
csv_path='',
figure_path='',
model_out_path='',
model_out_name=''):
self.run_prefix = run_prefix
self.seed = random_seed
self.version = version
self.vernick = version_nickname
self.file = csv_entry
self.path = csv_path
self.path_fig = figure_path
self.fig_title = run_prefix + version + version_nickname
self.mod_out_pth = model_out_path
self.mod_out_name = model_out_name
# -------------------------------------------------------------------------
# DROP DATA OUTSIDE INTERVAL
# -------------------------------------------------------------------------
@staticmethod
def keep_interval(keepfrom: float, keepto: float, dataframe, target_col: str):
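"""Return the rows of dataframe whose target_col value lies within [keepfrom, keepto]."""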
keepinterval = np.where((dataframe[target_col] >= keepfrom) &
(dataframe[target_col] <= keepto))
result = dataframe.iloc[keepinterval]
return result
# -------------------------------------------------------------------------
# BUILD MODELS DEFINITIONS : CLAS = CLASSIFICATION and REG = REGRESSION
# -------------------------------------------------------------------------
@staticmethod
def build_class_model():
'''
Function to create and configure the Keras
model (Sequential and Dense layers).
'''
# Create the Keras model:
model = Sequential()
model.add(Dense(8, input_dim=4, kernel_initializer='uniform', activation='relu'))
model.add(Dense(2, kernel_initializer='uniform', activation='relu'))
model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))
# Compile model
model.compile(loss='binary_crossentropy', optimizer='SGD', metrics=['accuracy'],)
return model
@staticmethod
def build_reg_model(input_size):
'''
Function to create and configure the Keras
model (Sequential and Dense layers).
'''
model = Sequential()
model.add(GaussianNoise(0.01, input_shape=(input_size,)))
model.add(Dense(33, activation='relu'))
model.add(Dense(12, activation='relu'))
model.add(Dense(1))
model.compile(loss='mean_squared_error',
optimizer='adam',
metrics=['mean_absolute_error', 'mean_squared_error'])
return model
# -------------------------------------------------------------------------
# EXECUTION OF READING INPUT ATTRIBUTES, SCALING, PCA, SPLIT AND RUN MODEL!
# -------------------------------------------------------------------------
def autoExecClass(self):
# Fix random seed for reproducibility:
np.random.seed(self.seed)
# Load dataset:
df = pd.read_csv(os.path.join(self.path, self.file), sep=',', decimal='.')
x, y= df.loc[:,['36V', '89V', '166V', '190V']], df.loc[:,['TagRain']]
x_arr = np.asanyarray(x)
y_arr = np.asanyarray(y)
y_arr = np.ravel(y_arr)
# Scaling the input parameters:
# scaler_min_max = MinMaxScaler()
norm_sc = Normalizer()
x_normalized= norm_sc.fit_transform(x_arr)
# Split the dataset in test and train samples:
x_train, x_test, y_train, y_test = train_test_split(x_normalized,
y_arr, test_size=0.10,
random_state=101)
# Create the classification model instance:
model=self.build_class_model()
tic()
#------------------------------------------------------------------------------
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
EPOCHS = 1000
history = model.fit(x_train, y_train,
epochs=EPOCHS, validation_split=0.2, batch_size=10,
verbose=0, callbacks=[PrintDot()])
print(history.history.keys())
# ------------------------------------------------------------------------------
# Visualize the model's training progress using the stats
# stored in the history object.
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
# ------------------------------------------------------------------------------
# Saving model to YAML:
# model_yaml = model.to_yaml()
# with open(self.mod_out_pth + self.mod_out_name + '.yaml', 'w') as yaml_file:
# yaml_file.write(model_yaml)
#
# # serialize weights to HDF5
# model.save_weights(self.mod_out_pth + self.mod_out_name + '.h5')
# print("Saved model to disk")
# tac()
# Saving the complete model in HDF5:
model.save(self.mod_out_pth + self.mod_out_name + '.h5')
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
def autoExecReg(self):
# Fix random seed for reproducibility:
np.random.seed(self.seed)
# ------------------------------------------------------------------------------
df_orig = pd.read_csv(os.path.join(self.path, self.file), sep=',', decimal='.')
df_input = df_orig.loc[:, ['10V', '10H', '18V', '18H', '36V', '36H', '89V', '89H',
'166V', '166H', '183VH', 'sfccode', 'T2m', 'tcwv', 'PCT36', 'PCT89', '89VH',
'lat']]
colunas = ['10V', '10H', '18V', '18H', '36V', '36H', '89V', '89H',
'166V', '166H', '183VH', 'sfccode', 'T2m', 'tcwv', 'PCT36', 'PCT89', '89VH',
'lat']
scaler = StandardScaler()
normed_input = scaler.fit_transform(df_input)
df_normed_input = pd.DataFrame(normed_input[:],
columns=colunas)
ancillary = df_normed_input.loc[:, ['183VH', 'sfccode', 'T2m', 'tcwv', 'PCT36', 'PCT89', '89VH',
'lat']]
# regions=df_orig.loc[:,['R1','R2','R3','R4','R5']]
# ------------------------------------------------------------------------------
# Choosing the number of components:
TB1 = df_normed_input.loc[:, ['10V', '10H', '18V', '18H']]
TB2 = df_normed_input.loc[:, ['36V', '36H', '89V', '89H', '166V', '166H']]
# ------------------------------------------------------------------------------
# Verifying the number of components that most contribute:
pca = self.PCA
pca1 = pca.fit(TB1)
plt.plot(np.cumsum(pca1.explained_variance_ratio_))
plt.xlabel('Number of components for TB1')
plt.ylabel('Cumulative explained variance');
plt.savefig(self.path_fig + self.version + 'PCA_TB1.png')
# ---
pca_trans1 = PCA(n_components=2)
pca1 = pca_trans1.fit(TB1)
TB1_transformed = pca_trans1.transform(TB1)
print("original shape: ", TB1.shape)
print("transformed shape:", TB1_transformed.shape)
# ------------------------------------------------------------------------------
pca = PCA()
pca2 = pca.fit(TB2)
plt.plot(np.cumsum(pca2.explained_variance_ratio_))
plt.xlabel('Number of components for TB2')
plt.ylabel('Cumulative explained variance');
plt.savefig(self.path_fig + self.version + 'PCA_TB2.png')
# ---
pca_trans2 = PCA(n_components=2)
pca2 = pca_trans2.fit(TB2)
TB2_transformed = pca_trans2.transform(TB2)
print("original shape: ", TB2.shape)
print("transformed shape:", TB2_transformed.shape)
# ------------------------------------------------------------------------------
# JOIN THE TREATED VARIABLES IN ONE SINGLE DATASET AGAIN:
PCA1 = pd.DataFrame(TB1_transformed[:],
columns=['pca1_1', 'pca_2'])
PCA2 = pd.DataFrame(TB2_transformed[:],
columns=['pca2_1', 'pca2_2'])
dataset = PCA1.join(PCA2, how='right')
dataset = dataset.join(ancillary, how='right')
dataset = dataset.join(df_orig.loc[:, ['sfcprcp']], how='right')
# ------------------------------------------------------------------------------
dataset = self.keep_interval(0.2, 110.0, dataset, 'sfcprcp')
# ----------------------------------------
# SUBSET BY SPECIFIC CLASS (UNDERSAMPLING)
# n = 0.98
# to_remove = np.random.choice(
# dataset.index,
# size=int(dataset.shape[0] * n),
# replace=False)
# dataset = dataset.drop(to_remove)
# ------------------------------------------------------------------------------
# Split the data into train and test
# Now split the dataset into a training set and a test set.
# We will use the test set in the final evaluation of our model.
train_dataset = dataset.sample(frac=0.8, random_state=0)
test_dataset = dataset.drop(train_dataset.index)
# ------------------------------------------------------------------------------
# Inspect the data:
# Have a quick look at the joint distribution of a few pairs of columns from the training set.
colunas = list(dataset.columns.values)
# ------------------------------------------------------------------------------
# Also look at the overall statistics:
train_stats = train_dataset.describe()
train_stats.pop("sfcprcp")
train_stats = train_stats.transpose()
# ------------------------------------------------------------------------------
# Split features from labels:
# Separate the target value, or "label", from the features.
# This label is the value that you will train the model to predict.
y_train = train_dataset.pop('sfcprcp')
y_test = test_dataset.pop('sfcprcp')
# ------------------------------------------------------------------------------
# Normalize the data:
scaler = StandardScaler()
normed_train_data = scaler.fit_transform(train_dataset)
normed_test_data = scaler.fit_transform(test_dataset)
# ------------------------------------------------------------------------------
# Build the model:
model = self.build_reg_model(len(train_dataset.keys()))
# ------------------------------------------------------------------------------
# Inspect the model:
# Use the .summary method to print a simple description of the model
model.summary()
# ------------------------------------------------------------------------------
# It seems to be working, and it produces a result
# of the expected shape and type.
# Train the model:
# Train the model for 1000 epochs, and record the training
# and validation accuracy in the history object.
# ------------------------------------------------------------------------------
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
EPOCHS = 1000
history = model.fit(
normed_train_data, y_train,
epochs=EPOCHS, validation_split=0.2, verbose=0,
callbacks=[PrintDot()])
print(history.history.keys())
# ------------------------------------------------------------------------------
# Visualize the model's training progress using the stats
# stored in the history object.
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
self.plot_history(history)
# ------------------------------------------------------------------------------
model = self.build_reg_model(len(train_dataset.keys()))
# The patience parameter is the number of epochs to wait for improvement before stopping
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
history = model.fit(normed_train_data, y_train, epochs=EPOCHS,
validation_split=0.2, verbose=0, callbacks=[early_stop, PrintDot()])
# ------------------------------------------------------------------------------
# Plotting again, but with EarlyStopping applied:
self.plot_history_EarlyStopping(history)
# The graph shows that on the validation set, the average error
# is usually around +/- 2 sfcprcp. Is this good?
# We'll leave that decision up to you.
# ------------------------------------------------------------------------------
# Let's see how well the model generalizes by using
# the test set, which we did not use when training the model.
# This tells us how well we can expect the model to predict
# when we use it in the real world.
loss, mae, mse = model.evaluate(normed_test_data, y_test, verbose=0)
print("Testing set Mean Abs Error: {:5.2f} sfcprcp".format(mae))
#------------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Make predictions
# Finally, predict SFCPRCP values using data in the testing set:
test_predictions = model.predict(normed_test_data).flatten()
# Applying meteorological skill scores to verify the performance of the TRAIN/TEST model, in this case, continuous scores:
skills = ContinuousScores()
val_y_pred_mean, val_y_test_mean, val_mae, val_rmse, val_std, val_fseperc, val_fse, val_corr, val_num_pixels = skills.metrics(y_test, test_predictions)
#converting to text file
print("converting arrays to text files")
my_scores = {'val_y_pred_mean': val_y_pred_mean,
'val_y_test_mean': val_y_test_mean,
'val_mae': val_mae,
'val_rmse': val_rmse,
'val_std': val_std,
'val_fseperc': val_fseperc,
'val_fse': val_fse,
'val_corr': val_corr,
'val_num_pixels': val_num_pixels}
with open(self.path_fig+'continuous_scores_TEST_TRAIN_'+self.version+'.txt', 'w') as myfile:
myfile.write(str(my_scores))
print("Text file saved!")
plt.figure()
plt.scatter(y_test, test_predictions)
plt.xlabel('True Values [sfcprcp]')
plt.ylabel('Predictions [sfcprcp]')
plt.axis('equal')
plt.axis('square')
plt.xlim([0, plt.xlim()[1]])
plt.ylim([0, plt.ylim()[1]])
plt.plot([-100, 100], [-100, 100])
fig_name = self.fig_title + "_plot_scatter_y_test_vs_y_pred.png"
plt.savefig(self.path_fig + fig_name)
plt.clf()
#------------------------------------------------------------------------------
ax = plt.gca()
ax.plot(y_test,test_predictions, 'o', c='blue', alpha=0.07, markeredgecolor='none')
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlabel('True Values [sfcprcp]')
ax.set_ylabel('Predictions [sfcprcp]')
plt.plot([-100, 100], [-100, 100])
fig_name = self.fig_title + "_plot_scatter_LOG_y_test_vs_y_pred.png"
plt.savefig(self.path_fig+fig_name)
plt.clf()
#------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# It looks like our model predicts reasonably well.
# Let's take a look at the error distribution.
error = test_predictions - y_test
plt.hist(error, bins=25)
plt.xlabel("Prediction Error [sfcprcp]")
plt.ylabel("Count")
fig_name = self.fig_title + "_prediction_error.png"
plt.savefig(self.path_fig + fig_name)
plt.clf()
# ------------------------------------------------------------------------------
# HISTOGRAM 2D
plt.hist2d(y_test, test_predictions, cmin=1, bins=(50, 50), cmap=plt.cm.jet, range=np.array([(0.2, 110), (0.2, 110)]))
plt.axis('equal')
plt.axis('square')
plt.plot([0, 100], [0, 100], ls="--", c=".3")
plt.xlim([0, max(y_test)])
plt.ylim([0, max(y_test)])
plt.colorbar()
plt.xlabel("Observed rain rate (mm/h) - Training")
plt.ylabel("Predicted rain rate (mm/h) - Training")
fig_name = self.fig_title + "_hist2D.png"
plt.savefig(self.path_fig + fig_name)
plt.clf()
# ------------------------------------------------------------------------------
# Saving model to YAML:
model_yaml = model.to_yaml()
with open(self.mod_out_pth + self.mod_out_name + '.yaml', 'w') as yaml_file:
yaml_file.write(model_yaml)
# serialize weights to HDF5
model.save_weights(self.mod_out_pth + self.mod_out_name + '.h5')
print("Saved model to disk")
# Saving the complete model in HDF5:
model.save(self.mod_out_pth + self.mod_out_name + '_tf.h5')
# -------------------------------------------------------------------------
# FUNCTIONS TO MAKE PLOTS ABOUT TRAINING:
# -------------------------------------------------------------------------
def plot_history(self, history):
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
plt.xlabel('Epoch')
plt.ylabel('Mean Abs Error [sfcprcp]')
plt.plot(hist['epoch'], hist['mean_absolute_error'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mean_absolute_error'],
label='Val Error')
ylim_max = hist.val_mean_absolute_error.max() + 10
plt.ylim([0, ylim_max])
plt.legend()
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Square Error [$sfcprcp^2$]')
plt.plot(hist['epoch'], hist['mean_squared_error'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mean_squared_error'],
label='Val Error')
ylim_max = hist.val_mean_squared_error.max() + 10
plt.ylim([0, ylim_max])
plt.legend()
# plt.show()
fig_name = self.fig_title + "_error_per_epochs_history.png"
plt.savefig(self.path_fig + fig_name)
def plot_history_EarlyStopping(self, history):
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Abs Error [sfcprcp]')
plt.plot(hist['epoch'], hist['mean_absolute_error'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mean_absolute_error'],
label='Val Error')
ylim_max = hist.val_mean_absolute_error.max() + 10
plt.ylim([0, ylim_max])
plt.legend()
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Square Error [$sfcprcp^2$]')
plt.plot(hist['epoch'], hist['mean_squared_error'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mean_squared_error'],
label='Val Error')
ylim_max = hist.val_mean_squared_error.max() + 10
plt.ylim([0, ylim_max])
plt.legend()
fig_name = self.fig_title + "_error_per_epochs_EarlyStopping.png"
plt.savefig(self.path_fig + fig_name)
|
from utils import data_helper
if __name__ == "__main__":
dataset_path = r"../data/raw/cat"
(
train_set_x_orig,
train_set_y,
test_set_x_orig,
test_set_y,
classes,
) = data_helper.load_from_h5(dataset_path, "catvnoncat")
m_train = train_set_x_orig.shape[0]
m_test = test_set_x_orig.shape[0]
num_px = train_set_x_orig.shape[1]
# Reshape the training and test examples
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
# standardize our dataset.
# One common preprocessing step in machine learning is to center and standardize your dataset,
# meaning that you subtract the mean of the whole numpy array from each example, and then divide
# each example by the standard deviation of the whole numpy array. But for picture datasets,
# it is simpler and more convenient and works almost as well to just divide every row of the dataset
# by 255 (the maximum value of a pixel channel).
train_set_x = train_set_x_flatten / 255.0
test_set_x = test_set_x_flatten / 255.0
# To design a simple algorithm to distinguish cat images from non-cat images, based on Logistic Regression,
# perform the following steps (a minimal sketch follows this list):
# - Initialize the parameters of the model
# - Learn the parameters for the model by minimizing the cost
# - Use the learned parameters to make predictions (on the test set)
# - Analyse the results and conclude
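# A minimal from-scratch sketch of those steps (gradient-descent logistic regression).
# This is illustrative only, not necessarily the intended implementation; it assumes
# the label arrays from load_from_h5 can be reshaped to (1, m) as in the classic
# catvnoncat dataset, and the learning rate / iteration count below are arbitrary.
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def train_logistic_regression(X, Y, num_iterations=2000, learning_rate=0.005):
    # X has shape (n_features, m), Y has shape (1, m)
    n_features, m = X.shape
    w = np.zeros((n_features, 1))
    b = 0.0
    for _ in range(num_iterations):
        A = sigmoid(np.dot(w.T, X) + b)   # forward pass, shape (1, m)
        dw = np.dot(X, (A - Y).T) / m     # gradient of the cost w.r.t. w
        db = np.sum(A - Y) / m            # gradient of the cost w.r.t. b
        w -= learning_rate * dw
        b -= learning_rate * db
    return w, b

def predict_logistic_regression(w, b, X):
    A = sigmoid(np.dot(w.T, X) + b)
    return (A > 0.5).astype(int)

Y_train = np.asarray(train_set_y).reshape(1, -1)
Y_test = np.asarray(test_set_y).reshape(1, -1)
w, b = train_logistic_regression(train_set_x, Y_train)
train_acc = 100 - np.mean(np.abs(predict_logistic_regression(w, b, train_set_x) - Y_train)) * 100
test_acc = 100 - np.mean(np.abs(predict_logistic_regression(w, b, test_set_x) - Y_test)) * 100
print("train accuracy: %.1f%%, test accuracy: %.1f%%" % (train_acc, test_acc))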
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Roomscout.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_cluster_role_binding import V1ClusterRoleBinding
class TestV1ClusterRoleBinding(unittest.TestCase):
""" V1ClusterRoleBinding unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1ClusterRoleBinding(self):
"""
Test V1ClusterRoleBinding
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_cluster_role_binding.V1ClusterRoleBinding()
pass
if __name__ == '__main__':
unittest.main()
|
import os
import os.path as op
from bento.distutils.utils \
import \
_is_setuptools_activated
if _is_setuptools_activated():
from setuptools.command.egg_info \
import \
egg_info as old_egg_info
else:
raise ValueError("You cannot use egg_info without setuptools enabled first")
from bento._config \
import \
IPKG_PATH
from bento.installed_package_description \
import \
InstalledPkgDescription
from bento.commands.egg_utils \
import \
EggInfo
class egg_info(old_egg_info):
def run(self):
self.run_command("build")
dist = self.distribution
n = dist.build_node.make_node(IPKG_PATH)
ipkg = InstalledPkgDescription.from_file(n.abspath())
egg_info = EggInfo.from_ipkg(ipkg)
egg_info_dir = op.join(self.egg_base, "%s.egg-info" % dist.pkg.name)
try:
os.makedirs(egg_info_dir)
except OSError as e:
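# errno 17 is EEXIST: the egg-info directory already exists, which is fine.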
if e.errno != 17:
raise
for filename, cnt in egg_info.iter_meta(dist.build_node):
filename = op.join(egg_info_dir, filename)
fid = open(filename, "w")
try:
fid.write(cnt)
finally:
fid.close()
|
import os
import uuid
from contextlib import contextmanager
from datetime import datetime
from xml.etree import cElementTree as ElementTree
from casexml.apps.phone.restore_caching import RestorePayloadPathCache
from corehq.apps.receiverwrapper.util import submit_form_locally
from corehq.form_processor.tests.utils import FormProcessorTestUtils
from corehq.util.test_utils import unit_testing_only
from dimagi.utils.dates import utcnow_sans_milliseconds
from lxml import etree
from casexml.apps.case.xml import V1, V2, NS_VERSION_MAP
from casexml.apps.phone.restore import RestoreConfig, RestoreParams
from six.moves import range
TEST_DOMAIN_NAME = 'test-domain'
class _RestoreCaseBlock(object):
"""
Little shim class for working with XML case blocks in a restore payload
NOTE the recommended way to inspect case restore payloads is to
use <MockDevice>.sync().cases, so don't use this in tests.
"""
def __init__(self, xml_element, version=V2):
self.xml_element = xml_element
self.version = version
def to_string(self):
return ElementTree.tostring(self.xml_element)
def get_case_id(self):
if self.version == V1:
return self.xml_element.findtext('{{{0}}}case_id'.format(get_case_xmlns(self.version)))
else:
return self.xml_element.get('case_id')
def get_case_name(self):
assert self.version == V2, 'get_case_name not yet supported for legacy V1 casexml'
# note: there has to be a better way to work with namespaced xpath.... right?!?!
return self.xml_element.findtext('{{{0}}}create/{{{0}}}case_name'.format(get_case_xmlns(self.version)))
def bootstrap_case_from_xml(test_class, filename, case_id_override=None, domain=None):
starttime = utcnow_sans_milliseconds()
file_path = os.path.join(os.path.dirname(__file__), "data", filename)
with open(file_path, "r") as f:
xml_data = f.read()
updated_xml, uid, case_id = _replace_ids_in_xform_xml(
xml_data,
case_id_override=case_id_override,
)
domain = domain or 'test-domain'
result = submit_form_locally(updated_xml, domain=domain)
test_class.assertLessEqual(starttime, result.case.server_modified_on)
test_class.assertGreaterEqual(datetime.utcnow(), result.case.server_modified_on)
test_class.assertEqual(case_id, result.case.case_id)
return result.xform, result.case
def _replace_ids_in_xform_xml(xml_data, case_id_override=None):
# from our test forms, replace the UIDs so we don't get id conflicts
uid, case_id = (uuid.uuid4().hex for i in range(2))
if case_id_override:
case_id = case_id_override
xml_data = xml_data.replace("REPLACE_UID", uid)
xml_data = xml_data.replace("REPLACE_CASEID", case_id)
return xml_data, uid, case_id
def check_xml_line_by_line(test_case, expected, actual):
"""Does what it's called, hopefully parameters are self-explanatory"""
# this is totally wacky, but elementtree strips needless
# whitespace that minidom will preserve in the original string
parser = etree.XMLParser(remove_blank_text=True)
parsed_expected = etree.tostring(etree.XML(expected, parser), pretty_print=True).decode('utf-8')
parsed_actual = etree.tostring(etree.XML(actual, parser), pretty_print=True).decode('utf-8')
if parsed_expected == parsed_actual:
return
try:
expected_lines = parsed_expected.split("\n")
actual_lines = parsed_actual.split("\n")
test_case.assertEqual(
len(expected_lines),
len(actual_lines),
"Parsed xml files are different lengths\n" +
"Expected: \n%s\nActual:\n%s" % (parsed_expected, parsed_actual))
for i in range(len(expected_lines)):
test_case.assertEqual(expected_lines[i], actual_lines[i])
except AssertionError:
import logging
logging.error("Failure in xml comparison\nExpected:\n%s\nActual:\n%s" % (parsed_expected, parsed_actual))
raise
def get_case_xmlns(version):
return NS_VERSION_MAP.get(version, 'http://openrosa.org/http/response')
def extract_caseblocks_from_xml(payload_string, version=V2):
parsed_payload = ElementTree.fromstring(payload_string)
xml_blocks = parsed_payload.findall('{{{0}}}case'.format(get_case_xmlns(version)))
return [_RestoreCaseBlock(b, version) for b in xml_blocks]
@contextmanager
def _cached_restore(testcase, user, restore_id="", version=V2,
purge_restore_cache=False):
"""DEPRECATED use <MockDevice>.sync().cases"""
assert not hasattr(testcase, 'restore_config'), testcase
assert not hasattr(testcase, 'payload_string'), testcase
if restore_id and purge_restore_cache:
RestorePayloadPathCache(
domain=user.domain,
user_id=user.user_id,
sync_log_id=restore_id,
device_id=None,
).invalidate()
testcase.restore_config = RestoreConfig(
project=user.project,
restore_user=user, params=RestoreParams(restore_id, version=version),
**getattr(testcase, 'restore_options', {})
)
testcase.payload_string = testcase.restore_config.get_payload().as_string()
try:
yield
finally:
del testcase.restore_config, testcase.payload_string
def deprecated_check_user_has_case(testcase, user, case_blocks, should_have=True,
line_by_line=True, restore_id="", version=V2,
purge_restore_cache=False, return_single=False):
"""DEPRECATED use <MockDevice>.sync().cases"""
try:
restore_config = testcase.restore_config
payload_string = testcase.payload_string
except AttributeError:
with _cached_restore(testcase, user, restore_id, version, purge_restore_cache):
restore_config = testcase.restore_config
payload_string = testcase.payload_string
return _check_payload_has_cases(
testcase=testcase,
payload_string=payload_string,
username=user.username,
case_blocks=case_blocks,
should_have=should_have,
line_by_line=line_by_line,
version=version,
return_single=return_single,
restore_config=restore_config,
)
def _check_payload_has_cases(testcase, payload_string, username, case_blocks, should_have=True,
line_by_line=True, version=V2, return_single=False, restore_config=None):
"""DEPRECATED use <MockDevice>.sync().cases"""
if not isinstance(case_blocks, list):
case_blocks = [case_blocks]
return_single = True
XMLNS = NS_VERSION_MAP.get(version, 'http://openrosa.org/http/response')
blocks_from_restore = extract_caseblocks_from_xml(payload_string, version)
def check_block(case_block):
case_block.set('xmlns', XMLNS)
case_block = _RestoreCaseBlock(ElementTree.fromstring(ElementTree.tostring(case_block)), version=version)
case_id = case_block.get_case_id()
n = 0
def extra_info():
return "\n%s\n%s" % (case_block.to_string(), [b.to_string() for b in blocks_from_restore])
match = None
for block in blocks_from_restore:
if block.get_case_id() == case_id:
if should_have:
if line_by_line:
check_xml_line_by_line(
testcase,
case_block.to_string(),
block.to_string(),
)
match = block
n += 1
if n == 2:
testcase.fail(
"Block for case_id '%s' appears twice"
" in ota restore for user '%s':%s" % (case_id, username, extra_info())
)
else:
testcase.fail(
"User '%s' gets case '%s' "
"but shouldn't:%s" % (username, case_id, extra_info())
)
if not n and should_have:
testcase.fail("Block for case_id '%s' doesn't appear in ota restore for user '%s':%s"
% (case_id, username, extra_info()))
return match
matches = [check_block(case_block) for case_block in case_blocks]
return restore_config, matches[0] if return_single else matches
@unit_testing_only
def delete_all_cases():
FormProcessorTestUtils.delete_all_cases()
@unit_testing_only
def delete_all_xforms():
FormProcessorTestUtils.delete_all_xforms()
@unit_testing_only
def delete_all_sync_logs():
FormProcessorTestUtils.delete_all_sync_logs()
@unit_testing_only
def delete_all_ledgers():
FormProcessorTestUtils.delete_all_ledgers()
|
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# justice-iam-service (5.10.1)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
from ..models.accountcommon_permission import AccountcommonPermission
class ClientmodelClientCreationResponse(Model):
"""Clientmodel client creation response (clientmodel.ClientCreationResponse)
Properties:
client_id: (ClientId) REQUIRED str
client_name: (ClientName) REQUIRED str
client_permissions: (ClientPermissions) REQUIRED List[AccountcommonPermission]
namespace: (Namespace) REQUIRED str
redirect_uri: (RedirectUri) REQUIRED str
"""
# region fields
client_id: str # REQUIRED
client_name: str # REQUIRED
client_permissions: List[AccountcommonPermission] # REQUIRED
namespace: str # REQUIRED
redirect_uri: str # REQUIRED
# endregion fields
# region with_x methods
def with_client_id(self, value: str) -> ClientmodelClientCreationResponse:
self.client_id = value
return self
def with_client_name(self, value: str) -> ClientmodelClientCreationResponse:
self.client_name = value
return self
def with_client_permissions(self, value: List[AccountcommonPermission]) -> ClientmodelClientCreationResponse:
self.client_permissions = value
return self
def with_namespace(self, value: str) -> ClientmodelClientCreationResponse:
self.namespace = value
return self
def with_redirect_uri(self, value: str) -> ClientmodelClientCreationResponse:
self.redirect_uri = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "client_id"):
result["ClientId"] = str(self.client_id)
elif include_empty:
result["ClientId"] = ""
if hasattr(self, "client_name"):
result["ClientName"] = str(self.client_name)
elif include_empty:
result["ClientName"] = ""
if hasattr(self, "client_permissions"):
result["ClientPermissions"] = [i0.to_dict(include_empty=include_empty) for i0 in self.client_permissions]
elif include_empty:
result["ClientPermissions"] = []
if hasattr(self, "namespace"):
result["Namespace"] = str(self.namespace)
elif include_empty:
result["Namespace"] = ""
if hasattr(self, "redirect_uri"):
result["RedirectUri"] = str(self.redirect_uri)
elif include_empty:
result["RedirectUri"] = ""
return result
# endregion to methods
# region static methods
@classmethod
def create(
cls,
client_id: str,
client_name: str,
client_permissions: List[AccountcommonPermission],
namespace: str,
redirect_uri: str,
) -> ClientmodelClientCreationResponse:
instance = cls()
instance.client_id = client_id
instance.client_name = client_name
instance.client_permissions = client_permissions
instance.namespace = namespace
instance.redirect_uri = redirect_uri
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> ClientmodelClientCreationResponse:
instance = cls()
if not dict_:
return instance
if "ClientId" in dict_ and dict_["ClientId"] is not None:
instance.client_id = str(dict_["ClientId"])
elif include_empty:
instance.client_id = ""
if "ClientName" in dict_ and dict_["ClientName"] is not None:
instance.client_name = str(dict_["ClientName"])
elif include_empty:
instance.client_name = ""
if "ClientPermissions" in dict_ and dict_["ClientPermissions"] is not None:
instance.client_permissions = [AccountcommonPermission.create_from_dict(i0, include_empty=include_empty) for i0 in dict_["ClientPermissions"]]
elif include_empty:
instance.client_permissions = []
if "Namespace" in dict_ and dict_["Namespace"] is not None:
instance.namespace = str(dict_["Namespace"])
elif include_empty:
instance.namespace = ""
if "RedirectUri" in dict_ and dict_["RedirectUri"] is not None:
instance.redirect_uri = str(dict_["RedirectUri"])
elif include_empty:
instance.redirect_uri = ""
return instance
@classmethod
def create_many_from_dict(cls, dict_: dict, include_empty: bool = False) -> Dict[str, ClientmodelClientCreationResponse]:
return {k: cls.create_from_dict(v, include_empty=include_empty) for k, v in dict_.items()} if dict_ else {}
@classmethod
def create_many_from_list(cls, list_: list, include_empty: bool = False) -> List[ClientmodelClientCreationResponse]:
return [cls.create_from_dict(i, include_empty=include_empty) for i in list_] if list_ else []
@classmethod
def create_from_any(cls, any_: any, include_empty: bool = False, many: bool = False) -> Union[ClientmodelClientCreationResponse, List[ClientmodelClientCreationResponse], Dict[Any, ClientmodelClientCreationResponse]]:
if many:
if isinstance(any_, dict):
return cls.create_many_from_dict(any_, include_empty=include_empty)
elif isinstance(any_, list):
return cls.create_many_from_list(any_, include_empty=include_empty)
else:
raise ValueError()
else:
return cls.create_from_dict(any_, include_empty=include_empty)
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"ClientId": "client_id",
"ClientName": "client_name",
"ClientPermissions": "client_permissions",
"Namespace": "namespace",
"RedirectUri": "redirect_uri",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"ClientId": True,
"ClientName": True,
"ClientPermissions": True,
"Namespace": True,
"RedirectUri": True,
}
# endregion static methods
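# Illustrative round-trip sketch (added for clarity; values are hypothetical and
# AccountcommonPermission is assumed to follow the same generated
# create_from_dict()/to_dict() pattern as this model):
def _example_round_trip() -> None:
    response = ClientmodelClientCreationResponse.create(
        client_id="abc123",
        client_name="example-client",
        client_permissions=[
            AccountcommonPermission.create_from_dict(
                {"Action": 2, "Resource": "NAMESPACE:demo:CLIENT"}
            ),
        ],
        namespace="demo",
        redirect_uri="https://example.com/callback",
    )
    # to_dict() serializes using the SDK field names (ClientId, ClientName, ...)
    data = response.to_dict()
    restored = ClientmodelClientCreationResponse.create_from_dict(data)
    assert restored.redirect_uri == response.redirect_uri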
|
from google.appengine.ext import ndb
class Transaction(ndb.Model):
"""A simple model to store the properties of an order"""
total_amount = ndb.FloatProperty(indexed=False)
transaction_id = ndb.StringProperty(indexed=False)
transaction_ref = ndb.StringProperty(indexed=False)
post_date = ndb.StringProperty(indexed=False)
payment_id = ndb.StringProperty(indexed=True)
authorization_code = ndb.StringProperty(indexed=False)
result = ndb.StringProperty(indexed=False)
udf1 = ndb.StringProperty(indexed=False)
udf2 = ndb.StringProperty(indexed=False)
udf3 = ndb.StringProperty(indexed=False)
udf4 = ndb.StringProperty(indexed=False)
udf5 = ndb.StringProperty(indexed=False)
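# Illustrative usage sketch (hypothetical values; the legacy
# google.appengine.ext.ndb library must run inside an App Engine standard
# environment request, where the datastore context is implicit):
def _example_store_and_query():
    txn = Transaction(
        total_amount=49.99,
        transaction_id="TXN-0001",
        transaction_ref="REF-0001",
        payment_id="PAY-42",  # indexed, so it can be filtered on
        result="CAPTURED",
    )
    key = txn.put()  # write the entity to the datastore
    # only indexed properties (here payment_id) can be used in query filters
    matches = Transaction.query(Transaction.payment_id == "PAY-42").fetch(10)
    return key.get(), matches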
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import logging
from decimal import Decimal as D
from django.db.models import Sum
from ralph_scrooge.utils.common import memoize
from ralph_scrooge.models import DailyUsage
from ralph_scrooge.plugins.base import BasePlugin
logger = logging.getLogger(__name__)
class NoPriceCostError(Exception):
"""
Raised when price is not defined for specified date.
"""
pass
class MultiplePriceCostError(Exception):
"""
Raised when multiple prices are defined for specified date.
"""
class BaseCostPlugin(BasePlugin):
"""
Base cost plugin
Every plugin which inherits from BaseCostPlugin should implement one
method: costs.
This class provides base methods for costs calculation, such as generating
usages (for given date) per service environment, pricing object etc.
"""
def run(self, type='costs', *args, **kwargs):
# find method with name the same as type param
if hasattr(self, type):
func = getattr(self, type)
if hasattr(func, '__call__'):
return func(*args, **kwargs)
raise AttributeError()
@abc.abstractmethod
def costs(self, *args, **kwargs):
"""
Should return information about the costs of usage (e.g. team, service) per
service environment, in the format accepted by the collector.
"""
pass
def total_cost(self, *args, **kwargs):
"""
By default total cost is just sum of all costs from `costs` method.
"""
costs = self.costs(*args, **kwargs)
return sum([sum([s['cost'] for s in c]) for c in costs.values()])
@memoize(skip_first=True)
def _get_price_from_cost(
self,
usage_price,
forecast,
warehouse=None,
service_environments=None,
excluded_services=None,
):
"""
Calculate the price of a single unit of the usage type over the period
defined by the date range of usage_price.
The price can be calculated overall or for a single warehouse.
"""
total_usage = self._get_total_usage(
usage_type=usage_price.type,
start=usage_price.start,
end=usage_price.end,
warehouse=warehouse,
service_environments=service_environments,
excluded_services=excluded_services,
)
cost = usage_price.forecast_cost if forecast else usage_price.cost
price = 0
if total_usage and cost:
price = cost / D(total_usage)
return D(price)
@memoize(skip_first=True)
def _get_daily_usages_in_period(
self,
usage_type,
date=None,
start=None,
end=None,
warehouse=None,
service_environments=None,
excluded_services=None,
):
"""
Filter daily usages based on passed params
"""
daily_usages = DailyUsage.objects.filter(
type=usage_type,
)
if start and end:
daily_usages = daily_usages.filter(date__gte=start, date__lte=end)
elif date:
daily_usages = daily_usages.filter(date=date)
if warehouse:
daily_usages = daily_usages.filter(warehouse=warehouse)
if service_environments is not None:
daily_usages = daily_usages.filter(
service_environment__in=service_environments
)
if excluded_services:
daily_usages = daily_usages.exclude(
service_environment__service__in=excluded_services
)
return daily_usages.select_related('daily_pricing_object')
@memoize(skip_first=True)
def _get_total_usage(self, *args, **kwargs):
"""
Calculates total usage of usage type in period of time (between start
and end). Total usage can be calculated overall, for single warehouse,
for selected services or for services in warehouse.
:rtype: float
"""
daily_usages = self._get_daily_usages_in_period(*args, **kwargs)
return daily_usages.aggregate(
total=Sum('value')
).get('total') or 0
@memoize(skip_first=True)
def _get_usages_per_service_environment(self, *args, **kwargs):
"""
Method similar to `_get_total_usage`, but instead of a single-number
result, it returns the total usage per service environment in the period
of time (between start and end). Total usage can be calculated overall,
for a single warehouse, for selected services or for services in a warehouse.
:rtype: list
"""
daily_usages = self._get_daily_usages_in_period(*args, **kwargs)
return list(daily_usages.values('service_environment').annotate(
usage=Sum('value'),
).order_by('service_environment'))
@memoize(skip_first=True)
def _get_usages_per_service(self, *args, **kwargs):
"""
Method similar to `_get_total_usage`, but instead of a single-number
result, it returns the total usage per service in the period of time
(between start and end). Total usage can be calculated overall, for a
single warehouse, for selected services or for services in a warehouse.
:rtype: list
"""
daily_usages = self._get_daily_usages_in_period(*args, **kwargs)
return list(
daily_usages.values('service_environment__service').annotate(
usage=Sum('value'),
).order_by('service_environment__service')
)
@memoize(skip_first=True)
def _get_usages_per_pricing_object(self, *args, **kwargs):
"""
Works almost exactly like `_get_usages_per_service`, but instead of
returning data grouped by service, it returns the daily usages themselves
so they can be aggregated per single pricing object by the caller.
:rtype: django QuerySet
"""
daily_usages = self._get_daily_usages_in_period(*args, **kwargs)
return daily_usages
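# Illustrative subclass sketch (not part of ralph_scrooge): a concrete cost
# plugin only has to implement costs(). The argument names below are
# hypothetical; the returned structure follows what total_cost() expects - a
# dict keyed by service environment with a list of {'cost': ...} entries.
class ExampleUsageCostPlugin(BaseCostPlugin):
    def costs(self, usage_type, usage_price, forecast=False, *args, **kwargs):
        price = self._get_price_from_cost(usage_price, forecast)
        usages = self._get_usages_per_service_environment(
            usage_type=usage_type,
            start=usage_price.start,
            end=usage_price.end,
        )
        return {
            usage['service_environment']: [{
                'cost': D(usage['usage']) * price,
                'type': usage_type,
            }]
            for usage in usages
        }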
|
# -*- coding: utf-8 -*-
"""Analysis plugin related functions and classes for testing."""
from __future__ import unicode_literals
from plaso.analysis import mediator as analysis_mediator
from plaso.containers import artifacts
from plaso.containers import sessions
from plaso.engine import knowledge_base
from plaso.parsers import interface as parsers_interface
from plaso.parsers import mediator as parsers_mediator
from plaso.storage.fake import writer as fake_writer
from tests import test_lib as shared_test_lib
from tests.containers import test_lib as containers_test_lib
class AnalysisPluginTestCase(shared_test_lib.BaseTestCase):
"""The unit test case for an analysis plugin."""
def _AnalyzeEvents(
self, event_values_list, plugin, knowledge_base_values=None):
"""Analyzes events using the analysis plugin.
Args:
event_values_list (list[dict[str, str]]): list of event values.
plugin (AnalysisPlugin): plugin.
knowledge_base_values (Optional[dict[str, str]]): knowledge base values.
Returns:
FakeStorageWriter: storage writer.
"""
knowledge_base_object = self._SetUpKnowledgeBase(
knowledge_base_values=knowledge_base_values)
session = sessions.Session()
storage_writer = fake_writer.FakeStorageWriter(session)
storage_writer.Open()
test_events = []
for event, event_data in containers_test_lib.CreateEventsFromValues(
event_values_list):
storage_writer.AddEventData(event_data)
event.SetEventDataIdentifier(event_data.GetIdentifier())
storage_writer.AddEvent(event)
test_events.append((event, event_data))
mediator = analysis_mediator.AnalysisMediator(
storage_writer, knowledge_base_object)
for event, event_data in test_events:
plugin.ExamineEvent(mediator, event, event_data)
analysis_report = plugin.CompileReport(mediator)
storage_writer.AddAnalysisReport(analysis_report)
return storage_writer
def _ParseAndAnalyzeFile(
self, path_segments, parser, plugin, knowledge_base_values=None):
"""Parses and analyzes a file using the parser and analysis plugin.
Args:
path_segments (list[str]): path segments inside the test data directory.
parser (BaseParser): parser.
plugin (AnalysisPlugin): plugin.
knowledge_base_values (Optional[dict[str, str]]): knowledge base values.
Returns:
FakeStorageWriter: storage writer.
Raises:
SkipTest: if the path inside the test data directory does not exist and
the test should be skipped.
"""
knowledge_base_object = self._SetUpKnowledgeBase(
knowledge_base_values=knowledge_base_values)
storage_writer = self._ParseFile(
path_segments, parser, knowledge_base_object)
mediator = analysis_mediator.AnalysisMediator(
storage_writer, knowledge_base_object)
for event in storage_writer.GetSortedEvents():
event_data = None
event_data_identifier = event.GetEventDataIdentifier()
if event_data_identifier:
event_data = storage_writer.GetEventDataByIdentifier(
event_data_identifier)
plugin.ExamineEvent(mediator, event, event_data)
analysis_report = plugin.CompileReport(mediator)
storage_writer.AddAnalysisReport(analysis_report)
return storage_writer
def _ParseFile(self, path_segments, parser, knowledge_base_object):
"""Parses a file using the parser.
Args:
path_segments (list[str]): path segments inside the test data directory.
parser (BaseParser): parser.
knowledge_base_object (KnowledgeBase): knowledge base.
Returns:
FakeStorageWriter: storage writer.
Raises:
SkipTest: if the path inside the test data directory does not exist and
the test should be skipped.
"""
session = sessions.Session()
storage_writer = fake_writer.FakeStorageWriter(session)
storage_writer.Open()
mediator = parsers_mediator.ParserMediator(
storage_writer, knowledge_base_object)
file_entry = self._GetTestFileEntry(path_segments)
mediator.SetFileEntry(file_entry)
if isinstance(parser, parsers_interface.FileEntryParser):
parser.Parse(mediator)
elif isinstance(parser, parsers_interface.FileObjectParser):
file_object = file_entry.GetFileObject()
try:
parser.Parse(mediator, file_object)
finally:
file_object.close()
else:
self.fail('Got unexpected parser type: {0:s}'.format(type(parser)))
return storage_writer
def _SetUpKnowledgeBase(self, knowledge_base_values=None):
"""Sets up a knowledge base.
Args:
knowledge_base_values (Optional[dict[str, str]]): knowledge base values.
Returns:
KnowledgeBase: knowledge base.
"""
knowledge_base_object = knowledge_base.KnowledgeBase()
if knowledge_base_values:
for identifier, value in iter(knowledge_base_values.items()):
if identifier == 'users':
self._SetUserAccounts(knowledge_base_object, value)
else:
knowledge_base_object.SetValue(identifier, value)
return knowledge_base_object
def _SetUserAccounts(self, knowledge_base_object, users):
"""Sets the user accounts in the knowledge base.
Args:
knowledge_base_object (KnowledgeBase): used to store information about
users.
users (list[dict[str, str]]): users, for example [{'name': 'me',
'sid': 'S-1', 'uid': '1'}]
"""
for user in users:
identifier = user.get('sid', user.get('uid', None))
if not identifier:
continue
user_account_artifact = artifacts.UserAccountArtifact(
identifier=identifier, user_directory=user.get('path', None),
username=user.get('name', None))
knowledge_base_object.AddUserAccount(user_account_artifact)
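# Illustrative sketch (not part of plaso): a concrete plugin test subclasses
# AnalysisPluginTestCase. The knowledge base values below mirror the 'users'
# format documented in _SetUserAccounts and are otherwise hypothetical.
class ExampleKnowledgeBaseTest(AnalysisPluginTestCase):
  """Shows how knowledge base values are passed to the helper methods."""

  def testSetUpKnowledgeBase(self):
    """Tests the _SetUpKnowledgeBase helper."""
    knowledge_base_values = {
        'users': [{'name': 'me', 'sid': 'S-1', 'uid': '1', 'path': '/home/me'}]}
    knowledge_base_object = self._SetUpKnowledgeBase(
        knowledge_base_values=knowledge_base_values)
    self.assertIsNotNone(knowledge_base_object)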
|
""" .. _BDPReader-api:
BDPReader --- Converts BDP in XML format to in-memory BDP object.
-----------------------------------------------------------------
This module defines the BDPReader class.
"""
#system imports
from xml import sax
import os
# ADMIT imports
import admit.util.bdp_types as bt
import admit.util.utils as utils
from admit.xmlio.AdmitParser import AdmitParser
from admit.xmlio.ErrorHandler import ErrorHandler
class BDPReader(object):
""" Class to read in a bdp file (xml style) and convert it to a BDP object in memory. Only the
name of the bdp file (including any relative or absolute path) needs to be specified. The
given file will be passed to the AdmitParser where it will be parsed. The resulting data
will be inserted into a BDP object of the appropriate type (the type is determined by the
contents of the bdp file). The BDP object is then returned.
Parameters
----------
file : str
File name (including any relative or absolute path) of the bdp file to be parsed and
converted to a BDP object.
Default : None.
Attributes
----------
file : str
File name (including any relative or absolute path) of the bdp file to be parsed and
converted to a BDP object.
"""
def __init__(self, file=None):
self.file = file
def read(self, file=None):
""" Method to convert a bdp file to a BDP object. Only the file name (including relative
or absolute path) needs to be given. The file is then parsed and the data inserted into
the appropriate BDP object. The type of BDP is determined from the data in the bdp file
itself. The resulting BDP object is returned.
Parameters
----------
file : str
File name (including any relative or absolute path) of the bdp file to be parsed and
converted to a BDP object.
Default : None
Returns
-------
BDP object of appropriate type based on the given input file.
"""
# error check the input
if self.file is None:
if file is not None:
self.file = file
else:
raise Exception("File name must be specified.")
# see if a path was also given with the file name; if not, use the current working
# directory
sloc = self.file.rfind("/")
if sloc == -1:
basedir = os.getcwd()
else :
basedir = self.file[:sloc]
# instantiate a parser
BDPparser = sax.make_parser()
BDPContentHandler = AdmitParser(basedir, self.file)
# set the handlers
BDPparser.setContentHandler(BDPContentHandler)
BDPparser.setErrorHandler(ErrorHandler())
# parse the file, creating the appropriate BDP object
BDPparser.parse(open(self.file))
# return the BDP object
return BDPContentHandler.getBDP()
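# Illustrative usage sketch (not part of ADMIT; the .bdp path is hypothetical):
def _example_read_bdp(path="project/example.bdp"):
    """Parse a bdp file and return the reconstructed BDP object."""
    reader = BDPReader(path)
    return reader.read()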
|
# Copyright 2020 Adap GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate plots for Fashion-MNIST results."""
from typing import List, Tuple
import numpy as np
from flwr_experimental.baseline.plot import bar_chart, line_chart
RESULTS = {
"fedavg-t10": [
(0, 0.03759999945759773),
(1, 0.03759999945759773),
(2, 0.03759999945759773),
(3, 0.03759999945759773),
(4, 0.03759999945759773),
(5, 0.03759999945759773),
(6, 0.03759999945759773),
(7, 0.03759999945759773),
(8, 0.03759999945759773),
(9, 0.03759999945759773),
(10, 0.03759999945759773),
(11, 0.03759999945759773),
(12, 0.03759999945759773),
(13, 0.03759999945759773),
(14, 0.03759999945759773),
(15, 0.03759999945759773),
(16, 0.03759999945759773),
(17, 0.03759999945759773),
(18, 0.03759999945759773),
(19, 0.03759999945759773),
(20, 0.03759999945759773),
],
"fedavg-t12": [
(0, 0.03759999945759773),
(1, 0.03759999945759773),
(2, 0.03759999945759773),
(3, 0.03759999945759773),
(4, 0.03759999945759773),
(5, 0.03759999945759773),
(6, 0.03759999945759773),
(7, 0.03759999945759773),
(8, 0.03759999945759773),
(9, 0.03759999945759773),
(10, 0.03759999945759773),
(11, 0.03759999945759773),
(12, 0.03759999945759773),
(13, 0.03759999945759773),
(14, 0.03759999945759773),
(15, 0.03759999945759773),
(16, 0.03759999945759773),
(17, 0.03759999945759773),
(18, 0.03759999945759773),
(19, 0.03759999945759773),
(20, 0.03759999945759773),
],
"fedavg-t14": [
(0, 0.03759999945759773),
(1, 0.03759999945759773),
(2, 0.6743999719619751),
(3, 0.6802999973297119),
(4, 0.6802999973297119),
(5, 0.6802999973297119),
(6, 0.6802999973297119),
(7, 0.7853999733924866),
(8, 0.7853999733924866),
(9, 0.7876999974250793),
(10, 0.7642999887466431),
(11, 0.8054999709129333),
(12, 0.8181999921798706),
(13, 0.8108999729156494),
(14, 0.7907000184059143),
(15, 0.763700008392334),
(16, 0.8091999888420105),
(17, 0.8296999931335449),
(18, 0.8123999834060669),
(19, 0.8123999834060669),
(20, 0.8101999759674072),
],
"fedavg-t16": [
(0, 0.03759999945759773),
(1, 0.7197999954223633),
(2, 0.7720999717712402),
(3, 0.7900999784469604),
(4, 0.7811999917030334),
(5, 0.7724000215530396),
(6, 0.8023999929428101),
(7, 0.8043000102043152),
(8, 0.8230999708175659),
(9, 0.8327999711036682),
(10, 0.8299000263214111),
(11, 0.8402000069618225),
(12, 0.853600025177002),
(13, 0.8370000123977661),
(14, 0.83160001039505),
(15, 0.8424000144004822),
(16, 0.830299973487854),
(17, 0.8476999998092651),
(18, 0.8632000088691711),
(19, 0.8636999726295471),
(20, 0.8657000064849854),
],
"fedfs-t10": [
(0, 0.03759999945759773),
(1, 0.7343000173568726),
(2, 0.7664999961853027),
(3, 0.7900000214576721),
(4, 0.805899977684021),
(5, 0.8237000107765198),
(6, 0.8406999707221985),
(7, 0.8263000249862671),
(8, 0.8442999720573425),
(9, 0.8564000129699707),
(10, 0.8651999831199646),
(11, 0.8375999927520752),
(12, 0.8646000027656555),
(13, 0.8669999837875366),
(14, 0.861299991607666),
(15, 0.8773999810218811),
(16, 0.800599992275238),
(17, 0.8676999807357788),
(18, 0.8763999938964844),
(19, 0.8695999979972839),
(20, 0.873199999332428),
],
"fedfs-t12": [
(0, 0.03759999945759773),
(1, 0.7153000235557556),
(2, 0.7835999727249146),
(3, 0.8083999752998352),
(4, 0.816100001335144),
(5, 0.8215000033378601),
(6, 0.8429999947547913),
(7, 0.8464000225067139),
(8, 0.8603000044822693),
(9, 0.8482999801635742),
(10, 0.8450000286102295),
(11, 0.866599977016449),
(12, 0.863099992275238),
(13, 0.8709999918937683),
(14, 0.873199999332428),
(15, 0.8701000213623047),
(16, 0.8600000143051147),
(17, 0.8766999840736389),
(18, 0.8697999715805054),
(19, 0.8795999884605408),
(20, 0.8830999732017517),
],
"fedfs-t14": [
(0, 0.03759999945759773),
(1, 0.7245000004768372),
(2, 0.7972000241279602),
(3, 0.8059999942779541),
(4, 0.8252999782562256),
(5, 0.8334000110626221),
(6, 0.8560000061988831),
(7, 0.8510000109672546),
(8, 0.8650000095367432),
(9, 0.8621000051498413),
(10, 0.866599977016449),
(11, 0.8615999817848206),
(12, 0.8636999726295471),
(13, 0.8740000128746033),
(14, 0.866100013256073),
(15, 0.867900013923645),
(16, 0.83160001039505),
(17, 0.8741999864578247),
(18, 0.8736000061035156),
(19, 0.8810999989509583),
(20, 0.8762000203132629),
],
"fedfs-t16": [
(0, 0.03759999945759773),
(1, 0.7476999759674072),
(2, 0.7982000112533569),
(3, 0.8276000022888184),
(4, 0.8256999850273132),
(5, 0.8312000036239624),
(6, 0.8536999821662903),
(7, 0.8483999967575073),
(8, 0.85589998960495),
(9, 0.8687000274658203),
(10, 0.8664000034332275),
(11, 0.8586999773979187),
(12, 0.8662999868392944),
(13, 0.8754000067710876),
(14, 0.878600001335144),
(15, 0.8763999938964844),
(16, 0.748199999332428),
(17, 0.8806999921798706),
(18, 0.8794000148773193),
(19, 0.8813999891281128),
(20, 0.8708000183105469),
],
}
RESULTS_WALL_CLOCK_TIME = {
"fedavg-14": 218.49,
"fedfs-14": 61.16,
"fedavg-16": 153.56,
"fedfs-16": 66.84,
}
def accuracy_t10() -> None:
"""Generate plots."""
lines = [
("FedAvg, t=10", RESULTS["fedavg-t10"]),
("FedFS, t=10", RESULTS["fedfs-t10"]),
]
plot(lines, "fmnist-progress-t10")
def accuracy_t12() -> None:
"""Generate plots."""
lines = [
("FedAvg, t=12", RESULTS["fedavg-t12"]),
("FedFS, t=12", RESULTS["fedfs-t12"]),
]
plot(lines, "fmnist-progress-t12")
def accuracy_t14() -> None:
"""Generate plots."""
lines = [
("FedAvg, t=14", RESULTS["fedavg-t14"]),
("FedFS, t=14", RESULTS["fedfs-t14"]),
]
plot(lines, "fmnist-progress-t14")
def accuracy_t16() -> None:
"""Generate plots."""
lines = [
("FedAvg, t=16", RESULTS["fedavg-t16"]),
("FedFS, t=16", RESULTS["fedfs-t16"]),
]
plot(lines, "fmnist-progress-t16")
def accuracy_fedavg_vs_fedfs() -> None:
"""Comparision of FedAvg vs FedFS."""
fedavg = [
RESULTS["fedavg-t10"][-1][1],
RESULTS["fedavg-t12"][-1][1],
RESULTS["fedavg-t14"][-1][1],
RESULTS["fedavg-t16"][-1][1],
]
fedfs = [
RESULTS["fedfs-t10"][-1][1],
RESULTS["fedfs-t12"][-1][1],
RESULTS["fedfs-t14"][-1][1],
RESULTS["fedfs-t16"][-1][1],
]
bar_chart(
y_values=[
np.array([x * 100 for x in fedavg]),
np.array([x * 100 for x in fedfs]),
],
bar_labels=["FedAvg", "FedFS"],
x_label="Timeout",
x_tick_labels=["T=10", "T=12", "T=14", "T=16"],
y_label="Accuracy",
filename="fmnist-accuracy_fedavg_vs_fedfs",
)
def wall_clock_time_fedavg_vs_fedfs() -> None:
"""Comparision of FedAvg vs FedFS."""
bar_chart(
y_values=[
np.array(
[
RESULTS_WALL_CLOCK_TIME["fedavg-14"],
RESULTS_WALL_CLOCK_TIME["fedavg-16"],
]
),
np.array(
[
RESULTS_WALL_CLOCK_TIME["fedfs-t14"],
RESULTS_WALL_CLOCK_TIME["fedfs-16"],
]
),
],
bar_labels=["FedAvg", "FedFS"],
x_label="Timeout",
x_tick_labels=["T=14", "T=16"],
y_label="Completion time",
filename="fmnist-time_fedavg_vs_fedfs",
)
def plot(lines: List[Tuple[str, List[Tuple[int, float]]]], filename: str) -> None:
"""Plot a single line chart."""
values = [np.array([x * 100 for _, x in val]) for _, val in lines]
labels = [label for label, _ in lines]
line_chart(
values, labels, "Round", "Accuracy", filename=filename, y_floor=0, y_ceil=100,
)
def main() -> None:
"""Call all plot functions."""
accuracy_t10()
accuracy_t12()
accuracy_t14()
accuracy_t16()
accuracy_fedavg_vs_fedfs()
wall_clock_time_fedavg_vs_fedfs()
if __name__ == "__main__":
main()
|
from setuptools import setup, PEP420PackageFinder
setup(
name="nilu-api-client",
version="1.0.0",
author="helgehatt",
description="NILU API client",
url="https://github.com/helgehatt/nilu-api-client",
packages=PEP420PackageFinder.find(),
package_data={"": ["**/files/*"]},
install_requires=["pandas", "requests"],
)
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from bambou import NURESTFetcher
class NUVMsFetcher(NURESTFetcher):
""" Represents a NUVMs fetcher
Notes:
This fetcher enables fetching NUVM objects.
See:
bambou.NURESTFetcher
"""
@classmethod
def managed_class(cls):
""" Return NUVM class that is managed.
Returns:
.NUVM: the managed class
"""
from .. import NUVM
return NUVM
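# Illustrative note (not part of the generated code): in the bambou pattern a
# generated parent entity exposes this fetcher as an attribute, so fetching the
# child VMs typically looks like the hypothetical snippet below:
#
#     vms = parent.vms.get()    # fetch and return the list of NUVM objects
#     parent.vms.fetch()        # fetch the children and attach them to parent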
|
# postgresql/psycopg2.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: postgresql+psycopg2
:name: psycopg2
:dbapi: psycopg2
:connectstring: postgresql+psycopg2://user:password@host:port/dbname[?key=value&key=value...]
:url: https://pypi.org/project/psycopg2/
psycopg2 Connect Arguments
--------------------------
Keyword arguments that are specific to the SQLAlchemy psycopg2 dialect
may be passed to :func:`_sa.create_engine()`, and include the following:
* ``isolation_level``: This option, available for all PostgreSQL dialects,
includes the ``AUTOCOMMIT`` isolation level when using the psycopg2
dialect. This option sets the **default** isolation level for the
connection that is set immediately upon connection to the database before
the connection is pooled. This option is generally superseded by the more
modern :paramref:`_engine.Connection.execution_options.isolation_level`
execution option, detailed at :ref:`dbapi_autocommit`.
.. seealso::
:ref:`psycopg2_isolation_level`
:ref:`dbapi_autocommit`
* ``client_encoding``: sets the client encoding in a libpq-agnostic way,
using psycopg2's ``set_client_encoding()`` method.
.. seealso::
:ref:`psycopg2_unicode`
* ``use_native_unicode``: Under Python 2 only, this can be set to False to
disable the use of psycopg2's native Unicode support.
.. seealso::
:ref:`psycopg2_disable_native_unicode`
* ``executemany_mode``, ``executemany_batch_page_size``,
``executemany_values_page_size``: Allows use of psycopg2
extensions for optimizing "executemany"-style queries. See the referenced
section below for details.
.. seealso::
:ref:`psycopg2_executemany_mode`
.. tip::
The above keyword arguments are **dialect** keyword arguments, meaning
that they are passed as explicit keyword arguments to :func:`_sa.create_engine()`::
engine = create_engine(
"postgresql+psycopg2://scott:tiger@localhost/test",
isolation_level="SERIALIZABLE",
)
These should not be confused with **DBAPI** connect arguments, which
are passed as part of the :paramref:`_sa.create_engine.connect_args`
dictionary and/or are passed in the URL query string, as detailed in
the section :ref:`custom_dbapi_args`.
.. _psycopg2_ssl:
SSL Connections
---------------
The psycopg2 module has a connection argument named ``sslmode`` for
controlling its behavior regarding secure (SSL) connections. The default is
``sslmode=prefer``; it will attempt an SSL connection and if that fails it
will fall back to an unencrypted connection. ``sslmode=require`` may be used
to ensure that only secure connections are established. Consult the
psycopg2 / libpq documentation for further options that are available.
Note that ``sslmode`` is specific to psycopg2 so it is included in the
connection URI::
engine = sa.create_engine(
"postgresql+psycopg2://scott:tiger@192.168.0.199:5432/test?sslmode=require"
)
Unix Domain Connections
------------------------
psycopg2 supports connecting via Unix domain connections. When the ``host``
portion of the URL is omitted, SQLAlchemy passes ``None`` to psycopg2,
which specifies Unix-domain communication rather than TCP/IP communication::
create_engine("postgresql+psycopg2://user:password@/dbname")
By default, the socket file used connects to a Unix-domain socket
in ``/tmp``, or whatever socket directory was specified when PostgreSQL
was built. This value can be overridden by passing a pathname to psycopg2,
using ``host`` as an additional keyword argument::
create_engine("postgresql+psycopg2://user:password@/dbname?host=/var/lib/postgresql")
.. seealso::
`PQconnectdbParams \
<https://www.postgresql.org/docs/9.1/static/libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_
.. _psycopg2_multi_host:
Specifying multiple fallback hosts
-----------------------------------
psycopg2 supports multiple connection points in the connection string.
When the ``host`` parameter is used multiple times in the query section of
the URL, SQLAlchemy will create a single string of the host and port
information provided to make the connections::
create_engine(
"postgresql+psycopg2://user:password@/dbname?host=HostA:port1&host=HostB&host=HostC"
)
A connection to each host is then attempted until either a connection is successful
or all connections are unsuccessful in which case an error is raised.
.. versionadded:: 1.3.20 Support for multiple hosts in PostgreSQL connection
string.
.. seealso::
`PQConnString \
<https://www.postgresql.org/docs/10/libpq-connect.html#LIBPQ-CONNSTRING>`_
Empty DSN Connections / Environment Variable Connections
---------------------------------------------------------
The psycopg2 DBAPI can connect to PostgreSQL by passing an empty DSN to the
libpq client library, which by default indicates to connect to a localhost
PostgreSQL database that is open for "trust" connections. This behavior can be
further tailored using a particular set of environment variables which are
prefixed with ``PG_...``, which are consumed by ``libpq`` to take the place of
any or all elements of the connection string.
For this form, the URL can be passed without any elements other than the
initial scheme::
engine = create_engine('postgresql+psycopg2://')
In the above form, a blank "dsn" string is passed to the ``psycopg2.connect()``
function which in turn represents an empty DSN passed to libpq.
.. versionadded:: 1.3.2 support for parameter-less connections with psycopg2.
.. seealso::
`Environment Variables\
<https://www.postgresql.org/docs/current/libpq-envars.html>`_ -
PostgreSQL documentation on how to use ``PG_...``
environment variables for connections.
.. _psycopg2_execution_options:
Per-Statement/Connection Execution Options
-------------------------------------------
The following DBAPI-specific options are respected when used with
:meth:`_engine.Connection.execution_options`,
:meth:`.Executable.execution_options`,
:meth:`_query.Query.execution_options`,
in addition to those not specific to DBAPIs:
* ``isolation_level`` - Set the transaction isolation level for the lifespan
of a :class:`_engine.Connection` (can only be set on a connection,
not a statement
or query). See :ref:`psycopg2_isolation_level`.
* ``stream_results`` - Enable or disable usage of psycopg2 server side
cursors - this feature makes use of "named" cursors in combination with
special result handling methods so that result rows are not fully buffered.
Defaults to False, meaning cursors are buffered by default.
* ``max_row_buffer`` - when using ``stream_results``, an integer value that
specifies the maximum number of rows to buffer at a time. This is
interpreted by the :class:`.BufferedRowCursorResult`, and if omitted the
buffer will grow to ultimately store 1000 rows at a time.
.. versionchanged:: 1.4 The ``max_row_buffer`` size can now be greater than
1000, and the buffer will grow to that size.
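For example, ``stream_results`` and ``max_row_buffer`` might be combined on a
single connection as in the following sketch (the table name is hypothetical)::

    from sqlalchemy import create_engine, text

    engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")
    with engine.connect() as conn:
        conn = conn.execution_options(stream_results=True, max_row_buffer=500)
        for row in conn.execute(text("SELECT * FROM big_table")):
            print(row)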
.. _psycopg2_batch_mode:
.. _psycopg2_executemany_mode:
Psycopg2 Fast Execution Helpers
-------------------------------
Modern versions of psycopg2 include a feature known as
`Fast Execution Helpers \
<https://initd.org/psycopg/docs/extras.html#fast-execution-helpers>`_, which
have been shown in benchmarking to improve psycopg2's executemany()
performance, primarily with INSERT statements, by multiple orders of magnitude.
SQLAlchemy internally makes use of these extensions for ``executemany()`` style
calls, which correspond to lists of parameters being passed to
:meth:`_engine.Connection.execute` as detailed in :ref:`multiple parameter
sets <execute_multiple>`. The ORM also uses this mode internally whenever
possible.
The two available extensions on the psycopg2 side are the ``execute_values()``
and ``execute_batch()`` functions. The psycopg2 dialect defaults to using the
``execute_values()`` extension for all qualifying INSERT statements.
.. versionchanged:: 1.4 The psycopg2 dialect now defaults to a new mode
``"values_only"`` for ``executemany_mode``, which allows an order of
magnitude performance improvement for INSERT statements, but does not
include "batch" mode for UPDATE and DELETE statements which removes the
ability of ``cursor.rowcount`` to function correctly.
The use of these extensions is controlled by the ``executemany_mode`` flag
which may be passed to :func:`_sa.create_engine`::
engine = create_engine(
"postgresql+psycopg2://scott:tiger@host/dbname",
executemany_mode='values_plus_batch')
Possible options for ``executemany_mode`` include:
* ``values_only`` - this is the default value. the psycopg2 execute_values()
extension is used for qualifying INSERT statements, which rewrites the INSERT
to include multiple VALUES clauses so that many parameter sets can be
inserted with one statement.
.. versionadded:: 1.4 Added ``"values_only"`` setting for ``executemany_mode``
which is also now the default.
* ``None`` - No psycopg2 extensions are used, and the usual
``cursor.executemany()`` method is used when invoking statements with
multiple parameter sets.
* ``'batch'`` - Uses ``psycopg2.extras.execute_batch`` for all qualifying
INSERT, UPDATE and DELETE statements, so that multiple copies
of a SQL query, each one corresponding to a parameter set passed to
``executemany()``, are joined into a single SQL string separated by a
semicolon. When using this mode, the :attr:`_engine.CursorResult.rowcount`
attribute will not contain a value for executemany-style executions.
* ``'values_plus_batch'``- ``execute_values`` is used for qualifying INSERT
statements, ``execute_batch`` is used for UPDATE and DELETE.
When using this mode, the :attr:`_engine.CursorResult.rowcount`
attribute will not contain a value for executemany-style executions against
UPDATE and DELETE statements.
By "qualifying statements", we mean that the statement being executed
must be a Core :func:`_expression.insert`, :func:`_expression.update`
or :func:`_expression.delete` construct, and not a plain textual SQL
string or one constructed using :func:`_expression.text`. When using the
ORM, all insert/update/delete statements used by the ORM flush process
are qualifying.
The "page size" for the "values" and "batch" strategies can be affected
by using the ``executemany_batch_page_size`` and
``executemany_values_page_size`` engine parameters. These
control how many parameter sets
should be represented in each execution. The "values" page size defaults
to 1000, which is different from psycopg2's default. The "batch" page
size defaults to 100. These can be affected by passing new values to
:func:`_engine.create_engine`::
engine = create_engine(
"postgresql+psycopg2://scott:tiger@host/dbname",
executemany_mode='values',
executemany_values_page_size=10000, executemany_batch_page_size=500)
.. versionchanged:: 1.4
The default for ``executemany_values_page_size`` is now 1000, up from
100.
.. seealso::
:ref:`execute_multiple` - General information on using the
:class:`_engine.Connection`
object to execute statements in such a way as to make
use of the DBAPI ``.executemany()`` method.
.. _psycopg2_unicode:
Unicode with Psycopg2
----------------------
The psycopg2 DBAPI driver supports Unicode data transparently. Under Python 2
only, the SQLAlchemy psycopg2 dialect will enable the
``psycopg2.extensions.UNICODE`` extension by default to ensure Unicode is
handled properly; under Python 3, this is psycopg2's default behavior.
The client character encoding can be controlled for the psycopg2 dialect
in the following ways:
* For PostgreSQL 9.1 and above, the ``client_encoding`` parameter may be
passed in the database URL; this parameter is consumed by the underlying
``libpq`` PostgreSQL client library::
engine = create_engine("postgresql+psycopg2://user:pass@host/dbname?client_encoding=utf8")
Alternatively, the above ``client_encoding`` value may be passed using
:paramref:`_sa.create_engine.connect_args` for programmatic establishment with
``libpq``::
engine = create_engine(
"postgresql+psycopg2://user:pass@host/dbname",
connect_args={'client_encoding': 'utf8'}
)
* For all PostgreSQL versions, psycopg2 supports a client-side encoding
value that will be passed to database connections when they are first
established. The SQLAlchemy psycopg2 dialect supports this using the
``client_encoding`` parameter passed to :func:`_sa.create_engine`::
engine = create_engine(
"postgresql+psycopg2://user:pass@host/dbname",
client_encoding="utf8"
)
.. tip:: The above ``client_encoding`` parameter admittedly is very similar
in appearance to usage of the parameter within the
:paramref:`_sa.create_engine.connect_args` dictionary; the difference
above is that the parameter is consumed by psycopg2 and is
passed to the database connection using ``SET client_encoding TO
'utf8'``; in the previously mentioned style, the parameter is instead
passed through psycopg2 and consumed by the ``libpq`` library.
* A common way to set up client encoding with PostgreSQL databases is to
ensure it is configured within the server-side postgresql.conf file;
this is the recommended way to set encoding for a server that is
consistently of one encoding in all databases::
# postgresql.conf file
# client_encoding = sql_ascii # actually, defaults to database
# encoding
client_encoding = utf8
.. _psycopg2_disable_native_unicode:
Disabling Native Unicode
^^^^^^^^^^^^^^^^^^^^^^^^
Under Python 2 only, SQLAlchemy can also be instructed to skip the usage of the
psycopg2 ``UNICODE`` extension and to instead utilize its own unicode
encode/decode services, which are normally reserved only for those DBAPIs that
don't fully support unicode directly. Passing ``use_native_unicode=False`` to
:func:`_sa.create_engine` will disable usage of ``psycopg2.extensions.
UNICODE``. SQLAlchemy will instead encode data itself into Python bytestrings
on the way in and coerce from bytes on the way back, using the value of the
:func:`_sa.create_engine` ``encoding`` parameter, which defaults to ``utf-8``.
SQLAlchemy's own unicode encode/decode functionality is steadily becoming
obsolete as most DBAPIs now support unicode fully.
Transactions
------------
The psycopg2 dialect fully supports SAVEPOINT and two-phase commit operations.
.. _psycopg2_isolation_level:
Psycopg2 Transaction Isolation Level
-------------------------------------
As discussed in :ref:`postgresql_isolation_level`,
all PostgreSQL dialects support setting of transaction isolation level
both via the ``isolation_level`` parameter passed to :func:`_sa.create_engine`,
as well as the ``isolation_level`` argument used by
:meth:`_engine.Connection.execution_options`. When using the psycopg2 dialect,
these options make use of psycopg2's ``set_isolation_level()`` connection method,
rather than emitting a PostgreSQL directive; this is because psycopg2's
API-level setting is always emitted at the start of each transaction in any
case.
The psycopg2 dialect supports these constants for isolation level:
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``AUTOCOMMIT``
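As an illustrative sketch (the connection URL is hypothetical), the level may be
set engine-wide via :func:`_sa.create_engine` or per connection via
:meth:`_engine.Connection.execution_options`::

    engine = create_engine(
        "postgresql+psycopg2://scott:tiger@localhost/test",
        isolation_level="REPEATABLE READ",
    )

    with engine.connect() as conn:
        conn = conn.execution_options(isolation_level="AUTOCOMMIT")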
.. seealso::
:ref:`postgresql_isolation_level`
:ref:`pg8000_isolation_level`
NOTICE logging
---------------
The psycopg2 dialect will log PostgreSQL NOTICE messages
via the ``sqlalchemy.dialects.postgresql`` logger. When this logger
is set to the ``logging.INFO`` level, notice messages will be logged::
import logging
logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO)
Above, it is assumed that logging is configured externally. If this is not
the case, configuration such as ``logging.basicConfig()`` must be utilized::
import logging
logging.basicConfig() # log messages to stdout
logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO)
.. seealso::
`Logging HOWTO <https://docs.python.org/3/howto/logging.html>`_ - on the python.org website
.. _psycopg2_hstore:
HSTORE type
------------
The ``psycopg2`` DBAPI includes an extension to natively handle marshalling of
the HSTORE type. The SQLAlchemy psycopg2 dialect will enable this extension
by default when psycopg2 version 2.4 or greater is used, and
it is detected that the target database has the HSTORE type set up for use.
In other words, when the dialect makes the first
connection, a sequence like the following is performed:
1. Request the available HSTORE oids using
``psycopg2.extras.HstoreAdapter.get_oids()``.
If this function returns a list of HSTORE identifiers, we then determine
that the ``HSTORE`` extension is present.
This function is **skipped** if the version of psycopg2 installed is
less than version 2.4.
2. If the ``use_native_hstore`` flag is at its default of ``True``, and
we've detected that ``HSTORE`` oids are available, the
``psycopg2.extensions.register_hstore()`` extension is invoked for all
connections.
The ``register_hstore()`` extension has the effect of **all Python
dictionaries being accepted as parameters regardless of the type of target
column in SQL**. The dictionaries are converted by this extension into a
textual HSTORE expression. If this behavior is not desired, disable the
use of the hstore extension by setting ``use_native_hstore`` to ``False`` as
follows::
engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test",
use_native_hstore=False)
The ``HSTORE`` type is **still supported** when the
``psycopg2.extensions.register_hstore()`` extension is not used. It merely
means that the coercion between Python dictionaries and the HSTORE
string format, on both the parameter side and the result side, will take
place within SQLAlchemy's own marshalling logic, and not that of ``psycopg2``
which may be more performant.
""" # noqa
from __future__ import absolute_import
import decimal
import logging
import re
from uuid import UUID as _python_UUID
from .array import ARRAY as PGARRAY
from .base import _ColonCast
from .base import _DECIMAL_TYPES
from .base import _FLOAT_TYPES
from .base import _INT_TYPES
from .base import ENUM
from .base import PGCompiler
from .base import PGDialect
from .base import PGExecutionContext
from .base import PGIdentifierPreparer
from .base import UUID
from .hstore import HSTORE
from .json import JSON
from .json import JSONB
from ... import exc
from ... import processors
from ... import types as sqltypes
from ... import util
from ...engine import cursor as _cursor
from ...util import collections_abc
logger = logging.getLogger("sqlalchemy.dialects.postgresql")
class _PGNumeric(sqltypes.Numeric):
def bind_processor(self, dialect):
return None
def result_processor(self, dialect, coltype):
if self.asdecimal:
if coltype in _FLOAT_TYPES:
return processors.to_decimal_processor_factory(
decimal.Decimal, self._effective_decimal_return_scale
)
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
# pg8000 returns Decimal natively for 1700
return None
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype
)
else:
if coltype in _FLOAT_TYPES:
# pg8000 returns float natively for 701
return None
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
return processors.to_float
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype
)
class _PGEnum(ENUM):
def result_processor(self, dialect, coltype):
if util.py2k and self._expect_unicode is True:
# for py2k, if the enum type needs unicode data (which is set up as
# part of the Enum() constructor based on values passed as py2k
# unicode objects) we have to use our own converters since
# psycopg2's don't work, a rare exception to the "modern DBAPIs
# support unicode everywhere" theme of deprecating
# convert_unicode=True. Use the special "force_nocheck" directive
# which forces unicode conversion to happen on the Python side
# without an isinstance() check. in py3k psycopg2 does the right
# thing automatically.
self._expect_unicode = "force_nocheck"
return super(_PGEnum, self).result_processor(dialect, coltype)
class _PGHStore(HSTORE):
def bind_processor(self, dialect):
if dialect._has_native_hstore:
return None
else:
return super(_PGHStore, self).bind_processor(dialect)
def result_processor(self, dialect, coltype):
if dialect._has_native_hstore:
return None
else:
return super(_PGHStore, self).result_processor(dialect, coltype)
class _PGARRAY(PGARRAY):
def bind_expression(self, bindvalue):
return _ColonCast(bindvalue, self)
class _PGJSON(JSON):
def result_processor(self, dialect, coltype):
return None
class _PGJSONB(JSONB):
def result_processor(self, dialect, coltype):
return None
class _PGUUID(UUID):
def bind_processor(self, dialect):
if not self.as_uuid and dialect.use_native_uuid:
def process(value):
if value is not None:
value = _python_UUID(value)
return value
return process
def result_processor(self, dialect, coltype):
if not self.as_uuid and dialect.use_native_uuid:
def process(value):
if value is not None:
value = str(value)
return value
return process
_server_side_id = util.counter()
class PGExecutionContext_psycopg2(PGExecutionContext):
_psycopg2_fetched_rows = None
def create_server_side_cursor(self):
# use server-side cursors:
# https://lists.initd.org/pipermail/psycopg/2007-January/005251.html
ident = "c_%s_%s" % (hex(id(self))[2:], hex(_server_side_id())[2:])
return self._dbapi_connection.cursor(ident)
def post_exec(self):
if (
self._psycopg2_fetched_rows
and self.compiled
and self.compiled.returning
):
# psycopg2 execute_values will provide for a real cursor where
# cursor.description works correctly. however, it executes the
# INSERT statement multiple times for multiple pages of rows, so
# while this cursor also supports calling .fetchall() directly, in
# order to get the list of all rows inserted across multiple pages,
# we have to retrieve the aggregated list from the execute_values()
# function directly.
strat_cls = _cursor.FullyBufferedCursorFetchStrategy
self.cursor_fetch_strategy = strat_cls(
self.cursor, initial_buffer=self._psycopg2_fetched_rows
)
self._log_notices(self.cursor)
def _log_notices(self, cursor):
# check also that notices is an iterable, after it's already
# established that we will be iterating through it. This is to get
# around test suites such as SQLAlchemy's using a Mock object for
# cursor
if not cursor.connection.notices or not isinstance(
cursor.connection.notices, collections_abc.Iterable
):
return
for notice in cursor.connection.notices:
# NOTICE messages have a
# newline character at the end
logger.info(notice.rstrip())
cursor.connection.notices[:] = []
class PGCompiler_psycopg2(PGCompiler):
pass
class PGIdentifierPreparer_psycopg2(PGIdentifierPreparer):
pass
EXECUTEMANY_PLAIN = util.symbol("executemany_plain", canonical=0)
EXECUTEMANY_BATCH = util.symbol("executemany_batch", canonical=1)
EXECUTEMANY_VALUES = util.symbol("executemany_values", canonical=2)
EXECUTEMANY_VALUES_PLUS_BATCH = util.symbol(
"executemany_values_plus_batch",
canonical=EXECUTEMANY_BATCH | EXECUTEMANY_VALUES,
)
class PGDialect_psycopg2(PGDialect):
driver = "psycopg2"
supports_statement_cache = True
if util.py2k:
# turn off supports_unicode_statements for Python 2. psycopg2 supports
# unicode statements in Py2K. But! it does not support unicode *bound
# parameter names* because it uses the Python "%" operator to
# interpolate these into the string, and this fails. So for Py2K, we
# have to use full-on encoding for statements and parameters before
# passing to cursor.execute().
supports_unicode_statements = False
supports_server_side_cursors = True
default_paramstyle = "pyformat"
# set to true based on psycopg2 version
supports_sane_multi_rowcount = False
execution_ctx_cls = PGExecutionContext_psycopg2
statement_compiler = PGCompiler_psycopg2
preparer = PGIdentifierPreparer_psycopg2
psycopg2_version = (0, 0)
_has_native_hstore = True
engine_config_types = PGDialect.engine_config_types.union(
{"use_native_unicode": util.asbool}
)
colspecs = util.update_copy(
PGDialect.colspecs,
{
sqltypes.Numeric: _PGNumeric,
ENUM: _PGEnum, # needs force_unicode
sqltypes.Enum: _PGEnum, # needs force_unicode
HSTORE: _PGHStore,
JSON: _PGJSON,
sqltypes.JSON: _PGJSON,
JSONB: _PGJSONB,
UUID: _PGUUID,
sqltypes.ARRAY: _PGARRAY,
},
)
def __init__(
self,
use_native_unicode=True,
client_encoding=None,
use_native_hstore=True,
use_native_uuid=True,
executemany_mode="values_only",
executemany_batch_page_size=100,
executemany_values_page_size=1000,
**kwargs
):
PGDialect.__init__(self, **kwargs)
self.use_native_unicode = use_native_unicode
if not use_native_unicode and not util.py2k:
raise exc.ArgumentError(
"psycopg2 native_unicode mode is required under Python 3"
)
if not use_native_hstore:
self._has_native_hstore = False
self.use_native_hstore = use_native_hstore
self.use_native_uuid = use_native_uuid
self.supports_unicode_binds = use_native_unicode
self.client_encoding = client_encoding
# Parse executemany_mode argument, allowing it to be only one of the
# symbol names
self.executemany_mode = util.symbol.parse_user_argument(
executemany_mode,
{
EXECUTEMANY_PLAIN: [None],
EXECUTEMANY_BATCH: ["batch"],
EXECUTEMANY_VALUES: ["values_only"],
EXECUTEMANY_VALUES_PLUS_BATCH: ["values_plus_batch", "values"],
},
"executemany_mode",
)
if self.executemany_mode & EXECUTEMANY_VALUES:
self.insert_executemany_returning = True
self.executemany_batch_page_size = executemany_batch_page_size
self.executemany_values_page_size = executemany_values_page_size
if self.dbapi and hasattr(self.dbapi, "__version__"):
m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", self.dbapi.__version__)
if m:
self.psycopg2_version = tuple(
int(x) for x in m.group(1, 2, 3) if x is not None
)
if self.psycopg2_version < (2, 7):
raise ImportError(
"psycopg2 version 2.7 or higher is required."
)
def initialize(self, connection):
super(PGDialect_psycopg2, self).initialize(connection)
self._has_native_hstore = (
self.use_native_hstore
and self._hstore_oids(connection.connection) is not None
)
# PGDialect.initialize() checks server version for <= 8.2 and sets
# this flag to False if so
if not self.full_returning:
self.insert_executemany_returning = False
self.executemany_mode = EXECUTEMANY_PLAIN
self.supports_sane_multi_rowcount = not (
self.executemany_mode & EXECUTEMANY_BATCH
)
@classmethod
def dbapi(cls):
import psycopg2
return psycopg2
@classmethod
def _psycopg2_extensions(cls):
from psycopg2 import extensions
return extensions
@classmethod
def _psycopg2_extras(cls):
from psycopg2 import extras
return extras
@util.memoized_property
def _isolation_lookup(self):
extensions = self._psycopg2_extensions()
return {
"AUTOCOMMIT": extensions.ISOLATION_LEVEL_AUTOCOMMIT,
"READ COMMITTED": extensions.ISOLATION_LEVEL_READ_COMMITTED,
"READ UNCOMMITTED": extensions.ISOLATION_LEVEL_READ_UNCOMMITTED,
"REPEATABLE READ": extensions.ISOLATION_LEVEL_REPEATABLE_READ,
"SERIALIZABLE": extensions.ISOLATION_LEVEL_SERIALIZABLE,
}
def set_isolation_level(self, connection, level):
try:
level = self._isolation_lookup[level.replace("_", " ")]
except KeyError as err:
util.raise_(
exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s"
% (level, self.name, ", ".join(self._isolation_lookup))
),
replace_context=err,
)
connection.set_isolation_level(level)
def set_readonly(self, connection, value):
connection.readonly = value
def get_readonly(self, connection):
return connection.readonly
def set_deferrable(self, connection, value):
connection.deferrable = value
def get_deferrable(self, connection):
return connection.deferrable
def do_ping(self, dbapi_connection):
cursor = None
before_autocommit = dbapi_connection.autocommit
try:
if not before_autocommit:
dbapi_connection.autocommit = True
cursor = dbapi_connection.cursor()
try:
cursor.execute(self._dialect_specific_select_one)
finally:
cursor.close()
if not before_autocommit and not dbapi_connection.closed:
dbapi_connection.autocommit = before_autocommit
except self.dbapi.Error as err:
if self.is_disconnect(err, dbapi_connection, cursor):
return False
else:
raise
else:
return True
def on_connect(self):
extras = self._psycopg2_extras()
extensions = self._psycopg2_extensions()
fns = []
if self.client_encoding is not None:
def on_connect(conn):
conn.set_client_encoding(self.client_encoding)
fns.append(on_connect)
if self.isolation_level is not None:
def on_connect(conn):
self.set_isolation_level(conn, self.isolation_level)
fns.append(on_connect)
if self.dbapi and self.use_native_uuid:
def on_connect(conn):
extras.register_uuid(None, conn)
fns.append(on_connect)
if util.py2k and self.dbapi and self.use_native_unicode:
def on_connect(conn):
extensions.register_type(extensions.UNICODE, conn)
extensions.register_type(extensions.UNICODEARRAY, conn)
fns.append(on_connect)
if self.dbapi and self.use_native_hstore:
def on_connect(conn):
hstore_oids = self._hstore_oids(conn)
if hstore_oids is not None:
oid, array_oid = hstore_oids
kw = {"oid": oid}
if util.py2k:
kw["unicode"] = True
kw["array_oid"] = array_oid
extras.register_hstore(conn, **kw)
fns.append(on_connect)
if self.dbapi and self._json_deserializer:
def on_connect(conn):
extras.register_default_json(
conn, loads=self._json_deserializer
)
extras.register_default_jsonb(
conn, loads=self._json_deserializer
)
fns.append(on_connect)
if fns:
def on_connect(conn):
for fn in fns:
fn(conn)
return on_connect
else:
return None
def do_executemany(self, cursor, statement, parameters, context=None):
if (
self.executemany_mode & EXECUTEMANY_VALUES
and context
and context.isinsert
and context.compiled._is_safe_for_fast_insert_values_helper
):
executemany_values = (
"(%s)" % context.compiled.insert_single_values_expr
)
if not self.supports_unicode_statements:
executemany_values = executemany_values.encode(self.encoding)
# guard for statement that was altered via event hook or similar
if executemany_values not in statement:
executemany_values = None
else:
executemany_values = None
if executemany_values:
statement = statement.replace(executemany_values, "%s")
if self.executemany_values_page_size:
kwargs = {"page_size": self.executemany_values_page_size}
else:
kwargs = {}
xtras = self._psycopg2_extras()
context._psycopg2_fetched_rows = xtras.execute_values(
cursor,
statement,
parameters,
template=executemany_values,
fetch=bool(context.compiled.returning),
**kwargs
)
elif self.executemany_mode & EXECUTEMANY_BATCH:
if self.executemany_batch_page_size:
kwargs = {"page_size": self.executemany_batch_page_size}
else:
kwargs = {}
self._psycopg2_extras().execute_batch(
cursor, statement, parameters, **kwargs
)
else:
cursor.executemany(statement, parameters)
@util.memoized_instancemethod
def _hstore_oids(self, conn):
extras = self._psycopg2_extras()
if hasattr(conn, "dbapi_connection"):
conn = conn.dbapi_connection
oids = extras.HstoreAdapter.get_oids(conn)
if oids is not None and oids[0]:
return oids[0:2]
else:
return None
def create_connect_args(self, url):
opts = url.translate_connect_args(username="user")
is_multihost = False
if "host" in url.query:
is_multihost = isinstance(url.query["host"], (list, tuple))
if opts:
if "port" in opts:
opts["port"] = int(opts["port"])
opts.update(url.query)
if is_multihost:
opts["host"] = ",".join(url.query["host"])
# send individual dbname, user, password, host, port
# parameters to psycopg2.connect()
return ([], opts)
elif url.query:
# any other connection arguments, pass directly
opts.update(url.query)
if is_multihost:
opts["host"] = ",".join(url.query["host"])
return ([], opts)
else:
# no connection arguments whatsoever; psycopg2.connect()
# requires that "dsn" be present as a blank string.
return ([""], opts)
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.Error):
# check the "closed" flag. this might not be
# present on old psycopg2 versions. Also,
# this flag doesn't actually help in a lot of disconnect
# situations, so don't rely on it.
if getattr(connection, "closed", False):
return True
# checks based on strings. in the case that .closed
# didn't cut it, fall back onto these.
str_e = str(e).partition("\n")[0]
for msg in [
# these error messages from libpq: interfaces/libpq/fe-misc.c
# and interfaces/libpq/fe-secure.c.
"terminating connection",
"closed the connection",
"connection not open",
"could not receive data from server",
"could not send data to server",
# psycopg2 client errors, psycopg2/connection.h,
# psycopg2/cursor.h
"connection already closed",
"cursor already closed",
# not sure where this path is originally from, it may
# be obsolete. It really says "losed", not "closed".
"losed the connection unexpectedly",
# these can occur in newer SSL
"connection has been closed unexpectedly",
"SSL error: decryption failed or bad record mac",
"SSL SYSCALL error: Bad file descriptor",
"SSL SYSCALL error: EOF detected",
"SSL SYSCALL error: Operation timed out",
"SSL SYSCALL error: Bad address",
]:
idx = str_e.find(msg)
if idx >= 0 and '"' not in str_e[:idx]:
return True
return False
dialect = PGDialect_psycopg2
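# A hedged configuration sketch (not part of the original module): with SQLAlchemy
# 1.4-era engines the executemany paths implemented above are selected through
# create_engine() arguments roughly like the ones below; the connection URL and the
# page sizes are placeholders.
#
#   from sqlalchemy import create_engine
#   engine = create_engine(
#       "postgresql+psycopg2://user:password@localhost/dbname",
#       executemany_mode="values_plus_batch",  # execute_values() for INSERTs,
#       executemany_values_page_size=1000,     # execute_batch() for other statements
#       executemany_batch_page_size=100,
#   )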
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from desktop import api_public
from desktop.lib.botserver import api as botserver_api
if sys.version_info[0] > 2:
from django.urls import re_path
else:
from django.conf.urls import url as re_path
# "New" query API (i.e. connector based, lean arguments).
# e.g. https://demo.gethue.com/api/query/execute/hive
urlpatterns = [
re_path(r'^query/create_notebook/?$', api_public.create_notebook, name='api_create_notebook'),
re_path(r'^query/autocomplete/?$', api_public.autocomplete, name='api_autocomplete_databases'),
]
# Compatibility with "old" private API.
# e.g. https://demo.gethue.com/notebook/api/execute/hive
urlpatterns += [
re_path(r'^get_config/?$', api_public.get_config),
re_path(r'^get_namespaces/(?P<interface>[\w\-]+)/?$', api_public.get_context_namespaces), # To remove
re_path(r'^editor/create_notebook/?$', api_public.create_notebook, name='api_create_notebook'),
re_path(r'^editor/create_session/?$', api_public.create_session, name='api_create_session'),
re_path(r'^editor/close_session/?$', api_public.close_session, name='api_close_session'),
re_path(r'^editor/execute(?:/(?P<dialect>.+))?/?$', api_public.execute, name='api_execute'),
re_path(r'^editor/check_status/?$', api_public.check_status, name='api_check_status'),
re_path(r'^editor/fetch_result_data/?$', api_public.fetch_result_data, name='api_fetch_result_data'),
re_path(r'^editor/fetch_result_metadata/?$', api_public.fetch_result_metadata, name='api_fetch_result_metadata'),
re_path(r'^editor/fetch_result_size/?$', api_public.fetch_result_size, name='api_fetch_result_size'),
re_path(r'^editor/cancel_statement/?$', api_public.cancel_statement, name='api_cancel_statement'),
re_path(r'^editor/close_statement/?$', api_public.close_statement, name='api_close_statement'),
re_path(r'^editor/get_logs/?$', api_public.get_logs, name='api_get_logs'),
re_path(r'^editor/autocomplete/?$', api_public.autocomplete, name='api_autocomplete_databases'),
re_path(
r"^editor/autocomplete/(?P<database>[^/?]*)/?$",
api_public.autocomplete,
name="api_autocomplete_tables",
),
re_path(
r"^editor/autocomplete/(?P<database>[^/?]*)/(?P<table>[\w_\-]+)/?$",
api_public.autocomplete,
name="api_autocomplete_columns",
),
re_path(
r"^editor/autocomplete/(?P<database>[^/?]*)/(?P<table>[\w_\-]+)/(?P<column>\w+)/?$",
api_public.autocomplete,
name="api_autocomplete_column",
),
re_path(
r"^editor/autocomplete/(?P<database>[^/?]*)/(?P<table>[\w_\-]+)/(?P<column>\w+)/(?P<nested>.+)/?$",
api_public.autocomplete,
name="api_autocomplete_nested",
),
]
# Slack install API for using CORS by default
urlpatterns += [
re_path(r'^slack/install/?$', botserver_api.generate_slack_install_link, name='botserver.api.slack_install_link'),
]
|
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import takewhile
import inspect
import os.path
import re
from .charwidth import get_char_width
from .misc import seq2str2
from .platform import JYTHON, PY_VERSION
from .robottypes import is_string, is_unicode
from .unic import unic
MAX_ERROR_LINES = 40
_MAX_ASSIGN_LENGTH = 200
_MAX_ERROR_LINE_LENGTH = 78
_ERROR_CUT_EXPLN = ' [ Message content over the limit has been removed. ]'
_TAGS_RE = re.compile(r'\s*tags:(.*)', re.IGNORECASE)
def cut_long_message(msg):
if MAX_ERROR_LINES is None:
return msg
lines = msg.splitlines()
lengths = _count_line_lengths(lines)
if sum(lengths) <= MAX_ERROR_LINES:
return msg
start = _prune_excess_lines(lines, lengths)
end = _prune_excess_lines(lines, lengths, from_end=True)
return '\n'.join(start + [_ERROR_CUT_EXPLN] + end)
def _prune_excess_lines(lines, lengths, from_end=False):
if from_end:
lines.reverse()
lengths.reverse()
ret = []
total = 0
limit = MAX_ERROR_LINES // 2
for line, length in zip(lines[:limit], lengths[:limit]):
if total + length >= limit:
ret.append(_cut_long_line(line, total, from_end))
break
total += length
ret.append(line)
if from_end:
ret.reverse()
return ret
def _cut_long_line(line, used, from_end):
available_lines = MAX_ERROR_LINES // 2 - used
available_chars = available_lines * _MAX_ERROR_LINE_LENGTH - 3
if len(line) > available_chars:
if not from_end:
line = line[:available_chars] + '...'
else:
line = '...' + line[-available_chars:]
return line
def _count_line_lengths(lines):
return [ _count_virtual_line_length(line) for line in lines ]
def _count_virtual_line_length(line):
if not line:
return 1
lines, remainder = divmod(len(line), _MAX_ERROR_LINE_LENGTH)
return lines if not remainder else lines + 1
def format_assign_message(variable, value, cut_long=True):
formatter = {'$': unic, '@': seq2str2, '&': _dict_to_str}[variable[0]]
value = formatter(value)
if cut_long and len(value) > _MAX_ASSIGN_LENGTH:
value = value[:_MAX_ASSIGN_LENGTH] + '...'
return '%s = %s' % (variable, value)
def _dict_to_str(d):
if not d:
return '{ }'
return '{ %s }' % ' | '.join('%s=%s' % (unic(k), unic(v))
for k, v in d.items())
def get_console_length(text):
return sum(get_char_width(char) for char in text)
def pad_console_length(text, width):
if width < 5:
width = 5
diff = get_console_length(text) - width
if diff > 0:
text = _lose_width(text, diff+3) + '...'
return _pad_width(text, width)
def _pad_width(text, width):
more = width - get_console_length(text)
return text + ' ' * more
def _lose_width(text, diff):
lost = 0
while lost < diff:
lost += get_console_length(text[-1])
text = text[:-1]
return text
def split_args_from_name_or_path(name):
"""Split arguments embedded to name or path like ``Example:arg1:arg2``.
The separator can be either colon ``:`` or semicolon ``;``. If both are used,
the first one is considered to be the separator.
"""
if os.path.exists(name):
return os.path.abspath(name), []
index = _get_arg_separator_index_from_name_or_path(name)
if index == -1:
return name, []
args = name[index+1:].split(name[index])
name = name[:index]
if os.path.exists(name):
name = os.path.abspath(name)
return name, args
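# A minimal illustration (hypothetical inputs, assuming none of the names are existing
# paths) of the splitting rule documented above: the first ``:`` or ``;`` after a
# possible Windows drive letter starts the argument list.
#
#   split_args_from_name_or_path('MyLibrary:arg1:arg2')  ->  ('MyLibrary', ['arg1', 'arg2'])
#   split_args_from_name_or_path('C:\\libs\\Lib.py;a;b') ->  ('C:\\libs\\Lib.py', ['a', 'b'])
#   split_args_from_name_or_path('NoArgsHere')           ->  ('NoArgsHere', [])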
def _get_arg_separator_index_from_name_or_path(name):
colon_index = name.find(':')
# Handle absolute Windows paths
if colon_index == 1 and name[2:3] in ('/', '\\'):
colon_index = name.find(':', colon_index+1)
semicolon_index = name.find(';')
if colon_index == -1:
return semicolon_index
if semicolon_index == -1:
return colon_index
return min(colon_index, semicolon_index)
def split_tags_from_doc(doc):
doc = doc.rstrip()
tags = []
if not doc:
return doc, tags
lines = doc.splitlines()
match = _TAGS_RE.match(lines[-1])
if match:
doc = '\n'.join(lines[:-1]).rstrip()
tags = [tag.strip() for tag in match.group(1).split(',')]
return doc, tags
def getdoc(item):
doc = inspect.getdoc(item) or u''
if is_unicode(doc):
return doc
try:
return doc.decode('UTF-8')
except UnicodeDecodeError:
return unic(doc)
def getshortdoc(doc_or_item, linesep='\n'):
if not doc_or_item:
return u''
doc = doc_or_item if is_string(doc_or_item) else getdoc(doc_or_item)
lines = takewhile(lambda line: line.strip(), doc.splitlines())
return linesep.join(lines)
# https://bugs.jython.org/issue2772
if JYTHON and PY_VERSION < (2, 7, 2):
    trailing_spaces = re.compile(r'\s+$', re.UNICODE)
def rstrip(string):
return trailing_spaces.sub('', string)
else:
def rstrip(string):
return string.rstrip()
|
import asyncio
import discord
import logging
from random import randint
from random import choice as randchoice
from redbot.core import bank, checks, commands, Config
from redbot.core.errors import BalanceTooHigh
from redbot.core.utils.chat_formatting import box, humanize_list, pagify
from .phrases import FRIENDS, SNACKBURR_PHRASES
log = logging.getLogger("red.aikaterna.snacktime")
class Snacktime(commands.Cog):
"""Snackburr's passing out pb jars!"""
async def red_delete_data_for_user(self, **kwargs):
""" Nothing to delete """
return
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, 2712291001, force_registration=True)
self.snackSchedule = {}
self.snacktimePrediction = {}
self.previousSpeaker = {}
self.snackInProgress = {}
self.acceptInput = {}
self.alreadySnacked = {}
self.msgsPassed = {}
self.startLock = {}
self.snacktimeCheckLock = {}
self.lockRequests = {}
self.channel_persona = {}
default_guild = {
"DELIVER_CHANNELS": [],
"FRIENDS": False,
"EVENT_START_DELAY": 1800,
"EVENT_START_DELAY_VARIANCE": 900,
"SNACK_DURATION": 240,
"SNACK_DURATION_VARIANCE": 120,
"MSGS_BEFORE_EVENT": 8,
"SNACK_AMOUNT": 200,
}
default_channel = {"repeatMissedSnacktimes": 0}
self.config.register_guild(**default_guild)
self.config.register_channel(**default_channel)
    async def persona_choice(self, ctx=None, message=None):
if ctx:
invite_friends = await self.config.guild(ctx.guild).FRIENDS()
else:
invite_friends = await self.config.guild(message.guild).FRIENDS()
        personas = dict(FRIENDS)  # copy so the shared FRIENDS mapping isn't mutated by the del below
if not invite_friends:
return "Snackburr" if message else "ʕ •ᴥ•ʔ <"
elif invite_friends is True:
try:
del personas["Snackburr"]
except KeyError:
pass
if message:
return randchoice(list(personas.keys()))
else:
return randchoice(list(personas.values()))
async def get_response(self, msg, phrase_type):
scid = f"{msg.guild.id}-{msg.channel.id}"
persona = self.channel_persona[scid]
persona_phrase = FRIENDS.get(persona)
phrase = randchoice(SNACKBURR_PHRASES[phrase_type])
return f"`{persona_phrase} {phrase}`"
@commands.cooldown(1, 1, commands.BucketType.channel)
@commands.guild_only()
@commands.command()
async def eat(self, ctx, amount: int):
"""
all this talk about pb is makin me hungry.
how bout you guys?
"""
persona = await self.persona_choice(ctx=ctx, message=None)
if amount < 0:
return await ctx.send(f"`{persona} Woah slow down!`")
if amount > await bank.get_balance(ctx.author):
return await ctx.send(f"`{persona} You don't got that much pb!.. don't look at me..`")
await bank.withdraw_credits(ctx.author, amount)
first_phrase = randchoice(SNACKBURR_PHRASES["EAT_BEFORE"])
second_phrase = randchoice(SNACKBURR_PHRASES["EAT_AFTER"])
await ctx.send(f"`{persona} {ctx.author.display_name} {first_phrase} {second_phrase} {amount} whole pb jars!`")
@commands.guild_only()
@commands.group()
@checks.mod_or_permissions(manage_guild=True)
async def snackset(self, ctx):
"""snack stuff"""
if ctx.invoked_subcommand is None:
guild_data = await self.config.guild(ctx.guild).all()
channel_names = []
if guild_data["DELIVER_CHANNELS"]:
for channel_id in guild_data["DELIVER_CHANNELS"]:
channel_obj = self.bot.get_channel(channel_id)
if channel_obj:
channel_names.append(channel_obj.name)
if len(channel_names) == 0:
channel_names = ["No channels set."]
if guild_data["FRIENDS"] is True:
invite_friends = "Friends only"
elif guild_data["FRIENDS"] is False:
invite_friends = "Snackburr only"
else:
invite_friends = "Everyone's invited!"
msg = f"[Delivering in]: {humanize_list(channel_names)}\n"
msg += f"[Event start delay]: {guild_data['EVENT_START_DELAY']} seconds\n"
msg += f"[Event start variance]: {guild_data['EVENT_START_DELAY_VARIANCE']} seconds\n"
msg += f"[Friends status]: {invite_friends}\n"
msg += f"[Messages before event]: {guild_data['MSGS_BEFORE_EVENT']}\n"
msg += f"[Snack amount limit]: {guild_data['SNACK_AMOUNT']} pb\n"
msg += f"[Snack duration]: {guild_data['SNACK_DURATION']} seconds\n"
msg += f"[Snack duration variance]: {guild_data['SNACK_DURATION_VARIANCE']} seconds\n"
for page in pagify(msg, delims=["\n"]):
await ctx.send(box(page, lang="ini"))
@snackset.command()
async def errandtime(self, ctx, seconds: int):
"""How long snackburr needs to be out doin errands.. more or less."""
event_start_delay_variance = await self.config.guild(ctx.guild).EVENT_START_DELAY_VARIANCE()
if seconds <= event_start_delay_variance:
await ctx.send("errandtime must be greater than errandvariance!")
elif seconds <= 0:
await ctx.send("errandtime must be greater than 0")
else:
await self.config.guild(ctx.guild).EVENT_START_DELAY.set(seconds)
await ctx.send(f"snackburr's errands will now take around {round(seconds/60, 2)} minutes!")
@snackset.command()
async def errandvariance(self, ctx, seconds: int):
"""How early or late snackburr might be to snacktime"""
event_start_delay = await self.config.guild(ctx.guild).EVENT_START_DELAY()
if seconds >= event_start_delay:
await ctx.send("errandvariance must be less than errandtime!")
elif seconds < 0:
await ctx.send("errandvariance must be 0 or greater!")
else:
await self.config.guild(ctx.guild).EVENT_START_DELAY_VARIANCE.set(seconds)
await ctx.send(f"snackburr now might be {round(seconds/60, 2)} minutes early or late to snacktime")
@snackset.command(name="snacktime")
async def snacktimetime(self, ctx, seconds: int):
"""How long snackburr will hang out giving out snacks!.. more or less."""
snack_duration_variance = await self.config.guild(ctx.guild).SNACK_DURATION_VARIANCE()
if seconds <= snack_duration_variance:
await ctx.send("snacktime must be greater than snackvariance!")
elif seconds <= 0:
await ctx.send("snacktime must be greater than 0")
else:
await self.config.guild(ctx.guild).SNACK_DURATION.set(seconds)
await ctx.send(f"snacktimes will now last around {round(seconds/60, 2)} minutes!")
@snackset.command(name="snackvariance")
async def snacktimevariance(self, ctx, seconds: int):
"""How early or late snackburr might have to leave for errands"""
snack_duration = await self.config.guild(ctx.guild).SNACK_DURATION()
if seconds >= snack_duration:
await ctx.send("snackvariance must be less than snacktime!")
elif seconds < 0:
await ctx.send("snackvariance must be 0 or greater!")
else:
await self.config.guild(ctx.guild).SNACK_DURATION_VARIANCE.set(seconds)
await ctx.send(f"snackburr now may have to leave snacktime {round(seconds/60, 2)} minutes early or late")
@snackset.command()
async def msgsneeded(self, ctx, amt: int):
"""How many messages must pass in a conversation before a snacktime can start"""
if amt <= 0:
await ctx.send("msgsneeded must be greater than 0")
else:
await self.config.guild(ctx.guild).MSGS_BEFORE_EVENT.set(amt)
await ctx.send(f"snackburr will now wait until {amt} messages pass until he comes with snacks")
@snackset.command()
async def amount(self, ctx, amt: int):
"""How much pb max snackburr should give out to each person per snacktime"""
if amt <= 0:
await ctx.send("amount must be greater than 0")
else:
await self.config.guild(ctx.guild).SNACK_AMOUNT.set(amt)
await ctx.send(f"snackburr will now give out {amt} pb max per person per snacktime.")
@snackset.command(name="friends")
async def snackset_friends(self, ctx, choice: int):
"""snackburr's friends wanna know what all the hub-bub's about!
Do you want to
1: invite them to the party,
2: only allow snackburr to chillax with you guys, or
3: kick snackburr out on the curb in favor of his obviously cooler friends?
"""
if choice not in (1, 2, 3):
return await ctx.send_help()
choices = {
1: ("both", "Everybody's invited!"),
2: (False, "You chose to not invite snackburr's friends."),
3: (True, "You kick snackburr out in favor of his friends! Ouch. Harsh..."),
}
choice = choices[choice]
await self.config.guild(ctx.guild).FRIENDS.set(choice[0])
await ctx.send(choice[1])
@snackset.command()
async def deliver(self, ctx):
"""Asks snackburr to start delivering to this channel"""
deliver_channels = await self.config.guild(ctx.guild).DELIVER_CHANNELS()
if not deliver_channels:
deliver_channels = []
if ctx.channel.id not in deliver_channels:
deliver_channels.append(ctx.channel.id)
await self.config.guild(ctx.guild).DELIVER_CHANNELS.set(deliver_channels)
await ctx.send("snackburr will start delivering here!")
else:
deliver_channels.remove(ctx.channel.id)
await self.config.guild(ctx.guild).DELIVER_CHANNELS.set(deliver_channels)
await ctx.send("snackburr will stop delivering here!")
@commands.guild_only()
@commands.command()
async def snacktime(self, ctx):
"""Man i'm hungry! When's snackburr gonna get back with more snacks?"""
scid = f"{ctx.message.guild.id}-{ctx.message.channel.id}"
if self.snacktimePrediction.get(scid, None) == None:
if self.acceptInput.get(scid, False):
return
else:
phrases = [
r"Don't look at me. I donno where snackburr's at ¯\_(ツ)_/¯",
"I hear snackburr likes parties. *wink wink",
"I hear snackburr is attracted to channels with active conversations",
"If you party, snackburr will come! 〈( ^o^)ノ",
]
await ctx.send(randchoice(phrases))
return
seconds = self.snacktimePrediction[scid] - self.bot.loop.time()
if self.snacktimeCheckLock.get(scid, False):
if randint(1, 4) == 4:
await ctx.send("Hey, snackburr's on errands. I ain't his keeper Kappa")
return
self.snacktimeCheckLock[scid] = True
if seconds < 0:
await ctx.send(f"I'm not sure where snackburr is.. He's already {round(abs(seconds/60), 2)} minutes late!")
else:
await ctx.send(f"snackburr's out on errands! I think he'll be back in {round(seconds/60, 2)} minutes")
await asyncio.sleep(40)
self.snacktimeCheckLock[scid] = False
async def startSnack(self, message):
scid = f"{message.guild.id}-{message.channel.id}"
if self.acceptInput.get(scid, False):
return
self.channel_persona[scid] = await self.persona_choice(ctx=None, message=message)
await message.channel.send(await self.get_response(message, "SNACKTIME"))
self.acceptInput[scid] = True
self.alreadySnacked[scid] = []
guild_data = await self.config.guild(message.guild).all()
duration = guild_data["SNACK_DURATION"] + randint(
-guild_data["SNACK_DURATION_VARIANCE"], guild_data["SNACK_DURATION_VARIANCE"]
)
await asyncio.sleep(duration)
# sometimes fails sending messages and stops all future snacktimes. Hopefully this fixes it.
try:
# list isn't empty
if self.alreadySnacked.get(scid, False):
await message.channel.send(await self.get_response(message, "OUT"))
await self.config.channel(message.channel).repeatMissedSnacktimes.set(0)
else:
await message.channel.send(await self.get_response(message, "NO_TAKERS"))
repeat_missed_snacktimes = await self.config.channel(message.channel).repeatMissedSnacktimes()
await self.config.channel(message.channel).repeatMissedSnacktimes.set(repeat_missed_snacktimes + 1)
await asyncio.sleep(2)
if (repeat_missed_snacktimes + 1) > 9: # move to a setting
await message.channel.send(await self.get_response(message, "LONELY"))
deliver_channels = await self.config.guild(message.guild).DELIVER_CHANNELS()
                    # list.remove() mutates in place and returns None, so save the list itself
                    deliver_channels.remove(message.channel.id)
                    await self.config.guild(message.guild).DELIVER_CHANNELS.set(deliver_channels)
await self.config.channel(message.channel).repeatMissedSnacktimes.set(0)
        except Exception:
            log.error("Snacktime: Failed to send message in startSnack", exc_info=True)
self.acceptInput[scid] = False
self.snackInProgress[scid] = False
@commands.Cog.listener()
async def on_message(self, message):
if not message.guild:
return
if message.author.bot:
return
if not message.channel.permissions_for(message.guild.me).send_messages:
return
deliver_channels = await self.config.guild(message.guild).DELIVER_CHANNELS()
if not deliver_channels:
return
if message.channel.id not in deliver_channels:
return
scid = f"{message.guild.id}-{message.channel.id}"
if message.author.id != self.bot.user.id:
# if nobody has said anything since start
if self.previousSpeaker.get(scid, None) == None:
self.previousSpeaker[scid] = message.author.id
# if new speaker
elif self.previousSpeaker[scid] != message.author.id:
self.previousSpeaker[scid] = message.author.id
msgTime = self.bot.loop.time()
# if there's a scheduled snack
if self.snackSchedule.get(scid, None) != None:
# if it's time for a snack
if msgTime > self.snackSchedule[scid]:
# 1 schedule at a time, so remove schedule
self.snackSchedule[scid] = None
self.snackInProgress[scid] = True
# wait to make it more natural
naturalWait = randint(30, 240)
log.debug(f"Snacktime: snack trigger msg: {message.content}")
log.debug(f"Snacktime: Waiting {str(naturalWait)} seconds")
await asyncio.sleep(naturalWait)
# start snacktime
await self.startSnack(message)
# if no snack coming, schedule one
elif self.snackInProgress.get(scid, False) == False and not self.startLock.get(scid, False):
self.msgsPassed[scid] = self.msgsPassed.get(scid, 0) + 1
# check for collisions
msgs_before_event = await self.config.guild(message.guild).MSGS_BEFORE_EVENT()
if self.msgsPassed[scid] > msgs_before_event:
self.startLock[scid] = True
if self.lockRequests.get(scid, None) == None:
self.lockRequests[scid] = []
self.lockRequests[scid].append(message)
await asyncio.sleep(1)
log.debug(
f"Snacktime: :-+-|||||-+-: Lock request: {str(self.lockRequests[scid][0] == message)}"
)
if self.lockRequests[scid][0] == message:
await asyncio.sleep(5)
log.debug(f"Snacktime: {message.author.name} - I got the Lock")
self.lockRequests[scid] = []
# someone got through already
if self.msgsPassed[scid] < msgs_before_event or self.snackInProgress.get(scid, False):
log.debug("Snacktime: Lock: someone got through already.")
return
else:
log.debug(
"Snacktime: Lock: looks like i'm in the clear. lifting lock. If someone comes now, they should get the lock"
)
self.msgsPassed[scid] = msgs_before_event
self.startLock[scid] = False
else:
log.debug(f"Snacktime: {message.author.name} Failed lock")
return
if self.msgsPassed[scid] == msgs_before_event:
# schedule a snack
log.debug(f"Snacktime: activity: {message.content}")
guild_data = await self.config.guild(message.guild).all()
timeTillSnack = guild_data["EVENT_START_DELAY"] + randint(
-guild_data["EVENT_START_DELAY_VARIANCE"], guild_data["EVENT_START_DELAY_VARIANCE"],
)
log.debug(f"Snacktime: {str(timeTillSnack)} seconds till snacktime")
self.snacktimePrediction[scid] = msgTime + guild_data["EVENT_START_DELAY"]
self.snackSchedule[scid] = msgTime + timeTillSnack
self.msgsPassed[scid] = 0
        # it's snacktime! who wants snacks?
if self.acceptInput.get(scid, False):
if message.author.id not in self.alreadySnacked.get(scid, []):
agree_phrases = [
"holds out hand",
"im ready",
"i'm ready",
"hit me up",
"hand over",
"hand me",
"kindly",
"i want",
"i'll have",
"ill have",
"yes",
"pls",
"plz",
"please",
"por favor",
"can i",
"i'd like",
"i would",
"may i",
"in my mouth",
"in my belly",
"snack me",
"gimme",
"give me",
"i'll take",
"ill take",
"i am",
"about me",
"me too",
"of course",
]
userWants = False
for agreePhrase in agree_phrases:
# no one word answers
if agreePhrase in message.content.lower() and len(message.content.split()) > 1:
userWants = True
break
if userWants:
if self.alreadySnacked.get(scid, None) == None:
self.alreadySnacked[scid] = []
self.alreadySnacked[scid].append(message.author.id)
                    # If the user is blacklisted, don't give them anything.
                    # They are still added to the list above so this check isn't repeated later.
if await self.bot.allowed_by_whitelist_blacklist(
who=message.author
) is False:
return
await asyncio.sleep(randint(1, 6))
snack_amount = await self.config.guild(message.guild).SNACK_AMOUNT()
snackAmt = randint(1, snack_amount)
try:
if self.acceptInput.get(scid, False):
resp = await self.get_response(message, "GIVE")
resp = resp.format(message.author.name, snackAmt)
await message.channel.send(resp)
else:
resp = await self.get_response(message, "LAST_SECOND")
resp = resp.format(message.author.name, snackAmt)
await message.channel.send(resp)
try:
await bank.deposit_credits(message.author, snackAmt)
except BalanceTooHigh as b:
await bank.set_balance(message.author, b.max_balance)
except Exception as e:
log.info(
f"Failed to send pb message. {message.author.name} didn't get pb\n", exc_info=True,
)
else:
more_phrases = [
"more pl",
"i have some more",
"i want more",
"i have another",
"i have more",
"more snack",
]
userWants = False
for morePhrase in more_phrases:
if morePhrase in message.content.lower():
userWants = True
break
if userWants:
if await self.bot.allowed_by_whitelist_blacklist(
who=message.author
) is False:
return
await asyncio.sleep(randint(1, 6))
if self.acceptInput.get(scid, False):
resp = await self.get_response(message, "GREEDY")
await message.channel.send(resp.format(message.author.name))
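# Hedged note (not from the original cog): Red cogs like this one are normally
# registered from the package's __init__.py with a setup() hook along these lines;
# newer Red releases expect an async setup that awaits bot.add_cog().
#
#   def setup(bot):
#       bot.add_cog(Snacktime(bot))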
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from datetime import datetime
from logging import getLogger
import collections
from sqlalchemy.sql.expression import tuple_
from ggrc import db
from ggrc import models
from ggrc.automapper.rules import rules
from ggrc.login import get_current_user
from ggrc.models.audit import Audit
from ggrc.models.relationship import Relationship
from ggrc.models.request import Request
from ggrc.rbac.permissions import is_allowed_update
from ggrc.services.common import Resource, get_cache
from ggrc.utils import benchmark, with_nop
# pylint: disable=invalid-name
logger = getLogger(__name__)
class Stub(collections.namedtuple("Stub", ["type", "id"])):
@classmethod
def from_source(cls, relationship):
return Stub(relationship.source_type, relationship.source_id)
@classmethod
def from_destination(cls, relationship):
return Stub(relationship.destination_type, relationship.destination_id)
class AutomapperGenerator(object):
def __init__(self, use_benchmark=True):
self.processed = set()
self.queue = set()
self.cache = collections.defaultdict(set)
self.instance_cache = {}
self.auto_mappings = set()
if use_benchmark:
self.benchmark = benchmark
else:
self.benchmark = with_nop
def related(self, obj):
if obj in self.cache:
return self.cache[obj]
    # Pre-fetch the neighborhood of every enqueued object since we're going to need
    # those results in a few steps. This drastically reduces the number of queries.
stubs = {s for rel in self.queue for s in rel}
stubs.add(obj)
# Union is here to convince mysql to use two separate indices and
    # merge the results. Just using `or` results in a full-table scan.
# Manual column list avoids loading the full object which would also try to
# load related objects
cols = db.session.query(
Relationship.source_type, Relationship.source_id,
Relationship.destination_type, Relationship.destination_id)
relationships = cols.filter(
tuple_(Relationship.source_type, Relationship.source_id).in_(
[(s.type, s.id) for s in stubs]
)
).union_all(
cols.filter(
tuple_(Relationship.destination_type,
Relationship.destination_id).in_(
[(s.type, s.id) for s in stubs]))
).all()
batch_requests = collections.defaultdict(set)
for (src_type, src_id, dst_type, dst_id) in relationships:
src = Stub(src_type, src_id)
dst = Stub(dst_type, dst_id)
# only store a neighbor if we queried for it since this way we know
# we'll be storing complete neighborhood by the end of the loop
batch_requests[src_type].add(src_id)
batch_requests[dst_type].add(dst_id)
if src in stubs:
self.cache[src].add(dst)
if dst in stubs:
self.cache[dst].add(src)
for type_, ids in batch_requests.iteritems():
model = getattr(models.all_models, type_)
instances = model.query.filter(model.id.in_(ids))
for instance in instances:
self.instance_cache[Stub(type_, instance.id)] = instance
return self.cache[obj]
def relate(self, src, dst):
if src < dst:
return (src, dst)
else:
return (dst, src)
def generate_automappings(self, relationship):
self.auto_mappings = set()
with self.benchmark("Automapping generate_automappings"):
# initial relationship is special since it is already created and
# processing it would abort the loop so we manually enqueue the
# neighborhood
src = Stub.from_source(relationship)
dst = Stub.from_destination(relationship)
self._step(src, dst)
self._step(dst, src)
count = 0
while len(self.queue) > 0:
if len(self.auto_mappings) > rules.count_limit:
break
count += 1
src, dst = entry = self.queue.pop()
if not (self._can_map_to(src, relationship) and
self._can_map_to(dst, relationship)):
continue
created = self._ensure_relationship(src, dst)
self.processed.add(entry)
if not created:
# If the edge already exists it means that auto mappings for it have
# already been processed and it is safe to cut here.
continue
self._step(src, dst)
self._step(dst, src)
if len(self.auto_mappings) <= rules.count_limit:
self._flush(relationship)
else:
relationship._json_extras = {
'automapping_limit_exceeded': True
}
def _can_map_to(self, obj, parent_relationship):
return is_allowed_update(obj.type, obj.id, parent_relationship.context)
def _flush(self, parent_relationship):
if len(self.auto_mappings) == 0:
return
with self.benchmark("Automapping flush"):
current_user = get_current_user()
now = datetime.now()
# We are doing an INSERT IGNORE INTO here to mitigate a race condition
# that happens when multiple simultaneous requests create the same
# automapping. If a relationship object fails our unique constraint
# it means that the mapping was already created by another request
# and we can safely ignore it.
inserter = Relationship.__table__.insert().prefix_with("IGNORE")
original = self.relate(Stub.from_source(parent_relationship),
Stub.from_destination(parent_relationship))
db.session.execute(inserter.values([{
"id": None,
"modified_by_id": current_user.id,
"created_at": now,
"updated_at": now,
"source_id": src.id,
"source_type": src.type,
"destination_id": dst.id,
"destination_type": dst.type,
"context_id": None,
"status": None,
"automapping_id": parent_relationship.id}
for src, dst in self.auto_mappings
if (src, dst) != original])) # (src, dst) is sorted
cache = get_cache(create=True)
if cache:
# Add inserted relationships into new objects collection of the cache,
# so that they will be logged within event and appropriate revisions
# will be created.
cache.new.update(
(relationship, relationship.log_json())
for relationship in Relationship.query.filter_by(
automapping_id=parent_relationship.id,
modified_by_id=current_user.id,
created_at=now,
updated_at=now,
)
)
def _step(self, src, dst):
explicit, implicit = rules[src.type, dst.type]
self._step_explicit(src, dst, explicit)
self._step_implicit(src, dst, implicit)
def _step_explicit(self, src, dst, explicit):
if len(explicit) != 0:
src_related = (o for o in self.related(src)
if o.type in explicit and o != dst)
for r in src_related:
entry = self.relate(r, dst)
if entry not in self.processed:
self.queue.add(entry)
def _step_implicit(self, src, dst, implicit):
if not hasattr(models.all_models, src.type):
logger.warning('Automapping by attr: cannot find model %s', src.type)
return
instance = self.instance_cache.get(src)
if instance is None:
model = getattr(models.all_models, src.type)
instance = model.query.filter(model.id == src.id).first()
self.instance_cache[src] = instance
if instance is None:
logger.warning("Automapping by attr: cannot load model %s: %s",
src.type, src.id)
return
for attr in implicit:
if hasattr(instance, attr.name):
values = getattr(instance, attr.name)
if not isinstance(values, collections.Iterable):
values = [values]
for value in values:
if value is not None:
entry = self.relate(Stub(value.type, value.id), dst)
if entry not in self.processed:
self.queue.add(entry)
else:
logger.warning('Automapping by attr: %s is None', attr.name)
else:
logger.warning(
'Automapping by attr: object %s has no attribute %s',
src, attr.name,
)
def _ensure_relationship(self, src, dst):
if dst in self.cache.get(src, []):
return False
if src in self.cache.get(dst, []):
return False
self.auto_mappings.add((src, dst))
if src in self.cache:
self.cache[src].add(dst)
if dst in self.cache:
self.cache[dst].add(src)
return True
def handle_relationship_post(source, destination):
"""Handle posting of special relationships.
This function handles direct relationships that do not have a relationship
object. A fake object is created with source and destination and auto
mappings are then generated.
Args:
source: Source model of relationship
destination: Destination model of relationship
"""
if source is None:
logger.warning("Automapping request listener: "
"no source, no mappings created")
return
if destination is None:
logger.warning("Automapping request listener: "
"no destination, no mappings created")
return
relationship = Relationship(source_type=source.type,
source_id=source.id,
destination_type=destination.type,
destination_id=destination.id)
AutomapperGenerator().generate_automappings(relationship)
def generate_relationship_snapshots(obj):
"""Generate needed snapshots for a given relationship.
If we post a relationship for a snapshotable object and an Audit, we will map
that object to audits program, make a snapshot for it and map the snapshot to
the Audit.
NOTE: this function will be deprecated soon.
Args:
obj: Relationship object.
"""
from ggrc.snapshotter import rules as snapshot_rules
parent = None
child = None
if "Audit" in obj.source_type:
parent = obj.source
child = obj.destination
elif "Audit" in obj.destination_type:
parent = obj.destination
child = obj.source
if parent and child.type in snapshot_rules.Types.all:
db.session.add(models.Snapshot(
parent=parent,
child_id=child.id,
child_type=child.type,
update_revision="new",
context=parent.context,
modified_by=get_current_user()
))
def register_automapping_listeners():
"""Register event listeners for auto mapper."""
# pylint: disable=unused-variable,unused-argument
@Resource.collection_posted.connect_via(Relationship)
def handle_relationship_collection_post(sender, objects=None, **kwargs):
"""Handle bulk creation of relationships.
This handler reuses auto mapper cache and is more efficient than handling
one object at a time.
Args:
objects: list of relationship Models.
"""
automapper = AutomapperGenerator()
for obj in objects:
if obj is None:
logger.warning("Automapping listener: no obj, no mappings created")
return
generate_relationship_snapshots(obj)
automapper.generate_automappings(obj)
@Resource.collection_posted.connect_via(Request)
def handle_requests_collection_post(sender, objects=None, **kwargs):
for obj in objects:
handle_relationship_post(obj, obj.audit)
@Resource.model_put.connect_via(Request)
def handle_request(sender, obj=None, src=None, service=None):
handle_relationship_post(obj, obj.audit)
@Resource.collection_posted.connect_via(Audit)
def handle_audits_collection_post(sender, objects=None, **kwargs):
for obj in objects:
handle_relationship_post(obj, obj.program)
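# Minimal illustration (hypothetical ids, no database access) of how the generator
# canonicalises relationship edges: relate() orders the two Stub namedtuples so that
# (a, b) and (b, a) produce the same queue/processed key.
if __name__ == "__main__":
  _gen = AutomapperGenerator(use_benchmark=False)
  _a = Stub("Program", 1)
  _b = Stub("Audit", 2)
  assert _gen.relate(_a, _b) == _gen.relate(_b, _a)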
|
# encoding: utf-8
from typing import Optional, Union
class Indent:
class IdentStart:
def __init__(self, text):
self.text = text
class IdentEnd:
def __init__(self, text):
self.text = text
class IdentEndLater:
def __init__(self, text):
self.text = text
def __init__(self, text: Optional[Union[str, "TextHolder"]] = None):
self.text = text
def __add__(self, other: str):
assert self.text is None
return Indent(other)
def __radd__(self, other: str):
assert self.text is None
return Indent.IdentStart(other)
def __rsub__(self, other: str):
assert self.text is None
return Indent.IdentEnd(other)
class IndentLater:
def __rsub__(self, other: str):
return Indent.IdentEndLater(other)
class TextHolder:
def __init__(self, text: Optional[str] = None):
super().__init__()
if text is None:
text = ""
self.text = text
self.ident_text = " "
self._ident = 0
def __add__(self, other: Union[str, int]):
if isinstance(other, Indent.IdentStart):
self.append(other.text)
self.ident(1)
elif isinstance(other, Indent.IdentEnd):
self.ident(-1)
self.append(other.text)
elif isinstance(other, Indent.IdentEndLater):
self.append(other.text)
self.ident(-1)
elif isinstance(other, Indent):
self.append(
TextHolder(str(other.text)).ident_all(
n=self._ident + 1, ident_text=self.ident_text
),
add_ident=False,
)
elif isinstance(other, str):
self.append(other)
elif isinstance(other, TextHolder):
self.append(
TextHolder(str(other.text)).ident_all(
n=self._ident, ident_text=self.ident_text
),
add_ident=False,
)
elif isinstance(other, int):
self.ident(other)
else:
raise TypeError(f"can only add str or int, but {type(other)} got")
return self
def __sub__(self, other):
if isinstance(other, int):
self.ident(-other)
else:
raise TypeError(f"can only add str or int, but {type(other)} got")
return self
def __bool__(self):
return bool(self.text)
def __str__(self):
return self.text
def append(
self,
text: Union[str, "TextHolder"],
ensure_new_line=True,
ignore_empty=True,
add_ident=True,
):
strtext = str(text)
if ignore_empty and not strtext:
return self
if not strtext.endswith("\n") and ensure_new_line:
strtext += "\n"
if add_ident:
self.text += self._ident * self.ident_text + strtext
else:
self.text += strtext
return self
def ident_all(self, n: int = 1, ident_text: str = None):
if ident_text is None:
ident_text = self.ident_text
text = self.text
if text.endswith("\n"):
text = text[:-1]
return "\n".join([ident_text * n + i for i in text.split("\n")])
def ident(self, n: int = 1):
self._ident += n
return self
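# A small, hypothetical usage sketch of the Indent/TextHolder DSL defined above:
# "text + Indent()" opens an indented region after the appended line, "holder -= 1"
# closes it again, and __str__ returns the accumulated, indented text.
if __name__ == "__main__":
    holder = TextHolder()
    holder += "def greet():" + Indent()   # append the line, then indent what follows
    holder += "print('hello')"            # emitted one level deeper
    holder -= 1                           # drop back to the outer level
    print(holder)
    # def greet():
    #     print('hello')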
|
from stix_shifter_utils.modules.base.stix_transmission.base_sync_connector import BaseSyncConnector
from stix_shifter_utils.stix_transmission.utils.RestApiClient import RestApiClient
from stix2matcher.matcher import Pattern
from stix2matcher.matcher import MatchListener
from stix2validator import validate_instance
import json
import re
from stix_shifter_utils.utils.error_response import ErrorResponder
class UnexpectedResponseException(Exception):
pass
class Connector(BaseSyncConnector):
def __init__(self, connection, configuration):
self.connector = __name__.split('.')[1]
self.connection = connection
self.configuration = configuration
self.timeout = connection['options'].get('timeout')
self.bundle_url = self.connection.get('url')
auth = None
conf_auth = configuration.get('auth', {})
if 'username' in conf_auth and 'password' in conf_auth:
auth = (conf_auth['username'], conf_auth['password'])
self.client = RestApiClient(None,
auth=auth,
url_modifier_function=lambda host_port, endpoint, headers: f'{endpoint}')
    # We re-implement this method so we can fetch all the "bindings", as the upstream
    # matcher helper only returns the first one.
def match(self, pattern, observed_data_sdos, verbose=False):
compiled_pattern = Pattern(pattern)
matcher = MatchListener(observed_data_sdos, verbose)
compiled_pattern.walk(matcher)
found_bindings = matcher.matched()
if found_bindings:
matching_sdos = []
for binding in found_bindings:
matches = [match for match in matcher.get_sdos_from_binding(binding) if match not in matching_sdos]
matching_sdos.extend(matches)
else:
matching_sdos = []
return matching_sdos
def ping_connection(self):
return_obj = dict()
response = self.client.call_api(self.bundle_url, 'head', timeout=self.timeout)
response_txt = response.raise_for_status()
if response.code == 200:
return_obj['success'] = True
elif response.code == 301:
self.bundle_url = response.headers.get('Location')
return self.ping_connection()
else:
ErrorResponder.fill_error(return_obj, response_txt, ['message'], connector=self.connector)
return return_obj
def create_results_connection(self, search_id, offset, length):
observations = []
return_obj = dict()
if self.test_START_STOP_format(search_id):
# Remove leading 't' before timestamps from search_id. search_id is the stix pattern
            search_id = re.sub(r"(?<=START\s)t|(?<=STOP\s)t", "", search_id)
response = self.client.call_api(self.bundle_url, 'get', timeout=self.timeout)
if response.code != 200:
response_txt = response.raise_for_status()
if ErrorResponder.is_plain_string(response_txt):
ErrorResponder.fill_error(return_obj, message=response_txt, connector=self.connector)
elif ErrorResponder.is_json_string(response_txt):
response_json = json.loads(response_txt)
ErrorResponder.fill_error(return_obj, response_json, ['reason'], connector=self.connector)
else:
raise UnexpectedResponseException
else:
try:
response_txt = response.read().decode('utf-8')
bundle = json.loads(response_txt)
if "stix_validator" in self.connection['options'] and self.connection['options'].get("stix_validator") is True:
results = validate_instance(bundle)
if results.is_valid is not True:
ErrorResponder.fill_error(return_obj, message='Invalid Objects in STIX Bundle.', connector=self.connector)
return return_obj
for obj in bundle["objects"]:
if obj["type"] == "observed-data":
observations.append(obj)
# Pattern match
try:
results = self.match(search_id, observations, False)
if len(results) != 0:
return_obj['success'] = True
return_obj['data'] = results[int(offset):int(offset + length)]
else:
return_obj['success'] = True
return_obj['data'] = []
except Exception as ex:
ErrorResponder.fill_error(return_obj, message='Object matching error: ' + str(ex), connector=self.connector)
except Exception as ex:
ErrorResponder.fill_error(return_obj, message='Invalid STIX bundle. Malformed JSON: ' + str(ex), connector=self.connector)
return return_obj
def delete_query_connection(self, search_id):
return_obj = dict()
return_obj['success'] = True
return return_obj
def test_START_STOP_format(self, query_string) -> bool:
# Matches START t'1234-56-78T00:00:00.123Z' STOP t'1234-56-78T00:00:00.123Z'
pattern = "START\s(t'\d{4}(-\d{2}){2}T\d{2}(:\d{2}){2}(\.\d+)?Z')\sSTOP"
match = re.search(pattern, query_string)
return bool(match)
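    # Small, hypothetical illustration of the qualifier check above: only patterns that
    # carry a "START t'...' STOP t'...'" qualifier are rewritten (the leading t is
    # stripped) before being handed to the matcher in create_results_connection().
    #
    #   qualified = ("[ipv4-addr:value = '127.0.0.1'] "
    #                "START t'2021-01-01T00:00:00.000Z' STOP t'2021-01-02T00:00:00.000Z'")
    #   Connector.test_START_STOP_format(None, qualified)                             # -> True
    #   Connector.test_START_STOP_format(None, "[ipv4-addr:value = '127.0.0.1']")     # -> False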
|
import dlib
class HogDetector:
def __init__(self):
self.detector = dlib.get_frontal_face_detector()
def detect(self, frame):
bboxes = []
# landmarks = []
dets = self.detector(frame, 1)
for k, d in enumerate(dets):
bboxes.append(
(d.left(), d.top(), d.right() - d.left(), d.bottom() - d.top())
)
# shape = self.predictor(frame, d)
# landmarks.append([(p.x, p.y) for p in shape.parts()])
return bboxes
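# Hypothetical usage sketch (assumes dlib is installed and "face.jpg" exists): the
# detector returns plain (x, y, w, h) tuples in pixel coordinates.
if __name__ == "__main__":
    image = dlib.load_rgb_image("face.jpg")
    for (x, y, w, h) in HogDetector().detect(image):
        print("face at x=%d y=%d w=%d h=%d" % (x, y, w, h))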
|
import numpy as np
import cv2
def rodrigues2matrix_cv(params):
rvec = np.array(params,dtype=np.float64)
rvec.shape = (1,3)
Rmat, jacobian = cv2.Rodrigues(rvec)
return Rmat
def rodrigues2matrix(params):
# Written after the docs at
# http://opencv.itseez.com/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#rodrigues
    try:
        rvec = np.array(params, dtype=np.float64)  # np.float was removed from NumPy
        rvec.shape = (1, 3)
    except Exception:
        print('bad rvec', params)  # rvec may be unbound here, so report the raw input
        raise
theta = np.sqrt(np.sum(rvec**2))
if theta==0:
rvec = rvec
else:
rvec = rvec/theta
r = rvec[0] # drop dim
s = np.sin(theta)
c = np.cos(theta)
R = c*np.eye(3) + (1-c)*rvec*rvec.T + s*np.array([[0, -r[2], r[1]],
[r[2], 0, -r[0]],
[-r[1], r[0], 0]])
    # -R.T might also be considered a valid rotation matrix, but it
    # does not have an eigenvector with eigenvalue 1.
return R
def matrix2rodrigues(R):
Rmat = np.array(R,dtype=np.float64)
assert Rmat.shape == (3,3)
rvec, jacobian = cv2.Rodrigues(Rmat)
return rvec
def rodrigues2angle_axis(params):
rvec = np.array(params)
rvec.shape = (1,3)
theta = np.sqrt(np.sum(rvec**2))
if theta==0:
rvec = rvec
else:
rvec = rvec/theta
r = rvec[0] # drop dim
return theta, r
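# Consistency-check sketch (assumes OpenCV is importable): the pure-NumPy Rodrigues
# implementation above should agree with the cv2-backed one for an arbitrary rotation
# vector, and matrix2rodrigues() should round-trip it.
if __name__ == "__main__":
    rvec_demo = [0.1, -0.2, 0.3]
    R_np = rodrigues2matrix(rvec_demo)
    R_cv = rodrigues2matrix_cv(rvec_demo)
    assert np.allclose(R_np, R_cv, atol=1e-8)
    assert np.allclose(matrix2rodrigues(R_cv).ravel(), rvec_demo, atol=1e-8)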
|
# -*- coding: utf-8 -*-
import collections
from datetime import datetime
import re
import nose
from nose.tools import assert_equal
import numpy as np
from pandas.tslib import iNaT, NaT
from pandas import Series, DataFrame, date_range, DatetimeIndex, Timestamp, Float64Index
from pandas import compat
from pandas.compat import range, long, lrange, lmap, u
from pandas.core.common import notnull, isnull, array_equivalent
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.core.config as cf
_multiprocess_can_split_ = True
def test_mut_exclusive():
msg = "mutually exclusive arguments: '[ab]' and '[ab]'"
with tm.assertRaisesRegexp(TypeError, msg):
com._mut_exclusive(a=1, b=2)
assert com._mut_exclusive(a=1, b=None) == 1
assert com._mut_exclusive(major=None, major_axis=None) is None
def test_is_sequence():
is_seq = com.is_sequence
assert(is_seq((1, 2)))
assert(is_seq([1, 2]))
assert(not is_seq("abcd"))
assert(not is_seq(u("abcd")))
assert(not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert(not is_seq(A()))
def test_get_callable_name():
from functools import partial
getname = com._get_callable_name
def fn(x):
return x
lambda_ = lambda x: x
part1 = partial(fn)
part2 = partial(part1)
class somecall(object):
def __call__(self):
return x
assert getname(fn) == 'fn'
assert getname(lambda_)
assert getname(part1) == 'fn'
assert getname(part2) == 'fn'
assert getname(somecall()) == 'somecall'
assert getname(1) is None
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
for s in [tm.makeFloatSeries(),tm.makeStringSeries(),
tm.makeObjectSeries(),tm.makeTimeSeries(),tm.makePeriodSeries()]:
assert(isinstance(isnull(s), Series))
def test_isnull():
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert not isnull(np.inf)
assert not isnull(-np.inf)
# series
for s in [tm.makeFloatSeries(),tm.makeStringSeries(),
tm.makeObjectSeries(),tm.makeTimeSeries(),tm.makePeriodSeries()]:
assert(isinstance(isnull(s), Series))
# frame
for df in [tm.makeTimeDataFrame(),tm.makePeriodFrame(),tm.makeMixedDataFrame()]:
result = isnull(df)
expected = df.apply(isnull)
tm.assert_frame_equal(result, expected)
# panel
for p in [ tm.makePanel(), tm.makePeriodPanel(), tm.add_nans(tm.makePanel()) ]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel_equal(result, expected)
# panel 4d
for p in [ tm.makePanel4D(), tm.add_nans_panel4d(tm.makePanel4D()) ]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel4d_equal(result, expected)
def test_isnull_lists():
result = isnull([[False]])
exp = np.array([[False]])
assert(np.array_equal(result, exp))
result = isnull([[1], [2]])
exp = np.array([[False], [False]])
assert(np.array_equal(result, exp))
# list of strings / unicode
result = isnull(['foo', 'bar'])
assert(not result.any())
result = isnull([u('foo'), u('bar')])
assert(not result.any())
def test_isnull_nat():
result = isnull([NaT])
exp = np.array([True])
assert(np.array_equal(result, exp))
result = isnull(np.array([NaT], dtype=object))
exp = np.array([True])
assert(np.array_equal(result, exp))
def test_isnull_datetime():
assert (not isnull(datetime.now()))
assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
assert(notnull(idx).all())
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isnull(idx)
assert(mask[0])
assert(not mask[1:].any())
# GH 9129
pidx = idx.to_period(freq='M')
mask = isnull(pidx)
assert(mask[0])
assert(not mask[1:].any())
mask = isnull(pidx[1:])
assert(not mask.any())
class TestIsNull(tm.TestCase):
def test_0d_array(self):
self.assertTrue(isnull(np.array(np.nan)))
self.assertFalse(isnull(np.array(0.0)))
self.assertFalse(isnull(np.array(0)))
# test object dtype
self.assertTrue(isnull(np.array(np.nan, dtype=object)))
self.assertFalse(isnull(np.array(0.0, dtype=object)))
self.assertFalse(isnull(np.array(0, dtype=object)))
def test_downcast_conv():
# test downcasting
arr = np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995])
result = com._possibly_downcast_to_dtype(arr, 'infer')
assert (np.array_equal(result, arr))
arr = np.array([8., 8., 8., 8., 8.9999999999995])
result = com._possibly_downcast_to_dtype(arr, 'infer')
expected = np.array([8, 8, 8, 8, 9])
assert (np.array_equal(result, expected))
arr = np.array([8., 8., 8., 8., 9.0000000000005])
result = com._possibly_downcast_to_dtype(arr, 'infer')
expected = np.array([8, 8, 8, 8, 9])
assert (np.array_equal(result, expected))
# conversions
expected = np.array([1,2])
for dtype in [np.float64,object,np.int64]:
arr = np.array([1.0,2.0],dtype=dtype)
result = com._possibly_downcast_to_dtype(arr,'infer')
tm.assert_almost_equal(result, expected)
expected = np.array([1.0,2.0,np.nan])
for dtype in [np.float64,object]:
arr = np.array([1.0,2.0,np.nan],dtype=dtype)
result = com._possibly_downcast_to_dtype(arr,'infer')
tm.assert_almost_equal(result, expected)
# empties
for dtype in [np.int32,np.float64,np.float32,np.bool_,np.int64,object]:
arr = np.array([],dtype=dtype)
result = com._possibly_downcast_to_dtype(arr,'int64')
tm.assert_almost_equal(result, np.array([],dtype=np.int64))
assert result.dtype == np.int64
def test_array_equivalent():
assert array_equivalent(np.array([np.nan, np.nan]),
np.array([np.nan, np.nan]))
assert array_equivalent(np.array([np.nan, 1, np.nan]),
np.array([np.nan, 1, np.nan]))
assert array_equivalent(np.array([np.nan, None], dtype='object'),
np.array([np.nan, None], dtype='object'))
assert array_equivalent(np.array([np.nan, 1+1j], dtype='complex'),
np.array([np.nan, 1+1j], dtype='complex'))
assert not array_equivalent(np.array([np.nan, 1+1j], dtype='complex'),
np.array([np.nan, 1+2j], dtype='complex'))
assert not array_equivalent(np.array([np.nan, 1, np.nan]),
np.array([np.nan, 2, np.nan]))
assert not array_equivalent(np.array(['a', 'b', 'c', 'd']), np.array(['e', 'e']))
assert array_equivalent(Float64Index([0, np.nan]), Float64Index([0, np.nan]))
assert not array_equivalent(Float64Index([0, np.nan]), Float64Index([1, np.nan]))
assert array_equivalent(DatetimeIndex([0, np.nan]), DatetimeIndex([0, np.nan]))
assert not array_equivalent(DatetimeIndex([0, np.nan]), DatetimeIndex([1, np.nan]))
def test_datetimeindex_from_empty_datetime64_array():
for unit in [ 'ms', 'us', 'ns' ]:
idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit))
assert(len(idx) == 0)
def test_nan_to_nat_conversions():
df = DataFrame(dict({
'A' : np.asarray(lrange(10),dtype='float64'),
'B' : Timestamp('20010101') }))
df.iloc[3:6,:] = np.nan
result = df.loc[4,'B'].value
assert(result == iNaT)
s = df['B'].copy()
s._data = s._data.setitem(indexer=tuple([slice(8,9)]),value=np.nan)
assert(isnull(s[8]))
# numpy < 1.7.0 is wrong
from distutils.version import LooseVersion
if LooseVersion(np.__version__) >= '1.7.0':
assert(s[8].value == np.datetime64('NaT').astype(np.int64))
def test_any_none():
assert(com._any_none(1, 2, 3, None))
assert(not com._any_none(1, 2, 3, 4))
def test_all_not_none():
assert(com._all_not_none(1, 2, 3, 4))
assert(not com._all_not_none(1, 2, 3, None))
assert(not com._all_not_none(None, None, None, None))
def test_repr_binary_type():
import string
letters = string.ascii_letters
btype = compat.binary_type
try:
raw = btype(letters, encoding=cf.get_option('display.encoding'))
except TypeError:
raw = btype(letters)
b = compat.text_type(compat.bytes_to_str(raw))
res = com.pprint_thing(b, quote_strings=True)
assert_equal(res, repr(b))
res = com.pprint_thing(b, quote_strings=False)
assert_equal(res, b)
def test_adjoin():
data = [['a', 'b', 'c'],
['dd', 'ee', 'ff'],
['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = com.adjoin(2, *data)
assert(adjoined == expected)
def test_iterpairs():
data = [1, 2, 3, 4]
expected = [(1, 2),
(2, 3),
(3, 4)]
result = list(com.iterpairs(data))
assert(result == expected)
def test_split_ranges():
def _bin(x, width):
"return int(x) as a base2 string of given width"
return ''.join(str((x >> i) & 1) for i in range(width - 1, -1, -1))
def test_locs(mask):
nfalse = sum(np.array(mask) == 0)
remaining = 0
for s, e in com.split_ranges(mask):
remaining += e - s
assert 0 not in mask[s:e]
# make sure the total items covered by the ranges are a complete cover
assert remaining + nfalse == len(mask)
# exhaustively test all possible mask sequences of length 8
ncols = 8
for i in range(2 ** ncols):
cols = lmap(int, list(_bin(i, ncols))) # count up in base2
mask = [cols[i] == 1 for i in range(len(cols))]
test_locs(mask)
# base cases
test_locs([])
test_locs([0])
test_locs([1])
def test_indent():
s = 'a b c\nd e f'
result = com.indent(s, spaces=6)
assert(result == ' a b c\n d e f')
def test_banner():
ban = com.banner('hi')
assert(ban == ('%s\nhi\n%s' % ('=' * 80, '=' * 80)))
def test_map_indices_py():
data = [4, 3, 2, 1]
expected = {4: 0, 3: 1, 2: 2, 1: 3}
result = com.map_indices_py(data)
assert(result == expected)
def test_union():
a = [1, 2, 3]
b = [4, 5, 6]
union = sorted(com.union(a, b))
assert((a + b) == union)
def test_difference():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.difference(b, a))
assert([4, 5, 6] == inter)
def test_intersection():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.intersection(a, b))
assert(a == inter)
def test_groupby():
values = ['foo', 'bar', 'baz', 'baz2', 'qux', 'foo3']
expected = {'f': ['foo', 'foo3'],
'b': ['bar', 'baz', 'baz2'],
'q': ['qux']}
grouped = com.groupby(values, lambda x: x[0])
for k, v in grouped:
assert v == expected[k]
def test_is_list_like():
passes = ([], [1], (1,), (1, 2), {'a': 1}, set([1, 'a']), Series([1]),
Series([]), Series(['a']).str)
fails = (1, '2', object())
for p in passes:
assert com.is_list_like(p)
for f in fails:
assert not com.is_list_like(f)
def test_is_hashable():
# all new-style classes are hashable by default
class HashableClass(object):
pass
class UnhashableClass1(object):
__hash__ = None
class UnhashableClass2(object):
def __hash__(self):
raise TypeError("Not hashable")
hashable = (
1, 3.14, np.float64(3.14), 'a', tuple(), (1,), HashableClass(),
)
not_hashable = (
[], UnhashableClass1(),
)
abc_hashable_not_really_hashable = (
([],), UnhashableClass2(),
)
for i in hashable:
assert com.is_hashable(i)
for i in not_hashable:
assert not com.is_hashable(i)
for i in abc_hashable_not_really_hashable:
assert not com.is_hashable(i)
# numpy.array is no longer collections.Hashable as of
# https://github.com/numpy/numpy/pull/5326, just test
# pandas.common.is_hashable()
assert not com.is_hashable(np.array([]))
# old-style classes in Python 2 don't appear hashable to
# collections.Hashable but also seem to support hash() by default
if compat.PY2:
class OldStyleClass():
pass
c = OldStyleClass()
assert not isinstance(c, collections.Hashable)
assert com.is_hashable(c)
hash(c) # this will not raise
def test_ensure_int32():
values = np.arange(10, dtype=np.int32)
result = com._ensure_int32(values)
assert(result.dtype == np.int32)
values = np.arange(10, dtype=np.int64)
result = com._ensure_int32(values)
assert(result.dtype == np.int32)
def test_ensure_platform_int():
# verify that when we create certain types of indices
# they remain the correct type under platform conversions
from pandas.core.index import Int64Index
# int64
x = Int64Index([1, 2, 3], dtype='int64')
assert(x.dtype == np.int64)
pi = com._ensure_platform_int(x)
assert(pi.dtype == np.int_)
# int32
x = Int64Index([1, 2, 3], dtype='int32')
assert(x.dtype == np.int32)
pi = com._ensure_platform_int(x)
assert(pi.dtype == np.int_)
# TODO: fix this broken test
# def test_console_encode():
# """
# On Python 2, if sys.stdin.encoding is None (IPython with zmq frontend)
# common.console_encode should encode things as utf-8.
# """
# if compat.PY3:
# raise nose.SkipTest
# with tm.stdin_encoding(encoding=None):
# result = com.console_encode(u"\u05d0")
# expected = u"\u05d0".encode('utf-8')
# assert (result == expected)
def test_is_re():
passes = re.compile('ad'),
fails = 'x', 2, 3, object()
for p in passes:
assert com.is_re(p)
for f in fails:
assert not com.is_re(f)
def test_is_recompilable():
passes = (r'a', u('x'), r'asdf', re.compile('adsf'),
u(r'\u2233\s*'), re.compile(r''))
fails = 1, [], object()
for p in passes:
assert com.is_re_compilable(p)
for f in fails:
assert not com.is_re_compilable(f)
def test_random_state():
import numpy.random as npr
# Check with seed
state = com._random_state(5)
assert_equal(state.uniform(), npr.RandomState(5).uniform())
# Check with random state object
state2 = npr.RandomState(10)
assert_equal(com._random_state(state2).uniform(), npr.RandomState(10).uniform())
# check with no arg random state
assert isinstance(com._random_state(), npr.RandomState)
# Error for floats or strings
with tm.assertRaises(ValueError):
com._random_state('test')
with tm.assertRaises(ValueError):
com._random_state(5.5)
def test_maybe_match_name():
matched = com._maybe_match_name(Series([1], name='x'), Series([2], name='x'))
assert(matched == 'x')
matched = com._maybe_match_name(Series([1], name='x'), Series([2], name='y'))
assert(matched is None)
matched = com._maybe_match_name(Series([1]), Series([2], name='x'))
assert(matched is None)
matched = com._maybe_match_name(Series([1], name='x'), Series([2]))
assert(matched is None)
matched = com._maybe_match_name(Series([1], name='x'), [2])
assert(matched == 'x')
matched = com._maybe_match_name([1], Series([2], name='y'))
assert(matched == 'y')
class TestTake(tm.TestCase):
# standard incompatible fill error
fill_error = re.compile("Incompatible type for fill_value")
_multiprocess_can_split_ = True
def test_1d_with_out(self):
def _test_dtype(dtype, can_hold_na):
data = np.random.randint(0, 2, 4).astype(dtype)
indexer = [2, 1, 0, 1]
out = np.empty(4, dtype=dtype)
com.take_1d(data, indexer, out=out)
expected = data.take(indexer)
tm.assert_almost_equal(out, expected)
indexer = [2, 1, 0, -1]
out = np.empty(4, dtype=dtype)
if can_hold_na:
com.take_1d(data, indexer, out=out)
expected = data.take(indexer)
expected[3] = np.nan
tm.assert_almost_equal(out, expected)
else:
with tm.assertRaisesRegexp(TypeError, self.fill_error):
com.take_1d(data, indexer, out=out)
# no exception o/w
data.take(indexer, out=out)
_test_dtype(np.float64, True)
_test_dtype(np.float32, True)
_test_dtype(np.uint64, False)
_test_dtype(np.uint32, False)
_test_dtype(np.uint16, False)
_test_dtype(np.uint8, False)
_test_dtype(np.int64, False)
_test_dtype(np.int32, False)
_test_dtype(np.int16, False)
_test_dtype(np.int8, False)
_test_dtype(np.object_, True)
_test_dtype(np.bool, False)
def test_1d_fill_nonna(self):
def _test_dtype(dtype, fill_value, out_dtype):
data = np.random.randint(0, 2, 4).astype(dtype)
indexer = [2, 1, 0, -1]
result = com.take_1d(data, indexer, fill_value=fill_value)
assert((result[[0, 1, 2]] == data[[2, 1, 0]]).all())
assert(result[3] == fill_value)
assert(result.dtype == out_dtype)
indexer = [2, 1, 0, 1]
result = com.take_1d(data, indexer, fill_value=fill_value)
assert((result[[0, 1, 2, 3]] == data[indexer]).all())
assert(result.dtype == dtype)
_test_dtype(np.int8, np.int16(127), np.int8)
_test_dtype(np.int8, np.int16(128), np.int16)
_test_dtype(np.int32, 1, np.int32)
_test_dtype(np.int32, 2.0, np.float64)
_test_dtype(np.int32, 3.0 + 4.0j, np.complex128)
_test_dtype(np.int32, True, np.object_)
_test_dtype(np.int32, '', np.object_)
_test_dtype(np.float64, 1, np.float64)
_test_dtype(np.float64, 2.0, np.float64)
_test_dtype(np.float64, 3.0 + 4.0j, np.complex128)
_test_dtype(np.float64, True, np.object_)
_test_dtype(np.float64, '', np.object_)
_test_dtype(np.complex128, 1, np.complex128)
_test_dtype(np.complex128, 2.0, np.complex128)
_test_dtype(np.complex128, 3.0 + 4.0j, np.complex128)
_test_dtype(np.complex128, True, np.object_)
_test_dtype(np.complex128, '', np.object_)
_test_dtype(np.bool_, 1, np.object_)
_test_dtype(np.bool_, 2.0, np.object_)
_test_dtype(np.bool_, 3.0 + 4.0j, np.object_)
_test_dtype(np.bool_, True, np.bool_)
_test_dtype(np.bool_, '', np.object_)
def test_2d_with_out(self):
def _test_dtype(dtype, can_hold_na, writeable=True):
data = np.random.randint(0, 2, (5, 3)).astype(dtype)
data.flags.writeable = writeable
indexer = [2, 1, 0, 1]
out0 = np.empty((4, 3), dtype=dtype)
out1 = np.empty((5, 4), dtype=dtype)
com.take_nd(data, indexer, out=out0, axis=0)
com.take_nd(data, indexer, out=out1, axis=1)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
indexer = [2, 1, 0, -1]
out0 = np.empty((4, 3), dtype=dtype)
out1 = np.empty((5, 4), dtype=dtype)
if can_hold_na:
com.take_nd(data, indexer, out=out0, axis=0)
com.take_nd(data, indexer, out=out1, axis=1)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
expected0[3, :] = np.nan
expected1[:, 3] = np.nan
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
else:
for i, out in enumerate([out0, out1]):
with tm.assertRaisesRegexp(TypeError, self.fill_error):
com.take_nd(data, indexer, out=out, axis=i)
# no exception o/w
data.take(indexer, out=out, axis=i)
for writeable in [True, False]:
# Check that take_nd works both with writeable arrays (in which
# case fast typed memoryviews implementation) and read-only
# arrays alike.
_test_dtype(np.float64, True, writeable=writeable)
_test_dtype(np.float32, True, writeable=writeable)
_test_dtype(np.uint64, False, writeable=writeable)
_test_dtype(np.uint32, False, writeable=writeable)
_test_dtype(np.uint16, False, writeable=writeable)
_test_dtype(np.uint8, False, writeable=writeable)
_test_dtype(np.int64, False, writeable=writeable)
_test_dtype(np.int32, False, writeable=writeable)
_test_dtype(np.int16, False, writeable=writeable)
_test_dtype(np.int8, False, writeable=writeable)
_test_dtype(np.object_, True, writeable=writeable)
_test_dtype(np.bool, False, writeable=writeable)
def test_2d_fill_nonna(self):
def _test_dtype(dtype, fill_value, out_dtype):
data = np.random.randint(0, 2, (5, 3)).astype(dtype)
indexer = [2, 1, 0, -1]
result = com.take_nd(data, indexer, axis=0, fill_value=fill_value)
assert((result[[0, 1, 2], :] == data[[2, 1, 0], :]).all())
assert((result[3, :] == fill_value).all())
assert(result.dtype == out_dtype)
result = com.take_nd(data, indexer, axis=1, fill_value=fill_value)
assert((result[:, [0, 1, 2]] == data[:, [2, 1, 0]]).all())
assert((result[:, 3] == fill_value).all())
assert(result.dtype == out_dtype)
indexer = [2, 1, 0, 1]
result = com.take_nd(data, indexer, axis=0, fill_value=fill_value)
assert((result[[0, 1, 2, 3], :] == data[indexer, :]).all())
assert(result.dtype == dtype)
result = com.take_nd(data, indexer, axis=1, fill_value=fill_value)
assert((result[:, [0, 1, 2, 3]] == data[:, indexer]).all())
assert(result.dtype == dtype)
_test_dtype(np.int8, np.int16(127), np.int8)
_test_dtype(np.int8, np.int16(128), np.int16)
_test_dtype(np.int32, 1, np.int32)
_test_dtype(np.int32, 2.0, np.float64)
_test_dtype(np.int32, 3.0 + 4.0j, np.complex128)
_test_dtype(np.int32, True, np.object_)
_test_dtype(np.int32, '', np.object_)
_test_dtype(np.float64, 1, np.float64)
_test_dtype(np.float64, 2.0, np.float64)
_test_dtype(np.float64, 3.0 + 4.0j, np.complex128)
_test_dtype(np.float64, True, np.object_)
_test_dtype(np.float64, '', np.object_)
_test_dtype(np.complex128, 1, np.complex128)
_test_dtype(np.complex128, 2.0, np.complex128)
_test_dtype(np.complex128, 3.0 + 4.0j, np.complex128)
_test_dtype(np.complex128, True, np.object_)
_test_dtype(np.complex128, '', np.object_)
_test_dtype(np.bool_, 1, np.object_)
_test_dtype(np.bool_, 2.0, np.object_)
_test_dtype(np.bool_, 3.0 + 4.0j, np.object_)
_test_dtype(np.bool_, True, np.bool_)
_test_dtype(np.bool_, '', np.object_)
def test_3d_with_out(self):
def _test_dtype(dtype, can_hold_na):
data = np.random.randint(0, 2, (5, 4, 3)).astype(dtype)
indexer = [2, 1, 0, 1]
out0 = np.empty((4, 4, 3), dtype=dtype)
out1 = np.empty((5, 4, 3), dtype=dtype)
out2 = np.empty((5, 4, 4), dtype=dtype)
com.take_nd(data, indexer, out=out0, axis=0)
com.take_nd(data, indexer, out=out1, axis=1)
com.take_nd(data, indexer, out=out2, axis=2)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
expected2 = data.take(indexer, axis=2)
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
tm.assert_almost_equal(out2, expected2)
indexer = [2, 1, 0, -1]
out0 = np.empty((4, 4, 3), dtype=dtype)
out1 = np.empty((5, 4, 3), dtype=dtype)
out2 = np.empty((5, 4, 4), dtype=dtype)
if can_hold_na:
com.take_nd(data, indexer, out=out0, axis=0)
com.take_nd(data, indexer, out=out1, axis=1)
com.take_nd(data, indexer, out=out2, axis=2)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
expected2 = data.take(indexer, axis=2)
expected0[3, :, :] = np.nan
expected1[:, 3, :] = np.nan
expected2[:, :, 3] = np.nan
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
tm.assert_almost_equal(out2, expected2)
else:
for i, out in enumerate([out0, out1, out2]):
with tm.assertRaisesRegexp(TypeError, self.fill_error):
com.take_nd(data, indexer, out=out, axis=i)
# no exception o/w
data.take(indexer, out=out, axis=i)
_test_dtype(np.float64, True)
_test_dtype(np.float32, True)
_test_dtype(np.uint64, False)
_test_dtype(np.uint32, False)
_test_dtype(np.uint16, False)
_test_dtype(np.uint8, False)
_test_dtype(np.int64, False)
_test_dtype(np.int32, False)
_test_dtype(np.int16, False)
_test_dtype(np.int8, False)
_test_dtype(np.object_, True)
_test_dtype(np.bool, False)
def test_3d_fill_nonna(self):
def _test_dtype(dtype, fill_value, out_dtype):
data = np.random.randint(0, 2, (5, 4, 3)).astype(dtype)
indexer = [2, 1, 0, -1]
result = com.take_nd(data, indexer, axis=0, fill_value=fill_value)
assert((result[[0, 1, 2], :, :] == data[[2, 1, 0], :, :]).all())
assert((result[3, :, :] == fill_value).all())
assert(result.dtype == out_dtype)
result = com.take_nd(data, indexer, axis=1, fill_value=fill_value)
assert((result[:, [0, 1, 2], :] == data[:, [2, 1, 0], :]).all())
assert((result[:, 3, :] == fill_value).all())
assert(result.dtype == out_dtype)
result = com.take_nd(data, indexer, axis=2, fill_value=fill_value)
assert((result[:, :, [0, 1, 2]] == data[:, :, [2, 1, 0]]).all())
assert((result[:, :, 3] == fill_value).all())
assert(result.dtype == out_dtype)
indexer = [2, 1, 0, 1]
result = com.take_nd(data, indexer, axis=0, fill_value=fill_value)
assert((result[[0, 1, 2, 3], :, :] == data[indexer, :, :]).all())
assert(result.dtype == dtype)
result = com.take_nd(data, indexer, axis=1, fill_value=fill_value)
assert((result[:, [0, 1, 2, 3], :] == data[:, indexer, :]).all())
assert(result.dtype == dtype)
result = com.take_nd(data, indexer, axis=2, fill_value=fill_value)
assert((result[:, :, [0, 1, 2, 3]] == data[:, :, indexer]).all())
assert(result.dtype == dtype)
_test_dtype(np.int8, np.int16(127), np.int8)
_test_dtype(np.int8, np.int16(128), np.int16)
_test_dtype(np.int32, 1, np.int32)
_test_dtype(np.int32, 2.0, np.float64)
_test_dtype(np.int32, 3.0 + 4.0j, np.complex128)
_test_dtype(np.int32, True, np.object_)
_test_dtype(np.int32, '', np.object_)
_test_dtype(np.float64, 1, np.float64)
_test_dtype(np.float64, 2.0, np.float64)
_test_dtype(np.float64, 3.0 + 4.0j, np.complex128)
_test_dtype(np.float64, True, np.object_)
_test_dtype(np.float64, '', np.object_)
_test_dtype(np.complex128, 1, np.complex128)
_test_dtype(np.complex128, 2.0, np.complex128)
_test_dtype(np.complex128, 3.0 + 4.0j, np.complex128)
_test_dtype(np.complex128, True, np.object_)
_test_dtype(np.complex128, '', np.object_)
_test_dtype(np.bool_, 1, np.object_)
_test_dtype(np.bool_, 2.0, np.object_)
_test_dtype(np.bool_, 3.0 + 4.0j, np.object_)
_test_dtype(np.bool_, True, np.bool_)
_test_dtype(np.bool_, '', np.object_)
def test_1d_other_dtypes(self):
arr = np.random.randn(10).astype(np.float32)
indexer = [1, 2, 3, -1]
result = com.take_1d(arr, indexer)
expected = arr.take(indexer)
expected[-1] = np.nan
tm.assert_almost_equal(result, expected)
def test_2d_other_dtypes(self):
arr = np.random.randn(10, 5).astype(np.float32)
indexer = [1, 2, 3, -1]
# axis=0
result = com.take_nd(arr, indexer, axis=0)
expected = arr.take(indexer, axis=0)
expected[-1] = np.nan
tm.assert_almost_equal(result, expected)
# axis=1
result = com.take_nd(arr, indexer, axis=1)
expected = arr.take(indexer, axis=1)
expected[:, -1] = np.nan
tm.assert_almost_equal(result, expected)
def test_1d_bool(self):
arr = np.array([0, 1, 0], dtype=bool)
result = com.take_1d(arr, [0, 2, 2, 1])
expected = arr.take([0, 2, 2, 1])
self.assert_numpy_array_equal(result, expected)
result = com.take_1d(arr, [0, 2, -1])
self.assertEqual(result.dtype, np.object_)
def test_2d_bool(self):
arr = np.array([[0, 1, 0],
[1, 0, 1],
[0, 1, 1]], dtype=bool)
result = com.take_nd(arr, [0, 2, 2, 1])
expected = arr.take([0, 2, 2, 1], axis=0)
self.assert_numpy_array_equal(result, expected)
result = com.take_nd(arr, [0, 2, 2, 1], axis=1)
expected = arr.take([0, 2, 2, 1], axis=1)
self.assert_numpy_array_equal(result, expected)
result = com.take_nd(arr, [0, 2, -1])
self.assertEqual(result.dtype, np.object_)
def test_2d_float32(self):
arr = np.random.randn(4, 3).astype(np.float32)
indexer = [0, 2, -1, 1, -1]
# axis=0
result = com.take_nd(arr, indexer, axis=0)
result2 = np.empty_like(result)
com.take_nd(arr, indexer, axis=0, out=result2)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=0)
expected[[2, 4], :] = np.nan
tm.assert_almost_equal(result, expected)
        # take_nd now accepts a float32 out buffer directly (a float64 buffer is no longer required)
out = np.empty((len(indexer), arr.shape[1]), dtype='float32')
com.take_nd(arr, indexer, out=out) # it works!
# axis=1
result = com.take_nd(arr, indexer, axis=1)
result2 = np.empty_like(result)
com.take_nd(arr, indexer, axis=1, out=result2)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=1)
expected[:, [2, 4]] = np.nan
tm.assert_almost_equal(result, expected)
def test_2d_datetime64(self):
# 2005/01/01 - 2006/01/01
arr = np.random.randint(long(11045376), long(11360736), (5,3))*100000000000
arr = arr.view(dtype='datetime64[ns]')
indexer = [0, 2, -1, 1, -1]
# axis=0
result = com.take_nd(arr, indexer, axis=0)
result2 = np.empty_like(result)
com.take_nd(arr, indexer, axis=0, out=result2)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=0)
expected.view(np.int64)[[2, 4], :] = iNaT
tm.assert_almost_equal(result, expected)
result = com.take_nd(arr, indexer, axis=0,
fill_value=datetime(2007, 1, 1))
result2 = np.empty_like(result)
com.take_nd(arr, indexer, out=result2, axis=0,
fill_value=datetime(2007, 1, 1))
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=0)
expected[[2, 4], :] = datetime(2007, 1, 1)
tm.assert_almost_equal(result, expected)
# axis=1
result = com.take_nd(arr, indexer, axis=1)
result2 = np.empty_like(result)
com.take_nd(arr, indexer, axis=1, out=result2)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=1)
expected.view(np.int64)[:, [2, 4]] = iNaT
tm.assert_almost_equal(result, expected)
result = com.take_nd(arr, indexer, axis=1,
fill_value=datetime(2007, 1, 1))
result2 = np.empty_like(result)
com.take_nd(arr, indexer, out=result2, axis=1,
fill_value=datetime(2007, 1, 1))
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=1)
expected[:, [2, 4]] = datetime(2007, 1, 1)
tm.assert_almost_equal(result, expected)
class TestMaybe(tm.TestCase):
def test_maybe_convert_string_to_array(self):
result = com._maybe_convert_string_to_object('x')
tm.assert_numpy_array_equal(result, np.array(['x'], dtype=object))
self.assertTrue(result.dtype == object)
result = com._maybe_convert_string_to_object(1)
self.assertEqual(result, 1)
arr = np.array(['x', 'y'], dtype=str)
result = com._maybe_convert_string_to_object(arr)
tm.assert_numpy_array_equal(result, np.array(['x', 'y'], dtype=object))
self.assertTrue(result.dtype == object)
# unicode
arr = np.array(['x', 'y']).astype('U')
result = com._maybe_convert_string_to_object(arr)
tm.assert_numpy_array_equal(result, np.array(['x', 'y'], dtype=object))
self.assertTrue(result.dtype == object)
# object
arr = np.array(['x', 2], dtype=object)
result = com._maybe_convert_string_to_object(arr)
tm.assert_numpy_array_equal(result, np.array(['x', 2], dtype=object))
self.assertTrue(result.dtype == object)
def test_dict_compat():
data_datetime64 = {np.datetime64('1990-03-15'): 1,
np.datetime64('2015-03-15'): 2}
data_unchanged = {1: 2, 3: 4, 5: 6}
expected = {Timestamp('1990-3-15'): 1, Timestamp('2015-03-15'): 2}
assert(com._dict_compat(data_datetime64) == expected)
assert(com._dict_compat(expected) == expected)
assert(com._dict_compat(data_unchanged) == data_unchanged)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
# Copyright (c) 2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnef
def _ensure_rank(array, rank, value=1):
    # pad the array with 'value' up to the requested rank (no-op if it is already long enough)
    return array if len(array) >= rank else array + [value] * (rank - len(array))
def _volume(shape):
volume = 1
for s in shape:
volume *= s
return volume
def _broadcast_compatible(x, y):
    # True if the two shapes can be broadcast against each other (either side may be 1 where they differ)
    return all(xi == yi or xi == 1 or yi == 1 for xi, yi in zip(x, y))
def _broadcastable(x, y):
    # True if shape x can be broadcast to shape y (only x may be 1 where they differ)
    return all(xi == yi or xi == 1 for xi, yi in zip(x, y))
def _broadcast_shape(x, y):
assert _broadcast_compatible(x, y), "arguments are not broadcast compatible ({} vs {})".format(x, y)
rank = max(len(x), len(y))
return [max(xi,yi) for (xi, yi) in zip(_ensure_rank(x, rank), _ensure_rank(y, rank))]
def _downsize_shape(input, kernel, padding, stride, dilation):
return [(i + p + q - (k - 1) * d - 1) // s + 1 for i, k, (p, q), s, d in
zip(input, kernel, padding, stride, dilation)] \
if padding else [(i + s - 1) // s for i, s in zip(input, stride)]
def _upsize_shape(input, kernel, padding, stride, dilation):
return [(i - 1) * s + (k - 1) * d + 1 - p - q for i, k, (p, q), s, d in
zip(input, kernel, padding, stride, dilation)] \
if padding else [i * s for i, s in zip(input, stride)]
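# Illustrative examples (not part of the original module), using the helpers above:
# a spatial extent of 8 with a kernel of 3, padding (1, 1), stride 2 and dilation 1
# downsizes to 4, and the transposed (upsizing) formula maps an extent of 4 to 7:
#   _downsize_shape([8], [3], [(1, 1)], [2], [1]) == [4]
#   _upsize_shape([4], [3], [(1, 1)], [2], [1])   == [7]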
def nullary_shape(shape, **kwargs):
return shape
def unary_shape(arg, **kwargs):
return arg
def binary_shape(left, right, **kwargs):
return _broadcast_shape(left, right)
def asymmetric_binary_shape(left, right, **kwargs):
assert _broadcastable(right, left), \
"second argument shape ({}) cannot be broadcast to first argument shape ({})".format(right, left)
return left
def ternary_shape(cond, left, right, **kwargs):
value = _broadcast_shape(left, right)
return _broadcast_shape(cond, value)
def pool_shape(input, size, border, padding, stride, dilation, output_shape=None, transposed=False, **kwargs):
rank = len(input)
assert len(size) == rank, "expected kernel shape of rank {}, found {}".format(rank, size)
assert not padding or len(padding) == rank, "expected 'padding' of length {}, found {}".format(rank, padding)
assert not stride or len(stride) == rank, "expected 'stride' of length {}, found {}".format(rank, stride)
assert not dilation or len(dilation) == rank, "expected 'dilation' of length {}, found {}".format(rank, dilation)
assert all(s > 0 for s in stride), "'stride' must be positive, found {}".format(stride)
assert all(d > 0 for d in dilation), "'dilation' must be positive, found {}".format(dilation)
stride = _ensure_rank(stride, rank)
dilation = _ensure_rank(dilation, rank)
if output_shape:
assert len(output_shape) == rank, "expected 'output_shape' of length {}, found {}".format(rank, output_shape)
assert all(s > 0 for s in output_shape), "'output_shape' must be positive, found {}".format(output_shape)
expected_shape = _downsize_shape(output_shape, size, padding, stride, dilation)
assert input == expected_shape, \
"expected input shape {} derived from 'output_shape' is incompatible with actual input shape {}".\
format(expected_shape, input)
return output_shape
if transposed:
return _upsize_shape(input, size, padding, stride, dilation)
else:
return _downsize_shape(input, size, padding, stride, dilation)
def pool_with_index_shape(input, size, border, padding, stride, dilation):
shape = pool_shape(input, size, border, padding, stride, dilation)
return (shape, shape)
def unpool_shape(input, size, border, padding, stride, dilation, output_shape, **kwargs):
return pool_shape(input, size, border, padding, stride, dilation, output_shape, transposed=True, **kwargs)
def sample_shape(input, index, size, border, padding, stride, dilation, output_shape=None, transposed=False):
assert index == input, "'index' shape {} does not match 'input' shape {}".format(input, index)
return pool_shape(input, size, border, padding, stride, dilation, output_shape, transposed)
def desample_shape(input, index, size, border, padding, stride, dilation, output_shape):
return sample_shape(input, index, size, border, padding, stride, dilation, output_shape, transposed=True)
def conv_shape(input, filter, bias, border, padding, stride, dilation, groups, output_shape=None, transposed=False):
rank = len(input)
assert len(filter) == rank, "expected filter shape of rank {}, found {}".format(rank, filter)
assert not padding or len(padding) == rank - 2, "expected 'padding' of length {}, found {}".format(rank - 2, padding)
assert not stride or len(stride) == rank - 2, "expected 'stride' of length {}, found {}".format(rank - 2, stride)
assert not dilation or len(dilation) == rank - 2, "expected 'dilation' of rank {}, found {}".format(rank - 2, dilation)
assert all(s > 0 for s in stride), "'stride' must be positive, found {}".format(stride)
assert all(d > 0 for d in dilation), "'dilation' must be positive, found {}".format(dilation)
assert groups >= 0, "'groups' must be non-negative, found {}".format(groups)
if groups == 0:
groups = output_shape[1] if transposed and output_shape else input[1]
if transposed:
assert filter[0] == input[1], "filter batch ({}) does not match input channels ({})".format(filter[0], input[1])
else:
assert filter[1] * groups == input[1], \
"filter channels ({}) times groups ({}) does not match input channels ({})".format(filter[1], groups, input[1])
assert filter[0] % groups == 0, "'groups' ({}) does not divide filter batch ({})".format(groups, filter[0])
assert len(bias) <= 2, "expected bias shape of rank at most 2, found {}".format(bias)
if len(bias) == 2:
assert bias[0] == 1, "'bias' batch dimension must be singular"
if len(bias):
channels = filter[1] * groups if transposed else filter[0]
assert bias[-1] == channels, "'bias' channels ({}) does not match filter batch ({})".format(bias[-1], channels)
stride = _ensure_rank(stride, rank - 2)
dilation = _ensure_rank(dilation, rank - 2)
if output_shape:
assert len(output_shape) == rank, "expected 'output_shape' of length {}, found {}".format(rank, output_shape)
assert all(s > 0 for s in output_shape), "'output_shape' must be positive, found {}".format(output_shape)
assert output_shape[0] == input[0], \
"output batch ({}) does not match input batch ({})".format(output_shape[0], input[0])
        assert output_shape[1] == filter[1] * groups, \
            "output channels ({}) does not match filter channels ({}) times groups ({})".format(output_shape[1], filter[1], groups)
expected_shape = [input[0], filter[0]] + _downsize_shape(output_shape[2:], filter[2:], padding, stride, dilation)
assert input == expected_shape, \
"expected input shape {} derived from 'output_shape' is incompatible with actual input shape {}". \
format(expected_shape, input)
return output_shape
if transposed:
return [input[0], filter[1] * groups] + _upsize_shape(input[2:], filter[2:], padding, stride, dilation)
else:
return [input[0], filter[0]] + _downsize_shape(input[2:], filter[2:], padding, stride, dilation)
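# Illustrative example (not part of the original module): a ResNet-style stem with an
# NCHW input [1, 3, 224, 224], a [64, 3, 7, 7] filter, bias shape [1, 64],
# padding (3, 3), stride 2, dilation 1 and groups=1 gives
#   conv_shape([1, 3, 224, 224], [64, 3, 7, 7], [1, 64], 'constant',
#              [(3, 3), (3, 3)], [2, 2], [1, 1], 1) == [1, 64, 112, 112]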
def separable_conv_shape(input, plane_filter, point_filter, bias, border, padding, stride, dilation, groups,
output_shape=None, transposed=False):
assert all(x == 1 for x in point_filter[2:]), \
"point-wise filter must be singular in spatial dimensions, found {}".format(point_filter)
assert point_filter[1] == plane_filter[0], \
"channel dimension of point-wise filter ({}) does not equal batch dimension of depth-wise filter ({})".\
format(point_filter[1], plane_filter[0])
assert plane_filter[1] == 1, "channel dimension of plane-wise filter must be singular, found {}".format(plane_filter)
channels = point_filter[1] if transposed else input[1]
filter = [point_filter[0], channels] + plane_filter[2:]
return conv_shape(input, filter, bias, border, padding, stride, dilation, groups, output_shape, transposed)
def separable_deconv_shape(input, plane_filter, point_filter, bias, border, padding, stride, dilation, groups, output_shape):
return separable_conv_shape(input, plane_filter, point_filter, bias, border, padding, stride, dilation, groups,
output_shape, transposed=True)
def deconv_shape(input, filter, bias, border, padding, stride, dilation, groups, output_shape):
return conv_shape(input, filter, bias, border, padding, stride, dilation, groups, output_shape, transposed=True)
def reduce_shape(input, axes, **kwargs):
rank = len(input)
assert all(0 <= axis < rank for axis in axes), "axes must be in range [0,{}), found {}".format(rank, axes)
return [1 if i in axes else input[i] for i in range(rank)]
def normalize_shape(input, **kwargs):
rank = len(input)
axes = kwargs.get('axes')
size = kwargs.get('size')
if axes:
assert all(0 <= axis < rank for axis in axes), "axes must be in range [0,{}), found {}".format(rank, axes)
if size:
assert len(size) == rank, "expected 'size' of length {}, found {}".format(rank, size)
assert all(s >= 1 for s in size), "'size' must be positive, found {}".format(size)
return input
def moments_shape(input, axes):
shape = normalize_shape(input, axes=axes)
return (shape, shape)
def downsample_shape(input, factor, **kwargs):
rank = len(input)
    assert len(factor) == rank - 2, "expected 'factor' of length {}, found {}".format(rank - 2, factor)
assert all(i % f == 0 for i, f in zip(input[2:], factor)), \
"'factor' {} does not divide spatial input shape {}".format(factor, input[2:])
return input[:2] + [i // f for i, f in zip(input[2:], factor)]
def upsample_shape(input, factor, **kwargs):
rank = len(input)
    assert len(factor) == rank - 2, "expected 'factor' of length {}, found {}".format(rank - 2, factor)
return input[:2] + [i * f for i, f in zip(input[2:], factor)]
def reshape_shape(input, shape, axis_start, axis_count):
rank = len(input)
assert all(s >= -1 for s in shape), "items in 'shape' must be >= -1, found {}".format(shape)
assert sum(1 for s in shape if s == -1) <= 1, "at most one item may be -1 in 'shape', found {}".format(shape)
assert 0 <= axis_start <= rank, "'axis_start' must be in range [0,{}], found {}".format(rank, axis_start)
assert axis_count >= -1, "'axis_count' must be non-negative or -1, found {}".format(axis_count)
if axis_count == -1:
axis_count = rank - axis_start
axis_end = axis_start + axis_count
assert axis_end <= rank, "'axis_start' + 'axis_count' ({}) must be in range [0,{}]".format(axis_end, rank)
shape = list(shape) # don't modify original list
for i in range(len(shape)):
if shape[i] == 0:
shape[i] = input[i + axis_start]
input_range = input[axis_start:axis_end]
if -1 in shape:
idx = shape.index(-1)
assert _volume(input_range) % _volume(shape) == 0, \
"volume of 'shape' ({}) does not divide volume of 'input[{}:{}]' ({})".format(shape, axis_start, axis_end, input_range)
shape[idx] = _volume(input_range) // -_volume(shape)
else:
assert _volume(shape) == _volume(input_range), \
"volume of 'shape' ({}) does not equal volume of 'input[{}:{}]' ({})".format(shape, axis_start, axis_end, input_range)
return input[:axis_start] + shape + input[axis_end:]
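# Illustrative examples (not part of the original module): a 0 keeps the corresponding
# input extent and a single -1 is inferred from the remaining volume, e.g.
#   reshape_shape([2, 3, 4], [-1], 0, -1)    == [24]
#   reshape_shape([2, 3, 4], [0, -1], 0, -1) == [2, 12]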
def transpose_shape(input, axes):
rank = len(axes)
assert sorted(axes) == list(range(rank)), "axes must be a permutation of [0..{}], found {}".format(rank-1, axes)
return [input[axis] for axis in axes] + input[rank:]
def squeeze_shape(input, axes):
rank = len(input)
assert all(0 <= axis < rank for axis in axes), "axes must be in range [0,{}), found {}".format(rank, axes)
    return [input[i] for i in range(rank) if i not in axes]
def unsqueeze_shape(input, axes):
rank = len(input) + len(axes)
assert all(0 <= axis < rank for axis in axes), "axes must be in range [0,{}), found {}".format(rank, axes)
output = list(input)
for axis in axes:
output = output[:axis] + [1] + output[axis:]
return output
def concat_shape(values, axis):
assert len(values) != 0, "'values' must be non-empty"
shape = list(values[0])
rank = len(shape)
assert 0 <= axis < rank, "'axis' must be in range [0,{}), found {}".format(rank, axis)
for value in values:
assert len(value) == len(shape), "'values' must have the same rank, found {}".format(values)
assert all(value[i] == shape[i] for i in range(rank) if i != axis), \
"shapes of 'values' must be identical for all dimensions other than 'axis' ({}), found {}".format(axis, values)
shape[axis] = sum(value[axis] for value in values)
return shape
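# Illustrative example (not part of the original module): concatenating shapes
# [2, 3] and [2, 5] along axis 1 sums the extents on that axis:
#   concat_shape([[2, 3], [2, 5]], 1) == [2, 8]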
def split_shape(value, axis, ratios):
rank = len(value)
assert 0 <= axis < rank, "axis must be in range [0,{}), found {}".format(rank, axis)
assert all(r > 0 for r in ratios), "'ratios' must be positive, found {}".format(ratios)
total = sum(ratios)
assert value[axis] % total == 0, \
"sum of 'ratios' ({}) does not divide input shape along dimension 'axis' ({})".format(total, value[axis])
unit = value[axis] // total
return [[unit * r if i == axis else value[i] for i in range(rank)] for r in ratios]
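# Illustrative example (not part of the original module): splitting a [4, 6] shape
# along axis 1 with ratios [1, 2] divides the extent 6 into units of 2:
#   split_shape([4, 6], 1, [1, 2]) == [[4, 2], [4, 4]]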
def stack_shape(values, axis):
assert len(values) != 0, "'values' must be non-empty"
shape = values[0]
rank = len(shape) + 1
assert 0 <= axis < rank, "'axis' must be in range [0,{}), found {}".format(rank, axis)
assert all(value == shape for value in values), "shapes of 'values' must be identical, found {}".format(values)
return shape[:axis] + [len(values)] + shape[axis:]
def unstack_shape(value, axis):
rank = len(value)
assert 0 <= axis < rank, "'axis' must be in range [0,{}), found {}".format(rank, axis)
return [value[:axis] + value[axis+1:]] * value[axis]
def slice_shape(input, axes, begin, end):
rank = len(input)
assert len(begin) == len(axes), \
"length of 'begin' ({}) does not equal length of 'axes' ({})".format(len(begin), len(axes))
assert len(end) == len(axes), \
"length of 'end' ({}) does not equal length of 'axes' ({})".format(len(end), len(axes))
assert all(0 <= axis < rank for axis in axes), "'axes' must be in range [0,{}), found {}".format(rank, axes)
begin = [offs + input[axis] if offs < 0 else offs for axis, offs in zip(axes, begin)]
end = [offs + input[axis] if offs <= 0 else offs for axis, offs in zip(axes, end)]
assert all(last > first for first, last in zip(begin, end)), \
"slice range ({},{}) is empty".format(begin, end)
assert all(first >= 0 and last <= input[axis] for axis, first, last in zip(axes, begin, end)), \
"slice range ({},{}) is out of tensor shape {}".format(begin, end, input)
output = list(input)
for axis, first, last in zip(axes, begin, end):
output[axis] = last - first
return output
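# Illustrative example (not part of the original module): negative or zero 'end'
# offsets wrap around the corresponding input extent, so slicing [10, 20] on axis 1
# from index 5 to the end gives
#   slice_shape([10, 20], [1], [5], [0]) == [10, 15]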
def tile_shape(input, repeats):
rank = len(input)
assert len(repeats) == rank, "expected 'repeats' of length {}, found {}".format(rank, repeats)
return [i * r for i, r in zip(input, repeats)]
def pad_shape(input, padding, **kwargs):
rank = len(input)
assert len(padding) == rank, "expected 'padding' of length {}, found {}".format(rank, padding)
return [p + i + q for i, (p, q) in zip(input, padding)]
def matmul_shape(A, B, transposeA, transposeB):
assert len(A) == len(B), "argument rank mismatch ({} vs {})".format(len(A), len(B))
assert len(A) >= 2, "rank of arguments must be at least 2, found {}".format(len(A))
m = A[-1] if transposeA else A[-2]
n = B[-2] if transposeB else B[-1]
kA = A[-2] if transposeA else A[-1]
kB = B[-1] if transposeB else B[-2]
assert kA == kB, "inner dimensions must agree ({} vs {})".format(kA, kB)
return _broadcast_shape(A[:-2], B[:-2]) + [m,n]
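# Illustrative example (not part of the original module): a batched matrix product of
# shapes [5, 2, 3] and [5, 3, 4] without transposition broadcasts the leading batch
# dimension and combines the matrix dimensions:
#   matmul_shape([5, 2, 3], [5, 3, 4], False, False) == [5, 2, 4]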
def linear_shape(input, filter, bias):
assert len(input) == 2, "rank of input must be 2, found {}".format(len(input))
assert len(filter) == 2, "rank of filter must be 2, found {}".format(len(filter))
assert len(bias) <= 2, "rank of bias must be at most 2, found {}".format(len(bias))
assert input[1] == filter[1], "input channels ({}) does not match filter channels ({})".format(input[1], filter[1])
if len(bias) == 2:
assert bias[0] == 1, "'bias' batch dimension must be singular"
if len(bias):
c = len(bias) - 1
assert bias[c] == filter[0], "'bias' channels ({}) does not match filter batch ({})".format(bias[c], filter[0])
return [input[0], filter[0]]
def softmax_shape(input, axes):
rank = len(input)
assert all(0 <= axis < rank for axis in axes), "axes must be in range [0,{}), found {}".format(rank, axes)
return input
def batchnorm_shape(input, mean, variance, offset, scale, epsilon):
assert epsilon >= 0, "'epsilon' must be non-negative, found {}".format(epsilon)
assert _broadcastable(mean, input), \
"'mean' shape {} cannot be broadcast to 'input' shape {}".format(mean, input)
assert _broadcastable(variance, input), \
"'variance' shape {} cannot be broadcast to 'input' shape {}".format(variance, input)
assert _broadcastable(offset, input), \
"'offset' shape {} cannot be broadcast to 'input' shape {}".format(offset, input)
assert _broadcastable(scale, input), \
"'scale' shape {} cannot be broadcast to 'input' shape {}".format(scale, input)
return input
def roi_shape(input, rois, batch_index, output_size, **kwargs):
rank = len(input)
assert len(output_size) == rank - 2, "expected 'output_size' of length {}, found {}".format(rank - 2, output_size)
assert all(s > 0 for s in output_size), "'output_size' must be positive, found {}".format(output_size)
assert len(rois) == 2, "'rois' must be of rank 2, found {}".format(rois)
assert rois[1] == 4, "'rois' must be of extent 4 along dimension 1, found {}".format(rois)
assert len(batch_index) == 1, "'batch_index' must be of rank 1, found {}".format(batch_index)
assert batch_index[0] == rois[0], \
"'batch_index' must be of same length as dimension 0 of rois; found {} vs {}".format(batch_index, rois)
rate = kwargs.get('sampling_rate')
if rate:
assert len(rate) == rank - 2, "expected 'sampling_rate' of length {}, found {}".format(rank - 2, rate)
assert all(r > 0 for r in rate), "'rate' must be positive, found {}".format(rate)
return [rois[0], input[1]] + output_size
def quantize_shape(input, *args, **kwargs):
for arg in args:
assert _broadcastable(arg, input), \
"'min/max' shape {} cannot be broadcast to 'input' shape {}".format(arg, input)
bits = kwargs.get('bits')
if bits is not None:
assert bits > 0, "'bits' must be positive, found {}".format(bits)
return input
def update_shape(variable, value):
assert value == variable, "shape of update value {} does not match shape of variable {}".format(value, variable)
return variable
def copy_n_shape(value, times):
assert times > 0, "'times' must be positive, found {}".format(times)
return [value] * times
def add_n_shape(values):
assert len(values) != 0, "values must be non-empty"
shape = values[0]
assert all(value == shape for value in values), "shapes of values must be identical, found {}".format(values)
return shape
def _get_shape(graph, value):
if isinstance(value, nnef.Identifier):
return graph.tensors[value].shape
elif isinstance(value, list):
return [_get_shape(graph, v) for v in value]
else:
return []
def _set_shape(graph, value, shape):
if isinstance(value, nnef.Identifier):
tensor = graph.tensors[value]
graph.tensors[value] = nnef.Tensor(tensor.name, tensor.dtype, shape, tensor.data, tensor.compression, tensor.quantization)
elif isinstance(value, list):
for v, s in zip(value, shape):
_set_shape(graph, v, s)
def infer_shapes(graph, custom_shapes={}):
# type: (nnef.Graph, dict)->None
for op in graph.operations:
func = _StandardShapeFuncs.get(op.name)
if func is None:
func = custom_shapes.get(op.name)
if func is None:
raise nnef.Error("shape inference function is not defined for operation '{}'".format(op.name))
input_shapes = [_get_shape(graph, input) for input in op.inputs.values()]
try:
output_shapes = func(*input_shapes, **op.attribs)
if not isinstance(output_shapes, tuple):
output_shapes = (output_shapes,)
outputs = op.outputs.values()
            assert len(outputs) == len(output_shapes), \
                "number of shapes ({}) does not match number of outputs ({})".format(len(output_shapes), len(outputs))
for output, shape in zip(outputs, output_shapes):
if isinstance(output, list):
assert isinstance(shape, list), "expected list of shapes"
                    assert len(output) == len(shape), \
                        "number of shapes ({}) does not match number of outputs ({})".format(len(shape), len(output))
_set_shape(graph, output, shape)
except AssertionError as e:
raise nnef.Error("while inferring shape of tensor(s) '{}' (operation '{}'): {}"
.format(', '.join(op.outputs.values()), op.name, e))
_StandardShapeFuncs = {
'external': nullary_shape,
'variable': nullary_shape,
'constant': nullary_shape,
'copy': unary_shape,
'neg': unary_shape,
'not': unary_shape,
'rcp': unary_shape,
'exp': unary_shape,
'log': unary_shape,
'sin': unary_shape,
'cos': unary_shape,
'abs': unary_shape,
'sign': unary_shape,
'floor': unary_shape,
'ceil': unary_shape,
'round': unary_shape,
'sqr': unary_shape,
'sqrt': unary_shape,
'rsqr': unary_shape,
'rsqrt': unary_shape,
'log2': unary_shape,
'sigmoid': unary_shape,
'tanh': unary_shape,
'relu': unary_shape,
'elu': unary_shape,
'softabs': unary_shape,
'softplus': unary_shape,
'leaky_relu': unary_shape,
'prelu': asymmetric_binary_shape,
'add': binary_shape,
'sub': binary_shape,
'mul': binary_shape,
'div': binary_shape,
'pow': binary_shape,
'min': binary_shape,
'max': binary_shape,
'lt': binary_shape,
'le': binary_shape,
'gt': binary_shape,
'ge': binary_shape,
'eq': binary_shape,
'ne': binary_shape,
'and': binary_shape,
'or': binary_shape,
'select': ternary_shape,
'clamp': ternary_shape,
'conv': conv_shape,
'deconv': deconv_shape,
'separable_conv': separable_conv_shape,
'separable_deconv': separable_deconv_shape,
'box': pool_shape,
'debox': unpool_shape,
'sample': sample_shape,
'desample': desample_shape,
'avg_pool': pool_shape,
'max_pool': pool_shape,
'argmax_pool': pool_shape,
'rms_pool': pool_shape,
'max_pool_with_index': pool_with_index_shape,
'max_unpool': unpool_shape,
'avg_unpool': unpool_shape,
'sum_reduce': reduce_shape,
'min_reduce': reduce_shape,
'max_reduce': reduce_shape,
'mean_reduce': reduce_shape,
'argmin_reduce': reduce_shape,
'argmax_reduce': reduce_shape,
'any_reduce': reduce_shape,
'all_reduce': reduce_shape,
'local_response_normalization': normalize_shape,
'local_mean_normalization': normalize_shape,
'local_variance_normalization': normalize_shape,
'local_contrast_normalization': normalize_shape,
'l1_normalization': normalize_shape,
'l2_normalization': normalize_shape,
'moments': moments_shape,
'batch_normalization': batchnorm_shape,
'nearest_downsample': downsample_shape,
'area_downsample': downsample_shape,
'nearest_upsample': upsample_shape,
'multilinear_upsample': upsample_shape,
'reshape': reshape_shape,
'transpose': transpose_shape,
'squeeze': squeeze_shape,
'unsqueeze': unsqueeze_shape,
'stack': stack_shape,
'unstack': unstack_shape,
'split': split_shape,
'concat': concat_shape,
'slice': slice_shape,
'tile': tile_shape,
'pad': pad_shape,
'matmul': matmul_shape,
'linear': linear_shape,
'softmax': softmax_shape,
'linear_quantize': quantize_shape,
'logarithmic_quantize': quantize_shape,
'avg_roi_pool': roi_shape,
'max_roi_pool': roi_shape,
'avg_roi_align': roi_shape,
'max_roi_align': roi_shape,
'roi_resample': roi_shape,
'update': update_shape,
'copy_n': copy_n_shape,
'add_n': add_n_shape,
}
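# Minimal usage sketch (not part of the original module). It assumes the 'nnef' parser
# package exposes load_graph() for reading a model from disk; the command-line path is
# only a placeholder.
if __name__ == '__main__':
    import sys
    graph = nnef.load_graph(sys.argv[1])  # e.g. a path to a .nnef file or graph folder
    infer_shapes(graph)
    for name, tensor in graph.tensors.items():
        print(name, tensor.shape)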
|
from django.apps import AppConfig
from django.db.utils import OperationalError, ProgrammingError
from django.conf import settings
from django.utils.timezone import activate
import logging
class MoloAppConfig(AppConfig):
name = 'molo.core'
def ready(self):
from molo.core.models import Site, CmsSettings
logging.basicConfig()
logger = logging.getLogger(__name__)
try:
site = Site.objects.first()
if not site:
raise OperationalError("No site object")
timezone = CmsSettings.for_site(site).timezone
if timezone is None:
timezone_name = settings.TIME_ZONE
logger.warning(
'Timezone unset, defaulting to {0}'.format(timezone_name))
else:
timezone_name = timezone.title
except (OperationalError, ProgrammingError) as e:
timezone_name = settings.TIME_ZONE
logger.warning('Database error: {0}'.format(e))
logger.warning('Defaulting to timezone: {0}'.format(timezone_name))
activate(timezone_name)
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
################################################
Testcase_PrepareCondition:
Testcase_TestSteps:
Testcase_ExpectedResult:
"""
import os
import pytest
from tests.common.base import TestBase
from tests.common.test_run.relu6_grad_run import relu6_grad_run
############################################################
# TestCase= class: put to tests/*/
############################################################
class TestCase(TestBase):
def setup(self):
case_name = "test_akg_relu6_grad_001"
case_path = os.getcwd()
# params init
self.params_init(case_name, case_path)
self.caseresult = True
self._log.info("============= {0} Setup case============".format(self.casename))
self.testarg = [
# testflag,opfuncname,testRunArgs, dimArgs
("relu6_grad_001", relu6_grad_run, ((1, 128), "float16")),
("relu6_grad_002", relu6_grad_run, ((8, 28, 28, 4), "float16")),
("relu6_grad_003", relu6_grad_run, ((8, 14, 14, 6), "float16")),
("relu6_grad_004", relu6_grad_run, ((8, 7, 7, 6), "float16")),
("relu6_grad_005", relu6_grad_run, ((8, 4, 4, 6), "float16")),
("relu6_grad_006", relu6_grad_run, ((8, 2, 2, 4), "float16")),
]
self.testarg_cloud = [
# testflag,opfuncname,testRunArgs, dimArgs
("relu6_grad_001", relu6_grad_run, ((1, 128), "float32")),
]
self.testarg_rpc_cloud = [
("relu6_grad_fp32_001", relu6_grad_run, ((8, 28, 28, 4), "float32")),
("relu6_grad_fp32_002", relu6_grad_run, ((8, 14, 14, 6), "float32")),
("relu6_grad_fp32_003", relu6_grad_run, ((8, 7, 7, 6), "float32")),
("relu6_grad_fp32_004", relu6_grad_run, ((8, 4, 4, 6), "float32")),
("relu6_grad_fp32_005", relu6_grad_run, ((8, 2, 2, 4), "float32")),
]
return
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_run(self):
self.common_run(self.testarg)
def test_run_cloud(self):
self.common_run(self.testarg_cloud)
def test_run_rpc_cloud(self):
self.common_run(self.testarg_rpc_cloud)
def teardown(self):
self._log.info("============= {0} Teardown============".format(self.casename))
return
# a=TestCase()
# a.setup()
# a.test_run()
|
from . import initial, input, message_passing, mlp, readout, update
from .gnn import GNN
from .input import GNNInput, get_dataset_from_files
__all__ = [
"GNN",
"GNNInput",
"get_dataset_from_files",
"initial",
"input",
"message_passing",
"mlp",
"readout",
"update",
]
|
import json
import os
from http import HTTPStatus
from typing import Any
import requests
class HTTPClient:
__instance = None
def __new__(cls):
"""
        This method creates the only instance of the class (singleton pattern).
:param cls: The class
:return: The method returns the class instance
"""
if cls.__instance is None:
cls.__instance = object.__new__(cls)
return cls.__instance
def __init__(self):
"""
Class constructor, sets up the HTTP client object.
"""
self.api_url = "https://api.openweathermap.org"
self.api_keys = list(os.environ['WEATHER_API_KEY'].split(' '))
self.connection = requests.Session()
def get(self, url: str) -> Any:
"""
This method makes a request to the API with the given url.
:param url: A str of the query
:return: The method returns a dict of the API response.
"""
response = None
try:
            # rotate through the configured API keys, moving to the next key when one is rejected
            keys = iter(self.api_keys)
            key = next(keys, None)
request_url = f"/data/2.5/{url}&appid={key}"
res = self.connection.get(self.api_url + request_url)
response = json.loads(res.content.decode('UTF-8'))
if "message" in response.keys():
if response["message"] == "city not found":
response.pop("cod", None)
break
if res.status_code == HTTPStatus.OK:
break
if res.status_code != HTTPStatus.UNAUTHORIZED:
raise Exception(f"{request_url}, {res.status_code}, {res.reason}")
key = next(keys, None)
if key is None:
raise Exception(f"{request_url}, {res.status_code}, {res.reason}")
except Exception as exc:
print(f"[HTTPClient] get error: {exc}")
finally:
self.connection.close()
return response
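# Minimal usage sketch (not part of the original module). It assumes WEATHER_API_KEY is
# set in the environment; the query string below is only an illustrative example.
if __name__ == '__main__':
    client = HTTPClient()
    data = client.get("weather?q=London&units=metric")
    if data is not None:
        print(data)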
|
"""
Abstract classes for typing purposes only
"""
# pylint: disable=no-self-use,pointless-statement,missing-docstring,invalid-name, too-few-public-methods
from __future__ import annotations
from typing import Optional, Dict, Sequence
class xmlFragment:
"""an abstract class representing the xml fragments returned by python-docx
"""
tag: str
prefix: Optional[str]
attrib: Dict[str, str]
nsmap: Dict[str, str]
text: Optional[str]
tail: Optional[str]
def getchildren(self) -> Sequence[xmlFragment]:
...
def getparent(self) -> Optional[xmlFragment]:
...
def getnext(self) -> Optional[xmlFragment]:
...
def xpath(self, x:str) -> Optional[xmlFragment]: # pylint: disable=unused-argument
...
class ct_altchunk(xmlFragment):
rId: str
class ct_p(xmlFragment):
...
class ct_numpr(xmlFragment):
...
# BASIC TYPES
class ct_onoff:
val: bool
class ct_string(xmlFragment):
val: str
class ct_decimalnumber(xmlFragment):
val: float
# TEXT TYPES
class ct_br(xmlFragment):
type: Optional[str]
clear: Optional[str]
class ct_pPr(xmlFragment):
numpr: Optional[ct_numpr]
class ct_rPr(xmlFragment):
vanish: Optional[ct_onoff]
webHidden: Optional[ct_onoff]
class ct_r(xmlFragment):
rPr: Optional[ct_rPr]
class ct_num(xmlFragment):
abstractNumId: xmlFragment # = OneAndOnlyOne('w: abstractNumId')
# lvlOverride = ZeroOrMore('w: lvlOverride')
numId: float # = RequiredAttribute('w: numId', ST_DecimalNumber)
# tables
class ct_cell(xmlFragment):
...
class ct_row(xmlFragment):
# tblPrEx = Optional[ct_tblPrEx]
tc: Optional[Sequence[ct_cell]]
class ct_tbl(xmlFragment):
tblPr: xmlFragment
tr: Optional[Sequence[ct_row]]
# parts
class part():
element: xmlFragment
class documentPart():
element: ct_document # pylint: disable=used-before-assignment
related_parts: Dict[str, part]
class altchunkpart():
element: documentPart
class ct_sectionPr(xmlFragment):
...
class ct_body(xmlFragment):
sectPr: Optional[ct_sectionPr]
class ct_document(xmlFragment):
body: ct_body
part: documentPart
|
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.ops.op import Op
class Reverse(Op):
op = 'Reverse'
def __init__(self, graph: Graph, attrs: dict):
mandatory_props = {
'type': None,
'axis': None,
'op': self.op,
'in_ports_count': 2,
'out_ports_count': 1,
'infer': self.infer,
}
super().__init__(graph, mandatory_props, attrs)
@staticmethod
def infer(node):
input_shape = node.in_port(0).data.get_shape()
input_value = node.in_port(0).data.get_value()
assert input_shape is not None
if not node.has_valid('axis'):
assert 1 in node.in_nodes()
assert node.in_node(1).has_valid('value')
assert node.in_node(1).value.size == 1
node['axis'] = node.in_node(1).value.item()
node.in_port(1).disconnect()
assert node.has_valid('axis')
assert len(node.out_nodes()) == 1
if input_value is not None:
node.out_port(0).data.set_value(np.flip(input_value, node.axis))
else:
node.out_port(0).data.set_shape(input_shape)
|
# -*- coding: utf-8 -*-
from openprocurement.auctions.core.utils import (
json_view,
context_unpack,
opresource
)
from openprocurement.auctions.core.validation import (
validate_file_update,
validate_file_upload,
validate_patch_document_data
)
from openprocurement.auctions.core.views.mixins import AuctionDocumentResource
from openprocurement.auctions.core.interfaces import (
IManager
)
from openprocurement.auctions.core.utils import (
get_file
)
@opresource(name='geb:Auction Documents',
collection_path='/auctions/{auction_id}/documents',
path='/auctions/{auction_id}/documents/{document_id}',
auctionsprocurementMethodType="geb",
description="Auction related binary files (PDFs, etc.)")
class AuctionDocumentResource(AuctionDocumentResource):
@json_view(permission='upload_auction_documents', validators=(validate_file_upload,))
def collection_post(self):
"""Auction Document Upload"""
save = None
manager = self.request.registry.queryMultiAdapter((self.request, self.context), IManager)
applicant = self.request.validated.get('document', self.request.validated.get('file'))
document = manager.create(applicant)
if document:
save = manager.save()
if save:
msg = 'Created auction document {}'.format(document.id)
extra = context_unpack(self.request, {'MESSAGE_ID': 'auction_document_create'}, {'document_id': document['id']})
self.LOGGER.info(msg, extra=extra)
self.request.response.status = 201
route = self.request.matched_route.name.replace("collection_", "")
locations = self.request.current_route_url(_route_name=route, document_id=document.id, _query={})
self.request.response.headers['Location'] = locations
return {'data': document.serialize("view")}
@json_view(permission='view_auction')
def get(self):
"""Auction Document Read""" # TODO rm black box
document = self.request.validated['document']
offline = bool(document.get('documentType') == 'x_dgfAssetFamiliarization')
if self.request.params.get('download') and not offline:
return get_file(self.request)
document_data = document.serialize("view")
document_data['previousVersions'] = [
i.serialize("view")
for i in self.request.validated['documents']
if i.url != document.url or
(offline and i.dateModified != document.dateModified)
]
return {'data': document_data}
@json_view(content_type="application/json", permission='upload_auction_documents', validators=(validate_patch_document_data,))
def patch(self):
"""Auction Document Update"""
save = None
manager = self.request.registry.queryMultiAdapter((self.request, self.context), IManager)
manager.change()
save = manager.save()
if save:
extra = context_unpack(self.request, {'MESSAGE_ID': 'auction_document_patch'})
msg = 'Updated auction document {}'.format(self.request.context.id)
self.LOGGER.info(msg, extra=extra)
return {'data': self.request.context.serialize("view")}
@json_view(permission='upload_auction_documents', validators=(validate_file_update,))
def put(self):
save = None
manager = self.request.registry.queryMultiAdapter((self.request, self.context), IManager)
document = manager.put()
save = manager.save()
if save:
extra = context_unpack(self.request, {'MESSAGE_ID': 'auction_document_put'})
msg = 'Updated auction document {}'.format(document.id)
self.LOGGER.info(msg, extra=extra)
return {'data': document.serialize("view")}
|
from typing import List
from core.exceptions.data_exceptions import (DataIndexNotFoundException,
DataIllegalEventTypeException,
DataIllegalActivityTypeException)
from core.exceptions.input_exceptions import (InputFormatException,
InputTypeInconsistencyException)
from core.exceptions.graph_exceptions import (GraphEdgeIdMultiplyAssignedException,
GraphIncidentNodeNotFoundException,
GraphNodeIdMultiplyAssignedException)
from core.io.csv import CsvReader, CsvWriter
from core.model.aperiodic_ean import AperiodicEvent, AperiodicActivity
from core.model.graph import Graph
from core.model.impl.simple_dict_graph import SimpleDictGraph
from core.model.periodic_ean import ActivityType, EventType
from core.model.timetable import Timetable
from core.util.config import Config, default_config
class AperiodicEANReader:
"""
    Class to process csv-lines formatted in the LinTim
    Activities-expanded.giv, Events-expanded.giv or Timetable-expanded.tim
    format. Use a CsvReader with an instance of this class to read aperiodic
    activity, event or timetable files. For convenience,
    :func:`~core.io.aperiodic_ean.read` is provided, which does all the necessary work.
"""
def __init__(self, event_file_name: str, activity_file_name: str, aperiodic_timetable_file_name: str,
aperiodic_ean: Graph[AperiodicEvent, AperiodicActivity], timetable: Timetable = None):
"""
        Constructor for an AperiodicEANReader for the given file names and aperiodic
        EAN. The given names do not influence which files are read, only the names used
        in any error message, so be sure to use the same names here as well as
        in the CsvReader!
        :param event_file_name: source file name for aperiodic events
        :param activity_file_name: source file name for aperiodic activities
        :param aperiodic_timetable_file_name: source file name for the aperiodic timetable
        :param aperiodic_ean: aperiodic event activity network
        :param timetable: aperiodic timetable
"""
self.aperiodicActivitiesFileName = activity_file_name
self.aperiodicEventsFileName = event_file_name
self.aperiodicTimetableFileName = aperiodic_timetable_file_name
self.aperiodicEAN = aperiodic_ean
self.timetable = timetable
@staticmethod
def parse_event_type(input_type: str, event_id: int) -> EventType:
"""
Parse the given input as an event type. Will raise, if it is not valid.
:param input_type: the input to parse
:param event_id: the event id. Only used for error handling
:return: the parsed event type
"""
if input_type.lower() == "arrival" or input_type.lower() == "\"arrival\"":
result = EventType.ARRIVAL
elif (input_type.lower() == "departure" or
input_type.lower() == "\"departure\""):
result = EventType.DEPARTURE
else:
raise DataIllegalEventTypeException(event_id, input_type)
return result
@staticmethod
def parse_activity_type(input_type: str, activity_id: int) -> ActivityType:
"""
Parse the given input as an activity type. Will raise, if it is not
valid.
:param input_type: the input to parse
:param activity_id: the activity id. Only used for error handling.
:return: the parsed activity type
"""
if input_type.lower() == "drive" or input_type.lower() == "\"drive\"":
result = ActivityType.DRIVE
elif (input_type.lower() == "wait" or
input_type.lower() == "\"wait\""):
result = ActivityType.WAIT
elif (input_type.lower() == "change" or
input_type.lower() == "\"change\""):
result = ActivityType.CHANGE
elif (input_type.lower() == "headway" or
input_type.lower() == "\"headway\""):
result = ActivityType.HEADWAY
elif (input_type.lower() == "turnaround" or
input_type.lower() == "\"turnaround\""):
result = ActivityType.TURNAROUND
else:
raise DataIllegalActivityTypeException(activity_id, input_type)
return result
def process_aperiodic_event(self, args: List[str], line_number: int) -> None:
"""
Process the content of an aperiodic event file.
:param args the content of the line.
:param line_number the line number, used for error handling
:raise exceptions if the line does not contain exactly 6 entries
if the specific types of the entries do not match
the expectations
                          if the event type is not defined
if the event cannot be added to the EAN
"""
if len(args) != 6:
raise InputFormatException(self.aperiodicEventsFileName, len(args),
6)
try:
event_id = int(args[0])
except ValueError:
raise InputTypeInconsistencyException(self.aperiodicEventsFileName,
1, line_number, "int",
args[0])
try:
periodic_event_id = int(args[1])
except ValueError:
raise InputTypeInconsistencyException(self.aperiodicEventsFileName,
2, line_number, "int",
args[1])
event_type = AperiodicEANReader.parse_event_type(args[2], event_id)
try:
time = int(args[3])
except ValueError:
raise InputTypeInconsistencyException(self.aperiodicEventsFileName,
4, line_number, "int",
args[3])
try:
passengers = float(args[4])
except ValueError:
raise InputTypeInconsistencyException(self.aperiodicEventsFileName,
5, line_number, "float",
args[4])
try:
stopId = int(args[5])
except ValueError:
raise InputTypeInconsistencyException(self.aperiodicEventsFileName,
6, line_number, "int",
args[5])
aperiodicEvent = AperiodicEvent(event_id, periodic_event_id, stopId,
event_type, time, passengers)
eventAdded = self.aperiodicEAN.addNode(aperiodicEvent)
if not eventAdded:
raise GraphNodeIdMultiplyAssignedException(event_id)
if self.timetable is not None:
self.timetable[aperiodicEvent] = time
def process_aperiodic_activity(self, args: [str], line_number: int) -> None:
"""
        Process the content of an aperiodic activity file.
:param args the content of the line
:param line_number the line number, used for error handling
:raise exceptions if the line does not contain exactly 8 entries
if the specific types of the entries do not match
the expectations
if the activity type is not defined
if the activity cannot be added to the EAN.
"""
if len(args) != 8:
raise InputFormatException(self.aperiodicActivitiesFileName,
len(args), 8)
try:
activityId = int(args[0])
except ValueError:
raise(InputTypeInconsistencyException(self.aperiodicActivitiesFileName,
1, line_number, "int",
args[0]))
try:
periodicActivityId = int(args[1])
except ValueError:
raise(InputTypeInconsistencyException(self.aperiodicActivitiesFileName,
2, line_number, "int",
args[1]))
activityType = AperiodicEANReader.parse_activity_type(args[2], activityId)
try:
sourceEventId = int(args[3])
except ValueError:
raise(InputTypeInconsistencyException(self.aperiodicActivitiesFileName,
4, line_number, "int",
args[3]))
try:
targetEventId = int(args[4])
except ValueError:
raise(InputTypeInconsistencyException(self.aperiodicActivitiesFileName,
5, line_number, "int",
args[4]))
try:
lowerBound = int(args[5])
except ValueError:
raise(InputTypeInconsistencyException(self.aperiodicActivitiesFileName,
6, line_number, "int",
args[5]))
try:
upperBound = int(args[6])
except ValueError:
raise(InputTypeInconsistencyException(self.aperiodicActivitiesFileName,
7, line_number, "int",
args[6]))
try:
passengers = float(args[7])
except ValueError:
            raise(InputTypeInconsistencyException(self.aperiodicActivitiesFileName,
                                                  8, line_number, "float",
                                                  args[7]))
sourceEvent = self.aperiodicEAN.getNode(sourceEventId)
if not sourceEvent:
raise GraphIncidentNodeNotFoundException(activityId, sourceEventId)
targetEvent = self.aperiodicEAN.getNode(targetEventId)
if not targetEvent:
raise GraphIncidentNodeNotFoundException(activityId, targetEventId)
aperiodicActivity = AperiodicActivity(activityId, periodicActivityId,
activityType, sourceEvent,
targetEvent, lowerBound,
upperBound, passengers)
activityAdded = self.aperiodicEAN.addEdge(aperiodicActivity)
if not activityAdded:
raise GraphEdgeIdMultiplyAssignedException(activityId)
def process_aperiodic_timetable_entry(self, args: [str], line_number) -> None:
"""
Process the content of a timetable file.
:param args the content of the line
:param line_number the line number, used for error handling
:raise exceptions if the line does not exactly contain 2 entries
if the specific types of the entries do not match
the expectations
if the event does not exist
"""
if len(args) != 2:
raise InputFormatException(self.aperiodicTimetableFileName,
len(args), 2)
try:
eventId = int(args[0])
except ValueError:
raise(InputTypeInconsistencyException(self.aperiodicTimetableFileName,
1, line_number, "int",
args[0]))
try:
time = int(args[1])
except ValueError:
raise(InputTypeInconsistencyException(self.aperiodicTimetableFileName,
2, line_number, "int",
args[1]))
event = self.aperiodicEAN.getNode(eventId)
if not event:
raise DataIndexNotFoundException("Aperiodic event", eventId)
event.setTime(time)
if self.timetable is not None:
self.timetable[event] = time
@staticmethod
def read(read_events: bool=True, read_activities: bool=True, read_seperate_timetable: bool=False,
read_disposition_timetable: bool=False, config: Config=default_config, event_file_name: str = "",
activity_file_name: str = "", timetable_file_name: str = "",
ean: Graph[AperiodicEvent, AperiodicActivity] = None, timetable: Timetable = None,
time_units_per_minute: int=0) -> (Graph[AperiodicEvent, AperiodicActivity], Timetable):
""""
Read the aperiodic EAN defined by the given file names. Will read the
timetable, if a file name is given. The data will be appended to the
fivfiven EAN and timetable object, if they are given. Otherwie, a new
EAN will be created.
        :param read_events: whether to read the events file
        :param read_activities: whether to read the activities file
        :param read_seperate_timetable: whether to read a separate timetable file
        :param read_disposition_timetable: whether to read the disposition timetable file
        :param config: the config to read default file names and settings from
        :param time_units_per_minute: the time units per minute, read from the config if 0
        :param event_file_name: the file name to read the events from
        :param activity_file_name: the file name to read the activities from
        :param timetable_file_name: the file name to read the timetable from
        :param ean: the aperiodic ean to store the read values in
        :param timetable: the aperiodic timetable to store the read timetable in
        :return: the read aperiodic EAN and timetable
"""
if not ean:
ean = SimpleDictGraph()
if time_units_per_minute == 0:
time_units_per_minute = config.getIntegerValue("time_units_per_minute")
if not timetable:
timetable = Timetable(time_units_per_minute)
if read_events and not event_file_name:
event_file_name = config.getStringValue("default_events_expanded_file")
if read_activities and not activity_file_name:
activity_file_name = config.getStringValue("default_activities_expanded_file")
if read_disposition_timetable and not timetable_file_name:
timetable_file_name = config.getStringValue("default_disposition_timetable_file")
elif read_seperate_timetable and not timetable_file_name:
timetable_file_name = config.getStringValue("default_timetable_expanded_file")
reader = AperiodicEANReader(activity_file_name, event_file_name, timetable_file_name,
ean, timetable)
if read_events:
CsvReader.readCsv(event_file_name,
reader.process_aperiodic_event)
if read_activities:
CsvReader.readCsv(activity_file_name,
reader.process_aperiodic_activity)
if read_disposition_timetable or read_seperate_timetable:
CsvReader.readCsv(timetable_file_name,
reader.process_aperiodic_timetable_entry)
return ean, timetable
class AperiodicEANWriter:
"""
    Implementation of an aperiodic EAN writer as a static method. Just call
    write.
"""
@staticmethod
def write(ean: Graph[AperiodicEvent, AperiodicActivity], timetable: Timetable = None,
config: Config = default_config, write_events: bool = True, events_file_name: str = "",
events_header: str = None, write_activities: bool = False, activities_file_name: str = "",
activities_header: str = "", write_timetable: bool=False, write_disposition_timetable: bool=False,
timetable_file_name: str="", timetable_header: str="") -> None:
"""
Write the given ean. Which data should be written can be controlled
with writeEvents, writeActivities and writeAperiodicTimetable. If no
filenames or headers are given for a datatype and it should be written,
the corresponding values are read from the given config (or the
default config, if none is given). For the timetable, the data will be
read from the events, if no timetable object is given.
        :param write_timetable: whether to write the timetable
        :param write_disposition_timetable: whether to write the disposition timetable
        :param timetable_file_name: the file name to write the timetable to
        :param timetable_header: the header to write in the timetable file
:param ean: the ean to write
:param config: the config to read from, if necessary values are not given
:param write_events: whether to write the events
        :param events_file_name: the file name to write the events to
        :param events_header: the header to write in the event file
        :param write_activities: whether to write the activities
        :param activities_file_name: the file name to write the activities to
        :param activities_header: the header to write in the activities file
        :param timetable: the timetable to take the event times from, if given
"""
if write_events:
if not events_file_name:
events_file_name = config.getStringValue("default_events_expanded_file")
if not events_header:
events_header = config.getStringValue("events_header")
CsvWriter.writeListStatic(events_file_name, ean.getNodes(), lambda e: e.toCsvStrings(None),
header=events_header)
if write_activities:
if not activities_file_name:
activities_file_name = config.getStringValue("default_activities_expanded_file")
if not activities_header:
activities_header = config.getStringValue("activities_header")
CsvWriter.writeListStatic(activities_file_name, ean.getEdges(), AperiodicActivity.toCsvStrings,
header=activities_header)
if write_disposition_timetable:
if not timetable_file_name:
timetable_file_name = config.getStringValue("default_disposition_timetable_file")
if not timetable_header:
timetable_header = config.getStringValue("timetable_header_disposition")
elif write_timetable:
if not timetable_file_name:
timetable_file_name = config.getStringValue("default_timetable_expanded_file")
if not timetable_header:
timetable_header = config.getStringValue("timetable_header")
if write_disposition_timetable or write_timetable:
CsvWriter.writeListStatic(timetable_file_name, ean.getNodes(),
lambda e: e.toCsvStringsForTimetable(timetable), header=timetable_header)
@staticmethod
def setEventTimesFromTimetable(ean: Graph[AperiodicEvent, AperiodicActivity], timetable: Timetable) -> None:
"""
Set the time members in the aperiodic events according to the values
mapped to by the given timetable object.
:param ean the EAN in which the event times are to be updated.
:param timetable the timetable object from which the updated times
shall be taken.
"""
for event in ean.getNodes():
try:
event.setTime(timetable[event])
except ValueError:
                # Ignore: for events that are not in the timetable, we don't want to set a new time.
pass
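# A minimal usage sketch (kept as a comment because the required input files
# and config keys depend on the surrounding LinTim dataset); it only uses the
# read/write entry points defined above:
#
#     ean, timetable = AperiodicEANReader.read(read_events=True,
#                                              read_activities=True,
#                                              read_seperate_timetable=True)
#     AperiodicEANWriter.write(ean, timetable=timetable, write_events=True,
#                              write_activities=True, write_timetable=True)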
|
from boa.builtins import breakpoint
def Main(operation):
result = False
if operation == 1:
m = 3
breakpoint()
result = True
elif operation == 2:
breakpoint()
result = False
elif operation == 3:
b = 'hello'
breakpoint()
j = 32
breakpoint()
result = True
elif operation == 4:
n = 2
res = another_method(n)
result = res
return result
def another_method(j):
q = j + 5
breakpoint()
return q
|
import argparse
import torch
from transformers import BertForSequenceClassification
def export_onnx_model(args, model, onnx_model_path):
with torch.no_grad():
inputs = {'input_ids': torch.ones(1,args.max_len, dtype=torch.int32),
'attention_mask': torch.ones(1,args.max_len, dtype=torch.int32),
'token_type_ids': torch.ones(1,args.max_len, dtype=torch.int32)}
outputs = model(**inputs)
symbolic_names = {0: 'batch_size', 1: 'max_seq_len'}
torch.onnx.export(model, # model being run
(inputs['input_ids'],
inputs['attention_mask'],
inputs['token_type_ids']), # model input (or a tuple for
# multiple inputs)
onnx_model_path, # where to save the model (can be a file
# or file-like object)
opset_version=11, # the ONNX version to export the model
do_constant_folding=True, # whether to execute constant folding
input_names=['input_ids', # the model's input names
'input_mask',
'segment_ids'],
output_names=['output'], # the model's output names
dynamic_axes={'input_ids': symbolic_names, # variable length axes
'input_mask' : symbolic_names,
'segment_ids' : symbolic_names})
print("ONNX Model exported to {0}".format(onnx_model_path))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Export bert onnx model',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--input_dir',
type=str,
help='input_dir of bert model, must contain config.json')
parser.add_argument(
'--task_name',
type=str,
choices=["MRPC", "MNLI", "SST-2"],
help='tasks names of bert model')
parser.add_argument(
'--max_len',
type=int,
default=128,
help='Maximum length of the sentence pairs')
parser.add_argument(
'--do_lower_case',
type=bool,
default=True,
help='whether lower the tokenizer')
parser.add_argument(
'--output_model',
type=str,
default='bert_mini_sst2.onnx',
help='path to exported model file')
args = parser.parse_args()
model = BertForSequenceClassification.from_pretrained(args.input_dir)
export_onnx_model(args, model, args.output_model)
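    # Optional sanity check (a sketch, assuming the optional onnxruntime and
    # numpy packages are available; the input dtypes must match the tensors
    # used for the export above):
    #
    #     import numpy as np
    #     import onnxruntime as ort
    #     session = ort.InferenceSession(args.output_model)
    #     dummy = np.ones((1, args.max_len), dtype=np.int32)
    #     logits = session.run(None, {'input_ids': dummy,
    #                                 'input_mask': dummy,
    #                                 'segment_ids': dummy})
    #     print('exported model output shape:', logits[0].shape)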
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow estimators for Linear and DNN joined training models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import six
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator.canned import dnn
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import linear
from tensorflow.python.estimator.canned import optimizers
from tensorflow.python.feature_column import feature_column_v2
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import losses
from tensorflow.python.summary import summary
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
from tensorflow.python.util.tf_export import estimator_export
# The default learning rates are a historical artifact of the initial
# implementation.
_DNN_LEARNING_RATE = 0.001
_LINEAR_LEARNING_RATE = 0.005
def _check_no_sync_replicas_optimizer(optimizer):
if isinstance(optimizer, sync_replicas_optimizer.SyncReplicasOptimizer):
raise ValueError(
'SyncReplicasOptimizer does not support multi optimizers case. '
'Therefore, it is not supported in DNNLinearCombined model. '
'If you want to use this optimizer, please use either DNN or Linear '
'model.')
def _linear_learning_rate(num_linear_feature_columns):
"""Returns the default learning rate of the linear model.
The calculation is a historical artifact of this initial implementation, but
has proven a reasonable choice.
Args:
num_linear_feature_columns: The number of feature columns of the linear
model.
Returns:
A float.
"""
default_learning_rate = 1. / math.sqrt(num_linear_feature_columns)
return min(_LINEAR_LEARNING_RATE, default_learning_rate)
def _add_layer_summary(value, tag):
summary.scalar('%s/fraction_of_zero_values' % tag, nn.zero_fraction(value))
summary.histogram('%s/activation' % tag, value)
def _dnn_linear_combined_model_fn(features,
labels,
mode,
head,
linear_feature_columns=None,
linear_optimizer='Ftrl',
dnn_feature_columns=None,
dnn_optimizer='Adagrad',
dnn_hidden_units=None,
dnn_activation_fn=nn.relu,
dnn_dropout=None,
input_layer_partitioner=None,
config=None,
batch_norm=False,
linear_sparse_combiner='sum'):
"""Deep Neural Net and Linear combined model_fn.
Args:
features: dict of `Tensor`.
labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of dtype
`int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
head: A `Head` instance.
linear_feature_columns: An iterable containing all the feature columns used
by the Linear model.
linear_optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training the Linear model. Defaults to the Ftrl
optimizer.
dnn_feature_columns: An iterable containing all the feature columns used by
the DNN model.
dnn_optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training the DNN model. Defaults to the Adagrad
optimizer.
dnn_hidden_units: List of hidden units per DNN layer.
dnn_activation_fn: Activation function applied to each DNN layer. If `None`,
will use `tf.nn.relu`.
dnn_dropout: When not `None`, the probability we will drop out a given DNN
coordinate.
input_layer_partitioner: Partitioner for input layer.
config: `RunConfig` object to configure the runtime settings.
batch_norm: Whether to use batch normalization after each hidden layer.
linear_sparse_combiner: A string specifying how to reduce the linear model
if a categorical column is multivalent. One of "mean", "sqrtn", and
"sum".
Returns:
An `EstimatorSpec` instance.
Raises:
ValueError: If both `linear_feature_columns` and `dnn_features_columns`
are empty at the same time, or `input_layer_partitioner` is missing,
or features has the wrong type.
"""
if not isinstance(features, dict):
raise ValueError('features should be a dictionary of `Tensor`s. '
'Given type: {}'.format(type(features)))
if not linear_feature_columns and not dnn_feature_columns:
raise ValueError(
'Either linear_feature_columns or dnn_feature_columns must be defined.')
num_ps_replicas = config.num_ps_replicas if config else 0
input_layer_partitioner = input_layer_partitioner or (
partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas,
min_slice_size=64 << 20))
shared_state_manager = feature_column_v2.maybe_create_shared_state_manager(
list(linear_feature_columns) + list(dnn_feature_columns))
# Build DNN Logits.
dnn_parent_scope = 'dnn'
if not dnn_feature_columns:
dnn_logits = None
else:
dnn_optimizer = optimizers.get_optimizer_instance(
dnn_optimizer, learning_rate=_DNN_LEARNING_RATE)
_check_no_sync_replicas_optimizer(dnn_optimizer)
if not dnn_hidden_units:
raise ValueError(
'dnn_hidden_units must be defined when dnn_feature_columns is '
'specified.')
dnn_partitioner = (
partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas))
with variable_scope.variable_scope(
dnn_parent_scope,
values=tuple(six.itervalues(features)),
partitioner=dnn_partitioner) as scope:
dnn_absolute_scope = scope.name
dnn_logit_fn = dnn._dnn_logit_fn_builder( # pylint: disable=protected-access
units=head.logits_dimension,
hidden_units=dnn_hidden_units,
feature_columns=dnn_feature_columns,
activation_fn=dnn_activation_fn,
dropout=dnn_dropout,
batch_norm=batch_norm,
input_layer_partitioner=input_layer_partitioner,
shared_state_manager=shared_state_manager)
dnn_logits = dnn_logit_fn(features=features, mode=mode)
linear_parent_scope = 'linear'
if not linear_feature_columns:
linear_logits = None
else:
linear_optimizer = optimizers.get_optimizer_instance(
linear_optimizer,
learning_rate=_linear_learning_rate(len(linear_feature_columns)))
_check_no_sync_replicas_optimizer(linear_optimizer)
with variable_scope.variable_scope(
linear_parent_scope,
values=tuple(six.itervalues(features)),
partitioner=input_layer_partitioner) as scope:
linear_absolute_scope = scope.name
logit_fn = linear._linear_logit_fn_builder( # pylint: disable=protected-access
units=head.logits_dimension,
feature_columns=linear_feature_columns,
sparse_combiner=linear_sparse_combiner)
linear_logits = logit_fn(features=features)
_add_layer_summary(linear_logits, scope.name)
# Combine logits and build full model.
if dnn_logits is not None and linear_logits is not None:
logits = dnn_logits + linear_logits
elif dnn_logits is not None:
logits = dnn_logits
else:
logits = linear_logits
def _train_op_fn(loss):
"""Returns the op to optimize the loss."""
train_ops = []
global_step = training_util.get_global_step()
if dnn_logits is not None:
train_ops.append(
dnn_optimizer.minimize(
loss,
var_list=ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES,
scope=dnn_absolute_scope)))
if linear_logits is not None:
train_ops.append(
linear_optimizer.minimize(
loss,
var_list=ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES,
scope=linear_absolute_scope)))
train_op = control_flow_ops.group(*train_ops)
with ops.control_dependencies([train_op]):
return state_ops.assign_add(global_step, 1).op
return head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
@estimator_export('estimator.DNNLinearCombinedClassifier')
class DNNLinearCombinedClassifier(estimator.Estimator):
"""An estimator for TensorFlow Linear and DNN joined classification models.
Note: This estimator is also known as wide-n-deep.
Example:
```python
numeric_feature = numeric_column(...)
categorical_column_a = categorical_column_with_hash_bucket(...)
categorical_column_b = categorical_column_with_hash_bucket(...)
categorical_feature_a_x_categorical_feature_b = crossed_column(...)
categorical_feature_a_emb = embedding_column(
categorical_column=categorical_feature_a, ...)
categorical_feature_b_emb = embedding_column(
categorical_id_column=categorical_feature_b, ...)
estimator = DNNLinearCombinedClassifier(
# wide settings
linear_feature_columns=[categorical_feature_a_x_categorical_feature_b],
linear_optimizer=tf.train.FtrlOptimizer(...),
# deep settings
dnn_feature_columns=[
categorical_feature_a_emb, categorical_feature_b_emb,
numeric_feature],
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.ProximalAdagradOptimizer(...),
# warm-start settings
warm_start_from="/path/to/checkpoint/dir")
# To apply L1 and L2 regularization, you can set dnn_optimizer to:
tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.001)
# To apply learning rate decay, you can set dnn_optimizer to a callable:
lambda: tf.AdamOptimizer(
learning_rate=tf.exponential_decay(
learning_rate=0.1,
global_step=tf.get_global_step(),
decay_steps=10000,
          decay_rate=0.96))
# It is the same for linear_optimizer.
# Input builders
  def input_fn_train(): # returns x, y
pass
estimator.train(input_fn=input_fn_train, steps=100)
  def input_fn_eval(): # returns x, y
pass
metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
  def input_fn_predict(): # returns x, None
pass
predictions = estimator.predict(input_fn=input_fn_predict)
```
Input of `train` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
- if `column` is a `_CategoricalColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `_WeightedCategoricalColumn`, two features: the first
with `key` the id column name, the second with `key` the weight column
name. Both features' `value` must be a `SparseTensor`.
- if `column` is a `_DenseColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
Loss is calculated by using softmax cross entropy.
@compatibility(eager)
Estimators can be used while eager execution is enabled. Note that `input_fn`
and all hooks are executed inside a graph context, so they have to be written
to be compatible with graph mode. Note that `input_fn` code using `tf.data`
generally works in both graph and eager modes.
@end_compatibility
"""
def __init__(self,
model_dir=None,
linear_feature_columns=None,
linear_optimizer='Ftrl',
dnn_feature_columns=None,
dnn_optimizer='Adagrad',
dnn_hidden_units=None,
dnn_activation_fn=nn.relu,
dnn_dropout=None,
n_classes=2,
weight_column=None,
label_vocabulary=None,
input_layer_partitioner=None,
config=None,
warm_start_from=None,
loss_reduction=losses.Reduction.SUM,
batch_norm=False,
linear_sparse_combiner='sum'):
"""Initializes a DNNLinearCombinedClassifier instance.
Args:
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator
to continue training a previously saved model.
linear_feature_columns: An iterable containing all the feature columns
used by linear part of the model. All items in the set must be
instances of classes derived from `FeatureColumn`.
linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the linear part of the model. Can also be a string (one of 'Adagrad',
'Adam', 'Ftrl', 'RMSProp', 'SGD'), or callable. Defaults to FTRL
optimizer.
dnn_feature_columns: An iterable containing all the feature columns used
by deep part of the model. All items in the set must be instances of
classes derived from `FeatureColumn`.
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the deep part of the model. Can also be a string (one of 'Adagrad',
'Adam', 'Ftrl', 'RMSProp', 'SGD'), or callable. Defaults to Adagrad
optimizer.
dnn_hidden_units: List of hidden units per layer. All layers are fully
connected.
dnn_activation_fn: Activation function applied to each layer. If None,
will use `tf.nn.relu`.
dnn_dropout: When not None, the probability we will drop out
a given coordinate.
n_classes: Number of label classes. Defaults to 2, namely binary
classification. Must be > 1.
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example. If it is a string, it is
used as a key to fetch weight tensor from the `features`. If it is a
`_NumericColumn`, raw tensor is fetched by key `weight_column.key`,
then weight_column.normalizer_fn is applied on it to get weight tensor.
label_vocabulary: A list of strings represents possible label values. If
given, labels must be string type and have any value in
`label_vocabulary`. If it is not given, that means labels are
already encoded as integer or float within [0, 1] for `n_classes=2` and
encoded as integer values in {0, 1,..., n_classes-1} for `n_classes`>2 .
Also there will be errors if vocabulary is not provided and labels are
string.
input_layer_partitioner: Partitioner for input layer. Defaults to
`min_max_variable_partitioner` with `min_slice_size` 64 << 20.
config: RunConfig object to configure the runtime settings.
warm_start_from: A string filepath to a checkpoint to warm-start from, or
a `WarmStartSettings` object to fully configure warm-starting. If the
string filepath is provided instead of a `WarmStartSettings`, then all
weights are warm-started, and it is assumed that vocabularies and Tensor
names are unchanged.
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how
to reduce training loss over batch. Defaults to `SUM`.
batch_norm: Whether to use batch normalization after each hidden layer.
linear_sparse_combiner: A string specifying how to reduce the linear model
if a categorical column is multivalent. One of "mean", "sqrtn", and
"sum" -- these are effectively different ways to do example-level
normalization, which can be useful for bag-of-words features. For more
details, see `tf.feature_column.linear_model`.
Raises:
ValueError: If both linear_feature_columns and dnn_features_columns are
empty at the same time.
"""
linear_feature_columns = linear_feature_columns or []
dnn_feature_columns = dnn_feature_columns or []
self._feature_columns = (
list(linear_feature_columns) + list(dnn_feature_columns))
if not self._feature_columns:
raise ValueError('Either linear_feature_columns or dnn_feature_columns '
'must be defined.')
if n_classes == 2:
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss( # pylint: disable=protected-access
weight_column=weight_column,
label_vocabulary=label_vocabulary,
loss_reduction=loss_reduction)
else:
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss( # pylint: disable=protected-access
n_classes,
weight_column=weight_column,
label_vocabulary=label_vocabulary,
loss_reduction=loss_reduction)
def _model_fn(features, labels, mode, config):
"""Call the _dnn_linear_combined_model_fn."""
return _dnn_linear_combined_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
linear_feature_columns=linear_feature_columns,
linear_optimizer=linear_optimizer,
dnn_feature_columns=dnn_feature_columns,
dnn_optimizer=dnn_optimizer,
dnn_hidden_units=dnn_hidden_units,
dnn_activation_fn=dnn_activation_fn,
dnn_dropout=dnn_dropout,
input_layer_partitioner=input_layer_partitioner,
config=config,
batch_norm=batch_norm,
linear_sparse_combiner=linear_sparse_combiner)
super(DNNLinearCombinedClassifier, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config,
warm_start_from=warm_start_from)
@estimator_export('estimator.DNNLinearCombinedRegressor')
class DNNLinearCombinedRegressor(estimator.Estimator):
"""An estimator for TensorFlow Linear and DNN joined models for regression.
Note: This estimator is also known as wide-n-deep.
Example:
```python
numeric_feature = numeric_column(...)
categorical_column_a = categorical_column_with_hash_bucket(...)
categorical_column_b = categorical_column_with_hash_bucket(...)
categorical_feature_a_x_categorical_feature_b = crossed_column(...)
categorical_feature_a_emb = embedding_column(
categorical_column=categorical_feature_a, ...)
categorical_feature_b_emb = embedding_column(
categorical_column=categorical_feature_b, ...)
estimator = DNNLinearCombinedRegressor(
# wide settings
linear_feature_columns=[categorical_feature_a_x_categorical_feature_b],
linear_optimizer=tf.train.FtrlOptimizer(...),
# deep settings
dnn_feature_columns=[
categorical_feature_a_emb, categorical_feature_b_emb,
numeric_feature],
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.ProximalAdagradOptimizer(...),
# warm-start settings
warm_start_from="/path/to/checkpoint/dir")
# To apply L1 and L2 regularization, you can set dnn_optimizer to:
tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.001)
# To apply learning rate decay, you can set dnn_optimizer to a callable:
lambda: tf.AdamOptimizer(
learning_rate=tf.exponential_decay(
learning_rate=0.1,
global_step=tf.get_global_step(),
decay_steps=10000,
          decay_rate=0.96))
# It is the same for linear_optimizer.
# Input builders
  def input_fn_train(): # returns x, y
pass
estimator.train(input_fn=input_fn_train, steps=100)
  def input_fn_eval(): # returns x, y
pass
metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
  def input_fn_predict(): # returns x, None
pass
predictions = estimator.predict(input_fn=input_fn_predict)
```
Input of `train` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
- if `column` is a `_CategoricalColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `_WeightedCategoricalColumn`, two features: the first
with `key` the id column name, the second with `key` the weight column
name. Both features' `value` must be a `SparseTensor`.
- if `column` is a `_DenseColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
Loss is calculated by using mean squared error.
@compatibility(eager)
Estimators can be used while eager execution is enabled. Note that `input_fn`
and all hooks are executed inside a graph context, so they have to be written
to be compatible with graph mode. Note that `input_fn` code using `tf.data`
generally works in both graph and eager modes.
@end_compatibility
"""
def __init__(self,
model_dir=None,
linear_feature_columns=None,
linear_optimizer='Ftrl',
dnn_feature_columns=None,
dnn_optimizer='Adagrad',
dnn_hidden_units=None,
dnn_activation_fn=nn.relu,
dnn_dropout=None,
label_dimension=1,
weight_column=None,
input_layer_partitioner=None,
config=None,
warm_start_from=None,
loss_reduction=losses.Reduction.SUM,
batch_norm=False,
linear_sparse_combiner='sum'):
"""Initializes a DNNLinearCombinedRegressor instance.
Args:
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator
to continue training a previously saved model.
linear_feature_columns: An iterable containing all the feature columns
used by linear part of the model. All items in the set must be
instances of classes derived from `FeatureColumn`.
linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the linear part of the model. Can also be a string (one of 'Adagrad',
'Adam', 'Ftrl', 'RMSProp', 'SGD'), or callable. Defaults to FTRL
optimizer.
dnn_feature_columns: An iterable containing all the feature columns used
by deep part of the model. All items in the set must be instances of
classes derived from `FeatureColumn`.
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the deep part of the model. Can also be a string (one of 'Adagrad',
'Adam', 'Ftrl', 'RMSProp', 'SGD'), or callable. Defaults to Adagrad
optimizer.
dnn_hidden_units: List of hidden units per layer. All layers are fully
connected.
dnn_activation_fn: Activation function applied to each layer. If None,
will use `tf.nn.relu`.
dnn_dropout: When not None, the probability we will drop out
a given coordinate.
label_dimension: Number of regression targets per example. This is the
size of the last dimension of the labels and logits `Tensor` objects
(typically, these have shape `[batch_size, label_dimension]`).
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example. If it is a string, it is
used as a key to fetch weight tensor from the `features`. If it is a
`_NumericColumn`, raw tensor is fetched by key `weight_column.key`,
then weight_column.normalizer_fn is applied on it to get weight tensor.
input_layer_partitioner: Partitioner for input layer. Defaults to
`min_max_variable_partitioner` with `min_slice_size` 64 << 20.
config: RunConfig object to configure the runtime settings.
warm_start_from: A string filepath to a checkpoint to warm-start from, or
a `WarmStartSettings` object to fully configure warm-starting. If the
string filepath is provided instead of a `WarmStartSettings`, then all
weights are warm-started, and it is assumed that vocabularies and Tensor
names are unchanged.
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how
to reduce training loss over batch. Defaults to `SUM`.
batch_norm: Whether to use batch normalization after each hidden layer.
linear_sparse_combiner: A string specifying how to reduce the linear model
if a categorical column is multivalent. One of "mean", "sqrtn", and
"sum" -- these are effectively different ways to do example-level
normalization, which can be useful for bag-of-words features. For more
details, see `tf.feature_column.linear_model`.
Raises:
ValueError: If both linear_feature_columns and dnn_features_columns are
empty at the same time.
"""
linear_feature_columns = linear_feature_columns or []
dnn_feature_columns = dnn_feature_columns or []
self._feature_columns = (
list(linear_feature_columns) + list(dnn_feature_columns))
if not self._feature_columns:
raise ValueError('Either linear_feature_columns or dnn_feature_columns '
'must be defined.')
def _model_fn(features, labels, mode, config):
"""Call the _dnn_linear_combined_model_fn."""
return _dnn_linear_combined_model_fn(
features=features,
labels=labels,
mode=mode,
head=head_lib._regression_head( # pylint: disable=protected-access
label_dimension=label_dimension, weight_column=weight_column,
loss_reduction=loss_reduction),
linear_feature_columns=linear_feature_columns,
linear_optimizer=linear_optimizer,
dnn_feature_columns=dnn_feature_columns,
dnn_optimizer=dnn_optimizer,
dnn_hidden_units=dnn_hidden_units,
dnn_activation_fn=dnn_activation_fn,
dnn_dropout=dnn_dropout,
input_layer_partitioner=input_layer_partitioner,
config=config,
batch_norm=batch_norm,
linear_sparse_combiner=linear_sparse_combiner)
super(DNNLinearCombinedRegressor, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config,
warm_start_from=warm_start_from)
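# A minimal construction sketch (kept as a comment; it assumes the TF 1.x
# `tf.feature_column` API and an `import tensorflow as tf` on the caller side,
# and `my_input_fn` is a placeholder returning a (features, labels) tuple):
#
#     price = tf.feature_column.numeric_column('price')
#     zip_code = tf.feature_column.categorical_column_with_hash_bucket('zip', 100)
#     est = DNNLinearCombinedClassifier(
#         linear_feature_columns=[zip_code],
#         dnn_feature_columns=[tf.feature_column.embedding_column(zip_code, 8), price],
#         dnn_hidden_units=[16, 8])
#     est.train(input_fn=my_input_fn, steps=100)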
|
import pandas as pd
import json, hashlib, os, codecs, base64
from igf_data.igfdb.baseadaptor import BaseAdaptor
from igf_data.igfdb.igfTables import User
class UserAdaptor(BaseAdaptor):
'''
An adaptor class for table User
'''
def _email_check(self, email):
'''
An internal function to check if email_id has '@' or not
:param email: a string containing the email id
'''
if '@' not in email:
raise ValueError('Email id {0} is not correctly formatted'.format(email))
def _encrypt_password(self, series, password_column='password',
salt_column='encryption_salt',
ht_pass_column='ht_password'):
'''
An internal function for encrypting password
:param series: A pandas data series
:param password_column: Name of the password column, default password
:param salt_column: Name of the salt column, default encryption_salt
:param ht_pass_column: Name of the ht_password column, default ht_password
:returns: A pandas series
'''
try:
      if not isinstance(series, pd.Series):
        series=pd.Series(series)
if password_column in series.index and \
not pd.isnull(series[password_column]): # password is optional
salt=codecs.encode(os.urandom(32),"hex").decode("utf-8") # calculate salt value
password=series[password_column] # fetch password
        if not isinstance(password, str):
          password=str(series[password_column]) # cast password to a string if needed
if password: # always encrypt password
ht_pass=\
'{0}{1}'.format(\
'{SHA}',
base64.b64encode(\
hashlib.sha1(password.encode('utf-8')).\
digest()).decode()) # calculate sha1 for htaccess password
series[ht_pass_column]=ht_pass # set htaccess password
key=salt+password # construct key using salt and password
password=hashlib.sha512(str(key).encode('utf-8')).hexdigest() # create password hash
series[password_column]=password # set hash to data series
series[salt_column]=salt # set salt to data series
return series
except:
raise
  def _map_missing_user_status(self,data_series,category_column,hpc_user_column,
                               hpc_user,non_hpc_user):
'''
An internal function for assigning user status
:param data_series: A pandas data series
    :param category_column: Name of the category column
:param hpc_user_column: Name of the hpc username column
:param hpc_user: HPC user tag
:param non_hpc_user: Non HPC user tag
:returns: A pandas data series
'''
try:
      if not isinstance(data_series, pd.Series):
        data_series=pd.Series(data_series)
      if category_column not in data_series or \
         pd.isnull(data_series[category_column]):
        if hpc_user_column in data_series and \
           not pd.isnull(data_series[hpc_user_column]) and \
           data_series[hpc_user_column]!='':
          data_series[category_column]=hpc_user # assign hpc user
        else:
          data_series[category_column]=non_hpc_user # non hpc user
return data_series
except:
raise
  def _preprocess_data(self,data, password_column='password', category_column='category',
email_column='email_id', hpc_user_column='hpc_username',
hpc_user='HPC_USER', non_hpc_user='NON_HPC_USER',
user_igf_id_column='user_igf_id', username_column='username',
salt_column='encryption_salt'):
'''
An internal function for preprocess data before loading
    :param data: A pandas dataframe or a list of dictionaries
:param password_column: Name of the password column, default password
    :param category_column: Name of the user category column, default category
:param email_column: Name of the email id column, default email_id
:param hpc_user_column: Name of the hpc username column, default hpc_username
:param hpc_user: Tag name for HPC user, default HPC_USER
:param non_hpc_user: Tag name for non HPC user, default NON_HPC_USER
:param user_igf_id_column: Name of the user id column, default user_igf_id
:param username_column: Name of the igf username column, default username
:param salt_column: Name of the salt column, default encryption_salt
:returns: A pandas dataframe
'''
try:
if not isinstance(data, pd.DataFrame):
data=pd.DataFrame(data)
new_data=data.apply(lambda x: self._encrypt_password(series=x),1) # encrypt password
      new_data[email_column].map(lambda x: self._email_check(email=x)) # check email id, it should contain '@'
new_data=new_data.fillna('')
      if category_column not in new_data.columns:
        new_data[category_column]=None # add category column if it doesn't exist
      new_data=\
        new_data.apply(\
          lambda x: self._map_missing_user_status(\
                      data_series=x,
                      category_column=category_column,
                      hpc_user_column=hpc_user_column,
                      hpc_user=hpc_user,
                      non_hpc_user=non_hpc_user),
          axis=1) # assign category; if user has hpc_username, then it's 'HPC_USER'
return new_data
except:
raise
def store_user_data(self, data, autosave=True):
'''
Load data to user table
:param data: A pandas dataframe
:param autosave: A toggle for autocommit, default True
:returns: None
'''
try:
if not isinstance(data, pd.DataFrame):
data=pd.DataFrame(data)
data=self._preprocess_data(data=data)
self.store_records(table=User, data=data, mode='serial' )
if autosave:
self.commit_session()
except:
if autosave:
self.rollback_session()
raise
def fetch_user_records_igf_id(self, user_igf_id):
'''
A method for fetching data for User table
:param user_igf_id: an igf id
:returns: user object
'''
try:
user=\
self.fetch_records_by_column(\
table=User,
column_name=User.user_igf_id,
column_id=user_igf_id,
output_mode='one' )
return user
except:
raise
def fetch_user_records_email_id(self, user_email_id):
'''
A method for fetching data for User table
:param user_email_id: an email id
:returns: user object
'''
try:
user=\
self.fetch_records_by_column(\
table=User,
column_name=User.email_id,
column_id=user_email_id,
output_mode='one' )
return user
except:
raise
def check_user_records_email_id(self,email_id):
'''
A method for checking existing user data in db
:param email_id: An email id
:returns: True if the file is present in db or False if its not
'''
try:
user_check=False
user_obj=\
self.fetch_records_by_column(\
table=User,
column_name=User.email_id,
column_id=email_id,
output_mode='one_or_none' )
if user_obj is not None:
user_check=True
return user_check
except:
raise
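# A minimal usage sketch (kept as a comment; it assumes a SQLAlchemy session
# class configured through BaseAdaptor, and the column names shown are only
# illustrative examples of the dataframe columns handled by _preprocess_data):
#
#     ua = UserAdaptor(**{'session_class': session_class})
#     ua.start_session()
#     ua.store_user_data(data=[{'user_igf_id': 'IGF001',
#                               'name': 'A User',
#                               'email_id': 'a.user@example.com',
#                               'username': 'auser',
#                               'hpc_username': 'auser'}])
#     print(ua.check_user_records_email_id('a.user@example.com'))
#     ua.close_session()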
|
# coding: utf-8
import re
class CanonicalPhoneGenerationException(Exception):
pass
def gen_canonical_phone(original_phone, first_number='7', check_code=True):
    # strip all non-digit characters
    phone = re.sub(r'\D', '', original_phone)
    if not (10 <= len(phone) <= 11):
        raise CanonicalPhoneGenerationException('The number must consist of 10 or 11 digits (%s)' % str(original_phone))
    # drop the leading digit of an 11-digit number
    phone = phone[-10:]
    if check_code and phone[0] != '9':
        raise CanonicalPhoneGenerationException('The area code must start with 9 (%s)' % str(original_phone))
    # prepend the leading country digit (7 by default)
    phone = '%s%s' % (first_number, phone)
return phone
def canonise_phone(phone):
"""
    Tries to bring the phone number to its canonical form.
    On failure, returns the original number.
"""
if phone:
try:
phone = gen_canonical_phone(phone)
except CanonicalPhoneGenerationException:
pass
return phone
def prettify_phone(phone):
import phonenumbers
parsed_number = phonenumbers.parse(phone, 'RU')
pretty_number = phonenumbers.format_number(parsed_number, phonenumbers.PhoneNumberFormat.NATIONAL)
return pretty_number
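# A small self-check of the canonicalisation rules implemented above: non-digit
# characters are stripped, the number is reduced to its last 10 digits and the
# leading country digit is prepended. prettify_phone is not exercised here
# because it needs the optional phonenumbers package.
if __name__ == '__main__':
    assert gen_canonical_phone('8 (912) 345-67-89') == '79123456789'
    assert canonise_phone('not a phone') == 'not a phone'  # falls back to the input
    print('phone canonicalisation self-check passed')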
|
# -*- coding: utf-8 -*-
import datetime
from collections import OrderedDict
from gluon import current
from gluon.storage import Storage
from s3 import S3Method
from .controllers import deploy_index
RED_CROSS = "Red Cross / Red Crescent"
def config(settings):
"""
Template settings for IFRC's Resource Management System
- Americas Zone
http://eden.sahanafoundation.org/wiki/Deployments/IFRC
"""
T = current.T
# -------------------------------------------------------------------------
# System Name
#
settings.base.system_name = T("Resource Management System")
settings.base.system_name_short = T("RMS")
# -------------------------------------------------------------------------
# Pre-Populate
#
settings.base.prepopulate.append("RMSAmericas")
settings.base.prepopulate_demo.append("RMSAmericas/Demo")
# -------------------------------------------------------------------------
# Theme (folder to use for views/layout.html)
#
settings.base.theme = "RMSAmericas"
# Uncomment to disable responsive behavior of datatables
#settings.ui.datatables_responsive = False
# Uncomment to show a default cancel button in standalone create/update forms
settings.ui.default_cancel_button = True
# @todo: configure custom icons
#settings.ui.custom_icons = {
# "male": "icon-male",
# "female": "icon-female",
# "medical": "icon-plus-sign-alt",
#}
# =========================================================================
# System Settings
# -------------------------------------------------------------------------
# Security Policy
settings.security.policy = 8 # Delegations
settings.security.map = True
# Authorization Settings
settings.auth.registration_requires_approval = True
settings.auth.registration_requires_verification = True
settings.auth.registration_requests_organisation = True
settings.auth.registration_organisation_required = True
settings.auth.registration_requests_site = True
settings.auth.registration_link_user_to = {"staff": T("Staff"),
"volunteer": T("Volunteer"),
#"member": T("Member")
}
# This hides the options from the UI
#settings.auth.registration_link_user_to_default = ["volunteer"]
#settings.auth.record_approval = True
# @ToDo: Should we fallback to organisation_id if site_id is None?
settings.auth.registration_roles = {"site_id": ["reader",
],
}
# Owner Entity
settings.auth.person_realm_human_resource_site_then_org = True
settings.auth.person_realm_member_org = True
# Activate entity role manager tabs for OrgAdmins
settings.auth.entity_role_manager = True
def ifrc_realm_entity(table, row):
"""
Assign a Realm Entity to records
"""
tablename = table._tablename
# Do not apply realms for Master Data
# @ToDo: Restore Realms and add a role/functionality support for Master Data
if tablename in ("hrm_certificate",
"hrm_department",
"hrm_job_title",
"hrm_course",
"hrm_programme",
"member_membership_type",
"vol_award",
):
return None
db = current.db
s3db = current.s3db
# Entity reference fields
EID = "pe_id"
OID = "organisation_id"
SID = "site_id"
#GID = "group_id"
PID = "person_id"
# Owner Entity Foreign Key
realm_entity_fks = {"pr_contact": [("org_organisation", EID),
#("po_household", EID),
("pr_person", EID),
],
"pr_contact_emergency": EID,
"pr_physical_description": EID,
"pr_address": [("org_organisation", EID),
("pr_person", EID),
],
"pr_image": EID,
"pr_identity": PID,
"pr_education": PID,
"pr_note": PID,
"hrm_human_resource": SID,
"hrm_training": PID,
"hrm_training_event": OID,
"inv_adj": SID,
"inv_recv": SID,
"inv_send": SID,
"inv_inv_item": SID,
"inv_track_item": "track_org_id",
"inv_adj_item": "adj_id",
"req_req_item": "req_id",
#"po_household": "area_id",
#"po_organisation_area": "area_id",
}
# Default Foreign Keys (ordered by priority)
default_fks = (#"household_id",
"catalog_id",
"project_id",
"project_location_id",
)
# Link Tables
#realm_entity_link_table = {
# "project_task": Storage(tablename = "project_task_project",
# link_key = "task_id"
# )
# }
#if tablename in realm_entity_link_table:
# # Replace row with the record from the link table
# link_table = realm_entity_link_table[tablename]
# table = s3db[link_table.tablename]
# rows = db(table[link_table.link_key] == row.id).select(table.id,
# limitby=(0, 1))
# if rows:
# # Update not Create
# row = rows.first()
# Check if there is a FK to inherit the realm_entity
realm_entity = 0
fk = realm_entity_fks.get(tablename, None)
fks = [fk] if not isinstance(fk, list) else list(fk)
fks.extend(default_fks)
for default_fk in fks:
if isinstance(default_fk, tuple):
instance_type, fk = default_fk
else:
instance_type, fk = None, default_fk
if fk not in table.fields:
continue
# Inherit realm_entity from parent record
if fk == EID:
if instance_type:
ftable = s3db.table(instance_type)
if not ftable:
continue
else:
ftable = s3db.pr_person
query = (ftable[EID] == row[EID])
else:
ftablename = table[fk].type[10:] # reference tablename
ftable = s3db[ftablename]
query = (table.id == row["id"]) & \
(table[fk] == ftable.id)
record = db(query).select(ftable.realm_entity,
limitby = (0, 1)
).first()
if record:
realm_entity = record.realm_entity
break
#else:
# Continue to loop through the rest of the default_fks
# Fall back to default get_realm_entity function
use_user_organisation = False
#use_user_root_organisation = False
# Suppliers & Partners are owned by the user's organisation
if realm_entity == 0 and tablename == "org_organisation":
ottable = s3db.org_organisation_type
ltable = db.org_organisation_organisation_type
query = (ltable.organisation_id == row["id"]) & \
(ltable.organisation_type_id == ottable.id)
otype = db(query).select(ottable.name,
limitby = (0, 1)
).first()
if not otype or otype.name != RED_CROSS:
use_user_organisation = True
# Facilities, Forums & Requisitions are owned by the user's organisation
elif tablename in ("org_facility", "pr_forum", "req_req"):
use_user_organisation = True
elif tablename == "hrm_training":
# Inherit realm entity from the related HR record
htable = s3db.hrm_human_resource
query = (table.id == row["id"]) & \
(htable.person_id == table.person_id) & \
(htable.deleted != True)
rows = db(query).select(htable.realm_entity,
limitby = (0, 2)
)
if len(rows) == 1:
realm_entity = rows.first().realm_entity
else:
# Ambiguous => try course organisation
ctable = s3db.hrm_course
otable = s3db.org_organisation
query = (table.id == row["id"]) & \
(ctable.id == table.course_id) & \
(otable.id == ctable.organisation_id)
org = db(query).select(otable.pe_id,
limitby = (0, 1)
).first()
if org:
realm_entity = org.pe_id
# otherwise: inherit from the person record
# Groups are owned by the user's organisation
#elif tablename in ("pr_group",):
elif tablename == "pr_group":
use_user_organisation = True
auth = current.auth
user = auth.user
if user:
if use_user_organisation:
# @ToDo - this might cause issues if the user's org is different from the realm that gave them permissions to create the Org
realm_entity = s3db.pr_get_pe_id("org_organisation",
user.organisation_id)
#elif use_user_root_organisation:
# realm_entity = s3db.pr_get_pe_id("org_organisation",
# auth.root_org())
return realm_entity
settings.auth.realm_entity = ifrc_realm_entity
# -------------------------------------------------------------------------
# L10n (Localization) settings
#
settings.L10n.languages = OrderedDict([
("en", "English"),
("pt-br", "Portuguese (Brazil)"),
("es", "Spanish"),
])
# Default Language
settings.L10n.default_language = "en"
# Default timezone for users
settings.L10n.timezone = "America/Bogota"
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Unsortable 'pretty' date format (for use in English)
settings.L10n.date_format = "%d-%b-%Y"
# Make last name in person/user records mandatory
#settings.L10n.mandatory_lastname = True # mother's surname
settings.L10n.mandatory_middlename = True # father's surname
# Uncomment this to Translate Layer Names
settings.L10n.translate_gis_layer = True
# Translate Location Names
settings.L10n.translate_gis_location = True
# Uncomment this for Alternate Location Names
settings.L10n.name_alt_gis_location = True
# Uncomment this to Translate Organisation Names/Acronyms
settings.L10n.translate_org_organisation = True
# Names of Orgs with specific settings
HNRC = "Honduran Red Cross"
# -------------------------------------------------------------------------
# Finance settings
#
def currencies(default):
""" RMS- and NS-specific currencies (lazy setting) """
# Currencies that are common for all NS
currencies = {"EUR" : "Euros",
"CHF" : "Swiss Francs",
"USD" : "United States Dollars",
}
# NS-specific currencies
root_org = current.auth.root_org_name()
if root_org == HNRC:
currencies["HNL"] = "Honduran Lempira"
return currencies
settings.fin.currencies = currencies
def currency_default(default):
""" NS-specific default currencies (lazy setting) """
root_org = current.auth.root_org_name()
if root_org == HNRC:
default = "HNL"
#else:
# default = "USD"
return default
settings.fin.currency_default = currency_default
def currency_represent(currency):
""" NS-specific currency represent """
if currency == "HNL":
root_org = current.auth.root_org_name()
if root_org == HNRC:
return "L"
return currency
# -------------------------------------------------------------------------
# Map Settings
# Display Resources recorded to Admin-Level Locations on the map
# @ToDo: Move into gis_config?
settings.gis.display_L0 = True
# Uncomment to display the Map Legend as a floating DIV
settings.gis.legend = "float"
# GeoNames username
settings.gis.geonames_username = "rms_dev"
# @ToDo: Lazy fn once we have NS to enable this for
# (off for HN & off by default)
settings.gis.postcode_selector = False
# -------------------------------------------------------------------------
# Use the label 'Camp' instead of 'Shelter'
#
settings.ui.camp = True
# -------------------------------------------------------------------------
# Filter Manager
#
#settings.search.filter_manager = False
# -------------------------------------------------------------------------
# Default Summary
#
settings.ui.summary = ({"common": True,
"name": "add",
"widgets": [{"method": "create"}],
},
{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}],
},
{"name": "charts",
"label": "Report",
"widgets": [{"method": "report", "ajax_init": True}],
},
{"name": "map",
"label": "Map",
"widgets": [{"method": "map", "ajax_init": True}],
},
)
# -------------------------------------------------------------------------
# Content Management
#
#settings.cms.hide_index = True
settings.cms.richtext = True
# -------------------------------------------------------------------------
# Messaging
# Parser
#settings.msg.parser = "IFRC"
# =========================================================================
# Module Settings
# -------------------------------------------------------------------------
# Members
#
settings.member.cv_tab = True
# -------------------------------------------------------------------------
# Organisations
#
# Enable the use of Organisation Branches
settings.org.branches = True
# Set the length of the auto-generated org/site code the default is 10
#settings.org.site_code_len = 3
# Set the label for Sites
settings.org.site_label = "Office/Warehouse/Facility"
# Enable certain fields just for specific Organisations
#settings.org.dependent_fields = \
# {"pr_person.middle_name" : (CVTL, VNRC),
# "pr_person_details.mother_name" : (BRCS, ),
# "pr_person_details.father_name" : (ARCS, BRCS),
# "pr_person_details.grandfather_name" : (ARCS, ),
# "pr_person_details.affiliations" : (PRC, ),
# "pr_person_details.company" : (PRC, ),
# "vol_details.availability" : (VNRC, ),
# "vol_details.card" : (ARCS, ),
# "vol_volunteer_cluster.vol_cluster_type_id" : (PRC, ),
# "vol_volunteer_cluster.vol_cluster_id" : (PRC, ),
# "vol_volunteer_cluster.vol_cluster_position_id" : (PRC, ),
# }
# -------------------------------------------------------------------------
# Human Resource Management
#
# Require Staff & Volunteers to have an email address (set to False to allow registration without one)
settings.hrm.email_required = True
settings.hrm.mix_staff = True
# Uncomment to show the Organisation name in HR represents
settings.hrm.show_organisation = True
# Uncomment to allow HRs to have multiple Job Titles
#settings.hrm.multiple_job_titles = True
# Uncomment to have each root Org use a different Job Title Catalog
settings.hrm.org_dependent_job_titles = True
settings.hrm.staff_departments = False
settings.hrm.teams = False
# Uncomment to disable the use of HR Credentials
settings.hrm.use_credentials = False
# Uncomment to disable the use of HR Certificates
#settings.hrm.use_certificates = False
# Uncomment to filter certificates by (root) Organisation & hence not allow Certificates from other orgs to be added to a profile (except by Admin)
#settings.hrm.filter_certificates = True
# Uncomment to auto-create certificates for courses
settings.hrm.create_certificates_from_courses = "organisation_id"
settings.hrm.use_code = True
settings.hrm.use_description = None # Replaced by Medical Information
# Uncomment to enable the use of HR Education
settings.hrm.use_education = True
# Uncomment to hide Job Titles
settings.hrm.use_job_titles = False
settings.hrm.use_medical = "Medical Information"
settings.hrm.use_national_id = True
settings.hrm.use_skills = True
# Custom label for Organisations in HR module
settings.hrm.organisation_label = "National Society / Branch"
# Custom label for Top-level Organisations in HR module
settings.hrm.root_organisation_label = "National Society"
# Uncomment to consolidate tabs into a single CV
settings.hrm.cv_tab = True
settings.hrm.vol_experience = "programme"
# Uncomment to consolidate tabs into Staff Record (set to False to hide the tab)
settings.hrm.record_tab = "record"
# Use Locations for Training Events, not Facilities
settings.hrm.event_site = False
# Training Instructors are Multiple
settings.hrm.training_instructors = "multiple"
# Training Filters are Contains
settings.hrm.training_filter_and = True
settings.hrm.record_label = "Information"
# Pass marks are defined by Course
settings.hrm.course_pass_marks = True
# Work History & Missions
settings.hrm.staff_experience = "both"
# Uncomment to do a search for duplicates in the new AddPersonWidget2
settings.pr.lookup_duplicates = True
settings.pr.separate_name_fields = 3
#def dob_required(default):
# """ NS-specific dob_required (lazy setting) """
# if current.auth.override is True:
# default = False
# else:
# root_org = current.auth.root_org_name()
# if root_org == HNRC:
# default = False
# else:
# # Human Talent module for zone
# default = True
# return default
#settings.pr.dob_required = dob_required
def hrm_course_grades(default):
""" Course Grades """
default = {0: T("No Show"),
1: T("Left Early"),
#2: T("Attendance"),
8: T("Pass"),
9: T("Fail"),
}
return default
settings.hrm.course_grades = hrm_course_grades
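# NB hrm_training_onaccept (below) relies on grade 8 meaning 'Pass' when
# deciding whether to auto-add someone to the RIT alert list.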
# =========================================================================
def vol_programme_active(person_id):
"""
Whether a Volunteer counts as 'Active' based on the number of hours
they've done (both Trainings & Programmes) per month, averaged over
the last year.
If nothing has been recorded for the last 3 months, don't penalise, as we
assume that data entry simply hasn't been done yet.
@ToDo: This should be based on the HRM record, not Person record
- could be active with Org1 but not with Org2
"""
now = current.request.utcnow
# Time spent on Programme work
htable = current.s3db.hrm_programme_hours
query = (htable.deleted == False) & \
(htable.person_id == person_id) & \
(htable.date != None)
programmes = current.db(query).select(htable.hours,
htable.date,
orderby=htable.date)
if programmes:
# Ignore up to 3 months of records
three_months_prior = (now - datetime.timedelta(days=92))
end = max(programmes.last().date, three_months_prior.date())
last_year = end - datetime.timedelta(days=365)
# Is this the Volunteer's first year?
if programmes.first().date > last_year:
# Only start counting from their first month
start = programmes.first().date
else:
# Start from a year before the latest record
start = last_year
# Total hours between start and end
programme_hours = 0
for programme in programmes:
if programme.date >= start and programme.date <= end and programme.hours:
programme_hours += programme.hours
# Average hours per month
months = max(1, (end - start).days / 30.5)
average = programme_hours / months
# Active?
if average >= 8:
return True
return False
def hrm_vol_active(default):
""" Whether & How to track Volunteers as Active """
#root_org = current.auth.root_org_name()
#if root_org in (ARCS, IRCS):
# # Simple checkbox
# return True
#elif root_org in (CVTL, PMI, PRC):
# # Use formula based on hrm_programme
# return vol_programme_active
#elif root_org in (CRMADA, ):
# # Use formula based on vol_activity
# return vol_activity_active
#return False
# Use formula based on hrm_programme
return vol_programme_active
settings.hrm.vol_active = hrm_vol_active
settings.hrm.vol_active_tooltip = "A volunteer is defined as active if they've participated in an average of 8 or more hours of Program work or Trainings per month in the last year"
# Roles which are permitted to export ID cards
ID_CARD_EXPORT_ROLES = ("ORG_ADMIN", "hr_manager", "hr_assistant")
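# Checked in customise_hrm_human_resource_controller below before exposing
# the ID-card export format.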
# -------------------------------------------------------------------------
# RIT
settings.deploy.team_label = "RIT"
settings.customise_deploy_home = deploy_index
# Alerts get sent to all recipients
settings.deploy.manual_recipients = False
settings.deploy.post_to_twitter = True
# -------------------------------------------------------------------------
# Projects
settings.project.assign_staff_tab = False
# Uncomment this to use settings suitable for a global/regional organisation (e.g. DRR)
settings.project.mode_3w = True
# Uncomment this to use DRR (Disaster Risk Reduction) extensions
settings.project.mode_drr = True
# Uncomment this to use Activity Types for Activities & Projects
#settings.project.activity_types = True
# Uncomment this to use Codes for projects
settings.project.codes = True
# Uncomment this to call project locations 'Communities'
#settings.project.community = True
# Uncomment this to enable Demographics in 3W projects
#settings.project.demographics = True
# Uncomment this to enable Hazards in 3W projects
settings.project.hazards = True
# Uncomment this to enable Indicators in projects
# Just HNRC
#settings.project.indicators = True
# Uncomment this to use multiple Budgets per project
settings.project.multiple_budgets = True
# Uncomment this to use multiple Organisations per project
settings.project.multiple_organisations = True
# Ondelete behaviour for ProjectPlanningModel
settings.project.planning_ondelete = "RESTRICT"
# Uncomment this to enable Programmes in projects
settings.project.programmes = True
# Uncomment this to enable Themes in 3W projects
settings.project.themes = True
# Customise the Project Organisation Roles
# (used as links to Filtered Components for Donors & Partners)
settings.project.organisation_roles = {
1: T("Host National Society"),
2: T("Partner"),
3: T("Donor"),
#4: T("Customer"), # T("Beneficiary")?
#5: T("Supplier"),
9: T("Partner National Society"),
}
# -------------------------------------------------------------------------
# Inventory Management
# Hide Staff Management Tabs for Facilities in Inventory Module
settings.inv.facility_manage_staff = False
settings.inv.show_mode_of_transport = True
settings.inv.send_show_time_in = True
#settings.inv.collapse_tabs = True
# Uncomment if you need a simpler (but less accountable) process for managing stock levels
#settings.inv.direct_stock_edits = True
#settings.inv.org_dependent_warehouse_types = True
# Settings for HNRC:
settings.inv.stock_count = False
settings.inv.item_status = {#0: current.messages["NONE"], # Not defined yet
0: T("Good"),
1: T("Damaged"),
#1: T("Dump"),
#2: T("Sale"),
#3: T("Reject"),
#4: T("Surplus")
}
settings.inv.recv_types = {#0: current.messages["NONE"], In Shipment Types
#11: T("Internal Shipment"), In Shipment Types
32: T("Donation"),
34: T("Purchase"),
36: T("Consignment"), # Borrowed
37: T("In Transit"), # Loaning warehouse space to another agency
}
# -------------------------------------------------------------------------
# Request Management
# Uncomment to disable Inline Forms in Requests module
settings.req.inline_forms = False
settings.req.req_type = ["Stock"]
settings.req.use_commit = False
# Should Requests ask whether Transportation is required?
settings.req.ask_transport = True
settings.req.pack_values = False
# Disable Request Matching as we don't want users making requests to see what stock is available
settings.req.prompt_match = False
# Uncomment to disable Recurring Request
settings.req.recurring = False # HNRC
# =========================================================================
# Template Modules
#
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = "RMS",
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
#module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
#module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
#module_type = None # No Menu
)),
("errors", Storage(
name_nice = T("Ticket Viewer"),
#description = "Needed for Breadcrumbs",
restricted = False,
#module_type = None # No Menu
)),
("setup", Storage(
name_nice = T("Setup"),
#description = "WebSetup",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # No Menu
)),
("sync", Storage(
name_nice = T("Synchronization"),
#description = "Synchronization",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
#module_type = None # This item is handled separately for the menu
)),
("translate", Storage(
name_nice = T("Translation Functionality"),
#description = "Selective translation of strings based on module.",
#module_type = None,
)),
# Uncomment to enable internal support requests
("support", Storage(
name_nice = T("Support"),
#description = "Support Requests",
restricted = True,
#module_type = None # This item is handled separately for the menu
)),
("gis", Storage(
name_nice = T("Map"),
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
#module_type = 6, # 6th item in the menu
)),
("pr", Storage(
name_nice = T("Person Registry"),
#description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to the controller is still possible for all)
#module_type = 10
)),
("org", Storage(
name_nice = T("Organizations"),
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
#module_type = 1
)),
# All modules below here should be possible to disable safely
("hrm", Storage(
name_nice = T("Staff"),
#description = "Human Resources Management",
restricted = True,
#module_type = 2,
)),
("vol", Storage(
name_nice = T("Volunteers"),
#description = "Human Resources Management",
restricted = True,
#module_type = 2,
)),
("cms", Storage(
name_nice = T("Content Management"),
#description = "Content Management System",
restricted = True,
module_type = None,
)),
("doc", Storage(
name_nice = T("Documents"),
#description = "A library of digital resources, such as photos, documents and reports",
restricted = True,
#module_type = 10,
)),
("msg", Storage(
name_nice = T("Messaging"),
#description = "Sends & Receives Alerts via Email & SMS",
restricted = True,
# The user-visible functionality of this module isn't normally required. Rather, its main purpose is to be accessed from other modules.
#module_type = None,
)),
("supply", Storage(
name_nice = T("Supply Chain Management"),
#description = "Used within Inventory Management, Request Management and Asset Management",
restricted = True,
#module_type = None, # Not displayed
)),
("inv", Storage(
name_nice = T("Warehouses"),
#description = "Receiving and Sending Items",
restricted = True,
#module_type = 4
)),
#("asset", Storage(
# name_nice = T("Assets"),
# #description = "Recording and Assigning Assets",
# restricted = True,
# #module_type = 5,
# )),
("req", Storage(
name_nice = T("Requests"),
#description = "Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.",
restricted = True,
#module_type = 10,
)),
("project", Storage(
name_nice = T("Projects"),
#description = "Tracking of Projects, Activities and Tasks",
restricted = True,
#module_type = 2
)),
("budget", Storage(
name_nice = T("Budgets"),
#description = "Tracking of Budgets",
restricted = True,
#module_type = None
)),
#("survey", Storage(
# name_nice = T("Assessments"),
# #description = "Create, enter, and manage surveys.",
# restricted = True,
# #module_type = 5,
# )),
# Used by RIT
("event", Storage(
name_nice = T("Events"),
#description = "Events",
restricted = True,
#module_type = 10
)),
("member", Storage(
name_nice = T("Partners"),
#description = "Membership Management System",
restricted = True,
#module_type = 10,
)),
("deploy", Storage(
name_nice = T("Regional Intervention Teams"),
#description = "Alerting and Deployment of Disaster Response Teams",
restricted = True,
#module_type = 10,
)),
#("po", Storage(
# name_nice = T("Recovery Outreach"),
# #description = "Population Outreach",
# restricted = True,
# #module_type = 10,
# )),
("stats", Storage(
name_nice = T("Statistics"),
#description = "Manages statistics",
restricted = True,
#module_type = None,
)),
#("vulnerability", Storage(
# name_nice = T("Vulnerability"),
# #description = "Manages vulnerability indicators",
# restricted = True,
# #module_type = 10,
# )),
])
# -------------------------------------------------------------------------
# Functions which are local to this Template
# -------------------------------------------------------------------------
def ns_only(tablename,
fieldname = "organisation_id",
required = True,
branches = True,
updateable = True,
limit_filter_opts = True
):
"""
Function to configure an organisation_id field to be restricted to just
NS/Branch
@param required: Field is mandatory
@param branches: Include Branches
@param updateable: Limit to Orgs which the user can update
@param limit_filter_opts: Also limit the Filter options
NB If limit_filter_opts=True, apply in customise_xx_controller inside prep,
after standard_prep is run
"""
# Lookup organisation_type_id for Red Cross
db = current.db
s3db = current.s3db
ttable = s3db.org_organisation_type
try:
type_id = db(ttable.name == RED_CROSS).select(ttable.id,
limitby=(0, 1),
cache = s3db.cache,
).first().id
except AttributeError:
# No IFRC prepop done - skip (e.g. testing impacts of CSS changes in this theme)
return
# Load standard model
f = s3db[tablename][fieldname]
if limit_filter_opts:
# Find the relevant filter widget & limit its options
filter_widgets = s3db.get_config(tablename, "filter_widgets")
filter_widget = None
if filter_widgets:
from s3 import FS, S3HierarchyFilter
for w in filter_widgets:
if isinstance(w, S3HierarchyFilter) and \
w.field == "organisation_id":
filter_widget = w
break
if filter_widget is not None:
selector = FS("organisation_organisation_type.organisation_type_id")
filter_widget.opts["filter"] = (selector == type_id)
# Label
if branches:
f.label = T("National Society / Branch")
else:
f.label = T("National Society")
# Requires
# Filter by type
ltable = db.org_organisation_organisation_type
rows = db(ltable.organisation_type_id == type_id).select(ltable.organisation_id)
filter_opts = [row.organisation_id for row in rows]
auth = current.auth
s3_has_role = auth.s3_has_role
Admin = s3_has_role("ADMIN")
if branches:
if Admin:
parent = True
else:
# @ToDo: Set the represent according to whether the user can see resources of just a single NS or multiple
# @ToDo: Consider porting this into core
user = auth.user
if user:
realms = user.realms
#delegations = user.delegations
if realms:
parent = True
else:
parent = False
else:
parent = True
else:
# Keep the represent function as simple as possible
parent = False
# Exclude branches
btable = s3db.org_organisation_branch
rows = db((btable.deleted != True) &
(btable.branch_id.belongs(filter_opts))).select(btable.branch_id)
filter_opts = list(set(filter_opts) - set(row.branch_id for row in rows))
organisation_represent = s3db.org_OrganisationRepresent
represent = organisation_represent(parent=parent)
f.represent = represent
from s3 import IS_ONE_OF
requires = IS_ONE_OF(db, "org_organisation.id",
represent,
filterby = "id",
filter_opts = filter_opts,
updateable = updateable,
orderby = "org_organisation.name",
sort = True)
if not required:
from gluon import IS_EMPTY_OR
requires = IS_EMPTY_OR(requires)
f.requires = requires
if parent:
# Use hierarchy-widget
from s3 import FS, S3HierarchyWidget
# No need for parent in represent (it's a hierarchy view)
node_represent = organisation_represent(parent = False)
# Filter by type
# (no need to exclude branches - we wouldn't be here if we didn't use branches)
selector = FS("organisation_organisation_type.organisation_type_id")
f.widget = S3HierarchyWidget(lookup = "org_organisation",
filter = (selector == type_id),
represent = node_represent,
multiple = False,
leafonly = False,
)
else:
# Dropdown not Autocomplete
f.widget = None
# Comment
if (Admin or s3_has_role("ORG_ADMIN")):
# Need to do import after setting Theme
from s3layouts import S3PopupLink
from s3 import S3ScriptItem
add_link = S3PopupLink(c = "org",
f = "organisation",
vars = {"organisation_type.name": RED_CROSS},
label = T("Create National Society"),
title = T("National Society"),
)
comment = f.comment
if not comment or isinstance(comment, S3PopupLink):
f.comment = add_link
elif isinstance(comment[1], S3ScriptItem):
# Don't overwrite scripts
f.comment[0] = add_link
else:
f.comment = add_link
else:
# Not allowed to add NS/Branch
f.comment = ""
# -------------------------------------------------------------------------
def user_org_default_filter(selector, tablename=None):
"""
Default filter for organisation_id:
* Use the user's organisation if logged-in and associated with an
organisation.
"""
auth = current.auth
user_org_id = auth.is_logged_in() and auth.user.organisation_id
if user_org_id:
return user_org_id
else:
# no default
return {}
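# Registered as a default filter via s3_set_default_filter, e.g. in
# customise_hrm_programme_hours_controller below.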
# -------------------------------------------------------------------------
#def user_org_and_children_default_filter(selector, tablename=None):
# """
# Default filter for organisation_id:
# * Use the user's organisation if logged-in and associated with an
# organisation.
# """
# auth = current.auth
# user_org_id = auth.is_logged_in() and auth.user.organisation_id
# if user_org_id:
# db = current.db
# s3db = current.s3db
# otable = s3db.org_organisation
# org = db(otable.id == user_org_id).select(otable.pe_id,
# limitby=(0, 1)
# ).first()
# if org:
# pe_id = org.pe_id
# pe_ids = s3db.pr_get_descendants((pe_id,),
# entity_types=("org_organisation",))
# rows = db(otable.pe_id.belongs(pe_ids)).select(otable.id)
# ids = [row.id for row in rows]
# ids.append(user_org_id)
# return ids
# else:
# return user_org_id
# else:
# # no default
# return {}
# -------------------------------------------------------------------------
def customise_auth_user_controller(**attr):
"""
Customise admin/user() and default/user() controllers
"""
# Organisation needs to be an NS/Branch
ns_only("auth_user",
required = True,
branches = True,
updateable = False, # Need to see all Orgs in Registration screens
)
table = current.db.auth_user
table.first_name.label = T("Forenames")
table.last_name.label = T("Father's Surname")
return attr
settings.customise_auth_user_controller = customise_auth_user_controller
# -------------------------------------------------------------------------
def customise_deploy_alert_resource(r, tablename):
s3db = current.s3db
# Only send Alerts via Email
# @ToDo: Also send via Twitter
f = s3db[tablename].contact_method
f.readable = f.writable = False
#from s3 import S3SQLCustomForm
#crud_form = S3SQLCustomForm("mission_id",
# "subject",
# "body",
# "modified_on",
# )
#s3db.configure(tablename,
# crud_form = crud_form,
# list_fields = ["mission_id",
# "subject",
# "body",
# ],
# )
settings.customise_deploy_alert_resource = customise_deploy_alert_resource
# -------------------------------------------------------------------------
def deploy_application_onaccept(form):
"""
RIT Members should be added to the RIT Role
"""
db = current.db
s3db = current.s3db
htable = db.hrm_human_resource
ptable = db.pr_person
# Find the Person
human_resource_id = form.vars.get("human_resource_id")
if human_resource_id:
query = (htable.id == human_resource_id)
else:
table = db.deploy_application
query = (table.id == form.vars.get("id")) & \
(table.human_resource_id == htable.id)
hr = db(query).select(htable.person_id,
limitby=(0, 1)
).first()
person_id = hr.person_id
# Do they have a User Account?
ltable = s3db.pr_person_user
query = (ptable.id == person_id) & \
(ltable.pe_id == ptable.pe_id)
link = db(query).select(ltable.user_id,
limitby=(0, 1)
).first()
if link:
# Add them to the RIT role
current.auth.s3_assign_role(link.user_id, "RIT_MEMBER")
# -------------------------------------------------------------------------
def customise_deploy_application_resource(r, tablename):
current.s3db.configure(tablename,
create_onaccept = deploy_application_onaccept,
)
settings.customise_deploy_application_resource = customise_deploy_application_resource
# -------------------------------------------------------------------------
def customise_deploy_mission_resource(r, tablename):
s3db = current.s3db
s3db[tablename].event_type_id.label = T("Disaster Type")
COUNTRY = current.messages.COUNTRY
from s3 import S3SQLCustomForm
crud_form = S3SQLCustomForm("name",
"date",
"location_id",
"event_type_id",
)
#from s3 import S3DateFilter, S3LocationFilter, S3OptionsFilter, S3TextFilter
#filter_widgets = [S3TextFilter(["name",
# "event_type_id$name",
# "location_id",
# ],
# label=T("Search")
# ),
# S3LocationFilter("location_id",
# label=COUNTRY,
# widget="multiselect",
# levels=["L0"],
# hidden=True
# ),
# S3OptionsFilter("event_type_id",
# widget="multiselect",
# hidden=True
# ),
# #S3OptionsFilter("status",
# # options=s3db.deploy_mission_status_opts,
# # hidden=True
# # ),
# S3DateFilter("date",
# hide_time=True,
# hidden=True
# ),
# ]
list_fields = ["name",
"date",
"event_type_id",
(COUNTRY, "location_id"),
(T("Responses"), "response_count"),
(T("Members Deployed"), "hrquantity"),
]
s3db.configure(tablename,
crud_form = crud_form,
list_fields = list_fields,
)
settings.customise_deploy_mission_resource = customise_deploy_mission_resource
# -------------------------------------------------------------------------
def customise_event_event_type_resource(r, tablename):
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Create Disaster Type"),
title_display = T("Disaster Type Details"),
title_list = T("Disaster Types"),
title_update = T("Edit Disaster Type Details"),
title_upload = T("Import Disaster Types"),
label_list_button = T("List Disaster Types"),
label_delete_button = T("Delete Disaster Type"),
msg_record_created = T("Disaster Type added"),
msg_record_modified = T("Disaster Type Details updated"),
msg_record_deleted = T("Disaster Type deleted"),
msg_list_empty = T("No Disaster Types currently defined"))
settings.customise_event_event_type_resource = customise_event_event_type_resource
# -------------------------------------------------------------------------
def customise_hrm_certificate_controller(**attr):
table = current.s3db.hrm_certificate
auth = current.auth
if auth.s3_has_role("ADMIN"):
# See all Certificates
pass
elif auth.s3_has_roles(("training_coordinator",
"training_assistant",
)):
# Only show this Center's Certificates
organisation_id = auth.user.organisation_id
current.response.s3.filter = (table.organisation_id == organisation_id) | \
(table.organisation_id == None)
# Default to this Training Center
table.organisation_id.default = organisation_id
else:
# See NS Certificates
organisation_id = auth.root_org()
current.response.s3.filter = (table.organisation_id == organisation_id) | \
(table.organisation_id == None)
# Default to this NS
table.organisation_id.default = organisation_id
return attr
settings.customise_hrm_certificate_controller = customise_hrm_certificate_controller
# -------------------------------------------------------------------------
def customise_hrm_course_controller(**attr):
table = current.s3db.hrm_course
auth = current.auth
if auth.s3_has_role("ADMIN"):
# See all Courses
pass
elif auth.s3_has_roles(("training_coordinator",
"training_assistant",
)):
# Only show this Center's courses
current.response.s3.filter = (table.organisation_id == auth.user.organisation_id) | (table.organisation_id == None)
else:
# See NS Courses
current.response.s3.filter = (table.organisation_id == auth.root_org()) | (table.organisation_id == None)
return attr
settings.customise_hrm_course_controller = customise_hrm_course_controller
# -------------------------------------------------------------------------
def customise_hrm_course_resource(r, tablename):
from gluon import IS_EMPTY_OR, IS_NOT_IN_DB
from s3 import S3SQLCustomForm
db = current.db
auth = current.auth
s3db = current.s3db
table = s3db[tablename]
# Code should be Unique
f = table.code
f.requires = IS_EMPTY_OR(IS_NOT_IN_DB(db, "hrm_course.code"))
if auth.s3_has_roles(("training_coordinator",
"training_assistant",
)):
f = table.organisation_id
f.label = T("Training Center")
f.comment = False # Don't create here
org_represent = s3db.org_OrganisationRepresent(parent=False)
f.represent = org_represent
list_fields = ["code",
"name",
]
has_role = auth.s3_has_role
if has_role("ADMIN"):
table.organisation_id.label = T("National Society / Training Center")
list_fields.insert(0, "organisation_id")
#f.readable = f.writable = True
#ttable = s3db.org_organisation_type
#try:
# type_id = db(ttable.name == "Training Center").select(ttable.id,
# limitby=(0, 1),
# ).first().id
#except:
# # No/incorrect prepop done - skip (e.g. testing impacts of CSS changes in this theme)
# pass
#else:
# ltable = s3db.org_organisation_organisation_type
# rows = db(ltable.organisation_type_id == type_id).select(ltable.organisation_id)
# filter_opts = [row.organisation_id for row in rows]
# f.requires = IS_ONE_OF(db, "org_organisation.id",
# org_represent,
# orderby = "org_organisation.name",
# sort = True,
# filterby = "id",
# filter_opts = filter_opts,
# )
elif has_role("training_coordinator"):
f.default = auth.user.organisation_id
crud_form = S3SQLCustomForm("organisation_id",
"code",
"name",
"comments",
)
s3db.configure(tablename,
crud_form = crud_form,
list_fields = list_fields,
orderby = "hrm_course.code",
)
settings.customise_hrm_course_resource = customise_hrm_course_resource
# -------------------------------------------------------------------------
#def customise_hrm_department_controller(**attr):
# # Organisation needs to be an NS/Branch
# ns_only("hrm_department",
# required = False,
# branches = False,
# )
# return attr
#settings.customise_hrm_department_controller = customise_hrm_department_controller
# -------------------------------------------------------------------------
#def emergency_contact_represent(row):
# """
# Representation of Emergency Contacts (S3Represent label renderer)
# @param row: the row
# """
# items = [row["pr_contact_emergency.name"]]
# relationship = row["pr_contact_emergency.relationship"]
# if relationship:
# items.append(" (%s)" % relationship)
# phone_number = row["pr_contact_emergency.phone"]
# if phone_number:
# items.append(": %s" % phone_number)
# return "".join(items)
# -------------------------------------------------------------------------
def customise_hrm_home():
from gluon import URL
from s3 import s3_redirect_default
has_role = current.auth.s3_has_role
len_roles = len(current.session.s3.roles)
if (len_roles <= 2) or \
(len_roles == 3 and has_role("RIT_MEMBER") and not has_role("ADMIN")):
# No specific Roles
# Go to Personal Profile
s3_redirect_default(URL(f="person"))
else:
# Bypass home page & go direct to searchable list of Staff
s3_redirect_default(URL(f="human_resource", args="summary"))
settings.customise_hrm_home = customise_hrm_home
# -------------------------------------------------------------------------
def customise_hrm_experience_resource(r, tablename):
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Work History"),
title_display = T("Work History Details"),
title_list = T("Work History"),
title_update = T("Edit Work History"),
label_list_button = T("List Work History"),
label_delete_button = T("Delete Work History"),
msg_record_created = T("Work History added"),
msg_record_modified = T("Work History updated"),
msg_record_deleted = T("Work History deleted"),
msg_list_empty = T("No entries currently registered"))
settings.customise_hrm_experience_resource = customise_hrm_experience_resource
# -------------------------------------------------------------------------
def hrm_human_resource_create_onaccept(form):
"""
If the Staff/Volunteer is RC then create them a user account with a random password
"""
db = current.db
s3db = current.s3db
form_vars = form.vars
# Call normal onaccept
s3db.hrm_human_resource_onaccept(form)
# Is the person RC?
organisation_id = form_vars.get("organisation_id")
if not organisation_id:
hr_id = form_vars.get("id")
if not hr_id:
# Nothing we can do!
current.log.warning("Cannot create user for HR as no id in the form")
return
htable = s3db.hrm_human_resource
hr = db(htable.id == hr_id).select(htable.id,
htable.person_id,
htable.type,
htable.organisation_id,
limitby = (0, 1),
).first()
try:
organisation_id = hr.organisation_id
except AttributeError:
# Nothing we can do!
current.log.warning("Cannot create user for HR %s as cannot find HR record" % hr_id)
return
else:
hr = None
ttable = s3db.org_organisation_type
ltable = s3db.org_organisation_organisation_type
query = (ttable.name == RED_CROSS) & \
(ltable.organisation_type_id == ttable.id) & \
(ltable.organisation_id == organisation_id)
RC = db(query).select(ltable.id,
limitby=(0, 1),
).first()
if not RC:
return
# Collect the Details needed
person_id = form_vars.get("person_id")
if not person_id:
if not hr:
hr_id = form_vars.get("id")
if not hr_id:
# Nothing we can do!
current.log.warning("Cannot create user for HR as no id in the form")
return
htable = s3db.hrm_human_resource
hr = db(htable.id == hr_id).select(htable.id,
htable.person_id,
htable.type,
limitby = (0, 1),
).first()
try:
person_id = hr.person_id
except AttributeError:
current.log.warning("Cannot create user for HR %s as cannot find HR record" % hr_id)
return
ptable = s3db.pr_person
person = db(ptable.id == person_id).select(ptable.id,
ptable.first_name,
ptable.middle_name, # NB We use middle_name for User in RMS Americas!
ptable.pe_id,
limitby = (0, 1),
).first()
try:
pe_id = person.pe_id
except AttributeError:
# Nothing we can do!
return
ctable = s3db.pr_contact
query = (ctable.pe_id == pe_id) & \
(ctable.contact_method == "EMAIL")
contact = db(query).select(ctable.value,
limitby = (0, 1),
).first()
try:
email = contact.value
except AttributeError:
# Nothing we can do!
hr_id = form_vars.get("id")
current.log.warning("Cannot create user for HR %s as cannot find Email" % hr_id)
return
hr_type = form_vars.get("type")
if not hr_type:
if not hr:
hr_id = form_vars.get("id")
if not hr_id:
# Nothing we can do!
current.log.warning("Cannot create user for HR as no id in the form")
return
htable = s3db.hrm_human_resource
hr = db(htable.id == hr_id).select(htable.id,
htable.type,
limitby = (0, 1),
).first()
try:
hr_type = str(hr.type)
except AttributeError:
# Nothing we can do!
current.log.warning("Cannot create user for HR %s as cannot find HR record" % hr_id)
return
if hr_type == "1":
link_user_to = "staff"
else:
link_user_to = "volunteer"
# This field has been manually added to the form
language = current.request.post_vars.get("language")
auth = current.auth
# Generate a password
password, crypted = auth.s3_password(8)
# Create User
user = Storage(organisation_id = organisation_id,
language = language,
first_name = person.first_name,
last_name = person.middle_name, # NB We use middle_name for User in RMS Americas!
email = email,
link_user_to = link_user_to,
password = str(crypted),
)
#user = auth.get_or_create_user(user, login=False)
user_id = db.auth_user.insert(**user)
# Set the HR record to be owned by this user
if hr:
hr.update_record(owned_by_user=user_id)
else:
hr_id = form_vars.get("id")
db(s3db.hrm_human_resource.id == hr_id).update(owned_by_user=user_id)
# Set the Person record to be owned by this user
person.update_record(owned_by_user=user_id)
# Cascade down to components
# pr_address
atable = s3db.pr_address
db(atable.pe_id == pe_id).update(owned_by_user=user_id)
# pr_contact
db(ctable.pe_id == pe_id).update(owned_by_user=user_id)
# Link to Person so that we find this in the 'Link'
ltable = s3db.pr_person_user
ltable.insert(pe_id = pe_id,
user_id = user_id,
)
# Approve User, link to Person & send them a Welcome email
user.update(id = user_id)
messages = auth.messages
messages.lock_keys = False
messages.welcome_email = \
"""Welcome to %(system_name)s
- You can start using %(system_name)s at: %(url)s
- Your password is: %(password)s
- To edit your profile go to: %(url)s%(profile)s
Thank you"""
messages.lock_keys = True
auth.s3_approve_user(user, password=password)
# -------------------------------------------------------------------------
def customise_hrm_insurance_resource(r, tablename):
table = current.s3db.hrm_insurance
table.type.default = "HEALTH"
table.insurance_number.label = T("Affiliate Number")
table.phone.label = T("Emergency Number")
table.insurer.label = "%s / %s" % (T("Insurance Company"),
T("Social Work or Prepaid"),
)
settings.customise_hrm_insurance_resource = customise_hrm_insurance_resource
# -------------------------------------------------------------------------
def hrm_human_resource_onvalidation(form):
"""
Check that the Organization ID is unique per NS
"""
# Read Code
form_vars_get = form.vars.get
code = form_vars_get("code")
if code is None:
return
db = current.db
s3db = current.s3db
# Lookup Root Org
organisation_id = form_vars_get("organisation_id")
otable = s3db.org_organisation
root_org = db(otable.id == organisation_id).select(otable.root_organisation,
limitby = (0, 1)
).first()
root_organisation = root_org.root_organisation
# Check for another HR in the same NS with same code
htable = s3db.hrm_human_resource
query = (htable.code == code) & \
(htable.organisation_id == otable.id) & \
(otable.root_organisation == root_organisation)
human_resource_id = form_vars_get("id")
if human_resource_id:
# Update Form: Skip our own record
query &= (htable.id != human_resource_id)
match = db(query).select(htable.id,
limitby = (0, 1)
).first()
if match:
# Error
form.errors["code"] = current.T("Organization ID already in use")
return
# -------------------------------------------------------------------------
def customise_hrm_human_resource_resource(r, tablename):
# Organization ID needs to be unique per NS
current.s3db.configure(tablename,
onvalidation = hrm_human_resource_onvalidation,
)
settings.customise_hrm_human_resource_resource = customise_hrm_human_resource_resource
# -------------------------------------------------------------------------
def customise_hrm_human_resource_controller(**attr):
#controller = current.request.controller
#if controller != "deploy":
# # Default Filter
# from s3 import s3_set_default_filter
# s3_set_default_filter("~.organisation_id",
# user_org_and_children_default_filter,
# tablename = "hrm_human_resource")
s3 = current.response.s3
# Enable scalability-optimized strategies
settings.base.bigtable = True
if current.request.function == "trainee":
EXTERNAL = True
else:
EXTERNAL = False
def add_language(form):
from gluon import LABEL, OPTION, SELECT
from s3 import s3_addrow
formstyle = settings.get_ui_formstyle()
language_opts = [OPTION(T("Spanish"), _value="es", _selected="selected"),
OPTION(T("French"), _value="fr"),
OPTION(T("English"), _value="en"),
]
s3_addrow(form,
LABEL("%s:" % T("Language"),
_id = "auth_user_language__label",
_for = "auth_user_language",
),
SELECT(_id = "auth_user_language",
_name = "language",
*language_opts
),
"",
formstyle,
"auth_user_language__row",
position = 3,
)
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
from s3 import FS
db = current.db
s3db = current.s3db
auth = current.auth
resource = r.resource
table = r.table
if EXTERNAL:
f = table.organisation_id
f.label = T("Organization")
# Organisation cannot be an NS/Branch
# Lookup organisation_type_id for Red Cross
ttable = s3db.org_organisation_type
type_ids = db(ttable.name.belongs((RED_CROSS, "Training Center"))).select(ttable.id,
limitby = (0, 2),
cache = s3db.cache,
)
if type_ids:
from s3 import IS_ONE_OF
ltable = db.org_organisation_organisation_type
type_ids = [t.id for t in type_ids]
rows = db(ltable.organisation_type_id.belongs(type_ids)).select(ltable.organisation_id)
not_filter_opts = [row.organisation_id for row in rows]
f.requires = IS_ONE_OF(db, "org_organisation.id",
f.represent,
not_filterby = "id",
not_filter_opts = not_filter_opts,
updateable = True,
orderby = "org_organisation.name",
sort = True)
resource.add_filter(~FS("organisation_id").belongs(not_filter_opts))
# Find the relevant filter widget & limit its options
filter_widgets = s3db.get_config("hrm_human_resource", "filter_widgets")
filter_widget = None
if filter_widgets:
from s3 import S3HierarchyFilter
for w in filter_widgets:
if isinstance(w, S3HierarchyFilter) and \
w.field == "organisation_id":
filter_widget = w
break
if filter_widget is not None:
filter_widget.opts["filter"] = (~FS("id").belongs(not_filter_opts))
else:
otable = s3db.org_organisation
otable.root_organisation.label = T("National Society")
# Organisation needs to be an NS/Branch
ns_only("hrm_human_resource",
required = True,
branches = True,
# default
#limit_filter_opts = True,
)
export_formats = list(settings.get_ui_export_formats())
if r.method in ("create", "summary", None):
# Provide a default Organization ID
organisation_id = auth.user.organisation_id
if organisation_id:
org = db(otable.id == organisation_id).select(otable.root_organisation,
limitby = (0, 1)
).first()
root_organisation_id = org.root_organisation
f = table.code
query = (otable.root_organisation == root_organisation_id) & \
(otable.id == table.organisation_id)
last_code = db(query).select(f,
limitby = (0, 1),
orderby = ~f
).first()
last_code = last_code.code if last_code else None # Guard against there being no existing HR records
if last_code:
f.default = int(last_code) + 1
else:
f.default = 1
if not r.id:
# Filter to just RC people
resource.add_filter(FS("organisation_id$organisation_type.name") == RED_CROSS)
resource.configure(create_onaccept = hrm_human_resource_create_onaccept,
form_postp = add_language,
)
# Custom list_fields
list_fields = [(T("Full Name"), "person_id"),
"organisation_id",
(T("Program"), "person_id$hours.programme_id"),
(T("National ID"), "person_id$national_id.value"),
"code",
(T("Email"), "email.value"),
(settings.get_ui_label_mobile_phone(), "phone.value"),
]
r.resource.configure(list_fields = list_fields)
# Bind method for signature list export + add export icon
from templates.RMSAmericas.siglist import HRSignatureList
s3db.set_method("hrm", "human_resource",
method = "siglist",
action = HRSignatureList,
)
export_formats.append(("siglist.pdf", "fa fa-list", T("Export Signature List")))
s3.formats["siglist.pdf"] = r.url(method="siglist")
if auth.s3_has_roles(ID_CARD_EXPORT_ROLES):
if r.representation == "card":
# Configure ID card layout
from templates.RMSAmericas.idcards import IDCardLayout
resource.configure(pdf_card_layout = IDCardLayout)
if not r.id and not r.component:
# Add export-icon for ID cards
export_formats.append(("card", "fa fa-id-card", T("Export ID Cards")))
s3.formats["card"] = r.url(method="")
settings.ui.export_formats = export_formats
if not auth.s3_has_role("ADMIN") and \
auth.s3_has_roles(("training_coordinator", "training_assistant")):
# Filter People to just those trained by this Reference Center
resource.add_filter(FS("training.training_event_id$organisation_id") == auth.user.organisation_id)
# Default to Volunteers
table.type.default = 2
# Hide Venues from the list of Offices
from gluon import IS_EMPTY_OR
ttable = s3db.org_facility_type
ltable = s3db.org_site_facility_type
query = (ltable.facility_type_id == ttable.id) & \
(ttable.name == "Venue")
venues = db(query).select(ltable.site_id)
venues = [v.site_id for v in venues]
stable = s3db.org_site
dbset = db(~stable.site_id.belongs(venues))
f = table.site_id
new_requires = f.requires.other
new_requires.dbset = dbset
f.requires = IS_EMPTY_OR(new_requires)
table = s3db.pr_person
table.first_name.label = T("Forenames")
table.middle_name.label = T("Father's Surname")
table.last_name.label = T("Mother's Surname")
# For the filter
s3db.hrm_competency.skill_id.label = T("Language")
return True
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if not EXTERNAL and \
r.method in (None, "create") and \
isinstance(output, dict):
form = output.get("form")
if form:
add_language(form)
return output
s3.postp = custom_postp
return attr
settings.customise_hrm_human_resource_controller = customise_hrm_human_resource_controller
# -------------------------------------------------------------------------
def customise_hrm_job_title_resource(r, tablename):
s3db = current.s3db
f = s3db.hrm_job_title.type
f.default = 3 # Both
#f.readable = f.writable = False
label = T("Position")
label_create = T("Create Position")
current.response.s3.crud_strings[tablename] = Storage(
label_create = label_create,
title_display = T("Position Details"),
title_list = T("Position Catalog"),
title_update = T("Edit Position"),
title_upload = T("Import Positions"),
label_list_button = T("List Positions"),
label_delete_button = T("Delete Position"),
msg_record_created = T("Position added"),
msg_record_modified = T("Position updated"),
msg_record_deleted = T("Position deleted"),
msg_list_empty = T("Currently no entries in the catalog"))
from s3layouts import S3PopupLink
f = s3db.hrm_job_title_id.attr
f.label = label
f.comment = S3PopupLink(c = "hrm",
f = "job_title",
label = label_create,
title = label,
)
settings.customise_hrm_job_title_resource = customise_hrm_job_title_resource
# -------------------------------------------------------------------------
def customise_hrm_job_title_controller(**attr):
s3 = current.response.s3
# Organisation needs to be an NS
ns_only("hrm_job_title",
required = False,
branches = False,
)
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if current.auth.s3_has_role("ADMIN"):
from s3 import S3OptionsFilter, S3TextFilter
filter_widgets = [S3TextFilter(["name",
],
label=T("Search")
),
S3OptionsFilter("organisation_id",
),
]
current.s3db.configure("hrm_job_title",
filter_widgets = filter_widgets,
)
return result
s3.prep = custom_prep
return attr
settings.customise_hrm_job_title_controller = customise_hrm_job_title_controller
# -------------------------------------------------------------------------
def customise_hrm_programme_controller(**attr):
table = current.s3db.hrm_programme
# Organisation needs to be an NS/Branch
ns_only("hrm_programme",
required = False,
branches = False,
)
# non-Admins should only see programmes for their NS
auth = current.auth
if not auth.s3_has_role("ADMIN"):
current.response.s3.filter = (table.organisation_id == auth.root_org())
f = table.name_long
f.readable = f.writable = False
return attr
settings.customise_hrm_programme_controller = customise_hrm_programme_controller
# -------------------------------------------------------------------------
def customise_hrm_programme_hours_controller(**attr):
# Default Filter
from s3 import s3_set_default_filter
s3_set_default_filter("~.person_id$human_resource.organisation_id",
user_org_default_filter,
tablename = "hrm_programme_hours")
attr["csv_template"] = ("../../themes/RMSAmericas/formats", "hrm_programme_hours")
return attr
settings.customise_hrm_programme_hours_controller = customise_hrm_programme_hours_controller
# -------------------------------------------------------------------------
def skip_create(deduplicate):
""" Decorator for deduplicators to prevent creation of new records """
def wrapped(item):
if callable(deduplicate):
deduplicate(item)
item.strategy = [item.METHOD.UPDATE]
return wrapped
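# Used below to wrap the standard deduplicators during hrm_programme_hours
# imports, so that matching person/organisation/programme records are updated
# but no new ones are created.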
def customise_hrm_programme_hours_resource(r, tablename):
from s3 import S3SQLCustomForm
s3db = current.s3db
phtable = s3db.hrm_programme_hours
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Hours of Service"),
title_display = T("Hours Details"),
title_list = T("Hours of Service"),
title_update = T("Edit Hours"),
label_list_button = T("List Hours"),
label_delete_button = T("Delete Hours"),
msg_record_created = T("Hours added"),
msg_record_modified = T("Hours updated"),
msg_record_deleted = T("Hours deleted"),
msg_list_empty = T("Currently no hours recorded"))
# Show new custom fields
phtable.event.readable = phtable.event.writable = True
phtable.place.readable = phtable.place.writable = True
# Hide old fields so they don't appear in list_fields in hrm_Record
#phtable.programme_id.readable = phtable.programme_id.writable = False
phtable.job_title_id.readable = phtable.job_title_id.writable = False
crud_form = S3SQLCustomForm("date",
"programme_id",
"place",
"event",
"hours",
)
# Only visible in hrm_Record which controls list_fields itself
#list_fields = ["date",
# "programme_id",
# "place",
# "event",
# "training_id$training_event_id$location_id",
# "training_id$training_event_id$course_id",
# "hours",
# ]
configure = s3db.configure
configure("hrm_programme_hours",
crud_form = crud_form,
#list_fields = list_fields,
)
# Prevent create during imports
get_config = s3db.get_config
configure("pr_person",
deduplicate = skip_create(get_config("pr_person", "deduplicate")),
)
configure("org_organisation",
deduplicate = skip_create(get_config("org_organisation", "deduplicate")),
)
configure("hrm_programme",
deduplicate = skip_create(get_config("hrm_programme", "deduplicate")),
)
settings.customise_hrm_programme_hours_resource = customise_hrm_programme_hours_resource
# -------------------------------------------------------------------------
def customise_hrm_skill_resource(r, tablename):
#label = T("Language")
label_create = T("Create Language")
current.response.s3.crud_strings[tablename] = Storage(
label_create = label_create,
title_display = T("Language Details"),
title_list = T("Language Catalog"),
title_update = T("Edit Language"),
label_list_button = T("List Languages"),
label_delete_button = T("Delete Language"),
msg_record_created = T("Language added"),
msg_record_modified = T("Language updated"),
msg_record_deleted = T("Language deleted"),
msg_list_empty = T("Currently no entries in the catalog"))
# Not usable here, since we cannot be sure this runs before the hrm_competency table is loaded
#from s3layouts import S3PopupLink
#f = current.s3db.hrm_skill_id.attr
#f.label = label
#f.comment = S3PopupLink(c = "hrm",
# f = "skill",
# label = label_create,
# title = label,
# )
settings.customise_hrm_skill_resource = customise_hrm_skill_resource
# -------------------------------------------------------------------------
def customise_hrm_competency_resource(r, tablename):
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Language"),
title_display = T("Language Details"),
title_list = T("Languages"),
title_update = T("Edit Language"),
label_list_button = T("List Languages"),
label_delete_button = T("Delete Language"),
msg_record_created = T("Language added"),
msg_record_modified = T("Language updated"),
msg_record_deleted = T("Language deleted"),
msg_list_empty = T("No entries currently registered"))
label = T("Language")
from s3layouts import S3PopupLink
f = current.s3db.hrm_competency.skill_id
f.label = label
f.comment = S3PopupLink(c = "hrm",
f = "skill",
label = T("Create Language"),
title = label,
)
settings.customise_hrm_competency_resource = customise_hrm_competency_resource
# -------------------------------------------------------------------------
def hrm_training_onaccept(form):
"""
Add People to the RIT Alert List if they have passed the RIT course
"""
db = current.db
s3db = current.s3db
form_vars = form.vars
# Lookup full record
table = db.hrm_training
record = db(table.id == form_vars.id).select(table.id,
table.person_id,
table.course_id,
table.grade,
limitby=(0, 1)).first()
try:
course_id = record.course_id
except AttributeError:
current.log.error("Cannot find Training record")
return
# Lookup the RIT Course ID
ctable = db.hrm_course
row = db(ctable.name == "Regional Intervention Teams").select(ctable.id,
cache = s3db.cache,
limitby=(0, 1)
).first()
try:
rit_course_id = row.id
except AttributeError:
current.log.error("Cannot find RIT Course: Prepop not done?")
return
if course_id != rit_course_id:
# Nothing to do
return
if record.grade != 8:
# Not passed: Nothing to do
return
# Is person already a RIT Member?
person_id = record.person_id
htable = s3db.hrm_human_resource
hr = db(htable.person_id == person_id).select(htable.id,
limitby=(0, 1)
).first()
try:
human_resource_id = hr.id
except AttributeError:
current.log.error("Cannot find Human Resource record")
return
dtable = s3db.deploy_application
exists = db(dtable.human_resource_id == human_resource_id).select(dtable.id,
limitby=(0, 1)
).first()
if not exists:
# Add them to the list
dtable.insert(human_resource_id = human_resource_id)
# Add them to the RIT role
ltable = s3db.pr_person_user
ptable = db.pr_person
query = (ptable.id == person_id) & \
(ltable.pe_id == ptable.pe_id)
link = db(query).select(ltable.user_id,
limitby=(0, 1)
).first()
if link:
current.auth.s3_assign_role(link.user_id, "RIT_MEMBER")
# -------------------------------------------------------------------------
def hrm_training_postimport(import_info):
"""
Create Users for Persons created
"""
training_ids = import_info["created"]
if not training_ids:
# No new people created
return
db = current.db
s3db = current.s3db
# Find all the Persons
ttable = s3db.hrm_training
ptable = s3db.pr_person
query = (ttable.id.belongs(training_ids)) & \
(ttable.person_id == ptable.id)
trainings = db(query).select(ptable.pe_id)
person_pe_ids = {p.pe_id for p in trainings}
if not person_pe_ids:
# No people?
return
# Remove those with a User Account
ltable = s3db.pr_person_user
users = db(ltable.pe_id.belongs(person_pe_ids)).select(ltable.pe_id)
user_pe_ids = [u.pe_id for u in users]
discard = person_pe_ids.discard
for pe_id in user_pe_ids:
discard(pe_id)
if not person_pe_ids:
# Nobody without a User Account already
return
# Read Person Details
ctable = s3db.pr_contact
dtable = s3db.pr_person_details
htable = s3db.hrm_human_resource
left = [ctable.on((ctable.pe_id == ptable.pe_id) & \
(ctable.contact_method == "EMAIL")
),
dtable.on(dtable.person_id == ptable.id),
htable.on(htable.person_id == ptable.id),
]
persons = db(ptable.pe_id.belongs(person_pe_ids)).select(ptable.id,
ptable.first_name,
# RMSAmericas uses Apellido Paterno for Last Name
ptable.middle_name,
#ptable.last_name,
ctable.value,
dtable.language,
htable.type,
htable.organisation_id,
left=left,
)
auth = current.auth
utable = db.auth_user
create_user = utable.insert
approve_user = auth.s3_approve_user
cert_table = s3db.hrm_certification
# For each Person
for p in persons:
person = p["pr_person"]
hr = p["hrm_human_resource"]
if hr.type == 1:
link_user_to = "staff"
else:
link_user_to = "volunteer"
# Set random password
password, crypted = auth.s3_password(8)
# Create a User Account
user = Storage(first_name = person.first_name,
last_name = person.middle_name,
#last_name = person.last_name,
email = p["pr_contact.value"],
language = p["pr_person_details.language"],
password = crypted,
organisation_id = hr.organisation_id,
link_user_to = link_user_to,
)
user_id = create_user(**user)
# Standard Approval (inc Link to Person/HR and Send out Welcome Email with password)
user["id"] = user_id
approve_user(user, password)
# Fixup permissions
person_id = person.id
db(htable.person_id == person_id).update(owned_by_user = user_id)
db(ttable.person_id == person_id).update(owned_by_user = user_id)
db(cert_table.person_id == person_id).update(owned_by_user = user_id)
# -------------------------------------------------------------------------
def customise_hrm_training_controller(**attr):
s3 = current.response.s3
# Default Filter
#from s3 import s3_set_default_filter
#s3_set_default_filter("~.person_id$human_resource.organisation_id",
# user_org_default_filter,
# tablename = "hrm_training")
auth = current.auth
if not auth.s3_has_role("ADMIN") and \
auth.s3_has_roles(("training_coordinator", "training_assistant")):
TC = True
# Filter Trainings to just those done by this Reference Center
from s3 import FS
query = FS("~.training_event_id$organisation_id") == auth.user.organisation_id
s3.filter = query
else:
TC = False
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.method == "import":
# HR records may be created via importing them as participants
s3db = current.s3db
# Default to Volunteers
s3db.hrm_human_resource.type.default = 2
if TC:
# Doesn't work as email created after human_resource
#s3db.configure("hrm_human_resource",
# create_onaccept = hrm_human_resource_create_onaccept,
# )
# Create User Accounts for those Persons without them
s3db.configure("hrm_training",
postimport = hrm_training_postimport,
)
return True
s3.prep = custom_prep
return attr
settings.customise_hrm_training_controller = customise_hrm_training_controller
# -------------------------------------------------------------------------
def customise_hrm_training_resource(r, tablename):
s3db = current.s3db
table = s3db.hrm_training
f = table.grade
f.readable = f.writable = True
f = table.qualitative_feedback
f.readable = f.writable = True
s3db.hrm_certification.number.label = T("Registration Number")
from s3 import S3SQLCustomForm, S3TextFilter, S3OptionsFilter, S3DateFilter
if r.function == "person":
crud_form = S3SQLCustomForm("course_id",
"end_date",
"grade",
"grade_details",
"qualitative_feedback",
"certification_from_training.number",
)
else:
crud_form = S3SQLCustomForm("person_id",
"end_date",
"grade",
"grade_details",
"qualitative_feedback",
"certification_from_training.number",
)
filter_widgets = [
S3TextFilter(["person_id$first_name",
"person_id$last_name",
"course_id$name",
"comments",
],
label = T("Search"),
comment = T("You can search by trainee name, course name or comments. You may use % as wildcard. Press 'Search' without input to list all trainees."),
_class="filter-search",
),
S3OptionsFilter("training_event_id$site_id",
label = T("Country"),
represent = s3db.org_SiteRepresent(show_type=False),
),
S3OptionsFilter("person_id$human_resource.organisation_id",
label = T("Organization"),
),
S3OptionsFilter("course_id",
),
S3OptionsFilter("grade",
),
S3DateFilter("date",
hide_time=True,
),
]
default_onaccept = s3db.get_config(tablename, "onaccept")
if default_onaccept and not isinstance(default_onaccept, list): # Catch running twice
onaccept = [default_onaccept,
hrm_training_onaccept,
]
else:
onaccept = hrm_training_onaccept
s3db.configure(tablename,
crud_form = crud_form,
filter_widgets = filter_widgets,
onaccept = onaccept,
)
settings.customise_hrm_training_resource = customise_hrm_training_resource
# -------------------------------------------------------------------------
def customise_hrm_training_event_resource(r, tablename):
from s3 import IS_ONE_OF, S3SQLCustomForm, S3SQLInlineComponent
db = current.db
auth = current.auth
s3db = current.s3db
table = s3db.hrm_training_event
org_represent = s3db.org_OrganisationRepresent(parent=False)
f = table.organisation_id
f.label = T("Training Center")
f.comment = False # Don't create here
f.represent = org_represent
list_fields = ["organisation_id",
"course_id",
#"site_id",
"location_id",
"start_date",
"training_event_instructor.person_id",
"comments",
]
if auth.s3_has_role("ADMIN"):
#f.readable = f.writable = True
ttable = s3db.org_organisation_type
try:
type_id = db(ttable.name == "Training Center").select(ttable.id,
limitby=(0, 1),
).first().id
except AttributeError:
# No/incorrect prepop done - skip (e.g. testing impacts of CSS changes in this theme)
pass
else:
ltable = s3db.org_organisation_organisation_type
rows = db(ltable.organisation_type_id == type_id).select(ltable.organisation_id)
filter_opts = [row.organisation_id for row in rows]
f.requires = IS_ONE_OF(db, "org_organisation.id",
org_represent,
orderby = "org_organisation.name",
sort = True,
filterby = "id",
filter_opts = filter_opts,
)
elif auth.s3_has_roles(("training_coordinator", "training_assistant")):
organisation_id = auth.user.organisation_id
f.default = organisation_id
f.writable = False
list_fields.pop(0) # organisation_id
table.course_id.requires.set_filter(filterby = "organisation_id",
filter_opts = [organisation_id],
)
# Hours are Optional
from gluon import IS_EMPTY_OR
table.hours.requires = IS_EMPTY_OR(table.hours)
#site_represent = S3Represent(lookup = "org_site")
# Filter list of Venues
#f = table.site_id
#f.default = None
#f.label = T("Country")
#f.represent = site_represent
#ftable = s3db.org_facility
#ltable = s3db.org_site_facility_type
#ttable = s3db.org_facility_type
#query = (ftable.deleted == False) & \
# (ftable.site_id == ltable.site_id) & \
# (ltable.facility_type_id == ttable.id) & \
# (ttable.name == "Venue")
#rows = db(query).select(ftable.site_id)
#filter_opts = [row.site_id for row in rows]
#f.requires = IS_ONE_OF(db, "org_site.site_id",
# site_represent,
# filterby="site_id",
# filter_opts=filter_opts,
# )
# Multiple Instructors
crud_form = S3SQLCustomForm("organisation_id",
# @ToDo: Filter Courses by Training Center
"course_id",
#"site_id",
"location_id",
"start_date",
"end_date",
S3SQLInlineComponent("training_event_instructor",
label = T("Instructor"),
fields = [("", "person_id")],
# @ToDo: Filter to HRMs (this should be done through AC?)
#filterby = ({"field": "type",
# "options": 3,
# },),
),
"comments",
)
s3db.configure(tablename,
crud_form = crud_form,
list_fields = list_fields,
)
settings.customise_hrm_training_event_resource = customise_hrm_training_event_resource
# -------------------------------------------------------------------------
def hrm_training_event_report_pdf_export(r, **attr):
"""
Generate a PDF Export of a training Event Report
"""
from s3 import s3_fullname, s3_str
record = r.record
T = current.T
db = current.db
s3db = current.s3db
current_language = T.accepted_language
if current_language == "es":
# Use a different source string so that the Spanish translation is distinct
title = s3_str(T("Training Event Report"))
else:
title = s3_str(T("Training Report"))
if record.course_id:
course_name = s3db.hrm_training_event.course_id.represent(record.course_id)
title = "%s: %s" % (title, course_name)
def callback(r):
from gluon.html import DIV, TABLE, TD, TH, TR
rtable = s3db.hrm_training_event_report
date_represent = rtable.date.represent
org_represent = s3db.org_OrganisationRepresent(parent = False,
acronym = False)
# Logo
otable = db.org_organisation
org_id = record.organisation_id
org = db(otable.id == org_id).select(otable.name,
otable.acronym, # Present for consistent cache key
otable.logo,
limitby=(0, 1),
).first()
#if settings.get_L10n_translate_org_organisation():
#org_name = org_represent(org_id)
#else:
# org_name = org.name
logo = org.logo
if logo:
logo = s3db.org_organisation_logo(org)
elif current.deployment_settings.get_org_branches():
root_org = current.cache.ram(
# Common key with auth.root_org
"root_org_%s" % org_id,
lambda: s3db.org_root_organisation(org_id),
time_expire=120
)
logo = s3db.org_organisation_logo(root_org)
# Read the report
report = db(rtable.training_event_id == r.id).select(limitby = (0, 1),
).first()
# Header
header = TABLE(TR(TH("%s:" % T("Name")),
TD(s3_fullname(report.person_id)),
TH("%s:" % T("Training Date")),
TD(date_represent(record.start_date)),
),
TR(TH("%s:" % T("Position")),
TD(rtable.job_title_id.represent(report.job_title_id)),
TH("%s:" % T("Finance Codes")),
TD(report.code),
),
TR(TH("%s:" % T("National Society Visited")),
TD(org_represent(report.organisation_id)),
TH("%s:" % T("Report Date")),
TD(date_represent(report.date)),
),
TR(TH("%s:" % T("Training Purpose")),
TD(report.purpose,
_colspan = 3,
),
),
)
# Main
main = TABLE(TR(TH("1. %s" % T("Objectives"))),
TR(TD(report.objectives)),
TR(TH("2. %s" % T("Methodology"))),
TR(TD(report.methodology)),
TR(TH("3. %s" % T("Implemented Actions"))),
TR(TD(report.actions)),
TR(TH("4. %s" % T("About the participants"))),
TR(TD(report.participants)),
TR(TH("5. %s" % T("Results and Lessons Learned"))),
TR(TD(report.results)),
TR(TH("6. %s" % T("Follow-up Required"))),
TR(TD(report.followup)),
TR(TH("7. %s" % T("Additional relevant information"))),
TR(TD(report.additional)),
TR(TH("8. %s" % T("General Comments"))),
TR(TD(report.comments)),
)
output = DIV(TABLE(TR(TD(logo),
#TD(org_name), # This isn't rtl-proof; check vol_service_record for how to handle that if required
)),
TABLE(TR(TD(title))),
TABLE(header),
TABLE(main),
)
return output
attr["rheader"] = None
from s3.s3export import S3Exporter
exporter = S3Exporter().pdf
pdf_title = title
return exporter(r.resource,
request = r,
method = "list",
pdf_title = pdf_title,
pdf_table_autogrow = "B",
pdf_callback = callback,
**attr
)
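# Illustrative note (assuming standard Eden REST routing): once registered via
# s3db.set_method("hrm", "training_event", method="report_pdf_export", ...) in
# customise_hrm_training_event_controller below, this handler answers URLs of the
# form hrm/training_event/<id>/report_pdf_export and returns the PDF assembled by
# the callback above (logo, title, header table and the numbered sections 1-8).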
# -------------------------------------------------------------------------
def customise_hrm_training_event_controller(**attr):
T = current.T
auth = current.auth
s3db = current.s3db
s3 = current.response.s3
if not auth.s3_has_role("ADMIN") and \
auth.s3_has_roles(("training_coordinator", "training_assistant")):
# Filter Training Events to just those run by this Reference Center
from s3 import FS
query = FS("~.organisation_id") == auth.user.organisation_id
s3.filter = query
s3db.set_method("hrm", "training_event",
method = "report_pdf_export",
action = hrm_training_event_report_pdf_export,
)
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.component_name == "training_event_report" and r.component_id:
from gluon.html import A, DIV, URL
from s3 import ICON
s3.rfooter = DIV(A(ICON("print"),
" ",
T("PDF Report"),
_href=URL(args=[r.id, "report_pdf_export"]),#, extension="pdf"),
_class="action-btn",
),
)
return result
s3.prep = custom_prep
attr["rheader"] = lambda r: \
s3db.hrm_rheader(r, tabs=[(T("Training Event Details"), None),
(T("Participants"), "participant"),
(T("Report"), "training_event_report"),
])
return attr
settings.customise_hrm_training_event_controller = customise_hrm_training_event_controller
# -------------------------------------------------------------------------
def customise_hrm_training_event_report_resource(r, tablename):
s3db = current.s3db
table = s3db.hrm_training_event_report
table.person_id.default = current.auth.s3_logged_in_person()
table.person_id.label = T("Name")
ns_only("hrm_training_event_report",
required = False,
branches = False,
updateable = False,
)
table.organisation_id.label = T("National Society Visited")
table.code.label = T("Finance Codes")
from s3 import S3SQLCustomForm, S3SQLInlineComponent
crud_form = S3SQLCustomForm("person_id",
"job_title_id",
"organisation_id",
"purpose",
"code",
"date",
(("1. %s" % table.objectives.label), "objectives"),
(("2. %s" % table.methodology.label), "methodology"),
(("3. %s" % table.actions.label), "actions"),
(("4. %s" % table.participants.label), "participants"),
(("5. %s" % table.results.label), "results"),
(("6. %s" % table.followup.label), "followup"),
(("7. %s" % table.additional.label), "additional"),
(("8. %s" % table.comments.label), "comments"),
S3SQLInlineComponent("document",
label = "9. %s" % T("Supporting Documentation"),
link = False,
fields = ["file"],
),
"comments",
)
s3db.configure(tablename,
crud_form = crud_form,
)
settings.customise_hrm_training_event_report_resource = customise_hrm_training_event_report_resource
# -------------------------------------------------------------------------
def customise_member_membership_resource(r, tablename):
from s3layouts import S3PopupLink
ADD_MEMBERSHIP_TYPE = T("Create Partner Type")
s3db = current.s3db
table = s3db.member_membership
table.code.label = T("Partner ID")
table.membership_type_id.comment = S3PopupLink(f = "membership_type",
label = ADD_MEMBERSHIP_TYPE,
title = ADD_MEMBERSHIP_TYPE,
tooltip = T("Add a new partner type to the catalog."),
)
list_fields = [(T("Full Name"), "person_id"),
"organisation_id",
"membership_type_id",
"code",
(T("National ID"), "person_id$national_id.value"),
(T("Email"), "email.value"),
(T("Mobile Phone"), "phone.value"),
"membership_fee",
(T("Paid"), "paid"),
]
s3db.configure(tablename,
list_fields = list_fields,
)
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Create Partner"),
title_display = T("Partner Details"),
title_list = T("Partners"),
title_update = T("Edit Partner Details"),
title_upload = T("Import Partners"),
label_list_button = T("List Partners"),
label_delete_button = T("Delete Partner"),
msg_record_created = T("Partner added"),
msg_record_modified = T("Partner updated"),
msg_record_deleted = T("Partner deleted"),
msg_list_empty = T("No Partners currently defined"))
settings.customise_member_membership_resource = customise_member_membership_resource
# -------------------------------------------------------------------------
def customise_member_membership_controller(**attr):
ns_only("member_membership",
required = True,
branches = True,
updateable = True,
)
return attr
settings.customise_member_membership_controller = customise_member_membership_controller
# -------------------------------------------------------------------------
def customise_member_membership_type_resource(r, tablename):
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Create Partner Type"),
title_display = T("Partner Type Details"),
title_list = T("Partner Types"),
title_update = T("Edit Partner Type Details"),
title_upload = T("Import Partner Types"),
label_list_button = T("List Partner Types"),
label_delete_button = T("Delete Partner Type"),
msg_record_created = T("Partner Type added"),
msg_record_modified = T("Partner Type updated"),
msg_record_deleted = T("Partner Type deleted"),
msg_list_empty = T("No Partner Types currently defined"))
settings.customise_member_membership_type_resource = customise_member_membership_type_resource
# -------------------------------------------------------------------------
def customise_member_membership_type_controller(**attr):
ns_only("member_membership_type",
required = False,
branches = False,
updateable = True,
)
return attr
settings.customise_member_membership_type_controller = customise_member_membership_type_controller
# -------------------------------------------------------------------------
def customise_inv_home():
"""
Homepage for the Inventory module
"""
from gluon import URL
from s3 import s3_redirect_default
auth = current.auth
if auth.user and auth.user.site_id:
has_role = auth.s3_has_role
if has_role("national_wh_manager") or \
has_role(current.session.s3.system_roles.ORG_ADMIN):
pass
else:
# Redirect to this Warehouse
table = current.s3db.inv_warehouse
wh = current.db(table.site_id == auth.user.site_id).select(table.id,
limitby = (0, 1)
).first()
if wh:
s3_redirect_default(URL(c="inv", f="warehouse",
args = [wh.id, "inv_item"],
))
# Redirect to Warehouse Summary Page
s3_redirect_default(URL(c="inv", f="warehouse", args="summary"))
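# Note: the net effect is that warehouse-level users land directly on their own
# warehouse's stock list, while national WH managers, ORG_ADMINs and users
# without a site go to the warehouse summary page instead.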
settings.customise_inv_home = customise_inv_home
# -------------------------------------------------------------------------
def inv_pdf_header(r, title=None):
"""
PDF header for Stock Reports
@param r: the S3Request
@param title: the report title
"""
# Get organisation name and logo
from .layouts import OM
name, logo = OM().render()
from gluon.html import DIV, H2, H4, P, TABLE, TR, TD
# Report title and subtitle
title = H2(title) if title else ""
subtitle = ""
get_vars = r.get_vars
report = get_vars.get("report")
if report == "movements":
from s3 import S3TypeConverter, S3DateTime
# Get earliest/latest date from filter
convert = S3TypeConverter.convert
dtstr = get_vars.get("_transaction.date__ge")
earliest = convert(datetime.datetime, dtstr) if dtstr else ""
dtstr = get_vars.get("_transaction.date__le")
latest = convert(datetime.datetime, dtstr) if dtstr else ""
# Convert into local calendar/format
if earliest:
earliest = S3DateTime.date_represent(earliest, utc=True)
if latest:
latest = S3DateTime.date_represent(latest, utc=True)
# Add as subtitle
if earliest or latest:
subtitle = P(" - ".join((earliest, latest)))
output = TABLE(TR(TD(DIV(logo, H4(name))),
TD(DIV(title, subtitle)),
),
)
return output
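# Illustrative example (hypothetical values): a movements report requested with
# ?report=movements&_transaction.date__ge=2020-01-01&_transaction.date__le=2020-03-31
# would have both dates converted, re-represented in the local calendar/format and
# joined with " - " to form the subtitle beneath the report title.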
# -------------------------------------------------------------------------
def customise_inv_inv_item_resource(r, tablename):
s3db = current.s3db
resource = r.resource
if resource.tablename == "inv_inv_item" and r.method == "grouped":
report = r.get_vars.get("report")
if report == "weight_and_volume":
# Add field methods for total weight and volume
from gluon import Field
table = s3db.inv_inv_item
table.total_weight = Field.Method("total_weight",
s3db.inv_item_total_weight,
)
table.total_volume = Field.Method("total_volume",
s3db.inv_item_total_volume,
)
s3db.configure("inv_inv_item",
extra_fields = ["item_id$weight",
"item_id$volume",
],
)
elif report == "movements":
# Inject a date filter for transactions
filter_widgets = resource.get_config("filter_widgets")
from s3 import S3DateFilter
date_filter = S3DateFilter("transaction_date",
label = T("Date"),
fieldtype = "date",
selector = "_transaction.date",
)
filter_widgets.insert(1, date_filter)
# Stock Reports
stock_reports = {"default": {
"title": T("Stock Position Report"),
"fields": [(T("Warehouse"), "site_id$name"),
"item_id$item_category_id",
"bin",
"item_id$name",
"quantity",
"pack_value",
"total_value",
],
"groupby": ["site_id",
],
"orderby": ["site_id$name",
"item_id$name",
],
"aggregate": [("sum", "quantity"),
("sum", "total_value"),
],
"pdf_header": inv_pdf_header,
},
"weight_and_volume": {
"title": T("Weight and Volume Report"),
"fields": [(T("Warehouse"), "site_id$name"),
"item_id$item_category_id",
"bin",
"item_id$name",
"quantity",
"item_id$weight",
"item_id$volume",
"total_weight",
"total_volume",
],
"groupby": ["site_id",
],
"orderby": ["site_id$name",
"item_id$name",
],
"aggregate": [("sum", "quantity"),
("sum", "total_weight"),
("sum", "total_volume"),
],
"pdf_header": inv_pdf_header,
},
"movements": {
"title": T("Stock Movements Report"),
"fields": [(T("Warehouse"), "site_id$name"),
"item_id$item_category_id",
"bin",
"item_id$name",
(T("Origin/Destination"), "sites"),
(T("Documents"), "documents"),
(T("Initial Quantity"), "original_quantity"),
(T("Incoming"), "quantity_in"),
(T("Outgoing"), "quantity_out"),
(T("Final Quantity"), "quantity"),
],
"groupby": ["site_id",
],
"orderby": ["site_id$name",
"item_id$name",
],
"aggregate": [("sum", "original_quantity"),
("sum", "quantity_in"),
("sum", "quantity_out"),
("sum", "quantity"),
],
"extract": s3db.inv_stock_movements,
"pdf_header": inv_pdf_header,
},
}
current.s3db.configure("inv_inv_item",
create = False,
deletable = False,
editable = False,
listadd = False,
grouped = stock_reports,
)
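# Note: which of the three grouped reports is rendered is chosen by the ?report=
# URL variable (read via r.get_vars above); "default" gives the Stock Position
# Report, with "weight_and_volume" and "movements" as the alternatives.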
settings.customise_inv_inv_item_resource = customise_inv_inv_item_resource
# -------------------------------------------------------------------------
def customise_inv_send_resource(r, tablename):
s3db = current.s3db
s3db.configure("inv_send",
list_fields = ["id",
"send_ref",
"req_ref",
#"sender_id",
"site_id",
"date",
"recipient_id",
"delivery_date",
"to_site_id",
"status",
#"driver_name",
#"driver_phone",
#"vehicle_plate_no",
#"time_out",
"comments",
],
)
# Custom Waybill
s3db.set_method("inv", "send",
method = "form",
action = PrintableShipmentForm,
)
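# Note: PrintableShipmentForm (defined later in this file) dispatches on the
# tablename, so the "form" method configured here renders the Waybill for
# inv_send and the Goods Received Note for inv_recv in the next customisation.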
settings.customise_inv_send_resource = customise_inv_send_resource
# -------------------------------------------------------------------------
def customise_inv_recv_resource(r, tablename):
# Custom GRN
current.s3db.set_method("inv", "recv",
method = "form",
action = PrintableShipmentForm,
)
settings.customise_inv_recv_resource = customise_inv_recv_resource
# -------------------------------------------------------------------------
def customise_inv_warehouse_resource(r, tablename):
settings.inv.recv_tab_label = "Received/Incoming Shipments"
settings.inv.send_tab_label = "Sent Shipments"
# Only Nepal RC use Warehouse Types
s3db = current.s3db
field = s3db.inv_warehouse.warehouse_type_id
field.readable = field.writable = False
list_fields = s3db.get_config("inv_warehouse", "list_fields")
try:
list_fields.remove("warehouse_type_id")
except ValueError:
# Already removed
pass
settings.customise_inv_warehouse_resource = customise_inv_warehouse_resource
# -------------------------------------------------------------------------
def customise_org_facility_resource(r, tablename):
#root_org = current.auth.root_org_name()
#if root_org != HNRC:
# return
# Simplify Form
s3db = current.s3db
table = s3db.org_facility
table.code.readable = table.code.writable = False
table.opening_times.readable = table.opening_times.writable = False
table.website.readable = table.website.writable = False
field = s3db.org_site_facility_type.facility_type_id
field.readable = field.writable = False
# Simplify Search Fields
from s3 import S3TextFilter, S3OptionsFilter, S3LocationFilter
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
text_fields = ["name",
#"code",
"comments",
"organisation_id$name",
"organisation_id$acronym",
]
for level in levels:
lfield = "location_id$%s" % level
text_fields.append(lfield)
s3db.configure("org_facility",
filter_widgets = [
S3TextFilter(text_fields,
label = T("Search"),
),
S3OptionsFilter("organisation_id"),
S3LocationFilter("location_id",
levels = levels,
),
]
)
settings.customise_org_facility_resource = customise_org_facility_resource
# -------------------------------------------------------------------------
def customise_org_office_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
# Organisation needs to be an NS/Branch
ns_only("org_office",
required = True,
branches = True,
# default
#limit_filter_opts = True,
)
return result
s3.prep = custom_prep
return attr
settings.customise_org_office_controller = customise_org_office_controller
# -------------------------------------------------------------------------
def customise_org_organisation_controller(**attr):
s3 = current.response.s3
type_filter = current.request.get_vars.get("organisation_type.name")
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.interactive or r.representation == "aadata":
if not r.component or r.component_name == "branch":
resource = r.resource
table = resource.table
if r.function == "training_center":
auth = current.auth
if not auth.s3_has_role("ADMIN"):
# See NS Training Centers only
resource.add_filter(table.root_organisation == auth.root_org())
if not auth.s3_has_role("ORG_ADMIN"):
resource.configure(insertable = False)
type_label = T("Type")
if r.get_vars.get("caller") == "org_facility_organisation_id":
# Simplify
from s3 import S3SQLCustomForm
crud_form = S3SQLCustomForm("name",
"acronym",
"phone",
"comments",
)
resource.configure(crud_form = crud_form,
)
else:
list_fields = ["name",
"acronym",
"organisation_organisation_type.organisation_type_id",
"country",
"website",
]
if type_filter:
type_names = type_filter.split(",")
if len(type_names) == 1:
# Strip Type from list_fields
try:
list_fields.remove("organisation_organisation_type.organisation_type_id")
except ValueError:
# Already removed
pass
type_label = ""
if type_filter == RED_CROSS:
# Modify filter_widgets
filter_widgets = resource.get_config("filter_widgets")
# Remove type (always 'RC')
filter_widgets.pop(1)
# Modify CRUD Strings
s3.crud_strings.org_organisation = Storage(
label_create = T("Create National Society"),
title_display = T("National Society Details"),
title_list = T("Red Cross & Red Crescent National Societies"),
title_update = T("Edit National Society"),
title_upload = T("Import Red Cross & Red Crescent National Societies"),
label_list_button = T("List Red Cross & Red Crescent National Societies"),
label_delete_button = T("Delete National Society"),
msg_record_created = T("National Society added"),
msg_record_modified = T("National Society updated"),
msg_record_deleted = T("National Society deleted"),
msg_list_empty = T("No Red Cross & Red Crescent National Societies currently registered")
)
# Add Region to list_fields
list_fields.insert(-1, "region_id")
# Region is required
table.region_id.requires = table.region_id.requires.other
else:
table.region_id.readable = table.region_id.writable = False
if type_filter == "Supplier":
# Show simple free-text contact field
contact_field = table.contact
contact_field.readable = True
contact_field.writable = True
# Include contact information in list_fields
list_fields = ["name",
"acronym",
"country",
"contact",
"phone",
"website",
]
resource.configure(list_fields=list_fields)
if r.interactive:
table.country.label = T("Country")
from s3 import S3SQLCustomForm, S3SQLInlineLink
crud_form = S3SQLCustomForm(
"name",
"acronym",
S3SQLInlineLink("organisation_type",
field = "organisation_type_id",
label = type_label,
multiple = False,
#widget = "hierarchy",
),
"region_id",
"country",
"contact",
"phone",
"website",
"logo",
"comments",
)
resource.configure(crud_form=crud_form)
return result
s3.prep = custom_prep
settings = current.deployment_settings
if type_filter == "Supplier":
# Suppliers have simpler Tabs (hide Offices, Warehouses and Contacts)
tabs = [(T("Basic Details"), None, {"native": 1}),
]
if settings.get_L10n_translate_org_organisation():
tabs.append((T("Local Names"), "name"))
attr["rheader"] = lambda r: current.s3db.org_rheader(r, tabs=tabs)
elif type_filter == "Academic,Bilateral,Government,Intergovernmental,NGO,UN agency":
# Partners have simpler Tabs (hide Offices, Warehouses and Contacts)
tabs = [(T("Basic Details"), None, {"native": 1}),
(T("Projects"), "project"),
]
if settings.get_L10n_translate_org_organisation():
tabs.insert(1, (T("Local Names"), "name"))
attr["rheader"] = lambda r: current.s3db.org_rheader(r, tabs=tabs)
else:
# Enable tab for PDF card configurations
settings.org.pdf_card_configs = True
return attr
settings.customise_org_organisation_controller = customise_org_organisation_controller
# -------------------------------------------------------------------------
def customise_pr_address_resource(r, tablename):
#if current.auth.root_org_name() in ("Honduran Red Cross",
# "Paraguayan Red Cross",
# ):
# Location Hierarchy loaded: Leave things as they are since we have the
# pass
#else:
s3db = current.s3db
s3db.gis_location.addr_street.label = T("Address")
s3db.configure("pr_address",
list_fields = ["type",
(current.messages.COUNTRY, "location_id$L0"),
(T("Address"), "location_id$addr_street"),
#(settings.get_ui_label_postcode(),
# "location_id$addr_postcode")
],
)
settings.customise_pr_address_resource = customise_pr_address_resource
# -------------------------------------------------------------------------
def customise_pr_contact_resource(r, tablename):
table = current.s3db[tablename]
table.comments.readable = table.comments.writable = False
table.contact_description.readable = table.contact_description.writable = False
table.priority.readable = table.priority.writable = False
settings.customise_pr_contact_resource = customise_pr_contact_resource
# -------------------------------------------------------------------------
def customise_pr_education_resource(r, tablename):
s3db = current.s3db
table = s3db[tablename]
table.country.readable = table.country.writable = True
table.grade.readable = table.grade.writable = False
table.major.readable = table.major.writable = False
s3db.configure(tablename,
list_fields = [# Normally accessed via component
#"person_id",
"year",
"level_id",
"award",
#"major",
#"grade",
"institute",
],
)
settings.customise_pr_education_resource = customise_pr_education_resource
# -------------------------------------------------------------------------
def customise_pr_forum_resource(r, tablename):
table = current.s3db.pr_forum
table.forum_type.readable = table.forum_type.writable = False
settings.customise_pr_forum_resource = customise_pr_forum_resource
# -------------------------------------------------------------------------
def customise_pr_forum_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.method == "assign":
auth = current.auth
has_role = auth.s3_has_role
if not has_role("ADMIN") and has_role("training_coordinator"):
# Filter people to just those Trained by this Reference Center or Staff of this Reference Center
from s3 import FS
organisation_id = auth.user.organisation_id
query = (FS("training.training_event_id$organisation_id") == organisation_id) | \
(FS("user.organisation_id") == organisation_id)
s3.filter = query
return result
s3.prep = custom_prep
return attr
settings.customise_pr_forum_controller = customise_pr_forum_controller
# -------------------------------------------------------------------------
#def customise_pr_group_controller(**attr):
# # Organisation needs to be an NS/Branch
# ns_only("org_organisation_team",
# required = False,
# branches = True,
# )
# return attr
#settings.customise_pr_group_controller = customise_pr_group_controller
# -------------------------------------------------------------------------
def customise_pr_person_resource(r, tablename):
table = current.s3db[tablename]
table.first_name.label = T("Forenames")
table.middle_name.label = T("Father's Surname")
table.last_name.label = T("Mother's Surname")
settings.customise_pr_person_resource = customise_pr_person_resource
# -------------------------------------------------------------------------
def customise_pr_person_controller(**attr):
s3db = current.s3db
s3 = current.response.s3
# Enable scalability-optimized strategies
settings.base.bigtable = True
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
auth = current.auth
has_role = auth.s3_has_role
EXTERNAL = False
if "profile" in current.request.get_vars:
profile = True
else:
len_roles = len(current.session.s3.roles)
if (len_roles <= 2) or \
(len_roles == 3 and has_role("RIT_MEMBER") and not has_role("ADMIN")):
profile = True
else:
profile = False
if r.function == "trainee_person":
EXTERNAL = True
s3.crud_strings["pr_person"].update(
title_display = T("External Trainee Details"),
title_update = T("External Trainee Details")
)
if profile:
# Configure for personal mode
# People can edit their own HR data
configure = s3db.configure
configure("hrm_human_resource",
deletable = False,
#editable = True,
insertable = False,
)
if not has_role("RIT_MEMBER"):
#configure("hrm_certification",
# deletable = True,
# editable = True,
# insertable = True,
# )
configure("hrm_training",
deletable = False,
editable = False,
insertable = False,
)
component_name = r.component_name
method = r.method
if method == "import":
# HR records may be created via import
# Default to Volunteers
s3db.hrm_human_resource.type.default = 2
# Doesn't work, as the email is only created after the human_resource record
#s3db.configure("hrm_human_resource",
# create_onaccept = hrm_human_resource_create_onaccept,
# )
elif method == "record" or component_name == "human_resource":
table = s3db.hrm_human_resource
if EXTERNAL:
db = current.db
f = table.organisation_id
f.label = T("Organization")
# Organisation cannot be an NS/Branch
# Lookup the organisation_type_ids for Red Cross and Training Centers
ttable = s3db.org_organisation_type
type_ids = db(ttable.name.belongs((RED_CROSS, "Training Center"))).select(ttable.id,
limitby=(0, 2),
cache = s3db.cache,
)
if type_ids:
from s3 import IS_ONE_OF
ltable = db.org_organisation_organisation_type
rows = db(ltable.organisation_type_id.belongs(type_ids)).select(ltable.organisation_id)
not_filter_opts = [row.organisation_id for row in rows]
f.requires = IS_ONE_OF(db, "org_organisation.id",
f.represent,
not_filterby = "id",
not_filter_opts = not_filter_opts,
updateable = True,
orderby = "org_organisation.name",
sort = True)
else:
# Organisation needs to be an NS/Branch
if auth.s3_has_roles(("surge_capacity_manager",
"ns_training_manager",
"ns_training_assistant",
"training_coordinator",
"training_assistant",
)):
updateable = False
else:
updateable = True
ns_only("hrm_human_resource",
required = True,
branches = True,
updateable = updateable,
)
f = table.essential
f.readable = f.writable = False
f = table.site_contact
f.readable = f.writable = False
if method == "record":
if not auth.s3_has_roles(("ORG_ADMIN",
"hr_manager",
"hr_assistant",
)):
table.organisation_id.writable = False
# Hide the Site field as this data isn't loaded & we want to keep things simple
# @ToDo: Re-enable for specific NS as-required
f = table.site_id
f.readable = f.writable = False
# Use default form (legacy)
#s3db.clear_config("hrm_human_resource", "crud_form")
elif not component_name:
s3db.configure("pr_person",
listadd = True,
)
# Basic Details tab
f = s3db.pr_person.middle_name
f.readable = f.writable = True
f = s3db.pr_person_details.nationality2
f.readable = f.writable = True
from s3 import S3SQLCustomForm
crud_form = S3SQLCustomForm("first_name",
"middle_name",
"last_name",
"date_of_birth",
"gender",
"person_details.marital_status",
"person_details.nationality",
"person_details.nationality2",
"comments",
)
s3db.configure("pr_person",
crud_form = crud_form,
)
elif component_name == "appraisal":
atable = r.component.table
atable.organisation_id.readable = atable.organisation_id.writable = False
# Organisation needs to be an NS
#ns_only("hrm_appraisal",
# required = True,
# branches = False,
# )
field = atable.supervisor_id
field.readable = field.writable = False
field = atable.job_title_id
field.comment = None
field.label = T("Sector") # RDRT-specific
from s3 import IS_ONE_OF
field.requires = IS_ONE_OF(current.db, "hrm_job_title.id",
field.represent,
filterby = "type",
filter_opts = (4,),
)
elif component_name == "certification":
ctable = r.component.table
ctable.organisation_id.readable = False
elif component_name == "competency":
ctable = r.component.table
ctable.skill_id.label = T("Language")
ctable.organisation_id.readable = False
elif component_name == "experience":
# 2 options here: Work Experience & Missions
# These have very different views
# Work Experience
etable = r.component.table
etable.organisation_id.readable = etable.organisation_id.writable = False
etable.job_title_id.readable = etable.job_title_id.writable = False
etable.responsibilities.readable = etable.responsibilities.writable = False
etable.hours.readable = etable.hours.writable = False
etable.supervisor_id.readable = etable.supervisor_id.writable = False
etable.organisation.readable = etable.organisation.writable = True
etable.job_title.readable = etable.job_title.writable = True
from s3 import S3LocationSelector
etable.location_id.label = T("Country")
etable.location_id.widget = S3LocationSelector(levels=("L0",),
show_map=False,
show_postcode=False,
)
elif component_name == "identity":
#itable = r.component.table
# Default
#itable.country_code.readable = itable.country_code.writable = False
#itable.ia_name.readable = itable.ia_name.writable = False
f = r.component.table.ia_name
f.readable = f.writable = False
list_fields = ["type",
"value",
"valid_until",
]
s3db.configure("pr_identity",
list_fields = list_fields,
)
# Moved to MedicalTab
#elif component_name == "physical_description":
# from gluon import DIV
# dtable = r.component.table
# dtable.medical_conditions.comment = DIV(_class="tooltip",
# _title="%s|%s" % (T("Medical Conditions"),
# T("Chronic Illness, Disabilities, Mental/Psychological Condition etc.")))
# dtable.allergic.writable = dtable.allergic.readable = True
# dtable.allergies.writable = dtable.allergies.readable = True
# dtable.ethnicity.writable = dtable.ethnicity.readable = False
# dtable.other_details.writable = dtable.other_details.readable = False
# import json
# SEPARATORS = (",", ":")
# s3.jquery_ready.append('''S3.showHidden('%s',%s,'%s')''' % \
# ("allergic", json.dumps(["allergies"], separators=SEPARATORS), "pr_physical_description"))
if not EXTERNAL and \
auth.s3_has_roles(ID_CARD_EXPORT_ROLES):
# Show button to export ID card
settings.hrm.id_cards = True
return True
s3.prep = custom_prep
if current.request.controller in ("hrm", "vol"):
attr["csv_template"] = ("../../themes/RMSAmericas/formats", "hrm_person")
# Common rheader for all views
attr["rheader"] = s3db.hrm_rheader
return attr
settings.customise_pr_person_controller = customise_pr_person_controller
# -------------------------------------------------------------------------
def customise_pr_physical_description_resource(r, tablename):
from gluon import DIV
from s3 import S3SQLCustomForm
s3db = current.s3db
#s3db.pr_physical_description.medical_conditions.comment = DIV(_class="tooltip",
# _title="%s|%s" % (T("Medical Conditions"),
# T("Chronic Illness, Disabilities, Mental/Psychological Condition etc.")))
s3db.pr_physical_description.medical_conditions.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Medical Conditions"),
T("It is important to include, if they exist: surgical history, medical restrictions, vaccines, etc.")))
s3db.configure(tablename,
crud_form = S3SQLCustomForm("blood_type",
"medical_conditions",
"medication",
"diseases",
"allergic",
"allergies",
),
)
settings.customise_pr_physical_description_resource = customise_pr_physical_description_resource
# -------------------------------------------------------------------------
def customise_supply_item_category_resource(r, tablename):
#root_org = current.auth.root_org_name()
#if root_org == HNRC:
# Not using Assets Module
field = current.s3db.supply_item_category.can_be_asset
field.readable = field.writable = False
settings.customise_supply_item_category_resource = customise_supply_item_category_resource
# -------------------------------------------------------------------------
def customise_project_window_resource(r, tablename):
r.resource.configure(deletable = False,
insertable = False,
)
settings.customise_project_window_resource = customise_project_window_resource
# -------------------------------------------------------------------------
def customise_project_activity_data_resource(r, tablename):
if current.auth.s3_has_roles(("monitoring_evaluation", "ORG_ADMIN")):
# Normal Access
return
# Project Manager
if r.method == "update":
table = current.s3db.project_activity_data
if r.tablename == "project_activity_data":
record_id = r.id
else:
record_id = r.component_id
record = current.db(table.id == record_id).select(table.value,
limitby=(0, 1)
).first()
if record.value:
# Redirect to Read-only mode
from gluon import redirect
redirect(r.url(method="read"))
else:
# Cannot edit anything
for f in table.fields:
table[f].writable = False
# Except adding the actual value
table.value.writable = True
# Or amending the Comments
table.comments.writable = True
else:
s3db = current.s3db
table = s3db.project_window
record = current.db(table.deleted == False).select(table.start_date,
table.end_date,
limitby = (0, 1)
).first()
if record:
if record.start_date <= r.utcnow.date() <= record.end_date:
# Inside the time window: Project Manager may update Actuals
return
# Outside the time window: Project Manager cannot add the Actual value
s3db.project_activity_data.value.writable = False
s3db.configure("project_activity_data",
updateable = False,
)
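# Note: for Project Managers this means an actual value can no longer be edited
# once recorded (the update is redirected to read-only view), and outside an open
# project_window new actuals cannot be entered at all.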
settings.customise_project_activity_data_resource = customise_project_activity_data_resource
# -------------------------------------------------------------------------
def customise_project_organisation_resource(r, tablename):
root_org = current.auth.root_org_name()
if root_org == HNRC:
from gluon import IS_IN_SET
currency_opts = {"EUR" : "EUR",
"CHF" : "CHF",
"HNL" : "L",
"USD" : "USD",
}
f = current.s3db.project_organisation.currency
f.represent = currency_represent
f.requires = IS_IN_SET(currency_opts)
settings.customise_project_organisation_resource = customise_project_organisation_resource
# -------------------------------------------------------------------------
def project_project_postprocess(form):
"""
When using Budget Monitoring (i.e. HNRC) then create the entries
"""
db = current.db
s3db = current.s3db
project_id = form.vars.id
# Read Budget Entity ID, Start Date and End Date
ptable = s3db.project_project
project = db(ptable.id == project_id).select(ptable.budget_entity_id,
ptable.name,
ptable.start_date,
ptable.end_date,
limitby=(0, 1)
).first()
if not project:
return
# Copy Project Name to Budget Name
budget_entity_id = project.budget_entity_id
btable = s3db.budget_budget
query = (btable.budget_entity_id == budget_entity_id)
budget = db(query).select(btable.id, # Needed for update_record
# If we want to provide smoothed default expected values
#btable.total_budget,
btable.currency,
# Assume Monthly
#btable.monitoring_frequency,
limitby=(0, 1)
).first()
if not budget:
return
# Build Budget Name from Project Name
project_name = project.name
# Check for duplicates
query = (btable.name == project_name) & \
(btable.id != budget.id)
duplicate = db(query).select(btable.id,
limitby=(0, 1)
).first()
if not duplicate:
budget_name = project_name[:128]
else:
# Need another Unique name
import uuid
budget_name = "%s %s" % (project_name[:91], uuid.uuid4())
budget.update_record(name = budget_name)
mtable = s3db.budget_monitoring
exists = db(mtable.budget_entity_id == budget_entity_id).select(mtable.id,
limitby=(0, 1))
if not exists:
# Create Monitoring Data entries
start_date = project.start_date
end_date = project.end_date
if not start_date or not end_date:
return
# Assume Monthly
#monitoring_frequency = budget.monitoring_frequency
#if not monitoring_frequency:
# return
#total_budget = budget.total_budget
currency = budget.currency
# Create entries for the 1st of every month between start_date and end_date
from dateutil import rrule
dates = list(rrule.rrule(rrule.MONTHLY, bymonthday=1, dtstart=start_date, until=end_date))
for d in dates:
mtable.insert(budget_entity_id = budget_entity_id,
# @ToDo: This needs to be modified whenever entries are manually edited
# Set/update this in budget_monitoring_onaccept
# - also check here that we don't exceed overall budget
start_date = start_date,
end_date = d,
currency = currency,
)
# Start date relates to previous entry
start_date = d
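# Illustrative example (hypothetical dates): a project running 2021-01-15 to
# 2021-04-10 yields the monthly boundaries 2021-02-01, 2021-03-01 and 2021-04-01,
# so three budget_monitoring rows are inserted, each starting where the previous
# one ended: (2021-01-15 - 2021-02-01), (2021-02-01 - 2021-03-01) and
# (2021-03-01 - 2021-04-01).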
# -------------------------------------------------------------------------
def customise_project_programme_controller(**attr):
# Organisation needs to be an NS/Branch
ns_only("project_programme",
required = True,
branches = False,
updateable = True,
)
return attr
settings.customise_project_programme_controller = customise_project_programme_controller
# -------------------------------------------------------------------------
def customise_project_project_controller(**attr):
tablename = "project_project"
# Default Filter
from s3 import s3_set_default_filter
s3_set_default_filter("~.organisation_id",
user_org_default_filter,
tablename = "project_project")
# Load standard model
s3db = current.s3db
table = s3db[tablename]
# Disable Map Tab on Summary View
# - until we can support multiple Points per Record
settings.ui.summary = ({"common": True,
"name": "add",
"widgets": [{"method": "create"}],
},
#{"common": True,
# "name": "cms",
# "widgets": [{"method": "cms"}]
# },
{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}]
},
{"name": "charts",
"label": "Report",
"widgets": [{"method": "report",
"ajax_init": True}]
},
#{"name": "map",
# "label": "Map",
# "widgets": [{"method": "map",
# "ajax_init": True}],
# },
)
# @ToDo: S3SQLInlineComponent for Project orgs
# Get IDs for Partner NS/Partner Donor
# db = current.db
# ttable = db.org_organisation_type
# rows = db(ttable.deleted != True).select(ttable.id,
# ttable.name,
# )
# rc = []
# not_rc = []
# nappend = not_rc.append
# for row in rows:
# if row.name == RED_CROSS:
# rc.append(row.id)
# elif row.name == "Supplier":
# pass
# else:
# nappend(row.id)
# Custom Fields
table.organisation_id.label = T("Host National Society")
# Custom Crud Form
from s3 import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineLink
# Special cases for different NS
root_org = current.auth.root_org_name()
if root_org == HNRC:
# @ToDo: Use Inter-American Framework instead (when extending to Zone office)
# @ToDo: Add 'Business Line' (when extending to Zone office)
project_settings = settings.project
project_settings.details_tab = True
#project_settings.community_volunteers = True
# Done in a more structured way instead
objectives = None
outputs = None
project_settings.goals = True
project_settings.outcomes = True
project_settings.outputs = True
project_settings.indicators = True
project_settings.indicator_criteria = True
project_settings.status_from_activities = True
table.human_resource_id.label = T("Coordinator")
# Use Budget module instead of ProjectAnnualBudget
project_settings.multiple_budgets = False
project_settings.budget_monitoring = True
# Require start/end dates
table.start_date.requires = table.start_date.requires.other
table.end_date.requires = table.end_date.requires.other
budget = S3SQLInlineComponent("budget",
label = T("Budget"),
#link = False,
multiple = False,
fields = ["total_budget",
"currency",
#"monitoring_frequency",
],
)
btable = s3db.budget_budget
# Need to provide a name
import random, string
btable.name.default = "".join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(16))
btable.monitoring_frequency.default = 3 # Monthly
btable.currency.represent = currency_represent
currency_opts = {"EUR" : "EUR",
"CHF" : "CHF",
"HNL" : "L",
"USD" : "USD",
}
from gluon import IS_IN_SET
btable.currency.requires = IS_IN_SET(currency_opts)
s3db.budget_monitoring.currency.represent = currency_represent
postprocess = project_project_postprocess
list_fields = s3db.get_config("project_project", "list_fields")
list_fields += [(T("Actual Progress"), "actual_progress_by_activities"),
(T("Planned Progress"), "planned_progress_by_activities"),
]
else:
objectives = "objectives"
outputs = S3SQLInlineComponent(
"output",
label = T("Outputs"),
fields = ["name", "status"],
)
budget = None
postprocess = None
if settings.get_project_programmes():
# Inject inline link for programmes including AddResourceLink
#from s3layouts import S3PopupLink
comment = s3db.project_programme_id.attr.comment
comment.vars = {"caller": "link_defaultprogramme",
"prefix": "project",
"parent": "programme_project",
}
programme = S3SQLInlineLink("programme",
label = T("Program"),
field = "programme_id",
multiple = False,
comment = comment,
)
else:
programme = None
crud_form = S3SQLCustomForm(
"organisation_id",
programme,
"name",
"code",
"description",
"status_id",
"start_date",
"end_date",
budget,
#S3SQLInlineComponent(
# "location",
# label = T("Locations"),
# fields = ["location_id"],
#),
# Outputs
outputs,
S3SQLInlineLink(
"hazard",
label = T("Hazards"),
field = "hazard_id",
help_field = s3db.project_hazard_help_fields,
cols = 4,
translate = True,
),
S3SQLInlineLink(
"sector",
label = T("Sectors"),
field = "sector_id",
cols = 4,
translate = True,
),
S3SQLInlineLink(
"theme",
label = T("Themes"),
field = "theme_id",
help_field = s3db.project_theme_help_fields,
cols = 4,
translate = True,
# Filter Theme by Sector
filterby = "theme_id:project_theme_sector.sector_id",
match = "sector_project.sector_id",
script = '''
$.filterOptionsS3({
'trigger':{'alias':'sector','name':'sector_id','inlineType':'link'},
'target':{'alias':'theme','name':'theme_id','inlineType':'link'},
'lookupPrefix':'project',
'lookupResource':'theme',
'lookupKey':'theme_id:project_theme_sector.sector_id',
'showEmptyField':false,
'tooltip':'project_theme_help_fields(id,name)'
})'''
),
objectives,
"human_resource_id",
# Disabled since we would need to filter organisation_id to either organisation_type_id == RC or NOT RC,
# hide Branches of RCs,
# and rewrite the lookup for organisation_type_id to go via the link table
# Partner NS
# S3SQLInlineComponent(
# "organisation",
# name = "partnerns",
# label = T("Partner National Societies"),
# fields = ["organisation_id",
# "comments",
# ],
# Filter Organisation by Type
# filter = ["organisation_id": {"filterby": "organisation_type_id",
# "filterfor": rc,
# }],
# filterby = dict(field = "role",
# options = [9])
# ),
# Partner Orgs
# S3SQLInlineComponent(
# "organisation",
# name = "partner",
# label = T("Partner Organizations"),
# fields = ["organisation_id",
# "comments",
# ],
# Filter Organisation by Type
# filter = ["organisation_id": {"filterby": "organisation_type_id",
# "filterfor": not_rc,
# }],
# filterby = dict(field = "role",
# options = [2])
# ),
# Donors
# S3SQLInlineComponent(
# "organisation",
# name = "donor",
# label = T("Donor(s)"),
# fields = ["organisation_id",
# "amount",
# "currency"],
# Filter Organisation by Type
# filter = ["organisation_id": {"filterby": "organisation_type_id",
# "filterfor": not_rc,
# }],
# filterby = dict(field = "role",
# options = [3])
# ),
#"budget",
#"currency",
"comments",
postprocess = postprocess,
)
s3db.configure(tablename,
crud_form = crud_form,
)
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.method == "grouped":
grouped = {"default":
{"title": T("Global Report of Projects Status"),
"fields": [(T("Project"), "name"),
(T("Program"), "programme.name"),
(T("Donor"), "donor.organisation_id"),
(T("Budget"), "budget.total_budget"),
(T("Location"), "location.location_id"),
"start_date",
"end_date",
],
"orderby": ["name",
],
"aggregate": [("sum", "budget.total_budget"),
],
},
}
from s3 import S3DateFilter, S3OptionsFilter
filter_widgets = [S3DateFilter("date",
label = T("Time Period"),
hide_time = True,
),
S3OptionsFilter("programme_project.programme_id",
label = T("Programs"),
),
S3OptionsFilter("theme_project.theme_id",
label = T("Themes"),
),
S3OptionsFilter("sector_project.sector_id",
label = T("Sectors"),
),
S3OptionsFilter("beneficiary.parameter_id",
label = T("Beneficiaries"),
),
S3OptionsFilter("hazard_project.hazard_id",
label = T("Hazards"),
),
]
s3db.configure(tablename,
filter_widgets = filter_widgets,
grouped = grouped,
)
elif r.component:
if r.component_name == "organisation":
component_id = r.component_id
if component_id:
# No r.component.record :/
ctable = s3db.project_organisation
crecord = current.db(ctable.id == component_id).select(ctable.role,
limitby=(0, 1)
).first()
if crecord.role == settings.get_project_organisation_lead_role():
ns_only("project_organisation",
required = True,
branches = False,
updateable = True,
)
#ctable.organisation_id.requires = \
# s3db.org_organisation_requires(required = True,
# # Only allowed to add Projects for Orgs
# # that the user has write access to
# updateable = True,
# )
else:
# Lead Organisation needs to be an NS (not a branch)
ns_only(tablename,
required = True,
branches = False,
# default
#limit_filter_opts = True,
)
# Set the Host NS filter as Visible so that the default filter works
filter_widgets = s3db.get_config(tablename, "filter_widgets")
for widget in filter_widgets:
if widget.field == "organisation_id":
widget.opts.hidden = False
break
return result
s3.prep = custom_prep
return attr
settings.customise_project_project_controller = customise_project_project_controller
# -------------------------------------------------------------------------
#def customise_project_beneficiary_resource(r, tablename):
# """
# Link Project Beneficiaries to Activity Type
# """
# if r.interactive and r.component:
# if r.tablename == "project_project":
# # We are a component of the Project
# project_id = r.id
# elif r.tablename == "project_location":
# # We are a component of the Project Location
# project_id = r.record.project_id
# else:
# # Unknown!
# return
# db = current.db
# s3db = current.s3db
# # Filter Activity Type by Sector
# ltable = s3db.project_sector_project
# rows = db(ltable.project_id == project_id).select(ltable.sector_id)
# sectors = [row.sector_id for row in rows]
# ltable = s3db.project_activity_type_sector
# rows = db(ltable.sector_id.belongs(sectors)).select(ltable.activity_type_id)
# filteropts = [row.activity_type_id for row in rows]
# def postprocess(form):
# # Update project_location.activity_type
# beneficiary_id = form.vars.get("id", None)
# table = db.project_beneficiary
# row = db(table.id == beneficiary_id).select(table.project_location_id,
# limitby = (0, 1)
# ).first()
# if not row:
# return
# project_location_id = row.project_location_id
# if not project_location_id:
# return
# ltable = db.project_beneficiary_activity_type
# row = db(ltable.beneficiary_id == beneficiary_id).select(ltable.activity_type_id,
# limitby = (0, 1)
# ).first()
# if not row:
# return
# activity_type_id = row.activity_type_id
# ltable = s3db.project_activity_type_location
# query = (ltable.project_location_id == project_location_id) & \
# (ltable.activity_type_id == activity_type_id)
# exists = db(query).select(ltable.id,
# limitby = (0, 1)
# ).first()
# if not exists:
# ltable.insert(project_location_id = project_location_id,
# activity_type_id = activity_type_id,
# )
# from s3 import S3SQLCustomForm, S3SQLInlineLink
# crud_form = S3SQLCustomForm(#"project_id",
# "project_location_id",
# S3SQLInlineLink("activity_type",
# field = "activity_type_id",
# filterby = "id",
# options = filteropts,
# label = T("Activity Type"),
# multiple = False,
# ),
# "parameter_id",
# "value",
# "target_value",
# "date",
# "end_date",
# "comments",
# postprocess = postprocess,
# )
# s3db.configure(tablename,
# crud_form = crud_form,
# )
# elif not r.component:
# # Report
# from s3 import S3OptionsFilter
# resource = r.resource
# filter_widgets = resource.get_config("filter_widgets")
# filter_widgets.insert(1,
# S3OptionsFilter("beneficiary_activity_type.activity_type_id",
# label = T("Activity Type"),
# ))
# report_options = resource.get_config("report_options")
# report_options.rows.append("beneficiary_activity_type.activity_type_id")
# # Same object so would be added twice
# #report_options.cols.append("beneficiary_activity_type.activity_type_id")
# resource.configure(filter_widgets = filter_widgets,
# report_options = report_options,
# )
# Only used for activity_types which aren't used by HNRC
#settings.customise_project_beneficiary_resource = customise_project_beneficiary_resource
# -------------------------------------------------------------------------
#def customise_project_indicator_resource(r, tablename):
# table = current.s3db.project_indicator
# table.definition.label = T("Indicator Definition")
# table.measures.label = T("Indicator Criteria")
#settings.customise_project_indicator_resource = customise_project_indicator_resource
# -------------------------------------------------------------------------
def customise_project_indicator_data_resource(r, tablename):
table = current.s3db.project_indicator_data
f = table.start_date
f.readable = f.writable = True
f.label = T("Start Date")
table.end_date.label = T("End Date")
if r.method == "update":
has_role = current.auth.s3_has_role
if has_role("monitoring_evaluation") or has_role("ORG_ADMIN"):
# Normal Access
return
# Project Manager
if r.tablename == "project_indicator_data":
record_id = r.id
else:
record_id = r.component_id
record = current.db(table.id == record_id).select(table.value,
limitby=(0, 1)
).first()
if record.value:
# Redirect to Read-only mode
# @ToDo: Remove 'Update' button from the read-only page
from gluon import redirect
redirect(r.url(method="read"))
else:
# Cannot edit anything
for f in table.fields:
table[f].writable = False
# Except adding the actual value
table.value.writable = True
# Or amending the Comments
table.comments.writable = True
settings.customise_project_indicator_data_resource = customise_project_indicator_data_resource
# -------------------------------------------------------------------------
def customise_project_location_resource(r, tablename):
s3db = current.s3db
table = s3db.project_location
table.name.readable = False
table.percentage.readable = table.percentage.writable = False
#list_fields = s3db.get_config(tablename, "list_fields")
#try:
# list_fields.remove((T("Activity Types"), "activity_type.name"))
#except:
# # Already removed
# pass
settings.customise_project_location_resource = customise_project_location_resource
# -------------------------------------------------------------------------
def customise_project_location_controller(**attr):
s3 = current.response.s3
# Custom postp
#standard_postp = s3.postp
def custom_postp(r, output):
# Call standard postp (just does same thing but different)
#if callable(standard_postp):
# output = standard_postp(r, output)
if r.representation == "plain":
# Map Popup
from gluon import A, TABLE, TR, TD, B, URL
s3db = current.s3db
table = s3db.project_project
project_id = r.record.project_id
resource = s3db.resource("project_project", id=project_id)
list_fields = ("name",
"status_id",
"start_date",
"end_date",
"budget.total_budget",
"budget.currency",
"hazard_project.hazard_id",
"sector_project.sector_id",
"theme_project.theme_id",
# Contact
"human_resource_id",
"overall_status_by_indicators",
)
data = resource.select(list_fields, represent=True)
record = data.rows[0]
item = TABLE(TR(TD(B("%s:" % table.name.label)),
TD(record["project_project.name"]),
),
TR(TD(B("%s:" % table.status_id.label)),
TD(record["project_project.status_id"]),
),
TR(TD(B("%s:" % table.start_date.label)),
TD(record["project_project.start_date"]),
),
TR(TD(B("%s:" % table.end_date.label)),
TD(record["project_project.end_date"]),
),
TR(TD(B("%s:" % T("Budget"))),
TD("%s %s" % (record["budget_budget.currency"],
record["budget_budget.total_budget"])),
),
TR(TD(B("%s:" % s3db.project_hazard_project.hazard_id.label)),
TD(record["project_hazard_project.hazard_id"]),
),
TR(TD(B("%s:" % s3db.project_sector_project.sector_id.label)),
TD(record["project_sector_project.sector_id"]),
),
TR(TD(B("%s:" % s3db.project_theme_project.theme_id.label)),
TD(record["project_theme_project.theme_id"]),
),
TR(TD(B("%s:" % table.human_resource_id.label)),
TD(record["project_project.human_resource_id"]),
),
TR(TD(B("%s:" % T("Cumulative Status"))),
TD(record["project_project.overall_status_by_indicators"]),
),
)
title = s3.crud_strings["project_project"].title_display
# Assume authorised to see details
popup_url = URL(f="project", args=[project_id])
details_btn = A(T("Open"),
_href=popup_url,
_class="btn",
_id="details-btn",
_target="_blank")
output = dict(item = item,
title = title,
details_btn = details_btn,
)
return output
s3.postp = custom_postp
return attr
settings.customise_project_location_controller = customise_project_location_controller
# -------------------------------------------------------------------------
def customise_req_commit_controller(**attr):
# Request is mandatory
field = current.s3db.req_commit.req_id
field.requires = field.requires.other
return attr
settings.customise_req_commit_controller = customise_req_commit_controller
# -------------------------------------------------------------------------
def customise_req_req_resource(r, tablename):
s3db = current.s3db
# Request is mandatory
field = s3db.req_commit.req_id
field.requires = field.requires.other
table = s3db.req_req
table.req_ref.represent = lambda v, show_link=True, pdf=True: \
s3db.req_ref_represent(v, show_link, pdf)
table.site_id.label = T("Deliver To")
# Hide Drivers list_field
list_fields = s3db.get_config("req_req", "list_fields")
try:
list_fields.remove((T("Drivers"), "drivers"))
except ValueError:
# Already removed
pass
# Custom Request Form
s3db.set_method("req", "req",
method = "form",
action = PrintableShipmentForm,
)
settings.customise_req_req_resource = customise_req_req_resource
# =============================================================================
class PrintableShipmentForm(S3Method):
""" REST Method Handler for Printable Shipment Forms """
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Entry point for REST interface.
@param r: the S3Request instance
@param attr: controller attributes
@note: always returns PDF, disregarding the requested format
"""
output = {}
if r.http == "GET":
if r.id:
tablename = r.tablename
if tablename == "req_req":
output = self.request_form(r, **attr)
elif tablename == "inv_send":
output = self.waybill(r, **attr)
elif tablename == "inv_recv":
output = self.goods_received_note(r, **attr)
else:
# Not supported
r.error(405, current.ERROR.BAD_METHOD)
else:
# Record ID is required
r.error(400, current.ERROR.BAD_REQUEST)
else:
r.error(405, current.ERROR.BAD_METHOD)
return output
# -------------------------------------------------------------------------
def request_form(self, r, **attr):
"""
Request Form
@param r: the S3Request instance
@param attr: controller attributes
"""
T = current.T
s3db = current.s3db
# Master record (=req_req)
resource = s3db.resource(r.tablename,
id = r.id,
components = ["req_item"],
)
# Columns and data for the form header
header_fields = ["req_ref",
"date",
"date_required",
(T("Deliver to"), "site_id"),
(T("Reason for Request"), "purpose"),
"requester_id",
"site_id$site_id:inv_warehouse.contact",
"comments",
]
header_data = resource.select(header_fields,
start = 0,
limit = 1,
represent = True,
show_links = False,
raw_data = True,
)
if not header_data:
r.error(404, current.ERROR.BAD_RECORD)
# Generate PDF header
pdf_header = self.request_form_header(header_data)
# Filename from req_ref
header_row = header_data.rows[0]
pdf_filename = header_row["_row"]["req_req.req_ref"]
# Component (=req_item)
component = resource.components["req_item"]
body_fields = ["item_id",
"item_pack_id",
"quantity",
"comments",
]
# Aggregate methods and column names
aggregate = [("sum", "req_req_item.quantity"),
]
# Generate the JSON data dict
json_data = self._json_data(component,
body_fields,
aggregate = aggregate,
)
# Generate the grouped items table
from s3 import S3GroupedItemsTable
output = S3GroupedItemsTable(component,
data = json_data,
totals_label = T("Total"),
title = T("Logistics Requisition"),
pdf_header = pdf_header,
pdf_footer = self.request_form_footer,
)
# ...and export it as PDF
return output.pdf(r, filename=pdf_filename)
# -------------------------------------------------------------------------
@classmethod
def request_form_header(cls, data):
"""
Header for Request Forms
@param data: the S3ResourceData for the req_req
"""
row = data.rows[0]
labels = dict((rfield.colname, rfield.label) for rfield in data.rfields)
def row_(left, right):
return cls._header_row(left, right, row=row, labels=labels)
from gluon import DIV, H2, H4, TABLE, TD, TH, TR, P
T = current.T
# Get organisation name and logo
from .layouts import OM
name, logo = OM().render()
# The title
title = H2(T("Logistics Requisition"))
# Request form details
dtable = TABLE(
TR(TD(DIV(logo, H4(name)), _colspan = 2),
TD(DIV(title), _colspan = 2),
),
row_("req_req.req_ref", None),
row_("req_req.date", "req_req.date_required"),
row_("req_req.site_id", "req_req.purpose"),
row_("req_req.requester_id", "inv_warehouse.contact"),
)
# Request form comments
ctable = TABLE(TR(TH(T("Comments"))),
TR(TD(row["req_req.comments"])),
)
return DIV(dtable, P(" "), ctable)
# -------------------------------------------------------------------------
@staticmethod
def request_form_footer(r):
"""
Footer for Request Forms
@param r: the S3Request
"""
T = current.T
from gluon import TABLE, TH, TR
return TABLE(TR(TH(" "),
TH(T("Name")),
TH(T("Signature")),
TH(T("Date")),
),
TR(TH(T("Requester"))),
TR(TH(T("Budget Administrator"))),
TR(TH(T("Finance"))),
)
# -------------------------------------------------------------------------
def waybill(self, r, **attr):
"""
Waybill
@param r: the S3Request instance
@param attr: controller attributes
"""
T = current.T
s3db = current.s3db
# Component declarations to distinguish between the
# origin and destination warehouses
s3db.add_components("inv_send",
inv_warehouse = ({"name": "origin",
"joinby": "site_id",
"pkey": "site_id",
"filterby": False,
"multiple": False,
},
{"name": "destination",
"joinby": "site_id",
"pkey": "to_site_id",
"filterby": False,
"multiple": False,
},
),
)
# Master record (=inv_send)
resource = s3db.resource(r.tablename,
id = r.id,
components = ["origin",
"destination",
"track_item",
],
)
# Columns and data for the form header
header_fields = ["send_ref",
"req_ref",
"date",
"delivery_date",
(T("Origin"), "site_id"),
(T("Destination"), "to_site_id"),
"sender_id",
"origin.contact",
"recipient_id",
"destination.contact",
"transported_by",
"transport_ref",
(T("Delivery Address"), "destination.location_id"),
"comments",
]
header_data = resource.select(header_fields,
start = 0,
limit = 1,
represent = True,
show_links = False,
raw_data = True,
)
if not header_data:
r.error(404, current.ERROR.BAD_RECORD)
# Generate PDF header
pdf_header = self.waybill_header(header_data)
# Filename from send_ref
header_row = header_data.rows[0]
pdf_filename = header_row["_row"]["inv_send.send_ref"]
# Component (=inv_track_item)
component = resource.components["track_item"]
body_fields = ["bin",
"item_id",
"item_pack_id",
"quantity",
(T("Total Volume (m3)"), "total_volume"),
(T("Total Weight (kg)"), "total_weight"),
"supply_org_id",
"inv_item_status",
]
# Any extra fields needed for virtual fields
component.configure(extra_fields = ["item_id$weight",
"item_id$volume",
],
)
# Aggregate methods and column names
aggregate = [("sum", "inv_track_item.quantity"),
("sum", "inv_track_item.total_volume"),
("sum", "inv_track_item.total_weight"),
]
# Generate the JSON data dict
json_data = self._json_data(component,
body_fields,
aggregate = aggregate,
)
# Generate the grouped items table
from s3 import S3GroupedItemsTable
output = S3GroupedItemsTable(component,
data = json_data,
totals_label = T("Total"),
title = T("Waybill"),
pdf_header = pdf_header,
pdf_footer = self.waybill_footer,
)
# ...and export it as PDF
return output.pdf(r, filename=pdf_filename)
# -------------------------------------------------------------------------
@classmethod
def waybill_header(cls, data):
"""
Header for Waybills
@param data: the S3ResourceData for the inv_send
"""
row = data.rows[0]
labels = dict((rfield.colname, rfield.label) for rfield in data.rfields)
def row_(left, right):
return cls._header_row(left, right, row=row, labels=labels)
from gluon import DIV, H2, H4, TABLE, TD, TH, TR, P
T = current.T
# Get organisation name and logo
from .layouts import OM
name, logo = OM().render()
# The title
title = H2(T("Waybill"))
# Waybill details
dtable = TABLE(
TR(TD(DIV(logo, H4(name)), _colspan = 2),
TD(DIV(title), _colspan = 2),
),
row_("inv_send.send_ref", "inv_send.req_ref"),
row_("inv_send.date", "inv_send.delivery_date"),
row_("inv_send.site_id", "inv_send.to_site_id"),
row_("inv_send.sender_id", "inv_send.recipient_id"),
row_("inv_origin_warehouse.contact",
"inv_destination_warehouse.contact",
),
row_("inv_send.transported_by", "inv_send.transport_ref"),
row_("inv_destination_warehouse.location_id", None),
)
# Waybill comments
ctable = TABLE(TR(TH(T("Comments"))),
TR(TD(row["inv_send.comments"])),
)
return DIV(dtable, P(" "), ctable)
# -------------------------------------------------------------------------
@staticmethod
def waybill_footer(r):
"""
Footer for Waybills
@param r: the S3Request
"""
T = current.T
from gluon import TABLE, TD, TH, TR
return TABLE(TR(TH(T("Shipment")),
TH(T("Date")),
TH(T("Function")),
TH(T("Name")),
TH(T("Signature")),
TH(T("Status")),
),
TR(TD(T("Sent by"))),
TR(TD(T("Transported by"))),
TR(TH(T("Received by")),
TH(T("Date")),
TH(T("Function")),
TH(T("Name")),
TH(T("Signature")),
TH(T("Status")),
),
TR(TD(" ")),
)
# -------------------------------------------------------------------------
def goods_received_note(self, r, **attr):
"""
GRN (Goods Received Note)
@param r: the S3Request instance
@param attr: controller attributes
"""
T = current.T
s3db = current.s3db
# Master record (=inv_recv)
resource = s3db.resource(r.tablename,
id = r.id,
components = ["track_item"],
)
# Columns and data for the form header
header_fields = ["eta",
"date",
(T("Origin"), "from_site_id"),
(T("Destination"), "site_id"),
"sender_id",
"recipient_id",
"send_ref",
"recv_ref",
"comments",
]
header_data = resource.select(header_fields,
start = 0,
limit = 1,
represent = True,
show_links = False,
raw_data = True,
)
if not header_data:
r.error(404, current.ERROR.BAD_RECORD)
# Generate PDF header
pdf_header = self.goods_received_note_header(header_data)
# Filename from recv_ref
header_row = header_data.rows[0]
pdf_filename = header_row["_row"]["inv_recv.recv_ref"]
# Component (=inv_track_item)
component = resource.components["track_item"]
body_fields = ["recv_bin",
"item_id",
"item_pack_id",
"recv_quantity",
(T("Total Volume (m3)"), "total_recv_volume"),
(T("Total Weight (kg)"), "total_recv_weight"),
"supply_org_id",
"inv_item_status",
]
# Any extra fields needed for virtual fields
component.configure(extra_fields = ["item_id$weight",
"item_id$volume",
],
)
# Aggregate methods and column names
aggregate = [("sum", "inv_track_item.recv_quantity"),
("sum", "inv_track_item.total_recv_volume"),
("sum", "inv_track_item.total_recv_weight"),
]
# Generate the JSON data dict
json_data = self._json_data(component,
body_fields,
aggregate = aggregate,
)
# Generate the grouped items table
from s3 import S3GroupedItemsTable
output = S3GroupedItemsTable(component,
data = json_data,
totals_label = T("Total"),
title = T("Goods Received Note"),
pdf_header = pdf_header,
pdf_footer = self.goods_received_note_footer,
)
# ...and export it as PDF
return output.pdf(r, filename=pdf_filename)
# -------------------------------------------------------------------------
@classmethod
def goods_received_note_header(cls, data):
"""
Header for Goods Received Notes
@param data: the S3ResourceData for the inv_recv
"""
row = data.rows[0]
labels = dict((rfield.colname, rfield.label) for rfield in data.rfields)
def row_(left, right):
return cls._header_row(left, right, row=row, labels=labels)
from gluon import DIV, H2, H4, TABLE, TD, TH, TR, P
T = current.T
# Get organisation name and logo
from .layouts import OM
name, logo = OM().render()
# The title
title = H2(T("Goods Received Note"))
# GRN details
dtable = TABLE(TR(TD(DIV(logo, H4(name)), _colspan = 2),
TD(DIV(title), _colspan = 2),
),
row_("inv_recv.eta", "inv_recv.date"),
row_("inv_recv.from_site_id", "inv_recv.site_id"),
row_("inv_recv.sender_id", "inv_recv.recipient_id"),
row_("inv_recv.send_ref", "inv_recv.recv_ref"),
)
# GRN comments
ctable = TABLE(TR(TH(T("Comments"))),
TR(TD(row["inv_recv.comments"])),
)
return DIV(dtable, P(" "), ctable)
# -------------------------------------------------------------------------
@staticmethod
def goods_received_note_footer(r):
"""
Footer for Goods Received Notes
@param r: the S3Request
"""
T = current.T
from gluon import TABLE, TD, TH, TR
return TABLE(TR(TH(T("Delivered by")),
TH(T("Date")),
TH(T("Function")),
TH(T("Name")),
TH(T("Signature")),
TH(T("Status")),
),
TR(TD(T(" "))),
TR(TH(T("Received by")),
TH(T("Date")),
TH(T("Function")),
TH(T("Name")),
TH(T("Signature")),
TH(T("Status")),
),
TR(TD(" ")),
)
# -------------------------------------------------------------------------
@staticmethod
def _header_row(left, right, row=None, labels=None):
"""
Helper function to generate a 2-column table row
for the PDF header
@param left: the column name for the left column
@param right: the column name for the right column,
or None for an empty column
@param row: the S3ResourceData row
@param labels: dict of labels {colname: label}
"""
from gluon import TD, TH, TR
if right:
header_row = TR(TH(labels[left]),
TD(row[left]),
TH(labels[right]),
TD(row[right]),
)
else:
header_row = TR(TH(labels[left]),
TD(row[left], _colspan = 3),
)
return header_row
# -------------------------------------------------------------------------
@staticmethod
def _json_data(component, list_fields, aggregate=None):
"""
Extract, group and aggregate the data for the form body
@param component: the component (S3Resource)
@param list_fields: the columns for the form body
(list of field selectors)
@param aggregate: aggregation methods and fields,
a list of tuples (method, column name)
"""
# Extract the data
data = component.select(list_fields,
limit = None,
raw_data = True,
represent = True,
show_links = False,
)
# Get the column names and labels
columns = []
append_column = columns.append
labels = {}
for rfield in data.rfields:
colname = rfield.colname
append_column(colname)
labels[colname] = rfield.label
# Group and aggregate the items
from s3 import S3GroupedItems
gi = S3GroupedItems(data.rows,
aggregate = aggregate,
)
# Convert into JSON-serializable dict for S3GroupedItemsTable
json_data = gi.json(fields = columns,
labels = labels,
as_dict = True,
)
return json_data
# END =========================================================================
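# --- Illustrative sketch (not part of the template above) --------------------
# The `aggregate` lists passed to _json_data() pair an aggregation method with
# a column name, e.g. ("sum", "inv_track_item.quantity"). The actual grouping
# and aggregation is done by s3.S3GroupedItems; the helper below only mimics
# the "sum" case on plain dicts to show what the totals in the PDF body
# represent. All names and values here are invented for the example.
def _sum_columns(rows, aggregate):
    """Return {colname: total} for every ("sum", colname) entry."""
    totals = {}
    for method, colname in aggregate:
        if method == "sum":
            totals[colname] = sum(row.get(colname) or 0 for row in rows)
    return totals
# Example:
#   rows = [{"inv_track_item.quantity": 10, "inv_track_item.total_weight": 2.5},
#           {"inv_track_item.quantity": 4, "inv_track_item.total_weight": 1.0}]
#   _sum_columns(rows, [("sum", "inv_track_item.quantity"),
#                       ("sum", "inv_track_item.total_weight")])
#   -> {"inv_track_item.quantity": 14, "inv_track_item.total_weight": 3.5}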
|
#!/usr/bin/env python3
"""
Author : mahmoudabdelrahman <mahmoudabdelrahman@localhost>
Date : 2022-01-28
Purpose: Rock the Casbah
"""
import argparse
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Rock the Casbah',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('item',
metavar='str',
nargs='+',
help='Item(s) to bring')
parser.add_argument('-s',
'--sorted',
action='store_true',
help='Sort the items')
return parser.parse_args()
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
items = args.item
length = len(items)
if args.sorted:
items.sort()
list_of_items = ''
if length == 1:
list_of_items = items[0]
elif length == 2:
list_of_items = ' and '.join(items)
else:
items[-1] = 'and ' + items[-1]
list_of_items = ', '.join(items)
print(f'You are bringing {list_of_items}.')
# --------------------------------------------------
if __name__ == '__main__':
main()
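# Usage sketch (assuming this script is saved as picnic.py -- the filename is
# an assumption, not given above). The joining logic in main() produces:
#
#   $ python3 picnic.py chips
#   You are bringing chips.
#   $ python3 picnic.py chips soda
#   You are bringing chips and soda.
#   $ python3 picnic.py soda chips cookies --sorted
#   You are bringing chips, cookies, and soda.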
|
import os
import argparse
"""
Splits a single file of MEDLINE-formatted abstracts
into files of N abstracts each, writing at most M files.
"""
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("infile", type=str,
help="MEDLINE file to split")
parser.add_argument("outdir", type=str,
help="Where to save the file splits.")
parser.add_argument("-N", type=int, default=100,
help="Number of abstracts per file.")
parser.add_argument("-M", type=int, default=None,
help="Maximum number of files.")
return parser.parse_args()
def main(args):
with open(args.infile, 'r') as inF:
file_counter = 0
buf = []
nread = 0
for (i, line) in enumerate(inF):
if args.M is not None and file_counter >= args.M:
break
print(f"{file_counter}\r", end='', flush=True)
line = line.strip()
if line == '':
if i == 0: # Skip a leading blank line.
continue
nread += 1
buf.append(line)
if nread >= args.N and line == '':
outfile = f"out{file_counter:>04d}.txt"
outpath = os.path.join(args.outdir, outfile)
with open(outpath, 'w') as outF:
outF.write('\n'.join(buf))
buf = []
nread = 0
file_counter += 1
if len(buf) > 0:
outfile = f"out{file_counter:>04d}.txt"
outpath = os.path.join(args.outdir, outfile)
with open(outpath, 'w') as outF:
outF.write('\n'.join(buf))
if __name__ == "__main__":
args = parse_args()
main(args)
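# Usage sketch (paths and the script name split_medline.py are illustrative):
# split medline.txt into chunks of 50 abstracts, writing at most 10 files named
# out0000.txt, out0001.txt, ... into ./splits. Note that the script assumes the
# output directory already exists; create it first (e.g. `mkdir -p splits`) or
# add os.makedirs(args.outdir, exist_ok=True) before the main loop.
#
#   $ python3 split_medline.py medline.txt splits -N 50 -M 10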
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
try:
from unittest import mock
except ImportError:
import mock
from azure.core.credentials import AzureKeyCredential
from azure.search.documents import SearchClient, ApiVersion
from azure.search.documents.indexes import SearchIndexClient, SearchIndexerClient
from azure.search.documents.indexes.models import SearchIndexerDataContainer, SearchIndexerDataSourceConnection
CREDENTIAL = AzureKeyCredential(key="test_api_key")
class TestSearchIndexClient(object):
def test_index_init(self):
client = SearchIndexClient("endpoint", CREDENTIAL)
assert client._headers == {
"api-key": "test_api_key",
"Accept": "application/json;odata.metadata=minimal",
}
def test_index_credential_roll(self):
credential = AzureKeyCredential(key="old_api_key")
client = SearchIndexClient("endpoint", credential)
assert client._headers == {
"api-key": "old_api_key",
"Accept": "application/json;odata.metadata=minimal",
}
credential.update("new_api_key")
assert client._headers == {
"api-key": "new_api_key",
"Accept": "application/json;odata.metadata=minimal",
}
def test_get_search_client(self):
credential = AzureKeyCredential(key="old_api_key")
client = SearchIndexClient("endpoint", credential)
search_client = client.get_search_client('index')
assert isinstance(search_client, SearchClient)
@mock.patch(
"azure.search.documents.indexes._generated._operations_mixin.SearchClientOperationsMixin.get_service_statistics"
)
def test_get_service_statistics(self, mock_get_stats):
client = SearchIndexClient("endpoint", CREDENTIAL)
client.get_service_statistics()
assert mock_get_stats.called
assert mock_get_stats.call_args[0] == ()
assert mock_get_stats.call_args[1] == {"headers": client._headers}
@mock.patch(
"azure.search.documents.indexes._generated._operations_mixin.SearchClientOperationsMixin.get_service_statistics"
)
def test_get_service_statistics_v2020_06_30(self, mock_get_stats):
client = SearchIndexClient("endpoint", CREDENTIAL, api_version=ApiVersion.V2020_06_30)
client.get_service_statistics()
assert mock_get_stats.called
assert mock_get_stats.call_args[0] == ()
assert mock_get_stats.call_args[1] == {"headers": client._headers}
def test_index_endpoint_https(self):
credential = AzureKeyCredential(key="old_api_key")
client = SearchIndexClient("endpoint", credential)
assert client._endpoint.startswith('https')
client = SearchIndexClient("https://endpoint", credential)
assert client._endpoint.startswith('https')
with pytest.raises(ValueError):
client = SearchIndexClient("http://endpoint", credential)
with pytest.raises(ValueError):
client = SearchIndexClient(12345, credential)
class TestSearchIndexerClient(object):
def test_indexer_init(self):
client = SearchIndexerClient("endpoint", CREDENTIAL)
assert client._headers == {
"api-key": "test_api_key",
"Accept": "application/json;odata.metadata=minimal",
}
def test_indexer_credential_roll(self):
credential = AzureKeyCredential(key="old_api_key")
client = SearchIndexerClient("endpoint", credential)
assert client._headers == {
"api-key": "old_api_key",
"Accept": "application/json;odata.metadata=minimal",
}
credential.update("new_api_key")
assert client._headers == {
"api-key": "new_api_key",
"Accept": "application/json;odata.metadata=minimal",
}
def test_indexer_endpoint_https(self):
credential = AzureKeyCredential(key="old_api_key")
client = SearchIndexerClient("endpoint", credential)
assert client._endpoint.startswith('https')
client = SearchIndexerClient("https://endpoint", credential)
assert client._endpoint.startswith('https')
with pytest.raises(ValueError):
client = SearchIndexerClient("http://endpoint", credential)
with pytest.raises(ValueError):
client = SearchIndexerClient(12345, credential)
def test_datasource_with_empty_connection_string(self):
container = SearchIndexerDataContainer(name='searchcontainer')
data_source_connection = SearchIndexerDataSourceConnection(
name="test",
type="azureblob",
connection_string="",
container=container
)
packed_data_source_connection = data_source_connection._to_generated()
assert packed_data_source_connection.credentials.connection_string == "<unchanged>"
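# Minimal sketch of the credential-rolling behaviour exercised above, using only
# calls that already appear in these tests (AzureKeyCredential, SearchIndexClient,
# AzureKeyCredential.update). The endpoint is a placeholder value.
#
#   credential = AzureKeyCredential(key="old_api_key")
#   client = SearchIndexClient("https://example.search.windows.net", credential)
#   credential.update("new_api_key")  # client._headers now carries the new key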
|
# executes the module loader in blender
import os
import subprocess
from shutil import copyfile
from Utility.OS_Extension import get_first_valid_path
from Utility.Config import Config
from Utility.Logging_Extension import logger
# ====== Warning when starting blender ======
# "connect failed: No such file or directory"
# "Color management: using fallback mode for management"
# === "connect failed: No such file or directory"
# IGNORE THIS MESSAGE, IT EVEN APPEARS IF BLENDER IS DIRECTLY CALLED FROM THE COMMANDLINE
# This can be caused by deleting Blender's own python folder (which is the default on Ubuntu)
#
# === "Color management: using fallback mode for management"
# IGNORE THIS MESSAGE
# http://blender.stackexchange.com/questions/5436/how-do-i-solve-color-management-using-fallback-mode-for-management
# This is normal but you can make this message go away by building with OpenColorIO.
# ====== ====== ====== ====== ====== ======
# ************************************************************************************
# ************ MAKE SURE YOU RUN THIS SCRIPT WITH A PYTHON 3 INTERPRETER ************
# ************************************************************************************
def execute_blender_script(blender_script_ifp,
background_mode=True,
path_to_blend_file=None,
debug_output=False):
# ****
# IF THE SCRIPT IS RUNNING IN FOREGROUND MODE NO PRINT INFORMATION
# IS FLUSHED TO COMMAND LINE DURING EXECUTION
# ****
logger.info('execute_blender_script: ...')
logger.vinfo('blender_script_ifp', blender_script_ifp)
parent_dp = os.path.dirname(os.path.realpath(__file__))
example_config_path = os.path.join(parent_dp, 'Config', 'blender_script_executor_example.cfg')
config_path = os.path.join(parent_dp, 'Config', 'blender_script_executor.cfg')
if not os.path.isfile(config_path):
copyfile(example_config_path, config_path)
blender_script_config = Config(path_to_config_file=config_path)
path_to_blender_list = blender_script_config.get_option_value(
'path_to_blender', list)
path_to_blender = get_first_valid_path(
path_to_blender_list)
if path_to_blender is None:
logger.info('No valid blender path provided!')
logger.info('Adjust the value in Config/blender_script_executor.cfg')
raise FileNotFoundError('No valid Blender path configured; adjust Config/blender_script_executor.cfg')
options = []
if path_to_blend_file is not None:
logger.info('path_to_blend_file: ' + path_to_blend_file)
options += [path_to_blend_file]
if background_mode:
options += ['--background'] # without gui
# https://docs.blender.org/api/blender_python_api_2_61_release/info_tips_and_tricks.html#show-all-operators
if debug_output:
options += ['--debug']
path_to_blender_scripts_parent_folder = os.path.dirname(os.path.realpath(__file__))
# This calls the script Blender.BlenderUtility.Config_Blender_Environment.py,
# which will add additional modules to the python path
path_to_module_loader = os.path.join(
path_to_blender_scripts_parent_folder, 'Blender_Library_Configuration.py')
options += ['--python', path_to_module_loader]
options += ['--python', blender_script_ifp]
# options += ['--'] # this tells blender to treat the following arguments as custom arguments
# options += ['--module_path_1', str('some_path')]
logger.info('Call scripts in blender ... (' + path_to_blender + ')')
subprocess.call([path_to_blender] + options)
logger.info('execute_blender_script: Done')
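# Example invocation (a sketch; the script and .blend paths are assumptions).
# Runs a Blender Python script headlessly via the Blender binary configured in
# Config/blender_script_executor.cfg:
#
#   if __name__ == '__main__':
#       execute_blender_script(
#           blender_script_ifp='/path/to/my_blender_script.py',
#           background_mode=True,               # run without the GUI
#           path_to_blend_file='/path/to/scene.blend',
#           debug_output=False)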
|
from decimal import Decimal
from unittest.mock import patch
import graphene
import pytest
from ....checkout import calculations
from ....payment.error_codes import PaymentErrorCode
from ....payment.gateways.dummy_credit_card import (
TOKEN_EXPIRED,
TOKEN_VALIDATION_MAPPING,
)
from ....payment.interface import CustomerSource, PaymentMethodInfo, TokenConfig
from ....payment.models import ChargeStatus, Payment, TransactionKind
from ....payment.utils import fetch_customer_id, store_customer_id
from ...tests.utils import assert_no_permission, get_graphql_content
from ..enums import OrderAction, PaymentChargeStatusEnum
DUMMY_GATEWAY = "mirumee.payments.dummy"
VOID_QUERY = """
mutation PaymentVoid($paymentId: ID!) {
paymentVoid(paymentId: $paymentId) {
payment {
id,
chargeStatus
}
errors {
field
message
}
}
}
"""
def test_payment_void_success(
staff_api_client, permission_manage_orders, payment_txn_preauth
):
assert payment_txn_preauth.charge_status == ChargeStatus.NOT_CHARGED
payment_id = graphene.Node.to_global_id("Payment", payment_txn_preauth.pk)
variables = {"paymentId": payment_id}
response = staff_api_client.post_graphql(
VOID_QUERY, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["paymentVoid"]
assert not data["errors"]
payment_txn_preauth.refresh_from_db()
assert payment_txn_preauth.is_active is False
assert payment_txn_preauth.transactions.count() == 2
txn = payment_txn_preauth.transactions.last()
assert txn.kind == TransactionKind.VOID
def test_payment_void_gateway_error(
staff_api_client, permission_manage_orders, payment_txn_preauth, monkeypatch
):
assert payment_txn_preauth.charge_status == ChargeStatus.NOT_CHARGED
payment_id = graphene.Node.to_global_id("Payment", payment_txn_preauth.pk)
variables = {"paymentId": payment_id}
monkeypatch.setattr("saleor.payment.gateways.dummy.dummy_success", lambda: False)
response = staff_api_client.post_graphql(
VOID_QUERY, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["paymentVoid"]
assert data["errors"]
assert data["errors"][0]["field"] is None
assert data["errors"][0]["message"] == "Unable to void the transaction."
payment_txn_preauth.refresh_from_db()
assert payment_txn_preauth.charge_status == ChargeStatus.NOT_CHARGED
assert payment_txn_preauth.is_active is True
assert payment_txn_preauth.transactions.count() == 2
txn = payment_txn_preauth.transactions.last()
assert txn.kind == TransactionKind.VOID
assert not txn.is_success
CREATE_PAYMENT_MUTATION = """
mutation CheckoutPaymentCreate($checkoutId: ID!, $input: PaymentInput!) {
checkoutPaymentCreate(checkoutId: $checkoutId, input: $input) {
payment {
transactions {
kind,
token
}
chargeStatus
}
paymentErrors {
code
field
}
}
}
"""
def test_checkout_add_payment_without_shipping_method_and_not_shipping_required(
user_api_client, checkout_without_shipping_required, address
):
checkout = checkout_without_shipping_required
checkout.billing_address = address
checkout.save()
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
total = calculations.checkout_total(checkout=checkout, lines=list(checkout))
variables = {
"checkoutId": checkout_id,
"input": {
"gateway": DUMMY_GATEWAY,
"token": "sample-token",
"amount": total.gross.amount,
},
}
response = user_api_client.post_graphql(CREATE_PAYMENT_MUTATION, variables)
content = get_graphql_content(response)
data = content["data"]["checkoutPaymentCreate"]
assert not data["paymentErrors"]
transactions = data["payment"]["transactions"]
assert not transactions
payment = Payment.objects.get()
assert payment.checkout == checkout
assert payment.is_active
assert payment.token == "sample-token"
assert payment.total == total.gross.amount
assert payment.currency == total.gross.currency
assert payment.charge_status == ChargeStatus.NOT_CHARGED
assert payment.billing_address_1 == checkout.billing_address.street_address_1
assert payment.billing_first_name == checkout.billing_address.first_name
assert payment.billing_last_name == checkout.billing_address.last_name
def test_checkout_add_payment_without_shipping_method_with_shipping_required(
user_api_client, checkout_with_shipping_required, address
):
checkout = checkout_with_shipping_required
checkout.billing_address = address
checkout.save()
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
total = calculations.checkout_total(checkout=checkout, lines=list(checkout))
variables = {
"checkoutId": checkout_id,
"input": {
"gateway": DUMMY_GATEWAY,
"token": "sample-token",
"amount": total.gross.amount,
},
}
response = user_api_client.post_graphql(CREATE_PAYMENT_MUTATION, variables)
content = get_graphql_content(response)
data = content["data"]["checkoutPaymentCreate"]
assert data["paymentErrors"][0]["code"] == "SHIPPING_METHOD_NOT_SET"
assert data["paymentErrors"][0]["field"] == "shippingMethod"
def test_checkout_add_payment_with_shipping_method_and_shipping_required(
user_api_client, checkout_with_shipping_required, other_shipping_method, address
):
checkout = checkout_with_shipping_required
checkout.billing_address = address
checkout.shipping_address = address
checkout.shipping_method = other_shipping_method
checkout.save()
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
total = calculations.checkout_total(checkout=checkout, lines=list(checkout))
variables = {
"checkoutId": checkout_id,
"input": {
"gateway": DUMMY_GATEWAY,
"token": "sample-token",
"amount": total.gross.amount,
},
}
response = user_api_client.post_graphql(CREATE_PAYMENT_MUTATION, variables)
content = get_graphql_content(response)
data = content["data"]["checkoutPaymentCreate"]
assert not data["paymentErrors"]
transactions = data["payment"]["transactions"]
assert not transactions
payment = Payment.objects.get()
assert payment.checkout == checkout
assert payment.is_active
assert payment.token == "sample-token"
assert payment.total == total.gross.amount
assert payment.currency == total.gross.currency
assert payment.charge_status == ChargeStatus.NOT_CHARGED
assert payment.billing_address_1 == checkout.billing_address.street_address_1
assert payment.billing_first_name == checkout.billing_address.first_name
assert payment.billing_last_name == checkout.billing_address.last_name
def test_checkout_add_payment(
user_api_client, checkout_without_shipping_required, address
):
checkout = checkout_without_shipping_required
checkout.billing_address = address
checkout.save()
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
total = calculations.checkout_total(checkout=checkout, lines=list(checkout))
return_url = "https://www.example.com"
variables = {
"checkoutId": checkout_id,
"input": {
"gateway": DUMMY_GATEWAY,
"token": "sample-token",
"amount": total.gross.amount,
"returnUrl": return_url,
},
}
response = user_api_client.post_graphql(CREATE_PAYMENT_MUTATION, variables)
content = get_graphql_content(response)
data = content["data"]["checkoutPaymentCreate"]
assert not data["paymentErrors"]
transactions = data["payment"]["transactions"]
assert not transactions
payment = Payment.objects.get()
assert payment.checkout == checkout
assert payment.is_active
assert payment.token == "sample-token"
assert payment.total == total.gross.amount
assert payment.currency == total.gross.currency
assert payment.charge_status == ChargeStatus.NOT_CHARGED
assert payment.billing_address_1 == checkout.billing_address.street_address_1
assert payment.billing_first_name == checkout.billing_address.first_name
assert payment.billing_last_name == checkout.billing_address.last_name
assert payment.return_url == return_url
def test_checkout_add_payment_default_amount(
user_api_client, checkout_without_shipping_required, address
):
checkout = checkout_without_shipping_required
checkout.billing_address = address
checkout.save()
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
total = calculations.checkout_total(checkout=checkout, lines=list(checkout))
variables = {
"checkoutId": checkout_id,
"input": {"gateway": DUMMY_GATEWAY, "token": "sample-token"},
}
response = user_api_client.post_graphql(CREATE_PAYMENT_MUTATION, variables)
content = get_graphql_content(response)
data = content["data"]["checkoutPaymentCreate"]
assert not data["paymentErrors"]
transactions = data["payment"]["transactions"]
assert not transactions
payment = Payment.objects.get()
assert payment.checkout == checkout
assert payment.is_active
assert payment.token == "sample-token"
assert payment.total == total.gross.amount
assert payment.currency == total.gross.currency
assert payment.charge_status == ChargeStatus.NOT_CHARGED
def test_checkout_add_payment_bad_amount(
user_api_client, checkout_without_shipping_required, address
):
checkout = checkout_without_shipping_required
checkout.billing_address = address
checkout.save()
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
variables = {
"checkoutId": checkout_id,
"input": {
"gateway": DUMMY_GATEWAY,
"token": "sample-token",
"amount": str(
calculations.checkout_total(
checkout=checkout, lines=list(checkout)
).gross.amount
+ Decimal(1)
),
},
}
response = user_api_client.post_graphql(CREATE_PAYMENT_MUTATION, variables)
content = get_graphql_content(response)
data = content["data"]["checkoutPaymentCreate"]
assert (
data["paymentErrors"][0]["code"]
== PaymentErrorCode.PARTIAL_PAYMENT_NOT_ALLOWED.name
)
def test_checkout_add_payment_not_supported_gateways(
user_api_client, checkout_without_shipping_required, address
):
checkout = checkout_without_shipping_required
checkout.billing_address = address
checkout.currency = "EUR"
checkout.save(update_fields=["billing_address", "currency"])
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
variables = {
"checkoutId": checkout_id,
"input": {"gateway": DUMMY_GATEWAY, "token": "sample-token", "amount": "10.0"},
}
response = user_api_client.post_graphql(CREATE_PAYMENT_MUTATION, variables)
content = get_graphql_content(response)
data = content["data"]["checkoutPaymentCreate"]
assert (
data["paymentErrors"][0]["code"] == PaymentErrorCode.NOT_SUPPORTED_GATEWAY.name
)
assert data["paymentErrors"][0]["field"] == "gateway"
def test_use_checkout_billing_address_as_payment_billing(
user_api_client, checkout_without_shipping_required, address
):
checkout = checkout_without_shipping_required
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
total = calculations.checkout_total(checkout=checkout, lines=list(checkout))
variables = {
"checkoutId": checkout_id,
"input": {
"gateway": DUMMY_GATEWAY,
"token": "sample-token",
"amount": total.gross.amount,
},
}
response = user_api_client.post_graphql(CREATE_PAYMENT_MUTATION, variables)
content = get_graphql_content(response)
data = content["data"]["checkoutPaymentCreate"]
# check if proper error is returned if address is missing
assert data["paymentErrors"][0]["field"] == "billingAddress"
assert (
data["paymentErrors"][0]["code"]
== PaymentErrorCode.BILLING_ADDRESS_NOT_SET.name
)
# assign the address and try again
address.street_address_1 = "spanish-inqusition"
address.save()
checkout.billing_address = address
checkout.save()
response = user_api_client.post_graphql(CREATE_PAYMENT_MUTATION, variables)
get_graphql_content(response)
checkout.refresh_from_db()
assert checkout.payments.count() == 1
payment = checkout.payments.first()
assert payment.billing_address_1 == address.street_address_1
def test_create_payment_for_checkout_with_active_payments(
checkout_with_payments, user_api_client, address
):
# given
checkout = checkout_with_payments
address.street_address_1 = "spanish-inqusition"
address.save()
checkout.billing_address = address
checkout.save()
total = calculations.checkout_total(checkout=checkout, lines=list(checkout))
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
variables = {
"checkoutId": checkout_id,
"input": {
"gateway": DUMMY_GATEWAY,
"token": "sample-token",
"amount": total.gross.amount,
},
}
payments_count = checkout.payments.count()
previous_active_payments = checkout.payments.filter(is_active=True)
previous_active_payments_ids = list(
previous_active_payments.values_list("pk", flat=True)
)
assert len(previous_active_payments_ids) > 0
# when
response = user_api_client.post_graphql(CREATE_PAYMENT_MUTATION, variables)
content = get_graphql_content(response)
# then
data = content["data"]["checkoutPaymentCreate"]
assert not data["paymentErrors"]
checkout.refresh_from_db()
assert checkout.payments.all().count() == payments_count + 1
active_payments = checkout.payments.all().filter(is_active=True)
assert active_payments.count() == 1
assert active_payments.first().pk not in previous_active_payments_ids
CAPTURE_QUERY = """
mutation PaymentCapture($paymentId: ID!, $amount: Decimal!) {
paymentCapture(paymentId: $paymentId, amount: $amount) {
payment {
id,
chargeStatus
}
errors {
field
message
}
}
}
"""
def test_payment_capture_success(
staff_api_client, permission_manage_orders, payment_txn_preauth
):
payment = payment_txn_preauth
assert payment.charge_status == ChargeStatus.NOT_CHARGED
payment_id = graphene.Node.to_global_id("Payment", payment.pk)
variables = {"paymentId": payment_id, "amount": str(payment_txn_preauth.total)}
response = staff_api_client.post_graphql(
CAPTURE_QUERY, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["paymentCapture"]
assert not data["errors"]
payment_txn_preauth.refresh_from_db()
assert payment.charge_status == ChargeStatus.FULLY_CHARGED
assert payment.transactions.count() == 2
txn = payment.transactions.last()
assert txn.kind == TransactionKind.CAPTURE
def test_payment_capture_with_invalid_argument(
staff_api_client, permission_manage_orders, payment_txn_preauth
):
payment = payment_txn_preauth
assert payment.charge_status == ChargeStatus.NOT_CHARGED
payment_id = graphene.Node.to_global_id("Payment", payment.pk)
variables = {"paymentId": payment_id, "amount": 0}
response = staff_api_client.post_graphql(
CAPTURE_QUERY, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["paymentCapture"]
assert len(data["errors"]) == 1
assert data["errors"][0]["message"] == "Amount should be a positive number."
def test_payment_capture_with_payment_non_authorized_yet(
staff_api_client, permission_manage_orders, payment_dummy
):
"""Ensure capture a payment that is set as authorized is failing with
the proper error message.
"""
payment = payment_dummy
assert payment.charge_status == ChargeStatus.NOT_CHARGED
payment_id = graphene.Node.to_global_id("Payment", payment.pk)
variables = {"paymentId": payment_id, "amount": 1}
response = staff_api_client.post_graphql(
CAPTURE_QUERY, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["paymentCapture"]
assert data["errors"] == [
{"field": None, "message": "Cannot find successful auth transaction."}
]
def test_payment_capture_gateway_error(
staff_api_client, permission_manage_orders, payment_txn_preauth, monkeypatch
):
# given
payment = payment_txn_preauth
assert payment.charge_status == ChargeStatus.NOT_CHARGED
payment_id = graphene.Node.to_global_id("Payment", payment.pk)
variables = {"paymentId": payment_id, "amount": str(payment_txn_preauth.total)}
monkeypatch.setattr("saleor.payment.gateways.dummy.dummy_success", lambda: False)
# when
response = staff_api_client.post_graphql(
CAPTURE_QUERY, variables, permissions=[permission_manage_orders]
)
# then
content = get_graphql_content(response)
data = content["data"]["paymentCapture"]
assert data["errors"] == [{"field": None, "message": "Unable to process capture"}]
payment_txn_preauth.refresh_from_db()
assert payment.charge_status == ChargeStatus.NOT_CHARGED
assert payment.transactions.count() == 2
txn = payment.transactions.last()
assert txn.kind == TransactionKind.CAPTURE
assert not txn.is_success
@patch(
"saleor.payment.gateways.dummy_credit_card.plugin."
"DummyCreditCardGatewayPlugin.DEFAULT_ACTIVE",
True,
)
def test_payment_capture_gateway_dummy_credit_card_error(
staff_api_client, permission_manage_orders, payment_txn_preauth, monkeypatch
):
# given
token = TOKEN_EXPIRED
error = TOKEN_VALIDATION_MAPPING[token]
payment = payment_txn_preauth
payment.gateway = "mirumee.payments.dummy_credit_card"
payment.save()
transaction = payment.transactions.last()
transaction.token = token
transaction.save()
assert payment.charge_status == ChargeStatus.NOT_CHARGED
payment_id = graphene.Node.to_global_id("Payment", payment.pk)
variables = {"paymentId": payment_id, "amount": str(payment_txn_preauth.total)}
monkeypatch.setattr(
"saleor.payment.gateways.dummy_credit_card.dummy_success", lambda: False
)
# when
response = staff_api_client.post_graphql(
CAPTURE_QUERY, variables, permissions=[permission_manage_orders]
)
# then
content = get_graphql_content(response)
data = content["data"]["paymentCapture"]
assert data["errors"] == [{"field": None, "message": error}]
payment_txn_preauth.refresh_from_db()
assert payment.charge_status == ChargeStatus.NOT_CHARGED
assert payment.transactions.count() == 2
txn = payment.transactions.last()
assert txn.kind == TransactionKind.CAPTURE
assert not txn.is_success
REFUND_QUERY = """
mutation PaymentRefund($paymentId: ID!, $amount: Decimal!) {
paymentRefund(paymentId: $paymentId, amount: $amount) {
payment {
id,
chargeStatus
}
errors {
field
message
}
}
}
"""
def test_payment_refund_success(
staff_api_client, permission_manage_orders, payment_txn_captured
):
payment = payment_txn_captured
payment.charge_status = ChargeStatus.FULLY_CHARGED
payment.captured_amount = payment.total
payment.save()
payment_id = graphene.Node.to_global_id("Payment", payment.pk)
variables = {"paymentId": payment_id, "amount": str(payment.total)}
response = staff_api_client.post_graphql(
REFUND_QUERY, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["paymentRefund"]
assert not data["errors"]
payment.refresh_from_db()
assert payment.charge_status == ChargeStatus.FULLY_REFUNDED
assert payment.transactions.count() == 2
txn = payment.transactions.last()
assert txn.kind == TransactionKind.REFUND
def test_payment_refund_with_invalid_argument(
staff_api_client, permission_manage_orders, payment_txn_captured
):
payment = payment_txn_captured
payment.charge_status = ChargeStatus.FULLY_CHARGED
payment.captured_amount = payment.total
payment.save()
payment_id = graphene.Node.to_global_id("Payment", payment.pk)
variables = {"paymentId": payment_id, "amount": 0}
response = staff_api_client.post_graphql(
REFUND_QUERY, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["paymentRefund"]
assert len(data["errors"]) == 1
assert data["errors"][0]["message"] == "Amount should be a positive number."
def test_payment_refund_error(
staff_api_client, permission_manage_orders, payment_txn_captured, monkeypatch
):
payment = payment_txn_captured
payment.charge_status = ChargeStatus.FULLY_CHARGED
payment.captured_amount = payment.total
payment.save()
payment_id = graphene.Node.to_global_id("Payment", payment.pk)
variables = {"paymentId": payment_id, "amount": str(payment.total)}
monkeypatch.setattr("saleor.payment.gateways.dummy.dummy_success", lambda: False)
response = staff_api_client.post_graphql(
REFUND_QUERY, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["paymentRefund"]
assert data["errors"] == [{"field": None, "message": "Unable to process refund"}]
payment.refresh_from_db()
assert payment.charge_status == ChargeStatus.FULLY_CHARGED
assert payment.transactions.count() == 2
txn = payment.transactions.last()
assert txn.kind == TransactionKind.REFUND
assert not txn.is_success
def test_payments_query(
payment_txn_captured, permission_manage_orders, staff_api_client
):
query = """ {
payments(first: 20) {
edges {
node {
id
gateway
capturedAmount {
amount
currency
}
total {
amount
currency
}
actions
chargeStatus
transactions {
amount {
currency
amount
}
}
}
}
}
}
"""
response = staff_api_client.post_graphql(
query, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["payments"]["edges"][0]["node"]
pay = payment_txn_captured
assert data["gateway"] == pay.gateway
amount = str(data["capturedAmount"]["amount"])
assert Decimal(amount) == pay.captured_amount
assert data["capturedAmount"]["currency"] == pay.currency
total = str(data["total"]["amount"])
assert Decimal(total) == pay.total
assert data["total"]["currency"] == pay.currency
assert data["chargeStatus"] == PaymentChargeStatusEnum.FULLY_CHARGED.name
assert data["actions"] == [OrderAction.REFUND.name]
txn = pay.transactions.get()
assert data["transactions"] == [
{"amount": {"currency": pay.currency, "amount": float(str(txn.amount))}}
]
def test_query_payment(payment_dummy, user_api_client, permission_manage_orders):
query = """
query payment($id: ID!) {
payment(id: $id) {
id
}
}
"""
payment = payment_dummy
payment_id = graphene.Node.to_global_id("Payment", payment.pk)
variables = {"id": payment_id}
response = user_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
received_id = content["data"]["payment"]["id"]
assert received_id == payment_id
def test_query_payments(payment_dummy, permission_manage_orders, staff_api_client):
query = """
{
payments(first: 20) {
edges {
node {
id
}
}
}
}
"""
payment = payment_dummy
payment_id = graphene.Node.to_global_id("Payment", payment.pk)
response = staff_api_client.post_graphql(
query, {}, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
edges = content["data"]["payments"]["edges"]
payment_ids = [edge["node"]["id"] for edge in edges]
assert payment_ids == [payment_id]
@pytest.fixture
def braintree_customer_id():
return "1234"
@pytest.fixture
def dummy_customer_id():
return "4321"
def test_store_payment_gateway_meta(customer_user, braintree_customer_id):
gateway_name = "braintree"
meta_key = "BRAINTREE.customer_id"
META = {meta_key: braintree_customer_id}
store_customer_id(customer_user, gateway_name, braintree_customer_id)
assert customer_user.private_metadata == META
customer_user.refresh_from_db()
assert fetch_customer_id(customer_user, gateway_name) == braintree_customer_id
@pytest.fixture
def token_config_with_customer(braintree_customer_id):
return TokenConfig(customer_id=braintree_customer_id)
@pytest.fixture
def set_braintree_customer_id(customer_user, braintree_customer_id):
gateway_name = "braintree"
store_customer_id(customer_user, gateway_name, braintree_customer_id)
return customer_user
@pytest.fixture
def set_dummy_customer_id(customer_user, dummy_customer_id):
gateway_name = DUMMY_GATEWAY
store_customer_id(customer_user, gateway_name, dummy_customer_id)
return customer_user
def test_list_payment_sources(
mocker, dummy_customer_id, set_dummy_customer_id, user_api_client
):
gateway = DUMMY_GATEWAY
query = """
{
me {
storedPaymentSources {
gateway
creditCardInfo {
lastDigits
}
}
}
}
"""
card = PaymentMethodInfo(last_4="5678", exp_year=2020, exp_month=12, name="JohnDoe")
source = CustomerSource(id="test1", gateway=gateway, credit_card_info=card)
mock_get_source_list = mocker.patch(
"saleor.graphql.account.resolvers.gateway.list_payment_sources",
return_value=[source],
autospec=True,
)
response = user_api_client.post_graphql(query)
mock_get_source_list.assert_called_once_with(gateway, dummy_customer_id)
content = get_graphql_content(response)["data"]["me"]["storedPaymentSources"]
assert content is not None and len(content) == 1
assert content[0] == {"gateway": gateway, "creditCardInfo": {"lastDigits": "5678"}}
def test_stored_payment_sources_restriction(
mocker, staff_api_client, customer_user, permission_manage_users
):
# Only owner of storedPaymentSources can fetch it.
card = PaymentMethodInfo(last_4="5678", exp_year=2020, exp_month=12, name="JohnDoe")
source = CustomerSource(id="test1", gateway="dummy", credit_card_info=card)
mocker.patch(
"saleor.graphql.account.resolvers.gateway.list_payment_sources",
return_value=[source],
autospec=True,
)
customer_user_id = graphene.Node.to_global_id("User", customer_user.pk)
query = """
query PaymentSources($id: ID!) {
user(id: $id) {
storedPaymentSources {
creditCardInfo {
firstDigits
}
}
}
}
"""
variables = {"id": customer_user_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_users]
)
assert_no_permission(response)
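# Note on the customer-id convention exercised in test_store_payment_gateway_meta
# above (inferred from those assertions, not from the saleor source itself):
# store_customer_id(user, "braintree", "1234") writes the value into the user's
# private metadata under the key "BRAINTREE.customer_id", and
# fetch_customer_id(user, "braintree") reads it back, returning "1234".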
|
from sympy.testing.pytest import raises, XFAIL
from sympy.external import import_module
from sympy import (
Symbol, Mul, Add, Abs, sin, asin, cos, Pow, csc, sec,
Limit, oo, Derivative, Integral, factorial, sqrt, root,
conjugate, StrictLessThan, LessThan, StrictGreaterThan,
GreaterThan, Sum, Product, E, log, tan, Function, binomial,
exp, floor, ceiling, Unequality
)
from sympy.core.relational import Eq, Ne, Lt, Le, Gt, Ge
from sympy.physics.quantum.state import Bra, Ket
from sympy.abc import x, y, z, a, b, c, t, k, n
antlr4 = import_module("antlr4")
# disable tests if antlr4-python*-runtime is not present
if not antlr4:
disabled = True
theta = Symbol('theta')
f = Function('f')
# shorthand definitions
def _Add(a, b):
return Add(a, b, evaluate=False)
def _Mul(a, b):
return Mul(a, b, evaluate=False)
def _Pow(a, b):
return Pow(a, b, evaluate=False)
def _Sqrt(a):
return sqrt(a, evaluate=False)
def _Conjugate(a):
return conjugate(a, evaluate=False)
def _Abs(a):
return Abs(a, evaluate=False)
def _factorial(a):
return factorial(a, evaluate=False)
def _exp(a):
return exp(a, evaluate=False)
def _log(a, b):
return log(a, b, evaluate=False)
def _binomial(n, k):
return binomial(n, k, evaluate=False)
def test_import():
from sympy.parsing.latex._build_latex_antlr import (
build_parser,
check_antlr_version,
dir_latex_antlr
)
# XXX: It would be better to come up with a test for these...
del build_parser, check_antlr_version, dir_latex_antlr
# These LaTeX strings should parse to the corresponding SymPy expression
GOOD_PAIRS = [
(r"0", 0),
(r"1", 1),
(r"-3.14", _Mul(-1, 3.14)),
(r"(-7.13)(1.5)", _Mul(_Mul(-1, 7.13), 1.5)),
(r"x", x),
(r"2x", 2*x),
(r"x^2", x**2),
(r"x^{3 + 1}", x**_Add(3, 1)),
(r"-c", -c),
(r"a \cdot b", a * b),
(r"a / b", a / b),
(r"a \div b", a / b),
(r"a + b", a + b),
(r"a + b - a", _Add(a+b, -a)),
(r"a^2 + b^2 = c^2", Eq(a**2 + b**2, c**2)),
(r"(x + y) z", _Mul(_Add(x, y), z)),
(r"\left(x + y\right) z", _Mul(_Add(x, y), z)),
(r"\left( x + y\right ) z", _Mul(_Add(x, y), z)),
(r"\left( x + y\right ) z", _Mul(_Add(x, y), z)),
(r"\left[x + y\right] z", _Mul(_Add(x, y), z)),
(r"\left\{x + y\right\} z", _Mul(_Add(x, y), z)),
(r"1+1", _Add(1, 1)),
(r"0+1", _Add(0, 1)),
(r"1*2", _Mul(1, 2)),
(r"0*1", _Mul(0, 1)),
(r"x = y", Eq(x, y)),
(r"x \neq y", Ne(x, y)),
(r"x < y", Lt(x, y)),
(r"x > y", Gt(x, y)),
(r"x \leq y", Le(x, y)),
(r"x \geq y", Ge(x, y)),
(r"x \le y", Le(x, y)),
(r"x \ge y", Ge(x, y)),
(r"\lfloor x \rfloor", floor(x)),
(r"\lceil x \rceil", ceiling(x)),
(r"\langle x |", Bra('x')),
(r"| x \rangle", Ket('x')),
(r"\sin \theta", sin(theta)),
(r"\sin(\theta)", sin(theta)),
(r"\sin^{-1} a", asin(a)),
(r"\sin a \cos b", _Mul(sin(a), cos(b))),
(r"\sin \cos \theta", sin(cos(theta))),
(r"\sin(\cos \theta)", sin(cos(theta))),
(r"\frac{a}{b}", a / b),
(r"\frac{a + b}{c}", _Mul(a + b, _Pow(c, -1))),
(r"\frac{7}{3}", _Mul(7, _Pow(3, -1))),
(r"(\csc x)(\sec y)", csc(x)*sec(y)),
(r"\lim_{x \to 3} a", Limit(a, x, 3)),
(r"\lim_{x \rightarrow 3} a", Limit(a, x, 3)),
(r"\lim_{x \Rightarrow 3} a", Limit(a, x, 3)),
(r"\lim_{x \longrightarrow 3} a", Limit(a, x, 3)),
(r"\lim_{x \Longrightarrow 3} a", Limit(a, x, 3)),
(r"\lim_{x \to 3^{+}} a", Limit(a, x, 3, dir='+')),
(r"\lim_{x \to 3^{-}} a", Limit(a, x, 3, dir='-')),
(r"\infty", oo),
(r"\lim_{x \to \infty} \frac{1}{x}", Limit(_Pow(x, -1), x, oo)),
(r"\frac{d}{dx} x", Derivative(x, x)),
(r"\frac{d}{dt} x", Derivative(x, t)),
(r"f(x)", f(x)),
(r"f(x, y)", f(x, y)),
(r"f(x, y, z)", f(x, y, z)),
(r"\frac{d f(x)}{dx}", Derivative(f(x), x)),
(r"\frac{d\theta(x)}{dx}", Derivative(Function('theta')(x), x)),
(r"x \neq y", Unequality(x, y)),
(r"|x|", _Abs(x)),
(r"||x||", _Abs(Abs(x))),
(r"|x||y|", _Abs(x)*_Abs(y)),
(r"||x||y||", _Abs(_Abs(x)*_Abs(y))),
(r"\pi^{|xy|}", Symbol('pi')**_Abs(x*y)),
(r"\int x dx", Integral(x, x)),
(r"\int x d\theta", Integral(x, theta)),
(r"\int (x^2 - y)dx", Integral(x**2 - y, x)),
(r"\int x + a dx", Integral(_Add(x, a), x)),
(r"\int da", Integral(1, a)),
(r"\int_0^7 dx", Integral(1, (x, 0, 7))),
(r"\int_a^b x dx", Integral(x, (x, a, b))),
(r"\int^b_a x dx", Integral(x, (x, a, b))),
(r"\int_{a}^b x dx", Integral(x, (x, a, b))),
(r"\int^{b}_a x dx", Integral(x, (x, a, b))),
(r"\int_{a}^{b} x dx", Integral(x, (x, a, b))),
(r"\int^{b}_{a} x dx", Integral(x, (x, a, b))),
(r"\int_{f(a)}^{f(b)} f(z) dz", Integral(f(z), (z, f(a), f(b)))),
(r"\int (x+a)", Integral(_Add(x, a), x)),
(r"\int a + b + c dx", Integral(_Add(_Add(a, b), c), x)),
(r"\int \frac{dz}{z}", Integral(Pow(z, -1), z)),
(r"\int \frac{3 dz}{z}", Integral(3*Pow(z, -1), z)),
(r"\int \frac{1}{x} dx", Integral(Pow(x, -1), x)),
(r"\int \frac{1}{a} + \frac{1}{b} dx",
Integral(_Add(_Pow(a, -1), Pow(b, -1)), x)),
(r"\int \frac{3 \cdot d\theta}{\theta}",
Integral(3*_Pow(theta, -1), theta)),
(r"\int \frac{1}{x} + 1 dx", Integral(_Add(_Pow(x, -1), 1), x)),
(r"x_0", Symbol('x_{0}')),
(r"x_{1}", Symbol('x_{1}')),
(r"x_a", Symbol('x_{a}')),
(r"x_{b}", Symbol('x_{b}')),
(r"h_\theta", Symbol('h_{theta}')),
(r"h_{\theta}", Symbol('h_{theta}')),
(r"h_{\theta}(x_0, x_1)",
Function('h_{theta}')(Symbol('x_{0}'), Symbol('x_{1}'))),
(r"x!", _factorial(x)),
(r"100!", _factorial(100)),
(r"\theta!", _factorial(theta)),
(r"(x + 1)!", _factorial(_Add(x, 1))),
(r"(x!)!", _factorial(_factorial(x))),
(r"x!!!", _factorial(_factorial(_factorial(x)))),
(r"5!7!", _Mul(_factorial(5), _factorial(7))),
(r"\sqrt{x}", sqrt(x)),
(r"\sqrt{x + b}", sqrt(_Add(x, b))),
(r"\sqrt[3]{\sin x}", root(sin(x), 3)),
(r"\sqrt[y]{\sin x}", root(sin(x), y)),
(r"\sqrt[\theta]{\sin x}", root(sin(x), theta)),
(r"\sqrt{\frac{12}{6}}", _Sqrt(_Mul(12, _Pow(6, -1)))),
(r"\overline{z}", _Conjugate(z)),
(r"\overline{\overline{z}}", _Conjugate(_Conjugate(z))),
(r"\overline{x + y}", _Conjugate(_Add(x, y))),
(r"\overline{x} + \overline{y}", _Conjugate(x) + _Conjugate(y)),
(r"x < y", StrictLessThan(x, y)),
(r"x \leq y", LessThan(x, y)),
(r"x > y", StrictGreaterThan(x, y)),
(r"x \geq y", GreaterThan(x, y)),
(r"\mathit{x}", Symbol('x')),
(r"\mathit{test}", Symbol('test')),
(r"\mathit{TEST}", Symbol('TEST')),
(r"\mathit{HELLO world}", Symbol('HELLO world')),
(r"\sum_{k = 1}^{3} c", Sum(c, (k, 1, 3))),
(r"\sum_{k = 1}^3 c", Sum(c, (k, 1, 3))),
(r"\sum^{3}_{k = 1} c", Sum(c, (k, 1, 3))),
(r"\sum^3_{k = 1} c", Sum(c, (k, 1, 3))),
(r"\sum_{k = 1}^{10} k^2", Sum(k**2, (k, 1, 10))),
(r"\sum_{n = 0}^{\infty} \frac{1}{n!}",
Sum(_Pow(_factorial(n), -1), (n, 0, oo))),
(r"\prod_{a = b}^{c} x", Product(x, (a, b, c))),
(r"\prod_{a = b}^c x", Product(x, (a, b, c))),
(r"\prod^{c}_{a = b} x", Product(x, (a, b, c))),
(r"\prod^c_{a = b} x", Product(x, (a, b, c))),
(r"\exp x", _exp(x)),
(r"\exp(x)", _exp(x)),
(r"\ln x", _log(x, E)),
(r"\ln xy", _log(x*y, E)),
(r"\log x", _log(x, 10)),
(r"\log xy", _log(x*y, 10)),
(r"\log_{2} x", _log(x, 2)),
(r"\log_{a} x", _log(x, a)),
(r"\log_{11} x", _log(x, 11)),
(r"\log_{a^2} x", _log(x, _Pow(a, 2))),
(r"[x]", x),
(r"[a + b]", _Add(a, b)),
(r"\frac{d}{dx} [ \tan x ]", Derivative(tan(x), x)),
(r"\binom{n}{k}", _binomial(n, k)),
(r"\tbinom{n}{k}", _binomial(n, k)),
(r"\dbinom{n}{k}", _binomial(n, k)),
(r"\binom{n}{0}", _binomial(n, 0)),
(r"a \, b", _Mul(a, b)),
(r"a \thinspace b", _Mul(a, b)),
(r"a \: b", _Mul(a, b)),
(r"a \medspace b", _Mul(a, b)),
(r"a \; b", _Mul(a, b)),
(r"a \thickspace b", _Mul(a, b)),
(r"a \quad b", _Mul(a, b)),
(r"a \qquad b", _Mul(a, b)),
(r"a \! b", _Mul(a, b)),
(r"a \negthinspace b", _Mul(a, b)),
(r"a \negmedspace b", _Mul(a, b)),
(r"a \negthickspace b", _Mul(a, b)),
(r"\int x \, dx", Integral(x, x)),
]
def test_parseable():
from sympy.parsing.latex import parse_latex
for latex_str, sympy_expr in GOOD_PAIRS:
assert parse_latex(latex_str) == sympy_expr
# At time of migration from latex2sympy, should work but doesn't
FAILING_PAIRS = [
(r"\log_2 x", _log(x, 2)),
(r"\log_a x", _log(x, a)),
]
def test_failing_parseable():
from sympy.parsing.latex import parse_latex
for latex_str, sympy_expr in FAILING_PAIRS:
with raises(Exception):
assert parse_latex(latex_str) == sympy_expr
# These bad LaTeX strings should raise a LaTeXParsingError when parsed
BAD_STRINGS = [
r"(",
r")",
r"\frac{d}{dx}",
r"(\frac{d}{dx})",
r"\sqrt{}",
r"\sqrt",
r"\overline{}",
r"\overline",
r"{",
r"}",
r"\mathit{x + y}",
r"\mathit{21}",
r"\frac{2}{}",
r"\frac{}{2}",
r"\int",
r"!",
r"!0",
r"_",
r"^",
r"|",
r"||x|",
r"()",
r"((((((((((((((((()))))))))))))))))",
r"-",
r"\frac{d}{dx} + \frac{d}{dt}",
r"f(x,,y)",
r"f(x,y,",
r"\sin^x",
r"\cos^2",
r"@",
r"#",
r"$",
r"%",
r"&",
r"*",
r"" "\\",
r"~",
r"\frac{(2 + x}{1 - x)}",
]
def test_not_parseable():
from sympy.parsing.latex import parse_latex, LaTeXParsingError
for latex_str in BAD_STRINGS:
with raises(LaTeXParsingError):
parse_latex(latex_str)
# At time of migration from latex2sympy, should fail but doesn't
FAILING_BAD_STRINGS = [
r"\cos 1 \cos",
r"f(,",
r"f()",
r"a \div \div b",
r"a \cdot \cdot b",
r"a // b",
r"a +",
r"1.1.1",
r"1 +",
r"a / b /",
]
@XFAIL
def test_failing_not_parseable():
from sympy.parsing.latex import parse_latex, LaTeXParsingError
for latex_str in FAILING_BAD_STRINGS:
with raises(LaTeXParsingError):
parse_latex(latex_str)
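# Quick usage sketch mirroring test_parseable above (requires the
# antlr4-python*-runtime package, as noted near the top of this file):
#
#   from sympy.parsing.latex import parse_latex
#   expr = parse_latex(r"\frac{d}{dx} x")
#   assert expr == Derivative(x, x)   # same pair as in GOOD_PAIRS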
|
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" Header Packet Rx-handling gateware. """
import unittest
from nmigen import *
from nmigen.hdl.ast import Fell
from usb_protocol.types.superspeed import LinkCommand
from .header import HeaderPacket, HeaderQueue
from .crc import compute_usb_crc5, HeaderPacketCRC
from .command import LinkCommandGenerator
from ..physical.coding import SHP, EPF, stream_matches_symbols
from ...stream import USBRawSuperSpeedStream
from ....test.utils import LunaSSGatewareTestCase, ss_domain_test_case
class RawHeaderPacketReceiver(Elaboratable):
""" Class that monitors the USB bus for Header Packet, and receives them.
This class performs the validations required at the link layer of the USB specification;
which include checking the CRC-5 and CRC-16 embedded within the header packet.
Attributes
----------
sink: USBRawSuperSpeedStream(), input (monitor only)
Stream that carries the USB data to be monitored.
packet: HeaderPacket(), output
The de-serialized form of our header packet.
new_packet: Signal(), output
Strobe; indicates that a new, valid header packet has been received. The new
packet is now available on :attr:``packet``.
bad_packet: Signal(), output
Strobe; indicates that a corrupted, invalid header packet has been received.
:attr:``packet`` has not been updated.
expected_sequence: Signal(3), input
Indicates the next expected sequence number; used to validate the received packet.
"""
def __init__(self):
#
# I/O port
#
self.sink = USBRawSuperSpeedStream()
# Header packet output.
self.packet = HeaderPacket()
# State indications.
self.new_packet = Signal()
self.bad_packet = Signal()
# Sequence tracking.
self.expected_sequence = Signal(3)
self.bad_sequence = Signal()
def elaborate(self, platform):
m = Module()
sink = self.sink
# Store our header packet in progress; which we'll output only once it's been validated.
packet = HeaderPacket()
# Cache our expected CRC5, so we can pipeline generation and comparison.
expected_crc5 = Signal(5)
# Keep our "new packet" signal de-asserted unless asserted explicitly.
m.d.ss += self.new_packet.eq(0)
#
# CRC-16 Generator
#
m.submodules.crc16 = crc16 = HeaderPacketCRC()
m.d.comb += crc16.data_input.eq(sink.data),
#
# Receiver Sequencing
#
with m.FSM(domain="ss"):
# WAIT_FOR_HPSTART -- we're currently waiting for HPSTART framing, which indicates
# that the following 16 symbols (4 words) will be a header packet.
with m.State("WAIT_FOR_HPSTART"):
# Don't start our CRC until we're past our HPSTART header.
m.d.comb += crc16.clear.eq(1)
is_hpstart = stream_matches_symbols(sink, SHP, SHP, SHP, EPF)
with m.If(is_hpstart):
m.next = "RECEIVE_DW0"
# RECEIVE_DWn -- the first three words of our header packet are data words meant for
# the protocol layer; we'll receive them so we can pass them on to the protocol layer.
for n in range(3):
with m.State(f"RECEIVE_DW{n}"):
with m.If(sink.valid):
m.d.comb += crc16.advance_crc.eq(1)
m.d.ss += packet[f'dw{n}'].eq(sink.data)
m.next = f"RECEIVE_DW{n+1}"
# RECEIVE_DW3 -- we'll receive and parse our final data word, which contains the fields
# relevant to the link layer.
with m.State("RECEIVE_DW3"):
with m.If(sink.valid):
m.d.ss += [
# Collect the fields from the DW...
packet.crc16 .eq(sink.data[ 0:16]),
packet.sequence_number .eq(sink.data[16:19]),
packet.dw3_reserved .eq(sink.data[19:22]),
packet.hub_depth .eq(sink.data[22:25]),
packet.deferred .eq(sink.data[25]),
packet.delayed .eq(sink.data[26]),
packet.crc5 .eq(sink.data[27:32]),
# ... and pipeline a CRC of the link control word.
expected_crc5 .eq(compute_usb_crc5(sink.data[16:27]))
]
m.next = "CHECK_PACKET"
# CHECK_PACKET -- we've now received our full packet; we'll check it for validity.
with m.State("CHECK_PACKET"):
# A minor error occurs if one of our CRCs mismatches; in which case the link can
# continue after sending an LBAD link command. [USB3.2r1: 7.2.4.1.5].
# We'll strobe our less-severe "bad packet" indicator, but still reject the header.
crc5_failed = (expected_crc5 != packet.crc5)
crc16_failed = (crc16.crc != packet.crc16)
with m.If(crc5_failed | crc16_failed):
m.d.comb += self.bad_packet.eq(1)
# Our worst-case scenario is we're receiving a packet with an unexpected sequence
# number; this indicates that we've lost sequence, and our device should move back
# into Recovery [USB3.2r1: 7.2.4.1.5].
with m.Elif(packet.sequence_number != self.expected_sequence):
m.d.comb += self.bad_sequence.eq(1)
# If neither of the above checks failed, we now know we have a valid header packet!
# We'll output our packet, and then return to IDLE.
with m.Else():
m.d.ss += [
self.new_packet .eq(1),
self.packet .eq(packet)
]
m.next = "WAIT_FOR_HPSTART"
return m
class RawHeaderPacketReceiverTest(LunaSSGatewareTestCase):
FRAGMENT_UNDER_TEST = RawHeaderPacketReceiver
def initialize_signals(self):
yield self.dut.sink.valid.eq(1)
def provide_data(self, *tuples):
""" Provides the receiver with a sequence of (data, ctrl) values. """
# Provide each word of our data to our receiver...
for data, ctrl in tuples:
yield self.dut.sink.data.eq(data)
yield self.dut.sink.ctrl.eq(ctrl)
yield
@ss_domain_test_case
def test_good_packet_receive(self):
dut = self.dut
# Data input for an actual Link Management packet (seq #0).
yield from self.provide_data(
# data ctrl
(0xF7FBFBFB, 0b1111),
(0x00000280, 0b0000),
(0x00010004, 0b0000),
(0x00000000, 0b0000),
(0x10001845, 0b0000),
)
# ... after a cycle to process, we should see an indication that the packet is good.
yield from self.advance_cycles(2)
self.assertEqual((yield dut.new_packet), 1)
self.assertEqual((yield dut.bad_packet), 0)
self.assertEqual((yield dut.bad_sequence), 0)
@ss_domain_test_case
def test_bad_sequence_receive(self):
dut = self.dut
# Expect a sequence number other than the one we'll be providing.
yield dut.expected_sequence.eq(3)
# Data input for an actual Link Management packet (seq #0).
yield from self.provide_data(
# data ctrl
(0xF7FBFBFB, 0b1111),
(0x00000280, 0b0000),
(0x00010004, 0b0000),
(0x00000000, 0b0000),
(0x10001845, 0b0000),
)
# ... after a cycle to process, we should see an indication that the sequence is bad.
yield from self.advance_cycles(1)
self.assertEqual((yield dut.new_packet), 0)
self.assertEqual((yield dut.bad_packet), 0)
self.assertEqual((yield dut.bad_sequence), 1)
@ss_domain_test_case
def test_bad_packet_receive(self):
dut = self.dut
# Data input for an actual Link Management packet (seq #0),
# but with the last word corrupted to invalidate our CRC16.
yield from self.provide_data(
# data ctrl
(0xF7FBFBFB, 0b1111),
(0x00000280, 0b0000),
(0x00010004, 0b0000),
(0xFFFFFFFF, 0b0000),
(0x10001845, 0b0000),
)
# ... after a cycle to process, we should see an indication that the packet is bad.
yield from self.advance_cycles(1)
self.assertEqual((yield dut.new_packet), 0)
self.assertEqual((yield dut.bad_packet), 1)
self.assertEqual((yield dut.bad_sequence), 0)
@ss_domain_test_case
def test_bad_crc_and_sequence_receive(self):
dut = self.dut
# Completely invalid link packet, guaranteed to have a bad sequence number & CRC.
yield from self.provide_data(
# data ctrl
(0xF7FBFBFB, 0b1111),
(0xFFFFFFFF, 0b0000),
(0xFFFFFFFF, 0b0000),
(0xFFFFFFFF, 0b0000),
(0xFFFFFFFF, 0b0000),
)
# Once we've processed this, we should see that there's a bad packet; but that it's
# corrupted enough that our sequence no longer matters.
yield from self.advance_cycles(1)
self.assertEqual((yield dut.new_packet), 0)
self.assertEqual((yield dut.bad_packet), 1)
self.assertEqual((yield dut.bad_sequence), 0)
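# Hedged reference sketch (not used by the gateware): the DW3 field slicing performed in
# RECEIVE_DW3 above can be mirrored in plain Python, which makes the link control word
# layout easier to eyeball in waveforms or test vectors. The field names follow the
# HeaderPacket record; the function itself is illustrative only.
def _unpack_link_control_word(dw3: int) -> dict:
    """ Split a 32-bit DW3 value into the same fields RECEIVE_DW3 extracts. """
    return {
        "crc16":           (dw3 >>  0) & 0xFFFF,   # bits  0..15
        "sequence_number": (dw3 >> 16) & 0b111,    # bits 16..18
        "dw3_reserved":    (dw3 >> 19) & 0b111,    # bits 19..21
        "hub_depth":       (dw3 >> 22) & 0b111,    # bits 22..24
        "deferred":        (dw3 >> 25) & 0b1,      # bit  25
        "delayed":         (dw3 >> 26) & 0b1,      # bit  26
        "crc5":            (dw3 >> 27) & 0b11111,  # bits 27..31
    }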
class HeaderPacketReceiver(Elaboratable):
""" Receiver-side Header Packet logic.
This module handles all header-packet-reception related logic for the link layer; including
header packet reception, buffering, flow control (credit management), and link command transmission.
Attributes
----------
sink: USBRawSuperSpeedStream(), input stream [monitor only]
Stream that carries data from the physical layer, to be monitored.
source: USBRawSuperSpeedStream(), output stream
Stream that carries link commands from this unit down to the physical layer.
enable: Signal(), input
When asserted, this unit will be enabled and will be allowed to start
transmitting link commands. Asserting this signal after a reset will perform
a header sequence number advertisement and a link credit advertisement.
usb_reset: Signal(), input
Strobe; can be asserted to indicate that a USB reset has occurred, and sequencing
should be restarted.
queue: HeaderQueue(), output stream
Stream carrying any header packets to be transmitted.
retry_received: Signal(), input
Strobe; should be asserted when the transmitter has seen a RETRY handshake.
retry_required: Signal(), output
Strobe; pulsed to indicate that we should send a RETRY handshake.
link_command_sent: Signal(), output
Strobe; pulses each time a link command is completed.
keepalive_required: Signal(), input
Strobe; when asserted, a keepalive packet will be generated.
packet_received: Signal(), output
Strobe; pulsed when an event occurs that should reset the USB3 "packet received" timers.
This does *not* indicate valid data is present on the output :attr:``queue``; this has its
own valid signal.
bad_packet_received: Signal(), output
Strobe; pulsed when a receive error occurs. For error counting at the link level.
accept_power_state: Signal(), input
Strobe; when pulsed, a LAU (Link-state acceptance) will be generated.
reject_power_state: Signal(), input
Strobe; when pulsed, a LXU (Link-state rejection) will be generated.
acknowledge_power_state: Signal(), input
Strobe; when pulsed, a LPMA (Link-state acknowledgement) will be generated.
"""
SEQUENCE_NUMBER_WIDTH = 3
def __init__(self, *, buffer_count=4, downstream_facing=False):
self._buffer_count = buffer_count
self._is_downstream_facing = downstream_facing
#
# I/O port
#
self.sink = USBRawSuperSpeedStream()
self.source = USBRawSuperSpeedStream()
# Simple controls.
self.enable = Signal()
self.usb_reset = Signal()
# Header Packet Queue
self.queue = HeaderQueue()
# Event signaling.
self.retry_received = Signal()
self.retry_required = Signal()
self.recovery_required = Signal()
self.link_command_sent = Signal()
self.keepalive_required = Signal()
self.packet_received = Signal()
self.bad_packet_received = Signal()
self.accept_power_state = Signal()
self.reject_power_state = Signal()
self.acknowledge_power_state = Signal()
def elaborate(self, platform):
m = Module()
#
# Sequence tracking.
#
# Keep track of which sequence number we expect to see.
expected_sequence_number = Signal(self.SEQUENCE_NUMBER_WIDTH)
# Keep track of which credit we'll need to issue next...
next_credit_to_issue = Signal(range(self._buffer_count))
# ... and which header we'll need to ACK next.
# We'll start with the maximum number, so our first advertisement wraps us back around to zero.
next_header_to_ack = Signal.like(expected_sequence_number, reset=-1)
#
# Task "queues".
#
# Keep track of how many header received acknowledgements (LGOODs) we need to send.
acks_to_send = Signal(range(self._buffer_count + 1), reset=1)
enqueue_ack = Signal()
dequeue_ack = Signal()
with m.If(enqueue_ack & ~dequeue_ack):
m.d.ss += acks_to_send.eq(acks_to_send + 1)
with m.If(dequeue_ack & ~enqueue_ack):
m.d.ss += acks_to_send.eq(acks_to_send - 1)
# Keep track of how many link credits we've yet to free.
# We'll start with every one of our buffers marked as "pending free"; this ensures
# we perform our credit restoration properly.
credits_to_issue = Signal.like(acks_to_send, reset=self._buffer_count)
enqueue_credit_issue = Signal()
dequeue_credit_issue = Signal()
with m.If(enqueue_credit_issue & ~dequeue_credit_issue):
m.d.ss += credits_to_issue.eq(credits_to_issue + 1)
with m.If(dequeue_credit_issue & ~enqueue_credit_issue):
m.d.ss += credits_to_issue.eq(credits_to_issue - 1)
# Keep track of whether we should be sending an LBAD.
lbad_pending = Signal()
# Keep track of whether a retry has been requested.
retry_pending = Signal()
with m.If(self.retry_required):
m.d.ss += retry_pending.eq(1)
# Keep track of whether a keepalive has been requested.
keepalive_pending = Signal()
with m.If(self.keepalive_required):
m.d.ss += keepalive_pending.eq(1)
# Keep track of whether we're expected to send a power state response.
lau_pending = Signal()
lxu_pending = Signal()
lpma_pending = Signal()
with m.If(self.accept_power_state):
m.d.ss += lau_pending.eq(1)
with m.If(self.reject_power_state):
m.d.ss += lxu_pending.eq(1)
with m.If(self.acknowledge_power_state):
m.d.ss += lpma_pending.eq(1)
#
# Header Packet Buffers
#
# Track which buffer will be filled next.
read_pointer = Signal(range(self._buffer_count))
write_pointer = Signal.like(read_pointer)
# Track how many buffers we currently have in use.
buffers_filled = Signal.like(credits_to_issue, reset=0)
reserve_buffer = Signal()
release_buffer = Signal()
with m.If(reserve_buffer & ~release_buffer):
m.d.ss += buffers_filled.eq(buffers_filled + 1)
with m.If(release_buffer & ~reserve_buffer):
m.d.ss += buffers_filled.eq(buffers_filled - 1)
# Create buffers to receive any incoming header packets.
buffers = Array(HeaderPacket() for _ in range(self._buffer_count))
#
# Packet reception (physical layer -> link layer).
#
# Flag that determines when we should ignore packets.
#
# After a receive error, we'll want to ignore all packets until we see a "retry"
# link command; so we don't receive packets out of order.
ignore_packets = Signal()
# Create our raw packet parser / receiver.
m.submodules.receiver = rx = RawHeaderPacketReceiver()
m.d.comb += [
# Our receiver passively monitors the data received for header packets.
rx.sink .tap(self.sink),
# Ensure it's always up to date about what sequence numbers we expect.
rx.expected_sequence .eq(expected_sequence_number),
# If we ever get a bad header packet sequence, we're required to retrain
# the link [USB3.2r1: 7.2.4.1.5]. Pass the event through directly.
self.recovery_required .eq(rx.bad_sequence & ~ignore_packets),
# Notify the link layer when packets are received, for keeping track of our timers.
self.packet_received .eq(rx.new_packet),
# Notify the link layer if any bad packets are received; for diagnostics.
self.bad_packet_received .eq(rx.bad_packet)
]
# If we receive a valid packet, it's time for us to buffer it!
with m.If(rx.new_packet & ~ignore_packets):
m.d.ss += [
# Load our header packet into the next write buffer...
buffers[write_pointer] .eq(rx.packet),
# ... advance to the next buffer and sequence number...
write_pointer .eq(write_pointer + 1),
expected_sequence_number .eq(expected_sequence_number + 1),
]
m.d.comb += [
# ... mark the buffer space as occupied by valid data ...
reserve_buffer .eq(1),
# ... and queue an ACK for this packet.
enqueue_ack .eq(1)
]
# If we receive a bad packet, we'll need to request that the other side re-send.
# The rules for this are summarized in [USB3.2r1: 7.2.4.1.5], and in comments below.
with m.If(rx.bad_packet & ~ignore_packets):
m.d.ss += [
# First, we'll need to schedule transmission of an LBAD, which notifies the other
# side that we received a bad packet; and that it'll need to transmit all unack'd
# header packets to us again.
lbad_pending .eq(1),
# Next, we'll need to make sure we don't receive packets out of sequence. This means
# we'll have to start ignoring packets until the other side responds to the LBAD.
# The other side responds with a Retry link command (LRTY) once it's safe for us to
# pay attention to packets again.
ignore_packets .eq(1)
]
# Finally, if we receive a Retry link command, this means we no longer need to ignore packets.
# This typically happens in response to us sending an LBAD and marking future packets as ignored.
with m.If(self.retry_received):
m.d.ss += ignore_packets.eq(0)
#
# Packet delivery (link layer -> physical layer).
#
m.d.comb += [
# As long as we have at least one buffer filled, we have header packets pending.
self.queue.valid .eq(buffers_filled > 0),
# Always provide the value of our oldest packet out to our consumer.
self.queue.header .eq(buffers[read_pointer])
]
# If the protocol layer is marking one of our packets as consumed, we no longer
# need to buffer it -- it's the protocol layer's problem, now!
with m.If(self.queue.valid & self.queue.ready):
# Move on to reading from the next buffer in sequence.
m.d.ss += read_pointer.eq(read_pointer + 1)
m.d.comb += [
# First, we'll free the buffer associated with the relevant packet...
release_buffer .eq(1),
# ... and request that our link partner be notified of the new space.
enqueue_credit_issue .eq(1)
]
#
# Automatic credit expiration.
#
# FIXME: implement this!
#
# Link command generation.
#
m.submodules.lc_generator = lc_generator = LinkCommandGenerator()
m.d.comb += [
self.source .stream_eq(lc_generator.source),
self.link_command_sent .eq(lc_generator.done),
]
with m.FSM(domain="ss"):
# DISPATCH_COMMAND -- the state in which we identify any pending link commands necessary,
# and then move to the state in which we'll send them.
with m.State("DISPATCH_COMMAND"):
with m.If(self.enable):
# NOTE: the order below is important; changing it can easily break things:
# - ACKS must come before credits, as we must send an LGOOD before we send our initial credits.
# - LBAD must come after ACKs and credit management, as all scheduled ACKs need to be
# sent to the other side for the LBAD to have the correct semantic meaning.
with m.If(retry_pending):
m.next = "SEND_LRTY"
# If we have acknowledgements to send, send them.
with m.Elif(acks_to_send):
m.next = "SEND_ACKS"
# If we have link credits to issue, move to issuing them to the other side.
with m.Elif(credits_to_issue):
m.next = "ISSUE_CREDITS"
# If we need to send an LBAD, do so.
with m.Elif(lbad_pending):
m.next = "SEND_LBAD"
# If we need to send a link power-state command, do so.
with m.Elif(lxu_pending):
m.next = "SEND_LXU"
# If we need to send a keepalive, do so.
with m.Elif(keepalive_pending):
m.next = "SEND_KEEPALIVE"
# Once we've become disabled, we'll want to prepare for our next enable.
# This means preparing for our advertisement, by:
with m.If(Fell(self.enable) | self.usb_reset):
m.d.ss += [
# - Resetting our pending ACKs to 1, so we perform a sequence number advertisement
# when we're next enabled.
acks_to_send .eq(1),
# - Decreasing our next sequence number, so we maintain a continuity of sequence numbers
# without counting the advertising one. This doesn't seem to be strictly necessary
# per the spec; but it seems to make analyzers happier, so we'll go with it.
next_header_to_ack .eq(next_header_to_ack - 1),
# - Clearing all of our buffers.
read_pointer .eq(0),
write_pointer .eq(0),
buffers_filled .eq(0),
# - Preparing to re-issue all of our buffer credits.
next_credit_to_issue .eq(0),
credits_to_issue .eq(self._buffer_count),
# - Clear our pending events.
retry_pending .eq(0),
lbad_pending .eq(0),
keepalive_pending .eq(0),
ignore_packets .eq(0)
]
# If this is a USB Reset, also reset our sequences.
with m.If(self.usb_reset):
m.d.ss += [
expected_sequence_number .eq(0),
next_header_to_ack .eq(-1)
]
# SEND_ACKS -- a valid header packet has been received, or we're advertising
# our initial sequence number; send an LGOOD packet.
with m.State("SEND_ACKS"):
# Send an LGOOD command, acknowledging the last received packet header.
m.d.comb += [
lc_generator.generate .eq(1),
lc_generator.command .eq(LinkCommand.LGOOD),
lc_generator.subtype .eq(next_header_to_ack)
]
# Wait until our link command is done, and then move on.
with m.If(lc_generator.done):
# Move to the next header packet in the sequence, and decrease
# the number of outstanding ACKs.
m.d.comb += dequeue_ack .eq(1)
m.d.ss += next_header_to_ack .eq(next_header_to_ack + 1)
# If this was the last ACK we had to send, move back to our dispatch state.
with m.If(acks_to_send == 1):
m.next = "DISPATCH_COMMAND"
# ISSUE_CREDITS -- header packet buffers have been freed; and we now need to notify the
# other side, so it knows we have buffers available.
with m.State("ISSUE_CREDITS"):
# Send an LCRD command, indicating that we have a free buffer.
m.d.comb += [
lc_generator.generate .eq(1),
lc_generator.command .eq(LinkCommand.LCRD),
lc_generator.subtype .eq(next_credit_to_issue)
]
# Wait until our link command is done, and then move on.
with m.If(lc_generator.done):
# Move to the next credit...
m.d.comb += dequeue_credit_issue .eq(1)
m.d.ss += next_credit_to_issue .eq(next_credit_to_issue + 1)
# If this was the last credit we had to issue, move back to our dispatch state.
with m.If(credits_to_issue == 1):
m.next = "DISPATCH_COMMAND"
# SEND_LBAD -- we've received a bad header packet; we'll need to let the other side know.
with m.State("SEND_LBAD"):
m.d.comb += [
lc_generator.generate .eq(1),
lc_generator.command .eq(LinkCommand.LBAD),
]
# Once we've sent the LBAD, we can mark it as no longer pending and return to our dispatch.
# (We can't ever have multiple LBADs queued up; as we ignore future packets after sending one.)
with m.If(lc_generator.done):
m.d.ss += lbad_pending.eq(0)
m.next = "DISPATCH_COMMAND"
# SEND_LRTY -- our transmitter has requested that we send a retry indication to the other side.
# We'll do our transmitter a favor and do so.
with m.State("SEND_LRTY"):
m.d.comb += [
lc_generator.generate .eq(1),
lc_generator.command .eq(LinkCommand.LRTY)
]
with m.If(lc_generator.done):
m.d.ss += retry_pending.eq(0)
m.next = "DISPATCH_COMMAND"
# SEND_KEEPALIVE -- our link layer timer has requested that we send a keep-alive,
# indicating that we're still in U0 and the link is still good. Do so.
with m.State("SEND_KEEPALIVE"):
# Send the correct packet type for the direction our port is facing.
command = LinkCommand.LDN if self._is_downstream_facing else LinkCommand.LUP
m.d.comb += [
lc_generator.generate .eq(1),
lc_generator.command .eq(command)
]
# Once we've sent the keepalive, we can mark it as no longer pending and return to our dispatch.
# (There's no sense in sending repeated keepalives; one gets the message across.)
with m.If(lc_generator.done):
m.d.ss += keepalive_pending.eq(0)
m.next = "DISPATCH_COMMAND"
# SEND_LXU -- we're being instructed to reject a requested power-state transfer.
# We'll send an LXU packet to inform the other side of the rejection.
with m.State("SEND_LXU"):
m.d.comb += [
lc_generator.generate .eq(1),
lc_generator.command .eq(LinkCommand.LXU)
]
with m.If(lc_generator.done):
m.d.ss += lxu_pending.eq(0)
m.next = "DISPATCH_COMMAND"
return m
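# Hedged, software-only sketch of the DISPATCH_COMMAND priority above: retries first,
# then ACKs, then credits, then LBAD, then power-state responses, then keepalives.
# This helper is not used by the FSM; it exists only to make the ordering constraint
# described in the comments easy to see (and to check in plain Python if desired).
def _dispatch_priority(retry, acks, credits, lbad, lxu, keepalive):
    """ Return the name of the state the dispatcher would enter next, or None if idle. """
    if retry:
        return "SEND_LRTY"
    if acks:
        return "SEND_ACKS"
    if credits:
        return "ISSUE_CREDITS"
    if lbad:
        return "SEND_LBAD"
    if lxu:
        return "SEND_LXU"
    if keepalive:
        return "SEND_KEEPALIVE"
    return None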
if __name__ == "__main__":
unittest.main()
|
# Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Batch updates / deletes of storage buckets / blobs.
See https://cloud.google.com/storage/docs/json_api/v1/how-tos/batch
"""
from email.encoders import encode_noop
from email.generator import Generator
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.parser import Parser
import io
import json
import requests
import six
from google.cloud import _helpers
from google.cloud import exceptions
from google.cloud.storage._http import Connection
class MIMEApplicationHTTP(MIMEApplication):
"""MIME type for ``application/http``.
Constructs payload from headers and body
:type method: str
:param method: HTTP method
:type uri: str
:param uri: URI for HTTP request
:type headers: dict
:param headers: HTTP headers
:type body: str
:param body: (Optional) HTTP payload
"""
def __init__(self, method, uri, headers, body):
if isinstance(body, dict):
body = json.dumps(body)
headers["Content-Type"] = "application/json"
headers["Content-Length"] = len(body)
if body is None:
body = ""
lines = ["%s %s HTTP/1.1" % (method, uri)]
lines.extend(
["%s: %s" % (key, value) for key, value in sorted(headers.items())]
)
lines.append("")
lines.append(body)
payload = "\r\n".join(lines)
if six.PY2:
# email.message.Message is an old-style class, so we
# cannot use 'super()'.
MIMEApplication.__init__(self, payload, "http", encode_noop)
else: # pragma: NO COVER Python3
super_init = super(MIMEApplicationHTTP, self).__init__
super_init(payload, "http", encode_noop)
class _FutureDict(object):
"""Class to hold a future value for a deferred request.
Used for requests that get sent in a :class:`Batch`.
"""
@staticmethod
def get(key, default=None):
"""Stand-in for dict.get.
:type key: object
:param key: Hashable dictionary key.
:type default: object
:param default: Fallback value to dict.get.
:raises: :class:`KeyError` always since the future is intended to fail
as a dictionary.
"""
raise KeyError("Cannot get(%r, default=%r) on a future" % (key, default))
def __getitem__(self, key):
"""Stand-in for dict[key].
:type key: object
:param key: Hashable dictionary key.
:raises: :class:`KeyError` always since the future is intended to fail
as a dictionary.
"""
raise KeyError("Cannot get item %r from a future" % (key,))
def __setitem__(self, key, value):
"""Stand-in for dict[key] = value.
:type key: object
:param key: Hashable dictionary key.
:type value: object
:param value: Dictionary value.
:raises: :class:`KeyError` always since the future is intended to fail
as a dictionary.
"""
raise KeyError("Cannot set %r -> %r on a future" % (key, value))
class _FutureResponse(requests.Response):
"""Reponse that returns a placeholder dictionary for a batched requests."""
def __init__(self, future_dict):
super(_FutureResponse, self).__init__()
self._future_dict = future_dict
self.status_code = 204
def json(self):
return self._future_dict
@property
def content(self):
return self._future_dict
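# Hedged illustration (not part of the library): a deferred call inside a Batch gets a
# _FutureResponse wrapping a _FutureDict, so poking at the "result" before the batch is
# finished raises KeyError rather than silently returning stale data. The function below
# is a defined-but-unused sketch demonstrating that behaviour.
def _demo_future_placeholder():
    placeholder = _FutureDict()
    response = _FutureResponse(placeholder)
    assert response.status_code == 204           # placeholder status until finish()
    try:
        response.json()["name"]                   # any key access fails loudly
    except KeyError as exc:
        print("deferred result not ready:", exc)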
class Batch(Connection):
"""Proxy an underlying connection, batching up change operations.
:type client: :class:`google.cloud.storage.client.Client`
:param client: The client to use for making connections.
"""
_MAX_BATCH_SIZE = 1000
def __init__(self, client):
super(Batch, self).__init__(client)
self._requests = []
self._target_objects = []
def _do_request(self, method, url, headers, data, target_object, timeout=None):
"""Override Connection: defer actual HTTP request.
Only allow up to ``_MAX_BATCH_SIZE`` requests to be deferred.
:type method: str
:param method: The HTTP method to use in the request.
:type url: str
:param url: The URL to send the request to.
:type headers: dict
:param headers: A dictionary of HTTP headers to send with the request.
:type data: str
:param data: The data to send as the body of the request.
:type target_object: object
:param target_object:
(Optional) This allows us to enable custom behavior in our batch
connection. Here we defer an HTTP request and complete
initialization of the object at a later time.
:type timeout: float or tuple
:param timeout: (optional) The amount of time, in seconds, to wait
for the server response. By default, the method waits indefinitely.
Can also be passed as a tuple (connect_timeout, read_timeout).
See :meth:`requests.Session.request` documentation for details.
:rtype: tuple of ``response`` (a dictionary of sorts)
and ``content`` (a string).
:returns: The HTTP response object and the content of the response.
"""
if len(self._requests) >= self._MAX_BATCH_SIZE:
raise ValueError(
"Too many deferred requests (max %d)" % self._MAX_BATCH_SIZE
)
self._requests.append((method, url, headers, data, timeout))
result = _FutureDict()
self._target_objects.append(target_object)
if target_object is not None:
target_object._properties = result
return _FutureResponse(result)
def _prepare_batch_request(self):
"""Prepares headers and body for a batch request.
:rtype: tuple (dict, str)
:returns: The pair of headers and body of the batch request to be sent.
:raises: :class:`ValueError` if no requests have been deferred.
"""
if len(self._requests) == 0:
raise ValueError("No deferred requests")
multi = MIMEMultipart()
# Use timeout of last request, default to None (indefinite)
timeout = None
for method, uri, headers, body, _timeout in self._requests:
subrequest = MIMEApplicationHTTP(method, uri, headers, body)
multi.attach(subrequest)
timeout = _timeout
# The `email` package expects to deal with "native" strings
if six.PY3: # pragma: NO COVER Python3
buf = io.StringIO()
else:
buf = io.BytesIO()
generator = Generator(buf, False, 0)
generator.flatten(multi)
payload = buf.getvalue()
# Strip off redundant header text
_, body = payload.split("\n\n", 1)
return dict(multi._headers), body, timeout
def _finish_futures(self, responses):
"""Apply all the batch responses to the futures created.
:type responses: list of :class:`requests.Response`
:param responses: List of unpacked subresponses, one for each deferred request
in the batch.
:raises: :class:`ValueError` if no requests have been deferred.
"""
# If a bad status occurs, we track it, but don't raise an exception
# until all futures have been populated.
exception_args = None
if len(self._target_objects) != len(responses): # pragma: NO COVER
raise ValueError("Expected a response for every request.")
for target_object, subresponse in zip(self._target_objects, responses):
if not 200 <= subresponse.status_code < 300:
exception_args = exception_args or subresponse
elif target_object is not None:
try:
target_object._properties = subresponse.json()
except ValueError:
target_object._properties = subresponse.content
if exception_args is not None:
raise exceptions.from_http_response(exception_args)
def finish(self):
"""Submit a single `multipart/mixed` request with deferred requests.
:rtype: list of :class:`requests.Response`
:returns: one unpacked subresponse per deferred request.
"""
headers, body, timeout = self._prepare_batch_request()
url = "%s/batch/storage/v1" % self.API_BASE_URL
# Use the private ``_base_connection`` rather than the property
# ``_connection``, since the property may be this
# current batch.
response = self._client._base_connection._make_request(
"POST", url, data=body, headers=headers, timeout=timeout
)
responses = list(_unpack_batch_response(response))
self._finish_futures(responses)
return responses
def current(self):
"""Return the topmost batch, or None."""
return self._client.current_batch
def __enter__(self):
self._client._push_batch(self)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
try:
if exc_type is None:
self.finish()
finally:
self._client._pop_batch()
def _generate_faux_mime_message(parser, response):
"""Convert response, content -> (multipart) email.message.
Helper for _unpack_batch_response.
"""
# We coerce to bytes to get consistent concat across
# Py2 and Py3. Percent formatting is insufficient since
# it includes the b in Py3.
content_type = _helpers._to_bytes(response.headers.get("content-type", ""))
faux_message = b"".join(
[b"Content-Type: ", content_type, b"\nMIME-Version: 1.0\n\n", response.content]
)
if six.PY2:
return parser.parsestr(faux_message)
else: # pragma: NO COVER Python3
return parser.parsestr(faux_message.decode("utf-8"))
def _unpack_batch_response(response):
"""Convert requests.Response -> [(headers, payload)].
Creates a generator of tuples of emulating the responses to
:meth:`requests.Session.request`.
:type response: :class:`requests.Response`
:param response: HTTP response / headers from a request.
"""
parser = Parser()
message = _generate_faux_mime_message(parser, response)
if not isinstance(message._payload, list): # pragma: NO COVER
raise ValueError("Bad response: not multi-part")
for subrequest in message._payload:
status_line, rest = subrequest._payload.split("\n", 1)
_, status, _ = status_line.split(" ", 2)
sub_message = parser.parsestr(rest)
payload = sub_message._payload
msg_headers = dict(sub_message._headers)
content_id = msg_headers.get("Content-ID")
subresponse = requests.Response()
subresponse.request = requests.Request(
method="BATCH", url="contentid://{}".format(content_id)
).prepare()
subresponse.status_code = int(status)
subresponse.headers.update(msg_headers)
subresponse._content = payload.encode("utf-8")
yield subresponse
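# Hedged usage sketch (illustrative only; the bucket and blob names are made up): the
# usual way to exercise this module is through Client.batch(), which pushes a Batch as
# the current connection so that mutating calls made inside the "with" block are
# deferred and sent as a single multipart/mixed request when the block exits.
def _example_batched_deletes():
    from google.cloud import storage

    client = storage.Client()
    bucket = client.bucket("example-bucket")
    with client.batch():
        bucket.delete_blob("reports/2020-01.csv")
        bucket.delete_blob("reports/2020-02.csv")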
|
import flask
from devices import devices
from models import JsonEncoder
from pins import pins
from settings import settings
app = flask.Flask(__name__)
app.json_encoder = JsonEncoder
app.register_blueprint(devices, url_prefix="/devices")
app.register_blueprint(settings, url_prefix="/settings")
app.register_blueprint(pins, url_prefix="/pins")
@app.route("/", defaults={"path": None})
@app.route("/<path:path>")
def index(path: str):
return open("static/index.html", "r").read()
if __name__ == "__main__":
import __init__
app.run()
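# Hedged smoke-test sketch (not wired into anything): the catch-all route above serves
# static/index.html for every path, the usual single-page-app pattern. This helper is
# illustrative only and assumes the process is started from the project root so that
# the relative "static/index.html" path resolves.
def _smoke_test_catch_all():
    with app.test_client() as client:
        assert client.get("/").status_code == 200
        assert client.get("/any/client/side/route").status_code == 200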
|
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
# pylint: disable=protected-access
from typing import (
Any,
IO,
Union,
List,
TYPE_CHECKING
)
from azure.core.tracing.decorator import distributed_trace
from azure.core.polling import LROPoller
from azure.core.polling.base_polling import LROBasePolling
from ._generated._form_recognizer_client import FormRecognizerClient as FormRecognizer
from ._response_handlers import (
prepare_receipt,
prepare_content_result,
prepare_form_result
)
from ._generated.models import AnalyzeOperationResult
from ._helpers import get_content_type, get_authentication_policy, error_map, POLLING_INTERVAL
from ._user_agent import USER_AGENT
from ._polling import AnalyzePolling
if TYPE_CHECKING:
from azure.core.credentials import AzureKeyCredential, TokenCredential
from ._models import FormPage, RecognizedForm
class FormRecognizerClient(object):
"""FormRecognizerClient extracts information from forms and images into structured data.
It is the interface to use for analyzing receipts, recognizing content/layout from
forms, and analyzing custom forms from trained models. It provides different methods
based on inputs from a URL and inputs from a stream.
:param str endpoint: Supported Cognitive Services endpoints (protocol and hostname,
for example: https://westus2.api.cognitive.microsoft.com).
:param credential: Credentials needed for the client to connect to Azure.
This is an instance of AzureKeyCredential if using an API key or a token
credential from :mod:`azure.identity`.
:type credential: :class:`~azure.core.credentials.AzureKeyCredential` or
:class:`~azure.core.credentials.TokenCredential`
.. admonition:: Example:
.. literalinclude:: ../samples/sample_authentication.py
:start-after: [START create_fr_client_with_key]
:end-before: [END create_fr_client_with_key]
:language: python
:dedent: 8
:caption: Creating the FormRecognizerClient with an endpoint and API key.
.. literalinclude:: ../samples/sample_authentication.py
:start-after: [START create_fr_client_with_aad]
:end-before: [END create_fr_client_with_aad]
:language: python
:dedent: 8
:caption: Creating the FormRecognizerClient with a token credential.
"""
def __init__(self, endpoint, credential, **kwargs):
# type: (str, Union[AzureKeyCredential, TokenCredential], Any) -> None
authentication_policy = get_authentication_policy(credential)
polling_interval = kwargs.pop("polling_interval", POLLING_INTERVAL)
self._client = FormRecognizer(
endpoint=endpoint,
credential=credential, # type: ignore
sdk_moniker=USER_AGENT,
authentication_policy=authentication_policy,
polling_interval=polling_interval,
**kwargs
)
def _receipt_callback(self, raw_response, _, headers): # pylint: disable=unused-argument
analyze_result = self._client._deserialize(AnalyzeOperationResult, raw_response)
return prepare_receipt(analyze_result)
@distributed_trace
def begin_recognize_receipts(self, receipt, **kwargs):
# type: (Union[bytes, IO[bytes]], Any) -> LROPoller[List[RecognizedForm]]
"""Extract field text and semantic values from a given US sales receipt.
The input document must be of one of the supported content types - 'application/pdf',
'image/jpeg', 'image/png' or 'image/tiff'.
See fields found on a receipt here:
https://aka.ms/azsdk/python/formrecognizer/receiptfields
:param receipt: JPEG, PNG, PDF and TIFF type file stream or bytes.
Currently only supports US sales receipts.
:type receipt: bytes or IO[bytes]
:keyword bool include_field_elements:
Whether or not to include field elements such as lines and words in addition to form fields.
:keyword content_type: Media type of the body sent to the API. Content-type is
auto-detected, but can be overridden by passing this keyword argument. For options,
see :class:`~azure.ai.formrecognizer.FormContentType`.
:paramtype content_type: str or ~azure.ai.formrecognizer.FormContentType
:keyword int polling_interval: Waiting time between two polls for LRO operations
if no Retry-After header is present. Defaults to 5 seconds.
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:return: An instance of an LROPoller. Call `result()` on the poller
object to return a list[:class:`~azure.ai.formrecognizer.RecognizedForm`].
:rtype: ~azure.core.polling.LROPoller[list[~azure.ai.formrecognizer.RecognizedForm]]
:raises ~azure.core.exceptions.HttpResponseError:
.. admonition:: Example:
.. literalinclude:: ../samples/sample_recognize_receipts.py
:start-after: [START recognize_receipts]
:end-before: [END recognize_receipts]
:language: python
:dedent: 8
:caption: Recognize US sales receipt fields.
"""
polling_interval = kwargs.pop("polling_interval", self._client._config.polling_interval)
continuation_token = kwargs.pop("continuation_token", None)
content_type = kwargs.pop("content_type", None)
if content_type == "application/json":
raise TypeError("Call begin_recognize_receipts_from_url() to analyze a receipt from a URL.")
include_field_elements = kwargs.pop("include_field_elements", False)
if content_type is None:
content_type = get_content_type(receipt)
return self._client.begin_analyze_receipt_async(
file_stream=receipt,
content_type=content_type,
include_text_details=include_field_elements,
cls=kwargs.pop("cls", self._receipt_callback),
polling=LROBasePolling(timeout=polling_interval, **kwargs),
error_map=error_map,
continuation_token=continuation_token,
**kwargs
)
@distributed_trace
def begin_recognize_receipts_from_url(self, receipt_url, **kwargs):
# type: (str, Any) -> LROPoller[List[RecognizedForm]]
"""Extract field text and semantic values from a given US sales receipt.
The input document must be the location (URL) of the receipt to be analyzed.
See fields found on a receipt here:
https://aka.ms/azsdk/python/formrecognizer/receiptfields
:param str receipt_url: The URL of the receipt to analyze. The input must be a valid, encoded URL
of one of the supported formats: JPEG, PNG, PDF and TIFF. Currently only supports
US sales receipts.
:keyword bool include_field_elements:
Whether or not to include field elements such as lines and words in addition to form fields.
:keyword int polling_interval: Waiting time between two polls for LRO operations
if no Retry-After header is present. Defaults to 5 seconds.
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:return: An instance of an LROPoller. Call `result()` on the poller
object to return a list[:class:`~azure.ai.formrecognizer.RecognizedForm`].
:rtype: ~azure.core.polling.LROPoller[list[~azure.ai.formrecognizer.RecognizedForm]]
:raises ~azure.core.exceptions.HttpResponseError:
.. admonition:: Example:
.. literalinclude:: ../samples/sample_recognize_receipts_from_url.py
:start-after: [START recognize_receipts_from_url]
:end-before: [END recognize_receipts_from_url]
:language: python
:dedent: 8
:caption: Recognize US sales receipt fields from a URL.
"""
polling_interval = kwargs.pop("polling_interval", self._client._config.polling_interval)
continuation_token = kwargs.pop("continuation_token", None)
include_field_elements = kwargs.pop("include_field_elements", False)
return self._client.begin_analyze_receipt_async(
file_stream={"source": receipt_url},
include_text_details=include_field_elements,
cls=kwargs.pop("cls", self._receipt_callback),
polling=LROBasePolling(timeout=polling_interval, **kwargs),
error_map=error_map,
continuation_token=continuation_token,
**kwargs
)
def _content_callback(self, raw_response, _, headers): # pylint: disable=unused-argument
analyze_result = self._client._deserialize(AnalyzeOperationResult, raw_response)
return prepare_content_result(analyze_result)
@distributed_trace
def begin_recognize_content(self, form, **kwargs):
# type: (Union[bytes, IO[bytes]], Any) -> LROPoller[List[FormPage]]
"""Extract text and content/layout information from a given document.
The input document must be of one of the supported content types - 'application/pdf',
'image/jpeg', 'image/png' or 'image/tiff'.
:param form: JPEG, PNG, PDF and TIFF type file stream or bytes.
:type form: bytes or IO[bytes]
:keyword content_type: Media type of the body sent to the API. Content-type is
auto-detected, but can be overridden by passing this keyword argument. For options,
see :class:`~azure.ai.formrecognizer.FormContentType`.
:paramtype content_type: str or ~azure.ai.formrecognizer.FormContentType
:keyword int polling_interval: Waiting time between two polls for LRO operations
if no Retry-After header is present. Defaults to 5 seconds.
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:return: An instance of an LROPoller. Call `result()` on the poller
object to return a list[:class:`~azure.ai.formrecognizer.FormPage`].
:rtype: ~azure.core.polling.LROPoller[list[~azure.ai.formrecognizer.FormPage]]
:raises ~azure.core.exceptions.HttpResponseError:
.. admonition:: Example:
.. literalinclude:: ../samples/sample_recognize_content.py
:start-after: [START recognize_content]
:end-before: [END recognize_content]
:language: python
:dedent: 8
:caption: Recognize text and content/layout information from a form.
"""
polling_interval = kwargs.pop("polling_interval", self._client._config.polling_interval)
continuation_token = kwargs.pop("continuation_token", None)
content_type = kwargs.pop("content_type", None)
if content_type == "application/json":
raise TypeError("Call begin_recognize_content_from_url() to analyze a document from a URL.")
if content_type is None:
content_type = get_content_type(form)
return self._client.begin_analyze_layout_async(
file_stream=form,
content_type=content_type,
cls=kwargs.pop("cls", self._content_callback),
polling=LROBasePolling(timeout=polling_interval, **kwargs),
error_map=error_map,
continuation_token=continuation_token,
**kwargs
)
@distributed_trace
def begin_recognize_content_from_url(self, form_url, **kwargs):
# type: (str, Any) -> LROPoller[List[FormPage]]
"""Extract text and layout information from a given document.
The input document must be the location (URL) of the document to be analyzed.
:param str form_url: The URL of the form to analyze. The input must be a valid, encoded URL
of one of the supported formats: JPEG, PNG, PDF and TIFF.
:keyword int polling_interval: Waiting time between two polls for LRO operations
if no Retry-After header is present. Defaults to 5 seconds.
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:return: An instance of an LROPoller. Call `result()` on the poller
object to return a list[:class:`~azure.ai.formrecognizer.FormPage`].
:rtype: ~azure.core.polling.LROPoller[list[~azure.ai.formrecognizer.FormPage]]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling_interval = kwargs.pop("polling_interval", self._client._config.polling_interval)
continuation_token = kwargs.pop("continuation_token", None)
return self._client.begin_analyze_layout_async(
file_stream={"source": form_url},
cls=kwargs.pop("cls", self._content_callback),
polling=LROBasePolling(timeout=polling_interval, **kwargs),
error_map=error_map,
continuation_token=continuation_token,
**kwargs
)
@distributed_trace
def begin_recognize_custom_forms(self, model_id, form, **kwargs):
# type: (str, Union[bytes, IO[bytes]], Any) -> LROPoller[List[RecognizedForm]]
"""Analyze a custom form with a model trained with or without labels. The form
to analyze should be of the same type as the forms that were used to train the model.
The input document must be of one of the supported content types - 'application/pdf',
'image/jpeg', 'image/png' or 'image/tiff'.
:param str model_id: Custom model identifier.
:param form: JPEG, PNG, PDF and TIFF type file stream or bytes.
:type form: bytes or IO[bytes]
:keyword bool include_field_elements:
Whether or not to include field elements such as lines and words in addition to form fields.
:keyword content_type: Media type of the body sent to the API. Content-type is
auto-detected, but can be overridden by passing this keyword argument. For options,
see :class:`~azure.ai.formrecognizer.FormContentType`.
:paramtype content_type: str or ~azure.ai.formrecognizer.FormContentType
:keyword int polling_interval: Waiting time between two polls for LRO operations
if no Retry-After header is present. Defaults to 5 seconds.
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:return: An instance of an LROPoller. Call `result()` on the poller
object to return a list[:class:`~azure.ai.formrecognizer.RecognizedForm`].
:rtype: ~azure.core.polling.LROPoller[list[~azure.ai.formrecognizer.RecognizedForm]]
:raises ~azure.core.exceptions.HttpResponseError:
.. admonition:: Example:
.. literalinclude:: ../samples/sample_recognize_custom_forms.py
:start-after: [START recognize_custom_forms]
:end-before: [END recognize_custom_forms]
:language: python
:dedent: 8
:caption: Recognize fields and values from a custom form.
"""
if not model_id:
raise ValueError("model_id cannot be None or empty.")
cls = kwargs.pop("cls", None)
polling_interval = kwargs.pop("polling_interval", self._client._config.polling_interval)
continuation_token = kwargs.pop("continuation_token", None)
content_type = kwargs.pop("content_type", None)
if content_type == "application/json":
raise TypeError("Call begin_recognize_custom_forms_from_url() to analyze a document from a URL.")
include_field_elements = kwargs.pop("include_field_elements", False)
if content_type is None:
content_type = get_content_type(form)
def analyze_callback(raw_response, _, headers): # pylint: disable=unused-argument
analyze_result = self._client._deserialize(AnalyzeOperationResult, raw_response)
return prepare_form_result(analyze_result, model_id)
deserialization_callback = cls if cls else analyze_callback
return self._client.begin_analyze_with_custom_model(
file_stream=form,
model_id=model_id,
include_text_details=include_field_elements,
content_type=content_type,
cls=deserialization_callback,
polling=LROBasePolling(timeout=polling_interval, lro_algorithms=[AnalyzePolling()], **kwargs),
error_map=error_map,
continuation_token=continuation_token,
**kwargs
)
@distributed_trace
def begin_recognize_custom_forms_from_url(self, model_id, form_url, **kwargs):
# type: (str, str, Any) -> LROPoller[List[RecognizedForm]]
"""Analyze a custom form with a model trained with or without labels. The form
to analyze should be of the same type as the forms that were used to train the model.
The input document must be the location (URL) of the document to be analyzed.
:param str model_id: Custom model identifier.
:param str form_url: The URL of the form to analyze. The input must be a valid, encoded URL
of one of the supported formats: JPEG, PNG, PDF and TIFF.
:keyword bool include_field_elements:
Whether or not to include field elements such as lines and words in addition to form fields.
:keyword int polling_interval: Waiting time between two polls for LRO operations
if no Retry-After header is present. Defaults to 5 seconds.
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:return: An instance of an LROPoller. Call `result()` on the poller
object to return a list[:class:`~azure.ai.formrecognizer.RecognizedForm`].
:rtype: ~azure.core.polling.LROPoller[list[~azure.ai.formrecognizer.RecognizedForm]]
:raises ~azure.core.exceptions.HttpResponseError:
"""
if not model_id:
raise ValueError("model_id cannot be None or empty.")
cls = kwargs.pop("cls", None)
polling_interval = kwargs.pop("polling_interval", self._client._config.polling_interval)
continuation_token = kwargs.pop("continuation_token", None)
include_field_elements = kwargs.pop("include_field_elements", False)
def analyze_callback(raw_response, _, headers): # pylint: disable=unused-argument
analyze_result = self._client._deserialize(AnalyzeOperationResult, raw_response)
return prepare_form_result(analyze_result, model_id)
deserialization_callback = cls if cls else analyze_callback
return self._client.begin_analyze_with_custom_model(
file_stream={"source": form_url},
model_id=model_id,
include_text_details=include_field_elements,
cls=deserialization_callback,
polling=LROBasePolling(timeout=polling_interval, lro_algorithms=[AnalyzePolling()], **kwargs),
error_map=error_map,
continuation_token=continuation_token,
**kwargs
)
def close(self):
# type: () -> None
"""Close the :class:`~azure.ai.formrecognizer.FormRecognizerClient` session.
"""
return self._client.close()
def __enter__(self):
# type: () -> FormRecognizerClient
self._client.__enter__() # pylint:disable=no-member
return self
def __exit__(self, *args):
# type: (*Any) -> None
self._client.__exit__(*args) # pylint:disable=no-member
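# Hedged usage sketch (illustrative only; the endpoint, key and URL are placeholders):
# a typical receipt-recognition round trip with this client uses the begin_* poller
# methods defined above and reads recognized fields off the returned RecognizedForm
# objects (the available field names vary by receipt).
def _example_recognize_receipt_from_url():
    from azure.core.credentials import AzureKeyCredential

    client = FormRecognizerClient(
        endpoint="https://<your-resource>.cognitiveservices.azure.com/",
        credential=AzureKeyCredential("<api-key>"),
    )
    poller = client.begin_recognize_receipts_from_url("https://example.com/receipt.png")
    for recognized_form in poller.result():
        for name, field in recognized_form.fields.items():
            print(name, field.value, field.confidence)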
|
import re
import lst_scripts
def test_version():
with open("lstirf/__init__.py") as f:
__version__ = re.search('^__version__ = "(.*)"$', f.read()).group(1)
assert lst_scripts.__version__ == __version__
|
from rest_framework.authentication import SessionAuthentication as RESTSessionAuthentication
class SessionAuthentication(RESTSessionAuthentication):
"""
This class is needed because REST Framework's default SessionAuthentication never returns 401s,
since it cannot fill the WWW-Authenticate header with a valid value in the 401 response. As a
result, we cannot distinguish calls that are not authenticated (401 Unauthorized) from calls for which
the user does not have permission (403 Forbidden). See https://github.com/encode/django-rest-framework/issues/5968
We override the authenticate_header method of SessionAuthentication, so that a value for the WWW-Authenticate
header can be retrieved and the response code is automatically set to 401 for unauthenticated requests.
"""
def authenticate_header(self, request):
return 'Session'
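# Hedged configuration sketch: the dict below mirrors what you would put in settings.py to
# make DRF use this subclass (the dotted path "myproject.authentication" is an assumption
# about where this module lives). It is defined here only for illustration and is not read
# by anything.
EXAMPLE_REST_FRAMEWORK_SETTINGS = {
    "DEFAULT_AUTHENTICATION_CLASSES": [
        "myproject.authentication.SessionAuthentication",
    ],
}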
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=missing-return-doc, invalid-unary-operand-type
"""Module for builtin continuous pulse functions."""
import functools
from typing import Union, Tuple, Optional
import numpy as np
from qiskit.pulse.exceptions import PulseError
def constant(times: np.ndarray, amp: complex) -> np.ndarray:
"""Continuous constant pulse.
Args:
times: Times to output pulse for.
amp: Complex pulse amplitude.
"""
return np.full(len(times), amp, dtype=np.complex_)
def zero(times: np.ndarray) -> np.ndarray:
"""Continuous zero pulse.
Args:
times: Times to output pulse for.
"""
return constant(times, 0)
def square(times: np.ndarray, amp: complex, freq: float, phase: float = 0) -> np.ndarray:
"""Continuous square wave.
Args:
times: Times to output wave for.
amp: Pulse amplitude. Wave range is [-amp, amp].
freq: Pulse frequency. units of 1/dt.
phase: Pulse phase.
"""
x = times * freq + phase / np.pi
return amp * (2 * (2 * np.floor(x) - np.floor(2 * x)) + 1).astype(np.complex_)
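# Hedged worked example (not part of the public API): evaluating the floor-based formula
# above at a few points of one period shows it really is a +/-amp square wave -- +amp over
# the first half-period and -amp over the second. The helper is illustrative only.
def _demo_square_wave():
    times = np.array([0.0, 0.25, 0.5, 0.75])
    values = square(times, amp=1, freq=1)
    # Expected: [ 1.+0.j,  1.+0.j, -1.+0.j, -1.+0.j ]
    print(values)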
def sawtooth(times: np.ndarray, amp: complex, freq: float, phase: float = 0) -> np.ndarray:
"""Continuous sawtooth wave.
Args:
times: Times to output wave for.
amp: Pulse amplitude. Wave range is [-amp, amp].
freq: Pulse frequency. units of 1/dt.
phase: Pulse phase.
"""
x = times * freq + phase / np.pi
return amp * 2 * (x - np.floor(1 / 2 + x)).astype(np.complex_)
def triangle(times: np.ndarray, amp: complex, freq: float, phase: float = 0) -> np.ndarray:
"""Continuous triangle wave.
Args:
times: Times to output wave for.
amp: Pulse amplitude. Wave range is [-amp, amp].
freq: Pulse frequency. units of 1/dt.
phase: Pulse phase.
"""
return amp * (-2 * np.abs(sawtooth(times, 1, freq, phase=(phase - np.pi / 2) / 2)) + 1).astype(
np.complex_
)
def cos(times: np.ndarray, amp: complex, freq: float, phase: float = 0) -> np.ndarray:
"""Continuous cosine wave.
Args:
times: Times to output wave for.
amp: Pulse amplitude.
freq: Pulse frequency, units of 1/dt.
phase: Pulse phase.
"""
return amp * np.cos(2 * np.pi * freq * times + phase).astype(np.complex_)
def sin(times: np.ndarray, amp: complex, freq: float, phase: float = 0) -> np.ndarray:
"""Continuous cosine wave.
Args:
times: Times to output wave for.
amp: Pulse amplitude.
freq: Pulse frequency, units of 1/dt.
phase: Pulse phase.
"""
return amp * np.sin(2 * np.pi * freq * times + phase).astype(np.complex_)
def _fix_gaussian_width(
gaussian_samples,
amp: float,
center: float,
sigma: float,
zeroed_width: Optional[float] = None,
rescale_amp: bool = False,
ret_scale_factor: bool = False,
) -> np.ndarray:
r"""Enforce that the supplied gaussian pulse is zeroed at a specific width.
This is achieved by subtracting $\Omega_g(center \pm zeroed_width/2)$ from all samples.
Args:
gaussian_samples: Gaussian samples to modify.
amp: Pulse amplitude at `center`.
center: Center (mean) of pulse.
sigma: Standard deviation of pulse.
zeroed_width: Subtract baseline from gaussian pulses to make sure
$\Omega_g(center \pm zeroed_width/2)=0$ is satisfied. This is used to avoid
large discontinuities at the start of a gaussian pulse. If unsupplied,
defaults to $2*(center + 1)$ such that $\Omega_g(-1)=0$ and $\Omega_g(2*(center + 1))=0$.
rescale_amp: If True the pulse will be rescaled so that $\Omega_g(center)=amp$.
ret_scale_factor: Return amplitude scale factor.
"""
if zeroed_width is None:
zeroed_width = 2 * (center + 1)
zero_offset = gaussian(np.array([zeroed_width / 2]), amp, 0, sigma)
gaussian_samples -= zero_offset
amp_scale_factor = 1.0
if rescale_amp:
amp_scale_factor = amp / (amp - zero_offset) if amp - zero_offset != 0 else 1.0
gaussian_samples *= amp_scale_factor
if ret_scale_factor:
return gaussian_samples, amp_scale_factor
return gaussian_samples
def gaussian(
times: np.ndarray,
amp: complex,
center: float,
sigma: float,
zeroed_width: Optional[float] = None,
rescale_amp: bool = False,
ret_x: bool = False,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
r"""Continuous unnormalized gaussian pulse.
Integrated area under curve is $\Omega_g(amp, sigma) = amp \times np.sqrt(2\pi \sigma^2)$
Args:
times: Times to output pulse for.
amp: Pulse amplitude at `center`. If `zeroed_width` is set pulse amplitude at center
will be $amp-\Omega_g(center \pm zeroed_width/2)$ unless `rescale_amp` is set,
in which case all samples will be rescaled such that the center
amplitude will be `amp`.
center: Center (mean) of pulse.
sigma: Width (standard deviation) of pulse.
zeroed_width: Subtract baseline from gaussian pulses to make sure
$\Omega_g(center \pm zeroed_width/2)=0$ is satisfied. This is used to avoid
large discontinuities at the start of a gaussian pulse.
rescale_amp: If `zeroed_width` is not `None` and `rescale_amp=True` the pulse will
be rescaled so that $\Omega_g(center)=amp$.
ret_x: Return centered and standard deviation normalized pulse location.
$x=(times-center)/sigma$.
"""
times = np.asarray(times, dtype=np.complex_)
x = (times - center) / sigma
gauss = amp * np.exp(-(x ** 2) / 2).astype(np.complex_)
if zeroed_width is not None:
gauss = _fix_gaussian_width(
gauss,
amp=amp,
center=center,
sigma=sigma,
zeroed_width=zeroed_width,
rescale_amp=rescale_amp,
)
if ret_x:
return gauss, x
return gauss
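# Hedged worked example (illustrative only): with zeroed_width set, the baseline value of
# the gaussian at center +/- zeroed_width/2 is subtracted from every sample, so the nominal
# edges of the pulse land at (approximately) zero amplitude; rescale_amp=True then restores
# the peak back to `amp`.
def _demo_zeroed_gaussian():
    center, sigma = 10.0, 2.0
    times = np.array([0.0, center, 2 * center])
    samples = gaussian(times, amp=1.0, center=center, sigma=sigma,
                       zeroed_width=2 * center, rescale_amp=True)
    # Expected: edge samples ~0, center sample ~1.
    print(samples)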
def gaussian_deriv(
times: np.ndarray,
amp: complex,
center: float,
sigma: float,
ret_gaussian: bool = False,
zeroed_width: Optional[float] = None,
rescale_amp: bool = False,
) -> np.ndarray:
r"""Continuous unnormalized gaussian derivative pulse.
Args:
times: Times to output pulse for.
amp: Pulse amplitude at `center`.
center: Center (mean) of pulse.
sigma: Width (standard deviation) of pulse.
ret_gaussian: Return the gaussian with which the derivative was taken.
zeroed_width: Subtract baseline of pulse to make sure
$\Omega_g(center \pm zeroed_width/2)=0$ is satisfied. This is used to avoid
large discontinuities at the start of a pulse.
rescale_amp: If `zeroed_width` is not `None` and `rescale_amp=True` the pulse will
be rescaled so that $\Omega_g(center)=amp$.
"""
gauss, x = gaussian(
times,
amp=amp,
center=center,
sigma=sigma,
zeroed_width=zeroed_width,
rescale_amp=rescale_amp,
ret_x=True,
)
gauss_deriv = -x / sigma * gauss
if ret_gaussian:
return gauss_deriv, gauss
return gauss_deriv
def _fix_sech_width(
sech_samples,
amp: float,
center: float,
sigma: float,
zeroed_width: Optional[float] = None,
rescale_amp: bool = False,
ret_scale_factor: bool = False,
) -> np.ndarray:
r"""Enforce that the supplied sech pulse is zeroed at a specific width.
This is achieved by subtracting $\Omega_g(center \pm zeroed_width/2)$ from all samples.
amp: Pulse amplitude at `center`.
center: Center (mean) of pulse.
sigma: Standard deviation of pulse.
zeroed_width: Subtract baseline from sech pulses to make sure
$\Omega_g(center \pm zeroed_width/2)=0$ is satisfied. This is used to avoid
large discontinuities at the start of a sech pulse. If unsupplied,
defaults to $2*(center + 1)$ such that $\Omega_g(-1)=0$ and $\Omega_g(2*center + 1)=0$.
rescale_amp: If True the pulse will be rescaled so that $\Omega_g(center)=amp$.
ret_scale_factor: Return amplitude scale factor.
"""
if zeroed_width is None:
zeroed_width = 2 * (center + 1)
zero_offset = sech(np.array([zeroed_width / 2]), amp, 0, sigma)
sech_samples -= zero_offset
amp_scale_factor = 1.0
if rescale_amp:
amp_scale_factor = amp / (amp - zero_offset) if amp - zero_offset != 0 else 1.0
sech_samples *= amp_scale_factor
if ret_scale_factor:
return sech_samples, amp_scale_factor
return sech_samples
def sech_fn(x, *args, **kwargs):
r"""Hyperbolic secant function"""
return 1.0 / np.cosh(x, *args, **kwargs)
def sech(
times: np.ndarray,
amp: complex,
center: float,
sigma: float,
zeroed_width: Optional[float] = None,
rescale_amp: bool = False,
ret_x: bool = False,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
r"""Continuous unnormalized sech pulse.
Args:
times: Times to output pulse for.
amp: Pulse amplitude at `center`.
center: Center (mean) of pulse.
sigma: Width (standard deviation) of pulse.
zeroed_width: Subtract baseline from pulse to make sure
$\Omega_g(center \pm zeroed_width/2)=0$ is satisfied. This is used to avoid
large discontinuities at the start and end of the pulse.
rescale_amp: If `zeroed_width` is not `None` and `rescale_amp=True` the pulse will
be rescaled so that $\Omega_g(center)=amp$.
ret_x: Return centered and standard deviation normalized pulse location.
$x=(times-center)/sigma$.
"""
times = np.asarray(times, dtype=np.complex_)
x = (times - center) / sigma
sech_out = amp * sech_fn(x).astype(np.complex_)
if zeroed_width is not None:
sech_out = _fix_sech_width(
sech_out,
amp=amp,
center=center,
sigma=sigma,
zeroed_width=zeroed_width,
rescale_amp=rescale_amp,
)
if ret_x:
return sech_out, x
return sech_out
def sech_deriv(
times: np.ndarray, amp: complex, center: float, sigma: float, ret_sech: bool = False
) -> np.ndarray:
"""Continuous unnormalized sech derivative pulse.
Args:
times: Times to output pulse for.
amp: Pulse amplitude at `center`.
center: Center (mean) of pulse.
sigma: Width (standard deviation) of pulse.
ret_sech: Return sech with which derivative was taken with.
"""
sech_out, x = sech(times, amp=amp, center=center, sigma=sigma, ret_x=True)
sech_out_deriv = -sech_out * np.tanh(x) / sigma
if ret_sech:
return sech_out_deriv, sech_out
return sech_out_deriv
def gaussian_square(
times: np.ndarray,
amp: complex,
center: float,
square_width: float,
sigma: float,
zeroed_width: Optional[float] = None,
) -> np.ndarray:
r"""Continuous gaussian square pulse.
Args:
times: Times to output pulse for.
amp: Pulse amplitude.
center: Center of the square pulse component.
square_width: Width of the square pulse component.
sigma: Standard deviation of Gaussian rise/fall portion of the pulse.
zeroed_width: Subtract baseline of gaussian square pulse
to enforce $\Omega_{square}(center \pm zeroed_width/2)=0$.
Raises:
PulseError: if zeroed_width is not compatible with square_width.
"""
square_start = center - square_width / 2
square_stop = center + square_width / 2
if zeroed_width:
if zeroed_width < square_width:
raise PulseError("zeroed_width cannot be smaller than square_width.")
gaussian_zeroed_width = zeroed_width - square_width
else:
gaussian_zeroed_width = None
funclist = [
functools.partial(
gaussian,
amp=amp,
center=square_start,
sigma=sigma,
zeroed_width=gaussian_zeroed_width,
rescale_amp=True,
),
functools.partial(
gaussian,
amp=amp,
center=square_stop,
sigma=sigma,
zeroed_width=gaussian_zeroed_width,
rescale_amp=True,
),
functools.partial(constant, amp=amp),
]
condlist = [times <= square_start, times >= square_stop]
return np.piecewise(times.astype(np.complex_), condlist, funclist)
def drag(
times: np.ndarray,
amp: complex,
center: float,
sigma: float,
beta: float,
zeroed_width: Optional[float] = None,
rescale_amp: bool = False,
) -> np.ndarray:
r"""Continuous Y-only correction DRAG pulse for standard nonlinear oscillator (SNO) [1].
[1] Gambetta, J. M., Motzoi, F., Merkel, S. T. & Wilhelm, F. K.
Analytic control methods for high-fidelity unitary operations
in a weakly nonlinear oscillator. Phys. Rev. A 83, 012308 (2011).
Args:
times: Times to output pulse for.
amp: Pulse amplitude at `center`.
center: Center (mean) of pulse.
sigma: Width (standard deviation) of pulse.
beta: Y correction amplitude. For the SNO this is $\beta=-\frac{\lambda_1^2}{4\Delta_2}$.
Here $\lambda_1$ is the relative coupling strength between the first excited and second
excited states and $\Delta_2$ is the detuning between the respective excited states.
zeroed_width: Subtract baseline of drag pulse to make sure
$\Omega_g(center \pm zeroed_width/2)=0$ is satisfied. This is used to avoid
large discontinuities at the start of a drag pulse.
rescale_amp: If `zeroed_width` is not `None` and `rescale_amp=True` the pulse will
be rescaled so that $\Omega_g(center)=amp$.
"""
gauss_deriv, gauss = gaussian_deriv(
times,
amp=amp,
center=center,
sigma=sigma,
ret_gaussian=True,
zeroed_width=zeroed_width,
rescale_amp=rescale_amp,
)
return gauss + 1j * beta * gauss_deriv
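# --- Usage sketch (added for illustration; not part of the original module) ---
# The parameter values below are arbitrary assumptions chosen only to show how
# the helpers above compose into a DRAG pulse.
if __name__ == "__main__":
    _demo_times = np.arange(0, 160)
    _demo_pulse = drag(_demo_times, amp=0.5, center=80.0, sigma=20.0, beta=0.2,
                       zeroed_width=160.0, rescale_amp=True)
    # Real part: baseline-subtracted, rescaled gaussian; imaginary part: beta times its derivative.
    print(_demo_pulse.shape, _demo_pulse.dtype)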
|
#!/usr/bin/env python3
'''
Copyright © 2020 Doug Eaton
USBDecode is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 2 of
the License, or (at your option) any later version.
usb_decode is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
'''
from enum import Enum
import fileinput
import re
import sys
wanted=["usb_d_n", "usb_d_p", "usb_tx_en", "usb_pullup"]
signals=[]
line=None
if __name__ == "__main__":
tb="tb"
# Expect the test bench to be declared as "module tb"
# Process headers first. Break and move to next phase after definitions
with fileinput.input() as vcd:
for line in vcd:
if line[0:6]=='$scope':
break
print(line, end='')
print("$scope module", tb, "$end")
for line in vcd:
word=re.findall(r"\S+", line)
if word[4] in wanted:
print(line, end='')
signals.append(word[3])
wanted.remove(word[4])
if len(wanted)==0:
break
if len(wanted)>0:
print("Not all signals found:", wanted, file=sys.stderr)
print("$upscope $end")
print("$enddefinitions $end")
for line in vcd:
line=line.rstrip()
if line[0]=='#':
# Avoid printing time if none of the signals cared about toggle
time=line
continue
if line[0]=='b':
if line[line.find(' ')+1:] in signals:
if time:
print(time)
time=None
print(line)
elif line[1:] in signals:
if time:
print(time)
time=None
print(line)
if time:
print(time)
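# Example invocation (illustrative; the script file name is hypothetical):
#   python3 vcd_usb_filter.py full_dump.vcd > usb_only.vcd
# fileinput also reads stdin, so `cat full_dump.vcd | python3 vcd_usb_filter.py`
# works as well; only the signals listed in `wanted` survive the filtering.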
|
import torch
import torch.distributed as dist
from .parallel_mode import ParallelMode
from typing import Tuple
def _check_sanity():
from colossalai.core import global_context as gpc
if gpc.tensor_parallel_size > 1 or gpc.pipeline_parallel_size > 1:
raise NotImplementedError("Moe is not compatible with tensor or "
"pipeline parallel at present.")
class MoeParallelInfo:
"""Moe parallelism information, storing parallel sizes and groups.
"""
def __init__(self, ep_size: int, dp_size: int):
_check_sanity()
self.ep_size = ep_size
self.dp_size = dp_size
self.ep_group = None
# Data parallel group for the experts; since ep_group differs from the global
# groups, this dp_group may differ from get_group(ParallelMode.DATA)
self.dp_group = None
# Here we assume tensor parallel size = 1
# Otherwise, MoE can't be used
# Since TENSOR parallel group and DATA parallel group
# have been created, we can use them directly.
if ep_size == 1:
from colossalai.core import global_context as gpc
self.ep_group = gpc.get_group(ParallelMode.TENSOR)
self.dp_group = gpc.get_group(ParallelMode.DATA)
return
if dp_size == 1:
from colossalai.core import global_context as gpc
self.ep_group = gpc.get_group(ParallelMode.DATA)
self.dp_group = gpc.get_group(ParallelMode.TENSOR)
return
rank = dist.get_rank()
# Create expert parallel group
for i in range(dp_size):
ranks = [i * ep_size + j for j in range(ep_size)]
group = dist.new_group(ranks)
if rank in ranks:
self.ep_group = group
# Create data parallel group
for j in range(ep_size):
ranks = [i * ep_size + j for i in range(dp_size)]
group = dist.new_group(ranks)
if rank in ranks:
self.dp_group = group
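# Worked example of the group layout above (illustrative, not executed):
# with ep_size=4 and dp_size=2 the expert parallel groups are [0, 1, 2, 3] and
# [4, 5, 6, 7], while the data parallel groups are [0, 4], [1, 5], [2, 6] and
# [3, 7]; every rank belongs to exactly one group of each kind.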
class MoeContext:
"""MoE parallel context manager. This class manages different
parallel groups in MoE context and MoE loss in training.
"""
__instance = None
@staticmethod
def get_instance():
if MoeContext.__instance is None:
MoeContext.__instance = MoeContext()
return MoeContext.__instance
def __init__(self):
self.world_size = 1
# Users may want to set maximum expert parallel size smaller than the world size
# since very low bandwidth across nodes may constrain the performance of MoE
# When we have a maximum expert parallel size, we have a minimum data parallel size naturally
self.max_ep_size = 1
self.min_dp_size = 1
self.aux_loss = None
self.use_kernel_optim = True
self.has_setup = False
self._parallel_info_dict = dict()
@property
def parallel_info_dict(self):
return self._parallel_info_dict
@property
def is_initialized(self):
return self.has_setup
def setup(self, seed: int, use_kernel_optim: bool = True):
assert not self.is_initialized, "MoE distributed context shouldn't be set up again"
_check_sanity()
assert torch.cuda.is_available(), "MoE requires to enable CUDA first"
self.world_size = dist.get_world_size()
from colossalai.core import global_context as gpc
self.max_ep_size = gpc.config.get('max_ep_size', self.world_size)
assert self.world_size % self.max_ep_size == 0, \
"Maximum epxert parallel size must be a factor of the number of GPUs"
self.min_dp_size = self.world_size // self.max_ep_size
# Enabling kernel optimization may raise error in some cases
# Users can close kernel optimization manually
self.use_kernel_optim = use_kernel_optim
from .random import moe_set_seed
moe_set_seed(seed)
self.has_setup = True
def get_info(self, num_experts: int) -> Tuple[int, MoeParallelInfo]:
"""Calculate the Data Parallel Group and Expert Parallel Group.
Parameters
----------
num_experts : int
The number of experts
Returns
-------
int, MoeParallelInfo
number of local experts, the MoeParallelInfo of the current ep_size
"""
gt_flag = num_experts % self.max_ep_size == 0 # check whether num_experts is greater
lt_flag = self.max_ep_size % num_experts == 0 # check whether num_experts is less
assert gt_flag or lt_flag, "Automatic expert placement does not support an expert count" \
" that is not a multiple of the ep size, or vice versa."
# If the number of experts is greater than the maximum expert parallel size (a.k.a. ep_size),
# each GPU holds multiple, different experts, so its data parallel size is 1.
# Otherwise, there is only one expert on each GPU and the data parallel size
# has to be calculated.
dp_size = 1 if gt_flag else self.max_ep_size // num_experts
ep_size = self.max_ep_size // dp_size
# Calculate the number of experts for each GPU
num_local_experts = 1 if lt_flag else num_experts // self.max_ep_size
# Don't forget to multiply minimum data parallel size
dp_size *= self.min_dp_size
if ep_size not in self.parallel_info_dict:
self.parallel_info_dict[ep_size] = MoeParallelInfo(ep_size, dp_size)
return num_local_experts, self.parallel_info_dict[ep_size]
def set_kernel_not_use(self):
self.use_kernel_optim = False
def reset_loss(self):
self.aux_loss = 0
def add_loss(self, loss):
self.aux_loss += loss
def get_loss(self):
return self.aux_loss
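# Worked example for get_info above (illustrative, not executed): with
# world_size=8 and max_ep_size=4 (so min_dp_size=2), get_info(num_experts=8)
# gives gt_flag=True, so ep_size=4, num_local_experts=8//4=2 and the final
# dp_size is 1*min_dp_size=2; get_info(num_experts=2) gives lt_flag=True, so
# ep_size=2, num_local_experts=1 and the final dp_size is 2*min_dp_size=4.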
|
import numpy as np
import cv2
import time
from grabscreen import grab_screen
import os
from alexnet import alexnet
from keys import key_check, PressKey, ReleaseKey, W, A, S, D
t_time = 0.09
def forward():
PressKey(W)
ReleaseKey(A)
ReleaseKey(D)
ReleaseKey(S)
def left():
PressKey(A)
PressKey(W)
ReleaseKey(D)
time.sleep(t_time)
ReleaseKey(A)
def right():
PressKey(W)
PressKey(D)
ReleaseKey(A)
time.sleep(t_time)
ReleaseKey(D)
def backward():
PressKey(S)
ReleaseKey(A)
ReleaseKey(D)
ReleaseKey(W)
WIDTH = 80
HEIGHT = 60
LR = 1e-3
EPOCH = 8
MODEL_NAME = 'models/car-ai-{}-{}-{}-epochs.model'.format(LR, 'alexnetv2', EPOCH)
model = alexnet(WIDTH, HEIGHT, LR)
model.load(MODEL_NAME)
print('Loading model %s...' % MODEL_NAME)
print('Starting in...')
for i in list(range(5))[::-1]:
print(i+1)
time.sleep(1)
last_time = time.time()
paused = False
while(True):
if not paused:
screen = np.array(grab_screen(region=(0,40,800,600)))
screen = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
screen = cv2.resize(screen, (80,60))
#newscreen, original_image, m1, m2 = process_img(screen)
print('FPS: %d Time: %.2f' %( 1/(time.time() - last_time), time.time() - last_time))
last_time = time.time()
prediction = model.predict([screen.reshape(WIDTH, HEIGHT, 1)])[0]
print(prediction)
turn_thresh = .75
fwd_thresh = 0.70
if prediction[0] > fwd_thresh:
forward()
elif prediction[1] > turn_thresh:
left()
elif prediction[3] > turn_thresh:
right()
elif prediction[2] > fwd_thresh:
backward()
else:
forward()
keys = key_check()
if 'P' in keys:
if paused:
paused = False
time.sleep(1)
else:
paused = True
ReleaseKey(A)
ReleaseKey(W)
ReleaseKey(D)
time.sleep(1)
elif 'X' in keys:
break
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import math
import paddle
import paddle.nn.functional as F
from paddle import ParamAttr
import paddle.nn as nn
from paddle.nn.initializer import KaimingNormal
from ppdet.core.workspace import register, serializable
from ppdet.modeling.layers import ConvNormLayer
from ..shape_spec import ShapeSpec
__all__ = ['BlazeNeck']
def hard_swish(x):
return x * F.relu6(x + 3) / 6.
class ConvBNLayer(nn.Layer):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
num_groups=1,
act='relu',
conv_lr=0.1,
conv_decay=0.,
norm_decay=0.,
norm_type='bn',
name=None):
super(ConvBNLayer, self).__init__()
self.act = act
self._conv = nn.Conv2D(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
groups=num_groups,
weight_attr=ParamAttr(
learning_rate=conv_lr,
initializer=KaimingNormal(),
name=name + "_weights"),
bias_attr=False)
param_attr = ParamAttr(name=name + "_bn_scale")
bias_attr = ParamAttr(name=name + "_bn_offset")
if norm_type == 'sync_bn':
self._batch_norm = nn.SyncBatchNorm(
out_channels, weight_attr=param_attr, bias_attr=bias_attr)
else:
self._batch_norm = nn.BatchNorm(
out_channels,
act=None,
param_attr=param_attr,
bias_attr=bias_attr,
use_global_stats=False,
moving_mean_name=name + '_bn_mean',
moving_variance_name=name + '_bn_variance')
def forward(self, x):
x = self._conv(x)
x = self._batch_norm(x)
if self.act == "relu":
x = F.relu(x)
elif self.act == "relu6":
x = F.relu6(x)
elif self.act == 'leaky':
x = F.leaky_relu(x)
elif self.act == 'hard_swish':
x = hard_swish(x)
return x
class FPN(nn.Layer):
def __init__(self, in_channels, out_channels, name=None):
super(FPN, self).__init__()
self.conv1_fpn = ConvBNLayer(
in_channels,
out_channels // 2,
kernel_size=1,
padding=0,
stride=1,
act='leaky',
name=name + '_output1')
self.conv2_fpn = ConvBNLayer(
in_channels,
out_channels // 2,
kernel_size=1,
padding=0,
stride=1,
act='leaky',
name=name + '_output2')
self.conv3_fpn = ConvBNLayer(
out_channels // 2,
out_channels // 2,
kernel_size=3,
padding=1,
stride=1,
act='leaky',
name=name + '_merge')
def forward(self, input):
output1 = self.conv1_fpn(input[0])
output2 = self.conv2_fpn(input[1])
up2 = F.upsample(
output2, size=paddle.shape(output1)[-2:], mode='nearest')
output1 = paddle.add(output1, up2)
output1 = self.conv3_fpn(output1)
return output1, output2
class SSH(nn.Layer):
def __init__(self, in_channels, out_channels, name=None):
super(SSH, self).__init__()
assert out_channels % 4 == 0
self.conv0_ssh = ConvBNLayer(
in_channels,
out_channels // 2,
kernel_size=3,
padding=1,
stride=1,
act=None,
name=name + 'ssh_conv3')
self.conv1_ssh = ConvBNLayer(
out_channels // 2,
out_channels // 4,
kernel_size=3,
padding=1,
stride=1,
act='leaky',
name=name + 'ssh_conv5_1')
self.conv2_ssh = ConvBNLayer(
out_channels // 4,
out_channels // 4,
kernel_size=3,
padding=1,
stride=1,
act=None,
name=name + 'ssh_conv5_2')
self.conv3_ssh = ConvBNLayer(
out_channels // 4,
out_channels // 4,
kernel_size=3,
padding=1,
stride=1,
act='leaky',
name=name + 'ssh_conv7_1')
self.conv4_ssh = ConvBNLayer(
out_channels // 4,
out_channels // 4,
kernel_size=3,
padding=1,
stride=1,
act=None,
name=name + 'ssh_conv7_2')
def forward(self, x):
conv0 = self.conv0_ssh(x)
conv1 = self.conv1_ssh(conv0)
conv2 = self.conv2_ssh(conv1)
conv3 = self.conv3_ssh(conv2)
conv4 = self.conv4_ssh(conv3)
concat = paddle.concat([conv0, conv2, conv4], axis=1)
return F.relu(concat)
@register
@serializable
class BlazeNeck(nn.Layer):
def __init__(self, in_channel, neck_type="None", data_format='NCHW'):
super(BlazeNeck, self).__init__()
self.neck_type = neck_type
self.return_input = False
self._out_channels = in_channel
if self.neck_type == 'None':
self.return_input = True
if "fpn" in self.neck_type:
self.fpn = FPN(self._out_channels[0],
self._out_channels[1],
name='fpn')
self._out_channels = [
self._out_channels[0] // 2, self._out_channels[1] // 2
]
if "ssh" in self.neck_type:
self.ssh1 = SSH(self._out_channels[0],
self._out_channels[0],
name='ssh1')
self.ssh2 = SSH(self._out_channels[1],
self._out_channels[1],
name='ssh2')
self._out_channels = [self._out_channels[0], self._out_channels[1]]
def forward(self, inputs):
if self.return_input:
return inputs
output1, output2 = None, None
if "fpn" in self.neck_type:
backout_4, backout_1 = inputs
output1, output2 = self.fpn([backout_4, backout_1])
if self.neck_type == "only_fpn":
return [output1, output2]
if self.neck_type == "only_ssh":
output1, output2 = inputs
feature1 = self.ssh1(output1)
feature2 = self.ssh2(output2)
return [feature1, feature2]
@property
def out_shape(self):
return [
ShapeSpec(channels=c)
for c in [self._out_channels[0], self._out_channels[1]]
]
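# Minimal usage sketch (illustrative; the channel sizes and neck_type value are
# assumptions, not taken from a real config):
#   neck = BlazeNeck(in_channel=[96, 96], neck_type="fpn_ssh")
#   feat1, feat2 = neck([backbone_feat_a, backbone_feat_b])
# neck_type "None" returns the inputs unchanged, "only_fpn" stops after the FPN,
# "only_ssh" applies the SSH blocks directly to the inputs, and a value containing
# both "fpn" and "ssh" chains the two; out_shape reports the resulting channels.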
|
from datetime import timedelta
from .activity import Activity
from .activity_helper import ActivityHelper
class ActivityStat:
"""Store and update the amount of time spent in a certain activity"""
def __init__(self, work_time: timedelta = timedelta(),
off_time: timedelta = timedelta()) -> None:
"""
Can be used for the first reading ApplicationInfo
from detailed/distracting lists when no activity has started yet
"""
self.work_time = work_time
self.off_time = off_time
@staticmethod
def from_activity(activity: Activity) -> 'ActivityStat':
"""For creating ActivityStat when Activity has already started"""
ActivityHelper.raise_if_not_finished(activity)
# NOTE: for distracting activity work_time is distracting time
if activity.is_work_time:
work_time = ActivityHelper.get_activity_time(activity)
off_time = timedelta()
else:
work_time = timedelta()
off_time = ActivityHelper.get_activity_time(activity)
return ActivityStat(work_time, off_time)
def update(self, activity: Activity) -> None:
ActivityHelper.raise_if_not_finished(activity)
if activity.is_work_time:
self.work_time += ActivityHelper.get_activity_time(activity)
else:
self.off_time += ActivityHelper.get_activity_time(activity)
def __eq__(self, other: object) -> bool:
"""
Overrides the default implementation
to use the object values instead of identifiers for comparison
"""
if not isinstance(other, ActivityStat):
return False
if self.work_time != other.work_time:
return False
return self.off_time == other.off_time
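# Minimal usage sketch (uses only the timedelta-based constructor defined above;
# `finished_activity` is a hypothetical, already finished Activity):
#   stat = ActivityStat(work_time=timedelta(minutes=30))
#   stat == ActivityStat(work_time=timedelta(minutes=30))  # True, value-based __eq__
#   stat.update(finished_activity)  # adds its duration to work_time or off_time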
|
from fabric.api import cd, env, lcd, local, hosts, prompt, run
from fabric.decorators import runs_once
import os
import time
env.runtime = 'production'
env.hosts = ['newchimera.readthedocs.com',
'newbuild.readthedocs.com',
'newasgard.readthedocs.com']
env.user = 'docs'
env.code_dir = '/home/docs/checkouts/readthedocs.org'
env.virtualenv = '/home/docs/'
env.rundir = '/home/docs/run'
fabfile_dir = os.path.dirname(__file__)
@hosts(['newchimera.readthedocs.com', 'newasgard.readthedocs.com'])
def remove_project(project):
run('rm -rf %s/user_builds/%s' % (env.code_dir, project))
def ntpdate():
run('ntpdate-debian')
def wheelhouse():
for host in ['newchimera.readthedocs.com', 'newasgard.readthedocs.com']:
run('rsync -av wheelhouse/ root@%s:/home/docs/checkouts/readthedocs.org/media/wheelhouse/' % host)
## Logging Awesomeness
@hosts(['newasgard.readthedocs.com', 'newchimera.readthedocs.com'])
def nginx_logs():
env.user = "root"
run("tail -F /var/log/nginx/*.log")
@hosts(['newbuild.readthedocs.com'])
def celery_logs():
env.user = "docs"
run("tail -F tail -f ~/log/celery.err")
@hosts(['newasgard.readthedocs.com', 'newchimera.readthedocs.com'])
def logs():
env.user = "docs"
run("tail -F %s/logs/*.log" % env.code_dir)
@hosts(['newasgard.readthedocs.com', 'newchimera.readthedocs.com'])
def postcommit_logs():
env.user = "docs"
run("tail -F %s/logs/postcommit.log" % env.code_dir)
@hosts(['newasgard.readthedocs.com', 'newchimera.readthedocs.com'])
def cat_postcommit_logs():
env.user = "docs"
run("cat %s/logs/postcommit.log" % env.code_dir)
@hosts(['newasgard.readthedocs.com', 'newchimera.readthedocs.com'])
def api_logs():
env.user = "docs"
run("tail -F %s/logs/api.log" % env.code_dir)
@hosts(['newasgard.readthedocs.com', 'newchimera.readthedocs.com'])
def web_logs(type):
"""
Get logs from the web servers::
fab -P web_logs:middleware
"""
env.user = "docs"
run("tail -F %s/logs/%s.log" % (env.code_dir, type))
## Normal bits
@hosts(['localhost'])
def i18n():
with lcd('readthedocs'):
local('rm -rf rtd_tests/tests/builds/')
local('tx pull')
local('./manage.py makemessages --all')
local('tx push -s')
local('./manage.py compilemessages')
@hosts(['localhost'])
def i18n_docs():
with lcd('docs'):
# Update our translations
local('tx pull -a')
local('sphinx-intl build')
# Push new ones
local('make gettext')
local('tx push -s')
def push():
"Push new code, but don't restart/reload."
local('git push origin master')
with cd(env.code_dir):
run('git fetch')
run('git reset --hard origin/master')
def update_requirements():
"Update requirements in the virtualenv."
run("%s/bin/pip install -r %s/deploy_requirements.txt" % (env.virtualenv, env.code_dir))
@hosts(['newchimera.readthedocs.com'])
def migrate(project=None):
if project:
run('django-admin.py migrate %s' % project)
else:
run('django-admin.py migrate')
@hosts(['newchimera.readthedocs.com'])
def syncdb(project=None):
run('django-admin.py syncdb')
@hosts(['newchimera.readthedocs.com', 'newasgard.readthedocs.com'])
def static():
"Restart (or just start) the server"
run('django-admin.py collectstatic --noinput')
@hosts(['newchimera.readthedocs.com', 'newasgard.readthedocs.com'])
def restart():
"Restart (or just start) the server"
env.user = "docs"
run("supervisorctl restart web")
#so it has time to reload
time.sleep(3)
@hosts(['newchimera.readthedocs.com', 'newasgard.readthedocs.com'])
def reload():
"Reload (or just start) the server"
run("supervisorctl update")
@hosts(['newbuild.readthedocs.com'])
def celery():
"Restart (or just start) the server"
run("supervisorctl restart celery")
def pull():
"Pull new code"
with cd(env.code_dir):
run('git fetch')
run('git reset --hard origin/master')
@runs_once
def spider():
local('patu.py -d1 readthedocs.org')
def _aws_wrapper(f, *args, **kwargs):
"get AWS credentials if not defined"
#these are normally defined in ~/.fabricrc
@hosts('run_once') # so fab doesn't go crazy
def wrapped(*args, **kwargs):
from boto.cloudfront.exception import CloudFrontServerError
from boto.cloudfront import CloudFrontConnection
# Prompt for any missing credentials before opening the connection that needs them
if not hasattr(env, 'aws_access_key_id'):
prompt('AWS Access Key ID: ', key='aws_access_key_id')
if not hasattr(env, 'aws_secret_access_key'):
prompt('AWS Secret Access Key: ', key='aws_secret_access_key')
c = CloudFrontConnection(env.aws_access_key_id,
env.aws_secret_access_key)
try:
return f(c, *args, **kwargs)
except CloudFrontServerError as e:
print "Error: \n", e.error_message
return wrapped
@_aws_wrapper
def to_cdn(c, slug):
"Create a new Distribution object on CloudFront"
from boto.cloudfront.origin import CustomOrigin
d = c.create_distribution(
origin=CustomOrigin(slug + '.cdn.readthedocs.org',
origin_protocol_policy='http-only'),
enabled=True,
comment='Slug: ' + slug,
cnames=[slug + '.readthedocs.org']
)
print "Created: " + d.domain_name + " for " + slug
list_cdn()
@_aws_wrapper
def list_cdn(c):
"List Distributions on CloudFront"
distributions = c.get_all_distributions()
for d in distributions:
print "%3s %4s %40s %30s" % ('Ena' if d.enabled else 'Dis',
d.status[:4], d.origin.dns_name,
d.domain_name)
@_aws_wrapper
def disable_cdn(c, *args):
"Sets a Distribution entry to disabled. Required before deletion."
distributions = c.get_all_distributions()
for distro in distributions:
dist_slug = distro.origin.dns_name.split('.')[0]
if dist_slug in args:
print "Disabling:", dist_slug
#this is broken as of boto 2.0b4.
#fix is to comment out lines 347-352 in cloudfront/distribution.py
distro.get_distribution().disable()
@_aws_wrapper
def delete_cdn(c):
"Deletes all Distributions in the 'Disabled' state."
distributions = c.get_all_distributions()
for distro in distributions:
if not distro.enabled and distro.status == "Deployed":
print "Deleting", distro.origin.dns_name
distro.get_distribution().delete()
def full_deploy():
#HACK
#Call this again at the top-level so the hosts decorator
#effects the hosts it runs against for each command.
run('fab push update_requirements migrate restart celery')
#push()
#update_requirements()
#migrate()
#restart()
#celery()
@hosts(['newchimera.readthedocs.com'])
def uptime():
run('uptime')
@hosts(['newchimera.readthedocs.com'])
def update_index():
run('django-admin.py update_index')
@hosts('None')
def update_theme():
theme_dir = os.path.join(fabfile_dir, 'readthedocs', 'templates', 'sphinx')
if not os.path.exists('/tmp/sphinx_rtd_theme'):
local('git clone https://github.com/snide/sphinx_rtd_theme.git /tmp/sphinx_rtd_theme')
with lcd('/tmp/sphinx_rtd_theme'):
local('git remote update')
local('git reset --hard origin/master ')
local('cp -r /tmp/sphinx_rtd_theme/sphinx_rtd_theme %s' % theme_dir)
local('cp -r /tmp/sphinx_rtd_theme/sphinx_rtd_theme/static/fonts/ %s' % os.path.join(fabfile_dir, 'media', 'font'))
local('cp /tmp/sphinx_rtd_theme/sphinx_rtd_theme/static/css/badge_only.css %s' % os.path.join(fabfile_dir, 'media', 'css'))
local('cp /tmp/sphinx_rtd_theme/sphinx_rtd_theme/static/css/theme.css %s' % os.path.join(fabfile_dir, 'media', 'css', 'sphinx_rtd_theme.css'))
|
#!/usr/bin/env python
# Copyright 2015 Criteo. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import re
import argparse
import yaml
from ciscoconfparse import CiscoConfParse
def cli_parser(argv=None):
parser = argparse.ArgumentParser(
description='Generating configuration commands by finding differences'
' between two Cisco IOS style configuration files')
parser.add_argument('--origin', metavar='origin',
type=str, help='Origin configuration file')
parser.add_argument('--target', metavar='target',
type=str, help='Target configuration file')
parser.add_argument('--vendor', help='Vendor or OS definition',
type=str, metavar='vendor')
parser.add_argument('--config', metavar='config',
type=str, help='config file name',
default='etc/netcompare.yml')
return parser.parse_args(argv)
def clean_line(line, vendor):
cleaned_lines = []
if vendor == 'tmsh':
# Remove text after a '#' (CiscoConfParse crashes if there is a
# bracket inside a comment)
remove_comment = re.search('(?P<before_comment>[^\#]*)\#', line)
if remove_comment:
line = remove_comment.group('before_comment')
# match " begin } end"
tmsh_curly_bracket_left = re.search(
'^(?P<space>\s*)(?P<begin>.*)'
'(?P<bracket>[\}\{])(?'
'P<end>[^\}\{]*)$',
line)
if tmsh_curly_bracket_left:
# replace
# " begin } end"
# by
# " begin }
# end
cleaned_lines = clean_line(tmsh_curly_bracket_left.
group('begin'), vendor)
cleaned_lines.append(tmsh_curly_bracket_left.group('bracket'))
cleaned_lines.append(tmsh_curly_bracket_left.group('end').
rstrip(' \t\r\n\0'))
else:
cleaned_lines.append(line.rstrip(' \t\r\n\0'))
else:
cleaned_lines.append(line.rstrip(' \t\r\n\0'))
return cleaned_lines
def clean_file(file, vendor, config):
with open(file) as file_opened:
lines = file_opened.readlines()
list_clean = []
if 'dont_compare' in config.get(vendor, {}):
for line in lines:
for dont_compare in config[vendor]['dont_compare']:
if dont_compare in line:
break
else:
list_clean = (list_clean +
clean_line(line, vendor))
return list_clean
else:
for line in lines:
list_clean = (list_clean +
clean_line(line, vendor))
return list_clean
def get_one_line(line, vendor, config):
if line[0] == 'NO':
line_text_no = re.match("^(\s*)" +
config[vendor]['no_command'] +
" (.*)", line[1])
if line_text_no:
cmd = (line_text_no.group(1) + line_text_no.group(2))
else:
line_text_without_no = re.match("^(\s*)(.*)", line[1])
cmd = (line_text_without_no.group(1) +
config[vendor]['no_command'] + " " +
line_text_without_no.group(2))
return cmd
else:
return line[1]
def get_diff_lines(d, vendor, config, depth=0):
result = []
for k, v in sorted(d.items(), key=lambda x: x[0]):
result.append(get_one_line(k, vendor, config))
result.extend(get_diff_lines(v, vendor, config, depth+1))
return result
def netcompare(origin, target, vendor, config):
origin_file = CiscoConfParse(origin,
comment=config[vendor]
['CiscoConfParse_comment'],
syntax=config[vendor]
['CiscoConfParse_syntax'],
factory=False)
target_file = CiscoConfParse(target,
comment=config[vendor]
['CiscoConfParse_comment'],
syntax=config[vendor]
['CiscoConfParse_syntax'],
factory=False)
result = {}
for line_origin in origin_file.objs:
eq_lines = (target_file.find_objects(
'^' + re.escape(line_origin.text) + '$'))
for line_target in eq_lines:
if line_origin.geneology_text == line_target.geneology_text:
break
else: # Delete needed
pointer = result
index = len(line_origin.geneology_text)
for cmd in line_origin.geneology_text:
index = index - 1
if ('NO', cmd) in pointer:
break
if ('_CR', cmd) in pointer:
pointer = pointer.get(('_CR', cmd))
elif index == 0:
pointer[('NO', cmd)] = {}
pointer = pointer.get(('NO', cmd))
else:
pointer[('_CR', cmd)] = {}
pointer = pointer.get(('_CR', cmd))
for line_target in target_file.objs:
find = 0
eq_lines = (origin_file.find_objects(
'^' + re.escape(line_target.text) + '$'))
for line_origin in eq_lines:
if line_origin.geneology_text == line_target.geneology_text:
find = 1
if find == 0: # Create needed
pointer = result
for cmd in line_target.geneology_text:
if not ('_CR', cmd) in pointer:
pointer[('_CR', cmd)] = {}
pointer = pointer.get(('_CR', cmd))
return result
def main(argv=None):
args = cli_parser(argv)
with open(args.config, 'r') as f:
config = yaml.load(f)
origin_list = clean_file(args.origin, args.vendor, config)
target_list = clean_file(args.target, args.vendor, config)
display_commands = netcompare(origin_list,
target_list, args.vendor, config)
result = get_diff_lines(display_commands, args.vendor, config)
for line in result:
print line
if __name__ == '__main__':
main()
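# Example invocation (illustrative file names; the --vendor value must be a key
# present in the YAML file passed via --config):
#   python netcompare.py --origin running.cfg --target intended.cfg \
#       --vendor ios --config etc/netcompare.yml
# The output is the list of commands needed to turn the origin configuration
# into the target one.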
|
version https://git-lfs.github.com/spec/v1
oid sha256:6c0828479c4167ae77b4de1f98c39749190ceaf383b6046097cfd212b12804de
size 3101
|
# Complete project details at https://RandomNerdTutorials.com
import socket
import network
import machine, onewire, ds18x20, time, gc
sta_if = network.WLAN(network.STA_IF)
print(sta_if.ifconfig())
ds_pin = machine.Pin(4)
ds_sensor = ds18x20.DS18X20(onewire.OneWire(ds_pin))
def read_ds_sensor():
roms = ds_sensor.scan()
print('Found DS devices: ', roms)
print('Temperatures: ')
ds_sensor.convert_temp()
for rom in roms:
temp = ds_sensor.read_temp(rom)
if isinstance(temp, float):
msg = round(temp, 2)
print(temp, end=' ')
print('Valid temperature')
return msg
return 0.0  # fall back to a float so the Fahrenheit conversion below still works
def web_page():
temp = read_ds_sensor()
html = """<!DOCTYPE HTML><html><head>
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta http-equiv="refresh" content="60">
<link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.7.2/css/all.css" integrity="sha384-fnmOCqbTlWIlj8LyTjo7mOUStjsKC4pOpQbqyi7RrhN7udi9RwhKkMHpvLbHG9Sr" crossorigin="anonymous">
<style> html { font-family: Arial; display: inline-block; margin: 0px auto; text-align: center; }
h2 { font-size: 3.0rem; } p { font-size: 3.0rem; } .units { font-size: 1.2rem; }
.ds-labels{ font-size: 1.5rem; vertical-align:middle; padding-bottom: 15px; }
</style></head><body><h2>ESP with DS18B20</h2>
<p><i class="fas fa-thermometer-half" style="color:#059e8a;"></i>
<span class="ds-labels">Temperature</span>
<span id="temperature">""" + str(temp) + """</span>
<sup class="units">°C</sup>
</p>
<p><i class="fas fa-thermometer-half" style="color:#059e8a;"></i>
<span class="ds-labels">Temperature</span>
<span id="temperature">""" + str(round(temp * (9/5) + 32.0, 2)) + """</span>
<sup class="units">°F</sup>
</p></body></html>"""
return html
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 80))
s.listen(5)
while True:
try:
if gc.mem_free() < 102000:
gc.collect()
conn, addr = s.accept()
conn.settimeout(3.0)
print('Got a connection from %s' % str(addr))
request = conn.recv(1024)
conn.settimeout(None)
request = str(request)
print('Content = %s' % request)
response = web_page()
conn.send('HTTP/1.1 200 OK\n')
conn.send('Content-Type: text/html\n')
conn.send('Connection: close\n\n')
conn.sendall(response)
conn.close()
except OSError as e:
conn.close()
print('Connection closed')
|
from molsysmt import puw
from molsysmt.basic import convert, select, get
from molsysmt._private_tools._digestion import digest_engine, digest_target
import numpy as np
def get_sasa (molecular_system, target='atom', selection='all', frame_indices='all', syntaxis='MolSysMT',
engine='MDTraj'):
engine = digest_engine(engine)
target = digest_target(target)
if engine == 'MDTraj':
from mdtraj import shrake_rupley
tmp_item = convert(molecular_system, frame_indices=frame_indices, to_form='mdtraj.Trajectory')
sasa_array = shrake_rupley(tmp_item, mode='atom') # also accepts probe_radius and n_sphere_points
if target=='atom':
if selection != 'all':
atom_indices = select(molecular_system, selection=selection, syntaxis=syntaxis)
sasa_array = sasa_array[:,atom_indices]
else:
sets_atoms = get(molecular_system, target=target, selection=selection, syntaxis=syntaxis, atom_index=True)
n_sets = len(sets_atoms)
n_frames = sasa_array.shape[0]
new_sasa_array = np.empty([n_frames, n_sets], dtype='float')
for ii in range(n_sets):
new_sasa_array[:,ii] = sasa_array[:,sets_atoms[ii].astype(int)].sum(axis=1)
sasa_array = new_sasa_array
sasa_array = puw.quantity(sasa_array, 'nm**2')
sasa_array = puw.standardize(sasa_array)
else:
raise NotImplementedError("Engine not implemented yet")
return sasa_array
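# Minimal usage sketch (illustrative; `molecular_system` is any form MolSysMT
# can convert to an mdtraj.Trajectory):
#   sasa = get_sasa(molecular_system, target='atom', selection='all')
#   # -> standardized puw quantity in nm**2 with shape [n_frames, n_atoms]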
|
# Graph (vertices and edges) manipulation class.
# Vertex and Edge definition customized for our need.
# Dijkstra stolen at https://www.bogotobogo.com/python/python_graph_data_structures.php
#
import logging
import math
import json
from functools import reduce
from .geo import Point, Line, Polygon, distance, nearestPointToLines, destination, pointInPolygon
from .globals import TAXIWAY_DIR_TWOWAY, DEPARTURE, ARRIVAL
class Vertex(Point): ## Vertex(Point)
def __init__(self, node, point, usage, name=""):
Point.__init__(self, point.lat, point.lon)
self.id = node
self.usage = usage
self.name = name
self.adjacent = {}
self.setProp("vid", node) # vertex id
def props(self):
self.setProp("marker-color", "#888888") # “dest”, “init”, “both” or “junc”
if self.usage == "dest":
self.setProp("marker-color", "#00aa00")
elif self.usage == "init":
self.setProp("marker-color", "#aa0000")
elif self.usage == "both":
self.setProp("marker-color", "#0000aa")
self.setProp("id", self.id)
self.setProp("use", self.usage)
self.setProp("name", self.name)
self.setProp("marker-size", "small")
return self.properties
def add_neighbor(self, neighbor, weight=0):
self.adjacent[neighbor] = weight
def get_connections(self, graph, options = {}):
return self.adjacent.keys()
class Active:
def __init__(self, active, runways):
self.active = active
self.runways = runways.split(",")
def __str__(self):
return self.active + ":" + ",".join(self.runways)
class Edge(Line):
def __init__(self, src, dst, cost, direction, usage, name):
Line.__init__(self, src, dst)
self.cost = cost # cost = distance to the next vertex
self.direction = direction # direction of edge: oneway or twoway
self.usage = usage # type of edge: runway or taxiway or taxiway_X where X is width code (A-F)
self.name = name # segment name, not unique!
self.active = [] # array of segment activity, activity can be departure, arrival, or ils.
# departure require clearance. plane cannot stop on segment of type ils.
def props(self):
props = self.properties
props["stroke"] = "#aa0000" # “taxiway”, “runway”
props["stroke-width"] = 1
props["stroke-opacity"] = 1
if self.usage[0:4] == "taxi":
props["stroke"] = "#aaaa00"
if self.direction == "oneway":
props["stroke"] = "#00dd00"
props["stroke-width"] = 2
props["name"] = self.name
props["cost"] = self.cost
props["direction"] = self.direction
props["usage"] = self.usage
props["active"] = self.mkActives()
return props
def mkActives(self):
ret = []
for a in self.active:
ret.append({
a.active: ",".join(a.runways) # "ils": "12L,30R"
})
return ret
def add_active(self, active, runways):
return self.active.append(Active(active, runways))
def has_active(self, active=None):
if active:
for a in self.active:
if a.active == active:
return True
return False
return self.has_active(DEPARTURE) or self.has_active(ARRIVAL)
def widthCode(self, default=None):
if self.usage and len(self.usage) == 9:
return self.usage[8]
return default
class Graph: # Graph(FeatureCollection)?
def __init__(self):
self.vert_dict = {}
self.edges_arr = []
def __str__(self):
return json.dumps({
"type": "FeatureCollection",
"features": self.features()
})
def __iter__(self):
return iter(self.vert_dict.values())
def features(self):
def add(arr, v):
arr.append(v.feature(self))
return arr
return reduce(add, self.edges_arr, [])
def add_vertex(self, node, point, usage, name = ""):
new_vertex = Vertex(node, point, usage, name=name)
self.vert_dict[node] = new_vertex
return new_vertex
def get_vertex(self, n):
if n in self.vert_dict:
return self.vert_dict[n]
else:
return None
# Options taxiwayOnly = True|False, minSizeCode = {A,B,C,D,E,F}
def get_connections(self, src, options={}):
if len(options) > 0:
connectionKeys = []
for dst in src.adjacent.keys():
v = self.get_edge(src.id, dst)
code = v.widthCode("F") # "F" is only a default code, if provided the edge uses its own
txyOk = ("taxiwayOnly" in options and options["taxiwayOnly"] and v.usage != "runway") or ("taxiwayOnly" not in options)
scdOk = ("minSizeCode" in options and options["minSizeCode"] <= code) or ("minSizeCode" not in options)
# logging.debug("%s %s %s %s %s %s" % (dst, v.usage, code, txyOk, scdOk, v.cost))
if txyOk and scdOk:
connectionKeys.append(dst)
return connectionKeys
return src.adjacent.keys()
def add_edge(self, edge):
if edge.start.id in self.vert_dict and edge.end.id in self.vert_dict:
self.edges_arr.append(edge)
self.vert_dict[edge.start.id].add_neighbor(self.vert_dict[edge.end.id].id, edge.cost)
if(edge.direction == TAXIWAY_DIR_TWOWAY):
self.vert_dict[edge.end.id].add_neighbor(self.vert_dict[edge.start.id].id, edge.cost)
else:
logging.critical("Graph::add_edge: vertex not found when adding edges %s,%s", edge.src, edge.dst)
def get_edge(self, src, dst):
arr = list(filter(lambda x: x.start.id == src and x.end.id == dst, self.edges_arr))
if len(arr) > 0:
return arr[0]
arr = list(filter(lambda x: x.start.id == dst and x.end.id == src and x.direction == TAXIWAY_DIR_TWOWAY, self.edges_arr))
if len(arr) > 0:
return arr[0]
return None
def get_vertices(self):
return self.vert_dict.keys()
def get_connected_vertices(self, options={}):
# List of vertices may contain unconnected vertices.
# Same options as get_connections
connected = []
for edge in self.edges_arr:
code = edge.widthCode("F") # default all ok.
txyOk = ("taxiwayOnly" in options and options["taxiwayOnly"] and edge.usage != "runway") or ("taxiwayOnly" not in options)
scdOk = ("minSizeCode" in options and options["minSizeCode"] <= code) or ("minSizeCode" not in options)
# logging.debug("%s %s %s %s %s" % (dst, v.usage, code, txyOk, scdOk))
if txyOk and scdOk:
if edge.start.id not in connected:
connected.append(edge.start.id)
if edge.end.id not in connected:
connected.append(edge.end.id)
return connected
def findClosestPointOnEdges(self, point): # @todo: construct array of lines on "add_edge"
return nearestPointToLines(point, self.edges_arr)
def findClosestVertex(self, point):
closest = None
shortest = math.inf
for n, v in self.vert_dict.items():
if len(v.adjacent) > 0: # It must be a vertex connected to the network of taxiways
d = distance(v, point)
if d < shortest:
shortest = d
closest = n
logging.debug("Graph::findClosestVertex: %s at %f", closest, shortest)
return [closest, shortest]
def findVertexInPolygon(self, polygon):
vertices = []
for n, v in self.vert_dict.items():
if pointInPolygon(v, polygon):
vertices.append(v)
return vertices
def findClosestVertexAheadGuess(self, point, brng, speed):
MAX_AHEAD = 500 # m, we could make algorithm grow these until vertex found "ahead"
MAX_LATERAL = 200 # m
AHEAD_START = 300
LATERAL_START = 40
AHEAD_INC = 100
LATERAL_INC = 20
found = [None]
ahead = AHEAD_START
lateral = LATERAL_START
while not found[0] and ahead < MAX_AHEAD:
while not found[0] and lateral < MAX_LATERAL:
found = self.findClosestVertexAhead(point, brng, speed, ahead, lateral)
lateral += LATERAL_INC
ahead += AHEAD_INC
lateral = LATERAL_START
logging.debug("Graph::findClosestVertexAheadGuess: found at ahead=%d, lateral=%d.", ahead, lateral)
return found
def findClosestVertexAhead(self, point, brng, speed, ahead=200, lateral=100):
# We draw a triangle in front of the plane, plane is at apex, base is AHEAD meters in front (bearing)
# and LATERAL meters wide left and right.
# Should set maxahead from speed, if fast, maxahead large.
MAX_AHEAD = 200 # m
maxpoint = destination(point, brng, MAX_AHEAD)
base = destination(point, brng, ahead)
baseL = destination(base, brng + 90, lateral)
baseR = destination(base, brng - 90, lateral)
triangle = Polygon([point, baseL, baseR])
vertices = self.findVertexInPolygon(triangle)
logging.debug("Graph::findClosestVertexAhead: %d, %d, inside %s.", ahead, lateral, len(vertices))
v = None
d = math.inf
if len(vertices) > 0:
for vertex in vertices:
dist = math.inf
if ahead > MAX_AHEAD:
dist = distance(maxpoint, vertex) # uses maxpoint rather than point ;-)
else:
dist = distance(base, vertex) # uses base rather than point ;-)
if dist < d:
d = dist
v = vertex
if v:
return [v.id, d]
return [None, d]
def Dijkstra(self, source, target, options={}):
# This will store the Shortest path between source and target node
route = []
if not source or not target:
logging.debug("Graph::Dijkstra: source or target missing")
return route
# These are all the nodes which have not been visited yet
unvisited_nodes = list(self.get_vertices())
# logging.debug("Unvisited nodes", unvisited_nodes)
# It will store the shortest distance from one node to another
shortest_distance = {}
# It will store the predecessors of the nodes
predecessor = {}
# Iterating through all the unvisited nodes
for nodes in unvisited_nodes:
# Setting the shortest_distance of all the nodes as infinity
shortest_distance[nodes] = math.inf
# The distance of a point to itself is 0.
shortest_distance[str(source)] = 0
# Running the loop while all the nodes have been visited
while(unvisited_nodes):
# setting the value of min_node as None
min_node = None
# iterating through all the unvisited node
for current_node in unvisited_nodes:
# For the very first time that loop runs this will be called
if min_node is None:
# Setting the value of min_node as the current node
min_node = current_node
elif shortest_distance[min_node] > shortest_distance[current_node]:
# If current_node has a smaller tentative distance than min_node,
# make it the new min_node
min_node = current_node
# Iterating through the connected nodes of current_node (for
# example, a is connected with b and c having values 10 and 3
# respectively) and the weight of the edges
connected = self.get_connections(self.get_vertex(min_node), options)
# logging.debug("connected %s %s", min_node, connected)
for child_node in connected:
e = self.get_edge(min_node, child_node) # should always be found...
cost = e.cost
# checking if the value of the current_node + value of the edge
# that connects this neighbor node with current_node
# is lesser than the value that distance between current nodes
# and its connections
#
if (cost + shortest_distance[min_node]) < shortest_distance[child_node]:
# If true set the new value as the minimum distance of that connection
shortest_distance[child_node] = cost + shortest_distance[min_node]
# Adding the current node as the predecessor of the child node
predecessor[child_node] = min_node
# After the node has been visited (also known as relaxed) remove it from unvisited node
unvisited_nodes.remove(min_node)
# Till now the shortest distance between the source node and target node
# has been found. Set the current node as the target node
node = target
# Starting from the goal node, we will go back to the source node and
# see what path we followed to get the smallest distance
# logging.debug("predecessor %s", predecessor)
while node and node != source and len(predecessor.keys()) > 0:
# The target node may not be reachable from the source node, so check membership in predecessor instead of assuming a path exists
route.insert(0,node)
if node in predecessor:
node = predecessor[node]
else:
node = False
if not node:
logging.debug("Graph::Dijkstra: could not find route from %s to %s", source, target)
return None
else:
# Including the source in the path
route.insert(0, source)
logging.debug("Graph::Dijkstra: route: %s", "-".join(route))
return route
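# Minimal usage sketch (illustrative; assumes geo.Point(lat, lon) and string ids):
#   g = Graph()
#   a = g.add_vertex("1", Point(43.600, 1.360), "both", "A1")
#   b = g.add_vertex("2", Point(43.601, 1.362), "both", "A2")
#   g.add_edge(Edge(a, b, cost=120.0, direction=TAXIWAY_DIR_TWOWAY,
#                   usage="taxiway_C", name="A"))
#   g.Dijkstra("1", "2", options={"taxiwayOnly": True, "minSizeCode": "C"})
#   # -> ["1", "2"]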
|
# coding=UTF-8
'''Helper functions
Consists of functions to typically be used within templates, but also
available to Controllers. This module is available to templates as 'h'.
'''
import email.utils
import datetime
import logging
import re
import os
import urllib
import urlparse
import pprint
import copy
from urllib import urlencode
from paste.deploy.converters import asbool
from webhelpers.html import escape, HTML, literal, url_escape
from webhelpers.html.tools import mail_to
from webhelpers.html.tags import *
from webhelpers.markdown import markdown
from webhelpers import paginate
from webhelpers.text import truncate
import webhelpers.date as date
from pylons import url as _pylons_default_url
from pylons.decorators.cache import beaker_cache
from pylons import config
from routes import redirect_to as _redirect_to
from routes import url_for as _routes_default_url_for
from alphabet_paginate import AlphaPage
import i18n
import ckan.exceptions
import ckan.lib.fanstatic_resources as fanstatic_resources
import ckan.model as model
import ckan.lib.formatters as formatters
import ckan.lib.maintain as maintain
import ckan.lib.datapreview as datapreview
import ckan.logic as logic
import ckan.lib.uploader as uploader
import ckan.new_authz as new_authz
from ckan.common import (
_, ungettext, g, c, request, session, json, OrderedDict
)
get_available_locales = i18n.get_available_locales
get_locales_dict = i18n.get_locales_dict
log = logging.getLogger(__name__)
def _datestamp_to_datetime(datetime_):
''' Converts a datestamp to a datetime. If a datetime is provided it
just gets returned.
:param datetime_: the timestamp
:type datetime_: string or datetime
:rtype: datetime
'''
if isinstance(datetime_, basestring):
try:
datetime_ = date_str_to_datetime(datetime_)
except TypeError:
return None
except ValueError:
return None
# check we are now a datetime
if not isinstance(datetime_, datetime.datetime):
return None
return datetime_
def redirect_to(*args, **kw):
'''Issue a redirect: return an HTTP response with a ``302 Moved`` header.
This is a wrapper for :py:func:`routes.redirect_to` that maintains the
user's selected language when redirecting.
The arguments to this function identify the route to redirect to, they're
the same arguments as :py:func:`ckan.plugins.toolkit.url_for` accepts,
for example::
import ckan.plugins.toolkit as toolkit
# Redirect to /dataset/my_dataset.
toolkit.redirect_to(controller='package', action='read',
id='my_dataset')
Or, using a named route::
toolkit.redirect_to('dataset_read', id='changed')
'''
kw['__ckan_no_root'] = True
if are_there_flash_messages():
kw['__no_cache__'] = True
return _redirect_to(url_for(*args, **kw))
def url(*args, **kw):
'''Create url adding i18n information if selected
wrapper for pylons.url'''
locale = kw.pop('locale', None)
my_url = _pylons_default_url(*args, **kw)
return _add_i18n_to_url(my_url, locale=locale, **kw)
def url_for(*args, **kw):
'''Return the URL for the given controller, action, id, etc.
Usage::
import ckan.plugins.toolkit as toolkit
url = toolkit.url_for(controller='package', action='read',
id='my_dataset')
=> returns '/dataset/my_dataset'
Or, using a named route::
toolkit.url_for('dataset_read', id='changed')
This is a wrapper for :py:func:`routes.url_for` that adds some extra
features that CKAN needs.
'''
locale = kw.pop('locale', None)
# remove __ckan_no_root and add after to not pollute url
no_root = kw.pop('__ckan_no_root', False)
# routes will get the wrong url for APIs if the ver is not provided
if kw.get('controller') == 'api':
ver = kw.get('ver')
if not ver:
raise Exception('api calls must specify the version! e.g. ver=3')
# fix ver to include the slash
kw['ver'] = '/%s' % ver
my_url = _routes_default_url_for(*args, **kw)
kw['__ckan_no_root'] = no_root
return _add_i18n_to_url(my_url, locale=locale, **kw)
def url_for_static(*args, **kw):
'''Returns the URL for static content that doesn't get translated (eg CSS)
It'll raise CkanUrlException if called with an external URL
This is a wrapper for :py:func:`routes.url_for`
'''
if args:
url = urlparse.urlparse(args[0])
url_is_external = (url.scheme != '' or url.netloc != '')
if url_is_external:
CkanUrlException = ckan.exceptions.CkanUrlException
raise CkanUrlException('External URL passed to url_for_static()')
return url_for_static_or_external(*args, **kw)
def url_for_static_or_external(*args, **kw):
'''Returns the URL for static content that doesn't get translated (eg CSS),
or external URLs
This is a wrapper for :py:func:`routes.url_for`
'''
def fix_arg(arg):
url = urlparse.urlparse(str(arg))
url_is_relative = (url.scheme == '' and url.netloc == '' and
not url.path.startswith('/'))
if url_is_relative:
return '/' + url.geturl()
return url.geturl()
if args:
args = (fix_arg(args[0]), ) + args[1:]
my_url = _routes_default_url_for(*args, **kw)
return my_url
def is_url(*args, **kw):
'''
Returns True if argument parses as a http, https or ftp URL
'''
if not args:
return False
try:
url = urlparse.urlparse(args[0])
except ValueError:
return False
valid_schemes = ('http', 'https', 'ftp')
return url.scheme in valid_schemes
def _add_i18n_to_url(url_to_amend, **kw):
# If the locale keyword param is provided then the url is rewritten
# using that locale. If return_to is provided this is used as the url
# (as part of the language changing feature).
# A locale of default will not add locale info to the url.
default_locale = False
locale = kw.pop('locale', None)
no_root = kw.pop('__ckan_no_root', False)
allowed_locales = ['default'] + i18n.get_locales()
if locale and locale not in allowed_locales:
locale = None
if locale:
if locale == 'default':
default_locale = True
else:
try:
locale = request.environ.get('CKAN_LANG')
default_locale = request.environ.get('CKAN_LANG_IS_DEFAULT', True)
except TypeError:
default_locale = True
try:
root = request.environ.get('SCRIPT_NAME', '')
except TypeError:
root = ''
if kw.get('qualified', False):
# if qualified is given we want the full url ie http://...
root = _routes_default_url_for('/', qualified=True)[:-1]
# ckan.root_path is defined when we have a non-standard language
# position in the url
root_path = config.get('ckan.root_path', None)
if root_path:
# FIXME this can be written better once the merge
# into the ecportal core is done - Toby
# we have a special root specified so use that
if default_locale:
root = re.sub('/{{LANG}}', '', root_path)
else:
root = re.sub('{{LANG}}', locale, root_path)
# make sure we don't have a trailing / on the root
if root[-1] == '/':
root = root[:-1]
url = url_to_amend[len(re.sub('/{{LANG}}', '', root_path)):]
url = '%s%s' % (root, url)
root = re.sub('/{{LANG}}', '', root_path)
else:
if default_locale:
url = url_to_amend
else:
# we need to strip the root from the url and the add it before
# the language specification.
url = url_to_amend[len(root):]
url = '%s/%s%s' % (root, locale, url)
# stop the root being added twice in redirects
if no_root:
url = url_to_amend[len(root):]
if not default_locale:
url = '/%s%s' % (locale, url)
if url == '/packages':
error = 'There is a broken url being created %s' % kw
raise ckan.exceptions.CkanUrlException(error)
return url
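# Worked example (illustrative): with root == '' and locale == 'fr',
# '/dataset/my_dataset' is rewritten to '/fr/dataset/my_dataset'; with
# locale == 'default' the url is returned unchanged.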
def url_is_local(url):
'''Returns True if url is local'''
if not url or url.startswith('//'):
return False
parsed = urlparse.urlparse(url)
if parsed.scheme:
domain = urlparse.urlparse(url_for('/', qualified=True)).netloc
if domain != parsed.netloc:
return False
return True
def full_current_url():
''' Returns the fully qualified current url (eg http://...) useful
for sharing etc '''
return (url_for(request.environ['CKAN_CURRENT_URL'], qualified=True))
def lang():
''' Return the language code for the current locale eg `en` '''
return request.environ.get('CKAN_LANG')
def lang_native_name(lang=None):
''' Return the language name currently used in its localised form,
either from the parameter or the current environ setting'''
lang = lang or request.environ.get('CKAN_LANG')
locale = get_locales_dict().get(lang)
if locale:
return locale.display_name or locale.english_name
return lang
class Message(object):
'''A message returned by ``Flash.pop_messages()``.
Converting the message to a string returns the message text. Instances
also have the following attributes:
* ``message``: the message text.
* ``category``: the category specified when the message was created.
'''
def __init__(self, category, message, allow_html):
self.category = category
self.message = message
self.allow_html = allow_html
def __str__(self):
return self.message
__unicode__ = __str__
def __html__(self):
if self.allow_html:
return self.message
else:
return escape(self.message)
class _Flash(object):
# List of allowed categories. If None, allow any category.
categories = ["", "alert-info", "alert-error", "alert-success"]
# Default category if none is specified.
default_category = ""
def __init__(self, session_key="flash", categories=None,
default_category=None):
self.session_key = session_key
if categories is not None:
self.categories = categories
if default_category is not None:
self.default_category = default_category
if self.categories and self.default_category not in self.categories:
raise ValueError("unrecognized default category %r"
% (self.default_category, ))
def __call__(self, message, category=None, ignore_duplicate=False,
allow_html=False):
if not category:
category = self.default_category
elif self.categories and category not in self.categories:
raise ValueError("unrecognized category %r" % (category, ))
# Don't store Message objects in the session, to avoid unpickling
# errors in edge cases.
new_message_tuple = (category, message, allow_html)
messages = session.setdefault(self.session_key, [])
# ``messages`` is a mutable list, so changes to the local variable are
# reflected in the session.
if ignore_duplicate:
for i, m in enumerate(messages):
if m[1] == message:
if m[0] != category:
messages[i] = new_message_tuple
session.save()
return # Original message found, so exit early.
messages.append(new_message_tuple)
session.save()
def pop_messages(self):
messages = session.pop(self.session_key, [])
# only save session if it has changed
if messages:
session.save()
return [Message(*m) for m in messages]
def are_there_messages(self):
return bool(session.get(self.session_key))
flash = _Flash()
# this is here for backwards compatibility
_flash = flash
def flash_notice(message, allow_html=False):
''' Show a flash message of type notice '''
flash(message, category='alert-info', allow_html=allow_html)
def flash_error(message, allow_html=False):
''' Show a flash message of type error '''
flash(message, category='alert-error', allow_html=allow_html)
def flash_success(message, allow_html=False):
''' Show a flash message of type success '''
flash(message, category='alert-success', allow_html=allow_html)
def are_there_flash_messages():
''' Returns True if there are flash messages for the current user '''
return flash.are_there_messages()
def _link_active(kwargs):
''' creates classes for the link_to calls '''
highlight_actions = kwargs.get('highlight_actions',
kwargs.get('action', '')).split(' ')
return (c.controller == kwargs.get('controller')
and c.action in highlight_actions)
def _link_to(text, *args, **kwargs):
'''Common link making code for several helper functions'''
assert len(args) < 2, 'Too many unnamed arguments'
def _link_class(kwargs):
''' creates classes for the link_to calls '''
suppress_active_class = kwargs.pop('suppress_active_class', False)
if not suppress_active_class and _link_active(kwargs):
active = ' active'
else:
active = ''
kwargs.pop('highlight_actions', '')
return kwargs.pop('class_', '') + active or None
def _create_link_text(text, **kwargs):
        ''' Update link text to add an icon or span if specified in the
kwargs '''
if kwargs.pop('inner_span', None):
text = literal('<span>') + text + literal('</span>')
if icon:
text = literal('<i class="icon-%s"></i> ' % icon) + text
return text
icon = kwargs.pop('icon', None)
class_ = _link_class(kwargs)
return link_to(
_create_link_text(text, **kwargs),
url_for(*args, **kwargs),
class_=class_
)
def nav_link(text, *args, **kwargs):
'''
:param class_: pass extra class(es) to add to the ``<a>`` tag
:param icon: name of ckan icon to use within the link
:param condition: if ``False`` then no link is returned
'''
if len(args) > 1:
raise Exception('Too many unnamed parameters supplied')
if args:
        kwargs['controller'] = args[0]
log.warning('h.nav_link() please supply controller as a named '
'parameter not a positional one')
named_route = kwargs.pop('named_route', '')
if kwargs.pop('condition', True):
if named_route:
link = _link_to(text, named_route, **kwargs)
else:
link = _link_to(text, **kwargs)
else:
link = ''
return link
@maintain.deprecated('h.nav_named_link is deprecated please '
'use h.nav_link\nNOTE: you will need to pass the '
'route_name as a named parameter')
def nav_named_link(text, named_route, **kwargs):
'''Create a link for a named route.
Deprecated in ckan 2.0 '''
return nav_link(text, named_route=named_route, **kwargs)
@maintain.deprecated('h.subnav_link is deprecated please '
'use h.nav_link\nNOTE: if action is passed as the second '
'parameter make sure it is passed as a named parameter '
'eg. `action=\'my_action\'')
def subnav_link(text, action, **kwargs):
'''Create a link for a named route.
Deprecated in ckan 2.0 '''
kwargs['action'] = action
return nav_link(text, **kwargs)
@maintain.deprecated('h.subnav_named_route is deprecated please '
'use h.nav_link\nNOTE: you will need to pass the '
'route_name as a named parameter')
def subnav_named_route(text, named_route, **kwargs):
'''Generate a subnav element based on a named route
Deprecated in ckan 2.0 '''
return nav_link(text, named_route=named_route, **kwargs)
def build_nav_main(*args):
''' build a set of menu items.
args: tuples of (menu type, title) eg ('login', _('Login'))
outputs <li><a href="...">title</a></li>
'''
output = ''
for item in args:
menu_item, title = item[:2]
if len(item) == 3 and not check_access(item[2]):
continue
output += _make_menu_item(menu_item, title)
return output
def build_nav_icon(menu_item, title, **kw):
'''Build a navigation item used for example in ``user/read_base.html``.
Outputs ``<li><a href="..."><i class="icon.."></i> title</a></li>``.
:param menu_item: the name of the defined menu item defined in
config/routing as the named route of the same name
:type menu_item: string
:param title: text used for the link
:type title: string
:param kw: additional keywords needed for creating url eg ``id=...``
:rtype: HTML literal
'''
return _make_menu_item(menu_item, title, **kw)
def build_nav(menu_item, title, **kw):
'''Build a navigation item used for example breadcrumbs.
Outputs ``<li><a href="..."></i> title</a></li>``.
:param menu_item: the name of the defined menu item defined in
config/routing as the named route of the same name
:type menu_item: string
:param title: text used for the link
:type title: string
:param kw: additional keywords needed for creating url eg ``id=...``
:rtype: HTML literal
'''
return _make_menu_item(menu_item, title, icon=None, **kw)
def _make_menu_item(menu_item, title, **kw):
''' build a navigation item used for example breadcrumbs
outputs <li><a href="..."></i> title</a></li>
:param menu_item: the name of the defined menu item defined in
config/routing as the named route of the same name
:type menu_item: string
:param title: text used for the link
:type title: string
:param **kw: additional keywords needed for creating url eg id=...
:rtype: HTML literal
This function is called by wrapper functions.
'''
_menu_items = config['routes.named_routes']
if menu_item not in _menu_items:
raise Exception('menu item `%s` cannot be found' % menu_item)
item = copy.copy(_menu_items[menu_item])
item.update(kw)
active = _link_active(item)
needed = item.pop('needed')
for need in needed:
if need not in kw:
            raise Exception('menu item `%s` needs parameter `%s`'
                            % (menu_item, need))
link = _link_to(title, menu_item, suppress_active_class=True, **item)
if active:
return literal('<li class="active">') + link + literal('</li>')
return literal('<li>') + link + literal('</li>')
def default_group_type():
return str(config.get('ckan.default.group_type', 'group'))
def get_facet_items_dict(facet, limit=None, exclude_active=False):
'''Return the list of unselected facet items for the given facet, sorted
by count.
    Returns the list of unselected facet constraints or facet items (e.g. tag
names like "russian" or "tolstoy") for the given search facet (e.g.
"tags"), sorted by facet item count (i.e. the number of search results that
match each facet item).
Reads the complete list of facet items for the given facet from
c.search_facets, and filters out the facet items that the user has already
selected.
Arguments:
facet -- the name of the facet to filter.
limit -- the max. number of facet items to return.
exclude_active -- only return unselected facets.
'''
if not c.search_facets or \
not c.search_facets.get(facet) or \
not c.search_facets.get(facet).get('items'):
return []
facets = []
for facet_item in c.search_facets.get(facet)['items']:
if not len(facet_item['name'].strip()):
continue
if not (facet, facet_item['name']) in request.params.items():
facets.append(dict(active=False, **facet_item))
elif not exclude_active:
facets.append(dict(active=True, **facet_item))
facets = sorted(facets, key=lambda item: item['count'], reverse=True)
if c.search_facets_limits and limit is None:
limit = c.search_facets_limits.get(facet)
    # zero treated as infinite for historical reasons
if limit is not None and limit > 0:
return facets[:limit]
return facets
def has_more_facets(facet, limit=None, exclude_active=False):
'''
Returns True if there are more facet items for the given facet than the
limit.
Reads the complete list of facet items for the given facet from
c.search_facets, and filters out the facet items that the user has already
selected.
Arguments:
facet -- the name of the facet to filter.
limit -- the max. number of facet items.
exclude_active -- only return unselected facets.
'''
facets = []
for facet_item in c.search_facets.get(facet)['items']:
if not len(facet_item['name'].strip()):
continue
if not (facet, facet_item['name']) in request.params.items():
facets.append(dict(active=False, **facet_item))
elif not exclude_active:
facets.append(dict(active=True, **facet_item))
if c.search_facets_limits and limit is None:
limit = c.search_facets_limits.get(facet)
if limit is not None and len(facets) > limit:
return True
return False
def unselected_facet_items(facet, limit=10):
'''Return the list of unselected facet items for the given facet, sorted
by count.
    Returns the list of unselected facet constraints or facet items (e.g. tag
names like "russian" or "tolstoy") for the given search facet (e.g.
"tags"), sorted by facet item count (i.e. the number of search results that
match each facet item).
Reads the complete list of facet items for the given facet from
c.search_facets, and filters out the facet items that the user has already
selected.
Arguments:
facet -- the name of the facet to filter.
limit -- the max. number of facet items to return.
'''
return get_facet_items_dict(facet, limit=limit, exclude_active=True)
@maintain.deprecated('h.get_facet_title is deprecated in 2.0 and will be removed.')
def get_facet_title(name):
'''Deprecated in ckan 2.0 '''
# if this is set in the config use this
config_title = config.get('search.facets.%s.title' % name)
if config_title:
return config_title
facet_titles = {'organization': _('Organizations'),
'groups': _('Groups'),
'tags': _('Tags'),
'res_format': _('Formats'),
'license': _('Licenses'), }
return facet_titles.get(name, name.capitalize())
def get_param_int(name, default=10):
try:
return int(request.params.get(name, default))
except ValueError:
return default
def _url_with_params(url, params):
if not params:
return url
params = [(k, v.encode('utf-8') if isinstance(v, basestring) else str(v))
for k, v in params]
return url + u'?' + urlencode(params)
def _search_url(params):
url = url_for(controller='package', action='search')
return _url_with_params(url, params)
def sorted_extras(package_extras, auto_clean=False, subs=None, exclude=None):
''' Used for outputting package extras
:param package_extras: the package extras
:type package_extras: dict
:param auto_clean: If true capitalize and replace -_ with spaces
:type auto_clean: bool
:param subs: substitutes to use instead of given keys
:type subs: dict {'key': 'replacement'}
:param exclude: keys to exclude
:type exclude: list of strings
'''
# If exclude is not supplied use values defined in the config
if not exclude:
exclude = g.package_hide_extras
output = []
for extra in sorted(package_extras, key=lambda x: x['key']):
if extra.get('state') == 'deleted':
continue
k, v = extra['key'], extra['value']
if k in exclude:
continue
if subs and k in subs:
k = subs[k]
elif auto_clean:
k = k.replace('_', ' ').replace('-', ' ').title()
if isinstance(v, (list, tuple)):
v = ", ".join(map(unicode, v))
output.append((k, v))
return output
def check_access(action, data_dict=None):
context = {'model': model,
'user': c.user or c.author}
if not data_dict:
data_dict = {}
try:
logic.check_access(action, context, data_dict)
authorized = True
except logic.NotAuthorized:
authorized = False
return authorized
@maintain.deprecated("helpers.get_action() is deprecated and will be removed "
"in a future version of CKAN. Instead, please use the "
"extra_vars param to render() in your controller to pass "
"results from action functions to your templates.")
def get_action(action_name, data_dict=None):
'''Calls an action function from a template. Deprecated in CKAN 2.3.'''
if data_dict is None:
data_dict = {}
return logic.get_action(action_name)({}, data_dict)
def linked_user(user, maxlength=0, avatar=20):
if user in [model.PSEUDO_USER__LOGGED_IN, model.PSEUDO_USER__VISITOR]:
return user
if not isinstance(user, model.User):
user_name = unicode(user)
user = model.User.get(user_name)
if not user:
return user_name
if user:
name = user.name if model.User.VALID_NAME.match(user.name) else user.id
icon = gravatar(email_hash=user.email_hash, size=avatar)
displayname = user.display_name
if maxlength and len(user.display_name) > maxlength:
displayname = displayname[:maxlength] + '...'
return icon + u' ' + link_to(displayname,
url_for(controller='user', action='read', id=name))
def group_name_to_title(name):
group = model.Group.by_name(name)
if group is not None:
return group.display_name
return name
def markdown_extract(text, extract_length=190):
''' return the plain text representation of markdown encoded text. That
    is the text without any html tags. If extract_length is 0 then it
will not be truncated.'''
if (text is None) or (text.strip() == ''):
return ''
plain = RE_MD_HTML_TAGS.sub('', markdown(text))
if not extract_length or len(plain) < extract_length:
return literal(plain)
return literal(unicode(truncate(plain, length=extract_length, indicator='...', whole_word=True)))
def icon_url(name):
return url_for_static('/images/icons/%s.png' % name)
def icon_html(url, alt=None, inline=True):
classes = ''
if inline:
classes += 'inline-icon '
return literal(('<img src="%s" height="16px" width="16px" alt="%s" ' +
'class="%s" /> ') % (url, alt, classes))
def icon(name, alt=None, inline=True):
return icon_html(icon_url(name), alt, inline)
def resource_icon(res):
    # Notes kept from the original dead `if False:` branch: a generic
    # 'page_white' icon, 'page_white_error' for broken (404) resources,
    # plus 'page_white_gear' and 'page_white_link' as other candidates.
    return icon(format_icon(res.get('format', '')))
def format_icon(_format):
_format = _format.lower()
if ('json' in _format): return 'page_white_cup'
if ('csv' in _format): return 'page_white_gear'
if ('xls' in _format): return 'page_white_excel'
if ('zip' in _format): return 'page_white_compressed'
if ('api' in _format): return 'page_white_database'
if ('plain text' in _format): return 'page_white_text'
if ('xml' in _format): return 'page_white_code'
return 'page_white'
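# Illustrative sketch, not part of the original module: format_icon maps a
# free-text format string to one of the icon names used by resource_icon().
def _example_format_icon():
    assert format_icon('CSV') == 'page_white_gear'
    assert format_icon('application/json') == 'page_white_cup'
    assert format_icon('shapefile') == 'page_white'  # unknown formats fall back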
def dict_list_reduce(list_, key, unique=True):
''' Take a list of dicts and create a new one containing just the
values for the key with unique values if requested. '''
new_list = []
for item in list_:
value = item.get(key)
if not value or (unique and value in new_list):
continue
new_list.append(value)
return new_list
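# Illustrative sketch, not part of the original module: dict_list_reduce pulls
# a single key out of a list of dicts, skipping blanks and (by default)
# duplicate values.
def _example_dict_list_reduce():
    resources = [{'format': 'CSV'}, {'format': ''},
                 {'format': 'CSV'}, {'format': 'JSON'}]
    assert dict_list_reduce(resources, 'format') == ['CSV', 'JSON']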
def linked_gravatar(email_hash, size=100, default=None):
return literal(
'<a href="https://gravatar.com/" target="_blank" ' +
'title="%s" alt="">' % _('Update your avatar at gravatar.com') +
'%s</a>' % gravatar(email_hash, size, default)
)
_VALID_GRAVATAR_DEFAULTS = ['404', 'mm', 'identicon', 'monsterid',
'wavatar', 'retro']
def gravatar(email_hash, size=100, default=None):
if default is None:
default = config.get('ckan.gravatar_default', 'identicon')
    if default not in _VALID_GRAVATAR_DEFAULTS:
# treat the default as a url
default = urllib.quote(default, safe='')
return literal('''<img src="//gravatar.com/avatar/%s?s=%d&d=%s"
class="gravatar" width="%s" height="%s" />'''
% (email_hash, size, default, size, size)
)
def pager_url(page, partial=None, **kwargs):
routes_dict = _pylons_default_url.environ['pylons.routes_dict']
kwargs['controller'] = routes_dict['controller']
kwargs['action'] = routes_dict['action']
if routes_dict.get('id'):
kwargs['id'] = routes_dict['id']
kwargs['page'] = page
return url(**kwargs)
class Page(paginate.Page):
# Curry the pager method of the webhelpers.paginate.Page class, so we have
# our custom layout set as default.
def pager(self, *args, **kwargs):
kwargs.update(
format=u"<div class='pagination pagination-centered'><ul>$link_previous ~2~ $link_next</ul></div>",
symbol_previous=u'«', symbol_next=u'»',
curpage_attr={'class': 'active'}, link_attr={}
)
return super(Page, self).pager(*args, **kwargs)
# Put each page link into a <li> (for Bootstrap to style it)
def _pagerlink(self, page, text, extra_attributes=None):
anchor = super(Page, self)._pagerlink(page, text)
extra_attributes = extra_attributes or {}
return HTML.li(anchor, **extra_attributes)
# Change 'current page' link from <span> to <li><a>
# and '..' into '<li><a>..'
# (for Bootstrap to style them properly)
def _range(self, regexp_match):
html = super(Page, self)._range(regexp_match)
# Convert ..
dotdot = '<span class="pager_dotdot">..</span>'
dotdot_link = HTML.li(HTML.a('...', href='#'), class_='disabled')
html = re.sub(dotdot, dotdot_link, html)
# Convert current page
text = '%s' % self.page
current_page_span = str(HTML.span(c=text, **self.curpage_attr))
current_page_link = self._pagerlink(self.page, text,
extra_attributes=self.curpage_attr)
return re.sub(current_page_span, current_page_link, html)
def render_datetime(datetime_, date_format=None, with_hours=False):
'''Render a datetime object or timestamp string as a localised date or
in the requested format.
If timestamp is badly formatted, then a blank string is returned.
:param datetime_: the date
:type datetime_: datetime or ISO string format
:param date_format: a date format
:type date_format: string
:param with_hours: should the `hours:mins` be shown
:type with_hours: bool
:rtype: string
'''
datetime_ = _datestamp_to_datetime(datetime_)
if not datetime_:
return ''
# if date_format was supplied we use it
if date_format:
return datetime_.strftime(date_format)
# the localised date
return formatters.localised_nice_date(datetime_, show_date=True,
with_hours=with_hours)
def date_str_to_datetime(date_str):
'''Convert ISO-like formatted datestring to datetime object.
This function converts ISO format date- and datetime-strings into
datetime objects. Times may be specified down to the microsecond. UTC
offset or timezone information may **not** be included in the string.
Note - Although originally documented as parsing ISO date(-times), this
function doesn't fully adhere to the format. This function will
throw a ValueError if the string contains UTC offset information.
So in that sense, it is less liberal than ISO format. On the
other hand, it is more liberal of the accepted delimiters between
the values in the string. Also, it allows microsecond precision,
despite that not being part of the ISO format.
'''
time_tuple = re.split('[^\d]+', date_str, maxsplit=5)
# Extract seconds and microseconds
if len(time_tuple) >= 6:
m = re.match('(?P<seconds>\d{2})(\.(?P<microseconds>\d{6}))?$',
time_tuple[5])
if not m:
raise ValueError('Unable to parse %s as seconds.microseconds' %
time_tuple[5])
seconds = int(m.groupdict().get('seconds'))
microseconds = int(m.groupdict(0).get('microseconds'))
time_tuple = time_tuple[:5] + [seconds, microseconds]
return datetime.datetime(*map(int, time_tuple))
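# Illustrative sketch, not part of the original module: the parser above is
# liberal about delimiters and accepts optional microseconds, but no timezone.
def _example_date_str_to_datetime():
    assert date_str_to_datetime('2012-03-04 05:06:07') == \
        datetime.datetime(2012, 3, 4, 5, 6, 7)
    assert date_str_to_datetime('2012-03-04T05:06:07.123456') == \
        datetime.datetime(2012, 3, 4, 5, 6, 7, 123456)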
def parse_rfc_2822_date(date_str, assume_utc=True):
'''
Parse a date string of the form specified in RFC 2822, and return a
datetime.
RFC 2822 is the date format used in HTTP headers. It should contain
timezone information, but that cannot be relied upon.
If date_str doesn't contain timezone information, then the 'assume_utc'
flag determines whether we assume this string is local (with respect to the
server running this code), or UTC. In practice, what this means is that if
assume_utc is True, then the returned datetime is 'aware', with an
associated tzinfo of offset zero. Otherwise, the returned datetime is
'naive'.
If timezone information is available in date_str, then the returned
datetime is 'aware', ie - it has an associated tz_info object.
Returns None if the string cannot be parsed as a valid datetime.
'''
time_tuple = email.utils.parsedate_tz(date_str)
# Not parsable
if not time_tuple:
return None
# No timezone information available in the string
if time_tuple[-1] is None and not assume_utc:
return datetime.datetime.fromtimestamp(
email.utils.mktime_tz(time_tuple))
else:
offset = 0 if time_tuple[-1] is None else time_tuple[-1]
tz_info = _RFC2282TzInfo(offset)
return datetime.datetime(*time_tuple[:6], microsecond=0, tzinfo=tz_info)
class _RFC2282TzInfo(datetime.tzinfo):
'''
A datetime.tzinfo implementation used by parse_rfc_2822_date() function.
In order to return timezone information, a concrete implementation of
datetime.tzinfo is required. This class represents tzinfo that knows
    about its offset from UTC, has no knowledge of daylight savings time, and
no knowledge of the timezone name.
'''
def __init__(self, offset):
'''
offset from UTC in seconds.
'''
self.offset = datetime.timedelta(seconds=offset)
def utcoffset(self, dt):
return self.offset
def dst(self, dt):
'''
Dates parsed from an RFC 2822 string conflate timezone and dst, and so
it's not possible to determine whether we're in DST or not, hence
returning None.
'''
return None
def tzname(self, dt):
return None
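# Illustrative sketch, not part of the original module: dates carrying an
# explicit offset come back timezone-aware, while offset-less dates parsed
# with assume_utc=False stay naive.
def _example_parse_rfc_2822_date():
    aware = parse_rfc_2822_date('Tue, 15 Nov 1994 12:45:26 GMT')
    assert aware.utcoffset() == datetime.timedelta(0)
    naive = parse_rfc_2822_date('Tue, 15 Nov 1994 12:45:26', assume_utc=False)
    assert naive.tzinfo is None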
@maintain.deprecated('h.time_ago_in_words_from_str is deprecated in 2.2 '
'and will be removed. Please use '
'h.time_ago_from_timestamp instead')
def time_ago_in_words_from_str(date_str, granularity='month'):
'''Deprecated in 2.2 use time_ago_from_timestamp'''
if date_str:
return date.time_ago_in_words(date_str_to_datetime(date_str),
granularity=granularity)
else:
return _('Unknown')
def time_ago_from_timestamp(timestamp):
''' Returns a string like `5 months ago` for a datetime relative to now
:param timestamp: the timestamp or datetime
:type timestamp: string or datetime
:rtype: string
'''
datetime_ = _datestamp_to_datetime(timestamp)
if not datetime_:
return _('Unknown')
# the localised date
return formatters.localised_nice_date(datetime_, show_date=False)
def button_attr(enable, type='primary'):
if enable:
return 'class="btn %s"' % type
return 'disabled class="btn disabled"'
def dataset_display_name(package_or_package_dict):
if isinstance(package_or_package_dict, dict):
return package_or_package_dict.get('title', '') or \
package_or_package_dict.get('name', '')
else:
return package_or_package_dict.title or package_or_package_dict.name
def dataset_link(package_or_package_dict):
if isinstance(package_or_package_dict, dict):
name = package_or_package_dict['name']
else:
name = package_or_package_dict.name
text = dataset_display_name(package_or_package_dict)
return link_to(
text,
url_for(controller='package', action='read', id=name)
)
# TODO: (?) support resource objects as well
def resource_display_name(resource_dict):
name = resource_dict.get('name', None)
description = resource_dict.get('description', None)
if name:
return name
elif description:
description = description.split('.')[0]
max_len = 60
if len(description) > max_len:
description = description[:max_len] + '...'
return description
else:
return _("Unnamed resource")
def resource_link(resource_dict, package_id):
text = resource_display_name(resource_dict)
url = url_for(controller='package',
action='resource_read',
id=package_id,
resource_id=resource_dict['id'])
return link_to(text, url)
def related_item_link(related_item_dict):
text = related_item_dict.get('title', '')
url = url_for(controller='related',
action='read',
id=related_item_dict['id'])
return link_to(text, url)
def tag_link(tag):
url = url_for(controller='tag', action='read', id=tag['name'])
return link_to(tag.get('title', tag['name']), url)
def group_link(group):
url = url_for(controller='group', action='read', id=group['name'])
return link_to(group['title'], url)
def organization_link(organization):
url = url_for(controller='organization', action='read', id=organization['name'])
return link_to(organization['name'], url)
def dump_json(obj, **kw):
return json.dumps(obj, **kw)
def _get_template_name():
#FIX ME THIS IS BROKEN
''' helper function to get the currently/last rendered template name '''
return c.__debug_info[-1]['template_name']
def auto_log_message():
if (c.action == 'new'):
return _('Created new dataset.')
elif (c.action == 'editresources'):
return _('Edited resources.')
elif (c.action == 'edit'):
return _('Edited settings.')
return ''
def activity_div(template, activity, actor, object=None, target=None):
actor = '<span class="actor">%s</span>' % actor
if object:
object = '<span class="object">%s</span>' % object
if target:
target = '<span class="target">%s</span>' % target
rendered_datetime = render_datetime(activity['timestamp'])
date = '<span class="date">%s</span>' % rendered_datetime
template = template.format(actor=actor, date=date,
object=object, target=target)
template = '<div class="activity">%s %s</div>' % (template, date)
return literal(template)
def snippet(template_name, **kw):
''' This function is used to load html snippets into pages. keywords
can be used to pass parameters into the snippet rendering '''
import ckan.lib.base as base
return base.render_snippet(template_name, **kw)
def convert_to_dict(object_type, objs):
''' This is a helper function for converting lists of objects into
    lists of dicts. It is for backwards compatibility only. '''
def dictize_revision_list(revision, context):
        # conversion of revision lists
def process_names(items):
array = []
for item in items:
array.append(item.name)
return array
rev = {'id': revision.id,
'state': revision.state,
'timestamp': revision.timestamp,
'author': revision.author,
'packages': process_names(revision.packages),
'groups': process_names(revision.groups),
'message': revision.message, }
return rev
import ckan.lib.dictization.model_dictize as md
converters = {'package': md.package_dictize,
'revisions': dictize_revision_list}
converter = converters[object_type]
items = []
context = {'model': model}
for obj in objs:
item = converter(obj, context)
items.append(item)
return items
# these are the types of objects that can be followed
_follow_objects = ['dataset', 'user', 'group']
def follow_button(obj_type, obj_id):
'''Return a follow button for the given object type and id.
If the user is not logged in return an empty string instead.
:param obj_type: the type of the object to be followed when the follow
button is clicked, e.g. 'user' or 'dataset'
:type obj_type: string
:param obj_id: the id of the object to be followed when the follow button
is clicked
:type obj_id: string
:returns: a follow button as an HTML snippet
:rtype: string
'''
obj_type = obj_type.lower()
assert obj_type in _follow_objects
# If the user is logged in show the follow/unfollow button
if c.user:
context = {'model': model, 'session': model.Session, 'user': c.user}
action = 'am_following_%s' % obj_type
following = logic.get_action(action)(context, {'id': obj_id})
return snippet('snippets/follow_button.html',
following=following,
obj_id=obj_id,
obj_type=obj_type)
return ''
def follow_count(obj_type, obj_id):
'''Return the number of followers of an object.
:param obj_type: the type of the object, e.g. 'user' or 'dataset'
:type obj_type: string
:param obj_id: the id of the object
:type obj_id: string
:returns: the number of followers of the object
:rtype: int
'''
obj_type = obj_type.lower()
assert obj_type in _follow_objects
action = '%s_follower_count' % obj_type
context = {'model': model, 'session': model.Session, 'user': c.user}
return logic.get_action(action)(context, {'id': obj_id})
def _create_url_with_params(params=None, controller=None, action=None,
extras=None):
''' internal function for building urls with parameters. '''
if not controller:
controller = c.controller
if not action:
action = c.action
if not extras:
extras = {}
url = url_for(controller=controller, action=action, **extras)
return _url_with_params(url, params)
def add_url_param(alternative_url=None, controller=None, action=None,
extras=None, new_params=None):
'''
Adds extra parameters to existing ones
controller action & extras (dict) are used to create the base url via
:py:func:`~ckan.lib.helpers.url_for` controller & action default to the
current ones
    This can be overridden by providing an alternative_url, which will be used
instead.
'''
params_nopage = [(k, v) for k, v in request.params.items() if k != 'page']
params = set(params_nopage)
if new_params:
params |= set(new_params.items())
if alternative_url:
return _url_with_params(alternative_url, params)
return _create_url_with_params(params=params, controller=controller,
action=action, extras=extras)
def remove_url_param(key, value=None, replace=None, controller=None,
action=None, extras=None, alternative_url=None):
''' Remove one or multiple keys from the current parameters.
The first parameter can be either a string with the name of the key to
remove or a list of keys to remove.
A specific key/value pair can be removed by passing a second value
argument otherwise all pairs matching the key will be removed. If replace
is given then a new param key=replace will be added.
Note that the value and replace parameters only apply to the first key
provided (or the only one provided if key is a string).
controller action & extras (dict) are used to create the base url
via :py:func:`~ckan.lib.helpers.url_for`
controller & action default to the current ones
    This can be overridden by providing an alternative_url, which will be used
instead.
'''
if isinstance(key, basestring):
keys = [key]
else:
keys = key
params_nopage = [(k, v) for k, v in request.params.items() if k != 'page']
params = list(params_nopage)
if value:
params.remove((keys[0], value))
else:
for key in keys:
            params = [(k, v) for (k, v) in params if k != key]
if replace is not None:
params.append((keys[0], replace))
if alternative_url:
return _url_with_params(alternative_url, params)
return _create_url_with_params(params=params, controller=controller,
action=action, extras=extras)
def include_resource(resource):
r = getattr(fanstatic_resources, resource)
r.need()
def urls_for_resource(resource):
''' Returns a list of urls for the resource specified. If the resource
is a group or has dependencies then there can be multiple urls.
NOTE: This is for special situations only and is not the way to generally
include resources. It is advised not to use this function.'''
r = getattr(fanstatic_resources, resource)
resources = list(r.resources)
core = fanstatic_resources.fanstatic_extensions.core
f = core.get_needed()
lib = r.library
root_path = f.library_url(lib)
resources = core.sort_resources(resources)
if f._bundle:
resources = core.bundle_resources(resources)
out = []
for resource in resources:
if isinstance(resource, core.Bundle):
paths = [resource.relpath for resource in resource.resources()]
relpath = ';'.join(paths)
relpath = core.BUNDLE_PREFIX + relpath
else:
relpath = resource.relpath
out.append('%s/%s' % (root_path, relpath))
return out
def debug_inspect(arg):
''' Output pprint.pformat view of supplied arg '''
return literal('<pre>') + pprint.pformat(arg) + literal('</pre>')
def debug_full_info_as_list(debug_info):
''' This dumps the template variables for debugging purposes only. '''
out = []
ignored_keys = ['c', 'app_globals', 'g', 'h', 'request', 'tmpl_context',
'actions', 'translator', 'session', 'N_', 'ungettext',
'config', 'response', '_']
ignored_context_keys = ['__class__', '__context', '__delattr__', '__dict__',
'__doc__', '__format__', '__getattr__',
'__getattribute__', '__hash__', '__init__',
'__module__', '__new__', '__reduce__',
'__reduce_ex__', '__repr__', '__setattr__',
'__sizeof__', '__str__', '__subclasshook__',
'__weakref__', 'action', 'environ', 'pylons',
'start_response']
debug_vars = debug_info['vars']
for key in debug_vars.keys():
if not key in ignored_keys:
data = pprint.pformat(debug_vars.get(key))
data = data.decode('utf-8')
out.append((key, data))
if 'tmpl_context' in debug_vars:
for key in debug_info['c_vars']:
if not key in ignored_context_keys:
data = pprint.pformat(getattr(debug_vars['tmpl_context'], key))
data = data.decode('utf-8')
out.append(('c.%s' % key, data))
return out
def popular(type_, number, min=1, title=None):
''' display a popular icon. '''
if type_ == 'views':
title = ungettext('{number} view', '{number} views', number)
elif type_ == 'recent views':
title = ungettext('{number} recent view', '{number} recent views', number)
elif not title:
        raise Exception('popular() did not receive a valid type_ or title')
return snippet('snippets/popular.html', title=title, number=number, min=min)
def groups_available(am_member=False):
'''Return a list of the groups that the user is authorized to edit.
:param am_member: if True return only the groups the logged-in user is a
member of, otherwise return all groups that the user is authorized to
edit (for example, sysadmin users are authorized to edit all groups)
(optional, default: False)
    :type am_member: boolean
'''
context = {}
data_dict = {'available_only': True, 'am_member': am_member}
return logic.get_action('group_list_authz')(context, data_dict)
def organizations_available(permission='edit_group'):
''' return a list of available organizations '''
context = {'user': c.user}
data_dict = {'permission': permission}
return logic.get_action('organization_list_for_user')(context, data_dict)
def user_in_org_or_group(group_id):
''' Check if user is in a group or organization '''
# we need a user
if not c.userobj:
return False
# sysadmins can do anything
if c.userobj.sysadmin:
return True
query = model.Session.query(model.Member) \
.filter(model.Member.state == 'active') \
.filter(model.Member.table_name == 'user') \
.filter(model.Member.group_id == group_id) \
.filter(model.Member.table_id == c.userobj.id)
return len(query.all()) != 0
def dashboard_activity_stream(user_id, filter_type=None, filter_id=None,
offset=0):
'''Return the dashboard activity stream of the current user.
:param user_id: the id of the user
:type user_id: string
:param filter_type: the type of thing to filter by
:type filter_type: string
:param filter_id: the id of item to filter by
:type filter_id: string
:returns: an activity stream as an HTML snippet
:rtype: string
'''
context = {'model': model, 'session': model.Session, 'user': c.user}
if filter_type:
action_functions = {
'dataset': 'package_activity_list_html',
'user': 'user_activity_list_html',
'group': 'group_activity_list_html',
'organization': 'organization_activity_list_html',
}
action_function = logic.get_action(action_functions.get(filter_type))
return action_function(context, {'id': filter_id, 'offset': offset})
else:
return logic.get_action('dashboard_activity_list_html')(
context, {'offset': offset})
def recently_changed_packages_activity_stream(limit=None):
if limit:
data_dict = {'limit': limit}
else:
data_dict = {}
context = {'model': model, 'session': model.Session, 'user': c.user}
return logic.get_action('recently_changed_packages_activity_list_html')(
context, data_dict)
def escape_js(str_to_escape):
'''Escapes special characters from a JS string.
Useful e.g. when you need to pass JSON to the templates
:param str_to_escape: string to be escaped
:rtype: string
'''
return str_to_escape.replace('\\', '\\\\') \
.replace('\'', '\\\'') \
.replace('"', '\\\"')
def get_pkg_dict_extra(pkg_dict, key, default=None):
'''Returns the value for the dataset extra with the provided key.
If the key is not found, it returns a default value, which is None by
default.
:param pkg_dict: dictized dataset
:key: extra key to lookup
:default: default value returned if not found
'''
extras = pkg_dict['extras'] if 'extras' in pkg_dict else []
for extra in extras:
if extra['key'] == key:
return extra['value']
return default
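# Illustrative sketch, not part of the original module: extras live in the
# package dict as a list of {'key': ..., 'value': ...} pairs.
def _example_get_pkg_dict_extra():
    pkg = {'extras': [{'key': 'theme', 'value': 'transport'}]}
    assert get_pkg_dict_extra(pkg, 'theme') == 'transport'
    assert get_pkg_dict_extra(pkg, 'missing', default='n/a') == 'n/a'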
def get_request_param(parameter_name, default=None):
''' This function allows templates to access query string parameters
from the request. This is useful for things like sort order in
searches. '''
return request.params.get(parameter_name, default)
# find all inner text of html eg `<b>moo</b>` gets `moo` but not of <a> tags
# as this would lead to linkifying links if they are urls.
RE_MD_GET_INNER_HTML = re.compile(
r'(^|(?:<(?!a\b)[^>]*>))([^<]+)(?=<|$)',
flags=re.UNICODE
)
# find all `internal links` eg. tag:moo, dataset:1234, tag:"my tag"
RE_MD_INTERNAL_LINK = re.compile(
r'\b(tag|package|dataset|group):((")?(?(3)[ \w\-.]+|[\w\-.]+)(?(3)"))',
flags=re.UNICODE
)
# find external links eg http://foo.com, https://bar.org/foobar.html
# but ignore trailing punctuation since it is probably not part of the link
RE_MD_EXTERNAL_LINK = re.compile(
r'(\bhttps?:\/\/[\w\-\.,@?^=%&;:\/~\\+#]*'
'[\w\-@?^=%&:\/~\\+#]' # but last character can't be punctuation [.,;]
')',
flags=re.UNICODE
)
# find all tags but ignore < in the strings so that we can use it correctly
# in markdown
RE_MD_HTML_TAGS = re.compile('<[^><]*>')
def html_auto_link(data):
'''Linkifies HTML
tag:... converted to a tag link
dataset:... converted to a dataset link
group:... converted to a group link
http://... converted to a link
'''
LINK_FNS = {
'tag': tag_link,
'group': group_link,
'dataset': dataset_link,
'package': dataset_link,
}
def makelink(matchobj):
obj = matchobj.group(1)
name = matchobj.group(2)
title = '%s:%s' % (obj, name)
return LINK_FNS[obj]({'name': name.strip('"'), 'title': title})
def link(matchobj):
return '<a href="%s" target="_blank" rel="nofollow">%s</a>' \
% (matchobj.group(1), matchobj.group(1))
def process(matchobj):
data = matchobj.group(2)
data = RE_MD_INTERNAL_LINK.sub(makelink, data)
data = RE_MD_EXTERNAL_LINK.sub(link, data)
return matchobj.group(1) + data
data = RE_MD_GET_INNER_HTML.sub(process, data)
return data
def render_markdown(data, auto_link=True, allow_html=False):
''' Returns the data as rendered markdown
:param auto_link: Should ckan specific links be created e.g. `group:xxx`
:type auto_link: bool
    :param allow_html: If True then html tags in the markdown data are kept.
This is dangerous if users have added malicious content.
If False all html tags are removed.
:type allow_html: bool
'''
if not data:
return ''
if allow_html:
data = markdown(data.strip(), safe_mode=False)
else:
data = RE_MD_HTML_TAGS.sub('', data.strip())
data = markdown(data, safe_mode=True)
# tags can be added by tag:... or tag:"...." and a link will be made
# from it
if auto_link:
data = html_auto_link(data)
return literal(data)
def format_resource_items(items):
''' Take a resource item list and format nicely with blacklisting etc. '''
blacklist = ['name', 'description', 'url', 'tracking_summary']
output = []
# regular expressions for detecting types in strings
reg_ex_datetime = '^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?$'
reg_ex_int = '^-?\d{1,}$'
reg_ex_float = '^-?\d{1,}\.\d{1,}$'
for key, value in items:
if not value or key in blacklist:
continue
# size is treated specially as we want to show in MiB etc
if key == 'size':
try:
value = formatters.localised_filesize(int(value))
except ValueError:
# Sometimes values that can't be converted to ints can sneak
# into the db. In this case, just leave them as they are.
pass
elif isinstance(value, basestring):
# check if strings are actually datetime/number etc
if re.search(reg_ex_datetime, value):
datetime_ = date_str_to_datetime(value)
value = formatters.localised_nice_date(datetime_)
elif re.search(reg_ex_float, value):
value = formatters.localised_number(float(value))
elif re.search(reg_ex_int, value):
value = formatters.localised_number(int(value))
elif isinstance(value, int) or isinstance(value, float):
value = formatters.localised_number(value)
key = key.replace('_', ' ')
output.append((key, value))
return sorted(output, key=lambda x: x[0])
def resource_preview(resource, package):
'''
    Returns a rendered snippet for an embedded resource preview.
Depending on the type, different previews are loaded.
This could be an img tag where the image is loaded directly or an iframe
that embeds a web page, recline or a pdf preview.
'''
if not resource['url']:
return False
format_lower = datapreview.res_format(resource)
directly = False
data_dict = {'resource': resource, 'package': package}
if datapreview.get_preview_plugin(data_dict, return_first=True):
url = url_for(controller='package', action='resource_datapreview',
resource_id=resource['id'], id=package['id'], qualified=True)
else:
return False
return snippet("dataviewer/snippets/data_preview.html",
embed=directly,
resource_url=url,
raw_resource_url=resource.get('url'))
def get_allowed_view_types(resource, package):
data_dict = {'resource': resource, 'package': package}
plugins = datapreview.get_allowed_view_plugins(data_dict)
allowed_view_types = []
for plugin in plugins:
info = plugin.info()
allowed_view_types.append((info['name'],
info.get('title', info['name']),
info.get('icon', 'image')))
allowed_view_types.sort(key=lambda item: item[1])
return allowed_view_types
def rendered_resource_view(resource_view, resource, package, embed=False):
'''
Returns a rendered resource view snippet.
'''
view_plugin = datapreview.get_view_plugin(resource_view['view_type'])
context = {}
data_dict = {'resource_view': resource_view,
'resource': resource,
'package': package}
vars = view_plugin.setup_template_variables(context, data_dict) or {}
template = view_plugin.view_template(context, data_dict)
data_dict.update(vars)
if not resource_view_is_iframed(resource_view) and embed:
template = "package/snippets/resource_view_embed.html"
import ckan.lib.base as base
return literal(base.render(template, extra_vars=data_dict))
def view_resource_url(resource_view, resource, package, **kw):
'''
    Returns the url for a resource. Made to be overridden by extensions, e.g.
    by the resource proxy.
'''
return resource['url']
def resource_view_is_filterable(resource_view):
'''
    Returns True if the given resource view supports filters.
'''
view_plugin = datapreview.get_view_plugin(resource_view['view_type'])
return view_plugin.info().get('filterable', False)
def resource_view_get_fields(resource):
'''Returns sorted list of text and time fields of a datastore resource.'''
if not resource.get('datastore_active'):
return []
data = {
'resource_id': resource['id'],
'limit': 0
}
result = logic.get_action('datastore_search')({}, data)
fields = [field['id'] for field in result.get('fields', [])]
return sorted(fields)
def resource_view_is_iframed(resource_view):
'''
Returns true if the given resource view should be displayed in an iframe.
'''
view_plugin = datapreview.get_view_plugin(resource_view['view_type'])
return view_plugin.info().get('iframed', True)
def resource_view_icon(resource_view):
'''
Returns the icon for a particular view type.
'''
view_plugin = datapreview.get_view_plugin(resource_view['view_type'])
return view_plugin.info().get('icon', 'picture')
def resource_view_display_preview(resource_view):
'''
Returns if the view should display a preview.
'''
view_plugin = datapreview.get_view_plugin(resource_view['view_type'])
return view_plugin.info().get('preview_enabled', True)
def resource_view_full_page(resource_view):
'''
Returns if the edit view page should be full page.
'''
view_plugin = datapreview.get_view_plugin(resource_view['view_type'])
return view_plugin.info().get('full_page_edit', False)
def remove_linebreaks(string):
'''Remove linebreaks from string to make it usable in JavaScript'''
return str(string).replace('\n', '')
def list_dict_filter(list_, search_field, output_field, value):
''' Takes a list of dicts and returns the value of a given key if the
item has a matching value for a supplied key
:param list_: the list to search through for matching items
:type list_: list of dicts
:param search_field: the key to use to find matching items
:type search_field: string
:param output_field: the key to use to output the value
:type output_field: string
:param value: the value to search for
'''
for item in list_:
if item.get(search_field) == value:
return item.get(output_field, value)
return value
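# Illustrative sketch, not part of the original module: list_dict_filter maps
# a matched search_field to the corresponding output_field, falling back to
# the searched value itself.
def _example_list_dict_filter():
    licenses = [{'id': 'cc-by', 'title': 'Creative Commons Attribution'}]
    assert list_dict_filter(licenses, 'id', 'title', 'cc-by') == \
        'Creative Commons Attribution'
    assert list_dict_filter(licenses, 'id', 'title', 'unknown') == 'unknown'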
def SI_number_span(number):
''' outputs a span with the number in SI unit eg 14700 -> 14.7k '''
number = int(number)
if number < 1000:
output = literal('<span>')
else:
output = literal('<span title="' + formatters.localised_number(number) + '">')
return output + formatters.localised_SI_number(number) + literal('</span>')
# add some formatter functions
localised_number = formatters.localised_number
localised_SI_number = formatters.localised_SI_number
localised_nice_date = formatters.localised_nice_date
localised_filesize = formatters.localised_filesize
def new_activities():
'''Return the number of activities for the current user.
See :func:`logic.action.get.dashboard_new_activities_count` for more
details.
'''
if not c.userobj:
return None
action = logic.get_action('dashboard_new_activities_count')
return action({}, {})
def uploads_enabled():
if uploader.get_storage_path():
return True
return False
def get_featured_organizations(count=1):
    '''Returns a list of featured organizations, in the form returned by
    the organization_list action function
'''
config_orgs = config.get('ckan.featured_orgs', '').split()
orgs = featured_group_org(get_action='organization_show',
list_action='organization_list',
count=count,
items=config_orgs)
return orgs
def get_featured_groups(count=1):
    '''Returns a list of featured groups, in the form returned by
    the group_list action function
'''
config_groups = config.get('ckan.featured_groups', '').split()
groups = featured_group_org(get_action='group_show',
list_action='group_list',
count=count,
items=config_groups)
return groups
def featured_group_org(items, get_action, list_action, count):
def get_group(id):
context = {'ignore_auth': True,
'limits': {'packages': 2},
'for_view': True}
data_dict = {'id': id}
try:
out = logic.get_action(get_action)(context, data_dict)
except logic.NotFound:
return None
return out
groups_data = []
extras = logic.get_action(list_action)({}, {})
# list of found ids to prevent duplicates
found = []
for group_name in items + extras:
group = get_group(group_name)
if not group:
continue
# check if duplicate
if group['id'] in found:
continue
found.append(group['id'])
groups_data.append(group)
if len(groups_data) == count:
break
return groups_data
def get_site_statistics():
stats = {}
stats['dataset_count'] = logic.get_action('package_search')(
{}, {"rows": 1})['count']
stats['group_count'] = len(logic.get_action('group_list')({}, {}))
stats['organization_count'] = len(
logic.get_action('organization_list')({}, {}))
result = model.Session.execute(
'''select count(*) from related r
left join related_dataset rd on r.id = rd.related_id
where rd.status = 'active' or rd.id is null''').first()[0]
stats['related_count'] = result
return stats
_RESOURCE_FORMATS = None
def resource_formats():
''' Returns the resource formats as a dict, sourced from the resource format JSON file.
key: potential user input value
value: [canonical mimetype lowercased, canonical format (lowercase), human readable form]
Fuller description of the fields are described in
`ckan/config/resource_formats.json`.
'''
global _RESOURCE_FORMATS
if not _RESOURCE_FORMATS:
_RESOURCE_FORMATS = {}
format_file_path = config.get('ckan.resource_formats')
if not format_file_path:
format_file_path = os.path.join(
os.path.dirname(os.path.realpath(ckan.config.__file__)),
'resource_formats.json'
)
with open(format_file_path) as format_file:
try:
file_resource_formats = json.loads(format_file.read())
            except ValueError as e:  # includes simplejson.decoder.JSONDecodeError
raise ValueError('Invalid JSON syntax in %s: %s' % (format_file_path, e))
for format_line in file_resource_formats:
if format_line[0] == '_comment':
continue
line = [format_line[2], format_line[0], format_line[1]]
alternatives = format_line[3] if len(format_line) == 4 else []
for item in line + alternatives:
if item:
item = item.lower()
if item in _RESOURCE_FORMATS \
and _RESOURCE_FORMATS[item] != line:
raise ValueError('Duplicate resource format '
'identifier in %s: %s' %
(format_file_path, item))
_RESOURCE_FORMATS[item] = line
return _RESOURCE_FORMATS
def unified_resource_format(format):
formats = resource_formats()
format_clean = format.lower()
if format_clean in formats:
format_new = formats[format_clean][1]
else:
format_new = format
return format_new
def check_config_permission(permission):
return new_authz.check_config_permission(permission)
def get_organization(org=None, include_datasets=False):
if org is None:
return {}
try:
return logic.get_action('organization_show')({}, {'id': org, 'include_datasets': include_datasets})
except (NotFound, ValidationError, NotAuthorized):
return {}
# these are the functions that will end up in `h` template helpers
__allowed_functions__ = [
# functions defined in ckan.lib.helpers
'redirect_to',
'url',
'url_for',
'url_for_static',
'url_for_static_or_external',
'is_url',
'lang',
'flash',
'flash_error',
'flash_notice',
'flash_success',
'nav_link',
'nav_named_link',
'subnav_link',
'subnav_named_route',
'default_group_type',
'check_access',
'get_action',
'linked_user',
'group_name_to_title',
'markdown_extract',
'icon',
'icon_html',
'icon_url',
'resource_icon',
'format_icon',
'linked_gravatar',
'gravatar',
'pager_url',
'render_datetime',
'date_str_to_datetime',
'parse_rfc_2822_date',
'time_ago_in_words_from_str',
'button_attr',
'dataset_display_name',
'dataset_link',
'resource_display_name',
'resource_link',
'related_item_link',
'tag_link',
'group_link',
'dump_json',
'auto_log_message',
'snippet',
'convert_to_dict',
'activity_div',
'lang_native_name',
'get_facet_items_dict',
'unselected_facet_items',
'include_resource',
'urls_for_resource',
'build_nav_main',
'build_nav_icon',
'build_nav',
'debug_inspect',
'dict_list_reduce',
'full_current_url',
'popular',
'debug_full_info_as_list',
'get_facet_title',
'get_param_int',
'sorted_extras',
'follow_button',
'follow_count',
'remove_url_param',
'add_url_param',
'groups_available',
'organizations_available',
'user_in_org_or_group',
'dashboard_activity_stream',
'recently_changed_packages_activity_stream',
'escape_js',
'get_pkg_dict_extra',
'get_request_param',
'render_markdown',
'format_resource_items',
'resource_preview',
'rendered_resource_view',
'resource_view_get_fields',
'resource_view_is_filterable',
'resource_view_is_iframed',
'resource_view_icon',
'resource_view_display_preview',
'resource_view_full_page',
'remove_linebreaks',
'SI_number_span',
'localised_number',
'localised_SI_number',
'localised_nice_date',
'localised_filesize',
'list_dict_filter',
'new_activities',
'time_ago_from_timestamp',
'get_organization',
'has_more_facets',
# imported into ckan.lib.helpers
'literal',
'link_to',
'get_available_locales',
'get_locales_dict',
'truncate',
'file',
'mail_to',
'radio',
'submit',
'asbool',
'uploads_enabled',
'get_featured_organizations',
'get_featured_groups',
'get_site_statistics',
'get_allowed_view_types',
'urlencode',
'check_config_permission',
'view_resource_url',
]
|
# content of test_sample.py
def func(x):
return x * 2
def test_answer():
assert func(5) == 10
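# To exercise the sample above, run pytest from the directory that contains
# test_sample.py (assuming pytest is installed):
#
#     $ pytest test_sample.py
#
# pytest auto-collects functions prefixed with `test_` and reports whether the
# assertion holds.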
|
"""Produce metadata and datasets of NIH Chest Xray images
"""
import os
import numpy as np
import pandas as pd
import tensorflow as tf
def get_metadata(path):
"""Produce metadata with relevant columns from NIH Chest Xray images
Args:
path: Path to NIH dataset
Returns:
        metadata: DataFrame with image path and label columns
"""
raw_meta = pd.read_csv(os.path.join(path, 'Data_Entry_2017.csv'))
meta = raw_meta[['Image Index', 'Finding Labels']].copy()
meta.columns = ['image', 'label']
meta.image = os.path.join(path, 'images/') + meta.image
return meta
def build_dataset(meta, mean=None, std=None, num_parallel_calls=32):
"""Produce tf Dataset from metadata
If mean and std are provided those values will be used to normalise the
image intensities to zero mean and unit variance.
Args:
meta: Dataframe with paths to images under column name image
        mean: Mean intensity used for normalisation
        std: Standard deviation; if both are provided they will be used to
            normalize images
num_parallel_calls: Number of threads for loading images
"""
encoded_labels = meta.label.str.get_dummies(sep='|').sort_index(axis=1)
ds = tf.data.Dataset.from_tensor_slices({
'index': meta.index,
'path': meta['image'].values,
'label': encoded_labels.values.astype(np.float32)
})
if None in (mean, std):
mean = 0
std = 1
return ds.map(
lambda item: normalize_image(decode_image(read_file(item)), mean, std),
num_parallel_calls=num_parallel_calls
)
def read_file(item):
"""Read file in key path into key image
"""
item['image'] = tf.read_file(item['path'])
return item
def decode_image(item):
"""Decode raw image file into float32 image tensor with key image
"""
decoded = tf.image.decode_image(item['image'])
item['image'] = tf.image.convert_image_dtype(decoded, tf.float32)
# All images are B&W, but some seem to have the channel replicated,
# to avoid issues we simply select the first channel
item['image'] = tf.expand_dims(item['image'][:, :, 0], axis=-1)
item['image'].set_shape([None, None, 1])
return item
def normalize_image(item, mean, std):
"""Normalize image with key image to zero mean and unit variance
"""
item['image'] = (item['image'] - mean) / std
return item
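# Illustrative sketch, not part of the original module: typical wiring of the
# helpers above under the TF1-style API used here. The dataset root and the
# normalisation statistics are hypothetical placeholders.
def _example_pipeline(path='/data/nih-chest-xray', mean=0.5, std=0.25):
    meta = get_metadata(path)
    ds = build_dataset(meta, mean=mean, std=std)
    # batching would normally follow a resize/crop step, omitted here
    ds = ds.shuffle(1000).prefetch(1)
    return ds.make_one_shot_iterator().get_next()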
|
import argparse
import time
from pathlib import Path
import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import check_img_size, check_requirements, non_max_suppression, apply_classifier, scale_coords
from utils.general import xyxy2xywh, strip_optimizer, set_logging, increment_path
from utils.plots import plot_one_box
from utils.torch_utils import select_device, load_classifier, time_synchronized
from utils.draw_name import draw_name
def detect(save_img=False):
source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
('rtsp://', 'rtmp://', 'http://'))
# Directories
save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Initialize
set_logging()
device = select_device(opt.device)
half = device.type != 'cpu' # half precision only supported on CUDA
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
if half:
model.half() # to FP16
# Second-stage classifier
classify = False
if classify:
modelc = load_classifier(name='resnet101', n=2) # initialize
        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])
        modelc.to(device).eval()
# Set Dataloader
vid_path, vid_writer = None, None
if webcam:
view_img = True
cudnn.benchmark = True # set True to speed up constant image size inference
dataset = LoadStreams(source, img_size=imgsz)
else:
save_img = True
dataset = LoadImages(source, img_size=imgsz)
# Get names and colors
names = model.module.names if hasattr(model, 'module') else model.names
colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
# Run inference
t0 = time.time()
img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
_ = model(img.half() if half else img) if device.type != 'cpu' else None # run once
for path, img, im0s, vid_cap in dataset:
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
# Inference
t1 = time_synchronized()
pred = model(img, augment=opt.augment)[0]
# Apply NMS
pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
t2 = time_synchronized()
# Apply Classifier
if classify:
pred = apply_classifier(pred, modelc, img, im0s)
# Process detections
for i, det in enumerate(pred): # detections per image
if webcam: # batch_size >= 1
p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
else:
p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
p = Path(p) # to Path
save_path = str(save_dir / p.name) # img.jpg
txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt
s += '%gx%g ' % img.shape[2:] # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
if len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
# Print results
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum() # detections per class
s += f'{n} {names[int(c)]}s, ' # add to string
# Write results
for *xyxy, conf, cls in reversed(det):
if save_txt: # Write to file
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format
with open(txt_path + '.txt', 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
if save_img or view_img: # Add bbox to image
cv2.imwrite("img.jpg", im0)
im0 = draw_name(im0, colors[int(cls)]) # 填上人名
label = f'{names[int(cls)]} {conf:.2f}'
plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)
# Print time (inference + NMS)
print(f'{s}Done. ({t2 - t1:.3f}s)')
# Stream results
if view_img:
cv2.imshow('Masks detect', im0)
# Save results (image with detections)
if save_img:
if dataset.mode == 'image':
cv2.imwrite(save_path, im0)
else: # 'video'
if vid_path != save_path: # new video
vid_path = save_path
if isinstance(vid_writer, cv2.VideoWriter):
vid_writer.release() # release previous video writer
fourcc = 'mp4v' # output video codec
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
vid_writer.write(im0)
if save_txt or save_img:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
print(f"Results saved to {save_dir}{s}")
print(f'Done. ({time.time() - t0:.3f}s)')
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default='runs/train/exp/weights/best.pt',
                        help='model.pt path(s)')
    parser.add_argument('--source', type=str, default='0', help='source')  # file/folder, 0 for webcam
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='display results')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--update', action='store_true', help='update all models')
    parser.add_argument('--project', default='runs/detect', help='save results to project/name')
    parser.add_argument('--name', default='exp', help='save results to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    global opt
    opt = parser.parse_args()
    print(opt)
    check_requirements()

    with torch.no_grad():
        if opt.update:  # update all models (to fix SourceChangeWarning)
            for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
                detect()
                strip_optimizer(opt.weights)
        else:
            detect()


if __name__ == '__main__':
    main()
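# Usage sketch (illustrative only, not executed): how this script is typically
# driven. The values below are placeholders (the argparse defaults and webcam
# index 0), not settings confirmed by this repository.
#
#   python detect.py --source 0 --view-img                  # live webcam, show annotated stream
#   python detect.py --source path/to/images --save-txt     # folder of images, dump label files
#
# Programmatic use would look like the (hypothetical) snippet below; it is
# equivalent to the CLI path, since main() reads sys.argv into the global `opt`:
#
#   import sys
#   sys.argv = ["detect.py", "--source", "0", "--view-img"]
#   main()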
|
"""
User app
"""
from django.urls import path,include
from .views import *
__all__=['urlpatterns','app_name']
app_name = 'users'
urlpatterns = [
# 首页
path('', IndexView.as_view(), name='index'),
# 登录
path('login/', LoginView.as_view(), name='login'),
# 登出
path('logout', LogoutView.as_view(), name='logout'),
# 单位列表
path('unit/list', UnitListView.as_view(), name='unit_list'),
# 单位添加
path('unit/add', AddUnitView.as_view(), name='unit_add'),
# 单位修改
path('unit/edit', EditUnitView.as_view(), name='unit_edit'),
# 单位删除
path('unit/delete', DeleteUnitView.as_view(), name='unit_delete'),
# 部门列表
path('dept/list', DeptListView.as_view(), name='dept_list'),
# 部门添加
path('dept/add', AddDeptView.as_view(), name='dept_add'),
# 部门修改
path('dept/edit', EditDeptView.as_view(), name='dept_edit'),
# 部门删除
path('dept/delete', DeleteDeptView.as_view(), name='dept_delete'),
# 用户列表
path('user/list', UserListView.as_view(), name='user_list'),
# 用户添加
path('user/add', AddUserView.as_view(), name='user_add'),
# 用户修改
path('user/edit', EditUserView.as_view(), name='user_edit'),
# 用户删除
path('user/delete', DeleteUserView.as_view(), name='user_delete'),
# # 用户激活请求
# path('email/active', SendActiveUserEmailView.as_view(), name='send_active_email'),
#
# # 用户激活处理
# path('active/<str:active_code>', ActiveUserView.as_view(), name='active'),
# 忘记密码
path('forget', ForgetPasswordView.as_view(), name='forget'),
# 重置密码
path('reset/<str:reset_code>', ResetPasswordView.as_view(), name='reset'),
# 修改密码
path('modify', ModifyPasswordView.as_view(), name='modify'),
# 用户信息
path('user/info', UserInfoView.as_view(), name='user_info'),
# 他人信息
path('other/user/info/<int:uid>', OtherUserInfoView.as_view(), name='other_user_info'),
# 修改用户信息
path('user/info/change', ChangeUserInfoView.as_view(), name='change_user_info'),
# 用户头像
path('user/avatar', UserAvatarView.as_view(), name='user_avatar'),
# 上传修改用户头像
path('user/avatar/change/upload', ChangeUserAvatarUploadView.as_view(), name='change_user_avatar_upload'),
# 选择修改用户头像
path('user/avatar/change/chose', ChangeUserAvatarChoseView.as_view(), name='change_user_avatar_chose'),
# 用户密码
path('user/password', UserPasswordView.as_view(), name='user_password'),
# 修改用户密码
path('user/password/change', ChangeUserPasswordView.as_view(), name='change_user_password'),
# 用户邮箱
path('user/email', UserEmailView.as_view(), name='user_email'),
# 用户邮箱验证码
path('user/email/code', SendChangeUserEmailCodeView.as_view(), name='user_email_code'),
# 修改用户邮箱
path('user/email/change', ChangeUserEmailView.as_view(), name='change_user_email'),
# 用户列表
path('user/list', UserListView.as_view(), name='user_list'),
# 添加用户
path('user/add', AddUserView.as_view(), name='add_user'),
# 修改用户
path('user/edit', EditUserView.as_view(), name='edit_user'),
# 删除用户
path('user/delete', AddUserView.as_view(), name='delete_user'),
# 用户登录日志
path('user/login/record', UserLoginRecordView.as_view(), name='login_record'),
# 用户操作日志
path('user/operation/record', UserOperationRecordView.as_view(), name='op_record'),
# 获取帮助
# path('help', AskHelpView.as_view(), name='help'),
]
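# Usage sketch (illustrative, not part of the routing): with app_name = 'users',
# these patterns are reversed under the 'users' namespace, assuming the project
# URLconf includes this module, e.g. path('', include('users.urls')).
#
#   from django.urls import reverse
#   reverse('users:login')                               # -> '/login/'
#   reverse('users:other_user_info', kwargs={'uid': 1})  # -> '/other/user/info/1'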
|
from .data_perturb import DataPerturb
from .data_perturb_uniform import DataPerturbUniform
from .data_perturb_normal import DataPerturbNormal
|
from typing import Dict, List, Optional

import mido

from lss.pad import Pad
from lss.utils import open_input, open_output


class BaseLaunchpad:
    row_count: int
    column_count: int
    name: str
    pads: Dict[int, "Pad"] = {}

    def __init__(self):
        self._outport = open_output(self.name + " In", autoreset=True)
        self._inport = open_input(self.name + " Out", autoreset=True)
        self.reset_all_pads()

    def hand_shake(self):
        raise NotImplementedError()

    def close(self):
        self.reset_all_pads()
        self._inport.close()
        self._outport.close()

    def reset_all_pads(self) -> None:
        self.pads = {}
        for x in range(self.column_count):
            for y in range(self.row_count):
                is_function_pad = y == 0
                pad = Pad(x, y, launchpad=self, is_function_pad=is_function_pad)
                pad.off()
                self.pads[pad.note] = pad

    def get_pad(self, note: int) -> Optional["Pad"]:
        return self.pads.get(note)

    def get_pads_in_column(self, x: int) -> List["Pad"]:
        """Returns a single column of pads, including function buttons for better UX"""
        pads_ids = [Pad.get_note(x, y) for y in range(9)]
        return [self.pads.get(idx) for idx in pads_ids]

    def get_pads_in_row(self, y: int) -> List["Pad"]:
        """Returns a single row of pads, skipping function buttons"""
        pads_ids = [Pad.get_note(x, y) for x in range(8)]
        return [self.pads.get(idx) for idx in pads_ids]

    def on(self, note: int, color: int = 4) -> None:
        self._outport.send(mido.Message("note_on", note=note, velocity=color))

    def off(self, note: int) -> None:
        self._outport.send(mido.Message("note_off", note=note))

    def get_pending_messages(self):
        return self._inport.iter_pending()
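
# Illustrative sketch only: BaseLaunchpad expects a concrete subclass to supply
# `name`, `row_count` and `column_count`. The class name, grid size, and port
# name below are assumptions for a generic 9x9 device, not taken from this repo.
class ExampleLaunchpad(BaseLaunchpad):
    row_count = 9         # 8 pad rows plus the function row (y == 0)
    column_count = 9
    name = "Launchpad"    # prefix used to open the "<name> In" / "<name> Out" MIDI ports

    def hand_shake(self):
        # A real device would answer a device-specific SysEx inquiry here.
        pass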
|