# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class KubernetesClusterNodePool(pulumi.CustomResource):
availability_zones: pulumi.Output[list]
"""
A list of Availability Zones in which the Nodes in this Node Pool should be created.
"""
enable_auto_scaling: pulumi.Output[bool]
"""
Whether to enable [auto-scaler](https://docs.microsoft.com/en-us/azure/aks/cluster-autoscaler). Defaults to `false`.
"""
enable_node_public_ip: pulumi.Output[bool]
"""
Should each node have a Public IP Address? Defaults to `false`.
"""
kubernetes_cluster_id: pulumi.Output[str]
"""
The ID of the Kubernetes Cluster where this Node Pool should exist. Changing this forces a new resource to be created.
"""
max_count: pulumi.Output[float]
"""
The maximum number of nodes which should exist within this Node Pool. Valid values are between `1` and `100` and must be greater than or equal to `min_count`.
"""
max_pods: pulumi.Output[float]
"""
The maximum number of pods that can run on each agent. Changing this forces a new resource to be created.
"""
min_count: pulumi.Output[float]
"""
The minimum number of nodes which should exist within this Node Pool. Valid values are between `1` and `100` and must be less than or equal to `max_count`.
"""
name: pulumi.Output[str]
"""
The name of the Node Pool which should be created within the Kubernetes Cluster. Changing this forces a new resource to be created.
"""
node_count: pulumi.Output[float]
"""
The initial number of nodes which should exist within this Node Pool. Valid values are between `1` and `100` and must be a value in the range `min_count` - `max_count`.
"""
node_labels: pulumi.Output[dict]
"""
A map of Kubernetes labels which should be applied to nodes in this Node Pool.
"""
node_taints: pulumi.Output[list]
"""
A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g. `key=value:NoSchedule`).
"""
os_disk_size_gb: pulumi.Output[float]
"""
The Agent Operating System disk size in GB. Changing this forces a new resource to be created.
"""
os_type: pulumi.Output[str]
"""
The Operating System which should be used for this Node Pool. Changing this forces a new resource to be created. Possible values are `Linux` and `Windows`. Defaults to `Linux`.
"""
tags: pulumi.Output[dict]
"""
A mapping of tags to assign to the resource.
"""
vm_size: pulumi.Output[str]
"""
The SKU which should be used for the Virtual Machines used in this Node Pool. Changing this forces a new resource to be created.
"""
vnet_subnet_id: pulumi.Output[str]
"""
The ID of the Subnet where this Node Pool should exist.
"""
def __init__(__self__, resource_name, opts=None, availability_zones=None, enable_auto_scaling=None, enable_node_public_ip=None, kubernetes_cluster_id=None, max_count=None, max_pods=None, min_count=None, name=None, node_count=None, node_labels=None, node_taints=None, os_disk_size_gb=None, os_type=None, tags=None, vm_size=None, vnet_subnet_id=None, __props__=None, __name__=None, __opts__=None):
"""
Manages a Node Pool within a Kubernetes Cluster
> **NOTE:** Multiple Node Pools are only supported when the Kubernetes Cluster is using Virtual Machine Scale Sets.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_kubernetes_cluster = azure.containerservice.KubernetesCluster("exampleKubernetesCluster",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
dns_prefix="exampleaks1",
default_node_pool={
"name": "default",
"node_count": 1,
"vm_size": "Standard_D2_v2",
},
service_principal={
"client_id": "00000000-0000-0000-0000-000000000000",
"client_secret": "00000000000000000000000000000000",
})
example_kubernetes_cluster_node_pool = azure.containerservice.KubernetesClusterNodePool("exampleKubernetesClusterNodePool",
kubernetes_cluster_id=example_kubernetes_cluster.id,
vm_size="Standard_DS2_v2",
node_count=1,
tags={
"Environment": "Production",
})
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] availability_zones: A list of Availability Zones in which the Nodes in this Node Pool should be created.
:param pulumi.Input[bool] enable_auto_scaling: Whether to enable [auto-scaler](https://docs.microsoft.com/en-us/azure/aks/cluster-autoscaler). Defaults to `false`.
:param pulumi.Input[bool] enable_node_public_ip: Should each node have a Public IP Address? Defaults to `false`.
:param pulumi.Input[str] kubernetes_cluster_id: The ID of the Kubernetes Cluster where this Node Pool should exist. Changing this forces a new resource to be created.
:param pulumi.Input[float] max_count: The maximum number of nodes which should exist within this Node Pool. Valid values are between `1` and `100` and must be greater than or equal to `min_count`.
:param pulumi.Input[float] max_pods: The maximum number of pods that can run on each agent. Changing this forces a new resource to be created.
:param pulumi.Input[float] min_count: The minimum number of nodes which should exist within this Node Pool. Valid values are between `1` and `100` and must be less than or equal to `max_count`.
:param pulumi.Input[str] name: The name of the Node Pool which should be created within the Kubernetes Cluster. Changing this forces a new resource to be created.
:param pulumi.Input[float] node_count: The initial number of nodes which should exist within this Node Pool. Valid values are between `1` and `100` and must be a value in the range `min_count` - `max_count`.
:param pulumi.Input[dict] node_labels: A map of Kubernetes labels which should be applied to nodes in this Node Pool.
:param pulumi.Input[list] node_taints: A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g. `key=value:NoSchedule`).
:param pulumi.Input[float] os_disk_size_gb: The Agent Operating System disk size in GB. Changing this forces a new resource to be created.
:param pulumi.Input[str] os_type: The Operating System which should be used for this Node Pool. Changing this forces a new resource to be created. Possible values are `Linux` and `Windows`. Defaults to `Linux`.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] vm_size: The SKU which should be used for the Virtual Machines used in this Node Pool. Changing this forces a new resource to be created.
:param pulumi.Input[str] vnet_subnet_id: The ID of the Subnet where this Node Pool should exist.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['availability_zones'] = availability_zones
__props__['enable_auto_scaling'] = enable_auto_scaling
__props__['enable_node_public_ip'] = enable_node_public_ip
if kubernetes_cluster_id is None:
raise TypeError("Missing required property 'kubernetes_cluster_id'")
__props__['kubernetes_cluster_id'] = kubernetes_cluster_id
__props__['max_count'] = max_count
__props__['max_pods'] = max_pods
__props__['min_count'] = min_count
__props__['name'] = name
__props__['node_count'] = node_count
__props__['node_labels'] = node_labels
__props__['node_taints'] = node_taints
__props__['os_disk_size_gb'] = os_disk_size_gb
__props__['os_type'] = os_type
__props__['tags'] = tags
if vm_size is None:
raise TypeError("Missing required property 'vm_size'")
__props__['vm_size'] = vm_size
__props__['vnet_subnet_id'] = vnet_subnet_id
super(KubernetesClusterNodePool, __self__).__init__(
'azure:containerservice/kubernetesClusterNodePool:KubernetesClusterNodePool',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, availability_zones=None, enable_auto_scaling=None, enable_node_public_ip=None, kubernetes_cluster_id=None, max_count=None, max_pods=None, min_count=None, name=None, node_count=None, node_labels=None, node_taints=None, os_disk_size_gb=None, os_type=None, tags=None, vm_size=None, vnet_subnet_id=None):
"""
Get an existing KubernetesClusterNodePool resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] availability_zones: A list of Availability Zones in which the Nodes in this Node Pool should be created.
:param pulumi.Input[bool] enable_auto_scaling: Whether to enable [auto-scaler](https://docs.microsoft.com/en-us/azure/aks/cluster-autoscaler). Defaults to `false`.
:param pulumi.Input[bool] enable_node_public_ip: Should each node have a Public IP Address? Defaults to `false`.
:param pulumi.Input[str] kubernetes_cluster_id: The ID of the Kubernetes Cluster where this Node Pool should exist. Changing this forces a new resource to be created.
:param pulumi.Input[float] max_count: The maximum number of nodes which should exist within this Node Pool. Valid values are between `1` and `100` and must be greater than or equal to `min_count`.
:param pulumi.Input[float] max_pods: The maximum number of pods that can run on each agent. Changing this forces a new resource to be created.
:param pulumi.Input[float] min_count: The minimum number of nodes which should exist within this Node Pool. Valid values are between `1` and `100` and must be less than or equal to `max_count`.
:param pulumi.Input[str] name: The name of the Node Pool which should be created within the Kubernetes Cluster. Changing this forces a new resource to be created.
:param pulumi.Input[float] node_count: The initial number of nodes which should exist within this Node Pool. Valid values are between `1` and `100` and must be a value in the range `min_count` - `max_count`.
:param pulumi.Input[dict] node_labels: A map of Kubernetes labels which should be applied to nodes in this Node Pool.
:param pulumi.Input[list] node_taints: A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g. `key=value:NoSchedule`).
:param pulumi.Input[float] os_disk_size_gb: The Agent Operating System disk size in GB. Changing this forces a new resource to be created.
:param pulumi.Input[str] os_type: The Operating System which should be used for this Node Pool. Changing this forces a new resource to be created. Possible values are `Linux` and `Windows`. Defaults to `Linux`.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] vm_size: The SKU which should be used for the Virtual Machines used in this Node Pool. Changing this forces a new resource to be created.
:param pulumi.Input[str] vnet_subnet_id: The ID of the Subnet where this Node Pool should exist.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["availability_zones"] = availability_zones
__props__["enable_auto_scaling"] = enable_auto_scaling
__props__["enable_node_public_ip"] = enable_node_public_ip
__props__["kubernetes_cluster_id"] = kubernetes_cluster_id
__props__["max_count"] = max_count
__props__["max_pods"] = max_pods
__props__["min_count"] = min_count
__props__["name"] = name
__props__["node_count"] = node_count
__props__["node_labels"] = node_labels
__props__["node_taints"] = node_taints
__props__["os_disk_size_gb"] = os_disk_size_gb
__props__["os_type"] = os_type
__props__["tags"] = tags
__props__["vm_size"] = vm_size
__props__["vnet_subnet_id"] = vnet_subnet_id
return KubernetesClusterNodePool(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
import platform
import subprocess
import unittest
from conans import tools
from conans.client.conf.detect import detect_defaults_settings
from conans.test.utils.tools import TestBufferConanOutput
class DetectTest(unittest.TestCase):
def detect_default_compilers_test(self):
platform_default_compilers = {
"Linux": "gcc",
"Darwin": "apple-clang",
"Windows": "Visual Studio"
}
output = TestBufferConanOutput()
result = detect_defaults_settings(output)
# result is a list of tuples (name, value) so converting it to dict
result = dict(result)
platform_compiler = platform_default_compilers.get(platform.system(), None)
if platform_compiler is not None:
self.assertEqual(result.get("compiler", None), platform_compiler)
def detect_default_in_mac_os_using_gcc_as_default_test(self):
"""
Test if gcc in Mac OS X is using apple-clang as frontend
"""
# See: https://github.com/conan-io/conan/issues/2231
if platform.system() != "Darwin":
return
try:
output = subprocess.check_output(["gcc", "--version"], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
# gcc is not installed or errored out (not a test scenario)
return
if b"clang" not in output:
# Not a test scenario: Apple's gcc should report clang in its output
# see: https://stackoverflow.com/questions/19535422/os-x-10-9-gcc-links-to-clang
raise Exception("Apple gcc doesn't point to clang with gcc frontend anymore! please check")
output = TestBufferConanOutput()
with tools.environment_append({"CC": "gcc"}):
result = detect_defaults_settings(output)
# result is a list of tuples (name, value) so converting it to dict
result = dict(result)
# No compiler should be detected
self.assertIsNone(result.get("compiler", None))
self.assertIn("gcc detected as a frontend using apple-clang", output)
self.assertIsNotNone(output.error)
|
import emmental
import numpy as np
from fonduer import Meta
from emmental.modules.embedding_module import EmbeddingModule
from emmental.data import EmmentalDataLoader
from emmental.model import EmmentalModel
from emmental.learner import EmmentalLearner
from fonduer.learning.utils import collect_word_counter
from fonduer.learning.dataset import FonduerDataset
from fonduer.learning.task import create_task
from troy200_utils import entity_level_f1
ABSTAIN = -1
FALSE = 0
TRUE = 1
def get_methods(ATTRIBUTE, gold, gold_file, all_docs):
train_docs = all_docs[0]
dev_docs = all_docs[1]
test_docs = all_docs[2]
def train_model(cands, F, align_type, model_type="LogisticRegression"):
# Extract candidates and features based on the align type (row/column)
align_val = 0 if align_type == "row" else 1
train_cands = cands[align_val][0]
F_train = F[align_val][0]
train_marginals = np.array([[0,1] if gold[align_val](x) else [1,0] for x in train_cands[0]])
# 1.) Setup training config
config = {
"meta_config": {"verbose": True},
"model_config": {"model_path": None, "device": 0, "dataparallel": False},
"learner_config": {
"n_epochs": 50,
"optimizer_config": {"lr": 0.001, "l2": 0.0},
"task_scheduler": "round_robin",
},
"logging_config": {
"evaluation_freq": 1,
"counter_unit": "epoch",
"checkpointing": False,
"checkpointer_config": {
"checkpoint_metric": {f"{ATTRIBUTE}/{ATTRIBUTE}/train/loss": "min"},
"checkpoint_freq": 1,
"checkpoint_runway": 2,
"clear_intermediate_checkpoints": True,
"clear_all_checkpoints": True,
},
},
}
emmental.init(Meta.log_path)
emmental.Meta.update_config(config=config)
# 2.) Collect word counter from training data
word_counter = collect_word_counter(train_cands)
# 3.) Generate word embedding module for LSTM model
# (in Logistic Regression, we generate it since Fonduer dataset requires word2id dict)
# Generate special tokens
arity = 2
specials = []
for i in range(arity):
specials += [f"~~[[{i}", f"{i}]]~~"]
emb_layer = EmbeddingModule(
word_counter=word_counter, word_dim=300, specials=specials
)
# 4.) Generate dataloader for training set
# No noise in Gold labels
train_dataloader = EmmentalDataLoader(
task_to_label_dict={ATTRIBUTE: "labels"},
dataset=FonduerDataset(
ATTRIBUTE,
train_cands[0],
F_train[0],
emb_layer.word2id,
train_marginals,
),
split="train",
batch_size=100,
shuffle=True,
)
# 5.) Training
tasks = create_task(
ATTRIBUTE, 2, F_train[0].shape[1], 2, emb_layer, model=model_type # "LSTM"
)
model = EmmentalModel(name=f"{ATTRIBUTE}_task")
for task in tasks:
model.add_task(task)
emmental_learner = EmmentalLearner()
emmental_learner.learn(model, [train_dataloader])
return (model, emb_layer)
def eval_model(model, emb_layer, cands, F, align_type = "row"):
# Extract candidates and features based on the align type (row/column)
align_val = 0 if align_type == "row" else 1
train_cands = cands[align_val][0]
dev_cands = cands[align_val][1]
test_cands = cands[align_val][2]
F_train = F[align_val][0]
F_dev = F[align_val][1]
F_test = F[align_val][2]
row_on = True if align_type == "row" else False
col_on = True if align_type == "col" else False
# Generate dataloader for test data
test_dataloader = EmmentalDataLoader(
task_to_label_dict={ATTRIBUTE: "labels"},
dataset=FonduerDataset(
ATTRIBUTE, test_cands[0], F_test[0], emb_layer.word2id, 2
),
split="test",
batch_size=100,
shuffle=False,
)
test_preds = model.predict(test_dataloader, return_preds=True)
positive = np.where(np.array(test_preds["probs"][ATTRIBUTE])[:, TRUE] > 0.6)
true_pred = [test_cands[0][_] for _ in positive[0]]
test_results = entity_level_f1(true_pred, gold_file, ATTRIBUTE, test_docs, row_on=row_on, col_on=col_on)
# Run on dev and train set for validation
# We run the predictions also on our training and dev set, to validate that everything seems to work smoothly
# Generate dataloader for dev data
dev_dataloader = EmmentalDataLoader(
task_to_label_dict={ATTRIBUTE: "labels"},
dataset=FonduerDataset(
ATTRIBUTE, dev_cands[0], F_dev[0], emb_layer.word2id, 2
),
split="test",
batch_size=100,
shuffle=False,
)
dev_preds = model.predict(dev_dataloader, return_preds=True)
positive_dev = np.where(np.array(dev_preds["probs"][ATTRIBUTE])[:, TRUE] > 0.6)
true_dev_pred = [dev_cands[0][_] for _ in positive_dev[0]]
dev_results = entity_level_f1(true_dev_pred, gold_file, ATTRIBUTE, dev_docs, row_on=row_on, col_on=col_on)
# Generate dataloader for train data
train_dataloader = EmmentalDataLoader(
task_to_label_dict={ATTRIBUTE: "labels"},
dataset=FonduerDataset(
ATTRIBUTE, train_cands[0], F_train[0], emb_layer.word2id, 2
),
split="test",
batch_size=100,
shuffle=False,
)
train_preds = model.predict(train_dataloader, return_preds=True)
positive_train = np.where(np.array(train_preds["probs"][ATTRIBUTE])[:, TRUE] > 0.6)
true_train_pred = [train_cands[0][_] for _ in positive_train[0]]
train_results = entity_level_f1(true_train_pred, gold_file, ATTRIBUTE, train_docs, row_on=row_on, col_on=col_on)
return [train_results, dev_results, test_results]
return (train_model, eval_model)
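# A minimal usage sketch (hypothetical variable names, following the conventions above):
# get_methods returns the two closures, which can then train a row-aligned model and
# report entity-level F1 on the train/dev/test splits.
#   train_model, eval_model = get_methods(ATTRIBUTE, gold, gold_file, all_docs)
#   model, emb_layer = train_model(cands, F, "row", model_type="LogisticRegression")
#   train_results, dev_results, test_results = eval_model(model, emb_layer, cands, F, "row")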
|
##
# Copyright (c) 2012-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from caldavclientlibrary.protocol.http.data.string import ResponseDataString
from caldavclientlibrary.protocol.webdav.definitions import statuscodes, \
headers
from caldavclientlibrary.protocol.webdav.propfind import PropFind
from contrib.performance.sqlusage.requests.httpTests import HTTPTestBase
from caldavclientlibrary.protocol.caldav.definitions import csxml
class PropfindInviteTest(HTTPTestBase):
"""
A propfind operation
"""
def __init__(self, label, sessions, logFilePath, logFilePrefix, depth=1):
super(PropfindInviteTest, self).__init__(label, sessions, logFilePath, logFilePrefix)
self.depth = headers.Depth1 if depth == 1 else headers.Depth0
def doRequest(self):
"""
Execute the actual HTTP request.
"""
props = (
csxml.invite,
)
# Create WebDAV propfind
request = PropFind(self.sessions[0], self.sessions[0].calendarHref, self.depth, props)
result = ResponseDataString()
request.setOutput(result)
# Process it
self.sessions[0].runSession(request)
# If it's a 207 (Multi-Status) we want to parse the XML
if request.getStatusCode() == statuscodes.MultiStatus:
pass
else:
raise RuntimeError("Propfind request failed: %s" % (request.getStatusCode(),))
|
# -*- coding: utf-8 -*-
# Copyright (c) 2011 Plivo Team. See LICENSE for details
from gevent import monkey
monkey.patch_all()
import grp
import os
import pwd
import signal
import sys
import optparse
from flask import Flask
import gevent
from gevent.wsgi import WSGIServer
from gevent.pywsgi import WSGIServer as PyWSGIServer
from plivo.rest.freeswitch.cacheapi import PlivoCacheApi
import plivo.utils.daemonize
from plivo.rest.freeswitch import cacheurls, helpers, cacheapi
from plivo.utils.logger import StdoutLogger, FileLogger, SysLogger, DummyLogger, HTTPLogger
class PlivoCacheServer(PlivoCacheApi):
"""Class PlivoCacheServer"""
name = 'PlivoCacheServer'
def __init__(self, configfile, daemon=False,
pidfile='/tmp/plivo_cache.pid'):
self._daemon = daemon
self._run = False
self._pidfile = pidfile
self.configfile = configfile
self._wsgi_mode = WSGIServer
# create flask app
self.app = Flask(self.name)
# load config
self.cache = None
self._config = None
self.load_config()
# expose API functions to flask app
for path, func_desc in cacheurls.URLS.iteritems():
func, methods = func_desc
fn = getattr(self, func.__name__)
self.app.add_url_rule(path, func.__name__, fn, methods=methods)
self.log.info("Listening HTTP")
self.log.info("%s mode set" % str(self._wsgi_mode))
self.http_server = self._wsgi_mode((self.http_host, self.http_port),
self.app, log=self.log)
def create_cache(self, config):
# load cache params
self.redis_host = config.get('cache_server', 'REDIS_HOST', default='')
self.redis_port = config.get('cache_server', 'REDIS_PORT', default='')
self.redis_db = config.get('cache_server', 'REDIS_DB', default='')
self.redis_pw = config.get('cache_server', 'REDIS_PASSWORD', default=None)
self.proxy_url = config.get('cache_server', 'PROXY_URL', default=None)
self.http_timeout = int(config.get('cache_server', 'HTTP_TIMEOUT', default=60))
if self.redis_host and self.redis_port and self.redis_db:
self.cache = cacheapi.ResourceCache(self.redis_host,
int(self.redis_port),
int(self.redis_db),
self.redis_pw,
self.proxy_url,
self.http_timeout)
return True
self.log.error("Cannot run cache server, cache not set !")
raise Exception("Cannot run cache server, cache not set !")
def get_log(self):
return self.log
def get_config(self):
return self._config
def get_cache(self):
return self.cache
def create_logger(self, config):
"""This will create a logger using helpers.PlivoConfig instance
Based on the settings in the configuration file,
LOG_TYPE will determine if we will log in file, syslog, stdout, http or dummy (no log)
"""
if self._daemon is False:
logtype = config.get('cache_server', 'LOG_TYPE')
if logtype == 'dummy':
new_log = DummyLogger()
else:
new_log = StdoutLogger()
new_log.set_debug()
self.app.debug = True
self.log = new_log
else:
logtype = config.get('cache_server', 'LOG_TYPE')
if logtype == 'file':
logfile = config.get('cache_server', 'LOG_FILE')
new_log = FileLogger(logfile)
elif logtype == 'syslog':
syslogaddress = config.get('cache_server', 'SYSLOG_ADDRESS')
syslogfacility = config.get('cache_server', 'SYSLOG_FACILITY')
new_log = SysLogger(syslogaddress, syslogfacility)
elif logtype == 'dummy':
new_log = DummyLogger()
elif logtype == 'http':
url = config.get('cache_server', 'HTTP_LOG_URL')
method = config.get('cache_server', 'HTTP_LOG_METHOD')
fallback_file = config.get('cache_server', 'HTTP_LOG_FILE_FAILURE')
new_log = HTTPLogger(url=url, method=method, fallback_file=fallback_file)
else:
new_log = StdoutLogger()
log_level = config.get('cache_server', 'LOG_LEVEL', default='INFO')
if log_level == 'DEBUG':
new_log.set_debug()
self.app.debug = True
elif log_level == 'INFO':
new_log.set_info()
self.app.debug = False
elif log_level == 'ERROR':
new_log.set_error()
self.app.debug = False
elif log_level in ('WARN', 'WARNING'):
new_log.set_warn()
self.app.debug = False
new_log.name = self.name
self.log = new_log
self.app._logger = self.log
def load_config(self, reload=False):
# backup config
backup_config = self._config
# create config
config = helpers.PlivoConfig(self.configfile)
try:
# read config
config.read()
if not reload:
# create first logger if starting
self.create_logger(config=config)
self.log.info("Starting ...")
self.log.warn("Logger %s" % str(self.log))
self.app.secret_key = config.get('cache_server', 'SECRET_KEY')
self.app.config['MAX_CONTENT_LENGTH'] = 1024 * 10240
self.http_address = config.get('cache_server', 'HTTP_ADDRESS')
self.http_host, http_port = self.http_address.split(':', 1)
self.http_port = int(http_port)
# load cache params
self.redis_host = config.get('cache_server', 'REDIS_HOST', default='')
self.redis_port = config.get('cache_server', 'REDIS_PORT', default='')
self.redis_db = config.get('cache_server', 'REDIS_DB', default='')
# create new cache
self.create_cache(config=config)
self.log.warn("Cache %s" % str(self.cache))
# set wsgi mode
_wsgi_mode = config.get('cache_server', 'WSGI_MODE', default='wsgi')
if _wsgi_mode in ('pywsgi', 'python', 'py'):
self._wsgi_mode = PyWSGIServer
else:
self._wsgi_mode = WSGIServer
if reload:
# create new logger if reloading
self.create_logger(config=config)
self.log.warn("New logger %s" % str(self.log))
# create new cache
self.create_cache(config=config)
self.log.warn("New cache %s" % str(self.cache))
# allowed ips to access cache server
allowed_ips = config.get('common', 'ALLOWED_IPS', default='')
if not allowed_ips.strip():
self.allowed_ips = []
else:
self.allowed_ips = [ ip.strip() for ip in allowed_ips.split(',') ]
# set new config
self._config = config
self.log.info("Config : %s" % str(self._config.dumps()))
except Exception, e:
if backup_config:
self._config = backup_config
self.load_config()
self.log.warn("Error reloading config: %s" % str(e))
self.log.warn("Rollback to the last config")
self.log.info("Config : %s" % str(self._config.dumps()))
else:
sys.stderr.write("Error loading config: %s" % str(e))
sys.stderr.flush()
raise e
def reload(self):
self.log.warn("Reload ...")
self.load_config(reload=True)
self.log.warn("Reload done")
def do_daemon(self):
"""This will daemonize the current application
Two settings from the configuration file are used to run the
daemon under a specific user & group.
USER : determines the user running the daemon
GROUP : determines the group running the daemon
"""
# get user/group from config
user = self._config.get('cache_server', 'USER', default=None)
group = self._config.get('cache_server', 'GROUP', default=None)
if not user or not group:
uid = os.getuid()
user = pwd.getpwuid(uid)[0]
gid = os.getgid()
group = grp.getgrgid(gid)[0]
# daemonize now
plivo.utils.daemonize.daemon(user, group, path='/',
pidfile=self._pidfile,
other_groups=())
def sig_term(self, *args):
"""if we receive a term signal, we will shutdown properly
"""
self.log.warn("Shutdown ...")
self.stop()
sys.exit(0)
def sig_hup(self, *args):
self.reload()
def stop(self):
"""Method stop stop the infinite loop from start method
and close the socket
"""
self._run = False
def start(self):
"""start method is where we decide to :
* catch term signal
* run as daemon
* start the http server
"""
self.log.info("CacheServer starting ...")
# catch SIG_TERM
gevent.signal(signal.SIGTERM, self.sig_term)
gevent.signal(signal.SIGHUP, self.sig_hup)
# run
self._run = True
if self._daemon:
self.do_daemon()
# start http server
self.log.info("CacheServer started at: 'http://%s'" % self.http_address)
# Start cache server
try:
self.http_server.serve_forever()
except (SystemExit, KeyboardInterrupt):
pass
# finish here
self.log.info("CacheServer Exited")
def main():
parser = optparse.OptionParser()
parser.add_option("-c", "--configfile", action="store", type="string",
dest="configfile",
help="use plivo config file (argument is mandatory)",
metavar="CONFIGFILE")
parser.add_option("-p", "--pidfile", action="store", type="string",
dest="pidfile",
help="write pid to PIDFILE (argument is mandatory)",
metavar="PIDFILE")
(options, args) = parser.parse_args()
configfile = options.configfile
pidfile = options.pidfile
if not configfile:
configfile = './etc/plivo/default.conf'
if not os.path.isfile(configfile):
raise SystemExit("Error : Default config file mising at '%s'. Please specify -c <configfilepath>" %configfile)
if not pidfile:
pidfile='/tmp/plivo_cache.pid'
server = PlivoCacheServer(configfile=configfile, pidfile=pidfile,
daemon=False)
server.start()
if __name__ == '__main__':
main()
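# Example invocation (script name and file paths are illustrative); -c and -p map to
# the optparse options defined in main():
#   python cacheserver.py -c /etc/plivo/cache.conf -p /tmp/plivo_cache.pid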
|
import lightgbm as lgb
import neptune.new as neptune
import neptune.new.integrations.optuna as optuna_utils
import optuna
from sklearn.datasets import load_breast_cancer
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
def objective(trial):
data, target = load_breast_cancer(return_X_y=True)
train_x, test_x, train_y, test_y = train_test_split(data, target, test_size=0.25)
dtrain = lgb.Dataset(train_x, label=train_y)
param = {
"verbose": -1,
"objective": "binary",
"metric": "binary_logloss",
"num_leaves": trial.suggest_int("num_leaves", 2, 256),
"feature_fraction": trial.suggest_uniform("feature_fraction", 0.2, 1.0),
"bagging_fraction": trial.suggest_uniform("bagging_fraction", 0.2, 1.0),
"min_child_samples": trial.suggest_int("min_child_samples", 3, 100),
}
gbm = lgb.train(param, dtrain)
preds = gbm.predict(test_x)
accuracy = roc_auc_score(test_y, preds)
return accuracy
# Create a Neptune Run
run = neptune.init(
api_token="ANONYMOUS", project="common/optuna-integration"
) # you can pass your credentials here
# Create a NeptuneCallback for Optuna
neptune_callback = optuna_utils.NeptuneCallback(
run,
plots_update_freq=10,
log_plot_slice=False,
log_plot_contour=False,
)
# Pass NeptuneCallback to Optuna Study .optimize()
study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=50, callbacks=[neptune_callback])
# Stop logging to a Neptune Run
run.stop()
|
# encoding: utf-8
"""
config_file_backing_store.py
Created by Scott on 2014-08-12.
Copyright (c) 2014 Scott Rice. All rights reserved.
"""
import ConfigParser
import backing_store
class ConfigFileBackingStore(backing_store.BackingStore):
def __init__(self, path):
super(ConfigFileBackingStore, self).__init__(path)
self.configParser = ConfigParser.RawConfigParser()
self.configParser.read(self.path)
def identifiers(self):
return self.configParser.sections()
def add_identifier(self, ident):
try:
self.configParser.add_section(ident)
except ConfigParser.DuplicateSectionError:
raise ValueError("The identifier `%s` already exists" % str(ident))
def remove_identifier(self, ident):
self.configParser.remove_section(ident)
def keys(self, ident):
try:
return self.configParser.options(ident)
except ConfigParser.NoSectionError:
raise ValueError("No identifier named `%s` exists" % str(ident))
def get(self, ident, key, default=None):
try:
val = self.configParser.get(ident, key.lower())
return val if val != "" else default
except ConfigParser.NoOptionError:
return default
def set(self, ident, key, value):
self.configParser.set(ident, key.lower(), value)
def save(self):
try:
with open(self.path, "w") as configFile:
self.configParser.write(configFile)
except IOError:
raise IOError("Cannot save data to `%s`. Permission Denied" % self.path)
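# A short usage sketch (hypothetical path and values): config sections act as
# identifiers and options act as keys.
#   store = ConfigFileBackingStore("/tmp/example.ini")
#   store.add_identifier("game-1234")
#   store.set("game-1234", "name", "Portal 2")
#   store.save()
#   assert store.get("game-1234", "name") == "Portal 2"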
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_searchengine', [dirname(__file__)])
except ImportError:
import _searchengine
return _searchengine
if fp is not None:
try:
_mod = imp.load_module('_searchengine', fp, pathname, description)
finally:
fp.close()
return _mod
_searchengine = swig_import_helper()
del swig_import_helper
else:
import _searchengine
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
class TSearchIndex(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, TSearchIndex, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, TSearchIndex, name)
__repr__ = _swig_repr
def __init__(self, indexLocation):
this = _searchengine.new_TSearchIndex(indexLocation)
try:
self.this.append(this)
except Exception:
self.this = this
__swig_destroy__ = _searchengine.delete_TSearchIndex
__del__ = lambda self: None
def ExecuteQuery(self, keysStr, firstResult2Return, results2Return, objectsStr):
return _searchengine.TSearchIndex_ExecuteQuery(self, keysStr, firstResult2Return, results2Return, objectsStr)
TSearchIndex_swigregister = _searchengine.TSearchIndex_swigregister
TSearchIndex_swigregister(TSearchIndex)
cvar = _searchengine.cvar
OBJ_BITS = cvar.OBJ_BITS
SEGMENT_POS_BITS = cvar.SEGMENT_POS_BITS
WEIGHT_BITS = cvar.WEIGHT_BITS
NON_SEGMENT_BITS = cvar.NON_SEGMENT_BITS
MAX_SEGMENTS_PER_OBJECT = cvar.MAX_SEGMENTS_PER_OBJECT
MAX_WORDS4QUERY = cvar.MAX_WORDS4QUERY
MAX_KEYS2CONSIDER = cvar.MAX_KEYS2CONSIDER
CRUDE_FILTER_TRIM_PROPORTION = cvar.CRUDE_FILTER_TRIM_PROPORTION
MAX_OCCURENCES2RETURN = cvar.MAX_OCCURENCES2RETURN
# This file is compatible with both classic and new-style classes.
|
# -*- coding: utf-8 -*-
"""The graphical part of a Control Parameters step"""
import seamm
from seamm_util import ureg, Q_, units_class # noqa: F401
import seamm_widgets as sw
import control_parameters_step # noqa: F401
import Pmw
import pprint # noqa: F401
import tkinter as tk
import tkinter.ttk as ttk
class TkControlParameters(seamm.TkNode):
"""
The graphical part of a Control Parameters step in a flowchart.
Attributes
----------
tk_flowchart : TkFlowchart = None
The flowchart that we belong to.
node : Node = None
The corresponding node of the non-graphical flowchart
namespace : str
The namespace of the current step.
sub_tk_flowchart : TkFlowchart
A graphical Flowchart representing a subflowchart
canvas: tkCanvas = None
The Tk Canvas to draw on
dialog : Dialog
The Pmw dialog object
x : int = None
The x-coordinate of the center of the picture of the node
y : int = None
The y-coordinate of the center of the picture of the node
w : int = 200
The width in pixels of the picture of the node
h : int = 50
The height in pixels of the picture of the node
self[widget] : dict
A dictionary of tk widgets built using the information
contained in Control Parameters_parameters.py
See Also
--------
ControlParameters, TkControlParameters,
ControlParametersParameters,
"""
def __init__(
self, tk_flowchart=None, node=None, canvas=None, x=None, y=None, w=200, h=50
):
"""
Initialize a graphical node.
Parameters
----------
tk_flowchart: Tk_Flowchart
The graphical flowchart that we are in.
node: Node
The non-graphical node for this step.
namespace: str
The stevedore namespace for finding sub-nodes.
canvas: Canvas
The Tk canvas to draw on.
x: float
The x position of the node's center on the canvas.
y: float
The y position of the node's center on the canvas.
w: float
The node's graphical width, in pixels.
h: float
The node's graphical height, in pixels.
Returns
-------
None
"""
self.dialog = None
self._variables = None # temporary copy when editing
self._new_variable_dialog = None
self._new = {} # Widgets for the new variable dialog
self._edit_variable_dialog = None
self._edit = {} # Widgets for the edit variable dialog
super().__init__(
tk_flowchart=tk_flowchart, node=node, canvas=canvas, x=x, y=y, w=w, h=h
)
def create_dialog(self):
"""
Create the dialog. A set of widgets will be chosen by default
based on what is specified in the Control Parameters_parameters
module.
Parameters
----------
None
Returns
-------
None
See Also
--------
TkControlParameters.reset_dialog
"""
frame = super().create_dialog("Edit Parameters")
# make it large!
screen_w = self.dialog.winfo_screenwidth()
screen_h = self.dialog.winfo_screenheight()
w = int(0.9 * screen_w)
h = int(0.8 * screen_h)
x = int(0.05 * screen_w / 2)
y = int(0.1 * screen_h / 2)
self.dialog.geometry("{}x{}+{}+{}".format(w, h, x, y))
# The information about widgets is held in self['xxxx'], i.e. this
# class is in part a dictionary of widgets. This makes accessing
# the widgets easier and allows loops, etc.
# Shortcut for parameters
P = self.node.parameters
# Then create the widgets
self["variables"] = sw.ScrolledColumns(
frame,
columns=[
"",
"Name",
"Type",
"NArgs",
"Optional",
"Overwrite?",
"Default",
"Help",
],
)
# any remaining widgets
for key in P:
if key not in self:
self[key] = P[key].widget(frame)
def reset_dialog(self, widget=None):
"""Layout the widgets in the dialog.
The widgets are chosen by default from the information in
Control Parameters_parameters.
This function simply lays them out row by row with
aligned labels. You may wish a more complicated layout that
is controlled by values of some of the control parameters.
If so, edit or override this method
Parameters
----------
widget : Tk Widget = None
Returns
-------
None
See Also
--------
TkControlParameters.create_dialog
"""
# Remove any widgets previously packed
frame = self["frame"]
for slave in frame.grid_slaves():
slave.grid_forget()
# Shortcut for parameters
P = self.node.parameters
# keep track of the row in a variable, so that the layout stays flexible
# if, e.g., rows are skipped based on a control such as 'method'
row = 0
widgets = []
# Variables table first
key = "variables"
self[key].grid(row=row, column=0, sticky=tk.NSEW)
frame.columnconfigure(0, weight=1)
frame.rowconfigure(row, weight=1)
widgets.append(self[key])
row += 1
for key in P:
if self[key] not in widgets:
self[key].grid(row=row, column=0, sticky=tk.EW)
widgets.append(self[key])
row += 1
# Align the labels
sw.align_labels(widgets)
def right_click(self, event):
"""
Handles the right click event on the node.
Parameters
----------
event : Tk Event
Returns
-------
None
See Also
--------
TkControlParameters.edit
"""
super().right_click(event)
self.popup_menu.add_command(label="Edit..", command=self.edit)
self.popup_menu.tk_popup(event.x_root, event.y_root, 0)
def edit(self):
"""Present a dialog for editing the Control Parameters input
Parameters
----------
None
Returns
-------
None
See Also
--------
TkControlParameters.right_click
"""
if self.dialog is None:
self.create_dialog()
P = self.node.parameters
self._variables = P["variables"].value
self.reset_dialog()
self.reset_table()
self.dialog.activate(geometry="centerscreenfirst")
def handle_dialog(self, result):
"""Handle the closing of the edit dialog
What to do depends on the button used to close the dialog. If
the user closes it by clicking the 'x' of the dialog window,
None is returned, which we take as equivalent to cancel.
Parameters
----------
result : None or str
The value of this variable depends on what the button
the user clicked.
Returns
-------
None
"""
if result is None or result == "Cancel":
self.dialog.deactivate(result)
self._variables = None
return
if result == "Help":
# display help!!!
return
if result != "OK":
self.dialog.deactivate(result)
raise RuntimeError("Don't recognize dialog result '{}'".format(result))
self.dialog.deactivate(result)
# Shortcut for parameters
P = self.node.parameters
# Get the values for all the widgets.
P["variables"].value = self._variables
self._variables = None
for key in P:
if key != "variables":
P[key].set_from_widget()
def handle_help(self):
"""Shows the help to the user when click on help button.
Parameters
----------
None
Returns
-------
None
"""
print("Help not implemented yet for Control Parameters!")
def add_variable(self):
"""Add a new variable to the table."""
# Post dialog to fill out the new variable
if self._new_variable_dialog is None:
self.create_new_variable_dialog()
self._new["name"].set("")
self._new["type"].set("float")
self._new["optional"].set("Yes")
self._new["nargs"].set("a single value")
self._new["overwrite"].set("No")
self._new["default"].set("")
self._new["choices"].set("[]")
self._new["help"].set("")
self._new_variable_dialog.activate(geometry="centerscreenfirst")
def create_new_variable_dialog(self):
"""
Create a dialog for adding new variables.
Parameters
----------
None
Returns
-------
None
"""
if self._new_variable_dialog is not None:
return
dialog = self._new_variable_dialog = Pmw.Dialog(
self.dialog.interior(),
buttons=("OK", "Cancel"),
defaultbutton="OK",
title="Add Variable",
command=self.handle_new_variable_dialog,
)
self._new_variable_dialog.withdraw()
# Create a frame to hold everything in the dialog
frame = self._new["frame"] = ttk.Frame(dialog.interior())
frame.pack(expand=tk.YES, fill=tk.BOTH)
# Then create the widgets
self._new["name"] = sw.LabeledEntry(frame, labeltext="Name")
self._new["optional"] = sw.LabeledCombobox(
frame, labeltext="Optional?:", values=("Yes", "No"), state="readonly"
)
self._new["type"] = sw.LabeledCombobox(
frame,
labeltext="Type:",
values=(
"str",
"int",
"float",
"bool",
"file",
),
state="readonly",
)
self._new["nargs"] = sw.LabeledCombobox(
frame,
labeltext="Number of values:",
values=(
"a single value",
"an optional value",
"zero or more values",
"one or more values",
),
state="readonly",
)
self._new["overwrite"] = sw.LabeledCombobox(
frame,
labeltext="Overwrite if exists:",
values=("Yes", "No"),
state="readonly",
)
self._new["default"] = sw.LabeledEntry(frame, labeltext="Default:")
self._new["choices"] = sw.LabeledEntry(frame, labeltext="Choices:")
self._new["help"] = sw.LabeledEntry(frame, labeltext="Help:", width=80)
# and lay them out
self.reset_new_variable_dialog()
def reset_new_variable_dialog(self):
"""Lay the dialog out based on the contents."""
# Remove any widgets previously packed
frame = self._new["frame"]
for slave in frame.grid_slaves():
slave.grid_forget()
row = 0
widgets = []
for key in ("name", "type", "nargs", "optional", "overwrite"):
self._new[key].grid(row=row, column=0, sticky=tk.EW)
widgets.append(self._new[key])
row += 1
type_ = self._new["type"].get()
if type_ != "bool":
w = self._new["default"]
w.grid(row=row, column=0, sticky=tk.EW)
widgets.append(w)
row += 1
w = self._new["choices"]
w.grid(row=row, column=0, sticky=tk.EW)
widgets.append(w)
row += 1
self._new["help"].grid(row=row, column=0, sticky=tk.EW)
widgets.append(self._new["help"])
row += 1
sw.align_labels(widgets)
def handle_new_variable_dialog(self, result):
"""Handle the closing of the new variable dialog
What to do depends on the button used to close the dialog. If
the user closes it by clicking the 'x' of the dialog window,
None is returned, which we take as equivalent to cancel.
Parameters
----------
result : None or str
The value of this variable depends on what the button
the user clicked.
Returns
-------
None
"""
if result is None or result == "Cancel":
self._new_variable_dialog.deactivate(result)
# note: do not clear self._variables here; the outer edit dialog is still active
return
if result != "OK":
self._new_variable_dialog.deactivate(result)
raise RuntimeError(f"Don't recognize new variable dialog result '{result}'")
self._new_variable_dialog.deactivate(result)
name = self._new["name"].get()
if name in self._variables:
raise KeyError(f"Duplicate variable name: '{name}'")
data = self._variables[name] = {}
for key, w in self._new.items():
if key not in ("frame", "name"):
data[key] = w.get()
self.reset_table()
def reset_table(self):
"""Update the table of variables to the current data."""
table = self["variables"]
frame = table.table.interior()
table.clear()
row = 0
for name, data in self._variables.items():
table[row, 0] = ttk.Button(
frame,
text="Edit",
width=5,
command=lambda name=name: self.edit_variable(name),  # bind the current name to avoid the loop late-binding bug
takefocus=True,
)
table[row, 1] = name
table[row, 2] = data["type"]
table[row, 3] = data["nargs"]
table[row, 4] = data["optional"]
table[row, 5] = data["overwrite"]
if data["type"] != "bool":
table[row, 6] = data["default"]
table[row, 7] = data["help"]
row += 1
# a button to add new variables...
table[row, 0] = ttk.Button(
frame, text="+", width=5, command=self.add_variable, takefocus=True
)
table.update_idletasks()
def edit_variable(self, name):
"""Edit the values associated with a variable."""
# Post dialog to fill out the new variable
if self._edit_variable_dialog is None:
self.create_edit_variable_dialog()
self._edit_variable_dialog.configure(
command=lambda result: self.handle_edit_variable_dialog(
name, result
) # noqa: E501
)
data = self._variables[name]
for key, w in self._edit.items():
if key == "name":
w.set(name)
elif key != "frame":
w.set(data[key])
self._edit_variable_dialog.activate(geometry="centerscreenfirst")
def create_edit_variable_dialog(self):
"""
Create a dialog for editing variables.
Parameters
----------
None
Returns
-------
None
"""
if self._edit_variable_dialog is not None:
return
dialog = self._edit_variable_dialog = Pmw.Dialog(
self.dialog.interior(),
buttons=("OK", "Cancel"),
defaultbutton="OK",
title="Edit Variable",
command=lambda result: None,  # placeholder; edit_variable() reconfigures this per variable
)
self._edit_variable_dialog.withdraw()
# Create a frame to hold everything in the dialog
frame = self._edit["frame"] = ttk.Frame(dialog.interior())
frame.pack(expand=tk.YES, fill=tk.BOTH)
# Then create the widgets
self._edit["name"] = sw.LabeledEntry(frame, labeltext="Name")
self._edit["optional"] = sw.LabeledCombobox(
frame, labeltext="Type:", values=("Yes", "No"), state="readonly"
)
self._edit["type"] = sw.LabeledCombobox(
frame,
labeltext="Type:",
values=(
"str",
"int",
"float",
"bool",
"file",
),
state="readonly",
)
self._edit["nargs"] = sw.LabeledCombobox(
frame,
labeltext="Number of values:",
values=(
"a single value",
"an optional value",
"zero or more values",
"one or more",
),
state="readonly",
)
self._edit["overwrite"] = sw.LabeledCombobox(
frame,
labeltext="Overwrite if exists:",
values=("Yes", "No"),
state="readonly",
)
self._edit["default"] = sw.LabeledEntry(frame, labeltext="Default:")
self._edit["choices"] = sw.LabeledEntry(frame, labeltext="Choices:")
self._edit["optional"] = sw.LabeledCombobox(
frame, labeltext="Optional?:", values=("Yes", "No"), state="readonly"
)
self._edit["help"] = sw.LabeledEntry(frame, labeltext="Help:", width=80)
# and lay them out
self.reset_edit_variable_dialog()
def reset_edit_variable_dialog(self):
"""Lay the dialog out based on the contents."""
# Remove any widgets previously packed
frame = self._edit["frame"]
for slave in frame.grid_slaves():
slave.grid_forget()
row = 0
widgets = []
for key in ("name", "type", "nargs", "optional", "overwrite"):
self._edit[key].grid(row=row, column=0, sticky=tk.EW)
widgets.append(self._edit[key])
row += 1
type_ = self._edit["type"].get()
if type_ != "bool":
w = self._edit["default"]
w.grid(row=row, column=0, sticky=tk.EW)
widgets.append(w)
row += 1
w = self._edit["choices"]
w.grid(row=row, column=0, sticky=tk.EW)
widgets.append(w)
row += 1
self._edit["help"].grid(row=row, column=0, sticky=tk.EW)
widgets.append(self._edit["help"])
row += 1
sw.align_labels(widgets)
def handle_edit_variable_dialog(self, name, result):
"""Handle the closing of the edit variable dialog
What to do depends on the button used to close the dialog. If
the user closes it by clicking the 'x' of the dialog window,
None is returned, which we take as equivalent to cancel.
Parameters
----------
result : None or str
The value of this variable depends on what the button
the user clicked.
Returns
-------
None
"""
if result is None or result == "Cancel":
self._edit_variable_dialog.deactivate(result)
# note: do not clear self._variables here; the outer edit dialog is still active
return
if result != "OK":
self._edit_variable_dialog.deactivate(result)
raise RuntimeError(
f"Don't recognize edit variable dialog result '{result}'"
)
self._edit_variable_dialog.deactivate(result)
new_name = self._edit["name"].get().lstrip("-")
if new_name == name:
data = self._variables[name]
else:
del self._variables[name]
name = new_name
data = self._variables[name] = {}
for key, w in self._edit.items():
if key not in ("frame", "name"):
data[key] = w.get()
self.reset_table()
|
from lux._config import config
from lux._config.config import (
register_action,
remove_action,
actions,
update_actions,
config,
)
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import sys
import shutil
import asyncio
from azure_functions_worker import protos
from azure_functions_worker import testutils
async def verify_nested_namespace_import():
test_env = {}
request = protos.FunctionEnvironmentReloadRequest(
environment_variables=test_env)
request_msg = protos.StreamingMessage(
request_id='0',
function_environment_reload_request=request)
disp = testutils.create_dummy_dispatcher()
# Mock interpreter starts in placeholder mode
import azure.module_a as mod_a # noqa: F401
# Mock function specialization, load customer's libraries and functionapps
ns_root = os.path.join(
testutils.UNIT_TESTS_ROOT,
'azure_namespace_import',
'namespace_location_b')
test_path = os.path.join(ns_root, 'azure', 'namespace_b', 'module_b')
test_mod_path = os.path.join(test_path, 'test_module.py')
os.makedirs(test_path)
with open(test_mod_path, 'w') as f:
f.write('MESSAGE = "module_b is imported"')
try:
# Mock a customer uses test_module
if sys.argv[1].lower() == 'true':
await disp._handle__function_environment_reload_request(
request_msg)
from azure.namespace_b.module_b import test_module
print(test_module.MESSAGE)
except ModuleNotFoundError:
print('module_b fails to import')
finally:
# Cleanup
shutil.rmtree(ns_root)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(verify_nested_namespace_import())
loop.close()
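# Example invocation (the script name is illustrative); sys.argv[1] selects whether the
# environment-reload request is sent before the namespace import:
#   python verify_nested_namespace_import.py true    # reload environment, then import
#   python verify_nested_namespace_import.py false   # import without reloading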
|
from unittest import TestCase
from pykechain.utils import is_url, is_valid_email, Empty, get_in_chunks
from tests.classes import SixTestCase
class TestIsURL(SixTestCase):
def test_is_url_returns_true_on_valid_url(self):
addresses = [
u"http://foobar.dk",
u"http://foobar.museum/foobar",
u"http://fo.com",
u"http://FOO.com",
u"http://foo.com/blah_blah",
u"http://foo.com/blah_blah/",
u"http://foo.com/blah_blah_(wikipedia)",
u"http://foo.com/blah_blah_(wikipedia)_(again)",
u"http://www.example.com/wpstyle/?p=364",
u"https://www.example.com/foo/?bar=baz&inga=42&quux",
u"https://www.example.com?bar=baz",
u"http://✪df.ws/123",
u"http://userid:password@example.com:8080",
u"http://userid@example.com",
u"http://userid@example.com:8080/",
u"http://userid:password@example.com",
u"http://142.42.1.1/",
u"http://142.42.1.1:8080/",
u"http://⌘.ws/",
u"http://foo.com/blah_(wikipedia)#cite-1",
u"http://foo.com/unicode_(✪)_in_parens",
u"http://foo.com/(something)?after=parens",
u"http://☺.damowmow.com/",
u"http://code.google.com/events/#&product=browser",
u"http://j.mp",
u"ftp://foo.bar/baz",
u"http://foo.bar/?q=Test%20URL-encoded%20stuff",
u"http://-.~_!$&'()*+,;=:%40:80%2f::::::@example.com",
u"http://1337.net",
u"http://a.b-c.de",
u"http://223.255.255.254",
u"http://127.0.10.150",
u"http://localhost",
u"http://localhost:8000",
u"http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:80/index.html",
u"http://[1080:0:0:0:8:800:200C:417A]/index.html",
u"http://[3ffe:2a00:100:7031::1]",
u"http://[1080::8:800:200C:417A]/foo",
u"http://[::192.9.5.5]/ipng",
u"http://[::FFFF:129.144.52.38]:80/index.html",
u"http://[2010:836B:4179::836B:4179]",
]
for address in addresses:
with self.subTest(address):
self.assertTrue(is_url(address), "should be a valid address: '{}'".format(address))
def test_is_url_returns_False_on_failed_url(self):
failed_addresses = [
"http://foobar",
"foobar.dk",
"http://127.0.0/asdf",
"http://foobar.d",
"http://foobar.12",
"http://foobar",
"htp://foobar.com",
"http://foobar..com",
"http://fo..com",
"http://",
"http://.",
"http://..",
"http://../",
"http://?",
"http://??",
"http://??/",
"http://#",
"http://##",
"http://##/",
"http://foo.bar?q=Spaces should be encoded",
"//",
"//a",
"///a",
"///",
"http:///a",
"foo.com",
"rdar://1234",
"h://test",
"http:// shouldfail.com",
":// should fail",
"http://foo.bar/foo(bar)baz quux",
"ftps://foo.bar/",
"http://-error-.invalid/",
"http://a.b--c.de/",
"http://-a.b.co",
"http://a.b-.co",
"http://0.0.0.0",
"http://10.1.1.0",
"http://10.1.1.255",
"http://224.1.1.1",
"http://1.1.1.1.1",
"http://123.123.123",
"http://3628126748",
"http://.www.foo.bar/",
"http://www.foo.bar./",
"http://.www.foo.bar./",
"http://127.12.0.260",
'http://example.com/">user@example.com',
"http://[2010:836B:4179::836B:4179",
"http://2010:836B:4179::836B:4179",
"http://2010:836B:4179::836B:4179:80/index.html",
]
for address in failed_addresses:
with self.subTest(address):
self.assertFalse(is_url(address), "should be an invalid address: '{}'".format(address))
class TestIsEmail(SixTestCase):
def test_is_email_returns_true_on_valid_url(self):
valid_addresses = [
"some@domain.org",
"email@example.com",
"firstname.lastname@example.com",
"email@subdomain.example.com",
"firstname+lastname@example.com",
"email@123.123.123.123",
"email@[123.123.123.123]",
'"email"@example.com',
"1234567890@example.com",
"email@example-one.com",
"_______@example.com",
"email@example.name",
"email@example.museum",
"email@example.co.jp",
"firstname-lastname@example.com",
]
for email in valid_addresses:
with self.subTest(email):
self.assertTrue(is_valid_email(email), "should be a valid address: '{}'".format(email))
def test_is_email_returns_false_on_invalid_url(self):
invalid_addresses = [
"plainaddress",
"#@%^%#$@#$@#.com",
"@example.com",
"Joe Smith <email@example.com>",
"email.example.com",
"email@example@example.com",
".email@example.com",
"email.@example.com",
"email..email@example.com",
"あいうえお@example.com",
"email@example.com (Joe Smith)",
"email@example",
"email@-example.com",
"email@example..com",
"Abc..123@example.com",
"”(),:;<>[\]@example.com",
"just”not”right@example.com",
'this\ is"really"not\allowed@example.com',
]
for email in invalid_addresses:
with self.subTest(email):
self.assertFalse(is_valid_email(email), "should be an invalid address: '{}'".format(email))
class TestEmpty(TestCase):
def test_singleton(self):
empty_1 = Empty()
empty_2 = Empty()
self.assertEqual(empty_1, empty_2)
self.assertIs(empty_1, empty_2)
self.assertIsNot(empty_1, Empty)
class TestChunks(TestCase):
def test_get_in_chunks(self):
chunks = get_in_chunks(
lst=list(range(77)),
chunk_size=9,
)
import types
self.assertIsInstance(chunks, types.GeneratorType)
chunks_list = list(chunks)
self.assertEqual(9, len(chunks_list))
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
import os
import conf
from web.models import Shelter, ShelterPicture
from bootstrap import db
from PIL import Image
def create_shelters_thumbnails():
shelters = Shelter.query.all()
pictures = ShelterPicture.query.all()
for picture in pictures:
filepath = os.path.join(conf.SHELTERS_PICTURES_SERVER_PATH, str(picture.shelter_id), picture.file_name)
basename, ext = os.path.splitext(picture.file_name)
if str(basename.rpartition('_')[2]) == 'thumbnail':
continue
try:
thumbname = basename + '_thumbnail' + ext
new_thumbpath = os.path.join(conf.SHELTERS_PICTURES_SERVER_PATH, str(picture.shelter_id), thumbname)
if os.path.exists(new_thumbpath):
if db.session.query(ShelterPicture).filter_by(file_name=thumbname).first():
continue
im = Image.open(filepath)
im.thumbnail((300,200), Image.ANTIALIAS)
im.save(new_thumbpath, 'JPEG', quality=70, optimize=True, progressive=True)
new_picture = ShelterPicture(file_name=thumbname,
shelter_id=picture.shelter_id,
category_id=picture.category_id,
is_main_picture=picture.is_main_picture)
db.session.add(new_picture)
db.session.commit()
        except Exception as exc:
            print("Failed to create thumbnail for shelter {}, file {}: {}".format(picture.shelter_id, picture.file_name, exc))
continue
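# A minimal entry-point sketch so this script can be run directly; it assumes
# create_shelters_thumbnails() is meant to be called with no arguments (the
# original file defines the function but never invokes it).
if __name__ == '__main__':
    create_shelters_thumbnails()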
|
from .wrapper import get_dataset
from .adversarial import class_indices
|
from .env import DecisionEnv
from .base import BaseRL
from .ddpg import *
from .dqn import *
__all__ = ['DecisionEnv']
|
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : GUC
Case Name : Set track_counts to off and check the expected results
Description :
1. Query the default value of track_counts;
show track_counts;
2. Set track_counts to off, restart the cluster so the change takes effect, and verify the result;
gs_guc set -D {cluster/dn1} -c "track_counts=off"
gs_om -t stop && gs_om -t start
show track_counts;
3. Run simple DML statements after the restart
4. Restore the default value;
Expect :
1. The default value is displayed;
2. The parameter is changed successfully and the system parameter value is off after the change;
3. DML runs without errors
4. The default value is restored successfully;
History :
"""
import sys
import unittest
sys.path.append(sys.path[0] + "/../")
from testcase.utils.Logger import Logger
from testcase.utils.Constant import Constant
from testcase.utils.CommonSH import CommonSH
logger = Logger()
commonsh = CommonSH('PrimaryDbUser')
class Guctestcase(unittest.TestCase):
def setUp(self):
logger.info("------------------------Opengauss_Function_Guc_Run_Statistics_Case0003开始执行-----------------------------")
self.Constant = Constant()
def test_guc(self):
logger.info("查询track_counts 期望:默认值on")
sql_cmd = commonsh.execut_db_sql('''show track_counts;''')
logger.info(sql_cmd)
self.assertEqual("on", sql_cmd.split("\n")[-2].strip())
logger.info("修改track_counts为off,重启使其生效,期望:设置成功")
result = commonsh.execute_gsguc('set', self.Constant.GSGUC_SUCCESS_MSG, 'track_counts=off')
self.assertTrue(result)
logger.info("期望:重启后查询结果为off")
commonsh.restart_db_cluster()
status = commonsh.get_db_cluster_status()
self.assertTrue("Normal" in status or "Degraded" in status)
sql_cmd = commonsh.execut_db_sql('''show track_counts;''')
logger.info(sql_cmd)
self.assertEqual("off", sql_cmd.split("\n")[-2].strip())
        logger.info('Run DML; expected: success')
sql_cmd = commonsh.execut_db_sql('''
begin
for i in 0..1000 loop
drop table if exists test cascade;
create table test(c_int int);
insert into test values(1),(2);
update test set c_int = 5 where c_int = 1;
delete from test where c_int = 2;
end loop;
end;
select * from test;
''')
logger.info(sql_cmd)
self.assertNotIn(self.Constant.SQL_WRONG_MSG[1], sql_cmd)
logger.info("恢复默认值")
logger.info("删除表")
sql_cmd = commonsh.execut_db_sql('''drop table test cascade;''')
logger.info(sql_cmd)
self.assertIn(self.Constant.DROP_TABLE_SUCCESS,sql_cmd)
result = commonsh.execute_gsguc('set', self.Constant.GSGUC_SUCCESS_MSG, 'track_counts=on')
self.assertTrue(result)
result = commonsh.restart_db_cluster()
logger.info(result)
status = commonsh.get_db_cluster_status()
logger.info(status)
self.assertTrue("Normal" in status or "Degraded" in status)
def tearDown(self):
logger.info("恢复默认值")
sql_cmd = commonsh.execut_db_sql('''show track_counts;''')
if "on" not in sql_cmd:
commonsh.execute_gsguc('set', self.Constant.GSGUC_SUCCESS_MSG, 'track_counts=on')
commonsh.restart_db_cluster()
status = commonsh.get_db_cluster_status()
logger.info(status)
self.assertTrue("Normal" in status or "Degraded" in status)
logger.info("-------------------------Opengauss_Function_Guc_Run_Statistics_Case0003执行结束---------------------------")
|
import torch
from . import _base
from . import utils
from .so2 import SO2Matrix
class SE2Matrix(_base.SEMatrixBase):
"""See :mod:`liegroups.SE2`"""
dim = 3
dof = 3
RotationType = SO2Matrix
def adjoint(self):
rot_part = self.rot.as_matrix()
if rot_part.dim() < 3:
rot_part = rot_part.unsqueeze(dim=0) # matrix --> batch
trans = self.trans
if trans.dim() < 2:
# vector --> vectorbatch
trans = trans.unsqueeze(dim=0)
trans_part = trans.new_empty(
trans.shape[0], trans.shape[1], 1)
trans_part[:, 0, 0] = trans[:, 1]
trans_part[:, 1, 0] = -trans[:, 0]
bottom_row = trans.new_zeros(self.dof)
bottom_row[-1] = 1.
bottom_row = bottom_row.unsqueeze_(dim=0).unsqueeze_(
dim=0).expand(trans.shape[0], 1, self.dof)
return torch.cat([torch.cat([rot_part, trans_part], dim=2),
bottom_row], dim=1).squeeze_()
@classmethod
def exp(cls, xi):
if xi.dim() < 2:
xi = xi.unsqueeze(dim=0)
if xi.shape[1] != cls.dof:
raise ValueError(
"xi must have shape ({},) or (N,{})".format(cls.dof, cls.dof))
rho = xi[:, 0:2]
phi = xi[:, 2]
rot = cls.RotationType.exp(phi)
rot_jac = cls.RotationType.left_jacobian(phi)
if rot_jac.dim() < 3:
rot_jac.unsqueeze_(dim=0)
if rho.dim() < 3:
rho.unsqueeze_(dim=2)
trans = torch.bmm(rot_jac, rho).squeeze_()
return cls(rot, trans)
@classmethod
def inv_left_jacobian(cls, xi):
if xi.dim() < 2:
xi = xi.unsqueeze(dim=0)
if xi.shape[1] != cls.dof:
raise ValueError(
"xi must have shape ({},) or (N,{})".format(cls.dof, cls.dof))
rho = xi[:, 0:2] # translation part
phi = xi[:, 2] # rotation part
cos_phi = torch.cos(phi)
sin_phi = torch.sin(phi)
phi_sq = phi * phi
small_angle_mask = utils.isclose(phi_sq, 0.)
small_angle_inds = small_angle_mask.nonzero().squeeze_(dim=1)
large_angle_mask = small_angle_mask.logical_not()
large_angle_inds = large_angle_mask.nonzero().squeeze_(dim=1)
jac = torch.zeros((xi.shape[0], cls.dof, cls.dof)).to(xi.device)
jac[small_angle_inds, 0, 0] = -(96*(phi_sq[small_angle_inds] - 6))/(phi_sq[small_angle_inds]**2*phi_sq[small_angle_inds] + 16*phi_sq[small_angle_inds]**2 - 24*phi_sq[small_angle_inds]*phi_sq[small_angle_inds] - 192*phi_sq[small_angle_inds] + 144*phi_sq[small_angle_inds] + 576)
jac[small_angle_inds, 0, 1] = -(24*phi[small_angle_inds]*(phi_sq[small_angle_inds] - 12))/(phi_sq[small_angle_inds]**2*phi_sq[small_angle_inds] + 16*phi_sq[small_angle_inds]**2 - 24*phi_sq[small_angle_inds]*phi_sq[small_angle_inds] - 192*phi_sq[small_angle_inds] + 144*phi_sq[small_angle_inds] + 576)
jac[small_angle_inds, 0, 2] = (4*(12*phi[small_angle_inds]*rho[small_angle_inds,0] - 72*rho[small_angle_inds,1] + 12*phi_sq[small_angle_inds]*rho[small_angle_inds,1] - 12*phi_sq[small_angle_inds]*rho[small_angle_inds,1] + phi_sq[small_angle_inds]*phi_sq[small_angle_inds]*rho[small_angle_inds,1] + phi_sq[small_angle_inds]*phi[small_angle_inds]*rho[small_angle_inds,0]))/(phi_sq[small_angle_inds]**2*phi_sq[small_angle_inds] + 16*phi_sq[small_angle_inds]**2 - 24*phi_sq[small_angle_inds]*phi_sq[small_angle_inds] - 192*phi_sq[small_angle_inds] + 144*phi_sq[small_angle_inds] + 576)
jac[small_angle_inds, 1, 0] = (24*phi[small_angle_inds]*(phi_sq[small_angle_inds] - 12))/(phi_sq[small_angle_inds]**2*phi_sq[small_angle_inds] + 16*phi_sq[small_angle_inds]**2 - 24*phi_sq[small_angle_inds]*phi_sq[small_angle_inds] - 192*phi_sq[small_angle_inds] + 144*phi_sq[small_angle_inds] + 576)
jac[small_angle_inds, 1, 1] = -(96*(phi_sq[small_angle_inds] - 6))/(phi_sq[small_angle_inds]**2*phi_sq[small_angle_inds] + 16*phi_sq[small_angle_inds]**2 - 24*phi_sq[small_angle_inds]*phi_sq[small_angle_inds] - 192*phi_sq[small_angle_inds] + 144*phi_sq[small_angle_inds] + 576)
jac[small_angle_inds, 1, 2] = (4*(72*rho[small_angle_inds,0] - 12*phi_sq[small_angle_inds]*rho[small_angle_inds,0] + 12*phi[small_angle_inds]*rho[small_angle_inds,1] + 12*phi_sq[small_angle_inds]*rho[small_angle_inds,0] - phi_sq[small_angle_inds]*phi_sq[small_angle_inds]*rho[small_angle_inds,0] + phi_sq[small_angle_inds]*phi[small_angle_inds]*rho[small_angle_inds,1]))/(phi_sq[small_angle_inds]**2*phi_sq[small_angle_inds] + 16*phi_sq[small_angle_inds]**2 - 24*phi_sq[small_angle_inds]*phi_sq[small_angle_inds] - 192*phi_sq[small_angle_inds] + 144*phi_sq[small_angle_inds] + 576)
jac[large_angle_inds, 0, 0] = (sin_phi[large_angle_inds]*phi[large_angle_inds])/(cos_phi[large_angle_inds]**2 - 2*cos_phi[large_angle_inds] + sin_phi[large_angle_inds]**2 + 1)
jac[large_angle_inds, 0, 1] = -(phi[large_angle_inds]*(cos_phi[large_angle_inds] - 1))/(cos_phi[large_angle_inds]**2 - 2*cos_phi[large_angle_inds] + sin_phi[large_angle_inds]**2 + 1)
jac[large_angle_inds, 0, 2] = (phi[large_angle_inds]*(rho[large_angle_inds,0] - 2*cos_phi[large_angle_inds]*rho[large_angle_inds,0] - phi[large_angle_inds]*rho[large_angle_inds,1] + cos_phi[large_angle_inds]**2*rho[large_angle_inds,0] + sin_phi[large_angle_inds]**2*rho[large_angle_inds,0] + cos_phi[large_angle_inds]*phi[large_angle_inds]*rho[large_angle_inds,1] - sin_phi[large_angle_inds]*phi[large_angle_inds]*rho[large_angle_inds,0]))/(phi_sq[large_angle_inds]*(cos_phi[large_angle_inds]**2 - 2*cos_phi[large_angle_inds] + sin_phi[large_angle_inds]**2 + 1))
jac[large_angle_inds, 1, 0] = (phi[large_angle_inds]*(cos_phi[large_angle_inds] - 1))/(cos_phi[large_angle_inds]**2 - 2*cos_phi[large_angle_inds] + sin_phi[large_angle_inds]**2 + 1)
jac[large_angle_inds, 1, 1] = (sin_phi[large_angle_inds]*phi[large_angle_inds])/(cos_phi[large_angle_inds]**2 - 2*cos_phi[large_angle_inds] + sin_phi[large_angle_inds]**2 + 1)
jac[large_angle_inds, 1, 2] = (phi[large_angle_inds]*(rho[large_angle_inds,1] - 2*cos_phi[large_angle_inds]*rho[large_angle_inds,1] + phi[large_angle_inds]*rho[large_angle_inds,0] + cos_phi[large_angle_inds]**2*rho[large_angle_inds,1] + sin_phi[large_angle_inds]**2*rho[large_angle_inds,1] - cos_phi[large_angle_inds]*phi[large_angle_inds]*rho[large_angle_inds,0] - sin_phi[large_angle_inds]*phi[large_angle_inds]*rho[large_angle_inds,1]))/(phi_sq[large_angle_inds]*(cos_phi[large_angle_inds]**2 - 2*cos_phi[large_angle_inds] + sin_phi[large_angle_inds]**2 + 1))
jac[:, 2, 0] = 0
jac[:, 2, 1] = 0
jac[:, 2, 2] = 1
return jac.squeeze_()
@classmethod
def left_jacobian(cls, xi):
if xi.dim() < 2:
xi = xi.unsqueeze(dim=0)
if xi.shape[1] != cls.dof:
raise ValueError(
"xi must have shape ({},) or (N,{})".format(cls.dof, cls.dof))
rho = xi[:, 0:2] # translation part
phi = xi[:, 2] # rotation part
cos_phi = torch.cos(phi)
sin_phi = torch.sin(phi)
phi_sq = phi * phi
small_angle_mask = utils.isclose(phi_sq, 0.)
small_angle_inds = small_angle_mask.nonzero().squeeze_(dim=1)
large_angle_mask = small_angle_mask.logical_not()
large_angle_inds = large_angle_mask.nonzero().squeeze_(dim=1)
jac = torch.zeros((xi.shape[0], cls.dof, cls.dof)).to(xi.device)
jac[small_angle_inds, 0, 0] = 1 - 1./6. * phi_sq[small_angle_inds]
jac[small_angle_inds, 0, 1] = -(0.5 * phi[small_angle_inds] - 1./24. * phi[small_angle_inds] * phi_sq[small_angle_inds])
jac[small_angle_inds, 0, 2] = rho[small_angle_inds,1] / 2. + phi[small_angle_inds] * rho[small_angle_inds,0] / 6.
jac[small_angle_inds, 1, 0] = 0.5 * phi[small_angle_inds] - 1./24. * phi[small_angle_inds] * phi_sq[small_angle_inds]
jac[small_angle_inds, 1, 1] = 1 - 1./6. * phi_sq[small_angle_inds]
jac[small_angle_inds, 1, 2] = -rho[small_angle_inds,0] / 2. + phi[small_angle_inds] * rho[small_angle_inds,1] / 6.
jac[large_angle_inds, 0, 0] = sin_phi[large_angle_inds] / phi[large_angle_inds]
jac[large_angle_inds, 0, 1] = -(1 - cos_phi[large_angle_inds]) / phi[large_angle_inds]
jac[large_angle_inds, 0, 2] = ( rho[large_angle_inds,1] + phi[large_angle_inds]*rho[large_angle_inds,0] - rho[large_angle_inds,1]*cos_phi[large_angle_inds] - rho[large_angle_inds,0]*sin_phi[large_angle_inds])/phi_sq[large_angle_inds]
jac[large_angle_inds, 1, 0] = (1 - cos_phi[large_angle_inds]) / phi[large_angle_inds]
jac[large_angle_inds, 1, 1] = sin_phi[large_angle_inds] / phi[large_angle_inds]
jac[large_angle_inds, 1, 2] = (-rho[large_angle_inds,0] + phi[large_angle_inds]*rho[large_angle_inds,1] + rho[large_angle_inds,0]*cos_phi[large_angle_inds] - rho[large_angle_inds,1]*sin_phi[large_angle_inds])/phi_sq[large_angle_inds]
jac[:, 2, 0] = 0
jac[:, 2, 1] = 0
jac[:, 2, 2] = 1
return jac.squeeze_()
def log(self):
phi = self.rot.log()
inv_rot_jac = self.RotationType.inv_left_jacobian(phi)
if self.trans.dim() < 2:
trans = self.trans.unsqueeze(dim=0)
else:
trans = self.trans
if phi.dim() < 1:
phi.unsqueeze_(dim=0)
phi.unsqueeze_(dim=1) # because phi is 1-dimensional for SE2
if inv_rot_jac.dim() < 3:
inv_rot_jac.unsqueeze_(dim=0)
if trans.dim() < 3:
trans = trans.unsqueeze(dim=2)
rho = torch.bmm(inv_rot_jac, trans).squeeze_()
if rho.dim() < 2:
rho.unsqueeze_(dim=0)
return torch.cat([rho, phi], dim=1).squeeze_()
@classmethod
def odot(cls, p, directional=False):
if p.dim() < 2:
p = p.unsqueeze(dim=0) # vector --> vectorbatch
result = p.__class__(p.shape[0], p.shape[1], cls.dof).zero_()
# Got euclidean coordinates
if p.shape[1] == cls.dim - 1:
# Assume scale parameter is 1 unless p is a direction
# vector, in which case the scale is 0
if not directional:
result[:, 0:2, 0:2] = torch.eye(
cls.RotationType.dim).unsqueeze_(dim=0).expand(
p.shape[0], cls.RotationType.dim, cls.RotationType.dim)
result[:, 0:2, 2] = torch.mm(
cls.RotationType.wedge(p.__class__([1.])),
p.transpose(1, 0)).transpose_(1, 0)
# Got homogeneous coordinates
elif p.shape[1] == cls.dim:
result[:, 0:2, 0:2] = \
p[:, 2].unsqueeze_(dim=1).unsqueeze_(dim=2) * \
torch.eye(
cls.RotationType.dim).unsqueeze_(dim=0).repeat(
p.shape[0], 1, 1)
result[:, 0:2, 2] = torch.mm(
cls.RotationType.wedge(p.__class__([1.])),
p[:, 0:2].transpose_(1, 0)).transpose_(1, 0)
# Got wrong dimension
else:
raise ValueError("p must have shape ({},), ({},), (N,{}) or (N,{})".format(
cls.dim - 1, cls.dim, cls.dim - 1, cls.dim))
return result.squeeze_()
@classmethod
def vee(cls, Xi):
if Xi.dim() < 3:
Xi = Xi.unsqueeze(dim=0)
if Xi.shape[1:3] != (cls.dim, cls.dim):
raise ValueError("Xi must have shape ({},{}) or (N,{},{})".format(
cls.dim, cls.dim, cls.dim, cls.dim))
xi = Xi.new_empty(Xi.shape[0], cls.dof)
xi[:, 0:2] = Xi[:, 0:2, 2]
xi[:, 2] = cls.RotationType.vee(Xi[:, 0:2, 0:2])
return xi.squeeze_()
@classmethod
def wedge(cls, xi):
if xi.dim() < 2:
xi = xi.unsqueeze(dim=0)
if xi.shape[1] != cls.dof:
raise ValueError(
"phi must have shape ({},) or (N,{})".format(cls.dof, cls.dof))
Xi = xi.new_zeros(xi.shape[0], cls.dim, cls.dim)
Xi[:, 0:2, 0:2] = cls.RotationType.wedge(xi[:, 2])
Xi[:, 0:2, 2] = xi[:, 0:2]
return Xi.squeeze_()
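# A minimal round-trip sketch of the exp/log maps defined above (illustrative
# values; shapes follow the (N, dof) batching conventions used in this module).
if __name__ == '__main__':
    xi = torch.tensor([0.1, -0.2, 0.3])  # [rho_x, rho_y, phi] in se(2)
    T = SE2Matrix.exp(xi)                # exponential map: se(2) -> SE(2)
    xi_back = T.log()                    # logarithm map: SE(2) -> se(2)
    # exp and log should invert each other up to numerical precision
    assert torch.allclose(xi, xi_back, atol=1e-5)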
|
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.nic.net.ng/status_registered
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisNicNetNgStatusRegistered(object):
def setUp(self):
fixture_path = "spec/fixtures/responses/whois.nic.net.ng/status_registered.txt"
host = "whois.nic.net.ng"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [part])
def test_status(self):
eq_(self.record.status, 'registered')
def test_available(self):
eq_(self.record.available, False)
def test_domain(self):
eq_(self.record.domain, "nic.net.ng")
def test_nameservers(self):
eq_(self.record.nameservers.__class__.__name__, 'list')
eq_(len(self.record.nameservers), 4)
eq_(self.record.nameservers[0].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[0].name, "rns1.nic.net.ng")
eq_(self.record.nameservers[1].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[1].name, "rns2.nic.net.ng")
eq_(self.record.nameservers[2].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[2].name, "rns3.nic.net.ng")
eq_(self.record.nameservers[3].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[3].name, "rns4.nic.net.ng")
def test_registered(self):
eq_(self.record.registered, True)
def test_created_on(self):
eq_(self.record.created_on.__class__.__name__, 'datetime')
eq_(self.record.created_on, time_parse('2009-05-13 14:27:27 UTC'))
def test_registrar(self):
eq_(self.record.registrar.__class__.__name__, 'Registrar')
eq_(self.record.registrar.id, None)
eq_(self.record.registrar.name, "nira")
eq_(self.record.registrar.organization, None)
eq_(self.record.registrar.url, None)
def test_updated_on(self):
eq_(self.record.updated_on.__class__.__name__, 'datetime')
eq_(self.record.updated_on, time_parse('2012-08-24 13:46:14 UTC'))
def test_domain_id(self):
eq_(self.record.domain_id, "6808-NIRA")
def test_expires_on(self):
eq_(self.record.expires_on.__class__.__name__, 'datetime')
eq_(self.record.expires_on, time_parse('2020-07-30 23:00:00 UTC'))
|
#Copyright (c) 2017 Andre Santos
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
###############################################################################
# Imports
###############################################################################
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import next
from builtins import str
from builtins import range
from past.builtins import basestring
from builtins import object
from fnmatch import fnmatch
import itertools
import logging
from operator import attrgetter
import os
import re
import subprocess
from urllib.request import urlopen
from urllib.error import URLError
import xml.etree.ElementTree as ET
import yaml
from bonsai.model import (
CodeGlobalScope, CodeReference, CodeFunctionCall, pretty_str
)
from bonsai.cpp.model import (
CppEntity, CppFunctionCall, CppDefaultArgument, CppOperator, CppReference
)
from bonsai.analysis import (
CodeQuery, resolve_reference, resolve_expression, get_control_depth,
get_conditions, get_condition_paths, is_under_loop
)
try:
from bonsai.cpp.clang_parser import CppAstParser
except ImportError:
CppAstParser = None
from bonsai.py.py_parser import PyAstParser
from rospkg import RosPack, RosStack, ResourceNotFound
from xml.etree.cElementTree import ElementTree
from distutils.spawn import find_executable
from .cmake_parser import RosCMakeParser
from .launch_parser import LaunchParser, LaunchParserError
from .metamodel import (
Project, Repository, Package, SourceFile, Node, Person, SourceCondition,
AdvertiseCall, SubscribeCall, AdvertiseServiceCall,
ServiceClientCall, Location, GetParamCall, SetParamCall
)
from .util import cwd
###############################################################################
# Utility
###############################################################################
class LoggingObject(object):
log = logging.getLogger(__name__)
def findRosPackages(paths = None, as_stack = False):
"""
Find ROS packages inside folders.
:param paths: [list] of [str] File system path to search, [None] to use the ROS default search paths.
:param as_stack: [bool] Whether the paths point to stacks.
:returns: [dict] Dictionary of [str]package_name -> [str]package_path.
"""
ros_version = os.environ.get("ROS_VERSION")
if ros_version != "1":
# try ROS2 crawling with colcon if possible
        # (In ambiguous cases we prefer trying the ROS2 method first, because
        # ROS1 rospkg only produces misleading or incorrect information when
        # used in ROS2 or mixed workspaces.)
colcon = find_executable('colcon')
if colcon != None:
cmd = [colcon, 'list']
if paths != None:
cmd.extend(['--base-paths'])
cmd.extend(paths)
try:
                # universal_newlines=True yields text output on both Python 2 and 3
                pkglist = subprocess.check_output(cmd, universal_newlines=True)
                # format is <pkg_name>\t<pkg_path>\t<build_system>\n
                pkglist = pkglist.split('\n')
pkgs = {}
for pkginfo in pkglist:
pkginfo_parts = pkginfo.split('\t')
if len(pkginfo_parts) < 2:
continue
if pkginfo_parts[0] in pkgs:
continue
pkgs[pkginfo_parts[0]] = pkginfo_parts[1]
return pkgs
            except Exception:
                # colcon failed or produced unusable output; fall back to the
                # ROS1 code path below
                pass
# ^ if colcon != None
# ^ if ros_version != "1"
# else: try the ROS1 way
ros = None
if as_stack:
ros = RosStack.get_instance(paths)
else:
ros = RosPack.get_instance(paths)
pkg_names = ros.list()
pkgs = {}
for pkg_name in pkg_names:
if pkg_name in pkgs:
continue
pkgs[pkg_name] = ros.get_path(pkg_name)
return pkgs
# ^ findRosPackages(paths)
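# A small usage sketch for findRosPackages (the paths below are hypothetical;
# the function returns a {package_name: package_path} dict as documented above):
#
#     pkgs = findRosPackages()                        # default ROS search paths
#     pkgs = findRosPackages(paths=["/opt/ws/src"])   # or restrict to given folders
#     for name, path in pkgs.items():
#         print(name, path)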
_EMPTY_DICT = {}
_EMPTY_LIST = ()
###############################################################################
# Source Extractor
###############################################################################
class ProjectExtractor(LoggingObject):
def __init__(self, index_file, env = None, pkg_cache = None,
repo_cache = None, repo_path = None, distro_url = None,
require_repos = False, parse_nodes = False, node_cache = None):
self.log.debug("ProjectExtractor(%s, %s, %s)",
index_file, repo_path, distro_url)
self.index_file = index_file
self.repo_path = repo_path
self.distribution = distro_url
self.require_repos = require_repos
self.parse_nodes = parse_nodes
self.environment = env if not env is None else {}
self.package_cache = pkg_cache if not pkg_cache is None else {}
self.repo_cache = repo_cache if not repo_cache is None else {}
self.node_cache = node_cache if not node_cache is None else {}
self.project = None
self.packages = None
self.missing = None
self.repositories = None
self.configurations = None
self.node_specs = None
self.rules = None
self.analysis = None
self._extra_packages = set()
def index_source(self, settings=None):
self.log.debug("ProjectExtractor.index_source()")
self._setup()
settings.update_analysis_preferences(self.analysis)
self._load_user_repositories()
self._find_local_packages()
if self.missing and self.distribution:
self._load_distro_repositories()
self._find_local_packages()
self._topological_sort()
for name in self.missing:
self.log.warning("Could not find package " + name)
self._populate_packages_and_dependencies(settings=settings)
self._update_node_cache()
self._find_nodes(settings)
self._update_nodes_from_specs()
def _setup(self):
try:
with open(self.index_file, "r") as handle:
data = yaml.safe_load(handle)
except IOError as e:
data = {}
self.project = Project(data.get("project", "default"))
self.repositories = data.get("repositories", {})
self.packages = set(data.get("packages")
or list(findRosPackages(["."])))
self.missing = set(self.packages)
self.configurations = data.get("configurations", {})
self.node_specs = data.get("nodes", {})
self.project.node_specs = self.node_specs
self.rules = data.get("rules", {})
self.analysis = data.get("analysis", {})
for node_name in self.node_specs:
if not "/" in node_name:
raise ValueError("expected '<pkg>/<node>' in node specs")
pkg, exe = node_name.split("/")
self._extra_packages.add(pkg)
self.missing.update(self._extra_packages)
def _load_user_repositories(self):
self.log.info("Looking up user provided repositories.")
extractor = RepositoryExtractor()
for name, data in self.repositories.items():
repo = self.repo_cache.get(name)
if repo:
self.project.repositories.append(repo)
else:
extractor.load_from_user(name, data, project = self.project)
if self.repo_path:
try:
extractor.download(self.repo_path)
except RepositoryCloneError as e:
if self.require_repos:
raise e
else:
self.log.warning("Could not download all repositories.")
def _find_local_packages(self):
self.log.info("Looking for packages locally.")
cdir = os.path.abspath(".")
alt_paths = [self.repo_path, cdir] if self.repo_path else [cdir]
extractor = PackageExtractor(alt_paths = alt_paths)
extractor.refresh_package_cache()
found = []
for name in self.missing:
analyse = name in self.packages
pkg = self.package_cache.get(name)
if pkg:
self.project.packages.append(pkg)
found.append(name)
pkg._analyse = analyse
else:
pkg = extractor.find_package(name, project=self.project)
if pkg:
found.append(name)
pkg._analyse = analyse
self.missing.difference_update(found)
def _load_distro_repositories(self):
self.log.info("Looking up repositories from official distribution.")
try:
data = yaml.safe_load(urlopen(self.distribution).read())["repositories"]
except URLError as e:
self.log.warning("Could not download distribution data.")
return
extractor = RepositoryExtractor()
extractor.load_needed_from_distro(data, self.missing, self.project)
if self.repo_path:
try:
extractor.download(self.repo_path)
except RepositoryCloneError as e:
if self.require_repos:
raise e
else:
self.log.warning("Could not download all repositories.")
def _topological_sort(self):
dependencies = {}
pending = list(self.project.packages)
for pkg in self.project.packages:
pkg.topological_tier = -1
dependencies[pkg.id] = set(p for p in pkg.dependencies.packages
if p in self.packages)
tier = 1
emitted = []
while pending:
next_pending = []
next_emitted = []
for pkg in pending:
deps = dependencies[pkg.id]
deps.difference_update(emitted)
if deps:
next_pending.append(pkg)
else:
pkg.topological_tier = tier
next_emitted.append(pkg.name)
if not next_emitted:
# cyclic dependencies detected
self.log.warning("Cyclic dependencies: %s", next_pending)
for pkg in next_pending:
pkg.topological_tier = tier
next_pending = None
pending = next_pending
emitted = next_emitted
tier += 1
self.project.packages.sort(key = attrgetter("topological_tier", "id"))
def _populate_packages_and_dependencies(self, settings=None):
found = set()
extractor = PackageExtractor()
extractor.packages = self.project.packages
for pkg in self.project.packages:
found.add(pkg.name)
analysis_ignore = extractor._populate_package(
pkg, ignored_globs=settings.ignored_globs)
if settings is not None:
settings.ignored_lines.update(analysis_ignore)
deps = extractor._extra
extractor._extra = []
while deps:
pkg = deps.pop()
assert pkg.name not in found
pkg._analyse = False
found.add(pkg.name)
self.project.packages.append(pkg)
analysis_ignore = extractor._populate_package(
pkg, ignored_globs=settings.ignored_globs)
if settings is not None:
settings.ignored_lines.update(analysis_ignore)
deps.extend(extractor._extra)
extractor._extra = []
def _find_nodes(self, settings):
pkgs = {pkg.name: pkg for pkg in self.project.packages if pkg._analyse}
ws = settings.workspace
if not ws:
ws = settings.find_ros_workspace()
ws = os.path.abspath(ws)
if CppAstParser is None:
self.log.warning("C++ AST parser not found.")
extractor = NodeExtractor(pkgs, self.environment, ws = ws,
node_cache = self.node_cache,
parse_nodes = self.parse_nodes)
if self.parse_nodes and CppAstParser is not None:
if settings is None:
CppAstParser.set_library_path()
db_dir = os.path.join(extractor.workspace, "build")
if os.path.isfile(
os.path.join(db_dir, "compile_commands.json")):
CppAstParser.set_database(db_dir)
else:
#library file if given explicitly, otherwise path
if settings.cpp_parser_lib_file:
CppAstParser.set_library_file(settings.cpp_parser_lib_file)
else:
CppAstParser.set_library_path(settings.cpp_parser_lib)
CppAstParser.set_standard_includes(settings.cpp_includes)
db_dir = settings.cpp_compile_db
if db_dir and os.path.isfile(
os.path.join(db_dir, "compile_commands.json")):
CppAstParser.set_database(settings.cpp_compile_db)
for pkg in self.project.packages:
if pkg._analyse and pkg.name not in self.package_cache:
extractor.find_nodes(pkg)
def _update_node_cache(self):
self.log.debug("Importing cached Nodes.")
data = [datum for datum in self.node_cache.values()]
self.node_cache = {}
empty_dict = {}
empty_list = ()
for datum in data:
try:
pkg = self._get_package(datum["package"])
source_files = self._get_files(pkg, datum["files"])
except ValueError as e:
# either a package or a file is no longer part of the analysis
self.log.debug("Cached node %s: %s", datum["name"], e)
continue
mtime = datum["timestamp"]
for sf in source_files:
if sf.timestamp > mtime:
# a file was modified, needs to be parsed again
break
else:
node = Node(datum["name"], pkg, rosname = datum["rosname"],
nodelet = datum["nodelet"])
node.source_files = source_files
for p in datum["advertise"]:
node.advertise.append(self._pub_from_JSON(p))
for p in datum["subscribe"]:
node.subscribe.append(self._sub_from_JSON(p))
for p in datum["service"]:
node.service.append(self._srv_from_JSON(p))
for p in datum["client"]:
node.client.append(self._client_from_JSON(p))
for p in datum["readParam"]:
node.read_param.append(self._read_from_JSON(p))
for p in datum["writeParam"]:
node.write_param.append(self._write_from_JSON(p))
hpl = datum.get("hpl", empty_dict)
for p in hpl.get("properties", empty_list):
node.hpl_properties.append(p)
for a in hpl.get("assumptions", empty_list):
node.hpl_assumptions.append(a)
self.node_cache[node.node_name] = node
def _update_nodes_from_specs(self):
self.log.debug("Loading Nodes from specs.")
pkg_finder = PackageExtractor()
pkg_finder.packages.extend(self.project.packages)
nhm = NodeHints2(self.node_specs, pkg_finder=pkg_finder)
# nodes = dict(self.node_cache)
for pkg in self.project.packages:
for node in pkg.nodes:
node_type = node.node_name
if node_type not in self.node_cache:
self.log.debug(
"WARNING node %s is not in node cache!", node_type)
self.node_cache[node_type] = node
new_nodes = nhm.apply_to(self.node_cache, create=True)
for node in new_nodes:
assert node.node_name not in self.node_cache
self.node_cache[node.node_name] = node
node.package.nodes.append(node)
def _get_package(self, name):
for pkg in self.project.packages:
if pkg.name == name:
return pkg
raise ValueError("cannot find package: " + name)
def _get_files(self, pkg, filenames):
files = []
for filename in filenames:
found = False
for sf in pkg.source_files:
if sf.full_name == filename:
found = True
files.append(sf)
break
if not found:
raise ValueError("cannot find file: " + filename)
return files
def _pub_from_JSON(self, datum):
l = self._location_from_JSON
cs = [SourceCondition(c["condition"], location=l(c["location"]),
statement=c["statement"])
for c in datum["conditions"]]
return AdvertiseCall(datum["name"], datum["namespace"], datum["type"],
datum["queue"], latched=datum.get("latched", False),
control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = cs, location = l(datum["location"]))
def _sub_from_JSON(self, datum):
l = self._location_from_JSON
cs = [SourceCondition(c["condition"], location=l(c["location"]),
statement=c["statement"])
for c in datum["conditions"]]
return SubscribeCall(datum["name"], datum["namespace"], datum["type"],
datum["queue"], control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = cs, location = l(datum["location"]))
def _srv_from_JSON(self, datum):
l = self._location_from_JSON
cs = [SourceCondition(c["condition"], location=l(c["location"]),
statement=c["statement"])
for c in datum["conditions"]]
return AdvertiseServiceCall(datum["name"], datum["namespace"],
datum["type"], control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = cs,
location = l(datum["location"]))
def _client_from_JSON(self, datum):
l = self._location_from_JSON
cs = [SourceCondition(c["condition"], location=l(c["location"]),
statement=c["statement"])
for c in datum["conditions"]]
return ServiceClientCall(datum["name"], datum["namespace"],
datum["type"], control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = cs,
location = l(datum["location"]))
def _read_from_JSON(self, datum):
l = self._location_from_JSON
cs = [SourceCondition(c["condition"], location=l(c["location"]),
statement=c["statement"])
for c in datum["conditions"]]
return GetParamCall(datum["name"], datum["namespace"],
datum["type"], default_value=datum["default_value"],
control_depth=datum["depth"], repeats=datum["repeats"],
conditions=cs, location=l(datum["location"]))
def _write_from_JSON(self, datum):
l = self._location_from_JSON
cs = [SourceCondition(c["condition"], location=l(c["location"]),
statement=c["statement"])
for c in datum["conditions"]]
return SetParamCall(datum["name"], datum["namespace"],
datum["type"], value=datum["value"],
control_depth=datum["depth"], repeats=datum["repeats"],
conditions=cs, location=l(datum["location"]))
def _location_from_JSON(self, datum):
if datum is None:
return None
try:
pkg = self._get_package(datum["package"])
sf = None
filename = datum["file"]
if filename:
sf = self._get_files(pkg, [filename])[0]
except ValueError:
return None
return Location(pkg, file=sf, line=datum["line"], col=datum["column"],
fun=datum["function"], cls=datum["class"])
###############################################################################
# Repository Extractor
###############################################################################
class RepositoryCloneError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class RepositoryExtractor(LoggingObject):
def __init__(self):
self.repositories = []
self.declared_packages = set()
def load_from_user(self, name, data, project = None):
self.log.debug("RepositoryExtractor.from_user(%s, %s)", name, data)
repo = Repository(name, proj = project)
repo.status = "private"
repo.vcs = data["type"]
repo.url = data["url"]
repo.version = data["version"]
repo.declared_packages = data["packages"]
self.repositories.append(repo)
self.declared_packages.update(repo.declared_packages)
if project:
project.repositories.append(repo)
return repo
def load_from_distro(self, name, data, project = None):
self.log.debug("RepositoryExtractor.from_distro(%s, %s)", name, data)
if not "source" in data:
self.log.debug("There is no source in provided data.")
return
repo = Repository(name, proj = project)
repo.status = data.get("status")
src = data["source"]
repo.vcs = src["type"]
repo.url = src["url"]
repo.version = src["version"]
if "release" in data:
repo.declared_packages = data["release"].get("packages", [name])
self.repositories.append(repo)
self.declared_packages.update(repo.declared_packages)
if project:
project.repositories.append(repo)
return repo
def load_needed_from_distro(self, data, pkgs, project = None):
if not pkgs:
return True
remaining = set(pkgs)
for name, info in data.items():
if not "release" in info:
continue
for pkg in info["release"].get("packages", [name]):
try:
remaining.remove(pkg)
self.load_from_distro(name, info, project = project)
except KeyError as e:
pass
if not remaining:
break
return not remaining
def download(self, repo_path):
self.log.debug("RepositoryExtractor.download(%s)", repo_path)
for repo in self.repositories:
if not repo.url:
self.log.debug("%s has no URL to download from.", repo.id)
continue
path = os.path.join(repo_path, repo.name)
clone = False
if not os.path.exists(path):
os.makedirs(path)
clone = True
with cwd(path):
if repo.vcs == "git":
self._download_git(repo, path, clone)
elif repo.vcs == "hg":
self._download_hg(repo, path, clone)
elif repo.vcs == "svn":
self._download_svn(repo, path, clone)
return True
GIT_INIT = ("git", "init")
GIT_PULL = ("git", "pull")
GIT_COUNT = ("git", "rev-list", "HEAD", "--count")
def _download_git(self, repo, path, clone = False):
self.log.debug("RepositoryExtractor._download_git(%s)", path)
try:
if clone:
subprocess.check_call(self.GIT_INIT)
subprocess.check_call(["git", "remote",
"add", "-t", repo.version,
"-f", "origin", repo.url])
subprocess.check_call(["git", "checkout", repo.version])
else:
subprocess.check_call(self.GIT_PULL)
repo.path = path
repo.commits = int(subprocess.check_output(self.GIT_COUNT).rstrip())
except subprocess.CalledProcessError as e:
raise RepositoryCloneError("git error: " + str(e))
HG_PULL = ("hg", "pull")
HG_COUNT = ("hg", "id", "--num", "--rev", "tip")
def _download_hg(self, repo, path, clone = False):
self.log.debug("RepositoryExtractor._download_hg(%s)", path)
try:
if clone:
subprocess.check_call(["hg", "clone", repo.url,
"-r", repo.version])
else:
subprocess.check_call(self.HG_PULL)
repo.path = path
repo.commits = int(subprocess.check_output(self.HG_COUNT).rstrip())
except subprocess.CalledProcessError as e:
raise RepositoryCloneError("hg error: " + str(e))
SVN_FETCH = ("git", "svn", "fetch")
def _download_svn(self, repo, path, clone = False):
self.log.debug("RepositoryExtractor._download_svn(%s)", path)
try:
if clone:
if repo.version == "trunk":
version = repo.version
else:
version = "branches/" + repo.version
subprocess.check_call(["git", "svn", "clone",
"-T", version, repo.url])
else:
subprocess.check_call(self.SVN_FETCH)
            repo.path = path
            repo.commits = int(subprocess.check_output(self.GIT_COUNT).rstrip())
except subprocess.CalledProcessError as e:
raise RepositoryCloneError("git-svn error: " + str(e))
###############################################################################
# Package Extractor
###############################################################################
class PackageExtractor(LoggingObject):
def __init__(self, alt_paths = None):
self.packages = []
self.rospack_pkgs = None
self.rosstack_pkgs = None
self.alt_paths = alt_paths
self.altpack_pkgs = None
self.altstack_pkgs = None
self._pkg_cache = {}
self._extra = []
def refresh_package_cache(self):
self.rospack_pkgs = None
self.rosstack_pkgs = None
self.altpack_pkgs = None
self.altstack_pkgs = None
# To use with LaunchParser.
def get(self, pkg_id, populate=True):
self.log.debug("%s.get('%s')", type(self).__name__, pkg_id)
if pkg_id in self._pkg_cache:
return self._pkg_cache[pkg_id]
for pkg in self.packages:
if pkg.id == pkg_id:
self._pkg_cache[pkg_id] = pkg
return pkg
try:
assert pkg_id.startswith("package:")
pkg = self._find(pkg_id[8:], None)
self._pkg_cache[pkg_id] = pkg
self._extra.append(pkg)
pkg._analyse = False
if populate:
self._populate_package(pkg)
except (IOError, ET.ParseError, ResourceNotFound):
return None
return pkg
def find_package(self, name, project=None, analyse=True):
try:
pkg = self._find(name, project)
pkg._analyse = analyse
self.packages.append(pkg)
if project:
project.packages.append(pkg)
for repo in project.repositories:
if name in repo.declared_packages:
pkg.repository = repo
repo.packages.append(pkg)
break
# self._populate_package(pkg)
except (IOError, ET.ParseError, KeyError):
return None
return pkg
def find_package_at(self, dirpath, populate=True):
try:
manifest = os.path.join(dirpath, "package.xml")
pkg = PackageParser.parse(manifest)
if pkg.id in self._pkg_cache:
return self._pkg_cache[pkg.id]
else:
self._pkg_cache[pkg.id] = pkg
if pkg not in self._extra:
self._extra.append(pkg)
pkg._analyse = False
if populate:
self._populate_package(pkg)
except (IOError, ET.ParseError, KeyError):
return None
return pkg
def _find(self, name, project):
path = None
if self.alt_paths:
if self.altpack_pkgs == None:
self.altpack_pkgs = findRosPackages(paths=self.alt_paths, as_stack=False)
path = self.altpack_pkgs.get(name, None)
if (path == None):
if self.altstack_pkgs == None:
self.altstack_pkgs = findRosPackages(paths=self.alt_paths, as_stack=True)
path = self.altstack_pkgs.get(name, None)
if path is None:
if self.rospack_pkgs == None:
self.rospack_pkgs = findRosPackages(as_stack=False)
path = self.rospack_pkgs.get(name, None)
if path is None:
if self.rosstack_pkgs == None:
self.rosstack_pkgs = findRosPackages(as_stack=True)
path = self.rosstack_pkgs.get(name, None)
if path is None:
raise KeyError(name)
return PackageParser.parse(os.path.join(path, "package.xml"),
project = project)
EXCLUDED = (".git", "doc", "cmake", ".eggs", "__pycache__")
_START_GLOB = (os.path.sep, '*', '?', '[')
_BYTE_CODE = (".pyc", ".pyd", ".pyo")
def _populate_package(self, pkg, ignored_globs=None):
self.log.debug("PackageExtractor.populate(%s, %s)", pkg, ignored_globs)
if not pkg.path:
self.log.debug("Package %s has no path", pkg.name)
return
self.log.info("Indexing source files for package %s", pkg.name)
analysis_ignore = {}
#pkgs = {pkg.id: pkg for pkg in self.packages}
launch_parser = LaunchParser(pkgs=self)
prefix = len(pkg.path) + len(os.path.sep)
if ignored_globs is None:
ignored_globs = ()
else:
ignored_globs = list(ignored_globs)
for i in range(len(ignored_globs)):
c = ignored_globs[i][0]
if not c in self._START_GLOB:
ignored_globs[i] = '*/' + ignored_globs[i]
for root, subdirs, files in os.walk(pkg.path, topdown=True):
if 'COLCON_IGNORE' in files or 'AMENT_IGNORE' in files or 'CATKIN_IGNORE' in files:
del subdirs[:] # don't traverse into subdirectories
continue # skip
subdirs[:] = [d for d in subdirs if d not in self.EXCLUDED]
path = root[prefix:]
for filename in files:
self.log.debug("Found file %s at %s", filename, path)
source = SourceFile(filename, path, pkg)
self.log.debug("File language: %s", source.language)
sfn = os.path.join(pkg.name, source.full_name)
if source.language == "unknown":
if filename.endswith(self._BYTE_CODE):
self.log.debug("Python bytecode file %s was ignored",
sfn)
continue # skip this file
if any(fnmatch(sfn, pattern)
for pattern in ignored_globs):
self.log.debug(
"File %s was ignored due to glob pattern", sfn)
continue # skip this file
ignore = source.set_file_stats()
if any(v for v in ignore.values()):
analysis_ignore[source.id] = ignore
if pkg._analyse and source.language == "launch":
self.log.info("Parsing launch file: " + source.path)
try:
source.tree = launch_parser.parse(source.path)
except LaunchParserError as e:
self.log.warning("Parsing error in %s:\n%s",
source.path, str(e))
pkg.source_files.append(source)
pkg.size += source.size
pkg.lines += source.lines
pkg.sloc += source.sloc
return analysis_ignore
###############################################################################
# Package Parser
###############################################################################
class PackageParser(LoggingObject):
@staticmethod
def parse(pkg_file, project = None):
PackageParser.log.debug("PkgParser.parse(%s, %s)", pkg_file, project)
with open(pkg_file, "r") as handle:
root = ET.parse(handle).getroot()
name = root.find("name").text.strip()
package = Package(name, proj = project)
package.path = os.path.dirname(pkg_file)
PackageParser.log.info("Found package %s at %s", package, package.path)
PackageParser._parse_metadata(root, package)
PackageParser._parse_export(root, package)
PackageParser._parse_dependencies(root, package)
return package
@staticmethod
def _parse_metadata(xml, package):
package.description = (xml.find("description").text or "").strip()
for el in xml.findall("maintainer"):
name = (el.text or "?").strip()
email = el.get("email") or "email@example.com"
package.maintainers.add(Person(name, email))
for el in xml.findall("author"):
name = (el.text or "?").strip()
email = el.get("email") or "email@example.com"
package.authors.add(Person(name, email))
for el in xml.findall("license"):
package.licenses.add((el.text or "?").strip())
for el in xml.findall("url"):
value = el.get("type")
if value is None or value == "website":
if el.text:
package.website = el.text.strip()
elif value == "repository":
if el.text:
package.vcs_url = el.text.strip()
elif value == "bugtracker":
if el.text:
package.bug_url = el.text.strip()
el = xml.find("version")
if el is not None:
package.version = (el.text or "?").strip()
@staticmethod
def _parse_export(xml, package):
el = xml.find("export")
if not el is None:
package.is_metapackage = not el.find("metapackage") is None
if not el.find("nodelet") is None:
nodelets = el.find("nodelet").get("plugin")
nodelets = nodelets.replace("${prefix}", package.path)
with open(nodelets, "r") as handle:
xmltext = "<export>{}</export>".format(handle.read())
root = ET.fromstring(xmltext)
PackageParser.log.info("Found nodelets at %s", nodelets)
libs = []
for child in root:
if child.tag == "library":
libs.append(child)
else:
libs.extend(child.findall("library"))
for el in libs:
libname = el.get("path").rsplit(os.sep)[-1]
for cl in el.findall("class"):
nodelet = cl.get("type").split("::")[-1]
node = Node(libname, package, nodelet = nodelet)
package.nodes.append(node)
@staticmethod
def _parse_dependencies(xml, package):
sources = ["build_depend"]
if xml.get("format") == "2":
sources.extend(("depend", "build_export_depend", "exec_depend"))
else:
sources.append("run_depend")
for src in sources:
for el in xml.findall(src):
name = el.text.strip()
if name:
package.dependencies.packages.add(name)
###############################################################################
# Hard-coded Node Parser
###############################################################################
class HardcodedNodeParser(LoggingObject):
model_dir = None
distro = None
_cache = {}
@classmethod
def get(cls, pkg, node_type):
cls.log.debug("Fetching hard-coded node: (%s, %s, %s)",
pkg, node_type, cls.distro)
node_id = "node:" + pkg + "/" + node_type
if node_id in cls._cache:
cls.log.debug("Node already in cache.")
return cls._cache[node_id]
filename = os.path.join(cls.model_dir, pkg + ".yaml")
try:
with open(filename) as handle:
data = yaml.safe_load(handle)
except IOError as e:
cls.log.debug("YAML file not found: %s", filename)
return None
if not cls.distro in data:
cls.log.debug("Package has no data for ROS %s.", cls.distro)
return None
if not node_type in data[cls.distro]:
cls.log.debug("Node does not exist for ROS %s.", cls.distro)
return None
cls.log.debug("Building node from YAML data.")
pkg = Package(pkg)
pkg.path = "/tmp/" + pkg.name
node = cls._build_node(node_type, cls.distro, pkg, data)
cls._cache[node_id] = node
return node
@classmethod
def _build_node(cls, node_type, distro, pkg, data):
node_data = data[distro][node_type]
base = node_data.get("base")
if base:
node = cls._build_node(node_type, base, pkg, data)
else:
node = Node(node_type, pkg, rosname = node_data.get("rosname"),
nodelet = node_type if node_data["nodelet"] else None)
for datum in node_data.get("advertise", ()):
loc = cls._loc(pkg, datum)
pub = AdvertiseCall(datum["name"], datum["namespace"],
datum["type"], datum["queue"],
latched=datum.get("latched", False),
control_depth=datum["depth"],
repeats=datum["repeats"],
conditions=[SourceCondition(c["condition"],
statement=c["statement"])
for c in datum["conditions"]],
location=loc)
node.advertise.append(pub)
for datum in node_data.get("subscribe", ()):
loc = cls._loc(pkg, datum)
sub = SubscribeCall(datum["name"], datum["namespace"],
datum["type"], datum["queue"],
control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = [SourceCondition(c["condition"],
statement=c["statement"])
for c in datum["conditions"]],
location=loc)
node.subscribe.append(sub)
for datum in node_data.get("service", ()):
loc = cls._loc(pkg, datum)
srv = AdvertiseServiceCall(datum["name"], datum["namespace"],
datum["type"], control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = [SourceCondition(c["condition"],
statement=c["statement"])
for c in datum["conditions"]],
location=loc)
node.service.append(srv)
for datum in node_data.get("client", ()):
loc = cls._loc(pkg, datum)
cli = ServiceClientCall(datum["name"], datum["namespace"],
datum["type"], control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = [SourceCondition(c["condition"],
statement=c["statement"])
for c in datum["conditions"]],
location=loc)
node.client.append(cli)
for datum in node_data.get("readParam", ()):
loc = cls._loc(pkg, datum)
par = GetParamCall(datum["name"], datum["namespace"],
datum["type"], default_value=datum.get("default"),
control_depth=datum["depth"], repeats=datum["repeats"],
conditions=[SourceCondition(c["condition"],
statement=c["statement"])
for c in datum["conditions"]],
location=loc)
node.read_param.append(par)
for datum in node_data.get("writeParam", ()):
loc = cls._loc(pkg, datum)
par = SetParamCall(datum["name"], datum["namespace"],
datum["type"], value=datum.get("value"),
control_depth=datum["depth"], repeats=datum["repeats"],
conditions=[SourceCondition(c["condition"],
statement=c["statement"])
for c in datum["conditions"]],
location=loc)
node.write_param.append(par)
cls.log.debug("Hard-coded Node: " + str(node.to_JSON_object()))
return node
@classmethod
def _loc(cls, pkg, data):
loc = data.get("location")
if loc is None:
return None
p = loc.get("package")
if p is None or p != pkg.name:
return None
f = loc["file"]
for sf in pkg.source_files:
if sf.full_name == f:
f = sf
break
else:
parts = loc["file"].rsplit("/", 1)
if len(parts) == 1:
directory = ""
name = parts[0]
else:
assert len(parts) == 2
directory, name = parts
f = SourceFile(name, directory, pkg)
pkg.source_files.append(f)
return Location(pkg, file=f, line=loc["line"], col=loc["column"],
fun=loc.get("function"), cls=loc.get("class"))
###############################################################################
# Node Extractor
###############################################################################
class NodeExtractor(LoggingObject):
def __init__(self, pkgs, env, ws=None, node_cache=None, parse_nodes=False):
self.package = None
self.packages = pkgs
self.environment = env
self.workspace = ws
self.node_cache = node_cache
self.parse_nodes = parse_nodes
self.nodes = []
self.roscpp_extractor = None
self.rospy_extractor = None
def find_nodes(self, pkg):
self.log.debug("NodeExtractor.find_nodes(%s)", pkg)
self.package = pkg
srcdir = self.package.path[len(self.workspace):]
srcdir = os.path.join(self.workspace, srcdir.split(os.sep, 1)[0])
bindir = os.path.join(self.workspace, "build")
cmake_path = os.path.join(self.package.path, "CMakeLists.txt")
if os.path.isfile(cmake_path):
parser = RosCMakeParser(srcdir, bindir, pkgs = self.packages,
env = self.environment,
vars = self._default_variables())
parser.parse(cmake_path)
self._update_nodelets(parser.libraries)
self._register_nodes(parser.executables)
else:
# It may be normal for pure Python projects not to have a CMakeLists.txt
# Instead, search for python files with "def main():"
            pattern = re.compile(r'^def\s+main\s*\(.*\)\s*:')
for file in pkg.source_files:
if file.language != 'python':
continue # continue with next file
entry_point_found = False
with open(file.path) as f:
for line in f:
match = pattern.match(line)
if match is not None:
entry_point_found = True
break
                if not entry_point_found:
continue # continue with next file
# else: this is a python file with a 'main' function,
# so we consider it a node.
node = Node(file.full_name, pkg)
node.source_files.append(file)
self.nodes.append(node)
self.package.nodes.append(node)
if self.parse_nodes:
self._extract_primitives()
def _default_variables(self):
# TODO: clean up these hardcoded values
v = {}
v["catkin_INCLUDE_DIRS"] = os.path.join(self.workspace,
"devel/include")
v["Boost_INCLUDE_DIRS"] = "/usr/include/"
v["Eigen_INCLUDE_DIRS"] = "/usr/include/eigen3"
v["ImageMagick_INCLUDE_DIRS"] = "/usr/include/ImageMagick"
v["PROJECT_SOURCE_DIR"] = self.package.path
return v
def _get_file(self, path):
for sf in self.package.source_files:
if sf.path == path:
return sf
return None
def _update_nodelets(self, libraries):
lib_files = {}
for target in libraries.values():
files = []
for path in target.files:
sf = self._get_file(path)
if sf:
files.append(sf)
for link in target.links:
for path in link.files:
sf = self._get_file(path)
if sf:
files.append(sf)
lib_files[target.prefixed_name] = files
for nodelet in self.package.nodes:
if not nodelet.is_nodelet:
continue
if nodelet.name in lib_files:
nodelet.source_files = lib_files[nodelet.name]
def _register_nodes(self, executables):
for target in executables.values():
node = Node(target.output_name, self.package)
for path in target.files:
sf = self._get_file(path)
if sf:
node.source_files.append(sf)
for link in target.links:
for path in link.files:
sf = self._get_file(path)
if sf:
node.source_files.append(sf)
lang = node.language
if lang == "cpp" or lang == "python":
self.log.debug("register %s node: %s", lang, node.node_name)
self.nodes.append(node)
self.package.nodes.append(node)
else:
self.log.debug("CMake target is not a node: %s (%s) %s",
node.node_name, lang, node.source_files)
def _extract_primitives(self, force_when_cached=False):
self.roscpp_extractor = RoscppExtractor(self.package, self.workspace)
self.rospy_extractor = RospyExtractor(self.package, self.workspace)
for i in range(len(self.package.nodes)):
node = self.package.nodes[i]
self.log.debug("Extracting primitives for node %s", node.id)
if node.source_tree is not None:
self.log.debug("Node already has a source tree. Skipped.")
continue
if (node.node_name in self.node_cache) and not force_when_cached:
self.log.debug("Using Node %s from cache.", node.node_name)
node = self.node_cache[node.node_name]
assert node.package is self.package
self.package.nodes[i] = node
continue
node.source_tree = CodeGlobalScope()
node.advertise = []
node.subscribe = []
node.service = []
node.client = []
node.read_param = []
node.write_param = []
if not node.source_files:
self.log.warning("no source files for node " + node.id)
if node.language == "cpp" and CppAstParser is not None:
self.roscpp_extractor.extract(node)
elif node.language == "python":
self.rospy_extractor.extract(node)
else:
self.log.debug("Node written in %s.", node.language)
self.log.debug("Skipping parsing and primitive extraction.")
###############################################################################
# C++ Primitive Extractor
###############################################################################
class RoscppExtractor(LoggingObject):
def __init__(self, package, workspace):
self.package = package
self.workspace = workspace
def extract(self, node):
self.log.debug("Parsing C++ files for node %s", node.id)
parser = CppAstParser(workspace=self.workspace, logger=__name__)
for sf in node.source_files:
self.log.debug("Parsing C++ file %s", sf.path)
if parser.parse(sf.path) is None:
self.log.warning("no compile commands for " + sf.path)
node.source_tree = parser.global_scope
# ----- queries after parsing, since global scope is reused -----------
self._query_comm_primitives(node, parser.global_scope)
self._query_nh_param_primitives(node, parser.global_scope)
self._query_param_primitives(node, parser.global_scope)
def _query_comm_primitives(self, node, gs):
for call in CodeQuery(gs).all_calls.where_name("advertise").get():
if call.canonical_type != "ros::Publisher":
continue
self._on_publication(node,
self._resolve_node_handle(call.method_of), call)
for call in CodeQuery(gs).all_calls.where_name("subscribe").get():
if call.canonical_type != "ros::Subscriber":
continue
self._on_subscription(node,
self._resolve_node_handle(call.method_of), call)
for call in CodeQuery(gs).all_calls.where_name("advertiseService").get():
if call.canonical_type != "ros::ServiceServer":
continue
self._on_service(node,
self._resolve_node_handle(call.method_of), call)
for call in CodeQuery(gs).all_calls.where_name("serviceClient").get():
if call.canonical_type != "ros::ServiceClient":
continue
self._on_client(node,
self._resolve_node_handle(call.method_of), call)
self.log.debug("Looking for image_transport::SubscriberFilter calls.")
for call in CodeQuery(gs).all_calls.where_name("SubscriberFilter").get():
self.log.debug("Found: %s", call.pretty_str())
self.log.debug("%s", type(call))
self.log.debug("%s", call.__dict__)
if isinstance(call.reference, str):
if not call.reference.startswith("c:@N@image_transport@S@SubscriberFilter"):
continue
if not "image_transport::SubscriberFilter" in call.canonical_type:
continue
n = call.arguments[0] if call.arguments else None
self._on_subscription(node, self._resolve_it_node_handle(n),
call, topic_pos = 1, queue_pos = 2,
msg_type = "sensor_msgs/Image")
self.log.debug("Looking for message_filters::Subscriber calls.")
for call in CodeQuery(gs).all_calls.where_name("Subscriber").get():
self.log.debug("Found: %s", call.pretty_str())
self.log.debug("%s", type(call))
self.log.debug("%s", call.__dict__)
if isinstance(call.reference, str):
if not call.reference.startswith("c:@N@message_filters@S@Subscriber"):
continue
if not "message_filters::Subscriber" in call.canonical_type:
continue
n = call.arguments[0] if call.arguments else None
self._on_subscription(node, self._resolve_node_handle(n),
call, topic_pos = 1, queue_pos = 2)
self.log.debug("Looking for image_transport::Subscriber calls.")
for call in CodeQuery(gs).all_calls.where_name("subscribe").get():
if call.canonical_type != "image_transport::Subscriber":
continue
self.log.debug("Found: %s", call.pretty_str())
self.log.debug("%s", type(call))
self.log.debug("%s", call.__dict__)
n = call.method_of if call.method_of else None
self._on_subscription(node, self._resolve_it_node_handle(n),
call, msg_type = "sensor_msgs/Image")
self.log.debug("Looking for image_transport::Publisher.")
for call in CodeQuery(gs).all_calls.where_name("advertise").get():
if call.canonical_type != "image_transport::Publisher":
continue
self.log.debug("Found: %s", call.pretty_str())
self.log.debug("%s", type(call))
self.log.debug("%s", call.__dict__)
n = call.method_of if call.method_of else None
self._on_publication(node, self._resolve_it_node_handle(n),
call, msg_type = "sensor_msgs/Image")
def _query_nh_param_primitives(self, node, gs):
nh_prefix = "c:@N@ros@S@NodeHandle@"
gets = ("getParam", "getParamCached", "param")
reads = gets + ("hasParam", "searchParam")
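# ros::NodeHandle parameter-reading methods; 'param' also accepts a default
# value, which is extracted below.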
for call in CodeQuery(gs).all_calls.where_name(reads).get():
if (call.full_name.startswith("ros::NodeHandle")
or (isinstance(call.reference, str)
and call.reference.startswith(nh_prefix))):
param_type = default_value = None
if call.name in gets:
param_type = self._extract_param_type(call.arguments[1])
if call.name == "param":
if len(call.arguments) > 2:
default_value = self._extract_param_value(
call, arg_pos=2)
elif len(call.arguments) == 2:
default_value = self._extract_param_value(
call, arg_pos=1)
self._on_read_param(node, self._resolve_node_handle(call),
call, param_type, default_value)
sets = ("setParam",)
writes = sets + ("deleteParam",)
for call in CodeQuery(gs).all_calls.where_name(writes).get():
if (call.full_name.startswith("ros::NodeHandle")
or (isinstance(call.reference, str)
and call.reference.startswith(nh_prefix))):
param_type = value = None
if len(call.arguments) >= 2 and call.name in sets:
param_type = self._extract_param_type(call.arguments[1])
value = self._extract_param_value(call, arg_pos=1)
self._on_write_param(node, self._resolve_node_handle(call),
call, param_type, value)
def _query_param_primitives(self, node, gs):
ros_prefix = "c:@N@ros@N@param@"
gets = ("get", "getCached", "param")
reads = gets + ("has",)
for call in CodeQuery(gs).all_calls.where_name(reads).get():
if (call.full_name.startswith("ros::param")
or (isinstance(call.reference, str)
and call.reference.startswith(ros_prefix))):
param_type = default_value = None
if call.name in gets:
param_type = self._extract_param_type(call.arguments[1])
if call.name == "param":
if len(call.arguments) > 2:
default_value = self._extract_param_value(
call, arg_pos=2)
elif len(call.arguments) == 2:
default_value = self._extract_param_value(
call, arg_pos=1)
self._on_read_param(node, "", call, param_type, default_value)
for call in (CodeQuery(gs).all_calls.where_name("search")
.where_result("bool").get()):
if (call.full_name.startswith("ros::param")
or (isinstance(call.reference, str)
and call.reference.startswith(ros_prefix))):
if len(call.arguments) > 2:
ns = resolve_expression(call.arguments[0])
if not isinstance(ns, basestring):
ns = "?"
else:
ns = "~"
self._on_read_param(node, ns, call, None, None)
sets = ("set",)
writes = sets + ("del",)
for call in CodeQuery(gs).all_calls.where_name(writes).get():
if (call.full_name.startswith("ros::param")
or (isinstance(call.reference, str)
and call.reference.startswith(ros_prefix))):
param_type = value = None
if len(call.arguments) >= 2 and call.name in sets:
param_type = self._extract_param_type(call.arguments[1])
value = self._extract_param_value(call, arg_pos=1)
self._on_write_param(node, "", call, param_type, value)
def _on_publication(self, node, ns, call, topic_pos=0, queue_pos=1,
msg_type=None, latch_pos=-1):
if len(call.arguments) <= 1:
return
name = self._extract_topic(call, topic_pos=topic_pos)
msg_type = msg_type or self._extract_message_type(call)
queue_size = self._extract_queue_size(call, queue_pos=queue_pos)
latched = False
if len(call.arguments) >= 3 and len(call.arguments) > latch_pos:
latched = self._extract_latch(call, latch_pos)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = []
for path in get_condition_paths(call):
for c in path:
conditions.append(SourceCondition(pretty_str(c.value),
location=self._condition_location(c, location.file),
statement=c.statement))
break # FIXME
pub = AdvertiseCall(name, ns, msg_type, queue_size, latched=latched,
location=location, control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.advertise.append(pub)
self.log.debug("Found AdvertiseCall on %s/%s (%s)", ns, name, msg_type)
def _on_subscription(self, node, ns, call, topic_pos=0, queue_pos=1,
msg_type=None):
if len(call.arguments) <= 1:
return
name = self._extract_topic(call, topic_pos=topic_pos)
msg_type = msg_type or self._extract_message_type(call)
queue_size = self._extract_queue_size(call, queue_pos=queue_pos)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = []
for path in get_condition_paths(call):
for c in path:
conditions.append(SourceCondition(pretty_str(c.value),
location=self._condition_location(c, location.file),
statement=c.statement))
break # FIXME
sub = SubscribeCall(name, ns, msg_type, queue_size, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.subscribe.append(sub)
self.log.debug("Found SubscribeCall on %s/%s (%s)", ns, name, msg_type)
def _on_service(self, node, ns, call):
if len(call.arguments) <= 1:
return
name = self._extract_topic(call)
msg_type = self._extract_message_type(call)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = []
for path in get_condition_paths(call):
for c in path:
conditions.append(SourceCondition(pretty_str(c.value),
location=self._condition_location(c, location.file),
statement=c.statement))
break # FIXME
srv = AdvertiseServiceCall(name, ns, msg_type, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.service.append(srv)
self.log.debug("Found Service on %s/%s (%s)", ns, name, msg_type)
def _on_client(self, node, ns, call):
if len(call.arguments) <= 1:
return
name = self._extract_topic(call)
msg_type = self._extract_message_type(call)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = []
for path in get_condition_paths(call):
for c in path:
conditions.append(SourceCondition(pretty_str(c.value),
location=self._condition_location(c, location.file),
statement=c.statement))
break # FIXME
cli = ServiceClientCall(name, ns, msg_type, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.client.append(cli)
self.log.debug("Found Client on %s/%s (%s)", ns, name, msg_type)
def _on_read_param(self, node, ns, call, param_type, default_value):
if len(call.arguments) < 1:
return
name = self._extract_topic(call)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = []
for path in get_condition_paths(call):
for c in path:
conditions.append(SourceCondition(pretty_str(c.value),
location=self._condition_location(c, location.file),
statement=c.statement))
break # FIXME
read = GetParamCall(name, ns, param_type,
default_value=default_value, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive = True))
node.read_param.append(read)
self.log.debug("Found Read on %s/%s (%s) (%s)",
ns, name, param_type, default_value)
def _on_write_param(self, node, ns, call, param_type, value):
if len(call.arguments) < 1:
return
name = self._extract_topic(call)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = []
for path in get_condition_paths(call):
for c in path:
conditions.append(SourceCondition(pretty_str(c.value),
location=self._condition_location(c, location.file),
statement=c.statement))
break # FIXME
wrt = SetParamCall(name, ns, param_type, value=value,
location=location, control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive = True))
node.write_param.append(wrt)
self.log.debug("Found Write on %s/%s (%s) (%s)",
ns, name, param_type, value)
def _condition_location(self, condition_obj, sf):
if sf is not None:
if sf.path != condition_obj.file:
self.log.debug(("condition Location: files do not match: "
"'%s', '%s'"), sf.path, condition_obj.file)
if condition_obj.file.startswith(self.package.path):
for sf2 in self.package.source_files:
if sf2.path == condition_obj.file:
sf = sf2
break
self.log.debug("Location: found correct file")
return Location(self.package, file=sf, line=condition_obj.line,
col=condition_obj.column, fun=condition_obj.function.name)
def _call_location(self, call):
try:
source_file = next(
sf
for sf in self.package.source_files
if sf.path == call.file)
except StopIteration:
source_file = None
function = call.function
if function:
function = function.name
return Location(self.package, file=source_file,
line=call.line, col=call.column, fun=function)
def _resolve_it_node_handle(self, value):
value = resolve_expression(value)
if (isinstance(value, CppFunctionCall)
and value.name == "ImageTransport"):
return self._resolve_node_handle(value.arguments[0])
return "?"
def _resolve_node_handle(self, call):
ns = "?"
node_handle = getattr(call, 'method_of', None) or call
if getattr(node_handle, 'name', None) == 'operator->':
node_handle = node_handle.arguments[0]
node_handle_def = None
if isinstance(node_handle, CppReference):
node_handle_def = resolve_reference(node_handle)
elif isinstance(node_handle, CppDefaultArgument):
return ''
# A function needs to be called to create a NodeHandle (constructors
# are functions)
if isinstance(node_handle_def, CppFunctionCall):
# node_handle_def is a call to the constructor
if node_handle_def.name == 'NodeHandle':
args = node_handle_def.arguments
# Copy constructor
if len(args) == 1:
parent = args[0]
if isinstance(parent, CppFunctionCall):
if parent.name == 'getNodeHandle':
return ''
elif parent.name == 'getPrivateNodeHandle':
return '~'
return self._resolve_node_handle(parent)
# All other constructors have at least two arguments. The third
# is never meaningful.
# If a parent NodeHandle is passed, it is the first argument.
# If a namespace argument is passed, it is either the first or the
# second parameter. Only the first has an empty default value.
prefix = ''
if isinstance(args[0], basestring):
ns = args[0]
elif isinstance(args[0], CppDefaultArgument):
ns = ''
elif isinstance(args[1], basestring):
prefix = self._resolve_node_handle(args[0])
ns = args[1]
else:
ns = "?"
if prefix:
ns = prefix + "/" + ns
elif node_handle_def.name == 'getNodeHandle':
ns = ''
elif node_handle_def.name == 'getPrivateNodeHandle':
ns = '~'
elif isinstance(node_handle_def, CppDefaultArgument):
ns = ''
return ns
def _extract_topic(self, call, topic_pos=0):
name = resolve_expression(call.arguments[topic_pos])
if not isinstance(name, basestring):
name = "?"
return name or "?"
def _extract_message_type(self, call):
if call.template:
template = call.template[0]
std_alloc = re.search("_<std::allocator<void>", template)
if std_alloc is not None:
template = template[:std_alloc.start()]
#assert re.match(r"\w+::\w+$", template)
if not re.match(r"\w+::\w+$", template):
self.log.debug("Weird message type: " + repr(template))
return template.replace("::", "/")
if (call.name not in ("subscribe", "advertiseService")
and 'NodeHandle' not in call.full_name):
return "?"
callback = (call.arguments[2]
if call.name == "subscribe"
else call.arguments[1])
while isinstance(callback, CppOperator):
callback = callback.arguments[0]
type_string = callback.result
try:
type_string = type_string.split(None, 1)[1]
except IndexError:
type_string = type_string.strip()
if type_string.startswith("(*)"):
type_string = type_string[3:]
if type_string[0] == "(" and type_string[-1] == ")":
type_string = type_string[1:-1]
if call.name == "advertiseService":
type_string = type_string.split(", ")[0]
is_const = type_string.startswith("const ")
if is_const:
type_string = type_string[6:]
is_ref = type_string.endswith(" &")
if is_ref:
type_string = type_string[:-2]
is_ptr = type_string.endswith("::ConstPtr")
if is_ptr:
type_string = type_string[:-10]
else:
is_ptr = type_string.endswith("ConstPtr")
if is_ptr:
type_string = type_string[:-8]
if type_string.endswith("::Request"):
type_string = type_string[:-9]
if type_string.startswith("boost::function"):
type_string = type_string[52:-25]
type_string = type_string.replace("::", "/")
if re.match(r"\w+/\w+$", type_string):
return type_string
return "?"
def _extract_action(self, call):
name = "?"
if "SimpleActionServer" in call.canonical_type and len(call.arguments) > 2:
arg = call.arguments[1]
if not isinstance(arg, basestring):
arg = resolve_expression(arg)
if isinstance(arg, basestring):
name = arg.split()[-1].replace("'", "")
elif "SimpleActionClient" in call.canonical_type and len(call.arguments) > 1:
if isinstance(call.arguments[0], basestring):
name = call.arguments[0]
return name
def _extract_action_type(self, call):
type_string = call.template[0]
return type_string.replace("::", "/")
def _extract_queue_size(self, call, queue_pos=1):
queue_size = resolve_expression(call.arguments[queue_pos])
if isinstance(queue_size, (int, float)):
return queue_size
return None
def _extract_latch(self, call, latch_pos):
expr = call.arguments[latch_pos]
self.log.debug("extract latched publisher from {!r}".format(expr))
if isinstance(expr, CppDefaultArgument):
self.log.debug("latch is default: false")
return False
latch = resolve_expression(expr)
self.log.debug("resolve latch expr returns {!r}".format(latch))
if not isinstance(latch, bool):
return None
return latch
def _extract_param_type(self, value):
self.log.debug("extract param type from {}".format(repr(value)))
if value is True or value is False:
return "bool"
if isinstance(value, int):
return "int"
if isinstance(value, float):
return "double"
if isinstance(value, basestring):
return "str"
cpp_type = getattr(value, "result", None)
if cpp_type:
self.log.debug("param type from C++ type {}".format(repr(cpp_type)))
if cpp_type == "std::string" or cpp_type == "char *":
return "str"
if cpp_type == "int":
return "int"
if cpp_type == "double":
return "double"
if cpp_type == "bool":
return "bool"
return "yaml" if cpp_type else None
def _extract_param_value(self, call, arg_pos=1):
self.log.debug("extract_param_value({!r}, pos={})".format(
call.arguments, arg_pos))
if len(call.arguments) <= arg_pos:
self.log.debug("Failed to extract param value: not enough arguments")
return None
value = resolve_expression(call.arguments[arg_pos])
if isinstance(value, CppEntity):
self.log.debug("Failed to extract param value: " + repr(value))
return None
return value
###############################################################################
# Python Primitive Extractor
###############################################################################
class RospyExtractor(LoggingObject):
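# Positional index of the queue_size argument in rospy.Publisher(...) and
# rospy.Subscriber(...) calls, used when it is not passed as a keyword.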
queue_size_pos = {
'publisher': 6,
'subscriber': 4,
}
rospy_names = {
'publication': ('Publisher',),
'subscription': ('Subscriber',),
'service-def': ('Service',),
'service-call': ('ServiceProxy',),
}
@classmethod
def all_rospy_names(cls, type):
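# e.g. all_rospy_names('publication') -> ('rospy.Publisher', 'Publisher')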
names = cls.rospy_names[type]
return tuple('rospy.' + name for name in names) + names
@staticmethod
def get_arg(call, pos, name):
try:
return next(
keyword.value
for keyword in call.named_args
if keyword.name == name)
except StopIteration:
try:
return call.arguments[pos]
except IndexError:
return None
@staticmethod
def invalid_call(call, n=1):
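# A call is considered invalid (and ignored) when it carries at most n
# arguments in total, counting positional, keyword, *args and **kwargs.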
return (len(call.arguments) + len(call.named_args)
+ bool(call.star_args) + bool(call.kw_args)) <= n
@staticmethod
def split_ns_name(full_name):
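# e.g. split_ns_name('/foo/bar') -> ('/foo', 'bar'); split_ns_name('baz') -> ('', 'baz')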
if '/' in full_name:
ns, _, name = full_name.rpartition('/')
else:
ns, name = '', full_name
return ns, name
def _call_location(self, call):
try:
source_file = next(
sf
for sf in self.package.source_files
if sf.path == call.file)
except StopIteration:
source_file = None
function = call.function
if function:
function = function.name
return Location(self.package, file=source_file, line=call.line,
fun=function)
@classmethod
def _extract_queue_size(cls, call):
pos = cls.queue_size_pos[call.name.lower()]
queue_size_arg = cls.get_arg(call, pos, 'queue_size')
try:
queue_size = resolve_expression(queue_size_arg)
assert(isinstance(queue_size, (int, float)))
return queue_size
except AssertionError:
return None
@classmethod
def _extract_message_type(cls, call, arg_name, msgs_imports, pkgs_imports, arg_pos=1):
msg_type = cls.get_arg(call, 1, arg_name)
# Very common case of calling type() on a message class
if isinstance(msg_type, CodeFunctionCall) and msg_type.name == 'type':
msg_type = msg_type.arguments[0].name
if isinstance(msg_type, CodeReference):
msg_type = resolve_reference(msg_type) or msg_type
if isinstance(msg_type, CodeReference):
if msg_type.field_of is None:
for pkg_name, msg_name in msgs_imports:
if msg_name == msg_type.name:
return pkg_name + "/" + msg_name
else:
maybe_pkg = msg_type.field_of
if isinstance(maybe_pkg, CodeReference):
pkg_name = maybe_pkg.name
if pkg_name in pkgs_imports:
return pkg_name + "/" + msg_type.name
return "?"
@classmethod
def _extract_topic(cls, call):
name = resolve_expression(cls.get_arg(call, 0, 'name'))
if not isinstance(name, basestring):
name = '?'
return cls.split_ns_name(name)
def _on_client(self, node, call):
if self.invalid_call(call):
return
ns, name = self._extract_topic(call)
msg_type = self._extract_message_type(call, 'service_class', self.msgs_list, self.pkgs_list)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
cli = ServiceClientCall(name, ns, msg_type, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.client.append(cli)
self.log.debug("Found Client on %s/%s (%s)", ns, name, msg_type)
def _on_publication(self, node, call):
if self.invalid_call(call):
return
ns, name = self._extract_topic(call)
msg_type = self._extract_message_type(call, 'data_class', self.msgs_list, self.pkgs_list)
queue_size = self._extract_queue_size(call)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
pub = AdvertiseCall(name, ns, msg_type, queue_size, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.advertise.append(pub)
self.log.debug("Found AdvertiseCall on %s/%s (%s)", ns, name, msg_type)
def _on_service(self, node, call):
if self.invalid_call(call):
return
ns, name = self._extract_topic(call)
msg_type = self._extract_message_type(call, 'service_class', self.msgs_list, self.pkgs_list)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
srv = AdvertiseServiceCall(name, ns, msg_type, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.service.append(srv)
self.log.debug("Found Service on %s/%s (%s)", ns, name, msg_type)
def _on_subscription(self, node, call):
if self.invalid_call(call):
return
ns, name = self._extract_topic(call)
msg_type = self._extract_message_type(call, 'data_class', self.msgs_list, self.pkgs_list)
queue_size = self._extract_queue_size(call)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
sub = SubscribeCall(name, ns, msg_type, queue_size, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.subscribe.append(sub)
self.log.debug("Found SubscribeCall on %s/%s (%s)", ns, name, msg_type)
def _query_comm_primitives(self, node, gs):
##################################
# Topics
##################################
publications = (CodeQuery(gs).all_calls
.where_name(('Publisher', 'rospy.Publisher'))
.get())
subscriptions = (CodeQuery(gs).all_calls
.where_name(('Subscriber', 'rospy.Subscriber'))
.get())
for call in publications:
self._on_publication(node, call)
for call in subscriptions:
self._on_subscription(node, call)
##################################
# Services
##################################
service_defs = (CodeQuery(gs).all_calls
.where_name(self.all_rospy_names('service-def'))
.get())
service_calls = (CodeQuery(gs).all_calls
.where_name(self.all_rospy_names('service-call'))
.get())
for call in service_defs:
self._on_service(node, call)
for call in service_calls:
self._on_client(node, call)
def _on_param_getter(self, node, call):
if self.invalid_call(call, n=0):
return
name = resolve_expression(self.get_arg(call, 0, 'param_name'))
if not isinstance(name, basestring):
name = '?'
ns, name = self.split_ns_name(name)
param_type = None
default_value = self.get_arg(call, 1, 'default')
if default_value is not None:
default_value = resolve_expression(default_value)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
getter = GetParamCall(name, ns, param_type,
default_value=default_value, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.read_param.append(getter)
self.log.debug("Found GetParamCall on %s/%s", ns, name)
def _on_param_setter(self, node, call):
if self.invalid_call(call):
return
name = resolve_expression(self.get_arg(call, 0, 'param_name'))
if not isinstance(name, basestring):
name = '?'
ns, name = self.split_ns_name(name)
param_type = None
value = resolve_expression(self.get_arg(call, 1, 'param_value'))
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
setter = SetParamCall(name, ns, param_type, value=value,
location=location, control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.write_param.append(setter)
self.log.debug("Found SetParamCall on %s/%s", ns, name)
def _query_param_primitives(self, node, gs):
getters = (CodeQuery(gs).all_calls
.where_name(('get_param', 'rospy.get_param'))
.get())
setters = (CodeQuery(gs).all_calls
.where_name(('set_param', 'rospy.set_param'))
.get())
for call in getters:
self._on_param_getter(node, call)
for call in setters:
self._on_param_setter(node, call)
# FIXME: missing:
# rospy.has_param(param_name)
# rospy.delete_param(param_name)
def _setup_path(self):
setup_file = os.path.join(self.package.path, 'setup.py')
if not os.path.isfile(setup_file):
return []
parser = PyAstParser(workspace=self.package.path)
setup = parser.parse(setup_file)
setup_call = (CodeQuery(setup).all_calls
.where_name('generate_distutils_setup')
.get()
or
CodeQuery(setup).all_calls
.where_name('setup')
.get())[0]
package_dir = self.get_arg(setup_call, 0, 'package_dir')
if hasattr(package_dir, 'value'):
package_dir = {
keyword.name: keyword.value
for keyword in self.get_arg(setup_call, 0, 'package_dir').value
}
else:
src_path = os.path.join(self.package.path, 'src')
package_dir = {'': 'src'} if os.path.exists(src_path) else {}
root = package_dir.get('', '')
return [os.path.join(self.package.path, root)]
def __init__(self, package, workspace):
self.package = package
self.workspace = workspace
self.pythonpath = self._setup_path()
def extract(self, node):
self.log.debug("Parsing Python files for node %s", node.id)
self.log.debug("PyAstParser(pythonpath={!r}, workspace={!r})".format(
self.pythonpath, self.workspace))
parser = PyAstParser(pythonpath=self.pythonpath,
workspace=self.workspace)
for sf in node.source_files:
self.log.debug("Parsing Python file %s", sf.path)
if parser.parse(sf.path) is None:
self.log.warning("no compile commands for " + sf.path)
node.source_tree = parser.global_scope
# In theory the imported names list should not be needed here; this is a fix to be able to locate the complete description of ROS msg types (i.e. PkgName/MsgName).
self.msgs_list = []
self.pkgs_list = []
for imp_name in parser.imported_names_list:
s = str(imp_name)
if "msg" in s or "srv" in s:
ss = s.split(".")
if len(ss) < 2:
continue
if ss[-1] == "msg" or ss[-1] == "srv":
self.pkgs_list.append(ss[0])
elif ss[1] == "msg" or ss[1] == "srv":
self.msgs_list.append((ss[0], ss[2]))
else:
self.log.debug(("Python import with 'msg' or 'srv', "
"but unable to process it: ")
+ s)
# ----- queries after parsing, since global scope is reused -----------
self._query_comm_primitives(node, parser.global_scope)
self._query_param_primitives(node, parser.global_scope)
###############################################################################
# Node Hints
###############################################################################
class NodeHints2(LoggingObject):
# pkg/node:
# fix: (fix variables)
# advertise@1: name
# getParam@1: true
# advertise: (always adds)
# - full JSON spec
# - full JSON spec
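# Example hints dict (illustrative only; the field names inside the full JSON
# spec are assumptions based on what each call class accepts in from_JSON_specs):
#   {"my_pkg/my_node": {
#       "fix": {"advertise@1": "/camera/image_raw", "getParam@1": True}}}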
def __init__(self, hints, pkg_finder=None):
if not isinstance(hints, dict):
raise ValueError("expected dict of hints, got " + repr(hints))
for key, value in hints.items():
if not isinstance(key, basestring) or key.count("/") != 1:
raise ValueError("expected 'pkg/node' key, found " + repr(key))
if not isinstance(value, dict):
raise ValueError("expected dict value, found " + repr(value))
self.hints = hints
self.pkg_finder = pkg_finder
def apply_to(self, nodes, create=False):
if not self.hints:
return []
nodes = self._list_to_dict(nodes)
if create and not self.pkg_finder:
raise ValueError("received create=True but no pkg_finder")
new_nodes = []
for node_type, node_hints in self.hints.items():
node = nodes.get(node_type)
if node is not None:
fix_hints = node_hints.get("fix", _EMPTY_DICT)
if not isinstance(fix_hints, dict):
raise ValueError("expected dict in {}:fix; got {!r}".format(
node_type, fix_hints))
self.log.info("Merging extracted Node with hints: " + node_type)
self.log.debug("node specs %s %s", node, node_hints)
node.resolve_variables(fix_hints)
elif create:
self.log.info("Creating new Node from hints: " + node_type)
self.log.debug("node specs %s %s", node_type, node_hints)
node = self._create(node_type, node_hints)
if node is not None:
new_nodes.append(node)
if node is not None:
self._add_primitives(node, node_hints)
hpl = node_hints.get("hpl", _EMPTY_DICT)
node.hpl_properties = list(hpl.get("properties", _EMPTY_LIST))
node.hpl_assumptions = list(hpl.get("assumptions", _EMPTY_LIST))
return new_nodes
def _create(self, node_type, hints):
pkg_name, exe = node_type.split("/")
pkg = self.pkg_finder.get("package:" + pkg_name)
if pkg is None:
self.log.error("Unable to find package: " + repr(pkg_name))
return None
rosname = hints.get("rosname")
nodelet_cls = hints.get("nodelet")
node = Node(exe, pkg, rosname=rosname, nodelet=nodelet_cls)
return node
def _add_primitives(self, node, hints):
for key, attr, cls in self._PRIMITIVES:
calls = getattr(node, attr)
for datum in hints.get(key, _EMPTY_LIST):
call = cls.from_JSON_specs(datum)
call.location = self._location_from_JSON(datum.get("location"))
calls.append(call)
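# (hint key, Node attribute, call class) triples consumed by _add_primitives above.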
_PRIMITIVES = (
("advertise", "advertise", AdvertiseCall),
("subscribe", "subscribe", SubscribeCall),
("advertiseService", "service", AdvertiseServiceCall),
("serviceClient", "client", ServiceClientCall),
("getParam", "read_param", GetParamCall),
("setParam", "write_param", SetParamCall)
)
def _list_to_dict(self, nodes):
if isinstance(nodes, dict):
return nodes
return {node.node_name: node for node in nodes}
# FIXME code duplication
def _location_from_JSON(self, datum):
if datum is None:
return None
pkg = self.pkg_finder.get("package:" + datum["package"])
if pkg is None:
self.log.error("Unable to find package: " + repr(datum["package"]))
return None
source_file = None
filename = datum["file"]
if filename:
try:
source_file = next(sf for sf in pkg.source_files
if sf.full_name == filename)
except StopIteration:
self.log.error("Unable to find file: '{}/{}'".format(
datum["package"], filename))
return Location(pkg, file=source_file,
line=datum.get("line", 1), col=datum.get("column", 1),
fun=datum.get("function"), cls=datum.get("class"))
|
import numpy as np
from ge.classify import read_node_label, Classifier
from ge import Node2Vec
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import networkx as nx
from sklearn.manifold import TSNE
def evaluate_embeddings(embeddings):
X, Y = read_node_label('../data/wiki/wiki_labels.txt')
tr_frac = 0.8
print("Training classifier using {:.2f}% nodes...".format(
tr_frac * 100))
clf = Classifier(embeddings=embeddings, clf=LogisticRegression())
clf.split_train_evaluate(X, Y, tr_frac)
def plot_embeddings(embeddings):
X, Y = read_node_label('../data/wiki/wiki_labels.txt')
emb_list = []
for k in X:
emb_list.append(embeddings[k])
emb_list = np.array(emb_list)
model = TSNE(n_components=2)
node_pos = model.fit_transform(emb_list)
color_idx = {}
for i in range(len(X)):
color_idx.setdefault(Y[i][0], [])
color_idx[Y[i][0]].append(i)
for c, idx in color_idx.items():
plt.scatter(node_pos[idx, 0], node_pos[idx, 1], label=c)
plt.legend()
plt.show()
if __name__ == "__main__":
G=nx.read_edgelist('../data/wiki/Wiki_edgelist.txt',
create_using = nx.DiGraph(), nodetype = None, data = [('weight', int)])
model=Node2Vec(G, walk_length = 10, num_walks = 80,
p = 0.25, q = 4, workers = 1)
model.train(window_size = 5, iter = 3)
embeddings=model.get_embeddings()
evaluate_embeddings(embeddings)
plot_embeddings(embeddings)
|
# Write results to this file
OUTFILE = 'runs/src2-tgt1/seq-bro-iter10000.result.csv'
# Source computers for the requests
SOURCE = ['10.0.0.1', '10.0.0.3']
# Should Bro be enabled on the source machines?
SOURCE_BRO = [True, True]
# Target machines for the requests (aka server)
TARGET = ['10.0.0.2']
# Should Bro be enabled on the target machines?
TARGET_BRO = [True]
# Connection mode (par = parallel, seq = sequential)
MODE = 'seq'
# Number of evaluation repetitions to run
EPOCHS = 100
# Number of iterations to be run in each evaluation repetition
ITER = 10000
# Size of the file to be downloaded from target (in Bytes * 10^SIZE)
SIZE = 5
|
"""Test around scalar constructors and scalar methods."""
import riptable as rt
import numpy as np
import pytest
from numpy.testing import assert_almost_equal, assert_warns
class TestScalarConstructor(object):
# Type-coercion from strings test cases adapted from numpy/core/tests/test_scalar_ctors.py.
# https://github.com/numpy/numpy/blob/c31cc36a8a814ed4844a2a553454185601914a5a/numpy/core/tests/test_scalar_ctors.py
@pytest.mark.parametrize(
"scalar_ctor, numeric_string",
[
# simple numeric string
("single", "1.234"),
("double", "1.234"),
("longdouble", "1.234"),
# numeric string with overflow; expect inf value
("half", "1e10000"),
("single", "1e10000"),
("double", "1e10000"),
("longdouble", "1e10000"),
("longdouble", "-1e10000"),
],
)
def test_floating(self, scalar_ctor, numeric_string):
rt_value = getattr(rt, scalar_ctor)(numeric_string)
np_value = getattr(np, scalar_ctor)(numeric_string)
assert_almost_equal(rt_value, np_value)
@pytest.mark.parametrize(
"scalar_ctor, numeric_string",
[("longdouble", "1e10000"), ("longdouble", "-1e10000"),],
)
def test_overflow_warning(self, scalar_ctor, numeric_string):
assert_warns(RuntimeWarning, getattr(np, scalar_ctor), numeric_string)
|
import biokbase.narrative.clients as clients
from biokbase.narrative.app_util import (
app_version_tags,
check_tag,
app_param
)
import json
from jinja2 import Template
from IPython.display import HTML
class SpecManager(object):
__instance = None
app_specs = dict()
type_specs = dict()
def __new__(cls):
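# Singleton: the first construction creates the single instance and loads
# all specs via reload(); later calls return the same object.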
if SpecManager.__instance is None:
SpecManager.__instance = object.__new__(cls)
SpecManager.__instance.reload()
return SpecManager.__instance
def get_spec(self, app_id, tag='release'):
self.check_app(app_id, tag, raise_exception=True)
return self.app_specs[tag][app_id]
def get_type_spec(self, type_id, raise_exception=True):
if (type_id not in self.type_specs) and raise_exception:
raise ValueError('Unknown type id "{}"'.format(type_id))
return self.type_specs.get(type_id)
def reload(self):
"""
Reloads all app specs into memory from the latest update.
"""
client = clients.get('narrative_method_store')
for tag in app_version_tags:
specs = client.list_methods_spec({'tag': tag})
spec_dict = dict()
for spec in specs:
spec_dict[spec['info']['id']] = spec
self.app_specs[tag] = spec_dict
# And let's load all types from the beginning and cache them
self.type_specs = client.list_categories({'load_types': 1})[3]
def app_description(self, app_id, tag='release'):
"""
Returns the app description rendered as an HTML object for display in the notebook.
"""
self.check_app(app_id, tag, raise_exception=True)
info = clients.get('narrative_method_store').get_method_full_info({'ids': [app_id], 'tag': tag})[0]
tmpl = """
<div class="bg-info" style="padding:15px">
<h1>{{info.name}} <small>{{info.module_name}}</small></h1>
<p class='lead'>{{info.id}} - v{{info.ver}}</p>
</div>
<p class='lead'>{{info.subtitle}}</p>
<hr>
{{info.description}}
"""
return HTML(Template(tmpl).render(info=info))
def available_apps(self, tag="release"):
"""
Lists the set of available apps in a pretty HTML way.
Usable only in the Jupyter notebook.
"""
check_tag(tag, raise_exception=True)
tmpl="""
<b>Available {{tag}} apps</b><br>
<table class="table table-striped table-bordered table-condensed">
<thead>
<tr>
<th>Id</th>
<th>Name</th>
<th>Subtitle</th>
</tr>
</thead>
{% for m in apps %}
<tr>
<td>
{{ m.info.id }}
</td>
<td>
{{ m.info.name }}
</td>
<td>
{{ m.info.subtitle }}
</td>
</tr>
{% endfor %}
</table>
"""
return HTML(Template(tmpl).render(tag=tag,
apps=sorted(list(self.app_specs[tag].values()), key=lambda m: m['info']['id'])))
def app_usage(self, app_id, tag='release'):
"""
Should show app inputs and outputs. Something like this:
App id
App name
Subtitle
Parameters
1. xxx - string - data object - [type1, type2, type3]
(subtitle, description, etc)
2. yyy - int - from x to y
(subtitle, description, etc)
3. zzz - list of strings
...
4. aaa - OUTPUT - ....
"""
self.check_app(app_id, tag, raise_exception=True)
spec = self.app_specs[tag][app_id]
# start with basic info
usage = {'id': app_id,
'name': spec['info']['name'],
'tag': tag,
'subtitle': spec['info']['subtitle'],
'ver': spec['info']['ver'],
'params': self.app_params(spec)}
return AppUsage(usage)
def check_app(self, app_id, tag='release', raise_exception=False):
"""
Checks if a method (and release tag) is available for running and such.
If raise_exception==True, and either the tag or app_id are invalid, a ValueError is raised.
If raise_exception==False, and there's something invalid, it just returns False.
If everything is hunky-dory, it returns True.
"""
tag_ok = check_tag(tag, raise_exception=raise_exception)
if not tag_ok:
return False
if app_id not in self.app_specs[tag]:
if raise_exception:
raise ValueError('Unknown app id "{}" tagged as "{}"'.format(app_id, tag))
return False
return True
def app_params(self, spec):
"""
Should return a dict of params with key = id, val =
{
optional = boolean,
is_constant = boolean,
value = (whatever, optional),
type = [text|int|float|list],
is_output = boolean,
short_hint = string,
description = string,
allowed_values = list (optional),
}
"""
params = list()
for p in spec['parameters']:
p_info = app_param(p)
params.append(p_info)
for p in spec.get('parameter_groups', []):
p_info = {'id': p.get('id', ''), 'is_group': True}
p_info['optional'] = p.get('optional', 0) == 1
p_info['short_hint'] = p.get('short_hint', '')
p_info['description'] = p.get('ui_name', '')
p_info['parameter_ids'] = p.get('parameter_ids', [])
p_info['id_mapping'] = p.get('id_mapping', {})
p_info['allow_multiple'] = p.get('allow_multiple', 0)
p_info['type'] = 'group'
params.append(p_info)
return sorted(params, key=lambda p: (p.get('optional', False), p.get('is_output', False)))
class AppUsage(object):
"""
A tiny class for representing app usage in HTML (or as a pretty string)
"""
def __init__(self, usage):
self.usage = usage
def _repr_html_(self):
tmpl = """
<h1>{{usage.name}}</h1>
id = {{usage.id}}<br>
{{usage.subtitle}}<br>
Parameters (<span class="bg-warning">required</span>)
<table class="table table-striped table-bordered table-condensed">
<thead>
<tr>
<th>Id</th>
<th>Type</th>
<th>Allowed Types</th>
<th>Description</th>
<th>Allowed Values</th>
<th>Default</th>
</tr>
</thead>
{% for p in usage.params %}
<tr {% if not p.optional %}class="warning"{% endif %}>
<td>{{ p.id|e }}</td>
<td>{{ p.type|e }}{% if p.is_output %} (output){% endif %}</td>
<td>
{% if p.allowed_types %}
{% for t in p.allowed_types %}
{{t}}<br>
{% endfor %}
{% else %}
N/A
{% endif %}
</td>
<td>{{ p.short_hint|e }}</td>
<td>
{% if p.allowed_values %}
{% for v in p.allowed_values %}
{{v}}<br>
{% endfor %}
{% else %}
N/A
{% endif %}
</td>
<td>
{% if p.default %}
{{ p.default }}
{% endif %}
</td>
</tr>
{% endfor %}
</table>
"""
return Template(tmpl).render(usage=self.usage)
# return "<h1>" + self.usage['name'] + "</h1>" + self.usage['id'] + "<br>"
def __repr__(self):
return self.__str__()
def __str__(self):
s = "id: {}\nname: {}\nsubtitle: {}\nparameters (*required):\n-----------------------".format(self.usage['id'], self.usage['name'], self.usage['subtitle'])
for p in self.usage['params']:
if not p.get("is_constant", False):
p_def = "\n{}{} - {}".format('*' if not p['optional'] else '', p['id'], p['type'])
if "allowed_types" in p:
p_def = p_def + " - is a data object where the type is one of: {}".format(json.dumps(p['allowed_types']))
if "allowed_values" in p:
p_def = p_def + " - must be one of {}".format(json.dumps(p['allowed_values']))
s = s + p_def
return s
|
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from .analyze_db import AnalyzeDbTask
from .create_db import CreateDbTask
from .create_generic_db import CreateGenericDbTask
from .parse_folder import ParseFolderTask
from .parse_s3 import ParseS3Task
__all__ = [
'AnalyzeDbTask',
'CreateDbTask',
'CreateGenericDbTask',
'ParseFolderTask',
'ParseS3Task',
]
|
import unittest.mock
from programy.brain import Brain
from programy.bot import BrainFactory
from programy.bot import Bot
from programy.config.bot.bot import BotConfiguration
from programy.config.programy import ProgramyConfiguration
from programy.clients.events.console.config import ConsoleConfiguration
from programy.context import ClientContext
from programy.config.bot.spelling import BotSpellingConfiguration
from programytest.client import TestClient
class MockBrain(Brain):
def __init__(self, bot, configuration):
Brain.__init__(self, bot, configuration)
self._response = ""
def ask_question(self, clientid, sentence, srai=False):
return self._response
class MockBot(Bot):
def __init__(self, config: BotConfiguration, client):
Bot.__init__(self, config, client)
def loads_brains(self, bot):
self._brains["mock"] = MockBrain(self, self.configuration.configurations[0])
class BrainFactoryTests(unittest.TestCase):
def test_empty_config_init(self):
configuration = BotConfiguration()
configuration._bot_selector = "programy.clients.client.DefaultBrainSelector"
client = TestClient()
bot = Bot(configuration, client)
factory = BrainFactory(bot)
self.assertIsNotNone(factory)
brain = factory.select_brain()
self.assertIsNotNone(brain)
self.assertIsInstance(brain, Brain)
class BotTests(unittest.TestCase):
def setUp(self):
client = TestClient()
self._client_context = client.create_client_context("testid")
def test_bot_init_blank(self):
client = TestClient()
bot = Bot(BotConfiguration(), client)
self.assertIsNotNone(bot.brain)
self.assertEqual("bot", bot.ylogger_type())
self.assertIsNone(bot.spell_checker)
self.assertIsNotNone(bot.sentence_splitter)
self.assertIsNotNone(bot.sentence_joiner)
self.assertIsNotNone(bot.conversations)
self.assertIsNotNone(bot.default_response)
self.assertIsNotNone(bot.exit_response)
self.assertIsNotNone(bot.initial_question)
self.assertTrue(bot.override_properties)
self.assertIsNotNone(bot.get_version_string)
def test_bot_init_with_config(self):
bot_config = BotConfiguration()
bot_config._bot_root = BotConfiguration.DEFAULT_ROOT
bot_config._default_response = BotConfiguration.DEFAULT_RESPONSE
bot_config._exit_response = BotConfiguration.DEFAULT_EXIT_RESPONSE
bot_config._initial_question = BotConfiguration.DEFAULT_INITIAL_QUESTION
bot_config._empty_string = BotConfiguration.DEFAULT_EMPTY_STRING
bot_config._override_properties = BotConfiguration.DEFAULT_OVERRIDE_PREDICATES
bot_config._max_question_recursion = 1000
bot_config._max_question_timeout = 60
bot_config._max_search_depth = 100
bot_config._max_search_timeout = 60
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot.brain)
self.assertIsNone(bot.spell_checker)
self.assertIsNotNone(bot.sentence_splitter)
self.assertIsNotNone(bot.sentence_joiner)
self.assertIsNotNone(bot.conversations)
self.assertIsNotNone(bot.default_response)
self.assertIsNotNone(bot.exit_response)
self.assertIsNotNone(bot.initial_question)
self.assertTrue(bot.override_properties)
self.assertIsNotNone(bot.get_version_string)
def test_bot_old_version(self):
bot_config = BotConfiguration()
client = TestClient()
bot = Bot(bot_config, client)
self._client_context.brain.properties.add_property("name", 'bot'),
self._client_context.brain.properties.add_property("version", "1.9.3"),
self._client_context.brain.properties.add_property("birthdate", "1st January 2019")
version = bot.get_version_string(self._client_context)
self.assertIsNotNone(version)
self.assertEqual("bot, v1.9.3, initiated 1st January 2019", version)
def test_bot_new_version(self):
bot_config = BotConfiguration()
client = TestClient()
bot = Bot(bot_config, client)
self._client_context.brain.properties.add_property("name", 'bot'),
self._client_context.brain.properties.add_property("app_version", "1.9.3"),
self._client_context.brain.properties.add_property("grammar_version", "37"),
self._client_context.brain.properties.add_property("birthdate", "1st January 2019")
version = bot.get_version_string(self._client_context)
self.assertIsNotNone(version)
self.assertEqual("bot, App: v1.9.3 Grammar v37, initiated 1st January 2019", version)
def test_bot_init_no_spellchecker_configuration(self):
bot_config = BotConfiguration()
bot_config._spelling = None
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertIsNone(bot.spell_checker)
def test_bot_init_no_spellchecker(self):
bot_config = BotConfiguration()
bot_config.spelling._classname = None
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertIsNone(bot.spell_checker)
def test_bot_init_with_invalid_spellchecker(self):
bot_config = BotConfiguration()
bot_config.spelling._classname = "programy.spelling.checker.SpellingCheckerX"
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertIsNone(bot.spell_checker)
def test_bot_init_with_valid_spellchecker(self):
bot_config = BotConfiguration()
bot_config.spelling._classname = "programy.spelling.textblob_spelling.TextBlobSpellingChecker"
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertIsNotNone(bot.spell_checker)
def test_bot_init_no_splitter_configuration(self):
bot_config = BotConfiguration()
bot_config._splitter = None
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertIsNone(bot.sentence_splitter)
def test_bot_init_no_splitterr(self):
bot_config = BotConfiguration()
bot_config.splitter._classname = None
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertIsNone(bot.sentence_splitter)
def test_bot_init_with_invalid_splitterr(self):
bot_config = BotConfiguration()
bot_config.splitter._classname = "programy.spelling.checker.SpellingCheckerX"
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertIsNone(bot.sentence_splitter)
def test_bot_init_with_valid_splitterr(self):
bot_config = BotConfiguration()
bot_config.splitter._classname = "programy.dialog.splitter.splitter.SentenceSplitter"
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertIsNotNone(bot.sentence_splitter)
def test_bot_init_no_joiner_configuration(self):
bot_config = BotConfiguration()
bot_config._joiner = None
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertIsNone(bot.sentence_joiner)
def test_bot_init_no_joinerr(self):
bot_config = BotConfiguration()
bot_config.joiner._classname = None
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertIsNone(bot.sentence_joiner)
def test_bot_init_with_invalid_joinerr(self):
bot_config = BotConfiguration()
bot_config.joiner._classname = "programy.spelling.checker.SpellingCheckerX"
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertIsNone(bot.sentence_joiner)
def test_bot_init_with_valid_joinerr(self):
bot_config = BotConfiguration()
bot_config.joiner._classname = "programy.dialog.joiner.joiner.SentenceJoiner"
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertIsNotNone(bot.sentence_joiner)
def test_bot_init_no_translator_configuration(self):
bot_config = BotConfiguration()
bot_config._from_translator = None
bot_config._to_translator = None
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertIsNone(bot.from_translator)
self.assertIsNone(bot.to_translator)
def test_bot_init_no_translatorr(self):
bot_config = BotConfiguration()
bot_config.from_translator._classname = None
bot_config.to_translator._classname = None
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertIsNone(bot.from_translator)
self.assertIsNone(bot.to_translator)
def test_bot_init_with_invalid_translatorr(self):
bot_config = BotConfiguration()
bot_config.from_translator._classname = "programy.spelling.checker.SpellingCheckerX"
bot_config.to_translator._classname = "programy.spelling.checker.SpellingCheckerX"
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertIsNone(bot.from_translator)
self.assertIsNone(bot.to_translator)
def test_bot_init_with_valid_translatorr(self):
bot_config = BotConfiguration()
bot_config.from_translator._classname = "programy.translate.textblob_translator.TextBlobTranslator"
bot_config.to_translator._classname = "programy.translate.textblob_translator.TextBlobTranslator"
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertIsNotNone(bot.from_translator)
self.assertIsNotNone(bot.to_translator)
def test_bot_init_no_sentiment_analyser_configuration(self):
bot_config = BotConfiguration()
bot_config._sentiment = None
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertIsNone(bot.sentiment_analyser)
def test_bot_init_no_sentiment_analyserr(self):
bot_config = BotConfiguration()
bot_config.sentiment_analyser._classname = None
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertIsNone(bot.sentiment_analyser)
def test_bot_init_with_invalid_sentiment_analyserr(self):
bot_config = BotConfiguration()
bot_config.sentiment_analyser._classname = "programy.spelling.checker.SpellingCheckerX"
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertIsNone(bot.sentiment_analyser)
def test_bot_init_with_valid_sentiment_analyserr(self):
bot_config = BotConfiguration()
bot_config.sentiment_analyser._classname = "programy.sentiment.textblob_sentiment.TextBlobSentimentAnalyser"
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertIsNotNone(bot.sentiment_analyser)
def test_bot_init_default_brain(self):
client = TestClient()
bot = Bot(BotConfiguration(), client)
self.assertIsNotNone(bot)
self.assertIsNotNone(bot.brain)
def test_bot_init_supplied_brain(self):
client = TestClient()
bot = Bot(BotConfiguration(), client)
self.assertIsNotNone(bot)
self.assertIsNotNone(bot.brain)
def test_bot_defaultresponses(self):
client = TestClient()
bot = Bot(BotConfiguration(), client)
self.assertIsNotNone(bot)
self.assertEqual(bot.default_response, "")
self.assertEqual(bot.exit_response, "Bye!")
def test_bot_with_config(self):
configuration = ProgramyConfiguration(ConsoleConfiguration())
self.assertIsNotNone(configuration)
self.assertIsNotNone(configuration.client_configuration.configurations[0])
self.assertIsNotNone(configuration.client_configuration.configurations[0].configurations[0])
configuration.client_configuration.configurations[0].prompt = ":"
configuration.client_configuration.configurations[0].default_response = "No answer for that"
configuration.client_configuration.configurations[0].exit_response = "See ya!"
client = TestClient()
bot = Bot(configuration.client_configuration.configurations[0], client)
self.assertIsNotNone(bot)
self.assertEqual(bot.default_response, "No answer for that")
self.assertEqual(bot.exit_response, "See ya!")
def test_bot_with_conversation(self):
client = TestClient()
bot = Bot(BotConfiguration(), client)
self.assertIsNotNone(bot)
self.assertFalse(bot.has_conversation(self._client_context))
response = bot.ask_question(self._client_context, "hello")
self.assertIsNotNone(response)
self.assertTrue(bot.has_conversation(self._client_context))
response = bot.ask_question(self._client_context, "hello")
self.assertIsNotNone(response)
self.assertTrue(bot.has_conversation(self._client_context))
client_context2 = ClientContext(TestClient(), "testid2")
client_context2._bot = bot
client_context2._brain = self._client_context.bot.brain
response = bot.ask_question(client_context2, "hello")
self.assertIsNotNone(response)
self.assertTrue(bot.has_conversation(client_context2))
def test_bot_chat_loop(self):
client = TestClient()
bot = Bot(BotConfiguration(), client)
self.assertIsNotNone(bot)
self.assertIsInstance(bot, Bot)
bot.configuration._default_response = "Sorry, I don't have an answer for that right now"
response = bot.ask_question(self._client_context, "hello")
self.assertIsNotNone(response)
self.assertEqual(response, "Sorry, I don't have an answer for that right now.")
response = bot.ask_question(self._client_context, "hello again")
self.assertIsNotNone(response)
self.assertEqual(response, "Sorry, I don't have an answer for that right now.")
response = bot.ask_question(self._client_context, "goodbye")
self.assertIsNotNone(response)
self.assertEqual(response, "Sorry, I don't have an answer for that right now.")
conversation = bot.get_conversation(self._client_context)
self.assertIsNotNone(conversation)
self.assertEqual(conversation.previous_nth_question(2).sentence(0).text(), "hello")
self.assertEqual(conversation.previous_nth_question(2).sentence(0).response, "Sorry, I don't have an answer for that right now")
self.assertEqual(conversation.previous_nth_question(1).sentence(0).text(), "hello again")
self.assertEqual(conversation.previous_nth_question(1).sentence(0).response, "Sorry, I don't have an answer for that right now")
self.assertEqual(conversation.previous_nth_question(0).sentence(0).text(), "goodbye")
self.assertEqual(conversation.previous_nth_question(0).sentence(0).response, "Sorry, I don't have an answer for that right now")
def test_max_recursion(self):
client = TestClient()
bot = Bot(BotConfiguration(), client)
self.assertIsNotNone(bot)
bot.configuration._default_response = "Sorry, I don't have an answer for that right now"
bot.configuration._max_question_recursion = 0
with self.assertRaises(Exception):
bot.ask_question(self._client_context, "hello")
def test_get_default_response_empty_string(self):
bot_config = BotConfiguration()
self.assertIsNotNone(bot_config)
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertEqual("", bot.get_default_response(self._client_context))
def test_get_default_response_default_response_only(self):
bot_config = BotConfiguration()
self.assertIsNotNone(bot_config)
bot_config.default_response = "Default response!"
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertEqual("Default response!", bot.get_default_response(self._client_context))
def test_get_default_response_default_response_srai_no_match(self):
bot_config = BotConfiguration()
self.assertIsNotNone(bot_config)
bot_config.default_response_srai = "YDEFAULTRESPONSE"
bot_config.default_response = "Default response!"
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertEqual("Default response!", bot.get_default_response(self._client_context))
def test_get_default_response_default_response_srai_match(self):
bot_config = BotConfiguration()
self.assertIsNotNone(bot_config)
bot_config.default_response_srai = "YDEFAULTRESPONSE"
bot_config.default_response = "Default response!"
client = TestClient()
bot = MockBot(bot_config, client)
self.assertIsNotNone(bot)
client_context2 = ClientContext(TestClient(), "testid2")
client_context2._bot = bot
client_context2._brain = MockBrain(bot, bot.configuration.configurations[0])
client_context2._brain._response = "Y DEFAULT RESPONSE"
response = bot.get_default_response(client_context2)
self.assertIsNotNone(response)
self.assertEqual("Y DEFAULT RESPONSE", response)
def test_get_default_response_no_srai(self):
bot_config = BotConfiguration()
bot_config._default_response_srai = None
bot_config._default_response = "Test This"
self.assertIsNotNone(bot_config)
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertEqual("Test This", bot.get_default_response(self._client_context))
def test_get_initial_question_empty_string(self):
bot_config = BotConfiguration()
self.assertIsNotNone(bot_config)
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertEqual("Hello", bot.get_initial_question(self._client_context))
def test_get_initial_question_initial_question_only(self):
bot_config = BotConfiguration()
self.assertIsNotNone(bot_config)
bot_config.initial_question = "Default response!"
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertEqual("Default response!", bot.get_initial_question(self._client_context))
def test_get_initial_question_initial_question_srai_no_match(self):
bot_config = BotConfiguration()
self.assertIsNotNone(bot_config)
bot_config.initial_question_srai = "YDEFAULTRESPONSE"
bot_config.initial_question = "Default response!"
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertEqual("Default response!", bot.get_initial_question(self._client_context))
def test_get_initial_question_initial_question_srai_match(self):
bot_config = BotConfiguration()
self.assertIsNotNone(bot_config)
client = TestClient()
bot = MockBot(bot_config, client)
self.assertIsNotNone(bot)
client_context2 = ClientContext(TestClient(), "testid2")
client_context2._bot = bot
client_context2._brain = MockBrain(bot, bot.configuration.configurations[0])
client_context2._brain._response = "Y DEFAULT RESPONSE"
self.assertEqual("Y DEFAULT RESPONSE", bot.get_initial_question(client_context2))
def test_get_initial_question_no_srai(self):
bot_config = BotConfiguration()
bot_config._initial_question = "Test This"
bot_config._initial_question_srai = None
self.assertIsNotNone(bot_config)
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertEqual("Test This", bot.get_initial_question(self._client_context))
def test_get_exit_response_empty_string(self):
bot_config = BotConfiguration()
self.assertIsNotNone(bot_config)
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertEqual("Bye!", bot.get_exit_response(self._client_context))
def test_get_exit_response_exit_response_only(self):
bot_config = BotConfiguration()
self.assertIsNotNone(bot_config)
bot_config.exit_response = "Default response!"
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertEqual("Default response!", bot.get_exit_response(self._client_context))
def test_get_exit_response_exit_response_srai_no_match(self):
bot_config = BotConfiguration()
self.assertIsNotNone(bot_config)
bot_config.exit_response_srai = "YDEFAULTRESPONSE"
bot_config.exit_response = "Default response!"
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertEqual("Default response!", bot.get_exit_response(self._client_context))
def test_get_exit_response_exit_response_srai_match(self):
bot_config = BotConfiguration()
self.assertIsNotNone(bot_config)
bot_config.exit_response_srai = "YDEFAULTRESPONSE"
bot_config.exit_response = "Default response!"
client = TestClient()
bot = MockBot(bot_config, client)
self.assertIsNotNone(bot)
client_context2 = ClientContext(TestClient(), "testid2")
client_context2._bot = bot
client_context2._brain = MockBrain(bot, bot.configuration.configurations[0])
client_context2._brain._response = "Y DEFAULT RESPONSE"
self.assertEqual("Y DEFAULT RESPONSE", bot.get_exit_response(client_context2))
def test_get_exit_response_no_srai(self):
bot_config = BotConfiguration()
bot_config._exit_response = "Test This"
bot_config._exit_response_srai = None
self.assertIsNotNone(bot_config)
client = TestClient()
bot = Bot(bot_config, client)
self.assertIsNotNone(bot)
self.assertEqual("Test This", bot.get_exit_response(self._client_context))
def test_log_answer(self):
bot_config = BotConfiguration()
client = TestClient()
bot = Bot(bot_config, client)
bot.log_answer(self._client_context, "Hello", "Test", None)
response_logger = unittest.mock.Mock()
bot.log_answer(self._client_context, "Hello", "Test", response_logger)
|
class TSException(Exception):
"""Exception related to tsbot"""
class TSResponseError(TSException):
"""Raised when response from server has error_id set to other than 0."""
def __init__(self, message: str, error_id: int) -> None:
self.message = message
self.error_id = error_id
def __str__(self) -> str:
return f"Error {self.error_id}: {self.message}"
class TSCommandError(TSException):
"""
Command handlers can raise this exception to indicate
that something went wrong while running the handler.
"""
class TSPermissionError(TSException):
"""
Command handlers can raise this exception to indicate
that the user running this command doesn't have the
proper permissions.
"""
class TSEventError(TSException):
"""Exception happend during event handling"""
|
import logging
logger = logging.getLogger(__name__)
TuneSearchCV = None
TuneGridSearchCV = None
try:
from tune_sklearn import TuneSearchCV, TuneGridSearchCV
except ImportError:
logger.info("tune_sklearn is not installed. Please run "
"`pip install tune-sklearn`.")
__all__ = ["TuneSearchCV", "TuneGridSearchCV"]
|
"""
Set of specific utilities for combining PySeg with Subtomogram Averaging tools (PyTom)
# Author: Antonio Martinez-Sanchez (Max Planck Institute for Biochemistry)
# Date: 1.06.16
"""
__author__ = 'Antonio Martinez-Sanchez'
from .plist import TomoPeaks, SetTomoPeaks, Score, SingleTiltWedge, Particle, ParticleList, PK_COORDS
from .star import Star
from .fxml import XMLFilaments
|
# Portal to Manon
sm.warp(240020401, 3)
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.jvm.tasks.ivy_task_mixin import IvyTaskMixin
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
class IvyImports(IvyTaskMixin, NailgunTask):
"""Resolves a jar of .proto files for each target in the context which has imports (ie, for each
JavaProtobufLibrary target).
"""
_CONFIG_SECTION = 'ivy-imports'
# TODO https://github.com/pantsbuild/pants/issues/604 product_types start
@classmethod
def product_types(cls):
return ['ivy_imports']
# TODO https://github.com/pantsbuild/pants/issues/604 product_types finish
@classmethod
def prepare(cls, options, round_manager):
super(IvyImports, cls).prepare(options, round_manager)
round_manager.require_data('jvm_build_tools_classpath_callbacks')
@property
def config_section(self):
return self._CONFIG_SECTION
def _str_jar(self, jar):
return 'jar' + str((jar.org, jar.name, jar.rev))
def execute(self):
def nice_target_name(t):
return t.address.spec
resolve_for = self.context.targets(lambda t: t.has_label('has_imports'))
if resolve_for:
imports_map = self.context.products.get('ivy_imports')
executor = self.create_java_executor()
for target in resolve_for:
jars = target.imports
self.context.log.info('Mapping import jars for {target}: \n {jars}'.format(
target=nice_target_name(target),
jars='\n '.join(self._str_jar(s) for s in jars)))
self.mapjars(imports_map, target, executor, jars=jars)
|
#!/usr/bin/python
# pure python keytool
import os
import sys
import json
import base64
import hashlib
import datetime
assert sys.version_info < (3,0), "Python 2.7 required"
TZ = '+02:00'
b = 256
q = 2**255 - 19
l = 2**252 + 27742317777372353535851937790883648493
def H(m):
return hashlib.sha512(m).digest()
def expmod(b,e,m):
if e == 0: return 1
t = expmod(b,e/2,m)**2 % m
if e & 1: t = (t*b) % m
return t
def inv(x):
return expmod(x,q-2,q)
d = -121665 * inv(121666)
I = expmod(2,(q-1)/4,q)
def xrecover(y):
xx = (y*y-1) * inv(d*y*y+1)
x = expmod(xx,(q+3)/8,q)
if (x*x - xx) % q != 0: x = (x*I) % q
if x % 2 != 0: x = q-x
return x
By = 4 * inv(5)
Bx = xrecover(By)
B = [Bx % q,By % q]
def edwards(P,Q):
x1 = P[0]
y1 = P[1]
x2 = Q[0]
y2 = Q[1]
x3 = (x1*y2+x2*y1) * inv(1+d*x1*x2*y1*y2)
y3 = (y1*y2+x1*x2) * inv(1-d*x1*x2*y1*y2)
return [x3 % q,y3 % q]
def scalarmult(P,e):
if e == 0: return [0,1]
Q = scalarmult(P,e/2)
Q = edwards(Q,Q)
if e & 1: Q = edwards(Q,P)
return Q
def encodeint(y):
bits = [(y >> i) & 1 for i in range(b)]
return ''.join([chr(sum([bits[i * 8 + j] << j for j in range(8)])) for i in range(b/8)])
def encodepoint(P):
x = P[0]
y = P[1]
bits = [(y >> i) & 1 for i in range(b - 1)] + [x & 1]
return ''.join([chr(sum([bits[i * 8 + j] << j for j in range(8)])) for i in range(b/8)])
def bit(h,i):
return (ord(h[i/8]) >> (i%8)) & 1
def publickey(sk):
h = H(sk)
a = 2**(b-2) + sum(2**i * bit(h,i) for i in range(3,b-2))
A = scalarmult(B,a)
return encodepoint(A)
def Hint(m):
h = H(m)
return sum(2**i * bit(h,i) for i in range(2*b))
def signature(m,sk,pk):
h = H(sk)
a = 2**(b-2) + sum(2**i * bit(h,i) for i in range(3,b-2))
r = Hint(''.join([h[i] for i in range(b/8,b/4)]) + m)
R = scalarmult(B,r)
S = (r + Hint(encodepoint(R) + pk + m) * a) % l
return encodepoint(R) + encodeint(S)
def isoncurve(P):
x = P[0]
y = P[1]
return (-x*x + y*y - 1 - d*x*x*y*y) % q == 0
def decodeint(s):
return sum(2**i * bit(s,i) for i in range(0,b))
def decodepoint(s):
y = sum(2**i * bit(s,i) for i in range(0,b-1))
x = xrecover(y)
if x & 1 != bit(s,b-1): x = q-x
P = [x,y]
if not isoncurve(P):
raise Exception("decoding point that is not on curve")
return P
def checkvalid(s,m,pk):
if len(s) != b/4:
raise Exception("signature length is wrong")
if len(pk) != b/8:
raise Exception("public-key length is wrong")
R = decodepoint(s[0:b/8])
A = decodepoint(pk)
S = decodeint(s[b/8:b/4])
h = Hint(encodepoint(R) + pk + m)
if scalarmult(B,S) != edwards(R,scalarmult(A,h)):
raise Exception("signature does not pass verification")
def generate(seckey='keyseed.dat', entropy=os.urandom):
if os.path.exists(seckey):
print('Error: file already exists {}'.format(seckey))
return 1
sk = entropy(32)
with open(seckey, 'wb') as fp:
fp.write(sk)
os.chmod(seckey, 0o600)
print('Private signing key saved to {}'.format(seckey))
def hash_id(datas):
h1 = hashlib.sha256(datas).digest()
h2 = hashlib.sha256(h1).hexdigest()
return h2[:32]
def dump_binstr(data):
datas = json.dumps(data,
skipkeys=False,
ensure_ascii=False,
sort_keys=True,
separators=(',',':'))
return datas.encode('utf-8')
def data_sign(data, sk, pk):
datas = dump_binstr(data['envelope'])
data['id'] = hash_id(datas)
sign = signature(datas, sk, pk)
data['sign'] = base64.b64encode(sign).rstrip('=')
def data_verify(data, vk):
sign = base64.b64decode(data['sign'])
data_bin = dump_binstr(data['envelope'])
data_hid = hash_id(data_bin)
assert data_hid == data['id'], 'Bad hash ID'
checkvalid(sign, data_bin, vk)
def dump_pretty(data):
datas = json.dumps(data,
indent=2,
ensure_ascii=False,
sort_keys=True)
return datas.encode('utf-8')
def export_pubkey(seckey='keyseed.dat', pubkey='pubkey.json', owner_name='root'):
sk = open(seckey, 'rb').read()
vk = publickey(sk)
vk_s = vk.encode('hex')
dt_now = datetime.datetime.now()
dt_exp = dt_now + datetime.timedelta(days=365)
data = {
'envelope': {
'date': dt_now.isoformat()+TZ,
'model': 'admin/pubkey',
'owner': owner_name,
'payload': {
'algorithm': 'Ed25519',
'owner': owner_name,
'publicKey': vk_s,
'validSince': dt_now.isoformat()+TZ,
'validTill': dt_exp.isoformat()+TZ
}
}
}
data_sign(data, sk, vk)
datas = dump_pretty(data)
with open(pubkey, 'wb') as fp:
fp.write(datas)
print('Public verifying key saved to {}'.format(pubkey))
def verify_file(pubkey='pubkey.json', datafile=None):
if len(sys.argv) > 2:
datafile = sys.argv[2]
if datafile and datafile != pubkey:
print('Load public key data from {}'.format(pubkey))
print('Verify any data json from {}'.format(datafile))
else:
print('Verify public key data from {}'.format(pubkey))
with open(pubkey) as fp:
data = json.loads(fp.read())
vkey_hex = data['envelope']['payload']['publicKey']
vk = vkey_hex.decode('hex')
if datafile and datafile != pubkey:
with open(datafile) as fp:
data = json.loads(fp.read())
try:
data_verify(data, vk)
print("Result OK")
except Exception as e:
print("Can't verify: {}".format(e))
return 1
return 0
def sign_file(seckey='keyseed.dat', pubkey='pubkey.json', datafile='data.json'):
sk = open(seckey, 'rb').read()
vk = publickey(sk)
vk_s = vk.encode('hex')
with open(pubkey) as fp:
pubkey_data = json.loads(fp.read())
pubkey_payload = pubkey_data['envelope']['payload']
vkey_hex = pubkey_payload['publicKey'].encode()
assert vk_s == vkey_hex, 'Keys mismatch'
print('Load data json from {}'.format(datafile))
with open(datafile) as fp:
data = json.loads(fp.read())
if '--set-date' in sys.argv:
dt_now = datetime.datetime.now()
data['envelope']['date'] = dt_now.isoformat()+TZ
data['envelope']['owner'] = pubkey_payload['owner']
data_sign(data, sk, vk)
if '--compact' in sys.argv:
datas = dump_binstr(data)
else:
datas = dump_pretty(data)
print('Save signed json to {}'.format(datafile))
with open(datafile, 'w') as fp:
fp.write(datas)
def usage():
print('Usage: admin-keytool.py command [args]\n')
print(' admin-keytool.py generate\n')
print(' admin-keytool.py export owner_name\n')
print(' admin-keytool.py verify [pubkey.json]\n')
print(' admin-keytool.py sign anydata.json\n')
return 2
def main():
if len(sys.argv) < 2:
return usage()
cmd = sys.argv[1]
if cmd == 'generate':
return generate()
elif cmd == 'export' and len(sys.argv) > 2:
return export_pubkey(owner_name=sys.argv[2])
elif cmd == 'verify':
return verify_file()
elif cmd == 'sign' and len(sys.argv) > 2:
return sign_file(datafile=sys.argv[2])
else:
return usage()
if __name__ == '__main__':
sys.exit(main())
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Surface distance module: https://github.com/deepmind/surface-distance ."""
# from .metrics import * # pylint: disable=wildcard-import
__version__ = "0.1"
|
#!/usr/bin/env python3
# encoding: utf-8
# Line 1 - "Shebang": tells the machine which interpreter should run this script.
# Line 2 - Python 3 assumes the source code contains only ASCII;
# to use UTF-8 it must be declared at the top of the script with "encoding: utf-8".
import rospy # Import rospy (the Python-ROS interface)
from std_msgs.msg import String # Import the String message type
def nodo(): # Define the node function
rospy.init_node('nodo_publisher') # Initialize our node and give it the name nodo_publisher
pub = rospy.Publisher('example', String, queue_size=10) # Define our topic named 'example' with message type String
# with a limit of 10 queued messages
rate = rospy.Rate(10) # Create a Rate object at 10 Hz (loop 10 times per second)
while not rospy.is_shutdown(): # While loop - runs until Ctrl-C is pressed
mensaje = "Nodo Publisher" # Declare a message variable and assign it a string
rospy.loginfo(mensaje) # Print Info-level log messages to the screen
pub.publish(mensaje) # Publish a String message on our 'example' topic
rate.sleep()
if __name__ == '__main__': # Main entry point
try:
nodo() # Call the node function
except rospy.ROSInterruptException: # Catch the Ctrl-C exception to end the node's execution
pass
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 19 08:11:51 2021
@author: vik748
"""
import json
import numpy as np
import matplotlib.pyplot as plt
import sys,os
import pandas as pd
def tracks_histogram(recon_file, tracks_file, ax, model_num=0, bins=np.linspace(2,15,14)):
'''
How the tracks.csv file is written
template <class S>
void WriteToStreamCurrentVersion(S& ostream, const TracksManager& manager) {
ostream << manager.TRACKS_HEADER << "_v" << manager.TRACKS_VERSION
<< std::endl;
const auto shotsIDs = manager.GetShotIds();
for (const auto& shotID : shotsIDs) {
const auto observations = manager.GetShotObservations(shotID);
for (const auto& observation : observations) {
ostream << shotID << "\t" << observation.first << "\t"
<< observation.second.id << "\t" << observation.second.point(0)
<< "\t" << observation.second.point(1) << "\t"
<< observation.second.scale << "\t" << observation.second.color(0)
<< "\t" << observation.second.color(1) << "\t"
<< observation.second.color(2) << std::endl;
}
}
}
'''
with open(recon_file) as f:
data = json.load(f)
if model_num == -1:
points_dict = {}
for d in data:
points_dict.update(d['points'])
else:
points_dict = data[model_num]['points']
model_0_point_ids_int = [int(k) for k in points_dict.keys()]
tracks_df = pd.read_csv(tracks_file, sep='\t', skiprows=1,
names=['image', 'track_id', 'feature_id', 'x', 'y',
'scale', 'r', 'g', 'b'])
track_id_counts = tracks_df.track_id.value_counts()
model_0_track_id_counts = track_id_counts[model_0_point_ids_int]
ax.hist(model_0_track_id_counts, bins=bins)
return model_0_track_id_counts
########################################
# Skerki_mud SIFT - RAW vs CLAHE - Model 0
########################################
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_SIFT_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_SIFT_tracks.csv'
fig1, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
fig1.suptitle('Skerki Mud SIFT')
tracks_histogram(recon_file, tracks_file, ax[0], bins=np.linspace(2,15,14))
ax[0].set_xlim([2, None])
ax[0].set_yscale('log')
ax[0].set_ylim([None, 10000])
ax[0].set_title('RAW')
ax[0].set_xlabel('Feature Track Length')
ax[0].set_ylabel('Frequency')
ax[0].xaxis.set_major_locator(plt.MultipleLocator(1))
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_CLAHE_SIFT_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_CLAHE_SIFT_tracks.csv'
tracks_histogram(recon_file, tracks_file, ax[1], bins=np.linspace(2,15,14))
ax[1].set_title('CLAHE')
ax[1].set_xlabel('Feature Track Length')
ax[1].set_ylabel('Frequency')
########################################
# Skerki_mud RAW - SIFT vs Zernike
########################################
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_SIFT_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_SIFT_tracks.csv'
fig2, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
fig2.suptitle('Skerki Mud RAW')
tracks_histogram(recon_file, tracks_file, ax[0], bins=np.linspace(2,15,14))
ax[0].set_xlim([2, None])
ax[0].set_yscale('log')
ax[0].set_ylim([None, 10000])
ax[0].set_title('SIFT')
ax[0].set_xlabel('Feature Track Length')
ax[0].set_ylabel('Frequency')
ax[0].xaxis.set_major_locator(plt.MultipleLocator(1))
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_ZERNIKE_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_ZERNIKE_tracks.csv'
tracks_histogram(recon_file, tracks_file, ax[1], model_num=1, bins=np.linspace(2,15,14))
ax[1].set_title('ZERNIKE')
ax[1].set_xlabel('Feature Track Length')
ax[1].set_ylabel('Frequency')
########################################
# Skerki_mud Zernike - RAW vs CLAHE
########################################
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_ZERNIKE_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_ZERNIKE_tracks.csv'
fig3, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
fig3.suptitle('Skerki Mud ZERNIKE')
tracks_histogram(recon_file, tracks_file, ax[0], model_num=1, bins=np.linspace(2,15,14))
ax[0].set_xlim([2, None])
ax[0].set_yscale('log')
ax[0].set_ylim([None, 10000])
ax[0].set_title('RAW')
ax[0].set_xlabel('Feature Track Length')
ax[0].set_ylabel('Frequency')
ax[0].xaxis.set_major_locator(plt.MultipleLocator(1))
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_CLAHE_ZERNIKE_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_CLAHE_ZERNIKE_tracks.csv'
tracks_histogram(recon_file, tracks_file, ax[1], bins=np.linspace(2,15,14))
ax[1].set_title('CLAHE')
ax[1].set_xlabel('Feature Track Length')
ax[1].set_ylabel('Frequency')
########################################
# Stingray - SIFT vs Zernike
########################################
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_SIFT_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_SIFT_tracks.csv'
fig4, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
fig4.suptitle('Stingray RAW')
tracks_histogram(recon_file, tracks_file, ax[0], bins=np.linspace(2,15,14))
ax[0].set_xlim([2, None])
ax[0].set_yscale('log')
ax[0].set_ylim([None, 10000])
ax[0].set_title('SIFT')
ax[0].set_xlabel('Feature Track Length')
ax[0].set_ylabel('Frequency')
ax[0].xaxis.set_major_locator(plt.MultipleLocator(1))
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_ZERNIKE_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_ZERNIKE_tracks.csv'
tracks_histogram(recon_file, tracks_file, ax[1], bins=np.linspace(2,15,14))
ax[1].set_title('ZERNIKE')
ax[1].set_xlabel('Feature Track Length')
ax[1].set_ylabel('Frequency')
########################################
# Skerki_mud SIFT - RAW vs CLAHE - Combined
########################################
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_SIFT_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_SIFT_tracks.csv'
fig5, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
fig5.suptitle('Skerki Mud SIFT - Combined')
tracks_histogram(recon_file, tracks_file, ax[0], model_num=-1, bins=np.linspace(2,15,14))
ax[0].set_xlim([2, None])
ax[0].set_yscale('log')
ax[0].set_ylim([None, 10000])
ax[0].set_title('RAW')
ax[0].set_xlabel('Feature Track Length')
ax[0].set_ylabel('Frequency')
ax[0].xaxis.set_major_locator(plt.MultipleLocator(1))
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_CLAHE_SIFT_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_CLAHE_SIFT_tracks.csv'
tracks_histogram(recon_file, tracks_file, ax[1], bins=np.linspace(2,15,14))
ax[1].set_title('CLAHE')
ax[1].set_xlabel('Feature Track Length')
ax[1].set_ylabel('Frequency')
########################################
# Stingray SIFT - RAW vs CLAHE
########################################
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_SIFT_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_SIFT_tracks.csv'
fig6, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
fig6.suptitle('Stingray SIFT')
tracks_histogram(recon_file, tracks_file, ax[0], bins=np.linspace(2,15,14))
ax[0].set_xlim([2, None])
ax[0].set_yscale('log')
ax[0].set_ylim([None, 10000])
ax[0].set_title('RAW')
ax[0].set_xlabel('Feature Track Length')
ax[0].set_ylabel('Frequency')
ax[0].xaxis.set_major_locator(plt.MultipleLocator(1))
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_CLAHE_SIFT_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_CLAHE_SIFT_tracks.csv'
tracks_histogram(recon_file, tracks_file, ax[1], bins=np.linspace(2,15,14))
ax[1].set_title('CLAHE')
ax[1].set_xlabel('Feature Track Length')
ax[1].set_ylabel('Frequency')
########################################
# Stingray Zernike - RAW vs CLAHE
########################################
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_ZERNIKE_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_ZERNIKE_tracks.csv'
fig7, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
fig7.suptitle('Stingray ZERNIKE')
counts0 = tracks_histogram(recon_file, tracks_file, ax[0], bins=np.linspace(2,15,14))
ax[0].set_xlim([2, None])
ax[0].set_yscale('log')
ax[0].set_ylim([None, 10000])
ax[0].set_title('RAW')
ax[0].set_xlabel('Feature Track Length')
ax[0].set_ylabel('Frequency')
ax[0].xaxis.set_major_locator(plt.MultipleLocator(1))
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_CLAHE_ZERNIKE_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_CLAHE_ZERNIKE_tracks.csv'
counts1 = tracks_histogram(recon_file, tracks_file, ax[1], bins=np.linspace(2,15,14))
ax[1].set_title('CLAHE')
ax[1].set_xlabel('Feature Track Length')
ax[1].set_ylabel('Frequency')
plt.hist([counts0, counts1], np.linspace(2,15,14), label=['RAW', 'CLAHE'])
plt.legend(loc='upper right')
plt.show()
|
import sqlalchemy as sa
from .base import Base
__all__ = ['Role']
class Role(Base):
__tablename__ = 'role'
__table_args__ = {'schema': 'system'}
name = sa.Column(sa.String(256), primary_key=True)
is_default = sa.Column(sa.Boolean(), default=False)
can_reset_password = sa.Column(sa.Boolean(), default=False)
has_sql_access = sa.Column(sa.Boolean(), default=False)
can_register_all = sa.Column(sa.Boolean(), default=False)
can_register = sa.Column(sa.ARRAY(sa.String), nullable=True)
linked_entity_schema = sa.Column(sa.String(256), nullable=True)
linked_entity_name = sa.Column(sa.String(256), nullable=True)
users = sa.orm.relationship(
'User', secondary='system.user_has_roles', back_populates='roles'
)
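# Illustrative sketch (not part of the original module): creating a default
# role. The SQLAlchemy session/engine setup is assumed to exist elsewhere.
#
#     role = Role(name='viewer', is_default=True, has_sql_access=False)
#     session.add(role)
#     session.commit()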
|
from flask import Flask, send_from_directory
import os
import connexion
from connexion.resolver import RestyResolver
from flask_socketio import SocketIO, send, emit
# Constants
STATIC_FILES_DIR = 'static'
# The application server
app = connexion.FlaskApp(__name__, specification_dir='swagger/', resolver=RestyResolver('VrDemoServer.api'))
flask_app = app.app
# We define the websocket feature
socketio = SocketIO(flask_app)
@socketio.on('message')
def handle_message(message):
print('received message: ' + message)
emit('my response', "res"+message)
# We add the OpenApi definitions
app.add_api('demo2.yaml')
# we define that all static content will be served from the STATIC_FILES_DIR subdirectory
static_files_dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), STATIC_FILES_DIR)
app.static_folder = STATIC_FILES_DIR
app.add_url_rule(
app.app.static_url_path + "/<path:filename>",
endpoint="static",
view_func=app.app.send_static_file,
)
# we redirect the root index.html to the app_index.html
@app.route('/', methods=['GET'])
def serve_index():
return send_from_directory(static_files_dir_path, 'app_index.html')
# function to launch the server
def run_server(ip, port=8080, debug=False):
# print('starting rest api')
# app.run(host=ip, port=port, debug=debug)
print('starting websocket')
socketio.run(flask_app, host=ip, port=port, debug=debug)
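# Entry-point sketch (an assumption; the original module defines run_server but
# no __main__ guard): launch the combined REST + websocket server locally.
if __name__ == '__main__':
    run_server('0.0.0.0', port=8080, debug=True)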
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="is-natural-number",
version="1.0.0",
author="Adam Zerella",
author_email="hello@adamzerella.com",
description="Check if a value is a natural number",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/adamzerella/is-natural-number",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
# Copyright 2017-2019 David Grote
#
# This file is part of WarpX.
#
# License: BSD-3-Clause-LBNL
"""Provides wrappers around field and current density on multiFABs
Available routines:
ExWrapper, EyWrapper, EzWrapper
BxWrapper, ByWrapper, BzWrapper
JxWrapper, JyWrapper, JzWrapper
"""
import numpy as np
try:
from mpi4py import MPI as mpi
comm_world = mpi.COMM_WORLD
npes = comm_world.Get_size()
except ImportError:
npes = 1
from . import _libwarpx
class _MultiFABWrapper(object):
"""Wrapper around field arrays at level 0
This provides a convenient way to query and set fields that are broken up into FABs.
The indexing is based on global indices.
- direction: component to access, one of the values (0, 1, 2) or None
- get_lovects: routine that returns the list of lo vectors
- get_fabs: routine that returns the list of FABs
- get_nodal_flag: routine that returns the list of nodal flag
- level: refinement level
"""
def __init__(self, direction, get_lovects, get_fabs, get_nodal_flag, level, include_ghosts=False):
self.direction = direction
self.get_lovects = get_lovects
self.get_fabs = get_fabs
self.get_nodal_flag = get_nodal_flag
self.level = level
self.include_ghosts = include_ghosts
self.dim = _libwarpx.dim
# overlaps is one along the axes where the grid boundaries overlap the neighboring grid,
# which is the case with node centering.
# This presumably will never change during a calculation.
self.overlaps = self.get_nodal_flag()
def _getlovects(self):
if self.direction is None:
lovects, ngrow = self.get_lovects(self.level, self.include_ghosts)
else:
lovects, ngrow = self.get_lovects(self.level, self.direction, self.include_ghosts)
return lovects, ngrow
def _gethivects(self):
lovects, ngrow = self._getlovects()
fields = self._getfields()
hivects = np.zeros_like(lovects)
for i in range(len(fields)):
hivects[:,i] = lovects[:,i] + np.array(fields[i].shape[:self.dim]) - self.overlaps
return hivects, ngrow
def _getfields(self):
if self.direction is None:
return self.get_fabs(self.level, self.include_ghosts)
else:
return self.get_fabs(self.level, self.direction, self.include_ghosts)
def __len__(self):
lovects, ngrow = self._getlovects()
return len(lovects)
def mesh(self, direction):
"""Returns the mesh along the specified direction with the appropriate centering.
- direction: In 3d, one of 'x', 'y', or 'z'.
In 2d, Cartesian, one of 'x', or 'z'.
In RZ, one of 'r', or 'z'
"""
try:
if _libwarpx.geometry_dim == '3d':
idir = ['x', 'y', 'z'].index(direction)
celldir = idir
elif _libwarpx.geometry_dim == '2d':
idir = ['x', 'z'].index(direction)
celldir = 2*idir
elif _libwarpx.geometry_dim == 'rz':
idir = ['r', 'z'].index(direction)
celldir = 2*idir
except ValueError:
raise Exception('Inappropriate direction given')
# --- Get the total number of cells along the direction
hivects, ngrow = self._gethivects()
nn = hivects[idir,:].max() - ngrow[idir] + self.overlaps[idir]
if npes > 1:
nn = comm_world.allreduce(nn, op=mpi.MAX)
# --- Cell size in the direction
dd = _libwarpx.getCellSize(celldir, self.level)
# --- Get the nodal flag along direction
nodal_flag = self.get_nodal_flag()[idir]
# --- The centering shift
if nodal_flag == 1:
# node centered
shift = 0.
else:
# cell centered
shift = 0.5*dd
return np.arange(nn)*dd + shift
def __getitem__(self, index):
"""Returns slices of a decomposed array, The shape of
the object returned depends on the number of ix, iy and iz specified, which
can be from none to all three. Note that the values of ix, iy and iz are
relative to the fortran indexing, meaning that 0 is the lower boundary
of the whole domain.
"""
if index == Ellipsis:
index = tuple(self.dim*[slice(None)])
if len(index) < self.dim:
# --- Add extra dims to index if needed
index = list(index)
for i in range(len(index), self.dim):
index.append(slice(None))
index = tuple(index)
if self.dim == 2:
return self._getitem2d(index)
elif self.dim == 3:
return self._getitem3d(index)
def _getitem3d(self, index):
"""Returns slices of a 3D decomposed array,
"""
lovects, ngrow = self._getlovects()
hivects, ngrow = self._gethivects()
fields = self._getfields()
ix = index[0]
iy = index[1]
iz = index[2]
if len(fields[0].shape) > self.dim:
ncomps = fields[0].shape[-1]
else:
ncomps = 1
if len(index) > self.dim:
if ncomps > 1:
ic = index[-1]
else:
raise Exception('Too many indices given')
else:
ic = None
nx = hivects[0,:].max() - ngrow[0]
ny = hivects[1,:].max() - ngrow[1]
nz = hivects[2,:].max() - ngrow[2]
if npes > 1:
nx = comm_world.allreduce(nx, op=mpi.MAX)
ny = comm_world.allreduce(ny, op=mpi.MAX)
nz = comm_world.allreduce(nz, op=mpi.MAX)
if isinstance(ix, slice):
ixstart = max(ix.start or -ngrow[0], -ngrow[0])
ixstop = min(ix.stop or nx + 1 + ngrow[0], nx + self.overlaps[0] + ngrow[0])
else:
ixstart = ix
ixstop = ix + 1
if isinstance(iy, slice):
iystart = max(iy.start or -ngrow[1], -ngrow[1])
iystop = min(iy.stop or ny + 1 + ngrow[1], ny + self.overlaps[1] + ngrow[1])
else:
iystart = iy
iystop = iy + 1
if isinstance(iz, slice):
izstart = max(iz.start or -ngrow[2], -ngrow[2])
izstop = min(iz.stop or nz + 1 + ngrow[2], nz + self.overlaps[2] + ngrow[2])
else:
izstart = iz
izstop = iz + 1
# --- Setup the size of the array to be returned and create it.
# --- Space is added for multiple components if needed.
sss = (max(0, ixstop - ixstart),
max(0, iystop - iystart),
max(0, izstop - izstart))
if ncomps > 1 and ic is None:
sss = tuple(list(sss) + [ncomps])
resultglobal = np.zeros(sss, dtype=_libwarpx._numpy_real_dtype)
datalist = []
for i in range(len(fields)):
# --- The ix1, 2 etc are relative to global indexing
ix1 = max(ixstart, lovects[0,i])
ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])
iy1 = max(iystart, lovects[1,i])
iy2 = min(iystop, lovects[1,i] + fields[i].shape[1])
iz1 = max(izstart, lovects[2,i])
iz2 = min(izstop, lovects[2,i] + fields[i].shape[2])
if ix1 < ix2 and iy1 < iy2 and iz1 < iz2:
sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),
slice(iy1 - lovects[1,i], iy2 - lovects[1,i]),
slice(iz1 - lovects[2,i], iz2 - lovects[2,i]))
if ic is not None:
sss = tuple(list(sss) + [ic])
vslice = (slice(ix1 - ixstart, ix2 - ixstart),
slice(iy1 - iystart, iy2 - iystart),
slice(iz1 - izstart, iz2 - izstart))
datalist.append((vslice, fields[i][sss]))
if npes == 1:
all_datalist = [datalist]
else:
all_datalist = comm_world.allgather(datalist)
for datalist in all_datalist:
for vslice, ff in datalist:
resultglobal[vslice] = ff
# --- Now remove any of the reduced dimensions.
sss = [slice(None), slice(None), slice(None)]
if not isinstance(ix, slice):
sss[0] = 0
if not isinstance(iy, slice):
sss[1] = 0
if not isinstance(iz, slice):
sss[2] = 0
return resultglobal[tuple(sss)]
def _getitem2d(self, index):
"""Returns slices of a 2D decomposed array,
"""
lovects, ngrow = self._getlovects()
hivects, ngrow = self._gethivects()
fields = self._getfields()
ix = index[0]
iz = index[1]
if len(fields[0].shape) > self.dim:
ncomps = fields[0].shape[-1]
else:
ncomps = 1
if len(index) > self.dim:
if ncomps > 1:
ic = index[2]
else:
raise Exception('Too many indices given')
else:
ic = None
nx = hivects[0,:].max() - ngrow[0]
nz = hivects[1,:].max() - ngrow[1]
if npes > 1:
nx = comm_world.allreduce(nx, op=mpi.MAX)
nz = comm_world.allreduce(nz, op=mpi.MAX)
if isinstance(ix, slice):
ixstart = max(ix.start or -ngrow[0], -ngrow[0])
ixstop = min(ix.stop or nx + 1 + ngrow[0], nx + self.overlaps[0] + ngrow[0])
else:
ixstart = ix
ixstop = ix + 1
if isinstance(iz, slice):
izstart = max(iz.start or -ngrow[1], -ngrow[1])
izstop = min(iz.stop or nz + 1 + ngrow[1], nz + self.overlaps[1] + ngrow[1])
else:
izstart = iz
izstop = iz + 1
# --- Setup the size of the array to be returned and create it.
# --- Space is added for multiple components if needed.
sss = (max(0, ixstop - ixstart),
max(0, izstop - izstart))
if ncomps > 1 and ic is None:
sss = tuple(list(sss) + [ncomps])
resultglobal = np.zeros(sss, dtype=_libwarpx._numpy_real_dtype)
datalist = []
for i in range(len(fields)):
# --- The ix1, 2 etc are relative to global indexing
ix1 = max(ixstart, lovects[0,i])
ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])
iz1 = max(izstart, lovects[1,i])
iz2 = min(izstop, lovects[1,i] + fields[i].shape[1])
if ix1 < ix2 and iz1 < iz2:
sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),
slice(iz1 - lovects[1,i], iz2 - lovects[1,i]))
if ic is not None:
sss = tuple(list(sss) + [ic])
vslice = (slice(ix1 - ixstart, ix2 - ixstart),
slice(iz1 - izstart, iz2 - izstart))
datalist.append((vslice, fields[i][sss]))
if npes == 1:
all_datalist = [datalist]
else:
all_datalist = comm_world.allgather(datalist)
for datalist in all_datalist:
for vslice, ff in datalist:
resultglobal[vslice] = ff
# --- Now remove any of the reduced dimensions.
sss = [slice(None), slice(None)]
if not isinstance(ix, slice):
sss[0] = 0
if not isinstance(iz, slice):
sss[1] = 0
return resultglobal[tuple(sss)]
def __setitem__(self, index, value):
"""Sets slices of a decomposed array. The shape of
the input object depends on the number of arguments specified, which can
be from none to all three.
- value: input array (must be supplied)
"""
if index == Ellipsis:
index = tuple(self.dim*[slice(None)])
if len(index) < self.dim:
# --- Add extra dims to index if needed
index = list(index)
for i in range(len(index), self.dim):
index.append(slice(None))
index = tuple(index)
if self.dim == 2:
return self._setitem2d(index, value)
elif self.dim == 3:
return self._setitem3d(index, value)
def _setitem3d(self, index, value):
"""Sets slices of a decomposed 3D array.
"""
ix = index[0]
iy = index[1]
iz = index[2]
lovects, ngrow = self._getlovects()
hivects, ngrow = self._gethivects()
fields = self._getfields()
# --- Determine the number of components (mirrors _getitem3d); needed below.
if len(fields[0].shape) > self.dim:
ncomps = fields[0].shape[-1]
else:
ncomps = 1
if len(index) > self.dim:
if ncomps > 1:
ic = index[-1]
else:
raise Exception('Too many indices given')
else:
ic = None
nx = hivects[0,:].max() - ngrow[0]
ny = hivects[1,:].max() - ngrow[1]
nz = hivects[2,:].max() - ngrow[2]
# --- Add extra dimensions so that the input has the same number of
# --- dimensions as array.
if isinstance(value, np.ndarray):
value3d = np.array(value, copy=False)
sss = list(value3d.shape)
if not isinstance(ix, slice): sss[0:0] = [1]
if not isinstance(iy, slice): sss[1:1] = [1]
if not isinstance(iz, slice): sss[2:2] = [1]
value3d.shape = sss
if isinstance(ix, slice):
ixstart = max(ix.start or -ngrow[0], -ngrow[0])
ixstop = min(ix.stop or nx + 1 + ngrow[0], nx + self.overlaps[0] + ngrow[0])
else:
ixstart = ix
ixstop = ix + 1
if isinstance(iy, slice):
iystart = max(iy.start or -ngrow[1], -ngrow[1])
iystop = min(iy.stop or ny + 1 + ngrow[1], ny + self.overlaps[1] + ngrow[1])
else:
iystart = iy
iystop = iy + 1
if isinstance(iz, slice):
izstart = max(iz.start or -ngrow[2], -ngrow[2])
izstop = min(iz.stop or nz + 1 + ngrow[2], nz + self.overlaps[2] + ngrow[2])
else:
izstart = iz
izstop = iz + 1
for i in range(len(fields)):
# --- The ix1, 2 etc are relative to global indexing
ix1 = max(ixstart, lovects[0,i])
ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])
iy1 = max(iystart, lovects[1,i])
iy2 = min(iystop, lovects[1,i] + fields[i].shape[1])
iz1 = max(izstart, lovects[2,i])
iz2 = min(izstop, lovects[2,i] + fields[i].shape[2])
if ix1 < ix2 and iy1 < iy2 and iz1 < iz2:
sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),
slice(iy1 - lovects[1,i], iy2 - lovects[1,i]),
slice(iz1 - lovects[2,i], iz2 - lovects[2,i]))
if ic is not None:
sss = tuple(list(sss) + [ic])
if isinstance(value, np.ndarray):
vslice = (slice(ix1 - ixstart, ix2 - ixstart),
slice(iy1 - iystart, iy2 - iystart),
slice(iz1 - izstart, iz2 - izstart))
fields[i][sss] = value3d[vslice]
else:
fields[i][sss] = value
def _setitem2d(self, index, value):
"""Sets slices of a decomposed 2D array.
"""
ix = index[0]
iz = index[1]
lovects, ngrow = self._getlovects()
hivects, ngrow = self._gethivects()
fields = self._getfields()
if len(fields[0].shape) > self.dim:
ncomps = fields[0].shape[-1]
else:
ncomps = 1
if len(index) > self.dim:
if ncomps > 1:
ic = index[2]
else:
raise Exception('Too many indices given')
else:
ic = None
nx = hivects[0,:].max() - ngrow[0]
nz = hivects[1,:].max() - ngrow[1]
# --- Add extra dimensions so that the input has the same number of
# --- dimensions as array.
if isinstance(value, np.ndarray):
value3d = np.array(value, copy=False)
sss = list(value3d.shape)
if not isinstance(ix, slice): sss[0:0] = [1]
if not isinstance(iz, slice): sss[1:1] = [1]
value3d.shape = sss
if isinstance(ix, slice):
ixstart = max(ix.start or -ngrow[0], -ngrow[0])
ixstop = min(ix.stop or nx + 1 + ngrow[0], nx + self.overlaps[0] + ngrow[0])
else:
ixstart = ix
ixstop = ix + 1
if isinstance(iz, slice):
izstart = max(iz.start or -ngrow[1], -ngrow[1])
izstop = min(iz.stop or nz + 1 + ngrow[1], nz + self.overlaps[1] + ngrow[1])
else:
izstart = iz
izstop = iz + 1
for i in range(len(fields)):
# --- The ix1, 2 etc are relative to global indexing
ix1 = max(ixstart, lovects[0,i])
ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])
iz1 = max(izstart, lovects[1,i])
iz2 = min(izstop, lovects[1,i] + fields[i].shape[1])
if ix1 < ix2 and iz1 < iz2:
sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),
slice(iz1 - lovects[1,i], iz2 - lovects[1,i]))
if ic is not None:
sss = tuple(list(sss) + [ic])
if isinstance(value, np.ndarray):
vslice = (slice(ix1 - ixstart, ix2 - ixstart),
slice(iz1 - izstart, iz2 - izstart))
fields[i][sss] = value3d[vslice]
else:
fields[i][sss] = value
def ExWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=0,
get_lovects=_libwarpx.get_mesh_electric_field_lovects,
get_fabs=_libwarpx.get_mesh_electric_field,
get_nodal_flag=_libwarpx.get_Ex_nodal_flag,
level=level, include_ghosts=include_ghosts)
def EyWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=1,
get_lovects=_libwarpx.get_mesh_electric_field_lovects,
get_fabs=_libwarpx.get_mesh_electric_field,
get_nodal_flag=_libwarpx.get_Ey_nodal_flag,
level=level, include_ghosts=include_ghosts)
def EzWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=2,
get_lovects=_libwarpx.get_mesh_electric_field_lovects,
get_fabs=_libwarpx.get_mesh_electric_field,
get_nodal_flag=_libwarpx.get_Ez_nodal_flag,
level=level, include_ghosts=include_ghosts)
def BxWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=0,
get_lovects=_libwarpx.get_mesh_magnetic_field_lovects,
get_fabs=_libwarpx.get_mesh_magnetic_field,
get_nodal_flag=_libwarpx.get_Bx_nodal_flag,
level=level, include_ghosts=include_ghosts)
def ByWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=1,
get_lovects=_libwarpx.get_mesh_magnetic_field_lovects,
get_fabs=_libwarpx.get_mesh_magnetic_field,
get_nodal_flag=_libwarpx.get_By_nodal_flag,
level=level, include_ghosts=include_ghosts)
def BzWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=2,
get_lovects=_libwarpx.get_mesh_magnetic_field_lovects,
get_fabs=_libwarpx.get_mesh_magnetic_field,
get_nodal_flag=_libwarpx.get_Bz_nodal_flag,
level=level, include_ghosts=include_ghosts)
def JxWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=0,
get_lovects=_libwarpx.get_mesh_current_density_lovects,
get_fabs=_libwarpx.get_mesh_current_density,
get_nodal_flag=_libwarpx.get_Jx_nodal_flag,
level=level, include_ghosts=include_ghosts)
def JyWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=1,
get_lovects=_libwarpx.get_mesh_current_density_lovects,
get_fabs=_libwarpx.get_mesh_current_density,
get_nodal_flag=_libwarpx.get_Jy_nodal_flag,
level=level, include_ghosts=include_ghosts)
def JzWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=2,
get_lovects=_libwarpx.get_mesh_current_density_lovects,
get_fabs=_libwarpx.get_mesh_current_density,
get_nodal_flag=_libwarpx.get_Jz_nodal_flag,
level=level, include_ghosts=include_ghosts)
def ExCPWrapper(level=1, include_ghosts=False):
assert level>0, Exception('Coarse patch only available on levels > 0')
return _MultiFABWrapper(direction=0,
get_lovects=_libwarpx.get_mesh_electric_field_cp_lovects,
get_fabs=_libwarpx.get_mesh_electric_field_cp,
get_nodal_flag=_libwarpx.get_Ex_nodal_flag,
level=level, include_ghosts=include_ghosts)
def EyCPWrapper(level=1, include_ghosts=False):
assert level>0, Exception('Coarse patch only available on levels > 0')
return _MultiFABWrapper(direction=1,
get_lovects=_libwarpx.get_mesh_electric_field_cp_lovects,
get_fabs=_libwarpx.get_mesh_electric_field_cp,
get_nodal_flag=_libwarpx.get_Ey_nodal_flag,
level=level, include_ghosts=include_ghosts)
def EzCPWrapper(level=1, include_ghosts=False):
assert level>0, Exception('Coarse patch only available on levels > 0')
return _MultiFABWrapper(direction=2,
get_lovects=_libwarpx.get_mesh_electric_field_cp_lovects,
get_fabs=_libwarpx.get_mesh_electric_field_cp,
get_nodal_flag=_libwarpx.get_Ez_nodal_flag,
level=level, include_ghosts=include_ghosts)
def BxCPWrapper(level=1, include_ghosts=False):
assert level>0, Exception('Coarse patch only available on levels > 0')
return _MultiFABWrapper(direction=0,
get_lovects=_libwarpx.get_mesh_magnetic_field_cp_lovects,
get_fabs=_libwarpx.get_mesh_magnetic_field_cp,
get_nodal_flag=_libwarpx.get_Bx_nodal_flag,
level=level, include_ghosts=include_ghosts)
def ByCPWrapper(level=1, include_ghosts=False):
assert level>0, Exception('Coarse patch only available on levels > 0')
return _MultiFABWrapper(direction=1,
get_lovects=_libwarpx.get_mesh_magnetic_field_cp_lovects,
get_fabs=_libwarpx.get_mesh_magnetic_field_cp,
get_nodal_flag=_libwarpx.get_By_nodal_flag,
level=level, include_ghosts=include_ghosts)
def BzCPWrapper(level=1, include_ghosts=False):
assert level>0, Exception('Coarse patch only available on levels > 0')
return _MultiFABWrapper(direction=2,
get_lovects=_libwarpx.get_mesh_magnetic_field_cp_lovects,
get_fabs=_libwarpx.get_mesh_magnetic_field_cp,
get_nodal_flag=_libwarpx.get_Bz_nodal_flag,
level=level, include_ghosts=include_ghosts)
def JxCPWrapper(level=1, include_ghosts=False):
assert level>0, Exception('Coarse patch only available on levels > 0')
return _MultiFABWrapper(direction=0,
get_lovects=_libwarpx.get_mesh_current_density_cp_lovects,
get_fabs=_libwarpx.get_mesh_current_density_cp,
get_nodal_flag=_libwarpx.get_Jx_nodal_flag,
level=level, include_ghosts=include_ghosts)
def JyCPWrapper(level=1, include_ghosts=False):
assert level>0, Exception('Coarse patch only available on levels > 0')
return _MultiFABWrapper(direction=1,
get_lovects=_libwarpx.get_mesh_current_density_cp_lovects,
get_fabs=_libwarpx.get_mesh_current_density_cp,
get_nodal_flag=_libwarpx.get_Jy_nodal_flag,
level=level, include_ghosts=include_ghosts)
def JzCPWrapper(level=1, include_ghosts=False):
assert level>0, Exception('Coarse patch only available on levels > 0')
return _MultiFABWrapper(direction=2,
get_lovects=_libwarpx.get_mesh_current_density_cp_lovects,
get_fabs=_libwarpx.get_mesh_current_density_cp,
get_nodal_flag=_libwarpx.get_Jz_nodal_flag,
level=level, include_ghosts=include_ghosts)
def RhoCPWrapper(level=1, include_ghosts=False):
assert level>0, Exception('Coarse patch only available on levels > 0')
return _MultiFABWrapper(direction=None,
get_lovects=_libwarpx.get_mesh_charge_density_cp_lovects,
get_fabs=_libwarpx.get_mesh_charge_density_cp,
get_nodal_flag=_libwarpx.get_Rho_nodal_flag,
level=level, include_ghosts=include_ghosts)
def ExFPWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=0,
get_lovects=_libwarpx.get_mesh_electric_field_fp_lovects,
get_fabs=_libwarpx.get_mesh_electric_field_fp,
get_nodal_flag=_libwarpx.get_Ex_nodal_flag,
level=level, include_ghosts=include_ghosts)
def EyFPWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=1,
get_lovects=_libwarpx.get_mesh_electric_field_fp_lovects,
get_fabs=_libwarpx.get_mesh_electric_field_fp,
get_nodal_flag=_libwarpx.get_Ey_nodal_flag,
level=level, include_ghosts=include_ghosts)
def EzFPWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=2,
get_lovects=_libwarpx.get_mesh_electric_field_fp_lovects,
get_fabs=_libwarpx.get_mesh_electric_field_fp,
get_nodal_flag=_libwarpx.get_Ez_nodal_flag,
level=level, include_ghosts=include_ghosts)
def BxFPWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=0,
get_lovects=_libwarpx.get_mesh_magnetic_field_fp_lovects,
get_fabs=_libwarpx.get_mesh_magnetic_field_fp,
get_nodal_flag=_libwarpx.get_Bx_nodal_flag,
level=level, include_ghosts=include_ghosts)
def ByFPWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=1,
get_lovects=_libwarpx.get_mesh_magnetic_field_fp_lovects,
get_fabs=_libwarpx.get_mesh_magnetic_field_fp,
get_nodal_flag=_libwarpx.get_By_nodal_flag,
level=level, include_ghosts=include_ghosts)
def BzFPWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=2,
get_lovects=_libwarpx.get_mesh_magnetic_field_fp_lovects,
get_fabs=_libwarpx.get_mesh_magnetic_field_fp,
get_nodal_flag=_libwarpx.get_Bz_nodal_flag,
level=level, include_ghosts=include_ghosts)
def JxFPWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=0,
get_lovects=_libwarpx.get_mesh_current_density_fp_lovects,
get_fabs=_libwarpx.get_mesh_current_density_fp,
get_nodal_flag=_libwarpx.get_Jx_nodal_flag,
level=level, include_ghosts=include_ghosts)
def JyFPWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=1,
get_lovects=_libwarpx.get_mesh_current_density_fp_lovects,
get_fabs=_libwarpx.get_mesh_current_density_fp,
get_nodal_flag=_libwarpx.get_Jy_nodal_flag,
level=level, include_ghosts=include_ghosts)
def JzFPWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=2,
get_lovects=_libwarpx.get_mesh_current_density_fp_lovects,
get_fabs=_libwarpx.get_mesh_current_density_fp,
get_nodal_flag=_libwarpx.get_Jz_nodal_flag,
level=level, include_ghosts=include_ghosts)
def RhoFPWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=None,
get_lovects=_libwarpx.get_mesh_charge_density_fp_lovects,
get_fabs=_libwarpx.get_mesh_charge_density_fp,
get_nodal_flag=_libwarpx.get_Rho_nodal_flag,
level=level, include_ghosts=include_ghosts)
def ExCPPMLWrapper(level=1, include_ghosts=False):
assert level>0, Exception('Coarse patch only available on levels > 0')
return _MultiFABWrapper(direction=0,
get_lovects=_libwarpx.get_mesh_electric_field_cp_lovects_pml,
get_fabs=_libwarpx.get_mesh_electric_field_cp_pml,
get_nodal_flag=_libwarpx.get_Ex_nodal_flag,
level=level, include_ghosts=include_ghosts)
def EyCPPMLWrapper(level=1, include_ghosts=False):
assert level>0, Exception('Coarse patch only available on levels > 0')
return _MultiFABWrapper(direction=1,
get_lovects=_libwarpx.get_mesh_electric_field_cp_lovects_pml,
get_fabs=_libwarpx.get_mesh_electric_field_cp_pml,
get_nodal_flag=_libwarpx.get_Ey_nodal_flag,
level=level, include_ghosts=include_ghosts)
def EzCPPMLWrapper(level=1, include_ghosts=False):
assert level>0, Exception('Coarse patch only available on levels > 0')
return _MultiFABWrapper(direction=2,
get_lovects=_libwarpx.get_mesh_electric_field_cp_lovects_pml,
get_fabs=_libwarpx.get_mesh_electric_field_cp_pml,
get_nodal_flag=_libwarpx.get_Ez_nodal_flag,
level=level, include_ghosts=include_ghosts)
def BxCPPMLWrapper(level=1, include_ghosts=False):
assert level>0, Exception('Coarse patch only available on levels > 0')
return _MultiFABWrapper(direction=0,
get_lovects=_libwarpx.get_mesh_magnetic_field_cp_lovects_pml,
get_fabs=_libwarpx.get_mesh_magnetic_field_cp_pml,
get_nodal_flag=_libwarpx.get_Bx_nodal_flag,
level=level, include_ghosts=include_ghosts)
def ByCPPMLWrapper(level=1, include_ghosts=False):
assert level>0, Exception('Coarse patch only available on levels > 0')
return _MultiFABWrapper(direction=1,
get_lovects=_libwarpx.get_mesh_magnetic_field_cp_lovects_pml,
get_fabs=_libwarpx.get_mesh_magnetic_field_cp_pml,
get_nodal_flag=_libwarpx.get_By_nodal_flag,
level=level, include_ghosts=include_ghosts)
def BzCPPMLWrapper(level=1, include_ghosts=False):
assert level>0, Exception('Coarse patch only available on levels > 0')
return _MultiFABWrapper(direction=2,
get_lovects=_libwarpx.get_mesh_magnetic_field_cp_lovects_pml,
get_fabs=_libwarpx.get_mesh_magnetic_field_cp_pml,
get_nodal_flag=_libwarpx.get_Bz_nodal_flag,
level=level, include_ghosts=include_ghosts)
def JxCPPMLWrapper(level=1, include_ghosts=False):
assert level>0, Exception('Coarse patch only available on levels > 0')
return _MultiFABWrapper(direction=0,
get_lovects=_libwarpx.get_mesh_current_density_cp_lovects_pml,
get_fabs=_libwarpx.get_mesh_current_density_cp_pml,
get_nodal_flag=_libwarpx.get_Jx_nodal_flag,
level=level, include_ghosts=include_ghosts)
def JyCPPMLWrapper(level=1, include_ghosts=False):
assert level>0, Exception('Coarse patch only available on levels > 0')
return _MultiFABWrapper(direction=1,
get_lovects=_libwarpx.get_mesh_current_density_cp_lovects_pml,
get_fabs=_libwarpx.get_mesh_current_density_cp_pml,
get_nodal_flag=_libwarpx.get_Jy_nodal_flag,
level=level, include_ghosts=include_ghosts)
def JzCPPMLWrapper(level=1, include_ghosts=False):
assert level>0, Exception('Coarse patch only available on levels > 0')
return _MultiFABWrapper(direction=2,
get_lovects=_libwarpx.get_mesh_current_density_cp_lovects_pml,
get_fabs=_libwarpx.get_mesh_current_density_cp_pml,
get_nodal_flag=_libwarpx.get_Jz_nodal_flag,
level=level, include_ghosts=include_ghosts)
def ExFPPMLWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=0,
get_lovects=_libwarpx.get_mesh_electric_field_fp_lovects_pml,
get_fabs=_libwarpx.get_mesh_electric_field_fp_pml,
get_nodal_flag=_libwarpx.get_Ex_nodal_flag,
level=level, include_ghosts=include_ghosts)
def EyFPPMLWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=1,
get_lovects=_libwarpx.get_mesh_electric_field_fp_lovects_pml,
get_fabs=_libwarpx.get_mesh_electric_field_fp_pml,
get_nodal_flag=_libwarpx.get_Ey_nodal_flag,
level=level, include_ghosts=include_ghosts)
def EzFPPMLWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=2,
get_lovects=_libwarpx.get_mesh_electric_field_fp_lovects_pml,
get_fabs=_libwarpx.get_mesh_electric_field_fp_pml,
get_nodal_flag=_libwarpx.get_Ez_nodal_flag,
level=level, include_ghosts=include_ghosts)
def BxFPPMLWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=0,
get_lovects=_libwarpx.get_mesh_magnetic_field_fp_lovects_pml,
get_fabs=_libwarpx.get_mesh_magnetic_field_fp_pml,
get_nodal_flag=_libwarpx.get_Bx_nodal_flag,
level=level, include_ghosts=include_ghosts)
def ByFPPMLWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=1,
get_lovects=_libwarpx.get_mesh_magnetic_field_fp_lovects_pml,
get_fabs=_libwarpx.get_mesh_magnetic_field_fp_pml,
get_nodal_flag=_libwarpx.get_By_nodal_flag,
level=level, include_ghosts=include_ghosts)
def BzFPPMLWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=2,
get_lovects=_libwarpx.get_mesh_magnetic_field_fp_lovects_pml,
get_fabs=_libwarpx.get_mesh_magnetic_field_fp_pml,
get_nodal_flag=_libwarpx.get_Bz_nodal_flag,
level=level, include_ghosts=include_ghosts)
def JxFPPMLWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=0,
get_lovects=_libwarpx.get_mesh_current_density_fp_lovects_pml,
get_fabs=_libwarpx.get_mesh_current_density_fp_pml,
get_nodal_flag=_libwarpx.get_Jx_nodal_flag,
level=level, include_ghosts=include_ghosts)
def JyFPPMLWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=1,
get_lovects=_libwarpx.get_mesh_current_density_fp_lovects_pml,
get_fabs=_libwarpx.get_mesh_current_density_fp_pml,
get_nodal_flag=_libwarpx.get_Jy_nodal_flag,
level=level, include_ghosts=include_ghosts)
def JzFPPMLWrapper(level=0, include_ghosts=False):
return _MultiFABWrapper(direction=2,
get_lovects=_libwarpx.get_mesh_current_density_fp_lovects_pml,
get_fabs=_libwarpx.get_mesh_current_density_fp_pml,
get_nodal_flag=_libwarpx.get_Jz_nodal_flag,
level=level, include_ghosts=include_ghosts)
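# Usage sketch (illustrative; requires an initialized WarpX run through
# _libwarpx): the wrappers expose the decomposed fields via global indexing.
#
#     Ex = ExWrapper(level=0, include_ghosts=False)
#     full_field = Ex[...]      # gather the whole Ex array onto every rank
#     Ex[:, :, 0] = 0.          # zero the k=0 plane (3d) using global indices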
|
from __future__ import absolute_import
from __future__ import unicode_literals
from mb.lib.memoize import memoize
class SomeClass:
def __init__(self):
self._x = 0
def _the_test(self, number):
self._x += 1
return number * self._x
@memoize
def TestCache1(self, number):
return self._the_test(number)
@memoize("self", "number")
def TestCache2(self, number, **kw):
tmp = self._the_test(kw["number2"])
return self._the_test(tmp - number)
def test_NoArgumentsPassed_UsesAllArgumentsForCache():
someClass = SomeClass()
assert someClass._the_test(5) == 5
assert someClass.TestCache1(5) == 10
assert someClass.TestCache1(5) == 10
def test_ArgumentsPassedToUseForCache_UsesArgumentsForCache():
someClass = SomeClass()
assert someClass.TestCache2(5, number2=10) == 10
assert someClass.TestCache2(5, number2=10) == 10
|
import astropy.io.fits as fits
from specutils import SpectrumList
from specutils.io.registers import data_loader
from .loaders import FITS_FILE_EXTS, SINGLE_SPLIT_LABEL
GALAH_CONFIG = {
"hdus": {
"0": {"purpose": "science"},
"1": {"purpose": "error_stdev"},
"2": {"purpose": "unreduced_science"},
"3": {"purpose": "unreduced_error_stdev"},
"4": {"purpose": "skip"},
},
"wcs": {
"pixel_reference_point_keyword": "CRPIX1",
"pixel_reference_point_value_keyword": "CRVAL1",
"pixel_width_keyword": "CDELT1",
"wavelength_unit": "Angstrom",
},
"units": {"flux_unit": "count"},
"all_standard_units": False,
"all_keywords": False,
"valid_wcs": False,
}
def identify_galah(origin, *args, **kwargs):
"""
Identify if the current file is a GALAH file
"""
file_obj = args[0]
if isinstance(file_obj, fits.hdu.hdulist.HDUList):
hdulist = file_obj
else:
hdulist = fits.open(file_obj, **kwargs)
if "galah" in hdulist[0].header.get("REFERENC"):
if not isinstance(file_obj, fits.hdu.hdulist.HDUList):
hdulist.close()
return True
if not isinstance(file_obj, fits.hdu.hdulist.HDUList):
hdulist.close()
return False
@data_loader(
label="GALAH", extensions=FITS_FILE_EXTS, dtype=SpectrumList,
identifier=identify_galah,
)
def galah_loader(fname):
spectra = SpectrumList.read(
fname, format=SINGLE_SPLIT_LABEL, **GALAH_CONFIG
)
return spectra
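# Usage sketch (the file name is a placeholder, not from the original module):
# importing this module registers the "GALAH" label with specutils, after which
# a spectrum can be read explicitly by format.
#
#     spectra = SpectrumList.read("example_galah_spectrum.fits", format="GALAH")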
|
# (c) Copyright [2017] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests to verify all json configs can be parsed."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
# append parent directory to import path
import dlbs.tests.env # pylint: disable=W0611
from dlbs.tests.test_config_base import ConfigTester
class TestConfigCaffe2(ConfigTester):
def __init__(self, *args, **kwargs):
ConfigTester.__init__(self, *args, **kwargs)
def setUp(self):
self.setUpBase(files=['base.json', 'caffe2.json'])
def check_parameters(self, docker_image):
self.build_plan({"exp.framework": "caffe2", "DLBS_ROOT": ""})
self.compute_vars(
[],
[("exp.framework_title", "Caffe2"), ("exp.framework_family", "caffe2"),
('exp.docker_image', docker_image), ('caffe2.docker_image', docker_image),
('runtime.EXPORT_CUDA_CACHE_PATH', 'CUDA_CACHE_PATH=/workspace/cuda_cache')]
)
def test_docker_images(self):
self.check_parameters('nvcr.io/nvidia/caffe2:18.05-py2')
if __name__ == '__main__':
unittest.main()
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from scipy import stats
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util
tfd = tfp.distributions
@test_util.test_all_tf_execution_regimes
class _SkellamTest(object):
def _make_skellam(self,
rate1,
rate2,
validate_args=True,
force_probs_to_zero_outside_support=False):
return tfd.Skellam(
rate1=rate1,
rate2=rate2,
validate_args=validate_args,
force_probs_to_zero_outside_support=force_probs_to_zero_outside_support)
def testSkellamShape(self):
rate1 = tf.constant([3.0] * 5, dtype=self.dtype)
rate2 = tf.constant([3.0] * 4, dtype=self.dtype)[..., tf.newaxis]
skellam = self._make_skellam(rate1=rate1, rate2=rate2)
self.assertAllEqual(self.evaluate(skellam.batch_shape_tensor()), (4, 5))
self.assertEqual(skellam.batch_shape, tf.TensorShape([4, 5]))
self.assertAllEqual(self.evaluate(skellam.event_shape_tensor()), [])
self.assertEqual(skellam.event_shape, tf.TensorShape([]))
def testInvalidLam(self):
invalid_rate = self.dtype([-.01, 1., 2.])
valid_rate = self.dtype([1., 2., 3.])
with self.assertRaisesOpError('Argument `rate1` must be non-negative.'):
skellam = self._make_skellam(rate1=invalid_rate, rate2=valid_rate)
self.evaluate(skellam.rate1_parameter())
with self.assertRaisesOpError('Argument `rate2` must be non-negative.'):
skellam = self._make_skellam(rate1=valid_rate, rate2=invalid_rate)
self.evaluate(skellam.rate2_parameter())
def testZeroRate(self):
lam = self.dtype(0.)
skellam = tfd.Skellam(rate1=lam, rate2=lam, validate_args=True)
self.assertAllClose(lam, self.evaluate(skellam.rate1))
self.assertAllClose(lam, self.evaluate(skellam.rate2))
self.assertAllClose(0., skellam.prob(3.))
self.assertAllClose(1., skellam.prob(0.))
self.assertAllClose(0., skellam.log_prob(0.))
def testSkellamLogPmfDiscreteMatchesScipy(self):
batch_size = 12
rate1 = np.linspace(1, 12, 12).astype(self.dtype)
rate2 = np.array([[1.2], [2.3]]).astype(self.dtype)
x = np.array([-3., -1., 0., 2., 4., 3., 7., 4., 8., 9., 6., 7.],
dtype=self.dtype)
skellam = self._make_skellam(
rate1=rate1, rate2=rate2,
force_probs_to_zero_outside_support=True, validate_args=False)
log_pmf = skellam.log_prob(x)
self.assertEqual(log_pmf.shape, (2, batch_size))
self.assertAllClose(
self.evaluate(log_pmf),
stats.skellam.logpmf(x, rate1, rate2))
pmf = skellam.prob(x)
self.assertEqual(pmf.shape, (2, batch_size,))
self.assertAllClose(
self.evaluate(pmf),
stats.skellam.pmf(x, rate1, rate2))
@test_util.numpy_disable_gradient_test
def testSkellamLogPmfGradient(self):
batch_size = 6
rate1 = tf.constant([3.] * batch_size, dtype=self.dtype)
rate2 = tf.constant([2.7] * batch_size, dtype=self.dtype)
x = np.array([-1., 2., 3., 4., 5., 6.], dtype=self.dtype)
err = self.compute_max_gradient_error(
lambda lam: self._make_skellam( # pylint:disable=g-long-lambda
rate1=lam, rate2=rate2).log_prob(x), [rate1])
self.assertLess(err, 7e-4)
err = self.compute_max_gradient_error(
lambda lam: self._make_skellam( # pylint:disable=g-long-lambda
rate1=rate1, rate2=lam).log_prob(x), [rate2])
self.assertLess(err, 7e-4)
@test_util.numpy_disable_gradient_test
def testSkellamLogPmfGradientAtZeroPmf(self):
# Check that the derivative wrt parameter at the zero-prob points is zero.
batch_size = 6
rate1 = tf.constant(np.linspace(1, 7, 6), dtype=self.dtype)
rate2 = tf.constant(np.linspace(9.1, 12.1, 6), dtype=self.dtype)
x = tf.constant([-2.1, -1.3, -0.5, 0.2, 1.5, 10.5], dtype=self.dtype)
def make_skellam_log_prob(apply_to_second_rate=False):
def skellam_log_prob(lam):
return self._make_skellam(
rate1=rate1 if apply_to_second_rate else lam,
rate2=lam if apply_to_second_rate else rate2,
force_probs_to_zero_outside_support=True,
validate_args=False).log_prob(x)
return skellam_log_prob
_, dlog_pmf_dlam = self.evaluate(tfp.math.value_and_gradient(
make_skellam_log_prob(), rate1))
self.assertEqual(dlog_pmf_dlam.shape, (batch_size,))
self.assertAllClose(dlog_pmf_dlam, np.zeros([batch_size]))
_, dlog_pmf_dlam = self.evaluate(tfp.math.value_and_gradient(
make_skellam_log_prob(True), rate2))
self.assertEqual(dlog_pmf_dlam.shape, (batch_size,))
self.assertAllClose(dlog_pmf_dlam, np.zeros([batch_size]))
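  # The next three tests rely on the standard Skellam moment identities: if
  # X1 ~ Poisson(rate1) and X2 ~ Poisson(rate2) are independent, then
  # X1 - X2 ~ Skellam(rate1, rate2) with mean rate1 - rate2 and variance
  # rate1 + rate2 (hence stddev sqrt(rate1 + rate2)).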
def testSkellamMean(self):
rate1 = np.array([1.0, 3.0, 2.5], dtype=self.dtype)
rate2 = np.array([5.0, 7.13, 2.56, 41.], dtype=self.dtype)[..., np.newaxis]
skellam = self._make_skellam(rate1=rate1, rate2=rate2)
self.assertEqual(skellam.mean().shape, (4, 3))
self.assertAllClose(
self.evaluate(skellam.mean()), stats.skellam.mean(rate1, rate2))
self.assertAllClose(self.evaluate(skellam.mean()), rate1 - rate2)
def testSkellamVariance(self):
rate1 = np.array([1.0, 3.0, 2.5], dtype=self.dtype)
rate2 = np.array([5.0, 7.13, 2.56, 41.], dtype=self.dtype)[..., np.newaxis]
skellam = self._make_skellam(rate1=rate1, rate2=rate2)
self.assertEqual(skellam.variance().shape, (4, 3))
self.assertAllClose(
self.evaluate(skellam.variance()), stats.skellam.var(rate1, rate2))
self.assertAllClose(self.evaluate(skellam.variance()), rate1 + rate2)
def testSkellamStd(self):
rate1 = np.array([1.0, 3.0, 2.5], dtype=self.dtype)
rate2 = np.array([5.0, 7.13, 2.56, 41.], dtype=self.dtype)[..., np.newaxis]
skellam = self._make_skellam(rate1=rate1, rate2=rate2)
self.assertEqual(skellam.stddev().shape, (4, 3))
self.assertAllClose(
self.evaluate(skellam.stddev()), stats.skellam.std(rate1, rate2))
self.assertAllClose(self.evaluate(skellam.stddev()), np.sqrt(rate1 + rate2))
def testSkellamSample(self):
rate1 = self.dtype([2., 3., 4.])
rate2 = self.dtype([7.1, 3.2])[..., np.newaxis]
n = int(2e5)
skellam = self._make_skellam(rate1=rate1, rate2=rate2)
samples = skellam.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual(samples.shape, (n, 2, 3))
self.assertEqual(sample_values.shape, (n, 2, 3))
self.assertAllClose(
sample_values.mean(axis=0), stats.skellam.mean(rate1, rate2), rtol=.03)
self.assertAllClose(
sample_values.var(axis=0), stats.skellam.var(rate1, rate2), rtol=.03)
def testAssertValidSample(self):
rate1 = np.array([1.0, 3.0, 2.5], dtype=self.dtype)
rate2 = np.array([2.1, 7.0, 42.5], dtype=self.dtype)
skellam = self._make_skellam(rate1=rate1, rate2=rate2)
with self.assertRaisesOpError('has non-integer components'):
self.evaluate(skellam.prob([-1.2, 3., 4.2]))
def testSkellamSampleMultidimensionalMean(self):
rate1 = self.dtype([2., 3., 4., 5., 6.])
rate2 = self.dtype([7.1, 3.2, 10., 9.])[..., np.newaxis]
skellam = self._make_skellam(rate1=rate1, rate2=rate2)
n = int(2e5)
samples = skellam.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual(samples.shape, (n, 4, 5))
self.assertEqual(sample_values.shape, (n, 4, 5))
self.assertAllClose(
sample_values.mean(axis=0),
stats.skellam.mean(rate1, rate2), rtol=.04, atol=0)
def testSkellamSampleMultidimensionalVariance(self):
rate1 = self.dtype([2., 3., 4., 5., 6.])
rate2 = self.dtype([7.1, 3.2, 10., 9.])[..., np.newaxis]
skellam = self._make_skellam(rate1=rate1, rate2=rate2)
n = int(1e5)
samples = skellam.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual(samples.shape, (n, 4, 5))
self.assertEqual(sample_values.shape, (n, 4, 5))
self.assertAllClose(
sample_values.var(axis=0),
stats.skellam.var(rate1, rate2), rtol=.03, atol=0)
@test_util.tf_tape_safety_test
def testGradientThroughRate(self):
rate1 = tf.Variable(3.)
rate2 = tf.Variable(4.)
dist = self._make_skellam(rate1=rate1, rate2=rate2)
with tf.GradientTape() as tape:
loss = -dist.log_prob([1., 2., 4.])
grad = tape.gradient(loss, dist.trainable_variables)
self.assertLen(grad, 2)
self.assertAllNotNone(grad)
def testAssertsNonNegativeRate(self):
rate1 = tf.Variable([-1., 2., -3.])
rate2 = tf.Variable([1., 2., 3.])
self.evaluate([rate1.initializer, rate2.initializer])
with self.assertRaisesOpError('Argument `rate1` must be non-negative.'):
dist = self._make_skellam(
rate1=rate1, rate2=rate2, validate_args=True)
self.evaluate(dist.sample(seed=test_util.test_seed()))
rate1 = tf.Variable([1., 2., 3.])
rate2 = tf.Variable([-1., 2., -3.])
self.evaluate([rate1.initializer, rate2.initializer])
with self.assertRaisesOpError('Argument `rate2` must be non-negative.'):
dist = self._make_skellam(
rate1=rate1, rate2=rate2, validate_args=True)
self.evaluate(dist.sample(seed=test_util.test_seed()))
def testAssertsNonNegativeRateAfterMutation(self):
rate1 = tf.Variable([1., 2., 3.])
rate2 = tf.Variable([1., 2., 3.])
self.evaluate([rate1.initializer, rate2.initializer])
dist = self._make_skellam(
rate1=rate1, rate2=rate2, validate_args=True)
self.evaluate(dist.mean())
with self.assertRaisesOpError('Argument `rate1` must be non-negative.'):
with tf.control_dependencies([rate1.assign([1., 2., -3.])]):
self.evaluate(dist.sample(seed=test_util.test_seed()))
rate1 = tf.Variable([1., 2., 3.])
rate2 = tf.Variable([1., 2., 3.])
self.evaluate([rate1.initializer, rate2.initializer])
dist = self._make_skellam(
rate1=rate1, rate2=rate2, validate_args=True)
self.evaluate(dist.mean())
with self.assertRaisesOpError('Argument `rate2` must be non-negative.'):
with tf.control_dependencies([rate2.assign([1., 2., -3.])]):
self.evaluate(dist.sample(seed=test_util.test_seed()))
@test_util.test_all_tf_execution_regimes
class SkellamTestFloat32(test_util.TestCase, _SkellamTest):
dtype = np.float32
@test_util.test_all_tf_execution_regimes
class SkellamTestFloat64(test_util.TestCase, _SkellamTest):
dtype = np.float64
@test_util.test_all_tf_execution_regimes
class SkellamLogRateTest(_SkellamTest):
def _make_skellam(self,
rate1,
rate2,
validate_args=True,
force_probs_to_zero_outside_support=False):
return tfd.Skellam(
log_rate1=tf.math.log(rate1),
log_rate2=tf.math.log(rate2),
validate_args=validate_args,
force_probs_to_zero_outside_support=force_probs_to_zero_outside_support)
# No need to worry about the non-negativity of `rate` when using the
# `log_rate` parameterization.
def testInvalidLam(self):
pass
def testAssertsNonNegativeRate(self):
pass
def testAssertsNonNegativeRateAfterMutation(self):
pass
# The gradient is not tracked through tf.math.log(rate) in _make_skellam(),
# so log_rate needs to be defined as a Variable and passed directly.
@test_util.tf_tape_safety_test
def testGradientThroughRate(self):
log_rate1 = tf.Variable(3.)
log_rate2 = tf.Variable(4.)
dist = tfd.Skellam(
log_rate1=log_rate1, log_rate2=log_rate2, validate_args=True)
with tf.GradientTape() as tape:
loss = -dist.log_prob([1., 2., 4.])
grad = tape.gradient(loss, dist.trainable_variables)
self.assertLen(grad, 2)
self.assertAllNotNone(grad)
if __name__ == '__main__':
tf.test.main()
|
from flask import render_template,request,url_for,redirect,flash,abort
from . import main
from .forms import BlogForm, CommentForm, UpdateProfile
from ..models import User, Comment , Blog
from flask_login import login_required, current_user
from .. import db, photos
import datetime
from ..requests import getQuotes
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
blogs = Blog.query.all()
title = 'Home - Welcome to Blogs Online Website'
quote = getQuotes()
quote1 = getQuotes()
quote2 = getQuotes()
quote3 = getQuotes()
return render_template('index.html', title = title, blogs=blogs, quote=quote ,quote1=quote1,quote2=quote2,quote3=quote3 )
@main.route('/blog/new', methods = ['GET','POST'])
@login_required
def new_blog():
blog_form = BlogForm()
if blog_form.validate_on_submit():
title = blog_form.title.data
blog = blog_form.text.data
        # Create a new blog instance
new_blog = Blog(blog_title=title,blog_content=blog,username=current_user.username,likes=0,dislikes=0)
# Save blog method
new_blog.save_blog()
return redirect(url_for('.index'))
title = 'New blog'
return render_template('new_blog.html',title = title,blog_form=blog_form )
@main.route('/blog/<int:id>', methods = ['GET','POST'])
def blog(id):
blog = Blog.get_blog(id)
posted_date = blog.posted.strftime('%b %d, %Y')
if request.args.get("like"):
blog.likes = blog.likes + 1
db.session.add(blog)
db.session.commit()
return redirect("/blog/{blog_id}".format(blog_id=blog.id))
elif request.args.get("dislike"):
blog.dislikes = blog.dislikes + 1
db.session.add(blog)
db.session.commit()
return redirect("/blog/{blog_id}".format(blog_id=blog.id))
comment_form = CommentForm()
if comment_form.validate_on_submit():
comment = comment_form.text.data
new_comment = Comment(comment = comment,user = current_user,blog_id = blog)
new_comment.save_comment()
comments = Comment.get_comments(blog)
return render_template("blog.html", blog = blog, date = posted_date, comment_form = comment_form, comments = comments)
@main.route('/user/<uname>/blogs')
def user_blogs(uname):
user = User.query.filter_by(username=uname).first()
blogs = Blog.query.filter_by(user_id = user.id).all()
blogs_count = Blog.count_blogs(uname)
user_joined = user.date_joined.strftime('%b,%d,%y')
return render_template("profile/blogs.html",user = user, blogs = blogs, blogs_count= blogs_count,date= user_joined)
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form =form)
@main.route("/blog/<int:id>/update",methods = ['GET','POST'])
@login_required
def update_blog(id):
blog = Blog.query.get_or_404(id)
if blog.username != current_user.username:
abort(403)
blog_form = BlogForm()
if blog_form.validate_on_submit():
blog.blog_title = blog_form.title.data
blog.blog_content = blog_form.text.data
db.session.commit()
flash('Your blog has been updated!', 'success')
return redirect(url_for('main.blog', id=blog.id))
elif request.method == 'GET':
blog_form.title.data = blog.blog_title
blog_form.text.data = blog.blog_content
return render_template('new_blog.html',title = 'Update Blog',blog_form=blog_form )
@main.route("/blog/<int:id>/delete", methods=['POST'])
@login_required
def delete_blog(id):
blog = Blog.query.get(id)
if blog.username != current_user.username:
abort(403)
db.session.delete(blog)
db.session.commit()
flash('Your post has been deleted!', 'success')
return redirect(url_for('main.index'))
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
|
from PyQt5.QtGui import QImage, QTextDocument, QIcon, QKeySequence, QFont
from PyQt5.QtWidgets import QTextEdit, QApplication, QMainWindow, QAction, QMessageBox, QVBoxLayout, QWidget, \
QStatusBar, QToolBar, QFontComboBox, QComboBox, QActionGroup, QFileDialog, QColorDialog, QDialog, QStyle
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtPrintSupport import QPrintDialog, QPrintPreviewDialog
import os
import sys
import uuid
# create function that returns resources when compiled with pyinstaller
def get_resource(file: str) -> os.path:
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except AttributeError:
base_path = os.path.abspath(".")
return os.path.join(base_path, file)
FONT_SIZES = [7, 8, 9, 10, 11, 12, 13, 14, 18, 24, 36, 48, 64, 72, 96, 144, 288]
IMAGE_EXTENSIONS = ['.jpg', '.png', '.bmp']
HTML_EXTENSIONS = ['.htm', '.html']
def hexuuid():
return uuid.uuid4().hex
def splitext(p):
return os.path.splitext(p)[1].lower()
class TextEdit(QTextEdit):
def canInsertFromMimeData(self, source):
if source.hasImage():
return True
else:
return super(TextEdit, self).canInsertFromMimeData(source)
def insertFromMimeData(self, source):
cursor = self.textCursor()
document = self.document()
if source.hasUrls():
for u in source.urls():
file_ext = splitext(str(u.toLocalFile()))
if u.isLocalFile() and file_ext in IMAGE_EXTENSIONS:
image = QImage(u.toLocalFile())
document.addResource(QTextDocument.ImageResource, u, image)
cursor.insertImage(u.toLocalFile())
else:
# If we hit a non-image or non-local URL break the loop and fall out
# to the super call & let Qt handle it
break
else:
# If all were valid images, finish here.
return
elif source.hasImage():
image = source.imageData()
uuid2 = hexuuid()
document.addResource(QTextDocument.ImageResource, uuid2, image)
cursor.insertImage(uuid2)
return
super(TextEdit, self).insertFromMimeData(source)
class MainWindow(QMainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.setWindowIcon(QIcon(get_resource("Icon.png")))
layout = QVBoxLayout()
self.editor = TextEdit()
# Setup the QTextEdit editor configuration
self.editor.setAutoFormatting(QTextEdit.AutoAll)
self.editor.selectionChanged.connect(self.update_format)
# Initialize default font size.
font = QFont('Times New Roman', 12)
self.editor.setFont(font)
# We need to repeat the size to init the current format.
self.editor.setFontPointSize(12)
# self.path holds the path of the currently open file.
# If none, we haven't got a file open yet (or creating new).
self.path = None
layout.addWidget(self.editor)
container = QWidget()
container.setLayout(layout)
self.setCentralWidget(container)
self.status = QStatusBar()
self.setStatusBar(self.status)
# Uncomment to disable native menubar on Mac
# self.menuBar().setNativeMenuBar(False)
file_toolbar = QToolBar("File")
file_toolbar.setIconSize(QSize(14, 14))
self.addToolBar(file_toolbar)
file_menu = self.menuBar().addMenu("&Datei")
open_file_action = QAction(QIcon(get_resource("blue-folder-open-document.png")), "Öffnen...", self)
open_file_action.setStatusTip("Datei öffnen")
open_file_action.setShortcut('Ctrl+O')
open_file_action.triggered.connect(self.file_open)
file_menu.addAction(open_file_action)
file_toolbar.addAction(open_file_action)
save_file_action = QAction(QIcon(get_resource('disk.png')), "Speichern", self)
save_file_action.setStatusTip("Dokument speichern")
save_file_action.setShortcut('Ctrl+S')
save_file_action.triggered.connect(self.file_save)
file_menu.addAction(save_file_action)
file_toolbar.addAction(save_file_action)
saveas_file_action = QAction(QIcon(get_resource('disk--pencil.png')), "Speichern unter...", self)
saveas_file_action.setStatusTip("Dokument speichern unter")
saveas_file_action.setShortcut('Ctrl+Shift+S')
saveas_file_action.triggered.connect(self.file_saveas)
file_menu.addAction(saveas_file_action)
file_toolbar.addAction(saveas_file_action)
print_action = QAction(QIcon(get_resource('printer.png')), "Drucken...", self)
print_action.setStatusTip("Dokument drucken")
print_action.setShortcut("Ctrl+P")
print_action.triggered.connect(self.file_print)
file_menu.addAction(print_action)
file_toolbar.addAction(print_action)
preview_action = QAction(QIcon(get_resource("print_preview.png")), "Druckvorschau", self)
preview_action.setStatusTip("Druckvorschau")
preview_action.setShortcut("Ctrl+Shift+P")
preview_action.triggered.connect(self.preview)
file_menu.addAction(preview_action)
quit_action = QAction(QIcon(self.style().standardIcon(QStyle.SP_BrowserStop)), "Beenden", self)
quit_action.setStatusTip("Word beenden")
quit_action.setShortcut("Ctrl+Q")
quit_action.triggered.connect(self.close)
file_menu.addAction(quit_action)
# file_toolbar.addAction(quit_action)
edit_toolbar = QToolBar("Edit")
edit_toolbar.setIconSize(QSize(16, 16))
self.addToolBar(edit_toolbar)
edit_menu = self.menuBar().addMenu("&Bearbeiten")
undo_action = QAction(QIcon(get_resource('arrow-curve-180-left.png')), "Rückgängig", self)
undo_action.setStatusTip("letzte Änderung rückgängig machen")
undo_action.setShortcut("Ctrl+Z")
undo_action.triggered.connect(self.editor.undo)
edit_menu.addAction(undo_action)
edit_toolbar.addAction(undo_action)
redo_action = QAction(QIcon(get_resource('arrow-curve.png')), "Wiederherstellen", self)
redo_action.setStatusTip("letzte Änderung wiederherstellen")
redo_action.setShortcut("Ctrl+Y")
redo_action.triggered.connect(self.editor.redo)
edit_toolbar.addAction(redo_action)
edit_menu.addAction(redo_action)
edit_menu.addSeparator()
cut_action = QAction(QIcon(get_resource('scissors.png')), "Ausschneiden", self)
cut_action.setStatusTip("markierten Text ausschneiden")
# cut_action.setShortcut("Ctrl+X")
cut_action.setShortcut(QKeySequence.Cut)
cut_action.triggered.connect(self.editor.cut)
# edit_toolbar.addAction(cut_action)
edit_menu.addAction(cut_action)
copy_action = QAction(QIcon(get_resource('document-copy.png')), "Kopieren", self)
copy_action.setStatusTip("markierten Text kopieren")
copy_action.setShortcut(QKeySequence.Copy)
copy_action.triggered.connect(self.editor.copy)
edit_toolbar.addAction(copy_action)
edit_menu.addAction(copy_action)
paste_action = QAction(QIcon(get_resource('clipboard-paste-document-text.png')), "Einfügen", self)
paste_action.setStatusTip("einfügen aus der Zwischenablage")
paste_action.setShortcut(QKeySequence.Paste)
paste_action.triggered.connect(self.editor.paste)
edit_toolbar.addAction(paste_action)
edit_menu.addAction(paste_action)
select_action = QAction(QIcon(get_resource('selection-input.png')), "Alles auswählen", self)
select_action.setStatusTip("den gesamten Text auswählen")
select_action.setShortcut(QKeySequence.SelectAll)
select_action.triggered.connect(self.editor.selectAll)
edit_menu.addAction(select_action)
edit_menu.addSeparator()
wrap_action = QAction("Wrap text to window", self) # QIcon(get_resource( 'arrow-continue.png'))
wrap_action.setStatusTip("Toggle wrap text to window")
wrap_action.setCheckable(True)
wrap_action.setChecked(True)
wrap_action.triggered.connect(self.edit_toggle_wrap)
edit_menu.addAction(wrap_action)
format_toolbar = QToolBar("Format")
format_toolbar.setIconSize(QSize(16, 16))
self.addToolBar(format_toolbar)
format_menu = self.menuBar().addMenu("&Format")
# We need references to these actions/settings to update as selection changes, so attach to self.
self.fonts = QFontComboBox()
self.fonts.currentFontChanged.connect(self.editor.setCurrentFont)
format_toolbar.addWidget(self.fonts)
self.fontsize = QComboBox()
self.fontsize.addItems([str(s) for s in FONT_SIZES])
# Connect to the signal producing the text of the current selection. Convert the string to float
# and set as the pointsize. We could also use the index + retrieve from FONT_SIZES.
self.fontsize.currentIndexChanged[str].connect(lambda s: self.editor.setFontPointSize(float(s)))
format_toolbar.addWidget(self.fontsize)
self.bold_action = QAction(QIcon(get_resource('edit-bold.png')), "Dick", self)
self.bold_action.setStatusTip("Dick")
self.bold_action.setShortcut(QKeySequence.Bold)
self.bold_action.setCheckable(True)
self.bold_action.toggled.connect(lambda x: self.editor.setFontWeight(QFont.Bold if x else QFont.Normal))
format_toolbar.addAction(self.bold_action)
format_menu.addAction(self.bold_action)
self.italic_action = QAction(QIcon(get_resource('edit-italic.png')), "Kursiv", self)
self.italic_action.setStatusTip("Kursiv")
self.italic_action.setShortcut(QKeySequence.Italic)
self.italic_action.setCheckable(True)
self.italic_action.toggled.connect(self.editor.setFontItalic)
format_toolbar.addAction(self.italic_action)
format_menu.addAction(self.italic_action)
        self.underline_action = QAction(QIcon(get_resource('edit-underline.png')), "Unterstreichen", self)
        self.underline_action.setStatusTip("Unterstreichen")
self.underline_action.setShortcut(QKeySequence.Underline)
self.underline_action.setCheckable(True)
self.underline_action.toggled.connect(self.editor.setFontUnderline)
format_toolbar.addAction(self.underline_action)
format_menu.addAction(self.underline_action)
self.color_menu = QAction(QIcon(get_resource("draw_color.png")), "Zeichenfarbe", self)
self.color_menu.setStatusTip("Wähle eine Farbe mit der du schreiben kannst")
self.color_menu.triggered.connect(self.chose_draw_color)
format_menu.addAction(self.color_menu)
format_toolbar.addAction(self.color_menu)
format_menu.addSeparator()
self.alignl_action = QAction(QIcon(get_resource('edit-alignment.png')), "Links ausrichten", self)
self.alignl_action.setStatusTip("Links ausrichten")
self.alignl_action.setShortcut("Ctrl+L")
self.alignl_action.setCheckable(True)
self.alignl_action.triggered.connect(lambda: self.editor.setAlignment(Qt.AlignLeft))
format_toolbar.addAction(self.alignl_action)
format_menu.addAction(self.alignl_action)
self.alignc_action = QAction(QIcon(get_resource('edit-alignment-center.png')), "Zentriert ausrichten",
self)
self.alignc_action.setStatusTip("Zentriert ausrichten")
self.alignc_action.setShortcut("Ctrl+E")
self.alignc_action.setCheckable(True)
self.alignc_action.triggered.connect(lambda: self.editor.setAlignment(Qt.AlignCenter))
format_toolbar.addAction(self.alignc_action)
format_menu.addAction(self.alignc_action)
self.alignr_action = QAction(QIcon(get_resource('edit-alignment-right.png')), "Rechts ausrichten",
self)
self.alignr_action.setStatusTip("Rechts ausrichten")
self.alignr_action.setShortcut("Ctrl+R")
self.alignr_action.setCheckable(True)
self.alignr_action.triggered.connect(lambda: self.editor.setAlignment(Qt.AlignRight))
format_toolbar.addAction(self.alignr_action)
format_menu.addAction(self.alignr_action)
self.alignj_action = QAction(QIcon(get_resource('edit-alignment-justify.png')), "Blocksatz", self)
self.alignj_action.setStatusTip("Blocksatz")
self.alignj_action.setShortcut("Ctrl+B")
self.alignj_action.setCheckable(True)
self.alignj_action.triggered.connect(lambda: self.editor.setAlignment(Qt.AlignJustify))
format_toolbar.addAction(self.alignj_action)
format_menu.addAction(self.alignj_action)
format_group = QActionGroup(self)
format_group.setExclusive(True)
format_group.addAction(self.alignl_action)
format_group.addAction(self.alignc_action)
format_group.addAction(self.alignr_action)
format_group.addAction(self.alignj_action)
format_menu.addSeparator()
# A list of all format-related widgets/actions, so we can disable/enable signals when updating.
self._format_actions = [
self.fonts,
self.fontsize,
self.bold_action,
self.italic_action,
self.underline_action,
# We don't need to disable signals for alignment, as they are paragraph-wide.
]
# Initialize.
self.update_format()
self.update_title()
self.show()
# Tests
self.editor.textChanged.connect(self.update_title_chanced)
# self.editor.setTextColor(QColorDialog(self).getColor(Qt.black, self))
def block_signals(self, objects, b):
for o in objects:
o.blockSignals(b)
def update_format(self):
"""
        Update the font format toolbar/actions when a new text selection is made. This is necessary to keep
toolbars/etc. in sync with the current edit state.
:return:
"""
# Disable signals for all format widgets, so changing values here does not trigger further formatting.
self.block_signals(self._format_actions, True)
self.fonts.setCurrentFont(self.editor.currentFont())
        # Nasty, but we get the font-size as a float but want it as an int
self.fontsize.setCurrentText(str(int(self.editor.fontPointSize())))
self.italic_action.setChecked(self.editor.fontItalic())
self.underline_action.setChecked(self.editor.fontUnderline())
self.bold_action.setChecked(self.editor.fontWeight() == QFont.Bold)
self.alignl_action.setChecked(self.editor.alignment() == Qt.AlignLeft)
self.alignc_action.setChecked(self.editor.alignment() == Qt.AlignCenter)
self.alignr_action.setChecked(self.editor.alignment() == Qt.AlignRight)
self.alignj_action.setChecked(self.editor.alignment() == Qt.AlignJustify)
self.block_signals(self._format_actions, False)
def dialog_critical(self, s):
dlg = QMessageBox(self)
dlg.setText(s)
dlg.setIcon(QMessageBox.Critical)
dlg.show()
def file_open(self):
path, _ = QFileDialog.getOpenFileName(self, "Open file", "",
"HTML documents (*.html);Text documents (*.txt);All files (*.*)")
try:
with open(path, 'r') as f:
text = f.read()
except Exception as e:
self.dialog_critical(str(e))
else:
self.path = path
# Qt will automatically try and guess the format as txt/html
self.editor.setText(text)
self.update_title()
def file_save(self):
if self.path is None:
# If we do not have a path, we need to use Save As.
return self.file_saveas()
text = self.editor.toHtml() if splitext(self.path) in HTML_EXTENSIONS else self.editor.toPlainText()
try:
with open(self.path, 'w') as f:
f.write(text)
except Exception as e:
self.dialog_critical(str(e))
def file_saveas(self):
path, _ = QFileDialog.getSaveFileName(self, "Save file", "",
"WORD documents (*.word);HTML documents (*.html);Text documents (*.txt);All files (*.*)")
if not path:
# If dialog is cancelled, will return ''
return
text = self.editor.toHtml() if splitext(path) in HTML_EXTENSIONS else self.editor.toPlainText()
try:
with open(path, 'w') as f:
f.write(text)
except Exception as e:
self.dialog_critical(str(e))
else:
self.path = path
self.update_title()
def file_print(self):
dlg = QPrintDialog()
if dlg.exec() == QDialog.Accepted:
self.editor.print(dlg.printer())
def preview(self):
dlg = QPrintPreviewDialog()
dlg.paintRequested.connect(self.editor.print)
dlg.exec()
def chose_draw_color(self):
self.editor.setTextColor(QColorDialog(self).getColor(self.editor.textColor(), self, "Zeichenfarbe"))
def update_title(self):
self.setWindowTitle("%s - Word" % (os.path.basename(self.path) if self.path else "Unbenannt"))
def update_title_chanced(self):
self.setWindowTitle("*%s - Word" % (os.path.basename(self.path) if self.path else "Unbenannt"))
def edit_toggle_wrap(self):
        self.editor.setLineWrapMode(QTextEdit.WidgetWidth if self.editor.lineWrapMode() == QTextEdit.NoWrap else QTextEdit.NoWrap)
if __name__ == '__main__':
app = QApplication(sys.argv)
app.setApplicationName("Word")
window = MainWindow()
app.exec_()
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Registry responsible for built-in keras classes."""
import logging
import tensorflow as tf
from tensorflow_model_optimization.python.core.clustering.keras import clustering_registry
from tensorflow_model_optimization.python.core.quantization.keras import quant_ops
from tensorflow_model_optimization.python.core.quantization.keras import quantizers
from tensorflow_model_optimization.python.core.quantization.keras.default_8bit import default_8bit_quantize_registry
from tensorflow_model_optimization.python.core.quantization.keras.default_8bit import default_8bit_quantizers
layers = tf.keras.layers
K = tf.keras.backend
CLUSTER_CENTROIDS = 'cluster_centroids_tf'
PULLING_INDICES = 'pulling_indices_tf'
ORIGINAL_WEIGHTS = 'ori_weights_vars_tf'
WEIGHT_NAME = 'weight_name'
CLUSTERING_IMPL = 'clst_impl'
CENTROIDS_MASK = 'centroids_mask'
SPARSITY_MASK = 'sparsity_mask'
def get_unique(t):
"""Get unique values and lookup index from N-D tensor.
Args:
t: tensor
Returns:
unique value, lookup index (same shape as input tensor)
Example:
t:
([[1.0, 2.0],
[2.0, 3.0],
[3.0, 3.0],
[1.0, 2.0]]
)
uniques:
([1.0, 2.0, 3.0])
output final index:
([[0, 1],
[1, 2],
[2, 2],
[0, 1]]
)
"""
t_flatten = tf.reshape(t, shape=(-1,))
uniques, index = tf.unique(t_flatten)
return uniques, tf.reshape(index, shape=tf.shape(t))
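# Illustrative check of get_unique (comment-only, so import behaviour of this
# module is unchanged), mirroring the docstring example above:
#   t = tf.constant([[1.0, 2.0], [2.0, 3.0], [3.0, 3.0], [1.0, 2.0]])
#   uniques, index = get_unique(t)
#   # uniques -> [1.0, 2.0, 3.0]; index -> [[0, 1], [1, 2], [2, 2], [0, 1]]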
class _ClusterPreserveInfo(object):
"""ClusterPreserveInfo."""
def __init__(self, weight_attrs, quantize_config_attrs):
"""ClusterPreserveInfo.
Args:
weight_attrs: list of cluster preservable weight attributes of layer.
quantize_config_attrs: list of quantization configuration class name.
"""
self.weight_attrs = weight_attrs
self.quantize_config_attrs = quantize_config_attrs
class ClusterPreserveQuantizeRegistry(object):
"""ClusterPreserveQuantizeRegistry is for built-in keras layers."""
  # The keys are built-in keras layers; the first value is the list of layer
  # attributes that hold the kernel weights, and the second value is the class
  # name of the quantization configuration for that layer. Together these
  # decide which weights of layers with quantization configurations are
  # cluster preservable.
_LAYERS_CONFIG_MAP = {
layers.Conv2D:
_ClusterPreserveInfo(['kernel'], ['Default8BitConvQuantizeConfig']),
layers.Dense:
_ClusterPreserveInfo(['kernel'], ['Default8BitQuantizeConfig']),
# DepthwiseConv2D is supported with 8bit qat, but not with
# clustering, thus for DepthwiseConv2D CQAT,
# preserving clustered weights is disabled.
layers.DepthwiseConv2D:
_ClusterPreserveInfo(['depthwise_kernel'],
['Default8BitQuantizeConfig']),
# layers that are supported with clustering, but not yet with qat
# layers.Conv1D:
# _ClusterPreserveInfo(['kernel'], []),
# layers.Conv2DTranspose:
# _ClusterPreserveInfo(['kernel'], []),
# layers.Conv3D:
# _ClusterPreserveInfo(['kernel'], []),
# layers.Conv3DTranspose:
# _ClusterPreserveInfo(['kernel'], []),
# layers.LocallyConnected1D:
# _ClusterPreserveInfo(['kernel'], ['Default8BitQuantizeConfig']),
# layers.LocallyConnected2D:
# _ClusterPreserveInfo(['kernel'], ['Default8BitQuantizeConfig']),
# SeparableConv need verify from 8bit qat
# layers.SeparableConv1D:
# _ClusterPreserveInfo(['pointwise_kernel'],
# ['Default8BitConvQuantizeConfig']),
# layers.SeparableConv2D:
# _ClusterPreserveInfo(['pointwise_kernel'],
# ['Default8BitConvQuantizeConfig']),
# Embedding need verify from 8bit qat
# layers.Embedding: _ClusterPreserveInfo(['embeddings'], []),
}
_DISABLE_CLUSTER_PRESERVE = frozenset({
layers.DepthwiseConv2D,
})
def __init__(self, preserve_sparsity):
self._config_quantizer_map = {
'Default8BitQuantizeConfig':
ClusterPreserveDefault8BitWeightsQuantizer(preserve_sparsity),
'Default8BitConvQuantizeConfig':
ClusterPreserveDefault8BitConvWeightsQuantizer(preserve_sparsity),
}
@classmethod
def _no_trainable_weights(cls, layer):
"""Returns whether this layer has trainable weights.
Args:
layer: The layer to check for trainable weights.
Returns:
      True/False whether the layer has no trainable weights.
"""
return not layer.trainable_weights
@classmethod
def _disable_cluster_preserve(cls, layer):
"""Returns whether to disable this layer for preserving clusters.
Args:
layer: The layer to check for disabling.
Returns:
True/False whether disabling this layer for preserving clusters.
"""
return layer.__class__ in cls._DISABLE_CLUSTER_PRESERVE
@classmethod
def supports(cls, layer):
"""Returns whether the registry supports this layer type.
Args:
layer: The layer to check for support.
Returns:
True/False whether the layer type is supported.
"""
    # layers without trainable weights are considered supported,
# e.g., ReLU, Softmax, and AveragePooling2D.
if cls._no_trainable_weights(layer):
return True
if layer.__class__ in cls._LAYERS_CONFIG_MAP:
return True
return False
@classmethod
def _weight_names(cls, layer):
if cls._no_trainable_weights(layer):
return []
return cls._LAYERS_CONFIG_MAP[layer.__class__].weight_attrs
def apply_cluster_preserve_quantize_config(self, layer, quantize_config):
"""Applies cluster-preserve weight quantizer.
Args:
layer: The layer to check for support.
quantize_config: quantization config for supporting cluster preservation
on clustered weights
Returns:
The quantize_config with addon cluster preserve weight_quantizer.
"""
if not self.supports(layer):
raise ValueError('Layer ' + str(layer.__class__) + ' is not supported.')
# Example: ReLU, Softmax, and AveragePooling2D (without trainable weights)
# DepthwiseConv2D (cluster_preserve is disabled)
if self._no_trainable_weights(layer) or self._disable_cluster_preserve(
layer):
return quantize_config
# Example: Conv2D, Dense layers
if quantize_config.__class__.__name__ in self._LAYERS_CONFIG_MAP[
layer.__class__].quantize_config_attrs:
quantize_config.weight_quantizer = self._config_quantizer_map[
quantize_config.__class__.__name__]
else:
raise ValueError('Configuration ' +
str(quantize_config.__class__.__name__) +
' is not supported for Layer ' + str(layer.__class__) +
'.')
return quantize_config
class Default8bitClusterPreserveQuantizeRegistry(
ClusterPreserveQuantizeRegistry):
"""Default 8 bit ClusterPreserveQuantizeRegistry."""
def __init__(self, preserve_sparsity):
super(Default8bitClusterPreserveQuantizeRegistry, self).__init__(
preserve_sparsity)
self.preserve_sparsity = preserve_sparsity
def get_quantize_config(self, layer):
"""Returns the quantization config with weight_quantizer for a given layer.
Args:
layer: input layer to return quantize config for.
Returns:
Returns the quantization config for cluster preserve weight_quantizer.
"""
quantize_config = (default_8bit_quantize_registry.
Default8BitQuantizeRegistry().
get_quantize_config(layer))
cluster_aware_quantize_config = super(
Default8bitClusterPreserveQuantizeRegistry,
self).apply_cluster_preserve_quantize_config(layer, quantize_config)
return cluster_aware_quantize_config
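# Illustrative usage of the registry above (a hedged sketch based only on the
# methods defined in this file, not on the wider tfmot pipeline):
#   registry = Default8bitClusterPreserveQuantizeRegistry(preserve_sparsity=True)
#   if registry.supports(layer):
#       quantize_config = registry.get_quantize_config(layer)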
class ClusterPreserveDefaultWeightsQuantizer(quantizers.LastValueQuantizer):
"""Quantize weights while preserving clusters."""
def __init__(
self, num_bits, per_axis, symmetric, narrow_range, preserve_sparsity):
"""ClusterPreserveDefaultWeightsQuantizer.
Args:
num_bits: Number of bits for quantization
per_axis: Whether to apply per_axis quantization. The last dimension is
used as the axis.
symmetric: If true, use symmetric quantization limits instead of training
the minimum and maximum of each quantization range separately.
narrow_range: In case of 8 bits, narrow_range nudges the quantized range
to be [-127, 127] instead of [-128, 127]. This ensures symmetric
range has 0 as the centre.
preserve_sparsity: Whether to apply prune-cluster-preserving quantization
aware training.
"""
super(ClusterPreserveDefaultWeightsQuantizer, self).__init__(
num_bits=num_bits,
per_axis=per_axis,
symmetric=symmetric,
narrow_range=narrow_range,
)
self.preserve_sparsity = preserve_sparsity
def _build_clusters(self, name, layer):
"""Extracts the cluster centroids and cluster indices.
Extracts cluster centroids and cluster indices from the pretrained
clustered model when the input layer is clustered.
Args:
name: Name of weights in layer.
layer: Quantization wrapped keras layer.
Returns:
A dictionary of the initial values of the
cluster centroids, cluster indices, original weights,
the pretrained flag for marking the first training
epoch, and weight name.
"""
result = {}
weights = getattr(layer.layer, name)
if self.preserve_sparsity and not tf.reduce_any(weights == 0):
self.preserve_sparsity = False
logging.warning(
'Input layer does not contain zero weights, so apply CQAT instead.')
centroids_mask = None
centroids, lookup = get_unique(weights)
num_centroids = tf.size(centroids)
if self.preserve_sparsity:
sparsity_mask = tf.math.divide_no_nan(weights, weights)
zero_idx = tf.argmin(tf.abs(centroids), axis=-1)
centroids_mask = 1.0 - tf.one_hot(zero_idx, num_centroids)
result = {SPARSITY_MASK: sparsity_mask}
# Prepare clustering variables for the Keras graph when clusters
# exist, assuming we do not use number_of_clusters larger than 1024
if num_centroids > 1024:
return result
else:
clst_centroids_tf = layer.add_weight(
CLUSTER_CENTROIDS,
shape=centroids.shape,
initializer=tf.keras.initializers.Constant(
value=K.batch_get_value([centroids])[0]),
dtype=centroids.dtype,
trainable=True)
ori_weights_tf = layer.add_weight(
ORIGINAL_WEIGHTS,
shape=weights.shape,
initializer=tf.keras.initializers.Constant(
value=K.batch_get_value([weights])[0]),
dtype=weights.dtype,
trainable=True)
# Get clustering implementation according to layer type
clustering_impl_cls = clustering_registry.ClusteringLookupRegistry(
).get_clustering_impl(layer.layer, name)
clustering_impl = clustering_impl_cls(clst_centroids_tf)
pulling_indices = tf.dtypes.cast(
clustering_impl.get_pulling_indices(ori_weights_tf),
lookup.dtype
)
pulling_indices_tf = layer.add_weight(
PULLING_INDICES,
shape=lookup.shape,
initializer=tf.keras.initializers.Constant(
value=K.batch_get_value([pulling_indices])[0]),
dtype=lookup.dtype,
trainable=False)
result_clst = {
CLUSTER_CENTROIDS: clst_centroids_tf,
PULLING_INDICES: pulling_indices_tf,
ORIGINAL_WEIGHTS: ori_weights_tf,
WEIGHT_NAME: name,
CLUSTERING_IMPL: clustering_impl,
CENTROIDS_MASK: centroids_mask,
}
result.update(result_clst)
return result
def build(self, tensor_shape, name, layer):
"""Build (P)CQAT wrapper.
When preserve_sparsity is true and the input is clustered.
Args:
tensor_shape: Shape of weights which needs to be quantized.
name: Name of weights in layer.
layer: Quantization wrapped keras layer.
Returns:
Dictionary of centroids, indices and
quantization params, the dictionary will be passed
to __call__ function.
"""
# To get all the initial values from pretrained clustered model
result = self._build_clusters(name, layer)
# Result can have clustering nodes, then this is CQAT
# Result can have both clustering nodes and sparsity mask, then
# this will be PCQAT
result.update(
super(ClusterPreserveDefaultWeightsQuantizer,
self).build(tensor_shape, name, layer))
return result
def __call__(self, inputs, training, weights, **kwargs):
"""Apply cluster preserved quantization to the input tensor.
Args:
inputs: Input tensor (layer's weights) to be quantized.
training: Whether the graph is currently training.
weights: Dictionary of weights (params) the quantizer can use to
quantize the tensor (layer's weights). This contains the weights
created in the `build` function.
**kwargs: Additional variables which may be passed to the quantizer.
Returns:
quantized tensor.
"""
if training:
if CLUSTER_CENTROIDS in weights:
if self.preserve_sparsity:
weights[ORIGINAL_WEIGHTS].assign(
tf.multiply(weights[ORIGINAL_WEIGHTS],
weights[SPARSITY_MASK]))
weights[CLUSTERING_IMPL].cluster_centroids.assign(
weights[CLUSTERING_IMPL].
cluster_centroids * weights[CENTROIDS_MASK]
)
weights[CLUSTER_CENTROIDS].assign(
weights[CLUSTERING_IMPL].cluster_centroids
)
# Insert clustering variables
weights[PULLING_INDICES].assign(tf.dtypes.cast(
weights[CLUSTERING_IMPL].get_pulling_indices(
weights[ORIGINAL_WEIGHTS]),
weights[PULLING_INDICES].dtype
))
output = weights[CLUSTERING_IMPL].get_clustered_weight(
weights[PULLING_INDICES], weights[ORIGINAL_WEIGHTS])
inputs.assign(output)
else:
if self.preserve_sparsity:
inputs = tf.multiply(inputs, weights[SPARSITY_MASK])
output = inputs
else:
output = inputs
return quant_ops.LastValueQuantize(
output,
weights['min_var'],
weights['max_var'],
is_training=training,
num_bits=self.num_bits,
per_channel=self.per_axis,
symmetric=self.symmetric,
narrow_range=self.narrow_range
)
class ClusterPreserveDefault8BitWeightsQuantizer(
ClusterPreserveDefaultWeightsQuantizer):
"""ClusterPreserveWeightsQuantizer for default 8bit weights."""
def __init__(self, preserve_sparsity):
super(ClusterPreserveDefault8BitWeightsQuantizer,
self).__init__(num_bits=8,
per_axis=False,
symmetric=True,
narrow_range=True,
preserve_sparsity=preserve_sparsity)
self.preserve_sparsity = preserve_sparsity
class ClusterPreserveDefault8BitConvWeightsQuantizer(
ClusterPreserveDefaultWeightsQuantizer,
default_8bit_quantizers.Default8BitConvWeightsQuantizer):
"""ClusterPreserveWeightsQuantizer for default 8bit Conv2D weights."""
def __init__(self, preserve_sparsity): # pylint: disable=super-init-not-called
default_8bit_quantizers.Default8BitConvWeightsQuantizer.__init__(self)
self.preserve_sparsity = preserve_sparsity
def build(self, tensor_shape, name, layer):
result = ClusterPreserveDefaultWeightsQuantizer._build_clusters(
self, name, layer)
result.update(
default_8bit_quantizers.Default8BitConvWeightsQuantizer.build(
self, tensor_shape, name, layer))
return result
|
"""
Created on 3 Oct 2019
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
Used by version 1 Handhelds.
Tuned for 2 x Nitecore IMR 18650 (3.7 V, 3100 mAh)
https://www.nitecore.co.uk/Shop/Products/Batteries/13663-Nitecore-IMR-18650-Battery-for-TM28-Torch.html#Features
"""
import json
from scs_psu.batt_pack.batt_pack import BattPack
from scs_psu.batt_pack.fuel_gauge.max17055.max17055_config import Max17055Config
from scs_psu.batt_pack.fuel_gauge.max17055.max17055_params import Max17055Params
# --------------------------------------------------------------------------------------------------------------------
class BattPackV1(BattPack):
"""
classdocs
"""
__CHARGE_MINIMUM = 1 # percent
__DEFAULT_PARAMS = '{"calibrated-on": "2021-04-19T13:29:46+01:00", "r-comp-0": 106, "temp-co": 8766, ' \
'"full-cap-rep": 10396, "full-cap-nom": 40738, "cycles": 245}'
# ----------------------------------------------------------------------------------------------------------------
@staticmethod
def name():
return 'PackV1'
@staticmethod
def gauge_conf():
des_cap = 6200 # mAh
sense_res = 0.01 # Ω
chrg_term = 10 # mA
empty_v_target = 3.3 # V
recovery_v = 3.8 # V
chrg_v = Max17055Config.CHRG_V_4_2
batt_type = Max17055Config.BATT_TYPE_LiCoO2
return Max17055Config(des_cap, sense_res, chrg_term, empty_v_target, recovery_v, chrg_v, batt_type)
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def charge_min(cls):
return cls.__CHARGE_MINIMUM
@classmethod
def default_params(cls):
return Max17055Params.construct_from_jdict(json.loads(cls.__DEFAULT_PARAMS))
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, gauge):
"""
Constructor
"""
super().__init__(gauge)
|
'''
Created on Oct 11, 2017
@author: seth_
'''
from .PlaygroundStandardPacketEncoder import PlaygroundStandardPacketEncoder, PacketEncoder
from .PacketFramingStream import PacketFramingStreamAdapter
class PlaygroundFramingPacketEncoder(PlaygroundStandardPacketEncoder):
def encode(self, stream, fieldType):
typeEncoder = self.GetTypeEncoder(fieldType)
if typeEncoder == PacketEncoder:
stream = PacketFramingStreamAdapter.Adapt(stream)
ret = super().encode(stream, fieldType)
if typeEncoder == PacketEncoder:
stream.writeFrame()
def decodeIterator(self, stream, fieldType):
typeEncoder = self.GetTypeEncoder(fieldType)
if typeEncoder == PacketEncoder:
stream = PacketFramingStreamAdapter.Adapt(stream)
yield from super().decodeIterator(stream, fieldType)
if typeEncoder == PacketEncoder:
stream.closeFrame()
|
import logging
import urllib
from typing import Any, Dict, List
import tornado.web
from django import http
from django.core import signals
from django.core.handlers import base
from django.core.handlers.wsgi import WSGIRequest, get_script_name
from django.urls import set_script_prefix
from django.http import HttpRequest, HttpResponse
from tornado.wsgi import WSGIContainer
from zerver.lib.response import json_response
from zerver.middleware import async_request_timer_restart, async_request_timer_stop
from zerver.tornado.descriptors import get_descriptor_by_handler_id
current_handler_id = 0
handlers: Dict[int, 'AsyncDjangoHandler'] = {}
# Copied from django.core.handlers.base
logger = logging.getLogger('django.request')
def get_handler_by_id(handler_id: int) -> 'AsyncDjangoHandler':
return handlers[handler_id]
def allocate_handler_id(handler: 'AsyncDjangoHandler') -> int:
global current_handler_id
handlers[current_handler_id] = handler
handler.handler_id = current_handler_id
current_handler_id += 1
return handler.handler_id
def clear_handler_by_id(handler_id: int) -> None:
del handlers[handler_id]
def handler_stats_string() -> str:
return "%s handlers, latest ID %s" % (len(handlers), current_handler_id)
def finish_handler(handler_id: int, event_queue_id: str,
contents: List[Dict[str, Any]], apply_markdown: bool) -> None:
err_msg = "Got error finishing handler for queue %s" % (event_queue_id,)
try:
# We call async_request_timer_restart here in case we are
# being finished without any events (because another
# get_events request has supplanted this request)
handler = get_handler_by_id(handler_id)
request = handler._request
async_request_timer_restart(request)
if len(contents) != 1:
request._log_data['extra'] = "[%s/1]" % (event_queue_id,)
else:
request._log_data['extra'] = "[%s/1/%s]" % (event_queue_id, contents[0]["type"])
handler.zulip_finish(dict(result='success', msg='',
events=contents,
queue_id=event_queue_id),
request, apply_markdown=apply_markdown)
except OSError as e:
if str(e) != 'Stream is closed':
logging.exception(err_msg)
except AssertionError as e:
if str(e) != 'Request closed':
logging.exception(err_msg)
except Exception:
logging.exception(err_msg)
class AsyncDjangoHandler(tornado.web.RequestHandler, base.BaseHandler):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
# Copied from the django.core.handlers.wsgi __init__() method.
self.load_middleware()
# Prevent Tornado from automatically finishing the request
self._auto_finish = False
# Handler IDs are allocated here, and the handler ID map must
# be cleared when the handler finishes its response
allocate_handler_id(self)
def __repr__(self) -> str:
descriptor = get_descriptor_by_handler_id(self.handler_id)
return "AsyncDjangoHandler<%s, %s>" % (self.handler_id, descriptor)
def convert_tornado_request_to_django_request(self) -> HttpRequest:
# This takes the WSGI environment that Tornado received (which
# fully describes the HTTP request that was sent to Tornado)
# and pass it to Django's WSGIRequest to generate a Django
# HttpRequest object with the original Tornado request's HTTP
# headers, parameters, etc.
environ = WSGIContainer.environ(self.request)
environ['PATH_INFO'] = urllib.parse.unquote(environ['PATH_INFO'])
# Django WSGIRequest setup code that should match logic from
# Django's WSGIHandler.__call__ before the call to
# `get_response()`.
set_script_prefix(get_script_name(environ))
signals.request_started.send(sender=self.__class__)
request = WSGIRequest(environ)
# Provide a way for application code to access this handler
# given the HttpRequest object.
request._tornado_handler = self
return request
def write_django_response_as_tornado_response(self, response: HttpResponse) -> None:
# This takes a Django HttpResponse and copies its HTTP status
# code, headers, cookies, and content onto this
# tornado.web.RequestHandler (which is how Tornado prepares a
# response to write).
# Copy the HTTP status code.
self.set_status(response.status_code)
# Copy the HTTP headers (iterating through a Django
# HttpResponse is the way to access its headers as key/value pairs)
for h in response.items():
self.set_header(h[0], h[1])
# Copy any cookies
if not hasattr(self, "_new_cookies"):
self._new_cookies: List[http.cookie.SimpleCookie[str]] = []
self._new_cookies.append(response.cookies)
# Copy the response content
self.write(response.content)
# Close the connection.
self.finish()
def get(self, *args: Any, **kwargs: Any) -> None:
request = self.convert_tornado_request_to_django_request()
try:
response = self.get_response(request)
if hasattr(response, "asynchronous"):
# For asynchronous requests, this is where we exit
# without returning the HttpResponse that Django
# generated back to the user in order to long-poll the
# connection. We save some timers here in order to
# support accurate accounting of the total resources
# consumed by the request when it eventually returns a
# response and is logged.
async_request_timer_stop(request)
return
finally:
# Tell Django that we're done processing this request on
# the Django side; this triggers cleanup work like
# resetting the urlconf and any cache/database
# connections.
signals.request_finished.send(sender=self.__class__)
# For normal/synchronous requests that don't end up
# long-polling, we fall through to here and just need to write
# the HTTP response that Django prepared for us via Tornado.
# Mark this handler ID as finished for Zulip's own tracking.
clear_handler_by_id(self.handler_id)
self.write_django_response_as_tornado_response(response)
def head(self, *args: Any, **kwargs: Any) -> None:
self.get(*args, **kwargs)
def post(self, *args: Any, **kwargs: Any) -> None:
self.get(*args, **kwargs)
def delete(self, *args: Any, **kwargs: Any) -> None:
self.get(*args, **kwargs)
def on_connection_close(self) -> None:
# Register a Tornado handler that runs when client-side
# connections are closed to notify the events system.
#
# TODO: Theoretically, this code should run when you Ctrl-C
# curl to cause it to break a `GET /events` connection, but
# that seems to no longer run this code. Investigate what's up.
client_descriptor = get_descriptor_by_handler_id(self.handler_id)
if client_descriptor is not None:
client_descriptor.disconnect_handler(client_closed=True)
def zulip_finish(self, result_dict: Dict[str, Any], old_request: HttpRequest,
apply_markdown: bool) -> None:
# Function called when we want to break a long-polled
# get_events request and return a response to the client.
# Marshall the response data from result_dict.
if result_dict['result'] == 'success' and 'messages' in result_dict and apply_markdown:
for msg in result_dict['messages']:
if msg['content_type'] != 'text/html':
self.set_status(500)
self.finish('Internal error: bad message format')
if result_dict['result'] == 'error':
self.set_status(400)
# The `result` dictionary contains the data we want to return
# to the client. We want to do so in a proper Tornado HTTP
# response after running the Django response middleware (which
# does things like log the request, add rate-limit headers,
# etc.). The Django middleware API expects to receive a fresh
# HttpRequest object, and so to minimize hacks, our strategy
# is to create a duplicate Django HttpRequest object, tagged
# to automatically return our data in its response, and call
# Django's main self.get_response() handler to generate an
# HttpResponse with all Django middleware run.
request = self.convert_tornado_request_to_django_request()
# Add to this new HttpRequest logging data from the processing of
# the original request; we will need these for logging.
#
# TODO: Design a cleaner way to manage these attributes,
# perhaps via creating a ZulipHttpRequest class that contains
# these attributes with a copy method.
request._log_data = old_request._log_data
if hasattr(request, "_rate_limit"):
request._rate_limit = old_request._rate_limit
if hasattr(request, "_requestor_for_logs"):
request._requestor_for_logs = old_request._requestor_for_logs
request.user = old_request.user
request.client = old_request.client
# The saved_response attribute, if present, causes
# rest_dispatch to return the response immediately before
# doing any work. This arrangement allows Django's full
# request/middleware system to run unmodified while avoiding
# running expensive things like Zulip's authentication code a
# second time.
request.saved_response = json_response(res_type=result_dict['result'],
data=result_dict, status=self.get_status())
try:
response = self.get_response(request)
finally:
# Tell Django we're done processing this request
#
# TODO: Investigate whether this (and other call points in
# this file) should be using response.close() instead.
signals.request_finished.send(sender=self.__class__)
self.write_django_response_as_tornado_response(response)
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'adv_1_2.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
import datetime
import logging
import os
import subprocess
import sys
import ncscli.batchRunner as batchRunner
class k6FrameProcessor(batchRunner.frameProcessor):
'''defines details for using k6 for a simplistic load test'''
def installerCmd( self ):
return 'tar -vzxf k6Worker/k6.tar.gz; ls -al k6Worker'
def frameOutFileName( self, frameNum ):
return 'worker_%03d.*' % frameNum
#return 'worker_%03d.csv' % frameNum
def frameCmd( self, frameNum ):
cmd = 'cd k6Worker && ./k6 run -q script.js --out csv=~/worker_%03d.csv' % (
frameNum
)
return cmd
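# Note: batchRunner treats each frame as one unit of work on a single worker; the
# shell command returned by frameCmd() runs remotely, and output files matching
# frameOutFileName() are collected into outDataDir when the frame completes.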
# configure logger formatting
#logging.basicConfig()
logger = logging.getLogger(__name__)
logFmt = '%(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s'
logDateFmt = '%Y/%m/%d %H:%M:%S'
formatter = logging.Formatter(fmt=logFmt, datefmt=logDateFmt )
logging.basicConfig(format=logFmt, datefmt=logDateFmt)
#batchRunner.logger.setLevel(logging.DEBUG) # for more verbosity
dateTimeTag = datetime.datetime.now().strftime( '%Y-%m-%d_%H%M%S' )
outDataDir = 'data/k6_' + dateTimeTag
if not os.path.isfile( 'k6Worker/k6.tar.gz' ):
logger.error( 'the compressed k6 binary was not found, you may need to build and compress it' )
sys.exit( 1)
try:
rc = batchRunner.runBatch(
frameProcessor = k6FrameProcessor(),
commonInFilePath = 'k6Worker',
authToken = os.getenv('NCS_AUTH_TOKEN') or 'YourAuthTokenHere',
encryptFiles=False,
timeLimit = 80*60,
instTimeLimit = 7*60,
frameTimeLimit = 14*60,
filter = '{"dpr": ">=48","ram:":">=2800000000","app-version": ">=2.1.11"}',
outDataDir = outDataDir,
startFrame = 1,
endFrame = 6,
nWorkers = 10,
limitOneFramePerWorker = True,
autoscaleMax = 2
)
if os.path.isfile( outDataDir +'/recruitLaunched.json' ):
rc2 = subprocess.call( [sys.executable, 'plotK6Output.py', '--dataDirPath', outDataDir],
stdout=subprocess.DEVNULL )
if rc2:
logger.warning( 'plotK6Output.py exited with returnCode %d', rc2 )
sys.exit( rc )
except KeyboardInterrupt:
    logger.warning( 'an interruption occurred')
|
#! /usr/bin/env python3
# Copyright (c) 2021 Dave McCoy (dave.mccoy@cospandesign.com)
#
# NAME is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NAME is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NAME; If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import argparse
import warnings
import time
from pynq import PL
from pynq import Overlay
from pynq.lib.video import *
from pynq.lib.video.hierarchies import *
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
from drivers.video_mixer import VideoMixer
from drivers.dynamic_clock import DynamicClock
from drivers.timing_controller import TimingController
from drivers.test_pattern_generator import TestPatternGenerator
from drivers.test_pattern_generator import TestPatternID
from drivers.axi_graphics import AXIGraphics
NAME = os.path.basename(os.path.realpath(__file__))
DESCRIPTION = "\n" \
"\n" \
"usage: %s [options]\n" % NAME
EPILOG = "\n" \
"\n" \
"Examples:\n" \
"\tSomething\n" \
"\n"
def main(argv):
#Parse out the commandline arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=DESCRIPTION,
epilog=EPILOG
)
parser.add_argument("-t", "--test",
nargs=1,
default=["something"])
parser.add_argument("-d", "--debug",
action="store_true",
help="Enable Debug Messages")
args = parser.parse_args()
print ("Running Script: %s" % NAME)
if args.debug:
print ("test: %s" % str(args.test[0]))
BF = os.path.join("./data/system_wrapper.bit")
if not os.path.exists(BF):
print ("%s Doesn't exist Exiting!!" % BF)
return
else:
print ("Found bit file!")
ol = Overlay(BF)
ol.download()
print ("Bitfile downloaded!")
print ("Starting Video")
print ("Configuring Timing Controller")
tc = TimingController("video/timing_generator")
tc.reset()
while (not tc.is_reset_done()):
print (".")
WIDTH = tc.get_generator_width()
HEIGHT = tc.get_generator_height()
print ("Image Size (Retrieved from Timing Controller): %d x %d" % (WIDTH, HEIGHT))
print ("Configuring Test Pattern Generator")
tpg = TestPatternGenerator("video/v_tpg_0", debug = args.debug)
tpg.set_image_size(WIDTH, HEIGHT)
tpg.set_color_format_to_rgb()
#tpg.set_color_bar_test_pattern()
#tpg.set_test_pattern(TestPatternID.SOLID_RED)
#tpg.set_test_pattern(TestPatternID.SOLID_WHITE)
tpg.set_test_pattern(TestPatternID.SOLID_BLACK)
#tpg.set_test_pattern(TestPatternID.COLOR_BARS)
tpg.start()
SUB_WIN_WIDTH = 640
SUB_WIN_HEIGHT = 480
#SUB_WIN_WIDTH = 16
#SUB_WIN_HEIGHT = 4
#SUB_WIN_WIDTH = WIDTH
#SUB_WIN_HEIGHT = HEIGHT
print ("Configuring Video Mixer")
vm = VideoMixer("video/v_mix_0", WIDTH, HEIGHT)
#vm.configure_layer(0, 0, 0, WIDTH, HEIGHT)
vm.configure_layer(1, 0, 0, SUB_WIN_WIDTH, SUB_WIN_HEIGHT)
vm.enable_layer(1, True)
vm.configure_master_layer(WIDTH, HEIGHT)
#vm.enable_overlay_layer(True)
if args.debug: print ("Video Mixer Control Register Before Enable: 0x%08X" % vm.get_control())
vm.start()
if args.debug: print ("Video Mixer Control Register After Enable: 0x%08X" % vm.get_control())
print ("Enable Master Layer (Test Pattern) Output")
vm.enable_master_layer(True)
if args.debug: print ("VM Settings:")
if args.debug: print (" x pos: %d" % vm.get_layer_x(0))
if args.debug: print (" y pos: %d" % vm.get_layer_y(0))
if args.debug: print (" width: %d" % vm.get_layer_width(0))
if args.debug: print (" height: %d" % vm.get_layer_height(0))
if args.debug: print (" scale: %d" % vm.get_layer_scale(0))
if args.debug: print (" alpha: %d" % vm.get_layer_alpha(0))
if args.debug: print (" stride: %d" % vm.get_layer_stride(0))
if args.debug: print (" p1 Addr: %d" % vm.get_layer_plane1_addr(0))
if args.debug: print (" p2 Addr: %d" % vm.get_layer_plane2_addr(0))
if args.debug: print ("Layer Settings: 0x%08X" % vm.get_layer_enable_reg())
g0 = ol.low_speed.axi_gpio_0
g0.write(0x0C, 0x01)
tc.enable(gen_enable = True, use_gen_src = True)
if args.debug: print ("TC Control Register: 0x%08X" % tc.get_control_reg())
print ("Interfacing with AXI Graphics Controller: \n")
print ("%s" % str(ol.ip_dict["video/axi_graphics_0"]))
ag = AXIGraphics("video/axi_graphics_0", debug = args.debug)
#print ("AXI Graphics Started: %s" % str(ol.ip_dict["video/axi_graphics"]))
#time.sleep(0.1)
#time.sleep(5)
print ("AXI Graphics Control Register: 0x%08X" % ag.get_control())
#print ("AXI Graphics: 0x%08X" % ag.get_version())
ag.set_width(SUB_WIN_WIDTH)
ag.set_height(SUB_WIN_HEIGHT)
print ("Size: %d x %d" % (ag.get_width(), ag.get_height()))
#ag.set_mode(0) # Black
#ag.set_mode(1) # White
#ag.set_mode(2) # Red
#ag.set_mode(3) # Green
#ag.set_mode(4) # Blue
ag.set_mode(5) # Color Bars
#ag.set_mode(6) # Block
#ag.set_mode(7) # Ramp
ag.set_alpha(0xFF)
ag.set_ref0_xy(100, 100)
ag.set_ref1_xy(200, 200)
ag.set_interval(100)
ag.enable_rgba_format(True)
#ag.enable_rgba_format(False)
ag.enable(True)
print ("AXI Graphics Control Register: 0x%08X" % ag.get_control())
#print ("Sleeping for 5 seconds")
#time.sleep(5)
if __name__ == "__main__":
main(sys.argv)
|
# coding=utf-8
# Copyright 2019 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for fairness_gym.environments.template."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import test_util
from environments import template
class TemplateTest(absltest.TestCase):
def test_example_environment_can_run(self):
test_util.run_test_simulation(env=template.ExampleEnv())
if __name__ == '__main__':
absltest.main()
|
import re
from django.template.defaultfilters import force_escape
import django
from cms.models import CMSPlugin
from cms.plugins.utils import downcast_plugins
from distutils.version import LooseVersion
from django.utils.functional import LazyObject
from django.core.files.storage import get_storage_class
import os
OBJ_ADMIN_RE_PATTERN = r'<img [^>]*\bid="plugin_obj_(\d+)"[^>]*/?>'
OBJ_ADMIN_RE = re.compile(OBJ_ADMIN_RE_PATTERN)
def plugin_to_tag(obj):
return u'<img src="%(icon_src)s" alt="%(icon_alt)s" title="%(icon_alt)s" id="plugin_obj_%(id)d" />' % \
dict(id=obj.id,
icon_src=force_escape(obj.get_instance_icon_src()),
icon_alt=force_escape(obj.get_instance_icon_alt()),
)
def plugin_tags_to_id_list(text, regex=OBJ_ADMIN_RE):
ids = regex.findall(text)
return [int(id) for id in ids if id.isdigit()]
def plugin_tags_to_user_html(text, context, placeholder):
"""
Convert plugin object 'tags' into the form for public site.
context is the template context to use, placeholder is the placeholder name
"""
plugin_map = _plugin_dict(text)
def _render_tag(m):
plugin_id = int(m.groups()[0])
try:
obj = plugin_map[plugin_id]
obj._render_meta.text_enabled = True
except KeyError:
# Object must have been deleted. It cannot be rendered to
# end user so just remove it from the HTML altogether
return u''
return obj.render_plugin(context, placeholder)
return OBJ_ADMIN_RE.sub(_render_tag, text)
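# Illustration (added note): plugin_to_tag() above serialises an embedded plugin as
# an <img> placeholder such as
#   <img src="/icon.png" alt="Picture" title="Picture" id="plugin_obj_42" />
# and plugin_tags_to_id_list() applied to that markup returns [42].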
def replace_plugin_tags(text, id_dict):
def _replace_tag(m):
plugin_id = int(m.groups()[0])
new_id = id_dict.get(plugin_id)
try:
obj = CMSPlugin.objects.get(pk=new_id)
except CMSPlugin.DoesNotExist:
# Object must have been deleted. It cannot be rendered to
# end user, or edited, so just remove it from the HTML
# altogether
return u''
return u'<img src="%(icon_src)s" alt="%(icon_alt)s" title="%(icon_alt)s" id="plugin_obj_%(id)d" />' % \
dict(id=new_id,
icon_src=force_escape(obj.get_instance_icon_src()),
icon_alt=force_escape(obj.get_instance_icon_alt()),
)
return OBJ_ADMIN_RE.sub(_replace_tag, text)
def _plugin_dict(text, regex=OBJ_ADMIN_RE):
plugin_ids = plugin_tags_to_id_list(text, regex)
plugin_list = downcast_plugins(CMSPlugin.objects.filter(pk__in=plugin_ids), select_placeholder=True)
return dict((plugin.pk, plugin) for plugin in plugin_list)
"""
The following class is taken from https://github.com/jezdez/django/compare/feature/staticfiles-templatetag
and should be removed and replaced by the django-core version in 1.4
"""
default_storage = 'django.contrib.staticfiles.storage.StaticFilesStorage'
if LooseVersion(django.get_version()) < LooseVersion('1.3'):
default_storage = 'staticfiles.storage.StaticFilesStorage'
class ConfiguredStorage(LazyObject):
def _setup(self):
from django.conf import settings
self._wrapped = get_storage_class(getattr(settings, 'STATICFILES_STORAGE', default_storage))()
configured_storage = ConfiguredStorage()
def static_url(path):
'''
    Helper that prefixes a path with STATIC_URL for use by the cms.
'''
if not path:
return ''
return configured_storage.url(os.path.join('', path))
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .block import Mish, SeparableConv2d, Block
class WideTipXception(nn.Module):
def __init__(self, num_class):
super(WideTipXception, self).__init__()
self.conv1 = nn.Conv2d(1, 192, 3, 2, 1, bias=True)
self.bn1 = nn.BatchNorm2d(192)
self.mish = Mish()
self.conv2 = nn.Conv2d(192, 512, 3, 1, 1, bias=True)
self.bn2 = nn.BatchNorm2d(512)
self.block1 = Block(512,1024,3,1)
self.block2 = Block(1024,1024,3,1)
self.block3 = Block(1024,1024,3,1)
self.block4 = Block(1024,1024,3,1)
self.block5 = Block(1024,1024,3,1)
self.block6 = Block(1024,2048,2,2)
self.block7 = Block(2048,3072,2,2)
self.conv3 = SeparableConv2d(3072,4096,3,stride=1,padding=0,bias=True)
self.fc = nn.Linear(4096, num_class)
def forward(self, input):
x = self.conv1(input)
x = self.bn1(x)
x = self.mish(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
x = self.block4(x)
x = self.block5(x)
x = self.block6(x)
x = self.block7(x)
x = self.mish(x)
x = self.conv3(x)
x = self.mish(x)
x = F.adaptive_avg_pool2d(x, (1, 1))
x = x.view(x.size(0), -1)
result = self.fc(x)
return result
def get_classifiernet(num_class):
model = WideTipXception(num_class)
return model
|
class response():
def __init__(self) -> None:
self.num = 5
def call_result(self):
        return self.num
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
################################################################################
################################################################################
def roll_nn(X, axis, n):
f_idx = tuple(slice(None, None, None) if i != axis else slice(0, n, None) for i in range(X.dim()))
b_idx = tuple(slice(None, None, None) if i != axis else slice(n, None, None) for i in range(X.dim()))
front = X[f_idx]
back = X[b_idx]
return torch.cat([back, front], axis)
def fftshift2d(x, dims = [2, 3]):
real, imag = torch.unbind(x, -1)
for dim in dims:
n_shift = real.size(dim)//2
if real.size(dim) % 2 != 0:
n_shift += 1 # for odd-sized images
real = roll_nn(real, axis=dim, n=n_shift)
imag = roll_nn(imag, axis=dim, n=n_shift)
return torch.stack((real, imag), -1) # last dim=2 (real&imag)
def ifftshift2d(x, dims = [2, 3]):
real, imag = torch.unbind(x, -1)
for dim in dims[::-1]:
real = roll_nn(real, axis=dim, n=real.size(dim)//2)
imag = roll_nn(imag, axis=dim, n=imag.size(dim)//2)
return torch.stack((real, imag), -1) # last dim=2 (real&imag)
################################################################################
################################################################################
def roll_n(X, axis, n):
f_idx = tuple(slice(None, None, None) if i != axis else slice(0, n, None) for i in range(X.dim()))
b_idx = tuple(slice(None, None, None) if i != axis else slice(n, None, None) for i in range(X.dim()))
front = X[f_idx]
back = X[b_idx]
return torch.cat([back, front], axis)
def batch_fftshift2d(x, dims = [-2, -3]):
real, imag = torch.unbind(x, -1)
for dim in range(1, len(real.size())):
n_shift = real.size(dim)//2
if real.size(dim) % 2 != 0:
n_shift += 1 # for odd-sized images
real = roll_n(real, axis=dim, n=n_shift)
imag = roll_n(imag, axis=dim, n=n_shift)
return torch.stack((real, imag), -1) # last dim=2 (real&imag)
def batch_ifftshift2d(x):
real, imag = torch.unbind(x, -1)
for dim in range(len(real.size()) - 1, 0, -1):
real = roll_n(real, axis=dim, n=real.size(dim)//2)
imag = roll_n(imag, axis=dim, n=imag.size(dim)//2)
return torch.stack((real, imag), -1) # last dim=2 (real&imag)
################################################################################
################################################################################
def prepare_grid(m, n):
x = np.linspace(-(m // 2)/(m / 2), (m // 2)/(m / 2) - (1 - m % 2)*2/m, num=m)
y = np.linspace(-(n // 2)/(n / 2), (n // 2)/(n / 2) - (1 - n % 2)*2/n, num=n)
xv, yv = np.meshgrid(y, x)
angle = np.arctan2(yv, xv)
rad = np.sqrt(xv**2 + yv**2)
rad[m//2][n//2] = rad[m//2][n//2 - 1]
log_rad = np.log2(rad)
return log_rad, angle
def rcosFn(width, position):
    N = 256  # arbitrary
X = np.pi * np.array(range(-N-1, 2))/2/N
Y = np.cos(X)**2
Y[0] = Y[1]
Y[N+2] = Y[N+1]
X = position + 2*width/np.pi*(X + np.pi/4)
return X, Y
def pointOp(im, Y, X):
out = np.interp(im.flatten(), X, Y)
return np.reshape(out, im.shape)
def getlist(coeff):
straight = [bands for scale in coeff[1:-1] for bands in scale]
straight = [coeff[0]] + straight + [coeff[-1]]
return straight
################################################################################
# NumPy reference implementation (fftshift and ifftshift)
# def fftshift(x, axes=None):
# """
# Shift the zero-frequency component to the center of the spectrum.
# This function swaps half-spaces for all axes listed (defaults to all).
# Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even.
# Parameters
# """
# x = np.asarray(x)
# if axes is None:
#     axes = tuple(range(x.ndim))
#     shift = [dim // 2 for dim in x.shape]
# else:
#     shift = [x.shape[ax] // 2 for ax in axes]
# return np.roll(x, shift, axes)
#
# def ifftshift(x, axes=None):
# """
# The inverse of `fftshift`. Although identical for even-length `x`, the
# functions differ by one sample for odd-length `x`.
# """
# x = np.asarray(x)
# if axes is None:
#     axes = tuple(range(x.ndim))
#     shift = [-(dim // 2) for dim in x.shape]
# else:
#     shift = [-(x.shape[ax] // 2) for ax in axes]
# return np.roll(x, shift, axes)
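# ------------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original module).
# The helpers above expect the last dimension to hold (real, imag) pairs, as the
# old torch.rfft / torch.fft(signal_ndim=...) API produced; the shapes here are
# only an example.
if __name__ == '__main__':
    x = torch.randn(1, 3, 8, 8, 2)            # [batch, channels, H, W, real/imag]
    centered = batch_fftshift2d(x)            # zero-frequency bin moved to the center
    restored = batch_ifftshift2d(centered)    # shift undone
    assert torch.allclose(x, restored)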
|
"""mysql.utilities.command"""
|
from __future__ import print_function
from keras.models import Sequential
from keras.layers.embeddings import Embedding
from keras.layers.core import Activation, Dense, Merge, Permute, Dropout
from keras.layers.recurrent import LSTM
from keras.datasets.data_utils import get_file
from keras.preprocessing.sequence import pad_sequences
from functools import reduce
import tarfile
import numpy as np
import re
"""
Train a memory network on the bAbI dataset.
References:
- Jason Weston, Antoine Bordes, Sumit Chopra, Tomas Mikolov, Alexander M. Rush,
"Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks",
  http://arxiv.org/abs/1502.05698
- Sainbayar Sukhbaatar, Arthur Szlam, Jason Weston, Rob Fergus,
"End-To-End Memory Networks",
http://arxiv.org/abs/1503.08895
Reaches 93% accuracy on task 'single_supporting_fact_10k' after 70 epochs.
Time per epoch: 3s on CPU (core i7).
"""
def tokenize(sent):
'''Return the tokens of a sentence including punctuation.
>>> tokenize('Bob dropped the apple. Where is the apple?')
['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?']
'''
    return [x.strip() for x in re.split(r'(\W+)?', sent) if x.strip()]
def parse_stories(lines, only_supporting=False):
'''Parse stories provided in the bAbi tasks format
If only_supporting is true, only the sentences that support the answer are kept.
'''
data = []
story = []
for line in lines:
line = line.decode('utf-8').strip()
nid, line = line.split(' ', 1)
nid = int(nid)
if nid == 1:
story = []
if '\t' in line:
q, a, supporting = line.split('\t')
q = tokenize(q)
substory = None
if only_supporting:
# Only select the related substory
supporting = map(int, supporting.split())
substory = [story[i - 1] for i in supporting]
else:
# Provide all the substories
substory = [x for x in story if x]
data.append((substory, q, a))
story.append('')
else:
sent = tokenize(line)
story.append(sent)
return data
def get_stories(f, only_supporting=False, max_length=None):
'''Given a file name, read the file, retrieve the stories, and then convert the sentences into a single story.
If max_length is supplied, any stories longer than max_length tokens will be discarded.
'''
data = parse_stories(f.readlines(), only_supporting=only_supporting)
flatten = lambda data: reduce(lambda x, y: x + y, data)
data = [(flatten(story), q, answer) for story, q, answer in data if not max_length or len(flatten(story)) < max_length]
return data
def vectorize_stories(data, word_idx, story_maxlen, query_maxlen):
X = []
Xq = []
Y = []
for story, query, answer in data:
x = [word_idx[w] for w in story]
xq = [word_idx[w] for w in query]
y = np.zeros(len(word_idx) + 1) # let's not forget that index 0 is reserved
y[word_idx[answer]] = 1
X.append(x)
Xq.append(xq)
Y.append(y)
return (pad_sequences(X, maxlen=story_maxlen),
pad_sequences(Xq, maxlen=query_maxlen), np.array(Y))
path = get_file('babi-tasks-v1-2.tar.gz',
origin='http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2.tar.gz')
tar = tarfile.open(path)
challenges = {
# QA1 with 10,000 samples
'single_supporting_fact_10k': 'tasks_1-20_v1-2/en-10k/qa1_single-supporting-fact_{}.txt',
# QA2 with 10,000 samples
'two_supporting_facts_10k': 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_{}.txt',
}
challenge_type = 'single_supporting_fact_10k'
challenge = challenges[challenge_type]
print('Extracting stories for the challenge:', challenge_type)
train_stories = get_stories(tar.extractfile(challenge.format('train')))
test_stories = get_stories(tar.extractfile(challenge.format('test')))
vocab = sorted(reduce(lambda x, y: x | y, (set(story + q + [answer]) for story, q, answer in train_stories + test_stories)))
# Reserve 0 for masking via pad_sequences
vocab_size = len(vocab) + 1
story_maxlen = max(map(len, (x for x, _, _ in train_stories + test_stories)))
query_maxlen = max(map(len, (x for _, x, _ in train_stories + test_stories)))
print('-')
print('Vocab size:', vocab_size, 'unique words')
print('Story max length:', story_maxlen, 'words')
print('Query max length:', query_maxlen, 'words')
print('Number of training stories:', len(train_stories))
print('Number of test stories:', len(test_stories))
print('-')
print('Here\'s what a "story" tuple looks like (input, query, answer):')
print(train_stories[0])
print('-')
print('Vectorizing the word sequences...')
word_idx = dict((c, i + 1) for i, c in enumerate(vocab))
inputs_train, queries_train, answers_train = vectorize_stories(train_stories, word_idx, story_maxlen, query_maxlen)
inputs_test, queries_test, answers_test = vectorize_stories(test_stories, word_idx, story_maxlen, query_maxlen)
print('-')
print('inputs: integer tensor of shape (samples, max_length)')
print('inputs_train shape:', inputs_train.shape)
print('inputs_test shape:', inputs_test.shape)
print('-')
print('queries: integer tensor of shape (samples, max_length)')
print('queries_train shape:', queries_train.shape)
print('queries_test shape:', queries_test.shape)
print('-')
print('answers: binary (1 or 0) tensor of shape (samples, vocab_size)')
print('answers_train shape:', answers_train.shape)
print('answers_test shape:', answers_test.shape)
print('-')
print('Compiling...')
# embed the input sequence into a sequence of vectors
input_encoder_m = Sequential()
input_encoder_m.add(Embedding(input_dim=vocab_size,
output_dim=64,
input_length=story_maxlen))
# output: (samples, story_maxlen, embedding_dim)
# embed the question into a single vector
question_encoder = Sequential()
question_encoder.add(Embedding(input_dim=vocab_size,
output_dim=64,
input_length=query_maxlen))
# output: (samples, query_maxlen, embedding_dim)
# compute a 'match' between input sequence elements (which are vectors)
# and the question vector
match = Sequential()
match.add(Merge([input_encoder_m, question_encoder],
mode='dot',
dot_axes=[(2,), (2,)]))
# output: (samples, story_maxlen, query_maxlen)
# embed the input into a single vector with size = story_maxlen:
input_encoder_c = Sequential()
input_encoder_c.add(Embedding(input_dim=vocab_size,
output_dim=query_maxlen,
input_length=story_maxlen))
# output: (samples, story_maxlen, query_maxlen)
# sum the match vector with the input vector:
response = Sequential()
response.add(Merge([match, input_encoder_c], mode='sum'))
# output: (samples, story_maxlen, query_maxlen)
response.add(Permute((2, 1))) # output: (samples, query_maxlen, story_maxlen)
# concatenate the match vector with the question vector,
# and do logistic regression on top
answer = Sequential()
answer.add(Merge([response, question_encoder], mode='concat', concat_axis=-1))
# the original paper uses a matrix multiplication for this reduction step.
# we choose to use a RNN instead.
answer.add(LSTM(64))
# one regularization layer -- more would probably be needed.
answer.add(Dropout(0.25))
answer.add(Dense(vocab_size))
# we output a probability distribution over the vocabulary
answer.add(Activation('softmax'))
answer.compile(optimizer='rmsprop', loss='categorical_crossentropy')
# Note: you could use a Graph model to avoid repeating the input twice
answer.fit([inputs_train, queries_train, inputs_train], answers_train,
batch_size=32,
nb_epoch=70,
show_accuracy=True,
validation_data=([inputs_test, queries_test, inputs_test], answers_test))
|
'''
A rough implementation of the following paper:
M. Nickel+, "Poincaré Embeddings for Learning Hierarchical Representations", NeurIPS2017
https://arxiv.org/pdf/1705.08039.pdf
'''
import argparse
from pathlib import Path
import itertools
import math
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import (IterableDataset,
RandomSampler,
DataLoader)
from gensim.corpora import Dictionary
from tqdm import tqdm
from torchemb.poincare_ball import PoincareBallEmbedding, PoincareBall
parser = argparse.ArgumentParser('Poincare Embeddings')
parser.add_argument('data_file', type=Path)
parser.add_argument('result_file', type=Path)
parser.add_argument('-e', '--embedding_dim', default=10, type=int)
parser.add_argument('-m', '--max_epoch', default=200, type=int)
parser.add_argument('-s', '--samples_per_iter', default=10000, type=int)
parser.add_argument('-b', '--batch_size', default=512, type=int)
parser.add_argument('-n', '--neg_size', default=10, type=int)
parser.add_argument('-c', default=1, type=float)
parser.add_argument('--lr', default=1.0e-3, type=float)
parser.add_argument('--momentum', default=0.9, type=float)
parser.add_argument('--seed', default=0, type=int)
args = parser.parse_args()
print(args)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
def load_data(data_file):
data_file = Path(data_file)
pairs = []
with data_file.open() as fin:
for line in fin:
a, b = line.strip().split('\t')
pairs.append((a, b))
d = Dictionary(pairs)
pairs = np.asarray([d.doc2idx(pair) for pair in pairs])
return d, pairs
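# Input format note (added, derived from load_data above): data_file is a plain-text
# file with one pair per line, the two items separated by a tab, e.g.
#   mammal.n.01<TAB>dog.n.01
# (these example names are illustrative only).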
dictionary, pairs = load_data(args.data_file)
print(len(dictionary), len(pairs))
class Dataset(IterableDataset):
def __init__(self, pairs, neg_size):
self.pairs = pairs
self.neg_size = neg_size
self.pair_sampler = RandomSampler(list(range(len(self.pairs))), replacement=True)
def __iter__(self):
pair_iter = itertools.cycle(iter(self.pair_sampler))
while True:
idx = next(pair_iter)
x, y = self.pairs[idx]
ys = [y] + [self.pairs[next(pair_iter)][1] for _ in range(self.neg_size - 1)]
yield x, torch.LongTensor(ys)
data = DataLoader(Dataset(pairs, args.neg_size),
batch_size=args.batch_size)
embeddings = PoincareBallEmbedding(len(dictionary), args.embedding_dim, c=args.c)
manifold = PoincareBall(c=args.c)
loss = nn.CrossEntropyLoss()
warmup_rate = 100
lr = args.lr * (args.batch_size ** 0.5) / (args.embedding_dim ** 0.5)
optimizer = optim.Adam(embeddings.parameters(), lr=lr)
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer,
gamma=math.exp(math.log(0.01) / args.max_epoch))
def train(embeddings, loss, optimizer, data, samples_per_iter):
embeddings.train()
data_iter = iter(data)
avg_loss_ = 0
N = samples_per_iter // args.batch_size
for i in tqdm(range(N)):
idx1, idx2 = next(data_iter)
x, ys = embeddings(idx1), embeddings(idx2)
assert(not torch.any(torch.isnan(x)))
assert(not torch.any(torch.isnan(ys)))
ds = manifold.distance(x[:, None, :].expand_as(ys), ys)
logits = -ds
loss_ = loss(logits, torch.zeros(len(logits), dtype=torch.long))
optimizer.zero_grad()
loss_.backward()
optimizer.step()
avg_loss_ += loss_.item()
avg_loss_ /= N
print('train loss: {:.5f}'.format(avg_loss_))
def save(embeddings, dictionary, result_file):
embeddings.eval()
result_file = Path(result_file)
result_file.parent.mkdir(parents=True, exist_ok=True)
with torch.no_grad():
with result_file.open('w') as fout:
for i, c in sorted(dictionary.dfs.items()):
e = embeddings(torch.LongTensor([i]))[0]
print(dictionary[i], *[_.item() for _ in e], sep='\t', file=fout)
for epoch in range(args.max_epoch):
print('epoch:', epoch + 1, '/', args.max_epoch)
train(embeddings, loss, optimizer, data, args.samples_per_iter)
lr_scheduler.step()
save(embeddings, dictionary, args.result_file)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def main():
from setuptools import setup
version_dict = {}
init_filename = "boxtree/version.py"
exec(compile(open(init_filename, "r").read(), init_filename, "exec"),
version_dict)
setup(name="boxtree",
version=version_dict["VERSION_TEXT"],
description="Quadtree/octree building in Python and OpenCL",
long_description=open("README.rst", "rt").read(),
author="Andreas Kloeckner",
author_email="inform@tiker.net",
license="MIT",
url="http://wiki.tiker.net/BoxTree",
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Other Audience',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python',
# We use conditional expressions, so 2.5 is the bare minimum.
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
# 3.x has not yet been tested.
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Visualization',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
],
packages=["boxtree"],
install_requires=[
"pytools>=2018.4",
"pyopencl>=2018.2.2",
"Mako>=0.7.3",
"pytest>=2.3",
"cgen>=2013.1.2",
"six",
])
if __name__ == '__main__':
main()
|
from typing import List, Optional
import aiosqlite
from shamrock.types.blockchain_format.coin import Coin
from shamrock.types.blockchain_format.sized_bytes import bytes32
from shamrock.types.coin_record import CoinRecord
from shamrock.types.full_block import FullBlock
from shamrock.util.db_wrapper import DBWrapper
from shamrock.util.ints import uint32, uint64
from shamrock.util.lru_cache import LRUCache
class CoinStore:
"""
This object handles CoinRecords in DB.
A cache is maintained for quicker access to recent coins.
"""
coin_record_db: aiosqlite.Connection
coin_record_cache: LRUCache
cache_size: uint32
db_wrapper: DBWrapper
@classmethod
async def create(cls, db_wrapper: DBWrapper, cache_size: uint32 = uint32(60000)):
self = cls()
self.cache_size = cache_size
self.db_wrapper = db_wrapper
self.coin_record_db = db_wrapper.db
await self.coin_record_db.execute("pragma journal_mode=wal")
await self.coin_record_db.execute("pragma synchronous=2")
await self.coin_record_db.execute(
(
"CREATE TABLE IF NOT EXISTS coin_record("
"coin_name text PRIMARY KEY,"
" confirmed_index bigint,"
" spent_index bigint,"
" spent int,"
" coinbase int,"
" puzzle_hash text,"
" coin_parent text,"
" amount blob,"
" timestamp bigint)"
)
)
# Useful for reorg lookups
await self.coin_record_db.execute(
"CREATE INDEX IF NOT EXISTS coin_confirmed_index on coin_record(confirmed_index)"
)
await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_spent_index on coin_record(spent_index)")
await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_spent on coin_record(spent)")
await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_puzzle_hash on coin_record(puzzle_hash)")
await self.coin_record_db.commit()
self.coin_record_cache = LRUCache(cache_size)
return self
async def new_block(self, block: FullBlock, tx_additions: List[Coin], tx_removals: List[bytes32]):
"""
        Only called for transaction blocks (which thus have rewards and transactions)
"""
if block.is_transaction_block() is False:
return None
assert block.foliage_transaction_block is not None
for coin in tx_additions:
record: CoinRecord = CoinRecord(
coin,
block.height,
uint32(0),
False,
False,
block.foliage_transaction_block.timestamp,
)
await self._add_coin_record(record, False)
included_reward_coins = block.get_included_reward_coins()
if block.height == 0:
assert len(included_reward_coins) == 0
else:
assert len(included_reward_coins) >= 2
for coin in included_reward_coins:
reward_coin_r: CoinRecord = CoinRecord(
coin,
block.height,
uint32(0),
False,
True,
block.foliage_transaction_block.timestamp,
)
await self._add_coin_record(reward_coin_r, False)
total_amount_spent: int = 0
for coin_name in tx_removals:
total_amount_spent += await self._set_spent(coin_name, block.height)
# Sanity check, already checked in block_body_validation
assert sum([a.amount for a in tx_additions]) <= total_amount_spent
# Checks DB and DiffStores for CoinRecord with coin_name and returns it
async def get_coin_record(self, coin_name: bytes32) -> Optional[CoinRecord]:
cached = self.coin_record_cache.get(coin_name)
if cached is not None:
return cached
cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE coin_name=?", (coin_name.hex(),))
row = await cursor.fetchone()
await cursor.close()
if row is not None:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
record = CoinRecord(coin, row[1], row[2], row[3], row[4], row[8])
self.coin_record_cache.put(record.coin.name(), record)
return record
return None
async def get_coins_added_at_height(self, height: uint32) -> List[CoinRecord]:
cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE confirmed_index=?", (height,))
rows = await cursor.fetchall()
await cursor.close()
coins = []
for row in rows:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
coins.append(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return coins
async def get_coins_removed_at_height(self, height: uint32) -> List[CoinRecord]:
cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE spent_index=?", (height,))
rows = await cursor.fetchall()
await cursor.close()
coins = []
for row in rows:
spent: bool = bool(row[3])
if spent:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
coin_record = CoinRecord(coin, row[1], row[2], spent, row[4], row[8])
coins.append(coin_record)
return coins
# Checks DB and DiffStores for CoinRecords with puzzle_hash and returns them
async def get_coin_records_by_puzzle_hash(
self,
include_spent_coins: bool,
puzzle_hash: bytes32,
start_height: uint32 = uint32(0),
end_height: uint32 = uint32((2 ** 32) - 1),
) -> List[CoinRecord]:
coins = set()
cursor = await self.coin_record_db.execute(
f"SELECT * from coin_record WHERE puzzle_hash=? AND confirmed_index>=? AND confirmed_index<? "
f"{'' if include_spent_coins else 'AND spent=0'}",
(puzzle_hash.hex(), start_height, end_height),
)
rows = await cursor.fetchall()
await cursor.close()
for row in rows:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
coins.add(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return list(coins)
async def get_coin_records_by_puzzle_hashes(
self,
include_spent_coins: bool,
puzzle_hashes: List[bytes32],
start_height: uint32 = uint32(0),
end_height: uint32 = uint32((2 ** 32) - 1),
) -> List[CoinRecord]:
if len(puzzle_hashes) == 0:
return []
coins = set()
puzzle_hashes_db = tuple([ph.hex() for ph in puzzle_hashes])
cursor = await self.coin_record_db.execute(
f'SELECT * from coin_record WHERE puzzle_hash in ({"?," * (len(puzzle_hashes_db) - 1)}?) '
f"AND confirmed_index>=? AND confirmed_index<? "
f"{'' if include_spent_coins else 'AND spent=0'}",
puzzle_hashes_db + (start_height, end_height),
)
rows = await cursor.fetchall()
await cursor.close()
for row in rows:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
coins.add(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return list(coins)
async def rollback_to_block(self, block_index: int):
"""
Note that block_index can be negative, in which case everything is rolled back
"""
# Update memory cache
        delete_queue: List[bytes32] = []
for coin_name, coin_record in list(self.coin_record_cache.cache.items()):
if int(coin_record.spent_block_index) > block_index:
new_record = CoinRecord(
coin_record.coin,
coin_record.confirmed_block_index,
uint32(0),
False,
coin_record.coinbase,
coin_record.timestamp,
)
self.coin_record_cache.put(coin_record.coin.name(), new_record)
if int(coin_record.confirmed_block_index) > block_index:
delete_queue.append(coin_name)
for coin_name in delete_queue:
self.coin_record_cache.remove(coin_name)
# Delete from storage
c1 = await self.coin_record_db.execute("DELETE FROM coin_record WHERE confirmed_index>?", (block_index,))
await c1.close()
c2 = await self.coin_record_db.execute(
"UPDATE coin_record SET spent_index = 0, spent = 0 WHERE spent_index>?",
(block_index,),
)
await c2.close()
# Store CoinRecord in DB and ram cache
async def _add_coin_record(self, record: CoinRecord, allow_replace: bool) -> None:
if self.coin_record_cache.get(record.coin.name()) is not None:
self.coin_record_cache.remove(record.coin.name())
cursor = await self.coin_record_db.execute(
f"INSERT {'OR REPLACE ' if allow_replace else ''}INTO coin_record VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)",
(
record.coin.name().hex(),
record.confirmed_block_index,
record.spent_block_index,
int(record.spent),
int(record.coinbase),
str(record.coin.puzzle_hash.hex()),
str(record.coin.parent_coin_info.hex()),
bytes(record.coin.amount),
record.timestamp,
),
)
await cursor.close()
# Update coin_record to be spent in DB
async def _set_spent(self, coin_name: bytes32, index: uint32) -> uint64:
current: Optional[CoinRecord] = await self.get_coin_record(coin_name)
if current is None:
raise ValueError(f"Cannot spend a coin that does not exist in db: {coin_name}")
assert not current.spent # Redundant sanity check, already checked in block_body_validation
spent: CoinRecord = CoinRecord(
current.coin,
current.confirmed_block_index,
index,
True,
current.coinbase,
current.timestamp,
) # type: ignore # noqa
await self._add_coin_record(spent, True)
return current.coin.amount
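# ------------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original module).
# It assumes DBWrapper can be built directly around an aiosqlite connection and
# that bytes32 accepts 32 raw bytes -- neither signature is defined in this file,
# so both are assumptions:
#
#   import asyncio
#
#   async def _coin_store_example() -> None:
#       connection = await aiosqlite.connect(":memory:")
#       store = await CoinStore.create(DBWrapper(connection), cache_size=uint32(1000))
#       missing = await store.get_coin_record(bytes32(b"\x00" * 32))
#       assert missing is None  # nothing has been stored yet
#       await connection.close()
#
#   asyncio.run(_coin_store_example())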
|
import streamlit as st
import plotly.express as px
import pandas as pd
import pickle
import os
import base64
from io import BytesIO
from datetime import datetime
def to_excel(df):
output = BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
df.to_excel(writer, index=False, sheet_name='Sheet1')
writer.save()
processed_data = output.getvalue()
return processed_data
def get_table_download_link(df):
val = to_excel(df)
b64 = base64.b64encode(val)
date_now = datetime.utcnow()
file_name = f'data_resultado-{date_now.strftime("%Y%m%d%H%M%S")}.xlsx'
link_download = f""" <a href="data:application/octet-stream;base64,{b64.decode()}" download="{file_name}">Download xlsx file</a> """
return link_download
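# Note (added): get_table_download_link() embeds the entire xlsx workbook as a
# base64 data URI inside the anchor tag, so it is best suited to small result sets.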
def plot_graph(df_graph):
fig = px.bar(
df_graph,
x='Labels',
y='Descrição',
# text='Text test',
title='Test',
labels={
"Labels": "Labels",
"Descrição": 'Número de coisas'
},
# width=1400,
height=500
)
return fig
def main(classificador):
st.title('Model')
process_file = st.file_uploader(
"Faça o upload do arquivo no campo abaixo.",
type=["csv", "xlsx"],
accept_multiple_files=False
)
print(process_file)
print(os.environ.get('TOKEN'))
if process_file != None:
if process_file.name.endswith('.csv'):
df = pd.read_csv(
process_file, header=0, skip_blank_lines=True, skipinitialspace=True, encoding='latin-1')
elif process_file.name.endswith('.xlsx'):
df = pd.read_excel(
process_file, engine="openpyxl")
with st.empty():
st.write('Fazendo as predições ...')
df['Labels'] = classificador.predict(
df["Descrição"].astype("unicode"))
st.write('Predições feitas com sucesso !!!')
st.dataframe(df.head(20))
df_graph = df.groupby(['Labels'], as_index=False)['Descrição'].count()
df_graph.sort_values(by=['Descrição'], inplace=True, ascending=False)
print(df_graph)
st.plotly_chart(plot_graph(df_graph), use_container_width=True)
st.text('Gerando link para download ...')
st.markdown(get_table_download_link(df), unsafe_allow_html=True)
st.success('Link gerado com sucesso.')
if __name__ == '__main__':
classificador = pickle.load(open("modelo_final.pkl", "rb"))
main(classificador)
|
import json
import logging
import pytest
import tempfile
from typing import Any, Dict, Iterator, Optional
import sdk_cmd
import sdk_install
import sdk_jobs
import sdk_plan
import sdk_utils
from security import transport_encryption
from tests import config
log = logging.getLogger(__name__)
@pytest.fixture(scope="module")
def service_account(configure_security: None) -> Iterator[Dict[str, Any]]:
"""
Sets up a service account for use with TLS.
"""
try:
name = config.SERVICE_NAME
service_account_info = transport_encryption.setup_service_account(name)
yield service_account_info
finally:
transport_encryption.cleanup_service_account(config.SERVICE_NAME, service_account_info)
@pytest.fixture(scope="module")
def dcos_ca_bundle() -> str:
"""
    Retrieves the DC/OS CA bundle and returns its contents.
"""
return transport_encryption.fetch_dcos_ca_bundle_contents().decode("ascii")
@pytest.fixture(scope="module", autouse=True)
def cassandra_service(service_account: Dict[str, Any]) -> Iterator[Dict[str, Any]]:
"""
A pytest fixture that installs the cassandra service.
On teardown, the service is uninstalled.
"""
options = {
"service": {
"name": config.SERVICE_NAME,
# Note that since we wish to toggle TLS which *REQUIRES* a service account,
# we need to install Cassandra with a service account to start with.
"service_account": service_account["name"],
"service_account_secret": service_account["secret"],
}
}
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
try:
sdk_install.install(
config.PACKAGE_NAME,
config.SERVICE_NAME,
config.DEFAULT_TASK_COUNT,
additional_options=options,
wait_for_deployment=True,
)
yield {**options, **{"package_name": config.PACKAGE_NAME}}
finally:
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
@pytest.mark.sanity
@pytest.mark.tls
@pytest.mark.dcos_min_version("1.10")
@sdk_utils.dcos_ee_only
def test_default_installation(cassandra_service: Dict[str, Any]) -> None:
"""
Tests writing, reading and deleting data over a plaintext connection.
"""
verify_client_can_write_read_and_delete()
@pytest.mark.sanity
@pytest.mark.tls
@pytest.mark.dcos_min_version("1.10")
@sdk_utils.dcos_ee_only
def test_enable_tls_and_plaintext(
cassandra_service: Dict[str, Any],
dcos_ca_bundle: str,
) -> None:
"""
Tests writing, reading and deleting data over TLS but still accepting
plaintext connections.
"""
update_service_transport_encryption(cassandra_service, enabled=True, allow_plaintext=True)
verify_client_can_write_read_and_delete(dcos_ca_bundle)
@pytest.mark.sanity
@pytest.mark.tls
@pytest.mark.dcos_min_version("1.10")
@sdk_utils.dcos_ee_only
def test_disable_plaintext(
cassandra_service: Dict[str, Any],
dcos_ca_bundle: str,
) -> None:
"""
Tests writing, reading and deleting data over a TLS connection.
"""
update_service_transport_encryption(cassandra_service, enabled=True, allow_plaintext=False)
verify_client_can_write_read_and_delete(dcos_ca_bundle)
@pytest.mark.sanity
@pytest.mark.tls
@pytest.mark.dcos_min_version("1.10")
@sdk_utils.dcos_ee_only
def test_disable_tls(cassandra_service: Dict[str, Any]) -> None:
"""
Tests writing, reading and deleting data over a plaintext connection.
"""
update_service_transport_encryption(cassandra_service, enabled=False, allow_plaintext=False)
verify_client_can_write_read_and_delete()
@pytest.mark.sanity
@pytest.mark.tls
@pytest.mark.dcos_min_version("1.10")
@sdk_utils.dcos_ee_only
def test_enabling_then_disabling_tls(
cassandra_service: Dict[str, Any],
dcos_ca_bundle: str,
) -> None:
# Write data.
write_data_job = config.get_write_data_job()
with sdk_jobs.InstallJobContext([write_data_job]):
sdk_jobs.run_job(write_data_job)
# Turn TLS on and off again.
update_service_transport_encryption(cassandra_service, enabled=True, allow_plaintext=True)
update_service_transport_encryption(cassandra_service, enabled=True, allow_plaintext=False)
update_service_transport_encryption(cassandra_service, enabled=False, allow_plaintext=False)
# Make sure data is still there.
verify_data_job = config.get_verify_data_job()
with sdk_jobs.InstallJobContext([verify_data_job]):
sdk_jobs.run_job(verify_data_job)
def verify_client_can_write_read_and_delete(
dcos_ca_bundle: Optional[str] = None,
) -> None:
write_data_job = config.get_write_data_job(dcos_ca_bundle=dcos_ca_bundle)
verify_data_job = config.get_verify_data_job(dcos_ca_bundle=dcos_ca_bundle)
delete_data_job = config.get_delete_data_job(dcos_ca_bundle=dcos_ca_bundle)
verify_deletion_job = config.get_verify_deletion_job(dcos_ca_bundle=dcos_ca_bundle)
with sdk_jobs.InstallJobContext(
[write_data_job, verify_data_job, delete_data_job, verify_deletion_job]
):
sdk_jobs.run_job(write_data_job)
sdk_jobs.run_job(verify_data_job)
sdk_jobs.run_job(delete_data_job)
sdk_jobs.run_job(verify_deletion_job)
def update_service_transport_encryption(
cassandra_service: Dict[str, Any],
enabled: bool = False,
allow_plaintext: bool = False
) -> None:
update_options = {
"service": {
"security": {
"transport_encryption": {"enabled": enabled, "allow_plaintext": allow_plaintext}
}
}
}
update_service(cassandra_service, update_options)
def update_service(service: Dict[str, Any], options: Dict[str, Any]) -> None:
with tempfile.NamedTemporaryFile("w", suffix=".json") as f:
options_path = f.name
log.info("Writing updated options to %s", options_path)
json.dump(options, f)
f.flush()
cmd = ["update", "start", "--options={}".format(options_path)]
sdk_cmd.svc_cli(service["package_name"], service["service"]["name"], " ".join(cmd))
# An update plan is a deploy plan
sdk_plan.wait_for_kicked_off_deployment(service["service"]["name"])
sdk_plan.wait_for_completed_deployment(service["service"]["name"])
|
def frequencyQueries(queries):
collection = {}
cumOperationThree = []
for i in range(0, len(queries)):
operation = queries[i][0]
numToAdd = queries[i][1]
if operation == 1:
if numToAdd in collection.keys():
collection[numToAdd] += 1
else:
collection[numToAdd] = 1
# print(collection)
elif operation == 2:
if numToAdd in collection.keys() and collection[numToAdd] > 0:
collection[numToAdd] -= 1
# print(collection)
elif operation == 3:
found = False
for key in collection:
if numToAdd == collection[key]:
cumOperationThree.append(1)
found = True
break
if not found:
cumOperationThree.append(0)
print(cumOperationThree)
return cumOperationThree
Query = [[1, 3], [2, 3], [3, 2], [1, 4], [1, 5],
[1, 5], [1, 4], [3, 2], [2, 4], [3, 2]]
frequencyQueries(Query)
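# Expected output for the Query list above: [0, 1, 1]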
|
from datetime import timedelta
from typing import Any, Dict, Optional, Sequence
from physlearn.names import DataBaseName, LabelType, NonSignalDataType, SignalType
from physlearn.typing import Array
class Signal:
"""A signal base class.
Args:
start_time: The time from the beginning of the record until the beginning of the
signal.
end_time: The time from the beginning of the record until the beginning of the
signal.
time_axis: A tensor with the time axis of the signal, shape: [1, time steps].
signal: The signal tensor of shape [channels, time steps].
signal_type: What kind of signal is it?
channel_names: sequence of channel names in the signal. For example: names of
different EEG electrodes.
"""
def __init__(
self,
start_time: timedelta,
end_time: timedelta,
time_axis: Array,
signal: Array,
signal_type: SignalType,
channel_names: Optional[Sequence[str]] = None,
):
"""
Args:
start_time: The time from the beginning of the record until the beginning of
the signal.
end_time: The time from the beginning of the record until the beginning of
the signal.
time_axis: A tensor with the time axis of the signal,
with shape: [1, time steps].
signal: The signal tensor of shape [channels, time steps].
signal_type: What kind of signal is it?
channel_names: Names of the channels in the signal. For example: names of
different EEG electrodes.
"""
self._start_time = start_time
self._end_time = end_time
self._time_axis = time_axis
self._signal = signal
self._signal_type = signal_type
self._channel_names = channel_names
self._num_channels = self.get_num_channels()
self.check_dimensions()
self.check_times()
self._check_num_channels()
def __getitem__(self, idx):
"""
Syntactic sugar, allows getting data from a Signal object like:
>>> signal[1:2, :1000]
"""
return self.signal[idx].copy()
@property
def start_time(self):
""" """
return self._start_time
@property
def end_time(self):
""" """
return self._end_time
@property
def time_axis(self):
""" """
return self._time_axis.copy()
@property
def signal(self):
""" """
return self._signal.copy()
@property
def signal_type(self):
""" """
return self._signal_type
@property
def channel_names(self):
""" """
return self._channel_names
@property
def num_channels(self):
"""
Number of channels in the signal.
"""
return self._num_channels
def get_num_channels(self):
return self.signal.shape[0]
def check_dimensions(self):
"""Signals can either be of the same length as the time axis or empty"""
if not self.signal.size:
return
elif self.signal.shape[1] == self.time_axis.shape[1]:
return
else:
raise ValueError(
"Illegal signal dimensions, the signal has to be of the "
"same length as the time axis or empty"
)
def check_times(self):
"""End time greater then start time"""
if self.end_time <= self.start_time:
raise ValueError("End time can't be before or same as start time")
def __eq__(self, other):
return bool(
self.start_time == other.start_time
and self.end_time == other.end_time
and (self.time_axis == other.time_axis).all()
and (self.signal == other.signal).all()
and self.signal_type == other.signal_type
)
def _check_num_channels(self):
"""
Checks number of channel match channel names
"""
if self.channel_names:
if self.num_channels != len(self.channel_names):
raise ValueError("Number of channels mismatch channel names")
def find_channel(self, desired_channel_name: str):
"""
Search for a given channel name in the signal's channel names.
Raises a value error if the channel was not found
Returns the index of the given channel in the list of channel names
of the signal.
"""
if desired_channel_name not in self.channel_names:
            raise ValueError(
                f"Channel {desired_channel_name} not found in channel names"
            )
else:
return tuple(
i for i, e in enumerate(self.channel_names) if e == desired_channel_name
)
def signal_like_this(
self,
start_time: timedelta = None,
end_time: timedelta = None,
time_axis: Array = None,
signal: Array = None,
signal_type: SignalType = None,
channel_names: Optional[Sequence[str]] = None,
):
if not type(self) == Signal:
raise NotImplementedError("Inheriting classes should override this method")
start_time = start_time if start_time else self.start_time
end_time = end_time if end_time else self.end_time
time_axis = time_axis if time_axis is not None else self.time_axis
signal = signal if signal is not None else self.signal
signal_type = signal_type if signal_type else self.signal_type
channel_names = channel_names if channel_names else self.channel_names
return Signal(
start_time=start_time,
end_time=end_time,
time_axis=time_axis,
signal=signal,
signal_type=signal_type,
channel_names=channel_names,
)
class Sample:
"""
A single sample from a single patient from a single database.
May include a full or a partial recording.
"""
def __init__(
self,
db: DataBaseName,
db_version: str,
patient_id: int,
record_id: int,
sample_id: int,
signals: Dict[SignalType, Signal],
data: Dict[NonSignalDataType, Array],
metadata: Dict[NonSignalDataType, Any],
label: Dict[LabelType, Array],
):
"""
Args:
db: Name of the database the sample is taken from
db_version: Version of the database the sample is taken from
patient_id: ID of the patient the sample is taken from in the database
record_id: ID of the record within the patient
sample_id: ID of the sample within the record
signals: Signal data of the sample as a dictionary with the structure:
signals = {
signal_type1: signal1,
signal_type2: signal2,
...
}
data: Non-signal data as tensors (clinical, socioeconomic, etc.)
metadata: Non-signal data (clinical, socioeconomic, etc.)
label: The label of the sample for a specific supervised learning task
Note:
`data` and `metadata` may look alike but they are used for completely
different purposes. `data` contains tensors, ready to be used as
features for ML models while `metadata` is unstructured and used for
internal operations of the system like labeling.
"""
self._db = db
self._db_version = db_version
self._patient_id = patient_id
self._record_id = record_id
self._sample_id = sample_id
self._signals = signals
self._data = data
self._metadata = metadata
self._label = label
@property
def record_id(self):
return self._record_id
def sample_like_this(
self,
db: Optional[DataBaseName] = None,
db_version: Optional[str] = None,
patient_id: Optional[int] = None,
sample_id: Optional[int] = None,
signals: Optional[Dict[SignalType, Signal]] = None,
data: Optional[Dict[NonSignalDataType, Array]] = None,
metadata: Optional[Dict[NonSignalDataType, Any]] = None,
label: Optional[Dict[LabelType, Array]] = None,
record_id: Optional[int] = None,
**kwargs,
):
if not type(self) == Sample:
raise NotImplementedError("Inheriting classes should override this method")
db = db if db else self.db
db_version = db_version if db_version else self.db_version
patient_id = patient_id if patient_id else self.patient_id
sample_id = sample_id if sample_id else self.sample_id
signals = signals if signals else self.signals
data = data if data else self.data
metadata = metadata if metadata else self.metadata
label = label if label else self.label
record_id = record_id if record_id else self.record_id
return Sample(
db=db,
db_version=db_version,
patient_id=patient_id,
record_id=record_id,
sample_id=sample_id,
signals=signals,
data=data,
metadata=metadata,
label=label,
)
@property
def db(self):
""" """
return self._db
@property
def db_version(self):
""" """
return self._db_version
@property
def patient_id(self):
""" """
return self._patient_id
@property
def sample_id(self):
""" """
return self._sample_id
@property
def signals(self):
""" """
return self._signals
@property
def data(self):
""" """
return self._data
@property
def metadata(self):
""" """
return self._metadata
@property
def label(self):
""" """
return self._label
def __eq__(self, other):
return all(
[
self.db == other.db,
self.db_version == other.db_version,
self.patient_id == other.patient_id,
self.record_id == other.record_id,
self.sample_id == other.sample_id,
self.signals == other.signals,
compare_tensor_dict(self.data, other.data),
compare_tensor_dict(self.label, other.label),
]
)
def compare_tensor_dict(d1: Dict[Any, Array], d2: Dict[Any, Array]):
"""Compare two dictionaries of tensors
Args:
d1: First dictionary
d2: Second dictionary
Returns:
True if same, false otherwise.
"""
if not d1 and not d2:
return True
return (
all([(t1 == t2).all() for t1, t2 in zip(d1.values(), d2.values())])
and d1.keys() == d2.keys()
)
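# ------------------------------------------------------------------------------
# Minimal construction sketch for Signal (added for illustration; not part of the
# original module). It assumes Array is numpy-compatible and that SignalType is an
# Enum with at least one member -- both are assumptions, the real definitions live
# in physlearn.typing and physlearn.names:
#
#   import numpy as np
#
#   t = np.linspace(0, 1, 250)[None, :]        # time axis, shape [1, time steps]
#   sig = np.random.randn(2, 250)              # two channels, shape [channels, time steps]
#   s = Signal(
#       start_time=timedelta(seconds=0),
#       end_time=timedelta(seconds=1),
#       time_axis=t,
#       signal=sig,
#       signal_type=next(iter(SignalType)),    # placeholder member; assumption
#       channel_names=("C3", "C4"),
#   )
#   s.find_channel("C4")                       # -> (1,)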
|
import tensorflow as tf
import os
import time
import json
import pandas as pd
"""
Script to train a sequential NN.
Takes a df, filters based on `condition` (default None), and separates into test/train based on time
NN trains on training data, all results output to disk
"""
def train_test_split(df,filter_condition,train_condition, test_condition,features,targets):
"""
Separate df into a train and test set.
Returns training and testing dfs as well as split into inputs/outputs
"""
#Filter dataset
if filter_condition is not None:
df_filtered = df.query(filter_condition)
else:
df_filtered = df
#Select train/test data
training_data = df_filtered.query(train_condition)
test_data = df_filtered.query(test_condition)
#Separate into features/targets
x_train = training_data[features]
y_train = training_data[targets]
x_test = test_data[features]
y_test = test_data[targets]
return x_train,y_train,x_test,y_test,training_data, test_data #
def create_normalizer_layer(x_train):
#Create a normaliser layer
print ('Creating a normalization layer')
normalizer = tf.keras.layers.Normalization(axis=-1)
normalizer.adapt(x_train)
return normalizer
def train_NN(x_train,y_train,normalizer):
#Check GPU available
print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
    #Create a basic NN model. The layer width is derived from the training data so the
    #function does not rely on the module-level `features` list.
    n_features = x_train.shape[1]
    model = tf.keras.Sequential([
    normalizer,
    tf.keras.layers.Dense(n_features // 2, activation='relu', input_shape=(n_features,), name='layer1'),
    tf.keras.layers.Dense(1, name='output')
    ])
#Compile it
print ('Compiling model')
    model.compile(optimizer='adam',
                  loss='mse',
                  metrics=['mae'])  #accuracy is not a meaningful metric for a regression target
#Train it
print('Training model')
history = model.fit(x_train, y_train, epochs=100, batch_size=10000)
return history, model
def write_outputs(output_path,model,history,x_train,y_train,x_test,y_test,df_train,df_test):
    #Create a unique output directory named after the current timestamp
    run_id = int(time.time())
    fout = output_path+f'ML_{run_id}/'
    os.makedirs(fout)
    print ('Writing outputs to dir: ', fout)
#NN
model.save(fout+'trained_model')
history_dict = history.history
json.dump(history_dict, open(fout+'history.json', 'w'))
#Data
    #Probably overkill saving all of these
    x_train.to_pickle(fout + "x_train.pkl")
    y_train.to_pickle(fout + "y_train.pkl")
    x_test.to_pickle(fout + "x_test.pkl")
    y_test.to_pickle(fout + "y_test.pkl")
df_train.to_pickle(fout + "df_train.pkl")
df_test.to_pickle(fout + "df_test.pkl")
def pipeline(input_file,output_path,filter_condition,train_condition, test_condition,features):
#Load the data
print('Loading the data')
df = pd.read_pickle(input_file)
#Process into train/test
targets = ['MODIS_LST']
x_train,y_train,x_test,y_test,df_train,df_test = train_test_split(df,filter_condition,train_condition, test_condition,features,targets)
#Train NN
normalizer = create_normalizer_layer(x_train)
history,model = train_NN(x_train,y_train,normalizer)
#Save trained NN in new dir, along with train/test sets
write_outputs(output_path,model,history,x_train,y_train,x_test,y_test,df_train,df_test)
#Parameters
#IO
input_file = '/network/group/aopp/predict/TIP016_PAXTON_RPSPEEDY/ML4L/ECMWF_files/raw/MODIS_ERA_joined_data_averaged.pkl'
output_path = '/network/group/aopp/predict/TIP016_PAXTON_RPSPEEDY/ML4L/'
#Pre Processing
filter_condition = None
train_condition = 'time < "2019-01-01 00:00:00"'
test_condition = 'time >= "2020-01-01 00:00:00"'
features = ['sp', 'msl', 'u10', 'v10','t2m',
'aluvp', 'aluvd', 'alnip', 'alnid', 'cl',
'cvl', 'cvh', 'slt', 'sdfor', 'z', 'sd', 'sdor', 'isor', 'anor', 'slor',
'd2m', 'lsm', 'fal']
#Go
pipeline(input_file,output_path,filter_condition,train_condition, test_condition,features)
|
from __future__ import unicode_literals
import vmraid
def execute():
vmraid.reload_doc("workflow", "doctype", "workflow_transition")
vmraid.db.sql("update `tabWorkflow Transition` set allow_self_approval=1")
|
def get_urls(releases, **kwargs):
    # PyPI has an old bugtracker_url which points to a separate repo and causes invalid
    # changelogs to be generated by this tool.
ret = {'https://raw.githubusercontent.com/vertexproject/synapse/master/CHANGELOG.md'}
return ret, set()
|
from __future__ import absolute_import, division, print_function
import collections
from contextlib import closing
import errno
import gzip
import logging
import os
import re
import socket
import ssl
import sys
from tornado.escape import to_unicode
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.httputil import HTTPHeaders, ResponseStartLine
from tornado.ioloop import IOLoop
from tornado.log import gen_log
from tornado.concurrent import Future
from tornado.netutil import Resolver, bind_sockets
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from tornado.test.httpclient_test import ChunkHandler, CountdownHandler, HelloWorldHandler, RedirectHandler
from tornado.test import httpclient_test
from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, ExpectLog
from tornado.test.util import skipOnTravis, skipIfNoIPv6, refusing_port, unittest, skipBefore35, exec_test
from tornado.web import RequestHandler, Application, asynchronous, url, stream_request_body
class SimpleHTTPClientCommonTestCase(httpclient_test.HTTPClientCommonTestCase):
def get_http_client(self):
client = SimpleAsyncHTTPClient(force_instance=True)
self.assertTrue(isinstance(client, SimpleAsyncHTTPClient))
return client
class TriggerHandler(RequestHandler):
def initialize(self, queue, wake_callback):
self.queue = queue
self.wake_callback = wake_callback
@asynchronous
def get(self):
logging.debug("queuing trigger")
self.queue.append(self.finish)
if self.get_argument("wake", "true") == "true":
self.wake_callback()
class HangHandler(RequestHandler):
@asynchronous
def get(self):
pass
class ContentLengthHandler(RequestHandler):
def get(self):
self.set_header("Content-Length", self.get_argument("value"))
self.write("ok")
class HeadHandler(RequestHandler):
def head(self):
self.set_header("Content-Length", "7")
class OptionsHandler(RequestHandler):
def options(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.write("ok")
class NoContentHandler(RequestHandler):
def get(self):
self.set_status(204)
self.finish()
class SeeOtherPostHandler(RequestHandler):
def post(self):
redirect_code = int(self.request.body)
assert redirect_code in (302, 303), "unexpected body %r" % self.request.body
self.set_header("Location", "/see_other_get")
self.set_status(redirect_code)
class SeeOtherGetHandler(RequestHandler):
def get(self):
if self.request.body:
raise Exception("unexpected body %r" % self.request.body)
self.write("ok")
class HostEchoHandler(RequestHandler):
def get(self):
self.write(self.request.headers["Host"])
class NoContentLengthHandler(RequestHandler):
@asynchronous
def get(self):
if self.request.version.startswith('HTTP/1'):
# Emulate the old HTTP/1.0 behavior of returning a body with no
# content-length. Tornado handles content-length at the framework
# level so we have to go around it.
stream = self.request.connection.detach()
stream.write(b"HTTP/1.0 200 OK\r\n\r\n"
b"hello")
stream.close()
else:
self.finish('HTTP/1 required')
class EchoPostHandler(RequestHandler):
def post(self):
self.write(self.request.body)
@stream_request_body
class RespondInPrepareHandler(RequestHandler):
def prepare(self):
self.set_status(403)
self.finish("forbidden")
class SimpleHTTPClientTestMixin(object):
def get_app(self):
# callable objects to finish pending /trigger requests
self.triggers = collections.deque()
return Application([
url("/trigger", TriggerHandler, dict(queue=self.triggers,
wake_callback=self.stop)),
url("/chunk", ChunkHandler),
url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
url("/hang", HangHandler),
url("/hello", HelloWorldHandler),
url("/content_length", ContentLengthHandler),
url("/head", HeadHandler),
url("/options", OptionsHandler),
url("/no_content", NoContentHandler),
url("/see_other_post", SeeOtherPostHandler),
url("/see_other_get", SeeOtherGetHandler),
url("/host_echo", HostEchoHandler),
url("/no_content_length", NoContentLengthHandler),
url("/echo_post", EchoPostHandler),
url("/respond_in_prepare", RespondInPrepareHandler),
url("/redirect", RedirectHandler),
], gzip=True)
def test_singleton(self):
# Class "constructor" reuses objects on the same IOLoop
self.assertTrue(SimpleAsyncHTTPClient() is
SimpleAsyncHTTPClient())
# unless force_instance is used
self.assertTrue(SimpleAsyncHTTPClient() is not
SimpleAsyncHTTPClient(force_instance=True))
# different IOLoops use different objects
with closing(IOLoop()) as io_loop2:
client1 = self.io_loop.run_sync(gen.coroutine(SimpleAsyncHTTPClient))
client2 = io_loop2.run_sync(gen.coroutine(SimpleAsyncHTTPClient))
self.assertTrue(client1 is not client2)
def test_connection_limit(self):
with closing(self.create_client(max_clients=2)) as client:
self.assertEqual(client.max_clients, 2)
seen = []
# Send 4 requests. Two can be sent immediately, while the others
# will be queued
for i in range(4):
client.fetch(self.get_url("/trigger"),
lambda response, i=i: (seen.append(i), self.stop()))
self.wait(condition=lambda: len(self.triggers) == 2)
self.assertEqual(len(client.queue), 2)
# Finish the first two requests and let the next two through
self.triggers.popleft()()
self.triggers.popleft()()
self.wait(condition=lambda: (len(self.triggers) == 2 and
len(seen) == 2))
self.assertEqual(set(seen), set([0, 1]))
self.assertEqual(len(client.queue), 0)
# Finish all the pending requests
self.triggers.popleft()()
self.triggers.popleft()()
self.wait(condition=lambda: len(seen) == 4)
self.assertEqual(set(seen), set([0, 1, 2, 3]))
self.assertEqual(len(self.triggers), 0)
def test_redirect_connection_limit(self):
# following redirects should not consume additional connections
with closing(self.create_client(max_clients=1)) as client:
client.fetch(self.get_url('/countdown/3'), self.stop,
max_redirects=3)
response = self.wait()
response.rethrow()
def test_gzip(self):
# All the tests in this file should be using gzip, but this test
# ensures that it is in fact getting compressed.
# Setting Accept-Encoding manually bypasses the client's
# decompression so we can see the raw data.
response = self.fetch("/chunk", use_gzip=False,
headers={"Accept-Encoding": "gzip"})
self.assertEqual(response.headers["Content-Encoding"], "gzip")
self.assertNotEqual(response.body, b"asdfqwer")
# Our test data gets bigger when gzipped. Oops. :)
# Chunked encoding bypasses the MIN_LENGTH check.
self.assertEqual(len(response.body), 34)
f = gzip.GzipFile(mode="r", fileobj=response.buffer)
self.assertEqual(f.read(), b"asdfqwer")
def test_max_redirects(self):
response = self.fetch("/countdown/5", max_redirects=3)
self.assertEqual(302, response.code)
# We requested 5, followed three redirects for 4, 3, 2, then the last
# unfollowed redirect is to 1.
self.assertTrue(response.request.url.endswith("/countdown/5"))
self.assertTrue(response.effective_url.endswith("/countdown/2"))
self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
def test_header_reuse(self):
# Apps may reuse a headers object if they are only passing in constant
# headers like user-agent. The header object should not be modified.
headers = HTTPHeaders({'User-Agent': 'Foo'})
self.fetch("/hello", headers=headers)
self.assertEqual(list(headers.get_all()), [('User-Agent', 'Foo')])
def test_see_other_redirect(self):
for code in (302, 303):
response = self.fetch("/see_other_post", method="POST", body="%d" % code)
self.assertEqual(200, response.code)
self.assertTrue(response.request.url.endswith("/see_other_post"))
self.assertTrue(response.effective_url.endswith("/see_other_get"))
# request is the original request, is a POST still
self.assertEqual("POST", response.request.method)
@skipOnTravis
def test_connect_timeout(self):
timeout = 0.1
timeout_min, timeout_max = 0.099, 1.0
class TimeoutResolver(Resolver):
def resolve(self, *args, **kwargs):
return Future() # never completes
with closing(self.create_client(resolver=TimeoutResolver())) as client:
client.fetch(self.get_url('/hello'), self.stop,
connect_timeout=timeout)
response = self.wait()
self.assertEqual(response.code, 599)
self.assertTrue(timeout_min < response.request_time < timeout_max,
response.request_time)
self.assertEqual(str(response.error), "HTTP 599: Timeout while connecting")
@skipOnTravis
def test_request_timeout(self):
timeout = 0.1
timeout_min, timeout_max = 0.099, 0.15
if os.name == 'nt':
timeout = 0.5
timeout_min, timeout_max = 0.4, 0.6
response = self.fetch('/trigger?wake=false', request_timeout=timeout)
self.assertEqual(response.code, 599)
self.assertTrue(timeout_min < response.request_time < timeout_max,
response.request_time)
self.assertEqual(str(response.error), "HTTP 599: Timeout during request")
# trigger the hanging request to let it clean up after itself
self.triggers.popleft()()
@skipIfNoIPv6
def test_ipv6(self):
try:
[sock] = bind_sockets(None, '::1', family=socket.AF_INET6)
port = sock.getsockname()[1]
self.http_server.add_socket(sock)
except socket.gaierror as e:
if e.args[0] == socket.EAI_ADDRFAMILY:
# python supports ipv6, but it's not configured on the network
# interface, so skip this test.
return
raise
url = '%s://[::1]:%d/hello' % (self.get_protocol(), port)
# ipv6 is currently enabled by default but can be disabled
self.http_client.fetch(url, self.stop, allow_ipv6=False)
response = self.wait()
self.assertEqual(response.code, 599)
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertEqual(response.body, b"Hello world!")
def xtest_multiple_content_length_accepted(self):
response = self.fetch("/content_length?value=2,2")
self.assertEqual(response.body, b"ok")
response = self.fetch("/content_length?value=2,%202,2")
self.assertEqual(response.body, b"ok")
response = self.fetch("/content_length?value=2,4")
self.assertEqual(response.code, 599)
response = self.fetch("/content_length?value=2,%202,3")
self.assertEqual(response.code, 599)
def test_head_request(self):
response = self.fetch("/head", method="HEAD")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["content-length"], "7")
self.assertFalse(response.body)
def test_options_request(self):
response = self.fetch("/options", method="OPTIONS")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["content-length"], "2")
self.assertEqual(response.headers["access-control-allow-origin"], "*")
self.assertEqual(response.body, b"ok")
def test_no_content(self):
response = self.fetch("/no_content")
self.assertEqual(response.code, 204)
# 204 status shouldn't have a content-length
#
# Tests with a content-length header are included below
# in HTTP204NoContentTestCase.
self.assertNotIn("Content-Length", response.headers)
def test_host_header(self):
host_re = re.compile(b"^localhost:[0-9]+$")
response = self.fetch("/host_echo")
self.assertTrue(host_re.match(response.body))
url = self.get_url("/host_echo").replace("http://", "http://me:secret@")
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertTrue(host_re.match(response.body), response.body)
def test_connection_refused(self):
cleanup_func, port = refusing_port()
self.addCleanup(cleanup_func)
with ExpectLog(gen_log, ".*", required=False):
self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
response = self.wait()
self.assertEqual(599, response.code)
if sys.platform != 'cygwin':
# cygwin returns EPERM instead of ECONNREFUSED here
contains_errno = str(errno.ECONNREFUSED) in str(response.error)
if not contains_errno and hasattr(errno, "WSAECONNREFUSED"):
contains_errno = str(errno.WSAECONNREFUSED) in str(response.error)
self.assertTrue(contains_errno, response.error)
# This is usually "Connection refused".
# On windows, strerror is broken and returns "Unknown error".
expected_message = os.strerror(errno.ECONNREFUSED)
self.assertTrue(expected_message in str(response.error),
response.error)
def test_queue_timeout(self):
with closing(self.create_client(max_clients=1)) as client:
client.fetch(self.get_url('/trigger'), self.stop,
request_timeout=10)
# Wait for the trigger request to block, not complete.
self.wait()
client.fetch(self.get_url('/hello'), self.stop,
connect_timeout=0.1)
response = self.wait()
self.assertEqual(response.code, 599)
self.assertTrue(response.request_time < 1, response.request_time)
self.assertEqual(str(response.error), "HTTP 599: Timeout in request queue")
self.triggers.popleft()()
self.wait()
def test_no_content_length(self):
response = self.fetch("/no_content_length")
if response.body == b"HTTP/1 required":
self.skipTest("requires HTTP/1.x")
else:
self.assertEquals(b"hello", response.body)
def sync_body_producer(self, write):
write(b'1234')
write(b'5678')
@gen.coroutine
def async_body_producer(self, write):
yield write(b'1234')
yield gen.Task(IOLoop.current().add_callback)
yield write(b'5678')
def test_sync_body_producer_chunked(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.sync_body_producer)
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_sync_body_producer_content_length(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.sync_body_producer,
headers={'Content-Length': '8'})
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_async_body_producer_chunked(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.async_body_producer)
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_async_body_producer_content_length(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.async_body_producer,
headers={'Content-Length': '8'})
response.rethrow()
self.assertEqual(response.body, b"12345678")
@skipBefore35
def test_native_body_producer_chunked(self):
namespace = exec_test(globals(), locals(), """
async def body_producer(write):
await write(b'1234')
await gen.Task(IOLoop.current().add_callback)
await write(b'5678')
""")
response = self.fetch("/echo_post", method="POST",
body_producer=namespace["body_producer"])
response.rethrow()
self.assertEqual(response.body, b"12345678")
@skipBefore35
def test_native_body_producer_content_length(self):
namespace = exec_test(globals(), locals(), """
async def body_producer(write):
await write(b'1234')
await gen.Task(IOLoop.current().add_callback)
await write(b'5678')
""")
response = self.fetch("/echo_post", method="POST",
body_producer=namespace["body_producer"],
headers={'Content-Length': '8'})
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_100_continue(self):
response = self.fetch("/echo_post", method="POST",
body=b"1234",
expect_100_continue=True)
self.assertEqual(response.body, b"1234")
def test_100_continue_early_response(self):
def body_producer(write):
raise Exception("should not be called")
response = self.fetch("/respond_in_prepare", method="POST",
body_producer=body_producer,
expect_100_continue=True)
self.assertEqual(response.code, 403)
def test_streaming_follow_redirects(self):
# When following redirects, header and streaming callbacks
# should only be called for the final result.
# TODO(bdarnell): this test belongs in httpclient_test instead of
# simple_httpclient_test, but it fails with the version of libcurl
# available on travis-ci. Move it when that has been upgraded
# or we have a better framework to skip tests based on curl version.
headers = []
chunks = []
self.fetch("/redirect?url=/hello",
header_callback=headers.append,
streaming_callback=chunks.append)
chunks = list(map(to_unicode, chunks))
self.assertEqual(chunks, ['Hello world!'])
# Make sure we only got one set of headers.
num_start_lines = len([h for h in headers if h.startswith("HTTP/")])
self.assertEqual(num_start_lines, 1)
class SimpleHTTPClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPTestCase):
def setUp(self):
super(SimpleHTTPClientTestCase, self).setUp()
self.http_client = self.create_client()
def create_client(self, **kwargs):
return SimpleAsyncHTTPClient(force_instance=True, **kwargs)
class SimpleHTTPSClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPSTestCase):
def setUp(self):
super(SimpleHTTPSClientTestCase, self).setUp()
self.http_client = self.create_client()
def create_client(self, **kwargs):
return SimpleAsyncHTTPClient(force_instance=True,
defaults=dict(validate_cert=False),
**kwargs)
def test_ssl_options(self):
resp = self.fetch("/hello", ssl_options={})
self.assertEqual(resp.body, b"Hello world!")
@unittest.skipIf(not hasattr(ssl, 'SSLContext'),
'ssl.SSLContext not present')
def test_ssl_context(self):
resp = self.fetch("/hello",
ssl_options=ssl.SSLContext(ssl.PROTOCOL_SSLv23))
self.assertEqual(resp.body, b"Hello world!")
def test_ssl_options_handshake_fail(self):
with ExpectLog(gen_log, "SSL Error|Uncaught exception",
required=False):
resp = self.fetch(
"/hello", ssl_options=dict(cert_reqs=ssl.CERT_REQUIRED))
self.assertRaises(ssl.SSLError, resp.rethrow)
@unittest.skipIf(not hasattr(ssl, 'SSLContext'),
'ssl.SSLContext not present')
def test_ssl_context_handshake_fail(self):
with ExpectLog(gen_log, "SSL Error|Uncaught exception"):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
resp = self.fetch("/hello", ssl_options=ctx)
self.assertRaises(ssl.SSLError, resp.rethrow)
def test_error_logging(self):
# No stack traces are logged for SSL errors (in this case,
# failure to validate the testing self-signed cert).
# The SSLError is exposed through ssl.SSLError.
with ExpectLog(gen_log, '.*') as expect_log:
response = self.fetch("/", validate_cert=True)
self.assertEqual(response.code, 599)
self.assertIsInstance(response.error, ssl.SSLError)
self.assertFalse(expect_log.logged_stack)
class CreateAsyncHTTPClientTestCase(AsyncTestCase):
def setUp(self):
super(CreateAsyncHTTPClientTestCase, self).setUp()
self.saved = AsyncHTTPClient._save_configuration()
def tearDown(self):
AsyncHTTPClient._restore_configuration(self.saved)
super(CreateAsyncHTTPClientTestCase, self).tearDown()
def test_max_clients(self):
AsyncHTTPClient.configure(SimpleAsyncHTTPClient)
with closing(AsyncHTTPClient(force_instance=True)) as client:
self.assertEqual(client.max_clients, 10)
with closing(AsyncHTTPClient(
max_clients=11, force_instance=True)) as client:
self.assertEqual(client.max_clients, 11)
# Now configure max_clients statically and try overriding it
# with each way max_clients can be passed
AsyncHTTPClient.configure(SimpleAsyncHTTPClient, max_clients=12)
with closing(AsyncHTTPClient(force_instance=True)) as client:
self.assertEqual(client.max_clients, 12)
with closing(AsyncHTTPClient(
max_clients=13, force_instance=True)) as client:
self.assertEqual(client.max_clients, 13)
with closing(AsyncHTTPClient(
max_clients=14, force_instance=True)) as client:
self.assertEqual(client.max_clients, 14)
class HTTP100ContinueTestCase(AsyncHTTPTestCase):
def respond_100(self, request):
self.http1 = request.version.startswith('HTTP/1.')
if not self.http1:
request.connection.write_headers(ResponseStartLine('', 200, 'OK'),
HTTPHeaders())
request.connection.finish()
return
self.request = request
self.request.connection.stream.write(
b"HTTP/1.1 100 CONTINUE\r\n\r\n",
self.respond_200)
def respond_200(self):
self.request.connection.stream.write(
b"HTTP/1.1 200 OK\r\nContent-Length: 1\r\n\r\nA",
self.request.connection.stream.close)
def get_app(self):
# Not a full Application, but works as an HTTPServer callback
return self.respond_100
def test_100_continue(self):
res = self.fetch('/')
if not self.http1:
self.skipTest("requires HTTP/1.x")
self.assertEqual(res.body, b'A')
class HTTP204NoContentTestCase(AsyncHTTPTestCase):
def respond_204(self, request):
self.http1 = request.version.startswith('HTTP/1.')
if not self.http1:
# Close the request cleanly in HTTP/2; it will be skipped anyway.
request.connection.write_headers(ResponseStartLine('', 200, 'OK'),
HTTPHeaders())
request.connection.finish()
return
        # A 204 response never has a body, even if it doesn't have a content-length
# (which would otherwise mean read-until-close). We simulate here a
# server that sends no content length and does not close the connection.
#
# Tests of a 204 response with no Content-Length header are included
# in SimpleHTTPClientTestMixin.
stream = request.connection.detach()
stream.write(b"HTTP/1.1 204 No content\r\n")
if request.arguments.get("error", [False])[-1]:
stream.write(b"Content-Length: 5\r\n")
else:
stream.write(b"Content-Length: 0\r\n")
stream.write(b"\r\n")
stream.close()
def get_app(self):
return self.respond_204
def test_204_no_content(self):
resp = self.fetch('/')
if not self.http1:
self.skipTest("requires HTTP/1.x")
self.assertEqual(resp.code, 204)
self.assertEqual(resp.body, b'')
def test_204_invalid_content_length(self):
# 204 status with non-zero content length is malformed
with ExpectLog(gen_log, ".*Response with code 204 should not have body"):
response = self.fetch("/?error=1")
if not self.http1:
self.skipTest("requires HTTP/1.x")
if self.http_client.configured_class != SimpleAsyncHTTPClient:
self.skipTest("curl client accepts invalid headers")
self.assertEqual(response.code, 599)
class HostnameMappingTestCase(AsyncHTTPTestCase):
def setUp(self):
super(HostnameMappingTestCase, self).setUp()
self.http_client = SimpleAsyncHTTPClient(
hostname_mapping={
'www.example.com': '127.0.0.1',
('foo.example.com', 8000): ('127.0.0.1', self.get_http_port()),
})
def get_app(self):
return Application([url("/hello", HelloWorldHandler), ])
def test_hostname_mapping(self):
self.http_client.fetch(
'http://www.example.com:%d/hello' % self.get_http_port(), self.stop)
response = self.wait()
response.rethrow()
self.assertEqual(response.body, b'Hello world!')
def test_port_mapping(self):
self.http_client.fetch('http://foo.example.com:8000/hello', self.stop)
response = self.wait()
response.rethrow()
self.assertEqual(response.body, b'Hello world!')
class ResolveTimeoutTestCase(AsyncHTTPTestCase):
def setUp(self):
# Dummy Resolver subclass that never invokes its callback.
class BadResolver(Resolver):
def resolve(self, *args, **kwargs):
pass
super(ResolveTimeoutTestCase, self).setUp()
self.http_client = SimpleAsyncHTTPClient(
resolver=BadResolver())
def get_app(self):
return Application([url("/hello", HelloWorldHandler), ])
def test_resolve_timeout(self):
response = self.fetch('/hello', connect_timeout=0.1)
self.assertEqual(response.code, 599)
class MaxHeaderSizeTest(AsyncHTTPTestCase):
def get_app(self):
class SmallHeaders(RequestHandler):
def get(self):
self.set_header("X-Filler", "a" * 100)
self.write("ok")
class LargeHeaders(RequestHandler):
def get(self):
self.set_header("X-Filler", "a" * 1000)
self.write("ok")
return Application([('/small', SmallHeaders),
('/large', LargeHeaders)])
def get_http_client(self):
return SimpleAsyncHTTPClient(max_header_size=1024)
def test_small_headers(self):
response = self.fetch('/small')
response.rethrow()
self.assertEqual(response.body, b'ok')
def test_large_headers(self):
with ExpectLog(gen_log, "Unsatisfiable read"):
response = self.fetch('/large')
self.assertEqual(response.code, 599)
class MaxBodySizeTest(AsyncHTTPTestCase):
def get_app(self):
class SmallBody(RequestHandler):
def get(self):
self.write("a" * 1024 * 64)
class LargeBody(RequestHandler):
def get(self):
self.write("a" * 1024 * 100)
return Application([('/small', SmallBody),
('/large', LargeBody)])
def get_http_client(self):
return SimpleAsyncHTTPClient(max_body_size=1024 * 64)
def test_small_body(self):
response = self.fetch('/small')
response.rethrow()
self.assertEqual(response.body, b'a' * 1024 * 64)
def test_large_body(self):
with ExpectLog(gen_log, "Malformed HTTP message from None: Content-Length too long"):
response = self.fetch('/large')
self.assertEqual(response.code, 599)
class MaxBufferSizeTest(AsyncHTTPTestCase):
def get_app(self):
class LargeBody(RequestHandler):
def get(self):
self.write("a" * 1024 * 100)
return Application([('/large', LargeBody)])
def get_http_client(self):
# 100KB body with 64KB buffer
return SimpleAsyncHTTPClient(max_body_size=1024 * 100, max_buffer_size=1024 * 64)
def test_large_body(self):
response = self.fetch('/large')
response.rethrow()
self.assertEqual(response.body, b'a' * 1024 * 100)
class ChunkedWithContentLengthTest(AsyncHTTPTestCase):
def get_app(self):
class ChunkedWithContentLength(RequestHandler):
def get(self):
# Add an invalid Transfer-Encoding to the response
self.set_header('Transfer-Encoding', 'chunked')
self.write("Hello world")
return Application([('/chunkwithcl', ChunkedWithContentLength)])
def get_http_client(self):
return SimpleAsyncHTTPClient()
def test_chunked_with_content_length(self):
# Make sure the invalid headers are detected
with ExpectLog(gen_log, ("Malformed HTTP message from None: Response "
"with both Transfer-Encoding and Content-Length")):
response = self.fetch('/chunkwithcl')
self.assertEqual(response.code, 599)
|
from setuptools import setup, find_packages
setup(
name='django-osme',
version=__import__('osme').__version__,
packages=find_packages(),
zip_safe=False,
install_requires=[
'Django>=1.9.8',
'requests>=2.23.0',
'jsonfield>=2.1.1',
],
package_data={
'osme': [
'locale/*/LC_MESSAGES/*',
'static/osme/*',
],
},
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
]
)
|
from sqllite import CDataBase as sqllite
from kademliaGetSet import CDataBase
import socket
from isolated_functions import *
instance_kade = None
class CSQLLite():
def __init__(self):
global instance_kade
if instance_kade is None:
node_identifier = socket.gethostbyname(socket.gethostname())
self.sqllite = sqllite()
self.nodes = ["3.113.39.120", "192.168.0.38", "192.168.56.1", "10.0.2.2", "10.0.2.15", "127.0.0.1", node_identifier]
self.kade = CDataBase()
self.kade.initiate()
instance_kade = self
else:
self.nodes = instance_kade.nodes
self.sqllite = instance_kade.sqllite
self.kade = instance_kade.kade
def save(self, key, value, announce=''):
        if not isinstance(key, str):
            key = str(key)
if announce == 'EXTERNAL':
_current = self.sqllite.get(announce)
if _current is None:
self.sqllite.set(key=announce, value=[key, ])
else:
_current.append(key)
_current = list(set(_current))
self.sqllite.set(key=announce, value=_current)
else:
_not_save_local = self.sqllite.get('EXTERNAL')
if _not_save_local is None: _not_save_local = []
if not (key in _not_save_local and announce == 'Account:'):
self.sqllite.set(key=announce + key, value=value)
if announce != '':
self.announce(announce + key, value)
return self.sqllite.get(announce + key)
def get(self, key):
        if not isinstance(key, str): key = str(key)
return self.sqllite.get(key=key)
def announce(self, key, value):
print('KADEMLIA SET: ',key,' = ',self.kade.set(key=key, value=str(value)))
def look_at(self, key):
        if not isinstance(key, str): key = str(key)
response = self.kade.get(key=key)
if response is not None:
#self.save(key, response)
            try:
                response = str2obj(response)
            except Exception:
                pass
return response
def close(self):
self.sqllite.close()
def register_node(self, address):
if address not in self.nodes:
self.nodes.append(address)
def bootstrapNodes(self):
self.kade.bootstrap(self.nodes)
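# Usage sketch (assumes a reachable Kademlia network on the bootstrap nodes listed above;
# keys and values are placeholders):
#   db = CSQLLite()
#   db.bootstrapNodes()
#   db.save('alice', {'balance': 10}, announce='Account:')
#   print(db.look_at('Account:alice'))
#   db.close()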
|
"""Initialising Optimisers"""
import torch
def create_optimisers(
optim_enc,
optim_dec,
lr_enc,
lr_dec,
mom_enc,
mom_dec,
wd_enc,
wd_dec,
param_enc,
param_dec,
):
"""Create optimisers for encoder, decoder
Args:
optim_enc (str) : type of optimiser for encoder
optim_dec (str) : type of optimiser for decoder
lr_enc (float) : learning rate for encoder
lr_dec (float) : learning rate for decoder
mom_enc (float) : momentum for encoder
mom_dec (float) : momentum for decoder
wd_enc (float) : weight decay for encoder
wd_dec (float) : weight decay for decoder
param_enc (torch.parameters()) : encoder parameters
param_dec (torch.parameters()) : decoder parameters
Returns optim_enc, optim_dec (torch.optim)
"""
if optim_enc == "sgd":
optim_enc = torch.optim.SGD(
param_enc, lr=lr_enc, momentum=mom_enc, weight_decay=wd_enc
)
elif optim_enc == "adam":
optim_enc = torch.optim.Adam(param_enc, lr=lr_enc, weight_decay=wd_enc)
else:
raise ValueError("Unknown Encoder Optimiser: {}".format(optim_enc))
if optim_dec == "sgd":
optim_dec = torch.optim.SGD(
param_dec, lr=lr_dec, momentum=mom_dec, weight_decay=wd_dec
)
elif optim_dec == "adam":
optim_dec = torch.optim.Adam(param_dec, lr=lr_dec, weight_decay=wd_dec)
else:
raise ValueError("Unknown Decoder Optimiser: {}".format(optim_dec))
return optim_enc, optim_dec
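# Example sketch (the encoder/decoder modules below are stand-ins for real networks):
#   enc = torch.nn.Linear(8, 4)
#   dec = torch.nn.Linear(4, 1)
#   opt_enc, opt_dec = create_optimisers(
#       "sgd", "adam", 1e-2, 1e-3, 0.9, 0.9, 1e-4, 1e-4,
#       enc.parameters(), dec.parameters())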
|
# Standard library imports
import unittest
# Local imports
from IPython.frontend.qt.console.ansi_code_processor import AnsiCodeProcessor
class TestAnsiCodeProcessor(unittest.TestCase):
def setUp(self):
self.processor = AnsiCodeProcessor()
def test_clear(self):
""" Do control sequences for clearing the console work?
"""
string = '\x1b[2J\x1b[K'
i = -1
for i, substring in enumerate(self.processor.split_string(string)):
if i == 0:
self.assertEquals(len(self.processor.actions), 1)
action = self.processor.actions[0]
self.assertEquals(action.action, 'erase')
self.assertEquals(action.area, 'screen')
self.assertEquals(action.erase_to, 'all')
elif i == 1:
self.assertEquals(len(self.processor.actions), 1)
action = self.processor.actions[0]
self.assertEquals(action.action, 'erase')
self.assertEquals(action.area, 'line')
self.assertEquals(action.erase_to, 'end')
else:
self.fail('Too many substrings.')
self.assertEquals(i, 1, 'Too few substrings.')
def test_colors(self):
""" Do basic controls sequences for colors work?
"""
string = 'first\x1b[34mblue\x1b[0mlast'
i = -1
for i, substring in enumerate(self.processor.split_string(string)):
if i == 0:
self.assertEquals(substring, 'first')
self.assertEquals(self.processor.foreground_color, None)
elif i == 1:
self.assertEquals(substring, 'blue')
self.assertEquals(self.processor.foreground_color, 4)
elif i == 2:
self.assertEquals(substring, 'last')
self.assertEquals(self.processor.foreground_color, None)
else:
self.fail('Too many substrings.')
self.assertEquals(i, 2, 'Too few substrings.')
def test_colors_xterm(self):
""" Do xterm-specific control sequences for colors work?
"""
string = '\x1b]4;20;rgb:ff/ff/ff\x1b' \
'\x1b]4;25;rgbi:1.0/1.0/1.0\x1b'
substrings = list(self.processor.split_string(string))
desired = { 20 : (255, 255, 255),
25 : (255, 255, 255) }
self.assertEquals(self.processor.color_map, desired)
string = '\x1b[38;5;20m\x1b[48;5;25m'
substrings = list(self.processor.split_string(string))
self.assertEquals(self.processor.foreground_color, 20)
self.assertEquals(self.processor.background_color, 25)
def test_scroll(self):
""" Do control sequences for scrolling the buffer work?
"""
string = '\x1b[5S\x1b[T'
i = -1
for i, substring in enumerate(self.processor.split_string(string)):
if i == 0:
self.assertEquals(len(self.processor.actions), 1)
action = self.processor.actions[0]
self.assertEquals(action.action, 'scroll')
self.assertEquals(action.dir, 'up')
self.assertEquals(action.unit, 'line')
self.assertEquals(action.count, 5)
elif i == 1:
self.assertEquals(len(self.processor.actions), 1)
action = self.processor.actions[0]
self.assertEquals(action.action, 'scroll')
self.assertEquals(action.dir, 'down')
self.assertEquals(action.unit, 'line')
self.assertEquals(action.count, 1)
else:
self.fail('Too many substrings.')
self.assertEquals(i, 1, 'Too few substrings.')
def test_specials(self):
""" Are special characters processed correctly?
"""
string = '\f' # form feed
self.assertEquals(list(self.processor.split_string(string)), [''])
self.assertEquals(len(self.processor.actions), 1)
action = self.processor.actions[0]
self.assertEquals(action.action, 'scroll')
self.assertEquals(action.dir, 'down')
self.assertEquals(action.unit, 'page')
self.assertEquals(action.count, 1)
if __name__ == '__main__':
unittest.main()
|
######################################################################
# This file should be kept compatible with Python 2.3, see PEP 291. #
######################################################################
import sys, os
# find_library(name) returns the pathname of a library, or None.
if os.name == "nt":
def find_library(name):
# See MSDN for the REAL search order.
for directory in os.environ['PATH'].split(os.pathsep):
fname = os.path.join(directory, name)
if os.path.exists(fname):
return fname
if fname.lower().endswith(".dll"):
continue
fname = fname + ".dll"
if os.path.exists(fname):
return fname
return None
if os.name == "ce":
# search path according to MSDN:
# - absolute path specified by filename
# - The .exe launch directory
# - the Windows directory
# - ROM dll files (where are they?)
# - OEM specified search path: HKLM\Loader\SystemPath
def find_library(name):
return name
if os.name == "posix" and sys.platform == "darwin":
from ctypes.macholib.dyld import dyld_find as _dyld_find
def find_library(name):
possible = ['lib%s.dylib' % name,
'%s.dylib' % name,
'%s.framework/%s' % (name, name)]
for name in possible:
try:
return _dyld_find(name)
except ValueError:
continue
return None
elif os.name == "posix":
# Andreas Degert's find functions, using gcc, /sbin/ldconfig, objdump
import re, tempfile, errno
def _findLib_gcc(name):
expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name)
fdout, ccout = tempfile.mkstemp()
os.close(fdout)
cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; else CC=cc; fi;' \
'$CC -Wl,-t -o ' + ccout + ' 2>&1 -l' + name
try:
f = os.popen(cmd)
trace = f.read()
f.close()
finally:
try:
os.unlink(ccout)
except OSError, e:
if e.errno != errno.ENOENT:
raise
res = re.search(expr, trace)
if not res:
return None
return res.group(0)
def _get_soname(f):
# assuming GNU binutils / ELF
if not f:
return None
cmd = "objdump -p -j .dynamic 2>/dev/null " + f
res = re.search(r'\sSONAME\s+([^\s]+)', os.popen(cmd).read())
if not res:
return None
return res.group(1)
if (sys.platform.startswith("freebsd")
or sys.platform.startswith("openbsd")
or sys.platform.startswith("dragonfly")):
def _num_version(libname):
# "libxyz.so.MAJOR.MINOR" => [ MAJOR, MINOR ]
parts = libname.split(".")
nums = []
try:
while parts:
nums.insert(0, int(parts.pop()))
except ValueError:
pass
return nums or [ sys.maxint ]
def find_library(name):
ename = re.escape(name)
expr = r':-l%s\.\S+ => \S*/(lib%s\.\S+)' % (ename, ename)
res = re.findall(expr,
os.popen('/sbin/ldconfig -r 2>/dev/null').read())
if not res:
return _get_soname(_findLib_gcc(name))
res.sort(cmp= lambda x,y: cmp(_num_version(x), _num_version(y)))
return res[-1]
else:
def _findLib_ldconfig(name):
# XXX assuming GLIBC's ldconfig (with option -p)
expr = r'/[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name)
res = re.search(expr,
os.popen('/sbin/ldconfig -p 2>/dev/null').read())
if not res:
# Hm, this works only for libs needed by the python executable.
cmd = 'ldd %s 2>/dev/null' % sys.executable
res = re.search(expr, os.popen(cmd).read())
if not res:
return None
return res.group(0)
def find_library(name):
return _get_soname(_findLib_ldconfig(name) or _findLib_gcc(name))
################################################################
# test code
def test():
from ctypes import cdll
if os.name == "nt":
print cdll.msvcrt
print cdll.load("msvcrt")
print find_library("msvcrt")
if os.name == "posix":
# find and load_version
print find_library("m")
print find_library("c")
print find_library("bz2")
# getattr
## print cdll.m
## print cdll.bz2
# load
if sys.platform == "darwin":
print cdll.LoadLibrary("libm.dylib")
print cdll.LoadLibrary("libcrypto.dylib")
print cdll.LoadLibrary("libSystem.dylib")
print cdll.LoadLibrary("System.framework/System")
else:
print cdll.LoadLibrary("libm.so")
print cdll.LoadLibrary("libcrypt.so")
print find_library("crypt")
if __name__ == "__main__":
test()
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetGlobalAddressResult',
'AwaitableGetGlobalAddressResult',
'get_global_address',
'get_global_address_output',
]
@pulumi.output_type
class GetGlobalAddressResult:
def __init__(__self__, address=None, address_type=None, creation_timestamp=None, description=None, ip_version=None, kind=None, name=None, network=None, network_tier=None, prefix_length=None, purpose=None, region=None, self_link=None, status=None, subnetwork=None, users=None):
if address and not isinstance(address, str):
raise TypeError("Expected argument 'address' to be a str")
pulumi.set(__self__, "address", address)
if address_type and not isinstance(address_type, str):
raise TypeError("Expected argument 'address_type' to be a str")
pulumi.set(__self__, "address_type", address_type)
if creation_timestamp and not isinstance(creation_timestamp, str):
raise TypeError("Expected argument 'creation_timestamp' to be a str")
pulumi.set(__self__, "creation_timestamp", creation_timestamp)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if ip_version and not isinstance(ip_version, str):
raise TypeError("Expected argument 'ip_version' to be a str")
pulumi.set(__self__, "ip_version", ip_version)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if network and not isinstance(network, str):
raise TypeError("Expected argument 'network' to be a str")
pulumi.set(__self__, "network", network)
if network_tier and not isinstance(network_tier, str):
raise TypeError("Expected argument 'network_tier' to be a str")
pulumi.set(__self__, "network_tier", network_tier)
if prefix_length and not isinstance(prefix_length, int):
raise TypeError("Expected argument 'prefix_length' to be a int")
pulumi.set(__self__, "prefix_length", prefix_length)
if purpose and not isinstance(purpose, str):
raise TypeError("Expected argument 'purpose' to be a str")
pulumi.set(__self__, "purpose", purpose)
if region and not isinstance(region, str):
raise TypeError("Expected argument 'region' to be a str")
pulumi.set(__self__, "region", region)
if self_link and not isinstance(self_link, str):
raise TypeError("Expected argument 'self_link' to be a str")
pulumi.set(__self__, "self_link", self_link)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if subnetwork and not isinstance(subnetwork, str):
raise TypeError("Expected argument 'subnetwork' to be a str")
pulumi.set(__self__, "subnetwork", subnetwork)
if users and not isinstance(users, list):
raise TypeError("Expected argument 'users' to be a list")
pulumi.set(__self__, "users", users)
@property
@pulumi.getter
def address(self) -> str:
"""
The static IP address represented by this resource.
"""
return pulumi.get(self, "address")
@property
@pulumi.getter(name="addressType")
def address_type(self) -> str:
"""
The type of address to reserve, either INTERNAL or EXTERNAL. If unspecified, defaults to EXTERNAL.
"""
return pulumi.get(self, "address_type")
@property
@pulumi.getter(name="creationTimestamp")
def creation_timestamp(self) -> str:
"""
Creation timestamp in RFC3339 text format.
"""
return pulumi.get(self, "creation_timestamp")
@property
@pulumi.getter
def description(self) -> str:
"""
An optional description of this resource. Provide this field when you create the resource.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="ipVersion")
def ip_version(self) -> str:
"""
The IP version that will be used by this address. Valid options are IPV4 or IPV6. This can only be specified for a global address.
"""
return pulumi.get(self, "ip_version")
@property
@pulumi.getter
def kind(self) -> str:
"""
Type of the resource. Always compute#address for addresses.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def network(self) -> str:
"""
The URL of the network in which to reserve the address. This field can only be used with INTERNAL type with the VPC_PEERING purpose.
"""
return pulumi.get(self, "network")
@property
@pulumi.getter(name="networkTier")
def network_tier(self) -> str:
"""
This signifies the networking tier used for configuring this address and can only take the following values: PREMIUM or STANDARD. Internal IP addresses are always Premium Tier; global external IP addresses are always Premium Tier; regional external IP addresses can be either Standard or Premium Tier. If this field is not specified, it is assumed to be PREMIUM.
"""
return pulumi.get(self, "network_tier")
@property
@pulumi.getter(name="prefixLength")
def prefix_length(self) -> int:
"""
The prefix length if the resource represents an IP range.
"""
return pulumi.get(self, "prefix_length")
@property
@pulumi.getter
def purpose(self) -> str:
"""
The purpose of this resource, which can be one of the following values: - GCE_ENDPOINT for addresses that are used by VM instances, alias IP ranges, load balancers, and similar resources. - DNS_RESOLVER for a DNS resolver address in a subnetwork for a Cloud DNS inbound forwarder IP addresses (regional internal IP address in a subnet of a VPC network) - VPC_PEERING for global internal IP addresses used for private services access allocated ranges. - NAT_AUTO for the regional external IP addresses used by Cloud NAT when allocating addresses using automatic NAT IP address allocation. - IPSEC_INTERCONNECT for addresses created from a private IP range that are reserved for a VLAN attachment in an *IPsec-encrypted Cloud Interconnect* configuration. These addresses are regional resources. Not currently available publicly. - `SHARED_LOADBALANCER_VIP` for an internal IP address that is assigned to multiple internal forwarding rules. - `PRIVATE_SERVICE_CONNECT` for a private network address that is used to configure Private Service Connect. Only global internal addresses can use this purpose.
"""
return pulumi.get(self, "purpose")
@property
@pulumi.getter
def region(self) -> str:
"""
The URL of the region where a regional address resides. For regional addresses, you must specify the region as a path parameter in the HTTP request URL. *This field is not applicable to global addresses.*
"""
return pulumi.get(self, "region")
@property
@pulumi.getter(name="selfLink")
def self_link(self) -> str:
"""
Server-defined URL for the resource.
"""
return pulumi.get(self, "self_link")
@property
@pulumi.getter
def status(self) -> str:
"""
The status of the address, which can be one of RESERVING, RESERVED, or IN_USE. An address that is RESERVING is currently in the process of being reserved. A RESERVED address is currently reserved and available to use. An IN_USE address is currently being used by another resource and is not available.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def subnetwork(self) -> str:
"""
The URL of the subnetwork in which to reserve the address. If an IP address is specified, it must be within the subnetwork's IP range. This field can only be used with INTERNAL type with a GCE_ENDPOINT or DNS_RESOLVER purpose.
"""
return pulumi.get(self, "subnetwork")
@property
@pulumi.getter
def users(self) -> Sequence[str]:
"""
The URLs of the resources that are using this address.
"""
return pulumi.get(self, "users")
class AwaitableGetGlobalAddressResult(GetGlobalAddressResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetGlobalAddressResult(
address=self.address,
address_type=self.address_type,
creation_timestamp=self.creation_timestamp,
description=self.description,
ip_version=self.ip_version,
kind=self.kind,
name=self.name,
network=self.network,
network_tier=self.network_tier,
prefix_length=self.prefix_length,
purpose=self.purpose,
region=self.region,
self_link=self.self_link,
status=self.status,
subnetwork=self.subnetwork,
users=self.users)
def get_global_address(address: Optional[str] = None,
project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGlobalAddressResult:
"""
Returns the specified address resource. Gets a list of available addresses by making a list() request.
"""
__args__ = dict()
__args__['address'] = address
__args__['project'] = project
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:compute/v1:getGlobalAddress', __args__, opts=opts, typ=GetGlobalAddressResult).value
return AwaitableGetGlobalAddressResult(
address=__ret__.address,
address_type=__ret__.address_type,
creation_timestamp=__ret__.creation_timestamp,
description=__ret__.description,
ip_version=__ret__.ip_version,
kind=__ret__.kind,
name=__ret__.name,
network=__ret__.network,
network_tier=__ret__.network_tier,
prefix_length=__ret__.prefix_length,
purpose=__ret__.purpose,
region=__ret__.region,
self_link=__ret__.self_link,
status=__ret__.status,
subnetwork=__ret__.subnetwork,
users=__ret__.users)
@_utilities.lift_output_func(get_global_address)
def get_global_address_output(address: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGlobalAddressResult]:
"""
Returns the specified address resource. Gets a list of available addresses by making a list() request.
"""
...
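# Usage sketch inside a Pulumi program (the address and project names are placeholders):
#   result = get_global_address(address="my-global-address", project="my-project")
#   pulumi.export("global_address_ip", result.address)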
|
import asyncio
from datetime import datetime
from string import hexdigits
from time import time
import aiohttp
import aiohttp_jinja2
from aiohttp import web
async def handle_command(payload, user_id, state_manager):
if payload in {'/start', '/close'} and state_manager:
await state_manager.drop_active_dialog(user_id)
return True
class ApiHandler:
def __init__(self, output_formatter, response_time_limit=5):
self.output_formatter = output_formatter
self.response_time_limit = response_time_limit
async def handle_api_request(self, request):
response = {}
register_msg = request.app['agent'].register_msg
if request.method == 'POST':
if 'content-type' not in request.headers \
or not request.headers['content-type'].startswith('application/json'):
raise web.HTTPBadRequest(reason='Content-Type should be application/json')
data = await request.json()
user_id = data.pop('user_id')
payload = data.pop('payload', '')
deadline_timestamp = None
if self.response_time_limit:
deadline_timestamp = time() + self.response_time_limit
if not user_id:
raise web.HTTPBadRequest(reason='user_id key is required')
command_performed = await handle_command(payload, user_id, request.app['agent'].state_manager)
if command_performed:
return web.json_response({})
response = await asyncio.shield(
register_msg(utterance=payload, user_external_id=user_id,
user_device_type=data.pop('user_device_type', 'http'),
date_time=datetime.now(),
location=data.pop('location', ''),
channel_type='http_client',
message_attrs=data, require_response=True,
deadline_timestamp=deadline_timestamp)
)
if response is None:
raise RuntimeError('Got None instead of a bot response.')
return web.json_response(self.output_formatter(response['dialog'].to_dict()))
async def dialog(self, request):
state_manager = request.app['agent'].state_manager
dialog_id = request.match_info['dialog_id']
if len(dialog_id) == 24 and all(c in hexdigits for c in dialog_id):
dialog_obj = await state_manager.get_dialog_by_id(dialog_id)
if not dialog_obj:
raise web.HTTPNotFound(reason=f'dialog with id {dialog_id} does not exist')
return web.json_response(dialog_obj.to_dict())
raise web.HTTPBadRequest(reason='dialog id should be 24-character hex string')
async def dialogs_by_user(self, request):
state_manager = request.app['agent'].state_manager
user_external_id = request.match_info['user_external_id']
dialogs = await state_manager.get_dialogs_by_user_ext_id(user_external_id)
return web.json_response([i.to_dict() for i in dialogs])
async def dialog_rating(self, request):
state_manager = request.app['agent'].state_manager
data = await request.json()
dialog_id = data.pop('dialog_id')
user_id = data.pop('user_id', None)
rating = data.pop('rating')
await state_manager.set_rating_dialog(user_id, dialog_id, rating)
return web.Response()
async def utterance_rating(self, request):
state_manager = request.app['agent'].state_manager
data = await request.json()
user_id = data.pop('user_id', None)
rating = data.pop('rating')
utt_id = data.pop('utt_id')
await state_manager.set_rating_utterance(user_id, utt_id, rating)
return web.Response()
async def options(self, request):
return web.Response(headers={'Access-Control-Allow-Methods': 'POST, OPTIONS'})
class PagesHandler:
def __init__(self, debug=False):
self.debug = debug
async def ping(self, request):
return web.json_response("pong")
async def options(self, request):
return web.Response(headers={'Access-Control-Allow-Methods': 'GET, OPTIONS'})
class WSstatsHandler:
def __init__(self):
self.update_time = 0.5
@aiohttp_jinja2.template('services_ws_highcharts.html')
async def ws_page(self, request):
return {}
async def ws_handler(self, request):
ws = web.WebSocketResponse()
await ws.prepare(request)
request.app['websockets'].append(ws)
logger_stats = request.app['logger_stats']
        while not ws.closed:
data = dict(logger_stats.get_current_load())
await ws.send_json(data)
await asyncio.sleep(self.update_time)
return ws
async def options(self, request):
return web.Response(headers={'Access-Control-Allow-Methods': 'GET, OPTIONS'})
class WSChatHandler:
def __init__(self, output_formatter):
self.output_formatter = output_formatter
@aiohttp_jinja2.template('chat.html')
async def ws_page(self, request):
return {}
async def ws_handler(self, request):
register_msg = request.app['agent'].register_msg
ws = web.WebSocketResponse()
await ws.prepare(request)
while True:
msg = await ws.receive()
if msg.type == aiohttp.WSMsgType.text:
data = msg.json()
user_id = data.pop('user_id')
payload = data.pop('payload', '')
deadline_timestamp = None
if not user_id:
raise web.HTTPBadRequest(reason='user_id key is required')
command_performed = await handle_command(payload, user_id, request.app['agent'].state_manager)
if command_performed:
await ws.send_json('command_performed')
continue
response = await register_msg(
utterance=payload, user_external_id=user_id,
user_device_type=data.pop('user_device_type', 'websocket'),
date_time=datetime.now(),
location=data.pop('location', ''),
channel_type='ws_client',
message_attrs=data, require_response=True,
deadline_timestamp=deadline_timestamp
)
if response is None:
raise RuntimeError('Got None instead of a bot response.')
await ws.send_json(self.output_formatter(response['dialog'].to_dict()))
else:
await ws.close()
break
return ws
async def options(self, request):
return web.Response(headers={'Access-Control-Allow-Methods': 'GET, OPTIONS'})
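# Route wiring sketch (the aiohttp app, agent and output formatter are assumed to be
# created elsewhere; paths are illustrative):
#   api = ApiHandler(output_formatter=lambda dialog: dialog)
#   app.router.add_post('/api', api.handle_api_request)
#   app.router.add_get('/api/dialogs/{dialog_id}', api.dialog)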
|
# Copyright [2018-2020] Peter Krenesky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
OPTIONS = {
"name": "PYTHON",
"tasks": "ixian_docker.modules.python.tasks",
"config": "ixian_docker.modules.python.config.PythonConfig",
"dockerfile_template": "{PYTHON.MODULE_DIR}/Dockerfile.template",
# Dev volumes mounted only in local environment
"dev_volumes": [
# Pipenv
"{PYTHON.VIRTUAL_ENV_VOLUME}:{PYTHON.VIRTUAL_ENV_DIR}",
# Mount Pipfile in because it can't be symlinked.
"{PWD}/Pipfile:{DOCKER.APP_DIR}/Pipfile",
# ipython history
"{BUILDER}/.ipython/:{DOCKER.HOME_DIR}/.ipython/",
],
}
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
from google.cloud.vision_v1p1beta1.proto import image_annotator_pb2_grpc
class ImageAnnotatorGrpcTransport(object):
"""gRPC transport class providing stubs for
google.cloud.vision.v1p1beta1 ImageAnnotator API.
The transport provides access to the raw gRPC stubs,
which can be used to take advantage of advanced
features of gRPC.
"""
# The scopes needed to make gRPC calls to all of the methods defined
# in this service.
_OAUTH_SCOPES = (
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision",
)
def __init__(
self, channel=None, credentials=None, address="vision.googleapis.com:443"
):
"""Instantiate the transport class.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
address (str): The address where the service is hosted.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
"The `channel` and `credentials` arguments are mutually " "exclusive."
)
# Create the channel.
if channel is None:
channel = self.create_channel(address=address, credentials=credentials)
self._channel = channel
# gRPC uses objects called "stubs" that are bound to the
# channel and provide a basic method for each RPC.
self._stubs = {
"image_annotator_stub": image_annotator_pb2_grpc.ImageAnnotatorStub(channel)
}
@classmethod
def create_channel(
cls, address="vision.googleapis.com:443", credentials=None, **kwargs
):
"""Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
kwargs (dict): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return google.api_core.grpc_helpers.create_channel(
address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs
)
@property
def channel(self):
"""The gRPC channel used by the transport.
Returns:
grpc.Channel: A gRPC channel object.
"""
return self._channel
@property
def batch_annotate_images(self):
"""Return the gRPC stub for :meth:`ImageAnnotatorClient.batch_annotate_images`.
Run image detection and annotation for a batch of images.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["image_annotator_stub"].BatchAnnotateImages
|
# -*- coding: utf-8 -*-
"""
Plugin to create a Yambo input file and run a calculation with the yambo executable.
"""
from __future__ import absolute_import
import os
import six
from aiida.engine import CalcJob
from aiida_quantumespresso.calculations import _lowercase_dict, _uppercase_dict
from aiida.common.datastructures import CalcInfo
from aiida.common.datastructures import CalcJobState
from aiida.common.exceptions import UniquenessError, InputValidationError, ValidationError
from aiida.common.utils import classproperty
from aiida.orm import Code
from aiida.orm.nodes import Dict
from aiida.orm.nodes import RemoteData, BandsData, ArrayData
from aiida.plugins import DataFactory, CalculationFactory
from aiida.common import AIIDA_LOGGER
from aiida.common import LinkType
from aiida_yambo.utils.common_helpers import *
PwCalculation = CalculationFactory('quantumespresso.pw')
__authors__ = " Miki Bonacci (miki.bonacci@unimore.it)," \
" Gianluca Prandini (gianluca.prandini@epfl.ch)," \
" Antimo Marrazzo (antimo.marrazzo@epfl.ch)," \
" Michael Atambo (michaelontita.atambo@unimore.it)."
class YamboCalculation(CalcJob):
"""
AiiDA plugin for the Yambo code.
For more information, refer to http://www.yambo-code.org/
https://github.com/yambo-code/yambo-aiida and http://aiida-yambo.readthedocs.io/en/latest/
"""
# Default input and output files
_DEFAULT_INPUT_FILE = 'aiida.in'
_DEFAULT_OUTPUT_FILE = 'aiida.out'
@classmethod
def define(cls,spec):
super(YamboCalculation, cls).define(spec)
spec.input('metadata.options.input_filename', valid_type=six.string_types, default=cls._DEFAULT_INPUT_FILE)
spec.input('metadata.options.output_filename', valid_type=six.string_types, default=cls._DEFAULT_OUTPUT_FILE)
# Default output parser provided by AiiDA
spec.input('metadata.options.parser_name', valid_type=six.string_types, default='yambo.yambo')
# self._SCRATCH_FOLDER = 'SAVE'
spec.input('metadata.options.scratch_folder', valid_type=six.string_types, default='SAVE')
spec.input('metadata.options.logostring', valid_type=six.string_types, default="""
#
# Y88b / e e e 888~~\ ,88~-_
# Y88b / d8b d8b d8b 888 | d888 \
# Y88b/ /Y88b d888bdY88b 888 _/ 88888 |
# Y8Y / Y88b / Y88Y Y888b 888 \ 88888 |
# Y /____Y88b / YY Y888b 888 | Y888 /
# / / Y88b / Y888b 888__/ `88_-~
#
# AIIDA input plugin. YAMBO 4.x compatible
# http://www.yambo-code.org
#
"""
)
spec.input('settings',valid_type=Dict,
help='Use an additional node for special settings')
spec.input('parameters',valid_type=Dict,
help='Use a node that specifies the input parameters')
spec.input('parent_folder',valid_type=RemoteData,
help='Use a remote folder as parent folder (for restarts and similar)')
spec.input('preprocessing_code',valid_type=Code,
help='Use a preprocessing code for starting yambo',required=False)
spec.input('precode_parameters',valid_type=Dict,
help='Use a node that specifies the input parameters for the yambo precode',required=False)
spec.input('code',valid_type=Code,
help='Use a main code for yambo calculation')
spec.exit_code(500, 'ERROR_NO_RETRIEVED_FOLDER',
message='The retrieved folder data node could not be accessed.')
spec.exit_code(501, 'WALLTIME_ERROR',
message='time exceeded the max walltime')
spec.exit_code(502, 'NO_SUCCESS',
message='failed calculation for some reason: could be a low number of conduction bands')
spec.exit_code(503, 'PARSER_ANOMALY',
message='Unexpected behavior of YamboFolder')
spec.exit_code(504, 'PARA_ERROR',
message='parallelization error')
spec.exit_code(505, 'MEMORY_ERROR',
message='general memory error')
spec.exit_code(506, 'X_par_MEMORY_ERROR',
message='x_par allocation memory error')
#outputs definition:
spec.output('output_parameters', valid_type=Dict,
required=True, help='returns the output parameters')
spec.output('array_alpha', valid_type=ArrayData,
required=False, help='returns the alpha array')
spec.output('array_alpha_bands', valid_type=ArrayData,
required=False, help='returns the alpha array bands')
spec.output('array_alpha_array', valid_type=ArrayData,
required=False, help='returns the alpha array')
spec.output('bands_quasiparticle', valid_type=BandsData,
required=False, help='returns the quasiparticle band structure')
spec.output('array_qp', valid_type=ArrayData,
required=False, help='returns the quasiparticle array band structure')
spec.output('array_eels', valid_type=ArrayData,
required=False, help='returns the eels array')
spec.output('array_eps', valid_type=ArrayData,
required=False, help='returns the eps array')
spec.output('array_ndb', valid_type=ArrayData,
required=False, help='returns the array for ndb')
spec.output('array_ndb_QP', valid_type=ArrayData,
required=False, help='returns the array for ndbQP')
spec.output('array_ndb_HFlocXC', valid_type=ArrayData,
required=False, help='returns the array ndb for HFlocXC')
spec.output('system_info', valid_type=Dict,
required=False, help='returns some system information after a p2y')
def prepare_for_submission(self, tempfolder):
_dbs_accepted = {'gw0': 'ndb.QP', 'HF_and_locXC': 'ndb.HF_and_locXC',}
local_copy_list = []
remote_copy_list = []
remote_symlink_list = []
# Settings can be undefined, and default to an empty dictionary.
# They will be used for any input that doesn't fit elsewhere.
settings = self.inputs.settings.get_dict()
initialise = settings.pop('INITIALISE', None)
if initialise is not None:
if not isinstance(initialise, bool):
raise InputValidationError("INITIALISE must be " " a boolean")
copy_save = settings.pop('COPY_SAVE', None)
if copy_save is not None:
if not isinstance(copy_save, bool):
raise InputValidationError("COPY_SAVE must be " " a boolean")
copy_dbs = settings.pop('COPY_DBS', None)
if copy_dbs is not None:
if not isinstance(copy_dbs, bool):
raise InputValidationError("COPY_DBS must be " " a boolean")
restart_yambo = settings.pop('RESTART_YAMBO', None)
if restart_yambo is not None:
if not isinstance(restart_yambo, bool):
raise InputValidationError("RESTART_YAMBO must be " " a boolean")
parameters = self.inputs.parameters
if not initialise:
if not isinstance(parameters, Dict):
raise InputValidationError("parameters is not of type Dict")
parent_calc_folder = self.inputs.parent_folder
main_code = self.inputs.code
preproc_code = self.inputs.preprocessing_code
parent_calc = take_calc_from_remote(parent_calc_folder)
if parent_calc.process_type=='aiida.calculations:yambo.yambo':
yambo_parent=True
else:
yambo_parent=False
# flags for yambo interfaces
try:
precode_param_dict = self.inputs.precode_parameters
except:
precode_param_dict = Dict(dict={})
# check the precode parameters given in input
input_cmdline = settings.pop('CMDLINE', None)
import re
precode_params_list = [] #['cd aiida.save'] ##.format(parent_calc_folder._PREFIX)
pattern = re.compile(r"(^\-)([a-zA-Z])")
for key, value in six.iteritems(precode_param_dict.get_dict()):
if re.search(pattern, key) is not None:
if key == '-O' or key == '-H' or key == '-h' or key == '-F':
raise InputValidationError(
"Precode flag {} is not allowed".format(str(key)))
else:
if precode_param_dict[key] is True:
precode_params_list.append(str(key))
elif precode_param_dict[key] is False:
pass
else:
precode_params_list.append('{}'.format(str(key)))
precode_params_list.append('{}'.format(str(value)))
else:
raise InputValidationError(
"Wrong format of precode_parameters")
# Adding manual cmdline input (e.g. for DB fragmentation)
if input_cmdline is not None:
precode_params_list = precode_params_list + input_cmdline
# TODO: check that remote data must be on the same computer
##############################
# END OF INITIAL INPUT CHECK #
##############################
if not initialise:
###################################################
# Prepare yambo input file
###################################################
params_dict = parameters.get_dict()
# extract boolean keys
boolean_dict = {
k: v
for k, v in six.iteritems(params_dict) if isinstance(v, bool)
}
params_dict = {
k: v
for k, v in six.iteritems(params_dict)
if k not in list(boolean_dict.keys())
}
# reorganize the dictionary and create a list of dictionaries with key, value and units
parameters_list = []
for k, v in six.iteritems(params_dict):
if "_units" in k:
continue
units_key = "{}_units".format(k)
try:
units = params_dict[units_key]
except KeyError:
units = None
this_dict = {}
this_dict['key'] = k
this_dict['value'] = v
this_dict['units'] = units
parameters_list.append(this_dict)
input_filename = tempfolder.get_abs_path(self.metadata.options.input_filename)
with open(input_filename, 'w') as infile:
infile.write(self.metadata.options.logostring)
for k, v in six.iteritems(boolean_dict):
if v:
infile.write("{}\n".format(k))
for this_dict in parameters_list:
key = this_dict['key']
value = this_dict['value']
units = this_dict['units']
if isinstance(value, list):
value_string = ''
try:
for v in value:
value_string += " | ".join([str(_) for _ in v]) + " |\n"
except:
value_string += " | ".join([str(_) for _ in value]) + " |\n"
the_string = "% {}\n {}".format(key, value_string)
the_string += "%"
else:
the_value = '"{}"'.format(value) if isinstance(
value, six.string_types) else '{}'.format(value)
the_string = "{} = {}".format(key, the_value)
if units is not None:
the_string += " {}".format(units)
infile.write(the_string + "\n")
############################################
# set copy of the parent calculation
############################################
try:
parent_calc = parent_calc_folder.get_incoming().all_nodes()[-1] #to load the node from a workchain...
except:
parent_calc = parent_calc_folder.get_incoming().get_node_by_label('remote_folder')
if yambo_parent:
if copy_save:
try:
remote_copy_list.append((parent_calc_folder.computer.uuid,parent_calc_folder.get_remote_path()+"/SAVE/",'./SAVE/'))
except:
remote_copy_list.append((parent_calc_folder.computer.uuid,parent_calc_folder.get_remote_path()+"out/aiida.save/SAVE/",'./SAVE/'))
else:
try:
remote_symlink_list.append((parent_calc_folder.computer.uuid,parent_calc_folder.get_remote_path()+"/SAVE/",'./SAVE/'))
except:
remote_symlink_list.append((parent_calc_folder.computer.uuid,parent_calc_folder.get_remote_path()+"out/aiida.save/SAVE/",'./SAVE/'))
if copy_dbs:
remote_copy_list.append((parent_calc_folder.computer.uuid,parent_calc_folder.get_remote_path()+"/aiida.out/",'./aiida.out/'))
if restart_yambo:
remote_symlink_list.append((parent_calc_folder.computer.uuid,parent_calc_folder.get_remote_path()+"/aiida.out/",'./aiida.out/'))
else:
remote_copy_list.append(
(parent_calc_folder.computer.uuid,
os.path.join(parent_calc_folder.get_remote_path(),
PwCalculation._OUTPUT_SUBFOLDER,
"aiida.save","*" ), ##.format(parent_calc_folder._PREFIX)
"."
)
)
############################################
# set Calcinfo
############################################
calcinfo = CalcInfo()
calcinfo.uuid = self.uuid
calcinfo.local_copy_list = []
calcinfo.remote_copy_list = remote_copy_list
calcinfo.remote_symlink_list = remote_symlink_list
# Retrieve by default the output file and the xml file
calcinfo.retrieve_list = []
calcinfo.retrieve_list.append('r*')
calcinfo.retrieve_list.append('l*')
calcinfo.retrieve_list.append('o*')
calcinfo.retrieve_list.append('LOG/l*_CPU_1')
calcinfo.retrieve_list.append('LOG/l*_CPU_2')
calcinfo.retrieve_list.append('*stderr*') #standard errors
extra_retrieved = []
if initialise:
# extra_retrieved.append('SAVE/'+_dbs_accepted['ns.db1'])
pass
else:
for dbs in _dbs_accepted.keys():
db = boolean_dict.pop(dbs,False)
if db:
extra_retrieved.append('aiida.out/'+_dbs_accepted[dbs])
additional = settings.pop('ADDITIONAL_RETRIEVE_LIST',[])
if additional:
extra_retrieved.extend(additional)  # ADDITIONAL_RETRIEVE_LIST is a list of patterns, so extend rather than append
for extra in extra_retrieved:
calcinfo.retrieve_list.append(extra)
from aiida.common.datastructures import CodeRunMode, CodeInfo
# c1 = interface dft codes and yambo (ex. p2y or a2y)
c1 = CodeInfo()
c1.withmpi = True
c1.cmdline_params = precode_params_list
# c2 = yambo initialization
c2 = CodeInfo()
c2.withmpi = True
c2.cmdline_params = []
c2.code_uuid = main_code.uuid
# if the parent calculation is a yambo calculation skip the interface (c1) and the initialization (c2)
if yambo_parent:
try:
parent_settings = _uppercase_dict(
parent_calc.inputs.settings.get_dict(),
dict_name='parent settings')
parent_initialise = parent_settings['INITIALISE']
except KeyError:
parent_initialise = False
c1 = None
if not parent_initialise:
c2 = None
else:
c1.cmdline_params = precode_params_list
c1.code_uuid = preproc_code.uuid
# c3 = yambo calculation
c3 = CodeInfo()
c3.withmpi = True
#c3.withmpi = self.get_withmpi()
c3.cmdline_params = [
"-F", self.metadata.options.input_filename, \
'-J', self.metadata.options.output_filename, \
]
c3.code_uuid = main_code.uuid
if initialise:
c2 = None
c3 = None
#logic of the execution
#calcinfo.codes_info = [c1, c2, c3] if not yambo_parent else [c3]
if yambo_parent:
if not parent_initialise:
calcinfo.codes_info = [c3]
else:
calcinfo.codes_info = [c2, c3]
elif initialise:
calcinfo.codes_info = [c1]
else:
calcinfo.codes_info = [c1, c2, c3]
calcinfo.codes_run_mode = CodeRunMode.SERIAL
if settings:
raise InputValidationError(
"The following keys have been found in "
"the settings input node, but were not understood: {}".format(
",".join(list(settings.keys()))))
return calcinfo
################################################################################
#the following functions are not used
def _check_valid_parent(self, calc):
"""
Check that calc is a valid parent for a YamboCalculation.
It can be a PwCalculation or a YamboCalculation.
"""
try:
if ((not isinstance(calc, PwCalculation))
and (not isinstance(calc, YamboCalculation))):
raise ValueError(
"Parent calculation must be a PwCalculation or a YamboCalculation"
)
except ImportError:
if ((not isinstance(calc, PwCalculation))
and (not isinstance(calc, YamboCalculation))):
raise ValueError(
"Parent calculation must be a PwCalculation or a YamboCalculation"
)
def use_parent_calculation(self, calc):
"""
Set the parent calculation of Yambo,
from which it will inherit the output subfolder.
The link will be created from the parent RemoteData to the YamboCalculation.
"""
from aiida.common.exceptions import NotExistent
self._check_valid_parent(calc)
remotedatas = calc.get_outputs(node_type=RemoteData)
if not remotedatas:
raise NotExistent("No output remotedata found in " "the parent")
if len(remotedatas) != 1:
raise UniquenessError("More than one output remotedata found in "
"the parent")
remotedata = remotedatas[0]
self._set_parent_remotedata(remotedata)
def _set_parent_remotedata(self, remotedata):
"""
Used to set a parent remote folder at the start of a Yambo calculation.
"""
if not isinstance(remotedata, RemoteData):
raise ValueError('remotedata must be a RemoteData')
# complain if another remotedata is already found
input_remote = self.get_inputs(node_type=RemoteData)
if input_remote:
raise ValidationError("Cannot set several parent calculation to a "
"Yambo calculation")
self.use_parent_folder(remotedata)
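# --- Hedged usage sketch (illustration only, not part of the plugin) ---
# Rough outline of how the inputs declared in define() might be assembled with
# the AiiDA process builder and submitted. The code labels, the parameter names
# inside the Dict nodes and the parent node pk are placeholders/assumptions.
def _example_submit_yambo():
    from aiida.orm import load_code, load_node
    from aiida.engine import submit
    builder = YamboCalculation.get_builder()
    builder.code = load_code('yambo@cluster')                 # assumed code label
    builder.preprocessing_code = load_code('p2y@cluster')     # assumed code label
    builder.parameters = Dict(dict={'NGsBlkXp': 2, 'NGsBlkXp_units': 'Ry', 'em1d': True})  # assumed parameter names
    builder.settings = Dict(dict={'INITIALISE': False, 'COPY_SAVE': False, 'COPY_DBS': False, 'RESTART_YAMBO': False})
    builder.parent_folder = load_node(1234).outputs.remote_folder  # assumed parent pk
    return submit(builder)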
|
# Generated by Django 3.0.3 on 2020-03-02 14:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('website', models.URLField(blank=True)),
('biography', models.TextField(blank=True)),
('phone_number', models.CharField(blank=True, max_length=20)),
('picture', models.ImageField(blank=True, null=True, upload_to='users/pictures')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
import pytest
from ...annotations import skip_if_continuous_integration
try:
from pyglet.media.drivers import pulse
has_pulse = True
except ImportError:
has_pulse = False
try:
from pyglet.media.drivers import openal
has_openal = True
except ImportError:
has_openal = False
try:
from pyglet.media.drivers import directsound
has_directsound = True
except ImportError:
has_directsound = False
def check_listener_defaults(listener):
assert listener.volume == 1.0
assert listener.position == (0, 0, 0)
assert listener.forward_orientation == (0, 0, -1)
assert listener.up_orientation == (0, 1, 0)
def check_modifying_values(listener):
listener.volume = 0.5
listener.position = (-1, 0, 0)
listener.forward_orientation = (0, 0, 1)
listener.up_orientation = (0, -1, 0)
assert listener.volume == 0.5
assert listener.position == (-1, 0, 0)
assert listener.forward_orientation == (0, 0, 1)
assert listener.up_orientation == (0, -1, 0)
@pytest.mark.skipif(not has_openal, reason="Test requires OpenAL")
def test_openal_listener():
driver = openal.create_audio_driver()
listener = driver.get_listener()
check_listener_defaults(listener=listener)
check_modifying_values(listener=listener)
# Need to garbage collect the listener before the driver is deleted
del listener
@skip_if_continuous_integration() # test user cannot connect to PulseAudio daemon
@pytest.mark.skipif(not has_pulse, reason="Test requires PulseAudio")
def test_pulse_listener():
driver = pulse.create_audio_driver()
listener = driver.get_listener()
check_listener_defaults(listener=listener)
check_modifying_values(listener=listener)
# Need to garbage collect the listener before the driver is deleted
del listener
@pytest.mark.skipif(not has_directsound, reason="Test requires DirectSound")
def test_directsound_listener():
driver = directsound.create_audio_driver()
listener = driver.get_listener()
check_listener_defaults(listener=listener)
check_modifying_values(listener=listener)
# Need to garbage collect the listener before the driver is deleted
del listener
|
import datetime
import os
from airflow.models.dag import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.utils.dates import days_ago
from dagster import DagsterEventType, execute_pipeline
from dagster.core.instance import AIRFLOW_EXECUTION_DATE_STR
from dagster.core.storage.compute_log_manager import ComputeIOType
from dagster.core.test_utils import instance_for_test
from dagster.seven import get_current_datetime_in_utc
from dagster_airflow.dagster_pipeline_factory import make_dagster_pipeline_from_airflow_dag
default_args = {
"owner": "dagster",
"start_date": days_ago(1),
}
EXECUTION_DATE = get_current_datetime_in_utc()
EXECUTION_DATE_MINUS_WEEK = EXECUTION_DATE - datetime.timedelta(days=7)
EXECUTION_DATE_FMT = EXECUTION_DATE.strftime("%Y-%m-%d")
EXECUTION_DATE_MINUS_WEEK_FMT = EXECUTION_DATE_MINUS_WEEK.strftime("%Y-%m-%d")
def normalize_file_content(s):
return "\n".join([line for line in s.replace(os.linesep, "\n").split("\n") if line])
def check_compute_logs(manager, result, execution_date_fmt):
assert result.success
compute_steps = [
event.step_key
for event in result.step_event_list
if event.event_type == DagsterEventType.STEP_START
]
assert compute_steps == [
"airflow_templated.compute",
]
for step_key in compute_steps:
compute_io_path = manager.get_local_path(result.run_id, step_key, ComputeIOType.STDOUT)
assert os.path.exists(compute_io_path)
stdout_file = open(compute_io_path, "r")
file_contents = normalize_file_content(stdout_file.read())
stdout_file.close()
assert (
file_contents.count(
"INFO - Running command: \n echo '{execution_date_fmt}'\n".format(
execution_date_fmt=execution_date_fmt
)
)
== 1
)
assert (
file_contents.count(
"INFO - {execution_date_fmt}\n".format(execution_date_fmt=execution_date_fmt)
)
== 1
)
assert file_contents.count("INFO - Command exited with return code 0") == 1
def get_dag():
dag = DAG(dag_id="dag", default_args=default_args, schedule_interval=None,)
templated_command = """
echo '{{ ds }}'
"""
# pylint: disable=unused-variable
t1 = BashOperator(
task_id="templated", depends_on_past=False, bash_command=templated_command, dag=dag,
)
return dag
def test_pipeline_tags():
dag = get_dag()
with instance_for_test() as instance:
manager = instance.compute_log_manager
# When mode is default and tags are set, run with tags
result = execute_pipeline(
pipeline=make_dagster_pipeline_from_airflow_dag(
dag=dag, tags={AIRFLOW_EXECUTION_DATE_STR: EXECUTION_DATE_MINUS_WEEK_FMT}
),
instance=instance,
)
check_compute_logs(manager, result, EXECUTION_DATE_MINUS_WEEK_FMT)
def test_pipeline_auto_tag():
dag = get_dag()
with instance_for_test() as instance:
manager = instance.compute_log_manager
pre_execute_time = get_current_datetime_in_utc()
# When tags are not set, run with current time
result = execute_pipeline(
pipeline=make_dagster_pipeline_from_airflow_dag(dag=dag), instance=instance,
)
post_execute_time = get_current_datetime_in_utc()
compute_io_path = manager.get_local_path(
result.run_id, "airflow_templated.compute", ComputeIOType.STDOUT
)
assert os.path.exists(compute_io_path)
stdout_file = open(compute_io_path, "r")
file_contents = normalize_file_content(stdout_file.read())
stdout_file.close()
search_str = "INFO - Running command: \n echo '"
date_start = file_contents.find(search_str) + len(search_str)
date_end = date_start + 10 # number of characters in YYYY-MM-DD
date = file_contents[date_start:date_end]
check_compute_logs(manager, result, date)
pre_execute_time_fmt = pre_execute_time.strftime("%Y-%m-%d")
post_execute_time_fmt = post_execute_time.strftime("%Y-%m-%d")
assert date in [pre_execute_time_fmt, post_execute_time_fmt]
|
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
import attr
from synapse.api.constants import Membership
from tests.server import make_request, render
@attr.s
class RestHelper(object):
"""Contains extra helper functions to quickly and clearly perform a given
REST action, which isn't the focus of the test.
"""
hs = attr.ib()
resource = attr.ib()
auth_user_id = attr.ib()
def create_room_as(self, room_creator, is_public=True, tok=None):
temp_id = self.auth_user_id
self.auth_user_id = room_creator
path = "/_matrix/client/r0/createRoom"
content = {}
if not is_public:
content["visibility"] = "private"
if tok:
path = path + "?access_token=%s" % tok
request, channel = make_request(
self.hs.get_reactor(), "POST", path, json.dumps(content).encode('utf8')
)
render(request, self.resource, self.hs.get_reactor())
assert channel.result["code"] == b"200", channel.result
self.auth_user_id = temp_id
return channel.json_body["room_id"]
def invite(self, room=None, src=None, targ=None, expect_code=200, tok=None):
self.change_membership(
room=room,
src=src,
targ=targ,
tok=tok,
membership=Membership.INVITE,
expect_code=expect_code,
)
def join(self, room=None, user=None, expect_code=200, tok=None):
self.change_membership(
room=room,
src=user,
targ=user,
tok=tok,
membership=Membership.JOIN,
expect_code=expect_code,
)
def leave(self, room=None, user=None, expect_code=200, tok=None):
self.change_membership(
room=room,
src=user,
targ=user,
tok=tok,
membership=Membership.LEAVE,
expect_code=expect_code,
)
def change_membership(self, room, src, targ, membership, tok=None, expect_code=200):
temp_id = self.auth_user_id
self.auth_user_id = src
path = "/_matrix/client/r0/rooms/%s/state/m.room.member/%s" % (room, targ)
if tok:
path = path + "?access_token=%s" % tok
data = {"membership": membership}
request, channel = make_request(
self.hs.get_reactor(), "PUT", path, json.dumps(data).encode('utf8')
)
render(request, self.resource, self.hs.get_reactor())
assert int(channel.result["code"]) == expect_code, (
"Expected: %d, got: %d, resp: %r"
% (expect_code, int(channel.result["code"]), channel.result["body"])
)
self.auth_user_id = temp_id
def send(self, room_id, body=None, txn_id=None, tok=None, expect_code=200):
if txn_id is None:
txn_id = "m%s" % (str(time.time()))
if body is None:
body = "body_text_here"
path = "/_matrix/client/r0/rooms/%s/send/m.room.message/%s" % (room_id, txn_id)
content = {"msgtype": "m.text", "body": body}
if tok:
path = path + "?access_token=%s" % tok
request, channel = make_request(
self.hs.get_reactor(), "PUT", path, json.dumps(content).encode('utf8')
)
render(request, self.resource, self.hs.get_reactor())
assert int(channel.result["code"]) == expect_code, (
"Expected: %d, got: %d, resp: %r"
% (expect_code, int(channel.result["code"]), channel.result["body"])
)
return channel.json_body
def send_state(self, room_id, event_type, body, tok, expect_code=200):
path = "/_matrix/client/r0/rooms/%s/state/%s" % (room_id, event_type)
if tok:
path = path + "?access_token=%s" % tok
request, channel = make_request(
self.hs.get_reactor(), "PUT", path, json.dumps(body).encode('utf8')
)
render(request, self.resource, self.hs.get_reactor())
assert int(channel.result["code"]) == expect_code, (
"Expected: %d, got: %d, resp: %r"
% (expect_code, int(channel.result["code"]), channel.result["body"])
)
return channel.json_body
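# --- Hedged usage sketch (illustration only, not part of the helper) ---
# How a test might drive RestHelper: create a private room as one user, invite
# and join a second user, then send a message. The homeserver, resource and
# user ids are placeholders supplied by the surrounding test case.
def _example_flow(hs, resource):
    helper = RestHelper(hs=hs, resource=resource, auth_user_id="@alice:test")
    room_id = helper.create_room_as("@alice:test", is_public=False)
    helper.invite(room=room_id, src="@alice:test", targ="@bob:test")
    helper.join(room=room_id, user="@bob:test")
    helper.send(room_id, body="hello from the test")
    return room_id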
|
# Generated by Django 3.1.3 on 2021-03-27 10:49
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recipes', '0011_auto_20210327_1046'),
]
operations = [
migrations.AlterField(
model_name='ingredientrecipe',
name='amount',
field=models.PositiveIntegerField(null=True, validators=[django.core.validators.MinValueValidator(1)]),
),
]
|
"""Test move to well commands."""
from mock import AsyncMock # type: ignore[attr-defined]
from opentrons.protocol_engine.commands import (
MoveToWellRequest,
MoveToWellResult,
)
def test_move_to_well_request() -> None:
"""It should be able to create a MoveToWellRequest."""
request = MoveToWellRequest(
pipetteId="abc",
labwareId="123",
wellName="A3",
)
assert request.pipetteId == "abc"
assert request.labwareId == "123"
assert request.wellName == "A3"
def test_move_to_well_result() -> None:
"""It should be able to create a MoveToWellResult."""
# NOTE(mc, 2020-11-17): this model has no properties at this time
result = MoveToWellResult()
assert result
async def test_move_to_well_implementation(mock_handlers: AsyncMock) -> None:
"""A MoveToWellRequest should have an execution implementation."""
mock_handlers.movement.move_to_well.return_value = None
request = MoveToWellRequest(
pipetteId="abc",
labwareId="123",
wellName="A3",
)
impl = request.get_implementation()
result = await impl.execute(mock_handlers)
assert result == MoveToWellResult()
mock_handlers.movement.move_to_well.assert_called_with(
pipette_id="abc",
labware_id="123",
well_name="A3",
)
|
import os
import sys
from os import path
current_dir = path.dirname(path.abspath(__file__))
while path.split(current_dir)[-1] != r'Heron':
current_dir = path.dirname(current_dir)
sys.path.insert(0, path.dirname(current_dir))
from Heron import general_utils as gu
Exec = os.path.abspath(__file__)
# <editor-fold desc="The following code is called from the GUI process as part of the generation of the node.
# It is meant to create node specific elements (not part of a generic node).
# This is where a new node's individual elements should be defined">
"""
Properties of the generated Node
"""
BaseName = 'User Input'
NodeAttributeNames = ['Parameters', 'Text Out']
NodeAttributeType = ['Static', 'Output']
ParameterNames = ['Visualisation', 'Output Type', 'User Input']
ParameterTypes = ['bool', 'list', 'str']
ParametersDefaultValues = [False, ['string', 'int', 'float'], '']
WorkerDefaultExecutable = os.path.join(os.path.dirname(Exec), 'user_input_worker.py')
# </editor-fold>
# <editor-fold desc="The following code is called as its own process when the editor starts the graph">
if __name__ == "__main__":
user_input_com = gu.start_the_source_communications_process(NodeAttributeType, NodeAttributeNames)
gu.register_exit_signals(user_input_com.on_kill)
user_input_com.start_ioloop()
# </editor-fold>
|
from simply_block.simply_sign import SimplySign
# Token
public_key = "hmac_pub_1"
private_key = "hmac_priv_1"
# Required Params
url = "http://127.0.0.1:8000/v1/eth/contract/build/"
file_path = "/home/lenovo/Documents/simplyblock/ethereum-service/contracts/DappToken.sol"
data = {
'contract_name': 'contract_22'
}
files = {
'contract': file_path
}
# Request
response = SimplySign(private_key, public_key).gateway_request(url, data, files=files)
print(response)
print(response.text)
|
from django.urls import path,re_path
from groups import views
app_name = 'groups'
urlpatterns = [
path('',views.ListGroup.as_view(), name= 'all'),
path('new/',views.CreateGroup.as_view(), name='create'),
path('posts/in/<slug>/',views.SingleGroup.as_view(),name='single'),
path('join/<slug>/',views.JoinGroup.as_view(),name='join'),
path('leave/<slug>/', views.LeaveGroup.as_view(),name='leave')
]
|
"""
Skills to control teufel raumfeld streaming devices
RaumfeldTVWakeup is a Skill that wakes up a speaker when the TV is turned on.
Copyright (c) 2021 Timo Haeckel
"""
from skills import SkillWithState
import raumfeld
RAUMFELD_URI_PLACEHOLDER = "dlna-playcontainer://uuid%3Abd4f7f00-aa40-4e4a-a54e-ec64e9944e23?sid=urn%3Aupnp-org%3AserviceId%3AContentDirectory&cid=0%2FTidal%2FDirectAccess%2FArtist%2F8372%2FTopTracks&fid=0&fii=0"
class RaumfeldTVWakeup(SkillWithState):
""" Skill to wakeup a raumfeld speaker connected to a smart tv.
This skill wakes up the raumfeld speaker connected to the smart tv when it
is turned on.
Settings
--------
{
"statePrefix" : "DevicePresence:",
"tvAddress" : "192.168.178.42",
"tvSpeakerRoomName" : "TV Speaker"
}
Attributes
----------
tvAddress : str
The TV's IPv4 address to be detected in the local network.
STATE_PREFIX : str
The prefix from the DeviceDetectionSkill used to form the state key in
the state database for the TV host, i.e. STATE_PREFIX + tvAddress
"""
def __init__(self, statedb, settingsFile=""):
"""
Parameters
----------
statedb : statedb.StateDataBase
The shared state data base instance used for all skills in
the home automation setup
settingsFile : str
Path to the global skill settings file
"""
SkillWithState.__init__(self,
name="RaumfeldTVWakeup",
statedb=statedb,
settingsFile=settingsFile)
raumfeld.init()
self.alreadyAwake = False
self.speaker = self.findSkillSettingWithKey("tvSpeakerRoomName")
self.tvAddress = self.findSkillSettingWithKey("tvAddress")
self.STATE_PREFIX = self.findSkillSettingWithKey("statePrefix")
def wakeup(self, speaker):
rooms = raumfeld.getRoomsByName(speaker)
rooms[0].play(RAUMFELD_URI_PLACEHOLDER)
rooms[0].pause()
def task(self):
""" Wakeup TV speakers task
This function wakes up the TV speakers when the TV device is turned on.
"""
tvState = self.readState(self.STATE_PREFIX + self.tvAddress)
if tvState is not None:
if tvState.currentlyPresent:
#tv is turned on!
if not self.alreadyAwake:
self.wakeup(self.speaker)
self.alreadyAwake = True
self.log("TV Speaker woken up")
elif self.alreadyAwake:
#tv turned off!
self.alreadyAwake = False
self.log("TV turned off sleep allowed")
|
"""
This file belongs to https://github.com/bitkeks/python-netflow-v9-softflowd.
The test packets (defined below as hex streams) were extracted from "real"
softflowd exports based on a sample PCAP capture file.
Copyright 2016-2020 Dominik Pataky <software+pynetflow@dpataky.eu>
Licensed under MIT License. See LICENSE.
"""
# The flowset with 2 templates (IPv4 and IPv6) and 8 flows with data
import queue
import random
import socket
import time
#from netflow.collector import ThreadedNetFlowListener
# Invalid export hex stream
PACKET_INVALID = "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
CONNECTION = ('127.0.0.1', 1337)
NUM_PACKETS = 1000
def gen_pkts(n, idx):
for x in range(n):
if x == idx:
yield PACKET_V9_TEMPLATE
else:
yield random.choice(PACKETS_V9)
def emit_packets(packets, delay=0.0001):
"""Send the provided packets to the listener"""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
for p in packets:
sock.sendto(bytes.fromhex(p), CONNECTION)
time.sleep(delay)
sock.close()
def send_packets(packets, delay=0.0001, store_packets=-1) -> (list, float, float):
"""Sends the packets and measures timing.
The ThreadedNetFlowListener import is currently commented out above, so no
exports are collected here; an empty list is returned together with the
timestamps taken before and after sending.
Intended return shape: ([(ts, export), ...], time_started_sending, time_stopped_sending)
"""
#listener = ThreadedNetFlowListener(*CONNECTION)
tstart = time.time()
emit_packets(packets, delay=delay)
time.sleep(0.5) # Allow packets to be sent and received
tend = time.time()
#listener.start()
return [], tstart, tend
"""
def send_recv_packets(packets, delay=0.0001, store_packets=-1) -> (list, float, float):
listener = ThreadedNetFlowListener(*CONNECTION)
tstart = time.time()
emit_packets(packets, delay=delay)
time.sleep(0.5) # Allow packets to be sent and received
tend = time.time()
listener.start()
pkts = []
to_pad = 0
while True:
try:
packet = listener.get(timeout=0.5)
if -1 == store_packets or store_packets > 0:
# Case where a program yields from the queue and stores all packets.
pkts.append(packet)
if store_packets != -1 and len(pkts) > store_packets:
to_pad += len(pkts) # Hack for testing
pkts.clear()
else:
# Performance measurements for cases where yielded objects are processed
# immediately instead of stored. Add empty tuple to retain counting possibility.
pkts.append(())
except queue.Empty:
break
listener.stop()
listener.join()
if to_pad > 0:
pkts = [()] * to_pad + pkts
return pkts, tstart, tend
"""
def generate_packets(amount, version, template_every_x=100):
packets = [PACKET_IPFIX]
template = PACKET_IPFIX_TEMPLATE
if version == 1:
packets = [PACKET_V1]
elif version == 5:
packets = [PACKET_V5]
elif version == 9:
packets = [*PACKETS_V9]
template = PACKET_V9_TEMPLATE
if amount < template_every_x:
template_every_x = 10
# If the list of test packets is only one item big (the same packet is used over and over),
# do not use random.choice - it costs performance and results in the same packet every time.
def single_packet(pkts):
return pkts[0]
packet_func = single_packet
if len(packets) > 1:
packet_func = random.choice
for x in range(amount):
if x % template_every_x == 0 and version in [9, 10]:
# First packet always a template, then periodically
# Note: this was once based on random.random, but it costs performance
yield template
else:
yield packet_func(packets)
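# --- Hedged usage sketch (illustration only) ---
# Ties generate_packets() and emit_packets() together: build a stream of
# NetFlow v9 test packets (template re-sent every 100 packets) and fire them at
# the address in CONNECTION. Assumes something is listening on 127.0.0.1:1337;
# otherwise the UDP datagrams are simply dropped.
def send_generated_v9(amount=NUM_PACKETS, delay=0.0001):
    packets = list(generate_packets(amount, version=9))
    emit_packets(packets, delay=delay)
    return len(packets)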
# Example export for v1 which contains two flows from one ICMP ping request/reply session
PACKET_V1 = "000100020001189b5e80c32c2fd41848ac110002ac11000100000000000000000000000a00000348" \
"000027c700004af100000800000001000000000000000000ac110001ac1100020000000000000000" \
"0000000a00000348000027c700004af100000000000001000000000000000000"
# Example export for v5 which contains three flows, two for ICMP ping and one multicast on interface (224.0.0.251)
PACKET_V5 = "00050003000379a35e80c58622a55ab00000000000000000ac110002ac1100010000000000000000" \
"0000000a0000034800002f4c0000527600000800000001000000000000000000ac110001ac110002" \
"00000000000000000000000a0000034800002f4c0000527600000000000001000000000000000000" \
"ac110001e00000fb000000000000000000000001000000a90000e01c0000e01c14e914e900001100" \
"0000000000000000"
PACKET_V9_TEMPLATE = "0009000a000000035c9f55980000000100000000000000400400000e00080004000c000400150004" \
"001600040001000400020004000a0004000e000400070002000b00020004000100060001003c0001" \
"00050001000000400800000e001b0010001c001000150004001600040001000400020004000a0004" \
"000e000400070002000b00020004000100060001003c000100050001040001447f0000017f000001" \
"fb3c1aaafb3c18fd000190100000004b00000000000000000050942c061b04007f0000017f000001" \
"fb3c1aaafb3c18fd00000f94000000360000000000000000942c0050061f04007f0000017f000001" \
"fb3c1cfcfb3c1a9b0000d3fc0000002a000000000000000000509434061b04007f0000017f000001" \
"fb3c1cfcfb3c1a9b00000a490000001e000000000000000094340050061f04007f0000017f000001" \
"fb3bb82cfb3ba48b000002960000000300000000000000000050942a061904007f0000017f000001" \
"fb3bb82cfb3ba48b00000068000000020000000000000000942a0050061104007f0000017f000001" \
"fb3c1900fb3c18fe0000004c0000000100000000000000000035b3c9110004007f0000017f000001" \
"fb3c1900fb3c18fe0000003c000000010000000000000000b3c9003511000400"
# This packet is special. We take PACKET_V9_TEMPLATE and re-order the templates and flows.
# The first line is the header, the smaller lines the templates and the long lines the flows (limited to 80 chars)
PACKET_V9_TEMPLATE_MIXED = ("0009000a000000035c9f55980000000100000000" # header
"040001447f0000017f000001fb3c1aaafb3c18fd000190100000004b00000000000000000050942c"
"061b04007f0000017f000001fb3c1aaafb3c18fd00000f94000000360000000000000000942c0050"
"061f04007f0000017f000001fb3c1cfcfb3c1a9b0000d3fc0000002a000000000000000000509434"
"061b04007f0000017f000001fb3c1cfcfb3c1a9b00000a490000001e000000000000000094340050"
"061f04007f0000017f000001fb3bb82cfb3ba48b000002960000000300000000000000000050942a"
"061904007f0000017f000001fb3bb82cfb3ba48b00000068000000020000000000000000942a0050"
"061104007f0000017f000001fb3c1900fb3c18fe0000004c0000000100000000000000000035b3c9"
"110004007f0000017f000001fb3c1900fb3c18fe0000003c000000010000000000000000b3c90035"
"11000400" # end of flow segments
"000000400400000e00080004000c000400150004001600040001000400020004" # template 1024
"000a0004000e000400070002000b00020004000100060001003c000100050001"
"000000400800000e001b0010001c001000150004001600040001000400020004" # template 2048
"000a0004000e000400070002000b00020004000100060001003c000100050001")
# Three packets without templates, each with 12 flows
PACKETS_V9 = [
"0009000c000000035c9f55980000000200000000040001e47f0000017f000001fb3c1a17fb3c19fd"
"000001480000000200000000000000000035ea82110004007f0000017f000001fb3c1a17fb3c19fd"
"0000007a000000020000000000000000ea820035110004007f0000017f000001fb3c1a17fb3c19fd"
"000000f80000000200000000000000000035c6e2110004007f0000017f000001fb3c1a17fb3c19fd"
"0000007a000000020000000000000000c6e20035110004007f0000017f000001fb3c1a9efb3c1a9c"
"0000004c0000000100000000000000000035adc1110004007f0000017f000001fb3c1a9efb3c1a9c"
"0000003c000000010000000000000000adc10035110004007f0000017f000001fb3c1b74fb3c1b72"
"0000004c0000000100000000000000000035d0b3110004007f0000017f000001fb3c1b74fb3c1b72"
"0000003c000000010000000000000000d0b30035110004007f0000017f000001fb3c2f59fb3c1b71"
"00001a350000000a000000000000000000509436061b04007f0000017f000001fb3c2f59fb3c1b71"
"0000038a0000000a000000000000000094360050061b04007f0000017f000001fb3c913bfb3c9138"
"0000004c0000000100000000000000000035e262110004007f0000017f000001fb3c913bfb3c9138"
"0000003c000000010000000000000000e262003511000400",
"0009000c000000035c9f55980000000300000000040001e47f0000017f000001fb3ca523fb3c913b"
"0000030700000005000000000000000000509438061b04007f0000017f000001fb3ca523fb3c913b"
"000002a200000005000000000000000094380050061b04007f0000017f000001fb3f7fe1fb3dbc97"
"0002d52800000097000000000000000001bb8730061b04007f0000017f000001fb3f7fe1fb3dbc97"
"0000146c000000520000000000000000873001bb061f04007f0000017f000001fb3d066ffb3d066c"
"0000004c0000000100000000000000000035e5bd110004007f0000017f000001fb3d066ffb3d066c"
"0000003c000000010000000000000000e5bd0035110004007f0000017f000001fb3d1a61fb3d066b"
"000003060000000500000000000000000050943a061b04007f0000017f000001fb3d1a61fb3d066b"
"000002a2000000050000000000000000943a0050061b04007f0000017f000001fb3fed00fb3f002c"
"0000344000000016000000000000000001bbae50061f04007f0000017f000001fb3fed00fb3f002c"
"00000a47000000120000000000000000ae5001bb061b04007f0000017f000001fb402f17fb402a75"
"0003524c000000a5000000000000000001bbc48c061b04007f0000017f000001fb402f17fb402a75"
"000020a60000007e0000000000000000c48c01bb061f0400",
"0009000c000000035c9f55980000000400000000040001e47f0000017f000001fb3d7ba2fb3d7ba0"
"0000004c0000000100000000000000000035a399110004007f0000017f000001fb3d7ba2fb3d7ba0"
"0000003c000000010000000000000000a3990035110004007f0000017f000001fb3d8f85fb3d7b9f"
"000003070000000500000000000000000050943c061b04007f0000017f000001fb3d8f85fb3d7b9f"
"000002a2000000050000000000000000943c0050061b04007f0000017f000001fb3d9165fb3d7f6d"
"0000c97b0000002a000000000000000001bbae48061b04007f0000017f000001fb3d9165fb3d7f6d"
"000007f40000001a0000000000000000ae4801bb061b04007f0000017f000001fb3dbc96fb3dbc7e"
"0000011e0000000200000000000000000035bd4f110004007f0000017f000001fb3dbc96fb3dbc7e"
"0000008e000000020000000000000000bd4f0035110004007f0000017f000001fb3ddbb3fb3c1a18"
"0000bfee0000002f00000000000000000050ae56061b04007f0000017f000001fb3ddbb3fb3c1a18"
"00000982000000270000000000000000ae560050061b04007f0000017f000001fb3ddbb3fb3c1a18"
"0000130e0000001200000000000000000050e820061b04007f0000017f000001fb3ddbb3fb3c1a18"
"0000059c000000140000000000000000e8200050061b0400"
]
# Example export for IPFIX (v10) with 4 templates, 1 option template and 8 data flow sets
PACKET_IPFIX_TEMPLATE = "000a05202d45a4700000001300000000000200400400000e00080004000c00040016000400150004" \
"0001000400020004000a0004000e000400070002000b00020004000100060001003c000100050001" \
"000200340401000b00080004000c000400160004001500040001000400020004000a0004000e0004" \
"00200002003c000100050001000200400800000e001b0010001c0010001600040015000400010004" \
"00020004000a0004000e000400070002000b00020004000100060001003c00010005000100020034" \
"0801000b001b0010001c001000160004001500040001000400020004000a0004000e0004008b0002" \
"003c0001000500010003001e010000050001008f000400a000080131000401320004013000020100" \
"001a00000a5900000171352e672100000001000000000001040000547f000001ac110002ff7ed688" \
"ff7ed73a000015c70000000d000000000000000001bbe1a6061b0400ac1100027f000001ff7ed688" \
"ff7ed73a0000074f000000130000000000000000e1a601bb061f04000401004cac110002ac110001" \
"ff7db9e0ff7dc1d0000000fc00000003000000000000000008000400ac110001ac110002ff7db9e0" \
"ff7dc1d0000000fc0000000300000000000000000000040008010220fde66f14e0f1960900000242" \
"ac110002ff0200000000000000000001ff110001ff7dfad6ff7e0e95000001b00000000600000000" \
"0000000087000600fde66f14e0f196090000affeaffeaffefdabcdef123456789000000000000001" \
"ff7e567fff7e664a0000020800000005000000000000000080000600fde66f14e0f1960900000000" \
"00000001fde66f14e0f196090000affeaffeaffeff7e567fff7e664a000002080000000500000000" \
"0000000081000600fe800000000000000042aafffe73bbfafde66f14e0f196090000affeaffeaffe" \
"ff7e6aaaff7e6aaa0000004800000001000000000000000087000600fde66f14e0f1960900000242" \
"ac110002fe800000000000000042aafffe73bbfaff7e6aaaff7e6aaa000000400000000100000000" \
"0000000088000600fe800000000000000042acfffe110002fe800000000000000042aafffe73bbfa" \
"ff7e7eaaff7e7eaa0000004800000001000000000000000087000600fe800000000000000042aaff" \
"fe73bbfafe800000000000000042acfffe110002ff7e7eaaff7e7eaa000000400000000100000000" \
"0000000088000600fe800000000000000042aafffe73bbfafe800000000000000042acfffe110002" \
"ff7e92aaff7e92aa0000004800000001000000000000000087000600fe800000000000000042acff" \
"fe110002fe800000000000000042aafffe73bbfaff7e92aaff7e92aa000000400000000100000000" \
"000000008800060008000044fde66f14e0f196090000affeaffeaffefd41b7143f86000000000000" \
"00000001ff7ec2a0ff7ec2a00000004a000000010000000000000000d20100351100060004000054" \
"ac1100027f000001ff7ed62eff7ed68700000036000000010000000000000000c496003511000400" \
"7f000001ac110002ff7ed62eff7ed687000000760000000100000000000000000035c49611000400" \
"08000044fde66f14e0f196090000affeaffeaffefd41b7143f8600000000000000000001ff7ef359" \
"ff7ef3590000004a000000010000000000000000b1e700351100060004000054ac1100027f000001" \
"ff7f06e4ff7f06e800000036000000010000000000000000a8f90035110004007f000001ac110002" \
"ff7f06e4ff7f06e8000000a60000000100000000000000000035a8f911000400"
# Example export for IPFIX with two data sets
PACKET_IPFIX = "000a00d02d45a47000000016000000000801007cfe800000000000000042acfffe110002fde66f14" \
"e0f196090000000000000001ff7f0755ff7f07550000004800000001000000000000000087000600" \
"fdabcdef123456789000000000000001fe800000000000000042acfffe110002ff7f0755ff7f0755" \
"000000400000000100000000000000008800060008000044fde66f14e0f196090000affeaffeaffe" \
"2a044e42020000000000000000000223ff7f06e9ff7f22d500000140000000040000000000000000" \
"e54c01bb06020600"
PACKET_IPFIX_TEMPLATE_ETHER = "000a05002d45a4700000000d00000000" \
"000200500400001200080004000c000400160004001500040001000400020004000a0004000e0004" \
"00070002000b00020004000100060001003c000100050001003a0002003b00020038000600390006" \
"000200440401000f00080004000c000400160004001500040001000400020004000a0004000e0004" \
"00200002003c000100050001003a0002003b000200380006003900060002005008000012001b0010" \
"001c001000160004001500040001000400020004000a0004000e000400070002000b000200040001" \
"00060001003c000100050001003a0002003b00020038000600390006000200440801000f001b0010" \
"001c001000160004001500040001000400020004000a0004000e0004008b0002003c000100050001" \
"003a0002003b000200380006003900060003001e010000050001008f000400a00008013100040132" \
"0004013000020100001a00000009000000b0d80a558000000001000000000001040000747f000001" \
"ac110002e58b988be58b993e000015c70000000d000000000000000001bbe1a6061b040000000000" \
"123456affefeaffeaffeaffeac1100027f000001e58b988be58b993e0000074f0000001300000000" \
"00000000e1a601bb061f040000000000affeaffeaffe123456affefe0401006cac110002ac110001" \
"e58a7be3e58a83d3000000fc0000000300000000000000000800040000000000affeaffeaffe0242" \
"aa73bbfaac110001ac110002e58a7be3e58a83d3000000fc00000003000000000000000000000400" \
"00000000123456affefeaffeaffeaffe080102b0fde66f14e0f196090000affeaffeaffeff020000" \
"0000000000000001ff110001e58abcd9e58ad098000001b000000006000000000000000087000600" \
"00000000affeaffeaffe3333ff110001fde66f14e0f196090000affeaffeaffefde66f14e0f19609" \
"0000000000000001e58b1883e58b284e000002080000000500000000000000008000060000000000" \
"affeaffeaffe123456affefefdabcdef123456789000000000000001fde66f14e0f1960900000242" \
"ac110002e58b1883e58b284e0000020800000005000000000000000081000600000000000242aa73" \
"bbfaaffeaffeaffefe800000000000000042aafffe73bbfafde66f14e0f196090000affeaffeaffe" \
"e58b2caee58b2cae000000480000000100000000000000008700060000000000123456affefe0242" \
"ac110002fde66f14e0f196090000affeaffeaffefe800000000000000042aafffe73bbfae58b2cae" \
"e58b2cae000000400000000100000000000000008800060000000000affeaffeaffe123456affefe" \
"fe800000000000000042acfffe110002fe800000000000000042aafffe73bbfae58b40aee58b40ae" \
"000000480000000100000000000000008700060000000000affeaffeaffe123456affefefe800000" \
"000000000042aafffe73bbfafe800000000000000042acfffe110002e58b40aee58b40ae00000040" \
"0000000100000000000000008800060000000000123456affefeaffeaffeaffefe80000000000000" \
"0042aafffe73bbfafe800000000000000042acfffe110002e58b54aee58b54ae0000004800000001" \
"00000000000000008700060000000000123456affefeaffeaffeaffefe800000000000000042acff" \
"fe110002fe800000000000000042aafffe73bbfae58b54aee58b54ae000000400000000100000000" \
"000000008800060000000000affeaffeaffe123456affefe"
PACKET_IPFIX_ETHER = "000a02905e8b0aa90000001600000000" \
"08000054fde66f14e0f196090000affeaffeaffefd40abcdabcd00000000000000011111e58b84a4" \
"e58b84a40000004a000000010000000000000000d20100351100060000000000affeaffeaffe0242" \
"aa73bbfa04000074ac1100027f000001e58b9831e58b988a00000036000000010000000000000000" \
"c49600351100040000000000affeaffeaffe123456affefe7f000001ac110002e58b9831e58b988a" \
"000000760000000100000000000000000035c4961100040000000000123456affefeaffeaffeaffe" \
"08000054fde66f14e0f196090000affeaffeaffefd40abcdabcd00000000000000011111e58bb55c" \
"e58bb55c0000004a000000010000000000000000b1e700351100060000000000affeaffeaffe0242" \
"aa73bbfa04000074ac1100027f000001e58bc8e8e58bc8ec00000036000000010000000000000000" \
"a8f900351100040000000000affeaffeaffe123456affefe7f000001ac110002e58bc8e8e58bc8ec" \
"000000a60000000100000000000000000035a8f91100040000000000123456affefeaffeaffeaffe" \
"0801009cfe800000000000000042acfffe110002fdabcdef123456789000000000000001e58bc958" \
"e58bc958000000480000000100000000000000008700060000000000affeaffeaffe123456affefe" \
"fdabcdef123456789000000000000001fe800000000000000042acfffe110002e58bc958e58bc958" \
"000000400000000100000000000000008800060000000000123456affefeaffeaffeaffe08000054" \
"fde66f14e0f196090000affeaffeaffe2a044e42020000000000000000000223e58bc8ede58be4d8" \
"00000140000000040000000000000000e54c01bb0602060000000000affeaffeaffe123456affefe"
|
from __future__ import annotations
import inspect
import sys
from functools import partial, reduce
from importlib import import_module
from operator import attrgetter, not_
from textwrap import dedent
from types import MethodType
from typing import Any, Callable, Dict, Generic, TypeVar, Union, overload
from .utils import no_default
__all__ = ('identity', 'apply', 'thread_first', 'thread_last', 'memoize',
'compose', 'compose_left', 'pipe', 'complement', 'juxt', 'do',
'curry', 'flip', 'excepts')
PYPY = hasattr(sys, 'pypy_version_info')
_T = TypeVar("_T")
_T2 = TypeVar("_T2")
def identity(x: _T) ->_T:
""" Identity function. Return x
>>> identity(3)
3
"""
return x
def apply(*func_and_args, **kwargs):
""" Applies a function and returns the results
>>> def double(x): return 2*x
>>> def inc(x): return x + 1
>>> apply(double, 5)
10
>>> tuple(map(apply, [double, inc, double], [10, 500, 8000]))
(20, 501, 16000)
"""
if not func_and_args:
raise TypeError('func argument is required')
func, args = func_and_args[0], func_and_args[1:]
return func(*args, **kwargs)
def thread_first(val, *forms):
""" Thread value through a sequence of functions/forms
>>> def double(x): return 2*x
>>> def inc(x): return x + 1
>>> thread_first(1, inc, double)
4
If the function expects more than one input you can specify those inputs
in a tuple. The value is used as the first input.
>>> def add(x, y): return x + y
>>> def pow(x, y): return x**y
>>> thread_first(1, (add, 4), (pow, 2)) # pow(add(1, 4), 2)
25
So in general
thread_first(x, f, (g, y, z))
expands to
g(f(x), y, z)
See Also:
thread_last
"""
def evalform_front(val, form):
if callable(form):
return form(val)
if isinstance(form, tuple):
func, args = form[0], form[1:]
args = (val,) + args
return func(*args)
return reduce(evalform_front, forms, val)
def thread_last(val, *forms):
""" Thread value through a sequence of functions/forms
>>> def double(x): return 2*x
>>> def inc(x): return x + 1
>>> thread_last(1, inc, double)
4
If the function expects more than one input you can specify those inputs
in a tuple. The value is used as the last input.
>>> def add(x, y): return x + y
>>> def pow(x, y): return x**y
>>> thread_last(1, (add, 4), (pow, 2)) # pow(2, add(4, 1))
32
So in general
thread_last(x, f, (g, y, z))
expands to
g(y, z, f(x))
>>> def iseven(x):
... return x % 2 == 0
>>> list(thread_last([1, 2, 3], (map, inc), (filter, iseven)))
[2, 4]
See Also:
thread_first
"""
def evalform_back(val, form):
if callable(form):
return form(val)
if isinstance(form, tuple):
func, args = form[0], form[1:]
args = args + (val,)
return func(*args)
return reduce(evalform_back, forms, val)
def instanceproperty(fget=None, fset=None, fdel=None, doc=None, classval=None):
""" Like @property, but returns ``classval`` when used as a class attribute
>>> class MyClass(object):
... '''The class docstring'''
... @instanceproperty(classval=__doc__)
... def __doc__(self):
... return 'An object docstring'
... @instanceproperty
... def val(self):
... return 42
...
>>> MyClass.__doc__
'The class docstring'
>>> MyClass.val is None
True
>>> obj = MyClass()
>>> obj.__doc__
'An object docstring'
>>> obj.val
42
"""
if fget is None:
return partial(instanceproperty, fset=fset, fdel=fdel, doc=doc,
classval=classval)
return InstanceProperty(fget=fget, fset=fset, fdel=fdel, doc=doc,
classval=classval)
class InstanceProperty(property):
""" Like @property, but returns ``classval`` when used as a class attribute
Should not be used directly. Use ``instanceproperty`` instead.
"""
def __init__(self, fget=None, fset=None, fdel=None, doc=None,
classval=None):
self.classval = classval
property.__init__(self, fget=fget, fset=fset, fdel=fdel, doc=doc)
def __get__(self, obj, type=None):
if obj is None:
return self.classval
return property.__get__(self, obj, type)
def __reduce__(self):
state = (self.fget, self.fset, self.fdel, self.__doc__, self.classval)
return InstanceProperty, state
class curry(Generic[_T]):
""" Curry a callable function
Enables partial application of arguments through calling a function with an
incomplete set of arguments.
>>> def mul(x, y):
... return x * y
>>> mul = curry(mul)
>>> double = mul(2)
>>> double(10)
20
Also supports keyword arguments
>>> @curry # Can use curry as a decorator
... def f(x, y, a=10):
... return a * (x + y)
>>> add = f(a=1)
>>> add(2, 3)
5
See Also:
toolz.curried - namespace of curried functions
https://toolz.readthedocs.io/en/latest/curry.html
"""
@overload
def __init__(self, func: Callable[..., _T], *args: Any, **kwargs: Any) -> None: ...
# this overload should never be used, mypy complains if only one overload exists
@overload
def __init__(self, *args: Union[Callable[..., _T], Any], **kwargs: Any) -> None: ...
def __init__(self, *args: Any, **kwargs: Any) -> None:
if not args:
raise TypeError('__init__() takes at least 2 arguments (1 given)')
func, args = args[0], args[1:]
if not callable(func):
raise TypeError("Input must be callable")
# curry- or functools.partial-like object? Unpack and merge arguments
if (
hasattr(func, 'func')
and hasattr(func, 'args')
and hasattr(func, 'keywords')
and isinstance(func.args, tuple)
):
_kwargs = {}
if func.keywords:
_kwargs.update(func.keywords)
_kwargs.update(kwargs)
kwargs = _kwargs
args = func.args + args
func = func.func
if kwargs:
self._partial = partial(func, *args, **kwargs)
else:
self._partial = partial(func, *args)
self.__doc__ = getattr(func, '__doc__', None)
self.__name__ = getattr(func, '__name__', '<curry>')
self.__module__ = getattr(func, '__module__', None)
self.__qualname__ = getattr(func, '__qualname__', None)
self._sigspec = None
self._has_unknown_args = None
@instanceproperty
def func(self) -> Callable[..., _T]:
return self._partial.func
@instanceproperty
def __signature__(self):
sig = inspect.signature(self.func)
args = self.args or ()
keywords = self.keywords or {}
if is_partial_args(self.func, args, keywords, sig) is False:
raise TypeError('curry object has incorrect arguments')
params = list(sig.parameters.values())
skip = 0
for param in params[:len(args)]:
if param.kind == param.VAR_POSITIONAL:
break
skip += 1
kwonly = False
newparams = []
for param in params[skip:]:
kind = param.kind
default = param.default
if kind == param.VAR_KEYWORD:
pass
elif kind == param.VAR_POSITIONAL:
if kwonly:
continue
elif param.name in keywords:
default = keywords[param.name]
kind = param.KEYWORD_ONLY
kwonly = True
else:
if kwonly:
kind = param.KEYWORD_ONLY
if default is param.empty:
default = no_default
newparams.append(param.replace(default=default, kind=kind))
return sig.replace(parameters=newparams)
@instanceproperty
def args(self):
return self._partial.args
@instanceproperty
def keywords(self) -> Dict[str, Any]:
return self._partial.keywords
@instanceproperty
def func_name(self) -> str:
return self.__name__
def __str__(self) -> str:
return str(self.func)
def __repr__(self) -> str:
return repr(self.func)
def __hash__(self) -> int:
return hash((self.func, self.args,
frozenset(self.keywords.items()) if self.keywords
else None))
def __eq__(self, other: Any) -> bool:
return (isinstance(other, curry) and self.func == other.func and
self.args == other.args and self.keywords == other.keywords)
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def __call__(self, *args: Any, **kwargs: Any) -> Union[_T, curry[_T]]:
try:
return self._partial(*args, **kwargs)
except TypeError as exc:
if self._should_curry(args, kwargs, exc):
return self.bind(*args, **kwargs)
raise
def _should_curry(self, args, kwargs, exc=None):
func = self.func
args = self.args + args
if self.keywords:
kwargs = dict(self.keywords, **kwargs)
if self._sigspec is None:
sigspec = self._sigspec = _sigs.signature_or_spec(func)
self._has_unknown_args = has_varargs(func, sigspec) is not False
else:
sigspec = self._sigspec
if is_partial_args(func, args, kwargs, sigspec) is False:
# Nothing can make the call valid
return False
elif self._has_unknown_args:
# The call may be valid and raised a TypeError, but we curry
# anyway because the function may have `*args`. This is useful
# for decorators with signature `func(*args, **kwargs)`.
return True
elif not is_valid_args(func, args, kwargs, sigspec):
# Adding more arguments may make the call valid
return True
else:
# There was a genuine TypeError
return False
def bind(self, *args, **kwargs) -> curry[_T]:
return type(self)(self, *args, **kwargs)
def call(self, *args, **kwargs) -> _T:
return self._partial(*args, **kwargs)
def __get__(self, instance, owner):
if instance is None:
return self
return curry(self, instance)
def __reduce__(self):
func = self.func
modname = getattr(func, '__module__', None)
qualname = getattr(func, '__qualname__', None)
if qualname is None: # pragma: no cover
qualname = getattr(func, '__name__', None)
is_decorated = None
if modname and qualname:
attrs = []
obj = import_module(modname)
for attr in qualname.split('.'):
if isinstance(obj, curry):
attrs.append('func')
obj = obj.func
obj = getattr(obj, attr, None)
if obj is None:
break
attrs.append(attr)
if isinstance(obj, curry) and obj.func is func:
is_decorated = obj is self
qualname = '.'.join(attrs)
func = '%s:%s' % (modname, qualname)
# functools.partial objects can't be pickled
userdict = tuple((k, v) for k, v in self.__dict__.items()
if k not in ('_partial', '_sigspec'))
state = (type(self), func, self.args, self.keywords, userdict,
is_decorated)
return _restore_curry, state
def _restore_curry(cls, func, args, kwargs, userdict, is_decorated):
if isinstance(func, str):
modname, qualname = func.rsplit(':', 1)
obj = import_module(modname)
for attr in qualname.split('.'):
obj = getattr(obj, attr)
if is_decorated:
return obj
func = obj.func
obj = cls(func, *args, **(kwargs or {}))
obj.__dict__.update(userdict)
return obj
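# A minimal sketch of the pickling round-trip that ``curry.__reduce__`` and
# ``_restore_curry`` above enable (``operator.add`` is used only as an example
# of an importable, module-level callable):
#
#     >>> import pickle
#     >>> from operator import add
#     >>> inc = curry(add)(1)
#     >>> pickle.loads(pickle.dumps(inc))(41)
#     42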
@curry
def memoize(func, cache=None, key=None):
""" Cache a function's result for speedy future evaluation
Considerations:
Trades memory for speed.
Only use on pure functions.
>>> def add(x, y): return x + y
>>> add = memoize(add)
Or use as a decorator
>>> @memoize
... def add(x, y):
... return x + y
Use the ``cache`` keyword to provide a dict-like object as an initial cache
>>> @memoize(cache={(1, 2): 3})
... def add(x, y):
... return x + y
Note that the above works as a decorator because ``memoize`` is curried.
It is also possible to provide a ``key(args, kwargs)`` function that
calculates keys used for the cache, which receives an ``args`` tuple and
``kwargs`` dict as input, and must return a hashable value. However,
the default key function should be sufficient most of the time.
>>> # Use key function that ignores extraneous keyword arguments
>>> @memoize(key=lambda args, kwargs: args)
... def add(x, y, verbose=False):
... if verbose:
... print('Calculating %s + %s' % (x, y))
... return x + y
"""
if cache is None:
cache = {}
try:
may_have_kwargs = has_keywords(func) is not False
# Is unary function (single arg, no variadic argument or keywords)?
is_unary = is_arity(1, func)
except TypeError: # pragma: no cover
may_have_kwargs = True
is_unary = False
if key is None:
if is_unary:
def key(args, kwargs):
return args[0]
elif may_have_kwargs:
def key(args, kwargs):
return (
args or None,
frozenset(kwargs.items()) if kwargs else None,
)
else:
def key(args, kwargs):
return args
def memof(*args, **kwargs):
k = key(args, kwargs)
try:
return cache[k]
except TypeError:
raise TypeError("Arguments to memoized function must be hashable")
except KeyError:
cache[k] = result = func(*args, **kwargs)
return result
try:
memof.__name__ = func.__name__
except AttributeError:
pass
memof.__doc__ = func.__doc__
memof.__wrapped__ = func
return memof
class Compose(object):
""" A composition of functions
See Also:
compose
"""
__slots__ = 'first', 'funcs'
def __init__(self, funcs):
funcs = tuple(reversed(funcs))
self.first = funcs[0]
self.funcs = funcs[1:]
def __call__(self, *args, **kwargs):
ret = self.first(*args, **kwargs)
for f in self.funcs:
ret = f(ret)
return ret
def __getstate__(self):
return self.first, self.funcs
def __setstate__(self, state):
self.first, self.funcs = state
@instanceproperty(classval=__doc__)
def __doc__(self):
def composed_doc(*fs):
"""Generate a docstring for the composition of fs.
"""
if not fs:
# Argument name for the docstring.
return '*args, **kwargs'
return '{f}({g})'.format(f=fs[0].__name__, g=composed_doc(*fs[1:]))
try:
return (
'lambda *args, **kwargs: ' +
composed_doc(*reversed((self.first,) + self.funcs))
)
except AttributeError:
# One of our callables does not have a `__name__`, whatever.
return 'A composition of functions'
@property
def __name__(self):
try:
return '_of_'.join(
(f.__name__ for f in reversed((self.first,) + self.funcs))
)
except AttributeError:
return type(self).__name__
def __repr__(self):
return '{.__class__.__name__}{!r}'.format(
self, tuple(reversed((self.first, ) + self.funcs)))
def __eq__(self, other):
if isinstance(other, Compose):
return other.first == self.first and other.funcs == self.funcs
return NotImplemented
def __ne__(self, other):
equality = self.__eq__(other)
return NotImplemented if equality is NotImplemented else not equality
def __hash__(self):
return hash(self.first) ^ hash(self.funcs)
# Mimic the descriptor behavior of python functions.
# i.e. let Compose be called as a method when bound to a class.
# adapted from
# docs.python.org/3/howto/descriptor.html#functions-and-methods
def __get__(self, obj, objtype=None):
return self if obj is None else MethodType(self, obj)
# introspection with Signature is only possible from py3.3+
@instanceproperty
def __signature__(self):
base = inspect.signature(self.first)
last = inspect.signature(self.funcs[-1])
return base.replace(return_annotation=last.return_annotation)
__wrapped__ = instanceproperty(attrgetter('first'))
def compose(*funcs):
""" Compose functions to operate in series.
Returns a function that applies other functions in sequence.
Functions are applied from right to left so that
``compose(f, g, h)(x, y)`` is the same as ``f(g(h(x, y)))``.
If no arguments are provided, the identity function (f(x) = x) is returned.
>>> inc = lambda i: i + 1
>>> compose(str, inc)(3)
'4'
See Also:
compose_left
pipe
"""
if not funcs:
return identity
if len(funcs) == 1:
return funcs[0]
else:
return Compose(funcs)
def compose_left(*funcs):
""" Compose functions to operate in series.
Returns a function that applies other functions in sequence.
Functions are applied from left to right so that
``compose_left(f, g, h)(x, y)`` is the same as ``h(g(f(x, y)))``.
If no arguments are provided, the identity function (f(x) = x) is returned.
>>> inc = lambda i: i + 1
>>> compose_left(inc, str)(3)
'4'
See Also:
compose
pipe
"""
return compose(*reversed(funcs))
def pipe(data, *funcs):
""" Pipe a value through a sequence of functions
I.e. ``pipe(data, f, g, h)`` is equivalent to ``h(g(f(data)))``
We think of the value as progressing through a pipe of several
transformations, much like pipes in UNIX
``$ cat data | f | g | h``
>>> double = lambda i: 2 * i
>>> pipe(3, double, str)
'6'
See Also:
compose
compose_left
thread_first
thread_last
"""
for func in funcs:
data = func(data)
return data
def complement(func):
""" Convert a predicate function to its logical complement.
In other words, return a function that, for inputs that normally
yield True, yields False, and vice-versa.
>>> def iseven(n): return n % 2 == 0
>>> isodd = complement(iseven)
>>> iseven(2)
True
>>> isodd(2)
False
"""
return compose(not_, func)
class juxt(object):
""" Creates a function that calls several functions with the same arguments
Takes several functions and returns a function that applies its arguments
to each of those functions then returns a tuple of the results.
Name comes from juxtaposition: the fact of two things being seen or placed
close together with contrasting effect.
>>> inc = lambda x: x + 1
>>> double = lambda x: x * 2
>>> juxt(inc, double)(10)
(11, 20)
>>> juxt([inc, double])(10)
(11, 20)
"""
__slots__ = ['funcs']
def __init__(self, *funcs):
if len(funcs) == 1 and not callable(funcs[0]):
funcs = funcs[0]
self.funcs = tuple(funcs)
def __call__(self, *args, **kwargs):
return tuple(func(*args, **kwargs) for func in self.funcs)
def __getstate__(self):
return self.funcs
def __setstate__(self, state):
self.funcs = state
def do(func: Callable[[_T], Any], x: _T) -> _T:
""" Runs ``func`` on ``x``, returns ``x``
Because the results of ``func`` are not returned, only the side
effects of ``func`` are relevant.
Logging functions can be made by composing ``do`` with a storage function
like ``list.append`` or ``file.write``
>>> from toolz import compose
>>> from toolz.curried import do
>>> log = []
>>> inc = lambda x: x + 1
>>> inc = compose(inc, do(log.append))
>>> inc(1)
2
>>> inc(11)
12
>>> log
[1, 11]
"""
func(x)
return x
@curry
def flip(func: Callable[[_T, _T], _T2], a: _T, b: _T) -> _T2:
""" Call the function call with the arguments flipped
This function is curried.
>>> def div(a, b):
... return a // b
...
>>> flip(div, 2, 6)
3
>>> div_by_two = flip(div, 2)
>>> div_by_two(4)
2
This is particularly useful for built in functions and functions defined
in C extensions that accept positional only arguments. For example:
isinstance, issubclass.
>>> data = [1, 'a', 'b', 2, 1.5, object(), 3]
>>> only_ints = list(filter(flip(isinstance, int), data))
>>> only_ints
[1, 2, 3]
"""
return func(b, a)
def return_none(exc: Any) -> None:
""" Returns None.
"""
return None
class excepts(object):
"""A wrapper around a function to catch exceptions and
dispatch to a handler.
This is like a functional try/except block, in the same way that
ifexprs are functional if/else blocks.
Examples
--------
>>> excepting = excepts(
... ValueError,
... lambda a: [1, 2].index(a),
... lambda _: -1,
... )
>>> excepting(1)
0
>>> excepting(3)
-1
Multiple exceptions and default except clause.
>>> excepting = excepts((IndexError, KeyError), lambda a: a[0])
>>> excepting([])
>>> excepting([1])
1
>>> excepting({})
>>> excepting({0: 1})
1
"""
def __init__(self, exc, func, handler=return_none):
self.exc = exc
self.func = func
self.handler = handler
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except self.exc as e:
return self.handler(e)
@instanceproperty(classval=__doc__)
def __doc__(self):
exc = self.exc
try:
if isinstance(exc, tuple):
exc_name = '(%s)' % ', '.join(
map(attrgetter('__name__'), exc),
)
else:
exc_name = exc.__name__
return dedent(
"""\
A wrapper around {inst.func.__name__!r} that will except:
{exc}
and handle any exceptions with {inst.handler.__name__!r}.
Docs for {inst.func.__name__!r}:
{inst.func.__doc__}
Docs for {inst.handler.__name__!r}:
{inst.handler.__doc__}
"""
).format(
inst=self,
exc=exc_name,
)
except AttributeError:
return type(self).__doc__
@property
def __name__(self):
exc = self.exc
try:
if isinstance(exc, tuple):
exc_name = '_or_'.join(map(attrgetter('__name__'), exc))
else:
exc_name = exc.__name__
return '%s_excepting_%s' % (self.func.__name__, exc_name)
except AttributeError:
return 'excepting'
def _check_sigspec(sigspec, func, builtin_func, *builtin_args):
if sigspec is None:
try:
sigspec = inspect.signature(func)
except (ValueError, TypeError) as e:
sigspec = e
if isinstance(sigspec, ValueError):
return None, builtin_func(*builtin_args)
elif not isinstance(sigspec, inspect.Signature):
if (
func in _sigs.signatures
and ((
hasattr(func, '__signature__')
and hasattr(func.__signature__, '__get__')
))
):
val = builtin_func(*builtin_args)
return None, val
return None, False
return sigspec, None
if PYPY: # pragma: no cover
_check_sigspec_orig = _check_sigspec
def _check_sigspec(sigspec, func, builtin_func, *builtin_args):
# PyPy may lie, so use our registry for builtins instead
if func in _sigs.signatures:
val = builtin_func(*builtin_args)
return None, val
return _check_sigspec_orig(sigspec, func, builtin_func, *builtin_args)
_check_sigspec.__doc__ = """ \
Private function to aid in introspection compatibly across Python versions.
If a callable doesn't have a signature (Python 3) or an argspec (Python 2),
the signature registry in toolz._signatures is used.
"""
def num_required_args(func, sigspec=None):
sigspec, rv = _check_sigspec(sigspec, func, _sigs._num_required_args,
func)
if sigspec is None:
return rv
return sum(1 for p in sigspec.parameters.values()
if p.default is p.empty
and p.kind in (p.POSITIONAL_OR_KEYWORD, p.POSITIONAL_ONLY))
def has_varargs(func, sigspec=None):
sigspec, rv = _check_sigspec(sigspec, func, _sigs._has_varargs, func)
if sigspec is None:
return rv
return any(p.kind == p.VAR_POSITIONAL
for p in sigspec.parameters.values())
def has_keywords(func, sigspec=None):
sigspec, rv = _check_sigspec(sigspec, func, _sigs._has_keywords, func)
if sigspec is None:
return rv
return any(p.default is not p.empty
or p.kind in (p.KEYWORD_ONLY, p.VAR_KEYWORD)
for p in sigspec.parameters.values())
def is_valid_args(func, args, kwargs, sigspec=None):
sigspec, rv = _check_sigspec(sigspec, func, _sigs._is_valid_args,
func, args, kwargs)
if sigspec is None:
return rv
try:
sigspec.bind(*args, **kwargs)
except TypeError:
return False
return True
def is_partial_args(func, args, kwargs, sigspec=None):
sigspec, rv = _check_sigspec(sigspec, func, _sigs._is_partial_args,
func, args, kwargs)
if sigspec is None:
return rv
try:
sigspec.bind_partial(*args, **kwargs)
except TypeError:
return False
return True
def is_arity(n, func, sigspec=None):
""" Does a function have only n positional arguments?
This function relies on introspection and does not call the function.
Returns None if validity can't be determined.
>>> def f(x):
... return x
>>> is_arity(1, f)
True
>>> def g(x, y=1):
... return x + y
>>> is_arity(1, g)
False
"""
sigspec, rv = _check_sigspec(sigspec, func, _sigs._is_arity, n, func)
if sigspec is None:
return rv
num = num_required_args(func, sigspec)
if num is not None:
num = num == n
if not num:
return False
varargs = has_varargs(func, sigspec)
if varargs:
return False
keywords = has_keywords(func, sigspec)
if keywords:
return False
if num is None or varargs is None or keywords is None: # pragma: no cover
return None
return True
num_required_args.__doc__ = """ \
Number of required positional arguments
This function relies on introspection and does not call the function.
Returns None if validity can't be determined.
>>> def f(x, y, z=3):
... return x + y + z
>>> num_required_args(f)
2
>>> def g(*args, **kwargs):
... pass
>>> num_required_args(g)
0
"""
has_varargs.__doc__ = """ \
Does a function have variadic positional arguments?
This function relies on introspection and does not call the function.
Returns None if validity can't be determined.
>>> def f(*args):
... return args
>>> has_varargs(f)
True
>>> def g(**kwargs):
... return kwargs
>>> has_varargs(g)
False
"""
has_keywords.__doc__ = """ \
Does a function have keyword arguments?
This function relies on introspection and does not call the function.
Returns None if validity can't be determined.
>>> def f(x, y=0):
... return x + y
>>> has_keywords(f)
True
"""
is_valid_args.__doc__ = """ \
Is ``func(*args, **kwargs)`` a valid function call?
This function relies on introspection and does not call the function.
Returns None if validity can't be determined.
>>> def add(x, y):
... return x + y
>>> is_valid_args(add, (1,), {})
False
>>> is_valid_args(add, (1, 2), {})
True
>>> is_valid_args(map, (), {})
False
**Implementation notes**
Python 2 relies on ``inspect.getargspec``, which only works for
user-defined functions. Python 3 uses ``inspect.signature``, which
works for many more types of callables.
Many builtins in the standard library are also supported.
"""
is_partial_args.__doc__ = """ \
Can partial(func, *args, **kwargs)(*args2, **kwargs2) be a valid call?
Returns True *only* if the call is valid or if it is possible for the
call to become valid by adding more positional or keyword arguments.
This function relies on introspection and does not call the function.
Returns None if validity can't be determined.
>>> def add(x, y):
... return x + y
>>> is_partial_args(add, (1,), {})
True
>>> is_partial_args(add, (1, 2), {})
True
>>> is_partial_args(add, (1, 2, 3), {})
False
>>> is_partial_args(map, (), {})
True
**Implementation notes**
Python 2 relies on ``inspect.getargspec``, which only works for
user-defined functions. Python 3 uses ``inspect.signature``, which
works for many more types of callables.
Many builtins in the standard library are also supported.
"""
from . import _signatures as _sigs
|
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import pennylane as qml
from pennylane import numpy as np
pytestmark = pytest.mark.gpu
torch = pytest.importorskip("torch")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="no cuda support")
class TestTorchDevice:
def test_device_to_cuda(self):
"""Checks device executes with cuda is input data is cuda"""
dev = qml.device("default.qubit.torch", wires=1)
x = torch.tensor(0.1, requires_grad=True, device=torch.device("cuda"))
with qml.tape.QuantumTape() as tape:
qml.RX(x, wires=0)
qml.expval(qml.PauliX(0))
res = dev.execute(tape)
assert res.is_cuda
assert dev._torch_device == "cuda"
res.backward()
assert x.grad.is_cuda
def test_mixed_devices(self):
"""Asserts works with both cuda and cpu input data"""
dev = qml.device("default.qubit.torch", wires=1)
x = torch.tensor(0.1, requires_grad=True, device=torch.device("cuda"))
y = torch.tensor(0.2, requires_grad=True, device=torch.device("cpu"))
with qml.tape.QuantumTape() as tape:
qml.RX(x, wires=0)
qml.RY(y, wires=0)
qml.expval(qml.PauliX(0))
res = dev.execute(tape)
assert res.is_cuda
assert dev._torch_device == "cuda"
res.backward()
assert x.grad.is_cuda
        # check that accessing the gradient of the cpu tensor does not error
ygrad = y.grad
def test_matrix_input(self):
"""Test goes to GPU for matrix valued inputs."""
dev = qml.device("default.qubit.torch", wires=1)
U = torch.eye(2, requires_grad=False, device=torch.device("cuda"))
with qml.tape.QuantumTape() as tape:
qml.QubitUnitary(U, wires=0)
qml.expval(qml.PauliZ(0))
res = dev.execute(tape)
assert res.is_cuda
assert dev._torch_device == "cuda"
def test_resets(self):
"""Asserts reverts to cpu after execution on gpu"""
dev = qml.device("default.qubit.torch", wires=1)
x = torch.tensor(0.1, requires_grad=True, device=torch.device("cuda"))
y = torch.tensor(0.2, requires_grad=True, device=torch.device("cpu"))
with qml.tape.QuantumTape() as tape1:
qml.RX(x, wires=0)
qml.expval(qml.PauliZ(0))
res1 = dev.execute(tape1)
assert dev._torch_device == "cuda"
assert res1.is_cuda
with qml.tape.QuantumTape() as tape2:
qml.RY(y, wires=0)
qml.expval(qml.PauliZ(0))
res2 = dev.execute(tape2)
assert dev._torch_device == "cpu"
assert not res2.is_cuda
def test_integration(self):
"""Test cuda supported when device created in qnode creation."""
dev = qml.device("default.qubit", wires=1)
x = torch.tensor(0.1, requires_grad=True, device=torch.device("cuda"))
y = torch.tensor(0.2, requires_grad=True)
@qml.qnode(dev, interface="torch", diff_method="backprop")
def circ(x, y):
qml.RX(x, wires=0)
qml.RY(y, wires=0)
return qml.expval(qml.PauliZ(0))
res = circ(x, y)
assert res.is_cuda
assert circ.device._torch_device == "cuda"
res.backward()
assert x.grad.is_cuda
@pytest.mark.parametrize("init_device, par_device", [("cpu", "cuda"), ("cuda", "cpu")])
def test_different_devices_creation_and_parameters_warn(self, init_device, par_device):
"""Test that a warning is raised if the Torch device specified on
PennyLane device creation differs from the Torch device of gate
parameters.
"""
dev = qml.device("default.qubit.torch", wires=1, torch_device=init_device)
p = torch.tensor(0.543, dtype=torch.float64, device=par_device)
@qml.qnode(dev, interface="torch")
def circuit(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliY(0))
with pytest.warns(
UserWarning,
match=f"Torch device {init_device} specified upon PennyLane device creation does not match",
):
circuit(p)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="no cuda support")
class TestqnnTorchLayer:
def test_torch_device_cuda_if_tensors_on_cuda(self):
"""Test that if any tensor passed to operators is on the GPU then CUDA
is set internally as a device option for 'default.qubit.torch'."""
n_qubits = 3
n_layers = 1
dev = qml.device("default.qubit", wires=n_qubits)
@qml.qnode(dev)
def circuit(inputs, weights):
qml.templates.AngleEmbedding(inputs, wires=range(n_qubits))
qml.templates.BasicEntanglerLayers(weights, wires=range(n_qubits))
return [qml.expval(qml.PauliZ(wires=i)) for i in range(n_qubits)]
weight_shapes = {"weights": (n_layers, n_qubits)}
qlayer = qml.qnn.TorchLayer(circuit, weight_shapes)
x = torch.rand((5, n_qubits), dtype=torch.float64).to(torch.device("cuda"))
res = qlayer(x)
assert circuit.device.short_name == "default.qubit.torch"
assert circuit.device._torch_device == "cuda"
assert res.is_cuda
loss = torch.sum(res).squeeze()
loss.backward()
assert loss.is_cuda
def test_qnn_torchlayer(self):
"""Test if TorchLayer can be run on GPU"""
n_qubits = 4
dev = qml.device("default.qubit", wires=n_qubits)
@qml.qnode(dev, interface="torch")
def circuit(inputs, weights):
qml.templates.AngleEmbedding(inputs, wires=range(n_qubits))
qml.templates.BasicEntanglerLayers(weights, wires=range(n_qubits))
return [qml.expval(qml.PauliZ(wires=i)) for i in range(n_qubits)]
n_layers = 1
weight_shapes = {"weights": (n_layers, n_qubits)}
qlayer = qml.qnn.TorchLayer(circuit, weight_shapes)
x = torch.rand((5, n_qubits), dtype=torch.float64).to(torch.device("cuda"))
res = qlayer(x)
assert res.is_cuda
loss = torch.sum(res).squeeze()
loss.backward()
assert loss.is_cuda
|
import FWCore.ParameterSet.Config as cms
import RecoVertex.BeamSpotProducer.beamSpotOnlineProducer_cfi as _mod
onlineBeamSpotProducer = _mod.beamSpotOnlineProducer.clone(
src = 'scalersRawToDigi',
setSigmaZ = -1, #negative value disables it.
gtEvmLabel = 'gtEvmDigis'
)
from Configuration.Eras.Modifier_run3_common_cff import run3_common
run3_common.toModify(onlineBeamSpotProducer, useTransientRecord = True)
|
# coding: utf-8
"""
SendinBlue API
SendinBlue provide a RESTFul API that can be used with any languages. With this API, you will be able to : - Manage your campaigns and get the statistics - Manage your contacts - Send transactional Emails and SMS - and much more... You can download our wrappers at https://github.com/orgs/sendinblue **Possible responses** | Code | Message | | :-------------: | ------------- | | 200 | OK. Successful Request | | 201 | OK. Successful Creation | | 202 | OK. Request accepted | | 204 | OK. Successful Update/Deletion | | 400 | Error. Bad Request | | 401 | Error. Authentication Needed | | 402 | Error. Not enough credit, plan upgrade needed | | 403 | Error. Permission denied | | 404 | Error. Object does not exist | | 405 | Error. Method not allowed | | 406 | Error. Not Acceptable | # noqa: E501
OpenAPI spec version: 3.0.0
Contact: contact@sendinblue.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GetBlockedDomains(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'domains': 'list[str]'
}
attribute_map = {
'domains': 'domains'
}
def __init__(self, domains=None): # noqa: E501
"""GetBlockedDomains - a model defined in Swagger""" # noqa: E501
self._domains = None
self.discriminator = None
self.domains = domains
@property
def domains(self):
"""Gets the domains of this GetBlockedDomains. # noqa: E501
List of all blocked domains # noqa: E501
:return: The domains of this GetBlockedDomains. # noqa: E501
:rtype: list[str]
"""
return self._domains
@domains.setter
def domains(self, domains):
"""Sets the domains of this GetBlockedDomains.
List of all blocked domains # noqa: E501
:param domains: The domains of this GetBlockedDomains. # noqa: E501
:type: list[str]
"""
if domains is None:
raise ValueError("Invalid value for `domains`, must not be `None`") # noqa: E501
self._domains = domains
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(GetBlockedDomains, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GetBlockedDomains):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
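# Minimal usage sketch (the domain names below are illustrative placeholders,
# not values returned by the SendinBlue API):
#
#     model = GetBlockedDomains(domains=["example.com", "example.org"])
#     model.to_dict()    # -> {'domains': ['example.com', 'example.org']}
#     model == GetBlockedDomains(domains=["example.com", "example.org"])  # -> True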
|
import sense_hat
from sense_hat import SenseHat
import time
from enum import Enum
class Colors(Enum):
Green = (0, 255, 0)
Yellow = (255, 255, 0)
Blue = (0, 0, 255)
Red = (255, 0, 0)
    White = (255, 255, 255)
    Nothing = (0, 0, 0)
    Pink = (255, 105, 180)
    Orange = (255, 165, 0)
class DisplayManager(object):
def __raspberry(self):
G = Colors.Green.value
N = Colors.Nothing.value
R = Colors.Red.value
logo = [
N, G, G, N, N, G, G, N,
N, N, G, G, G, G, N, N,
N, N, R, R, R, R, N, N,
N, R, R, R, R, R, R, N,
R, R, R, R, R, R, R, R,
R, R, R, R, R, R, R, R,
N, R, R, R, R, R, R, N,
N, N, R, R, R, R, N, N,
]
return logo
def __lego(self):
W = Colors.White.value
R = Colors.Red.value
Y = Colors.Yellow.value
logo = [
R, Y, Y, Y, Y, R, R, R,
R, Y, W, W, Y, R, R, R,
R, Y, W, W, Y, R, R, R,
R, Y, W, W, Y, R, R, R,
R, Y, W, W, Y, Y, Y, R,
R, Y, W, W, W, W, Y, R,
R, Y, W, W, W, W, Y, R,
R, Y, Y, Y, Y, Y, Y, R,
]
return logo
def __figure(self):
N = Colors.Nothing.value
Y = Colors.Yellow.value
B = Colors.Blue.value
R = Colors.Red.value
logo = [
N, N, N, N, N, N, N, N,
N, N, Y, Y, Y, Y, N, N,
N, Y, Y, Y, Y, Y, Y, N,
N, Y, B, Y, Y, B, Y, N,
N, Y, Y, Y, Y, Y, Y, N,
N, Y, R, Y, Y, R, Y, N,
N, N, Y, R, R, Y, N, N,
N, N, N, Y, Y, N, N, N,
]
return logo
def __red4x2(self):
N = Colors.Nothing.value
Y = Colors.Red.value
logo = [
N, N, N, N, N, N, N, N,
N, N, N, N, N, N, N, N,
Y, N, Y, N, Y, N, Y, N,
Y, Y, Y, Y, Y, Y, Y, N,
Y, Y, Y, Y, Y, Y, Y, N,
Y, Y, Y, Y, Y, Y, Y, N,
N, N, N, N, N, N, N, N,
N, N, N, N, N, N, N, N,
]
return logo
def __yellow4x2(self):
N = Colors.Nothing.value
Y = Colors.Yellow.value
logo = [
N, N, N, N, N, N, N, N,
N, N, N, N, N, N, N, N,
Y, N, Y, N, Y, N, Y, N,
Y, Y, Y, Y, Y, Y, Y, N,
Y, Y, Y, Y, Y, Y, Y, N,
Y, Y, Y, Y, Y, Y, Y, N,
N, N, N, N, N, N, N, N,
N, N, N, N, N, N, N, N,
]
return logo
def __blue4x2(self):
N = Colors.Nothing.value
Y = Colors.Blue.value
logo = [
N, N, N, N, N, N, N, N,
N, N, N, N, N, N, N, N,
Y, N, Y, N, Y, N, Y, N,
Y, Y, Y, Y, Y, Y, Y, N,
Y, Y, Y, Y, Y, Y, Y, N,
Y, Y, Y, Y, Y, Y, Y, N,
N, N, N, N, N, N, N, N,
N, N, N, N, N, N, N, N,
]
return logo
def __red2x2(self):
N = Colors.Nothing.value
Y = Colors.Red.value
logo = [
N, N, N, N, N, N, N, N,
N, Y, Y, N, N, Y, Y, N,
N, Y, Y, Y, Y, Y, Y, N,
N, Y, Y, Y, Y, Y, Y, N,
N, Y, Y, Y, Y, Y, Y, N,
N, Y, Y, Y, Y, Y, Y, N,
N, N, N, N, N, N, N, N,
N, N, N, N, N, N, N, N,
]
return logo
def __yellow2x2(self):
N = Colors.Nothing.value
Y = Colors.Yellow.value
logo = [
N, N, N, N, N, N, N, N,
N, Y, Y, N, N, Y, Y, N,
N, Y, Y, Y, Y, Y, Y, N,
N, Y, Y, Y, Y, Y, Y, N,
N, Y, Y, Y, Y, Y, Y, N,
N, Y, Y, Y, Y, Y, Y, N,
N, N, N, N, N, N, N, N,
N, N, N, N, N, N, N, N,
]
return logo
def __blue2x2(self):
N = Colors.Nothing.value
Y = Colors.Blue.value
logo = [
N, N, N, N, N, N, N, N,
N, Y, Y, N, N, Y, Y, N,
N, Y, Y, Y, Y, Y, Y, N,
N, Y, Y, Y, Y, Y, Y, N,
N, Y, Y, Y, Y, Y, Y, N,
N, Y, Y, Y, Y, Y, Y, N,
N, N, N, N, N, N, N, N,
N, N, N, N, N, N, N, N,
]
return logo
def __unknown(self):
N = Colors.Nothing.value
R = Colors.Red.value
logo = [
N, N, N, R, R, N, N, N,
N, N, R, N, N, R, N, N,
N, R, N, N, N, N, R, N,
N, R, N, N, N, N, R, N,
N, N, R, N, N, R, N, N,
N, N, N, N, R, N, N, N,
N, N, N, N, N, N, N, N,
N, N, N, N, R, N, N, N,
]
return logo
def __init__(self):
self.s = SenseHat()
self.s.low_light = True
        self.__displayImage(self.__raspberry())  # Flash the Raspberry Pi logo at initialization
time.sleep(1)
self.s.clear()
def __displayImage(self, image):
self.s.set_pixels(image)
def displayImage(self, strImage):
print("Displaying " + strImage)
if 'yellow-4x2' in strImage.lower():
self.__displayImage(self.__yellow4x2())
elif 'yellow-2x2' in strImage.lower():
self.__displayImage(self.__yellow2x2())
elif 'red-4x2' in strImage.lower():
self.__displayImage(self.__red4x2())
elif 'red-2x2' in strImage.lower():
self.__displayImage(self.__red2x2())
elif 'blue-4x2' in strImage.lower():
self.__displayImage(self.__blue4x2())
elif 'blue-2x2' in strImage.lower():
self.__displayImage(self.__blue2x2())
elif 'figure' in strImage.lower():
self.__displayImage(self.__figure())
elif 'none' in strImage.lower():
self.__displayImage(self.__lego())
else:
self.__displayImage(self.__lego())
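# Hedged usage sketch: constructing DisplayManager flashes the Raspberry Pi logo
# on a physical Sense HAT, so this only runs on that hardware. The brick-name
# strings below simply mirror the branches handled in displayImage():
#
#     dm = DisplayManager()
#     dm.displayImage("red-2x2")         # shows the 2x2 red brick
#     dm.displayImage("something else")  # falls through to the LEGO logo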
|
# Copyright 2020 Proyectos y Sistemas de Mantenimiento SL (eProsima).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess, time, os, sys
# compose the arguments
fastserver_path = sys.argv[1] # '\\...\\discovery-server\\build64\\Debug\\fast-discovery-serverd-1.0.0'
server_listening_port = sys.argv[2] # CMake generated random port
discovery_path = sys.argv[3] # '\\...\\discovery-server\\build64\\Debug\\discovery-server-1.1.0d.exe'
xml_config_path = sys.argv[4] # '\\...\\discovery-server\\build64\\Debug\\test_15_fds.xml'
# print(f'arguments: {fastserver_path} {server_listening_port} {discovery_path} {xml_config_path}')
# launch server and clients
server = subprocess.Popen([fastserver_path, '-i', '5', '-l', '127.0.0.1', '-p', f'{server_listening_port}'])
clients = subprocess.Popen([discovery_path, xml_config_path])
# wait till clients test is finished, then kill the server
clients.communicate()
server.kill()
if clients.returncode: # 0 if everything goes fine, validation passed
print(f'discovery-server process fault on clients: returncode {clients.returncode}', file=sys.stderr)
sys.exit(clients.returncode)
# success
sys.exit(0)
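# Example invocation (the binary paths and XML file below are placeholders
# matching the argument comments above, and the port is arbitrary):
#
#   python <this_script>.py ./fast-discovery-server 11811 ./discovery-server ./test_15_fds.xml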
|
import os
import re
import glob
import subprocess
from collections import OrderedDict
# We need to define our own `Action` method to control the verbosity of output
# and whenever we need to run those commands in a subprocess on some platforms.
from SCons.Script import Action
from SCons import Node
from SCons.Script import Glob
from platform_methods import run_in_subprocess
def add_source_files(self, sources, files, warn_duplicates=True):
# Convert string to list of absolute paths (including expanding wildcard)
if isinstance(files, (str, bytes)):
# Keep SCons project-absolute path as they are (no wildcard support)
if files.startswith("#"):
if "*" in files:
print("ERROR: Wildcards can't be expanded in SCons project-absolute path: '{}'".format(files))
return
files = [files]
else:
dir_path = self.Dir(".").abspath
files = sorted(glob.glob(dir_path + "/" + files))
# Add each path as compiled Object following environment (self) configuration
for path in files:
obj = self.Object(path)
if obj in sources:
if warn_duplicates:
print('WARNING: Object "{}" already included in environment sources.'.format(obj))
else:
continue
sources.append(obj)
def disable_warnings(self):
# 'self' is the environment
if self.msvc:
# We have to remove existing warning level defines before appending /w,
# otherwise we get: "warning D9025 : overriding '/W3' with '/w'"
warn_flags = ["/Wall", "/W4", "/W3", "/W2", "/W1", "/WX"]
self.Append(CCFLAGS=["/w"])
self.Append(CFLAGS=["/w"])
self.Append(CXXFLAGS=["/w"])
self["CCFLAGS"] = [x for x in self["CCFLAGS"] if not x in warn_flags]
self["CFLAGS"] = [x for x in self["CFLAGS"] if not x in warn_flags]
self["CXXFLAGS"] = [x for x in self["CXXFLAGS"] if not x in warn_flags]
else:
self.Append(CCFLAGS=["-w"])
self.Append(CFLAGS=["-w"])
self.Append(CXXFLAGS=["-w"])
def add_module_version_string(self, s):
self.module_version_string += "." + s
def update_version(module_version_string=""):
build_name = "custom_build"
if os.getenv("BUILD_NAME") != None:
build_name = os.getenv("BUILD_NAME")
print("Using custom build name: " + build_name)
import version
# NOTE: It is safe to generate this file here, since this is still executed serially
f = open("core/version_generated.gen.h", "w")
f.write("/* THIS FILE IS GENERATED DO NOT EDIT */\n")
f.write("#ifndef VERSION_GENERATED_GEN_H\n")
f.write("#define VERSION_GENERATED_GEN_H\n")
f.write('#define VERSION_SHORT_NAME "' + str(version.short_name) + '"\n')
f.write('#define VERSION_NAME "' + str(version.name) + '"\n')
f.write("#define VERSION_MAJOR " + str(version.major) + "\n")
f.write("#define VERSION_MINOR " + str(version.minor) + "\n")
f.write("#define VERSION_PATCH " + str(version.patch) + "\n")
f.write('#define VERSION_STATUS "' + str(version.status) + '"\n')
f.write('#define VERSION_BUILD "' + str(build_name) + '"\n')
f.write('#define VERSION_MODULE_CONFIG "' + str(version.module_config) + module_version_string + '"\n')
f.write("#define VERSION_YEAR " + str(version.year) + "\n")
f.write('#define VERSION_WEBSITE "' + str(version.website) + '"\n')
f.write("#endif // VERSION_GENERATED_GEN_H\n")
f.close()
# NOTE: It is safe to generate this file here, since this is still executed serially
fhash = open("core/version_hash.gen.h", "w")
fhash.write("/* THIS FILE IS GENERATED DO NOT EDIT */\n")
fhash.write("#ifndef VERSION_HASH_GEN_H\n")
fhash.write("#define VERSION_HASH_GEN_H\n")
githash = ""
gitfolder = ".git"
if os.path.isfile(".git"):
module_folder = open(".git", "r").readline().strip()
if module_folder.startswith("gitdir: "):
gitfolder = module_folder[8:]
if os.path.isfile(os.path.join(gitfolder, "HEAD")):
head = open(os.path.join(gitfolder, "HEAD"), "r", encoding="utf8").readline().strip()
if head.startswith("ref: "):
head = os.path.join(gitfolder, head[5:])
if os.path.isfile(head):
githash = open(head, "r").readline().strip()
else:
githash = head
fhash.write('#define VERSION_HASH "' + githash + '"\n')
fhash.write("#endif // VERSION_HASH_GEN_H\n")
fhash.close()
def parse_cg_file(fname, uniforms, sizes, conditionals):
fs = open(fname, "r")
line = fs.readline()
while line:
if re.match(r"^\s*uniform", line):
res = re.match(r"uniform ([\d\w]*) ([\d\w]*)")
type = res.groups(1)
name = res.groups(2)
uniforms.append(name)
if type.find("texobj") != -1:
sizes.append(1)
else:
t = re.match(r"float(\d)x(\d)", type)
if t:
sizes.append(int(t.groups(1)) * int(t.groups(2)))
else:
t = re.match(r"float(\d)", type)
sizes.append(int(t.groups(1)))
if line.find("[branch]") != -1:
conditionals.append(name)
line = fs.readline()
fs.close()
def detect_modules(at_path):
module_list = OrderedDict() # name : path
modules_glob = os.path.join(at_path, "*")
files = glob.glob(modules_glob)
files.sort() # so register_module_types does not change that often, and also plugins are registered in alphabetic order
for x in files:
if not is_module(x):
continue
name = os.path.basename(x)
path = x.replace("\\", "/") # win32
module_list[name] = path
return module_list
def is_module(path):
return os.path.isdir(path) and os.path.exists(os.path.join(path, "SCsub"))
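# Hedged sketch: detect_modules scans a directory for subdirectories containing
# an SCsub file and returns an ordered name -> path mapping ("modules" is the
# conventional in-tree directory, used here only as an example argument):
#
#     module_list = detect_modules("modules")
#     # e.g. OrderedDict([('my_module', 'modules/my_module'), ...])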
def write_modules(module_list):
includes_cpp = ""
preregister_cpp = ""
register_cpp = ""
unregister_cpp = ""
for name, path in module_list.items():
try:
with open(os.path.join(path, "register_types.h")):
includes_cpp += '#include "' + path + '/register_types.h"\n'
preregister_cpp += "#ifdef MODULE_" + name.upper() + "_ENABLED\n"
preregister_cpp += "#ifdef MODULE_" + name.upper() + "_HAS_PREREGISTER\n"
preregister_cpp += "\tpreregister_" + name + "_types();\n"
preregister_cpp += "#endif\n"
preregister_cpp += "#endif\n"
register_cpp += "#ifdef MODULE_" + name.upper() + "_ENABLED\n"
register_cpp += "\tregister_" + name + "_types();\n"
register_cpp += "#endif\n"
unregister_cpp += "#ifdef MODULE_" + name.upper() + "_ENABLED\n"
unregister_cpp += "\tunregister_" + name + "_types();\n"
unregister_cpp += "#endif\n"
except OSError:
pass
modules_cpp = """// register_module_types.gen.cpp
/* THIS FILE IS GENERATED DO NOT EDIT */
#include "register_module_types.h"
#include "modules/modules_enabled.gen.h"
%s
void preregister_module_types() {
%s
}
void register_module_types() {
%s
}
void unregister_module_types() {
%s
}
""" % (
includes_cpp,
preregister_cpp,
register_cpp,
unregister_cpp,
)
# NOTE: It is safe to generate this file here, since this is still executed serially
with open("modules/register_module_types.gen.cpp", "w") as f:
f.write(modules_cpp)
def convert_custom_modules_path(path):
if not path:
return path
path = os.path.realpath(os.path.expanduser(os.path.expandvars(path)))
err_msg = "Build option 'custom_modules' must %s"
if not os.path.isdir(path):
raise ValueError(err_msg % "point to an existing directory.")
if path == os.path.realpath("modules"):
raise ValueError(err_msg % "be a directory other than built-in `modules` directory.")
if is_module(path):
raise ValueError(err_msg % "point to a directory with modules, not a single module.")
return path
def disable_module(self):
self.disabled_modules.append(self.current_module)
def module_check_dependencies(self, module, dependencies):
"""
Checks if module dependencies are enabled for a given module,
and prints a warning if they aren't.
Meant to be used in module `can_build` methods.
Returns a boolean (True if dependencies are satisfied).
"""
missing_deps = []
for dep in dependencies:
opt = "module_{}_enabled".format(dep)
if not opt in self or not self[opt]:
missing_deps.append(dep)
if missing_deps != []:
print(
"Disabling '{}' module as the following dependencies are not satisfied: {}".format(
module, ", ".join(missing_deps)
)
)
return False
else:
return True
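# Hedged usage sketch for module_check_dependencies: per its docstring it is
# meant to be called from a module's can_build hook ("my_module" and the
# "regex" dependency are made-up names for illustration):
#
#     def can_build(env, platform):
#         return env.module_check_dependencies("my_module", ["regex"])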
def use_windows_spawn_fix(self, platform=None):
if os.name != "nt":
return # not needed, only for windows
# On Windows, due to the limited command line length, when creating a static library
# from a very high number of objects SCons will invoke "ar" once per object file;
    # that makes object files with the same name be overwritten, so the last one wins and
    # the library loses symbols defined by the overwritten objects.
    # By enabling quick append instead of the default mode (replacing), libraries will
    # get built correctly regardless of the invocation strategy.
# Furthermore, since SCons will rebuild the library from scratch when an object file
# changes, no multiple versions of the same object file will be present.
self.Replace(ARFLAGS="q")
def mySubProcess(cmdline, env):
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
proc = subprocess.Popen(
cmdline,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
startupinfo=startupinfo,
shell=False,
env=env,
)
_, err = proc.communicate()
rv = proc.wait()
if rv:
print("=====")
print(err)
print("=====")
return rv
def mySpawn(sh, escape, cmd, args, env):
newargs = " ".join(args[1:])
cmdline = cmd + " " + newargs
rv = 0
env = {str(key): str(value) for key, value in iter(env.items())}
if len(cmdline) > 32000 and cmd.endswith("ar"):
cmdline = cmd + " " + args[1] + " " + args[2] + " "
for i in range(3, len(args)):
rv = mySubProcess(cmdline + args[i], env)
if rv:
break
else:
rv = mySubProcess(cmdline, env)
return rv
self["SPAWN"] = mySpawn
def save_active_platforms(apnames, ap):
for x in ap:
names = ["logo"]
if os.path.isfile(x + "/run_icon.png"):
names.append("run_icon")
for name in names:
pngf = open(x + "/" + name + ".png", "rb")
b = pngf.read(1)
str = " /* AUTOGENERATED FILE, DO NOT EDIT */ \n"
str += " static const unsigned char _" + x[9:] + "_" + name + "[]={"
while len(b) == 1:
str += hex(ord(b))
b = pngf.read(1)
if len(b) == 1:
str += ","
str += "};\n"
pngf.close()
# NOTE: It is safe to generate this file here, since this is still executed serially
wf = x + "/" + name + ".gen.h"
with open(wf, "w") as pngw:
pngw.write(str)
def no_verbose(sys, env):
colors = {}
# Colors are disabled in non-TTY environments such as pipes. This means
# that if output is redirected to a file, it will not contain color codes
if sys.stdout.isatty():
colors["cyan"] = "\033[96m"
colors["purple"] = "\033[95m"
colors["blue"] = "\033[94m"
colors["green"] = "\033[92m"
colors["yellow"] = "\033[93m"
colors["red"] = "\033[91m"
colors["end"] = "\033[0m"
else:
colors["cyan"] = ""
colors["purple"] = ""
colors["blue"] = ""
colors["green"] = ""
colors["yellow"] = ""
colors["red"] = ""
colors["end"] = ""
compile_source_message = "{}Compiling {}==> {}$SOURCE{}".format(
colors["blue"], colors["purple"], colors["yellow"], colors["end"]
)
java_compile_source_message = "{}Compiling {}==> {}$SOURCE{}".format(
colors["blue"], colors["purple"], colors["yellow"], colors["end"]
)
compile_shared_source_message = "{}Compiling shared {}==> {}$SOURCE{}".format(
colors["blue"], colors["purple"], colors["yellow"], colors["end"]
)
link_program_message = "{}Linking Program {}==> {}$TARGET{}".format(
colors["red"], colors["purple"], colors["yellow"], colors["end"]
)
link_library_message = "{}Linking Static Library {}==> {}$TARGET{}".format(
colors["red"], colors["purple"], colors["yellow"], colors["end"]
)
ranlib_library_message = "{}Ranlib Library {}==> {}$TARGET{}".format(
colors["red"], colors["purple"], colors["yellow"], colors["end"]
)
link_shared_library_message = "{}Linking Shared Library {}==> {}$TARGET{}".format(
colors["red"], colors["purple"], colors["yellow"], colors["end"]
)
java_library_message = "{}Creating Java Archive {}==> {}$TARGET{}".format(
colors["red"], colors["purple"], colors["yellow"], colors["end"]
)
env.Append(CXXCOMSTR=[compile_source_message])
env.Append(CCCOMSTR=[compile_source_message])
env.Append(SHCCCOMSTR=[compile_shared_source_message])
env.Append(SHCXXCOMSTR=[compile_shared_source_message])
env.Append(ARCOMSTR=[link_library_message])
env.Append(RANLIBCOMSTR=[ranlib_library_message])
env.Append(SHLINKCOMSTR=[link_shared_library_message])
env.Append(LINKCOMSTR=[link_program_message])
env.Append(JARCOMSTR=[java_library_message])
env.Append(JAVACCOMSTR=[java_compile_source_message])
def detect_visual_c_compiler_version(tools_env):
# tools_env is the variable scons uses to call tools that execute tasks, SCons's env['ENV'] that executes tasks...
# (see the SCons documentation for more information on what it does)...
    # In order for this function to be well encapsulated, it is forced to receive SCons's TOOLS env (env['ENV'])
    # and not the SCons setup environment (env)... so make sure you call it with the right environment or it will
    # fail to detect the proper vc version that will be called
# There is no flag to give to visual c compilers to set the architecture, ie scons bits argument (32,64,ARM etc)
# There are many different cl.exe files that are run, and each one compiles & links to a different architecture
# As far as I know, the only way to figure out what compiler will be run when Scons calls cl.exe via Program()
# is to check the PATH variable and figure out which one will be called first. Code below does that and returns:
# the following string values:
# "" Compiler not detected
# "amd64" Native 64 bit compiler
# "amd64_x86" 64 bit Cross Compiler for 32 bit
# "x86" Native 32 bit compiler
# "x86_amd64" 32 bit Cross Compiler for 64 bit
# There are other architectures, but Godot does not support them currently, so this function does not detect arm/amd64_arm
# and similar architectures/compilers
# Set chosen compiler to "not detected"
vc_chosen_compiler_index = -1
vc_chosen_compiler_str = ""
# Start with Pre VS 2017 checks which uses VCINSTALLDIR:
if "VCINSTALLDIR" in tools_env:
# print("Checking VCINSTALLDIR")
# find() works with -1 so big ifs below are needed... the simplest solution, in fact
# First test if amd64 and amd64_x86 compilers are present in the path
vc_amd64_compiler_detection_index = tools_env["PATH"].find(tools_env["VCINSTALLDIR"] + "BIN\\amd64;")
if vc_amd64_compiler_detection_index > -1:
vc_chosen_compiler_index = vc_amd64_compiler_detection_index
vc_chosen_compiler_str = "amd64"
vc_amd64_x86_compiler_detection_index = tools_env["PATH"].find(tools_env["VCINSTALLDIR"] + "BIN\\amd64_x86;")
if vc_amd64_x86_compiler_detection_index > -1 and (
vc_chosen_compiler_index == -1 or vc_chosen_compiler_index > vc_amd64_x86_compiler_detection_index
):
vc_chosen_compiler_index = vc_amd64_x86_compiler_detection_index
vc_chosen_compiler_str = "amd64_x86"
# Now check the 32 bit compilers
vc_x86_compiler_detection_index = tools_env["PATH"].find(tools_env["VCINSTALLDIR"] + "BIN;")
if vc_x86_compiler_detection_index > -1 and (
vc_chosen_compiler_index == -1 or vc_chosen_compiler_index > vc_x86_compiler_detection_index
):
vc_chosen_compiler_index = vc_x86_compiler_detection_index
vc_chosen_compiler_str = "x86"
vc_x86_amd64_compiler_detection_index = tools_env["PATH"].find(tools_env["VCINSTALLDIR"] + "BIN\\x86_amd64;")
if vc_x86_amd64_compiler_detection_index > -1 and (
vc_chosen_compiler_index == -1 or vc_chosen_compiler_index > vc_x86_amd64_compiler_detection_index
):
vc_chosen_compiler_index = vc_x86_amd64_compiler_detection_index
vc_chosen_compiler_str = "x86_amd64"
# and for VS 2017 and newer we check VCTOOLSINSTALLDIR:
if "VCTOOLSINSTALLDIR" in tools_env:
# Newer versions have a different path available
vc_amd64_compiler_detection_index = (
tools_env["PATH"].upper().find(tools_env["VCTOOLSINSTALLDIR"].upper() + "BIN\\HOSTX64\\X64;")
)
if vc_amd64_compiler_detection_index > -1:
vc_chosen_compiler_index = vc_amd64_compiler_detection_index
vc_chosen_compiler_str = "amd64"
vc_amd64_x86_compiler_detection_index = (
tools_env["PATH"].upper().find(tools_env["VCTOOLSINSTALLDIR"].upper() + "BIN\\HOSTX64\\X86;")
)
if vc_amd64_x86_compiler_detection_index > -1 and (
vc_chosen_compiler_index == -1 or vc_chosen_compiler_index > vc_amd64_x86_compiler_detection_index
):
vc_chosen_compiler_index = vc_amd64_x86_compiler_detection_index
vc_chosen_compiler_str = "amd64_x86"
vc_x86_compiler_detection_index = (
tools_env["PATH"].upper().find(tools_env["VCTOOLSINSTALLDIR"].upper() + "BIN\\HOSTX86\\X86;")
)
if vc_x86_compiler_detection_index > -1 and (
vc_chosen_compiler_index == -1 or vc_chosen_compiler_index > vc_x86_compiler_detection_index
):
vc_chosen_compiler_index = vc_x86_compiler_detection_index
vc_chosen_compiler_str = "x86"
vc_x86_amd64_compiler_detection_index = (
tools_env["PATH"].upper().find(tools_env["VCTOOLSINSTALLDIR"].upper() + "BIN\\HOSTX86\\X64;")
)
if vc_x86_amd64_compiler_detection_index > -1 and (
vc_chosen_compiler_index == -1 or vc_chosen_compiler_index > vc_x86_amd64_compiler_detection_index
):
vc_chosen_compiler_index = vc_x86_amd64_compiler_detection_index
vc_chosen_compiler_str = "x86_amd64"
return vc_chosen_compiler_str
def find_visual_c_batch_file(env):
from SCons.Tool.MSCommon.vc import get_default_version, get_host_target, find_batch_file
version = get_default_version(env)
(host_platform, target_platform, _) = get_host_target(env)
return find_batch_file(env, version, host_platform, target_platform)[0]
def generate_cpp_hint_file(filename):
if os.path.isfile(filename):
# Don't overwrite an existing hint file since the user may have customized it.
pass
else:
try:
with open(filename, "w") as fd:
fd.write("#define GDCLASS(m_class, m_inherits)\n")
except OSError:
print("Could not write cpp.hint file.")
def glob_recursive(pattern, node="."):
results = []
for f in Glob(str(node) + "/*", source=True):
if type(f) is Node.FS.Dir:
results += glob_recursive(pattern, f)
results += Glob(str(node) + "/" + pattern, source=True)
return results
def add_to_vs_project(env, sources):
for x in sources:
if type(x) == type(""):
fname = env.File(x).path
else:
fname = env.File(x)[0].path
pieces = fname.split(".")
if len(pieces) > 0:
basename = pieces[0]
basename = basename.replace("\\\\", "/")
if os.path.isfile(basename + ".h"):
env.vs_incs += [basename + ".h"]
elif os.path.isfile(basename + ".hpp"):
env.vs_incs += [basename + ".hpp"]
if os.path.isfile(basename + ".c"):
env.vs_srcs += [basename + ".c"]
elif os.path.isfile(basename + ".cpp"):
env.vs_srcs += [basename + ".cpp"]
def generate_vs_project(env, num_jobs):
batch_file = find_visual_c_batch_file(env)
if batch_file:
def build_commandline(commands):
common_build_prefix = [
'cmd /V /C set "plat=$(PlatformTarget)"',
'(if "$(PlatformTarget)"=="x64" (set "plat=x86_amd64"))',
'set "tools=%s"' % env["tools"],
'(if "$(Configuration)"=="release" (set "tools=no"))',
'call "' + batch_file + '" !plat!',
]
            # Windows allows spaces in paths, so we need to
            # double-quote the directory. However, the path ends
            # in a backslash, so we need to remove it, lest it escape the
            # closing double quote and confuse MSBuild.
common_build_postfix = [
"--directory=\"$(ProjectDir.TrimEnd('\\'))\"",
"platform=windows",
"target=$(Configuration)",
"progress=no",
"tools=!tools!",
"-j%s" % num_jobs,
]
if env["custom_modules"]:
common_build_postfix.append("custom_modules=%s" % env["custom_modules"])
result = " ^& ".join(common_build_prefix + [" ".join([commands] + common_build_postfix)])
return result
add_to_vs_project(env, env.core_sources)
add_to_vs_project(env, env.drivers_sources)
add_to_vs_project(env, env.main_sources)
add_to_vs_project(env, env.modules_sources)
add_to_vs_project(env, env.scene_sources)
add_to_vs_project(env, env.servers_sources)
add_to_vs_project(env, env.editor_sources)
for header in glob_recursive("**/*.h"):
env.vs_incs.append(str(header))
env["MSVSBUILDCOM"] = build_commandline("scons")
env["MSVSREBUILDCOM"] = build_commandline("scons vsproj=yes")
env["MSVSCLEANCOM"] = build_commandline("scons --clean")
        # This version information (Win32, x64, Debug, Release, Release_Debug) seems to be
# required for Visual Studio to understand that it needs to generate an NMAKE
# project. Do not modify without knowing what you are doing.
debug_variants = ["debug|Win32"] + ["debug|x64"]
release_variants = ["release|Win32"] + ["release|x64"]
release_debug_variants = ["release_debug|Win32"] + ["release_debug|x64"]
variants = debug_variants + release_variants + release_debug_variants
debug_targets = ["bin\\godot.windows.tools.32.exe"] + ["bin\\godot.windows.tools.64.exe"]
release_targets = ["bin\\godot.windows.opt.32.exe"] + ["bin\\godot.windows.opt.64.exe"]
release_debug_targets = ["bin\\godot.windows.opt.tools.32.exe"] + ["bin\\godot.windows.opt.tools.64.exe"]
targets = debug_targets + release_targets + release_debug_targets
if not env.get("MSVS"):
env["MSVS"]["PROJECTSUFFIX"] = ".vcxproj"
env["MSVS"]["SOLUTIONSUFFIX"] = ".sln"
env.MSVSProject(
target=["#godot" + env["MSVSPROJECTSUFFIX"]],
incs=env.vs_incs,
srcs=env.vs_srcs,
runfile=targets,
buildtarget=targets,
auto_build_solution=1,
variant=variants,
)
else:
print("Could not locate Visual Studio batch file to set up the build environment. Not generating VS project.")
def precious_program(env, program, sources, **args):
program = env.ProgramOriginal(program, sources, **args)
env.Precious(program)
return program
def add_shared_library(env, name, sources, **args):
library = env.SharedLibrary(name, sources, **args)
env.NoCache(library)
return library
def add_library(env, name, sources, **args):
library = env.Library(name, sources, **args)
env.NoCache(library)
return library
def add_program(env, name, sources, **args):
program = env.Program(name, sources, **args)
env.NoCache(program)
return program
def CommandNoCache(env, target, sources, command, **args):
result = env.Command(target, sources, command, **args)
env.NoCache(result)
return result
def Run(env, function, short_message, subprocess=True):
output_print = short_message if not env["verbose"] else ""
if not subprocess:
return Action(function, output_print)
else:
return Action(run_in_subprocess(function), output_print)
def detect_darwin_sdk_path(platform, env):
sdk_name = ""
if platform == "osx":
sdk_name = "macosx"
var_name = "MACOS_SDK_PATH"
elif platform == "iphone":
sdk_name = "iphoneos"
var_name = "IPHONESDK"
elif platform == "iphonesimulator":
sdk_name = "iphonesimulator"
var_name = "IPHONESDK"
else:
raise Exception("Invalid platform argument passed to detect_darwin_sdk_path")
if not env[var_name]:
try:
sdk_path = subprocess.check_output(["xcrun", "--sdk", sdk_name, "--show-sdk-path"]).strip().decode("utf-8")
if sdk_path:
env[var_name] = sdk_path
except (subprocess.CalledProcessError, OSError):
print("Failed to find SDK path while running xcrun --sdk {} --show-sdk-path.".format(sdk_name))
raise
def is_vanilla_clang(env):
if not using_clang(env):
return False
try:
version = subprocess.check_output([env.subst(env["CXX"]), "--version"]).strip().decode("utf-8")
except (subprocess.CalledProcessError, OSError):
print("Couldn't parse CXX environment variable to infer compiler version.")
return False
return not version.startswith("Apple")
def get_compiler_version(env):
"""
Returns an array of version numbers as ints: [major, minor, patch].
The return array should have at least two values (major, minor).
"""
if not env.msvc:
# Not using -dumpversion as some GCC distros only return major, and
# Clang used to return hardcoded 4.2.1: # https://reviews.llvm.org/D56803
try:
version = subprocess.check_output([env.subst(env["CXX"]), "--version"]).strip().decode("utf-8")
except (subprocess.CalledProcessError, OSError):
print("Couldn't parse CXX environment variable to infer compiler version.")
return None
else: # TODO: Implement for MSVC
return None
    match = re.search(r"[0-9]+\.[0-9.]+", version)
if match is not None:
return list(map(int, match.group().split(".")))
else:
return None
def using_gcc(env):
return "gcc" in os.path.basename(env["CC"])
def using_clang(env):
return "clang" in os.path.basename(env["CC"])
def show_progress(env):
import sys
from SCons.Script import Progress, Command, AlwaysBuild
screen = sys.stdout
# Progress reporting is not available in non-TTY environments since it
# messes with the output (for example, when writing to a file)
show_progress = env["progress"] and sys.stdout.isatty()
node_count = 0
node_count_max = 0
node_count_interval = 1
node_count_fname = str(env.Dir("#")) + "/.scons_node_count"
import time, math
class cache_progress:
# The default is 1 GB cache and 12 hours half life
def __init__(self, path=None, limit=1073741824, half_life=43200):
self.path = path
self.limit = limit
self.exponent_scale = math.log(2) / half_life
if env["verbose"] and path != None:
screen.write(
"Current cache limit is {} (used: {})\n".format(
self.convert_size(limit), self.convert_size(self.get_size(path))
)
)
self.delete(self.file_list())
def __call__(self, node, *args, **kw):
nonlocal node_count, node_count_max, node_count_interval, node_count_fname, show_progress
if show_progress:
# Print the progress percentage
node_count += node_count_interval
if node_count_max > 0 and node_count <= node_count_max:
screen.write("\r[%3d%%] " % (node_count * 100 / node_count_max))
screen.flush()
elif node_count_max > 0 and node_count > node_count_max:
screen.write("\r[100%] ")
screen.flush()
else:
screen.write("\r[Initial build] ")
screen.flush()
def delete(self, files):
if len(files) == 0:
return
if env["verbose"]:
# Utter something
screen.write("\rPurging %d %s from cache...\n" % (len(files), len(files) > 1 and "files" or "file"))
[os.remove(f) for f in files]
def file_list(self):
if self.path is None:
# Nothing to do
return []
# Gather a list of (filename, (size, atime)) within the
# cache directory
file_stat = [(x, os.stat(x)[6:8]) for x in glob.glob(os.path.join(self.path, "*", "*"))]
if file_stat == []:
# Nothing to do
return []
# Weight the cache files by size (assumed to be roughly
# proportional to the recompilation time) times an exponential
            # decay since the last access time, and return a list with the entries
# (filename, size, weight).
current_time = time.time()
file_stat = [(x[0], x[1][0], (current_time - x[1][1])) for x in file_stat]
# Sort by the most recently accessed files (most sensible to keep) first
file_stat.sort(key=lambda x: x[2])
# Search for the first entry where the storage limit is
# reached
sum, mark = 0, None
for i, x in enumerate(file_stat):
sum += x[1]
if sum > self.limit:
mark = i
break
if mark is None:
return []
else:
return [x[0] for x in file_stat[mark:]]
def convert_size(self, size_bytes):
if size_bytes == 0:
return "0 bytes"
size_name = ("bytes", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return "%s %s" % (int(s) if i == 0 else s, size_name[i])
def get_size(self, start_path="."):
total_size = 0
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return total_size
def progress_finish(target, source, env):
nonlocal node_count, progressor
with open(node_count_fname, "w") as f:
f.write("%d\n" % node_count)
progressor.delete(progressor.file_list())
try:
with open(node_count_fname) as f:
node_count_max = int(f.readline())
    except (OSError, ValueError):
pass
cache_directory = os.environ.get("SCONS_CACHE")
# Simple cache pruning, attached to SCons' progress callback. Trim the
# cache directory to a size not larger than cache_limit.
cache_limit = float(os.getenv("SCONS_CACHE_LIMIT", 1024)) * 1024 * 1024
progressor = cache_progress(cache_directory, cache_limit)
Progress(progressor, interval=node_count_interval)
progress_finish_command = Command("progress_finish", [], progress_finish)
AlwaysBuild(progress_finish_command)
def dump(env):
# Dumps latest build information for debugging purposes and external tools.
from json import dump
def non_serializable(obj):
return "<<non-serializable: %s>>" % (type(obj).__qualname__)
with open(".scons_env.json", "w") as f:
dump(env.Dictionary(), f, indent=4, default=non_serializable)
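# Hedged companion sketch, not part of the build scripts: an external tool could load
# the environment dump written by dump() above. The filename matches the one used there.
def _example_load_env_dump(path=".scons_env.json"):
    import json
    with open(path) as f:
        return json.load(f)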
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRhtslib(RPackage):
"""HTSlib high-throughput sequencing library as an R package.
This package provides version 1.7 of the 'HTSlib' C library for high-
throughput sequence analysis. The package is primarily useful to
developers of other R packages who wish to make use of HTSlib.
Motivation and instructions for use of this package are in the vignette,
vignette(package="Rhtslib", "Rhtslib")."""
homepage = "https://bioconductor.org/packages/Rhtslib"
git = "https://git.bioconductor.org/packages/Rhtslib.git"
version('1.18.1', commit='751a2ebaed43b7991204b27bd6c7870645001d82')
version('1.16.3', commit='3ed0b5db2ee3cf0df1c6096fde8855c8485eebd4')
version('1.14.1', commit='4be260720f845a34d0ac838278fce1363f645230')
version('1.12.1', commit='e3487b1355995d09b28fde5d0a7504a3e79a7203')
version('1.10.0', commit='53dcf7dfe35d735283956c77c011a97ca3f4eb26')
version('1.8.0', commit='3b5493473bed42958614091c58c739932ffcfa79')
depends_on('r-zlibbioc', type=('build', 'run'))
depends_on('bzip2', type=('build', 'link', 'run'))
depends_on('xz', type=('build', 'link', 'run'))
depends_on('curl', type=('build', 'link', 'run'))
depends_on('gmake', type='build')
# Some versions of this package will leave the temporary installation
# directory in the htslib shared object. R will fix this if patchelf is
# available
depends_on('patchelf', when='@1.12:1.14', type='build')
patch('use_spack_Makeconf.patch', when='@1.12:')
patch('find_deps-1.12.patch', when='@1.12:1.14')
patch('find_deps-1.16.patch', when='@1.16:')
@when('@1.12:')
def setup_build_environment(self, env):
env.set('BZIP2_INCLUDE', self.spec['bzip2'].headers.include_flags)
env.set('XZ_INCLUDE', self.spec['xz'].headers.include_flags)
env.set('BZIP2_LIB', self.spec['bzip2'].libs.search_flags)
env.set('XZ_LIB', self.spec['xz'].libs.search_flags)
|
import os
import sys
sys.path.append( '../' )
from rtfng import *
def MakeExample1() :
doc = Document()
ss = doc.StyleSheet
section = Section()
doc.Sections.append( section )
# text can be added directly to the section
    # a paragraph object is created as needed
section.append( 'Image Example 1' )
section.append( 'You can add images in one of two ways, either converting the '
'image each and every time like;' )
image = Image( 'examples/image.jpg' )
section.append( Paragraph( image ) )
section.append( 'Or you can use the image object to convert the image and then '
'save it to a raw code element that can be included later.' )
    fout = open( 'image_tmp.py', 'w' )
    fout.write( 'from rtfng import RawCode\n' )
    fout.write( '\n' )
    fout.write( image.ToRawCode( 'TEST_IMAGE' ) )
    fout.close()
import image_tmp
section.append( Paragraph( image_tmp.TEST_IMAGE ) )
section.append( 'Have a look in image_tmp.py for the converted RawCode.' )
section.append( 'here are some png files' )
for f in [ 'examples/img1.png',
'examples/img2.png',
'examples/img3.png',
'examples/img4.png' ] :
section.append( Paragraph( Image( f ) ) )
return doc
def OpenFile( name ) :
    return open( '%s.rtf' % name, 'w' )
if __name__ == '__main__' :
DR = Renderer()
doc1 = MakeExample1()
DR.Write( doc1, OpenFile( 'Image1' ) )
print "Finished"
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params
class CcrClient(NamespacedClient):
@query_params()
def delete_auto_follow_pattern(self, name, params=None, headers=None):
"""
Deletes auto-follow patterns.
`<https://www.elastic.co/guide/en/elasticsearch/reference/7.14/ccr-delete-auto-follow-pattern.html>`_
:arg name: The name of the auto follow pattern.
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return self.transport.perform_request(
"DELETE",
_make_path("_ccr", "auto_follow", name),
params=params,
headers=headers,
)
@query_params("wait_for_active_shards")
def follow(self, index, body, params=None, headers=None):
"""
Creates a new follower index configured to follow the referenced leader index.
`<https://www.elastic.co/guide/en/elasticsearch/reference/7.14/ccr-put-follow.html>`_
:arg index: The name of the follower index
:arg body: The name of the leader index and other optional ccr
related parameters
:arg wait_for_active_shards: Sets the number of shard copies
that must be active before returning. Defaults to 0. Set to `all` for
all shard copies, otherwise set to any non-negative value less than or
equal to the total number of copies for the shard (number of replicas +
1) Default: 0
"""
for param in (index, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request(
"PUT",
_make_path(index, "_ccr", "follow"),
params=params,
headers=headers,
body=body,
)
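    # Hedged usage sketch, not part of this client module: a minimal follow request
    # names the remote cluster and the leader index in the body, for example
    #   client.ccr.follow(
    #       index="logs-copy",
    #       body={"remote_cluster": "leader", "leader_index": "logs"},
    #   )
    # where "logs-copy", "leader" and "logs" are hypothetical placeholders.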
@query_params()
def follow_info(self, index, params=None, headers=None):
"""
Retrieves information about all follower indices, including parameters and
status for each follower index
`<https://www.elastic.co/guide/en/elasticsearch/reference/7.14/ccr-get-follow-info.html>`_
:arg index: A comma-separated list of index patterns; use `_all`
to perform the operation on all indices
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return self.transport.perform_request(
"GET", _make_path(index, "_ccr", "info"), params=params, headers=headers
)
@query_params()
def follow_stats(self, index, params=None, headers=None):
"""
        Retrieves follower stats. Returns shard-level stats about the following tasks
associated with each shard for the specified indices.
`<https://www.elastic.co/guide/en/elasticsearch/reference/7.14/ccr-get-follow-stats.html>`_
:arg index: A comma-separated list of index patterns; use `_all`
to perform the operation on all indices
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return self.transport.perform_request(
"GET", _make_path(index, "_ccr", "stats"), params=params, headers=headers
)
@query_params()
def forget_follower(self, index, body, params=None, headers=None):
"""
Removes the follower retention leases from the leader.
`<https://www.elastic.co/guide/en/elasticsearch/reference/7.14/ccr-post-forget-follower.html>`_
:arg index: the name of the leader index for which specified
follower retention leases should be removed
:arg body: the name and UUID of the follower index, the name of
the cluster containing the follower index, and the alias from the
perspective of that cluster for the remote cluster containing the leader
index
"""
for param in (index, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request(
"POST",
_make_path(index, "_ccr", "forget_follower"),
params=params,
headers=headers,
body=body,
)
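    # Hedged usage sketch, not part of this client module: the forget_follower body
    # identifies the follower from the leader cluster's perspective, for example
    #   client.ccr.forget_follower(
    #       index="logs",  # leader index
    #       body={
    #           "follower_cluster": "follower",
    #           "follower_index": "logs-copy",
    #           "follower_index_uuid": "<follower-index-uuid>",
    #           "leader_remote_cluster": "leader",
    #       },
    #   )
    # All names and the UUID placeholder are illustrative assumptions.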
@query_params()
def get_auto_follow_pattern(self, name=None, params=None, headers=None):
"""
Gets configured auto-follow patterns. Returns the specified auto-follow pattern
collection.
`<https://www.elastic.co/guide/en/elasticsearch/reference/7.14/ccr-get-auto-follow-pattern.html>`_
:arg name: The name of the auto follow pattern.
"""
return self.transport.perform_request(
"GET",
_make_path("_ccr", "auto_follow", name),
params=params,
headers=headers,
)
@query_params()
def pause_follow(self, index, params=None, headers=None):
"""
Pauses a follower index. The follower index will not fetch any additional
operations from the leader index.
`<https://www.elastic.co/guide/en/elasticsearch/reference/7.14/ccr-post-pause-follow.html>`_
:arg index: The name of the follower index that should pause
following its leader index.
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return self.transport.perform_request(
"POST",
_make_path(index, "_ccr", "pause_follow"),
params=params,
headers=headers,
)
@query_params()
def put_auto_follow_pattern(self, name, body, params=None, headers=None):
"""
Creates a new named collection of auto-follow patterns against a specified
remote cluster. Newly created indices on the remote cluster matching any of the
specified patterns will be automatically configured as follower indices.
`<https://www.elastic.co/guide/en/elasticsearch/reference/7.14/ccr-put-auto-follow-pattern.html>`_
:arg name: The name of the auto follow pattern.
:arg body: The specification of the auto follow pattern
"""
for param in (name, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request(
"PUT",
_make_path("_ccr", "auto_follow", name),
params=params,
headers=headers,
body=body,
)
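    # Hedged usage sketch, not part of this client module: an auto-follow pattern body
    # typically names the remote cluster and the leader index patterns, for example
    #   client.ccr.put_auto_follow_pattern(
    #       name="logs-pattern",
    #       body={
    #           "remote_cluster": "leader",
    #           "leader_index_patterns": ["logs-*"],
    #           "follow_index_pattern": "{{leader_index}}-copy",
    #       },
    #   )
    # The pattern name and index patterns are hypothetical placeholders.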
@query_params()
def resume_follow(self, index, body=None, params=None, headers=None):
"""
Resumes a follower index that has been paused
`<https://www.elastic.co/guide/en/elasticsearch/reference/7.14/ccr-post-resume-follow.html>`_
:arg index: The name of the follow index to resume following.
:arg body: The name of the leader index and other optional ccr
related parameters
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return self.transport.perform_request(
"POST",
_make_path(index, "_ccr", "resume_follow"),
params=params,
headers=headers,
body=body,
)
@query_params()
def stats(self, params=None, headers=None):
"""
Gets all stats related to cross-cluster replication.
`<https://www.elastic.co/guide/en/elasticsearch/reference/7.14/ccr-get-stats.html>`_
"""
return self.transport.perform_request(
"GET", "/_ccr/stats", params=params, headers=headers
)
@query_params()
def unfollow(self, index, params=None, headers=None):
"""
Stops the following task associated with a follower index and removes index
metadata and settings associated with cross-cluster replication.
`<https://www.elastic.co/guide/en/elasticsearch/reference/7.14/ccr-post-unfollow.html>`_
:arg index: The name of the follower index that should be turned
into a regular index.
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return self.transport.perform_request(
"POST",
_make_path(index, "_ccr", "unfollow"),
params=params,
headers=headers,
)
@query_params()
def pause_auto_follow_pattern(self, name, params=None, headers=None):
"""
Pauses an auto-follow pattern
`<https://www.elastic.co/guide/en/elasticsearch/reference/7.14/ccr-pause-auto-follow-pattern.html>`_
:arg name: The name of the auto follow pattern that should pause
discovering new indices to follow.
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return self.transport.perform_request(
"POST",
_make_path("_ccr", "auto_follow", name, "pause"),
params=params,
headers=headers,
)
@query_params()
def resume_auto_follow_pattern(self, name, params=None, headers=None):
"""
Resumes an auto-follow pattern that has been paused
`<https://www.elastic.co/guide/en/elasticsearch/reference/7.14/ccr-resume-auto-follow-pattern.html>`_
:arg name: The name of the auto follow pattern to resume
discovering new indices to follow.
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return self.transport.perform_request(
"POST",
_make_path("_ccr", "auto_follow", name, "resume"),
params=params,
headers=headers,
)
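# Hedged end-to-end sketch, not part of the client library: how an application might
# drive the CCR methods above through an Elasticsearch client instance. Index and
# cluster names are hypothetical placeholders.
def _example_ccr_lifecycle(es):
    # Start following a leader index from a configured remote cluster.
    es.ccr.follow(
        index="logs-copy",
        body={"remote_cluster": "leader", "leader_index": "logs"},
    )
    # Inspect replication status for the follower index.
    info = es.ccr.follow_info(index="logs-copy")
    # Convert the follower back into a regular index: pause, close, unfollow, reopen.
    es.ccr.pause_follow(index="logs-copy")
    es.indices.close(index="logs-copy")
    es.ccr.unfollow(index="logs-copy")
    es.indices.open(index="logs-copy")
    return info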
|