from helpers import *
from plots import *
from col_type_detector import *
# from configs import *
import warnings
def generate_flomaster_plot(df, x="None", y=[], group_by=None, plot_type=None, x_axis=None, y_axis=None, title=None):
"""
Function generates interactive plot for given dataframe and columns
Args:
df (pd.DataFrame)
x (str): name of the column to use as x_axis
y (str or list): either one column or list of columns to plot as y axis
group_by (str): column by which to group data (default is None)
plot_type (str): possible values vary depending on the input data; the options are:
ONE_NUMERIC = ['Histogram', 'Distplot']
ONE_CATEOGIRCAL = ['Donut', 'Pie', 'Histogram']
ONE_TEXT = ['Wordcloud']
TWO_NUMERIC = ["Scatter", "Scatter plot with margins", "2D density plot", "Distplot", "Histogram", "Basic Stats"]
TWO_NUMERIC_SORTED = ['Connected Scatter', "Area plot", "Line plot"]
ONE_CATEOGIRCAL_ONE_NUMERICAL = ['Box', "Violin", "Basic Stats"]
TWO_CATEGORICAL = ['Cross tab', "Stacked bar"]
ONE_DATETIME_ONE_NUMERIC = ['Connected Scatter']
x_axis (str): defaults to the x column's name
y_axis (str): defaults to y; if y is a list, to its first element
title (str): defaults to f"{x_axis} vs {y_axis}"
Note:
Some illogical results might occur if column_type_detector classifies some
columns incorrectly. Also note that this package is at a very early stage of development.
Raises:
ValueError: if plot_type is not in the allowed list
Returns:
plotly figure object
"""
if isinstance(y, str):
y = [y]
data_types = get_column_types(df, num_unique_categories=2)
if x_axis is None:
x_axis = x
if y != [] and y_axis is None:
y_axis = y[0]
if title is None:
title = f"{x_axis} vs {y_axis}"
x_dtype = get_data_type_for_given_feature(data_types, x)
y_dtype = get_data_type_for_given_feature(data_types, y[0]) if y else None  # guard against an empty y list
# print(x)
# print(y)
# print(x_dtype)
# print(y_dtype)
# one feature
if x != "None" and y[0] == 'None':
if x_dtype == 'numeric': # 1
possible_graphs = ONE_NUMERIC
if (plot_type is not None) and (plot_type not in possible_graphs):
raise ValueError(f"Please select one from {possible_graphs}")
else:
fig = one_numeric(df, x, group_by, plot_type)
add_labels_to_fig(fig, x_axis, y_axis, title)
return fig
if x_dtype == 'categorical': # 2
possible_graphs = ONE_CATEOGIRCAL
if (plot_type is not None) and (plot_type not in possible_graphs):
raise ValueError(f"Please select one from {possible_graphs}")
else:
fig = one_categoric(df, x, group_by, plot_type)
add_labels_to_fig(fig, x_axis, y_axis, title)
return fig
if x_dtype == 'texts': # 3
possible_graphs = ONE_TEXT
if (plot_type is not None) and (plot_type not in possible_graphs):
raise ValueError(f"Please select one from {possible_graphs}")
else:
fig = one_textual(df, x)
return fig
# two features
if x != "None" and y[0] != 'None':
# two numeric
if x_dtype == "numeric" and y_dtype == 'numeric': # 4
possible_graphs = TWO_NUMERIC
if df[x].to_list() == sorted(df[x].to_list()):
    possible_graphs = TWO_NUMERIC + TWO_NUMERIC_SORTED
if len(df)>2000 and plot_type in ["Histogram", "Scatter"]:
warnings.warn('**Data has too many rows, we suggest plotting \
with one of the following: "Scatter plot with margins", "2D density plot", "Distplot"**')
if len(df)<2000 and plot_type not in ["Histogram", "Scatter", "Basic Stats"]:
warnings.warn('**Data has few rows, we suggest plotting \
with one of the following: "Histogram", "Scatter"**')
if (plot_type is not None) and (plot_type not in possible_graphs):
raise ValueError(f"Please select one from {possible_graphs}")
else:
fig = two_numeric(df, x, y[0], group_by, plot_type)
if plot_type in ["Basic Stats",'Histogram']:
if y_axis == y[0]:
y_axis = ''
if x_axis == x:
x_axis = ''
add_labels_to_fig(fig, x_axis, y_axis, title)
return fig
#one numeric one categoric # 5
if x_dtype == "categorical" and y_dtype == 'numeric':
possible_graphs = ONE_CATEOGIRCAL_ONE_NUMERICAL
if (plot_type is not None) and (plot_type not in possible_graphs):
raise ValueError(f"Please select one from {possible_graphs}")
else:
fig = one_numeric_one_categorical(df, x, y, group_by, plot_type)
add_labels_to_fig(fig, x_axis, y_axis, title)
return fig
# two categoricals
if x_dtype == "categorical" and y_dtype == 'categorical':
possible_graphs = TWO_CATEGORICAL
if (plot_type is not None) and (plot_type not in possible_graphs):
raise ValueError(f"Please select one from {possible_graphs}")
else:
if plot_type == 'Cross tab':
fig = two_categorical(df, x, y[0], plot_type)
elif plot_type == 'Stacked bar':
fig = two_categorical(df, x, y[0], plot_type)
add_labels_to_fig(fig, x_axis, y_axis, title)
return fig
# one datetime one numeric
if x_dtype == "datetime" and y_dtype == 'numeric':
possible_graphs = ONE_DATETIME_ONE_NUMERIC
if check_list_in_list(list(df.columns), ['Date', "Open", "High", "Low", "Close"]):
    possible_graphs = ONE_DATETIME_ONE_NUMERIC + ["Stock price"]
if (plot_type is not None) and (plot_type not in possible_graphs):
raise ValueError(f"Please select one from {possible_graphs}")
else:
fig = one_datetime_one_numeric(df, x, y, group_by,plot_type)
add_labels_to_fig(fig, x_axis, y_axis, title)
return fig
return "Something went wrong, contact team Flomaster"
|
# Read space-separated integers and print them in reverse order by swapping symmetric elements in place.
numbers = [int(num) for num in input().split(' ')]
for i in range(len(numbers) // 2):
temp = numbers[i]
numbers[i] = numbers[- 1 - i]
numbers[- 1 - i] = temp
print(" ".join(map(str, numbers)))
|
import sys,os,glob
try:
import pyUSRP as u
except ImportError:
try:
sys.path.append('..')
import pyUSRP as u
except ImportError:
print "Cannot find the pyUSRP package"
import argparse
def run(backend, files, welch, dbc):
for f in files:
u.calculate_noise(f, verbose = True, welch = max(welch,1), dbc = dbc, clip = 0.1)
print(u.plot_noise_spec(files, channel_list=None, max_frequency=None, title_info=None, backend=backend,
cryostat_attenuation=0, auto_open=True, output_filename=None, add_info = ["decimation: 100x fs: 100Msps","loopback decimation 100x","decimation: OFF fs: 1Msps"]))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Test the basic VNA functionality.')
parser.add_argument('--folder', '-fn', help='Name of the folder in which the data are stored', type=str, default = "data")
parser.add_argument('--backend', '-b', help='backend to use for plotting', type=str, default= "matplotlib")
parser.add_argument('--welch', '-w', help='Welch factor relative to timestream length so that the welch factor is len(timestream)/this_arg', type=int, default= 5)
parser.add_argument('--dbc', '-dbc', help='Analyze and plot in dBc or not', action="store_true")
args = parser.parse_args()
try:
os.mkdir(args.folder)
except OSError:
pass
os.chdir(args.folder)
files = glob.glob("USRP_Noise*.h5")
run(backend = args.backend, files = files, welch = args.welch, dbc = args.dbc)
|
from collections import defaultdict
import pyvex
from ..knowledge_plugins.xrefs import XRef, XRefType
from ..engines.light import SimEngineLight, SimEngineLightVEXMixin
from .propagator.vex_vars import VEXTmp
from .propagator.values import Top
from . import register_analysis
from .analysis import Analysis
from .forward_analysis import FunctionGraphVisitor, SingleNodeGraphVisitor, ForwardAnalysis
class SimEngineXRefsVEX(
SimEngineLightVEXMixin,
SimEngineLight,
):
def __init__(self, xref_manager, project=None, replacements=None):
super().__init__()
self.project = project
self.xref_manager = xref_manager
self.replacements = replacements if replacements is not None else { }
def add_xref(self, xref_type, from_loc, to_loc):
self.xref_manager.add_xref(XRef(ins_addr=from_loc.ins_addr, block_addr=from_loc.block_addr,
stmt_idx=from_loc.stmt_idx, dst=to_loc, xref_type=xref_type)
)
#
# Statement handlers
#
def _handle_WrTmp(self, stmt):
# Don't execute the tmp write since it has been done during constant propagation
self._expr(stmt.data)
if type(stmt.data) is pyvex.IRExpr.Load:
self._handle_data_offset_refs(stmt.tmp)
def _handle_Put(self, stmt):
# if there is a Load, get it executed
self._expr(stmt.data)
def _handle_Store(self, stmt):
if isinstance(stmt.addr, pyvex.IRExpr.RdTmp):
addr_tmp = VEXTmp(stmt.addr.tmp)
blockloc = self._codeloc(block_only=True)
if addr_tmp in self.replacements[blockloc] and not isinstance(self.replacements[blockloc][addr_tmp], Top):
addr = self.replacements[blockloc][addr_tmp]
if isinstance(addr, int):
self.add_xref(XRefType.Write, self._codeloc(), addr)
elif isinstance(stmt.addr, pyvex.IRExpr.Const):
addr = stmt.addr.con.value
self.add_xref(XRefType.Write, self._codeloc(), addr)
def _handle_StoreG(self, stmt):
blockloc = self._codeloc(block_only=True)
if type(stmt.addr) is pyvex.IRExpr.RdTmp:
addr_tmp = VEXTmp(stmt.addr.tmp)
if addr_tmp in self.replacements[blockloc] and not isinstance(self.replacements[blockloc][addr_tmp], Top):
addr = self.replacements[blockloc][addr_tmp]
if isinstance(addr, int):
self.add_xref(XRefType.Write, self._codeloc(), addr)
def _handle_LoadG(self, stmt):
# What are we reading?
blockloc = self._codeloc(block_only=True)
if type(stmt.addr) is pyvex.IRExpr.RdTmp:
addr_tmp = VEXTmp(stmt.addr.tmp)
if addr_tmp in self.replacements[blockloc] and not isinstance(self.replacements[blockloc][addr_tmp], Top):
addr = self.replacements[blockloc][addr_tmp]
if isinstance(addr, int):
self.add_xref(XRefType.Read, self._codeloc(), addr)
self._handle_data_offset_refs(stmt.dst)
def _handle_LLSC(self, stmt: pyvex.IRStmt.LLSC):
blockloc = self._codeloc(block_only=True)
if isinstance(stmt.addr, pyvex.IRExpr.RdTmp):
addr_tmp = VEXTmp(stmt.addr.tmp)
if addr_tmp in self.replacements[blockloc]:
addr = self.replacements[blockloc][addr_tmp]
if isinstance(addr, int):
if stmt.storedata is None:
# load-link
xref_type = XRefType.Read
else:
xref_type = XRefType.Write
self.add_xref(xref_type, self._codeloc(), addr)
def _handle_data_offset_refs(self, data_tmp):
# is this thing a pointer?
# If so, produce the ida-style "Offset" XRefs.
blockloc = self._codeloc(block_only=True)
tmp = VEXTmp(data_tmp)
if tmp in self.replacements[blockloc] and not isinstance(self.replacements[blockloc][tmp], Top):
data = self.replacements[blockloc][tmp]
# Is this thing not an integer? If so, get out of here
# e.g., you can't find_object_containing on an SPOffset
if not isinstance(data, int):
return
if data is not None and self.project.loader.find_object_containing(data) is not None:
# HACK: Avoid spamming Xrefs if the binary is loaded at 0
# e.g., firmware!
# (magic value chosen due to length of CM EVT)
if data > 0x200:
self.add_xref(XRefType.Offset, self._codeloc(), data)
#
# Expression handlers
#
def _handle_Get(self, expr):
return None
def _handle_Load(self, expr):
blockloc = self._codeloc(block_only=True)
if type(expr.addr) is pyvex.IRExpr.RdTmp:
addr_tmp = VEXTmp(expr.addr.tmp)
if addr_tmp in self.replacements[blockloc] and not isinstance(self.replacements[blockloc][addr_tmp], Top):
addr = self.replacements[blockloc][addr_tmp]
if isinstance(addr, int):
self.add_xref(XRefType.Read, self._codeloc(), addr)
elif type(expr.addr) is pyvex.IRExpr.Const:
addr = expr.addr.con.value
self.add_xref(XRefType.Read, self._codeloc(), addr)
def _handle_CCall(self, expr):
return None
def _handle_function(self, func):
# pylint: disable=unused-argument,no-self-use
return None # TODO: Maybe add an execute-type XRef?
class XRefsAnalysis(ForwardAnalysis, Analysis): # pylint:disable=abstract-method
"""
XRefsAnalysis recovers in-depth x-refs (cross-references) in disassembly code.
Here is an example::
.text:
000023C8 LDR R2, =time_now
000023CA LDR R3, [R2]
000023CC ADDS R3, #1
000023CE STR R3, [R2]
000023D0 BX LR
.bss:
1FFF36F4 time_now % 4
You will have the following x-refs for time_now::
23c8 - offset
23ca - read access
23ce - write access
"""
def __init__(self, func=None, func_graph=None, block=None, max_iterations=1, replacements=None):
if func is not None:
if block is not None:
raise ValueError('You cannot specify both "func" and "block".')
# traversing a function
graph_visitor = FunctionGraphVisitor(func, func_graph)
if replacements is None:
prop = self.project.analyses.Propagator(func=func, func_graph=func_graph)
replacements = prop.replacements
elif block is not None:
# traversing a block
graph_visitor = SingleNodeGraphVisitor(block)
if replacements is None:
prop = self.project.analyses.Propagator(block=block)
replacements = prop.replacements
else:
raise ValueError('Unsupported analysis target.')
ForwardAnalysis.__init__(self, order_jobs=True, allow_merging=True, allow_widening=False,
graph_visitor=graph_visitor)
self._function = func
self._max_iterations = max_iterations
self._replacements = replacements
self._node_iterations = defaultdict(int)
self._engine_vex = SimEngineXRefsVEX(self.kb.xrefs, project=self.project, replacements=replacements)
self._engine_ail = None
self._analyze()
#
# Main analysis routines
#
def _pre_analysis(self):
pass
def _pre_job_handling(self, job):
pass
def _initial_abstract_state(self, node):
return None
def _merge_states(self, node, *states):
return None
def _run_on_node(self, node, state):
block = self.project.factory.block(node.addr, node.size, opt_level=1, cross_insn_opt=False)
if block.size == 0:
# VEX couldn't decode it
return False, None
block_key = node.addr
engine = self._engine_vex
engine.process(None, block=block, fail_fast=self._fail_fast)
self._node_iterations[block_key] += 1
if self._node_iterations[block_key] < self._max_iterations:
return True, None
else:
return False, None
def _intra_analysis(self):
pass
def _post_analysis(self):
pass
register_analysis(XRefsAnalysis, "XRefs")
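# A hedged usage sketch (standalone, outside this module; the binary path and function
# name below are placeholders, not taken from this file). Once registered, the analysis
# is reachable through a project's `analyses` hub and stores its results in the
# knowledge base.
#
#     import angr
#     proj = angr.Project("/path/to/binary", auto_load_libs=False)
#     cfg = proj.analyses.CFGFast()                  # build a CFG so functions are known
#     main_func = cfg.functions.function(name="main")
#     proj.analyses.XRefs(func=main_func)            # results land in proj.kb.xrefs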
|
import requests
class RepositoryMixin:
def has_open_repository(self):
url = "https://api.github.com/repos/{0}/{1}".format(self.owner, self.repo)
try:
response = requests.get(url)
# If the response was successful, no Exception will be raised
response.raise_for_status()
except requests.HTTPError:
self.print_state(check_name="has_open_repository", state=False)
return False
except Exception as err:
print(f"Other error occurred: {err}")
self.print_state(check_name="has_open_repository", state=False)
return False
self.print_state(check_name="has_open_repository", state=True)
return True
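# A minimal sketch of how this mixin might be used (the Checker class, the owner/repo
# values and the print_state implementation below are assumptions for illustration):
if __name__ == "__main__":
    class Checker(RepositoryMixin):
        def __init__(self, owner, repo):
            self.owner = owner
            self.repo = repo
        def print_state(self, check_name, state):
            print(f"{check_name}: {'passed' if state else 'failed'}")
    Checker("octocat", "Hello-World").has_open_repository()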
|
# coding: utf-8
# Copyright 2020. ThingsBoard
# #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import datetime
import json
import mimetypes
from multiprocessing.pool import ThreadPool
import os
import re
import tempfile
# python 2 and python 3 compatibility library
import six
from six.moves.urllib.parse import quote
from tb_rest_client.configuration import Configuration
import tb_rest_client.models.models_ce
import tb_rest_client.models.models_pe
from tb_rest_client import rest
class ApiClient(object):
"""
:param configuration: .Configuration object for this client
:param header_name: a header to pass when making calls to the API.
:param header_value: a header value to pass when making calls to
the API.
:param cookie: a cookie to include in the header when making calls
to the API
"""
PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
NATIVE_TYPES_MAPPING = {
'int': int,
'long': int if six.PY3 else long, # noqa: F821
'float': float,
'str': str,
'bool': bool,
'date': datetime.date,
'datetime': datetime.datetime,
'object': object,
}
def __init__(self, configuration=None, header_name=None, header_value=None,
cookie=None):
if configuration is None:
configuration = Configuration()
self.configuration = configuration
# Use the pool property to lazily initialize the ThreadPool.
self._pool = None
self.rest_client = rest.RESTClientObject(configuration)
self.default_headers = {}
if header_name is not None:
self.default_headers[header_name] = header_value
self.cookie = cookie
# Set default User-Agent.
self.user_agent = 'Swagger-Codegen/1.0.0/python'
def __del__(self):
if self._pool is not None:
self._pool.close()
self._pool.join()
@property
def pool(self):
if self._pool is None:
self._pool = ThreadPool()
return self._pool
@property
def user_agent(self):
"""User agent for this API client"""
return self.default_headers['User-Agent']
@user_agent.setter
def user_agent(self, value):
self.default_headers['User-Agent'] = value
def set_default_header(self, header_name, header_value):
self.default_headers[header_name] = header_value
def __call_api(
self, resource_path, method, path_params=None,
query_params=None, header_params=None, body=None, post_params=None,
files=None, response_type=None, auth_settings=None,
_return_http_data_only=None, collection_formats=None,
_preload_content=True, _request_timeout=None):
config = self.configuration
# header parameters
header_params = header_params or {}
header_params.update(self.default_headers)
if self.cookie:
header_params['Cookie'] = self.cookie
if header_params:
header_params = self.sanitize_for_serialization(header_params)
header_params = dict(self.parameters_to_tuples(header_params,
collection_formats))
# path parameters
if path_params:
path_params = self.sanitize_for_serialization(path_params)
path_params = self.parameters_to_tuples(path_params,
collection_formats)
for k, v in path_params:
if isinstance(v, dict) and v.get("entityType") is not None and v.get('id') is not None:
v = v["id"]
# specified safe chars, encode everything
if v is not None:
resource_path = resource_path.replace(
'{%s}' % k,
quote(str(v) if v is not None else "", safe=config.safe_chars_for_path_param)
).replace(
'{?%s}' % k,
quote(str("?"+k+"="+v) if v is not None else "", safe=config.safe_chars_for_path_param)
).replace(
'{?%s,' % k,
quote(str("?"+k+"="+v) if v is not None else "", safe=config.safe_chars_for_path_param)
).replace(
',%s' % k,
quote(str("&"+k+"="+v) if v is not None else "", safe=config.safe_chars_for_path_param)
).replace(
',?%s' % k,
quote(str("&"+k+"="+v) if v is not None else "", safe=config.safe_chars_for_path_param)
).replace(
',%s}' % k,
quote("}" + str(v) if v is not None else "", safe=config.safe_chars_for_path_param)
)
# resource_path = resource_path.replace(
# '{%s}' % k,
# quote(str(k+"="+v), safe=config.safe_chars_for_path_param)
# ).replace(
# '{?%s}' % k,
# quote(str("?"+k+"="+v), safe=config.safe_chars_for_path_param)
# ).replace(
# '{?%s,' % k,
# quote(str("?"+k+"="+v) + "{", safe=config.safe_chars_for_path_param)
# ).replace(
# ',%s,' % k,
# quote("}" + str("&"+k+"="+v) + "{", safe=config.safe_chars_for_path_param)
# ).replace(
# ',?%s,' % k,
# quote("}" + str("&"+k+"="+v) + "{", safe=config.safe_chars_for_path_param)
# ).replace(
# ',%s}' % k,
# quote("}" + str(v), safe=config.safe_chars_for_path_param)
# )
# query parameters
if query_params:
query_params = self.sanitize_for_serialization(query_params)
query_params = [param for param in query_params if (isinstance(param, tuple) and len(param) > 1 and param[1] is not None) or not isinstance(param, tuple)]
query_params = self.parameters_to_tuples(query_params,
collection_formats)
# post parameters
if post_params or files:
post_params = self.prepare_post_parameters(post_params, files)
post_params = [param for param in post_params if (isinstance(param, tuple) and len(param) > 1 and param[1] is not None) or not isinstance(param, tuple)]
post_params = self.sanitize_for_serialization(post_params)
post_params = self.parameters_to_tuples(post_params,
collection_formats)
# auth setting
self.update_params_for_auth(header_params, query_params, auth_settings)
# body
if body:
body = self.sanitize_for_serialization(body)
# request url
clean_path = self.sanitize_path(resource_path)
url = self.configuration.host + clean_path
# perform request and return response
response_data = self.request(
method, url, query_params=query_params, headers=header_params,
post_params=post_params, body=body,
_preload_content=_preload_content,
_request_timeout=_request_timeout)
self.last_response = response_data
return_data = response_data
if _preload_content:
# deserialize response data
if response_type:
return_data = self.deserialize(response_data, response_type)
else:
return_data = None
if _return_http_data_only:
return (return_data)
else:
return (return_data, response_data.status,
response_data.getheaders())
def sanitize_path(self, url):
pattern = r'(\{[\?a-zA-Z,]{1,}\})'
matching = re.search(pattern, url)
if matching is not None and len(matching.groups()) > 0:
for match in matching.groups():
clean_url = url.replace(match, "")
else:
clean_url = url
return clean_url
def sanitize_for_serialization(self, obj):
"""Builds a JSON POST object.
If obj is None, return None.
If obj is str, int, long, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
If obj is dict, return the dict.
If obj is swagger model, return the properties dict.
:param obj: The data to serialize.
:return: The serialized form of data.
"""
if obj is None:
return None
elif isinstance(obj, self.PRIMITIVE_TYPES):
return obj
elif isinstance(obj, list):
return [self.sanitize_for_serialization(sub_obj)
for sub_obj in obj]
elif isinstance(obj, tuple):
return tuple(self.sanitize_for_serialization(sub_obj)
for sub_obj in obj)
elif isinstance(obj, (datetime.datetime, datetime.date)):
return obj.isoformat()
if isinstance(obj, dict):
obj_dict = obj
else:
# Convert model obj to dict except
# attributes `swagger_types`, `attribute_map`
# and attributes which value is not None.
# Convert attribute name to json key in
# model definition for request.
obj_dict = {obj.attribute_map[attr]: getattr(obj, attr)
for attr, _ in six.iteritems(obj.swagger_types)
if getattr(obj, attr) is not None}
return {key: self.sanitize_for_serialization(val)
for key, val in six.iteritems(obj_dict)}
def deserialize(self, response, response_type):
"""Deserializes response into an object.
:param response: RESTResponse object to be deserialized.
:param response_type: class literal for
deserialized object, or string of class name.
:return: deserialized object.
"""
# handle file downloading
# save response body into a tmp file and return the instance
if response_type == "file":
return self.__deserialize_file(response)
# fetch data from response object
try:
data = json.loads(response.data)
except ValueError:
data = response.data
return self.__deserialize(data, response_type)
def __deserialize(self, data, klass):
"""Deserializes dict, list, str into an object..
:param data: dict, list or str.
:param klass: class literal, or string of class name.
:return: object.
"""
if data is None:
return None
if klass == "DeferredResultResponseEntity":
return self.__deserialize(data, type(data))
#
# elif type(klass) == str:
# # convert str to class
elif klass in self.NATIVE_TYPES_MAPPING:
klass = self.NATIVE_TYPES_MAPPING[klass]
elif klass == list:
return_data = [self.__deserialize(sub_data, type(sub_data))
for sub_data in data]
return return_data
elif klass == dict:
return_data = {k: self.__deserialize(v, type(v))
for k, v in six.iteritems(data)}
return return_data
elif type(klass) == str:
if klass.startswith('list['):
sub_kls = re.match(r'list\[(.*)\]', klass).group(1)
return [self.__deserialize(sub_data, sub_kls)
for sub_data in data]
if klass.startswith('dict('):
sub_kls = re.match(r'dict\(([^,]*), (.*)\)', klass).group(2)
return {k: self.__deserialize(v, sub_kls)
for k, v in six.iteritems(data)}
# convert str to class
if klass in self.NATIVE_TYPES_MAPPING:
klass = self.NATIVE_TYPES_MAPPING[klass]
try:
found_class = getattr(tb_rest_client.models.models_pe, klass)
# if sorted(list(found_class.attribute_map.values())) == sorted(list(data.keys())):
if all(attr in list(found_class.attribute_map.values()) for attr in list(data.keys())):
klass = found_class
else:
found_class = getattr(tb_rest_client.models.models_ce, klass)
# if sorted(list(found_class.attribute_map.values())) == sorted(list(data.keys())):
if all(attr in list(found_class.attribute_map.values()) for attr in list(data.keys())):
klass = found_class
except AttributeError:
found_class = getattr(tb_rest_client.models.models_ce, klass)
if all(attr in list(found_class.attribute_map.values()) for attr in list(data.keys())):
# if sorted(list(found_class.attribute_map.values())) == sorted(list(data.keys())):
klass = found_class
# else:
# return self.__deserialize(data, type(data))
return self.__deserialize_data(data, klass)
def __deserialize_data(self, data, klass):
try:
if klass in self.PRIMITIVE_TYPES:
return self.__deserialize_primitive(data, klass)
elif klass == object:
return self.__deserialize_object(data)
elif klass == datetime.date:
return self.__deserialize_date(data)
elif klass == datetime.datetime:
return self.__deserialize_datatime(data)
else:
return self.__deserialize_model(data, klass)
except Exception as e:
return e
def call_api(self, resource_path, method,
path_params=None, query_params=None, header_params=None,
body=None, post_params=None, files=None,
response_type=None, auth_settings=None, async_req=None,
_return_http_data_only=None, collection_formats=None,
_preload_content=True, _request_timeout=None):
"""Makes the HTTP request (synchronous) and returns deserialized data.
To make an async request, set the async_req parameter.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response: Response data type.
:param files dict: key -> filename, value -> filepath,
for `multipart/form-data`.
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return:
If async_req parameter is True,
the request will be called asynchronously.
The method will return the request thread.
If parameter async_req is False or missing,
then the method will return the response directly.
"""
if not async_req:
return self.__call_api(resource_path, method,
path_params, query_params, header_params,
body, post_params, files,
response_type, auth_settings,
_return_http_data_only, collection_formats,
_preload_content, _request_timeout)
else:
thread = self.pool.apply_async(self.__call_api, (resource_path,
method, path_params, query_params,
header_params, body,
post_params, files,
response_type, auth_settings,
_return_http_data_only,
collection_formats,
_preload_content, _request_timeout))
return thread
def request(self, method, url, query_params=None, headers=None,
post_params=None, body=None, _preload_content=True,
_request_timeout=None):
"""Makes the HTTP request using RESTClient."""
if method == "GET":
return self.rest_client.GET(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "HEAD":
return self.rest_client.HEAD(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "OPTIONS":
return self.rest_client.OPTIONS(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "POST":
return self.rest_client.POST(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PUT":
return self.rest_client.PUT(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PATCH":
return self.rest_client.PATCH(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "DELETE":
return self.rest_client.DELETE(url,
query_params=query_params,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
else:
raise ValueError(
"http method must be `GET`, `HEAD`, `OPTIONS`,"
" `POST`, `PATCH`, `PUT` or `DELETE`."
)
def parameters_to_tuples(self, params, collection_formats):
"""Get parameters as list of tuples, formatting collections.
:param params: Parameters as dict or list of two-tuples
:param dict collection_formats: Parameter collection formats
:return: Parameters as list of tuples, collections formatted
"""
new_params = []
if collection_formats is None:
collection_formats = {}
for k, v in six.iteritems(params) if isinstance(params, dict) else params: # noqa: E501
if k in collection_formats:
collection_format = collection_formats[k]
if collection_format == 'multi':
new_params.extend((k, value) for value in v)
else:
if collection_format == 'ssv':
delimiter = ' '
elif collection_format == 'tsv':
delimiter = '\t'
elif collection_format == 'pipes':
delimiter = '|'
else: # csv is the default
delimiter = ','
new_params.append(
(k, delimiter.join(str(value) for value in v)))
else:
new_params.append((k, v))
return new_params
def prepare_post_parameters(self, post_params=None, files=None):
"""Builds form parameters.
:param post_params: Normal form parameters.
:param files: File parameters.
:return: Form parameters with files.
"""
params = []
if post_params:
params = post_params
if files:
for k, v in six.iteritems(files):
if not v:
continue
file_names = v if type(v) is list else [v]
for n in file_names:
with open(n, 'rb') as f:
filename = os.path.basename(f.name)
filedata = f.read()
mimetype = (mimetypes.guess_type(filename)[0] or
'application/octet-stream')
params.append(
tuple([k, tuple([filename, filedata, mimetype])]))
return params
def select_header_accept(self, accepts):
"""Returns `Accept` based on an array of accepts provided.
:param accepts: List of headers.
:return: Accept (e.g. application/json).
"""
if not accepts:
return
accepts = [x.lower() for x in accepts]
if 'application/json' in accepts:
return 'application/json'
else:
return ', '.join(accepts)
def select_header_content_type(self, content_types):
"""Returns `Content-Type` based on an array of content_types provided.
:param content_types: List of content-types.
:return: Content-Type (e.g. application/json).
"""
if not content_types:
return 'application/json'
content_types = [x.lower() for x in content_types]
if 'application/json' in content_types or '*/*' in content_types:
return 'application/json'
else:
return content_types[0]
def update_params_for_auth(self, headers, querys, auth_settings):
"""Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
"""
if not auth_settings:
return
for auth in auth_settings:
auth_setting = self.configuration.auth_settings().get(auth)
if auth_setting:
if not auth_setting['value']:
continue
elif auth_setting['in'] == 'header':
headers[auth_setting['key']] = auth_setting['value']
elif auth_setting['in'] == 'query':
querys.append((auth_setting['key'], auth_setting['value']))
else:
raise ValueError(
'Authentication token must be in `query` or `header`'
)
def __deserialize_file(self, response):
"""Deserializes body to file
Saves response body into a file in a temporary folder,
using the filename from the `Content-Disposition` header if provided.
:param response: RESTResponse.
:return: file path.
"""
fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)
os.close(fd)
os.remove(path)
content_disposition = response.getheader("Content-Disposition")
if content_disposition:
filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
content_disposition).group(1)
path = os.path.join(os.path.dirname(path), filename)
with open(path, "wb") as f:
f.write(response.data)
return path
def __deserialize_primitive(self, data, klass):
"""Deserializes string to primitive type.
:param data: str.
:param klass: class literal.
:return: int, long, float, str, bool.
"""
try:
return klass(data)
except UnicodeEncodeError:
return six.text_type(data)
except TypeError:
return data
def __deserialize_object(self, value):
"""Return a original value.
:return: object.
"""
return value
def __deserialize_date(self, string):
"""Deserializes string to date.
:param string: str.
:return: date.
"""
try:
from dateutil.parser import parse
return parse(string).date()
except ImportError:
return string
except ValueError:
raise rest.ApiException(
status=0,
reason="Failed to parse `{0}` as date object".format(string)
)
def __deserialize_datatime(self, string):
"""Deserializes string to datetime.
The string should be in iso8601 datetime format.
:param string: str.
:return: datetime.
"""
try:
from dateutil.parser import parse
return parse(string)
except ImportError:
return string
except ValueError:
raise rest.ApiException(
status=0,
reason=(
"Failed to parse `{0}` as datetime object"
.format(string)
)
)
def __hasattr(self, object, name):
return name in object.__class__.__dict__
def __deserialize_model(self, data, klass):
"""Deserializes list or dict to model.
:param data: dict, list.
:param klass: class literal.
:return: model object.
"""
if (not klass.swagger_types and
not self.__hasattr(klass, 'get_real_child_model')):
return data
kwargs = {}
if klass.swagger_types is not None:
for attr, attr_type in six.iteritems(klass.swagger_types):
if (data is not None and
klass.attribute_map[attr] in data and
isinstance(data, (list, dict))):
value = data[klass.attribute_map[attr]]
kwargs[attr] = self.__deserialize(value, attr_type)
instance = klass(**kwargs)
if (isinstance(instance, dict) and
klass.swagger_types is not None and
isinstance(data, dict)):
for key, value in data.items():
if key not in klass.swagger_types:
instance[key] = value
if self.__hasattr(instance, 'get_real_child_model'):
klass_name = instance.get_real_child_model(data)
if klass_name:
instance = self.__deserialize(data, klass_name)
return instance
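# A hedged usage sketch (the host, auth header value and endpoint below are illustrative
# assumptions, not taken from this file). With _return_http_data_only=False the call
# returns a (data, status, headers) tuple.
#
#     config = Configuration()
#     config.host = "http://localhost:8080"
#     client = ApiClient(configuration=config,
#                        header_name="X-Authorization",
#                        header_value="Bearer <JWT token>")
#     data, status, headers = client.call_api('/api/auth/user', 'GET',
#                                             response_type=None,
#                                             _return_http_data_only=False)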
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import logging
from typing import Dict, Tuple, Any, Optional
from .dialogue_object import DialogueObject
from memory_nodes import ObjectNode, RewardNode
from .interpreter_helper import interpret_reference_object, ErrorWithResponse
class PutMemoryHandler(DialogueObject):
def __init__(self, speaker_name: str, action_dict: Dict, **kwargs):
super().__init__(**kwargs)
self.provisional: Dict = {}
self.speaker_name = speaker_name
self.action_dict = action_dict
def step(self) -> Tuple[Optional[str], Any]:
r = self._step()
self.finished = True
return r
def _step(self) -> Tuple[Optional[str], Any]:
assert self.action_dict["dialogue_type"] == "PUT_MEMORY"
memory_type = self.action_dict["upsert"]["memory_data"]["memory_type"]
if memory_type == "REWARD":
return self.handle_reward()
elif memory_type == "TRIPLE":
return self.handle_triple()
else:
raise NotImplementedError
def handle_reward(self) -> Tuple[Optional[str], Any]:
reward_value = self.action_dict["upsert"]["memory_data"]["reward_value"]
assert reward_value in ("POSITIVE", "NEGATIVE"), self.action_dict
RewardNode.create(self.memory, reward_value)
if reward_value == "POSITIVE":
return "Thank you!", None
else:
return "I'll try to do better in the future.", None
def handle_triple(self) -> Tuple[Optional[str], Any]:
ref_obj_d = self.action_dict["filters"]["reference_object"]
r = interpret_reference_object(self, self.speaker_name, ref_obj_d)
if len(r) == 0:
raise ErrorWithResponse("I don't know what you're referring to")
mem = r[0]
name = "it"
triples = self.memory.get_triples(subj=mem.memid, pred="has_tag")
if len(triples) > 0:
name = triples[0][2].strip("_")
memory_data = self.action_dict["upsert"]["memory_data"]
schematic_memid = (
self.memory.convert_block_object_to_schematic(mem.memid).memid
if isinstance(mem, ObjectNode)
else None
)
for k, v in memory_data.items():
if k.startswith("has_"):
logging.info("Tagging {} {} {}".format(mem.memid, k, v))
self.memory.add_triple(mem.memid, k, v)
if schematic_memid:
self.memory.add_triple(schematic_memid, k, v)
point_at_target = mem.get_point_at_target()
self.agent.send_chat("OK I'm tagging this %r as %r " % (name, v))
self.agent.point_at(list(point_at_target))
return "Done!", None
|
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test objects for interacting with a bitcoind node over the p2p protocol.
The P2PInterface objects interact with the bitcoind nodes under test using the
node's p2p interface. They can be used to send messages to the node, and
callbacks can be registered that execute when messages are received from the
node. Messages are sent to/received from the node on an asyncio event loop.
State held inside the objects must be guarded by the p2p_lock to avoid data
races between the main testing thread and the event loop.
P2PConnection: A low-level connection object to a node's P2P interface
P2PInterface: A high-level interface object for communicating to a node over P2P
P2PDataStore: A p2p interface class that keeps a store of transactions and blocks
and can respond correctly to getdata and getheaders messages
P2PTxInvStore: A p2p interface class that inherits from P2PDataStore, and keeps
a count of how many times each txid has been announced."""
import asyncio
from collections import defaultdict
from io import BytesIO
import logging
import struct
import sys
import threading
from test_framework.messages import (
CBlockHeader,
MAX_HEADERS_RESULTS,
msg_addr,
msg_addrv2,
msg_block,
MSG_BLOCK,
msg_blocktxn,
msg_cfcheckpt,
msg_cfheaders,
msg_cfilter,
msg_cmpctblock,
msg_feefilter,
msg_filteradd,
msg_filterclear,
msg_filterload,
msg_getaddr,
msg_getblocks,
msg_getblocktxn,
msg_getdata,
msg_getheaders,
msg_headers,
msg_inv,
msg_mempool,
msg_merkleblock,
msg_notfound,
msg_ping,
msg_pong,
msg_sendaddrv2,
msg_sendcmpct,
msg_sendheaders,
msg_tx,
MSG_TX,
MSG_TYPE_MASK,
msg_verack,
msg_version,
MSG_WTX,
msg_wtxidrelay,
NODE_NETWORK,
NODE_WITNESS,
sha256,
)
from test_framework.util import (
MAX_NODES,
p2p_port,
wait_until_helper,
)
logger = logging.getLogger("TestFramework.p2p")
# The minimum P2P version that this test framework supports
MIN_P2P_VERSION_SUPPORTED = 60001
# The P2P version that this test framework implements and sends in its `version` message
# Version 110016 supports wtxid relay
P2P_VERSION = 110016
# The services that this test framework offers in its `version` message
P2P_SERVICES = NODE_NETWORK | NODE_WITNESS
# The P2P user agent string that this test framework sends in its `version` message
P2P_SUBVERSION = "/python-p2p-tester:0.0.3/"
# Value for relay that this test framework sends in its `version` message
P2P_VERSION_RELAY = 1
MESSAGEMAP = {
b"addr": msg_addr,
b"addrv2": msg_addrv2,
b"block": msg_block,
b"blocktxn": msg_blocktxn,
b"cfcheckpt": msg_cfcheckpt,
b"cfheaders": msg_cfheaders,
b"cfilter": msg_cfilter,
b"cmpctblock": msg_cmpctblock,
b"feefilter": msg_feefilter,
b"filteradd": msg_filteradd,
b"filterclear": msg_filterclear,
b"filterload": msg_filterload,
b"getaddr": msg_getaddr,
b"getblocks": msg_getblocks,
b"getblocktxn": msg_getblocktxn,
b"getdata": msg_getdata,
b"getheaders": msg_getheaders,
b"headers": msg_headers,
b"inv": msg_inv,
b"mempool": msg_mempool,
b"merkleblock": msg_merkleblock,
b"notfound": msg_notfound,
b"ping": msg_ping,
b"pong": msg_pong,
b"sendaddrv2": msg_sendaddrv2,
b"sendcmpct": msg_sendcmpct,
b"sendheaders": msg_sendheaders,
b"tx": msg_tx,
b"verack": msg_verack,
b"version": msg_version,
b"wtxidrelay": msg_wtxidrelay,
}
MAGIC_BYTES = {
"mainnet": b"\xf9\xbe\xb4\xd9", # mainnet
"testnet3": b"\x0b\x11\x09\x07", # testnet3
"regtest": b"\xfa\xbf\xb5\xda", # regtest
"signet": b"\x0a\x03\xcf\x40", # signet
}
class P2PConnection(asyncio.Protocol):
"""A low-level connection object to a node's P2P interface.
This class is responsible for:
- opening and closing the TCP connection to the node
- reading bytes from and writing bytes to the socket
- deserializing and serializing the P2P message header
- logging messages as they are sent and received
This class contains no logic for handling the P2P message payloads. It must be
sub-classed and the on_message() callback overridden."""
def __init__(self):
# The underlying transport of the connection.
# Should only call methods on this from the NetworkThread, c.f. call_soon_threadsafe
self._transport = None
@property
def is_connected(self):
return self._transport is not None
def peer_connect_helper(self, dstaddr, dstport, net, timeout_factor):
assert not self.is_connected
self.timeout_factor = timeout_factor
self.dstaddr = dstaddr
self.dstport = dstport
# The initial message to send after the connection was made:
self.on_connection_send_msg = None
self.recvbuf = b""
self.magic_bytes = MAGIC_BYTES[net]
def peer_connect(self, dstaddr, dstport, *, net, timeout_factor):
self.peer_connect_helper(dstaddr, dstport, net, timeout_factor)
loop = NetworkThread.network_event_loop
logger.debug('Connecting to Bitcoin Node: %s:%d' % (self.dstaddr, self.dstport))
coroutine = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport)
return lambda: loop.call_soon_threadsafe(loop.create_task, coroutine)
def peer_accept_connection(self, connect_id, connect_cb=lambda: None, *, net, timeout_factor):
self.peer_connect_helper('0', 0, net, timeout_factor)
logger.debug('Listening for Bitcoin Node with id: {}'.format(connect_id))
return lambda: NetworkThread.listen(self, connect_cb, idx=connect_id)
def peer_disconnect(self):
# Connection could have already been closed by other end.
NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.abort())
# Connection and disconnection methods
def connection_made(self, transport):
"""asyncio callback when a connection is opened."""
assert not self._transport
logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
self._transport = transport
if self.on_connection_send_msg:
self.send_message(self.on_connection_send_msg)
self.on_connection_send_msg = None # Never used again
self.on_open()
def connection_lost(self, exc):
"""asyncio callback when a connection is closed."""
if exc:
logger.warning("Connection lost to {}:{} due to {}".format(self.dstaddr, self.dstport, exc))
else:
logger.debug("Closed connection to: %s:%d" % (self.dstaddr, self.dstport))
self._transport = None
self.recvbuf = b""
self.on_close()
# Socket read methods
def data_received(self, t):
"""asyncio callback when data is read from the socket."""
if len(t) > 0:
self.recvbuf += t
self._on_data()
def _on_data(self):
"""Try to read P2P messages from the recv buffer.
This method reads data from the buffer in a loop. It deserializes,
parses and verifies the P2P header, then passes the P2P payload to
the on_message callback for processing."""
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.magic_bytes:
raise ValueError("magic bytes mismatch: {} != {}".format(repr(self.magic_bytes), repr(self.recvbuf)))
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
msgtype = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if msgtype not in MESSAGEMAP:
raise ValueError("Received unknown msgtype from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, msgtype, repr(msg)))
f = BytesIO(msg)
t = MESSAGEMAP[msgtype]()
t.deserialize(f)
self._log_message("receive", t)
self.on_message(t)
except Exception as e:
logger.exception('Error reading message:', repr(e))
raise
def on_message(self, message):
"""Callback for processing a P2P payload. Must be overridden by derived class."""
raise NotImplementedError
# Socket write methods
def send_message(self, message):
"""Send a P2P message over the socket.
This method takes a P2P payload, builds the P2P header and adds
the message to the send buffer to be sent over the socket."""
tmsg = self.build_message(message)
self._log_message("send", message)
return self.send_raw_message(tmsg)
def send_raw_message(self, raw_message_bytes):
if not self.is_connected:
raise IOError('Not connected')
def maybe_write():
if not self._transport:
return
if self._transport.is_closing():
return
self._transport.write(raw_message_bytes)
NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write)
# Class utility methods
def build_message(self, message):
"""Build a serialized P2P message"""
msgtype = message.msgtype
data = message.serialize()
tmsg = self.magic_bytes
tmsg += msgtype
tmsg += b"\x00" * (12 - len(msgtype))
tmsg += struct.pack("<I", len(data))
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
return tmsg
def _log_message(self, direction, msg):
"""Logs a message being sent or received over the connection."""
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
log_message = "Received message from "
log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
if len(log_message) > 500:
log_message += "... (msg truncated)"
logger.debug(log_message)
class P2PInterface(P2PConnection):
"""A high-level P2P interface class for communicating with a Bitcoin node.
This class provides high-level callbacks for processing P2P message
payloads, as well as convenience methods for interacting with the
node over P2P.
Individual testcases should subclass this and override the on_* methods
if they want to alter message handling behaviour."""
def __init__(self, support_addrv2=False, wtxidrelay=True):
super().__init__()
# Track number of messages of each type received.
# Should be read-only in a test.
self.message_count = defaultdict(int)
# Track the most recent message of each type.
# To wait for a message to be received, pop that message from
# this and use self.wait_until.
self.last_message = {}
# A count of the number of ping messages we've sent to the node
self.ping_counter = 1
# The network services received from the peer
self.nServices = 0
self.support_addrv2 = support_addrv2
# If the peer supports wtxid-relay
self.wtxidrelay = wtxidrelay
def peer_connect_send_version(self, services):
# Send a version msg
vt = msg_version()
vt.nVersion = P2P_VERSION
vt.strSubVer = P2P_SUBVERSION
vt.relay = P2P_VERSION_RELAY
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.on_connection_send_msg = vt # Will be sent in connection_made callback
def peer_connect(self, *args, services=P2P_SERVICES, send_version=True, **kwargs):
create_conn = super().peer_connect(*args, **kwargs)
if send_version:
self.peer_connect_send_version(services)
return create_conn
def peer_accept_connection(self, *args, services=NODE_NETWORK | NODE_WITNESS, **kwargs):
create_conn = super().peer_accept_connection(*args, **kwargs)
self.peer_connect_send_version(services)
return create_conn
# Message receiving methods
def on_message(self, message):
"""Receive message and dispatch message to appropriate callback.
We keep a count of how many of each message type has been received
and the most recent message of each type."""
with p2p_lock:
try:
msgtype = message.msgtype.decode('ascii')
self.message_count[msgtype] += 1
self.last_message[msgtype] = message
getattr(self, 'on_' + msgtype)(message)
except:
print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
raise
# Callback methods. Can be overridden by subclasses in individual test
# cases to provide custom message handling behaviour.
def on_open(self):
pass
def on_close(self):
pass
def on_addr(self, message): pass
def on_addrv2(self, message): pass
def on_block(self, message): pass
def on_blocktxn(self, message): pass
def on_cfcheckpt(self, message): pass
def on_cfheaders(self, message): pass
def on_cfilter(self, message): pass
def on_cmpctblock(self, message): pass
def on_feefilter(self, message): pass
def on_filteradd(self, message): pass
def on_filterclear(self, message): pass
def on_filterload(self, message): pass
def on_getaddr(self, message): pass
def on_getblocks(self, message): pass
def on_getblocktxn(self, message): pass
def on_getdata(self, message): pass
def on_getheaders(self, message): pass
def on_headers(self, message): pass
def on_mempool(self, message): pass
def on_merkleblock(self, message): pass
def on_notfound(self, message): pass
def on_pong(self, message): pass
def on_sendaddrv2(self, message): pass
def on_sendcmpct(self, message): pass
def on_sendheaders(self, message): pass
def on_tx(self, message): pass
def on_wtxidrelay(self, message): pass
def on_inv(self, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
self.send_message(want)
def on_ping(self, message):
self.send_message(msg_pong(message.nonce))
def on_verack(self, message):
pass
def on_version(self, message):
assert message.nVersion >= MIN_P2P_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_P2P_VERSION_SUPPORTED)
if message.nVersion >= 110016 and self.wtxidrelay:
self.send_message(msg_wtxidrelay())
if self.support_addrv2:
self.send_message(msg_sendaddrv2())
self.send_message(msg_verack())
self.nServices = message.nServices
# Connection helper methods
def wait_until(self, test_function_in, *, timeout=60, check_connected=True):
def test_function():
if check_connected:
assert self.is_connected
return test_function_in()
wait_until_helper(test_function, timeout=timeout, lock=p2p_lock, timeout_factor=self.timeout_factor)
def wait_for_connect(self, timeout=60):
test_function = lambda: self.is_connected
wait_until_helper(test_function, timeout=timeout, lock=p2p_lock)
def wait_for_disconnect(self, timeout=60):
test_function = lambda: not self.is_connected
self.wait_until(test_function, timeout=timeout, check_connected=False)
# Message receiving helper methods
def wait_for_tx(self, txid, timeout=60):
def test_function():
if not self.last_message.get('tx'):
return False
return self.last_message['tx'].tx.rehash() == txid
self.wait_until(test_function, timeout=timeout)
def wait_for_block(self, blockhash, timeout=60):
def test_function():
return self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
self.wait_until(test_function, timeout=timeout)
def wait_for_header(self, blockhash, timeout=60):
def test_function():
last_headers = self.last_message.get('headers')
if not last_headers:
return False
return last_headers.headers[0].rehash() == int(blockhash, 16)
self.wait_until(test_function, timeout=timeout)
def wait_for_merkleblock(self, blockhash, timeout=60):
def test_function():
last_filtered_block = self.last_message.get('merkleblock')
if not last_filtered_block:
return False
return last_filtered_block.merkleblock.header.rehash() == int(blockhash, 16)
self.wait_until(test_function, timeout=timeout)
def wait_for_getdata(self, hash_list, timeout=60):
"""Waits for a getdata message.
The object hashes in the inventory vector must match the provided hash_list."""
def test_function():
last_data = self.last_message.get("getdata")
if not last_data:
return False
return [x.hash for x in last_data.inv] == hash_list
self.wait_until(test_function, timeout=timeout)
def wait_for_getheaders(self, timeout=60):
"""Waits for a getheaders message.
Receiving any getheaders message will satisfy the predicate. The last_message["getheaders"]
value must be explicitly cleared before calling this method, or this will return
immediately with success. TODO: change this method to take a hash value and only
return true if the correct block header has been requested."""
def test_function():
return self.last_message.get("getheaders")
self.wait_until(test_function, timeout=timeout)
def wait_for_inv(self, expected_inv, timeout=60):
"""Waits for an INV message and checks that the first inv object in the message was as expected."""
if len(expected_inv) > 1:
raise NotImplementedError("wait_for_inv() will only verify the first inv object")
def test_function():
return self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
self.wait_until(test_function, timeout=timeout)
def wait_for_verack(self, timeout=60):
def test_function():
return "verack" in self.last_message
self.wait_until(test_function, timeout=timeout)
# Message sending helper functions
def send_and_ping(self, message, timeout=60):
self.send_message(message)
self.sync_with_ping(timeout=timeout)
def sync_send_with_ping(self, timeout=60):
"""Ensure SendMessages is called on this connection"""
# Calling sync_with_ping twice requires that the node calls
# `ProcessMessage` twice, and thus ensures `SendMessages` must have
# been called at least once
self.sync_with_ping()
self.sync_with_ping()
def sync_with_ping(self, timeout=60):
"""Ensure ProcessMessages is called on this connection"""
self.send_message(msg_ping(nonce=self.ping_counter))
def test_function():
return self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
self.wait_until(test_function, timeout=timeout)
self.ping_counter += 1
# One lock for synchronizing all data access between the network event loop (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# P2PConnection acquires this lock whenever delivering a message to a P2PInterface.
# This lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the P2PInterface or P2PConnection.
p2p_lock = threading.Lock()
class NetworkThread(threading.Thread):
network_event_loop = None
def __init__(self):
super().__init__(name="NetworkThread")
# There is only one event loop and no more than one thread must be created
assert not self.network_event_loop
NetworkThread.listeners = {}
NetworkThread.protos = {}
NetworkThread.network_event_loop = asyncio.new_event_loop()
def run(self):
"""Start the network thread."""
self.network_event_loop.run_forever()
def close(self, timeout=10):
"""Close the connections and network event loop."""
self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop)
wait_until_helper(lambda: not self.network_event_loop.is_running(), timeout=timeout)
self.network_event_loop.close()
self.join(timeout)
# Safe to remove event loop.
NetworkThread.network_event_loop = None
@classmethod
def listen(cls, p2p, callback, port=None, addr=None, idx=1):
""" Ensure a listening server is running on the given port, and run the
protocol specified by `p2p` on the next connection to it. Once ready
for connections, call `callback`."""
if port is None:
assert 0 < idx <= MAX_NODES
port = p2p_port(MAX_NODES - idx)
if addr is None:
addr = '127.0.0.1'
coroutine = cls.create_listen_server(addr, port, callback, p2p)
cls.network_event_loop.call_soon_threadsafe(cls.network_event_loop.create_task, coroutine)
@classmethod
async def create_listen_server(cls, addr, port, callback, proto):
def peer_protocol():
"""Returns a function that does the protocol handling for a new
connection. To allow different connections to have different
behaviors, the protocol function is first put in the cls.protos
dict. When the connection is made, the function removes the
protocol function from that dict, and returns it so the event loop
can start executing it."""
response = cls.protos.get((addr, port))
cls.protos[(addr, port)] = None
return response
if (addr, port) not in cls.listeners:
# When creating a listener on a given (addr, port) we only need to
# do it once. If we want different behaviors for different
# connections, we can accomplish this by providing different
# `proto` functions
listener = await cls.network_event_loop.create_server(peer_protocol, addr, port)
logger.debug("Listening server on %s:%d should be started" % (addr, port))
cls.listeners[(addr, port)] = listener
cls.protos[(addr, port)] = proto
callback(addr, port)
class P2PDataStore(P2PInterface):
"""A P2P data store class.
Keeps a block and transaction store and responds correctly to getdata and getheaders requests."""
def __init__(self):
super().__init__()
# store of blocks. key is block hash, value is a CBlock object
self.block_store = {}
self.last_block_hash = ''
# store of txs. key is txid, value is a CTransaction object
self.tx_store = {}
self.getdata_requests = []
def on_getdata(self, message):
"""Check for the tx/block in our stores and if found, reply with an inv message."""
for inv in message.inv:
self.getdata_requests.append(inv.hash)
if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys():
self.send_message(msg_tx(self.tx_store[inv.hash]))
elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys():
self.send_message(msg_block(self.block_store[inv.hash]))
else:
logger.debug('getdata message type {} received.'.format(hex(inv.type)))
def on_getheaders(self, message):
"""Search back through our block store for the locator, and reply with a headers message if found."""
locator, hash_stop = message.locator, message.hashstop
# Assume that the most recent block added is the tip
if not self.block_store:
return
headers_list = [self.block_store[self.last_block_hash]]
while headers_list[-1].sha256 not in locator.vHave:
# Walk back through the block store, adding headers to headers_list
# as we go.
prev_block_hash = headers_list[-1].hashPrevBlock
if prev_block_hash in self.block_store:
prev_block_header = CBlockHeader(self.block_store[prev_block_hash])
headers_list.append(prev_block_header)
if prev_block_header.sha256 == hash_stop:
# if this is the hashstop header, stop here
break
else:
logger.debug('block hash {} not found in block store'.format(hex(prev_block_hash)))
break
# Truncate the list if there are too many headers
headers_list = headers_list[:-MAX_HEADERS_RESULTS - 1:-1]
response = msg_headers(headers_list)
if response is not None:
self.send_message(response)
def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, reject_reason=None, expect_disconnect=False, timeout=60):
"""Send blocks to test node and test whether the tip advances.
- add all blocks to our block_store
- send a headers message for the final block
- the on_getheaders handler will ensure that any getheaders are responded to
- if force_send is False: wait for getdata for each of the blocks. The on_getdata handler will
ensure that any getdata messages are responded to. Otherwise send the full block unsolicited.
- if success is True: assert that the node's tip advances to the most recent block
- if success is False: assert that the node's tip doesn't advance
- if reject_reason is set: assert that the correct reject message is logged"""
with p2p_lock:
for block in blocks:
self.block_store[block.sha256] = block
self.last_block_hash = block.sha256
reject_reason = [reject_reason] if reject_reason else []
with node.assert_debug_log(expected_msgs=reject_reason):
if force_send:
for b in blocks:
self.send_message(msg_block(block=b))
else:
self.send_message(msg_headers([CBlockHeader(block) for block in blocks]))
self.wait_until(
lambda: blocks[-1].sha256 in self.getdata_requests,
timeout=timeout,
check_connected=success,
)
if expect_disconnect:
self.wait_for_disconnect(timeout=timeout)
else:
self.sync_with_ping(timeout=timeout)
if success:
self.wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout)
else:
assert node.getbestblockhash() != blocks[-1].hash
def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None):
"""Send txs to test node and test whether they're accepted to the mempool.
- add all txs to our tx_store
- send tx messages for all txs
- if success is True/False: assert that the txs are/are not accepted to the mempool
- if expect_disconnect is True: Skip the sync with ping
- if reject_reason is set: assert that the correct reject message is logged."""
with p2p_lock:
for tx in txs:
self.tx_store[tx.sha256] = tx
reject_reason = [reject_reason] if reject_reason else []
with node.assert_debug_log(expected_msgs=reject_reason):
for tx in txs:
self.send_message(msg_tx(tx))
if expect_disconnect:
self.wait_for_disconnect()
else:
self.sync_with_ping()
raw_mempool = node.getrawmempool()
if success:
# Check that all txs are now in the mempool
for tx in txs:
assert tx.hash in raw_mempool, "{} not found in mempool".format(tx.hash)
else:
# Check that none of the txs are now in the mempool
for tx in txs:
assert tx.hash not in raw_mempool, "{} tx found in mempool".format(tx.hash)
class P2PTxInvStore(P2PInterface):
"""A P2PInterface which stores a count of how many times each txid has been announced."""
def __init__(self):
super().__init__()
self.tx_invs_received = defaultdict(int)
def on_inv(self, message):
super().on_inv(message) # Send getdata in response.
# Store how many times invs have been received for each tx.
for i in message.inv:
if (i.type == MSG_TX) or (i.type == MSG_WTX):
# save txid
self.tx_invs_received[i.hash] += 1
def get_invs(self):
with p2p_lock:
return list(self.tx_invs_received.keys())
def wait_for_broadcast(self, txns, timeout=60):
"""Waits for the txns (list of txids) to complete initial broadcast.
The mempool should mark unbroadcast=False for these transactions.
"""
# Wait until invs have been received (and getdatas sent) for each txid.
self.wait_until(lambda: set(self.tx_invs_received.keys()) == set([int(tx, 16) for tx in txns]), timeout=timeout)
# Flush messages and wait for the getdatas to be processed
self.sync_with_ping()
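# A minimal usage sketch (illustrative only, not part of the framework): assumes a
# TestNode-style `node` object exposing add_p2p_connection(), as used throughout
# the functional tests, and a txid given as a hex string.
def example_wait_for_tx_broadcast(node, txid):
    """Attach a P2PTxInvStore peer to `node` and wait until `txid` is announced."""
    peer = node.add_p2p_connection(P2PTxInvStore())
    peer.wait_for_broadcast([txid])
    # returns the integer hashes of all invs seen so far
    return peer.get_invs()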
|
"""This file contains all the classes you must complete for this project.
You can use the test cases in agent_test.py to help during development, and
augment the test suite with your own test cases to further test your code.
You must test your agent's strength against a set of agents with known
relative strength using tournament.py and include the results in your report.
"""
import random
import sys
import math
class Timeout(Exception):
"""Subclass base exception for code clarity."""
pass
def custom_score(game, player):
"""Calculate the heuristic value of a game state from the point of view
of the given player.
Note: this function should be called from within a Player instance as
`self.score()` -- you should not need to call this function directly.
Parameters
----------
game : `isolation.Board`
An instance of `isolation.Board` encoding the current state of the
game (e.g., player locations and blocked cells).
player : object
A player instance in the current game (i.e., an object corresponding to
one of the player objects `game.__player_1__` or `game.__player_2__`.)
Returns
-------
float
The heuristic value of the current game state to the specified player.
"""
# TODO: finish this function!
if game.is_loser(player):
return float("-inf")
if game.is_winner(player):
return float("inf")
'''
identify the corners of the board and avoid these
topright_corner = (game.height-1,game.width-1)
bottomright_corner = (0,game.width-1)
bottomleft_corner = (0,0)
topleft_corner = (game.height-1,0)
'''
# heuristic function a variation of #my-moves-#opp_moves
#go down four levels further
#print('height :',game.height)
#print('width: ',game.width)
own_score = 0.0
opp_score = 0.0
own_moves = game.get_legal_moves(player)
opp_moves = game.get_legal_moves(game.get_opponent(player))
percent_board_unoccupied = (len(game.get_blank_spaces())/(game.height*game.width))*100
#own_move_coverage = (len(game.get_legal_moves(player))/len(game.get_blank_spaces()))*100
#opp_move_coverage = (len(game.get_opponent(player))/len(game.get_blank_spaces()))*100
#common_moves = list(set(own_moves).intersection(opp_moves))
#union_moves = list(set(own_moves).union(opp_moves))
#opp_diff_moves = len(opp_moves)-len(common_moves)
#own_diff_moves = len(own_moves)-len(common_moves)
#corners= [(game.height-1,game.width-1),(0,game.width-1),(0,0),(game.height-1,0)]
walls = [
[(0,i) for i in range(game.width)],[(i,0) for i in range(game.height)],[(game.height-1,i) for i in range(game.width)],[(i,game.width-1) for i in range(game.height)]
]
#==============================================================================
# for move in game.get_legal_moves(player):
# if move in corners:
# own_score -= 10
# else:
# own_score += 1
# return own_score
#==============================================================================
centers = [(i,j) for i in range(math.floor(game.width/2)-1,math.floor(game.width/2)+1) for j in range(math.floor(game.height/2)-1,math.floor(game.height/2)+1)]
#print(center)
#==============================================================================
# for move in own_moves:
# if move in centers and percent_board_unoccupied<25:
# own_score += 30
# elif move in centers and percent_board_unoccupied>=25 and percent_board_unoccupied<50:
# own_score +=20
# elif move in centers and percent_board_unoccupied>=50 and percent_board_unoccupied<75:
# own_score +=10
# elif move in centers and percent_board_unoccupied>=75:
# own_score +=5
# return own_score
#==============================================================================
#==============================================================================
# for move in opp_moves:
# if move in centers and percent_board_unoccupied<25:
# opp_score += 30
# elif move in centers and percent_board_unoccupied>=25 and percent_board_unoccupied<50:
# opp_score +=20
# elif move in centers and percent_board_unoccupied>=50 and percent_board_unoccupied<75:
# opp_score +=10
# elif move in centers and percent_board_unoccupied>=75:
# opp_score +=5
# return opp_score
#==============================================================================
for move in own_moves:
for wall in walls:
if move in wall and percent_board_unoccupied<25:
own_score -=5 #30
elif move in wall and (percent_board_unoccupied>=25 and percent_board_unoccupied<50):
own_score -=10 #20
elif move in wall and (percent_board_unoccupied>=50 and percent_board_unoccupied<75):
own_score -=20 #10
elif move in wall and percent_board_unoccupied>=75:
own_score -=30 #5
elif move in centers:
own_score +=20
else:
own_score += 1
for move in opp_moves:
for wall in walls:
if move in wall and percent_board_unoccupied<25:
opp_score -=30
elif move in wall and (percent_board_unoccupied>=25 and percent_board_unoccupied<50):
opp_score -=20
elif move in wall and (percent_board_unoccupied>=50 and percent_board_unoccupied<75):
opp_score -=10
elif move in wall and percent_board_unoccupied>=75:
opp_score -=5 #91.43
elif move in centers:
opp_score +=20
else:
opp_score += 1
return float((own_score)-(2*opp_score)) #float((own_score)-(2*len(opp_moves)))
class CustomPlayer:
"""Game-playing agent that chooses a move using your evaluation function
and a depth-limited minimax algorithm with alpha-beta pruning. You must
finish and test this player to make sure it properly uses minimax and
alpha-beta to return a good move before the search time limit expires.
Parameters
----------
search_depth : int (optional)
A strictly positive integer (i.e., 1, 2, 3,...) for the number of
layers in the game tree to explore for fixed-depth search. (i.e., a
depth of one (1) would only explore the immediate successors of the
current state.)
score_fn : callable (optional)
A function to use for heuristic evaluation of game states.
iterative : boolean (optional)
Flag indicating whether to perform fixed-depth search (False) or
iterative deepening search (True).
method : {'minimax', 'alphabeta'} (optional)
The name of the search method to use in get_move().
timeout : float (optional)
Time remaining (in milliseconds) when search is aborted. Should be a
positive value large enough to allow the function to return before the
timer expires.
"""
def __init__(self, search_depth=3, score_fn=custom_score,
iterative=True, method='minimax', timeout=10.):
self.search_depth = search_depth
self.iterative = iterative
self.score = score_fn
self.method = method
self.time_left = None
self.TIMER_THRESHOLD = timeout
def get_move(self, game, legal_moves, time_left):
"""Search for the best move from the available legal moves and return a
result before the time limit expires.
This function must perform iterative deepening if self.iterative=True,
and it must use the search method (minimax or alphabeta) corresponding
to the self.method value.
**********************************************************************
NOTE: If time_left < 0 when this function returns, the agent will
forfeit the game due to timeout. You must return _before_ the
timer reaches 0.
**********************************************************************
Parameters
----------
game : `isolation.Board`
An instance of `isolation.Board` encoding the current state of the
game (e.g., player locations and blocked cells).
legal_moves : list<(int, int)>
A list containing legal moves. Moves are encoded as tuples of pairs
of ints defining the next (row, col) for the agent to occupy.
time_left : callable
A function that returns the number of milliseconds left in the
current turn. Returning with any less than 0 ms remaining forfeits
the game.
Returns
-------
(int, int)
Board coordinates corresponding to a legal move; may return
(-1, -1) if there are no available legal moves.
"""
self.time_left = time_left
# TODO: finish this function!
# Perform any required initializations, including selecting an initial
# move from the game board (i.e., an opening book), or returning
# immediately if there are no legal moves
try:
# The search method call (alpha beta or minimax) should happen in
# here in order to avoid timeout. The try/except block will
# automatically catch the exception raised by the search method
# when the timer gets close to expiring
# Perform Iterative Deepening with depth d
if not legal_moves:
return (-1, -1)
best_move = legal_moves[0] #have to start somewhere
#most_depth = 0
if self.iterative:
for d in range(0,sys.maxsize): #IDS goes from 0 to inf
if self.method == 'minimax':
_,best_move = self.minimax(game, d)
else:
_,best_move = self.alphabeta(game, d)
#most_depth=d
#print('board:', game.to_string())
return best_move
except Timeout:
# Handle any actions required at timeout, if necessary
# In case of timeout, return the best move from the last completed iteration
return best_move
# Return the best move from the last completed search iteration
#raise NotImplementedError
def minimax(self, game, depth, maximizing_player=True):
"""Implement the minimax search algorithm as described in the lectures.
Parameters
----------
game : isolation.Board
An instance of the Isolation game `Board` class representing the
current game state
depth : int
Depth is an integer representing the maximum number of plies to
search in the game tree before aborting
maximizing_player : bool
Flag indicating whether the current search depth corresponds to a
maximizing layer (True) or a minimizing layer (False)
Returns
-------
float
The score for the current search branch
tuple(int, int)
The best move for the current branch; (-1, -1) for no legal moves
Notes
-----
(1) You MUST use the `self.score()` method for board evaluation
to pass the project unit tests; you cannot call any other
evaluation function directly.
"""
if self.time_left() < self.TIMER_THRESHOLD:
raise Timeout()
# TODO: finish this function!
best_move = (-1, -1)  # default when there are no legal moves
if depth == 0:
return self.score(game,self), game.get_player_location(self)
else:
if maximizing_player:
best_score = float('-inf')
for m in game.get_legal_moves():
score, move = self.minimax(game.forecast_move(m), depth - 1, False)
if score > best_score:
best_score = score
best_move = m
#print('score and move:',best_score,best_move)
return best_score, best_move
else: #minimizing player
best_score = float('inf')
for m in game.get_legal_moves(game.get_opponent(self)):
score, move = self.minimax(game.forecast_move(m), depth - 1, True)
if score < best_score:
best_score = score
best_move = m
#print('score and move:',best_score,best_move)
return best_score, best_move
def alphabeta(self, game, depth, alpha=float("-inf"), beta=float("inf"), maximizing_player=True):
"""Implement minimax search with alpha-beta pruning as described in the
lectures.
Parameters
----------
game : isolation.Board
An instance of the Isolation game `Board` class representing the
current game state
depth : int
Depth is an integer representing the maximum number of plies to
search in the game tree before aborting
alpha : float
Alpha limits the lower bound of search on minimizing layers
beta : float
Beta limits the upper bound of search on maximizing layers
maximizing_player : bool
Flag indicating whether the current search depth corresponds to a
maximizing layer (True) or a minimizing layer (False)
Returns
-------
float
The score for the current search branch
tuple(int, int)
The best move for the current branch; (-1, -1) for no legal moves
Notes
-----
(1) You MUST use the `self.score()` method for board evaluation
to pass the project unit tests; you cannot call any other
evaluation function directly.
"""
if self.time_left() < self.TIMER_THRESHOLD:
raise Timeout()
# TODO: finish this function!
#legal_moves=game.get_legal_moves()
best_move=(3,3) #occupy the middle square
if depth == 0:
return self.score(game,self), game.get_player_location(self)
else:
if maximizing_player:
best_score = float('-inf')
for m in game.get_legal_moves():
score, move = self.alphabeta(game.forecast_move(m), depth - 1, alpha, beta, False)
if score > best_score:
best_score = score
best_move = m
alpha = max(alpha,best_score)
if beta <= alpha:
break #prune
#print('score and move:',best_score,best_move)
return best_score, best_move
else: #minimizing player
best_score = float('inf')
for m in game.get_legal_moves(game.get_opponent(self)):
score, move = self.alphabeta(game.forecast_move(m), depth - 1, alpha, beta, True)
if score < best_score:
best_score = score
best_move = m
beta = min(beta, best_score)
if beta <= alpha:
break #prune
#print('score and move:',best_score,best_move)
return best_score, best_move
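# Illustrative sketch only: how an agent built from this file might be exercised
# with the course's isolation.Board class (assumed API: Board(p1, p2).play()).
def example_match():
    from isolation import Board  # provided by the course project, not this file
    player1 = CustomPlayer(method='alphabeta')
    player2 = CustomPlayer(method='minimax', iterative=False)
    game = Board(player1, player2)
    # play() returns the winner, the move history and the outcome string
    return game.play()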
|
"""
requests:
sqlalchemy
apscheduler
定时任务
sqlalchemy 文档: https://apscheduler.readthedocs.io/en/stable/index.html
"""
import time
import json
try:
from pytz import utc, timezone
china_tz = timezone('Asia/Shanghai')
from apscheduler.schedulers.background import BackgroundScheduler
# from apscheduler.jobstores.mongodb import MongoDBJobStore
from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
from apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR
except ImportError:
print("The following packages need to be installed:")
print("pip3 install sqlalchemy", "pip3 install apscheduler")
raise
import os
import sys
upath = os.path.dirname(os.path.abspath(__file__))
path = upath.split("/")[:-1]
path = '/'.join(path)
sys.path.append(path)
from loggers import scheLog
import weather
class AllScheduler():
def __init__(self):
pass
def listener(self, event):
"""任务执行状态监听"""
if event.exception:
log_job = {
"code": event.code,
"jobid": event.job_id,
"jobstore": event.jobstore,
}
scheLog.error(f'The job {event.job_id} crashed :( | {log_job}')
else:
scheLog.info(f'The job {event.job_id} worked :)')
def run(self):
jobstores = {
# 'mongo': MongoDBJobStore(),
# 'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')
"memory": MemoryJobStore(),
}
executors = {'default': ThreadPoolExecutor(5), 'processpool': ProcessPoolExecutor(2)}
job_defaults = {'coalesce': False, 'max_instances': 3}
scheduler = BackgroundScheduler(
jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=china_tz)
scheduler.add_listener(self.listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
#scheduler.add_job(weather.weather_alarm, 'interval', seconds=10*60, id='sign_push_report')
scheduler.add_job(weather.weather_alarm, 'interval', seconds=2, id='sign_weather_alarm')
scheduler.start()
return scheduler
# scheLog.info(f"scheduler.get_jobs: {scheduler.get_jobs()}")
# scheduler.remove_job('sign_push_report')
# scheduler.shutdown(wait=True)
if __name__ == "__main__":
jobs = AllScheduler().run()
time.sleep(3)
jobs.remove_job('sign_weather_alarm')
jobs.shutdown(wait=True)
while jobs:
try:
time.sleep(3)
except:
jobs.remove_job('sign_weather_alarm')
jobs.shutdown(wait=True)
print("Stop.")
|
"""
Functions for Imaging Pipeline
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from astropy.io import fits
from astropy.modeling import models, fitting
from astropy.table import Table
from scipy.optimize import curve_fit
import os
from astropy.coordinates import SkyCoord
import astropy.units as u
from astroquery.sdss import SDSS
import astroalign as aa
import sep
from pynot import alfosc
from pynot.functions import get_version_number, mad
from pynot.data.organizer import get_filter
__version__ = get_version_number()
def source_detection(fname, zeropoint=0., threshold=5.0, aperture=10.0, kwargs_bg={}, kwargs_ext={}):
"""
Run source detection in the input image using the python package SEP, based on the SExtractor algorithm.
Parameters
----------
fname : str
Filename of the FITS image to be analyzed. The image must have at least two extensions:
the first should be the image in counts, and one should be named ERR holding the associated error image
zeropoint : float [default=0.]
Magnitude zero-point for the given photometric filter used for the observations.
By default, instrument magnitudes will be returned if no zero-point is given.
threshold : float [default=5.0]
Detection threshold in 'sigmas'.
aperture : float [default=10.]
Circular aperture radius in pixels.
kwargs_bg : dict
Parameters to pass to background subtraction (sep.Background()).
See definition in `default_options_img.yml`
kwargs_ext : dict
Parameters to pass to source extraction (sep.extract()).
See definition in `default_options_img.yml`
Returns
-------
table_fname : str
The autogenerated filename of the source catalog. The format is: file-base of the input filename + '_phot.fits'.
Ex.: fname='alfosc_rband.fits' -> table_fname='alfosc_rband_phot.fits'
segmap_fname : str
The autogenerated filename of the segmentation map. This image holds the regions associated to each source
in the source catalog. The format is: file-base of the input filename + '_sep.fits'
output_msg : str
Log of messages from the function call.
"""
msg = list()
# get GAIN from header
data = fits.getdata(fname)
error_image = fits.getdata(fname, 'ERR')
hdr = fits.getheader(fname)
msg.append(" - Loaded input image: %s" % fname)
if 'EXPTIME' in hdr:
exptime = hdr['EXPTIME']
msg.append(" - Loaded exposure time from image header: %.1f" % exptime)
else:
exptime = 1.
msg.append("[WARNING] - No exposure time found in image header! Assuming image in counts.")
data = data * 1.
error_image = error_image * 1.
if 'threshold' in kwargs_ext:
threshold = kwargs_ext.pop('threshold')
if 'aperture' in kwargs_ext:
aperture = kwargs_ext.pop('aperture')
bkg = sep.Background(data, **kwargs_bg)
data_sub = data - bkg
msg.append(" - Subtracted sky background")
msg.append(" - Background RMS: %.2e" % bkg.globalrms)
# SEP requires native byte order; FITS data is usually big-endian
if data_sub.dtype.byteorder == '>':
data_sub = data_sub.byteswap().newbyteorder()
if error_image.dtype.byteorder == '>':
error_image = error_image.byteswap().newbyteorder()
extract_output = sep.extract(data_sub, threshold, err=bkg.globalrms, **kwargs_ext)
if len(extract_output) == 2:
objects, segmap = extract_output
else:
objects = extract_output
segmap = None
N_obj = len(objects)
msg.append(" - Detected %i objects" % N_obj)
# Calculate fixed aperture magnitudes:
aper_results = sep.sum_circle(data_sub, objects['x'], objects['y'], aperture, err=error_image)
aper_flux, aper_fluxerr, aper_flag = aper_results
msg.append(" - Calculating fluxes within circular aperture of: %i pixels" % aperture)
# Calculate Kron radius:
x = objects['x']
y = objects['y']
a = objects['a']
b = objects['b']
theta = objects['theta']
kronrad, krflag = sep.kron_radius(data_sub, x, y, a, b, theta, 6.0)
kronrad[kronrad < 1.] = 1.
# Sum fluxes in ellipse apertures:
flux, fluxerr, flag = sep.sum_ellipse(data_sub, x, y, a, b, theta, 2.5*kronrad, subpix=1)
msg.append(" - Calculating Kron radii and fluxes within elliptical apertures")
# combine flags:
flag |= krflag
# If the Kron radius is less than r_min (aperture), use aperture fluxes:
r_min = aperture
use_circle = kronrad * np.sqrt(b * a) < r_min
flux[use_circle] = aper_flux[use_circle]
fluxerr[use_circle] = aper_fluxerr[use_circle]
flag[use_circle] = aper_flag[use_circle]
msg.append(" - Targets with Kron radii below R_min (%.2f) are ignored" % r_min)
msg.append(" - Circular aperture fluxes used instead where R_kron < R_min")
if np.sum(use_circle) == 1:
msg.append(" - %i source identified with R_kron < R_min" % np.sum(use_circle))
else:
msg.append(" - %i sources identified with R_kron < R_min" % np.sum(use_circle))
# Save output table:
base, ext = os.path.splitext(fname)
table_fname = base + '_phot.fits'
object_table = Table(objects)
object_table['flux_auto'] = flux
object_table['flux_err_auto'] = fluxerr
object_table['flux_aper'] = aper_flux
object_table['flux_err_aper'] = aper_fluxerr
object_table['R_kron'] = kronrad
flux[flux <= 0] = 1.
object_table['mag_auto'] = zeropoint - 2.5*np.log10(flux)
object_table.write(table_fname, format='fits', overwrite=True)
msg.append(" [OUTPUT] - Saved extraction table: %s" % table_fname)
# Save segmentation map:
if segmap is not None:
segmap_fname = base + '_seg.fits'
seg_hdr = fits.Header()
seg_hdr['AUTHOR'] = 'PyNOT version %s' % __version__
seg_hdr['IMAGE'] = fname
seg_hdr['FILTER'] = get_filter(hdr)
seg_hdr.add_comment("Segmentation map from SEP (SExtractor)")
fits.writeto(segmap_fname, segmap, header=seg_hdr, overwrite=True)
msg.append(" [OUTPUT] - Saved source segmentation map: %s" % segmap_fname)
else:
segmap_fname = ''
# Plot source identifications:
fig_fname = base + '_sources.pdf'
plot_objects(fig_fname, data_sub, objects, threshold=threshold)
msg.append(" [OUTPUT] - Saved source identification overview: %s" % fig_fname)
msg.append("")
output_msg = "\n".join(msg)
return table_fname, segmap_fname, output_msg
def plot_objects(fig_fname, data, objects, threshold=5.):
"""
Create a plot of the image and the detected sources from SEP.
Parameters
----------
fig_fname : str
Filename of the resulting figure
data : np.array, shape (N, M)
Numpy array of the image data, must be a 2D array.
objects : astropy.table.Table or List[dict]
List of dictionaries or astropy table holding the object information:
x, y : x, y positions
a, b : aperture minor and major axes in pixels
theta : aperture orientation in radians
threshold : float [default=5.]
Constract threshold for the image. The color-scale is normalized based on the image
statistics (median and MAD). The min and max values are -1*MAD and +`threshold`*MAD
around the median value of the image counts, where MAD is the median absolute deviation.
Returns
-------
None
"""
# plot background-subtracted image
fig, ax = plt.subplots()
m, s = np.median(data), 1.5*mad(data)
ax.imshow(data, interpolation='nearest', cmap='gray_r',
vmin=m-1*s, vmax=m+threshold*s, origin='lower')
# plot an ellipse for each object
for item in objects:
e = Ellipse(xy=(item['x'], item['y']),
width=10*item['a'],
height=10*item['b'],
angle=item['theta'] * 180. / np.pi)
e.set_facecolor('none')
e.set_edgecolor('red')
e.set_linewidth(0.8)
ax.add_artist(e)
fig.tight_layout()
fig.savefig(fig_fname)
def load_fits_image(fname):
"""Load a FITS image with an associated error extension and an optional data quality MASK."""
with fits.open(fname) as hdu_list:
image = hdu_list[0].data
hdr = hdu_list[0].header
if 'ERR' in hdu_list:
error = hdu_list['ERR'].data
else:
raise TypeError("No error image detected")
if 'MASK' in hdu_list:
mask = hdu_list['MASK'].data
else:
mask = np.zeros_like(image, dtype=bool)
return image, error, mask, hdr
def measure_seeing(img, centers, size=20, max_obj=10):
"""
Measure the average seeing in an image by fitting a 2D Gaussian to pre-defined point sources.
Parameters
----------
img : np.array, shape(N, M)
Numpy array of the image to analyze.
centers : list[number, number]
List of positions of point sources (x, y) in pixels
size : int [default=20]
Image cutout size. The Gaussian PSF is fitted in a box of size 2*size by 2*size pixels.
max_obj : int [default=10]
Maximum number of sources to include in the fitting.
Returns
-------
fwhm : float
The average seeing FWHM in pixels.
ratio : float
The average axis ratio (ellipticity) of the Gaussian PSF.
msg : str
Output message of the function call.
If no warnings occurred, this is an empty string.
"""
X = np.arange(img.shape[1])
Y = np.arange(img.shape[0])
sigmas = list()
ratios = list()
good_x = (centers[:, 0] > size) & (centers[:, 0] < X.max()-size)
good_y = (centers[:, 1] > size) & (centers[:, 1] < Y.max()-size)
if np.sum(good_x & good_y) < 2:
msg = "[WARNING] - Not enough sources to measure seeing."
return (-1, -1, msg)
max_obj = min(max_obj, np.sum(good_x & good_y))
idx = np.random.choice(np.arange(len(centers))[good_x & good_y], max_obj, replace=False)
for x_cen, y_cen in centers[idx]:
x1, x2 = int(x_cen)-size, int(x_cen)+size
y1, y2 = int(y_cen)-size, int(y_cen)+size
cutout = img[y1:y2, x1:x2]
x, y = np.meshgrid(X[x1:x2], Y[y1:y2])
A = img[int(y_cen), int(x_cen)]
p_init = models.Gaussian2D(amplitude=A, x_mean=x_cen, y_mean=y_cen, x_stddev=5, y_stddev=5, theta=0)
fitter = fitting.LevMarLSQFitter()
try:
p_opt = fitter(p_init, x, y, cutout - np.median(cutout))
except (TypeError, ValueError):
continue
sigma_x = p_opt.x_stddev
sigma_y = p_opt.y_stddev
sig = np.sqrt(sigma_x**2 + sigma_y**2)
ba = min(sigma_x, sigma_y) / max(sigma_x, sigma_y)
sigmas.append(sig)
ratios.append(ba)
if len(sigmas) < 2:
msg = "[WARNING] - Not enough sources to measure seeing."
return (-1, -1, msg)
fwhm = np.median(sigmas) * 2.35
ratio = np.median(ratios)
msg = ""
return (fwhm, ratio, msg)
def save_file_log(log_name, image_log, target_hdr):
with open(log_name, 'w') as out:
out.write("# PyNOT Combination Log of Target: %s\n" % target_hdr['OBJECT'])
out.write("# Filter: %s\n" % get_filter(target_hdr))
out.write("# Col 1: Filename\n")
out.write("# Col 2: FWHM / pixels (seeing)\n")
out.write("# Col 3: PSF axis ratio (minor/major)\n")
out.write("# Col 4: Exp. Time / seconds\n")
out.write("# " + 40*"-" + "\n")
for line in image_log:
out.write(" %s %.1f %5.2f %6.1f\n" % tuple(line))
def image_combine(corrected_images, output='', log_name='', fringe_image='', method='weighted', max_control_points=50, detection_sigma=5, min_area=9):
"""
Register and combine a list of FITS images using affine transformation.
Parameters
----------
corrected_images : List[str]
List of input filenames of `corrected` images, i.e., bias, flat corrected
and trimmed for filter/aperture vignetting.
output : str [default='']
Output filename of the combined image. If not given, it is generated from the OBJECT keyword of the FITS header.
log_name : str [default='']
Filename of the combination log. This table holds the average seeing FWHM, PSF ellipticity, and exposure time
for each image in the input list.
fringe_image : str [default='']
Filename of the fringe image (FITS format) from `pynot.create_fringe_image`.
If given, this image will be subtracted from each input image before combination.
method : str [default='weighted']
Method for image combination: mean, median or weighted.
By default an inverse-variance weighting is used.
max_control_points : int [default=50]
Maximum number of control point-sources to find the transformation.
A lower number will converge faster but may result in a less robust image registration.
detection_sigma : float [default=5.]
Detection threshold for control points in units of standard deviations of the sky background.
min_area : int [default=9]
Minimum number of connected pixels to be considered a source
Returns
-------
output_msg : str
Log of messages from the function call.
"""
msg = list()
if fringe_image != '':
norm_sky = fits.getdata(fringe_image)
msg.append(" - Loaded normalized fringe image: %s" % fringe_image)
else:
norm_sky = 1.
target_fname = corrected_images[0]
target, target_err, target_mask, target_hdr = load_fits_image(target_fname)
target = target - norm_sky*np.median(target)
exptime = target_hdr['EXPTIME']
target /= exptime
target_err /= exptime
target_hdr['BUNIT'] = 'count / s'
msg.append(" - Aligning all images to reference: %s" % target_fname)
msg.append(" - Registering input images:")
shifted_images = [target]
shifted_vars = [target_err**2]
if target.dtype.byteorder == '>':
target = target.byteswap().newbyteorder()
final_exptime = exptime
image_log = list()
if len(corrected_images) > 1:
for fname in corrected_images[1:]:
msg.append(" - Input image: %s" % fname)
source, source_err, source_mask, hdr_i = load_fits_image(fname)
source = source - norm_sky*np.median(source)
source /= hdr_i['EXPTIME']
source_err /= hdr_i['EXPTIME']
final_exptime += hdr_i['EXPTIME']
try:
transf, (coords) = aa.find_transform(source, target,
max_control_points=max_control_points,
detection_sigma=detection_sigma,
min_area=min_area)
except:
msg.append(" [ERROR] - Failed to find image transformation!")
msg.append(" - Skipping image")
continue
if source.dtype.byteorder == '>':
source = source.byteswap().newbyteorder()
if source_err.dtype.byteorder == '>':
source_err = source_err.byteswap().newbyteorder()
if source_mask.dtype.byteorder == '>':
source_mask = source_mask.byteswap().newbyteorder()
registered_image, _ = aa.apply_transform(transf, source, target, fill_value=0)
registered_error, _ = aa.apply_transform(transf, source_err, target, fill_value=0)
registered_mask, _ = aa.apply_transform(transf, source_mask, target, fill_value=0)
target_mask += 1 * (registered_mask > 0)
registered_error[registered_error == 0] = np.mean(registered_error)*10
shifted_images.append(registered_image)
shifted_vars.append(registered_error**2)
source_list, target_list = coords
if len(image_log) == 0:
fwhm, ratio, seeing_msg = measure_seeing(target, target_list)
image_log.append([os.path.basename(target_fname), fwhm, ratio, exptime])
if seeing_msg:
msg.append(seeing_msg)
fwhm, ratio, seeing_msg = measure_seeing(source, source_list)
if seeing_msg:
msg.append(seeing_msg)
image_log.append([os.path.basename(fname), fwhm, ratio, hdr_i['EXPTIME']])
if log_name == '':
filter_name = alfosc.filter_translate[get_filter(target_hdr)]
log_name = 'filelist_%s_%s.txt' % (target_hdr['OBJECT'], filter_name)
save_file_log(log_name, image_log, target_hdr)
msg.append(" [OUTPUT] - Saved file log and image stats: %s" % log_name)
if method == 'median':
final_image = np.nanmedian(shifted_images, axis=0)
final_error = np.sqrt(np.nanmean(shifted_vars, axis=0))
target_hdr['COMBINE'] = "Median"
elif method == 'mean':
final_image = np.nanmean(shifted_images, axis=0)
final_error = np.sqrt(np.nanmean(shifted_vars, axis=0))
target_hdr['COMBINE'] = "Mean"
else:
w = 1./np.array(shifted_vars)
shifted_images = np.array(shifted_images)
final_image = np.nansum(w*shifted_images, axis=0) / np.sum(w, axis=0)
final_error = np.sqrt(1. / np.nansum(w, axis=0))
target_hdr['COMBINE'] = "Inverse Variance Weighted"
final_mask = 1 * (target_mask > 0)
else:
final_image = target
final_error = target_err
final_mask = target_mask
target_hdr['COMBINE'] = "None"
target_hdr['NCOMBINE'] = len(shifted_images)
target_hdr['EXPTIME'] = final_exptime / len(shifted_images)
# Fix NaN values from negative pixel values:
err_NaN = np.isnan(final_error)
final_error[err_NaN] = np.nanmean(final_error)*100
msg.append(" - Correcting NaNs in noise image: %i pixel(s)" % np.sum(err_NaN))
target_hdr['DATAMIN'] = np.nanmin(final_image)
target_hdr['DATAMAX'] = np.nanmax(final_image)
target_hdr['EXTNAME'] = 'DATA'
target_hdr['AUTHOR'] = 'PyNOT version %s' % __version__
mask_hdr = fits.Header()
mask_hdr.add_comment("0 = Good Pixels")
mask_hdr.add_comment("1 = Cosmic Ray Hits")
if output == '':
output = "combined_%s.fits" % target_hdr['OBJECT']
sci_ext = fits.PrimaryHDU(final_image, header=target_hdr)
err_ext = fits.ImageHDU(final_error, header=target_hdr, name='ERR')
mask_ext = fits.ImageHDU(final_mask, header=mask_hdr, name='MASK')
output_HDU = fits.HDUList([sci_ext, err_ext, mask_ext])
output_HDU.writeto(output, overwrite=True)
msg.append(" - Successfully combined the images")
msg.append(" [OUTPUT] - Saving output: %s" % output)
msg.append("")
output_msg = "\n".join(msg)
return output_msg
def plot_image2D(fname, image, vmin=-2, vmax=2):
fig = plt.figure()
ax = fig.add_subplot(111)
med = np.median(image)
s = mad(image)
im = ax.imshow(image, origin='lower', vmin=med+vmin*s, vmax=med+vmax*s)
fig.colorbar(im)
fig.tight_layout()
fig.savefig(fname)
def create_fringe_image(input_filenames, output='', fig_fname='', threshold=3.0):
"""
Create a normalized average fringe image for a list of images taken with the same filter.
Parameters
----------
input_filenames : str
List of FITS filenames of images taken in the same photometric band.
output : str [default='']
Output filename of the fringe image.
fig_fname : str [default='']
Output filename of the diagnostic figure showing the normalized fringe image.
threshold : float [default=3.]
Threshold for source rejection in the image stacking in units of the standard deviation
of the sky background (estimated via median absolute deviation).
Returns
-------
output_msg : str
Log of messages from the function call.
"""
msg = list()
hdr = fits.getheader(input_filenames[0])
img_list = [fits.getdata(fname) for fname in input_filenames]
exptimes = [fits.getheader(fname)['EXPTIME'] for fname in input_filenames]
msg.append(" - Loaded input images")
mask = [np.fabs(im-np.median(im)) < threshold*mad(im) for im in img_list]
msg.append(" - Created image mask using threshold: %.2f" % threshold)
N = np.sum(mask, 0)
skysum = np.sum([im*m/t for im, m, t in zip(img_list, mask, exptimes)], axis=0)
skysum[N == 0] = np.median(skysum)
N[N == 0] = 1
sky = skysum / N
norm_sky = sky / np.median(sky)
msg.append(" - Created normalized fringe image")
if fig_fname:
plot_image2D(fig_fname, norm_sky, vmin=-2, vmax=2)
msg.append(" [OUTPUT] - Saving figure: %s" % fig_fname)
if output == '':
output = "fringe_%s.fits" % hdr['OBJECT']
hdr['OBJECT'] = 'Fringe Image'
hdr['EXTNAME'] = 'MODEL'
hdr.add_comment('Average Fringe image, median normalized')
fits.writeto(output, norm_sky, header=hdr, overwrite=True)
msg.append(" [OUTPUT] - Saving output: %s" % output)
msg.append("")
output_msg = "\n".join(msg)
return output_msg
def match_phot_catalogs(sep_cat, phot, match_radius=1.):
"""
Match a source catalog from SEP to a photometric reference catalog `phot`.
Both catalogs must include columns 'ra' and 'dec'.
Parameters
----------
sep_cat : astropy.table.Table
Source catalog from SEP, one row per detected source
phot : astropy.table.Table
Photometric reference catalog
match_radius : float [default=1.0]
Matching radius in arcseconds
Returns
-------
matched_sep : astropy.table.Table
An astropy table of sources in the SEP source catalog that have matches
in the reference `phot` catalog.
matched_phot : astropy.table.Table
An astropy table of sources in the reference `phot` catalog that have matches
in the SEP source catalog.
"""
matched_sep = list()
matched_phot = list()
refs = np.array([phot['ra'], phot['dec']]).T
for row in sep_cat:
xy = np.array([row['ra'], row['dec']])
dist = np.sqrt(np.sum((refs - xy)**2, axis=1))
index = np.argmin(dist)
if np.min(dist) < match_radius/3600.:
matched_phot.append(np.array(phot[index]))
matched_sep.append(np.array(row))
matched_sep = np.array(matched_sep)
matched_phot = np.array(matched_phot)
return Table(matched_sep), Table(matched_phot)
def get_sdss_catalog(ra, dec, radius=4.):
"""Download the SDSS photometry using astroquery for a circular region of radius in deg."""
catalog_fname = 'sdss_phot_%.2f%+.2f.csv' % (ra, dec)
fields = ['ra', 'dec', 'psfMag_u', 'psfMag_g', 'psfMag_r', 'psfMag_i', 'psfMag_z',
'psfMagErr_u', 'psfMagErr_g', 'psfMagErr_r', 'psfMagErr_i', 'psfMagErr_z']
field_center = SkyCoord(ra, dec, frame='icrs', unit='deg')
sdss_result = SDSS.query_region(field_center, radius*u.arcmin, photoobj_fields=fields)
if sdss_result is not None:
sdss_result.write(catalog_fname, format='ascii.csv', overwrite=True)
return sdss_result
ext_coeffs = {'u': 0.517,
'g': 0.165,
'r': 0.0754,
'i': 0.0257,
'z': 0.0114}
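# Illustrative helper (not called by the pipeline itself): the calibration below
# fits zp such that m_SDSS ~ m_inst + k*airmass + zp, so a calibrated AB magnitude
# follows directly from the La Palma extinction coefficients above.
def apply_zeropoint(m_inst, airmass, zp, band='r'):
    """Convert an instrument magnitude to a calibrated AB magnitude."""
    return m_inst + ext_coeffs[band]*airmass + zp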
def flux_calibration_sdss(img_fname, sep_fname, fig_fname='', q_lim=0.8, kappa=3, match_radius=1.):
"""
Self-calibration of magnitude zero point using SDSS photometry as reference
Parameters
----------
img_fname : string
Filename of WCS calibrated image (_wcs.fits)
sep_fname : string
Filename of the source extraction table (_phot.fits)
fig_fname : string
Filename of the diagnostic figure. Autogenerated by default.
q_lim : float [default=0.8]
Reject elliptical sources with axis ratio < `q_lim`.
Axis ratio is defined as minor/major.
kappa : float [default=3]
Threshold for projected distance filtering. Sources are rejected if their distance differs
by more than `kappa` times the median absolute deviation from the median of all distances.
match_radius : float [default=1]
Matching radius between SDSS sources and image sources
Returns
-------
output_msg : string
Log of messages from the function call.
"""
# -- Get SDSS catalog
msg = list()
hdr = fits.getheader(img_fname)
msg.append(" - Loaded image: %s" % img_fname)
radius = np.sqrt(hdr['CD1_1']**2 + hdr['CD1_2']**2)*60 * hdr['NAXIS1'] / np.sqrt(2)
msg.append(" - Downloading SDSS photometric catalog...")
try:
sdss_cat = get_sdss_catalog(hdr['CRVAL1'], hdr['CRVAL2'], radius)
except:
msg.append(" [ERROR] - Could not connect to SDSS server. Check your internet connection.")
msg.append("")
return "\n".join(msg)
def line(x, zp):
return zp + x
if sdss_cat is None:
msg.append(" [ERROR] - No data found in SDSS. No zero point calculated")
msg.append("")
return "\n".join(msg)
airmass = hdr['AIRMASS']
filter_name = alfosc.filter_translate[alfosc.get_filter(hdr)]
if 'SDSS' in filter_name:
band = filter_name.split('_')[0]
else:
msg.append(" [ERROR] - The image was not taken with an SDSS filter. No zero point calculated")
msg.append("")
return "\n".join(msg)
# For r-band: (measured from La Palma extinction curve)
mag_key = 'psfMag_%s' % band
mag_err_key = 'psfMagErr_%s' % band
good = (sdss_cat[mag_key] > 0) & (sdss_cat[mag_key] < 30)
sdss_cat = sdss_cat[good]
# Load SEP filename:
try:
sep_cat = Table.read(sep_fname)
sep_hdr = fits.getheader(sep_fname)
msg.append(" - Loaded SEP source table: %s" % sep_fname)
except (FileNotFoundError, OSError):
msg.append(" [ERROR] - Could not load SEP source table: %s" % sep_fname)
msg.append("")
return "\n".join(msg)
if 'MAG_ZP' in sep_hdr:
msg.append("[WARNING] - The source table has already been flux calibrated by PyNOT")
msg.append(" - Terminating task...")
msg.append("")
return "\n".join(msg)
axis_ratio = sep_cat['b']/sep_cat['a']
# Select only 'round' sources:
sep_points = sep_cat[axis_ratio > q_lim]
# Match catalogs:
match_sep, match_sdss = match_phot_catalogs(sep_points, sdss_cat)
msg.append(" - Cross matched source catalog")
mag = match_sdss[mag_key]
mag_err = match_sdss[mag_err_key]
m_inst = match_sep['mag_auto']
k = ext_coeffs[band]
# Get first estimate using the median:
zp0, _ = curve_fit(line, m_inst+k*airmass, mag, p0=[27], sigma=mag_err)
# Filter outliers:
cut = np.abs(zp0 + m_inst + k*airmass - mag) < kappa*mad(zp0 + m_inst + k*airmass - mag)
cut &= (mag < 20.1) & (mag > 15)
# Get weighted average zero point:
w = 1./mag_err[cut]**2
zp = np.sum((mag[cut] - m_inst[cut] - k*airmass) * w) / np.sum(w)
msg.append(" - Calculating zero point in SDSS %s band using %i sources" % (band, len(w)))
# Zero point dispersion:
zp_err = np.std(mag[cut] - zp - m_inst[cut] - k*airmass)
msg.append(" - Zero Point = %.3f ± %.3f mag" % (zp, zp_err))
sep_cat['mag_auto'] += zp
sep_cat.write(sep_fname, overwrite=True)
with fits.open(sep_fname, 'update') as sep_file:
sep_file[0].header.add_comment("Self-calibration of mag. zero point using SDSS")
sep_file[0].header['MAG_ZP'] = (np.round(zp, 3), "Magnitude zero point (AB mag)")
sep_file[0].header['ZP_ERR'] = (np.round(zp_err, 3), "Uncertainty on magnitude zero point (AB mag)")
msg.append(" [OUTPUT] - Updating magnitudes in source table: %s" % sep_fname)
# -- Plot the zero point for visual aid:
base, _ = os.path.splitext(os.path.basename(img_fname))
dirname = os.path.dirname(img_fname)
if fig_fname == '':
fig_fname = 'zero_point_' + base + '.pdf'
fig_fname = os.path.join(dirname, fig_fname)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.errorbar(m_inst, mag, 3*mag_err, ls='', marker='.', color='k', alpha=0.8)
ax.plot(m_inst[cut], mag[cut], ls='', marker='o', color='b', alpha=0.7)
ax.plot(np.sort(m_inst), zp + np.sort(m_inst) + k*airmass, ls='--', color='crimson',
label='ZP = %.2f ± %.2f' % (zp, zp_err))
ax.set_ylim(np.min(mag)-0.2, np.max(mag)+0.5)
ax.set_xlabel("Instrument Magnitude")
ax.set_ylabel("Reference SDSS Magnitude (r-band)")
ax.legend()
ax.tick_params(which='both', top=False, right=False)
fig.tight_layout()
fig.savefig(fig_fname)
msg.append(" [OUTPUT] - Saving diagnostic figure: %s" % fig_fname)
# -- Update header in FITS image:
with fits.open(img_fname) as hdu_list:
hdu_list['DATA'].header.add_comment("Self-calibration of mag. zero point using SDSS")
hdu_list['DATA'].header['MAG_ZP'] = (np.round(zp, 3), "Magnitude zero point (AB mag)")
hdu_list['DATA'].header['ZP_ERR'] = (np.round(zp_err, 3), "Uncertainty on magnitude zero point (AB mag)")
hdu_list.writeto(img_fname, overwrite=True)
msg.append(" [OUTPUT] - Updating header of input image: %s" % img_fname)
msg.append(" - MAG_ZP = %10.3f / %s" % (zp, "Magnitude zero point (AB mag)"))
msg.append(" - ZP_ERR = %10.3f / %s" % (zp_err, "Uncertainty on magnitude zero point (AB mag)"))
msg.append("")
return "\n".join(msg)
|
from elasticsearch.client import SnapshotClient
from fiases.fias_data import ES
import fiases.fias_data
# NOTE: `address` and `houses` supply the INDEX names used below; they are
# assumed to be modules of the fiases package
from fiases import address, houses
sn = SnapshotClient(ES)
def register(location="/usr/share/elasticsearch/snapshots"):
sn_body = {
"type": "fs",
"settings": {
"compress": "true",
"location": location
}
}
sn.create_repository(repository="fias", body=sn_body)
def restore():
ES.indices.delete(index=address.INDEX, ignore=[400, 404])
ES.indices.delete(index=houses.INDEX, ignore=[400, 404])
sn.restore(repository="fias",
snapshot="fias_full",
body={
"indices": [address.INDEX, houses.INDEX]
})
def restoreIfNotExist():
if not ES.indices.exists(address.INDEX):
sn.restore(repository="fias",
snapshot="fias_full",
body={
"indices": [address.INDEX, houses.INDEX]
})
else:
pass
def createFullSnapshot():
try:
sn.delete(repository="fias", snapshot="fias_full")
except Exception:
pass
sn_body = {
"indices": [address.INDEX, houses.INDEX],
"ignore_unavailable": "true",
"include_global_state": "false",
"metadata": {
"taken_by": "fias",
"taken_because": "backup before update"
}
}
sn.create(repository="fias", snapshot="fias_full", body=sn_body)
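# Illustrative call order only, using the functions defined above: register the
# repository once, snapshot the current indices before an update, and restore
# them if they went missing.
def example_backup_cycle():
    register()
    createFullSnapshot()
    restoreIfNotExist()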
|
from flask import Flask, render_template, url_for, request, redirect
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
db = SQLAlchemy(app)
class Todo(db.Model):
id = db.Column(db.Integer, primary_key=True)
content = db.Column(db.String(200), nullable=False)
date_created = db.Column(db.DateTime, default=datetime.utcnow)
def __repr__(self):
return '<Task %r>' % self.id
@app.route('/', methods=['POST', 'GET'])
def index():
if request.method == 'POST':
task_content = request.form['content']
new_task = Todo(content=task_content)
try:
db.session.add(new_task)
db.session.commit()
return redirect('/')
except:
return 'There was an issue adding your task'
else:
tasks = Todo.query.order_by(Todo.date_created).all()
return render_template('index.html', tasks=tasks)
@app.route('/delete/<int:id>')
def delete(id):
task_to_delete = Todo.query.get_or_404(id)
try:
db.session.delete(task_to_delete)
db.session.commit()
return redirect('/')
except:
return 'There was a problem deleting that task'
@app.route('/update/<int:id>', methods=['GET', 'POST'])
def update(id):
task = Todo.query.get_or_404(id)
if request.method == 'POST':
task.content = request.form['content']
try:
db.session.commit()
return redirect('/')
except:
return 'There was an issue updating your task'
else:
return render_template('update.html', task=task)
if __name__ == "__main__":
app.run(debug=True)
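# Note: the SQLite schema must exist before the first request. One way to create
# it (illustrative, assuming this file is saved as app.py):
#     from app import app, db
#     with app.app_context():
#         db.create_all()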
|
from __future__ import print_function
from builtins import object
from builtins import str
from lib.common import helpers
class Module(object):
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'PowerCat',
# list of one or more authors for the module
'Author': ['besimorhino'],
'Software': '',
'Techniques': ['T1036'],
# more verbose multi-line description of the module
'Description': (
'powercat is a powershell function. First you need to load the function before you can execute it. '
'You can put one of the below commands into your powershell profile so powercat is automatically '
'loaded when powershell starts.'),
# True if the module needs to run in the background
'Background': True,
# File extension to save the file as
'OutputExtension': None,
# True if the module needs admin rights to run
'NeedsAdmin': False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe': True,
'Language': 'powershell',
'MinLanguageVersion': '2',
# list of any references/other comments
'Comments': [
'https://github.com/besimorhino/powercat'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent': {
'Description': 'Agent to run module on.',
'Required': True,
'Value': ''
},
'l': {
'Description': 'Switch. Listen for a connection',
'Required': False,
'Value': ''
},
'c': {
'Description': 'Connect to a listener',
'Required': False,
'Value': ''
},
'p': {
'Description': 'The port to connect to, or listen on.',
'Required': False,
'Value': ''
},
'e': {
'Description': 'Execute. (GAPING_SECURITY_HOLE) ',
'Required': False,
'Value': ''
},
'ep': {
'Description': 'Switch. Execute Powershell.',
'Required': False,
'Value': ''
},
'r': {
'Description': 'Switch. Relay. Format: -r tcp:10.1.1.1:443',
'Required': False,
'Value': ''
},
'u': {
'Description': 'Switch. Transfer data over UDP.',
'Required': False,
'Value': ''
},
'dns': {
'Description': 'Transfer data over dns (dnscat2).',
'Required': False,
'Value': ''
},
'dnsft': {
'Description': 'DNS Failure Threshold. ',
'Required': False,
'Value': ''
},
't': {
'Description': 'Timeout option. Default: 60 ',
'Required': False,
'Value': ''
},
'i': {
'Description': 'Input: Filepath (string), byte array, or string.',
'Required': False,
'Value': ''
},
'o': {
'Description': 'Console Output Type: "Host", "Bytes", or "String" ',
'Required': False,
'Value': ''
},
'of': {
'Description': 'Output File Path. ',
'Required': False,
'Value': ''
},
'd': {
'Description': 'Switch. Disconnect after connecting.',
'Required': False,
'Value': ''
},
'rep': {
'Description': 'Switch. Repeater. Restart after disconnecting.',
'Required': False,
'Value': ''
},
'g': {
'Description': 'Switch. Generate Payload',
'Required': False,
'Value': ''
},
'ge': {
'Description': 'Switch. Generate Encoded Payload',
'Required': False,
'Value': ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
# the PowerShell script itself, with the command to invoke
# for execution appended to the end. Scripts should output
# everything to the pipeline for proper parsing.
#
# the script should be stripped of comments, with a link to any
# original reference script included in the comments.
script = """
"""
# if you're reading in a large, external script that might be updated,
# use the pattern below
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/management/powercat.ps1"
try:
f = open(moduleSource, 'r')
except:
print((helpers.color("[!] Could not read module source path at: " + str(moduleSource))))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
scriptEnd = "powercat"
# add any arguments to the end execution of the script
for option, values in self.options.items():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
scriptEnd += " -" + str(option)
else:
scriptEnd += " -" + str(option) + " " + str(values['Value'])
if obfuscate:
scriptEnd = helpers.obfuscate(self.mainMenu.installPath, psScript=scriptEnd,
obfuscationCommand=obfuscationCommand)
script += scriptEnd
script = helpers.keyword_obfuscation(script)
return script
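# For example (illustrative), with the options l='True', p='443' and ep='True'
# set, the loop above appends the launcher line:
#     powercat -l -p 443 -ep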
|
import argparse
import logging
import os
from time import sleep
from googleapiclient import discovery
log = logging.getLogger(__name__)
parser = argparse.ArgumentParser(description="Control a google cloud instance.")
parser.add_argument('--debug', '-d', dest='debug', action='store_true', help="Debug mode")
parser.add_argument('--project', '-p', dest='project', help="instance project id")
parser.add_argument('--zone', '-z', dest='zone', help="instance zone")
parser.add_argument('--instance', '-i', dest='instance', help="instance name")
def restart_instance(project, zone, instance):
compute = discovery.build('compute', 'v1')
compute.instances().stop(project=project, zone=zone, instance=instance).execute()
stopped = False
while not stopped:
sleep(10)
instances = compute.instances().list(project=project, zone=zone).execute()['items']
data = next((x for x in instances if x['name'] == instance), None)
if data is not None and data['status'] == "TERMINATED":
stopped = True
return compute.instances().start(project=project, zone=zone, instance=instance).execute()
if __name__ == "__main__":
args = parser.parse_args()
level = logging.INFO
if args.debug:
level = logging.DEBUG
logging.basicConfig(format='%(asctime)s %(message)s', level=level)
restart_instance(args.project or os.environ['gc_project'], args.zone or os.environ['gc_zone'], args.instance or os.environ['gc_instance'])
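# Example invocation (illustrative):
#   python <this script> -p my-project -z us-central1-a -i my-vm
# or with the environment variables gc_project, gc_zone and gc_instance set
# instead of the command-line flags.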
|
#!/usr/bin/env python3
# pyfu/flat.py
"""
Method for calculating the correction for the relative transmissions of the fibres using extracted sky flats.
The OBJTYP term "flatness" was created to distinguish this correction from that of a true flatfield.
"""
import numpy as np
import logging
from astropy.io import fits
from astropy.table import Table, Column
from astropy.wcs import WCS
from pyFU.defaults import pyFU_default_formats, pyFU_default_keywords, pyFU_logging_level, pyFU_logging_format
from pyFU.ifu import Fibre, get_fibres
from pyFU.utils import hist_integral, merge_dictionaries, get_infiles_and_outfiles, read_tables, write_spectra
logging.basicConfig (level=pyFU_logging_level, format=pyFU_logging_format)
def main () :
import matplotlib.pyplot as plt
import sys
import yaml
from pyFU.utils import parse_arguments, read_tables
logging.info ('*************************** flat ******************************')
# ---- GET DEFAULT CONFIGURATION AND COMMAND LINE PARAMETERS
README = """
Script for calculating the correction for the different transmissions of IFU fibres.
"""
arguments = {
'errcol':{'path':'flat:','default':'err_flux',
'flg':'-E','type':str,'help':'name of flux error column'},
'flxcol':{'path':'flat:','default':'flux',
'flg':'-F','type':str,'help':'name of flux column'},
'infiles':{'path':'flat:','default':None, \
'flg':'-i','type':str,'help':'input FITS table file(s)'},
'outfiles':{'path':'flat:','default':None, \
'flg':'-o','type':str,'help':'output YAML file(s) containing the corrections'},
'pixcol':{'path':'flat:','default':'pixel',
'flg':'-P','type':str,'help':'name of pixel column'},
'pixels':{'path':'flat:','default':None, \
'flg':'-x','type':list,'help':'integration pixels of output image'},
'plot':{'path':None,'default':False, \
'flg':'-p','type':bool,'help':'display resulting image'},
'scale':{'path':None,'default':False, \
'flg':'-s','type':bool,'help':'scale corrections by fibre area (show intensity, not flux)'},
'waves':{'path':'flat:','default':None, \
'flg':'-w','type':list,'help':'integration wavelengths of output image'},
'yaml':{'path':None,'default':None, \
                'flg':'-y','type':str,'help':'name of pyFU configuration file'},
'wavcol':{'path':'flat:','default':'wavelength',
'flg':'-W','type':str,'help':'name of wavelength column'},
}
args,cfg = parse_arguments (arguments)
info = cfg['flat']
# ---- GET THE INPUT AND OUTPUT FILES
infiles,outfiles = get_infiles_and_outfiles (args.infiles,args.outfiles)
# ---- FOR ALL INPUT AND OUTPUT FILES
for infile,outfile in zip(infiles,outfiles) :
logging.info (f'Reading {infile} ...')
spectra,pheader = read_tables (pathname=infile)
nspectra = len(spectra)
med = np.zeros(nspectra)
        pheader['OBJECT'] = 'ifu-flatness'
pheader['OBJTYP'] = 'flatness'
# FOR ALL SPECTRA IN A FILE
for i in range(nspectra) :
spectrum = spectra[i]
            p = spectrum[info['pixcol']]
            w = spectrum[info['wavcol']]
            f = spectrum[info['flxcol']]
# GET MEDIAN FLUX USING WAVELENGTH OR PIXEL REGION
if 'waves' in info and info['waves'] is not None :
wav = info['waves']
                pix = [max(int(np.interp(wav[0],w,p)), 0),
                       min(int(np.interp(wav[1],w,p))+1, len(p))]
elif 'pixels' in info and info['pixels'] is not None :
pix = info['pixels']
else :
                pix = [0, len(p)]
mask = (p >= pix[0])*(p <= pix[1])
med[i] = np.nanmedian(f[mask],axis=0)
spectrum.meta['OBJECT'] = 'ifu-flatness'
spectrum.meta['OBJTYP'] = 'flatness'
# GET QE'S AND PUT THEM BACK INTO THE SPECTRA
qe = med/np.nanmean(med)
for i in range(nspectra) :
spectrum = spectra[i]
            f = spectrum[info['flxcol']]
            err = spectrum[info['errcol']]
rerr = err/f
            spectrum[info['flxcol']] = qe[i]+f-f   # constant correction qe[i]; f-f keeps the shape and any NaNs
            spectrum[info['errcol']] = rerr*qe[i]
# OUTPUT FLATFIELD SPECTRA
write_spectra (outfile, spectra, pheader)
logging.info ('******************************************************************\n')
if __name__ == '__main__' :
main ()
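# Usage sketch (not part of the original module): the file names are
# hypothetical and the exact flag syntax depends on pyFU.utils.parse_arguments,
# but a run built from the `arguments` dict above would look roughly like:
#
#   python flat.py -i skyflat_spectra.fits -o fibre_corrections.yaml -p
#
# i.e. read the extracted sky-flat spectra, replace each fibre's flux with its
# relative transmission, and write the result to the output file.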
|
## \file Constants.py
# \author Thulasi Jegatheesan
# \brief Provides the structure for holding constant values
## \brief Structure for holding the constant values
class Constants:
pi = 3.14159265
L_min = 0.1
L_max = 50.0
rho_W_min = 950.0
rho_W_max = 1000.0
A_C_max = 100000.0
C_W_min = 4170.0
C_W_max = 4210.0
h_C_min = 10.0
h_C_max = 10000.0
t_final_max = 86400.0
AR_min = 1.0e-2
AR_max = 100.0
|
from django.urls import path
from . import views
app_name = "users"
urlpatterns = [
path(
"<str:username>",
view = views.UserProfile.as_view(),
name = "user_profile"
),
path(
"<str:username>/following",
view = views.UserFollowing.as_view(),
name = "user_following"
),
path(
"<str:username>/followers",
view = views.UserFollowers.as_view(),
name = "user_followers"
),
path(
"search/",
view = views.SearchUser.as_view(),
name = 'search_user'
),
path(
"follow/<str:username>",
view = views.FollowCount.as_view(),
name = 'follow_count'
),
path(
"change/<str:username>",
view = views.ChangePassword.as_view(),
name = 'change_password'
)
]
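# Usage sketch (hypothetical project wiring): if this app is included in the
# root URLconf as path("users/", include("users.urls")), the named routes
# above can be reversed through the "users" namespace, e.g.
#
#   from django.urls import reverse
#   reverse("users:user_profile", kwargs={"username": "alice"})  # -> "/users/alice"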
|
"""
zeep.wsdl.messages.soap
~~~~~~~~~~~~~~~~~~~~~~~
"""
import copy
from collections import OrderedDict
from lxml import etree
from lxml.builder import ElementMaker
from zeep import exceptions, xsd
from zeep.utils import as_qname
from zeep.xsd.context import XmlParserContext
from zeep.wsdl.messages.base import ConcreteMessage, SerializedMessage
from zeep.wsdl.messages.multiref import process_multiref
__all__ = [
'DocumentMessage',
'RpcMessage',
]
class SoapMessage(ConcreteMessage):
"""Base class for the SOAP Document and RPC messages
:param wsdl: The main wsdl document
:type wsdl: zeep.wsdl.Document
:param name:
:param operation: The operation to which this message belongs
:type operation: zeep.wsdl.bindings.soap.SoapOperation
:param type: 'input' or 'output'
:type type: str
:param nsmap: The namespace mapping
:type nsmap: dict
"""
def __init__(self, wsdl, name, operation, type, nsmap):
super(SoapMessage, self).__init__(wsdl, name, operation)
self.nsmap = nsmap
self.abstract = None # Set during resolve()
self.type = type
self._is_body_wrapped = False
self.body = None
self.header = None
self.envelope = None
def serialize(self, *args, **kwargs):
"""Create a SerializedMessage for this message"""
nsmap = {
'soap-env': self.nsmap['soap-env']
}
nsmap.update(self.wsdl.types._prefix_map_custom)
soap = ElementMaker(namespace=self.nsmap['soap-env'], nsmap=nsmap)
# Create the soap:envelope
envelope = soap.Envelope()
# Create the soap:header element
headers_value = kwargs.pop('_soapheaders', None)
header = self._serialize_header(headers_value, nsmap)
if header is not None:
envelope.append(header)
# Create the soap:body element. The _is_body_wrapped attribute signals
# that the self.body element is of type soap:body, so we don't have to
# create it in that case. Otherwise we create a Element soap:body and
# render the content into this.
if self.body:
body_value = self.body(*args, **kwargs)
if self._is_body_wrapped:
self.body.render(envelope, body_value)
else:
body = soap.Body()
envelope.append(body)
self.body.render(body, body_value)
else:
body = soap.Body()
envelope.append(body)
        # XXX: This is only used in Soap 1.1 so should be moved to the
# Soap11Binding._set_http_headers(). But let's keep it like this for
# now.
headers = {
'SOAPAction': '"%s"' % self.operation.soapaction
}
return SerializedMessage(
path=None, headers=headers, content=envelope)
def deserialize(self, envelope):
"""Deserialize the SOAP:Envelope and return a CompoundValue with the
result.
"""
if not self.envelope:
return None
body = envelope.find('soap-env:Body', namespaces=self.nsmap)
body_result = self._deserialize_body(body)
header = envelope.find('soap-env:Header', namespaces=self.nsmap)
headers_result = self._deserialize_headers(header)
kwargs = body_result
kwargs.update(headers_result)
result = self.envelope(**kwargs)
        # If the WSDL defines SOAP headers for this message then return the
        # full envelope object (header + body); otherwise unwrap the body below.
if self.header.type._element:
return result
result = result.body
if result is None or len(result) == 0:
return None
elif len(result) > 1:
return result
# Check if we can remove the wrapping object to make the return value
# easier to use.
result = next(iter(result.__values__.values()))
if isinstance(result, xsd.CompoundValue):
children = result._xsd_type.elements
attributes = result._xsd_type.attributes
if len(children) == 1 and len(attributes) == 0:
item_name, item_element = children[0]
retval = getattr(result, item_name)
return retval
return result
def signature(self, as_output=False):
if not self.envelope:
return None
if as_output:
if isinstance(self.envelope.type, xsd.ComplexType):
try:
if len(self.envelope.type.elements) == 1:
return self.envelope.type.elements[0][1].type.signature(
schema=self.wsdl.types, standalone=False)
except AttributeError:
return None
return self.envelope.type.signature(schema=self.wsdl.types, standalone=False)
if self.body:
parts = [self.body.type.signature(schema=self.wsdl.types, standalone=False)]
else:
parts = []
if self.header.type._element:
parts.append('_soapheaders={%s}' % self.header.type.signature(
schema=self.wsdl.types, standalone=False))
return ', '.join(part for part in parts if part)
@classmethod
def parse(cls, definitions, xmlelement, operation, type, nsmap):
"""Parse a wsdl:binding/wsdl:operation/wsdl:operation for the SOAP
implementation.
Each wsdl:operation can contain three child nodes:
- input
- output
- fault
Definition for input/output::
<input>
<soap:body parts="nmtokens"? use="literal|encoded"
encodingStyle="uri-list"? namespace="uri"?>
<soap:header message="qname" part="nmtoken" use="literal|encoded"
encodingStyle="uri-list"? namespace="uri"?>*
<soap:headerfault message="qname" part="nmtoken"
use="literal|encoded"
encodingStyle="uri-list"? namespace="uri"?/>*
</soap:header>
</input>
And the definition for fault::
<soap:fault name="nmtoken" use="literal|encoded"
encodingStyle="uri-list"? namespace="uri"?>
"""
name = xmlelement.get('name')
obj = cls(definitions.wsdl, name, operation, nsmap=nsmap, type=type)
body_data = None
header_data = None
# After some profiling it turns out that .find() and .findall() in this
# case are twice as fast as the xpath method
body = xmlelement.find('soap:body', namespaces=operation.binding.nsmap)
if body is not None:
body_data = cls._parse_body(body)
# Parse soap:header (multiple)
elements = xmlelement.findall(
'soap:header', namespaces=operation.binding.nsmap)
header_data = cls._parse_header(
elements, definitions.target_namespace, operation)
obj._resolve_info = {
'body': body_data,
'header': header_data
}
return obj
@classmethod
def _parse_body(cls, xmlelement):
"""Parse soap:body and return a dict with data to resolve it.
<soap:body parts="nmtokens"? use="literal|encoded"?
encodingStyle="uri-list"? namespace="uri"?>
"""
return {
'part': xmlelement.get('part'),
'use': xmlelement.get('use', 'literal'),
'encodingStyle': xmlelement.get('encodingStyle'),
'namespace': xmlelement.get('namespace'),
}
@classmethod
def _parse_header(cls, xmlelements, tns, operation):
"""Parse the soap:header and optionally included soap:headerfault elements
<soap:header
message="qname"
part="nmtoken"
use="literal|encoded"
encodingStyle="uri-list"?
namespace="uri"?
/>*
        The header can optionally contain one or more soap:headerfault
elements which can contain the same attributes as the soap:header::
<soap:headerfault message="qname" part="nmtoken" use="literal|encoded"
encodingStyle="uri-list"? namespace="uri"?/>*
"""
result = []
for xmlelement in xmlelements:
data = cls._parse_header_element(xmlelement, tns)
# Add optional soap:headerfault elements
data['faults'] = []
fault_elements = xmlelement.findall(
'soap:headerfault', namespaces=operation.binding.nsmap)
for fault_element in fault_elements:
fault_data = cls._parse_header_element(fault_element, tns)
data['faults'].append(fault_data)
result.append(data)
return result
@classmethod
def _parse_header_element(cls, xmlelement, tns):
attributes = xmlelement.attrib
message_qname = as_qname(
attributes['message'], xmlelement.nsmap, tns)
try:
return {
'message': message_qname,
'part': attributes['part'],
'use': attributes['use'],
'encodingStyle': attributes.get('encodingStyle'),
'namespace': attributes.get('namespace'),
}
except KeyError:
raise exceptions.WsdlSyntaxError("Invalid soap:header(fault)")
def resolve(self, definitions, abstract_message):
"""Resolve the data in the self._resolve_info dict (set via parse())
This creates three xsd.Element objects:
- self.header
- self.body
- self.envelope (combination of headers and body)
XXX headerfaults are not implemented yet.
"""
info = self._resolve_info
del self._resolve_info
# If this message has no parts then we have nothing to do. This might
# happen for output messages which don't return anything.
if (abstract_message is None or not abstract_message.parts) and self.type != 'input':
return
self.abstract = abstract_message
parts = OrderedDict(self.abstract.parts)
self.header = self._resolve_header(info['header'], definitions, parts)
self.body = self._resolve_body(info['body'], definitions, parts)
self.envelope = self._create_envelope_element()
def _create_envelope_element(self):
"""Create combined `envelope` complexType which contains both the
elements from the body and the headers.
"""
all_elements = xsd.Sequence([])
if self.header.type._element:
all_elements.append(
xsd.Element('{%s}header' % self.nsmap['soap-env'], self.header.type))
all_elements.append(
xsd.Element(
'{%s}body' % self.nsmap['soap-env'],
self.body.type if self.body else None))
return xsd.Element('{%s}envelope' % self.nsmap['soap-env'], xsd.ComplexType(all_elements))
def _serialize_header(self, headers_value, nsmap):
if not headers_value:
return
headers_value = copy.deepcopy(headers_value)
soap = ElementMaker(namespace=self.nsmap['soap-env'], nsmap=nsmap)
header = soap.Header()
if isinstance(headers_value, list):
for header_value in headers_value:
if hasattr(header_value, '_xsd_elm'):
header_value._xsd_elm.render(header, header_value)
elif hasattr(header_value, '_xsd_type'):
header_value._xsd_type.render(header, header_value)
elif isinstance(header_value, etree._Element):
header.append(header_value)
else:
raise ValueError("Invalid value given to _soapheaders")
elif isinstance(headers_value, dict):
if not self.header:
raise ValueError(
"_soapheaders only accepts a dictionary if the wsdl "
"defines the headers.")
# Only render headers for which we have a value
headers_value = self.header(**headers_value)
for name, elm in self.header.type.elements:
if name in headers_value and headers_value[name] is not None:
elm.render(header, headers_value[name], ['header', name])
else:
raise ValueError("Invalid value given to _soapheaders")
return header
def _deserialize_headers(self, xmlelement):
"""Deserialize the values in the SOAP:Header element"""
if not self.header or xmlelement is None:
return {}
context = XmlParserContext(settings=self.wsdl.settings)
result = self.header.parse(xmlelement, self.wsdl.types, context=context)
if result is not None:
return {'header': result}
return {}
def _resolve_header(self, info, definitions, parts):
name = etree.QName(self.nsmap['soap-env'], 'Header')
container = xsd.All(consume_other=True)
if not info:
return xsd.Element(name, xsd.ComplexType(container))
for item in info:
message_name = item['message'].text
part_name = item['part']
message = definitions.get('messages', message_name)
if message == self.abstract and part_name in parts:
del parts[part_name]
part = message.parts[part_name]
if part.element:
element = part.element.clone()
element.attr_name = part_name
else:
element = xsd.Element(part_name, part.type)
container.append(element)
return xsd.Element(name, xsd.ComplexType(container))
class DocumentMessage(SoapMessage):
"""In the document message there are no additional wrappers, and the
message parts appear directly under the SOAP Body element.
.. inheritance-diagram:: zeep.wsdl.messages.soap.DocumentMessage
:parts: 1
:param wsdl: The main wsdl document
:type wsdl: zeep.wsdl.Document
:param name:
:param operation: The operation to which this message belongs
:type operation: zeep.wsdl.bindings.soap.SoapOperation
:param type: 'input' or 'output'
:type type: str
:param nsmap: The namespace mapping
:type nsmap: dict
"""
def __init__(self, *args, **kwargs):
super(DocumentMessage, self).__init__(*args, **kwargs)
def _deserialize_body(self, xmlelement):
if not self._is_body_wrapped:
# TODO: For now we assume that the body only has one child since
# only one part is specified in the wsdl. This should be handled
# way better
xmlelement = list(xmlelement)[0]
context = XmlParserContext(settings=self.wsdl.settings)
result = self.body.parse(xmlelement, self.wsdl.types, context=context)
return {'body': result}
def _resolve_body(self, info, definitions, parts):
name = etree.QName(self.nsmap['soap-env'], 'Body')
if not info or not parts:
return None
# If the part name is omitted then all parts are available under
# the soap:body tag. Otherwise only the part with the given name.
if info['part']:
part_name = info['part']
sub_elements = [parts[part_name].element]
else:
sub_elements = []
for part_name, part in parts.items():
element = part.element.clone()
element.attr_name = part_name or element.name
sub_elements.append(element)
if len(sub_elements) > 1:
self._is_body_wrapped = True
return xsd.Element(name, xsd.ComplexType(xsd.All(sub_elements)))
else:
self._is_body_wrapped = False
return sub_elements[0]
class RpcMessage(SoapMessage):
"""In RPC messages each part is a parameter or a return value and appears
inside a wrapper element within the body.
The wrapper element is named identically to the operation name and its
namespace is the value of the namespace attribute. Each message part
(parameter) appears under the wrapper, represented by an accessor named
identically to the corresponding parameter of the call. Parts are arranged
in the same order as the parameters of the call.
    .. inheritance-diagram:: zeep.wsdl.messages.soap.RpcMessage
:parts: 1
:param wsdl: The main wsdl document
:type wsdl: zeep.wsdl.Document
:param name:
:param operation: The operation to which this message belongs
:type operation: zeep.wsdl.bindings.soap.SoapOperation
:param type: 'input' or 'output'
:type type: str
:param nsmap: The namespace mapping
:type nsmap: dict
"""
def _resolve_body(self, info, definitions, parts):
"""Return an XSD element for the SOAP:Body.
Each part is a parameter or a return value and appears inside a
wrapper element within the body named identically to the operation
name and its namespace is the value of the namespace attribute.
"""
if not info:
return None
namespace = info['namespace']
if self.type == 'input':
tag_name = etree.QName(namespace, self.operation.name)
else:
tag_name = etree.QName(namespace, self.abstract.name.localname)
# Create the xsd element to create/parse the response. Each part
# is a sub element of the root node (which uses the operation name)
elements = []
for name, msg in parts.items():
if msg.element:
elements.append(msg.element)
else:
elements.append(xsd.Element(name, msg.type))
return xsd.Element(tag_name, xsd.ComplexType(xsd.Sequence(elements)))
def _deserialize_body(self, body_element):
"""The name of the wrapper element is not defined. The WS-I defines
that it should be the operation name with the 'Response' string as
        suffix. For simplicity, we just use the first element of the body.
"""
process_multiref(body_element)
response_element = list(body_element)[0]
if self.body:
context = XmlParserContext(self.wsdl.settings)
result = self.body.parse(
response_element, self.wsdl.types, context=context)
return {'body': result}
return {'body': None}
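# Usage sketch (not part of this module): these message classes are normally
# exercised indirectly through the public zeep API; the WSDL URL, operation
# name and header element below are hypothetical.
#
#   from zeep import Client
#   client = Client("http://example.com/service?wsdl")
#   # serialize() builds the soap:Envelope for the request; deserialize()
#   # unwraps the soap:Body (and any soap:Header) of the response.
#   result = client.service.GetQuote(symbol="ABC",
#                                    _soapheaders={"token": "..."})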
|
import unittest
import re
import pytest
import numpy as np
import scipy
from scipy.optimize import check_grad, approx_fprime
from six.moves import xrange
from sklearn.metrics import pairwise_distances, euclidean_distances
from sklearn.datasets import (load_iris, make_classification, make_regression,
make_spd_matrix)
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose)
from sklearn.utils.testing import assert_warns_message
from sklearn.exceptions import ConvergenceWarning, ChangedBehaviorWarning
from sklearn.utils.validation import check_X_y
try:
from inverse_covariance import quic
assert(quic)
except ImportError:
HAS_SKGGM = False
else:
HAS_SKGGM = True
from metric_learn import (LMNN, NCA, LFDA, Covariance, MLKR, MMC,
LSML_Supervised, ITML_Supervised, SDML_Supervised,
RCA_Supervised, MMC_Supervised, SDML, RCA, ITML,
LSML)
# Import this specially for testing.
from metric_learn.constraints import wrap_pairs
from metric_learn.lmnn import _sum_outer_products
def class_separation(X, labels):
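  # Average, over classes, of the mean within-class pairwise distance divided
  # by the mean distance to points of other classes; smaller values mean the
  # classes are better separated in the (transformed) space.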
unique_labels, label_inds = np.unique(labels, return_inverse=True)
ratio = 0
for li in xrange(len(unique_labels)):
Xc = X[label_inds == li]
Xnc = X[label_inds != li]
ratio += pairwise_distances(Xc).mean() / pairwise_distances(Xc, Xnc).mean()
return ratio / len(unique_labels)
class MetricTestCase(unittest.TestCase):
@classmethod
def setUpClass(self):
# runs once per test class
iris_data = load_iris()
self.iris_points = iris_data['data']
self.iris_labels = iris_data['target']
np.random.seed(1234)
class TestCovariance(MetricTestCase):
def test_iris(self):
cov = Covariance()
cov.fit(self.iris_points)
csep = class_separation(cov.transform(self.iris_points), self.iris_labels)
# deterministic result
self.assertAlmostEqual(csep, 0.72981476)
def test_singular_returns_pseudo_inverse(self):
"""Checks that if the input covariance matrix is singular, we return
the pseudo inverse"""
X, y = load_iris(return_X_y=True)
# We add a virtual column that is a linear combination of the other
# columns so that the covariance matrix will be singular
X = np.concatenate([X, X[:, :2].dot([[2], [3]])], axis=1)
cov_matrix = np.cov(X, rowvar=False)
covariance = Covariance()
covariance.fit(X)
pseudo_inverse = covariance.get_mahalanobis_matrix()
# here is the definition of a pseudo inverse according to wikipedia:
assert_allclose(cov_matrix.dot(pseudo_inverse).dot(cov_matrix),
cov_matrix)
assert_allclose(pseudo_inverse.dot(cov_matrix).dot(pseudo_inverse),
pseudo_inverse)
class TestLSML(MetricTestCase):
def test_iris(self):
lsml = LSML_Supervised(num_constraints=200)
lsml.fit(self.iris_points, self.iris_labels)
csep = class_separation(lsml.transform(self.iris_points), self.iris_labels)
self.assertLess(csep, 0.8) # it's pretty terrible
def test_deprecation_num_labeled(self):
# test that a deprecation message is thrown if num_labeled is set at
# initialization
# TODO: remove in v.0.6
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
lsml_supervised = LSML_Supervised(num_labeled=np.inf)
msg = ('"num_labeled" parameter is not used.'
' It has been deprecated in version 0.5.0 and will be'
' removed in 0.6.0')
assert_warns_message(DeprecationWarning, msg, lsml_supervised.fit, X, y)
def test_changed_behaviour_warning(self):
# test that a ChangedBehavior warning is thrown about the init, if the
# default parameters are used.
# TODO: remove in v.0.6
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
lsml_supervised = LSML_Supervised()
msg = ("Warning, no prior was set (`prior=None`). As of version 0.5.0, "
"the default prior will now be set to "
"'identity', instead of 'covariance'. If you still want to use "
"the inverse of the covariance matrix as a prior, "
"set prior='covariance'. This warning will disappear in "
"v0.6.0, and `prior` parameter's default value will be set to "
"'identity'.")
with pytest.warns(ChangedBehaviorWarning) as raised_warning:
lsml_supervised.fit(X, y)
assert any(msg == str(wrn.message) for wrn in raised_warning)
pairs = np.array([[[-10., 0.], [10., 0.], [-5., 3.], [5., 0.]],
[[0., 50.], [0., -60], [-10., 0.], [10., 0.]]])
lsml = LSML()
with pytest.warns(ChangedBehaviorWarning) as raised_warning:
lsml.fit(pairs)
assert any(msg == str(wrn.message) for wrn in raised_warning)
def test_deprecation_random_state(self):
# test that a deprecation message is thrown if random_state is set at
# fit time
# TODO: remove in v.0.6
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
lsml_supervised = LSML_Supervised()
msg = ('"random_state" parameter in the `fit` function is '
'deprecated. Set `random_state` at initialization '
'instead (when instantiating a new `LSML_Supervised` '
'object).')
with pytest.warns(DeprecationWarning) as raised_warning:
lsml_supervised.fit(X, y, random_state=np.random)
assert any(msg == str(wrn.message) for wrn in raised_warning)
def test_changed_behaviour_warning_random_state(self):
# test that a ChangedBehavior warning is thrown if the random_state is
# not set in fit.
# TODO: remove in v.0.6
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
lsml_supervised = LSML_Supervised()
msg = ('As of v0.5.0, `LSML_Supervised` now uses the '
'`random_state` given at initialization to sample '
'constraints, not the default `np.random` from the `fit` '
'method, since this argument is now deprecated. '
'This warning will disappear in v0.6.0.')
with pytest.warns(ChangedBehaviorWarning) as raised_warning:
lsml_supervised.fit(X, y)
assert any(msg == str(wrn.message) for wrn in raised_warning)
class TestITML(MetricTestCase):
def test_iris(self):
itml = ITML_Supervised(num_constraints=200)
itml.fit(self.iris_points, self.iris_labels)
csep = class_separation(itml.transform(self.iris_points), self.iris_labels)
self.assertLess(csep, 0.2)
def test_deprecation_num_labeled(self):
# test that a deprecation message is thrown if num_labeled is set at
# initialization
# TODO: remove in v.0.6
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
itml_supervised = ITML_Supervised(num_labeled=np.inf)
msg = ('"num_labeled" parameter is not used.'
' It has been deprecated in version 0.5.0 and will be'
' removed in 0.6.0')
assert_warns_message(DeprecationWarning, msg, itml_supervised.fit, X, y)
def test_deprecation_bounds(self):
# test that a deprecation message is thrown if bounds is set at
# initialization
# TODO: remove in v.0.6
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
itml_supervised = ITML_Supervised(bounds=None)
msg = ('"bounds" parameter from initialization is not used.'
' It has been deprecated in version 0.5.0 and will be'
' removed in 0.6.0. Use the "bounds" parameter of this '
'fit method instead.')
assert_warns_message(DeprecationWarning, msg, itml_supervised.fit, X, y)
def test_deprecation_A0(self):
# test that a deprecation message is thrown if A0 is set at
# initialization
# TODO: remove in v.0.6
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
itml_supervised = ITML_Supervised(A0=np.ones_like(X))
msg = ('"A0" parameter is not used.'
' It has been deprecated in version 0.5.0 and will be'
'removed in 0.6.0. Use "prior" instead.')
with pytest.warns(DeprecationWarning) as raised_warning:
itml_supervised.fit(X, y)
assert any(msg == str(wrn.message) for wrn in raised_warning)
pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]])
y_pairs = [1, -1]
itml = ITML(A0=np.ones_like(X))
with pytest.warns(DeprecationWarning) as raised_warning:
itml.fit(pairs, y_pairs)
assert any(msg == str(wrn.message) for wrn in raised_warning)
def test_deprecation_random_state(self):
# test that a deprecation message is thrown if random_state is set at
# fit time
# TODO: remove in v.0.6
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
itml_supervised = ITML_Supervised()
msg = ('"random_state" parameter in the `fit` function is '
'deprecated. Set `random_state` at initialization '
'instead (when instantiating a new `ITML_Supervised` '
'object).')
with pytest.warns(DeprecationWarning) as raised_warning:
itml_supervised.fit(X, y, random_state=np.random)
assert any(msg == str(wrn.message) for wrn in raised_warning)
def test_changed_behaviour_warning_random_state(self):
# test that a ChangedBehavior warning is thrown if the random_state is
# not set in fit.
# TODO: remove in v.0.6
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
itml_supervised = ITML_Supervised()
msg = ('As of v0.5.0, `ITML_Supervised` now uses the '
'`random_state` given at initialization to sample '
'constraints, not the default `np.random` from the `fit` '
'method, since this argument is now deprecated. '
'This warning will disappear in v0.6.0.')
with pytest.warns(ChangedBehaviorWarning) as raised_warning:
itml_supervised.fit(X, y)
assert any(msg == str(wrn.message) for wrn in raised_warning)
@pytest.mark.parametrize('bounds', [None, (20., 100.), [20., 100.],
np.array([20., 100.]),
np.array([[20., 100.]]),
np.array([[20], [100]])])
def test_bounds_parameters_valid(bounds):
"""Asserts that we can provide any array-like of two elements as bounds,
and that the attribute bound_ is a numpy array"""
pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]])
y_pairs = [1, -1]
itml = ITML()
itml.fit(pairs, y_pairs, bounds=bounds)
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
itml_supervised = ITML_Supervised()
itml_supervised.fit(X, y, bounds=bounds)
@pytest.mark.parametrize('bounds', ['weird', ['weird1', 'weird2'],
np.array([1, 2, 3])])
def test_bounds_parameters_invalid(bounds):
"""Assert that if a non array-like is put for bounds, or an array-like
of length different than 2, an error is returned"""
pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]])
y_pairs = [1, -1]
itml = ITML()
with pytest.raises(Exception):
itml.fit(pairs, y_pairs, bounds=bounds)
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
itml_supervised = ITML_Supervised()
with pytest.raises(Exception):
itml_supervised.fit(X, y, bounds=bounds)
class TestLMNN(MetricTestCase):
def test_iris(self):
lmnn = LMNN(k=5, learn_rate=1e-6, verbose=False)
lmnn.fit(self.iris_points, self.iris_labels)
csep = class_separation(lmnn.transform(self.iris_points),
self.iris_labels)
self.assertLess(csep, 0.25)
def test_loss_grad_lbfgs(self):
"""Test gradient of loss function
Assert that the gradient is almost equal to its finite differences
approximation.
"""
rng = np.random.RandomState(42)
X, y = make_classification(random_state=rng)
L = rng.randn(rng.randint(1, X.shape[1] + 1), X.shape[1])
lmnn = LMNN()
k = lmnn.k
reg = lmnn.regularization
X, y = lmnn._prepare_inputs(X, y, dtype=float,
ensure_min_samples=2)
num_pts, n_components = X.shape
unique_labels, label_inds = np.unique(y, return_inverse=True)
lmnn.labels_ = np.arange(len(unique_labels))
lmnn.components_ = np.eye(n_components)
target_neighbors = lmnn._select_targets(X, label_inds)
# sum outer products
dfG = _sum_outer_products(X, target_neighbors.flatten(),
np.repeat(np.arange(X.shape[0]), k))
# initialize L
def loss_grad(flat_L):
return lmnn._loss_grad(X, flat_L.reshape(-1, X.shape[1]), dfG,
k, reg, target_neighbors, label_inds)
def fun(x):
return loss_grad(x)[1]
def grad(x):
return loss_grad(x)[0].ravel()
# compute relative error
epsilon = np.sqrt(np.finfo(float).eps)
rel_diff = (check_grad(fun, grad, L.ravel()) /
np.linalg.norm(approx_fprime(L.ravel(), fun, epsilon)))
np.testing.assert_almost_equal(rel_diff, 0., decimal=5)
def test_changed_behaviour_warning(self):
# test that a ChangedBehavior warning is thrown about the init, if the
# default parameters are used.
# TODO: remove in v.0.6
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
lmnn = LMNN(k=2)
msg = ("Warning, no init was set (`init=None`). As of version 0.5.0, "
"the default init will now be set to 'auto', instead of the "
"previous identity matrix. If you still want to use the identity "
"matrix as before, set init='identity'. This warning "
"will disappear in v0.6.0, and `init` parameter's default value "
"will be set to 'auto'.")
with pytest.warns(ChangedBehaviorWarning) as raised_warning:
lmnn.fit(X, y)
assert any(msg == str(wrn.message) for wrn in raised_warning)
def test_deprecation_use_pca(self):
# test that a DeprecationWarning is thrown about use_pca, if the
# default parameters are used.
# TODO: remove in v.0.6
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
lmnn = LMNN(k=2, use_pca=True)
msg = ('"use_pca" parameter is not used.'
' It has been deprecated in version 0.5.0 and will be'
' removed in 0.6.0.')
assert_warns_message(DeprecationWarning, msg, lmnn.fit, X, y)
def test_loss_func(capsys):
"""Test the loss function (and its gradient) on a simple example,
  by comparing the results of the actual metric-learn implementation with
  those of a very simple (but non-performant) reference implementation"""
# toy dataset to use
X, y = make_classification(n_samples=10, n_classes=2,
n_features=6,
n_redundant=0, shuffle=True,
scale=[1, 1, 20, 20, 20, 20], random_state=42)
def hinge(a):
if a > 0:
return a, 1
else:
return 0, 0
def loss_fn(L, X, y, target_neighbors, reg):
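    # Plain-python LMNN reference loss: a (1 - reg) "pull" term summing squared
    # distances to each point's target neighbours, plus a reg-weighted hinge
    # "push" term over differently-labelled impostors; the gradient is
    # accumulated term by term and doubled at the end.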
L = L.reshape(-1, X.shape[1])
Lx = np.dot(X, L.T)
loss = 0
total_active = 0
grad = np.zeros_like(L)
for i in range(X.shape[0]):
for j in target_neighbors[i]:
loss += (1 - reg) * np.sum((Lx[i] - Lx[j]) ** 2)
grad += (1 - reg) * np.outer(Lx[i] - Lx[j], X[i] - X[j])
for l in range(X.shape[0]):
if y[i] != y[l]:
hin, active = hinge(1 + np.sum((Lx[i] - Lx[j])**2) -
np.sum((Lx[i] - Lx[l])**2))
total_active += active
if active:
loss += reg * hin
grad += (reg * (np.outer(Lx[i] - Lx[j], X[i] - X[j]) -
np.outer(Lx[i] - Lx[l], X[i] - X[l])))
grad = 2 * grad
return grad, loss, total_active
# we check that the gradient we have computed in the non-performant implem
# is indeed the true gradient on a toy example:
def _select_targets(X, y, k):
target_neighbors = np.empty((X.shape[0], k), dtype=int)
for label in np.unique(y):
inds, = np.nonzero(y == label)
dd = euclidean_distances(X[inds], squared=True)
np.fill_diagonal(dd, np.inf)
nn = np.argsort(dd)[..., :k]
target_neighbors[inds] = inds[nn]
return target_neighbors
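  # For each point, pick its k nearest neighbours with the same label (here
  # k=2) as the LMNN target neighbours.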
target_neighbors = _select_targets(X, y, 2)
regularization = 0.5
n_features = X.shape[1]
x0 = np.random.randn(1, n_features)
def loss(x0):
return loss_fn(x0.reshape(-1, X.shape[1]), X, y, target_neighbors,
regularization)[1]
def grad(x0):
return loss_fn(x0.reshape(-1, X.shape[1]), X, y, target_neighbors,
regularization)[0].ravel()
scipy.optimize.check_grad(loss, grad, x0.ravel())
class LMNN_with_callback(LMNN):
""" We will use a callback to get the gradient (see later)
"""
def __init__(self, callback, *args, **kwargs):
self.callback = callback
super(LMNN_with_callback, self).__init__(*args, **kwargs)
def _loss_grad(self, *args, **kwargs):
grad, objective, total_active = (
super(LMNN_with_callback, self)._loss_grad(*args, **kwargs))
self.callback.append(grad)
return grad, objective, total_active
class LMNN_nonperformant(LMNN_with_callback):
def fit(self, X, y):
self.y = y
return super(LMNN_nonperformant, self).fit(X, y)
def _loss_grad(self, X, L, dfG, k, reg, target_neighbors, label_inds):
grad, loss, total_active = loss_fn(L.ravel(), X, self.y,
target_neighbors, self.regularization)
self.callback.append(grad)
return grad, loss, total_active
mem1, mem2 = [], []
lmnn_perf = LMNN_with_callback(verbose=True, random_state=42,
init='identity', max_iter=30, callback=mem1)
lmnn_nonperf = LMNN_nonperformant(verbose=True, random_state=42,
init='identity', max_iter=30,
callback=mem2)
objectives, obj_diffs, learn_rate, total_active = (dict(), dict(), dict(),
dict())
for algo, name in zip([lmnn_perf, lmnn_nonperf], ['perf', 'nonperf']):
algo.fit(X, y)
out, _ = capsys.readouterr()
lines = re.split("\n+", out)
# we get every variable that is printed from the algorithm in verbose
num = r'(-?\d+.?\d*(e[+|-]\d+)?)'
strings = [re.search(r"\d+ (?:{}) (?:{}) (?:(\d+)) (?:{})"
.format(num, num, num), s) for s in lines]
objectives[name] = [float(match.group(1)) for match in strings if match is
not None]
obj_diffs[name] = [float(match.group(3)) for match in strings if match is
not None]
total_active[name] = [float(match.group(5)) for match in strings if
match is not
None]
learn_rate[name] = [float(match.group(6)) for match in strings if match is
not None]
assert len(strings) >= 10 # we ensure that we actually did more than 10
# iterations
assert total_active[name][0] >= 2 # we ensure that we have some active
# constraints (that's the case we want to test)
# we remove the last element because it can be equal to the penultimate
# if the last gradient update is null
for i in range(len(mem1)):
np.testing.assert_allclose(lmnn_perf.callback[i],
lmnn_nonperf.callback[i],
err_msg='Gradient different at position '
'{}'.format(i))
np.testing.assert_allclose(objectives['perf'], objectives['nonperf'])
np.testing.assert_allclose(obj_diffs['perf'], obj_diffs['nonperf'])
np.testing.assert_allclose(total_active['perf'], total_active['nonperf'])
np.testing.assert_allclose(learn_rate['perf'], learn_rate['nonperf'])
@pytest.mark.parametrize('X, y, loss', [(np.array([[0], [1], [2], [3]]),
[1, 1, 0, 0], 3.0),
(np.array([[0], [1], [2], [3]]),
[1, 0, 0, 1], 26.)])
def test_toy_ex_lmnn(X, y, loss):
"""Test that the loss give the right result on a toy example"""
L = np.array([[1]])
lmnn = LMNN(k=1, regularization=0.5)
k = lmnn.k
reg = lmnn.regularization
X, y = lmnn._prepare_inputs(X, y, dtype=float,
ensure_min_samples=2)
num_pts, n_components = X.shape
unique_labels, label_inds = np.unique(y, return_inverse=True)
lmnn.labels_ = np.arange(len(unique_labels))
lmnn.components_ = np.eye(n_components)
target_neighbors = lmnn._select_targets(X, label_inds)
# sum outer products
dfG = _sum_outer_products(X, target_neighbors.flatten(),
np.repeat(np.arange(X.shape[0]), k))
# storage
a1 = [None] * k
a2 = [None] * k
for nn_idx in xrange(k):
a1[nn_idx] = np.array([])
a2[nn_idx] = np.array([])
# assert that the loss equals the one computed by hand
assert lmnn._loss_grad(X, L.reshape(-1, X.shape[1]), dfG, k,
reg, target_neighbors, label_inds)[1] == loss
def test_convergence_simple_example(capsys):
# LMNN should converge on this simple example, which it did not with
# this issue: https://github.com/scikit-learn-contrib/metric-learn/issues/88
X, y = make_classification(random_state=0)
lmnn = LMNN(verbose=True)
lmnn.fit(X, y)
out, _ = capsys.readouterr()
assert "LMNN converged with objective" in out
def test_no_twice_same_objective(capsys):
# test that the objective function never has twice the same value
# see https://github.com/scikit-learn-contrib/metric-learn/issues/88
X, y = make_classification(random_state=0)
lmnn = LMNN(verbose=True)
lmnn.fit(X, y)
out, _ = capsys.readouterr()
lines = re.split("\n+", out)
# we get only objectives from each line:
# the regexp matches a float that follows an integer (the iteration
# number), and which is followed by a (signed) float (delta obj). It
# matches for instance:
# 3 **1113.7665747189938** -3.182774197440267 46431.0200999999999998e-06
objectives = [re.search(r"\d* (?:(\d*.\d*))[ | -]\d*.\d*", s)
for s in lines]
objectives = [match.group(1) for match in objectives if match is not None]
# we remove the last element because it can be equal to the penultimate
# if the last gradient update is null
assert len(objectives[:-1]) == len(set(objectives[:-1]))
class TestSDML(MetricTestCase):
@pytest.mark.skipif(HAS_SKGGM,
reason="The warning can be thrown only if skggm is "
"not installed.")
def test_sdml_supervised_raises_warning_msg_not_installed_skggm(self):
"""Tests that the right warning message is raised if someone tries to
use SDML_Supervised but has not installed skggm, and that the algorithm
fails to converge"""
# TODO: remove if we don't need skggm anymore
# load_iris: dataset where we know scikit-learn's graphical lasso fails
# with a Floating Point error
X, y = load_iris(return_X_y=True)
sdml_supervised = SDML_Supervised(balance_param=0.5, use_cov=True,
sparsity_param=0.01)
msg = ("There was a problem in SDML when using scikit-learn's graphical "
"lasso solver. skggm's graphical lasso can sometimes converge on "
"non SPD cases where scikit-learn's graphical lasso fails to "
"converge. Try to install skggm and rerun the algorithm (see "
"the README.md for the right version of skggm). The following "
"error message was thrown:")
with pytest.raises(RuntimeError) as raised_error:
sdml_supervised.fit(X, y)
assert str(raised_error.value).startswith(msg)
@pytest.mark.skipif(HAS_SKGGM,
reason="The warning can be thrown only if skggm is "
"not installed.")
def test_sdml_raises_warning_msg_not_installed_skggm(self):
"""Tests that the right warning message is raised if someone tries to
use SDML but has not installed skggm, and that the algorithm fails to
converge"""
# TODO: remove if we don't need skggm anymore
# case on which we know that scikit-learn's graphical lasso fails
# because it will return a non SPD matrix
pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]])
y_pairs = [1, -1]
sdml = SDML(prior='identity', balance_param=100, verbose=True)
msg = ("There was a problem in SDML when using scikit-learn's graphical "
"lasso solver. skggm's graphical lasso can sometimes converge on "
"non SPD cases where scikit-learn's graphical lasso fails to "
"converge. Try to install skggm and rerun the algorithm (see "
"the README.md for the right version of skggm).")
with pytest.raises(RuntimeError) as raised_error:
sdml.fit(pairs, y_pairs)
assert msg == str(raised_error.value)
@pytest.mark.skipif(not HAS_SKGGM,
reason="The warning can be thrown only if skggm is "
"installed.")
def test_sdml_raises_warning_msg_installed_skggm(self):
"""Tests that the right warning message is raised if someone tries to
use SDML and has installed skggm, and that the algorithm fails to
converge"""
# TODO: remove if we don't need skggm anymore
# case on which we know that skggm's graphical lasso fails
# because it will return non finite values
pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]])
y_pairs = [1, -1]
sdml = SDML(prior='identity', balance_param=100, verbose=True)
msg = ("There was a problem in SDML when using skggm's graphical "
"lasso solver.")
with pytest.raises(RuntimeError) as raised_error:
sdml.fit(pairs, y_pairs)
assert msg == str(raised_error.value)
@pytest.mark.skipif(not HAS_SKGGM,
reason="The warning can be thrown only if skggm is "
"installed.")
def test_sdml_supervised_raises_warning_msg_installed_skggm(self):
"""Tests that the right warning message is raised if someone tries to
    use SDML_Supervised and has installed skggm, and that the algorithm
fails to converge"""
# TODO: remove if we don't need skggm anymore
# case on which we know that skggm's graphical lasso fails
# because it will return non finite values
rng = np.random.RandomState(42)
# This example will create a diagonal em_cov with a negative coeff (
# pathological case)
X = np.array([[-10., 0.], [10., 0.], [5., 0.], [3., 0.]])
y = [0, 0, 1, 1]
sdml_supervised = SDML_Supervised(balance_param=0.5, prior='identity',
sparsity_param=0.01, random_state=rng)
msg = ("There was a problem in SDML when using skggm's graphical "
"lasso solver.")
with pytest.raises(RuntimeError) as raised_error:
sdml_supervised.fit(X, y)
assert msg == str(raised_error.value)
@pytest.mark.skipif(not HAS_SKGGM,
reason="It's only in the case where skggm is installed"
"that no warning should be thrown.")
def test_raises_no_warning_installed_skggm(self):
# otherwise we should be able to instantiate and fit SDML and it
# should raise no error and no ConvergenceWarning
pairs = np.array([[[-10., 0.], [10., 0.]], [[0., -55.], [0., -60]]])
y_pairs = [1, -1]
X, y = make_classification(random_state=42)
with pytest.warns(None) as records:
sdml = SDML(prior='covariance')
sdml.fit(pairs, y_pairs)
for record in records:
assert record.category is not ConvergenceWarning
with pytest.warns(None) as records:
sdml_supervised = SDML_Supervised(prior='identity', balance_param=1e-5)
sdml_supervised.fit(X, y)
for record in records:
assert record.category is not ConvergenceWarning
def test_iris(self):
# Note: this is a flaky test, which fails for certain seeds.
# TODO: un-flake it!
rs = np.random.RandomState(5555)
sdml = SDML_Supervised(num_constraints=1500, prior='identity',
balance_param=5e-5)
sdml.fit(self.iris_points, self.iris_labels, random_state=rs)
csep = class_separation(sdml.transform(self.iris_points),
self.iris_labels)
self.assertLess(csep, 0.22)
def test_deprecation_num_labeled(self):
# test that a deprecation message is thrown if num_labeled is set at
# initialization
# TODO: remove in v.0.6
X, y = make_classification(random_state=42)
sdml_supervised = SDML_Supervised(num_labeled=np.inf, prior='identity',
balance_param=5e-5)
msg = ('"num_labeled" parameter is not used.'
' It has been deprecated in version 0.5.0 and will be'
' removed in 0.6.0')
assert_warns_message(DeprecationWarning, msg, sdml_supervised.fit, X, y)
def test_sdml_raises_warning_non_psd(self):
"""Tests that SDML raises a warning on a toy example where we know the
pseudo-covariance matrix is not PSD"""
pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]])
y = [1, -1]
sdml = SDML(prior='covariance', sparsity_param=0.01, balance_param=0.5)
msg = ("Warning, the input matrix of graphical lasso is not "
"positive semi-definite (PSD). The algorithm may diverge, "
"and lead to degenerate solutions. "
"To prevent that, try to decrease the balance parameter "
"`balance_param` and/or to set prior='identity'.")
with pytest.warns(ConvergenceWarning) as raised_warning:
try:
sdml.fit(pairs, y)
except Exception:
pass
# we assert that this warning is in one of the warning raised by the
# estimator
assert msg in list(map(lambda w: str(w.message), raised_warning))
def test_sdml_converges_if_psd(self):
"""Tests that sdml converges on a simple problem where we know the
pseudo-covariance matrix is PSD"""
pairs = np.array([[[-10., 0.], [10., 0.]], [[0., -55.], [0., -60]]])
y = [1, -1]
sdml = SDML(prior='covariance', sparsity_param=0.01, balance_param=0.5)
sdml.fit(pairs, y)
assert np.isfinite(sdml.get_mahalanobis_matrix()).all()
@pytest.mark.skipif(not HAS_SKGGM,
reason="sklearn's graphical_lasso can sometimes not "
"work on some non SPD problems. We test that "
"is works only if skggm is installed.")
def test_sdml_works_on_non_spd_pb_with_skggm(self):
"""Test that SDML works on a certain non SPD problem on which we know
it should work, but scikit-learn's graphical_lasso does not work"""
X, y = load_iris(return_X_y=True)
sdml = SDML_Supervised(balance_param=0.5, sparsity_param=0.01,
prior='covariance',
random_state=np.random.RandomState(42))
sdml.fit(X, y)
def test_deprecation_use_cov(self):
# test that a deprecation message is thrown if use_cov is set at
# initialization
# TODO: remove in v.0.6
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
sdml_supervised = SDML_Supervised(use_cov=np.ones_like(X),
balance_param=1e-5)
msg = ('"use_cov" parameter is not used.'
' It has been deprecated in version 0.5.0 and will be'
'removed in 0.6.0. Use "prior" instead.')
with pytest.warns(DeprecationWarning) as raised_warning:
sdml_supervised.fit(X, y)
assert any(msg == str(wrn.message) for wrn in raised_warning)
pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]])
y_pairs = [1, -1]
sdml = SDML(use_cov=np.ones_like(X), balance_param=1e-5)
with pytest.warns(DeprecationWarning) as raised_warning:
sdml.fit(pairs, y_pairs)
assert any(msg == str(wrn.message) for wrn in raised_warning)
def test_changed_behaviour_warning(self):
# test that a ChangedBehavior warning is thrown about the init, if the
# default parameters are used (except for the balance_param that we need
# to set for the algorithm to not diverge)
# TODO: remove in v.0.6
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
sdml_supervised = SDML_Supervised(balance_param=1e-5)
msg = ("Warning, no prior was set (`prior=None`). As of version 0.5.0, "
"the default prior will now be set to "
"'identity', instead of 'covariance'. If you still want to use "
"the inverse of the covariance matrix as a prior, "
"set prior='covariance'. This warning will disappear in "
"v0.6.0, and `prior` parameter's default value will be set to "
"'identity'.")
with pytest.warns(ChangedBehaviorWarning) as raised_warning:
sdml_supervised.fit(X, y)
assert any(msg == str(wrn.message) for wrn in raised_warning)
pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]])
y_pairs = [1, -1]
sdml = SDML(balance_param=1e-5)
with pytest.warns(ChangedBehaviorWarning) as raised_warning:
sdml.fit(pairs, y_pairs)
assert any(msg == str(wrn.message) for wrn in raised_warning)
def test_deprecation_random_state(self):
# test that a deprecation message is thrown if random_state is set at
# fit time
# TODO: remove in v.0.6
X, y = load_iris(return_X_y=True)
sdml_supervised = SDML_Supervised(balance_param=5e-5)
msg = ('"random_state" parameter in the `fit` function is '
'deprecated. Set `random_state` at initialization '
'instead (when instantiating a new `SDML_Supervised` '
'object).')
with pytest.warns(DeprecationWarning) as raised_warning:
sdml_supervised.fit(X, y, random_state=np.random)
assert any(msg == str(wrn.message) for wrn in raised_warning)
def test_changed_behaviour_warning_random_state(self):
# test that a ChangedBehavior warning is thrown if the random_state is
# not set in fit.
# TODO: remove in v.0.6
X, y = load_iris(return_X_y=True)
sdml_supervised = SDML_Supervised(balance_param=5e-5)
msg = ('As of v0.5.0, `SDML_Supervised` now uses the '
'`random_state` given at initialization to sample '
'constraints, not the default `np.random` from the `fit` '
'method, since this argument is now deprecated. '
'This warning will disappear in v0.6.0.')
with pytest.warns(ChangedBehaviorWarning) as raised_warning:
sdml_supervised.fit(X, y)
assert any(msg == str(wrn.message) for wrn in raised_warning)
@pytest.mark.skipif(not HAS_SKGGM,
reason='The message should be printed only if skggm is '
'installed.')
def test_verbose_has_installed_skggm_sdml(capsys):
# Test that if users have installed skggm, a message is printed telling them
# skggm's solver is used (when they use SDML)
# TODO: remove if we don't need skggm anymore
pairs = np.array([[[-10., 0.], [10., 0.]], [[0., -55.], [0., -60]]])
y_pairs = [1, -1]
sdml = SDML(verbose=True, prior='covariance')
sdml.fit(pairs, y_pairs)
out, _ = capsys.readouterr()
assert "SDML will use skggm's graphical lasso solver." in out
@pytest.mark.skipif(not HAS_SKGGM,
reason='The message should be printed only if skggm is '
'installed.')
def test_verbose_has_installed_skggm_sdml_supervised(capsys):
# Test that if users have installed skggm, a message is printed telling them
# skggm's solver is used (when they use SDML_Supervised)
# TODO: remove if we don't need skggm anymore
X, y = load_iris(return_X_y=True)
sdml = SDML_Supervised(verbose=True, prior='identity', balance_param=1e-5)
sdml.fit(X, y)
out, _ = capsys.readouterr()
assert "SDML will use skggm's graphical lasso solver." in out
@pytest.mark.skipif(HAS_SKGGM,
reason='The message should be printed only if skggm is '
'not installed.')
def test_verbose_has_not_installed_skggm_sdml(capsys):
  # Test that if users have not installed skggm, a message is printed telling
  # them scikit-learn's solver is used (when they use SDML)
# TODO: remove if we don't need skggm anymore
pairs = np.array([[[-10., 0.], [10., 0.]], [[0., -55.], [0., -60]]])
y_pairs = [1, -1]
sdml = SDML(verbose=True, prior='covariance')
sdml.fit(pairs, y_pairs)
out, _ = capsys.readouterr()
assert "SDML will use scikit-learn's graphical lasso solver." in out
@pytest.mark.skipif(HAS_SKGGM,
reason='The message should be printed only if skggm is '
'not installed.')
def test_verbose_has_not_installed_skggm_sdml_supervised(capsys):
  # Test that if users have not installed skggm, a message is printed telling
  # them scikit-learn's solver is used (when they use SDML_Supervised)
# TODO: remove if we don't need skggm anymore
X, y = make_classification(random_state=42)
sdml = SDML_Supervised(verbose=True, balance_param=1e-5, prior='identity')
sdml.fit(X, y)
out, _ = capsys.readouterr()
assert "SDML will use scikit-learn's graphical lasso solver." in out
class TestNCA(MetricTestCase):
def test_iris(self):
n = self.iris_points.shape[0]
# Without dimension reduction
nca = NCA(max_iter=(100000 // n))
nca.fit(self.iris_points, self.iris_labels)
csep = class_separation(nca.transform(self.iris_points), self.iris_labels)
self.assertLess(csep, 0.15)
# With dimension reduction
nca = NCA(max_iter=(100000 // n), n_components=2)
nca.fit(self.iris_points, self.iris_labels)
csep = class_separation(nca.transform(self.iris_points), self.iris_labels)
self.assertLess(csep, 0.20)
def test_finite_differences(self):
"""Test gradient of loss function
Assert that the gradient is almost equal to its finite differences
approximation.
"""
# Initialize the transformation `M`, as well as `X` and `y` and `NCA`
X, y = make_classification()
M = np.random.randn(np.random.randint(1, X.shape[1] + 1), X.shape[1])
mask = y[:, np.newaxis] == y[np.newaxis, :]
nca = NCA()
nca.n_iter_ = 0
def fun(M):
return nca._loss_grad_lbfgs(M, X, mask)[0]
def grad(M):
return nca._loss_grad_lbfgs(M, X, mask)[1].ravel()
# compute relative error
epsilon = np.sqrt(np.finfo(float).eps)
rel_diff = (check_grad(fun, grad, M.ravel()) /
np.linalg.norm(approx_fprime(M.ravel(), fun, epsilon)))
np.testing.assert_almost_equal(rel_diff, 0., decimal=6)
def test_simple_example(self):
"""Test on a simple example.
Puts four points in the input space where the opposite labels points are
next to each other. After transform the same labels points should be next
to each other.
"""
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
nca = NCA(n_components=2,)
nca.fit(X, y)
Xansformed = nca.transform(X)
np.testing.assert_equal(pairwise_distances(Xansformed).argsort()[:, 1],
np.array([2, 3, 0, 1]))
def test_singleton_class(self):
X = self.iris_points
y = self.iris_labels
# one singleton class: test fitting works
singleton_class = 1
ind_singleton, = np.where(y == singleton_class)
y[ind_singleton] = 2
y[ind_singleton[0]] = singleton_class
nca = NCA(max_iter=30)
nca.fit(X, y)
# One non-singleton class: test fitting works
ind_1, = np.where(y == 1)
ind_2, = np.where(y == 2)
y[ind_1] = 0
y[ind_1[0]] = 1
y[ind_2] = 0
y[ind_2[0]] = 2
nca = NCA(max_iter=30)
nca.fit(X, y)
# Only singleton classes: test fitting does nothing (the gradient
# must be null in this case, so the final matrix must stay like
# the initialization)
ind_0, = np.where(y == 0)
ind_1, = np.where(y == 1)
ind_2, = np.where(y == 2)
X = X[[ind_0[0], ind_1[0], ind_2[0]]]
y = y[[ind_0[0], ind_1[0], ind_2[0]]]
A = make_spd_matrix(X.shape[1], X.shape[1])
nca = NCA(init=A, max_iter=30, n_components=X.shape[1])
nca.fit(X, y)
assert_array_equal(nca.components_, A)
def test_one_class(self):
# if there is only one class the gradient is null, so the final matrix
# must stay like the initialization
X = self.iris_points[self.iris_labels == 0]
y = self.iris_labels[self.iris_labels == 0]
A = make_spd_matrix(X.shape[1], X.shape[1])
nca = NCA(init=A, max_iter=30, n_components=X.shape[1])
nca.fit(X, y)
assert_array_equal(nca.components_, A)
def test_changed_behaviour_warning(self):
# test that a ChangedBehavior warning is thrown about the init, if the
# default parameters are used.
# TODO: remove in v.0.6
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
nca = NCA()
msg = ("Warning, no init was set (`init=None`). As of version 0.5.0, "
"the default init will now be set to 'auto', instead of the "
"previous scaling matrix. If you still want to use the same "
"scaling matrix as before, set "
"init=np.eye(X.shape[1])/(np.maximum(X.max(axis=0)-X.min(axis=0)"
", EPS))). This warning will disappear in v0.6.0, and `init` "
"parameter's default value will be set to 'auto'.")
with pytest.warns(ChangedBehaviorWarning) as raised_warning:
nca.fit(X, y)
assert any(msg == str(wrn.message) for wrn in raised_warning)
@pytest.mark.parametrize('num_dims', [None, 2])
def test_deprecation_num_dims_nca(num_dims):
# test that a deprecation message is thrown if num_dims is set at
# initialization
# TODO: remove in v.0.6
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
nca = NCA(num_dims=num_dims)
msg = ('"num_dims" parameter is not used.'
' It has been deprecated in version 0.5.0 and will be'
' removed in 0.6.0. Use "n_components" instead')
with pytest.warns(DeprecationWarning) as raised_warning:
nca.fit(X, y)
assert (str(raised_warning[0].message) == msg)
class TestLFDA(MetricTestCase):
def test_iris(self):
lfda = LFDA(k=2, n_components=2)
lfda.fit(self.iris_points, self.iris_labels)
csep = class_separation(lfda.transform(self.iris_points), self.iris_labels)
self.assertLess(csep, 0.15)
# Sanity checks for learned matrices.
self.assertEqual(lfda.get_mahalanobis_matrix().shape, (4, 4))
self.assertEqual(lfda.components_.shape, (2, 4))
@pytest.mark.parametrize('num_dims', [None, 2])
def test_deprecation_num_dims_lfda(num_dims):
# test that a deprecation message is thrown if num_dims is set at
# initialization
# TODO: remove in v.0.6
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
lfda = LFDA(num_dims=num_dims)
msg = ('"num_dims" parameter is not used.'
' It has been deprecated in version 0.5.0 and will be'
' removed in 0.6.0. Use "n_components" instead')
with pytest.warns(DeprecationWarning) as raised_warning:
lfda.fit(X, y)
assert (str(raised_warning[0].message) == msg)
class TestRCA(MetricTestCase):
def test_iris(self):
rca = RCA_Supervised(n_components=2, num_chunks=30, chunk_size=2)
rca.fit(self.iris_points, self.iris_labels)
csep = class_separation(rca.transform(self.iris_points), self.iris_labels)
self.assertLess(csep, 0.29)
def test_deprecation_pca_comps(self):
# test that a deprecation message is thrown if pca_comps is set at
# initialization
# TODO: remove in v.0.6
X, y = make_classification(random_state=42, n_samples=100)
rca_supervised = RCA_Supervised(pca_comps=X.shape[1], num_chunks=20)
msg = ('"pca_comps" parameter is not used. '
'It has been deprecated in version 0.5.0 and will be'
'removed in 0.6.0. RCA will not do PCA preprocessing anymore. If '
'you still want to do it, you could use '
'`sklearn.decomposition.PCA` and an `sklearn.pipeline.Pipeline`.')
with pytest.warns(ChangedBehaviorWarning) as expected_msg:
rca_supervised.fit(X, y)
assert any(str(w.message) == msg for w in expected_msg)
rca = RCA(pca_comps=X.shape[1])
with pytest.warns(ChangedBehaviorWarning) as expected_msg:
rca.fit(X, y)
assert any(str(w.message) == msg for w in expected_msg)
def test_changedbehaviorwarning_preprocessing(self):
# test that a ChangedBehaviorWarning is thrown when using RCA
# TODO: remove in v.0.6
msg = ("RCA will no longer center the data before training. If you want "
"to do some preprocessing, you should do it manually (you can also "
"use an `sklearn.pipeline.Pipeline` for instance). This warning "
"will disappear in version 0.6.0.")
X, y = make_classification(random_state=42, n_samples=100)
rca_supervised = RCA_Supervised(num_chunks=20)
with pytest.warns(ChangedBehaviorWarning) as expected_msg:
rca_supervised.fit(X, y)
assert any(str(w.message) == msg for w in expected_msg)
rca = RCA()
with pytest.warns(ChangedBehaviorWarning) as expected_msg:
rca.fit(X, y)
assert any(str(w.message) == msg for w in expected_msg)
def test_rank_deficient_returns_warning(self):
"""Checks that if the covariance matrix is not invertible, we raise a
warning message advising to use PCA"""
X, y = load_iris(return_X_y=True)
# we make the fourth column a linear combination of the two first,
# so that the covariance matrix will not be invertible:
X[:, 3] = X[:, 0] + 3 * X[:, 1]
rca = RCA()
msg = ('The inner covariance matrix is not invertible, '
'so the transformation matrix may contain Nan values. '
'You should reduce the dimensionality of your input,'
'for instance using `sklearn.decomposition.PCA` as a '
'preprocessing step.')
with pytest.warns(None) as raised_warnings:
rca.fit(X, y)
assert any(str(w.message) == msg for w in raised_warnings)
def test_deprecation_random_state(self):
# test that a deprecation message is thrown if random_state is set at
# fit time
# TODO: remove in v.0.6
X, y = make_classification(random_state=42, n_samples=100)
rca_supervised = RCA_Supervised(num_chunks=20)
msg = ('"random_state" parameter in the `fit` function is '
'deprecated. Set `random_state` at initialization '
'instead (when instantiating a new `RCA_Supervised` '
'object).')
with pytest.warns(DeprecationWarning) as raised_warning:
rca_supervised.fit(X, y, random_state=np.random)
assert any(msg == str(wrn.message) for wrn in raised_warning)
def test_changed_behaviour_warning_random_state(self):
# test that a ChangedBehavior warning is thrown if the random_state is
# not set in fit.
# TODO: remove in v.0.6
X, y = make_classification(random_state=42, n_samples=100)
rca_supervised = RCA_Supervised(num_chunks=20)
msg = ('As of v0.5.0, `RCA_Supervised` now uses the '
'`random_state` given at initialization to sample '
'constraints, not the default `np.random` from the `fit` '
'method, since this argument is now deprecated. '
'This warning will disappear in v0.6.0.')
with pytest.warns(ChangedBehaviorWarning) as raised_warning:
rca_supervised.fit(X, y)
assert any(msg == str(wrn.message) for wrn in raised_warning)
@pytest.mark.parametrize('num_dims', [None, 2])
def test_deprecation_num_dims_rca(num_dims):
# test that a deprecation message is thrown if num_dims is set at
# initialization
# TODO: remove in v.0.6
X, y = load_iris(return_X_y=True)
rca = RCA(num_dims=num_dims)
msg = ('"num_dims" parameter is not used.'
' It has been deprecated in version 0.5.0 and will be'
' removed in 0.6.0. Use "n_components" instead')
with pytest.warns(DeprecationWarning) as raised_warning:
rca.fit(X, y)
assert any(str(w.message) == msg for w in raised_warning)
# we take a small number of chunks so that RCA works on iris
rca_supervised = RCA_Supervised(num_dims=num_dims, num_chunks=10)
msg = ('"num_dims" parameter is not used.'
' It has been deprecated in version 0.5.0 and will be'
' removed in 0.6.0. Use "n_components" instead')
with pytest.warns(DeprecationWarning) as raised_warning:
rca_supervised.fit(X, y)
assert any(str(w.message) == msg for w in raised_warning)
class TestMLKR(MetricTestCase):
def test_iris(self):
mlkr = MLKR()
mlkr.fit(self.iris_points, self.iris_labels)
csep = class_separation(mlkr.transform(self.iris_points), self.iris_labels)
self.assertLess(csep, 0.25)
def test_finite_differences(self):
"""Test gradient of loss function
Assert that the gradient is almost equal to its finite differences
approximation.
"""
    # Initialize the transformation `M`, as well as `X`, `y` and an `MLKR` instance
X, y = make_regression(n_features=4, random_state=1, n_samples=20)
X, y = check_X_y(X, y)
M = np.random.randn(2, X.shape[1])
mlkr = MLKR()
mlkr.n_iter_ = 0
def fun(M):
return mlkr._loss(M, X, y)[0]
def grad_fn(M):
return mlkr._loss(M, X, y)[1].ravel()
# compute relative error
rel_diff = check_grad(fun, grad_fn, M.ravel()) / np.linalg.norm(grad_fn(M))
np.testing.assert_almost_equal(rel_diff, 0.)
def test_deprecation_A0(self):
# test that a deprecation message is thrown if A0 is set at
# initialization
# TODO: remove in v.0.6
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
mlkr = MLKR(A0=np.ones_like(X))
msg = ('"A0" parameter is not used.'
' It has been deprecated in version 0.5.0 and will be'
'removed in 0.6.0. Use "init" instead.')
with pytest.warns(DeprecationWarning) as raised_warning:
mlkr.fit(X, y)
assert any(msg == str(wrn.message) for wrn in raised_warning)
def test_changed_behaviour_warning(self):
# test that a ChangedBehavior warning is thrown about the init, if the
# default parameters are used.
# TODO: remove in v.0.6
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([0.1, 0.2, 0.3, 0.4])
mlkr = MLKR()
msg = ("Warning, no init was set (`init=None`). As of version 0.5.0, "
"the default init will now be set to 'auto', instead of 'pca'. "
"If you still want to use PCA as an init, set init='pca'. "
"This warning will disappear in v0.6.0, and `init` parameter's"
" default value will be set to 'auto'.")
with pytest.warns(ChangedBehaviorWarning) as raised_warning:
mlkr.fit(X, y)
assert any(msg == str(wrn.message) for wrn in raised_warning)
@pytest.mark.parametrize('num_dims', [None, 2])
def test_deprecation_num_dims_mlkr(num_dims):
# test that a deprecation message is thrown if num_dims is set at
# initialization
# TODO: remove in v.0.6
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
mlkr = MLKR(num_dims=num_dims)
msg = ('"num_dims" parameter is not used.'
' It has been deprecated in version 0.5.0 and will be'
' removed in 0.6.0. Use "n_components" instead')
with pytest.warns(DeprecationWarning) as raised_warning:
mlkr.fit(X, y)
assert (str(raised_warning[0].message) == msg)
class TestMMC(MetricTestCase):
def test_iris(self):
# Generate full set of constraints for comparison with reference
# implementation
mask = self.iris_labels[None] == self.iris_labels[:, None]
a, b = np.nonzero(np.triu(mask, k=1))
c, d = np.nonzero(np.triu(~mask, k=1))
# Full metric
n_features = self.iris_points.shape[1]
mmc = MMC(convergence_threshold=0.01, init=np.eye(n_features) / 10)
mmc.fit(*wrap_pairs(self.iris_points, [a, b, c, d]))
expected = [[+0.000514, +0.000868, -0.001195, -0.001703],
[+0.000868, +0.001468, -0.002021, -0.002879],
[-0.001195, -0.002021, +0.002782, +0.003964],
[-0.001703, -0.002879, +0.003964, +0.005648]]
assert_array_almost_equal(expected, mmc.get_mahalanobis_matrix(),
decimal=6)
# Diagonal metric
mmc = MMC(diagonal=True)
mmc.fit(*wrap_pairs(self.iris_points, [a, b, c, d]))
expected = [0, 0, 1.210220, 1.228596]
assert_array_almost_equal(np.diag(expected), mmc.get_mahalanobis_matrix(),
decimal=6)
# Supervised Full
mmc = MMC_Supervised()
mmc.fit(self.iris_points, self.iris_labels)
csep = class_separation(mmc.transform(self.iris_points), self.iris_labels)
self.assertLess(csep, 0.15)
# Supervised Diagonal
mmc = MMC_Supervised(diagonal=True)
mmc.fit(self.iris_points, self.iris_labels)
csep = class_separation(mmc.transform(self.iris_points), self.iris_labels)
self.assertLess(csep, 0.2)
def test_deprecation_num_labeled(self):
# test that a deprecation message is thrown if num_labeled is set at
# initialization
# TODO: remove in v.0.6
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
mmc_supervised = MMC_Supervised(num_labeled=np.inf)
msg = ('"num_labeled" parameter is not used.'
' It has been deprecated in version 0.5.0 and will be'
' removed in 0.6.0')
assert_warns_message(DeprecationWarning, msg, mmc_supervised.fit, X, y)
def test_deprecation_A0(self):
# test that a deprecation message is thrown if A0 is set at
# initialization
# TODO: remove in v.0.6
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
mmc_supervised = MMC_Supervised(A0=np.ones_like(X))
msg = ('"A0" parameter is not used.'
' It has been deprecated in version 0.5.0 and will be'
'removed in 0.6.0. Use "init" instead.')
with pytest.warns(DeprecationWarning) as raised_warning:
mmc_supervised.fit(X, y)
assert any(msg == str(wrn.message) for wrn in raised_warning)
pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]])
y_pairs = [1, -1]
mmc = MMC(A0=np.ones_like(X))
with pytest.warns(DeprecationWarning) as raised_warning:
mmc.fit(pairs, y_pairs)
assert any(msg == str(wrn.message) for wrn in raised_warning)
def test_changed_behaviour_warning(self):
# test that a ChangedBehavior warning is thrown about the init, if the
# default parameters are used.
# TODO: remove in v.0.6
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
mmc_supervised = MMC_Supervised()
msg = ("Warning, no init was set (`init=None`). As of version 0.5.0, "
"the default init will now be set to 'identity', instead of the "
"identity divided by a scaling factor of 10. "
"If you still want to use the same init as in previous "
"versions, set init=np.eye(d)/10, where d is the dimension "
"of your input space (d=pairs.shape[1]). "
"This warning will disappear in v0.6.0, and `init` parameter's"
" default value will be set to 'auto'.")
with pytest.warns(ChangedBehaviorWarning) as raised_warning:
mmc_supervised.fit(X, y)
assert any(msg == str(wrn.message) for wrn in raised_warning)
pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]])
y_pairs = [1, -1]
mmc = MMC()
with pytest.warns(ChangedBehaviorWarning) as raised_warning:
mmc.fit(pairs, y_pairs)
assert any(msg == str(wrn.message) for wrn in raised_warning)
def test_deprecation_random_state(self):
# test that a deprecation message is thrown if random_state is set at
# fit time
# TODO: remove in v.0.6
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
mmc_supervised = MMC_Supervised()
msg = ('"random_state" parameter in the `fit` function is '
'deprecated. Set `random_state` at initialization '
'instead (when instantiating a new `MMC_Supervised` '
'object).')
with pytest.warns(DeprecationWarning) as raised_warning:
mmc_supervised.fit(X, y, random_state=np.random)
assert any(msg == str(wrn.message) for wrn in raised_warning)
def test_changed_behaviour_warning_random_state(self):
# test that a ChangedBehavior warning is thrown if the random_state is
# not set in fit.
# TODO: remove in v.0.6
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
mmc_supervised = MMC_Supervised()
msg = ('As of v0.5.0, `MMC_Supervised` now uses the '
'`random_state` given at initialization to sample '
'constraints, not the default `np.random` from the `fit` '
'method, since this argument is now deprecated. '
'This warning will disappear in v0.6.0.')
with pytest.warns(ChangedBehaviorWarning) as raised_warning:
mmc_supervised.fit(X, y)
assert any(msg == str(wrn.message) for wrn in raised_warning)
@pytest.mark.parametrize(('algo_class', 'dataset'),
[(NCA, make_classification()),
(MLKR, make_regression())])
def test_verbose(algo_class, dataset, capsys):
# assert there is proper output when verbose = True
X, y = dataset
model = algo_class(verbose=True)
model.fit(X, y)
out, _ = capsys.readouterr()
# check output
lines = re.split('\n+', out)
header = '{:>10} {:>20} {:>10}'.format('Iteration', 'Objective Value',
'Time(s)')
assert lines[0] == '[{}]'.format(algo_class.__name__)
assert lines[1] == '[{}] {}'.format(algo_class.__name__, header)
assert lines[2] == '[{}] {}'.format(algo_class.__name__, '-' * len(header))
for line in lines[3:-2]:
# The following regex will match for instance:
# '[NCA] 0 6.988936e+01 0.01'
assert re.match(r"\[" + algo_class.__name__ + r"\]\ *\d+\ *\d\.\d{6}e[+|-]"
r"\d+\ *\d+\.\d{2}", line)
assert re.match(r"\[" + algo_class.__name__ + r"\] Training took\ *"
r"\d+\.\d{2}s\.", lines[-2])
assert lines[-1] == ''
@pytest.mark.parametrize(('algo_class', 'dataset'),
[(NCA, make_classification()),
(MLKR, make_regression(n_features=10))])
def test_no_verbose(dataset, algo_class, capsys):
# assert by default there is no output (verbose=False)
X, y = dataset
model = algo_class()
model.fit(X, y)
out, _ = capsys.readouterr()
# check output
assert (out == '')
@pytest.mark.parametrize(('algo_class', 'dataset'),
[(NCA, make_classification()),
(MLKR, make_regression(n_features=10))])
def test_convergence_warning(dataset, algo_class):
X, y = dataset
model = algo_class(max_iter=2, verbose=True)
cls_name = model.__class__.__name__
assert_warns_message(ConvergenceWarning,
'[{}] {} did not converge'.format(cls_name, cls_name),
model.fit, X, y)
if __name__ == '__main__':
unittest.main()
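# --- Illustrative addition (a minimal sketch, not part of the original suite) ---
# `test_finite_differences` above relies on scipy.optimize's `check_grad`, which compares an
# analytic gradient with a forward-difference approximation and returns the norm of their
# difference. The same pattern on a plain quadratic f(w) = ||w||^2 (gradient 2w), reusing the
# module-level `np` and `check_grad` names already used by the tests above:
def test_check_grad_pattern_on_quadratic():
  def f(w):
    return np.dot(w, w)

  def grad_f(w):
    return 2 * w

  w0 = np.array([1., -2., 3.])
  # forward differences introduce only a tiny error on a quadratic, so this is ~0
  assert check_grad(f, grad_f, w0) < 1e-5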
|
# Generated by Django 2.2 on 2020-01-19 12:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('csv_to_table', '0003_auto_20200119_1405'),
]
operations = [
migrations.AlterField(
model_name='people',
name='date',
field=models.DateField(auto_now_add=True, verbose_name='Date'),
),
]
|
import json
config = 'config.json'
with open(config, 'r') as f:
data = json.load(f)
default = data["default"]
class AbstractCommand():
    def __init__(self, handler=None, description=None):
        # avoid the shared-mutable-default pitfall for the handler list
        self.handler = handler if handler is not None else []
        self.description = description
def hdl(self):
return self.handler
def dsc(self):
return self.description
@staticmethod
    async def ans_up(ans, m, att=None):
        # reply in upper-case when the first word of the incoming message is fully upper-case
        first_word = m.text.split(' ', 1)[0]
        up = first_word == first_word.upper()
        if ans != '':
            if up:
                await m.answer(default["prefix"] + ans.upper())
            else:
                await m.answer(ans)
            return True
        elif att is not None:
            if up:
                await m.answer(default["prefix"].upper(), attachment=att)
            else:
                await m.answer(attachment=att)
            return True
|
#!/usr/bin/env python
import functools
import glob
import logging
import os
import platform
import re
import shutil
import stat
import sys
import tempfile
import time
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import requests
from plugin import Plugin, PluginManager
from localstack import config
from localstack.config import dirs, is_env_true
from localstack.constants import (
DEFAULT_SERVICE_PORTS,
DYNAMODB_JAR_URL,
ELASTICMQ_JAR_URL,
ELASTICSEARCH_DEFAULT_VERSION,
ELASTICSEARCH_DELETE_MODULES,
ELASTICSEARCH_PLUGIN_LIST,
KMS_URL_PATTERN,
LOCALSTACK_MAVEN_VERSION,
MODULE_MAIN_PATH,
STS_JAR_URL,
)
from localstack.runtime import hooks
from localstack.utils.common import (
chmod_r,
download,
file_exists_not_empty,
get_arch,
get_os,
is_windows,
load_file,
mkdir,
new_tmp_file,
parallelize,
retry,
rm_rf,
run,
safe_run,
save_file,
untar,
unzip,
)
from localstack.utils.docker_utils import DOCKER_CLIENT
LOG = logging.getLogger(__name__)
INSTALL_DIR_NPM = "%s/node_modules" % MODULE_MAIN_PATH # FIXME: migrate to infra
INSTALL_DIR_DDB = "%s/dynamodb" % dirs.static_libs
INSTALL_DIR_KCL = "%s/amazon-kinesis-client" % dirs.static_libs
INSTALL_DIR_STEPFUNCTIONS = "%s/stepfunctions" % dirs.static_libs
INSTALL_DIR_KMS = "%s/kms" % dirs.static_libs
INSTALL_DIR_ELASTICMQ = "%s/elasticmq" % dirs.static_libs
INSTALL_PATH_LOCALSTACK_FAT_JAR = "%s/localstack-utils-fat.jar" % dirs.static_libs
INSTALL_PATH_DDB_JAR = os.path.join(INSTALL_DIR_DDB, "DynamoDBLocal.jar")
INSTALL_PATH_KCL_JAR = os.path.join(INSTALL_DIR_KCL, "aws-java-sdk-sts.jar")
INSTALL_PATH_STEPFUNCTIONS_JAR = os.path.join(INSTALL_DIR_STEPFUNCTIONS, "StepFunctionsLocal.jar")
INSTALL_PATH_KMS_BINARY_PATTERN = os.path.join(INSTALL_DIR_KMS, "local-kms.<arch>.bin")
INSTALL_PATH_ELASTICMQ_JAR = os.path.join(INSTALL_DIR_ELASTICMQ, "elasticmq-server.jar")
INSTALL_PATH_KINESALITE_CLI = os.path.join(INSTALL_DIR_NPM, "kinesalite", "cli.js")
INSTALL_PATH_KINESIS_MOCK = os.path.join(dirs.static_libs, "kinesis-mock")
URL_LOCALSTACK_FAT_JAR = (
"https://repo1.maven.org/maven2/"
+ "cloud/localstack/localstack-utils/{v}/localstack-utils-{v}-fat.jar"
).format(v=LOCALSTACK_MAVEN_VERSION)
MARKER_FILE_LIGHT_VERSION = "%s/.light-version" % dirs.static_libs
IMAGE_NAME_SFN_LOCAL = "amazon/aws-stepfunctions-local"
ARTIFACTS_REPO = "https://github.com/localstack/localstack-artifacts"
SFN_PATCH_URL_PREFIX = (
f"{ARTIFACTS_REPO}/raw/047cc6dcd2e31f5ff3ec52d293c61b875f606958/stepfunctions-local-patch"
)
SFN_PATCH_CLASS1 = "com/amazonaws/stepfunctions/local/runtime/Config.class"
SFN_PATCH_CLASS2 = (
"com/amazonaws/stepfunctions/local/runtime/executors/task/LambdaTaskStateExecutor.class"
)
SFN_PATCH_CLASS_STARTER = "cloud/localstack/StepFunctionsStarter.class"
SFN_PATCH_CLASS_REGION = "cloud/localstack/RegionAspect.class"
SFN_PATCH_FILE_METAINF = "META-INF/aop.xml"
# additional JAR libs required for multi-region and persistence (PRO only) support
MAVEN_REPO = "https://repo1.maven.org/maven2"
URL_ASPECTJRT = f"{MAVEN_REPO}/org/aspectj/aspectjrt/1.9.7/aspectjrt-1.9.7.jar"
URL_ASPECTJWEAVER = f"{MAVEN_REPO}/org/aspectj/aspectjweaver/1.9.7/aspectjweaver-1.9.7.jar"
JAR_URLS = [URL_ASPECTJRT, URL_ASPECTJWEAVER]
# kinesis-mock version
KINESIS_MOCK_VERSION = os.environ.get("KINESIS_MOCK_VERSION") or "0.2.0"
KINESIS_MOCK_RELEASE_URL = (
"https://api.github.com/repos/etspaceman/kinesis-mock/releases/tags/" + KINESIS_MOCK_VERSION
)
# debugpy module
DEBUGPY_MODULE = "debugpy"
DEBUGPY_DEPENDENCIES = ["gcc", "python3-dev", "musl-dev"]
# Target version for javac, to ensure compatibility with earlier JREs
JAVAC_TARGET_VERSION = "1.8"
# SQS backend implementation provider - either "moto" or "elasticmq"
SQS_BACKEND_IMPL = os.environ.get("SQS_PROVIDER") or "moto"
# GO Lambda runtime
GO_RUNTIME_VERSION = "0.4.0"
GO_RUNTIME_DOWNLOAD_URL_TEMPLATE = "https://github.com/localstack/awslamba-go-runtime/releases/download/v{version}/awslamba-go-runtime-{version}-{os}-{arch}.tar.gz"
GO_INSTALL_FOLDER = os.path.join(config.dirs.var_libs, "awslamba-go-runtime")
GO_LAMBDA_RUNTIME = os.path.join(GO_INSTALL_FOLDER, "aws-lambda-mock")
GO_LAMBDA_MOCKSERVER = os.path.join(GO_INSTALL_FOLDER, "mockserver")
# Terraform (used for tests whose templates require TF < 0.14.0)
TERRAFORM_VERSION = "0.13.7"
TERRAFORM_URL_TEMPLATE = (
"https://releases.hashicorp.com/terraform/{version}/terraform_{version}_{os}_{arch}.zip"
)
TERRAFORM_BIN = os.path.join(dirs.static_libs, f"terraform-{TERRAFORM_VERSION}", "terraform")
# Java Test Jar Download (used for tests)
TEST_LAMBDA_JAVA = os.path.join(config.dirs.var_libs, "localstack-utils-tests.jar")
MAVEN_BASE_URL = "https://repo.maven.apache.org/maven2"
TEST_LAMBDA_JAR_URL = "{url}/cloud/localstack/{name}/{version}/{name}-{version}-tests.jar".format(
version=LOCALSTACK_MAVEN_VERSION, url=MAVEN_BASE_URL, name="localstack-utils"
)
def get_elasticsearch_install_version(version: str) -> str:
from localstack.services.es import versions
if config.SKIP_INFRA_DOWNLOADS:
return ELASTICSEARCH_DEFAULT_VERSION
return versions.get_install_version(version)
def get_elasticsearch_install_dir(version: str) -> str:
version = get_elasticsearch_install_version(version)
if version == ELASTICSEARCH_DEFAULT_VERSION and not os.path.exists(MARKER_FILE_LIGHT_VERSION):
# install the default version into a subfolder of the code base
install_dir = os.path.join(dirs.static_libs, "elasticsearch")
else:
# put all other versions into the TMP_FOLDER
install_dir = os.path.join(config.dirs.tmp, "elasticsearch", version)
return install_dir
def install_elasticsearch(version=None):
from localstack.services.es import versions
if not version:
version = ELASTICSEARCH_DEFAULT_VERSION
version = get_elasticsearch_install_version(version)
install_dir = get_elasticsearch_install_dir(version)
installed_executable = os.path.join(install_dir, "bin", "elasticsearch")
if not os.path.exists(installed_executable):
log_install_msg("Elasticsearch (%s)" % version)
es_url = versions.get_download_url(version)
install_dir_parent = os.path.dirname(install_dir)
mkdir(install_dir_parent)
# download and extract archive
tmp_archive = os.path.join(config.dirs.tmp, "localstack.%s" % os.path.basename(es_url))
download_and_extract_with_retry(es_url, tmp_archive, install_dir_parent)
elasticsearch_dir = glob.glob(os.path.join(install_dir_parent, "elasticsearch*"))
if not elasticsearch_dir:
raise Exception("Unable to find Elasticsearch folder in %s" % install_dir_parent)
shutil.move(elasticsearch_dir[0], install_dir)
for dir_name in ("data", "logs", "modules", "plugins", "config/scripts"):
dir_path = os.path.join(install_dir, dir_name)
mkdir(dir_path)
chmod_r(dir_path, 0o777)
# install default plugins
for plugin in ELASTICSEARCH_PLUGIN_LIST:
plugin_binary = os.path.join(install_dir, "bin", "elasticsearch-plugin")
plugin_dir = os.path.join(install_dir, "plugins", plugin)
if not os.path.exists(plugin_dir):
LOG.info("Installing Elasticsearch plugin %s", plugin)
def try_install():
safe_run([plugin_binary, "install", "-b", plugin])
# We're occasionally seeing javax.net.ssl.SSLHandshakeException -> add download retries
download_attempts = 3
try:
retry(try_install, retries=download_attempts - 1, sleep=2)
except Exception:
LOG.warning(
"Unable to download Elasticsearch plugin '%s' after %s attempts",
plugin,
download_attempts,
)
if not os.environ.get("IGNORE_ES_DOWNLOAD_ERRORS"):
raise
# delete some plugins to free up space
for plugin in ELASTICSEARCH_DELETE_MODULES:
module_dir = os.path.join(install_dir, "modules", plugin)
rm_rf(module_dir)
# disable x-pack-ml plugin (not working on Alpine)
xpack_dir = os.path.join(install_dir, "modules", "x-pack-ml", "platform")
rm_rf(xpack_dir)
# patch JVM options file - replace hardcoded heap size settings
jvm_options_file = os.path.join(install_dir, "config", "jvm.options")
if os.path.exists(jvm_options_file):
jvm_options = load_file(jvm_options_file)
jvm_options_replaced = re.sub(
r"(^-Xm[sx][a-zA-Z0-9\.]+$)", r"# \1", jvm_options, flags=re.MULTILINE
)
if jvm_options != jvm_options_replaced:
save_file(jvm_options_file, jvm_options_replaced)
def install_sqs_provider():
if SQS_BACKEND_IMPL == "elasticmq":
install_elasticmq()
def install_elasticmq():
# TODO remove this function if we stop using ElasticMQ entirely
if not os.path.exists(INSTALL_PATH_ELASTICMQ_JAR):
log_install_msg("ElasticMQ")
mkdir(INSTALL_DIR_ELASTICMQ)
# download archive
tmp_archive = os.path.join(config.dirs.tmp, "elasticmq-server.jar")
if not os.path.exists(tmp_archive):
download(ELASTICMQ_JAR_URL, tmp_archive)
shutil.copy(tmp_archive, INSTALL_DIR_ELASTICMQ)
def install_kinesis():
if config.KINESIS_PROVIDER == "kinesalite":
return install_kinesalite()
elif config.KINESIS_PROVIDER == "kinesis-mock":
return install_kinesis_mock()
else:
raise ValueError("unknown kinesis provider %s" % config.KINESIS_PROVIDER)
def install_kinesalite():
if not os.path.exists(INSTALL_PATH_KINESALITE_CLI):
log_install_msg("Kinesis")
run('cd "%s" && npm install' % MODULE_MAIN_PATH)
def install_kinesis_mock():
target_dir = INSTALL_PATH_KINESIS_MOCK
machine = platform.machine().lower()
system = platform.system().lower()
version = platform.version().lower()
is_probably_m1 = system == "darwin" and ("arm64" in version or "arm32" in version)
LOG.debug("getting kinesis-mock for %s %s", system, machine)
if is_env_true("KINESIS_MOCK_FORCE_JAVA"):
        # sometimes the static binaries may have problems, and we want to fall back to Java
bin_file = "kinesis-mock.jar"
elif (machine == "x86_64" or machine == "amd64") and not is_probably_m1:
if system == "windows":
bin_file = "kinesis-mock-mostly-static.exe"
elif system == "linux":
bin_file = "kinesis-mock-linux-amd64-static"
elif system == "darwin":
bin_file = "kinesis-mock-macos-amd64-dynamic"
else:
bin_file = "kinesis-mock.jar"
else:
bin_file = "kinesis-mock.jar"
bin_file_path = os.path.join(target_dir, bin_file)
if os.path.exists(bin_file_path):
LOG.debug("kinesis-mock found at %s", bin_file_path)
return bin_file_path
response = requests.get(KINESIS_MOCK_RELEASE_URL)
if not response.ok:
raise ValueError(
"Could not get list of releases from %s: %s" % (KINESIS_MOCK_RELEASE_URL, response.text)
)
github_release = response.json()
download_url = None
for asset in github_release.get("assets", []):
# find the correct binary in the release
if asset["name"] == bin_file:
download_url = asset["browser_download_url"]
break
if download_url is None:
raise ValueError(
"could not find required binary %s in release %s" % (bin_file, KINESIS_MOCK_RELEASE_URL)
)
mkdir(target_dir)
LOG.info("downloading kinesis-mock binary from %s", download_url)
download(download_url, bin_file_path)
chmod_r(bin_file_path, 0o777)
return bin_file_path
def install_local_kms():
local_arch = get_os()
binary_path = INSTALL_PATH_KMS_BINARY_PATTERN.replace("<arch>", local_arch)
if not os.path.exists(binary_path):
log_install_msg("KMS")
mkdir(INSTALL_DIR_KMS)
# TODO ARM download platform specific binary
kms_url = KMS_URL_PATTERN.replace("<arch>", local_arch)
download(kms_url, binary_path)
chmod_r(binary_path, 0o777)
def install_stepfunctions_local():
if not os.path.exists(INSTALL_PATH_STEPFUNCTIONS_JAR):
# pull the JAR file from the Docker image, which is more up-to-date than the downloadable JAR file
# TODO: works only when running on the host, outside of Docker -> add a fallback if running in Docker?
log_install_msg("Step Functions")
mkdir(INSTALL_DIR_STEPFUNCTIONS)
DOCKER_CLIENT.pull_image(IMAGE_NAME_SFN_LOCAL)
docker_name = "tmp-ls-sfn"
DOCKER_CLIENT.run_container(
IMAGE_NAME_SFN_LOCAL,
remove=True,
entrypoint="",
name=docker_name,
detach=True,
command=["sleep", "15"],
)
time.sleep(5)
DOCKER_CLIENT.copy_from_container(
docker_name, local_path=dirs.static_libs, container_path="/home/stepfunctionslocal/"
)
path = Path(f"{dirs.static_libs}/stepfunctionslocal/")
for file in path.glob("*.jar"):
file.rename(Path(INSTALL_DIR_STEPFUNCTIONS) / file.name)
rm_rf("%s/stepfunctionslocal" % dirs.static_libs)
classes = [
SFN_PATCH_CLASS1,
SFN_PATCH_CLASS2,
SFN_PATCH_CLASS_REGION,
SFN_PATCH_CLASS_STARTER,
SFN_PATCH_FILE_METAINF,
]
for patch_class in classes:
patch_url = f"{SFN_PATCH_URL_PREFIX}/{patch_class}"
add_file_to_jar(patch_class, patch_url, target_jar=INSTALL_PATH_STEPFUNCTIONS_JAR)
# special case for Manifest file - extract first, replace content, then update in JAR file
manifest_file = os.path.join(INSTALL_DIR_STEPFUNCTIONS, "META-INF", "MANIFEST.MF")
if not os.path.exists(manifest_file):
content = run(["unzip", "-p", INSTALL_PATH_STEPFUNCTIONS_JAR, "META-INF/MANIFEST.MF"])
content = re.sub(
"Main-Class: .+", "Main-Class: cloud.localstack.StepFunctionsStarter", content
)
classpath = " ".join([os.path.basename(jar) for jar in JAR_URLS])
content = re.sub(r"Class-Path: \. ", f"Class-Path: {classpath} . ", content)
save_file(manifest_file, content)
run(
["zip", INSTALL_PATH_STEPFUNCTIONS_JAR, "META-INF/MANIFEST.MF"],
cwd=INSTALL_DIR_STEPFUNCTIONS,
)
# download additional jar libs
for jar_url in JAR_URLS:
target = os.path.join(INSTALL_DIR_STEPFUNCTIONS, os.path.basename(jar_url))
if not file_exists_not_empty(target):
download(jar_url, target)
def add_file_to_jar(class_file, class_url, target_jar, base_dir=None):
base_dir = base_dir or os.path.dirname(target_jar)
patch_class_file = os.path.join(base_dir, class_file)
if not os.path.exists(patch_class_file):
download(class_url, patch_class_file)
run(["zip", target_jar, class_file], cwd=base_dir)
def install_dynamodb_local():
if not os.path.exists(INSTALL_PATH_DDB_JAR):
log_install_msg("DynamoDB")
# download and extract archive
tmp_archive = os.path.join(tempfile.gettempdir(), "localstack.ddb.zip")
download_and_extract_with_retry(DYNAMODB_JAR_URL, tmp_archive, INSTALL_DIR_DDB)
# fix logging configuration for DynamoDBLocal
log4j2_config = """<Configuration status="WARN">
<Appenders>
<Console name="Console" target="SYSTEM_OUT">
<PatternLayout pattern="%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n"/>
</Console>
</Appenders>
<Loggers>
<Root level="WARN"><AppenderRef ref="Console"/></Root>
</Loggers>
</Configuration>"""
log4j2_file = os.path.join(INSTALL_DIR_DDB, "log4j2.xml")
save_file(log4j2_file, log4j2_config)
run('cd "%s" && zip -u DynamoDBLocal.jar log4j2.xml || true' % INSTALL_DIR_DDB)
def install_amazon_kinesis_client_libs():
# install KCL/STS JAR files
if not os.path.exists(INSTALL_PATH_KCL_JAR):
mkdir(INSTALL_DIR_KCL)
tmp_archive = os.path.join(tempfile.gettempdir(), "aws-java-sdk-sts.jar")
if not os.path.exists(tmp_archive):
download(STS_JAR_URL, tmp_archive)
shutil.copy(tmp_archive, INSTALL_DIR_KCL)
# Compile Java files
from localstack.utils.kinesis import kclipy_helper
classpath = kclipy_helper.get_kcl_classpath()
    # on Windows, convert the ':' classpath separators to ';' (keeping drive-letter colons
    # such as "C:\", which are followed by a backslash)
    if is_windows():
        classpath = re.sub(r":([^\\])", r";\1", classpath)
java_files = "%s/utils/kinesis/java/cloud/localstack/*.java" % MODULE_MAIN_PATH
class_files = "%s/utils/kinesis/java/cloud/localstack/*.class" % MODULE_MAIN_PATH
if not glob.glob(class_files):
run(
'javac -source %s -target %s -cp "%s" %s'
% (JAVAC_TARGET_VERSION, JAVAC_TARGET_VERSION, classpath, java_files)
)
def install_lambda_java_libs():
# install LocalStack "fat" JAR file (contains all dependencies)
if not os.path.exists(INSTALL_PATH_LOCALSTACK_FAT_JAR):
log_install_msg("LocalStack Java libraries", verbatim=True)
download(URL_LOCALSTACK_FAT_JAR, INSTALL_PATH_LOCALSTACK_FAT_JAR)
def install_lambda_java_testlibs():
# Download the LocalStack Utils Test jar file from the maven repo
if not os.path.exists(TEST_LAMBDA_JAVA):
mkdir(os.path.dirname(TEST_LAMBDA_JAVA))
download(TEST_LAMBDA_JAR_URL, TEST_LAMBDA_JAVA)
def install_go_lambda_runtime():
if os.path.isfile(GO_LAMBDA_RUNTIME):
return
log_install_msg("Installing golang runtime")
system = platform.system().lower()
arch = get_arch()
if system not in ["linux"]:
raise ValueError("unsupported os %s for awslambda-go-runtime" % system)
if arch not in ["amd64", "arm64"]:
raise ValueError("unsupported arch %s for awslambda-go-runtime" % arch)
url = GO_RUNTIME_DOWNLOAD_URL_TEMPLATE.format(
version=GO_RUNTIME_VERSION,
os=system,
arch=arch,
)
download_and_extract(url, GO_INSTALL_FOLDER)
st = os.stat(GO_LAMBDA_RUNTIME)
os.chmod(GO_LAMBDA_RUNTIME, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
st = os.stat(GO_LAMBDA_MOCKSERVER)
os.chmod(GO_LAMBDA_MOCKSERVER, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
def install_cloudformation_libs():
from localstack.services.cloudformation import deployment_utils
# trigger download of CF module file
deployment_utils.get_cfn_response_mod_file()
def install_terraform() -> str:
if os.path.isfile(TERRAFORM_BIN):
return TERRAFORM_BIN
log_install_msg(f"Installing terraform {TERRAFORM_VERSION}")
system = platform.system().lower()
arch = get_arch()
url = TERRAFORM_URL_TEMPLATE.format(version=TERRAFORM_VERSION, os=system, arch=arch)
download_and_extract(url, os.path.dirname(TERRAFORM_BIN))
chmod_r(TERRAFORM_BIN, 0o777)
return TERRAFORM_BIN
def get_terraform_binary() -> str:
if not os.path.isfile(TERRAFORM_BIN):
install_terraform()
return TERRAFORM_BIN
def install_component(name):
installer = installers.get(name)
if installer:
installer()
def install_components(names):
parallelize(install_component, names)
install_lambda_java_libs()
def install_all_components():
# install dependencies - make sure that install_components(..) is called before hooks.install below!
install_components(DEFAULT_SERVICE_PORTS.keys())
hooks.install.run()
def install_debugpy_and_dependencies():
try:
import debugpy
assert debugpy
logging.debug("Debugpy module already Installed")
except ModuleNotFoundError:
logging.debug("Installing Debugpy module")
import pip
if hasattr(pip, "main"):
pip.main(["install", DEBUGPY_MODULE])
else:
pip._internal.main(["install", DEBUGPY_MODULE])
# -----------------
# HELPER FUNCTIONS
# -----------------
def log_install_msg(component, verbatim=False):
component = component if verbatim else "local %s server" % component
LOG.info("Downloading and installing %s. This may take some time.", component)
def download_and_extract(archive_url, target_dir, retries=0, sleep=3, tmp_archive=None):
mkdir(target_dir)
if tmp_archive:
_, ext = os.path.splitext(tmp_archive)
else:
_, ext = os.path.splitext(archive_url)
tmp_archive = tmp_archive or new_tmp_file()
if not os.path.exists(tmp_archive) or os.path.getsize(tmp_archive) <= 0:
# create temporary placeholder file, to avoid duplicate parallel downloads
save_file(tmp_archive, "")
for i in range(retries + 1):
try:
download(archive_url, tmp_archive)
break
except Exception:
time.sleep(sleep)
if ext == ".zip":
unzip(tmp_archive, target_dir)
elif ext == ".gz" or ext == ".bz2":
untar(tmp_archive, target_dir)
else:
raise Exception("Unsupported archive format: %s" % ext)
def download_and_extract_with_retry(archive_url, tmp_archive, target_dir):
try:
download_and_extract(archive_url, target_dir, tmp_archive=tmp_archive)
except Exception as e:
# try deleting and re-downloading the zip file
LOG.info("Unable to extract file, re-downloading ZIP archive %s: %s", tmp_archive, e)
rm_rf(tmp_archive)
download_and_extract(archive_url, target_dir, tmp_archive=tmp_archive)
# kept here for backwards compatibility (installed on "make init" - TODO should be removed)
installers = {
"cloudformation": install_cloudformation_libs,
"dynamodb": install_dynamodb_local,
"kinesis": install_kinesis,
"kms": install_local_kms,
"sqs": install_sqs_provider,
"stepfunctions": install_stepfunctions_local,
}
Installer = Tuple[str, Callable]
class InstallerRepository(Plugin):
namespace = "localstack.installer"
def get_installer(self) -> List[Installer]:
raise NotImplementedError
class CommunityInstallerRepository(InstallerRepository):
name = "community"
def get_installer(self) -> List[Installer]:
return [
("awslamba-go-runtime", install_go_lambda_runtime),
("cloudformation-libs", install_cloudformation_libs),
("dynamodb-local", install_dynamodb_local),
("elasticmq", install_elasticmq),
("elasticsearch", install_elasticsearch),
("kinesalite", install_kinesalite),
("kinesis-client-libs", install_amazon_kinesis_client_libs),
("kinesis-mock", install_kinesis_mock),
("lambda-java-libs", install_lambda_java_libs),
("local-kms", install_local_kms),
("stepfunctions-local", install_stepfunctions_local),
("terraform", install_terraform),
]
class InstallerManager:
def __init__(self):
self.repositories: PluginManager[InstallerRepository] = PluginManager(
InstallerRepository.namespace
)
@functools.lru_cache()
def get_installers(self) -> Dict[str, Callable]:
installer: List[Installer] = []
for repo in self.repositories.load_all():
installer.extend(repo.get_installer())
return dict(installer)
def install(self, package: str, *args, **kwargs):
installer = self.get_installers().get(package)
if not installer:
raise ValueError("no installer for package %s" % package)
return installer(*args, **kwargs)
def main():
if len(sys.argv) > 1:
# set test API key so pro install hooks are called
os.environ["LOCALSTACK_API_KEY"] = os.environ.get("LOCALSTACK_API_KEY") or "test"
if sys.argv[1] == "libs":
print("Initializing installation.")
logging.basicConfig(level=logging.INFO)
logging.getLogger("requests").setLevel(logging.WARNING)
install_all_components()
if sys.argv[1] in ("libs", "testlibs"):
# Install additional libraries for testing
install_amazon_kinesis_client_libs()
install_lambda_java_testlibs()
print("Done.")
if __name__ == "__main__":
main()
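# Usage sketch (assuming this module is executed directly as a script; the file name below is
# only a placeholder):
#   python install.py libs       # install all default service components, then the Kinesis
#                                # client libs and the Java test JAR
#   python install.py testlibs   # install only the Kinesis client libs and the Java test JAR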
|
"""
Main script: annotates a directory of XML recipe files.
"""
import glob
import re
import os
from oper_utils import xml_to_recipe_annotated
from Ner_classifieur_annote import load_crf_model, predict_text, transform_to_xml_annote
from NER_ingredient_detector import get_content_from_xmlfile
from ComplexCalculator import ComplexCalculator
modelpath = "../ml_models/model-20210515.pkl"
ner_clf = load_crf_model(modelpath)
def annote_with_crf(filename, ner_clf):
"""
    Annotate the file with the CRF model; return the recipe text as an annotated string.
"""
ingredients, text_recette = get_content_from_xmlfile(filename)
liste = predict_text(text_recette,ner_clf)
text_after = transform_to_xml_annote(liste)
return text_after
def transform_doc_to_xml(doc):
text_after = []
for token in doc:
if token.ent_iob_ == "O":
text_after.append(token.text)
elif token.ent_iob_ == "B" and token.i == doc[-1].i:
text_after.append(f'<{token.ent_type_} id="{token.ent_kb_id_ + token.ent_id_}">' + token.text + f"</{token.ent_type_}>")
elif token.ent_iob_ == "B" and doc[token.i+1].ent_iob_ == "I":
text_after.append(f'<{token.ent_type_} id="{token.ent_kb_id_ + token.ent_id_}">' + token.text)
elif token.ent_iob_ == "B" and doc[token.i+1].ent_iob_ != "I":
text_after.append(f'<{token.ent_type_} id="{token.ent_kb_id_ + token.ent_id_}">' + token.text + f"</{token.ent_type_}>")
elif token.ent_iob_ == "I" and token.i == doc[-1].i:
text_after.append(token.text + f"</{token.ent_type_}>")
elif token.ent_iob_ == "I" and doc[token.i+1].ent_iob_ == "I":
text_after.append(token.text)
elif token.ent_iob_ == "I" and doc[token.i+1].ent_iob_ != "I":
text_after.append(token.text + f"</{token.ent_type_}>")
text_after = " ".join(text_after)
text_after = re.sub("' ", "'", text_after)
text_after = re.sub(r" (,|\.)", "\\1", text_after)
return text_after
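# Reading aid for transform_doc_to_xml (comments only; the entity label "operation" below is
# an assumed example, not a guaranteed label): a token tagged "B" opens an inline tag, the
# following "I" tokens stay inside it, and the tag is closed on the entity's last token
# (immediately, when the entity is a single token). Tokens tagged B-operation I-operation
# around "faire revenir" would therefore come out roughly as:
#   <operation id="...">faire revenir</operation>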
def parcours_corpus_annote(corpus_path, output_dir, liste=False):
if not liste:
fics = glob.glob(f"{corpus_path}\*.xml")
# fics = glob.glob(f"{corpus_path}{os.sep}*.xml")
else:
fics = corpus_path
for fic in fics:
try:
fic_name = fic.split(f'{os.sep}')[-1]
recette_annote_crf = annote_with_crf(fic, ner_clf)
recette_doc_spacy, dico_ingreds, dico_opers = xml_to_recipe_annotated(fic)
recette_annote_rules = transform_doc_to_xml(recette_doc_spacy)
calculator = ComplexCalculator(dico_ingreds, dico_opers)
complex_temps = calculator.get_O_temps()
complex_espace = calculator.O_espace_f()
ingreds = dico_ingreds_to_xml(dico_ingreds)
opers = dico_opers_to_xml(dico_opers)
            # add the annotations to the XML file
with open(fic,encoding="utf8") as f:
xml_text = f.read()
recette_xml_rules = '\n <annotation methode="symbolique">\n '+ recette_annote_rules + '\n </annotation>'
recette_xml_crf = '\n <annotation methode="crf">\n '+ recette_annote_crf + '\n </annotation>'
            complexite_t = '\n <complexite>\n <temps>' + complex_temps + '</temps>\n </complexite>'
            complexite_e = '\n <complexite>\n <espace>' + complex_espace + '</espace>\n </complexite>'
xml_text = re.sub("(</preparation>)", "\\1" + recette_xml_rules + recette_xml_crf + complexite_t + complexite_e + ingreds + opers, xml_text)
with open(output_dir + os.sep + fic_name, "w", encoding="utf8") as f:
f.write(xml_text)
        except Exception as e:
            print(f"Problem encountered with {fic}: {e}")
def dico_ingreds_to_xml(dico_ingreds):
liste = []
for ingred in dico_ingreds.values():
formate = f'ingredient:{ingred["ingredient"]}\t id:{ingred["id"]}\t quantité:{ingred["quantite"]}\t unité:{ingred["unit"]}\t denombrable:{ingred["denombrable"]}\t recipient:{ingred["recipient"]}\n'
liste.append(formate)
liste = "".join(liste)
liste = "\n<ingredients_trouve>\n<![CDATA[\n" + liste + "]]>\n</ingredients_trouve>"
return liste
def dico_opers_to_xml(dico_opers):
liste = []
for oper_id,oper in dico_opers.items():
        formate = f'operation:{oper["action"]}\t id:{oper_id}\t ingrédients_relatifs:{oper["ingreds"]}\t nombre_opération_atomique:{oper["nb_oper"]}\t temps:{oper["temps"]}\t recipient:{oper["recipient"]}\n'
liste.append(formate)
liste = "".join(liste)
liste = "\n<operation_trouve>\n<![CDATA[\n" + liste + "]]>\n</operation_trouve>"
return liste
if __name__ == "__main__":
corpus_path = "../corpus_recettes/corpus_for_final"
output = "../corpus_recettes/out_put"
parcours_corpus_annote(corpus_path, output)
|
from setuptools import setup
setup(name='lognotify',
version='0.1',
py_modules = ['lognotify'],
description='A real-time log monitoring & notification utility which pops up a notification (while running your application) whenever it sees an error in log-file.',
url='http://github.com/shashank-ssriva',
author='Shashank Srivastava',
license='MIT',
entry_points={
'console_scripts':[
'lognotify = lognotify.app:main'
]
},
zip_safe=False)
|
import cv2
import numpy as np
# Load image, grayscale, Otsu's threshold
image = cv2.imread('1.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# Remove text
cnts = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
area = cv2.contourArea(c)
if area < 1000:
cv2.drawContours(thresh, [c], -1, 0, -1)
thresh = 255 - thresh
result = cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR)
coordinates = []
# Find rectangular boxes and obtain centroid coordinates
cnts = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
area = cv2.contourArea(c)
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.05 * peri, True)
if len(approx) == 4 and area < 100000:
# cv2.drawContours(result, [c], -1, (36,255,12), 1)
        # centroid from image moments: cx = M10/M00, cy = M01/M00
        M = cv2.moments(c)
        cx = int(M['m10']/M['m00'])
        cy = int(M['m01']/M['m00'])
        coordinates.append((cx, cy))
        cv2.circle(result, (cx, cy), 3, (36,255,12), -1)
        cv2.putText(result, '({}, {})'.format(cx, cy), (cx - 40, cy - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (36,255,12), 2)
print(coordinates)
cv2.imshow('thresh', thresh)
cv2.imshow('image', image)
cv2.imshow('result', result)
cv2.waitKey()
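# --- Optional sanity check (illustrative; the rectangle below is synthetic, not from '1.png') ---
# The centroid formula used above (cx = M10/M00, cy = M01/M00) can be verified on a filled
# rectangle whose centre is known in advance:
box = np.zeros((100, 100), dtype=np.uint8)
cv2.rectangle(box, (10, 20), (50, 40), 255, -1)
res = cv2.findContours(box, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
box_cnts = res[0] if len(res) == 2 else res[1]
m = cv2.moments(box_cnts[0])
print(int(m['m10'] / m['m00']), int(m['m01'] / m['m00']))  # approximately (30, 30)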
|
import os
import sys
import requests
from collections import OrderedDict
import conans
from conans import __version__ as client_version
from conans.client.cmd.create import create
from conans.client.hook_manager import HookManager
from conans.client.recorder.action_recorder import ActionRecorder
from conans.client.client_cache import ClientCache
from conans.client.conf import MIN_SERVER_COMPATIBLE_VERSION, ConanClientConfigParser
from conans.client.manager import ConanManager
from conans.client.migrations import ClientMigrator
from conans.client.output import ConanOutput, ScopedOutput
from conans.client.profile_loader import read_profile, profile_from_args, \
read_conaninfo_profile
from conans.client.recorder.search_recorder import SearchRecorder
from conans.client.recorder.upload_recoder import UploadRecorder
from conans.client.remote_manager import RemoteManager
from conans.client.remote_registry import RemoteRegistry
from conans.client.rest.auth_manager import ConanApiAuthManager
from conans.client.rest.rest_client import RestApiClient
from conans.client.rest.conan_requester import ConanRequester
from conans.client.rest.version_checker import VersionCheckerRequester
from conans.client.runner import ConanRunner
from conans.client.store.localdb import LocalDB
from conans.client.cmd.test import PackageTester
from conans.client.userio import UserIO
from conans.errors import ConanException
from conans.model.ref import ConanFileReference, PackageReference, check_valid_ref
from conans.model.version import Version
from conans.paths import get_conan_user_home, CONANINFO, BUILD_INFO
from conans.util.env_reader import get_env
from conans.util.files import save_files, exception_message_safe, mkdir
from conans.util.log import configure_logger
from conans.util.tracer import log_command, log_exception
from conans.tools import set_global_instances
from conans.client.cmd.uploader import CmdUpload
from conans.client.cmd.profile import cmd_profile_update, cmd_profile_get,\
cmd_profile_delete_key, cmd_profile_create, cmd_profile_list
from conans.client.cmd.search import Search
from conans.client.cmd.user import users_clean, users_list, user_set
from conans.client.importer import undo_imports, run_imports
from conans.client.cmd.export import cmd_export, export_alias, export_source, export_recipe
from conans.unicode import get_cwd
from conans.client.remover import ConanRemover
from conans.client.cmd.download import download
from conans.model.workspace import Workspace
from conans.client.graph.graph_manager import GraphManager
from conans.client.loader import ConanFileLoader
from conans.client.graph.proxy import ConanProxy
from conans.client.graph.python_requires import ConanPythonRequire
from conans.client.graph.range_resolver import RangeResolver
from conans.client import packager
from conans.client.source import config_source_local
from conans.client.cmd.build import build
from conans.client.cmd.export_pkg import export_pkg
from conans.client import tools
default_manifest_folder = '.conan_manifests'
def get_request_timeout():
timeout = os.getenv("CONAN_REQUEST_TIMEOUT")
try:
return float(timeout) if timeout is not None else None
except ValueError:
raise ConanException("Specify a numeric parameter for 'request_timeout'")
def get_basic_requester(client_cache):
requester = requests.Session()
# Manage the verify and the client certificates and setup proxies
return ConanRequester(requester, client_cache, get_request_timeout())
def api_method(f):
def wrapper(*args, **kwargs):
the_self = args[0]
try:
curdir = get_cwd()
log_command(f.__name__, kwargs)
with tools.environment_append(the_self._client_cache.conan_config.env_vars):
# Patch the globals in tools
return f(*args, **kwargs)
except Exception as exc:
msg = exception_message_safe(exc)
try:
log_exception(exc, msg)
except BaseException:
pass
raise
finally:
os.chdir(curdir)
return wrapper
def _make_abs_path(path, cwd=None, default=None):
"""convert 'path' to absolute if necessary (could be already absolute)
if not defined (empty, or None), will return 'default' one or 'cwd'
"""
cwd = cwd or get_cwd()
if not path:
abs_path = default or cwd
elif os.path.isabs(path):
abs_path = path
else:
abs_path = os.path.normpath(os.path.join(cwd, path))
return abs_path
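# Example behaviour of _make_abs_path on POSIX-style paths (illustrative values only):
#   _make_abs_path("sub", cwd="/base")                 -> "/base/sub"
#   _make_abs_path("/already/abs", cwd="/base")        -> "/already/abs"
#   _make_abs_path(None, cwd="/base")                  -> "/base"
#   _make_abs_path(None, cwd="/base", default="/dflt") -> "/dflt"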
def _get_conanfile_path(path, cwd, py):
"""
    :param py: True -> must be a .py file, False -> must be a .txt file, None -> try .py first, then .txt
"""
candidate_paths = list()
path = _make_abs_path(path, cwd)
if os.path.isdir(path): # Can be a folder
if py:
path = os.path.join(path, "conanfile.py")
candidate_paths.append(path)
elif py is False:
path = os.path.join(path, "conanfile.txt")
candidate_paths.append(path)
else:
path_py = os.path.join(path, "conanfile.py")
candidate_paths.append(path_py)
if os.path.exists(path_py):
path = path_py
else:
path = os.path.join(path, "conanfile.txt")
candidate_paths.append(path)
else:
candidate_paths.append(path)
if not os.path.isfile(path): # Must exist
raise ConanException("Conanfile not found at %s" % " or ".join(candidate_paths))
if py and not path.endswith(".py"):
raise ConanException("A conanfile.py is needed, " + path + " is not acceptable")
return path
class ConanAPIV1(object):
@staticmethod
def instance_remote_manager(requester, client_cache, user_io, _client_version,
min_server_compatible_version, hook_manager):
# Verify client version against remotes
version_checker_req = VersionCheckerRequester(requester, _client_version,
min_server_compatible_version,
user_io.out)
# To handle remote connections
put_headers = client_cache.read_put_headers()
rest_api_client = RestApiClient(user_io.out, requester=version_checker_req,
put_headers=put_headers)
# To store user and token
localdb = LocalDB(client_cache.localdb)
# Wraps RestApiClient to add authentication support (same interface)
auth_manager = ConanApiAuthManager(rest_api_client, user_io, localdb)
# Handle remote connections
remote_manager = RemoteManager(client_cache, auth_manager, user_io.out, hook_manager)
return localdb, rest_api_client, remote_manager
@staticmethod
def factory(interactive=None):
"""Factory"""
# Respect color env setting or check tty if unset
color_set = "CONAN_COLOR_DISPLAY" in os.environ
if ((color_set and get_env("CONAN_COLOR_DISPLAY", 1))
or (not color_set
and hasattr(sys.stdout, "isatty")
and sys.stdout.isatty())):
import colorama
if get_env("PYCHARM_HOSTED"): # in PyCharm disable convert/strip
colorama.init(convert=False, strip=False)
else:
colorama.init()
color = True
else:
color = False
out = ConanOutput(sys.stdout, color)
user_io = UserIO(out=out)
try:
user_home = get_conan_user_home()
client_cache = migrate_and_get_client_cache(user_home, out)
sys.path.append(os.path.join(user_home, "python"))
except Exception as e:
out.error(str(e))
raise
with tools.environment_append(client_cache.conan_config.env_vars):
            # Adjust CONAN_LOGGING_LEVEL with the env value that was read
conans.util.log.logger = configure_logger()
# Create Hook Manager
hook_manager = HookManager(client_cache.hooks_path, get_env("CONAN_HOOKS", list()),
user_io.out)
# Get the new command instance after migrations have been done
requester = get_basic_requester(client_cache)
_, _, remote_manager = ConanAPIV1.instance_remote_manager(
requester,
client_cache, user_io,
Version(client_version),
Version(MIN_SERVER_COMPATIBLE_VERSION),
hook_manager)
# Adjust global tool variables
set_global_instances(out, requester)
# Settings preprocessor
if interactive is None:
interactive = not get_env("CONAN_NON_INTERACTIVE", False)
conan = ConanAPIV1(client_cache, user_io, get_conan_runner(), remote_manager,
hook_manager, interactive=interactive)
return conan, client_cache, user_io
def __init__(self, client_cache, user_io, runner, remote_manager, hook_manager,
interactive=True):
assert isinstance(user_io, UserIO)
assert isinstance(client_cache, ClientCache)
self._client_cache = client_cache
self._user_io = user_io
self._runner = runner
self._remote_manager = remote_manager
self._registry = RemoteRegistry(self._client_cache.registry, self._user_io.out)
if not interactive:
self._user_io.disable_input()
self._proxy = ConanProxy(client_cache, self._user_io.out, remote_manager,
registry=self._registry)
resolver = RangeResolver(self._user_io.out, client_cache, self._proxy)
python_requires = ConanPythonRequire(self._proxy, resolver)
self._loader = ConanFileLoader(self._runner, self._user_io.out, python_requires)
self._graph_manager = GraphManager(self._user_io.out, self._client_cache, self._registry,
self._remote_manager, self._loader, self._proxy,
resolver)
self._hook_manager = hook_manager
def _init_manager(self, action_recorder):
"""Every api call gets a new recorder and new manager"""
return ConanManager(self._client_cache, self._user_io,
self._remote_manager, action_recorder, self._registry,
self._graph_manager, self._hook_manager)
@api_method
def new(self, name, header=False, pure_c=False, test=False, exports_sources=False, bare=False,
cwd=None, visual_versions=None, linux_gcc_versions=None, linux_clang_versions=None,
osx_clang_versions=None, shared=None, upload_url=None, gitignore=None,
gitlab_gcc_versions=None, gitlab_clang_versions=None,
circleci_gcc_versions=None, circleci_clang_versions=None, circleci_osx_versions=None):
from conans.client.cmd.new import cmd_new
cwd = os.path.abspath(cwd or get_cwd())
files = cmd_new(name, header=header, pure_c=pure_c, test=test,
exports_sources=exports_sources, bare=bare,
visual_versions=visual_versions,
linux_gcc_versions=linux_gcc_versions,
linux_clang_versions=linux_clang_versions,
osx_clang_versions=osx_clang_versions, shared=shared,
upload_url=upload_url, gitignore=gitignore,
gitlab_gcc_versions=gitlab_gcc_versions,
gitlab_clang_versions=gitlab_clang_versions,
circleci_gcc_versions=circleci_gcc_versions,
circleci_clang_versions=circleci_clang_versions,
circleci_osx_versions=circleci_osx_versions)
save_files(cwd, files)
for f in sorted(files):
self._user_io.out.success("File saved: %s" % f)
@api_method
def inspect(self, path, attributes, remote_name=None):
try:
reference = ConanFileReference.loads(path)
except ConanException:
reference = None
cwd = get_cwd()
conanfile_path = _get_conanfile_path(path, cwd, py=True)
else:
update = True if remote_name else False
result = self._proxy.get_recipe(reference, update, update, remote_name,
ActionRecorder())
conanfile_path, _, _, reference = result
conanfile = self._loader.load_basic(conanfile_path, self._user_io.out)
result = OrderedDict()
if not attributes:
attributes = ['name', 'version', 'url', 'homepage', 'license', 'author',
'description', 'topics', 'generators', 'exports', 'exports_sources',
'short_paths', 'apply_env', 'build_policy', 'settings', 'options',
'default_options']
for attribute in attributes:
try:
attr = getattr(conanfile, attribute)
result[attribute] = attr
except AttributeError as e:
raise ConanException(str(e))
return result
@api_method
def test(self, path, reference, profile_name=None, settings=None, options=None, env=None,
remote_name=None, update=False, build_modes=None, cwd=None, test_build_folder=None):
settings = settings or []
options = options or []
env = env or []
conanfile_path = _get_conanfile_path(path, cwd, py=True)
cwd = cwd or get_cwd()
profile = profile_from_args(profile_name, settings, options, env, cwd,
self._client_cache)
reference = ConanFileReference.loads(reference)
recorder = ActionRecorder()
manager = self._init_manager(recorder)
pt = PackageTester(manager, self._user_io)
pt.install_build_and_test(conanfile_path, reference, profile, remote_name,
update, build_modes=build_modes,
test_build_folder=test_build_folder)
@api_method
def create(self, conanfile_path, name=None, version=None, user=None, channel=None,
profile_name=None, settings=None,
options=None, env=None, test_folder=None, not_export=False,
build_modes=None,
keep_source=False, keep_build=False, verify=None,
manifests=None, manifests_interactive=None,
remote_name=None, update=False, cwd=None, test_build_folder=None):
"""
API method to create a conan package
:param test_folder: default None - looks for default 'test' or 'test_package' folder),
string - test_folder path
False - disabling tests
"""
settings = settings or []
options = options or []
env = env or []
try:
cwd = cwd or os.getcwd()
recorder = ActionRecorder()
conanfile_path = _get_conanfile_path(conanfile_path, cwd, py=True)
reference, conanfile = self._loader.load_export(conanfile_path, name, version, user,
channel)
# Make sure keep_source is set for keep_build
keep_source = keep_source or keep_build
# Forcing an export!
if not not_export:
cmd_export(conanfile_path, conanfile, reference, keep_source, self._user_io.out,
self._client_cache, self._hook_manager)
recorder.recipe_exported(reference)
if build_modes is None: # Not specified, force build the tested library
build_modes = [conanfile.name]
manifests = _parse_manifests_arguments(verify, manifests, manifests_interactive, cwd)
manifest_folder, manifest_interactive, manifest_verify = manifests
profile = profile_from_args(profile_name, settings, options, env,
cwd, self._client_cache)
manager = self._init_manager(recorder)
recorder.add_recipe_being_developed(reference)
create(reference, manager, self._user_io, profile, remote_name, update, build_modes,
manifest_folder, manifest_verify, manifest_interactive, keep_build,
test_build_folder, test_folder, conanfile_path)
return recorder.get_info()
except ConanException as exc:
recorder.error = True
exc.info = recorder.get_info()
raise
@api_method
def export_pkg(self, conanfile_path, name, channel, source_folder=None, build_folder=None,
package_folder=None, install_folder=None, profile_name=None, settings=None,
options=None, env=None, force=False, user=None, version=None, cwd=None):
settings = settings or []
options = options or []
env = env or []
cwd = cwd or get_cwd()
try:
recorder = ActionRecorder()
            # Check that the info files exist if an install folder is specified
if install_folder and not existing_info_files(_make_abs_path(install_folder, cwd)):
raise ConanException("The specified install folder doesn't contain '%s' and '%s' "
"files" % (CONANINFO, BUILD_INFO))
conanfile_path = _get_conanfile_path(conanfile_path, cwd, py=True)
if package_folder:
if build_folder or source_folder:
raise ConanException("package folder definition incompatible with build "
"and source folders")
package_folder = _make_abs_path(package_folder, cwd)
build_folder = _make_abs_path(build_folder, cwd)
install_folder = _make_abs_path(install_folder, cwd, default=build_folder)
source_folder = _make_abs_path(source_folder, cwd,
default=os.path.dirname(conanfile_path))
            # Check that settings and info files are not both specified
infos_present = existing_info_files(install_folder)
if profile_name or settings or options or env or not infos_present:
profile = profile_from_args(profile_name, settings, options, env=env,
cwd=cwd, client_cache=self._client_cache)
else:
profile = read_conaninfo_profile(install_folder)
reference, conanfile = self._loader.load_export(conanfile_path, name, version, user,
channel)
recorder.recipe_exported(reference)
recorder.add_recipe_being_developed(reference)
cmd_export(conanfile_path, conanfile, reference, False, self._user_io.out,
self._client_cache, self._hook_manager)
export_pkg(self._client_cache, self._graph_manager, self._hook_manager, recorder,
self._user_io.out,
reference, source_folder=source_folder, build_folder=build_folder,
package_folder=package_folder, install_folder=install_folder,
profile=profile, force=force)
return recorder.get_info()
except ConanException as exc:
recorder.error = True
exc.info = recorder.get_info()
raise
@api_method
def download(self, reference, remote_name=None, package=None, recipe=False):
if package and recipe:
raise ConanException("recipe parameter cannot be used together with package")
# Install packages without settings (fixed ids or all)
conan_ref = ConanFileReference.loads(reference)
if check_valid_ref(conan_ref, allow_pattern=False):
recorder = ActionRecorder()
download(conan_ref, package, remote_name, recipe, self._registry, self._remote_manager,
self._client_cache, self._user_io.out, recorder, self._loader,
self._hook_manager)
else:
raise ConanException("Provide a valid full reference without wildcards.")
@api_method
def install_reference(self, reference, settings=None, options=None, env=None,
remote_name=None, verify=None, manifests=None,
manifests_interactive=None, build=None, profile_name=None,
update=False, generators=None, install_folder=None, cwd=None):
try:
recorder = ActionRecorder()
cwd = cwd or os.getcwd()
install_folder = _make_abs_path(install_folder, cwd)
manifests = _parse_manifests_arguments(verify, manifests, manifests_interactive, cwd)
manifest_folder, manifest_interactive, manifest_verify = manifests
profile = profile_from_args(profile_name, settings, options, env, cwd,
self._client_cache)
if not generators: # We don't want the default txt
generators = False
mkdir(install_folder)
manager = self._init_manager(recorder)
manager.install(reference=reference, install_folder=install_folder,
remote_name=remote_name, profile=profile, build_modes=build,
update=update, manifest_folder=manifest_folder,
manifest_verify=manifest_verify,
manifest_interactive=manifest_interactive,
generators=generators)
return recorder.get_info()
except ConanException as exc:
recorder.error = True
exc.info = recorder.get_info()
raise
@api_method
def install(self, path="", settings=None, options=None, env=None,
remote_name=None, verify=None, manifests=None,
manifests_interactive=None, build=None, profile_name=None,
update=False, generators=None, no_imports=False, install_folder=None, cwd=None):
try:
recorder = ActionRecorder()
cwd = cwd or os.getcwd()
manifests = _parse_manifests_arguments(verify, manifests, manifests_interactive, cwd)
manifest_folder, manifest_interactive, manifest_verify = manifests
profile = profile_from_args(profile_name, settings, options, env, cwd,
self._client_cache)
wspath = _make_abs_path(path, cwd)
if install_folder:
if os.path.isabs(install_folder):
wsinstall_folder = install_folder
else:
wsinstall_folder = os.path.join(cwd, install_folder)
else:
wsinstall_folder = None
workspace = Workspace.get_workspace(wspath, wsinstall_folder)
if workspace:
self._user_io.out.success("Using conanws.yml file from %s" % workspace._base_folder)
manager = self._init_manager(recorder)
manager.install_workspace(profile, workspace, remote_name, build, update)
return
install_folder = _make_abs_path(install_folder, cwd)
conanfile_path = _get_conanfile_path(path, cwd, py=None)
manager = self._init_manager(recorder)
manager.install(reference=conanfile_path,
install_folder=install_folder,
remote_name=remote_name,
profile=profile,
build_modes=build,
update=update,
manifest_folder=manifest_folder,
manifest_verify=manifest_verify,
manifest_interactive=manifest_interactive,
generators=generators,
no_imports=no_imports)
return recorder.get_info()
except ConanException as exc:
recorder.error = True
exc.info = recorder.get_info()
raise
@api_method
def config_get(self, item):
config_parser = ConanClientConfigParser(self._client_cache.conan_conf_path)
self._user_io.out.info(config_parser.get_item(item))
return config_parser.get_item(item)
@api_method
def config_set(self, item, value):
config_parser = ConanClientConfigParser(self._client_cache.conan_conf_path)
config_parser.set_item(item, value)
self._client_cache.invalidate()
@api_method
def config_rm(self, item):
config_parser = ConanClientConfigParser(self._client_cache.conan_conf_path)
config_parser.rm_item(item)
self._client_cache.invalidate()
@api_method
def config_install(self, item, verify_ssl, config_type=None, args=None):
        # Similar to _make_abs_path, but the item might not be a path at all
if item is not None and os.path.exists(item) and not os.path.isabs(item):
item = os.path.abspath(item)
from conans.client.conf.config_installer import configuration_install
return configuration_install(item, self._client_cache, self._user_io.out, verify_ssl,
requester=self._remote_manager._auth_manager._rest_client.requester, # FIXME: Look out!
config_type=config_type, args=args)
def _info_get_profile(self, reference, install_folder, profile_name, settings, options, env):
cwd = get_cwd()
try:
reference = ConanFileReference.loads(reference)
except ConanException:
reference = _get_conanfile_path(reference, cwd=None, py=None)
if install_folder or not (profile_name or settings or options or env):
            # When no install folder is specified and no profile/settings/options/env either,
            # try to read the info from the cwd
install_folder = _make_abs_path(install_folder, cwd)
if existing_info_files(install_folder):
return reference, read_conaninfo_profile(install_folder)
return reference, profile_from_args(profile_name, settings, options, env=env,
cwd=cwd, client_cache=self._client_cache)
@api_method
def info_build_order(self, reference, settings=None, options=None, env=None,
profile_name=None, remote_name=None, build_order=None, check_updates=None,
install_folder=None):
reference, profile = self._info_get_profile(reference, install_folder, profile_name,
settings, options, env)
recorder = ActionRecorder()
deps_graph, _, _ = self._graph_manager.load_graph(reference, None, profile, ["missing"],
check_updates, False, remote_name,
recorder, workspace=None)
return deps_graph.build_order(build_order)
@api_method
def info_nodes_to_build(self, reference, build_modes, settings=None, options=None, env=None,
profile_name=None, remote_name=None, check_updates=None,
install_folder=None):
reference, profile = self._info_get_profile(reference, install_folder, profile_name,
settings, options, env)
recorder = ActionRecorder()
deps_graph, conanfile, _ = self._graph_manager.load_graph(reference, None, profile,
build_modes, check_updates,
False, remote_name, recorder,
workspace=None)
nodes_to_build = deps_graph.nodes_to_build()
return nodes_to_build, conanfile
@api_method
def info(self, reference, remote_name=None, settings=None, options=None, env=None,
profile_name=None, update=False, install_folder=None, build=None):
reference, profile = self._info_get_profile(reference, install_folder, profile_name,
settings, options, env)
recorder = ActionRecorder()
deps_graph, conanfile, _ = self._graph_manager.load_graph(reference, None, profile, build,
update, False, remote_name,
recorder, workspace=None)
return deps_graph, conanfile
@api_method
def build(self, conanfile_path, source_folder=None, package_folder=None, build_folder=None,
install_folder=None, should_configure=True, should_build=True, should_install=True,
should_test=True, cwd=None):
cwd = cwd or get_cwd()
conanfile_path = _get_conanfile_path(conanfile_path, cwd, py=True)
build_folder = _make_abs_path(build_folder, cwd)
install_folder = _make_abs_path(install_folder, cwd, default=build_folder)
source_folder = _make_abs_path(source_folder, cwd, default=os.path.dirname(conanfile_path))
default_pkg_folder = os.path.join(build_folder, "package")
package_folder = _make_abs_path(package_folder, cwd, default=default_pkg_folder)
build(self._graph_manager, self._hook_manager, conanfile_path, self._user_io.out,
source_folder, build_folder, package_folder, install_folder,
should_configure=should_configure, should_build=should_build,
should_install=should_install, should_test=should_test)
@api_method
def package(self, path, build_folder, package_folder, source_folder=None, install_folder=None,
cwd=None):
cwd = cwd or get_cwd()
conanfile_path = _get_conanfile_path(path, cwd, py=True)
build_folder = _make_abs_path(build_folder, cwd)
install_folder = _make_abs_path(install_folder, cwd, default=build_folder)
source_folder = _make_abs_path(source_folder, cwd, default=os.path.dirname(conanfile_path))
default_pkg_folder = os.path.join(build_folder, "package")
package_folder = _make_abs_path(package_folder, cwd, default=default_pkg_folder)
if package_folder == build_folder:
raise ConanException("Cannot 'conan package' to the build folder. "
"--build-folder and package folder can't be the same")
output = ScopedOutput("PROJECT", self._user_io.out)
conanfile = self._graph_manager.load_consumer_conanfile(conanfile_path, install_folder,
output, deps_info_required=True)
packager.create_package(conanfile, None, source_folder, build_folder, package_folder,
install_folder, output, self._hook_manager, conanfile_path, None,
local=True, copy_info=True)
@api_method
def source(self, path, source_folder=None, info_folder=None, cwd=None):
cwd = cwd or get_cwd()
conanfile_path = _get_conanfile_path(path, cwd, py=True)
source_folder = _make_abs_path(source_folder, cwd)
info_folder = _make_abs_path(info_folder, cwd)
mkdir(source_folder)
if not os.path.exists(info_folder):
raise ConanException("Specified info-folder doesn't exist")
output = ScopedOutput("PROJECT", self._user_io.out)
        # load the info files only if they exist
conanfile = self._graph_manager.load_consumer_conanfile(conanfile_path, info_folder, output)
conanfile_folder = os.path.dirname(conanfile_path)
if conanfile_folder != source_folder:
output.info("Executing exports to: %s" % source_folder)
export_recipe(conanfile, conanfile_folder, source_folder, output)
export_source(conanfile, conanfile_folder, source_folder, output)
config_source_local(source_folder, conanfile, output, conanfile_path,
self._hook_manager)
@api_method
def imports(self, path, dest=None, info_folder=None, cwd=None):
"""
:param path: Path to the conanfile
:param dest: Dir to put the imported files. (Abs path or relative to cwd)
:param info_folder: Dir where the conaninfo.txt and conanbuildinfo.txt files are
:param cwd: Current working directory
:return: None
"""
cwd = cwd or get_cwd()
info_folder = _make_abs_path(info_folder, cwd)
dest = _make_abs_path(dest, cwd)
mkdir(dest)
conanfile_abs_path = _get_conanfile_path(path, cwd, py=None)
output = ScopedOutput("PROJECT", self._user_io.out)
conanfile = self._graph_manager.load_consumer_conanfile(conanfile_abs_path, info_folder,
output, deps_info_required=True)
run_imports(conanfile, dest, output)
@api_method
def imports_undo(self, manifest_path):
cwd = get_cwd()
manifest_path = _make_abs_path(manifest_path, cwd)
undo_imports(manifest_path, self._user_io.out)
@api_method
def export(self, path, name, version, user, channel, keep_source=False, cwd=None):
conanfile_path = _get_conanfile_path(path, cwd, py=True)
reference, conanfile = self._loader.load_export(conanfile_path, name, version, user,
channel)
cmd_export(conanfile_path, conanfile, reference, keep_source, self._user_io.out,
self._client_cache, self._hook_manager)
@api_method
def remove(self, pattern, query=None, packages=None, builds=None, src=False, force=False,
remote_name=None, outdated=False):
remover = ConanRemover(self._client_cache, self._remote_manager, self._user_io,
self._registry)
remover.remove(pattern, remote_name, src, builds, packages, force=force,
packages_query=query, outdated=outdated)
@api_method
def copy(self, reference, user_channel, force=False, packages=None):
"""
        :param packages: None=No binaries, True=All binaries, else list of IDs
"""
from conans.client.cmd.copy import cmd_copy
# FIXME: conan copy does not support short-paths in Windows
reference = ConanFileReference.loads(str(reference))
cmd_copy(reference, user_channel, packages, self._client_cache,
self._user_io, self._remote_manager, self._registry, self._loader, force=force)
@api_method
def authenticate(self, name, password, remote_name):
remote = self.get_remote_by_name(remote_name)
_, remote_name, prev_user, user = self._remote_manager.authenticate(remote, name, password)
return remote_name, prev_user, user
@api_method
def user_set(self, user, remote_name=None):
remote = (self.get_default_remote() if not remote_name
else self.get_remote_by_name(remote_name))
return user_set(self._client_cache.localdb, user, remote)
@api_method
def users_clean(self):
users_clean(self._client_cache.localdb)
@api_method
def users_list(self, remote_name=None):
info = {"error": False, "remotes": []}
remotes = [self.get_remote_by_name(remote_name)] if remote_name else self.remote_list()
try:
info["remotes"] = users_list(self._client_cache.localdb, remotes)
return info
except ConanException as exc:
info["error"] = True
exc.info = info
raise
@api_method
def search_recipes(self, pattern, remote_name=None, case_sensitive=False):
recorder = SearchRecorder()
search = Search(self._client_cache, self._remote_manager, self._registry)
try:
references = search.search_recipes(pattern, remote_name, case_sensitive)
except ConanException as exc:
recorder.error = True
exc.info = recorder.get_info()
raise
for remote_name, refs in references.items():
for ref in refs:
recorder.add_recipe(remote_name, ref, with_packages=False)
return recorder.get_info()
@api_method
def search_packages(self, reference, query=None, remote_name=None, outdated=False):
recorder = SearchRecorder()
search = Search(self._client_cache, self._remote_manager, self._registry)
try:
reference = ConanFileReference.loads(str(reference))
references = search.search_packages(reference, remote_name, query=query,
outdated=outdated)
except ConanException as exc:
recorder.error = True
exc.info = recorder.get_info()
raise
for remote_name, remote_ref in references.items():
recorder.add_recipe(remote_name, reference)
if remote_ref.ordered_packages:
for package_id, properties in remote_ref.ordered_packages.items():
package_recipe_hash = properties.get("recipe_hash", None)
recorder.add_package(remote_name, reference,
package_id, properties.get("options", []),
properties.get("settings", []),
properties.get("full_requires", []),
remote_ref.recipe_hash != package_recipe_hash)
return recorder.get_info()
@api_method
def upload(self, pattern, package=None, remote_name=None, all_packages=False, confirm=False,
retry=2, retry_wait=5, integrity_check=False, policy=None, query=None):
""" Uploads a package recipe and the generated binary packages to a specified remote
"""
recorder = UploadRecorder()
uploader = CmdUpload(self._client_cache, self._user_io, self._remote_manager,
self._registry, self._loader, self._hook_manager)
try:
uploader.upload(recorder, pattern, package, all_packages, confirm, retry,
retry_wait, integrity_check, policy, remote_name, query=query)
return recorder.get_info()
except ConanException as exc:
recorder.error = True
exc.info = recorder.get_info()
raise
@api_method
def remote_list(self):
return self._registry.remotes.list
@api_method
def remote_add(self, remote_name, url, verify_ssl=True, insert=None, force=None):
return self._registry.remotes.add(remote_name, url, verify_ssl, insert, force)
@api_method
def remote_remove(self, remote_name):
return self._registry.remotes.remove(remote_name)
@api_method
def remote_update(self, remote_name, url, verify_ssl=True, insert=None):
return self._registry.remotes.update(remote_name, url, verify_ssl, insert)
@api_method
def remote_rename(self, remote_name, new_new_remote):
return self._registry.remotes.rename(remote_name, new_new_remote)
@api_method
def remote_list_ref(self):
return {r: remote_name for r, remote_name in self._registry.refs.list.items()}
@api_method
def remote_add_ref(self, reference, remote_name):
reference = ConanFileReference.loads(str(reference), validate=True)
return self._registry.refs.set(reference, remote_name, check_exists=True)
@api_method
def remote_remove_ref(self, reference):
reference = ConanFileReference.loads(str(reference), validate=True)
return self._registry.refs.remove(reference)
@api_method
def remote_update_ref(self, reference, remote_name):
reference = ConanFileReference.loads(str(reference), validate=True)
return self._registry.refs.update(reference, remote_name)
@api_method
def remote_list_pref(self, reference):
reference = ConanFileReference.loads(str(reference), validate=True)
ret = {}
tmp = self._registry.prefs.list
for r, remote in tmp.items():
pref = PackageReference.loads(r)
if pref.conan == reference:
ret[pref.full_repr()] = remote
return ret
@api_method
def remote_add_pref(self, package_reference, remote_name):
p_reference = PackageReference.loads(str(package_reference), validate=True)
return self._registry.prefs.set(p_reference, remote_name, check_exists=True)
@api_method
def remote_remove_pref(self, package_reference):
p_reference = PackageReference.loads(str(package_reference), validate=True)
return self._registry.prefs.remove(p_reference)
@api_method
def remote_update_pref(self, package_reference, remote_name):
p_reference = PackageReference.loads(str(package_reference), validate=True)
return self._registry.prefs.update(p_reference, remote_name)
def remote_clean(self):
return self._registry.remotes.clean()
@api_method
def profile_list(self):
return cmd_profile_list(self._client_cache.profiles_path, self._user_io.out)
@api_method
def create_profile(self, profile_name, detect=False):
return cmd_profile_create(profile_name, self._client_cache.profiles_path,
self._user_io.out, detect)
@api_method
def update_profile(self, profile_name, key, value):
return cmd_profile_update(profile_name, key, value, self._client_cache.profiles_path)
@api_method
def get_profile_key(self, profile_name, key):
return cmd_profile_get(profile_name, key, self._client_cache.profiles_path)
@api_method
def delete_profile_key(self, profile_name, key):
return cmd_profile_delete_key(profile_name, key, self._client_cache.profiles_path)
@api_method
def read_profile(self, profile=None):
p, _ = read_profile(profile, get_cwd(), self._client_cache.profiles_path)
return p
@api_method
def get_path(self, reference, package_id=None, path=None, remote_name=None):
from conans.client.local_file_getter import get_path
reference = ConanFileReference.loads(reference)
if not path:
path = "conanfile.py" if not package_id else "conaninfo.txt"
if not remote_name:
return get_path(self._client_cache, reference, package_id, path), path
else:
remote = self.get_remote_by_name(remote_name)
return self._remote_manager.get_path(reference, package_id, path, remote), path
@api_method
def export_alias(self, reference, target_reference):
reference = ConanFileReference.loads(reference)
target_reference = ConanFileReference.loads(target_reference)
return export_alias(reference, target_reference, self._client_cache)
@api_method
def get_default_remote(self):
return self._registry.remotes.default
@api_method
def get_remote_by_name(self, remote_name):
return self._registry.remotes.get(remote_name)
Conan = ConanAPIV1
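# Minimal usage sketch (illustrative only; constructing ConanAPIV1 normally goes through
# a factory that wires up the cache, user IO and remote manager, so the construction call
# below is a placeholder rather than the real signature):
#
#   api = Conan(...)                      # hypothetical construction
#   api.remote_add("myremote", "https://conan.example.com")
#   api.install(path=".", settings=["build_type=Release"])
#   api.upload("mypkg/1.0@user/stable", remote_name="myremote", confirm=True)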
def _parse_manifests_arguments(verify, manifests, manifests_interactive, cwd):
if manifests and manifests_interactive:
raise ConanException("Do not specify both manifests and "
"manifests-interactive arguments")
if verify and (manifests or manifests_interactive):
raise ConanException("Do not specify both 'verify' and "
"'manifests' or 'manifests-interactive' arguments")
manifest_folder = verify or manifests or manifests_interactive
if manifest_folder:
if not os.path.isabs(manifest_folder):
if not cwd:
raise ConanException("'cwd' should be defined if the manifest folder is relative.")
manifest_folder = os.path.join(cwd, manifest_folder)
manifest_verify = verify is not None
manifest_interactive = manifests_interactive is not None
else:
manifest_verify = manifest_interactive = False
return manifest_folder, manifest_interactive, manifest_verify
def existing_info_files(folder):
return os.path.exists(os.path.join(folder, CONANINFO)) and \
os.path.exists(os.path.join(folder, BUILD_INFO))
def get_conan_runner():
print_commands_to_output = get_env("CONAN_PRINT_RUN_COMMANDS", False)
generate_run_log_file = get_env("CONAN_LOG_RUN_TO_FILE", False)
log_run_to_output = get_env("CONAN_LOG_RUN_TO_OUTPUT", True)
runner = ConanRunner(print_commands_to_output, generate_run_log_file, log_run_to_output)
return runner
def migrate_and_get_client_cache(base_folder, out, storage_folder=None):
# Init paths
client_cache = ClientCache(base_folder, storage_folder, out)
# Migration system
migrator = ClientMigrator(client_cache, Version(client_version), out)
migrator.migrate()
return client_cache
|
"""
Uses a finite state machine (FSM) algorithm to automatically drive
order status transitions based on the predefined events and statuses.
"""
# pylint: disable=arguments-differ
from ...core.tools import web
from ...core.algorithm import fsm
from . import events
from . import status
from . import settings
# State transition table
_TransferTable = (
(status.Created, events.Confirm, status.Confirmed),
(status.Created, events.UserClose, status.Closed),
(status.Created, events.OrderTimedOut, status.Closed),
(status.Confirmed, events.Paying, status.Paying),
(status.Confirmed, events.UserClose, status.Closed),
(status.Confirmed, events.OrderTimedOut, status.Closed),
(status.Paying, events.PayingSuccess, status.Paid),
(status.Paying, events.PayingFailed, status.PayFailed),
(status.PayFailed, events.OrderTimedOut, status.Closed),
(status.PayFailed, events.OrderRetry, status.Created),
(status.Paid, events.Shipped, status.Shipping),
(status.Shipping, events.Delieverd, status.Delieverd),
(status.Delieverd, events.Recieved, status.Completed),
(status.Delieverd, events.RecieveTimingExcced, status.Completed),
(status.Completed, events.RequestRefund, status.RefundReviewing),
(status.RefundReviewing, events.RefundApproved, status.Refunding),
(status.RefundReviewing, events.RefundDenied, status.Completed),
(status.Refunding, events.RefundSuccess, status.Closed),
(status.Refunding, events.RefundFailed, status.Completed)
)
class StatusManager(fsm.Machine):
"""创建一个订单状态管理器"""
    def __init__(self, orderid: str) -> None:
        """
        Order status manager constructor:
        0. Name the manager after the order id
        1. Initialize the state transition table
        2. Initialize the entry state
        """
super().__init__(str(orderid))
        # Initialize the state transition table
for record in _TransferTable:
self.add(*record)
    def start(self) -> None:
        """Start the machine from the Created status."""
super().start(status.Created)
def json(self) -> str:
"""将当前状态信息导出为 JSON 字串"""
if not self.current is None:
current = {
settings.Status.Key.Code: self.current.code,
settings.Status.Key.Description: self.current.description,
settings.Status.Key.Extra: self.current.extra
            }  # dict of the current status for export
else:
current = None
mapping = map(lambda event: {
settings.Events.Key.Time: event.time,
settings.Events.Key.OperationCode: event.opcode,
settings.Events.Key.Extra: event.extra,
settings.Events.Key.Description: event.description
        }, self.events)  # export the recorded events as dicts
return web.JSONcreater({
settings.Manager.Key.Name: self.name,
settings.Manager.Key.CurrentStat: current,
settings.Manager.Key.EventsRecorder: list(mapping)
})
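# Minimal usage sketch (illustrative only; how events are fed into the machine depends on
# the fsm.Machine base class, so the transition call below is hypothetical):
#
#   manager = StatusManager("order-42")
#   manager.start()                      # enter status.Created
#   # manager.handle(events.Confirm)     # hypothetical: trigger a transition
#   print(manager.json())                # export the current state as JSON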
|
from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.ma as ma
from numpy.testing import *
from numpy.compat import sixu
rlevel = 1
class TestRegression(TestCase):
def test_masked_array_create(self,level=rlevel):
"""Ticket #17"""
x = np.ma.masked_array([0,1,2,3,0,4,5,6],mask=[0,0,0,1,1,1,0,0])
assert_array_equal(np.ma.nonzero(x),[[1,2,6,7]])
def test_masked_array(self,level=rlevel):
"""Ticket #61"""
x = np.ma.array(1,mask=[1])
def test_mem_masked_where(self,level=rlevel):
"""Ticket #62"""
from numpy.ma import masked_where, MaskType
a = np.zeros((1,1))
b = np.zeros(a.shape, MaskType)
c = masked_where(b,a)
a-c
def test_masked_array_multiply(self,level=rlevel):
"""Ticket #254"""
a = np.ma.zeros((4,1))
a[2,0] = np.ma.masked
b = np.zeros((4,2))
a*b
b*a
def test_masked_array_repeat(self, level=rlevel):
"""Ticket #271"""
np.ma.array([1],mask=False).repeat(10)
def test_masked_array_repr_unicode(self):
"""Ticket #1256"""
repr(np.ma.array(sixu("Unicode")))
def test_atleast_2d(self):
"""Ticket #1559"""
a = np.ma.masked_array([0.0, 1.2, 3.5], mask=[False, True, False])
b = np.atleast_2d(a)
assert_(a.mask.ndim == 1)
assert_(b.mask.ndim == 2)
def test_set_fill_value_unicode_py3(self):
"""Ticket #2733"""
a = np.ma.masked_array(['a', 'b', 'c'], mask=[1, 0, 0])
a.fill_value = 'X'
assert_(a.fill_value == 'X')
def test_var_sets_maskedarray_scalar(self):
"""Issue gh-2757"""
a = np.ma.array(np.arange(5), mask=True)
mout = np.ma.array(-1, dtype=float)
a.var(out=mout)
assert_(mout._data == 0)
if __name__ == "__main__":
run_module_suite()
|
"""\U0001F1EB\U0001F1EF \U00002B50 CSV track coordinate to TrackMate XML conversion.
Fiji allows for quick and easy viewing of images. TrackMate can be used to view tracks.
Unfortunately, it isn't that simple to convert "normal" coordinate output into
TrackMate-viewable format.
Requires a "tracks.csv" file that contains the following columns:
- x, y: Coordinate positions in x-/y-axis
- particle: Unique ID assigned to all coordinates along one track
- frame: Current point in time / frame
"""
import argparse
import os
import tempfile
import warnings
import xml.dom.minidom
import xml.etree.ElementTree as ET
import numpy as np
import pandas as pd
import skimage.io
def get_gaps(frames):
def __longest_consecutive(a):
"""Return length of longest consecutive range in list of integers."""
a = set(a)
longest = 0
for i in a:
if i - 1 not in a:
streak = 0
while i in a:
i += 1
streak += 1
longest = max(longest, streak)
return longest
full_length = np.arange(min(frames), max(frames))
diff = np.setdiff1d(full_length, frames)
longest = __longest_consecutive(diff)
total = len(diff)
return str(longest), str(total), str(len(full_length))
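# Worked example for get_gaps (illustrative): frames = [0, 1, 4, 5, 9] gives
# full_length = [0..8] and diff = [2, 3, 6, 7, 8], so the longest consecutive gap is "3"
# (frames 6-8), the total number of missing frames is "5", and the range length is "9".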
def __create_model(root, spatialunits: str = "pixel", timeunits: str = "sec"):
dict_spotfeatures = [
{
"feature": "QUALITY",
"name": "Quality",
"shortname": "Quality",
"dimension": "QUALITY",
"isint": "false",
},
{
"feature": "POSITION_X",
"name": "X",
"shortname": "X",
"dimension": "POSITION",
"isint": "false",
},
{
"feature": "POSITION_Y",
"name": "Y",
"shortname": "Y",
"dimension": "POSITION",
"isint": "false",
},
{
"feature": "POSITION_Z",
"name": "Z",
"shortname": "Z",
"dimension": "POSITION",
"isint": "false",
},
{
"feature": "POSITION_T",
"name": "T",
"shortname": "T",
"dimension": "TIME",
"isint": "false",
},
{
"feature": "FRAME",
"name": "Frame",
"shortname": "Frame",
"dimension": "NONE",
"isint": "true",
},
{
"feature": "RADIUS",
"name": "Radius",
"shortname": "R",
"dimension": "LENGTH",
"isint": "false",
},
{
"feature": "VISIBILITY",
"name": "Visibility",
"shortname": "Visibility",
"dimension": "NONE",
"isint": "true",
},
{
"feature": "MANUAL_INTEGER_SPOT_FEATURE",
"name": "Custom Integer Spot Feature",
"shortname": "Integer Spot Feature",
"dimension": "NONE",
"isint": "true",
},
{
"feature": "MANUAL_DOUBLE_SPOT_FEATURE",
"name": "Custom Double Spot Feature",
"shortname": "Double Spot Feature",
"dimension": "NONE",
"isint": "false",
},
{
"feature": "HAS_MAX_QUALITY_IN_FRAME",
"name": "Has max quality",
"shortname": "Max Quality",
"dimension": "NONE",
"isint": "true",
},
{
"feature": "MANUAL_COLOR",
"name": "Manual spot color",
"shortname": "Spot color",
"dimension": "NONE",
"isint": "true",
},
{
"feature": "MEAN_INTENSITY",
"name": "Mean intensity",
"shortname": "Mean",
"dimension": "INTENSITY",
"isint": "false",
},
{
"feature": "MEDIAN_INTENSITY",
"name": "Median intensity",
"shortname": "Median",
"dimension": "INTENSITY",
"isint": "false",
},
{
"feature": "MIN_INTENSITY",
"name": "Minimal intensity",
"shortname": "Min",
"dimension": "INTENSITY",
"isint": "false",
},
{
"feature": "MAX_INTENSITY",
"name": "Maximal intensity",
"shortname": "Max",
"dimension": "INTENSITY",
"isint": "false",
},
{
"feature": "TOTAL_INTENSITY",
"name": "Total intensity",
"shortname": "Total int.",
"dimension": "INTENSITY",
"isint": "false",
},
{
"feature": "STANDARD_DEVIATION",
"name": "Standard deviation",
"shortname": "Stdev.",
"dimension": "INTENSITY",
"isint": "false",
},
{
"feature": "ESTIMATED_DIAMETER",
"name": "Estimated diameter",
"shortname": "Diam.",
"dimension": "LENGTH",
"isint": "false",
},
{
"feature": "CONTRAST",
"name": "Contrast",
"shortname": "Constrast",
"dimension": "NONE",
"isint": "false",
},
{
"feature": "SNR",
"name": "Signal/Noise, ratio",
"shortname": "SNR",
"dimension": "NONE",
"isint": "false",
},
]
dict_edgefeatures = [
{
"feature": "SPOT_SOURCE_ID",
"name": "Source spot ID",
"shortname": "Source ID",
"dimension": "NONE",
"isint": "true",
},
{
"feature": "SPOT_TARGET_ID",
"name": "Target spot ID",
"shortname": "Target ID",
"dimension": "NONE",
"isint": "true",
},
{
"feature": "LINK_COST",
"name": "Link cost",
"shortname": "Cost",
"dimension": "NONE",
"isint": "false",
},
{
"feature": "EDGE_TIME",
"name": "Time (mean)",
"shortname": "T",
"dimension": "TIME",
"isint": "false",
},
{
"feature": "EDGE_X_LOCATION",
"name": "X Location (mean)",
"shortname": "X",
"dimension": "POSITION",
"isint": "false",
},
{
"feature": "EDGE_Y_LOCATION",
"name": "Y Location (mean)",
"shortname": "Y",
"dimension": "POSITION",
"isint": "false",
},
{
"feature": "EDGE_Z_LOCATION",
"name": "Z Location (mean)",
"shortname": "Z",
"dimension": "POSITION",
"isint": "false",
},
{
"feature": "VELOCITY",
"name": "Velocity",
"shortname": "V",
"dimension": "VELOCITY",
"isint": "false",
},
{
"feature": "DISPLACEMENT",
"name": "Displacement",
"shortname": "D",
"dimension": "LENGTH",
"isint": "false",
},
{
"feature": "MANUAL_COLOR",
"name": "Manual edge color",
"shortname": "Edge color",
"dimension": "NONE",
"isint": "true",
},
]
dict_trackfeatures = [
{
"feature": "MANUAL_INTEGER_TRACK_FEATURE",
"name": "Custom Integer Track Feature",
"shortname": "Integer Track Feature",
"dimension": "NONE",
"isint": "true",
},
{
"feature": "MANUAL_DOUBLE_TRACK_FEATURE",
"name": "Custom Double Track Feature",
"shortname": "Double Track Feature",
"dimension": "NONE",
"isint": "false",
},
{
"feature": "NUMBER_SPOTS",
"name": "Number of spots in track",
"shortname": "N spots",
"dimension": "NONE",
"isint": "true",
},
{
"feature": "NUMBER_GAPS",
"name": "Number of gaps",
"shortname": "Gaps",
"dimension": "NONE",
"isint": "true",
},
{
"feature": "LONGEST_GAP",
"name": "Longest gap",
"shortname": "Longest gap",
"dimension": "NONE",
"isint": "true",
},
{
"feature": "NUMBER_SPLITS",
"name": "Number of split events",
"shortname": "Splits",
"dimension": "NONE",
"isint": "true",
},
{
"feature": "NUMBER_MERGES",
"name": "Number of merge events",
"shortname": "Merges",
"dimension": "NONE",
"isint": "true",
},
{
"feature": "NUMBER_COMPLEX",
"name": "Complex points",
"shortname": "Complex",
"dimension": "NONE",
"isint": "true",
},
{
"feature": "TRACK_DURATION",
"name": "Duration of track",
"shortname": "Duration",
"dimension": "TIME",
"isint": "false",
},
{
"feature": "TRACK_START",
"name": "Track start",
"shortname": "T start",
"dimension": "TIME",
"isint": "false",
},
{
"feature": "TRACK_STOP",
"name": "Track stop",
"shortname": "T stop",
"dimension": "TIME",
"isint": "false",
},
{
"feature": "TRACK_DISPLACEMENT",
"name": "Track displacement",
"shortname": "Displacement",
"dimension": "LENGTH",
"isint": "false",
},
{
"feature": "TRACK_INDEX",
"name": "Track index",
"shortname": "Index",
"dimension": "NONE",
"isint": "true",
},
{
"feature": "TRACK_ID",
"name": "Track ID",
"shortname": "ID",
"dimension": "NONE",
"isint": "true",
},
{
"feature": "TRACK_X_LOCATION",
"name": "X Location (mean)",
"shortname": "X",
"dimension": "POSITION",
"isint": "false",
},
{
"feature": "TRACK_Y_LOCATION",
"name": "Y Location (mean)",
"shortname": "Y",
"dimension": "POSITION",
"isint": "false",
},
{
"feature": "TRACK_Z_LOCATION",
"name": "Z Location (mean)",
"shortname": "Z",
"dimension": "POSITION",
"isint": "false",
},
{
"feature": "TRACK_MEAN_SPEED",
"name": "Mean velocity",
"shortname": "Mean V",
"dimension": "VELOCITY",
"isint": "false",
},
{
"feature": "TRACK_MAX_SPEED",
"name": "Maximal velocity",
"shortname": "Max V",
"dimension": "VELOCITY",
"isint": "false",
},
{
"feature": "TRACK_MIN_SPEED",
"name": "Minimal velocity",
"shortname": "Min V",
"dimension": "VELOCITY",
"isint": "false",
},
{
"feature": "TRACK_MEDIAN_SPEED",
"name": "Median velocity",
"shortname": "Median V",
"dimension": "VELOCITY",
"isint": "false",
},
{
"feature": "TRACK_STD_SPEED",
"name": "Velocity standard deviation",
"shortname": "V std",
"dimension": "VELOCITY",
"isint": "false",
},
{
"feature": "TRACK_MEAN_QUALITY",
"name": "Mean quality",
"shortname": "Mean Q",
"dimension": "QUALITY",
"isint": "false",
},
{
"feature": "TRACK_MAX_QUALITY",
"name": "Maximal quality",
"shortname": "Max Q",
"dimension": "QUALITY",
"isint": "false",
},
{
"feature": "TRACK_MIN_QUALITY",
"name": "Minimal quality",
"shortname": "Min Q",
"dimension": "QUALITY",
"isint": "false",
},
{
"feature": "TRACK_MEDIAN_QUALITY",
"name": "Median quality",
"shortname": "Median Q",
"dimension": "QUALITY",
"isint": "false",
},
{
"feature": "TRACK_STD_QUALITY",
"name": "Quality standard deviation",
"shortname": "Q std",
"dimension": "QUALITY",
"isint": "false",
},
]
# Model
model = ET.SubElement(root, "Model", spatialunits=spatialunits, timeunits=timeunits)
featuredeclarations = ET.SubElement(model, "FeatureDeclarations")
# SpotFeatures
spotfeatures = ET.SubElement(featuredeclarations, "SpotFeatures")
for dct in dict_spotfeatures:
_ = ET.SubElement(spotfeatures, "Feature", **dct)
# Edgefeatures
edgefeatures = ET.SubElement(featuredeclarations, "EdgeFeatures")
for dct in dict_edgefeatures:
_ = ET.SubElement(edgefeatures, "Feature", **dct)
# TrackFeatures
trackfeatures = ET.SubElement(featuredeclarations, "TrackFeatures")
for dct in dict_trackfeatures:
_ = ET.SubElement(trackfeatures, "Feature", **dct)
return model
def __create_allspots(model, df):
# List of all spots (without tracks)
allspots = ET.SubElement(model, "AllSpots", nspots=str(len(df)))
spotid = 0
for frame in df["slice"].unique():
frame_id = str(float(frame))
df_frame = df[df["slice"] == frame]
spotsinframe = ET.SubElement(allspots, "SpotsInFrame", frame=str(frame))
for row in df_frame.iterrows():
try:
size = str(row[1]["size"] * 2)
except KeyError:
size = "1.0"
dict_spot = {
"ID": f"{spotid:06}",
"name": f"ID{spotid:06}",
"QUALITY": "1.0",
"POSITION_T": frame_id,
"MAX_INTENSITY": "1.0",
"FRAME": frame_id,
"MEDIAN_INTENSITY": "1.0",
"VISIBILITY": "1",
"MEAN_INTENSITY": "1.0",
"TOTAL_INTENSITY": "1.0",
"ESTIMATED_DIAMETER": size,
"RADIUS": "1.0",
"SNR": "1.0",
"POSITION_X": str(row[1]["x"]),
"POSITION_Y": str(row[1]["y"]),
"STANDARD_DEVIATION": "1.0",
"CONTRAST": "1.0",
"MANUAL_COLOR": "-10921639",
"MIN_INTENSITY": "0.0",
"POSITION_Z": "1",
}
_ = ET.SubElement(spotsinframe, "Spot", **dict_spot)
spotid = spotid + 1
def __create_alltracks(model, df):
# List of all tracks
alltracks = ET.SubElement(model, "AllTracks")
# for particle in df["particle"].unique():
# df_track = df[df["particle"] == particle]
# track_ids = list(df_track.index)
# frames = np.array(df_track["slice"])
# longest, total, duration = get_gaps(frames)
# dict_track = {
# "name": f"Track_{particle}",
# "TRACK_ID": str(particle),
# "NUMBER_SPOTS": str(len(frames)),
# "NUMBER_GAPS": longest,
# "LONGEST_GAP": total,
# "NUMBER_SPLITS": "0",
# "NUMBER_MERGES": "0",
# "NUMBER_COMPLEX": "0",
# "TRACK_DURATION": duration,
# "TRACK_START": str(min(frames)),
# "TRACK_STOP": str(max(frames)),
# "TRACK_DISPLACEMENT": "0.01",
# "TRACK_INDEX": str(particle),
# "TRACK_X_LOCATION": str(df_track["x"].mean()),
# "TRACK_Y_LOCATION": str(df_track["y"].mean()),
# "TRACK_Z_LOCATION": "0.1",
# "TRACK_MEAN_SPEED": "0.1",
# "TRACK_MAX_SPEED": "0.1",
# "TRACK_MIN_SPEED": "0.1",
# "TRACK_MEDIAN_SPEED": "0.1",
# "TRACK_STD_SPEED": "0.1",
# "TRACK_MEAN_QUALITY": "0.1",
# "TRACK_MAX_QUALITY": "0.1",
# "TRACK_MIN_QUALITY": "0.1",
# "TRACK_MEDIAN_QUALITY": "0.1",
# "TRACK_STD_QUALITY": "0.1",
# }
# track = ET.SubElement(alltracks, "Track", **dict_track)
# # Add all spots in the corresponding track
# for row in df_track.iterrows():
# dict_edge = {
# "SPOT_SOURCE_ID": f"{row[0]:06}",
# "SPOT_TARGET_ID": f"{track_ids[track_ids.index(row[0]) - 1]:06}",
# "LINK_COST": "0.1",
# "EDGE_TIME": "0.1",
# "EDGE_X_LOCATION": str(row[1]["x"]),
# "EDGE_Y_LOCATION": str(row[1]["y"]),
# "EDGE_Z_LOCATION": "0.0",
# "VELOCITY": "0.1",
# "DISPLACEMENT": "0.1",
# }
# _ = ET.SubElement(track, "Edge", **dict_edge)
def __create_filteredtracks(model, df):
# Tracks after TrackMate's filtering
filteredtracks = ET.SubElement(model, "FilteredTracks")
# for particle in df["particle"].unique():
# _ = ET.SubElement(filteredtracks, "TrackID", TRACK_ID=str(particle))
def __create_settings(
root,
file_image,
pixelwidth: str = "1.0",
pixelheight: str = "1.0",
voxeldepth: str = "1.0",
timeinterval: str = "1.0",
):
# Image metadata
path, fname = os.path.split(file_image)
image = skimage.io.imread(file_image)
if len(image.shape) == 2:
        warnings.warn(
            "Found a 2D image; assuming it is a single time point and adding a frame axis."
        )
image = np.expand_dims(image, axis=0)
frames, width, height = image.shape
imagedata = {
"filename": fname,
"folder": path,
"width": str(width),
"height": str(height),
"nslices": "1",
"nframes": str(frames),
"pixelwidth": pixelwidth,
"pixelheight": pixelheight,
"voxeldepth": voxeldepth,
"timeinterval": timeinterval,
}
basicsettings = {
"xstart": "0",
"xend": str(width - 1),
"ystart": "0",
"yend": str(height - 1),
"zstart": "0",
"zend": "0",
"tstart": "0",
"tend": str(frames - 1),
}
detectorsettings = {
"DETECTOR_NAME": "LOG_DETECTOR",
"TARGET_CHANNEL": "1",
"RADIUS": "5.0",
"THRESHOLD": "1000.0",
"DO_MEDIAN_FILTERING": "false",
"DO_SUBPIXEL_LOCALIZATION": "true",
}
initialspotfilter = {"feature": "QUALITY", "value": "0.0", "isabove": "true"}
dict_trackersettings = {
"TRACKER_NAME": "SPARSE_LAP_TRACKER",
"CUTOFF_PERCENTILE": "0.9",
"ALTERNATIVE_LINKING_COST_FACTOR": "1.05",
"BLOCKING_VALUE": "Infinity",
}
dict_subtrackersettings = {
"Linking": {"LINKING_MAX_DISTANCE": "0.8"},
"GapClosing": {
"ALLOW_GAP_CLOSING": "false",
"GAP_CLOSING_MAX_DISTANCE": "0.5",
"MAX_FRAME_GAP": "3",
},
"TrackSplitting": {
"ALLOW_TRACK_SPLITTING": "false",
"SPLITTING_MAX_DISTANCE": "15.0",
},
"TrackMerging": {
"ALLOW_TRACK_MERGING": "false",
"MERGING_MAX_DISTANCE": "15.0",
},
}
dict_analyzercollection = {
"SpotAnalyzers": [
"MANUAL_SPOT_COLOR_ANALYZER",
"Spot descriptive statistics",
"Spot radius estimator",
"Spot contrast and SNR",
],
"EdgeAnalyzers": [
"Edge target",
"Edge mean location",
"Edge velocity",
"MANUAL_EDGE_COLOR_ANALYZER",
],
"TrackAnalyzers": [
"Branching analyzer",
"Track duration",
"Track index",
"Track location",
"Velocity",
"TRACK_SPOT_QUALITY",
],
}
# General Settings
settings = ET.SubElement(root, "Settings")
_ = ET.SubElement(settings, "ImageData", **imagedata)
_ = ET.SubElement(settings, "BasicSettings", **basicsettings)
_ = ET.SubElement(settings, "DetectorSettings", **detectorsettings)
_ = ET.SubElement(settings, "InitialSpotFilter", **initialspotfilter)
_ = ET.SubElement(settings, "SpotFilterCollection")
# Tracker settings
trackersettings = ET.SubElement(settings, "TrackerSettings", **dict_trackersettings)
for k, v in dict_subtrackersettings.items():
subelement = ET.SubElement(trackersettings, k, **v)
_ = ET.SubElement(subelement, "FeaturePenalties")
# Filter settings
_ = ET.SubElement(settings, "TrackFilterCollection")
analyzercollection = ET.SubElement(settings, "AnalyzerCollection")
for k, v in dict_analyzercollection.items():
subanalyzer = ET.SubElement(analyzercollection, k)
for lst in v:
_ = ET.SubElement(subanalyzer, "Analyzer", key=lst)
def __create_guistate(root):
# TrackMate's GUI settings
guistate = ET.SubElement(root, "GUIState", state="InitialFiltering")
for _ in range(4):
_ = ET.SubElement(guistate, "View", key="HYPERSTACKDISPLAYER")
def __pretty_output(root, file_output):
# Save file after fancy formatting to prettify
with tempfile.TemporaryDirectory() as tempdirname:
fname = os.path.join(tempdirname, "file.xml")
tree = ET.ElementTree(root)
tree.write(fname, encoding="UTF-8", xml_declaration=True)
dom = xml.dom.minidom.parse(fname)
pretty_xml = dom.toprettyxml()
with open(file_output, "w") as f:
f.write(pretty_xml)
def create_trackmate_xml(
spots_df,
file_image,
file_output,
spatialunits: str = "pixel",
timeunits: str = "sec",
pixelwidth: int = 1,
pixelheight: int = 1,
voxeldepth: int = 1,
timeinterval: int = 1,
):
# Check required track df columns
df = spots_df
df["x"] = df["x"] * pixelwidth
df["y"] = df["y"] * pixelheight
df["z"] = 1.0
df.to_csv(file_output.replace("xml", "csv"))
req_cols = ["x", "y", "slice"]
if not all(req in df.columns for req in req_cols):
raise ValueError(f"Not all required columns present! {req_cols} must exist.")
# XML tree
root = ET.Element("TrackMate", version="6.0.1")
# Model
model = __create_model(root, spatialunits=spatialunits, timeunits=timeunits)
__create_allspots(model, df)
__create_alltracks(model, df)
__create_filteredtracks(model, df)
# Settings
__create_settings(
root,
file_image,
pixelwidth=str(pixelwidth),
pixelheight=str(pixelheight),
voxeldepth=str(voxeldepth),
timeinterval=str(timeinterval),
)
__create_guistate(root)
# Save output
__pretty_output(root, file_output)
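if __name__ == "__main__":
    # Minimal usage sketch; file names below are placeholders. The conversion expects
    # "x", "y" and "slice" columns (see the required-columns check above), so a "frame"
    # column is renamed here as an assumption about typical input CSVs.
    tracks = pd.read_csv("tracks.csv")
    if "slice" not in tracks.columns and "frame" in tracks.columns:
        tracks = tracks.rename(columns={"frame": "slice"})
    create_trackmate_xml(
        spots_df=tracks,
        file_image="movie.tif",
        file_output="movie_trackmate.xml",
    )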
|
#
# Copyright 2018 Joachim Lusiardi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import sys
import base64
import binascii
from homekit.http_impl.http_client import HomeKitHTTPConnection
from homekit.zeroconf_ import find_device_ip_and_port
from homekit.protocol import get_session_keys
from homekit.model.characteristics import CharacteristicFormats
from distutils.util import strtobool
from homekit.exception import FormatException
from homekit.tlv import TlvParseException
from homekit import TLV
def load_pairing(file: str) -> dict:
"""
loads data for an existing pairing from the file.
:param file: the file name
:return: a dict containing the pairing data or None if file was not found
"""
try:
with open(file, 'r') as input_fp:
return json.load(input_fp)
except FileNotFoundError:
return None
def save_pairing(file: str, pairing_data: dict):
"""
save the data for an existing pairing.
:param file: the file name
:param pairing_data: a dict containing the pairing data
:return: None
"""
with open(file, 'w') as output_fp:
json.dump(pairing_data, output_fp, indent=4)
def create_session(file):
"""
    try to obtain IP and port from the given file and establish a session to a HomeKit accessory. This function handles
    IPs/ports that might have changed since the last run and updates the file accordingly.
:param file: the path to the file where the data is stored
:return:
conn: an instance of HomeKitHTTPConnection
c2a_key: the key used for communication from controller to accessory
a2c_key: the key used for communication from accessory to controller
"""
conn = None
c2a_key = None
a2c_key = None
# load file with pairing data
pairing_data = load_pairing(file)
if pairing_data is None:
print('File {file} not found!'.format(file=file))
sys.exit(-1)
# we need ip and port of the device
connected = False
if 'AccessoryIP' in pairing_data and 'AccessoryPort' in pairing_data:
# if it is known, try it
accessory_ip = pairing_data['AccessoryIP']
accessory_port = pairing_data['AccessoryPort']
conn = HomeKitHTTPConnection(accessory_ip, port=accessory_port)
try:
conn.connect()
c2a_key, a2c_key = get_session_keys(conn, pairing_data)
connected = True
except Exception:
connected = False
if not connected:
# no connection yet, so ip / port might have changed and we need to fall back to slow zeroconf lookup
device_id = pairing_data['AccessoryPairingID']
connection_data = find_device_ip_and_port(device_id)
if connection_data is None:
print('Device {id} not found'.format(id=device_id))
sys.exit(-1)
conn = HomeKitHTTPConnection(connection_data['ip'], port=connection_data['port'])
pairing_data['AccessoryIP'] = connection_data['ip']
pairing_data['AccessoryPort'] = connection_data['port']
save_pairing(file, pairing_data)
c2a_key, a2c_key = get_session_keys(conn, pairing_data)
return conn, c2a_key, a2c_key
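# Minimal usage sketch (the pairing file path is a placeholder):
#
#   conn, c2a_key, a2c_key = create_session('./pairing.json')
#   ... perform secured requests over `conn` using the session keys ...
#   conn.close()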
def check_convert_value(val, target_format):
"""
Checks if the given value is of the given format or is convertible into the format. If the value is not convertible,
a FormatException is thrown.
:param val: the original value
:param target_format: the target type of the conversion
:raises FormatException: if the value is not of the given format or cannot be converted.
:return: the converted value
"""
if target_format == CharacteristicFormats.bool:
try:
val = strtobool(val)
except ValueError:
raise FormatException('"{v}" is no valid "{t}"!'.format(v=val, t=target_format))
if target_format in [CharacteristicFormats.uint64, CharacteristicFormats.uint32,
CharacteristicFormats.uint16, CharacteristicFormats.uint8,
CharacteristicFormats.int]:
try:
val = int(val)
except ValueError:
raise FormatException('"{v}" is no valid "{t}"!'.format(v=val, t=target_format))
if target_format == CharacteristicFormats.float:
try:
val = float(val)
except ValueError:
raise FormatException('"{v}" is no valid "{t}"!'.format(v=val, t=target_format))
if target_format == CharacteristicFormats.data:
try:
base64.decodebytes(val.encode())
except binascii.Error:
raise FormatException('"{v}" is no valid "{t}"!'.format(v=val, t=target_format))
if target_format == CharacteristicFormats.tlv8:
try:
tmp_bytes = base64.decodebytes(val.encode())
TLV.decode_bytes(tmp_bytes)
except (binascii.Error, TlvParseException):
raise FormatException('"{v}" is no valid "{t}"!'.format(v=val, t=target_format))
return val
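# Illustrative examples (values are placeholders):
#
#   check_convert_value('1', CharacteristicFormats.uint8)     # -> 1
#   check_convert_value('2.5', CharacteristicFormats.float)   # -> 2.5
#   check_convert_value('maybe', CharacteristicFormats.bool)  # raises FormatException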
|
#!/usr/bin/python3
"""
This module contains the tests for FileStorage class
"""
import unittest
import io
import sys
import models
from models.engine.file_storage import FileStorage
from models.base_model import BaseModel
from models.user import User
from models.state import State
from models.city import City
from models.amenity import Amenity
from models.place import Place
from models.review import Review
class TestFileStorage(unittest.TestCase):
"""
Test for class FileStorage and its methods
"""
def setUp(self):
"""
Set up method
"""
self.storage = FileStorage()
def tearDown(self):
"""
Tear down method
"""
pass
def test_private_class_attributes(self):
with self.assertRaises(AttributeError):
print(self.storage.__objects)
with self.assertRaises(AttributeError):
print(self.storage.__file_path)
def test_file_path(self):
self.assertEqual(self.storage._FileStorage__file_path, "file.json")
def test_objects(self):
self.assertIs(type(self.storage._FileStorage__objects), dict)
def test_all(self):
obj_dict = self.storage.all()
self.assertTrue(type(obj_dict) is dict)
def test_new(self):
base1 = BaseModel()
city1 = City()
state1 = State()
base1_id = "{}.{}".format(base1.__class__.__name__, base1.id)
city1_id = "{}.{}".format(city1.__class__.__name__, city1.id)
state1_id = "{}.{}".format(state1.__class__.__name__, state1.id)
obj_dict = self.storage.all()
self.assertTrue(base1_id in obj_dict)
self.assertTrue(obj_dict[base1_id] is base1)
self.assertTrue(city1_id in obj_dict)
self.assertTrue(state1_id in obj_dict)
self.assertTrue(obj_dict[city1_id] is city1)
self.assertTrue(obj_dict[state1_id] is state1)
def test_save(self):
base1 = BaseModel()
city1 = City()
state1 = State()
base1_id = "{}.{}".format(base1.__class__.__name__, base1.id)
city1_id = "{}.{}".format(city1.__class__.__name__, city1.id)
state1_id = "{}.{}".format(state1.__class__.__name__, state1.id)
obj_dict_presave = self.storage.all()
base1.save()
self.storage.reload()
obj_dict_postsave = self.storage.all()
self.assertTrue(base1_id in obj_dict_postsave)
self.assertTrue(city1_id in obj_dict_postsave)
self.assertTrue(state1_id in obj_dict_postsave)
self.assertTrue(obj_dict_postsave == obj_dict_presave)
def test_reload(self):
base1 = BaseModel()
city1 = City()
state1 = State()
base1_id = "{}.{}".format(base1.__class__.__name__, base1.id)
city1_id = "{}.{}".format(city1.__class__.__name__, city1.id)
state1_id = "{}.{}".format(state1.__class__.__name__, state1.id)
obj_dict_presave = self.storage.all()
base1.save()
self.storage.reload()
obj_dict_postsave = self.storage.all()
self.assertTrue(base1_id in obj_dict_postsave)
self.assertTrue(city1_id in obj_dict_postsave)
self.assertTrue(state1_id in obj_dict_postsave)
self.assertTrue(obj_dict_postsave == obj_dict_presave)
if __name__ == '__main__':
unittest.main()
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script evaluates a neural language model (Transformer) trained with
`examples/nlp/language_modeling/transformer_lm.py' as a rescorer for ASR systems.
Given a trained TransformerLMModel `.nemo` file, this script can be used to re-score the beams obtained from a beam
search decoder of an ASR model.
USAGE:
1. Obtain a `.tsv` file with beams and their corresponding scores. Scores can come from a regular beam search decoder or
    from fusion with N-gram LM scores. For a given beam size `beam_size` and a number of examples
for evaluation `num_eval_examples`, it should contain (`beam_size` x `num_eval_examples`) lines of
form `beam_candidate_text \t score`. This file can be generated by `scripts/asr_language_modeling/ngram_lm/eval_beamsearch_ngram.py`.
2. Rescore the candidates:
python eval_neural_rescorer.py
--lm_model=[path to .nemo file of the LM]
--beams_file=[path to beams .tsv file]
--beam_size=[size of the beams]
--eval_manifest=[path to eval manifest .json file]
--batch_size=[batch size used for inference on the LM model]
--alpha=[the value for the parameter rescorer_alpha]
--beta=[the value for the parameter rescorer_beta]
You may find more info on how to use this script at:
https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/asr_language_modeling.html
"""
import contextlib
import json
from argparse import ArgumentParser
import editdistance
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import tqdm
from nemo.collections.nlp.models.language_modeling import TransformerLMModel
from nemo.utils import logging
class BeamScoresDataset(torch.utils.data.Dataset):
"""
    Dataset to read the score file containing the beams and their scores
Args:
data_path: path to the beams file
tokenizer: tokenizer of the LM model
manifest_path: manifest `.json` file which contains the ground truths transcripts
beam_size: the number of beams per sample
max_seq_length: the maximum length of sequences
"""
def __init__(self, data_path, tokenizer, manifest_path, beam_size=128, max_seq_length=256):
self.data = pd.read_csv(data_path, delimiter="\t", header=None)
self.tokenizer = tokenizer
self.ground_truths = []
with open(manifest_path, 'r') as f_orig:
for line in f_orig:
item = json.loads(line)
self.ground_truths.append(item['text'])
self.beam_size = beam_size
self.max_seq_length = max_seq_length
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
text = str(self.data[0][idx])
tokens = [self.tokenizer.bos_id] + self.tokenizer.text_to_ids(text) + [self.tokenizer.eos_id]
input_ids = [self.tokenizer.pad_id] * self.max_seq_length
input_ids[: len(tokens)] = tokens
input_ids = np.array(input_ids)
input_mask = (input_ids != self.tokenizer.pad_id).astype(np.float32)
acoustic_score = self.data[1][idx]
dist = editdistance.eval(text.split(), self.ground_truths[idx // self.beam_size].split())
ref_len = len(self.ground_truths[idx // self.beam_size].split())
len_in_chars = len(str(self.data[0][idx]))
return input_ids, input_mask, acoustic_score, dist, ref_len, len_in_chars, idx
def linear_search_wer(
dists, scores1, scores2, total_len, coef_range=[0, 10], coef_steps=10000, param_name='parameter'
):
"""
    Performs a linear search to find the best coefficient when two sets of scores are linearly fused.
Args:
        dists: Tensor of the distances between the ground truth and the candidates with shape of [number of samples, beam size]
scores1: Tensor of the first set of scores with shape of [number of samples, beam size]
scores2: Tensor of the second set of scores with shape of [number of samples, beam size]
total_len: The total length of all samples
coef_range: the search range for the coefficient
coef_steps: the number of steps that the search range would get divided into
param_name: the name of the parameter to be used in the figure
Output:
        (best coefficient found, best WER achieved)
"""
scale = scores1.mean().abs().item() / scores2.mean().abs().item()
left = coef_range[0] * scale
right = coef_range[1] * scale
coefs = np.linspace(left, right, coef_steps)
best_wer = 10000
best_coef = left
wers = []
for coef in coefs:
scores = scores1 + coef * scores2
wer = compute_wer(dists, scores, total_len)
wers.append(wer)
if wer < best_wer:
best_wer = wer
best_coef = coef
plt.plot(coefs, wers)
plt.title(f'WER% after rescoring with different values of {param_name}')
plt.ylabel('WER%')
plt.xlabel(param_name)
plt.show()
return best_coef, best_wer
def compute_wer(dists, scores, total_len):
"""
Sorts the candidates based on the scores and calculates the WER with the new top candidates.
Args:
dists: Tensor of the distances between the ground truth and the candidates with shape of [number of samples, beam size]
scores: Tensor of the scores for candidates with shape of [number of samples, beam size]
total_len: The total length of all samples
Output:
WER with the new scores
"""
indices = scores.max(dim=1, keepdim=True)[1]
wer = dists.gather(dim=1, index=indices).sum() / total_len
wer = wer.item()
return wer
def main():
parser = ArgumentParser()
parser.add_argument("--lm_model_file", type=str, required=True, help="path to LM model .nemo file")
parser.add_argument("--beams_file", type=str, required=True, help="path to beams .tsv file")
parser.add_argument(
"--eval_manifest", type=str, required=True, help="path to the evaluation `.json` manifest file"
)
parser.add_argument("--beam_size", type=int, required=True, help="number of beams per candidate")
parser.add_argument("--batch_size", type=int, default=256, help="inference batch size")
parser.add_argument("--alpha", type=float, default=None, help="parameter alpha of the fusion")
parser.add_argument("--beta", type=float, default=None, help="parameter beta of the fusion")
parser.add_argument(
"--scores_output_file", default=None, type=str, help="The optional path to store the rescored beams"
)
parser.add_argument(
"--device", default="cuda", type=str, help="The device to load the model onto to calculate the scores"
)
parser.add_argument(
"--use_amp", action="store_true", help="Whether to use AMP if available to calculate the scores"
)
args = parser.parse_args()
device = args.device
if device.startswith("cuda") and not torch.cuda.is_available():
logging.info(f"cuda is not available! switched to cpu.")
device = "cpu"
if args.lm_model_file.endswith(".nemo"):
logging.info("Attempting to initialize from .nemo file")
model = TransformerLMModel.restore_from(
restore_path=args.lm_model_file, map_location=torch.device(device)
).eval()
else:
raise NotImplementedError(f"Only supports .nemo files, but got: {args.model}")
max_seq_length = model.encoder._embedding.position_embedding.pos_enc.shape[0]
dataset = BeamScoresDataset(args.beams_file, model.tokenizer, args.eval_manifest, args.beam_size, max_seq_length)
data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=args.batch_size)
if args.use_amp:
if torch.cuda.is_available() and hasattr(torch.cuda, 'amp') and hasattr(torch.cuda.amp, 'autocast'):
logging.info("AMP is enabled!\n")
autocast = torch.cuda.amp.autocast
else:
@contextlib.contextmanager
def autocast():
yield
logging.info(f"Rescoring with beam_size: {args.beam_size}")
logging.info("Calculating the scores...")
with autocast():
with torch.no_grad():
am_scores, lm_scores, dists, ref_lens, lens_in_chars = [], [], [], [], []
for batch in tqdm.tqdm(data_loader):
input_ids, input_mask, acoustic_score, dist, ref_len, len_in_chars, idx = batch
max_len_in_batch = input_mask.sum(dim=0).argmin().item()
input_ids, input_mask = input_ids[:, :max_len_in_batch], input_mask[:, :max_len_in_batch]
if torch.cuda.is_available():
input_ids, input_mask = input_ids.to(device), input_mask.to(device)
dist, acoustic_score, len_in_chars = (
dist.to(device),
acoustic_score.to(device),
len_in_chars.to(device),
)
log_probs = model.forward(input_ids[:, :-1], input_mask[:, :-1])
target_log_probs = log_probs.gather(2, input_ids[:, 1:].unsqueeze(2)).squeeze(2)
neural_lm_score = torch.sum(target_log_probs * input_mask[:, 1:], dim=-1)
am_scores.append(acoustic_score)
lm_scores.append(neural_lm_score)
dists.append(dist)
ref_lens.append(ref_len)
lens_in_chars.append(len_in_chars)
am_scores = torch.cat(am_scores).view(-1, args.beam_size)
lm_scores = torch.cat(lm_scores).view(-1, args.beam_size)
dists = torch.cat(dists).view(-1, args.beam_size)
ref_lens = torch.cat(ref_lens).view(-1, args.beam_size)
lens_in_chars = torch.cat(lens_in_chars).view(-1, args.beam_size).to(am_scores.dtype)
total_len = ref_lens[:, 0].sum()
model_wer = dists[:, 0].sum() / total_len
ideal_wer = dists.min(dim=1)[0].sum() / total_len
if args.alpha is None:
logging.info("Linear search for alpha...")
coef1, _ = linear_search_wer(
dists=dists, scores1=am_scores, scores2=lm_scores, total_len=total_len, param_name='alpha'
)
coef1 = np.round(coef1, 3)
logging.info(f"alpha={coef1} achieved the best WER.")
logging.info(f"------------------------------------------------")
else:
coef1 = args.alpha
scores = am_scores + coef1 * lm_scores
if args.beta is None:
logging.info("Linear search for beta...")
coef2, _ = linear_search_wer(
dists=dists, scores1=scores, scores2=lens_in_chars, total_len=total_len, param_name='beta'
)
coef2 = np.round(coef2, 3)
logging.info(f"beta={coef2} achieved the best WER.")
logging.info(f"------------------------------------------------")
else:
coef2 = args.beta
new_scores = am_scores + coef1 * lm_scores + coef2 * lens_in_chars
rescored_wer = compute_wer(dists, new_scores, total_len)
logging.info(f"Input beams WER: {np.round(model_wer.item() * 100, 2)}%")
logging.info(f"------------------------------------------------")
logging.info(f" +LM rescoring WER: {np.round(rescored_wer * 100, 2)}%")
logging.info(f" with alpha={coef1}, beta={coef2}")
logging.info(f"------------------------------------------------")
logging.info(f"Best possible WER: {np.round(ideal_wer.item() * 100, 2)}%")
logging.info(f"------------------------------------------------")
new_scores_flatten = new_scores.flatten()
if args.scores_output_file is not None:
logging.info(f'Saving the candidates with their new scores at `{args.scores_output_file}`...')
with open(args.scores_output_file, "w") as fout:
for sample_id in range(len(dataset)):
fout.write(f"{dataset.data[0][sample_id]}\t{new_scores_flatten[sample_id]}\n")
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 21 19:43:50 2022
Illustrating a basic transient magnetic diffusion problem; see Jackson Section 5.18
@author: zettergm
"""
import numpy as np
import scipy.sparse.linalg
import scipy.sparse
from scipy.special import erf
import matplotlib.pyplot as plt
from numpy import pi,sqrt,abs
from difftools import matrix_kernel
# Material parameters
mu=4*pi*1e-7
sigma=1e6
D=1/mu/sigma # equivalent diffusion coefficient
a=1
H0=1
nu=1/mu/sigma/a**2
# Size of grid
lz=250
Nmax=200
z=np.linspace(-5*a,5*a,lz)
dz=z[1]-z[0]
dt = 5*dz**2/D/2 # the explicit stability limit results in really slow time stepping; use a step 5 times larger.
# This could definitely benefit from sparse storage and a banded/tridiagonal solver
#A=np.exp(-(x**2/2))
Hx=np.zeros(lz)
indmin=np.argmin(abs(z+a))
indmax=np.argmin(abs(z-a))
Hx[indmin:indmax]=1
# Matrix defining finite-difference equation for laplacian operator, one-time setup for this problem
Msparse=matrix_kernel(lz,dt,dz,D)
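# matrix_kernel comes from the local difftools module and is not shown here. As a rough
# sketch only (an assumption about its behavior, not the actual implementation), a BTCS
# system matrix for dH/dt = D d^2H/dz^2 with Dirichlet boundaries could be assembled like
# this, where each interior row encodes
#   -r*H[i-1]^{n+1} + (1 + 2*r)*H[i]^{n+1} - r*H[i+1]^{n+1} = H[i]^n,  with r = D*dt/dz**2
def _btcs_matrix_sketch(lz, dt, dz, D):
    r = D*dt/dz**2
    main = np.full(lz, 1.0 + 2.0*r)
    off = np.full(lz-1, -r)
    M = scipy.sparse.diags([off, main, off], offsets=[-1, 0, 1], format="lil")
    M[0, :] = 0.0
    M[0, 0] = 1.0    # keep boundary rows as identity (field held fixed at the ends)
    M[-1, :] = 0.0
    M[-1, -1] = 1.0
    return M.tocsr()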
rhs=np.zeros( (lz,1) )
# time iterations
for n in range(0,Nmax):
# set up time-dependent part of the problem and solve
for i in range(1,lz-1):
rhs[i]=Hx[i]
rhssparse=scipy.sparse.csr_matrix(np.reshape(rhs,[lz,1]))
Hx=scipy.sparse.linalg.spsolve(Msparse,rhssparse,use_umfpack=True) # umfpack is overkill for this but will presumably work
# Solution from Jackson eqn. 5.176
HxJ=H0/2*( erf((1+abs(z)/a)/2/sqrt((n+1)*dt*nu)) + erf((1-abs(z)/a)/2/sqrt((n+1)*dt*nu)) )
# plot results of each time step and pause briefly
plt.figure(1,dpi=150)
plt.clf()
plt.plot(z,HxJ,'o')
plt.plot(z,Hx)
plt.xlabel("$x$")
plt.ylabel("$H_x(z)$")
plt.title( "$t$ = %6.4f s" % ( (n+1)*dt) )
plt.ylim((0,H0))
plt.xlim((-2*a,2*a))
plt.legend( ("Jackson 5.176","Numerical BTCS") )
plt.show()
plt.pause(0.01)
|
from bs4 import BeautifulSoup
from requests import get
import json
class Script:
def query(self, url):
datas = get(url)
soup = BeautifulSoup(datas.text, 'html.parser')
tag = soup.find_all('article')
data = []
for i in tag:
try:
title = i.find('h2').text
link = i.find('a').get('href')
gambar = i.find('img').get('src')
tipe = i.find('span', class_="kanal").text
waktu = i.find('span', class_="date").text
data.append({
"judul": title,
"link": link,
"poster": gambar,
"tipe": tipe,
"waktu": waktu
})
except:
pass
return data
def index(self):
return self.query('https://www.cnnindonesia.com/')
def nasional(self):
return self.query('https://www.cnnindonesia.com/nasional')
def internasional(self):
return self.query('https://www.cnnindonesia.com/internasional')
def ekonomi(self):
return self.query('https://www.cnnindonesia.com/ekonomi')
def olahraga(self):
return self.query('https://www.cnnindonesia.com/olahraga')
def teknologi(self):
return self.query('https://www.cnnindonesia.com/teknologi')
def hiburan(self):
return self.query('https://www.cnnindonesia.com/hiburan')
def social(self):
return self.query('https://www.cnnindonesia.com/gaya-hidup')
def detail(self, url):
data = []
try:
req = get(url)
soup = BeautifulSoup(req.text, 'html.parser')
tag = soup.find('div', class_="detail_text")
gambar = soup.find('div', class_='media_artikel').find('img').get('src')
judul = soup.find('h1', class_='title').text
body = tag.text
data.append({
"judul": judul,
"poster": gambar,
"body": body,
})
except:
data.append({
"message": "network error",
})
return data
def search(self,q):
return self.query('https://www.cnnindonesia.com/search/?query=' + q)
if __name__ != '__main__':
Code = Script()
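# Usage sketch (hypothetical; requires network access and depends on the current
# cnnindonesia.com markup):
#   scraper = Script()
#   for item in scraper.teknologi()[:3]:
#       print(item["judul"], item["link"])
#   print(scraper.detail(item["link"]))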
|
'''Some helper functions for PyTorch, including:
- get_mean_and_std: calculate the mean and std value of dataset.
- msr_init: net parameter initialization.
- progress_bar: progress bar mimic xlua.progress.
'''
import os
import sys
import time
import math
import torch
import torch.nn as nn
import torch.nn.init as init
import numpy as np
def get_mean_and_std(dataset):
'''Compute the mean and std value of dataset.'''
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
mean = torch.zeros(3)
std = torch.zeros(3)
print('==> Computing mean and std..')
for inputs, targets in dataloader:
for i in range(3):
mean[i] += inputs[:,i,:,:].mean()
std[i] += inputs[:,i,:,:].std()
mean.div_(len(dataset))
std.div_(len(dataset))
return mean, std
def init_params(net):
'''Init layer parameters.'''
for m in net.modules():
if isinstance(m, nn.Conv2d):
            # Use the in-place initializers and guard on `bias is not None`
            # to avoid ambiguous tensor truth-value errors.
            init.kaiming_normal_(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal_(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant_(m.bias, 0)
_, term_width = os.popen('stty size', 'r').read().split()
term_width = int(term_width)
TOTAL_BAR_LENGTH = 65.
last_time = time.time()
begin_time = last_time
def progress_bar(current, total, msg=None):
global last_time, begin_time
if current == 0:
begin_time = time.time() # Reset for new bar.
cur_len = int(TOTAL_BAR_LENGTH*current/total)
rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
sys.stdout.write(' [')
for i in range(cur_len):
sys.stdout.write('=')
sys.stdout.write('>')
for i in range(rest_len):
sys.stdout.write('.')
sys.stdout.write(']')
cur_time = time.time()
step_time = cur_time - last_time
last_time = cur_time
tot_time = cur_time - begin_time
L = []
L.append(' Step: %s' % format_time(step_time))
L.append(' | Tot: %s' % format_time(tot_time))
if msg:
L.append(' | ' + msg)
msg = ''.join(L)
sys.stdout.write(msg)
for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
sys.stdout.write(' ')
# Go back to the center of the bar.
for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
sys.stdout.write('\b')
sys.stdout.write(' %d/%d ' % (current+1, total))
if current < total-1:
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
sys.stdout.flush()
def format_time(seconds):
days = int(seconds / 3600/24)
seconds = seconds - days*3600*24
hours = int(seconds / 3600)
seconds = seconds - hours*3600
minutes = int(seconds / 60)
seconds = seconds - minutes*60
secondsf = int(seconds)
seconds = seconds - secondsf
millis = int(seconds*1000)
f = ''
i = 1
if days > 0:
f += str(days) + 'D'
i += 1
if hours > 0 and i <= 2:
f += str(hours) + 'h'
i += 1
if minutes > 0 and i <= 2:
f += str(minutes) + 'm'
i += 1
if secondsf > 0 and i <= 2:
f += str(secondsf) + 's'
i += 1
if millis > 0 and i <= 2:
f += str(millis) + 'ms'
i += 1
if f == '':
f = '0ms'
return f
class Cutout(object):
"""Randomly mask out one or more patches from an image.
Args:
n_holes (int): Number of patches to cut out of each image.
length (int): The length (in pixels) of each square patch.
"""
def __init__(self, n_holes, length):
self.n_holes = n_holes
self.length = length
def __call__(self, img):
"""
Args:
img (Tensor): Tensor image of size (C, H, W).
Returns:
Tensor: Image with n_holes of dimension length x length cut out of it.
"""
h = img.size(1)
w = img.size(2)
mask = np.ones((h, w), np.float32)
for n in range(self.n_holes):
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img = img * mask
return img
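# A minimal sketch of how Cutout is usually composed with torchvision transforms for
# CIFAR-style training (torchvision is assumed to be installed; the crop size and hole
# length below are illustrative, not values taken from this file).
def example_cutout_pipeline():
    import torchvision.transforms as transforms
    return transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        Cutout(n_holes=1, length=16),  # operates on the (C, H, W) tensor produced by ToTensor
    ])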
|
# -*- coding: utf-8 -*-
"""
lockfile.py - Platform-independent advisory file locks.
Requires Python 2.5 unless you apply 2.4.diff
Locking is done on a per-thread basis instead of a per-process basis.
Usage:
>>> lock = LockFile('somefile')
>>> try:
... lock.acquire()
... except AlreadyLocked:
... print 'somefile', 'is locked already.'
... except LockFailed:
... print 'somefile', 'can\\'t be locked.'
... else:
... print 'got lock'
got lock
>>> print lock.is_locked()
True
>>> lock.release()
>>> lock = LockFile('somefile')
>>> print lock.is_locked()
False
>>> with lock:
... print lock.is_locked()
True
>>> print lock.is_locked()
False
>>> lock = LockFile('somefile')
>>> # It is okay to lock twice from the same thread...
>>> with lock:
... lock.acquire()
...
>>> # Though no counter is kept, so you can't unlock multiple times...
>>> print lock.is_locked()
False
Exceptions:
Error - base class for other exceptions
LockError - base class for all locking exceptions
AlreadyLocked - Another thread or process already holds the lock
LockFailed - Lock failed for some other reason
UnlockError - base class for all unlocking exceptions
AlreadyUnlocked - File was not locked.
NotMyLock - File was locked but not by the current thread/process
"""
from __future__ import absolute_import
import functools
import os
import socket
import threading
import warnings
# Work with PEP8 and non-PEP8 versions of threading module.
if not hasattr(threading, "current_thread"):
threading.current_thread = threading.currentThread
if not hasattr(threading.Thread, "get_name"):
threading.Thread.get_name = threading.Thread.getName
__all__ = [
"Error",
"LockError",
"LockTimeout",
"AlreadyLocked",
"LockFailed",
"UnlockError",
"NotLocked",
"NotMyLock",
"LinkFileLock",
"MkdirFileLock",
"SQLiteFileLock",
"LockBase",
"locked",
]
class Error(Exception):
"""
Base class for other exceptions.
>>> try:
... raise Error
... except Exception:
... pass
"""
pass
class LockError(Error):
"""
Base class for error arising from attempts to acquire the lock.
>>> try:
... raise LockError
... except Error:
... pass
"""
pass
class LockTimeout(LockError):
"""Raised when lock creation fails within a user-defined period of time.
>>> try:
... raise LockTimeout
... except LockError:
... pass
"""
pass
class AlreadyLocked(LockError):
"""Some other thread/process is locking the file.
>>> try:
... raise AlreadyLocked
... except LockError:
... pass
"""
pass
class LockFailed(LockError):
"""Lock file creation failed for some other reason.
>>> try:
... raise LockFailed
... except LockError:
... pass
"""
pass
class UnlockError(Error):
"""
Base class for errors arising from attempts to release the lock.
>>> try:
... raise UnlockError
... except Error:
... pass
"""
pass
class NotLocked(UnlockError):
"""Raised when an attempt is made to unlock an unlocked file.
>>> try:
... raise NotLocked
... except UnlockError:
... pass
"""
pass
class NotMyLock(UnlockError):
"""Raised when an attempt is made to unlock a file someone else locked.
>>> try:
... raise NotMyLock
... except UnlockError:
... pass
"""
pass
class _SharedBase(object):
def __init__(self, path):
self.path = path
def acquire(self, timeout=None):
"""
Acquire the lock.
* If timeout is omitted (or None), wait forever trying to lock the
file.
* If timeout > 0, try to acquire the lock for that many seconds. If
the lock period expires and the file is still locked, raise
LockTimeout.
* If timeout <= 0, raise AlreadyLocked immediately if the file is
already locked.
"""
        raise NotImplementedError("implement in subclass")
def release(self):
"""
Release the lock.
If the file is not locked, raise NotLocked.
"""
        raise NotImplementedError("implement in subclass")
def __enter__(self):
"""
Context manager support.
"""
self.acquire()
return self
def __exit__(self, *_exc):
"""
Context manager support.
"""
self.release()
def __repr__(self):
return "<%s: %r>" % (self.__class__.__name__, self.path)
class LockBase(_SharedBase):
"""Base class for platform-specific lock classes."""
def __init__(self, path, threaded=True, timeout=None):
"""
>>> lock = LockBase('somefile')
>>> lock = LockBase('somefile', threaded=False)
"""
super(LockBase, self).__init__(path)
self.lock_file = os.path.abspath(path) + ".lock"
self.hostname = socket.gethostname()
self.pid = os.getpid()
if threaded:
t = threading.current_thread()
# Thread objects in Python 2.4 and earlier do not have ident
            # attrs. Work around that.
ident = getattr(t, "ident", hash(t))
self.tname = "-%x" % (ident & 0xffffffff)
else:
self.tname = ""
dirname = os.path.dirname(self.lock_file)
# unique name is mostly about the current process, but must
# also contain the path -- otherwise, two adjacent locked
# files conflict (one file gets locked, creating lock-file and
# unique file, the other one gets locked, creating lock-file
# and overwriting the already existing lock-file, then one
# gets unlocked, deleting both lock-file and unique file,
# finally the last lock errors out upon releasing.
self.unique_name = os.path.join(
dirname,
"%s%s.%s%s" % (self.hostname, self.tname, self.pid, hash(self.path)),
)
self.timeout = timeout
def is_locked(self):
"""
Tell whether or not the file is locked.
"""
        raise NotImplementedError("implement in subclass")
def i_am_locking(self):
"""
Return True if this object is locking the file.
"""
        raise NotImplementedError("implement in subclass")
def break_lock(self):
"""
Remove a lock. Useful if a locking thread failed to unlock.
"""
        raise NotImplementedError("implement in subclass")
def __repr__(self):
return "<%s: %r -- %r>" % (self.__class__.__name__, self.unique_name, self.path)
def _fl_helper(cls, mod, *args, **kwds):
warnings.warn(
"Import from %s module instead of lockfile package" % mod,
DeprecationWarning,
stacklevel=2,
)
    # This is a bit funky, but it's only for a while. The way the unit tests
# are constructed this function winds up as an unbound method, so it
# actually takes three args, not two. We want to toss out self.
if not isinstance(args[0], str):
# We are testing, avoid the first arg
args = args[1:]
if len(args) == 1 and not kwds:
kwds["threaded"] = True
return cls(*args, **kwds)
def LinkFileLock(*args, **kwds):
"""Factory function provided for backwards compatibility.
Do not use in new code. Instead, import LinkLockFile from the
lockfile.linklockfile module.
"""
from . import linklockfile
return _fl_helper(linklockfile.LinkLockFile, "lockfile.linklockfile", *args, **kwds)
def MkdirFileLock(*args, **kwds):
"""Factory function provided for backwards compatibility.
Do not use in new code. Instead, import MkdirLockFile from the
lockfile.mkdirlockfile module.
"""
from . import mkdirlockfile
return _fl_helper(
mkdirlockfile.MkdirLockFile, "lockfile.mkdirlockfile", *args, **kwds
)
def SQLiteFileLock(*args, **kwds):
"""Factory function provided for backwards compatibility.
Do not use in new code. Instead, import SQLiteLockFile from the
lockfile.mkdirlockfile module.
"""
from . import sqlitelockfile
return _fl_helper(
sqlitelockfile.SQLiteLockFile, "lockfile.sqlitelockfile", *args, **kwds
)
def locked(path, timeout=None):
"""Decorator which enables locks for decorated function.
Arguments:
- path: path for lockfile.
- timeout (optional): Timeout for acquiring lock.
Usage:
@locked('/var/run/myname', timeout=0)
def myname(...):
...
"""
def decor(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
lock = FileLock(path, timeout=timeout)
lock.acquire()
try:
return func(*args, **kwargs)
finally:
lock.release()
return wrapper
return decor
if hasattr(os, "link"):
from . import linklockfile as _llf
LockFile = _llf.LinkLockFile
else:
from . import mkdirlockfile as _mlf
LockFile = _mlf.MkdirLockFile
FileLock = LockFile
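# Usage sketch of the module-level alias above, following the timeout semantics
# documented in LockBase.acquire (the path is illustrative):
#   lock = FileLock("/tmp/example.txt")
#   try:
#       lock.acquire(timeout=5)      # wait at most 5 seconds
#   except LockTimeout:
#       pass                         # somebody else still holds the lock
#   else:
#       try:
#           pass                     # do work while holding the lock
#       finally:
#           lock.release()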
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test_tensor_slice """
import numpy as np
import pytest
from mindspore import Tensor
from mindspore import context
from mindspore import dtype as mstype
from mindspore.nn import Cell
from ....mindspore_test_framework.mindspore_test import mindspore_test
from ....mindspore_test_framework.pipeline.forward.compile_forward \
import pipeline_for_compile_forward_ge_graph_for_case_by_case_config
class NetWorkSlicePositive(Cell):
def __init__(self):
super(NetWorkSlicePositive, self).__init__()
self.tensor_ret0 = Tensor(np.ones([1, 2, 2], np.int32))
self.tensor_ret1 = Tensor(np.ones([4, 7, 4], np.int32))
self.tensor_ret2 = Tensor(np.ones([6, 8, 10], np.int32))
self.tensor_ret3 = Tensor(np.ones([3, 8, 10], np.int32))
def construct(self, tensor):
ret0 = tensor[3:4:3, 1:5:2, 3:6:2] + self.tensor_ret0
ret1 = tensor[-6:4:1, 7:-8:-1, ::3] + self.tensor_ret1
ret2 = tensor[::, ::, ::] + self.tensor_ret2
ret3 = tensor[::2] + self.tensor_ret3
return ret0, ret1, ret2, ret3
class NetWorkSliceEllipsis(Cell):
def __init__(self):
super(NetWorkSliceEllipsis, self).__init__()
self.tensor_ret0 = Tensor(np.ones([2, 7, 8], np.int32))
self.tensor_ret1 = Tensor(np.ones([6, 7, 8, 9], np.int32))
self.tensor_ret2 = Tensor(np.ones([1, 6, 7, 8, 9], np.int32))
def construct(self, tensor):
ret0 = tensor[0:4:2, ..., 1] + self.tensor_ret0
ret1 = tensor[...] + self.tensor_ret1
ret2 = tensor[None] + self.tensor_ret2
ret3 = tensor[True] + self.tensor_ret2
return ret0, ret1, ret2, ret3
class NetWorkReduceDimension(Cell):
def __init__(self):
super(NetWorkReduceDimension, self).__init__()
self.tensor_ret0 = Tensor(np.ones([2, 4, 1], np.int32))
self.tensor_ret1 = Tensor(np.ones([3, 4], np.int32))
self.tensor_ret2 = Tensor(np.ones([6, 8], np.int32))
self.tensor_ret3 = Tensor(np.array(8, np.int32))
self.tensor_ret4 = Tensor(np.ones([8, 10], np.int32))
def construct(self, tensor):
ret0 = tensor[0:6:3, 1:5:1, 3:5:2] + self.tensor_ret0
ret1 = tensor[::2, 1, ::3] + self.tensor_ret1
ret2 = tensor[::, ::, 0] + self.tensor_ret2
ret3 = tensor[3, 2, 5] + self.tensor_ret3
ret4 = tensor[1] + self.tensor_ret4
return ret0, ret1, ret2, ret3, ret4
class NetWorkStepNegative(Cell):
def __init__(self):
super(NetWorkStepNegative, self).__init__()
self.tensor_ret = Tensor(np.ones([6, 5, 10], np.int32))
def construct(self, tensor):
ret = tensor[::1, -5::, ::-1] + self.tensor_ret
return ret
class NetWorkReduceToScalar(Cell):
def __init__(self):
super(NetWorkReduceToScalar, self).__init__()
self.tensor_ret = Tensor(np.array(9, np.int32))
def construct(self, tensor):
ret = tensor[2, 3, 4] + self.tensor_ret
return ret
class TensorAssignWithSliceError1(Cell):
def __init__(self):
super(TensorAssignWithSliceError1, self).__init__()
def construct(self, a, b):
a[1:3:-1,::] = b
return a
class TensorAssignWithSliceError2(Cell):
def __init__(self):
super(TensorAssignWithSliceError2, self).__init__()
def construct(self, a, b):
a[1:3:-1] = b
return a
class TensorAssignWithSlice2(Cell):
def __init__(self):
super(TensorAssignWithSlice2, self).__init__()
def construct(self, a, b):
a[1:5] = b
a[3:4] = 5
a[-1:1:-1] = b
a[-1:3:-1] = 5
a[::] = b
a[::] = 9
return a
class TensorAssignWithSlice(Cell):
def __init__(self):
super(TensorAssignWithSlice, self).__init__()
self.c = 2
def construct(self, a, b):
a[1:3,::] = b
a[2:3:,3:] = b
a[::] = b
a[::] = self.c
a[::,::] = b
a[::,::] = self.c
a[2:3:,0:, 4:1:-1] = b
a[2:3:,0:, 4:1:-1] = self.c
z = a
return z
def test_tensor_assign():
context.set_context(mode=context.GRAPH_MODE, save_graphs=True)
net = TensorAssignWithSlice()
    net2 = TensorAssignWithSlice2()
net_e1 = TensorAssignWithSliceError1()
net_e2 = TensorAssignWithSliceError2()
a = np.arange(60).reshape(3,4,5)
b = Tensor([1])
Ta = Tensor(a)
Ta4d = Tensor(a.reshape(1,3,4,5))
Tb= Tensor([1,3])
Tc= Tensor([])
t = Tensor([1, 2, 3, 4, 5, 6, 7, 8])
net(Ta, b)
net2(t, b)
# Error for A[Slice] = Number
# 1. A[Slice] = Number, Slice error
with pytest.raises(ValueError):
net_e2(t, 2)
# Error for A[Slice] = U, U is a Tensor
# 1. A[Slice] = U, u.size is error
with pytest.raises(ValueError):
net2(t, Tb)
# 2. A[Slice] = U, U is empty
with pytest.raises(ValueError):
net2(t, Tc)
# 3. A[Slice] = U, U.size error
with pytest.raises(ValueError):
net2(t, Tb)
# Error for A[Tuple(Slice...)] = Tensor
# 1. A[Tuple(Slice...)] = U, U is empty
with pytest.raises(ValueError):
net(Ta, Tc)
# 2. A[Tuple(Slice...)] = U, U.size error
with pytest.raises(ValueError):
net(Ta, Tb)
# 3. A[Tuple(Slice...)] = U, Slice error
with pytest.raises(ValueError):
net_e1(Ta, b)
# Error for A[Tuple(Slice...)] = Number
# 1. A[Tuple(Slice...)] = Number, Slice error
with pytest.raises(ValueError):
net_e1(Ta, 2)
net = TensorAssignWithInteger()
# Error for A[Number] = scalar/Tensor
# 1. A[Number] = U, U is a Tensor, u.size not match
with pytest.raises(ValueError):
net(Ta, Tb)
with pytest.raises(ValueError):
net(Ta, Tc)
# 2. A[Number] = U, the number index error
with pytest.raises(IndexError):
net(Ta4d, b)
# Error for A[(n,m)] = scalar/Tensor
# 1. A[(n,m)] = U, U is a tensor. u.size not match
net = TensorAssignWithTupleInteger()
with pytest.raises(ValueError):
net(Ta, Tc)
with pytest.raises(ValueError):
net(Ta, Tb)
# 2. A[(n,m)] = U, the number index error
with pytest.raises(IndexError):
net(Ta4d, b)
class TensorAssignWithInteger(Cell):
def __init__(self):
super(TensorAssignWithInteger, self).__init__()
def construct(self, a, b):
a[1] = 1
a[0] = b
return a
class TensorAssignWithTupleInteger(Cell):
def __init__(self):
super(TensorAssignWithTupleInteger, self).__init__()
def construct(self, a, b):
a[(1)] = 1
a[(1)] = b
a[(1,1)] = b
a[(1,1)] = 1
return a
class TensorAssignWithBoolTensorIndex(Cell):
def __init__(self):
super(TensorAssignWithBoolTensorIndex, self).__init__()
self.t = Tensor(np.arange(60).reshape([3,4,5]), dtype = mstype.float64)
    def construct(self, a, b, c, u_tensor, u_scalar):
a[c] = u_scalar
a[b] = u_tensor
z = a + self.t
return z
class TensorAssignWithBoolTensorIndexError(Cell):
def __init__(self):
super(TensorAssignWithBoolTensorIndexError, self).__init__()
def construct(self, a, b, c, u_tensor):
a[b][c] = u_tensor
return a
class TensorAssignWithBoolTensorIndex2(Cell):
def __init__(self):
super(TensorAssignWithBoolTensorIndex2, self).__init__()
self.t = Tensor(np.arange(6).reshape([2, 3]), dtype=mstype.float64)
self.t = Tensor(np.arange(60).reshape([3,4,5]), dtype = mstype.float64)
    def construct(self, a, u_tensor, u_scalar):
a[a > 8] = u_tensor
a[a >= 6] = u_scalar
a[a < 3] = u_scalar
a[a <= 5] = u_tensor
a[a == 5] = u_scalar
z = a + self.t
return z
class TensorAssignWithBoolTensorIndex2Error(Cell):
def __init__(self):
super(TensorAssignWithBoolTensorIndex2Error, self).__init__()
def construct(self, a, u_tensor):
a[a > 8][a > 5] = u_tensor
return a
a = np.random.uniform(1,10,[3,4,5])
b = a > 5
c = a < 3
Ta = Tensor(a)
Tb = Tensor(b)
Tc = Tensor(c)
Td = Tensor([True, True])
u_tensor = Tensor([1])
u_tensor_error = Tensor([1, 2])
t_1d = Tensor([1, 2, 3, 4, 5, 6, 7, 8])
u_scalar = 5
def test_tensor_assign_bool_index():
net1 = TensorAssignWithBoolTensorIndex()
net2 = TensorAssignWithBoolTensorIndex2()
net1(Ta, Tb, Tc, u_tensor, u_scalar)
net1(Ta, Tb, Tc, u_tensor, u_scalar)
with pytest.raises(ValueError):
net1(Ta, Td, Tc, u_tensor, u_scalar)
with pytest.raises(ValueError):
net1(Ta, u_tensor, Tc, u_tensor, u_scalar)
with pytest.raises(ValueError):
net1(Ta, Tb, Td, u_tensor, u_scalar)
with pytest.raises(ValueError):
net1(Ta, Tb, Ta, u_tensor, u_scalar)
with pytest.raises(ValueError):
net1(Ta, Tb, Tc, u_tensor_error, u_scalar)
# net1(Ta, u_tensor, Tc, u_tensor_error, u_scalar)
with pytest.raises(ValueError):
net2(Ta, u_tensor_error, u_scalar)
net3 = TensorAssignWithBoolTensorIndexError()
with pytest.raises(AttributeError):
net3(Ta, Tb, Tc, u_tensor)
with pytest.raises(AttributeError):
net3(Ta, Tb, Tc, u_scalar)
net4 = TensorAssignWithBoolTensorIndex2Error()
with pytest.raises(AttributeError):
net4(Ta, u_tensor)
with pytest.raises(AttributeError):
net4(Ta, u_scalar)
test_cases = [
('TensorAssignWithTupleInteger', {
'block': TensorAssignWithTupleInteger(),
'desc_inputs': [Ta, u_tensor],
}),
('TensorAssignWithInteger', {
'block': TensorAssignWithInteger(),
'desc_inputs': [Ta, u_tensor],
}),
('TensorAssignWithSlice', {
'block': TensorAssignWithSlice(),
'desc_inputs': [Ta, u_tensor],
}),
('TensorAssignWithSlice2', {
'block': TensorAssignWithSlice2(),
'desc_inputs': [t_1d, u_tensor],
}),
('TensorAssignWithBoolTensorIndex', {
'block': TensorAssignWithBoolTensorIndex(),
'desc_inputs': [Ta, Tb, Tc, u_tensor, u_scalar],
}),
('TensorAssignWithBoolTensorIndex2', {
'block': TensorAssignWithBoolTensorIndex2(),
'desc_inputs': [Ta, u_tensor, u_scalar],
}),
('SlicePositive', {
'block': NetWorkSlicePositive(),
'desc_inputs': [Tensor(np.ones([6, 8, 10], np.int32))],
}),
('SliceReduceDimension', {
'block': NetWorkReduceDimension(),
'desc_inputs': [Tensor(np.ones([6, 8, 10], np.int32))],
}),
('SliceNegative', {
'block': NetWorkStepNegative(),
'desc_inputs': [Tensor(np.ones([6, 8, 10], np.int32))],
}),
('SliceReduceToScalar', {
'block': NetWorkReduceToScalar(),
'desc_inputs': [Tensor(np.ones([6, 8, 10], np.int32))],
}),
('TensorSliceEllipsis', {
'block': NetWorkSliceEllipsis(),
'desc_inputs': [Tensor(np.ones([6, 7, 8, 9], np.int32))],
}),
]
@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config)
def test_compile():
context.set_context(mode=context.GRAPH_MODE)
return test_cases
def test_tensor_slice_reduce_out_of_bounds_neg():
class NetWork(Cell):
def __init__(self):
super(NetWork, self).__init__()
self.tensor_ret = Tensor(np.array(9, np.int32))
def construct(self, tensor):
ret = tensor[-7, 3, 4]
return ret
input_tensor = Tensor(np.ones([6, 8, 10], np.int32))
net = NetWork()
with pytest.raises(ValueError) as ex:
net(input_tensor)
assert "For 'StridedSlice' the `begin[0]` should be an int and must greater or equal to -6, but got `-7`" in str(ex.value)
def test_tensor_slice_reduce_out_of_bounds_positive():
class NetWork(Cell):
def __init__(self):
super(NetWork, self).__init__()
self.tensor_ret = Tensor(np.array(9, np.int32))
def construct(self, tensor):
ret = tensor[6, 3, 4]
return ret
input_tensor = Tensor(np.ones([6, 8, 10], np.int32))
net = NetWork()
with pytest.raises(ValueError) as ex:
net(input_tensor)
assert "For 'StridedSlice' the `begin[0]` should be an int and must less than 6, but got `6`" in str(ex.value)
|
# Copyright 2017 Robert Csordas. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import os
import fcntl
class LockFile:
def __init__(self, fname):
self._fname = fname
self._fd = None
def acquire(self):
self._fd=open(self._fname, "w")
os.chmod(self._fname, 0o777)
fcntl.lockf(self._fd, fcntl.LOCK_EX)
def release(self):
fcntl.lockf(self._fd, fcntl.LOCK_UN)
self._fd.close()
self._fd = None
def __enter__(self):
self.acquire()
def __exit__(self, exc_type, exc_val, exc_tb):
self.release()
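# Minimal usage sketch (the path is illustrative): the context-manager form blocks in
# acquire() until the exclusive fcntl lock is granted and releases it on exit.
#   with LockFile("/tmp/myjob.lock"):
#       pass  # exclusive section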
|
m=float(input('How many meters? '))
c=m*100
mm=m*1000
print('The conversion of {} to centimeters is {} and to millimeters is {}.'.format(m,c,mm))
|
#!/usr/bin/python3
mice = {"number": 2, "names": [{"name": "Pinky", "tag": "the real genius"},{"name": "The Brain", "tag": "insane one"}], "world_domination_status": "pending"}
## print following
## Pinky is the real genius, and The Brain is the insane one
print(f'{mice["names"][0]["name"]} is {mice["names"][0]["tag"]}, and {mice["names"][1]["name"]} is the {mice["names"][1]["tag"]}.')
|
# coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-262
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class SyslogRemoteLoggingClientRef(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'moid': 'str',
'object_type': 'str'
}
attribute_map = {
'moid': 'Moid',
'object_type': 'ObjectType'
}
def __init__(self, moid=None, object_type=None):
"""
SyslogRemoteLoggingClientRef - a model defined in Swagger
"""
self._moid = None
self._object_type = None
if moid is not None:
self.moid = moid
if object_type is not None:
self.object_type = object_type
@property
def moid(self):
"""
Gets the moid of this SyslogRemoteLoggingClientRef.
:return: The moid of this SyslogRemoteLoggingClientRef.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this SyslogRemoteLoggingClientRef.
:param moid: The moid of this SyslogRemoteLoggingClientRef.
:type: str
"""
self._moid = moid
@property
def object_type(self):
"""
Gets the object_type of this SyslogRemoteLoggingClientRef.
:return: The object_type of this SyslogRemoteLoggingClientRef.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this SyslogRemoteLoggingClientRef.
:param object_type: The object_type of this SyslogRemoteLoggingClientRef.
:type: str
"""
self._object_type = object_type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, SyslogRemoteLoggingClientRef):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
from wtforms import StringField, Form
class EditSessionForm(Form):
subject = StringField('Subject')
date = StringField('Date')
other_user = StringField('Other User')
|
from typing import NamedTuple
def CsvExampleGen(
output_examples_uri: 'ExamplesUri',
input_base: str,
input_config: {'JsonObject': {'data_type': 'proto:tfx.components.example_gen.Input'}},
output_config: {'JsonObject': {'data_type': 'proto:tfx.components.example_gen.Output'}},
range_config: {'JsonObject': {'data_type': 'proto:tfx.configs.RangeConfig'}} = None,
beam_pipeline_args: list = None,
) -> NamedTuple('Outputs', [
('examples_uri', 'ExamplesUri'),
]):
from tfx.components.example_gen.csv_example_gen.component import CsvExampleGen as component_class
#Generated code
import os
import tempfile
from tensorflow.io import gfile
from google.protobuf import json_format, message
from tfx.types import channel_utils, artifact_utils
from tfx.components.base import base_executor
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message):
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
else:
argument_value_obj = argument_value
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments.get(name + '_uri') or arguments.get(name + '_path')
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path.rstrip('/') + '/' # Some TFX components require that the artifact URIs end with a slash
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = gfile.listdir(artifact_path)
# Workaround for https://github.com/tensorflow/tensorflow/issues/39167
subdirs = [subdir.rstrip('/') for subdir in subdirs]
split_names = [subdir.replace('Split-', '') for subdir in subdirs]
artifact.split_names = artifact_utils.encode_split_names(sorted(split_names))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = channel_utils.unwrap_channel_dict(component_class_instance.inputs.get_all())
output_dict = {}
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, channel in component_class_instance.outputs.items():
artifact_path = arguments.get('output_' + name + '_uri') or arguments.get(name + '_path')
if artifact_path:
artifact = channel.type()
artifact.uri = artifact_path.rstrip('/') + '/' # Some TFX components require that the artifact URIs end with a slash
artifact_list = [artifact]
channel._artifacts = artifact_list
output_dict[name] = artifact_list
print('component instance: ' + str(component_class_instance))
executor_context = base_executor.BaseExecutor.Context(
beam_pipeline_args=arguments.get('beam_pipeline_args'),
tmp_dir=tempfile.gettempdir(),
unique_id='tfx_component',
)
executor = component_class_instance.executor_spec.executor_class(executor_context)
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
return (output_examples_uri, )
|
# Packages up pygw so it's pip-installable
from setuptools import setup, find_packages
with open('README.md', 'r') as fh:
long_description = fh.read()
def get_version():
try:
from maven_version import get_maven_version
version = get_maven_version()
except ModuleNotFoundError:
# If maven version isn't found, it must be from the distribution
from pkg_resources import get_distribution
from pkg_resources import DistributionNotFound
version = get_distribution('pygw').version
return version
setup(
name='pygw',
author='GeoWave Contributors',
author_email='geowave.python@gmail.com',
description='GeoWave bindings for Python3',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://locationtech.github.io/geowave/',
project_urls={
'Documentation': 'https://locationtech.github.io/geowave/pydocs/',
'Source': 'https://github.com/locationtech/geowave/tree/master/python/src/main/python',
},
version=get_version(),
packages=find_packages(),
classifiers=[
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
],
install_requires=['py4j==0.10.8.1', 'shapely==1.7'],
python_requires='>=3,<3.8' # py4j does not support python 3.8 yet
)
|
# ------------------------------------------------------------------
# Copyright (c) 2020 PyInstaller Development Team.
#
# This file is distributed under the terms of the GNU General Public
# License (version 2.0 or later).
#
# The full license is available in LICENSE.GPL.txt, distributed with
# this software.
#
# SPDX-License-Identifier: GPL-2.0-or-later
# ------------------------------------------------------------------
#
# NumPy aware dynamic Python compiler using LLVM
# https://github.com/numba/numba
#
# Tested with:
# numba 0.26 (Anaconda 4.1.1, Windows), numba 0.28 (Linux)
excludedimports = ["IPython", "scipy"]
hiddenimports = ["llvmlite"]
|
import pandas as pd
import numpy as np
import yfinance as yf
from sklearn.linear_model import LinearRegression
import statsmodels
import statsmodels.api as sm
import statsmodels.tsa.stattools as ts
import datetime
import scipy.stats
import math
import openpyxl as pyxl
from scipy import signal
from scipy import stats as ss
import statistics
from finta import TA
from filterpy.kalman import KalmanFilter
from filterpy.common import Q_discrete_white_noise
import pandas_ta as ta
from pingouin import gzscore
def GaussianRandomStockPrice(mu, sigma, n, end, freq, S0=100):
"""
    This function randomly creates a stock price series based on Gaussian probabilities.
Arguments:
----------
- mu: float
The mean parameter
- sigma: float
        The standard deviation parameter
- n: int
Number of periods
- end: datetime date
        The last date of the series
    - freq: pandas frequency string
        The frequency of the data series:
- "D": days
- "min": minutes
- "s": seconds
- S0: float
The first stock price
Return:
----------
- RStock: Pandas DataFrame
        Contains the datetime as index and the random stock prices in a column
"""
RStock = np.random.normal(mu, sigma, n).astype("float")
RStock = pd.DataFrame(RStock)
RStock.rename(inplace=True, columns={RStock.columns[0]: "Return"})
RStock["Price"] = ((1 + RStock["Return"]).cumprod()) * S0
times = pd.date_range(end=end, freq=freq, periods=n)
RStock.index = times
RStock = pd.DataFrame(RStock["Price"])
return RStock
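# Example call (parameter values are illustrative): one year of daily prices with a
# small positive drift and roughly 1% daily volatility, ending today.
#   prices = GaussianRandomStockPrice(mu=0.0005, sigma=0.01, n=252,
#                                     end=datetime.date.today(), freq="D", S0=100)
#   print(prices.head())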
|
import random
from pyschieber.player.base_player import BasePlayer
from pyschieber.trumpf import Trumpf
class RandomPlayer(BasePlayer):
def choose_trumpf(self, geschoben):
return move(choices=list(Trumpf))
def choose_card(self, state=None):
cards = self.allowed_cards(state=state)
return move(choices=cards)
def move(choices):
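    # Generator-based move protocol: yield a random choice, then wait for the caller
    # to send() back whether the move was allowed; keep retrying until it is.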
allowed = False
while not allowed:
choice = random.choice(choices)
allowed = yield choice
if allowed:
yield None
|
# qubit number=3
# total number=60
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.h(input_qubit[2]) # number=38
prog.cz(input_qubit[0],input_qubit[2]) # number=39
prog.h(input_qubit[2]) # number=40
prog.cx(input_qubit[0],input_qubit[2]) # number=31
prog.h(input_qubit[2]) # number=42
prog.cz(input_qubit[0],input_qubit[2]) # number=43
prog.h(input_qubit[2]) # number=44
prog.h(input_qubit[2]) # number=48
prog.cz(input_qubit[0],input_qubit[2]) # number=49
prog.h(input_qubit[2]) # number=50
prog.h(input_qubit[2]) # number=57
prog.cz(input_qubit[0],input_qubit[2]) # number=58
prog.h(input_qubit[2]) # number=59
prog.x(input_qubit[2]) # number=55
prog.cx(input_qubit[0],input_qubit[2]) # number=56
prog.cx(input_qubit[0],input_qubit[2]) # number=47
prog.cx(input_qubit[0],input_qubit[2]) # number=37
prog.h(input_qubit[2]) # number=51
prog.cz(input_qubit[0],input_qubit[2]) # number=52
prog.h(input_qubit[2]) # number=53
prog.h(input_qubit[2]) # number=25
prog.cz(input_qubit[0],input_qubit[2]) # number=26
prog.h(input_qubit[2]) # number=27
prog.h(input_qubit[1]) # number=7
prog.cz(input_qubit[2],input_qubit[1]) # number=8
prog.rx(0.17592918860102857,input_qubit[2]) # number=34
prog.rx(-0.3989822670059037,input_qubit[1]) # number=30
prog.h(input_qubit[1]) # number=9
prog.h(input_qubit[1]) # number=18
prog.cz(input_qubit[2],input_qubit[1]) # number=19
prog.h(input_qubit[1]) # number=20
prog.y(input_qubit[1]) # number=14
prog.h(input_qubit[1]) # number=22
prog.cz(input_qubit[2],input_qubit[1]) # number=23
prog.h(input_qubit[1]) # number=24
prog.z(input_qubit[2]) # number=3
prog.z(input_qubit[1]) # number=41
prog.x(input_qubit[1]) # number=17
prog.y(input_qubit[2]) # number=5
prog.x(input_qubit[2]) # number=21
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
    sample_shot = 4000
writefile = open("../data/startQiskit289.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('qasm_simulator')
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, singleton-comparison, bad-continuation
"""Proposal operator"""
import math
import tvm
from tvm import te
from ...vision.rcnn import generate_anchor, reg_bbox, reg_iou
from ...util import get_const_tuple, get_const_int
def predict_bbox_ir(cls_prob_buf, bbox_pred_buf, im_info_buf, out_buf, scales, ratios,
feature_stride, rpn_min_size, iou_loss):
"""Predict bounding boxes based on anchors, scores and deltas.
Parameters
----------
cls_prob_buf : tvm.te.schedule.Buffer
4-D with shape [batch, 2 * num_anchors, height, width]
bbox_pred_buf : tvm.te.schedule.Buffer
4-D with shape [batch, 4 * num_anchors, height, width]
im_info_buf : tvm.te.schedule.Buffer
2-D with shape [batch, 3]
out_buf : tvm.te.schedule.Buffer
3-D with shape [batch, num_bbox, 5]
The last dimension is in format of [w_start, h_start, w_end, h_end, score]
scales : list/tuple of float
Scales of anchor windows.
ratios : list/tuple of float
Ratios of anchor windows.
feature_stride : int
        The size of the receptive field of each unit in the convolution layer of the rpn,
        for example the product of all strides prior to this layer.
rpn_min_size : int
Minimum height or width in proposal.
iou_loss : bool
Usage of IoU loss.
Returns
-------
stmt : Stmt
The result IR statement.
"""
batch, num_anchors, height, width = get_const_tuple(cls_prob_buf.shape)
num_anchors //= 2
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
nthread_tx = max_threads
nthread_bx = (batch * height * width) // max_threads + 1
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
tid = bx * max_threads + tx
ib = tvm.tir.ir_builder.create()
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
p_score = ib.buffer_ptr(cls_prob_buf)
p_delta = ib.buffer_ptr(bbox_pred_buf)
p_im_info = ib.buffer_ptr(im_info_buf)
p_out = ib.buffer_ptr(out_buf)
idxm = tvm.tir.indexmod
idxd = tvm.tir.indexdiv
with ib.if_scope(tid < batch * height * width):
w = idxm(tid, width)
h = idxm(idxd(tid, width), height)
b = idxd(idxd(tid, width), height)
for k in range(num_anchors):
out_index = tid * num_anchors + k
ratio = ratios[k // len(scales)]
scale = scales[k % len(scales)]
anchor = generate_anchor(ratio, scale, feature_stride)
im_height = p_im_info[b * 3]
im_width = p_im_info[b * 3 + 1]
x1 = anchor[0] + w * feature_stride
y1 = anchor[1] + h * feature_stride
x2 = anchor[2] + w * feature_stride
y2 = anchor[3] + h * feature_stride
delta = [p_delta[((((b * num_anchors + k) * 4 + i) * height + h) * width + w)]
for i in range(4)]
regression_func = reg_iou if iou_loss else reg_bbox
pred_x1, pred_y1, pred_x2, pred_y2 = regression_func(x1, y1, x2, y2, *delta)
pred_x1 = tvm.te.max(tvm.te.min(pred_x1, im_width - 1.0), 0.0)
pred_y1 = tvm.te.max(tvm.te.min(pred_y1, im_height - 1.0), 0.0)
pred_x2 = tvm.te.max(tvm.te.min(pred_x2, im_width - 1.0), 0.0)
pred_y2 = tvm.te.max(tvm.te.min(pred_y2, im_height - 1.0), 0.0)
real_height = (im_height / feature_stride).astype('int32')
real_width = (im_width / feature_stride).astype('int32')
bbox_w = pred_x2 - pred_x1 + 1.0
bbox_h = pred_y2 - pred_y1 + 1.0
min_size = p_im_info[b * 3 + 2] * rpn_min_size
pred_score = p_score[((b * num_anchors * 2 + num_anchors + k) * height + h) * width + w]
pred_score = tvm.tir.Select(tvm.tir.any(h >= real_height, w >= real_width),
-1.0, pred_score)
p_out[out_index * 5 + 0] = pred_x1
p_out[out_index * 5 + 1] = pred_y1
p_out[out_index * 5 + 2] = pred_x2
p_out[out_index * 5 + 3] = pred_y2
p_out[out_index * 5 + 4] = pred_score
with ib.if_scope(tvm.tir.any(bbox_w < min_size, bbox_h < min_size)):
p_out[out_index * 5 + 0] -= min_size / 2.0
p_out[out_index * 5 + 1] -= min_size / 2.0
p_out[out_index * 5 + 2] += min_size / 2.0
p_out[out_index * 5 + 3] += min_size / 2.0
p_out[out_index * 5 + 4] = -1.0
return ib.get()
def argsort_ir(data_buf, out_index_buf):
"""Batched odd-even transposition sort.
Parameters
----------
data_buf : tvm.te.schedule.Buffer
2-D with shape [batch, num_bbox]
out_index_buf : tvm.te.schedule.Buffer
2-D with shape [batch, num_bbox]. Indices of data in sorted order.
Returns
-------
stmt : Stmt
The result IR statement.
"""
batch, num_bbox = get_const_tuple(data_buf.shape)
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
ib = tvm.tir.ir_builder.create()
p_data = ib.buffer_ptr(data_buf)
index_out = ib.buffer_ptr(out_index_buf)
nthread_tx = max_threads
nthread_bx = (num_bbox + 1) // 2 // max_threads + 1
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("vthread")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "virtual_thread", nthread_bx)
tid = bx * nthread_tx + tx
temp_data = ib.allocate("float32", (1,), name="temp_data", scope="local")
temp_index = ib.allocate("int32", (1,), name="temp_index", scope="local")
idxm = tvm.tir.indexmod
with ib.for_range(0, batch, for_type="unroll") as b:
start = b * num_bbox
for i in range(2):
bbox_id = tid * 2 + i
with ib.if_scope(bbox_id < num_bbox):
index_out[start + bbox_id] = bbox_id
with ib.for_range(0, num_bbox) as k:
offset = start + 2 * tid + idxm(k, 2)
with ib.if_scope(
tvm.tir.all(offset + 1 < num_bbox, p_data[offset] < p_data[offset + 1])):
temp_data[0] = p_data[offset]
p_data[offset] = p_data[offset + 1]
p_data[offset + 1] = temp_data[0]
temp_index[0] = index_out[offset]
index_out[offset] = index_out[offset + 1]
index_out[offset + 1] = temp_index[0]
ib.emit(tvm.tir.Call(None, 'tvm_storage_sync',
tvm.runtime.convert(['shared']),
tvm.tir.Call.Intrinsic))
return ib.get()
def nms_ir(sorted_bbox_buf, out_buf, nms_threshold):
"""Non-maximum supression.
Parameters
----------
sorted_bbox_buf : tvm.te.schedule.Buffer
3-D with shape [batch, num_bbox, 5]. The last dimension is in format of
[w_start, h_start, w_end, h_end, score].
out_buf : tvm.te.schedule.Buffer
2-D with shape [batch, num_bbox]. Boolean mask of whether a bounding box should be removed.
nms_threshold : float
Non-maximum suppression threshold.
Returns
-------
stmt : Stmt
The result IR statement.
"""
def calculate_overlap(out_tensor, box_a_idx, box_b_idx):
"""Calculate overlap of two boxes.
"""
w = tvm.te.max(0.0, tvm.te.min(out_tensor[box_a_idx + 2], out_tensor[box_b_idx + 2])
- tvm.te.max(out_tensor[box_a_idx], out_tensor[box_b_idx]) + 1.0)
h = tvm.te.max(0.0, tvm.te.min(out_tensor[box_a_idx + 3], out_tensor[box_b_idx + 3])
- tvm.te.max(out_tensor[box_a_idx + 1], out_tensor[box_b_idx + 1]) + 1.0)
i = w * h
u = (out_tensor[box_a_idx + 2] - out_tensor[box_a_idx] + 1.0) * \
(out_tensor[box_a_idx + 3] - out_tensor[box_a_idx + 1] + 1.0) + \
(out_tensor[box_b_idx + 2] - out_tensor[box_b_idx] + 1.0) * \
(out_tensor[box_b_idx + 3] - out_tensor[box_b_idx + 1] + 1.0) - i
return i / u
batch, num_bbox = get_const_tuple(out_buf.shape)
max_threads = int(math.sqrt(tvm.target.Target.current(allow_none=False).max_num_threads))
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib = tvm.tir.ir_builder.create()
p_data = ib.buffer_ptr(sorted_bbox_buf)
p_out = ib.buffer_ptr(out_buf)
nthread_tx = max_threads
nthread_bx = num_bbox // max_threads + 1
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
i = bx * max_threads + tx
with ib.for_range(0, batch, for_type="unroll", name="n") as b:
base_idx = b * num_bbox
with ib.if_scope(i < num_bbox):
p_out[base_idx + i] = False
with ib.for_range(0, num_bbox - 1) as l:
with ib.if_scope(tvm.tir.all(i < num_bbox, i > l, p_out[base_idx + l] == False)):
iou = calculate_overlap(p_data, (base_idx + l) * 5, (base_idx + i) * 5)
with ib.if_scope(iou > nms_threshold):
p_out[base_idx + i] = True
ib.emit(tvm.tir.Call(None, 'tvm_storage_sync',
tvm.runtime.convert(['shared']),
tvm.tir.Call.Intrinsic))
return ib.get()
def prepare_output_ir(sorted_bbox_buf, remove_mask_buf, out_buf):
"""Copy output after applying nms to continuous memory.
Parameters
----------
sorted_bbox_buf : tvm.te.schedule.Buffer
3-D with shape [batch, num_bbox, 5]. The last dimension is in format of
[w_start, h_start, w_end, h_end, score].
remove_mask_buf : tvm.te.schedule.Buffer
2-D with shape [batch, num_bbox]. Boolean mask of whether a bounding box should be removed.
out_buf : tvm.te.schedule.Buffer
2-D with shape [batch * rpn_post_nms_top_n, 5]. The last dimension is in format of
[batch_index, w_start, h_start, w_end, h_end].
Returns
-------
stmt : Stmt
The result IR statement.
"""
batch, num_bbox, _ = get_const_tuple(sorted_bbox_buf.shape)
rpn_post_nms_top_n = get_const_int(out_buf.shape[0]) // batch
nthread_tx = batch
tx = te.thread_axis("threadIdx.x")
ib = tvm.tir.ir_builder.create()
ib.scope_attr(tx, "thread_extent", nthread_tx)
i = ib.allocate('int32', (1,), 'i', scope='local')
i[0] = 0
p_sorted_bbox = ib.buffer_ptr(sorted_bbox_buf)
p_remove = ib.buffer_ptr(remove_mask_buf)
p_out = ib.buffer_ptr(out_buf)
b = tx
nkeep = ib.allocate('int32', (1,), 'nkeep', scope='local')
nkeep[0] = 0 # number of bbox after nms
with ib.for_range(0, num_bbox) as j:
with ib.if_scope(p_remove[b * num_bbox + j] == False):
nkeep[0] += 1
with ib.if_scope(nkeep[0] > 0):
with ib.for_range(0, te.ceil(
tvm.tir.const(rpn_post_nms_top_n, 'float32') / nkeep[0]).astype('int32')):
with ib.for_range(0, num_bbox) as j:
offset_j = (b * num_bbox + j) * 5
offset_i = (b * rpn_post_nms_top_n + i[0]) * 5
with ib.if_scope(tvm.tir.all(i[0] < rpn_post_nms_top_n,
p_remove[(b*num_bbox+j)] == False)):
p_out[offset_i] = tvm.tir.Cast('float32', b)
with ib.for_range(0, 4, for_type='unroll') as k:
p_out[offset_i + k + 1] = p_sorted_bbox[offset_j + k]
i[0] = i[0] + 1
body = ib.get()
return body
def proposal(cls_prob, bbox_pred, im_info, scales, ratios, feature_stride, threshold,
rpn_pre_nms_top_n, rpn_post_nms_top_n, rpn_min_size, iou_loss):
"""Proposal operator.
Parameters
----------
cls_prob : tvm.te.Tensor
4-D with shape [batch, 2 * num_anchors, height, width]
bbox_pred : tvm.te.Tensor
4-D with shape [batch, 4 * num_anchors, height, width]
im_info : tvm.te.Tensor
2-D with shape [batch, 3]
scales : list/tuple of float
Scales of anchor windows.
ratios : list/tuple of float
Ratios of anchor windows.
feature_stride : int
The size of the receptive field of each unit in the convolution layer of the RPN, for example
the product of all strides prior to this layer.
threshold : float
Non-maximum suppression threshold.
rpn_pre_nms_top_n : int
Number of top scoring boxes to keep before applying NMS. Use -1 to keep all boxes.
rpn_post_nms_top_n : int
Number of top scoring boxes to keep after applying NMS to RPN proposals.
rpn_min_size : int
Minimum height or width in proposal.
iou_loss : bool
Usage of IoU loss.
Returns
-------
out : tvm.te.Tensor
2-D tensor with shape [batch * rpn_post_nms_top_n, 5]. The last dimension is in format of
[batch_index, w_start, h_start, w_end, h_end].
"""
batch, _, height, width = get_const_tuple(cls_prob.shape)
num_anchors = len(scales) * len(ratios)
num_bbox = height * width * num_anchors
rpn_pre_nms_top_n = min(rpn_pre_nms_top_n, num_bbox) if rpn_pre_nms_top_n > 0 else num_bbox
bbox = te.extern((batch, num_bbox, 5), [cls_prob, bbox_pred, im_info], lambda ins, outs:
predict_bbox_ir(ins[0], ins[1], ins[2], outs[0], scales, ratios,
feature_stride, rpn_min_size, iou_loss),
dtype=bbox_pred.dtype)
score = te.compute((batch, num_bbox), lambda b, i: bbox[b, i, 4], tag='bbox_score')
sorted_index = te.extern([score.shape], [score],
lambda ins, outs: argsort_ir(ins[0], outs[0]),
dtype='int32')
sorted_bbox = te.compute((batch, rpn_pre_nms_top_n, 5),
lambda b, i, j: bbox[b, sorted_index[b, i], j], tag='sorted_bbox')
nms_remove_mask = te.extern((batch, rpn_pre_nms_top_n), [sorted_bbox],
lambda ins, outs: nms_ir(ins[0], outs[0], threshold),
dtype='bool')
nms_out = te.extern((batch * rpn_post_nms_top_n, 5), [sorted_bbox, nms_remove_mask],
lambda ins, outs: prepare_output_ir(ins[0], ins[1], outs[0]),
dtype=sorted_bbox.dtype)
return nms_out
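# Illustrative usage sketch (added; not part of the original module). Assuming the
# module-level imports above (tvm, te) and a CUDA target, the proposal op could be
# instantiated roughly as follows; shapes, anchors, and thresholds are hypothetical.
#
#   cls_prob = te.placeholder((1, 2 * 9, 38, 50), name="cls_prob")
#   bbox_pred = te.placeholder((1, 4 * 9, 38, 50), name="bbox_pred")
#   im_info = te.placeholder((1, 3), name="im_info")
#   out = proposal(cls_prob, bbox_pred, im_info, scales=(8.0, 16.0, 32.0),
#                  ratios=(0.5, 1.0, 2.0), feature_stride=16, threshold=0.7,
#                  rpn_pre_nms_top_n=6000, rpn_post_nms_top_n=300,
#                  rpn_min_size=16, iou_loss=False)
#
# "out" is then a (1 * 300, 5) tensor whose rows are
# [batch_index, w_start, h_start, w_end, h_end].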
|
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Optimize calls to built-in references to specific built-in calls.
For built-in name references, we check if it's one of the supported built-in
types, and then specialize for the ones where it makes sense.
"""
from nuitka.__past__ import xrange # pylint: disable=I0021,redefined-builtin
from nuitka.Errors import NuitkaAssumptionError
from nuitka.nodes.AssignNodes import (
StatementAssignmentVariable,
StatementDelVariable,
)
from nuitka.nodes.AttributeNodes import (
ExpressionAttributeLookup,
ExpressionBuiltinGetattr,
ExpressionBuiltinHasattr,
ExpressionBuiltinSetattr,
)
from nuitka.nodes.BuiltinAllNodes import ExpressionBuiltinAll
from nuitka.nodes.BuiltinAnyNodes import ExpressionBuiltinAny
from nuitka.nodes.BuiltinComplexNodes import (
ExpressionBuiltinComplex1,
ExpressionBuiltinComplex2,
)
from nuitka.nodes.BuiltinDecodingNodes import (
ExpressionBuiltinChr,
ExpressionBuiltinOrd,
)
from nuitka.nodes.BuiltinDecoratorNodes import (
ExpressionBuiltinClassmethod,
ExpressionBuiltinStaticmethod,
)
from nuitka.nodes.BuiltinDictNodes import ExpressionBuiltinDict
from nuitka.nodes.BuiltinFormatNodes import (
ExpressionBuiltinAscii,
ExpressionBuiltinBin,
ExpressionBuiltinFormat,
ExpressionBuiltinHex,
ExpressionBuiltinId,
ExpressionBuiltinOct,
)
from nuitka.nodes.BuiltinHashNodes import ExpressionBuiltinHash
from nuitka.nodes.BuiltinIntegerNodes import (
ExpressionBuiltinInt1,
ExpressionBuiltinInt2,
)
from nuitka.nodes.BuiltinIteratorNodes import (
ExpressionBuiltinIter1,
ExpressionBuiltinIter2,
)
from nuitka.nodes.BuiltinLenNodes import ExpressionBuiltinLen
from nuitka.nodes.BuiltinNextNodes import (
ExpressionBuiltinNext1,
ExpressionBuiltinNext2,
)
from nuitka.nodes.BuiltinOpenNodes import ExpressionBuiltinOpen
from nuitka.nodes.BuiltinRangeNodes import (
ExpressionBuiltinRange1,
ExpressionBuiltinRange2,
ExpressionBuiltinRange3,
ExpressionBuiltinXrange1,
ExpressionBuiltinXrange2,
ExpressionBuiltinXrange3,
)
from nuitka.nodes.BuiltinRefNodes import (
ExpressionBuiltinAnonymousRef,
makeExpressionBuiltinTypeRef,
)
from nuitka.nodes.BuiltinSumNodes import (
ExpressionBuiltinSum1,
ExpressionBuiltinSum2,
)
from nuitka.nodes.BuiltinTypeNodes import (
ExpressionBuiltinBool,
ExpressionBuiltinBytearray1,
ExpressionBuiltinBytearray3,
ExpressionBuiltinFloat,
ExpressionBuiltinFrozenset,
ExpressionBuiltinList,
ExpressionBuiltinSet,
ExpressionBuiltinStrP2,
ExpressionBuiltinStrP3,
ExpressionBuiltinTuple,
ExpressionBuiltinUnicodeP2,
)
from nuitka.nodes.BuiltinVarsNodes import ExpressionBuiltinVars
from nuitka.nodes.CallNodes import makeExpressionCall
from nuitka.nodes.ClassNodes import ExpressionBuiltinType3
from nuitka.nodes.ComparisonNodes import ExpressionComparisonIs
from nuitka.nodes.ConditionalNodes import (
ExpressionConditional,
makeStatementConditional,
)
from nuitka.nodes.ConstantRefNodes import makeConstantRefNode
from nuitka.nodes.ContainerMakingNodes import makeExpressionMakeTupleOrConstant
from nuitka.nodes.ExecEvalNodes import (
ExpressionBuiltinCompile,
ExpressionBuiltinEval,
)
from nuitka.nodes.GlobalsLocalsNodes import (
ExpressionBuiltinDir1,
ExpressionBuiltinGlobals,
)
from nuitka.nodes.ImportNodes import ExpressionBuiltinImport
from nuitka.nodes.NodeMakingHelpers import (
makeConstantReplacementNode,
makeExpressionBuiltinLocals,
makeRaiseExceptionReplacementExpression,
makeRaiseExceptionReplacementExpressionFromInstance,
wrapExpressionWithSideEffects,
)
from nuitka.nodes.OperatorNodes import ExpressionOperationBinaryDivmod
from nuitka.nodes.OperatorNodesUnary import (
ExpressionOperationNot,
ExpressionOperationUnaryAbs,
ExpressionOperationUnaryRepr,
)
from nuitka.nodes.OutlineNodes import ExpressionOutlineBody
from nuitka.nodes.ReturnNodes import makeStatementReturn
from nuitka.nodes.SliceNodes import makeExpressionBuiltinSlice
from nuitka.nodes.TypeNodes import (
ExpressionBuiltinIsinstance,
ExpressionBuiltinIssubclass,
ExpressionBuiltinSuper0,
ExpressionBuiltinSuper2,
ExpressionBuiltinType1,
)
from nuitka.nodes.VariableRefNodes import (
ExpressionTempVariableRef,
ExpressionVariableRef,
)
from nuitka.PythonVersions import python_version
from nuitka.specs import BuiltinParameterSpecs
from nuitka.Tracing import optimization_logger
from nuitka.tree.ReformulationExecStatements import wrapEvalGlobalsAndLocals
from nuitka.tree.ReformulationTryFinallyStatements import (
makeTryFinallyStatement,
)
from nuitka.tree.TreeHelpers import (
makeCallNode,
makeStatementsSequence,
makeStatementsSequenceFromStatement,
)
def dir_extractor(node):
locals_scope = node.subnode_called.getLocalsScope()
def buildDirEmptyCase(source_ref):
source = makeExpressionBuiltinLocals(
locals_scope=locals_scope, source_ref=source_ref
)
result = makeCallNode(
ExpressionAttributeLookup(
expression=source, attribute_name="keys", source_ref=source_ref
),
source_ref,
)
# For Python3, "keys" doesn't really return a list, but only a view handle,
# and we want it to be a list.
if python_version >= 0x300:
result = ExpressionBuiltinList(value=result, source_ref=source_ref)
return result
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
# TODO: Needs locals_scope attached.
builtin_class=ExpressionBuiltinDir1,
builtin_spec=BuiltinParameterSpecs.builtin_dir_spec,
empty_special_class=buildDirEmptyCase,
)
def vars_extractor(node):
locals_scope = node.subnode_called.getLocalsScope()
def selectVarsEmptyClass(source_ref):
return makeExpressionBuiltinLocals(
locals_scope=locals_scope, source_ref=source_ref
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
# TODO: Needs locals_scope attached
builtin_class=ExpressionBuiltinVars,
builtin_spec=BuiltinParameterSpecs.builtin_vars_spec,
empty_special_class=selectVarsEmptyClass,
)
def import_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinImport,
builtin_spec=BuiltinParameterSpecs.builtin_import_spec,
)
def type_extractor(node):
args = node.subnode_args
if args is None:
iter_length = 0
else:
iter_length = args.getIterationLength()
if iter_length == 1:
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinType1,
builtin_spec=BuiltinParameterSpecs.builtin_type1_spec,
)
elif iter_length == 3:
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinType3,
builtin_spec=BuiltinParameterSpecs.builtin_type3_spec,
)
else:
return makeRaiseExceptionReplacementExpressionFromInstance(
expression=node, exception=TypeError("type() takes 1 or 3 arguments")
)
def iter_extractor(node):
def wrapIterCreation(callable_arg, sentinel, source_ref):
if sentinel is None:
return ExpressionBuiltinIter1(value=callable_arg, source_ref=source_ref)
else:
return ExpressionBuiltinIter2(
callable_arg=callable_arg, sentinel=sentinel, source_ref=source_ref
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=wrapIterCreation,
builtin_spec=BuiltinParameterSpecs.builtin_iter_spec,
)
def next_extractor(node):
# Split up "next" with and without a default; they are not going to behave
# very similarly.
def selectNextBuiltinClass(iterator, default, source_ref):
if default is None:
return ExpressionBuiltinNext1(value=iterator, source_ref=source_ref)
else:
return ExpressionBuiltinNext2(
iterator=iterator, default=default, source_ref=source_ref
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=selectNextBuiltinClass,
builtin_spec=BuiltinParameterSpecs.builtin_next_spec,
)
def sum_extractor(node):
# Split up "sum" with and without a start value, one of which is much easier.
def selectSumBuiltinClass(sequence, start, source_ref):
if start is None:
return ExpressionBuiltinSum1(sequence=sequence, source_ref=source_ref)
else:
return ExpressionBuiltinSum2(
sequence=sequence, start=start, source_ref=source_ref
)
def makeSum0(source_ref):
# pylint: disable=unused-argument
return makeRaiseExceptionReplacementExpressionFromInstance(
expression=node,
exception=TypeError(
"sum expected at least 1 arguments, got 0"
if python_version < 0x380
else "sum() takes at least 1 positional argument (0 given)"
),
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=selectSumBuiltinClass,
builtin_spec=BuiltinParameterSpecs.builtin_sum_spec,
empty_special_class=makeSum0,
)
def dict_extractor(node):
# The "dict" built-in is a bit strange in that it accepts a position
# parameter, or not, but won't have a default value.
def wrapExpressionBuiltinDictCreation(positional_args, dict_star_arg, source_ref):
if len(positional_args) > 1:
result = makeRaiseExceptionReplacementExpressionFromInstance(
expression=node,
exception=TypeError(
"dict expected at most 1 arguments, got %d" % (len(positional_args))
),
)
result = wrapExpressionWithSideEffects(
side_effects=positional_args, old_node=node, new_node=result
)
if dict_star_arg:
result = wrapExpressionWithSideEffects(
side_effects=dict_star_arg, old_node=node, new_node=result
)
return result
return ExpressionBuiltinDict(
pos_arg=positional_args[0] if positional_args else None,
pairs=dict_star_arg,
source_ref=source_ref,
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=wrapExpressionBuiltinDictCreation,
builtin_spec=BuiltinParameterSpecs.builtin_dict_spec,
)
def chr_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinChr,
builtin_spec=BuiltinParameterSpecs.builtin_chr_spec,
)
def ord_extractor(node):
def makeOrd0(source_ref):
# pylint: disable=unused-argument
return makeRaiseExceptionReplacementExpressionFromInstance(
expression=node,
exception=TypeError("ord() takes exactly one argument (0 given)"),
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinOrd,
builtin_spec=BuiltinParameterSpecs.builtin_ord_spec,
empty_special_class=makeOrd0,
)
def bin_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinBin,
builtin_spec=BuiltinParameterSpecs.builtin_bin_spec,
)
def oct_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinOct,
builtin_spec=BuiltinParameterSpecs.builtin_oct_spec,
)
def hex_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinHex,
builtin_spec=BuiltinParameterSpecs.builtin_hex_spec,
)
def id_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinId,
builtin_spec=BuiltinParameterSpecs.builtin_id_spec,
)
def repr_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionOperationUnaryRepr,
builtin_spec=BuiltinParameterSpecs.builtin_repr_spec,
)
if python_version >= 0x300:
def ascii_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinAscii,
builtin_spec=BuiltinParameterSpecs.builtin_repr_spec,
)
def range_extractor(node):
def selectRangeBuiltin(low, high, step, source_ref):
if high is None:
return ExpressionBuiltinRange1(low=low, source_ref=source_ref)
elif step is None:
return ExpressionBuiltinRange2(low=low, high=high, source_ref=source_ref)
else:
return ExpressionBuiltinRange3(
low=low, high=high, step=step, source_ref=source_ref
)
def makeRange0(source_ref):
# pylint: disable=unused-argument
try:
range()
except Exception as e: # We want to be broad here, pylint: disable=broad-except
return makeRaiseExceptionReplacementExpressionFromInstance(
expression=node, exception=e
)
else:
raise NuitkaAssumptionError("range without argument is expected to raise")
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=selectRangeBuiltin,
builtin_spec=BuiltinParameterSpecs.builtin_range_spec,
empty_special_class=makeRange0,
)
def xrange_extractor(node):
def selectXrangeBuiltin(low, high, step, source_ref):
if high is None:
return ExpressionBuiltinXrange1(low=low, source_ref=source_ref)
elif step is None:
return ExpressionBuiltinXrange2(low=low, high=high, source_ref=source_ref)
else:
return ExpressionBuiltinXrange3(
low=low, high=high, step=step, source_ref=source_ref
)
def makeXrange0(source_ref):
# pylint: disable=unused-argument
try:
xrange()
except Exception as e: # We want to be broad here, pylint: disable=broad-except
return makeRaiseExceptionReplacementExpressionFromInstance(
expression=node, exception=e
)
else:
raise NuitkaAssumptionError("range without argument is expected to raise")
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=selectXrangeBuiltin,
builtin_spec=BuiltinParameterSpecs.builtin_xrange_spec,
empty_special_class=makeXrange0,
)
def len_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinLen,
builtin_spec=BuiltinParameterSpecs.builtin_len_spec,
)
def all_extractor(node):
# pylint: disable=unused-argument
def makeAll0(source_ref):
exception_message = "all() takes exactly one argument (0 given)"
return makeRaiseExceptionReplacementExpressionFromInstance(
expression=node, exception=TypeError(exception_message)
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinAll,
builtin_spec=BuiltinParameterSpecs.builtin_all_spec,
empty_special_class=makeAll0,
)
def abs_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionOperationUnaryAbs,
builtin_spec=BuiltinParameterSpecs.builtin_abs_spec,
)
def any_extractor(node):
# pylint: disable=unused-argument
def makeAny0(source_ref):
exception_message = "any() takes exactly one argument (0 given)"
return makeRaiseExceptionReplacementExpressionFromInstance(
expression=node, exception=TypeError(exception_message)
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinAny,
builtin_spec=BuiltinParameterSpecs.builtin_any_spec,
empty_special_class=makeAny0,
)
def tuple_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinTuple,
builtin_spec=BuiltinParameterSpecs.builtin_tuple_spec,
)
def list_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinList,
builtin_spec=BuiltinParameterSpecs.builtin_list_spec,
)
def set_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinSet,
builtin_spec=BuiltinParameterSpecs.builtin_set_spec,
)
def frozenset_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinFrozenset,
builtin_spec=BuiltinParameterSpecs.builtin_frozenset_spec,
)
def float_extractor(node):
def makeFloat0(source_ref):
# pylint: disable=unused-argument
return makeConstantReplacementNode(
constant=float(), node=node, user_provided=False
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinFloat,
builtin_spec=BuiltinParameterSpecs.builtin_float_spec,
empty_special_class=makeFloat0,
)
def complex_extractor(node):
def makeComplex0(source_ref):
# pylint: disable=unused-argument
return makeConstantReplacementNode(
constant=complex(), node=node, user_provided=False
)
def selectComplexBuiltin(real, imag, source_ref):
if imag is None:
return ExpressionBuiltinComplex1(value=real, source_ref=source_ref)
else:
return ExpressionBuiltinComplex2(
real=real, imag=imag, source_ref=source_ref
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=selectComplexBuiltin,
builtin_spec=BuiltinParameterSpecs.builtin_complex_spec,
empty_special_class=makeComplex0,
)
def str_extractor(node):
builtin_class = ExpressionBuiltinStrP2 if str is bytes else ExpressionBuiltinStrP3
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=builtin_class,
builtin_spec=builtin_class.builtin_spec,
)
if python_version < 0x300:
def unicode_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinUnicodeP2,
builtin_spec=ExpressionBuiltinUnicodeP2.builtin_spec,
)
else:
from nuitka.nodes.BuiltinTypeNodes import (
ExpressionBuiltinBytes1,
ExpressionBuiltinBytes3,
)
def bytes_extractor(node):
def makeBytes0(source_ref):
# pylint: disable=unused-argument
return makeConstantReplacementNode(
constant=bytes(), node=node, user_provided=False
)
def selectBytesBuiltin(string, encoding, errors, source_ref):
if encoding is None and errors is None:
return ExpressionBuiltinBytes1(value=string, source_ref=source_ref)
else:
return ExpressionBuiltinBytes3(
value=string,
encoding=encoding,
errors=errors,
source_ref=source_ref,
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=selectBytesBuiltin,
builtin_spec=BuiltinParameterSpecs.builtin_bytes_p3_spec,
empty_special_class=makeBytes0,
)
def bool_extractor(node):
def makeBool0(source_ref):
# pylint: disable=unused-argument
return makeConstantReplacementNode(
constant=bool(), node=node, user_provided=False
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinBool,
builtin_spec=BuiltinParameterSpecs.builtin_bool_spec,
empty_special_class=makeBool0,
)
def int_extractor(node):
def makeInt0(source_ref):
# pylint: disable=unused-argument
return makeConstantReplacementNode(
constant=int(), node=node, user_provided=False
)
def selectIntBuiltin(value, base, source_ref):
if base is None:
return ExpressionBuiltinInt1(value=value, source_ref=source_ref)
else:
return ExpressionBuiltinInt2(value=value, base=base, source_ref=source_ref)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=selectIntBuiltin,
builtin_spec=BuiltinParameterSpecs.builtin_int_spec,
empty_special_class=makeInt0,
)
if python_version < 0x300:
from nuitka.nodes.BuiltinIntegerNodes import (
ExpressionBuiltinLong1,
ExpressionBuiltinLong2,
)
def long_extractor(node):
def makeLong0(source_ref):
# pylint: disable=unused-argument
return makeConstantReplacementNode(
constant=int(), node=node, user_provided=False
)
def selectIntBuiltin(value, base, source_ref):
if base is None:
return ExpressionBuiltinLong1(value=value, source_ref=source_ref)
else:
return ExpressionBuiltinLong2(
value=value, base=base, source_ref=source_ref
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=selectIntBuiltin,
builtin_spec=BuiltinParameterSpecs.builtin_int_spec,
empty_special_class=makeLong0,
)
def globals_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinGlobals,
builtin_spec=BuiltinParameterSpecs.builtin_globals_spec,
)
def locals_extractor(node):
locals_scope = node.subnode_called.getLocalsScope()
def makeLocalsNode(source_ref):
return makeExpressionBuiltinLocals(
locals_scope=locals_scope, source_ref=source_ref
)
# Note: Locals on the module level is really globals.
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=makeLocalsNode,
builtin_spec=BuiltinParameterSpecs.builtin_locals_spec,
)
if python_version < 0x300:
from nuitka.nodes.ExecEvalNodes import ExpressionBuiltinExecfile
def execfile_extractor(node):
def wrapExpressionBuiltinExecfileCreation(
filename, globals_arg, locals_arg, source_ref
):
outline_body = ExpressionOutlineBody(
provider=node.getParentVariableProvider(),
name="execfile_call",
source_ref=source_ref,
)
globals_ref, locals_ref, tried, final = wrapEvalGlobalsAndLocals(
provider=node.getParentVariableProvider(),
globals_node=globals_arg,
locals_node=locals_arg,
temp_scope=outline_body.getOutlineTempScope(),
source_ref=source_ref,
)
tried = makeStatementsSequence(
statements=(
tried,
makeStatementReturn(
expression=ExpressionBuiltinExecfile(
source_code=makeCallNode(
ExpressionAttributeLookup(
expression=ExpressionBuiltinOpen(
filename=filename,
mode=makeConstantRefNode(
constant="rU", source_ref=source_ref
),
buffering=None,
source_ref=source_ref,
),
attribute_name="read",
source_ref=source_ref,
),
source_ref,
),
globals_arg=globals_ref,
locals_arg=locals_ref,
source_ref=source_ref,
),
source_ref=source_ref,
),
),
allow_none=False,
source_ref=source_ref,
)
outline_body.setChild(
"body",
makeStatementsSequenceFromStatement(
statement=makeTryFinallyStatement(
provider=outline_body,
tried=tried,
final=final,
source_ref=source_ref,
)
),
)
return outline_body
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=wrapExpressionBuiltinExecfileCreation,
builtin_spec=BuiltinParameterSpecs.builtin_execfile_spec,
)
def eval_extractor(node):
def wrapEvalBuiltin(source, globals_arg, locals_arg, source_ref):
provider = node.getParentVariableProvider()
outline_body = ExpressionOutlineBody(
provider=node.getParentVariableProvider(),
name="eval_call",
source_ref=source_ref,
)
globals_ref, locals_ref, tried, final = wrapEvalGlobalsAndLocals(
provider=provider,
globals_node=globals_arg,
locals_node=locals_arg,
temp_scope=outline_body.getOutlineTempScope(),
source_ref=source_ref,
)
# The wrapping should not relocate to the "source_ref".
assert (
globals_arg is None
or globals_ref.getSourceReference() == globals_arg.getSourceReference()
)
assert (
locals_arg is None
or locals_ref.getSourceReference() == locals_arg.getSourceReference()
)
source_variable = outline_body.allocateTempVariable(
temp_scope=None, name="source"
)
final.setChild(
"statements",
final.subnode_statements
+ (
StatementDelVariable(
variable=source_variable, tolerant=True, source_ref=source_ref
),
),
)
strip_choice = makeConstantRefNode(constant=(" \t",), source_ref=source_ref)
if python_version >= 0x300:
strip_choice = ExpressionConditional(
condition=ExpressionComparisonIs(
left=ExpressionBuiltinType1(
value=ExpressionTempVariableRef(
variable=source_variable, source_ref=source_ref
),
source_ref=source_ref,
),
right=makeExpressionBuiltinTypeRef(
builtin_name="bytes", source_ref=source_ref
),
source_ref=source_ref,
),
expression_yes=makeConstantRefNode(
constant=(b" \t",), source_ref=source_ref
),
expression_no=strip_choice,
source_ref=source_ref,
)
# Source needs some special treatment for eval: if it's a string, it
# must be stripped.
string_fixup = StatementAssignmentVariable(
variable=source_variable,
source=makeExpressionCall(
called=ExpressionAttributeLookup(
expression=ExpressionTempVariableRef(
variable=source_variable, source_ref=source_ref
),
attribute_name="strip",
source_ref=source_ref,
),
args=strip_choice, # This is a tuple
kw=None,
source_ref=source_ref,
),
source_ref=source_ref,
)
acceptable_builtin_types = [
ExpressionBuiltinAnonymousRef(builtin_name="code", source_ref=source_ref)
]
if python_version >= 0x270:
acceptable_builtin_types.append(
makeExpressionBuiltinTypeRef(
builtin_name="memoryview", source_ref=source_ref
)
)
statements = (
StatementAssignmentVariable(
variable=source_variable, source=source, source_ref=source_ref
),
makeStatementConditional(
condition=ExpressionOperationNot(
operand=ExpressionBuiltinIsinstance(
instance=ExpressionTempVariableRef(
variable=source_variable, source_ref=source_ref
),
classes=makeExpressionMakeTupleOrConstant(
elements=acceptable_builtin_types,
user_provided=True,
source_ref=source_ref,
),
source_ref=source_ref,
),
source_ref=source_ref,
),
yes_branch=string_fixup,
no_branch=None,
source_ref=source_ref,
),
makeStatementReturn(
expression=ExpressionBuiltinEval(
source_code=ExpressionTempVariableRef(
variable=source_variable, source_ref=source_ref
),
globals_arg=globals_ref,
locals_arg=locals_ref,
source_ref=source_ref,
),
source_ref=source_ref,
),
)
tried = makeStatementsSequence(
statements=(tried,) + statements, allow_none=False, source_ref=source_ref
)
outline_body.setChild(
"body",
makeStatementsSequenceFromStatement(
statement=makeTryFinallyStatement(
provider=outline_body,
tried=tried,
final=final,
source_ref=source_ref,
)
),
)
return outline_body
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=wrapEvalBuiltin,
builtin_spec=BuiltinParameterSpecs.builtin_eval_spec,
)
if python_version >= 0x300:
from nuitka.nodes.ExecEvalNodes import ExpressionBuiltinExec
def exec_extractor(node):
def wrapExpressionBuiltinExecCreation(
source, globals_arg, locals_arg, source_ref
):
provider = node.getParentVariableProvider()
outline_body = ExpressionOutlineBody(
provider=provider, name="exec_call", source_ref=source_ref
)
globals_ref, locals_ref, tried, final = wrapEvalGlobalsAndLocals(
provider=provider,
globals_node=globals_arg,
locals_node=locals_arg,
temp_scope=outline_body.getOutlineTempScope(),
source_ref=source_ref,
)
tried = makeStatementsSequence(
statements=(
tried,
makeStatementReturn(
expression=ExpressionBuiltinExec(
source_code=source,
globals_arg=globals_ref,
locals_arg=locals_ref,
source_ref=source_ref,
),
source_ref=source_ref,
),
),
allow_none=False,
source_ref=source_ref,
)
# Hack: Allow some APIs to work already
tried.parent = outline_body
outline_body.setChild(
"body",
makeStatementsSequenceFromStatement(
statement=makeTryFinallyStatement(
provider=provider,
tried=tried,
final=final,
source_ref=source_ref,
)
),
)
return outline_body
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=wrapExpressionBuiltinExecCreation,
builtin_spec=BuiltinParameterSpecs.builtin_eval_spec,
)
def compile_extractor(node):
def wrapExpressionBuiltinCompileCreation(
source_code, filename, mode, flags, dont_inherit, optimize=None, source_ref=None
):
return ExpressionBuiltinCompile(
source_code=source_code,
filename=filename,
mode=mode,
flags=flags,
dont_inherit=dont_inherit,
optimize=optimize,
source_ref=source_ref,
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=wrapExpressionBuiltinCompileCreation,
builtin_spec=BuiltinParameterSpecs.builtin_compile_spec,
)
def open_extractor(node):
def makeOpen0(source_ref):
# pylint: disable=unused-argument
try:
open()
except Exception as e: # We want to be broad here, pylint: disable=broad-except
return makeRaiseExceptionReplacementExpressionFromInstance(
expression=node, exception=e
)
else:
raise NuitkaAssumptionError("open without argument is expected to raise")
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinOpen,
builtin_spec=BuiltinParameterSpecs.builtin_open_spec,
empty_special_class=makeOpen0,
)
def super_extractor(node):
def wrapSuperBuiltin(type_arg, object_arg, source_ref):
if type_arg is None and python_version >= 0x300:
if provider.isCompiledPythonModule():
return makeRaiseExceptionReplacementExpression(
expression=node,
exception_type="RuntimeError",
exception_value="super(): no arguments",
)
class_variable = provider.getVariableForReference(variable_name="__class__")
provider.trace_collection.getVariableCurrentTrace(class_variable).addUsage()
type_arg = ExpressionVariableRef(
# Ought to be already taken as a closure variable due to the "super"
# flag set during tree building.
variable=class_variable,
source_ref=source_ref,
)
# If we already have this as a local variable, then use that
# instead.
type_arg_owner = class_variable.getOwner()
if type_arg_owner is provider or not (
type_arg_owner.isExpressionFunctionBody()
or type_arg_owner.isExpressionClassBody()
):
return makeRaiseExceptionReplacementExpression(
expression=node,
exception_type="SystemError"
if python_version < 0x331
else "RuntimeError",
exception_value="super(): __class__ cell not found",
)
if object_arg is None:
if (
provider.isExpressionGeneratorObjectBody()
or provider.isExpressionCoroutineObjectBody()
or provider.isExpressionAsyncgenObjectBody()
):
parameter_provider = provider.getParentVariableProvider()
else:
parameter_provider = provider
if parameter_provider.getParameters().getArgumentCount() == 0:
return makeRaiseExceptionReplacementExpression(
expression=node,
exception_type="RuntimeError",
exception_value="super(): no arguments",
)
else:
par1_name = parameter_provider.getParameters().getArgumentNames()[0]
object_variable = provider.getVariableForReference(
variable_name=par1_name
)
provider.trace_collection.getVariableCurrentTrace(
object_variable
).addUsage()
object_arg = ExpressionVariableRef(
variable=object_variable, source_ref=source_ref
)
if not object_arg.getVariable().isParameterVariable():
return makeRaiseExceptionReplacementExpression(
expression=node,
exception_type="SystemError"
if python_version < 0x300
else "RuntimeError",
exception_value="super(): __class__ cell not found",
)
return ExpressionBuiltinSuper0(
type_arg=type_arg, object_arg=object_arg, source_ref=source_ref
)
return ExpressionBuiltinSuper2(
type_arg=type_arg, object_arg=object_arg, source_ref=source_ref
)
provider = node.getParentVariableProvider().getEntryPoint()
if not provider.isCompiledPythonModule():
provider.discardFlag("has_super")
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=wrapSuperBuiltin,
builtin_spec=BuiltinParameterSpecs.builtin_super_spec,
)
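# Illustrative note (added): in effect, a zero-argument Python3 "super()" call inside a
# method body is rewritten by the extractor above into super(__class__, <first parameter>),
# and the RuntimeError/SystemError raises are reproduced whenever the "__class__" cell or
# a usable first parameter is not available.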
def hasattr_extractor(node):
# We need to shadow built-in names for the arguments, pylint: disable=redefined-builtin
def makeExpressionBuiltinHasattr(object, name, source_ref):
return ExpressionBuiltinHasattr(
expression=object, name=name, source_ref=source_ref
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=makeExpressionBuiltinHasattr,
builtin_spec=BuiltinParameterSpecs.builtin_hasattr_spec,
)
def getattr_extractor(node):
# We need to shadow built-in names for the arguments, pylint: disable=redefined-builtin
def makeExpressionBuiltinGetattr(object, name, default, source_ref):
return ExpressionBuiltinGetattr(
expression=object, name=name, default=default, source_ref=source_ref
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=makeExpressionBuiltinGetattr,
builtin_spec=BuiltinParameterSpecs.builtin_getattr_spec,
)
def setattr_extractor(node):
# We need to shadow built-in names for the arguments, pylint: disable=redefined-builtin
def makeExpressionBuiltinSetattr(object, name, value, source_ref):
return ExpressionBuiltinSetattr(
expression=object, name=name, value=value, source_ref=source_ref
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=makeExpressionBuiltinSetattr,
builtin_spec=BuiltinParameterSpecs.builtin_setattr_spec,
)
def isinstance_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinIsinstance,
builtin_spec=BuiltinParameterSpecs.builtin_isinstance_spec,
)
def issubclass_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinIssubclass,
builtin_spec=BuiltinParameterSpecs.builtin_isinstance_spec,
)
def bytearray_extractor(node):
def makeBytearray0(source_ref):
return makeConstantRefNode(constant=bytearray(), source_ref=source_ref)
def selectNextBuiltinClass(string, encoding, errors, source_ref):
if encoding is None:
return ExpressionBuiltinBytearray1(value=string, source_ref=source_ref)
else:
return ExpressionBuiltinBytearray3(
string=string, encoding=encoding, errors=errors, source_ref=source_ref
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=selectNextBuiltinClass,
builtin_spec=BuiltinParameterSpecs.builtin_bytearray_spec,
empty_special_class=makeBytearray0,
)
def slice_extractor(node):
def wrapSlice(start, stop, step, source_ref):
if start is not None and stop is None:
# The default rules are unusual: if only one argument is given, it is
# the stop value, i.e. the second parameter.
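# For example (illustrative): slice(5) is equivalent to slice(None, 5, None),
# so only "stop" is set while "start" and "step" stay None.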
stop = start
start = None
return makeExpressionBuiltinSlice(
start=start, stop=stop, step=step, source_ref=source_ref
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=wrapSlice,
builtin_spec=BuiltinParameterSpecs.builtin_slice_spec,
)
def hash_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinHash,
builtin_spec=BuiltinParameterSpecs.builtin_hash_spec,
)
def format_extractor(node):
def makeFormat0(source_ref):
# pylint: disable=unused-argument
return makeRaiseExceptionReplacementExpressionFromInstance(
expression=node,
exception=TypeError("format() takes at least 1 argument (0 given)"),
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinFormat,
builtin_spec=BuiltinParameterSpecs.builtin_format_spec,
empty_special_class=makeFormat0,
)
def staticmethod_extractor(node):
def makeStaticmethod0(source_ref):
# pylint: disable=unused-argument
return makeRaiseExceptionReplacementExpressionFromInstance(
expression=node,
exception=TypeError("staticmethod expected 1 arguments, got 0"),
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinStaticmethod,
builtin_spec=BuiltinParameterSpecs.builtin_staticmethod_spec,
empty_special_class=makeStaticmethod0,
)
def classmethod_extractor(node):
def makeStaticmethod0(source_ref):
# pylint: disable=unused-argument
return makeRaiseExceptionReplacementExpressionFromInstance(
expression=node,
exception=TypeError("classmethod expected 1 arguments, got 0"),
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinClassmethod,
builtin_spec=BuiltinParameterSpecs.builtin_classmethod_spec,
empty_special_class=makeStaticmethod0,
)
def divmod_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionOperationBinaryDivmod,
builtin_spec=BuiltinParameterSpecs.builtin_divmod_spec,
)
_dispatch_dict = {
"compile": compile_extractor,
"globals": globals_extractor,
"locals": locals_extractor,
"eval": eval_extractor,
"dir": dir_extractor,
"vars": vars_extractor,
"__import__": import_extractor,
"chr": chr_extractor,
"ord": ord_extractor,
"bin": bin_extractor,
"oct": oct_extractor,
"hex": hex_extractor,
"id": id_extractor,
"type": type_extractor,
"iter": iter_extractor,
"next": next_extractor,
"sum": sum_extractor,
"tuple": tuple_extractor,
"list": list_extractor,
"dict": dict_extractor,
"set": set_extractor,
"frozenset": frozenset_extractor,
"float": float_extractor,
"complex": complex_extractor,
"str": str_extractor,
"bool": bool_extractor,
"int": int_extractor,
"repr": repr_extractor,
"len": len_extractor,
"any": any_extractor,
"abs": abs_extractor,
"all": all_extractor,
"super": super_extractor,
"hasattr": hasattr_extractor,
"getattr": getattr_extractor,
"setattr": setattr_extractor,
"isinstance": isinstance_extractor,
"issubclass": issubclass_extractor,
"bytearray": bytearray_extractor,
"slice": slice_extractor,
"hash": hash_extractor,
"format": format_extractor,
"open": open_extractor,
"staticmethod": staticmethod_extractor,
"classmethod": classmethod_extractor,
"divmod": divmod_extractor,
}
if python_version < 0x300:
# These are not in Python3
_dispatch_dict["long"] = long_extractor
_dispatch_dict["unicode"] = unicode_extractor
_dispatch_dict["execfile"] = execfile_extractor
_dispatch_dict["xrange"] = xrange_extractor
_dispatch_dict["range"] = range_extractor
else:
# This one is not in Python2:
_dispatch_dict["bytes"] = bytes_extractor
_dispatch_dict["ascii"] = ascii_extractor
_dispatch_dict["exec"] = exec_extractor
# The Python3 range is really an xrange, use that.
_dispatch_dict["range"] = xrange_extractor
def check():
from nuitka.Builtins import builtin_names
for builtin_name in _dispatch_dict:
assert builtin_name in builtin_names, builtin_name
check()
_builtin_ignore_list = (
# Not supporting 'print', because it could be replaced, and is not
# worth the effort yet.
"print",
# TODO: This could, and should be supported, as we could e.g. lower
# types easily for it.
"sorted",
# TODO: This would be very worthwhile, as it could easily optimize
# its iteration away.
"zip",
# TODO: This would be most precious due to the type hint it gives
"enumerate",
# TODO: Also worthwhile for known values.
"reversed",
# TODO: Not sure what this really is about.
"memoryview",
)
def _describeNewNode(builtin_name, inspect_node):
"""Describe the change for better understanding."""
# Don't mention side effects, that's not what we care about.
if inspect_node.isExpressionSideEffects():
inspect_node = inspect_node.subnode_expression
if inspect_node.isExpressionBuiltinImport():
tags = "new_import"
message = """\
Replaced dynamic "__import__" call with static built-in call."""
elif inspect_node.isExpressionBuiltin() or inspect_node.isStatementExec():
tags = "new_builtin"
message = "Replaced call to built-in '%s' with built-in call '%s'." % (
builtin_name,
inspect_node.kind,
)
elif inspect_node.isExpressionRaiseException():
tags = "new_raise"
message = """\
Replaced call to built-in '%s' with exception raise.""" % (
builtin_name,
)
elif inspect_node.isExpressionOperationBinary():
tags = "new_expression"
message = """\
Replaced call to built-in '%s' with binary operation '%s'.""" % (
builtin_name,
inspect_node.getOperator(),
)
elif inspect_node.isExpressionOperationUnary():
tags = "new_expression"
message = """\
Replaced call to built-in '%s' with unary operation '%s'.""" % (
builtin_name,
inspect_node.getOperator(),
)
elif inspect_node.isExpressionCall():
tags = "new_expression"
message = """\
Replaced call to built-in '%s' with call.""" % (
builtin_name,
)
elif inspect_node.isExpressionOutlineBody():
tags = "new_expression"
message = (
"""\
Replaced call to built-in '%s' with outlined call."""
% builtin_name
)
elif inspect_node.isExpressionConstantRef():
tags = "new_expression"
message = (
"""\
Replaced call to built-in '%s' with constant value."""
% builtin_name
)
else:
assert False, (builtin_name, "->", inspect_node)
return tags, message
def computeBuiltinCall(builtin_name, call_node):
# There is some dispatching for how to output various types of changes,
# with lots of cases.
if builtin_name in _dispatch_dict:
new_node = _dispatch_dict[builtin_name](call_node)
assert new_node is not call_node, builtin_name
assert new_node is not None, builtin_name
# For traces, we are going to ignore side effects, and output traces
# based only on the new node itself.
tags, message = _describeNewNode(builtin_name, new_node)
return new_node, tags, message
else:
if False and builtin_name not in _builtin_ignore_list:
optimization_logger.warning(
"Not handling built-in %r, consider support." % builtin_name
)
return call_node, None, None
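# Illustrative sketch (added; not part of the original module): for a call node
# representing e.g. "len(x)" where "len" resolved to a built-in name reference,
# computeBuiltinCall("len", call_node) dispatches to len_extractor, which builds an
# ExpressionBuiltinLen node via extractBuiltinArgs; the returned (new_node, tags,
# message) triple then replaces the dynamic call during optimization.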
|
class IFormatProvider:
""" Provides a mechanism for retrieving an object to control formatting. """
def GetFormat(self,formatType):
"""
GetFormat(self: IFormatProvider, formatType: Type) -> object
Returns an object that provides formatting services for the specified type.
formatType: An object that specifies the type of format object to return.
Returns: An instance of the object specified by formatType, if the System.IFormatProvider implementation
can supply that type of object; otherwise, null.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
|
import os
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session
from starlette.responses import FileResponse
from histocat.api.db import get_db
from histocat.core.panorama import service
router = APIRouter()
@router.get("/panoramas/{id}/image", responses={200: {"content": {"image/png": {}}}})
async def read_panorama_image(
id: int,
# user: User = Depends(get_current_active_user),
db: Session = Depends(get_db),
):
"""
Get panorama image by id
"""
item = service.get(db, id=id)
slide = item.slide
return FileResponse(
os.path.join(
item.slide.location,
"origin",
f"{slide.name}_s{slide.origin_id}_p{item.origin_id}_pano.png",
),
media_type="image/png",
)
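# Illustrative usage (added): with this router mounted at the API root, a request such as
# GET /panoramas/42/image returns the PNG for panorama 42, read from
# <slide.location>/origin/<slide.name>_s<slide.origin_id>_p<panorama.origin_id>_pano.png.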
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The configs list command."""
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.runtime_config import util
from googlecloudsdk.calliope import base
class List(base.ListCommand):
"""List runtime-config resources within the current project.
This command lists runtime-config resources for the current project.
"""
DEFAULT_PAGE_SIZE = 100
detailed_help = {
'EXAMPLES': """\
To list all runtime-config resources for the current project, run:
$ {command}
The --filter parameter can be used to filter results based on content.
For example, to list all runtime-config resources with names that
begin with 'foo', run:
$ {command} --filter 'name=foo*'
""",
}
@staticmethod
def Args(parser):
parser.display_info.AddFormat('table(name, description)')
def Run(self, args):
"""Run 'runtime-configs list'.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Yields:
The list of runtime-config resources.
Raises:
HttpException: An http error response was received while executing api
request.
"""
config_client = util.ConfigClient()
messages = util.Messages()
project = util.Project()
request = messages.RuntimeconfigProjectsConfigsListRequest(
parent=util.ProjectPath(project),
)
page_size = args.page_size or self.DEFAULT_PAGE_SIZE
results = list_pager.YieldFromList(
config_client, request, field='configs',
batch_size_attribute='pageSize', limit=args.limit,
batch_size=page_size,
)
for result in results:
yield util.FormatConfig(result)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-23 09:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('filemonitor', '0005_auto_20170523_1541'),
]
operations = [
migrations.RenameField(
model_name='actualfile',
old_name='status',
new_name='ckstatus',
),
migrations.AddField(
model_name='actualfile',
name='dlstatus',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='actualfile',
name='remark',
field=models.TextField(default=''),
),
]
|
# Use yield to get the effect of single-threaded asynchronous concurrency
import time
def consumer(name):
print("%s 准备吃包子啦!" %name)
while True:
baozi = yield  # receive the value sent in
print("包子[%s]来了,被[%s]吃了!" %(baozi,name))
def producer(name):
c = consumer("A")
c2 = consumer("B")
c.__next__()
c2.__next__()
print("老子开始做包子了")
for i in range(2):  # two rounds (i = 0, 1), matching the expected output below
time.sleep(1)
print("做了两个包子了")
c.send(i)  # send the value into the yield expression
c2.send(i)
producer("alex") # 每分钟做两个包子,并同时分给两个人
"""
A 准备吃包子啦!
B 准备吃包子啦!
老子开始做包子了
做了两个包子了
包子[0]来了,被[A]吃了!
包子[0]来了,被[B]吃了!
做了两个包子了
包子[1]来了,被[A]吃了!
包子[1]来了,被[B]吃了!
"""
# Python decorators
# tv=login(tv)
# tv("alex")
def w1(func):
print("我在w1函数内")
def inner():
print("我在inner函数内") #
#2
#3
return func
return inner
#@w1
def f1():
print('我在f1函数内')
flag=w1(f1) # run the w1 function
#print(flag)
flag=flag() # run the inner function
flag() # run the f1 function
'''
我在w1函数内
我在inner函数内
我在f1函数内
'''
#---------------next----------
print("开始@的用法说明")
@w1
def f2():
print('我在f1函数内')
f2()
"""
@w1: runs w1 with the decorated function as its argument, i.e. equivalent to w1(f2).
The decorated name is then rebound to w1's return value,
so the new f2 is the inner function returned by w1(f2).
"""
#@w1(f1) # written like this, w1(f1) would be evaluated first and its result used as the decorator
def f3():
print('我在f1函数内')
"""
@filter(before,after)
1. filter(before,after) is executed first
2. its return value is then applied as the decorator, like a plain @outer
3. the decorated function is rebound to the new wrapper it returns
(A minimal sketch follows right after this block.)
"""
#--------------- recursion ----------
def calc(n):
if n/2 >1:
print(n)
res=calc(n/2)
print(n,res)
return res
calc(20)
"""
20
10.0
5.0
2.5
2.5 None
5.0 None
10.0 None
20 None
"""
# Fibonacci-style sequence: each new number is the sum of the previous two
def func3(arg1,arg2):
if arg1==0:
print(arg1)
print(arg2)
arg3=arg1+arg2
print(arg3)
if arg3<110:
func3(arg2,arg3)
func3(0,1)
"""
0
1
1
2
3
5
8
"""
# Binary search (a minimal sketch follows after the data is built)
data = list(range(1,100,3))
print(data)
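# A minimal binary search sketch (added) over the sorted "data" list built above;
# the original snippet stopped after printing the data.
def binary_search(items, target):
    low, high = 0, len(items) - 1
    while low <= high:
        mid = (low + high) // 2
        if items[mid] == target:
            return mid          # index of the target
        elif items[mid] < target:
            low = mid + 1       # keep searching the right half
        else:
            high = mid - 1      # keep searching the left half
    return -1                   # not found

print(binary_search(data, 46))  # 46 is in the list (1 + 15*3), prints 15
print(binary_search(data, 50))  # 50 is not in the list, prints -1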
|
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : GUC
Case Name : Set the autoanalyze parameter to on with the gs_guc set method and check the expected result
Description :
1. Query the default value of autoanalyze
2. With the default value off, create a table and run analyze manually
3. Query fields such as autoanalyze_count and last_autoanalyze for the table
in the pg_stat_all_tables system catalog
4. Change the parameter value to on and restart the database
5. Query the parameter value after the change
6. Restore the parameter to its default value
Expect :
1. The default value is shown as off
2. The table is created and analyze runs successfully
3. The query succeeds; with the default value off the table has not been auto-analyzed
4. The change succeeds
5. on is shown
6. The default value is restored successfully
History :
"""
import unittest
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
LOG = Logger()
commonsh = CommonSH('dbuser')
class QueryPlan(unittest.TestCase):
def setUp(self):
self.constant = Constant()
LOG.info(
'------Opengauss_Function_Guc_Queryplan_Case0101start------')
def test_autoanalyze(self):
LOG.info('--Step 1: check the default value--')
sql_cmd = commonsh.execut_db_sql('''show autoanalyze;''')
LOG.info(sql_cmd)
self.res = sql_cmd.splitlines()[-2].strip()
LOG.info('--Step 2: after creating the table, query it via the system catalog and run analyze--')
sql_cmd = commonsh.execut_db_sql('''drop table if exists test_101;
create table test_101 (id int);
select relname, reltuples,relpages from pg_class where
relname ='test_101';
analyze test_101(id);
''')
LOG.info(sql_cmd)
self.assertIn(self.constant.TABLE_CREATE_SUCCESS, sql_cmd)
self.assertIn('0', sql_cmd)
self.assertIn(self.constant.ANALYZE_SUCCESS_MSG, sql_cmd)
LOG.info('--Step 3: query the system catalog--')
sql_cmd = commonsh.execut_db_sql('''select last_analyze,analyze_count,
relname,last_autoanalyze,autovacuum_count from pg_stat_all_tables
where relname='test_101';
''')
LOG.info(sql_cmd)
self.assertIn('0', sql_cmd)
LOG.info('--Step 4: set autoanalyze to on with gs_guc set and restart the database--')
msg = commonsh.execute_gsguc('set',
self.constant.GSGUC_SUCCESS_MSG,
'autoanalyze =on')
LOG.info(msg)
self.assertTrue(msg)
msg = commonsh.restart_db_cluster()
LOG.info(msg)
status = commonsh.get_db_cluster_status()
self.assertTrue("Degraded" in status or "Normal" in status)
LOG.info('--Step 5: query the parameter value after the change--')
sql_cmd = commonsh.execut_db_sql('show autoanalyze;')
LOG.info(sql_cmd)
self.assertIn(self.constant.BOOLEAN_VALUES[0], sql_cmd)
def tearDown(self):
LOG.info('--Step 6: clean up the environment--')
sql_cmd = commonsh.execut_db_sql('drop table if exists test_101;')
LOG.info(sql_cmd)
sql_cmd = commonsh.execut_db_sql('show autoanalyze;')
LOG.info(sql_cmd)
if self.res != sql_cmd.split('\n')[-2].strip():
msg = commonsh.execute_gsguc('set',
self.constant.GSGUC_SUCCESS_MSG,
f"autoanalyze={self.res}")
LOG.info(msg)
msg = commonsh.restart_db_cluster()
LOG.info(msg)
status = commonsh.get_db_cluster_status()
self.assertTrue("Degraded" in status or "Normal" in status)
sql_cmd = commonsh.execut_db_sql('show autoanalyze;')
LOG.info(sql_cmd)
LOG.info(
'-----Opengauss_Function_Guc_Queryplan_Case0101 finished------')
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import TYPE_CHECKING
from azure.core import PipelineClient
from msrest import Deserializer, Serializer
from ._configuration import AutoRestHeadTestServiceConfiguration
from .operations import HttpSuccessOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Dict, Optional
from azure.core.credentials import AzureKeyCredential
from azure.core.rest import HttpRequest, HttpResponse
class AutoRestHeadTestService(object):
"""Test Infrastructure for AutoRest.
:ivar http_success: HttpSuccessOperations operations
:vartype http_success: azure.key.credential.sample.operations.HttpSuccessOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.AzureKeyCredential
:param base_url: Service URL. Default value is 'http://localhost:3000'.
:type base_url: str
"""
def __init__(
self,
credential, # type: AzureKeyCredential
base_url="http://localhost:3000", # type: str
**kwargs # type: Any
):
# type: (...) -> None
self._config = AutoRestHeadTestServiceConfiguration(credential=credential, **kwargs)
self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {} # type: Dict[str, Any]
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.http_success = HttpSuccessOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request, # type: HttpRequest
**kwargs # type: Any
):
# type: (...) -> HttpResponse
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> AutoRestHeadTestService
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
|
# Generated by Django 3.1.4 on 2020-12-18 06:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('assignment', '0007_document'),
]
operations = [
migrations.RenameField(
model_name='document',
old_name='description',
new_name='assignmentname',
),
migrations.AddField(
model_name='document',
name='regno',
field=models.CharField(blank=True, max_length=255),
),
migrations.AddField(
model_name='document',
name='staffname',
field=models.CharField(blank=True, max_length=255),
),
]
|
import json
import logging
from pathlib import Path
import sys
from typing import Optional
from pydantic import (
BaseModel,
StrictBool,
StrictInt,
StrictStr,
ValidationError,
validator,
)
log = logging.getLogger()
def validate_extension(extension):
"""
Checks that the API extension starts with, but does not end with, a '/'. An error is
raised, at which point the application exits, if the extension does not meet
these validation rules.
:param extension: The extension for the API
"""
extension = extension.strip()
if not extension.startswith("/"):
raise ValueError("must start with '/'")
if extension.endswith("/"):
raise ValueError("must not end with '/'")
return extension
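# For example (illustrative): validate_extension("/datagateway-api") returns the value
# unchanged, while "datagateway-api" (no leading '/') or "/datagateway-api/" (trailing
# '/') cause the validator to raise a ValueError.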
class DataGatewayAPI(BaseModel):
"""
Configuration model class that implements pydantic's BaseModel class to allow for
validation of the DataGatewayAPI config data using Python type annotations. It takes
the backend into account, meaning only the config options for the backend used are
required.
"""
backend: StrictStr
client_cache_size: Optional[StrictInt]
client_pool_init_size: Optional[StrictInt]
client_pool_max_size: Optional[StrictInt]
db_url: Optional[StrictStr]
extension: StrictStr
icat_check_cert: Optional[StrictBool]
icat_url: Optional[StrictStr]
_validate_extension = validator("extension", allow_reuse=True)(validate_extension)
@validator("db_url", always=True)
def require_db_config_value(cls, value, values): # noqa: B902, N805
"""
By default the `db_url` config field is optional so that it does not have to be
present in the config file if `backend` is set to `python_icat`. However, if the
`backend` is set to `db`, this validator essentially makes the `db_url` config
field mandatory. This means that an error is raised, at which point the
application exits, if a `db_url` config value is not present in the config file.
:param cls: :class:`DataGatewayAPI` pointer
:param value: The value of the given config field
:param values: The config field values loaded before the given config field
"""
if "backend" in values and values["backend"] == "db" and value is None:
raise TypeError("field required")
return value
@validator(
"client_cache_size",
"client_pool_init_size",
"client_pool_max_size",
"icat_check_cert",
"icat_url",
always=True,
)
def require_icat_config_value(cls, value, values): # noqa: B902, N805
"""
By default the above config fields that are passed to the `@validator` decorator
are optional so that they do not have to be present in the config file if
`backend` is set to `db`. However, if the `backend` is set to `python_icat`,
this validator essentially makes these config fields mandatory. This means that
an error is raised, at which point the application exits, if any of these config
values are not present in the config file.
:param cls: :class:`DataGatewayAPI` pointer
:param value: The value of the given config field
:param values: The config field values loaded before the given config field
"""
if "backend" in values and values["backend"] == "python_icat" and value is None:
raise TypeError("field required")
return value
def set_backend_type(self, backend_type):
"""
This setter is used as a way for automated tests to set the backend type. The
API can detect if the Flask app setup is from an automated test by checking the
app's config for a `TEST_BACKEND`. If this value exists (a KeyError will be
raised when the API is run normally, which will then grab the backend type from
`config.json`), it needs to be set using this function. This is required because
creating filters in the `QueryFilterFactory` is backend-specific so the backend
type must be fetched. This must be done using this module (rather than directly
importing and checking the Flask app's config) to avoid circular import issues.
"""
self.backend = backend_type
class Config:
"""
The behaviour of the BaseModel class can be controlled via this class.
"""
# Enables assignment validation on the BaseModel fields. Useful for when the
# backend type is changed using the set_backend_type function.
validate_assignment = True
class SearchAPI(BaseModel):
"""
Configuration model class that implements pydantic's BaseModel class to allow for
validation of the SearchAPI config data using Python type annotations.
"""
client_pool_init_size: StrictInt
client_pool_max_size: StrictInt
extension: StrictStr
icat_check_cert: StrictBool
icat_url: StrictStr
_validate_extension = validator("extension", allow_reuse=True)(validate_extension)
class TestUserCredentials(BaseModel):
username: StrictStr
password: StrictStr
class APIConfig(BaseModel):
"""
Configuration model class that implements pydantic's BaseModel class to allow for
validation of the API config data using Python type annotations. It ensures that
all required config options exist before getting too far into the setup of the API.
If a mandatory config option is missing or misspelled, or has a wrong value type,
    Pydantic raises a validation error with a breakdown of what was wrong and the
    application exits.
Config options used for testing are not checked here as they should only be used
during tests, not in the typical running of the API.
    Some options used when running the API (host, debug_mode etc.) aren't mandatory
    in production because they aren't used by the `wsgi.py` entrypoint, so they're
    not present in `config_keys`. However, they are required when using `main.py` as
    an entrypoint; in that case they are checked at API startup, so any missing
    options are caught quickly.
"""
datagateway_api: Optional[DataGatewayAPI]
debug_mode: Optional[StrictBool]
flask_reloader: Optional[StrictBool]
generate_swagger: StrictBool
host: Optional[StrictStr]
log_level: StrictStr
log_location: StrictStr
port: Optional[StrictStr]
search_api: Optional[SearchAPI]
test_mechanism: Optional[StrictStr]
test_user_credentials: Optional[TestUserCredentials]
@classmethod
def load(cls, path=Path(__file__).parent.parent.parent / "config.json"):
"""
Loads the config data from the JSON file and returns it as a APIConfig pydantic
model. Exits the application if it fails to locate the JSON config file or
the APIConfig model validation fails.
:param cls: :class:`APIConfig` pointer
:param path: path to the configuration file
:return: APIConfig model object that contains the config data
"""
try:
with open(path, encoding="utf-8") as target:
data = json.load(target)
return cls(**data)
except (IOError, ValidationError) as error:
sys.exit(f"An error occurred while trying to load the config data: {error}")
@validator("search_api")
def validate_api_extensions(cls, value, values): # noqa: B902, N805
"""
Checks that the DataGateway API and Search API extensions are not the same. An
error is raised, at which point the application exits, if the extensions are the
same.
:param cls: :class:`APIConfig` pointer
:param value: The value of the given config field
:param values: The config field values loaded before the given config field
"""
if (
"datagateway_api" in values
and values["datagateway_api"] is not None
and value is not None
and values["datagateway_api"].extension == value.extension
):
raise ValueError(
"extension cannot be the same as datagateway_api extension",
)
return value
config = APIConfig.load()
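# --- Illustrative sketch (not part of the original module) -------------------
# Guarded example of how APIConfig validation behaves when the model is built
# directly from a dict instead of config.json. All values below are
# hypothetical; note that the module-level APIConfig.load() above still runs
# first, so executing this file requires a valid config.json.
if __name__ == "__main__":
    example_config = {
        "generate_swagger": False,
        "log_level": "WARN",
        "log_location": "/tmp/datagateway-api.log",
        "datagateway_api": {
            "backend": "db",
            "db_url": "sqlite://",
            "extension": "/datagateway-api",
        },
    }
    try:
        print(APIConfig(**example_config))
    except ValidationError as validation_error:
        print(f"Config rejected: {validation_error}")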
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
from oslo_config import cfg
from oslo_config import types
from kolla.version import version_info as version
BASE_OS_DISTRO = ['centos', 'rhel', 'ubuntu', 'debian']
BASE_ARCH = ['x86_64', 'ppc64le', 'aarch64']
DEFAULT_BASE_TAGS = {
'centos': '7',
'rhel': '7',
'debian': '10',
'ubuntu': '18.04',
}
DISTRO_RELEASE = {
'centos': '7',
'rhel': '7',
'debian': '10',
'ubuntu': '18.04',
}
OPENSTACK_RELEASE = {
'centos': 'train',
'rhel': 'train',
'debian': 'master',
'ubuntu': 'master',
}
# This is a noarch repository, so we will use it on all architectures
DELOREAN = \
"https://trunk.rdoproject.org/centos7/current-passed-ci/delorean.repo"
DELOREAN_DEPS = "https://trunk.rdoproject.org/centos7/delorean-deps.repo"
INSTALL_TYPE_CHOICES = ['binary', 'source', 'rdo', 'rhos']
# TODO(mandre) check for file integrity instead of downloading from an HTTPS
# source
TARBALLS_BASE = "https://tarballs.openstack.org"
_PROFILE_OPTS = [
cfg.ListOpt('infra',
default=[
'ceph',
'certmonger',
'cron',
'elasticsearch',
'etcd',
'fluentd',
'haproxy',
'hacluster',
'keepalived',
'kibana',
'kolla-toolbox',
'logstash',
'mariadb',
'memcached',
'mongodb',
'opendaylight',
'openvswitch',
'ptp',
'qdrouterd',
'rabbitmq',
'redis',
'rsyslog',
'skydive',
'storm',
'tgtd',
],
help='Infra images'),
cfg.ListOpt('main',
default=[
'ceilometer',
'cinder',
'glance',
'heat',
'horizon',
'iscsi',
'keystone',
'neutron',
'nova-',
'placement',
'swift',
],
help='Main images'),
cfg.ListOpt('aux',
default=[
'aodh',
'blazar',
'cloudkitty',
'congress',
'designate',
'ec2-api',
'freezer',
'gnocchi',
'influxdb',
'ironic',
'kafka',
'karbor',
'kuryr',
'magnum',
'manila',
'masakari',
'mistral',
'monasca',
'murano',
'novajoin',
'octavia',
'panko',
'qinling',
'rally',
'redis',
'sahara',
'searchlight',
'senlin',
'solum',
'tacker',
'telegraf',
'trove',
'vitrage',
'zaqar',
'zookeeper',
'zun',
],
                help='Aux images'),
cfg.ListOpt('default',
default=[
'chrony',
'cron',
'kolla-toolbox',
'fluentd',
'glance',
'haproxy',
'heat',
'horizon',
'keepalived',
'keystone',
'mariadb',
'memcached',
'neutron',
'nova-',
'placement',
'openvswitch',
'rabbitmq',
],
help='Default images'),
]
hostarch = os.uname()[4]
_CLI_OPTS = [
cfg.StrOpt('base', short='b', default='centos',
choices=BASE_OS_DISTRO,
help='The distro type of the base image.'),
cfg.StrOpt('base-tag', default='latest',
help='The base distro image tag'),
cfg.StrOpt('base-image',
               help='The base image name. Default is the same as base.'),
cfg.StrOpt('base-arch', default=hostarch,
choices=BASE_ARCH,
help='The base architecture. Default is same as host.'),
cfg.BoolOpt('use-dumb-init', default=True,
help='Use dumb-init as init system in containers'),
cfg.BoolOpt('debug', short='d', default=False,
help='Turn on debugging log level'),
cfg.BoolOpt('skip-parents', default=False,
help='Do not rebuild parents of matched images'),
cfg.BoolOpt('skip-existing', default=False,
help='Do not rebuild images present in the docker cache'),
cfg.DictOpt('build-args',
help='Set docker build time variables'),
cfg.BoolOpt('keep', default=False,
help='Keep failed intermediate containers'),
cfg.BoolOpt('list-dependencies', short='l',
help='Show image dependencies (filtering supported)'),
cfg.BoolOpt('list-images',
help='Show all available images (filtering supported)'),
cfg.StrOpt('namespace', short='n', default='kolla',
help='The Docker namespace name'),
cfg.StrOpt('network_mode', default=None,
help='The network mode for Docker build. Example: host'),
cfg.BoolOpt('cache', default=True,
help='Use the Docker cache when building'),
cfg.MultiOpt('profile', types.String(), short='p',
help=('Build a pre-defined set of images, see [profiles]'
' section in config. The default profiles are:'
' {}'.format(', '.join(
[opt.name for opt in _PROFILE_OPTS])
))),
cfg.BoolOpt('push', default=False,
help='Push images after building'),
cfg.IntOpt('push-threads', default=1, min=1,
               help=('The number of threads to use while pushing'
                     ' images. Note: Docker cannot handle threaded'
                     ' pushes properly')),
cfg.IntOpt('retries', short='r', default=3, min=0,
help='The number of times to retry while building'),
cfg.MultiOpt('regex', types.String(), positional=True,
help=('Build only images matching regex and its'
' dependencies')),
cfg.StrOpt('registry',
help=('The docker registry host. The default registry host'
' is Docker Hub')),
cfg.StrOpt('save-dependency',
help=('Path to the file to store the docker image'
' dependency in Graphviz dot format')),
cfg.StrOpt('format', short='f', default='json',
choices=['json', 'none'],
help='Format to write the final results in'),
cfg.StrOpt('tarballs-base', default=TARBALLS_BASE,
help='Base url to OpenStack tarballs'),
cfg.StrOpt('type', short='t', default='binary',
choices=INSTALL_TYPE_CHOICES,
dest='install_type',
help=('The method of the OpenStack install.')),
cfg.IntOpt('threads', short='T', default=8, min=1,
help=('The number of threads to use while building.'
' (Note: setting to one will allow real time'
' logging)')),
cfg.StrOpt('tag', default=version.cached_version_string(),
help='The Docker tag'),
cfg.BoolOpt('template-only', default=False,
help="Don't build images. Generate Dockerfile only"),
cfg.IntOpt('timeout', default=120,
help='Time in seconds after which any operation times out'),
cfg.MultiOpt('template-override', types.String(),
help='Path to template override file'),
cfg.MultiOpt('docker-dir', types.String(),
help=('Path to additional docker file template directory,'
' can be specified multiple times'),
short='D', default=[]),
cfg.StrOpt('logs-dir', help='Path to logs directory'),
cfg.BoolOpt('pull', default=True,
help='Attempt to pull a newer version of the base image'),
cfg.StrOpt('work-dir', help=('Path to be used as working directory.'
' By default, a temporary dir is created')),
cfg.BoolOpt('squash', default=False,
help=('Squash the image layers. WARNING: it will consume lots'
' of disk IO. "docker-squash" tool is required, install'
' it by "pip install docker-squash"')),
cfg.StrOpt('openstack-release', default='master',
help='OpenStack release for building kolla-toolbox'),
cfg.StrOpt('openstack-branch', default='master',
help='Branch for source images'),
cfg.BoolOpt('docker-healthchecks', default=True,
help='Add Kolla docker healthcheck scripts in the image')
]
_BASE_OPTS = [
cfg.StrOpt('maintainer',
default='Kolla Project (https://launchpad.net/kolla)',
help='Content of the maintainer label'),
cfg.StrOpt('distro_package_manager', default=None,
help=('Use this parameter to override the default package '
'manager used by kolla. For example, if you want to use '
                     'yum on a system with dnf, set this to yum, which will '
                     'use the yum command in the build process')),
cfg.StrOpt('base_package_type', default=None,
               help=('Set the package type of the distro. If not set, it '
                     'defaults to "rpm" for RHEL-based distros and "deb" '
                     'for Debian-based distros.')),
cfg.ListOpt('rpm_setup_config', default=[DELOREAN, DELOREAN_DEPS],
help=('Comma separated list of .rpm or .repo file(s) '
'or URL(s) to install before building containers')),
cfg.StrOpt('apt_sources_list', help=('Path to custom sources.list')),
cfg.StrOpt('apt_preferences', help=('Path to custom apt/preferences')),
cfg.BoolOpt('squash-cleanup', default=True,
help='Remove source image from Docker after squashing'),
cfg.StrOpt('squash-tmp-dir',
help='Temporary directory to be used during squashing'),
cfg.BoolOpt('clean_package_cache', default=True,
help='Clean all package cache.')
]
SOURCES = {
'openstack-base': {
'type': 'url',
'location': ('$tarballs_base/requirements/'
'requirements-${openstack_branch}.tar.gz')},
'aodh-base': {
'type': 'url',
'location': ('$tarballs_base/aodh/'
'aodh-${openstack_branch}.tar.gz')},
'barbican-base': {
'type': 'url',
'location': ('$tarballs_base/barbican/'
'barbican-${openstack_branch}.tar.gz')},
'bifrost-base': {
'type': 'url',
'location': ('$tarballs_base/bifrost/'
'bifrost-${openstack_branch}.tar.gz')},
'blazar-base': {
'type': 'url',
'location': ('$tarballs_base/blazar/'
'blazar-${openstack_branch}.tar.gz')},
'ceilometer-base': {
'type': 'url',
'location': ('$tarballs_base/ceilometer/'
'ceilometer-${openstack_branch}.tar.gz')},
'ceilometer-base-plugin-panko': {
'type': 'url',
'location': ('$tarballs_base/panko/'
'panko-${openstack_branch}.tar.gz')},
'cinder-base': {
'type': 'url',
'location': ('$tarballs_base/cinder/'
'cinder-${openstack_branch}.tar.gz')},
'congress-base': {
'type': 'url',
'location': ('$tarballs_base/congress/'
'congress-${openstack_branch}.tar.gz')},
'cloudkitty-base': {
'type': 'url',
'location': ('$tarballs_base/cloudkitty/'
'cloudkitty-${openstack_branch}.tar.gz')},
'cyborg-base': {
'type': 'url',
'location': ('$tarballs_base/cyborg/'
'cyborg-${openstack_branch}.tar.gz')},
'designate-base': {
'type': 'url',
'location': ('$tarballs_base/designate/'
'designate-${openstack_branch}.tar.gz')},
'ec2-api': {
'type': 'url',
'location': ('$tarballs_base/ec2-api/'
'ec2-api-${openstack_branch}.tar.gz')},
'freezer-api': {
'type': 'url',
'location': ('$tarballs_base/freezer-api/'
'freezer-api-${openstack_branch}.tar.gz')},
'freezer-base': {
'type': 'url',
'location': ('$tarballs_base/freezer/'
'freezer-${openstack_branch}.tar.gz')},
'glance-base': {
'type': 'url',
'location': ('$tarballs_base/glance/'
'glance-${openstack_branch}.tar.gz')},
'gnocchi-base': {
'type': 'git',
'reference': 'master',
'location': ('https://github.com/gnocchixyz/'
'gnocchi.git')},
'heat-base': {
'type': 'url',
'location': ('$tarballs_base/heat/'
'heat-${openstack_branch}.tar.gz')},
'horizon': {
'type': 'url',
'location': ('$tarballs_base/horizon/'
'horizon-${openstack_branch}.tar.gz')},
'horizon-plugin-blazar-dashboard': {
'type': 'url',
'location': ('$tarballs_base/blazar-dashboard/'
'blazar-dashboard-${openstack_branch}.tar.gz')},
'horizon-plugin-congress-dashboard': {
'type': 'url',
'location': ('$tarballs_base/congress-dashboard/'
'congress-dashboard-${openstack_branch}.tar.gz')},
'horizon-plugin-cloudkitty-dashboard': {
'type': 'url',
'location': ('$tarballs_base/cloudkitty-dashboard/'
'cloudkitty-dashboard-${openstack_branch}.tar.gz')},
'horizon-plugin-designate-dashboard': {
'type': 'url',
'location': ('$tarballs_base/designate-dashboard/'
'designate-dashboard-${openstack_branch}.tar.gz')},
'horizon-plugin-fwaas-dashboard': {
'type': 'url',
'location': ('$tarballs_base/neutron-fwaas-dashboard/'
'neutron-fwaas-dashboard-${openstack_branch}.tar.gz')},
'horizon-plugin-freezer-web-ui': {
'type': 'url',
'location': ('$tarballs_base/freezer-web-ui/'
'freezer-web-ui-${openstack_branch}.tar.gz')},
'horizon-plugin-heat-dashboard': {
'type': 'url',
'location': ('$tarballs_base/heat-dashboard/'
'heat-dashboard-${openstack_branch}.tar.gz')},
'horizon-plugin-ironic-ui': {
'type': 'url',
'location': ('$tarballs_base/ironic-ui/'
'ironic-ui-${openstack_branch}.tar.gz')},
'horizon-plugin-karbor-dashboard': {
'type': 'url',
'location': ('$tarballs_base/karbor-dashboard/'
'karbor-dashboard-${openstack_branch}.tar.gz')},
'horizon-plugin-magnum-ui': {
'type': 'url',
'location': ('$tarballs_base/magnum-ui/'
'magnum-ui-${openstack_branch}.tar.gz')},
'horizon-plugin-manila-ui': {
'type': 'url',
'location': ('$tarballs_base/manila-ui/'
'manila-ui-${openstack_branch}.tar.gz')},
'horizon-plugin-masakari-dashboard': {
'type': 'url',
'location': ('$tarballs_base/masakari-dashboard/'
'masakari-dashboard-${openstack_branch}.tar.gz')},
'horizon-plugin-mistral-dashboard': {
'type': 'url',
'location': ('$tarballs_base/mistral-dashboard/'
'mistral-dashboard-${openstack_branch}.tar.gz')},
'horizon-plugin-monasca-ui': {
'type': 'url',
'location': ('$tarballs_base/monasca-ui/'
'monasca-ui-${openstack_branch}.tar.gz')},
'horizon-plugin-murano-dashboard': {
'type': 'url',
'location': ('$tarballs_base/murano-dashboard/'
'murano-dashboard-${openstack_branch}.tar.gz')},
'horizon-plugin-neutron-vpnaas-dashboard': {
'type': 'url',
'location': ('$tarballs_base/neutron-vpnaas-dashboard/'
'neutron-vpnaas-dashboard-${openstack_branch}.tar.gz')},
'horizon-plugin-octavia-dashboard': {
'type': 'url',
'location': ('$tarballs_base/octavia-dashboard/'
'octavia-dashboard-${openstack_branch}.tar.gz')},
'horizon-plugin-qinling-dashboard': {
'type': 'url',
'location': ('$tarballs_base/qinling-dashboard/'
'qinling-dashboard-${openstack_branch}.tar.gz')},
'horizon-plugin-sahara-dashboard': {
'type': 'url',
'location': ('$tarballs_base/sahara-dashboard/'
'sahara-dashboard-${openstack_branch}.tar.gz')},
'horizon-plugin-searchlight-ui': {
'type': 'url',
'location': ('$tarballs_base/searchlight-ui/'
'searchlight-ui-${openstack_branch}.tar.gz')},
'horizon-plugin-senlin-dashboard': {
'type': 'url',
'location': ('$tarballs_base/senlin-dashboard/'
'senlin-dashboard-${openstack_branch}.tar.gz')},
'horizon-plugin-solum-dashboard': {
'type': 'url',
'location': ('$tarballs_base/solum-dashboard/'
'solum-dashboard-${openstack_branch}.tar.gz')},
'horizon-plugin-tacker-dashboard': {
'type': 'url',
'location': ('$tarballs_base/tacker-horizon/'
'tacker-horizon-${openstack_branch}.tar.gz')},
'horizon-plugin-trove-dashboard': {
'type': 'url',
'location': ('$tarballs_base/trove-dashboard/'
'trove-dashboard-${openstack_branch}.tar.gz')},
'horizon-plugin-vitrage-dashboard': {
'type': 'url',
'location': ('$tarballs_base/vitrage-dashboard/'
'vitrage-dashboard-${openstack_branch}.tar.gz')},
'horizon-plugin-watcher-dashboard': {
'type': 'url',
'location': ('$tarballs_base/watcher-dashboard/'
'watcher-dashboard-${openstack_branch}.tar.gz')},
'horizon-plugin-zaqar-ui': {
'type': 'url',
'location': ('$tarballs_base/zaqar-ui/'
'zaqar-ui-${openstack_branch}.tar.gz')},
'horizon-plugin-zun-ui': {
'type': 'url',
'location': ('$tarballs_base/zun-ui/'
'zun-ui-${openstack_branch}.tar.gz')},
'ironic-base': {
'type': 'url',
'location': ('$tarballs_base/ironic/'
'ironic-${openstack_branch}.tar.gz')},
'ironic-inspector': {
'type': 'url',
'location': ('$tarballs_base/ironic-inspector/'
'ironic-inspector-${openstack_branch}.tar.gz')},
'karbor-base': {
'type': 'url',
'location': ('$tarballs_base/karbor/'
'karbor-${openstack_branch}.tar.gz')},
'keystone-base': {
'type': 'url',
'location': ('$tarballs_base/keystone/'
'keystone-${openstack_branch}.tar.gz')},
'kuryr-base': {
'type': 'url',
'location': ('$tarballs_base/kuryr/'
'kuryr-${openstack_branch}.tar.gz')},
'kuryr-libnetwork': {
'type': 'url',
'location': ('$tarballs_base/kuryr-libnetwork/'
'kuryr-libnetwork-${openstack_branch}.tar.gz')},
'magnum-base': {
'type': 'url',
'location': ('$tarballs_base/magnum/'
'magnum-${openstack_branch}.tar.gz')},
'manila-base': {
'type': 'url',
'location': ('$tarballs_base/manila/'
'manila-${openstack_branch}.tar.gz')},
'masakari-base': {
'type': 'url',
'location': ('$tarballs_base/masakari/'
'masakari-${openstack_branch}.tar.gz')},
'masakari-monitors': {
'type': 'url',
'location': ('$tarballs_base/masakari-monitors/'
'masakari-monitors-${openstack_branch}.tar.gz')},
'mistral-base': {
'type': 'url',
'location': ('$tarballs_base/mistral/'
'mistral-${openstack_branch}.tar.gz')},
'mistral-base-plugin-tacker': {
'type': 'url',
'location': ('$tarballs_base/tacker/'
'tacker-${openstack_branch}.tar.gz')},
'monasca-agent': {
'type': 'url',
'location': ('$tarballs_base/monasca-agent/'
'monasca-agent-${openstack_branch}.tar.gz')},
'monasca-api': {
'type': 'url',
'location': ('$tarballs_base/monasca-api/'
'monasca-api-${openstack_branch}.tar.gz')},
'monasca-log-api': {
'type': 'url',
'location': ('$tarballs_base/monasca-log-api/'
'monasca-log-api-${openstack_branch}.tar.gz')},
'monasca-notification': {
'type': 'url',
'location': ('$tarballs_base/monasca-notification/'
'monasca-notification-${openstack_branch}.tar.gz')},
'monasca-persister': {
'type': 'url',
'location': ('$tarballs_base/monasca-persister/'
'monasca-persister-${openstack_branch}.tar.gz')},
'monasca-statsd': {
'type': 'url',
'location': ('$tarballs_base/monasca-statsd/'
'monasca-statsd-${openstack_branch}.tar.gz')},
# FIXME(dszumski): Use openstack tar when infra is fixed
'monasca-thresh': {
'type': 'url',
'location': ('https://github.com/openstack/monasca-thresh/archive/'
'master.tar.gz')},
'monasca-thresh-additions-monasca-common': {
'type': 'url',
'location': ('$tarballs_base/monasca-common/'
'monasca-common-${openstack_branch}.tar.gz')},
'murano-base': {
'type': 'url',
'location': ('$tarballs_base/murano/'
'murano-${openstack_branch}.tar.gz')},
'neutron-base': {
'type': 'url',
'location': ('$tarballs_base/neutron/'
'neutron-${openstack_branch}.tar.gz')},
'neutron-base-plugin-neutron-fwaas': {
'type': 'url',
'location': ('$tarballs_base/neutron-fwaas/'
'neutron-fwaas-${openstack_branch}.tar.gz')},
'neutron-base-plugin-networking-ansible': {
'type': 'url',
'location': ('$tarballs_base/networking-ansible/'
'networking-ansible-${openstack_branch}.tar.gz')},
'neutron-base-plugin-networking-baremetal': {
'type': 'url',
'location': ('$tarballs_base/networking-baremetal/'
'networking-baremetal-${openstack_branch}.tar.gz')},
'neutron-base-plugin-networking-generic-switch': {
'type': 'url',
'location': ('$tarballs_base/networking-generic-switch/'
'networking-generic-switch-${openstack_branch}.tar.gz')},
'neutron-base-plugin-networking-mlnx': {
'type': 'url',
'location': ('$tarballs_base/networking-mlnx/'
'networking-mlnx-${openstack_branch}.tar.gz')},
'neutron-base-plugin-networking-sfc': {
'type': 'url',
'location': ('$tarballs_base/networking-sfc/'
'networking-sfc-${openstack_branch}.tar.gz')},
'neutron-base-plugin-vmware-nsx': {
'type': 'url',
'location': ('$tarballs_base/vmware-nsx/'
'vmware-nsx-${openstack_branch}.tar.gz')},
'neutron-base-plugin-vpnaas-agent': {
'type': 'url',
'location': ('$tarballs_base/neutron-vpnaas/'
'neutron-vpnaas-${openstack_branch}.tar.gz')},
'neutron-bgp-dragent': {
'type': 'url',
'location': ('$tarballs_base/neutron-dynamic-routing/'
'neutron-dynamic-routing-${openstack_branch}.tar.gz')},
'neutron-server-opendaylight-plugin-networking-odl': {
'type': 'url',
'location': ('$tarballs_base/networking-odl/'
'networking-odl-${openstack_branch}.tar.gz')},
'neutron-server-opendaylight-plugin-networking-bgpvpn': {
'type': 'url',
'location': ('$tarballs_base/networking-bgpvpn/'
'networking-bgpvpn-${openstack_branch}.tar.gz')},
'neutron-server-opendaylight-plugin-networking-l2gw': {
'type': 'url',
'location': ('$tarballs_base/networking-l2gw/'
'networking-l2gw-${openstack_branch}.tar.gz')},
'neutron-server-opendaylight-plugin-networking-sfc': {
'type': 'url',
'location': ('$tarballs_base/networking-sfc/'
'networking-sfc-${openstack_branch}.tar.gz')},
'neutron-server-plugin-neutron-dynamic-routing': {
'type': 'url',
'location': ('$tarballs_base/neutron-dynamic-routing/'
'neutron-dynamic-routing-${openstack_branch}.tar.gz')},
'neutron-server-plugin-vmware-nsxlib': {
'type': 'url',
'location': ('$tarballs_base/vmware-nsxlib/'
'vmware-nsxlib-${openstack_branch}.tar.gz')},
'neutron-vpnaas-agent': {
'type': 'url',
'location': ('$tarballs_base/neutron-vpnaas/'
'neutron-vpnaas-${openstack_branch}.tar.gz')},
'neutron-server-ovn-plugin-networking-ovn': {
'type': 'url',
'location': ('$tarballs_base/networking-ovn/'
'networking-ovn-${openstack_branch}.tar.gz')},
'neutron-metadata-agent-ovn-plugin-networking-ovn': {
'type': 'url',
'location': ('$tarballs_base/networking-ovn/'
'networking-ovn-${openstack_branch}.tar.gz')},
'nova-base': {
'type': 'url',
'location': ('$tarballs_base/nova/'
'nova-${openstack_branch}.tar.gz')},
'nova-base-plugin-blazar': {
'type': 'url',
'location': ('$tarballs_base/blazar-nova/'
'blazar-nova-${openstack_branch}.tar.gz')},
'nova-base-plugin-mksproxy': {
'type': 'url',
'location': ('$tarballs_base/nova-mksproxy/'
'nova-mksproxy-master.tar.gz')},
'novajoin-base': {
'type': 'url',
'location': ('$tarballs_base/novajoin/'
'novajoin-master.tar.gz')},
'octavia-base': {
'type': 'url',
'location': ('$tarballs_base/octavia/'
'octavia-${openstack_branch}.tar.gz')},
'panko-base': {
'type': 'url',
'location': ('$tarballs_base/panko/'
'panko-${openstack_branch}.tar.gz')},
'placement-base': {
'type': 'url',
'location': ('$tarballs_base/placement/'
'placement-${openstack_branch}.tar.gz')},
'qinling-base': {
'type': 'url',
'location': ('$tarballs_base/qinling/'
'qinling-${openstack_branch}.tar.gz')},
'tempest-plugin-tempest-conf': {
'type': 'url',
'location': ('$tarballs_base/python-tempestconf/'
'python-tempestconf-master.tar.gz')},
'tempest-plugin-barbican': {
'type': 'url',
'location': ('$tarballs_base/barbican-tempest-plugin/'
'barbican-tempest-plugin-master.tar.gz')},
'tempest-plugin-blazar': {
'type': 'url',
'location': ('$tarballs_base/blazar-tempest-plugin/'
'blazar-tempest-plugin-master.tar.gz')},
'tempest-plugin-cinder': {
'type': 'url',
'location': ('$tarballs_base/cinder-tempest-plugin/'
'cinder-tempest-plugin-master.tar.gz')},
'tempest-plugin-congress': {
'type': 'url',
'location': ('$tarballs_base/congress-tempest-plugin/'
'congress-tempest-plugin-master.tar.gz')},
'tempest-plugin-ec2api': {
'type': 'url',
'location': ('$tarballs_base/ec2api-tempest-plugin/'
'ec2api-tempest-plugin-master.tar.gz')},
'tempest-plugin-heat': {
'type': 'url',
'location': ('$tarballs_base/heat-tempest-plugin/'
'heat-tempest-plugin-master.tar.gz')},
'tempest-plugin-ironic': {
'type': 'url',
'location': ('$tarballs_base/ironic-tempest-plugin/'
'ironic-tempest-plugin-master.tar.gz')},
'tempest-plugin-keystone': {
'type': 'url',
'location': ('$tarballs_base/keystone-tempest-plugin/'
'keystone-tempest-plugin-master.tar.gz')},
'tempest-plugin-magnum': {
'type': 'url',
'location': ('$tarballs_base/magnum-tempest-plugin/'
'magnum-tempest-plugin-master.tar.gz')},
'tempest-plugin-manila': {
'type': 'url',
'location': ('$tarballs_base/manila-tempest-plugin/'
'manila-tempest-plugin-master.tar.gz')},
'tempest-plugin-mistral': {
'type': 'url',
'location': ('$tarballs_base/mistral-tempest-plugin/'
'mistral-tempest-plugin-master.tar.gz')},
'tempest-plugin-monasca': {
'type': 'url',
'location': ('$tarballs_base/monasca-tempest-plugin/'
'monasca-tempest-plugin-master.tar.gz')},
'tempest-plugin-murano': {
'type': 'url',
'location': ('$tarballs_base/murano-tempest-plugin/'
'murano-tempest-plugin-master.tar.gz')},
'tempest-plugin-neutron': {
'type': 'url',
'location': ('$tarballs_base/neutron-tempest-plugin/'
'neutron-tempest-plugin-master.tar.gz')},
'tempest-plugin-patrole': {
'type': 'url',
'location': ('$tarballs_base/patrole/'
'patrole-master.tar.gz')},
'tempest-plugin-telemetry': {
'type': 'url',
'location': ('$tarballs_base/telemetry-tempest-plugin/'
'telemetry-tempest-plugin-master.tar.gz')},
'tempest-plugin-tripleo-common': {
'type': 'url',
'location': ('$tarballs_base/tripleo-common-tempest-plugin/'
'tripleo-common-tempest-plugin-master.'
'tar.gz')},
'tempest-plugin-trove': {
'type': 'url',
'location': ('$tarballs_base/trove-tempest-plugin/'
'trove-tempest-plugin-master.tar.gz')},
'tempest-plugin-vitrage': {
'type': 'url',
'location': ('$tarballs_base/vitrage-tempest-plugin/'
'vitrage-tempest-plugin-master.tar.gz')},
'tempest-plugin-watcher': {
'type': 'url',
'location': ('$tarballs_base/watcher-tempest-plugin/'
'watcher-tempest-plugin-master.tar.gz')},
'tempest-plugin-zaqar': {
'type': 'url',
'location': ('$tarballs_base/zaqar-tempest-plugin/'
'zaqar-tempest-plugin-master.tar.gz')},
'rally': {
'type': 'url',
'location': ('$tarballs_base/rally/'
'rally-master.tar.gz')},
'sahara-base': {
'type': 'url',
'location': ('$tarballs_base/sahara/'
'sahara-${openstack_branch}.tar.gz')},
'sahara-base-plugin-ambari': {
'type': 'url',
'location': ('$tarballs_base/sahara-plugin-ambari/'
'sahara-plugin-ambari-${openstack_branch}.tar.gz')},
'sahara-base-plugin-cdh': {
'type': 'url',
'location': ('$tarballs_base/sahara-plugin-cdh/'
'sahara-plugin-cdh-${openstack_branch}.tar.gz')},
'sahara-base-plugin-mapr': {
'type': 'url',
'location': ('$tarballs_base/sahara-plugin-mapr/'
'sahara-plugin-mapr-${openstack_branch}.tar.gz')},
'sahara-base-plugin-spark': {
'type': 'url',
'location': ('$tarballs_base/sahara-plugin-spark/'
'sahara-plugin-spark-${openstack_branch}.tar.gz')},
'sahara-base-plugin-storm': {
'type': 'url',
'location': ('$tarballs_base/sahara-plugin-storm/'
'sahara-plugin-storm-${openstack_branch}.tar.gz')},
'sahara-base-plugin-vanilla': {
'type': 'url',
'location': ('$tarballs_base/sahara-plugin-vanilla/'
'sahara-plugin-vanilla-${openstack_branch}.tar.gz')},
'searchlight-base': {
'type': 'url',
'location': ('$tarballs_base/searchlight/'
'searchlight-${openstack_branch}.tar.gz')},
'senlin-base': {
'type': 'url',
'location': ('$tarballs_base/senlin/'
'senlin-${openstack_branch}.tar.gz')},
'solum-base': {
'type': 'url',
'location': ('$tarballs_base/solum/'
'solum-${openstack_branch}.tar.gz')},
'swift-base': {
'type': 'url',
'location': ('$tarballs_base/swift/'
'swift-${openstack_branch}.tar.gz')},
'tacker-base': {
'type': 'url',
'location': ('$tarballs_base/tacker/'
'tacker-${openstack_branch}.tar.gz')},
'tacker-base-plugin-networking-sfc': {
'type': 'url',
'location': ('$tarballs_base/networking-sfc/'
'networking-sfc-${openstack_branch}.tar.gz')},
'tempest': {
'type': 'url',
'location': ('$tarballs_base/tempest/'
'tempest-master.tar.gz')},
'tripleoclient': {
'type': 'url',
'location': ('$tarballs_base/python-tripleoclient/'
'tripleoclient-12.3.0.tar.gz')},
'trove-base': {
'type': 'url',
'location': ('$tarballs_base/trove/'
'trove-${openstack_branch}.tar.gz')},
'vitrage-base': {
'type': 'url',
'location': ('$tarballs_base/vitrage/'
'vitrage-${openstack_branch}.tar.gz')},
'vmtp': {
'type': 'url',
'location': ('$tarballs_base/vmtp/'
'vmtp-master.tar.gz')},
'watcher-base': {
'type': 'url',
'location': ('$tarballs_base/watcher/'
'watcher-${openstack_branch}.tar.gz')},
'zaqar-base': {
'type': 'url',
'location': ('$tarballs_base/zaqar/'
'zaqar-${openstack_branch}.tar.gz')},
'zun-base': {
'type': 'url',
'location': ('$tarballs_base/zun/'
'zun-${openstack_branch}.tar.gz')}
}
# NOTE(SamYaple): Only increment the UID. Never reuse old or removed UIDs.
# Starting point 42400+ was chosen arbitrarily to ensure no conflicts
USERS = {
'kolla-user': {
'uid': 42400,
'gid': 42400,
},
'ansible-user': {
'uid': 42401,
'gid': 42401,
},
'aodh-user': {
'uid': 42402,
'gid': 42402,
},
'barbican-user': {
'uid': 42403,
'gid': 42403,
},
'bifrost-user': {
'uid': 42404,
'gid': 42404,
},
'ceilometer-user': {
'uid': 42405,
'gid': 42405,
},
'chrony-user': {
'uid': 42406,
'gid': 42406,
},
'cinder-user': {
'uid': 42407,
'gid': 42407,
},
'cloudkitty-user': {
'uid': 42408,
'gid': 42408,
},
'collectd-user': {
'uid': 42409,
'gid': 42409,
},
'congress-user': {
'uid': 42410,
'gid': 42410,
},
'designate-user': {
'uid': 42411,
'gid': 42411,
},
'elasticsearch-user': {
'uid': 42412,
'gid': 42412,
},
'etcd-user': {
'uid': 42413,
'gid': 42413,
},
'freezer-user': {
'uid': 42414,
'gid': 42414,
},
'glance-user': {
'uid': 42415,
'gid': 42415,
},
'gnocchi-user': {
'uid': 42416,
'gid': 42416,
},
'grafana-user': {
'uid': 42417,
'gid': 42417,
},
'heat-user': {
'uid': 42418,
'gid': 42418,
},
'horizon-user': {
'uid': 42420,
'gid': 42420,
},
'influxdb-user': {
'uid': 42421,
'gid': 42421,
},
'ironic-user': {
'uid': 42422,
'gid': 42422,
},
'kafka-user': {
'uid': 42423,
'gid': 42423,
},
'keystone-user': {
'uid': 42425,
'gid': 42425,
},
'kibana-user': {
'uid': 42426,
'gid': 42426,
},
'qemu-user': {
'uid': 42427,
'gid': 42427,
},
'magnum-user': {
'uid': 42428,
'gid': 42428,
},
'manila-user': {
'uid': 42429,
'gid': 42429,
},
'mistral-user': {
'uid': 42430,
'gid': 42430,
},
'monasca-user': {
'uid': 42431,
'gid': 42431,
},
'mongodb-user': {
'uid': 42432,
'gid': 65534,
},
'murano-user': {
'uid': 42433,
'gid': 42433,
},
'mysql-user': {
'uid': 42434,
'gid': 42434,
},
'neutron-user': {
'uid': 42435,
'gid': 42435,
},
'nova-user': {
'uid': 42436,
'gid': 42436,
},
'octavia-user': {
'uid': 42437,
'gid': 42437,
},
'panko-user': {
'uid': 42438,
'gid': 42438,
},
'rabbitmq-user': {
'uid': 42439,
'gid': 42439,
},
'rally-user': {
'uid': 42440,
'gid': 42440,
},
'sahara-user': {
'uid': 42441,
'gid': 42441,
},
'searchlight-user': {
'uid': 42442,
'gid': 42442,
},
'senlin-user': {
'uid': 42443,
'gid': 42443,
},
'solum-user': {
'uid': 42444,
'gid': 42444,
},
'swift-user': {
'uid': 42445,
'gid': 42445,
},
'tacker-user': {
'uid': 42446,
'gid': 42446,
},
'td-agent-user': {
'uid': 42447,
'gid': 42447,
},
'telegraf-user': {
'uid': 42448,
'gid': 42448,
},
'trove-user': {
'uid': 42449,
'gid': 42449,
},
'vmtp-user': {
'uid': 42450,
'gid': 42450,
},
'watcher-user': {
'uid': 42451,
'gid': 42451,
},
'zaqar-user': {
'uid': 42452,
'gid': 42452,
},
'zookeeper-user': {
'uid': 42453,
'gid': 42453,
},
'haproxy-user': {
'uid': 42454,
'gid': 42454,
},
'ceph-user': {
'uid': 64045,
'gid': 64045,
},
'memcached-user': {
'uid': 42457,
'gid': 42457,
},
'karbor-user': {
'uid': 42458,
'gid': 42458,
},
'vitrage-user': {
'uid': 42459,
'gid': 42459,
},
'redis-user': {
'uid': 42460,
'gid': 42460,
},
'ironic-inspector-user': {
'uid': 42461,
'gid': 42461,
},
'odl-user': {
'uid': 42462,
'gid': 42462,
},
'zun-user': {
'uid': 42463,
'gid': 42463,
},
'dragonflow-user': { # unused user (dragonflow dropped)
'uid': 42464,
'gid': 42464,
},
'qdrouterd-user': {
'uid': 42465,
'gid': 42465,
},
'ec2api-user': {
'uid': 42466,
'gid': 42466,
},
'sensu-user': {
'uid': 42467,
'gid': 42467,
},
'skydive-user': {
'uid': 42468,
'gid': 42468,
},
'kuryr-user': {
'uid': 42469,
'gid': 42469,
},
'novajoin-user': {
'uid': 42470,
'gid': 42470,
},
'blazar-user': {
'uid': 42471,
'gid': 42471,
},
'prometheus-user': {
'uid': 42472,
'gid': 42472,
},
'libvirt-user': {
'uid': 42473, # unused user, but we need the group for socket access
'gid': 42473,
},
'fluentd-user': {
'uid': 42474,
'gid': 42474,
},
'almanach-user': { # unused user (almanach dropped)
'uid': 42475,
'gid': 42475,
},
'openvswitch-user': {
'uid': 42476, # unused user
'gid': 42476,
},
'hugetlbfs-user': {
'uid': 42477, # unused user, but we need the group for vhost socket
'gid': 42477,
},
'logstash-user': {
'uid': 42478,
'gid': 42478,
},
'storm-user': {
'uid': 42479,
'gid': 42479,
},
'tempest-user': {
'uid': 42480,
'gid': 42480,
},
'nfast-user': {
'uid': 42481, # unused user, but we need the group for thales hsm
'gid': 42481,
},
'placement-user': {
'uid': 42482,
'gid': 42482,
},
'cyborg-user': {
'uid': 42483,
'gid': 42483,
},
'qinling-user': {
'uid': 42484,
'gid': 42484,
},
'masakari-user': {
'uid': 42485,
'gid': 42485,
}
}
def get_source_opts(type_=None, location=None, reference=None):
return [cfg.StrOpt('type', choices=['local', 'git', 'url'],
default=type_,
help='Source location type'),
cfg.StrOpt('location', default=location,
help='The location for source install'),
cfg.StrOpt('reference', default=reference,
help=('Git reference to pull, commit sha, tag '
'or branch name'))]
def get_user_opts(uid, gid):
return [
cfg.IntOpt('uid', default=uid, help='The user id'),
cfg.IntOpt('gid', default=gid, help='The group id'),
]
def gen_all_user_opts():
for name, params in USERS.items():
uid = params['uid']
gid = params['gid']
yield name, get_user_opts(uid, gid)
def gen_all_source_opts():
for name, params in SOURCES.items():
type_ = params['type']
location = params['location']
reference = params.get('reference')
yield name, get_source_opts(type_, location, reference)
def list_opts():
return itertools.chain([(None, _CLI_OPTS),
(None, _BASE_OPTS),
('profiles', _PROFILE_OPTS)],
gen_all_source_opts(),
gen_all_user_opts(),
)
def parse(conf, args, usage=None, prog=None,
default_config_files=None):
conf.register_cli_opts(_CLI_OPTS)
conf.register_opts(_BASE_OPTS)
conf.register_opts(_PROFILE_OPTS, group='profiles')
for name, opts in gen_all_source_opts():
conf.register_opts(opts, name)
for name, opts in gen_all_user_opts():
conf.register_opts(opts, name)
conf(args=args,
project='kolla',
usage=usage,
prog=prog,
version=version.cached_version_string(),
default_config_files=default_config_files)
# NOTE(jeffrey4l): set the default base tag based on the
# base option
conf.set_default('base_tag', DEFAULT_BASE_TAGS.get(conf.base))
conf.set_default('openstack_release', OPENSTACK_RELEASE.get(conf.base))
prefix = '' if conf.openstack_release == 'master' else 'stable-'
openstack_branch = '{}{}'.format(prefix, conf.openstack_release)
conf.set_default('openstack_branch', openstack_branch)
if not conf.base_image:
conf.base_image = conf.base
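# --- Illustrative sketch (not part of the original module) -------------------
# Guarded example of driving parse() from a throwaway ConfigOpts instance; the
# CLI arguments are hypothetical and default_config_files is left empty so no
# kolla-build.conf needs to exist.
if __name__ == "__main__":
    example_conf = cfg.ConfigOpts()
    parse(example_conf, ['--base', 'ubuntu', '--type', 'source'],
          default_config_files=[])
    # base_tag and openstack_release defaults are derived from --base above
    print(example_conf.base, example_conf.base_tag,
          example_conf.openstack_release, example_conf.install_type)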
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class LogAnalyticsInputBase(Model):
"""Api input base class for LogAnalytics Api.
All required parameters must be populated in order to send to Azure.
:param blob_container_sas_uri: Required. SAS Uri of the logging blob
container to which LogAnalytics Api writes output logs to.
:type blob_container_sas_uri: str
:param from_time: Required. From time of the query
:type from_time: datetime
:param to_time: Required. To time of the query
:type to_time: datetime
:param group_by_throttle_policy: Group query result by Throttle Policy
applied.
:type group_by_throttle_policy: bool
:param group_by_operation_name: Group query result by Operation Name.
:type group_by_operation_name: bool
:param group_by_resource_name: Group query result by Resource Name.
:type group_by_resource_name: bool
"""
_validation = {
'blob_container_sas_uri': {'required': True},
'from_time': {'required': True},
'to_time': {'required': True},
}
_attribute_map = {
'blob_container_sas_uri': {'key': 'blobContainerSasUri', 'type': 'str'},
'from_time': {'key': 'fromTime', 'type': 'iso-8601'},
'to_time': {'key': 'toTime', 'type': 'iso-8601'},
'group_by_throttle_policy': {'key': 'groupByThrottlePolicy', 'type': 'bool'},
'group_by_operation_name': {'key': 'groupByOperationName', 'type': 'bool'},
'group_by_resource_name': {'key': 'groupByResourceName', 'type': 'bool'},
}
def __init__(self, *, blob_container_sas_uri: str, from_time, to_time, group_by_throttle_policy: bool=None, group_by_operation_name: bool=None, group_by_resource_name: bool=None, **kwargs) -> None:
super(LogAnalyticsInputBase, self).__init__(**kwargs)
self.blob_container_sas_uri = blob_container_sas_uri
self.from_time = from_time
self.to_time = to_time
self.group_by_throttle_policy = group_by_throttle_policy
self.group_by_operation_name = group_by_operation_name
self.group_by_resource_name = group_by_resource_name
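# --- Illustrative sketch (not part of the generated model) -------------------
# Minimal example of populating the model; the SAS URI and time window are
# hypothetical placeholders.
if __name__ == "__main__":
    import datetime
    example_input = LogAnalyticsInputBase(
        blob_container_sas_uri="https://example.blob.core.windows.net/logs?sig=placeholder",
        from_time=datetime.datetime(2021, 1, 1),
        to_time=datetime.datetime(2021, 1, 2),
        group_by_operation_name=True,
    )
    print(example_input.blob_container_sas_uri, example_input.to_time)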
|
# coding: utf-8
"""
Mailchimp Marketing API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 3.0.74
Contact: apihelp@mailchimp.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SignupForm(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'header': 'SignupFormHeaderOptions',
'contents': 'list[CollectionOfContentForListSignupForms]',
'styles': 'list[CollectionOfElementStyleForListSignupForms]',
'signup_form_url': 'str',
'list_id': 'str',
'links': 'list[ResourceLink]'
}
attribute_map = {
'header': 'header',
'contents': 'contents',
'styles': 'styles',
'signup_form_url': 'signup_form_url',
'list_id': 'list_id',
'links': '_links'
}
def __init__(self, header=None, contents=None, styles=None, signup_form_url=None, list_id=None, links=None): # noqa: E501
"""SignupForm - a model defined in Swagger""" # noqa: E501
self._header = None
self._contents = None
self._styles = None
self._signup_form_url = None
self._list_id = None
self._links = None
self.discriminator = None
if header is not None:
self.header = header
if contents is not None:
self.contents = contents
if styles is not None:
self.styles = styles
if signup_form_url is not None:
self.signup_form_url = signup_form_url
if list_id is not None:
self.list_id = list_id
if links is not None:
self.links = links
@property
def header(self):
"""Gets the header of this SignupForm. # noqa: E501
:return: The header of this SignupForm. # noqa: E501
:rtype: SignupFormHeaderOptions
"""
return self._header
@header.setter
def header(self, header):
"""Sets the header of this SignupForm.
:param header: The header of this SignupForm. # noqa: E501
:type: SignupFormHeaderOptions
"""
self._header = header
@property
def contents(self):
"""Gets the contents of this SignupForm. # noqa: E501
The signup form body content. # noqa: E501
:return: The contents of this SignupForm. # noqa: E501
:rtype: list[CollectionOfContentForListSignupForms]
"""
return self._contents
@contents.setter
def contents(self, contents):
"""Sets the contents of this SignupForm.
The signup form body content. # noqa: E501
:param contents: The contents of this SignupForm. # noqa: E501
:type: list[CollectionOfContentForListSignupForms]
"""
self._contents = contents
@property
def styles(self):
"""Gets the styles of this SignupForm. # noqa: E501
An array of objects, each representing an element style for the signup form. # noqa: E501
:return: The styles of this SignupForm. # noqa: E501
:rtype: list[CollectionOfElementStyleForListSignupForms]
"""
return self._styles
@styles.setter
def styles(self, styles):
"""Sets the styles of this SignupForm.
An array of objects, each representing an element style for the signup form. # noqa: E501
:param styles: The styles of this SignupForm. # noqa: E501
:type: list[CollectionOfElementStyleForListSignupForms]
"""
self._styles = styles
@property
def signup_form_url(self):
"""Gets the signup_form_url of this SignupForm. # noqa: E501
Signup form URL. # noqa: E501
:return: The signup_form_url of this SignupForm. # noqa: E501
:rtype: str
"""
return self._signup_form_url
@signup_form_url.setter
def signup_form_url(self, signup_form_url):
"""Sets the signup_form_url of this SignupForm.
Signup form URL. # noqa: E501
:param signup_form_url: The signup_form_url of this SignupForm. # noqa: E501
:type: str
"""
self._signup_form_url = signup_form_url
@property
def list_id(self):
"""Gets the list_id of this SignupForm. # noqa: E501
The signup form's list id. # noqa: E501
:return: The list_id of this SignupForm. # noqa: E501
:rtype: str
"""
return self._list_id
@list_id.setter
def list_id(self, list_id):
"""Sets the list_id of this SignupForm.
The signup form's list id. # noqa: E501
:param list_id: The list_id of this SignupForm. # noqa: E501
:type: str
"""
self._list_id = list_id
@property
def links(self):
"""Gets the links of this SignupForm. # noqa: E501
A list of link types and descriptions for the API schema documents. # noqa: E501
:return: The links of this SignupForm. # noqa: E501
:rtype: list[ResourceLink]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this SignupForm.
A list of link types and descriptions for the API schema documents. # noqa: E501
:param links: The links of this SignupForm. # noqa: E501
:type: list[ResourceLink]
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SignupForm, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SignupForm):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
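# --- Illustrative sketch (not part of the generated model) -------------------
# Small example of round-tripping the model through to_dict(); the URL and
# list id are hypothetical placeholders.
if __name__ == "__main__":
    example_form = SignupForm(
        signup_form_url="https://example.us1.list-manage.com/subscribe",
        list_id="abc123def4",
    )
    print(example_form.to_dict())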
|
data = [("000060", 8.25), ("000020", 5.75), ("039490", 1.3)]
def 정렬규칙(x):  # "정렬규칙" means "sort rule": sort by the second tuple element (the price)
return x[1]
data.sort(key=정렬규칙)
print(data)
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class FeatureClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for FeatureClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The Azure subscription ID.
:type subscription_id: str
:keyword api_version: Api Version. Default value is "2021-07-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
super(FeatureClientConfiguration, self).__init__(**kwargs)
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = api_version
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-resource/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = ARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
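# --- Illustrative sketch (not part of the generated client) ------------------
# Example of building the configuration with a credential from azure-identity
# (an assumed, separately installed package) and a placeholder subscription id.
if __name__ == "__main__":
    from azure.identity import DefaultAzureCredential  # assumed dependency
    example_config = FeatureClientConfiguration(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-0000-0000-0000-000000000000",
    )
    print(example_config.api_version, example_config.credential_scopes)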
|
import torch.nn as nn
import torch.nn.functional as F
class noise_Conv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1,
groups=1, bias=True, noise_std=0.1):
super(noise_Conv2d, self).__init__(in_channels, out_channels,
kernel_size, stride,
padding, dilation, groups, bias)
self.noise_std = noise_std
def forward(self, input):
noise_i = input.clone().normal_(0, self.noise_std)
noise_input = input + noise_i
output = F.conv2d(noise_input, self.weight, self.bias, self.stride,
self.padding, self.dilation,
self.groups)
return output
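# --- Illustrative sketch (not part of the original module) -------------------
# Quick smoke test: the layer behaves like nn.Conv2d but perturbs its input
# with zero-mean Gaussian noise of standard deviation `noise_std` on every
# forward pass.
if __name__ == "__main__":
    import torch
    layer = noise_Conv2d(3, 8, kernel_size=3, padding=1, noise_std=0.1)
    x = torch.randn(2, 3, 32, 32)
    print(layer(x).shape)  # expected: torch.Size([2, 8, 32, 32])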
|
# Copyright 2014: Rackspace UK
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import novaclient.exceptions
from rally.common.i18n import _
from rally.common import log as logging
from rally.common import utils
from rally import osclients
from rally.plugins.openstack.context.cleanup import manager as resource_manager
from rally.task import context
LOG = logging.getLogger(__name__)
@context.configure(name="keypair", order=310)
class Keypair(context.Context):
KEYPAIR_NAME = "rally_ssh_key"
def _generate_keypair(self, endpoint):
keypair_name = "%s_%s" % (
self.KEYPAIR_NAME, self.context["task"]["uuid"])
nova_client = osclients.Clients(endpoint).nova()
# NOTE(hughsaunders): If keypair exists, it must be deleted as we can't
# retrieve the private key
try:
nova_client.keypairs.delete(keypair_name)
except novaclient.exceptions.NotFound:
pass
keypair = nova_client.keypairs.create(keypair_name)
return {"private": keypair.private_key,
"public": keypair.public_key,
"name": keypair_name,
"id": keypair.id}
@utils.log_task_wrapper(LOG.info, _("Enter context: `keypair`"))
def setup(self):
for user in self.context["users"]:
user["keypair"] = self._generate_keypair(user["endpoint"])
@utils.log_task_wrapper(LOG.info, _("Exit context: `keypair`"))
def cleanup(self):
# TODO(boris-42): Delete only resources created by this context
resource_manager.cleanup(names=["nova.keypairs"],
users=self.context.get("users", []))
|
from pyblazing.apiv2 import context
from pyblazing.apiv2 import make_context
BlazingContext = context.BlazingContext
import pyblazing.apiv2
|
from source.element.cr_beam_element import CRBeamElement
import numpy as np
np.set_printoptions(suppress=False, precision=4, linewidth=100)
def test_crbeam_element_update_incremental():
material_params = {'rho': 7850, 'e': 2069000000.0, 'nu': 0.29, 'zeta': 0.05, 'lx_i': 1.2, 'is_nonlinear': True}
element_params = {'a': 0.0001, 'asy': 0.0, 'asz': 0.0, 'iy': 0.0001, 'iz': 0.0001, 'it': 0.0001}
coords = np.array([[1.2, 0.0, 0.0], [0.0, 0.0, 0.0]])
element = CRBeamElement(material_params, element_params, coords, 0, '3D')
Kd_kratos = np.array([
[66828.2, 0, 0, 0, 0, 0],
[0, 172417, 0, 0, 0, 0],
[0, 0, 172417, 0, 0, 0],
[0, 0, 0, 172417, 0, 0],
[0, 0, 0, 0, 517250, 0],
[0, 0, 0, 0, 0, 517250]
])
Kd = element.Kd_mat
try:
assert (abs(Kd_kratos - Kd) < 10).all()
except AssertionError:
msg = "##################################################################################\n"
msg += "Deformation Stiffness matrix\n"
msg += "Kd in Kratos:\n" + str(Kd_kratos)
msg += "\nIt is however:\n" + str(Kd)
print(msg)
Ke_mat_kratos = np.array([
[172417, 0, 0, 0, 0, 0, -172417, 0, 0, 0, 0, 0],
[0, 1.43681e+06, 0, 0, 0, 862083, 0, -1.43681e+06, 0, 0, 0, 862083],
[0, 0, 1.43681e+06, 0, -862083, 0, 0, 0, -1.43681e+06, 0, -862083, 0],
[0, 0, 0, 66828.2, 0, 0, 0, 0, 0, -66828.2, 0, 0],
[0, 0, -862083, 0, 689667, 0, 0, 0, 862083, 0, 344833, 0],
[0, 862083, 0, 0, 0, 689667, 0, -862083, 0, 0, 0, 344833],
[-172417, 0, 0, 0, 0, 0, 172417, 0, 0, 0, 0, 0],
[0, -1.43681e+06, 0, 0, 0, -862083, 0, 1.43681e+06, 0, 0, 0, -862083],
[0, 0, -1.43681e+06, 0, 862083, 0, 0, 0, 1.43681e+06, 0, 862083, 0],
[0, 0, 0, -66828.2, 0, 0, 0, 0, 0, 66828.2, 0, 0],
[0, 0, -862083, 0, 344833, 0, 0, 0, 862083, 0, 689667, 0],
[0, 862083, 0, 0, 0, 344833, 0, -862083, 0, 0, 0, 689667]
])
Ke_mat = element.Ke_mat
try:
assert (abs(Ke_mat_kratos - Ke_mat) < 10).all()
except AssertionError:
msg = "##################################################################################\n"
msg += "Material Stiffness matrix\n"
msg += "Ke_mat in Kratos:\n" + str(Ke_mat_kratos)
msg += "\nIt is however:\n" + str(Ke_mat)
print(msg)
Phiz = 0.0
Phiy = 0.0
CTy = (element.rho * element.A * element.L) / ((1 + Phiy) * (1 + Phiy))
CTz = (element.rho * element.A * element.L) / ((1 + Phiz) * (1 + Phiz))
CRy = (element.rho * element.Iy) / ((1 + Phiy) * (1 + Phiy) * element.L)
CRz = (element.rho * element.Iz) / ((1 + Phiz) * (1 + Phiz) * element.L)
bending_mass_matrix_z = element.build_single_mass_matrix(Phiz, CTz, CRz, element.L, +1)
bending_mass_matrix_kratos_z = np.array([
[1.13489, 0.137711, -0.663886, 0.0435114],
[0.137711, 0.138519, -0.0435114, -0.0410891],
[-0.663886, -0.0435114, 1.13489, -0.137711],
[0.0435114, -0.0410891, -0.137711, 0.138519]
])
try:
assert (abs(bending_mass_matrix_z - bending_mass_matrix_kratos_z) < 1e-4).all()
print("Bending mass_matrix z is correct")
except AssertionError:
msg = "##################################################################################\n"
msg += "Bending mass matrix z\n"
msg += "Me in Kratos:\n" + str(bending_mass_matrix_kratos_z)
msg += "\nIt is however:\n" + str(bending_mass_matrix_z)
print(msg)
bending_mass_matrix_y = element.build_single_mass_matrix(Phiz, CTy, CRy, element.L, -1)
bending_mass_matrix_kratos_y = np.array([
[1.13489, -0.137711, -0.663886, -0.0435114],
[-0.137711, 0.138519, 0.0435114, -0.0410891],
[-0.663886, 0.0435114, 1.13489, 0.137711],
[-0.0435114, -0.0410891, 0.137711, 0.138519]
])
try:
assert (abs(bending_mass_matrix_y - bending_mass_matrix_kratos_y) < 1e-4).all()
print("Bending mass_matrix y is correct")
except AssertionError:
msg = "##################################################################################\n"
msg += "Bending mass matrix y\n"
msg += "Me in Kratos:\n" + str(bending_mass_matrix_kratos_y)
msg += "\nIt is however:\n" + str(bending_mass_matrix_y)
print(msg)
Me = element._get_consistent_mass_matrix()
Me_kratos = np.array([
[0.314, 0, 0, 0, 0, 0, 0.157, 0, 0, 0, 0, 0],
[0, 1.13489, 0, 0, 0, 0.137711, 0, -0.663886, 0, 0, 0, 0.0435114],
[0, 0, 1.13489, 0, -0.137711, 0, 0, 0, -0.663886, 0, -0.0435114, 0],
[0, 0, 0, 0.628, 0, 0, 0, 0, 0, 0.314, 0, 0],
[0, 0, -0.137711, 0, 0.138519, 0, 0, 0, 0.0435114, 0, -0.0410891, 0],
[0, 0.137711, 0, 0, 0, 0.138519, 0, -0.0435114, 0, 0, 0, -0.0410891],
[0.157, 0, 0, 0, 0, 0, 0.314, 0, 0, 0, 0, 0],
[0, -0.663886, 0, 0, 0, -0.0435114, 0, 1.13489, 0, 0, 0, -0.137711],
[0, 0, -0.663886, 0, 0.0435114, 0, 0, 0, 1.13489, 0, 0.137711, 0],
[0, 0, 0, 0.314, 0, 0, 0, 0, 0, 0.628, 0, 0],
[0, 0, -0.0435114, 0, -0.0410891, 0, 0, 0, 0.137711, 0, 0.138519, 0],
[0, 0.0435114, 0, 0, 0, -0.0410891, 0, -0.137711, 0, 0, 0, 0.138519]
])
try:
assert (abs(Me - Me_kratos) < 1e-2).all()
print("Mass matrix is correct")
except AssertionError:
msg = "##################################################################################\n"
msg += "Consistent mass matrix\n"
msg += "Me in Kratos:\n" + str(Me_kratos)
msg += "\nIt is however:\n" + str(Me)
print(msg)
|
from typing import List
import random
import re
class Concept:
def __init__(self, name: str, options: List[str]):
self.name = name.lower()
self.options = options
def next(self):
return random.choice(self.options)
def render_to(self, template):
return [template.format(option) for option in self.options]
def __iter__(self):
i = 0
while i < len(self.options):
yield self.options[i]
i += 1
class ConceptLibrary:
    def __init__(self, concepts: List[Concept] = None):
        # Avoid a shared mutable default list across ConceptLibrary instances
        self.concepts = concepts if concepts is not None else []
def add(self, name: str, options: List[str]):
self.concepts.append(Concept(name, options))
def get(self, term):
term = term.lower()
for concept in self.concepts:
if concept.name == term:
return concept
class Response:
def __init__(self, choices: List[str]):
self.choices = choices
self.iter = 0
def next(self, concepts: ConceptLibrary):
choice = self.choices[self.iter]
self.iter += 1
if self.iter >= len(self.choices):
self.iter = 0
matches = re.findall(r'~([a-z0-9]+)\b', choice, re.I)
if matches:
for match in matches:
valid_concept = concepts.get(match)
if valid_concept:
choice = re.sub('~{}'.format(match), valid_concept.next(), choice, flags=re.I)
break # out of match loop
return choice
class DialogNode:
def __init__(self, **kwargs):
self.triggers = kwargs.get('triggers', [])
self.response = kwargs.get('response', None)
self.proposal = kwargs.get('proposal', False)
self.level = kwargs.get('level', None)
self.scope = [] # type:List[DialogNode]
self.parent = None
self.triggers = [t.lower() for t in self.triggers]
def triggers_on(self, phrase: str, concepts: ConceptLibrary):
for node in self.scope: # type:DialogNode
if node.triggers and not node.proposal:
if phrase.lower() in node.triggers:
return node
def render_triggers(self, concepts: ConceptLibrary):
possible = []
for trigger in self.triggers:
matches = re.findall(r'~([a-z0-9]+)\b', trigger, re.I)
if matches:
for match in matches:
valid_concept = concepts.get(match)
if valid_concept:
template = trigger.replace('~{}'.format(match), '{}')
possible.extend(valid_concept.render_to(template))
else:
possible.append(trigger)
self.triggers = possible
def add_node(self, node):
self.scope.append(node)
def next_response(self, concept_lib: ConceptLibrary):
return self.response.next(concept_lib)
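# A minimal usage sketch (illustrative only; the concept and trigger strings below are made up).
# It wires a ConceptLibrary into a DialogNode and shows how ~concept placeholders are expanded
# in both triggers and responses.
if __name__ == "__main__":
    concepts = ConceptLibrary([])
    concepts.add("greeting", ["hi", "hello", "hey"])
    root = DialogNode(triggers=[], response=None)
    node = DialogNode(triggers=["~greeting there"],
                      response=Response(["~greeting yourself!", "Nice to meet you."]))
    node.render_triggers(concepts)  # expands "~greeting there" into one trigger per option
    root.add_node(node)
    matched = root.triggers_on("hello there", concepts)
    if matched:
        print(matched.next_response(concepts))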
|
import os
import sys
import pytest
from pytest import fixture
if sys.version_info < (3, 6):
raise pytest.skip("plantuml_markdown plugin requires Python >= 3.6", allow_module_level=True)
from tests import V8_PLUGIN_PATH
from tests.conftest import CompileResult
from v8.plantuml_markdown.plantuml_markdown import PlantUmlMarkdownProcessor, first_line_for_listing_block
def test_svg(do_fence_test):
with do_fence_test('plantuml') as compiled:
assert set(compiled.document.xpath('//svg//text/text()')) == {'Alice', 'Bob', 'hello1', 'hello2'}
assert '<?xml' not in compiled.raw_html
def test_listing(do_fence_test):
with do_fence_test('{ .plantuml listing }') as compiled:
assert compiled.document.xpath('//pre/text()') == [(
'Alice -> Bob : hello1\n'
'Bob -> Alice : hello2\n'
)]
def test_id(do_fence_test):
with do_fence_test('{ .plantuml svg+listing #foo }') as compiled:
assert compiled.document.xpath('/html/body/div/@id') == ['foo']
assert compiled.document.xpath('//pre/a/@name') == ['foo-1', 'foo-2']
assert compiled.raw_html.count('foo') == 5 # ensure the id is not anywhere unexpected
def test_line_numbering(do_fence_test):
with do_fence_test('{ .plantuml listing #foo linenos=y }') as compiled:
assert compiled.document.xpath('//table/tr//code/@data-line-number') == ['1', '2']
assert compiled.document.xpath('//table/tr//a/@href') == ['#foo-1', '#foo-2']
def test_line_highlighting(do_fence_test):
with do_fence_test('{ .plantuml listing hl_lines="1 2" }') as compiled:
assert len(compiled.document.xpath('//pre/span[@class="hll"]')) == 2
def test_svg_and_listing(do_fence_test):
with do_fence_test('{ .plantuml svg+listing }') as compiled:
assert [e.tag for e in compiled.document.xpath('/html/body/div/div/*')] == ['svg', 'pre']
def test_listing_and_svg(do_fence_test):
with do_fence_test('{ .plantuml listing+svg }') as compiled:
assert [e.tag for e in compiled.document.xpath('/html/body/div/div/*')] == ['pre', 'svg']
def test_prefix(do_compile_test):
with do_compile_test("""\
```plantuml-prefix
title Title 1
footer Footer 1
```
```plantuml
Participant foo
```
```plantuml
Participant bar
```
```plantuml-prefix
title Title 2
' no footer this time
```
```plantuml
Participant baz
```
""") as compiled:
text = compiled.document.xpath('//svg//text/text()')
assert text.count('Title 1') == 2
assert text.count('Footer 1') == 2
assert text.count('Title 2') == 1
def test_with_other_markdown(do_compile_test):
with do_compile_test("""\
# Heading
```plantuml
Participant foo
```
```python
# comment
```
""") as compiled:
assert compiled.document.xpath('//h1/text()') == ['Heading']
assert compiled.document.xpath('//svg//text/text()') == ['foo']
assert compiled.document.xpath('//pre//span[@class="c1"]/text()') == ['# comment']
def test_plantuml_syntax_error(do_compile_test):
with do_compile_test("""\
```plantuml
this line is bad
```
""", plantuml_continue_after_failure=True) as compiled:
text = compiled.document.xpath('//svg//text/text()')
assert '[From string (line 2) ]' in text
assert 'this line is bad' in text
assert 'Syntax Error?' in text
@pytest.mark.parametrize('line, expected', [
(
'```plantuml',
'```text',
),
(
'```.plantuml hl_lines="3 4"',
'```text hl_lines="3 4"',
),
(
'```{.plantuml}',
'```{.text}',
),
(
'```{ .plantuml #bar }',
'```{ .text anchor_ref=bar }',
),
(
'```{ .plantuml #bad<>&chars }',
'```{ .text anchor_ref=badchars }',
),
(
'```{ .plantuml #bar .foo linenos=y }',
'```{ .text anchor_ref=bar .foo linenos=y }',
),
])
def test_first_line_for_listing_block(line, expected):
match = PlantUmlMarkdownProcessor.FENCED_BLOCK_RE.search(line + '\n```')
assert match
assert first_line_for_listing_block(match) == expected
@fixture
def do_compile_test(basic_compile_test):
def f(data: str, plantuml_continue_after_failure=False) -> CompileResult:
return basic_compile_test(
'.md',
data,
extra_config={
'PLANTUML_DEBUG': True,
'PLANTUML_CONTINUE_AFTER_FAILURE': plantuml_continue_after_failure,
'PLANTUML_EXEC': os.environ.get('PLANTUML_EXEC', 'plantuml').split(),
'PLANTUML_MARKDOWN_ARGS': [
'-chide footbox',
'-nometadata',
'-Sshadowing=false',
],
},
extra_plugins_dirs=[
V8_PLUGIN_PATH / 'plantuml',
V8_PLUGIN_PATH / 'plantuml_markdown',
]
)
return f
@fixture
def do_fence_test(do_compile_test):
def f(fence: str) -> CompileResult:
return do_compile_test("""\
```{}
Alice -> Bob : hello1
Bob -> Alice : hello2
```
""".format(fence))
return f
|
from __future__ import unicode_literals
import unittest
from datetime import date
from mock import Mock, patch
from xero import Xero
from xero.exceptions import (
XeroBadRequest,
XeroExceptionUnknown,
XeroForbidden,
XeroInternalError,
XeroNotAvailable,
XeroNotFound,
XeroNotImplemented,
XeroRateLimitExceeded,
XeroUnauthorized,
)
from . import mock_data
class ExceptionsTest(unittest.TestCase):
@patch("requests.put")
def test_bad_request(self, r_put):
"Data with validation errors raises a bad request exception"
# Verified response from the live API
head = dict()
head["content-type"] = "text/xml; charset=utf-8"
r_put.return_value = Mock(
status_code=400,
encoding="utf-8",
text=mock_data.bad_request_text,
headers=head,
)
credentials = Mock(base_url="")
xero = Xero(credentials)
try:
xero.invoices.put(
{
"Type": "ACCREC",
"LineAmountTypes": "Exclusive",
"Date": date(2013, 4, 29),
"DueDate": date(2013, 4, 29),
"Reference": "Order # 123456",
"Status": "PAID",
"AmountPaid": "19.05",
"TotalTax": "1.05",
"AmountDue": "0.00",
"Total": "19.05",
"SubTotal": "18.00",
}
)
self.fail("Should raise a XeroBadRequest.")
except XeroBadRequest as e:
# Error messages have been extracted
self.assertEqual(str(e), "A validation exception occurred")
self.assertEqual(
e.errors,
[
"One or more line items must be specified",
"Invoice not of valid status for creation",
"A Contact must be specified for this type of transaction",
],
)
# The response has also been stored
self.assertEqual(e.response.status_code, 400)
self.assertTrue(e.response.text.startswith("<ApiException"))
except Exception as e:
self.fail("Should raise a XeroBadRequest, not %s" % e)
@patch("requests.put")
def test_bad_request_invalid_response(self, r_put):
"If the error response from the backend is malformed (or truncated), raise a XeroExceptionUnknown"
head = {"content-type": "text/xml; charset=utf-8"}
# Same error as before, but the response got cut off prematurely
bad_response = mock_data.bad_request_text[:1000]
r_put.return_value = Mock(
status_code=400, encoding="utf-8", text=bad_response, headers=head
)
credentials = Mock(base_url="")
xero = Xero(credentials)
with self.assertRaises(
XeroExceptionUnknown, msg="Should raise a XeroExceptionUnknown"
):
xero.invoices.put(
{
"Type": "ACCREC",
"LineAmountTypes": "Exclusive",
"Date": date(2013, 4, 29),
"DueDate": date(2013, 4, 29),
"Reference": "Order # 123456",
"Status": "PAID",
"AmountPaid": "19.05",
"TotalTax": "1.05",
"AmountDue": "0.00",
"Total": "19.05",
"SubTotal": "18.00",
}
)
@patch("requests.get")
def test_unregistered_app(self, r_get):
"An app without a signature raises a BadRequest exception, but with HTML payload"
# Verified response from the live API
head = dict()
head["content-type"] = "text/html; charset=utf-8"
r_get.return_value = Mock(
status_code=400,
text="oauth_problem=signature_method_rejected&oauth_problem_advice=No%20certificates%20have%20been%20registered%20for%20the%20consumer",
headers=head,
)
credentials = Mock(base_url="")
xero = Xero(credentials)
try:
xero.contacts.all()
self.fail("Should raise a XeroUnauthorized.")
except XeroBadRequest as e:
# Error messages have been extracted
self.assertEqual(
str(e), "No certificates have been registered for the consumer"
)
self.assertEqual(e.errors[0], "signature_method_rejected")
# The response has also been stored
self.assertEqual(e.response.status_code, 400)
self.assertEqual(
e.response.text,
"oauth_problem=signature_method_rejected&oauth_problem_advice=No%20certificates%20have%20been%20registered%20for%20the%20consumer",
)
except Exception as e:
self.fail("Should raise a XeroBadRequest, not %s" % e)
@patch("requests.get")
def test_unauthorized_invalid(self, r_get):
"A session with an invalid token raises an unauthorized exception"
# Verified response from the live API
r_get.return_value = Mock(
status_code=401,
text="oauth_problem=signature_invalid&oauth_problem_advice=Failed%20to%20validate%20signature",
)
credentials = Mock(base_url="")
xero = Xero(credentials)
try:
xero.contacts.all()
self.fail("Should raise a XeroUnauthorized.")
except XeroUnauthorized as e:
# Error messages have been extracted
self.assertEqual(str(e), "Failed to validate signature")
self.assertEqual(e.errors[0], "signature_invalid")
# The response has also been stored
self.assertEqual(e.response.status_code, 401)
self.assertEqual(
e.response.text,
"oauth_problem=signature_invalid&oauth_problem_advice=Failed%20to%20validate%20signature",
)
except Exception as e:
self.fail("Should raise a XeroUnauthorized, not %s" % e)
@patch("requests.get")
def test_unauthorized_expired(self, r_get):
"A session with an expired token raises an unauthorized exception"
# Verified response from the live API
r_get.return_value = Mock(
status_code=401,
text="oauth_problem=token_expired&oauth_problem_advice=The%20access%20token%20has%20expired",
)
credentials = Mock(base_url="")
xero = Xero(credentials)
try:
xero.contacts.all()
self.fail("Should raise a XeroUnauthorized.")
except XeroUnauthorized as e:
# Error messages have been extracted
self.assertEqual(str(e), "The access token has expired")
self.assertEqual(e.errors[0], "token_expired")
# The response has also been stored
self.assertEqual(e.response.status_code, 401)
self.assertEqual(
e.response.text,
"oauth_problem=token_expired&oauth_problem_advice=The%20access%20token%20has%20expired",
)
except Exception as e:
self.fail("Should raise a XeroUnauthorized, not %s" % e)
@patch("requests.get")
def test_forbidden(self, r_get):
"In case of an SSL failure, a Forbidden exception is raised"
# This is unconfirmed; haven't been able to verify this response from API.
r_get.return_value = Mock(
status_code=403, text="The client SSL certificate was not valid."
)
credentials = Mock(base_url="")
xero = Xero(credentials)
try:
xero.contacts.all()
self.fail("Should raise a XeroForbidden.")
except XeroForbidden as e:
# Error messages have been extracted
self.assertEqual(str(e), "The client SSL certificate was not valid.")
# The response has also been stored
self.assertEqual(e.response.status_code, 403)
self.assertEqual(
e.response.text, "The client SSL certificate was not valid."
)
except Exception as e:
self.fail("Should raise a XeroForbidden, not %s" % e)
@patch("requests.get")
def test_not_found(self, r_get):
"If you request an object that doesn't exist, a Not Found exception is raised"
# Verified response from the live API
r_get.return_value = Mock(
status_code=404, text="The resource you're looking for cannot be found"
)
credentials = Mock(base_url="")
xero = Xero(credentials)
try:
xero.contacts.get(id="deadbeef")
self.fail("Should raise a XeroNotFound.")
except XeroNotFound as e:
# Error messages have been extracted
self.assertEqual(str(e), "The resource you're looking for cannot be found")
# The response has also been stored
self.assertEqual(e.response.status_code, 404)
self.assertEqual(
e.response.text, "The resource you're looking for cannot be found"
)
except Exception as e:
self.fail("Should raise a XeroNotFound, not %s" % e)
@patch("requests.get")
def test_rate_limit_exceeded_429(self, r_get):
"If you exceed the rate limit, an exception is raised."
# Response based off Xero documentation; not confirmed by reality.
r_get.return_value = Mock(
status_code=429,
headers={"X-Rate-Limit-Problem": "day"},
text="oauth_problem=rate%20limit%20exceeded&oauth_problem_advice=please%20wait%20before%20retrying%20the%20xero%20api",
)
credentials = Mock(base_url="")
xero = Xero(credentials)
try:
xero.contacts.all()
self.fail("Should raise a XeroRateLimitExceeded.")
except XeroRateLimitExceeded as e:
# Error messages have been extracted
self.assertEqual(str(e), "please wait before retrying the xero api, the limit exceeded is: day")
self.assertIn("rate limit exceeded", e.errors[0])
# The response has also been stored
self.assertEqual(e.response.status_code, 429)
self.assertEqual(
e.response.text,
"oauth_problem=rate%20limit%20exceeded&oauth_problem_advice=please%20wait%20before%20retrying%20the%20xero%20api",
)
except Exception as e:
self.fail("Should raise a XeroRateLimitExceeded, not %s" % e)
@patch("requests.get")
def test_internal_error(self, r_get):
"In case of an SSL failure, a Forbidden exception is raised"
# This is unconfirmed; haven't been able to verify this response from API.
r_get.return_value = Mock(
status_code=500,
text="An unhandled error with the Xero API occurred. Contact the Xero API team if problems persist.",
)
credentials = Mock(base_url="")
xero = Xero(credentials)
try:
xero.contacts.all()
self.fail("Should raise a XeroInternalError.")
except XeroInternalError as e:
# Error messages have been extracted
self.assertEqual(
str(e),
"An unhandled error with the Xero API occurred. Contact the Xero API team if problems persist.",
)
# The response has also been stored
self.assertEqual(e.response.status_code, 500)
self.assertEqual(
e.response.text,
"An unhandled error with the Xero API occurred. Contact the Xero API team if problems persist.",
)
except Exception as e:
self.fail("Should raise a XeroInternalError, not %s" % e)
@patch("requests.post")
def test_not_implemented(self, r_post):
"In case of an SSL failure, a Forbidden exception is raised"
# Verified response from the live API
r_post.return_value = Mock(
status_code=501, encoding="utf-8", text=mock_data.not_implemented_text
)
credentials = Mock(base_url="")
xero = Xero(credentials)
try:
xero.organisations.save({})
self.fail("Should raise a XeroNotImplemented.")
except XeroNotImplemented as e:
# Error messages have been extracted
self.assertEqual(str(e), "The Api Method called is not implemented")
# The response has also been stored
self.assertEqual(e.response.status_code, 501)
            self.assertTrue(e.response.text.startswith("<ApiException"))
except Exception as e:
self.fail("Should raise a XeroNotImplemented, not %s" % e)
@patch("requests.get")
def test_rate_limit_exceeded(self, r_get):
"If you exceed the rate limit, an exception is raised."
# Response based off Xero documentation; not confirmed by reality.
r_get.return_value = Mock(
status_code=503,
text="oauth_problem=rate%20limit%20exceeded&oauth_problem_advice=please%20wait%20before%20retrying%20the%20xero%20api",
)
credentials = Mock(base_url="")
xero = Xero(credentials)
try:
xero.contacts.all()
self.fail("Should raise a XeroRateLimitExceeded.")
except XeroRateLimitExceeded as e:
# Error messages have been extracted
self.assertEqual(str(e), "please wait before retrying the xero api")
self.assertEqual(e.errors[0], "rate limit exceeded")
# The response has also been stored
self.assertEqual(e.response.status_code, 503)
self.assertEqual(
e.response.text,
"oauth_problem=rate%20limit%20exceeded&oauth_problem_advice=please%20wait%20before%20retrying%20the%20xero%20api",
)
except Exception as e:
self.fail("Should raise a XeroRateLimitExceeded, not %s" % e)
@patch("requests.get")
def test_not_available(self, r_get):
"If Xero goes down for maintenance, an exception is raised"
# Response based off Xero documentation; not confirmed by reality.
r_get.return_value = Mock(
status_code=503, text="The Xero API is currently offline for maintenance"
)
credentials = Mock(base_url="")
xero = Xero(credentials)
try:
xero.contacts.all()
self.fail("Should raise a XeroNotAvailable.")
except XeroNotAvailable as e:
# Error messages have been extracted
self.assertEqual(
str(e), "The Xero API is currently offline for maintenance"
)
# The response has also been stored
self.assertEqual(e.response.status_code, 503)
self.assertEqual(
e.response.text, "The Xero API is currently offline for maintenance"
)
except Exception as e:
self.fail("Should raise a XeroNotAvailable, not %s" % e)
|
from django.shortcuts import render
from rest_framework import generics, status
from .serializers import RoomSerializer, CreateRoomSerializer, UpdateRoomSerializer
from .models import Room
from rest_framework.views import APIView
from rest_framework.response import Response
from django.http import JsonResponse
# Create your views here.
class RoomView(generics.CreateAPIView):
queryset = Room.objects.all()
serializer_class = RoomSerializer
class GetRoom(APIView):
serializer_class = RoomSerializer
lookup_url_kwarg = 'code'
def get(self, request, format=None):
code = request.GET.get(self.lookup_url_kwarg)
        if code is not None:
room = Room.objects.filter(code=code)
if len(room) > 0:
data = RoomSerializer(room[0]).data
data['is_host'] = self.request.session.session_key == room[0].host
return Response(data, status=status.HTTP_200_OK)
return Response({'Room Not Found': 'Invalid Room Code.'}, status=status.HTTP_404_NOT_FOUND)
        return Response({'Bad Request': 'Code parameter not found in request'}, status=status.HTTP_400_BAD_REQUEST)
class JoinRoom(APIView):
lookup_url_kwarg = 'code'
def post(self, request, format=None):
if not self.request.session.exists(self.request.session.session_key):
self.request.session.create()
code = request.data.get(self.lookup_url_kwarg)
        if code is not None:
room_result = Room.objects.filter(code=code)
if len(room_result) > 0:
room = room_result[0]
self.request.session['room_code'] = code
return Response({'message' : 'Room Joined!'}, status=status.HTTP_200_OK)
            return Response({'Bad Request': 'Invalid Room Code'}, status=status.HTTP_400_BAD_REQUEST)
        return Response({'Bad Request': 'Invalid post data, did not find a code key'}, status=status.HTTP_400_BAD_REQUEST)
class CreateRoomView(APIView):
serializer_class = CreateRoomSerializer
def post(self, request, format=None):
if not self.request.session.exists(self.request.session.session_key):
self.request.session.create()
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
guest_can_pause = serializer.data.get('guest_can_pause')
votes_to_skip = serializer.data.get('votes_to_skip')
host = self.request.session.session_key
queryset = Room.objects.filter(host=host)
if queryset.exists():
room = queryset[0]
room.guest_can_pause = guest_can_pause
room.votes_to_skip = votes_to_skip
room.save(update_fields=['guest_can_pause', 'votes_to_skip'])
self.request.session['room_code'] = room.code
return Response(RoomSerializer(room).data, status=status.HTTP_200_OK)
else:
room = Room(host=host, guest_can_pause=guest_can_pause,
votes_to_skip=votes_to_skip)
room.save()
self.request.session['room_code'] = room.code
return Response(RoomSerializer(room).data, status=status.HTTP_201_CREATED)
return Response({'Bad Request': 'Invalid data...'}, status=status.HTTP_400_BAD_REQUEST)
class IsUserInRoom(APIView):
def get(self, request, format=None):
if not self.request.session.exists(self.request.session.session_key):
self.request.session.create()
data = {
'code': self.request.session.get('room_code')
}
return JsonResponse(data, status=status.HTTP_200_OK)
class LeaveRoom(APIView):
def post(self, request, format=None):
if 'room_code' in self.request.session:
self.request.session.pop('room_code')
host_id = self.request.session.session_key
room_results = Room.objects.filter(host=host_id)
if len(room_results) > 0:
room = room_results[0]
room.delete()
return Response({'Message': 'Success'}, status=status.HTTP_200_OK)
class UpdateRoom(APIView):
serializer_class = UpdateRoomSerializer
def patch(self, request, format=None):
if not self.request.session.exists(self.request.session.session_key):
self.request.session.create()
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
guest_can_pause = serializer.data.get('guest_can_pause')
votes_to_skip = serializer.data.get('votes_to_skip')
code = serializer.data.get('code')
queryset = Room.objects.filter(code=code)
if not queryset.exists():
return Response({'msg': 'Room not found.'}, status=status.HTTP_404_NOT_FOUND)
room = queryset[0]
user_id = self.request.session.session_key
if room.host != user_id:
return Response({'msg': 'You are not the host of this room.'}, status=status.HTTP_403_FORBIDDEN)
room.guest_can_pause = guest_can_pause
room.votes_to_skip = votes_to_skip
room.save(update_fields=['guest_can_pause', 'votes_to_skip'])
return Response(RoomSerializer(room).data, status=status.HTTP_200_OK)
return Response({'Bad Request': "Invalid Data..."}, status=status.HTTP_400_BAD_REQUEST)
|
# Generated by Django 2.1.15 on 2019-12-31 00:14
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
|
#!/usr/bin/env python
import json
import SNMPUtil
import argparse
### Monitoring iDRAC Servers - Powerunit Performance
### It uses the snmpwalk command to get the hardware data from the iDRAC Servers.
### SNMPUtil.py is used to get the snmp raw data and parsed to get the output json
### Download and install the latest version of Site24x7 Linux Agent. The agent will execute the plugin and push the data to the Site24x7 server
###
### Author: Anita, Zoho Corp
### Language : Python
### Tested in Ubuntu
### Tested for snmp version 2c
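### Example invocation (the script name, host, community string and MIB path below are placeholders):
###     python idrac_powerunit_plugin.py --hostname 192.168.1.10 --snmp_version 2c \
###            --snmp_community_str public --idrac_mib_file_locn /usr/share/snmp/mibs
### The plugin prints a flat JSON document; the keys below are only illustrative of the output shape:
###     {"powerconsumption_3": "23.8", "state_1": "2", "status_1": "3", "units": {"powerconsumption_3": "W"}, ...}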
### OIDS for Getting Power unit Details
OIDS = {'powerunit' : ['powerUnitTable','amperageProbeLocationName','amperageProbeReading']}
### OID Attributes
hardware = {'powerunit' : ['powerUnitStateSettings','powerUnitRedundancyStatus','powerUnitStatus','amperageProbeReading']}
### Output Keys and their units
names = {'powerunit' : ['state','redundancystatus','status', {'powerconsumption':'W'}]}
class HardwareParser:
def __init__(self, hostname, snmp_version, snmp_community_str, mib_location):
self.hostname = hostname
self.snmp_version = snmp_version
self.snmp_community_str = snmp_community_str
self.mib_location = mib_location
self.hardware = ''
self.oids = ''
self.pattern = ''
    def getData(self):
        output_data = {}
        output_data['data'] = {}
        output_data['units'] = {}
        for hardware_group in OIDS:
            self.hardware = hardware_group
            self.oids = OIDS[self.hardware]
            self.keys = set()
            for oid in self.oids:
                try:
                    ### SNMPUtil module is used to get the snmp output for the input OID
                    snmpdata = SNMPUtil.SNMPPARSER('snmpwalk', self.hostname, self.snmp_version, self.snmp_community_str, oid, self.mib_location, hardware[self.hardware])
                    ### get Raw SNMP Output as a dict
                    self.snmp_data = snmpdata.getRawData()
                    ### Method to parse the SNMP command output data
                    output_data = self.parseSNMPData(output_data)
                except Exception as e:
                    raise Exception(e)
        return output_data
### Method to parse the SNMP command output data
    def parseSNMPData(self, output_data):
        jsondata = output_data['data']
        unitdata = output_data['units']
        appendkeys = not jsondata  # only collect element names on the first pass
        for line in self.snmp_data:
            for index, oid_attr in enumerate(hardware[self.hardware]):
                if oid_attr in line:
                    name = ''.join(line.split("::")[1:]).replace('"', '').split(' ')[0].split('.')
                    elementname = name[-1]  # Name
                    value = ''.join(line.split()[1:]).replace('"', '')  # Value
                    if appendkeys:
                        self.keys.add(elementname)
                    if ':' in value:
                        value = value.split(':')[-1]
                    if oid_attr == 'powerSupplyOutputWatts':
                        value = int(value) / float(10)
                    if oid_attr == 'powerSupplyRatedInputWattage':
                        value = int(value) / float(10)
                    if oid_attr == 'amperageProbeReading':
                        value = int(value) / float(10)
                    if oid_attr == 'voltageProbeReading':
                        value = int(value) / float(1000)
                    elem = names[self.hardware][index]
                    attribute = ''  # Attribute name
                    unit = ''  # Attribute unit
                    if type(elem) is str:  # Attributes with no units specified
                        attribute = elem
                    elif type(elem) is dict:  # Attributes with units
                        attribute = list(elem.keys())[0]
                        unit = elem[attribute]
                    key = (attribute + '_' + elementname).replace(' ', '')
                    if appendkeys:
                        jsondata[key] = value
                        if unit != '':
                            unitdata[key] = unit
                    elif elementname in self.keys:
                        jsondata[key] = value
                        if unit != '':
                            unitdata[key] = unit
                    elif self.hardware == 'powerunit':
                        if 'System Board Pwr Consumption' in line:
                            self.keys.add(elementname)
                        if elementname in self.keys and 'amperageProbeReading' in line:
                            jsondata[key] = value
        output_data['data'] = jsondata
        output_data['units'] = unitdata
        return output_data
if __name__ == '__main__':
result = {}
parser = argparse.ArgumentParser()
parser.add_argument('--hostname', help='hostname', nargs='?', default='localhost')
parser.add_argument('--snmp_version', help='snmp version', type=str, nargs='?', default="2c")
parser.add_argument('--snmp_community_str', help='snmp community version', nargs='?', default='public')
parser.add_argument('--idrac_mib_file_locn', help='idrac mib file location', nargs='?', default='')
parser.add_argument('--plugin_version', help='plugin template version', nargs='?', default='1')
parser.add_argument('--heartbeat_required', help='Enable heartbeat for monitoring', nargs='?', default="true")
args = parser.parse_args()
try:
parser = HardwareParser(args.hostname, args.snmp_version, args.snmp_community_str, args.idrac_mib_file_locn)
output = parser.getData()
result = output['data']
result['units'] = output['units']
except Exception as e:
result['msg'] = str(e)
result['plugin_version'] = args.plugin_version
result['heartbeat_required'] = args.heartbeat_required
print(json.dumps(result, indent=2, sort_keys=True))
|
import sys, smtplib
fromaddr = input("From: ")
toaddrs = input("To: ").split(',')
print("Enter message, end with ^D:")
msg = ''
while 1:
line = sys.stdin.readline()
if not line:
break
msg = msg + line
# The actual mail send
server = smtplib.SMTP('localhost', 2500)
server.sendmail(fromaddr, toaddrs, msg)
server.quit()
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import asyncio
import logging
import signal
import sys
import traceback
from typing import Any, Callable, Coroutine, Dict, Generator, Iterable, List, Optional, Sequence, TYPE_CHECKING, Tuple, TypeVar, Union
import aiohttp
from .user import User
from .invite import Invite
from .template import Template
from .widget import Widget
from .guild import Guild
from .emoji import Emoji
from .channel import _threaded_channel_factory
from .enums import ChannelType
from .mentions import AllowedMentions
from .errors import *
from .enums import Status, VoiceRegion
from .flags import ApplicationFlags, Intents
from .gateway import *
from .activity import ActivityTypes, BaseActivity, create_activity
from .voice_client import VoiceClient
from .http import HTTPClient
from .state import ConnectionState
from . import utils
from .utils import MISSING
from .object import Object
from .backoff import ExponentialBackoff
from .webhook import Webhook
from .iterators import GuildIterator
from .appinfo import AppInfo
from .ui.view import View
from .stage_instance import StageInstance
from .threads import Thread
if TYPE_CHECKING:
from .abc import SnowflakeTime, PrivateChannel, GuildChannel, Snowflake
from .channel import DMChannel
from .user import ClientUser
from .message import Message
from .member import Member
from .voice_client import VoiceProtocol
__all__ = (
'Client',
)
Coro = TypeVar('Coro', bound=Callable[..., Coroutine[Any, Any, Any]])
log: logging.Logger = logging.getLogger(__name__)
def _cancel_tasks(loop: asyncio.AbstractEventLoop) -> None:
tasks = {t for t in asyncio.all_tasks(loop=loop) if not t.done()}
if not tasks:
return
log.info('Cleaning up after %d tasks.', len(tasks))
for task in tasks:
task.cancel()
loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True))
log.info('All tasks finished cancelling.')
for task in tasks:
if task.cancelled():
continue
if task.exception() is not None:
loop.call_exception_handler({
'message': 'Unhandled exception during Client.run shutdown.',
'exception': task.exception(),
'task': task
})
def _cleanup_loop(loop: asyncio.AbstractEventLoop) -> None:
try:
_cancel_tasks(loop)
loop.run_until_complete(loop.shutdown_asyncgens())
finally:
log.info('Closing the event loop.')
loop.close()
class Client:
r"""Represents a client connection that connects to Discord.
This class is used to interact with the Discord WebSocket and API.
A number of options can be passed to the :class:`Client`.
Parameters
-----------
max_messages: Optional[:class:`int`]
The maximum number of messages to store in the internal message cache.
This defaults to ``1000``. Passing in ``None`` disables the message cache.
.. versionchanged:: 1.3
Allow disabling the message cache and change the default size to ``1000``.
loop: Optional[:class:`asyncio.AbstractEventLoop`]
The :class:`asyncio.AbstractEventLoop` to use for asynchronous operations.
Defaults to ``None``, in which case the default event loop is used via
:func:`asyncio.get_event_loop()`.
connector: Optional[:class:`aiohttp.BaseConnector`]
The connector to use for connection pooling.
proxy: Optional[:class:`str`]
Proxy URL.
proxy_auth: Optional[:class:`aiohttp.BasicAuth`]
An object that represents proxy HTTP Basic Authorization.
shard_id: Optional[:class:`int`]
Integer starting at ``0`` and less than :attr:`.shard_count`.
shard_count: Optional[:class:`int`]
The total number of shards.
application_id: :class:`int`
The client's application ID.
intents: :class:`Intents`
The intents that you want to enable for the session. This is a way of
disabling and enabling certain gateway events from triggering and being sent.
If not given, defaults to a regularly constructed :class:`Intents` class.
.. versionadded:: 1.5
member_cache_flags: :class:`MemberCacheFlags`
Allows for finer control over how the library caches members.
If not given, defaults to cache as much as possible with the
currently selected intents.
.. versionadded:: 1.5
chunk_guilds_at_startup: :class:`bool`
Indicates if :func:`.on_ready` should be delayed to chunk all guilds
at start-up if necessary. This operation is incredibly slow for large
amounts of guilds. The default is ``True`` if :attr:`Intents.members`
is ``True``.
.. versionadded:: 1.5
status: Optional[:class:`.Status`]
A status to start your presence with upon logging on to Discord.
activity: Optional[:class:`.BaseActivity`]
An activity to start your presence with upon logging on to Discord.
allowed_mentions: Optional[:class:`AllowedMentions`]
Control how the client handles mentions by default on every message sent.
.. versionadded:: 1.4
heartbeat_timeout: :class:`float`
        The maximum number of seconds before timing out and restarting the
WebSocket in the case of not receiving a HEARTBEAT_ACK. Useful if
        processing the initial packets takes too long to the point of disconnecting
you. The default timeout is 60 seconds.
guild_ready_timeout: :class:`float`
The maximum number of seconds to wait for the GUILD_CREATE stream to end before
preparing the member cache and firing READY. The default timeout is 2 seconds.
.. versionadded:: 1.4
assume_unsync_clock: :class:`bool`
Whether to assume the system clock is unsynced. This applies to the ratelimit handling
code. If this is set to ``True``, the default, then the library uses the time to reset
a rate limit bucket given by Discord. If this is ``False`` then your system clock is
used to calculate how long to sleep for. If this is set to ``False`` it is recommended to
sync your system clock to Google's NTP server.
.. versionadded:: 1.3
Attributes
-----------
ws
The websocket gateway the client is currently connected to. Could be ``None``.
loop: :class:`asyncio.AbstractEventLoop`
The event loop that the client uses for asynchronous operations.
"""
def __init__(
self,
*,
loop: Optional[asyncio.AbstractEventLoop] = None,
**options: Any,
):
self.ws: DiscordWebSocket = None # type: ignore
self.loop: asyncio.AbstractEventLoop = asyncio.get_event_loop() if loop is None else loop
self._listeners: Dict[str, List[Tuple[asyncio.Future, Callable[..., bool]]]] = {}
self.shard_id: Optional[int] = options.get('shard_id')
self.shard_count: Optional[int] = options.get('shard_count')
connector: Optional[aiohttp.BaseConnector] = options.pop('connector', None)
proxy: Optional[str] = options.pop('proxy', None)
proxy_auth: Optional[aiohttp.BasicAuth] = options.pop('proxy_auth', None)
unsync_clock: bool = options.pop('assume_unsync_clock', True)
self.http: HTTPClient = HTTPClient(connector, proxy=proxy, proxy_auth=proxy_auth, unsync_clock=unsync_clock, loop=self.loop)
self._handlers: Dict[str, Callable] = {
'ready': self._handle_ready
}
self._hooks: Dict[str, Callable] = {
'before_identify': self._call_before_identify_hook
}
self._connection: ConnectionState = self._get_state(**options)
self._connection.shard_count = self.shard_count
self._closed: bool = False
self._ready: asyncio.Event = asyncio.Event()
self._connection._get_websocket = self._get_websocket
self._connection._get_client = lambda: self
if VoiceClient.warn_nacl:
VoiceClient.warn_nacl = False
log.warning("PyNaCl is not installed, voice will NOT be supported")
# internals
def _get_websocket(self, guild_id: Optional[int] = None, *, shard_id: Optional[int] = None) -> DiscordWebSocket:
return self.ws
def _get_state(self, **options: Any) -> ConnectionState:
return ConnectionState(dispatch=self.dispatch, handlers=self._handlers,
hooks=self._hooks, http=self.http, loop=self.loop, **options)
def _handle_ready(self) -> None:
self._ready.set()
@property
def latency(self) -> float:
""":class:`float`: Measures latency between a HEARTBEAT and a HEARTBEAT_ACK in seconds.
This could be referred to as the Discord WebSocket protocol latency.
"""
ws = self.ws
return float('nan') if not ws else ws.latency
def is_ws_ratelimited(self) -> bool:
""":class:`bool`: Whether the websocket is currently rate limited.
This can be useful to know when deciding whether you should query members
using HTTP or via the gateway.
.. versionadded:: 1.6
"""
if self.ws:
return self.ws.is_ratelimited()
return False
@property
def user(self) -> Optional[ClientUser]:
"""Optional[:class:`.ClientUser`]: Represents the connected client. ``None`` if not logged in."""
return self._connection.user
@property
def guilds(self) -> List[Guild]:
"""List[:class:`.Guild`]: The guilds that the connected client is a member of."""
return self._connection.guilds
@property
def emojis(self) -> List[Emoji]:
"""List[:class:`.Emoji`]: The emojis that the connected client has."""
return self._connection.emojis
@property
def cached_messages(self) -> Sequence[Message]:
"""Sequence[:class:`.Message`]: Read-only list of messages the connected client has cached.
.. versionadded:: 1.1
"""
return utils.SequenceProxy(self._connection._messages or [])
@property
def private_channels(self) -> List[PrivateChannel]:
"""List[:class:`.abc.PrivateChannel`]: The private channels that the connected client is participating on.
.. note::
This returns only up to 128 most recent private channels due to an internal working
on how Discord deals with private channels.
"""
return self._connection.private_channels
@property
def voice_clients(self) -> List[VoiceProtocol]:
"""List[:class:`.VoiceProtocol`]: Represents a list of voice connections.
These are usually :class:`.VoiceClient` instances.
"""
return self._connection.voice_clients
@property
def application_id(self) -> Optional[int]:
"""Optional[:class:`int`]: The client's application ID.
If this is not passed via ``__init__`` then this is retrieved
through the gateway when an event contains the data. Usually
after :func:`~discord.on_connect` is called.
"""
return self._connection.application_id
@property
def application_flags(self) -> ApplicationFlags:
""":class:`~discord.ApplicationFlags`: The client's application flags.
        .. versionadded:: 2.0
"""
return self._connection.application_flags # type: ignore
def is_ready(self) -> bool:
""":class:`bool`: Specifies if the client's internal cache is ready for use."""
return self._ready.is_set()
async def _run_event(self, coro: Callable[..., Coroutine[Any, Any, Any]], event_name: str, *args: Any, **kwargs: Any) -> None:
try:
await coro(*args, **kwargs)
except asyncio.CancelledError:
pass
except Exception:
try:
await self.on_error(event_name, *args, **kwargs)
except asyncio.CancelledError:
pass
def _schedule_event(self, coro: Callable[..., Coroutine[Any, Any, Any]], event_name: str, *args: Any, **kwargs: Any) -> asyncio.Task:
wrapped = self._run_event(coro, event_name, *args, **kwargs)
# Schedules the task
task = self.loop.create_task(wrapped)
task.set_name(f'discord.py: {event_name}')
return task
def dispatch(self, event: str, *args: Any, **kwargs: Any) -> None:
log.debug('Dispatching event %s', event)
method = 'on_' + event
listeners = self._listeners.get(event)
if listeners:
removed = []
for i, (future, condition) in enumerate(listeners):
if future.cancelled():
removed.append(i)
continue
try:
result = condition(*args)
except Exception as exc:
future.set_exception(exc)
removed.append(i)
else:
if result:
if len(args) == 0:
future.set_result(None)
elif len(args) == 1:
future.set_result(args[0])
else:
future.set_result(args)
removed.append(i)
if len(removed) == len(listeners):
self._listeners.pop(event)
else:
for idx in reversed(removed):
del listeners[idx]
try:
coro = getattr(self, method)
except AttributeError:
pass
else:
self._schedule_event(coro, method, *args, **kwargs)
async def on_error(self, event_method: str, *args: Any, **kwargs: Any) -> None:
"""|coro|
The default error handler provided by the client.
By default this prints to :data:`sys.stderr` however it could be
overridden to have a different implementation.
Check :func:`~discord.on_error` for more details.
"""
print(f'Ignoring exception in {event_method}', file=sys.stderr)
traceback.print_exc()
# hooks
async def _call_before_identify_hook(self, shard_id: Optional[int], *, initial: bool = False) -> None:
# This hook is an internal hook that actually calls the public one.
# It allows the library to have its own hook without stepping on the
# toes of those who need to override their own hook.
await self.before_identify_hook(shard_id, initial=initial)
async def before_identify_hook(self, shard_id: Optional[int], *, initial: bool = False) -> None:
"""|coro|
A hook that is called before IDENTIFYing a session. This is useful
if you wish to have more control over the synchronization of multiple
IDENTIFYing clients.
The default implementation sleeps for 5 seconds.
.. versionadded:: 1.4
Parameters
------------
shard_id: :class:`int`
The shard ID that requested being IDENTIFY'd
initial: :class:`bool`
Whether this IDENTIFY is the first initial IDENTIFY.
"""
if not initial:
await asyncio.sleep(5.0)
# login state management
async def login(self, token: str) -> None:
"""|coro|
Logs in the client with the specified credentials.
Parameters
-----------
token: :class:`str`
The authentication token. Do not prefix this token with
anything as the library will do it for you.
Raises
------
:exc:`.LoginFailure`
The wrong credentials are passed.
:exc:`.HTTPException`
An unknown HTTP related error occurred,
usually when it isn't 200 or the known incorrect credentials
passing status code.
"""
log.info('logging in using static token')
await self.http.static_login(token.strip())
async def connect(self, *, reconnect: bool = True) -> None:
"""|coro|
Creates a websocket connection and lets the websocket listen
to messages from Discord. This is a loop that runs the entire
event system and miscellaneous aspects of the library. Control
is not resumed until the WebSocket connection is terminated.
Parameters
-----------
reconnect: :class:`bool`
If we should attempt reconnecting, either due to internet
failure or a specific failure on Discord's part. Certain
disconnects that lead to bad state will not be handled (such as
invalid sharding payloads or bad tokens).
Raises
-------
:exc:`.GatewayNotFound`
If the gateway to connect to Discord is not found. Usually if this
is thrown then there is a Discord API outage.
:exc:`.ConnectionClosed`
The websocket connection has been terminated.
"""
backoff = ExponentialBackoff()
ws_params = {
'initial': True,
'shard_id': self.shard_id,
}
while not self.is_closed():
try:
coro = DiscordWebSocket.from_client(self, **ws_params)
self.ws = await asyncio.wait_for(coro, timeout=60.0)
ws_params['initial'] = False
while True:
await self.ws.poll_event()
except ReconnectWebSocket as e:
log.info('Got a request to %s the websocket.', e.op)
self.dispatch('disconnect')
ws_params.update(sequence=self.ws.sequence, resume=e.resume, session=self.ws.session_id)
continue
except (OSError,
HTTPException,
GatewayNotFound,
ConnectionClosed,
aiohttp.ClientError,
asyncio.TimeoutError) as exc:
self.dispatch('disconnect')
if not reconnect:
await self.close()
if isinstance(exc, ConnectionClosed) and exc.code == 1000:
# clean close, don't re-raise this
return
raise
if self.is_closed():
return
# If we get connection reset by peer then try to RESUME
if isinstance(exc, OSError) and exc.errno in (54, 10054):
ws_params.update(sequence=self.ws.sequence, initial=False, resume=True, session=self.ws.session_id)
continue
# We should only get this when an unhandled close code happens,
# such as a clean disconnect (1000) or a bad state (bad token, no sharding, etc)
# sometimes, discord sends us 1000 for unknown reasons so we should reconnect
# regardless and rely on is_closed instead
if isinstance(exc, ConnectionClosed):
if exc.code == 4014:
raise PrivilegedIntentsRequired(exc.shard_id) from None
if exc.code != 1000:
await self.close()
raise
retry = backoff.delay()
log.exception("Attempting a reconnect in %.2fs", retry)
await asyncio.sleep(retry)
# Always try to RESUME the connection
# If the connection is not RESUME-able then the gateway will invalidate the session.
# This is apparently what the official Discord client does.
ws_params.update(sequence=self.ws.sequence, resume=True, session=self.ws.session_id)
async def close(self) -> None:
"""|coro|
Closes the connection to Discord.
"""
if self._closed:
return
self._closed = True
for voice in self.voice_clients:
try:
await voice.disconnect(force=True)
except Exception:
# if an error happens during disconnects, disregard it.
pass
if self.ws is not None and self.ws.open:
await self.ws.close(code=1000)
await self.http.close()
self._ready.clear()
def clear(self) -> None:
"""Clears the internal state of the bot.
After this, the bot can be considered "re-opened", i.e. :meth:`is_closed`
and :meth:`is_ready` both return ``False`` along with the bot's internal
cache cleared.
"""
self._closed = False
self._ready.clear()
self._connection.clear()
self.http.recreate()
async def start(self, token: str, *, reconnect: bool = True) -> None:
"""|coro|
A shorthand coroutine for :meth:`login` + :meth:`connect`.
Raises
-------
TypeError
An unexpected keyword argument was received.
"""
await self.login(token)
await self.connect(reconnect=reconnect)
def run(self, *args: Any, **kwargs: Any) -> None:
"""A blocking call that abstracts away the event loop
initialisation from you.
If you want more control over the event loop then this
function should not be used. Use :meth:`start` coroutine
or :meth:`connect` + :meth:`login`.
Roughly Equivalent to: ::
try:
loop.run_until_complete(start(*args, **kwargs))
except KeyboardInterrupt:
loop.run_until_complete(close())
# cancel all tasks lingering
finally:
loop.close()
.. warning::
This function must be the last function to call due to the fact that it
is blocking. That means that registration of events or anything being
called after this function call will not execute until it returns.
"""
loop = self.loop
try:
loop.add_signal_handler(signal.SIGINT, lambda: loop.stop())
loop.add_signal_handler(signal.SIGTERM, lambda: loop.stop())
except NotImplementedError:
pass
async def runner():
try:
await self.start(*args, **kwargs)
finally:
if not self.is_closed():
await self.close()
def stop_loop_on_completion(f):
loop.stop()
future = asyncio.ensure_future(runner(), loop=loop)
future.add_done_callback(stop_loop_on_completion)
try:
loop.run_forever()
except KeyboardInterrupt:
log.info('Received signal to terminate bot and event loop.')
finally:
future.remove_done_callback(stop_loop_on_completion)
log.info('Cleaning up tasks.')
_cleanup_loop(loop)
if not future.cancelled():
try:
return future.result()
except KeyboardInterrupt:
# I am unsure why this gets raised here but suppress it anyway
return None
# properties
def is_closed(self) -> bool:
""":class:`bool`: Indicates if the websocket connection is closed."""
return self._closed
@property
def activity(self) -> Optional[ActivityTypes]:
"""Optional[:class:`.BaseActivity`]: The activity being used upon
logging in.
"""
return create_activity(self._connection._activity)
@activity.setter
def activity(self, value: Optional[ActivityTypes]) -> None:
if value is None:
self._connection._activity = None
elif isinstance(value, BaseActivity):
self._connection._activity = value.to_dict()
else:
raise TypeError('activity must derive from BaseActivity.')
@property
def allowed_mentions(self) -> Optional[AllowedMentions]:
"""Optional[:class:`~discord.AllowedMentions`]: The allowed mention configuration.
.. versionadded:: 1.4
"""
return self._connection.allowed_mentions
@allowed_mentions.setter
def allowed_mentions(self, value: Optional[AllowedMentions]) -> None:
if value is None or isinstance(value, AllowedMentions):
self._connection.allowed_mentions = value
else:
raise TypeError(f'allowed_mentions must be AllowedMentions not {value.__class__!r}')
@property
def intents(self) -> Intents:
""":class:`~discord.Intents`: The intents configured for this connection.
.. versionadded:: 1.5
"""
return self._connection.intents
# helpers/getters
@property
def users(self) -> List[User]:
"""List[:class:`~discord.User`]: Returns a list of all the users the bot can see."""
return list(self._connection._users.values())
def get_channel(self, id: int) -> Optional[Union[GuildChannel, PrivateChannel]]:
"""Returns a channel with the given ID.
Parameters
-----------
id: :class:`int`
The ID to search for.
Returns
--------
Optional[Union[:class:`.abc.GuildChannel`, :class:`.abc.PrivateChannel`]]
The returned channel or ``None`` if not found.
"""
return self._connection.get_channel(id)
def get_stage_instance(self, id) -> Optional[StageInstance]:
"""Returns a stage instance with the given stage channel ID.
.. versionadded:: 2.0
Parameters
-----------
id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`.StageInstance`]
            The stage instance or ``None`` if not found.
"""
from .channel import StageChannel
channel = self._connection.get_channel(id)
if isinstance(channel, StageChannel):
return channel.instance
def get_guild(self, id) -> Optional[Guild]:
"""Returns a guild with the given ID.
Parameters
-----------
id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`.Guild`]
The guild or ``None`` if not found.
"""
return self._connection._get_guild(id)
def get_user(self, id) -> Optional[User]:
"""Returns a user with the given ID.
Parameters
-----------
id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`~discord.User`]
The user or ``None`` if not found.
"""
return self._connection.get_user(id)
def get_emoji(self, id) -> Optional[Emoji]:
"""Returns an emoji with the given ID.
Parameters
-----------
id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`.Emoji`]
The custom emoji or ``None`` if not found.
"""
return self._connection.get_emoji(id)
def get_all_channels(self) -> Generator[GuildChannel, None, None]:
"""A generator that retrieves every :class:`.abc.GuildChannel` the client can 'access'.
This is equivalent to: ::
for guild in client.guilds:
for channel in guild.channels:
yield channel
.. note::
Just because you receive a :class:`.abc.GuildChannel` does not mean that
you can communicate in said channel. :meth:`.abc.GuildChannel.permissions_for` should
be used for that.
Yields
------
:class:`.abc.GuildChannel`
A channel the client can 'access'.
"""
for guild in self.guilds:
yield from guild.channels
def get_all_members(self) -> Generator[Member, None, None]:
"""Returns a generator with every :class:`.Member` the client can see.
This is equivalent to: ::
for guild in client.guilds:
for member in guild.members:
yield member
Yields
------
:class:`.Member`
A member the client can see.
"""
for guild in self.guilds:
yield from guild.members
# listeners/waiters
async def wait_until_ready(self) -> None:
"""|coro|
Waits until the client's internal cache is all ready.
"""
await self._ready.wait()
def wait_for(
self,
event: str,
*,
check: Optional[Callable[..., bool]] = None,
timeout: Optional[float] = None,
) -> Any:
"""|coro|
Waits for a WebSocket event to be dispatched.
This could be used to wait for a user to reply to a message,
or to react to a message, or to edit a message in a self-contained
way.
The ``timeout`` parameter is passed onto :func:`asyncio.wait_for`. By default,
it does not timeout. Note that this does propagate the
:exc:`asyncio.TimeoutError` for you in case of timeout and is provided for
ease of use.
In case the event returns multiple arguments, a :class:`tuple` containing those
arguments is returned instead. Please check the
:ref:`documentation <discord-api-events>` for a list of events and their
parameters.
This function returns the **first event that meets the requirements**.
Examples
---------
Waiting for a user reply: ::
@client.event
async def on_message(message):
if message.content.startswith('$greet'):
channel = message.channel
await channel.send('Say hello!')
def check(m):
return m.content == 'hello' and m.channel == channel
msg = await client.wait_for('message', check=check)
await channel.send(f'Hello {msg.author}!')
Waiting for a thumbs up reaction from the message author: ::
@client.event
async def on_message(message):
if message.content.startswith('$thumb'):
channel = message.channel
await channel.send('Send me that \N{THUMBS UP SIGN} reaction, mate')
def check(reaction, user):
return user == message.author and str(reaction.emoji) == '\N{THUMBS UP SIGN}'
try:
reaction, user = await client.wait_for('reaction_add', timeout=60.0, check=check)
except asyncio.TimeoutError:
await channel.send('\N{THUMBS DOWN SIGN}')
else:
await channel.send('\N{THUMBS UP SIGN}')
Parameters
------------
event: :class:`str`
The event name, similar to the :ref:`event reference <discord-api-events>`,
but without the ``on_`` prefix, to wait for.
check: Optional[Callable[..., :class:`bool`]]
A predicate to check what to wait for. The arguments must meet the
parameters of the event being waited for.
timeout: Optional[:class:`float`]
The number of seconds to wait before timing out and raising
:exc:`asyncio.TimeoutError`.
Raises
-------
asyncio.TimeoutError
If a timeout is provided and it was reached.
Returns
--------
Any
Returns no arguments, a single argument, or a :class:`tuple` of multiple
arguments that mirrors the parameters passed in the
:ref:`event reference <discord-api-events>`.
"""
future = self.loop.create_future()
if check is None:
def _check(*args):
return True
check = _check
ev = event.lower()
try:
listeners = self._listeners[ev]
except KeyError:
listeners = []
self._listeners[ev] = listeners
listeners.append((future, check))
return asyncio.wait_for(future, timeout)
# event registration
def event(self, coro: Coro) -> Coro:
"""A decorator that registers an event to listen to.
You can find more info about the events on the :ref:`documentation below <discord-api-events>`.
The events must be a :ref:`coroutine <coroutine>`, if not, :exc:`TypeError` is raised.
Example
---------
.. code-block:: python3
@client.event
async def on_ready():
print('Ready!')
Raises
--------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError('event registered must be a coroutine function')
setattr(self, coro.__name__, coro)
log.debug('%s has successfully been registered as an event', coro.__name__)
return coro
async def change_presence(
self,
*,
activity: Optional[BaseActivity] = None,
status: Optional[Status] = None,
):
"""|coro|
Changes the client's presence.
Example
---------
.. code-block:: python3
game = discord.Game("with the API")
await client.change_presence(status=discord.Status.idle, activity=game)
.. versionchanged:: 2.0
Removed the ``afk`` keyword-only parameter.
Parameters
----------
activity: Optional[:class:`.BaseActivity`]
The activity being done. ``None`` if no currently active activity is done.
status: Optional[:class:`.Status`]
Indicates what status to change to. If ``None``, then
:attr:`.Status.online` is used.
Raises
------
:exc:`.InvalidArgument`
If the ``activity`` parameter is not the proper type.
"""
if status is None:
status_str = 'online'
status = Status.online
elif status is Status.offline:
status_str = 'invisible'
status = Status.offline
else:
status_str = str(status)
await self.ws.change_presence(activity=activity, status=status_str)
for guild in self._connection.guilds:
me = guild.me
if me is None:
continue
if activity is not None:
me.activities = (activity,)
else:
me.activities = ()
me.status = status
# Guild stuff
def fetch_guilds(
self,
*,
limit: Optional[int] = 100,
before: SnowflakeTime = None,
after: SnowflakeTime = None
) -> GuildIterator:
"""Retrieves an :class:`.AsyncIterator` that enables receiving your guilds.
.. note::
Using this, you will only receive :attr:`.Guild.owner`, :attr:`.Guild.icon`,
:attr:`.Guild.id`, and :attr:`.Guild.name` per :class:`.Guild`.
.. note::
This method is an API call. For general usage, consider :attr:`guilds` instead.
Examples
---------
Usage ::
async for guild in client.fetch_guilds(limit=150):
print(guild.name)
Flattening into a list ::
guilds = await client.fetch_guilds(limit=150).flatten()
# guilds is now a list of Guild...
All parameters are optional.
Parameters
-----------
limit: Optional[:class:`int`]
The number of guilds to retrieve.
If ``None``, it retrieves every guild you have access to. Note, however,
that this would make it a slow operation.
Defaults to ``100``.
before: Union[:class:`.abc.Snowflake`, :class:`datetime.datetime`]
Retrieves guilds before this date or object.
If a datetime is provided, it is recommended to use a UTC aware datetime.
If the datetime is naive, it is assumed to be local time.
after: Union[:class:`.abc.Snowflake`, :class:`datetime.datetime`]
Retrieve guilds after this date or object.
If a datetime is provided, it is recommended to use a UTC aware datetime.
If the datetime is naive, it is assumed to be local time.
Raises
------
:exc:`.HTTPException`
Getting the guilds failed.
Yields
--------
:class:`.Guild`
The guild with the guild data parsed.
"""
return GuildIterator(self, limit=limit, before=before, after=after)
async def fetch_template(self, code: Union[Template, str]) -> Template:
"""|coro|
Gets a :class:`.Template` from a discord.new URL or code.
Parameters
-----------
code: Union[:class:`.Template`, :class:`str`]
The Discord Template Code or URL (must be a discord.new URL).
Raises
-------
:exc:`.NotFound`
The template is invalid.
:exc:`.HTTPException`
Getting the template failed.
Returns
--------
:class:`.Template`
The template from the URL/code.
"""
code = utils.resolve_template(code)
data = await self.http.get_template(code)
return Template(data=data, state=self._connection) # type: ignore
async def fetch_guild(self, guild_id: int) -> Guild:
"""|coro|
Retrieves a :class:`.Guild` from an ID.
.. note::
Using this, you will **not** receive :attr:`.Guild.channels`, :attr:`.Guild.members`,
:attr:`.Member.activity` and :attr:`.Member.voice` per :class:`.Member`.
.. note::
This method is an API call. For general usage, consider :meth:`get_guild` instead.
Parameters
-----------
guild_id: :class:`int`
The guild's ID to fetch from.
Raises
------
:exc:`.Forbidden`
You do not have access to the guild.
:exc:`.HTTPException`
Getting the guild failed.
Returns
--------
:class:`.Guild`
The guild from the ID.
"""
data = await self.http.get_guild(guild_id)
return Guild(data=data, state=self._connection)
async def create_guild(
self,
*,
name: str,
region: Union[VoiceRegion, str] = VoiceRegion.us_west,
icon: bytes = MISSING,
code: str = MISSING,
) -> Guild:
"""|coro|
Creates a :class:`.Guild`.
Bot accounts in more than 10 guilds are not allowed to create guilds.
Parameters
----------
name: :class:`str`
The name of the guild.
region: :class:`.VoiceRegion`
The region for the voice communication server.
Defaults to :attr:`.VoiceRegion.us_west`.
icon: Optional[:class:`bytes`]
The :term:`py:bytes-like object` representing the icon. See :meth:`.ClientUser.edit`
for more details on what is expected.
code: :class:`str`
The code for a template to create the guild with.
.. versionadded:: 1.4
Raises
------
:exc:`.HTTPException`
Guild creation failed.
:exc:`.InvalidArgument`
Invalid icon image format given. Must be PNG or JPG.
Returns
-------
:class:`.Guild`
The guild created. This is not the same guild that is
added to cache.
"""
if icon is not MISSING:
icon_base64 = utils._bytes_to_base64_data(icon)
else:
icon_base64 = None
region_value = str(region)
if code:
data = await self.http.create_from_template(code, name, region_value, icon_base64)
else:
data = await self.http.create_guild(name, region_value, icon_base64)
return Guild(data=data, state=self._connection)
async def fetch_stage_instance(self, channel_id: int) -> StageInstance:
"""|coro|
Gets a :class:`.StageInstance` for a stage channel id.
.. versionadded:: 2.0
Parameters
-----------
channel_id: :class:`int`
The stage channel ID.
Raises
-------
:exc:`.NotFound`
The stage instance or channel could not be found.
:exc:`.HTTPException`
Getting the stage instance failed.
Returns
--------
:class:`.StageInstance`
The stage instance from the stage channel ID.
"""
data = await self.http.get_stage_instance(channel_id)
guild = self.get_guild(int(data['guild_id']))
return StageInstance(guild=guild, state=self._connection, data=data) # type: ignore
# Invite management
async def fetch_invite(self, url: Union[Invite, str], *, with_counts: bool = True, with_expiration: bool = True) -> Invite:
"""|coro|
Gets an :class:`.Invite` from a discord.gg URL or ID.
.. note::
If the invite is for a guild you have not joined, the guild and channel
attributes of the returned :class:`.Invite` will be :class:`.PartialInviteGuild` and
:class:`.PartialInviteChannel` respectively.
Parameters
-----------
url: Union[:class:`.Invite`, :class:`str`]
The Discord invite ID or URL (must be a discord.gg URL).
with_counts: :class:`bool`
Whether to include count information in the invite. This fills the
:attr:`.Invite.approximate_member_count` and :attr:`.Invite.approximate_presence_count`
fields.
with_expiration: :class:`bool`
Whether to include the expiration date of the invite. This fills the
:attr:`.Invite.expires_at` field.
.. versionadded:: 2.0
Raises
-------
:exc:`.NotFound`
The invite has expired or is invalid.
:exc:`.HTTPException`
Getting the invite failed.
Returns
--------
:class:`.Invite`
The invite from the URL/ID.
"""
invite_id = utils.resolve_invite(url)
data = await self.http.get_invite(invite_id, with_counts=with_counts, with_expiration=with_expiration)
return Invite.from_incomplete(state=self._connection, data=data)
async def delete_invite(self, invite: Union[Invite, str]) -> None:
"""|coro|
Revokes an :class:`.Invite`, URL, or ID to an invite.
You must have the :attr:`~.Permissions.manage_channels` permission in
the associated guild to do this.
Parameters
----------
invite: Union[:class:`.Invite`, :class:`str`]
The invite to revoke.
Raises
-------
:exc:`.Forbidden`
You do not have permissions to revoke invites.
:exc:`.NotFound`
The invite is invalid or expired.
:exc:`.HTTPException`
Revoking the invite failed.
"""
invite_id = utils.resolve_invite(invite)
await self.http.delete_invite(invite_id)
# Miscellaneous stuff
async def fetch_widget(self, guild_id: int) -> Widget:
"""|coro|
Gets a :class:`.Widget` from a guild ID.
.. note::
The guild must have the widget enabled to get this information.
Parameters
-----------
guild_id: :class:`int`
The ID of the guild.
Raises
-------
:exc:`.Forbidden`
The widget for this guild is disabled.
:exc:`.HTTPException`
Retrieving the widget failed.
Returns
--------
:class:`.Widget`
The guild's widget.
"""
data = await self.http.get_widget(guild_id)
return Widget(state=self._connection, data=data)
async def application_info(self) -> AppInfo:
"""|coro|
Retrieves the bot's application information.
Raises
-------
:exc:`.HTTPException`
Retrieving the information failed somehow.
Returns
--------
:class:`.AppInfo`
The bot's application information.
"""
data = await self.http.application_info()
if 'rpc_origins' not in data:
data['rpc_origins'] = None
return AppInfo(self._connection, data)
async def fetch_user(self, user_id: int) -> User:
"""|coro|
Retrieves a :class:`~discord.User` based on their ID.
You do not have to share any guilds with the user to get this information,
however, many operations do require that you do.
.. note::
This method is an API call. If you have :attr:`discord.Intents.members` and member cache enabled, consider :meth:`get_user` instead.
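Example
---------
An illustrative sketch (the ID below is only a placeholder):
.. code-block:: python3
    user = await client.fetch_user(85309593344815104)
    print(user.name)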
Parameters
-----------
user_id: :class:`int`
The user's ID to fetch from.
Raises
-------
:exc:`.NotFound`
A user with this ID does not exist.
:exc:`.HTTPException`
Fetching the user failed.
Returns
--------
:class:`~discord.User`
The user you requested.
"""
data = await self.http.get_user(user_id)
return User(state=self._connection, data=data)
async def fetch_channel(self, channel_id: int) -> Union[GuildChannel, PrivateChannel, Thread]:
"""|coro|
Retrieves a :class:`.abc.GuildChannel`, :class:`.abc.PrivateChannel`, or :class:`.Thread` with the specified ID.
.. note::
This method is an API call. For general usage, consider :meth:`get_channel` instead.
.. versionadded:: 1.2
Raises
-------
:exc:`.InvalidData`
An unknown channel type was received from Discord.
:exc:`.HTTPException`
Retrieving the channel failed.
:exc:`.NotFound`
Invalid Channel ID.
:exc:`.Forbidden`
You do not have permission to fetch this channel.
Returns
--------
Union[:class:`.abc.GuildChannel`, :class:`.abc.PrivateChannel`, :class:`.Thread`]
The channel from the ID.
"""
data = await self.http.get_channel(channel_id)
factory, ch_type = _threaded_channel_factory(data['type'])
if factory is None:
raise InvalidData('Unknown channel type {type} for channel ID {id}.'.format_map(data))
if ch_type in (ChannelType.group, ChannelType.private):
channel = factory(me=self.user, data=data, state=self._connection)
else:
guild_id = int(data['guild_id'])
guild = self.get_guild(guild_id) or Object(id=guild_id)
channel = factory(guild=guild, state=self._connection, data=data)
return channel
async def fetch_webhook(self, webhook_id: int) -> Webhook:
"""|coro|
Retrieves a :class:`.Webhook` with the specified ID.
Raises
--------
:exc:`.HTTPException`
Retrieving the webhook failed.
:exc:`.NotFound`
Invalid webhook ID.
:exc:`.Forbidden`
You do not have permission to fetch this webhook.
Returns
---------
:class:`.Webhook`
The webhook you requested.
"""
data = await self.http.get_webhook(webhook_id)
return Webhook.from_state(data, state=self._connection)
async def create_dm(self, user: Snowflake) -> DMChannel:
"""|coro|
Creates a :class:`.DMChannel` with this user.
This should be rarely called, as this is done transparently for most
people.
.. versionadded:: 2.0
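Example
---------
A minimal illustrative sketch (``user`` is any :class:`~discord.abc.Snowflake` you already have, e.g. a member):
.. code-block:: python3
    channel = await client.create_dm(user)
    await channel.send('Hello!')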
Parameters
-----------
user: :class:`~discord.abc.Snowflake`
The user to create a DM with.
Returns
-------
:class:`.DMChannel`
The channel that was created.
"""
state = self._connection
found = state._get_private_channel_by_user(user.id)
if found:
return found
data = await state.http.start_private_message(user.id)
return state.add_dm_channel(data)
def add_view(self, view: View, *, message_id: Optional[int] = None) -> None:
"""Registers a :class:`~discord.ui.View` for persistent listening.
This method should be used for when a view is comprised of components
that last longer than the lifecycle of the program.
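Example
---------
A minimal illustrative sketch (``MyPersistentView`` is a placeholder for a view created with ``timeout=None`` whose items all set a ``custom_id``):
.. code-block:: python3
    client.add_view(MyPersistentView())
    # attach to a known message so message update events refresh the stored view
    client.add_view(MyPersistentView(), message_id=123456789012345678)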
Parameters
------------
view: :class:`discord.ui.View`
The view to register for dispatching.
message_id: Optional[:class:`int`]
The message ID that the view is attached to. This is currently used to
refresh the view's state during message update events. If not given
then message update events are not propagated for the view.
Raises
-------
TypeError
A view was not passed.
ValueError
The view is not persistent. A persistent view has no timeout
and all of its components have an explicitly provided custom_id.
"""
if not isinstance(view, View):
raise TypeError(f'expected an instance of View not {view.__class__!r}')
if not view.is_persistent():
raise ValueError('View is not persistent. Items need to have a custom_id set and View must have no timeout')
self._connection.store_view(view, message_id)
@property
def persistent_views(self) -> Sequence[View]:
"""Sequence[:class:`.View`]: A sequence of persistent views added to the client."""
return self._connection.persistent_views
|
from __future__ import absolute_import, division, print_function, unicode_literals
from wowp.actors.special import Splitter, Chain
from wowp.schedulers import NaiveScheduler
from wowp.actors import FuncActor
from wowp.util import ConstructorWrapper
def test_splitter():
splitter = Splitter(multiplicity=2, inport_name="x")
assert len(splitter.outports) == 2
scheduler = NaiveScheduler()
for i in range(0, 10):
scheduler.put_value(splitter.inports.x, i)
scheduler.execute()
x1_all = list(splitter.outports["x_1"].pop_all())
x2_all = list(splitter.outports["x_2"].pop_all())
print("x1:", x1_all)
print("x2:", x2_all)
assert [0, 2, 4, 6, 8] == x1_all
assert [1, 3, 5, 7, 9] == x2_all
def double_me(x):
return x * 2
def test_chain():
func_generator = ConstructorWrapper(FuncActor, double_me)
chain = Chain("func_chain", [func_generator, func_generator])
wf = chain.get_workflow()
res = wf(inp=4)
assert res["out"].pop() == 16
res = wf(inp=2)
assert res["out"].pop() == 8
res = wf(inp="a")
assert res["out"].pop() == "aaaa"
|
# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import shutil
import logging
import xmltodict
import copy
import re
from codecs import open
from os import getcwd
from os.path import basename, join, normpath
from collections import OrderedDict
from project_generator_definitions.definitions import ProGenDef
from .tool import Tool, Builder, Exporter
from ..util import SOURCE_KEYS
logger = logging.getLogger('progen.tools.uvision')
class uVisionDefinitions():
debuggers = {
'ulink2-me': {
'uvproj': {
'TargetDlls': {
'Driver': 'BIN\\UL2CM3.dll',
},
'Utilities': {
'Flash2': 'BIN\\UL2CM3.DLL',
},
},
'uvoptx' : {
'DebugOpt' : {
'nTsel' : '1',
'pMon': 'BIN\\UL2CM3.DLL',
},
'SetRegEntry' : {
'Key' : 'UL2CM3',
},
},
},
'cmsis-dap': {
'uvproj': {
'TargetDlls': {
'Driver': 'BIN\\CMSIS_AGDI.dll',
},
'Utilities': {
'Flash2': 'BIN\\CMSIS_AGDI.dll',
},
},
'uvoptx' : {
'DebugOpt' : {
'nTsel' : '12',
'pMon': 'BIN\\CMSIS_AGDI.dll',
},
'SetRegEntry' : {
'Key' : 'CMSIS_AGDI',
},
},
},
'j-link': {
'uvproj': {
'TargetDlls': {
'Driver': 'Segger\\JL2CM3.dll',
},
'Utilities': {
'Flash2': 'Segger\\JL2CM3.dll',
},
},
'uvoptx' : {
'DebugOpt' : {
'nTsel' : '6',
'pMon': 'Segger\\JL2CM3.dll',
},
'SetRegEntry' : {
'Key' : 'JL2CM3',
},
},
},
'ulink-pro': {
'uvproj': {
'TargetDlls': {
'Driver': 'BIN\\ULP2CM3.dll',
},
'Utilities': {
'Flash2': 'BIN\\ULP2CM3.dll',
},
},
'uvoptx' : {
'DebugOpt' : {
'nTsel' : '7',
'pMon': 'BIN\\ULP2CM3.DLL',
},
'SetRegEntry' : {
'Key' : 'ULP2CM3',
},
},
},
'st-link': {
'uvproj': {
'TargetDlls': {
'Driver': 'STLink\\ST-LINKIII-KEIL_SWO.dll',
},
'Utilities': {
'Flash2': 'STLink\\ST-LINKIII-KEIL_SWO.dll',
},
},
'uvoptx' : {
'DebugOpt' : {
'nTsel' : '11',
'pMon': 'STLink\\ST-LINKIII-KEIL_SWO.dll',
},
'SetRegEntry' : {
'Key' : 'ST-LINKIII-KEIL_SWO',
},
},
},
'nu-link': {
'uvproj': {
'TargetDlls': {
'Driver': 'BIN\\Nu_Link.dll',
},
'Utilities': {
'Flash2': 'BIN\\Nu_Link.dll',
},
},
'uvoptx' : {
'DebugOpt' : {
'nTsel' : '9',
'pMon': 'NULink\\Nu_Link.dll',
},
'SetRegEntry' : {
'Key' : 'Nu_Link',
},
},
},
}
# use cmsis-dap debugger as default
debuggers_default = 'cmsis-dap'
class Uvision(Tool, Builder, Exporter):
optimization_options = ['O0', 'O1', 'O2', 'O3']
file_types = {'cpp': 8, 'c': 1, 's': 2, 'obj': 3, 'o': 3, 'lib': 4, 'ar': 4, 'h': 5}
# flags mapping to uvision uvproj dics
# for available flags, check armcc/armasm/armlink command line guide
# this does not provide all options within a project, most usable options are
# exposed via command line, the rest is covered via template project files
FLAGS_TO_UVISION = {
'asm_flags': 'Aads',
'c_flags': 'Cads',
'cxx_flags': 'Cads',
'ld_flags': 'LDads',
}
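# Illustrative mapping (the '--gnu' flag below is only an example value): a project
# defining misc: {'c_flags': ['--gnu']} ends up with ' --gnu' appended to
# Cads/VariousControls/MiscControls in _uvproj_set_TargetArmAds, whereas 'ld_flags'
# items are appended to LDads/Misc instead (see the special case there).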
ERRORLEVEL = {
0: 'success (0 warnings, 0 errors)',
1: 'warnings',
2: 'errors',
3: 'fatal errors',
11: "can't write to project file",
12: 'device error',
13: 'error writing',
15: 'error reading xml file',
}
SUCCESSVALUE = 0
WARNVALUE = 1
generated_project = {
'path': '',
'files': {
'uvproj': '',
}
}
def __init__(self, workspace, env_settings):
self.definitions = uVisionDefinitions()
# workspace or project
self.workspace = workspace
self.env_settings = env_settings
self.uvproj_file = join(self.TEMPLATE_DIR, "uvision.uvproj")
self.uvmpw_file = join(self.TEMPLATE_DIR, "uvision.uvmpw")
self.uvoptx_file = join(self.TEMPLATE_DIR, "uvision.uvoptx")
@staticmethod
def get_toolnames():
return ['uvision']
@staticmethod
def get_toolchain():
return 'uvision'
def _expand_one_file(self, source, new_data, extension):
ordered = OrderedDict()
ordered["FileType"] = self.file_types[extension]
ordered["FileName"] = basename(source)
ordered["FilePath"] = source
return ordered
def _normalize_mcu_def(self, mcu_def):
for k, v in mcu_def['TargetOption'].items():
mcu_def['TargetOption'][k] = v[0]
def _uvproj_clean_xmldict(self, uvproj_dic):
for k, v in uvproj_dic.items():
if v is None:
uvproj_dic[k] = ''
def _uvproj_set_CommonProperty(self, uvproj_dic, project_dic):
self._uvproj_clean_xmldict(uvproj_dic)
def _uvproj_set_DebugOption(self, uvproj_dic, project_dic):
self._uvproj_clean_xmldict(uvproj_dic)
self._uvproj_clean_xmldict(uvproj_dic['SimDlls'])
self._uvproj_clean_xmldict(uvproj_dic['Simulator'])
self._uvproj_clean_xmldict(uvproj_dic['Target'])
self._uvproj_clean_xmldict(uvproj_dic['TargetDlls'])
def _uvproj_set_DllOption(self, uvproj_dic, project_dic):
self._uvproj_clean_xmldict(uvproj_dic)
def _uvproj_set_TargetArmAds(self, uvproj_dic, project_dic):
self._uvproj_clean_xmldict(uvproj_dic['Aads'])
self._uvproj_clean_xmldict(uvproj_dic['Aads']['VariousControls'])
self._uvproj_clean_xmldict(uvproj_dic['ArmAdsMisc'])
self._uvproj_clean_xmldict(uvproj_dic['Cads'])
self._uvproj_clean_xmldict(uvproj_dic['Cads']['VariousControls'])
self._uvproj_clean_xmldict(uvproj_dic['LDads'])
uvproj_dic['LDads']['ScatterFile'] = project_dic['linker_file']
uvproj_dic['Cads']['VariousControls']['IncludePath'] = '; '.join(project_dic['include_paths'])
uvproj_dic['Cads']['VariousControls']['Define'] = ', '.join(project_dic['macros'])
if project_dic['macros']:
uvproj_dic['Aads']['VariousControls']['MiscControls'] = '--cpreproc --cpreproc_opts=-D' + ',-D'.join(project_dic['macros'])
for misc_keys in project_dic['misc'].keys():
# ld_flags don't follow the same structure as the asm/c flags: they are appended to 'Misc' rather than 'VariousControls'/'MiscControls'
if misc_keys == 'ld_flags':
for item in project_dic['misc'][misc_keys]:
uvproj_dic[self.FLAGS_TO_UVISION[misc_keys]]['Misc'] += ' ' + item
else:
for item in project_dic['misc'][misc_keys]:
uvproj_dic[self.FLAGS_TO_UVISION[misc_keys]]['VariousControls']['MiscControls'] += ' ' + item
def _uvproj_set_TargetCommonOption(self, uvproj_dic, project_dic):
self._uvproj_clean_xmldict(uvproj_dic)
self._uvproj_clean_xmldict(uvproj_dic['AfterMake'])
self._uvproj_clean_xmldict(uvproj_dic['BeforeCompile'])
self._uvproj_clean_xmldict(uvproj_dic['BeforeMake'])
self._uvproj_clean_xmldict(uvproj_dic['TargetStatus'])
uvproj_dic['OutputDirectory'] = project_dic['build_dir']
uvproj_dic['OutputName'] = project_dic['name']
uvproj_dic['CreateExecutable'] = 1 if project_dic['output_type'] == 'exe' else 0
uvproj_dic['CreateLib'] = 1 if project_dic['output_type'] == 'lib' else 0
def _uvproj_set_Utilities(self, uvproj_dic, project_dic):
self._uvproj_clean_xmldict(uvproj_dic)
def _uvproj_files_set(self, uvproj_dic, project_dic):
uvproj_dic['Project']['Targets']['Target']['Groups'] = OrderedDict()
uvproj_dic['Project']['Targets']['Target']['Groups']['Group'] = []
i = 0
for group_name, files in project_dic['groups'].items():
# Why OrderedDict() - a uVision project requires a specific order. GroupName must come before Files,
# otherwise uVision does not pick up any files. The same applies to other attributes, such as VariousControls.
# Be aware that element order matters throughout this exporter.
group = OrderedDict()
group['GroupName'] = group_name
# group['Files'] = {}
group['Files'] = {'File': []}
uvproj_dic['Project']['Targets']['Target']['Groups']['Group'].append(group)
for file in files:
uvproj_dic['Project']['Targets']['Target']['Groups']['Group'][i]['Files']['File'].append(file)
files = uvproj_dic['Project']['Targets']['Target']['Groups']['Group'][i]['Files']['File']
uvproj_dic['Project']['Targets']['Target']['Groups']['Group'][i]['Files']['File'] = sorted(files, key=lambda x: x['FileName'].lower())
i += 1
def _generate_uvmpw_file(self):
uvmpw_dic = xmltodict.parse(open(self.uvmpw_file, "rb"))
uvmpw_dic['ProjectWorkspace']['project'] = []
for project in self.workspace['projects']:
# Check how far the project is from the root and from the workspace. If they don't match,
# compute the relative path for the project and inject it into the workspace
path_project = os.path.dirname(project['files']['uvproj'])
path_workspace = os.path.dirname(self.workspace['settings']['path'] + '\\')
destination = os.path.join(os.path.relpath(self.env_settings.root, path_project), project['files']['uvproj'])
if path_project != path_workspace:
destination = os.path.join(os.path.relpath(self.env_settings.root, path_workspace), project['files']['uvproj'])
uvmpw_dic['ProjectWorkspace']['project'].append({'PathAndName': destination})
# generate the file
uvmpw_xml = xmltodict.unparse(uvmpw_dic, pretty=True)
project_path, uvmpw = self.gen_file_raw(uvmpw_xml, '%s.uvmpw' % self.workspace['settings']['name'], self.workspace['settings']['path'])
return project_path, uvmpw
def _set_target(self, expanded_dic, uvproj_dic, tool_name):
pro_def = ProGenDef(tool_name)
if not pro_def.is_supported(expanded_dic['target'].lower()):
raise RuntimeError("Target %s is not supported. Please add them to https://github.com/project-generator/project_generator_definitions" % expanded_dic['target'].lower())
mcu_def_dic = pro_def.get_tool_definition(expanded_dic['target'].lower())
if not mcu_def_dic:
raise RuntimeError(
"Target definitions were not found for %s. Please add them to https://github.com/project-generator/project_generator_definitions" % expanded_dic['target'].lower())
logger.debug("Mcu definitions: %s" % mcu_def_dic)
uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['Device'] = mcu_def_dic['TargetOption']['Device'][0]
uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['DeviceId'] = mcu_def_dic['TargetOption']['DeviceId'][0]
try:
uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['Vendor'] = mcu_def_dic['TargetOption']['Vendor'][0]
uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['Cpu'] = mcu_def_dic['TargetOption']['Cpu'][0]
uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['FlashDriverDll'] = str(mcu_def_dic['TargetOption']['FlashDriverDll'][0])
uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['SFDFile'] = mcu_def_dic['TargetOption']['SFDFile'][0]
uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['RegisterFile'] = mcu_def_dic['TargetOption']['RegisterFile'][0]
except KeyError:
pass
# overwrite the template if target has defined debugger
# later progen can overwrite this if debugger is set in project data
try:
debugger_name = pro_def.get_debugger(expanded_dic['target'])['name']
uvproj_dic['Project']['Targets']['Target']['TargetOption']['DebugOption']['TargetDlls']['Driver'] = self.definitions.debuggers[debugger_name]['uvproj']['TargetDlls']['Driver']
uvproj_dic['Project']['Targets']['Target']['TargetOption']['Utilities']['Flash2'] = self.definitions.debuggers[debugger_name]['uvproj']['Utilities']['Flash2']
except (TypeError, KeyError) as err:
pass
# Support new device packs
if 'PackID' in mcu_def_dic['TargetOption']:
if tool_name != 'uvision5':
# using software packs requires uVision 5
logger.info("The target might not be supported in %s; software packs require uvision5" % tool_name)
uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['PackID'] = mcu_def_dic['TargetOption']['PackID'][0]
def _uvoptx_set_debugger(self, expanded_dic, uvoptx_dic, tool_name):
pro_def = ProGenDef(tool_name)
if not pro_def.is_supported(expanded_dic['target'].lower()):
raise RuntimeError("Target %s is not supported. Please add them to https://github.com/project-generator/project_generator_definitions" % expanded_dic['target'].lower())
mcu_def_dic = pro_def.get_tool_definition(expanded_dic['target'].lower())
if not mcu_def_dic:
raise RuntimeError(
"Target definitions were not found for %s. Please add them to https://github.com/project-generator/project_generator_definitions" % expanded_dic['target'].lower())
logger.debug("Mcu definitions: %s" % mcu_def_dic)
# set the target name and the FlashDriverDll registry entry to match the uvprojx file
try:
uvoptx_dic['ProjectOpt']['Target']['TargetName'] = expanded_dic['name']
uvoptx_dic['ProjectOpt']['Target']['TargetOption']['TargetDriverDllRegistry']['SetRegEntry']['Name'] = str(mcu_def_dic['TargetOption']['FlashDriverDll'][0])
except KeyError:
return
# load debugger from target dictionary or use default debugger
try:
debugger_dic = pro_def.get_debugger(expanded_dic['target'])
if debugger_dic is None:
debugger_name = self.definitions.debuggers_default
else:
debugger_name = debugger_dic['name']
uvoptx_dic['ProjectOpt']['Target']['TargetOption']['DebugOpt']['nTsel'] = self.definitions.debuggers[debugger_name]['uvoptx']['DebugOpt']['nTsel']
uvoptx_dic['ProjectOpt']['Target']['TargetOption']['DebugOpt']['pMon'] = self.definitions.debuggers[debugger_name]['uvoptx']['DebugOpt']['pMon']
uvoptx_dic['ProjectOpt']['Target']['TargetOption']['TargetDriverDllRegistry']['SetRegEntry']['Key'] = self.definitions.debuggers[debugger_name]['uvoptx']['SetRegEntry']['Key']
except KeyError:
raise RuntimeError("Debugger %s is not supported" % expanded_dic['debugger'])
def _export_single_project(self, tool_name):
expanded_dic = self.workspace.copy()
groups = self._get_groups(self.workspace)
expanded_dic['groups'] = {}
for group in groups:
expanded_dic['groups'][group] = []
# get relative path and fix all paths within a project
self._iterate(self.workspace, expanded_dic)
expanded_dic['build_dir'] = '.\\' + expanded_dic['build_dir'] + '\\'
# generic tool template specified or project
if expanded_dic['template']:
for template in expanded_dic['template']:
template = join(getcwd(), template)
if os.path.splitext(template)[1] == '.uvproj' or os.path.splitext(template)[1] == '.uvprojx' or \
re.match(r'.*\.uvproj\.tmpl$', template) or re.match(r'.*\.uvprojx\.tmpl$', template):
try:
uvproj_dic = xmltodict.parse(open(template, encoding="utf8").read())
except IOError:
logger.info("Template file %s not found" % template)
return None, None
else:
logger.info("Template file %s contains unknown template extension (.uvproj/x are valid). Using default one" % template)
uvproj_dic = xmltodict.parse(open(self.uvproj_file, "rb"))
elif 'uvision' in self.env_settings.templates.keys():
# template overrides what is set in the yaml files
for template in self.env_settings.templates['uvision']:
template = join(getcwd(), template)
if os.path.splitext(template)[1] == '.uvproj' or os.path.splitext(template)[1] == '.uvprojx' or \
re.match(r'.*\.uvproj\.tmpl$', template) or re.match(r'.*\.uvprojx\.tmpl$', template):
try:
uvproj_dic = xmltodict.parse(open(template, encoding="utf8").read())
except IOError:
logger.info("Template file %s not found. Using default template" % template)
uvproj_dic = xmltodict.parse(open(self.uvproj_file, "rb"))
else:
logger.info("Template file %s contains unknown template extension (.uvproj/x are valid). Using default one" % template)
uvproj_dic = xmltodict.parse(open(self.uvproj_file))
else:
uvproj_dic = xmltodict.parse(open(self.uvproj_file, "rb"))
try:
uvproj_dic['Project']['Targets']['Target']['TargetName'] = expanded_dic['name']
except KeyError:
raise RuntimeError("The uvision template is not valid .uvproj file")
self._uvproj_files_set(uvproj_dic, expanded_dic)
self._uvproj_set_CommonProperty(
uvproj_dic['Project']['Targets']['Target']['TargetOption']['CommonProperty'], expanded_dic)
self._uvproj_set_DebugOption(
uvproj_dic['Project']['Targets']['Target']['TargetOption']['DebugOption'], expanded_dic)
self._uvproj_set_DllOption(
uvproj_dic['Project']['Targets']['Target']['TargetOption']['DllOption'], expanded_dic)
self._uvproj_set_TargetArmAds(
uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetArmAds'], expanded_dic)
self._uvproj_set_TargetCommonOption(
uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption'], expanded_dic)
self._uvproj_set_Utilities(
uvproj_dic['Project']['Targets']['Target']['TargetOption']['Utilities'], expanded_dic)
# set target only if defined, otherwise use from template/default one
if tool_name == 'uvision5':
extension = 'uvprojx'
uvproj_dic['Project']['SchemaVersion'] = '2.1'
else:
extension = 'uvproj'
uvproj_dic['Project']['SchemaVersion'] = '1.1'
if expanded_dic['target']:
self._set_target(expanded_dic, uvproj_dic, tool_name)
# load debugger
if expanded_dic['debugger']:
try:
uvproj_dic['Project']['Targets']['Target']['TargetOption']['DebugOption']['TargetDlls']['Driver'] = self.definitions.debuggers[expanded_dic['debugger']]['uvproj']['TargetDlls']['Driver']
uvproj_dic['Project']['Targets']['Target']['TargetOption']['Utilities']['Flash2'] = self.definitions.debuggers[expanded_dic['debugger']]['uvproj']['Utilities']['Flash2']
except KeyError:
raise RuntimeError("Debugger %s is not supported" % expanded_dic['debugger'])
# Project file
uvproj_xml = xmltodict.unparse(uvproj_dic, pretty=True)
project_path, uvproj = self.gen_file_raw(uvproj_xml, '%s.%s' % (expanded_dic['name'], extension), expanded_dic['output_dir']['path'])
uvoptx = None
# generic tool template specified
uvoptx_dic = xmltodict.parse(open(self.uvoptx_file, "rb"))
self._uvoptx_set_debugger(expanded_dic, uvoptx_dic, tool_name)
# set target only if defined, otherwise use from template/default one
if tool_name == 'uvision5':
extension = 'uvoptx'
else:
extension = 'uvopt'
# Project file
uvoptx_xml = xmltodict.unparse(uvoptx_dic, pretty=True)
project_path, uvoptx = self.gen_file_raw(uvoptx_xml, '%s.%s' % (expanded_dic['name'], extension), expanded_dic['output_dir']['path'])
return project_path, [uvproj, uvoptx]
def export_workspace(self):
path, workspace = self._generate_uvmpw_file()
return path, [workspace]
def export_project(self):
path, files = self._export_single_project('uvision') #todo: uvision will switch to uv4
generated_projects = copy.deepcopy(self.generated_project)
generated_projects['path'] = path
generated_projects['files']['uvproj'] = files[0]
return generated_projects
def get_generated_project_files(self):
return {'path': self.workspace['path'], 'files': [self.workspace['files']['uvproj']]}
def _build_project(self, tool_name, extension):
# > UV4 -b [project_path]
path = join(self.env_settings.root, self.workspace['files'][extension])
if path.split('.')[-1] != extension:
path = path + extension
if not os.path.exists(path):
logger.debug("The file: %s does not exists, exported prior building?" % path)
return -1
logger.debug("Building uVision project: %s" % path)
build_log_path = join(os.path.dirname(path),'build','build_log.txt')
args = [self.env_settings.get_env_settings(tool_name), '-r', '-j0', '-o', build_log_path, path]
logger.debug(args)
try:
ret_code = None
ret_code = subprocess.call(args)
except:
logger.error(
"Error whilst calling UV4: '%s'. Please set uvision path in the projects.yaml file." % self.env_settings.get_env_settings('uvision'))
return -1
else:
if ret_code != self.SUCCESSVALUE and ret_code != self.WARNVALUE:
# Seems like something went wrong.
logger.error("Project: %s build failed with the status: %s" % (self.workspace['files'][extension], self.ERRORLEVEL.get(ret_code, "Unknown")))
return -1
else:
logger.info("Project: %s build succeeded with the status: %s" % (self.workspace['files'][extension], self.ERRORLEVEL.get(ret_code, "Unknown")))
return 0
def build_project(self):
return self._build_project('uvision', 'uvproj')
class Uvision5(Uvision):
generated_project = {
'path': '',
'files': {
'uvprojx': '',
'uvoptx': '',
}
}
def __init__(self, workspace, env_settings):
super(Uvision5, self).__init__(workspace, env_settings)
@staticmethod
def get_toolnames():
return ['uvision5']
def export_project(self):
path, files = self._export_single_project('uvision5')
generated_projects = copy.deepcopy(self.generated_project)
generated_projects['path'] = path
generated_projects['files']['uvprojx'] = files[0]
generated_projects['files']['uvoptx'] = files[1]
return generated_projects
def get_generated_project_files(self):
return {'path': self.workspace['path'], 'files': [self.workspace['files']['uvprojx'], self.workspace['files']['uvoptx']]}
def build_project(self):
# tool_name stays 'uvision' because UV4.exe is still used to build uVision 5 projects
return self._build_project('uvision', 'uvprojx')
|
# Copyright (c) 2017-2020 Wenyi Tang.
# Author: Wenyi Tang
# Email: wenyitang@outlook.com
# Update: 2020 - 2 - 7
from importlib import import_module
from ..Backend import BACKEND
__all__ = [
'get_model',
'list_supported_models'
]
def get_model(name: str):
name = name.lower()
try:
if BACKEND == 'pytorch':
return import_module('.Models', 'VSR.Backend.Torch').get_model(name)
elif BACKEND == 'tensorflow':
return import_module('.Models', 'VSR.Backend.TF').get_model(name)
elif BACKEND == 'tensorflow2':
pass
except (KeyError, ImportError):
raise ImportError(f"Using {BACKEND}, can't find model {name}.")
def list_supported_models():
if BACKEND == 'pytorch':
return import_module('.Models', 'VSR.Backend.Torch').list_supported_models()
elif BACKEND == 'tensorflow':
return import_module('.Models', 'VSR.Backend.TF').list_supported_models()
elif BACKEND == 'tensorflow2':
pass
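# Illustrative usage sketch ('srcnn' is an assumed example name and the import path is
# assumed to be VSR.Model; what get_model returns depends on the active backend's registry):
#
#     from VSR.Model import get_model, list_supported_models
#     print(list_supported_models())
#     model_cls = get_model('srcnn')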
|
import argparse
import re
import logging
import requests
from typing import Iterator
from typing import List
LOGGER_NAME="advent"
def init_logging(is_verbose: bool):
"""
Creates standard logging configuration for the module logger (LOGGER_NAME)
"""
logger = logging.getLogger(LOGGER_NAME)
logger.setLevel(logging.DEBUG)
channel = logging.StreamHandler()
if is_verbose:
channel.setLevel(logging.DEBUG)
else:
channel.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(filename)s:%(lineno)s: %(message)s')
channel.setFormatter(formatter)
logger.addHandler(channel)
logger.debug("Initialized logging")
return logger
def get_input_from_file(filepath: str) -> Iterator[str]:
"""
Read the file from disk and return
an iterator of each line as a string
"""
logger = logging.getLogger(LOGGER_NAME)
logger.info("Reading input from %s", filepath)
with open(filepath, "r") as handle:
for line in handle:
line = line.strip()
yield line
def read_comma_separated_values(filepath: str) -> Iterator[str]:
logger = logging.getLogger(LOGGER_NAME)
logger.info("Reading comma separated values from %s", filepath)
return_list = []
for line in get_input_from_file(filepath=filepath):
line = line.strip()
values = line.split(",")
for value in values:
v = value.strip()
if v:
yield v
def read_comma_separated_ints(filepath: str) -> Iterator[int]:
values = read_comma_separated_values(filepath=filepath)
for item in values:
yield int(item)
def get_integers_from_file(filepath: str) -> Iterator[int]:
"""
Read the file from disk and return
an iterator of each line as an integer
"""
logger = logging.getLogger(LOGGER_NAME)
logger.info("Reading integers from %s", filepath)
for line in get_input_from_file(filepath=filepath):
yield int(line)
def line_to_parts(line) -> dict:
"""
#123 @ 3,2: 5x4
ID: #123
from_left: 3
from_top: 2
width: 5
height: 4
"""
m = re.match(r"(\w+) (\w+)", "Isaac Newton, physicist")
m = re.match(r"#(\d+) @ (\d+),(\d+): (\d+)x(\d+)", line)
return_dict = {
"id": m.group(1),
"from_left": m.group(2),
"from_top": m.group(3),
"width": m.group(4),
"height": m.group(5),
}
return return_dict
def binary_list_to_int(binary_list: List[str]) -> int:
"""
convert
["1", "0", "1"]
or [1, 0, 1]
to 5
"""
# convert to strings
binary_list = [str(x) for x in binary_list]
as_string = "".join(binary_list)
as_int= int(as_string, 2)
return as_int
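# e.g. binary_list_to_int(["1", "0", "1"]) == 5 and binary_list_to_int([1, 1, 0, 1]) == 13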
class SparseGrid():
def __init__(self):
self.cell_dict = {}
self.max_x = 0
self.max_y = 0
self._logger = logging.getLogger(LOGGER_NAME)
def add_line(self, x1, y1, x2, y2, value, only_horizontal):
# only horizontal and vertical lines
if not (x1 == x2 or y1 == y2) and only_horizontal:
self._logger.debug("Not a horizontal or vertical line")
return
min_x = min(x1, x2)
max_x = max(x1, x2)
min_y = min(y1, y2)
max_y = max(y1, y2)
# Adjust sparse grid metadata for printing the grid
if max_x > self.max_x:
self.max_x = max_x
self._logger.debug("Adjusting sparse grid max_x to %s", self.max_x)
if max_y > self.max_y:
self.max_y = max_y
self._logger.debug("Adjusting sparse grid max_y to %s", self.max_y)
# use range to get coordinates
x_list = list(range(min_x, max_x + 1))
y_list = list(range(min_y, max_y + 1))
# reverse the range if needed
if x1 < x2:
x_list.reverse()
if y1 < y2:
y_list.reverse()
# for vertical lines, duplicate the x value
# for each coordinate so zip works
if len(y_list) == 1:
for x in range(len(x_list)-1):
y_list.append(y_list[0])
# for horizontal lines, duplicate the y value
# for each coordinate so zip works
if len(x_list) == 1:
for y in range(len(y_list)-1):
x_list.append(x_list[0])
cells = list(zip(x_list, y_list))
for x, y in cells:
coordinate = f"{x}:{y}"
self.cell_dict.setdefault(coordinate, [])
self.cell_dict[coordinate].append(value)
def add_block(self, from_left_x: int, from_top_y: int, width: int, height: int, value: str):
if from_left_x + width > self.max_x:
self.max_x = from_left_x + width
print(f"Adjusting width to {self.max_x}")
if from_top_y + height > self.max_y:
self.max_y = from_top_y + height
print(f"Adjusting height to {self.max_y}")
for x in range(width):
for y in range(height):
true_x = x + from_left_x
true_y = y + from_top_y
coordinate = f"{true_x}:{true_y}"
self.cell_dict.setdefault(coordinate, [])
self.cell_dict[coordinate].append(value)
def evaluate_block(self, from_left_x: int, from_top_y: int, width: int, height: int, value: str) -> bool:
"""
Once the SparseGrid is populated, evaluate a block to
check that it does not overlap with any other block
"""
for x in range(width):
for y in range(height):
true_x = x + from_left_x
true_y = y + from_top_y
coordinate = f"{true_x}:{true_y}"
array = self.cell_dict[coordinate]
if len(array) != 1:
return False
return True
def get_num_overlapping_cells(self):
"""
return the number of cells with arrays
with more than one element
"""
num = 0
for coord, array in self.cell_dict.items():
if len(array) > 1:
num += 1
return num
def print(self):
for y in range(self.max_y + 1):
for x in range(self.max_x + 1):
coordinate = f"{x}:{y}"
array = self.cell_dict.get(coordinate)
if not array:
print(".", end='')
else:
print(len(array), end='')
print("")
def parse_args(argv=None):
"""
Parse command line args
"""
parser = argparse.ArgumentParser(description="Main Driver for Frivenmeld")
parser.add_argument('-v',
action="store_true",
dest="verbose",
required=False,
help="Debug output")
parser.add_argument('-t',
action="store_true",
dest="use_test_data",
required=False,
help="Use test data")
parser.add_argument('-d',
action="store_true",
dest="print_data",
required=False,
help="Just print out the data")
parser.add_argument("-yd",
dest="year_day",
help="YYYYDD the date to process data for")
results = parser.parse_args(argv)
return results
class Input():
def __init__(self, year: int, day: int, use_test_data: bool):
self._year = year
self._day = day
self._use_test_data = use_test_data
self._logger = logging.getLogger(LOGGER_NAME)
self._logger.info("Input year: %d day: %d test-data: %s", self._year, self._day, self._use_test_data)
def get_filepath(self):
if self._use_test_data:
return f"data/{self._year}/day/{self._day}/test-input.txt"
else:
return f"data/{self._year}/day/{self._day}/input.txt"
def get_raw(self) -> str:
filepath = self.get_filepath()
self._logger.info("Reading raw data from '%s'", filepath)
with open(filepath, "r") as handle:
data = handle.read()
return data
def get_lines(self) -> Iterator[str]:
filepath = self.get_filepath()
self._logger.info("Reading lines from '%s'", filepath)
with open(filepath, "r") as handle:
for line in handle:
line = line.strip()
yield line
def get_chars(self) -> List[str]:
"""
yield every character in the file, one at a time; a file containing
asd
fro
yields 'a', 's', 'd', 'f', 'r', 'o'
"""
for line in self.get_lines():
for char in line:
yield char
def get_ints(self) -> Iterator[int]:
for line in self.get_lines():
yield int(line)
def get_floats(self) -> Iterator[float]:
for line in self.get_lines():
yield float(line)
def get_comma_separated_values(self) -> Iterator[str]:
"""
note: skips empty values
"""
for line in self.get_lines():
line = line.strip()
values = line.split(",")
for value in values:
v = value.strip()
if v:
yield v
def get_comma_separated_ints(self) -> Iterator[int]:
values = self.get_comma_separated_values()
for item in values:
yield int(item)
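# Illustrative sketch tying the helpers together (the year/day values are placeholders):
#
#     args = parse_args()
#     logger = init_logging(args.verbose)
#     puzzle = Input(year=2021, day=3, use_test_data=args.use_test_data)
#     lines = list(puzzle.get_lines())
#     logger.info("Read %d lines", len(lines))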
|
import poplib
from ...utils import Timer
class EmailChecker(Timer):
'''WARNING: This uses POP3 and by default deletes the emails it reads!'''
username = None
password = None
server = None
port = None
on_mail = None
delete = None
def __init__(self, username, password, server, port=110, on_mail=None, delete=True, interval=60):
super(EmailChecker, self).__init__(interval=interval)
self.username = username
self.password = password
self.server = server
self.port = port
if on_mail is not None:
self.on_mail = on_mail
self.delete = delete
def target(self):
client = poplib.POP3(self.server, self.port)
client.user(self.username)
client.pass_(self.password)
count = client.stat()[0]
for i in range(count):
email = client.retr(i + 1)
data = [l.decode('utf-8') for l in email[1]]
sep = data.index(u'')
headers = {}
body = u''
# Headers
last = None
for line in data[:sep]:
if line[0] in (u' ', u'\t', u'\r', u'\n') and last is not None:
# Folded header continuation
headers[last] += line
else:
# Next header
name_separator = line.index(u':')
name = line[:name_separator]
value = line[name_separator + 2:]
headers[name] = value
last = name
# Body
body = u''.join(data[sep + 1:])
if self.on_mail(headers, body) or self.delete:
client.dele(i + 1)
client.quit()
def on_exception(self):
'''Sometimes the mail server doesn't respond in time, ignore the produced error and keep running.'''
return False
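# Illustrative usage sketch (host, credentials and the callback are placeholders, and it is
# assumed that the Timer base class provides a way to start the periodic check):
#
#     def handle_mail(headers, body):
#         print(headers.get('Subject'))
#         return True  # a truthy return value (or delete=True) deletes the message
#
#     checker = EmailChecker('user', 'secret', 'pop.example.com',
#                            on_mail=handle_mail, delete=False, interval=300)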
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import botocore
import boto3
import sys
import os
import json
import subprocess
import click
import StringIO
import gzip
from datetime import datetime
from textwrap import dedent
import mimetypes
TARGETS = ['infra', 'dev', 'int', 'prod']
mimetypes.init()
mimetypes.add_type('application/x-font-ttf', '.ttf')
mimetypes.add_type('application/x-font-opentype', '.otf')
mimetypes.add_type('application/vnd.ms-fontobject', '.eot')
mimetypes.add_type('application/json', '.json')
mimetypes.add_type('text/cache-manifest', '.appcache')
mimetypes.add_type('text/plain', '.txt')
NO_COMPRESS = [
'image/png',
'image/jpeg',
'image/x-icon',
'image/vnd.microsoft.icon',
'application/x-font-ttf',
'application/x-font-opentype',
'application/vnd.ms-fontobject']
project = os.environ.get('PROJECT', 'mf-geoadmin3')
def _get_bucket_name(deploy_target):
if project == 'mf-geoadmin3':
return 'mf-geoadmin3-%s-dublin' % deploy_target.lower()
else:
return 'mf-geoadmin4-%s-dublin' % deploy_target.lower()
def get_bucket_name(target):
if target in TARGETS:
return _get_bucket_name(target)
else:
return target
def local_git_last_commit(basedir):
try:
output = subprocess.check_output(('git rev-parse HEAD',), cwd=basedir, shell=True)
return output.strip()
except subprocess.CalledProcessError:
print('Not a git directory: %s' % basedir)
try:
with open(os.path.join(basedir, '.build-artefacts', 'last-commit-ref'), 'r') as f:
data = f.read()
return data
except IOError:
print('Error while reading \'last-commit-ref\' from %s' % basedir)
return None
def local_git_commit_short(basedir):
output = subprocess.check_output(('git rev-parse --short HEAD'), cwd=basedir, shell=True)
return output.strip()
def local_git_branch(basedir):
output = subprocess.check_output(('git rev-parse --abbrev-ref HEAD',), cwd=basedir, shell=True)
return output.strip()
def local_last_version(basedir):
try:
with open(os.path.join(basedir, '.build-artefacts', 'last-version'), 'r') as f:
data = f.read()
return data
except IOError as e:
print('Cannot find version: %s' % e)
return None
def _gzip_data(data):
out = None
infile = StringIO.StringIO()
try:
gzip_file = gzip.GzipFile(fileobj=infile, mode='w', compresslevel=5)
gzip_file.write(data)
gzip_file.close()
infile.seek(0)
out = infile.getvalue()
except:
out = None
finally:
infile.close()
return out
def _unzip_data(compressed):
inbuffer = StringIO.StringIO(compressed)
f = gzip.GzipFile(mode='rb', fileobj=inbuffer)
try:
data = f.read()
finally:
f.close()
return data
def save_to_s3(src, dest, bucket_name, cached=True, mimetype=None, break_on_error=False):
mimetype = get_file_mimetype(src)
try:
with open(src, 'r') as f:
data = f.read()
except EnvironmentError as e:
print('Failed to upload %s' % src)
print(str(e))
if break_on_error:
print("Exiting...")
sys.exit(1)
else:
return False
_save_to_s3(data, dest, mimetype, bucket_name, cached=cached)
def _save_to_s3(in_data, dest, mimetype, bucket_name, compress=True, cached=True):
data = in_data
compressed = False
content_encoding = None
cache_control = 'max-age=31536000, public'
extra_args = {}
if compress and mimetype not in NO_COMPRESS:
data = _gzip_data(in_data)
content_encoding = 'gzip'
compressed = True
if cached is False:
cache_control = 'max-age=0, must-revalidate, s-maxage=300'
extra_args['ACL'] = 'public-read'
extra_args['ContentType'] = mimetype
extra_args['CacheControl'] = cache_control
try:
print('Uploading to %s - %s, gzip: %s, cache headers: %s' %
(dest, mimetype, compressed, cached))
if compressed:
extra_args['ContentEncoding'] = content_encoding
if cached is False:
extra_args['Expires'] = datetime(1990, 1, 1)
extra_args['Metadata'] = {'Pragma': 'no-cache', 'Vary': '*'}
s3.Object(bucket_name, dest).put(Body=data, **extra_args)
except Exception as e:
print('Error while uploading %s: %s' % (dest, e))
def get_index_version(c):
version = None
p = re.compile(ur'version: \'(\d+)\'')
match = re.findall(p, c)
if len(match) > 0:
version = int(match[0])
return version
def create_s3_dir_path(base_dir, named_branch, git_branch):
print(base_dir)
if git_branch is None:
git_branch = local_git_branch(base_dir)
version = local_last_version(base_dir).strip()
if named_branch:
return (git_branch, version)
git_short_sha = local_git_last_commit(base_dir)[:7]
return (os.path.join(git_branch, git_short_sha, version), version)
def is_cached(file_name, named_branch):
if named_branch:
return False
# 1 exception
if file_name == 'services':
return True
_, extension = os.path.splitext(file_name)
return bool(extension not in ['.html', '.txt', '.appcache', ''])
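# e.g. is_cached('app.12345.js', named_branch=False) -> True, while 'index.html',
# 'checker' (no extension) and anything on a named branch are never cached.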
def get_file_mimetype(local_file):
if local_file.endswith('services'):
return 'application/json'
else:
mimetype, _ = mimetypes.guess_type(local_file)
if mimetype:
return mimetype
return 'text/plain'
# DEPR: this is the legacy upload method and can be removed in a future
# release if dist stuff works properly
def upload(bucket_name, base_dir, deploy_target, named_branch, git_branch, bucket_url):
s3_dir_path, version = create_s3_dir_path(base_dir, named_branch, git_branch)
print('Destination folder is:')
print('%s' % s3_dir_path)
upload_directories = ['prd', 'src']
exclude_filename_patterns = ['.less', '.gitignore', '.mako.']
root_files = ('index.html', 'mobile.html', 'embed.html', '404.html',
'robots.txt', 'robots_prod.txt', 'favicon.ico',
'checker', 'geoadmin.%s.appcache' % version)
for directory in upload_directories:
for file_path_list in os.walk(os.path.join(base_dir, directory)):
file_names = file_path_list[2]
if len(file_names) > 0:
file_base_path = file_path_list[0]
for file_name in file_names:
if len([p for p in exclude_filename_patterns if p in file_name]) == 0:
is_chsdi_cache = bool(file_base_path.endswith('cache'))
local_file = os.path.join(file_base_path, file_name)
relative_file_path = file_base_path.replace('cache', '')
if directory == 'prd':
# Take only files directly in prd/
if file_name in root_files and relative_file_path.endswith('prd'):
relative_file_path = relative_file_path.replace('prd', '')
else:
relative_file_path = relative_file_path.replace('prd', version)
relative_file_path = relative_file_path.replace(base_dir + '/', '')
remote_file = os.path.join(s3_dir_path, relative_file_path, file_name)
# Don't cache some files
cached = is_cached(file_name, named_branch)
mimetype = get_file_mimetype(local_file)
save_to_s3(
local_file,
remote_file,
bucket_name,
cached=cached,
mimetype=mimetype)
# Also upload chsdi metadata file to src folder if available
if is_chsdi_cache:
relative_file_path = relative_file_path.replace(version + '/', '')
remote_file = os.path.join(
s3_dir_path, 'src/', relative_file_path, file_name)
save_to_s3(
local_file,
remote_file,
bucket_name,
cached=cached,
mimetype=mimetype)
url_to_check = bucket_url if bucket_url.endswith('/') else bucket_url + '/'
print('S3 version path: ')
# This line is used by jenkins to get the S3_VERSION_PATH
print(s3_dir_path)
print('Test url: ')
# This line is used by jenkins to get the E2E_TARGETURL
print('%s%s/index.html' % (url_to_check, s3_dir_path))
def upload_dist(bucket_name, base_dir, deploy_target, named_branch, git_branch, bucket_url):
print("base_dir", base_dir)
version = local_last_version(base_dir).strip()
dist_dir = 'dist'
file_nr = 0
for root, dirs, files in os.walk(os.path.join(base_dir, dist_dir)):
# empty directory
if len(files) == 0:
continue
for fil in files:
local_file_path = os.path.join(root, fil)
# get the relative part of file_path after base_dir/dist_dir/
s3_rel_path = os.path.relpath(local_file_path, os.path.join(base_dir, dist_dir))
# add the branch and version (i.e. timestamp) info
s3_file_path = os.path.join(git_branch, version, s3_rel_path)
file_nr += 1
print("{} => s3://{}/{}".format(local_file_path, bucket_name, s3_file_path))
save_to_s3(
local_file_path,
s3_file_path,
bucket_name,
cached=False,
mimetype=None)
print('Number of files uploaded: {}'.format(file_nr))
url_to_check = bucket_url if bucket_url.endswith('/') else bucket_url + '/'
s3_dir_path = os.path.join(git_branch, version)
# This line is used by jenkins to get the VERSION
print('Version that was uploaded:')
print(version)
print('S3 version path: ')
# This line is used by jenkins to get the S3_VERSION_PATH
print(s3_dir_path)
print('Test url: ')
# This line is used by jenkins to get the E2E_TARGETURL
print('%s%s/index.html' % (url_to_check, s3_dir_path))
def list_version():
branches = bucket.meta.client.list_objects(Bucket=bucket.name,
Delimiter='/')
for b in branches.get('CommonPrefixes'):
branch = b.get('Prefix')
if re.search(r'^\D', branch):
shas = bucket.meta.client.list_objects(Bucket=bucket.name,
Prefix=branch,
Delimiter='/')
shas = shas.get('CommonPrefixes')
if shas:
for s in shas:
sha = s.get('Prefix')
nice_sha = sha.replace(branch, '').replace('/', '')
# Full version path to display
if re.match('[0-9a-f]{7}$', nice_sha) is not None:
builds = bucket.meta.client.list_objects(Bucket=bucket.name,
Prefix=sha,
Delimiter='/')
for v in builds.get('CommonPrefixes'):
build = v.get('Prefix')
print(
'Full version: %s%s/%s' %
(branch, nice_sha, build.replace(
sha, '').replace(
'/', '')))
else:
# Matching a version of the deployed branch
if re.match('[0-9]{10}', nice_sha):
print('Named branch: %s (version: %s)' %
(branch.replace('/', ''), nice_sha))
else:
print('Not an official path for branch %s' % branch)
def list_dist_version():
# Note: branch-names containing '/' are currently not supported!
branches = bucket.meta.client.list_objects(
Bucket=bucket.name,
Delimiter='/')
# get list of 'unique' branch names
for b in branches.get('CommonPrefixes'):
branch = b.get('Prefix')
# "new" style pattern is
# /<branch_name>/<timestamp>/files
# where <branch_name> can be any "named" branch or master
# and <timestamp> is of the form 1902190815
# we consider only "new" style versions
_subfolders = bucket.meta.client.list_objects(
Bucket=bucket.name,
Prefix=branch,
Delimiter='/'
)
subfolders = _subfolders.get('CommonPrefixes')
for _subfolder in subfolders:
# subfolder contains the branch name also, -> remove to
# get bare version info
subfolder = _subfolder.get('Prefix')
version = subfolder.replace(branch, '').replace('/', '')
# we match only subfolders that correspond to
# a valid timestamp and hence represent a version
if re.match('[0-9]{10}', version):
print('{}: (version: {})'.format(branch, version))
def get_version_info(s3_path):
print('App version is: %s' % s3_path)
version_target = s3_path.split('/')[2]
obj = s3.Object(bucket.name, '%s/%s/info.json' % (s3_path, version_target))
try:
content = obj.get()["Body"].read()
raw = _unzip_data(content)
data = json.loads(raw)
except botocore.exceptions.ClientError:
return None
except botocore.exceptions.BotoCoreError:
return None
return data
def version_info(s3_path):
info = get_version_info(s3_path)
if info is None:
print('No info for version %s' % s3_path)
sys.exit(1)
for k in info.keys():
print('%s: %s' % (k, info[k]))
def version_exists(s3_path):
files = bucket.objects.filter(Prefix=str(s3_path)).all()
return len(list(files)) > 0
def delete_version(s3_path, bucket_name):
if version_exists(s3_path) is False:
print('Version <%s> does not exist in AWS S3. Aborting' % s3_path)
sys.exit(1)
msg = raw_input('Are you sure you want to delete all files in <%s>?\n' % s3_path)
if msg.lower() in ('y', 'yes'):
files = bucket.objects.filter(Prefix=str(s3_path)).all()
n = 200
indexes = [{'Key': k.key} for k in files]
for i in xrange(0, len(indexes), n):
resp = s3client.delete_objects(
Bucket=bucket_name, Delete={
'Objects': indexes[
i: i + n]})
for v in resp['Deleted']:
print(v)
else:
print('Aborting deletion of <%s>.' % s3_path)
def activate_version(s3_path, bucket_name, deploy_target, bucket_url):
# Prod files
for n in ('index', 'embed', 'mobile', '404'):
src_key_name = '{}/{}.html'.format(s3_path, n)
print('{} --> {}.html'.format(src_key_name, n))
s3client.copy_object(
Bucket=bucket_name,
CopySource=bucket_name + '/' + src_key_name,
Key=n + '.html',
ACL='public-read')
# Delete older appcache files
appcache_versioned_files = list(bucket.objects.filter(Prefix='geoadmin.').all())
indexes = [{'Key': k.key} for k in appcache_versioned_files if k.key.endswith('.appcache')]
if len(indexes) > 0:
s3client.delete_objects(Bucket=bucket_name, Delete={'Objects': indexes})
appcache = None
files = list(bucket.objects.filter(Prefix='{}/geoadmin.'.format(s3_path)).all())
if len(files) > 0:
appcache = os.path.basename(sorted(files)[-1].key)
for j in ('robots.txt', 'checker', 'favicon.ico', appcache):
# In prod move robots prod
src_file_name = 'robots_prod.txt' if j == 'robots.txt' and deploy_target == 'prod' else j
src_key_name = '{}/{}'.format(s3_path, src_file_name)
print('%s ---> %s' % (src_key_name, j))
try:
s3client.copy_object(
Bucket=bucket_name,
CopySource=bucket_name + '/' + src_key_name,
Key=j,
CopySourceIfModifiedSince=datetime(2015, 1, 1),
ACL='public-read')
except botocore.exceptions.ClientError as e:
print('Cannot copy {}: {}'.format(j, e))
print('\nSuccessfully deployed into bucket {}'.format(bucket_name))
print('Check:\n{}/{}'.format(bucket_url, s3_path + '/index.html'))
def activate_dist_version(branch_name, version, bucket_name, deploy_target, bucket_url):
# Prod files
print('branch_name', branch_name)
print('version', version)
print('bucket_name', bucket_name)
# The root for copying the files is different for master and all
# other branches
# root: s3://mf-geoadmin3-(int|prod)-dublin/
# <branch>: s3://mf-geoadmin3-(int|prod)-dublin/<branch>/
if "branch_name" == "master":
branch_root = ''
else:
branch_root = "{}/".format(branch_name)
# Delete older appcache files
appcache_versioned_files = list(bucket.objects.filter(Prefix='{}geoadmin.'.format(branch_root)).all())
indexes = [{'Key': k.key} for k in appcache_versioned_files if k.key.endswith('.appcache')]
if len(indexes) > 0:
print("deleting old *.appcache objects {}".format(indexes))
s3client.delete_objects(Bucket=bucket_name, Delete={'Objects': indexes})
for n in bucket.objects.filter(Prefix='{}/{}/'.format(branch_name, version)):
src_key = n.key
dst_key = "{}{}".format(
branch_root,
src_key.replace(
"{}/{}/".format(branch_name, version),
""
)
)
if deploy_target == 'prod':
if 'robots.txt' in src_key:
continue
elif 'robots_prod.txt' in src_key:
dst_key = "{}robots.txt".format(branch_root)
else:
# don't copy 'robots_prod.txt' on non-prod envs
if 'robots_prod.txt' in src_key:
continue
print('{} => {}'.format(src_key, dst_key))
try:
s3client.copy_object(
Bucket=bucket_name,
CopySource=bucket_name + '/' + src_key,
Key=dst_key,
CopySourceIfModifiedSince=datetime(2015, 1, 1),
ACL='public-read'
)
except botocore.exceptions.ClientError as e:
print('Cannot copy {}: {}'.format(dst_key, e))
print('\nSuccessfully activated version <{}> of branch <{}> in bucket {}'.format(
version,
branch_name,
bucket_name
))
print('Check:\n{}/{}'.format(bucket_url, branch_root + 'index.html'))
def init_connection(bucket_name):
try:
session = boto3.session.Session()
except botocore.exceptions.BotoCoreError as e:
print('Cannot establish connection to bucket "%s". Check you credentials.' % bucket_name)
print(e)
sys.exit(1)
s3client = session.client('s3', config=boto3.session.Config(signature_version='s3v4'))
s3 = session.resource('s3', config=boto3.session.Config(signature_version='s3v4'))
bucket = s3.Bucket(bucket_name)
return (s3, s3client, bucket)
def exit_usage(cmd_type):
with click.Context(cmd_type) as ctx:
click.echo(cmd_type.get_help(ctx))
def parse_s3_path(s3_path, cmd_type):
if s3_path.endswith('/'):
s3_path = s3_path[:len(s3_path) - 1]
# Delete named branch as well
if s3_path.count('/') not in (0, 2):
        exit_usage(eval(cmd_type + '_cmd'))
print('Bad version definition')
sys.exit(1)
if s3_path.count('/') == 0 and cmd_type in ('activate', 'info'):
exit_usage(eval(cmd_type + '_cmd'))
print('Cmd activate/info not supported for named branches.')
print('Please provide a full version path.')
sys.exit(1)
return s3_path
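# Illustrative note (hypothetical paths, not from the deployment docs):
# parse_s3_path() strips a trailing '/', then accepts either a bare branch
# name (no slashes) or a full version path with exactly two slashes,
# e.g. 'master/abc1234/1910121200'; anything else prints the command help
# and exits. The 'activate' and 'info' commands additionally require the
# full version path form.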
@click.group()
def cli():
"""Manage map.geo.admin.ch versions in AWS S3 bucket. Please do not use any credentials or profile, as this script
relies on aws instance's role.
A version deployed to S3 is always defined by:\n
<s3version> = <branch_name>/<sha>/<version>
"""
for var in ('AWS_PROFILE', 'AWS_ACCESS_KEY_ID'):
val = os.environ.get(var)
if val is not None:
print('Please unset: {}. We use instance roles'.format(var))
sys.exit(2)
@cli.command('list')
@click.argument('target', required=True)
@click.option('--legacy', is_flag=True)
def list_cmd(target, legacy=False):
"""List available <version> in a bucket."""
global s3, s3client, bucket
bucket_name = get_bucket_name(target)
s3, s3client, bucket = init_connection(bucket_name)
if legacy:
list_version()
else:
list_dist_version()
@cli.command('upload')
@click.option('--force', help='Do not prompt for confirmation', is_flag=True)
@click.option('--url', 'bucket_url', help='Bucket url to check', required=True)
@click.argument('snapshotdir', required=True, default=os.getcwd())
@click.argument('target', required=True)
@click.argument('named_branch', required=False, default=False)
@click.argument('git_branch', required=False)
def upload_cmd(force, snapshotdir, named_branch, target, git_branch, bucket_url):
"""Upload content of /dist directory to a bucket. You may specify a directory (it defaults to current)."""
global s3, s3client, bucket
bucket_name = get_bucket_name(target)
s3, s3client, bucket = init_connection(bucket_name)
named_branch = True if named_branch == 'true' else False
base_dir = os.path.abspath(snapshotdir)
if not os.path.isdir(base_dir):
print('No code found in directory %s' % base_dir)
sys.exit(1)
if not force and not click.confirm(
'You are about to upload {} to {}. Continue?'.format(
base_dir, bucket_name)):
click.echo('Aborting.')
sys.exit()
else:
#upload(bucket_name, base_dir, target, named_branch, git_branch, bucket_url)
upload_dist(bucket_name, base_dir, target, named_branch, git_branch, bucket_url)
@cli.command('info')
@click.argument('s3_path', required=True)
@click.argument('target', required=True)
def info_cmd(s3_path, target):
"""Print the info.json file"""
global s3, s3client, bucket
bucket_name = get_bucket_name(target)
s3, s3client, bucket = init_connection(bucket_name)
s3_path = parse_s3_path(s3_path, 'info')
version_info(s3_path)
@cli.command('activate')
@click.option('--force', help='Do not prompt for confirmation', is_flag=True)
@click.option('--url', 'bucket_url', help='Bucket url to check',
required=False, default='https://<bucket public url>')
@click.option('--branch', 'branch_name', required=False, default='master')
@click.option('--version', 'version', required=False, default=None)
@click.argument('target', required=True)
def activate_cmd(branch_name, version, target, force, bucket_url):
"""Activate a version at the root of a bucket (by copying index.html and co to the root)"""
global s3, s3client, bucket
bucket_name = get_bucket_name(target)
s3, s3client, bucket = init_connection(bucket_name)
s3_path = os.path.join(branch_name, version)
if version_exists(s3_path) is False:
        print('Version <%s> does not exist in AWS S3. Aborting' % s3_path)
sys.exit(1)
if not force and not click.confirm(
'Are you sure you want to activate version <{}> for branch <{}> in bucket <{}>?'.format(
version,
branch_name,
bucket_name)):
click.echo('Aborting activation.')
sys.exit()
else:
activate_dist_version(branch_name, version, bucket_name, target, bucket_url)
# DEPR:
# This is the legacy activate command to activate legacy (i.e. before dist)
# master branches
@cli.command('activate_legacy')
@click.option('--force', help='Do not prompt for confirmation', is_flag=True)
@click.option('--url', 'bucket_url', help='Bucket url to check',
required=False, default='https://<bucket public url>')
@click.argument('s3_path', required=True)
@click.argument('target', required=True)
def activate_legacy_cmd(s3_path, target, force, bucket_url):
    """Activate a legacy (pre-dist) version at the root of a bucket (by copying index.html and co to the root)"""
global s3, s3client, bucket
bucket_name = get_bucket_name(target)
s3, s3client, bucket = init_connection(bucket_name)
s3_path = parse_s3_path(s3_path, 'activate')
if version_exists(s3_path) is False:
        print('Version <%s> does not exist in AWS S3. Aborting' % s3_path)
sys.exit(1)
if not force and not click.confirm(
'Are you sure you want to activate version <{}> in bucket <{}>?'.format(
s3_path,
bucket_name)):
click.echo('Aborting activation.')
sys.exit()
else:
activate_version(s3_path, bucket_name, target, bucket_url)
@cli.command('delete')
@click.argument('s3_path', required=True)
@click.argument('target', required=False)
def delete_cmd(s3_path, target):
"""Delete a s3_path on a give bucket"""
global s3, s3client, bucket
bucket_name = get_bucket_name(target)
s3, s3client, bucket = init_connection(bucket_name)
s3_path = parse_s3_path(s3_path, 'delete')
print('Trying to delete version \'{}\''.format(s3_path))
delete_version(s3_path, bucket_name)
if __name__ == '__main__':
cli()
|
from __future__ import print_function, division
import numpy as np
from R_tools import rho2u,rho2v
def calc_etat(ubar,vbar,hflow,pm,pn):
''' compute divergence of barotropic momentum (units m/h)
arrays are (x,y) ordered -- hflow is full column depth (SSE-z_bot)'''
return -( np.diff(rho2u(hflow/pn)*ubar,axis=0)[:,1:-1] \
+ np.diff(rho2v(hflow/pm)*vbar,axis=1)[1:-1,:] )\
* pm[1:-1,1:-1]*pn[1:-1,1:-1]*3600.
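# A hedged usage sketch (array shapes assumed from the ROMS C-grid convention, in
# which rho2u/rho2v average rho-point fields onto u/v points; nothing here is taken
# from R_tools itself): inputs are (x, y) ordered, ubar/vbar live on u/v points,
# and the result covers the interior rho points.
if __name__ == '__main__':
    nx, ny = 8, 6
    pm = np.full((nx, ny), 1e-3)      # 1/dx [1/m]
    pn = np.full((nx, ny), 1e-3)      # 1/dy [1/m]
    hflow = np.full((nx, ny), 50.0)   # full column depth [m]
    ubar = np.zeros((nx - 1, ny))     # barotropic u on u-points [m/s]
    vbar = np.zeros((nx, ny - 1))     # barotropic v on v-points [m/s]
    print(calc_etat(ubar, vbar, hflow, pm, pn).shape)  # (nx-2, ny-2), all zeros here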
|
from .starter_class import StarterClass
from .boto_manager import BotoClientManager
from .config import _CONFIG
__all__ = ['BotoClientManager', 'StarterClass', '_CONFIG']
|
from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases
from eth2spec.test.helpers.keys import pubkey_to_privkey
from eth2spec.test.helpers.voluntary_exits import sign_voluntary_exit
def run_voluntary_exit_processing(spec, state, signed_voluntary_exit, valid=True):
"""
Run ``process_voluntary_exit``, yielding:
- pre-state ('pre')
- voluntary_exit ('voluntary_exit')
- post-state ('post').
If ``valid == False``, run expecting ``AssertionError``
"""
validator_index = signed_voluntary_exit.message.validator_index
yield 'pre', state
yield 'voluntary_exit', signed_voluntary_exit
if not valid:
expect_assertion_error(lambda: spec.process_voluntary_exit(state, signed_voluntary_exit))
yield 'post', None
return
pre_exit_epoch = state.validators[validator_index].exit_epoch
spec.process_voluntary_exit(state, signed_voluntary_exit)
yield 'post', state
assert pre_exit_epoch == spec.FAR_FUTURE_EPOCH
assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
@with_all_phases
@spec_state_test
def test_success(spec, state):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
signed_voluntary_exit = sign_voluntary_exit(
spec, state, spec.VoluntaryExit(epoch=current_epoch, validator_index=validator_index), privkey)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit)
assert state.validators[validator_index].exit_epoch == spec.compute_activation_exit_epoch(current_epoch)
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_signature(spec, state):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
voluntary_exit = spec.VoluntaryExit(
epoch=current_epoch,
validator_index=validator_index,
)
signed_voluntary_exit = sign_voluntary_exit(spec, state, voluntary_exit, 12345)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)
@with_all_phases
@spec_state_test
def test_success_exit_queue(spec, state):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
current_epoch = spec.get_current_epoch(state)
# exit `MAX_EXITS_PER_EPOCH`
initial_indices = spec.get_active_validator_indices(state, current_epoch)[:spec.get_validator_churn_limit(state)]
# Prepare a bunch of exits, based on the current state
exit_queue = []
for index in initial_indices:
privkey = pubkey_to_privkey[state.validators[index].pubkey]
signed_voluntary_exit = sign_voluntary_exit(
spec, state, spec.VoluntaryExit(epoch=current_epoch, validator_index=index), privkey)
exit_queue.append(signed_voluntary_exit)
# Now run all the exits
for voluntary_exit in exit_queue:
# the function yields data, but we are just interested in running it here, ignore yields.
for _ in run_voluntary_exit_processing(spec, state, voluntary_exit):
continue
# exit an additional validator
validator_index = spec.get_active_validator_indices(state, current_epoch)[-1]
privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
signed_voluntary_exit = sign_voluntary_exit(
spec, state, spec.VoluntaryExit(epoch=current_epoch, validator_index=validator_index), privkey)
# This is the interesting part of the test: on a pre-state with a full exit queue,
# when processing an additional exit, it results in an exit in a later epoch
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit)
assert (
state.validators[validator_index].exit_epoch ==
state.validators[initial_indices[0]].exit_epoch + 1
)
@with_all_phases
@spec_state_test
def test_default_exit_epoch_subsequent_exit(spec, state):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
signed_voluntary_exit = sign_voluntary_exit(
spec, state, spec.VoluntaryExit(epoch=current_epoch, validator_index=validator_index), privkey)
# Exit one validator prior to this new one
exited_index = spec.get_active_validator_indices(state, current_epoch)[-1]
state.validators[exited_index].exit_epoch = current_epoch - 1
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit)
assert state.validators[validator_index].exit_epoch == spec.compute_activation_exit_epoch(current_epoch)
@with_all_phases
@spec_state_test
def test_validator_exit_in_future(spec, state):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
voluntary_exit = spec.VoluntaryExit(
epoch=current_epoch + 1,
validator_index=validator_index,
)
signed_voluntary_exit = sign_voluntary_exit(spec, state, voluntary_exit, privkey)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)
@with_all_phases
@spec_state_test
def test_validator_invalid_validator_index(spec, state):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
voluntary_exit = spec.VoluntaryExit(
epoch=current_epoch,
validator_index=len(state.validators),
)
signed_voluntary_exit = sign_voluntary_exit(spec, state, voluntary_exit, privkey)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)
@with_all_phases
@spec_state_test
def test_validator_not_active(spec, state):
current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
state.validators[validator_index].activation_epoch = spec.FAR_FUTURE_EPOCH
signed_voluntary_exit = sign_voluntary_exit(
spec, state, spec.VoluntaryExit(epoch=current_epoch, validator_index=validator_index), privkey)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)
@with_all_phases
@spec_state_test
def test_validator_already_exited(spec, state):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow validator able to exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
# but validator already has exited
state.validators[validator_index].exit_epoch = current_epoch + 2
signed_voluntary_exit = sign_voluntary_exit(
spec, state, spec.VoluntaryExit(epoch=current_epoch, validator_index=validator_index), privkey)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)
@with_all_phases
@spec_state_test
def test_validator_not_active_long_enough(spec, state):
current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
signed_voluntary_exit = sign_voluntary_exit(
spec, state, spec.VoluntaryExit(epoch=current_epoch, validator_index=validator_index), privkey)
assert (
current_epoch - state.validators[validator_index].activation_epoch <
spec.config.SHARD_COMMITTEE_PERIOD
)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)
|
# Generated by Django 2.2.2 on 2019-07-29 19:03
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=60)),
('question', models.TextField(max_length=400)),
('priority', models.CharField(choices=[('H', 'High'), ('M', 'Medium'), ('L', 'Low')], max_length=1)),
],
options={
'verbose_name': 'The Question',
'verbose_name_plural': "People's Question",
},
),
]
|
import execjs
def get_js_function(js_path, func_name, *func_args):
    '''
    Load the JS code at the given path and call the named function in it with the given arguments.
    :param js_path: path to the JS file
    :param func_name: name of the function defined in the JS code
    :param func_args: arguments to pass to the JS function
    :return: the result of calling the JS function
    '''
with open(js_path, encoding='utf-8') as fp:
js = fp.read()
ctx = execjs.compile(js)
    return ctx.call(func_name, *func_args)
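# A minimal, self-contained check of the same pattern (inline JS instead of an
# external file; the function name is illustrative): compile a snippet with
# execjs and call a function defined in it.
def _demo_inline_js():
    ctx = execjs.compile("function add(a, b) { return a + b; }")
    return ctx.call("add", 1, 2)  # -> 3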
if __name__ == '__main__':
    # Please give this repo a star
passwd = get_js_function('xiami.js', '_s', "")
print('*'*80)
print(passwd)
print('*'*80)
    print('@Stars are welcome!')
    print('@For questions, contact: scrapy@qq.com')
|
# coding: utf-8
"""
Swagger Petstore */ ' \" =end -- \\r\\n \\n \\r
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ */ ' \" =end --
OpenAPI spec version: 1.0.0 */ ' \" =end -- \\r\\n \\n \\r
Contact: apiteam@swagger.io */ ' \" =end -- \\r\\n \\n \\r
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class FakeApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def test_code_inject____end__rn_n_r(self, **kwargs):
"""
To test code injection */ ' \" =end -- \\r\\n \\n \\r
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.test_code_inject____end__rn_n_r(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str test_code_inject____end____rn_n_r: To test code injection */ ' \" =end -- \\r\\n \\n \\r
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.test_code_inject____end__rn_n_r_with_http_info(**kwargs)
else:
(data) = self.test_code_inject____end__rn_n_r_with_http_info(**kwargs)
return data
def test_code_inject____end__rn_n_r_with_http_info(self, **kwargs):
"""
To test code injection */ ' \" =end -- \\r\\n \\n \\r
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.test_code_inject____end__rn_n_r_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str test_code_inject____end____rn_n_r: To test code injection */ ' \" =end -- \\r\\n \\n \\r
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['test_code_inject____end____rn_n_r']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method test_code_inject____end__rn_n_r" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/fake'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
if 'test_code_inject____end____rn_n_r' in params:
form_params.append(('test code inject */ ' " =end -- \r\n \n \r', params['test_code_inject____end____rn_n_r']))
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', '*/ \" =end -- '])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', '*/ \" =end -- '])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
class Singleton(type):
"""
"""
_instances = {}
def __call__(cls, *args, **kwargs):
"""
Possible changes to the value of the `__init__` argument do not affect
the returned instance.
"""
if cls not in cls._instances:
instance = super().__call__(*args, **kwargs)
cls._instances[cls] = instance
return cls._instances[cls]
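# A minimal, self-contained usage sketch (the class below is hypothetical):
# every instantiation of a class that uses Singleton as its metaclass returns
# the same object, and constructor arguments on later calls are ignored.
if __name__ == "__main__":
    class Config(metaclass=Singleton):
        def __init__(self, value=None):
            self.value = value
    a = Config(value=1)
    b = Config(value=2)     # returns the instance created above; value stays 1
    print(a is b, a.value)  # True 1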
|
"""
ASGI config for to_do_list project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'to_do_list.settings')
application = get_asgi_application()
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Machinecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""rawtranscation RPCs QA test.
# Tests the following RPCs:
# - createrawtransaction
# - signrawtransaction
# - sendrawtransaction
# - decoderawtransaction
# - getrawtransaction
"""
from test_framework.test_framework import MachinecoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(MachinecoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
#connect to a local machine for debugging
#url = "http://machinecoinrpc:DP6DvqZtqXarpeNWyN3LZTFchCCyCUuHwNF7E8pX99x1@%s:%d" % ('127.0.0.1', 140332)
#proxy = AuthServiceProxy(url)
#proxy.url = url # store URL on proxy for info
#self.nodes.append(proxy)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test(self):
#prepare some coins for multiple *rawtransaction commands
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(101)
self.sync_all()
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
self.sync_all()
self.nodes[0].generate(5)
self.sync_all()
#########################################
# sendrawtransaction with missing input #
#########################################
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] #won't exists
outputs = { self.nodes[0].getnewaddress() : 4.998 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
rawtx = self.nodes[2].signrawtransaction(rawtx)
try:
rawtx = self.nodes[2].sendrawtransaction(rawtx['hex'])
except JSONRPCException as e:
assert("Missing inputs" in e.error['message'])
else:
assert(False)
#########################
# RAW TX MULTISIG TESTS #
#########################
# 2of2 test
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
mSigObjValid = self.nodes[2].validateaddress(mSigObj)
#use balance deltas instead of absolute values
bal = self.nodes[2].getbalance()
# send 1.2 BTC to msig adr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance
# 2of3 test from different nodes
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr3 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
addr3Obj = self.nodes[2].validateaddress(addr3)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])
mSigObjValid = self.nodes[2].validateaddress(mSigObj)
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
sPK = rawTx['vout'][0]['scriptPubKey']['hex']
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
        #THIS IS AN INCOMPLETE FEATURE
        #NODE2 HAS TWO OF THREE KEYS AND THE FUNDS SHOULD BE SPENDABLE AND COUNT AT BALANCE CALCULATION
assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned = self.nodes[1].signrawtransaction(rawTx, inputs)
assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxSigned = self.nodes[2].signrawtransaction(rawTx, inputs)
assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx compl., own two of three keys
self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# getrawtransaction tests
# 1. valid parameters - only supply txid
txHash = rawTx["hash"]
assert_equal(self.nodes[0].getrawtransaction(txHash), rawTxSigned['hex'])
# 2. valid parameters - supply txid and 0 for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, 0), rawTxSigned['hex'])
# 3. valid parameters - supply txid and False for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, False), rawTxSigned['hex'])
# 4. valid parameters - supply txid and 1 for verbose.
# We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
assert_equal(self.nodes[0].getrawtransaction(txHash, 1)["hex"], rawTxSigned['hex'])
        # 5. valid parameters - supply txid and True for verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, True)["hex"], rawTxSigned['hex'])
# 6. invalid parameters - supply txid and string "Flase"
assert_raises(JSONRPCException, self.nodes[0].getrawtransaction, txHash, "Flase")
# 7. invalid parameters - supply txid and empty array
assert_raises(JSONRPCException, self.nodes[0].getrawtransaction, txHash, [])
# 8. invalid parameters - supply txid and empty dict
assert_raises(JSONRPCException, self.nodes[0].getrawtransaction, txHash, {})
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 1000)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises(JSONRPCException, self.nodes[0].createrawtransaction, inputs, outputs)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises(JSONRPCException, self.nodes[0].createrawtransaction, inputs, outputs)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
if __name__ == '__main__':
RawTransactionsTest().main()
|
import unittest
import numpy as np
from UncertainSCI.families import LaguerrePolynomials
class IDistTestCase(unittest.TestCase):
"""
    Tests for (Laguerre polynomial) inverse induced distributions.
"""
def test_idistinv_laguerre(self):
"""Evaluation of Laguerre inversed induced distribution function."""
# Randomly generate x, use idist to generate u
rho = 11*np.random.random() - 1
L = LaguerrePolynomials(rho=rho)
n = int(np.ceil(10*np.random.rand(1))[0])
M = 25
x1 = 4*(n+1)*np.random.rand(M)
u = L.idist(x1, n)
        # see if idistinv gives x back
x2 = L.idistinv(u, n)
delta = 5e-3
ind = np.where(np.abs(x1-x2) > delta)[:2][0]
if ind.size > 0:
errstr = 'Failed for rho={0:1.3f}, n={1:d}'.format(rho, n)
else:
errstr = ''
self.assertAlmostEqual(np.linalg.norm(x1-x2, ord=np.inf), 0., delta=delta, msg=errstr)
if __name__ == "__main__":
unittest.main(verbosity=2)
|
# Logging module.
import os
from os import path
import datetime
from michiru import config
from michiru.modules import hook
## Module information.
__name__ = 'logger'
__author__ = 'Shiz'
__license__ = 'WTFPL'
__desc__ = 'Log activities.'
config.item('logger.path', path.join('{local}', 'logs', '{server}', '{channel}.log'))
config.item('logger.date_format', '%Y/%m/%d %H:%M:%S')
## Utility functions.
def log(server, channel, message):
""" Remove earlier entries for `nick` from database and insert new log entry. """
logfile = config.get('logger.path', server=server, channel=channel).format(
site=config.SITE_DIR,
local=config.LOCAL_DIR,
server=server,
channel=channel or '<server>'
)
logpath = path.dirname(logfile)
dateformat = config.get('logger.date_format', server=server, channel=channel)
if not path.exists(logpath):
os.makedirs(logpath)
with open(logfile, 'a') as f:
f.write('[{now}] {message}\n'.format(now=datetime.datetime.utcnow().strftime(dateformat), message=message))
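# Illustrative note (example values, not part of the michiru config): the
# 'logger.path' template above is expanded with str.format, so
#   path.join('{local}', 'logs', '{server}', '{channel}.log').format(
#       local='~/.michiru', server='irc.example.net', channel='#test')
# yields '~/.michiru/logs/irc.example.net/#test.log' -- one file per
# server/channel pair, with '<server>' used when there is no channel.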
## Commands and hooks.
@hook('chat.join')
def join(bot, server, channel, who):
log(server, channel, '--> {nick} joined {chan}'.format(nick=who, chan=channel))
@hook('chat.part')
def part(bot, server, channel, who, reason):
log(server, channel, '<-- {nick} left {chan} ({reason})'.format(nick=who, chan=channel, reason=reason))
@hook('chat.disconnect')
def quit(bot, server, who, reason):
log(server, None, '<-- {nick} quit ({reason})'.format(nick=who, reason=reason))
@hook('chat.kick')
def kick(bot, server, channel, target, by, reason):
log(server, channel, '<!- {nick} got kicked from {channel} by {kicker} ({reason})'.format(nick=target, channel=channel, kicker=by, reason=reason))
@hook('chat.nickchange')
def nickchange(bot, server, who, to):
log(server, None, '-!- {old} changed nickname to {new}'.format(old=who, new=to))
@hook('chat.message')
def message(bot, server, target, who, message, private, admin):
log(server, who if private else target, '<{nick}> {message}'.format(nick=who, message=message))
@hook('chat.notice')
def notice(bot, server, target, who, message, private, admin):
log(server, who if private else target, '*{nick}* {message}'.format(nick=who, message=message))
@hook('chat.channelchange')
def channelchange(bot, server, channel, new):
log(server, channel, '-!- Channel changed to {new}'.format(new=new))
@hook('chat.topicchange')
def topicchange(bot, server, channel, who, topic):
if who:
log(server, channel, '-!- {who} changed topic to: {topic}'.format(who=who, topic=topic))
else:
log(server, channel, '-!- Topic changed to: {topic}'.format(topic=topic))
## Boilerplate.
def load():
return True
def unload():
pass
|
from bs4 import BeautifulSoup
from selenium import webdriver
import selenium as se
from selenium.webdriver.chrome.options import Options
# This is the temporary url **** Need to make it dynamic
url = "https://www.realestate.co.nz/residential/sale?by=featured&lct=d225&maxba=2&maxbe=4&maxp=1400000&ql=80&scat=1"
# Fetch the fully rendered (expanded) HTML without opening a browser window
options = se.webdriver.ChromeOptions() # webdriver options
options.add_argument('headless') # run Chrome headless
driver = se.webdriver.Chrome('/Users/Thyme/chromedriver') # path to chromedriver; without it, fetching the data will fail
driver.get(url)
data = driver.page_source
soup = BeautifulSoup(data, 'html.parser') # the name 'soup' follows the BeautifulSoup convention
houses = str(soup) # convert the bs4 object to a string
houses = houses.split("\n") # split into a list of lines
# Realstate.co.nz
print("**********")
house_numbers = []
numbers = []
http_head = "https://www.realestate.co.nz/"
http_houses = []
# Extract all lines that contain a house ID
for house in houses:
if "id=\"orbit-" in house:
house_numbers.append(house)
for number in house_numbers:
pos = number.index("id=\"orbit-")
result = number[pos+10:pos+17]
if result not in numbers:
numbers.append(result)
# print(numbers)
# print(len(numbers))
for number in numbers:
http = http_head + str(number)
http_houses.append(http)
print(http_houses)
# From the second page on, the URL gains a "qo" parameter giving the number of houses already shown
# Eg, first page "", second page "qo=80", third page "qo=160", fourth page "qo=240" and so on
# On the last page, if fewer than 80 houses remain, the increment stays the same
# so the offset is 80*(n-1), where "n" is the page number
# bbb = "https://www.realestate.co.nz/residential/sale?by=featured&lct=d225&maxba=2&maxbe=4&maxp=1400000&ql=80&scat=1"
# aaa = "https://www.realestate.co.nz/residential/sale?by=featured&lct=d225&maxba=2&maxbe=4&maxp=1400000&ql=80&qo=80&scat=1"
|
from django import http
from django.contrib.messages import constants, get_level, set_level, utils
from django.contrib.messages.api import MessageFailure
from django.contrib.messages.constants import DEFAULT_LEVELS
from django.contrib.messages.storage import base, default_storage
from django.contrib.messages.storage.base import Message
from django.test import modify_settings, override_settings
from django.urls import reverse
from django.utils.translation import ugettext_lazy
def add_level_messages(storage):
"""
Add 6 messages from different levels (including a custom one) to a storage
instance.
"""
storage.add(constants.INFO, 'A generic info message')
storage.add(29, 'Some custom level')
storage.add(constants.DEBUG, 'A debugging message', extra_tags='extra-tag')
storage.add(constants.WARNING, 'A warning')
storage.add(constants.ERROR, 'An error')
storage.add(constants.SUCCESS, 'This was a triumph.')
class override_settings_tags(override_settings):
def enable(self):
super().enable()
# LEVEL_TAGS is a constant defined in the
# django.contrib.messages.storage.base module, so after changing
# settings.MESSAGE_TAGS, update that constant also.
self.old_level_tags = base.LEVEL_TAGS
base.LEVEL_TAGS = utils.get_level_tags()
def disable(self):
super().disable()
base.LEVEL_TAGS = self.old_level_tags
class BaseTests:
storage_class = default_storage
levels = {
'debug': constants.DEBUG,
'info': constants.INFO,
'success': constants.SUCCESS,
'warning': constants.WARNING,
'error': constants.ERROR,
}
def setUp(self):
self.settings_override = override_settings_tags(
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
),
},
}],
ROOT_URLCONF='messages_tests.urls',
MESSAGE_TAGS='',
MESSAGE_STORAGE='%s.%s' % (self.storage_class.__module__, self.storage_class.__name__),
SESSION_SERIALIZER='django.contrib.sessions.serializers.JSONSerializer',
)
self.settings_override.enable()
def tearDown(self):
self.settings_override.disable()
def get_request(self):
return http.HttpRequest()
def get_response(self):
return http.HttpResponse()
def get_storage(self, data=None):
"""
Return the storage backend, setting its loaded data to the ``data``
argument.
This method avoids the storage ``_get`` method from getting called so
that other parts of the storage backend can be tested independent of
the message retrieval logic.
"""
storage = self.storage_class(self.get_request())
storage._loaded_data = data or []
return storage
def test_add(self):
storage = self.get_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 1')
self.assertTrue(storage.added_new)
storage.add(constants.INFO, 'Test message 2', extra_tags='tag')
self.assertEqual(len(storage), 2)
def test_add_lazy_translation(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, ugettext_lazy('lazy message'))
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_no_update(self):
storage = self.get_storage()
response = self.get_response()
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_add_update(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 1')
storage.add(constants.INFO, 'Test message 1', extra_tags='tag')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 2)
def test_existing_add_read_update(self):
storage = self.get_existing_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 3')
list(storage) # Simulates a read
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_existing_read_add_update(self):
storage = self.get_existing_storage()
response = self.get_response()
list(storage) # Simulates a read
storage.add(constants.INFO, 'Test message 3')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_full_request_response_cycle(self):
"""
With the message middleware enabled, messages are properly stored and
retrieved across the full request/redirect/response cycle.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertIn('messages', response.context)
messages = [Message(self.levels[level], msg) for msg in data['messages']]
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_with_template_response(self):
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_template_response')
for level in self.levels.keys():
add_url = reverse('add_template_response', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertIn('messages', response.context)
for msg in data['messages']:
self.assertContains(response, msg)
# there shouldn't be any messages on second GET request
response = self.client.get(show_url)
for msg in data['messages']:
self.assertNotContains(response, msg)
def test_context_processor_message_levels(self):
show_url = reverse('show_template_response')
response = self.client.get(show_url)
self.assertIn('DEFAULT_MESSAGE_LEVELS', response.context)
self.assertEqual(response.context['DEFAULT_MESSAGE_LEVELS'], DEFAULT_LEVELS)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_multiple_posts(self):
"""
Messages persist properly when multiple POSTs are made before a GET.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_message')
messages = []
for level in ('debug', 'info', 'success', 'warning', 'error'):
messages.extend(Message(self.levels[level], msg) for msg in data['messages'])
add_url = reverse('add_message', args=(level,))
self.client.post(add_url, data)
response = self.client.get(show_url)
self.assertIn('messages', response.context)
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@modify_settings(
INSTALLED_APPS={'remove': 'django.contrib.messages'},
MIDDLEWARE={'remove': 'django.contrib.messages.middleware.MessageMiddleware'},
)
@override_settings(
MESSAGE_LEVEL=constants.DEBUG,
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
}],
)
def test_middleware_disabled(self):
"""
When the middleware is disabled, an exception is raised when one
attempts to store a message.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
with self.assertRaises(MessageFailure):
self.client.post(add_url, data, follow=True)
@modify_settings(
INSTALLED_APPS={'remove': 'django.contrib.messages'},
MIDDLEWARE={'remove': 'django.contrib.messages.middleware.MessageMiddleware'},
)
@override_settings(
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
}],
)
def test_middleware_disabled_fail_silently(self):
"""
When the middleware is disabled, an exception is not raised
        if 'fail_silently' is True.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
'fail_silently': True,
}
show_url = reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertNotIn('messages', response.context)
def stored_messages_count(self, storage, response):
"""
Return the number of messages being stored after a
``storage.update()`` call.
"""
raise NotImplementedError('This method must be set by a subclass.')
def test_get(self):
raise NotImplementedError('This method must be set by a subclass.')
def get_existing_storage(self):
return self.get_storage([
Message(constants.INFO, 'Test message 1'),
Message(constants.INFO, 'Test message 2', extra_tags='tag'),
])
def test_existing_read(self):
"""
Reading the existing storage doesn't cause the data to be lost.
"""
storage = self.get_existing_storage()
self.assertFalse(storage.used)
# After iterating the storage engine directly, the used flag is set.
data = list(storage)
self.assertTrue(storage.used)
# The data does not disappear because it has been iterated.
self.assertEqual(data, list(storage))
def test_existing_add(self):
storage = self.get_existing_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 3')
self.assertTrue(storage.added_new)
def test_default_level(self):
# get_level works even with no storage on the request.
request = self.get_request()
self.assertEqual(get_level(request), constants.INFO)
# get_level returns the default level if it hasn't been set.
storage = self.get_storage()
request._messages = storage
self.assertEqual(get_level(request), constants.INFO)
# Only messages of sufficient level get recorded.
add_level_messages(storage)
self.assertEqual(len(storage), 5)
def test_low_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 5))
self.assertEqual(get_level(request), 5)
add_level_messages(storage)
self.assertEqual(len(storage), 6)
def test_high_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 30))
self.assertEqual(get_level(request), 30)
add_level_messages(storage)
self.assertEqual(len(storage), 2)
@override_settings(MESSAGE_LEVEL=29)
def test_settings_level(self):
request = self.get_request()
storage = self.storage_class(request)
self.assertEqual(get_level(request), 29)
add_level_messages(storage)
self.assertEqual(len(storage), 3)
def test_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags, ['info', '', 'extra-tag debug', 'warning', 'error', 'success'])
def test_level_tag(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.level_tag for msg in storage]
self.assertEqual(tags, ['info', '', 'debug', 'warning', 'error', 'success'])
@override_settings_tags(MESSAGE_TAGS={
constants.INFO: 'info',
constants.DEBUG: '',
constants.WARNING: '',
constants.ERROR: 'bad',
29: 'custom',
})
def test_custom_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags, ['info', 'custom', 'extra-tag', '', 'bad', 'success'])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from django.contrib import admin
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from .models import Container, ContainerImage, Mirror
from .models import ContainerBox, ContainerBoxContainers
from .forms import ContainerBoxContainersInlineForm
from opps.core.admin import PublishableAdmin, apply_opps_rules, BaseBoxAdmin
from opps.contrib.multisite.admin import AdminViewPermission
from opps.core.filters import ChannelListFilter, HasQuerySet
from opps.images.generate import image_url
from opps.fields.models import Field, FieldOption
@apply_opps_rules('containers')
class ContainerImageInline(admin.TabularInline):
model = ContainerImage
fk_name = 'container'
raw_id_fields = ['image']
sortable_field_name = "order"
actions = None
extra = 0
verbose_name = _(u"Container image")
verbose_name_plural = _(u"Container images")
fieldsets = [(None, {'fields': ('image', 'image_thumb',
'order', 'caption')})]
ordering = ('order',)
readonly_fields = ['image_thumb']
def image_thumb(self, obj):
if obj.image:
return u'<img width="60px" height="60px" src="{0}" />'.format(
image_url(obj.image.archive.url, width=60, height=60))
return _(u'No Image')
image_thumb.short_description = _(u'Thumbnail')
image_thumb.allow_tags = True
@apply_opps_rules('containers')
class ContainerBoxContainersInline(admin.StackedInline):
model = ContainerBoxContainers
form = ContainerBoxContainersInlineForm
fk_name = 'containerbox'
raw_id_fields = ['container', 'main_image']
sortable_field_name = "order"
actions = None
ordering = ('order',)
extra = 0
fieldsets = [(None, {
'classes': ('collapse',),
'fields': ('container', 'aggregate', 'highlight', 'order',
'date_available', 'date_end', 'hat', 'title',
'main_image', 'main_image_caption', 'url', 'url_target')})]
@apply_opps_rules('containers')
class ContainerAdmin(PublishableAdmin, AdminViewPermission):
prepopulated_fields = {"slug": ["title"]}
readonly_fields = ['get_http_absolute_url', 'short_url',
'in_containerboxes', 'image_thumb']
raw_id_fields = ['main_image', 'channel', 'mirror_channel']
ordering = ('-date_available',)
autocomplete_lookup_fields = {
'fk': ['channel'],
}
def get_list_filter(self, request):
list_filter = super(ContainerAdmin, self).list_filter
list_filter = [ChannelListFilter] + list(list_filter)
return list_filter
def save_model(self, request, obj, form, change):
super(ContainerAdmin, self).save_model(request, obj, form, change)
_json = {}
for field in Field.objects.filter(
application__contains=obj.__class__.__name__):
if field.type == 'checkbox':
for fo in FieldOption.objects.filter(field=field):
key = "{0}_{1}".format(field.slug, fo.option.slug)
_json[key] = request.POST.get('json_{0}'.format(key), '')
else:
_json[field.slug] = request.POST.get(
'json_{0}'.format(field.slug), '')
obj.json = json.dumps(_json)
obj.save()
@apply_opps_rules('containers')
class ContainerBoxAdmin(BaseBoxAdmin, AdminViewPermission):
inlines = [ContainerBoxContainersInline]
raw_id_fields = ['channel', 'queryset', 'main_image']
list_display = ['name', 'site', 'channel_name', 'date_available',
'published']
save_as = True
fieldsets = (
(_(u'Identification'), {
'fields': ('site', 'name', 'slug', 'title', 'title_url',
'main_image', 'main_image_caption')}),
(_(u'Relationships'), {
'fields': ('channel', 'queryset')}),
(_(u'Publication'), {
'classes': ('extrapretty'),
'fields': ('content_group', 'published', 'date_available')}),
)
autocomplete_lookup_fields = {
'fk': ['channel'],
}
def clean_ended_entries(self, request, queryset):
now = timezone.now()
for box in queryset:
ended = box.containerboxcontainers_containerboxes.filter(
date_end__lt=now
)
if ended:
ended.delete()
clean_ended_entries.short_description = _(u'Clean ended containers')
def get_list_display(self, request):
list_display = getattr(self, 'list_display', [])
if request.user.is_superuser:
return list_display + ['is_dynamic']
return list_display
def get_list_filter(self, request):
list_filter = super(ContainerBoxAdmin, self).list_filter
if request.user.is_superuser:
list_filter = [HasQuerySet] + list_filter
return list_filter
def is_dynamic(self, obj):
if obj.queryset:
return True
else:
return False
is_dynamic.short_description = _(u'Dynamic')
is_dynamic.boolean = True
actions = ('clean_ended_entries',)
class HideContainerAdmin(PublishableAdmin, AdminViewPermission):
list_display = ['image_thumb', 'get_child_class', 'title',
'channel_name', 'date_available',
'published']
readonly_fields = ['image_thumb']
def get_child_class(self, obj):
return _(obj.child_class)
get_child_class.short_description = _(u'Child class')
def get_model_perms(self, *args, **kwargs):
return {}
def has_add_permission(self, request):
return False
def get_list_filter(self, request):
list_filter = super(HideContainerAdmin, self).list_filter
list_filter = [ChannelListFilter] + list(list_filter)
return list_filter
def queryset(self, request):
qs = super(HideContainerAdmin, self).queryset(request)
# TODO: Document this
blacklist = getattr(settings, 'OPPS_CONTAINERS_BLACKLIST', [])
if blacklist:
qs = qs.exclude(child_class__in=blacklist)
return qs
admin.site.register(Container, HideContainerAdmin)
admin.site.register(ContainerBox, ContainerBoxAdmin)
admin.site.register(Mirror, HideContainerAdmin)
|
# File name: tile.py
# Author: Michael Chunko
# Python Version: 3.7
# This file contains the class representing a tile on a map
class Tile:
def __init__(self, blocked=True, block_sight=None, seen=False):
self.blocked = blocked
# By default, a blocked tile also blocks sight
if block_sight is None:
self.block_sight = blocked
else:
            self.block_sight = block_sight
self.seen = seen
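# A small usage sketch (the tiles below are hypothetical, not from the game map):
# by default a Tile blocks both movement and sight; passing block_sight explicitly
# decouples the two, e.g. a low fence that is blocked but can be seen over.
if __name__ == "__main__":
    wall = Tile()                                  # blocked, blocks sight
    fence = Tile(blocked=True, block_sight=False)  # blocked, does not block sight
    floor = Tile(blocked=False)                    # walkable, does not block sight
    print(wall.block_sight, fence.block_sight, floor.block_sight)  # True False False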
|
'''
TESS User Reducer
-----------------
This module provides functions to calculate user weights for the TESS project.
Extracts are from Caesar's `PluckFieldExtractor`.
'''
from .running_reducer_wrapper import running_reducer_wrapper
import numpy as np
@running_reducer_wrapper(relevant_reduction=True)
def tess_user_reducer(data, **kwargs):
'''Calculate TESS user weights
Parameters
----------
data : list
A list with one item containing the extract with the user's feedback on a
gold standard subject
store : keyword, dict
        A dictionary with two keys:
* `seed`: sum of all previous `seed` values
* `count`: sum of all previous gold standard transits seen
relevant_reduction : keyword, list
A list with one item containing the results of the current subject's stats reducer.
        This item is a dictionary with two keys:
* `True`: number of users who correctly identified the gold standard transits in the subject
* `False`: number of users who incorrectly identified the gold standard transits in the subject
Returns
-------
reduction : dict
        A dictionary with two keys:
* `data`: A dictionary with the `skill` value as the only item
* `store`: The updated store for the user
'''
success = [d['success'] for d in data[0]['feedback']]
store = kwargs.pop('store')
relevant_reduction = kwargs.pop('relevant_reduction')[0]
try:
d_subject = relevant_reduction['data']['difficulty']
except:
d_subject = 0
seed_current = (np.where(success, 2, -1) * d_subject).sum()
seed = store.get('seed', 0) + seed_current
count = store.get('count', 0) + len(success)
store = {
'seed': seed,
'count': count
}
c0 = 1
skill = c0 * pow((1.0 + np.log10(count)), (seed / count))
skill = min([3.0, max([0.05, skill])])
return {
'skill': skill,
'_store': store
}
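# A worked example of the skill formula above (made-up numbers, not TESS data):
# with c0 = 1, count = 10 gold-standard transits seen and an accumulated seed of 12,
#   skill = (1 + log10(10)) ** (12 / 10) = 2 ** 1.2 ≈ 2.297,
# which stays unchanged after clipping into the [0.05, 3.0] range.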
|
# Third-party dependencies fetched by Bazel
# Unlike WORKSPACE, the content of this file is unordered.
# We keep them separate to make the WORKSPACE file more maintainable.
# Install the nodejs "bootstrap" package
# This provides the basic tools for running and packaging nodejs programs in Bazel
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def fetch_dependencies():
http_archive(
name = "build_bazel_rules_nodejs",
sha256 = "e79c08a488cc5ac40981987d862c7320cee8741122a2649e9b08e850b6f20442",
urls = ["https://github.com/bazelbuild/rules_nodejs/releases/download/3.8.0/rules_nodejs-3.8.0.tar.gz"],
)
# rules_nodejs doesn't depend on skylib, but it's a useful dependency anyway.
http_archive(
name = "bazel_skylib",
urls = [
"https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
],
sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c",
)
|