#name_scan "d/yourdomain" 1 import sys, os #sys.path.append('/home/khal/sources/nmcontrol/lib/') import DNS import rpcClient import struct, listdns, base64, types, json, random #from jsonrpc import ServiceProxy from utils import * from common import * class Source(object): #def __init__(self): #self.servers = app['services']['dns'].conf['resolver'].split(',') #self.reqobj = DNS.Request() #jsonfile = open("config.json", "r") #data = json.loads(jsonfile.read()) #jsonfile.close() #username = str(data[u"username"]) #port = data[u"port"] #password = str(data[u"password"]) #self.sp = ServiceProxy("http://%(user)s:%(passwd)s@127.0.0.1:%(port)d" % dict(user=username, passwd=password, port=port)) #elf.sp = rpcClient.rpcClientNamecoin('127.0.0.1', port, username, password) #self.sp = app['plugins']['domain'] # def _parse_file(self): # f = open(self._filename, "r") # for line in f.readlines(): # line = line.strip() # if line and line[0] != '#': # question, type, value = line.split() # question = question.lower() # type = type.upper() # if question == '@': # question = '' # if type == 'A': # answer = struct.pack("!I", ipstr2int(value)) # qtype = 1 # if type == 'NS': # answer = labels2str(value.split(".")) # qtype = 2 # elif type == 'CNAME': # answer = labels2str(value.split(".")) # qtype = 5 # elif type == 'TXT': # answer = label2str(value) # qtype = 16 # elif type == 'MX': # preference, domain = value.split(":") # answer = struct.pack("!H", int(preference)) # answer += labels2str(domain.split(".")) # qtype = 15 # self._answers.setdefault(question, {}).setdefault(qtype, []).append(answer) # f.close() def isIP(self, host) : parts = host.split(".") if len(parts) != 4: return False try : valid = False for part in parts : intpart = int(part) if intpart <= 255 and intpart >= 0 : valid = True else : return False if valid : return True return False except : return False def get_response(self, query, domain, qtype, qclass, src_addr): #print query #print domain #print qtype #print qclass #print src_addr if qtype == 1: #answer = struct.pack("!I", ipstr2int(value)) reqtype = "A" if qtype == 2: #answer = labels2str(value.split(".")) reqtype = "NS" elif qtype == 5: #answer = labels2str(value.split(".")) reqtype = "CNAME" elif qtype == 16: #answer = label2str(value) reqtype = "TXT" elif qtype == 15: #preference, domain = value.split(":") #nswer = struct.pack("!H", int(preference)) #answer += labels2str(domain.split(".")) reqtype = "MX" elif qtype == 28: #answer = struct.pack("!I", ipstr2int(value)) reqtype = "AAAA" elif qtype == 52: reqtype = "TLSA" else : reqtype = None answers = app['services']['dns'].lookup({"query":query, "domain":domain, "qtype":qtype, "qclass":qclass, "src_addr":src_addr}) #print 'domain:', domain #print 'answers:', answers if domain.endswith(".bit") or domain.endswith(".tor") : #response = listdns.lookup(self.sp, {"query":query, "domain":domain, "qtype":qtype, "qclass":qclass, "src_addr":src_addr}) #response = self.sp.lookup({"query":query, "domain":domain, "qtype":qtype, "qclass":qclass, "src_addr":src_addr}) response = answers results = [] if type(response) == types.DictType : tempresults = {"qtype":response["type"], "qclass":response["class"], "ttl":response["ttl"]} if response["type"] == 1 : #if answers == [] : # return self.get_response(query, domain, 5, qclass, src_addr) tempresults["rdata"] = struct.pack("!I", ipstr2int(response["data"])) elif response["type"] == 2 or response["type"] == 5: tempresults["rdata"] = labels2str(response["data"].split(".")) elif response["type"] == 16 : 
tempresults["rdata"] = labels2str(response["data"]) elif response["type"] == 15 : tempresult = struct.pack("!H", response["data"][0]) tempresult += labels2str(response["data"][1].split(".")) tempresults["rdata"] = tempresult elif response["type"] == 28 : tempresults["rdata"] = response["data"] elif response["type"] == 52 : tempresult = '\x03\x00' tempresult += chr(int(response["data"][0][0])) tempresult += bytearray.fromhex(response["data"][0][1]) tempresults["rdata"] = tempresult #else : return 3, [] results.append(tempresults) return 0, results if type(response) == types.StringType : if self.isIP(response) : return 0, [{"qtype":1, "qclass":qclass, "ttl":300, "rdata":struct.pack("!I", ipstr2int(response))}] return 3, [] #if query not in self._answers: #return 3, [] #if qtype in self._answers[query]: #if domain == "sonicrules.bit": # results = [{'qtype': 1, 'qclass':qclass, 'ttl': 300, 'rdata': struct.pack("!I", ipstr2int(self.reqobj.req("sonicrules.org", qtype=1).answers[0]["data"]))}] # return 0, results #elif qtype == 1: # if they asked for an A record and we didn't find one, check for a CNAME #return self.get_response(query, domain, 5, qclass, src_addr) else: #server = self.servers[random.randrange(0, len(self.servers)-1)] #answers = self.reqobj.req(name=domain, qtype=qtype, server=server).answers results = [] for response in answers : tempresults = {"qtype":response["type"], "qclass":response["class"], "ttl":response["ttl"]} if response["type"] == 1 : if answers == [] : return self.get_response(query, domain, 5, qclass, src_addr) tempresults["rdata"] = struct.pack("!I", ipstr2int(response["data"])) elif response["type"] == 2 or response["type"] == 5: tempresults["rdata"] = labels2str(response["data"].split(".")) elif response["type"] == 16 : tempresults["rdata"] = labels2str(response["data"]) elif response["type"] == 15 : tempresult = struct.pack("!H", response["data"][0]) tempresult += labels2str(response["data"][1].split(".")) tempresults["rdata"] = tempresult elif response["type"] == 28 : if answers == [] : return self.get_response(query, domain, 5, qclass, src_addr) #tempresults["rdata"] = struct.pack("!I", ipstr2int(response["data"])) tempresults["rdata"] = response["data"] elif response["type"] == 52 : tempresults["rdata"] = response["data"] #else : return 3, [] results.append(tempresults) return 0, results return 3, []
"""Tests for the :mod:`~polymatheia.data.writer` package.""" import json import os from shutil import rmtree from polymatheia.data import NavigableDict from polymatheia.data.writer import JSONWriter DOCUMENTS = [NavigableDict(r) for r in [ { 'id': '1', 'name': { 'first': 'A', 'last': 'Person' }, 'age': 32, 'special tags': 'The first' }, { 'id': '2', 'name': { 'first': ['Another', {'abbr': 'Nameless'}], 'last': 'Parrot' }, 'age': 23, }, { 'id': '3', 'name': { 'first': 'The', 'last': 'Last' }, 'age': 65, }, ]] def test_local_json_writing(): """Test writing to the local filesystem.""" rmtree('tmp/json_writer_test', ignore_errors=True) writer = JSONWriter('tmp/json_writer_test', 'id') writer.write(DOCUMENTS) count = 0 for basepath, _, filenames in os.walk('tmp/json_writer_test'): for filename in filenames: if filename.endswith('.json'): count = count + len(filenames) with open(os.path.join(basepath, filename)) as in_f: doc = json.load(in_f) assert 'id' in doc assert 'name' in doc if doc['id'] == '2': assert 'first' in doc['name'] assert len(doc['name']['first']) == 2 else: assert 'first' in doc['name'] assert 'last' in doc['name'] assert 'age' in doc if doc['id'] == '1': assert 'special tags' in doc assert count == 3 def test_local_json_writing_pre_split_id_path(): """Test writing to the local filesystem.""" rmtree('tmp/json_writer_test', ignore_errors=True) writer = JSONWriter('tmp/json_writer_test', ['id']) writer.write(DOCUMENTS) count = 0 for basepath, _, filenames in os.walk('tmp/json_writer_test'): for filename in filenames: if filename.endswith('.json'): count = count + len(filenames) with open(os.path.join(basepath, filename)) as in_f: doc = json.load(in_f) assert 'id' in doc assert 'name' in doc if doc['id'] == '2': assert 'first' in doc['name'] assert len(doc['name']['first']) == 2 else: assert 'first' in doc['name'] assert 'last' in doc['name'] assert 'age' in doc if doc['id'] == '1': assert 'special tags' in doc assert count == 3
# -*- coding: utf-8; -*- from __future__ import unicode_literals, absolute_import import json import requests import six from tests import unittest, mock from freight_forwarder.registry import Registry, V1, V2 from freight_forwarder.registry.registry_base import RegistryBase, RegistryException from ..factories.registry_factory import RegistryV1Factory, RegistryV2Factory class RegistryTest(unittest.TestCase): def setUp(self): pass def tearDown(self): pass @mock.patch.object(V1, '_validate_response', autospec=True, return_value=True) @mock.patch('freight_forwarder.registry.registry_base.requests', autospec=True) def test_registry_v1_init(self, mock_requests, mock_v1_validate): test_registry = Registry() self.assertIsInstance(test_registry, RegistryBase) self.assertEquals(test_registry.ping(), True) @mock.patch.object(V1, '_validate_response', name="v1_validate") @mock.patch.object(V2, '_validate_response', name="v2_validate") @mock.patch('freight_forwarder.registry.registry_base.requests', autospec=True) def test_registry_v2_init(self, mock_requests, mock_v2, mock_v1): mock_v1.side_effect = RegistryException("test") mock_v2.return_value = True test_v1_registry = RegistryV1Factory() test_v2_registry = RegistryV2Factory() # This is stated to ensure the test environment is setup correctly # validated v1.ping() returns an exception with self.assertRaises(RegistryException): test_v1_registry.ping() # validated v2.ping() returns an exception self.assertEquals(test_v2_registry.ping(), True) # Validate the logic of the registry class to return a V2 object test_registry = Registry(address="https://v2.dockertest.io") self.assertIsInstance(test_registry, RegistryBase) class RegistryV1Test(unittest.TestCase): def setUp(self): pass def tearDown(self): pass @mock.patch.object(V1, '_validate_response', return_value=True) @mock.patch.object(V1, '_request_builder') @mock.patch('freight_forwarder.registry.registry_base.requests', autospec=True) def test_v1_search(self, mock_requests, mock_request_builder, mock_validate_response): # Defined Search Request a search_response_content = { "num_results": 3, "query": "test", "results": [ {"description": "api test app", "name": "testproject/test-app"}, {"description": "database test app", "name": "testproject/test-db"}, {"description": "cache test app", "name": "testproject/test-cache"} ] } # Define Response Value for content once request has been validated mock_request_builder.return_value = create_response_object( url="https://search.registry.docker.com", status_code=200, content=json.dumps(search_response_content).encode('utf-8') ) # Define Default value for utils _validate_reponse mock_validate_response.return_value = True # Build V1 Factory Registry test_registry = RegistryV1Factory(address='https://search.registry.docker.com') results = test_registry.search("test") self.assertIsInstance(results, dict) @mock.patch.object(V1, '_validate_response', return_value=True) @mock.patch.object(V1, '_request_builder') @mock.patch('freight_forwarder.registry.registry_base.requests', autospec=True) def test_v1_tags(self, mock_requests, mock_request_builder, mock_validate_response): tag_response_content = { "0.1": "3fad19bfa2", "latest": "xxxxxxxxxx", "localtest": "xxxxxxxxxxxxxxae13", "redis123123": "xxxxxxxxxxxxxxae132", "jira1268": "xxxxxxxxxxxxxxae1324987" } formatted_output = [ 'appexample/test-app:0.1', 'appexample/test-app:latest', 'appexample/test-app:us-east-01-dev', 'appexample/test-app:localtest', 'appexample/test-app:redis123123', 'appexample/test-app:jira1268' ] 
mock_request_builder.return_value = create_response_object( url="https://tag.registry.docker.com", status_code=200, content=json.dumps(tag_response_content).encode('utf-8') ) mock_validate_response.return_value = True test_registry = RegistryV1Factory(address='https://tag.registry.docker.com') for tag in test_registry.tags("appexample/test-app"): tag_output = "".join(tag) self.assertIsInstance(tag_output, six.string_types) self.assertIn(tag_output, formatted_output) def test_delete_tag(self): self.skipTest("Implemented but not used") def test_delete(self): self.skipTest("Implemented but not used") def test_get_image_by_id(self): self.skipTest("Implemented but not used") def test_get_image_id_by_tag(self): self.skipTest("Implemented but not used") def set_image_tag(self): self.skipTest("Implemented but not used") class RegistryV2Test(unittest.TestCase): def setUp(self): pass def tearDown(self): pass @mock.patch.object(V2, '_validate_response', name='mock_v2_validate_response', return_value=True) @mock.patch.object(V2, '_request_builder', name='mock_v2_request_builder') @mock.patch('freight_forwarder.registry.registry_base.requests', autospec=True) def test_v2_search(self, mock_requests, mock_request_builder, mock_validate_response): # Defined Search Request search_response_content = json.dumps({"repositories": ["appexample/test-app", "appexample/test-db", "appexample/test-cache"]}).encode('utf-8') response = create_response_object(url="https://v2search.registry.docker.com", status_code=200, content=search_response_content) # Define Response Value for content once request has been validated mock_request_builder.return_value = response # Define Default value for utils _validate_response mock_validate_response.return_value = True # Build V1 Factory Registry test_registry = RegistryV2Factory(address='https://v2search.registry.docker.com') test_registry.search("test") for search in test_registry.search("test"): search_output = "".join(search) self.assertIsInstance(search_output, six.string_types) @mock.patch.object(V2, '_validate_response', name='mock_v2_validate_response', return_value=True) @mock.patch.object(V2, '_request_builder', name='mock_v2_request_builder') @mock.patch('freight_forwarder.registry.registry_base.requests', autospec=True) def test_v2_tags(self, mock_requests, mock_request_builder, mock_validate_response): tag_response_content = json.dumps({"name": "appexample/test-app", "tags": [ "latest", "0.0.15", "asdfasb81"] } ).encode('utf-8') formatted_output = ['appexample/test-app:latest', 'appexample/test-app:0.0.15', 'appexample/test-app:asdfasb81'] response = create_response_object(url="https://v2tags.registry.docker.com", status_code=200, content=tag_response_content) mock_request_builder.return_value = response mock_validate_response.return_value = True test_registry = RegistryV2Factory(address='https://v2tags.registry.docker.com') for tags in test_registry.tags("appexample/test-app"): tag_output = "".join(tags) self.assertIsInstance(tag_output, six.string_types) self.assertIn(tag_output, formatted_output) def test_blobs(self): self.skipTest("Not implemented") def test_catalog(self, count=None, last=None): self.skipTest("Not implemented") def test_manifests(self): self.skipTest("Not implemented") class RegistryBaseTests(unittest.TestCase): def setUp(self): self.patch_requests = mock.patch('freight_forwarder.registry.registry_base.requests', autospec=True) self.patch_requests.start() self.test_registry = RegistryV1Factory(address="https://registrybasetest.docker.com") def 
tearDown(self): self.patch_requests.stop() del self.test_registry def test_ping(self): self.skipTest("Defined as abc method. Override in class") def test_tags(self): self.skipTest("Defined as abc method. Override in class") def test_init(self): self.assertEquals(self.test_registry.scheme, 'https://') self.assertEquals(self.test_registry.location, 'registrybasetest.docker.com') self.assertEquals(self.test_registry.auth, None) self.assertEquals(self.test_registry.__str__(), "https://registrybasetest.docker.com") self.assertIsInstance(self.test_registry, RegistryBase) def test_registry_base_auth_base_functionality(self): self.assertEquals(self.test_registry.auth, None) with self.assertRaises(TypeError): self.test_registry.auth = ["user=test_user", "passwd=password"] def test_registry_base_auth_with_auth(self): pass class RegistryExceptionTest(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_exception_with_status_code_and_url(self): response = create_response_object(url="https://bad.docker.io", status_code=503, content={"test": "data"}) registry_exception = RegistryException(response) self.assertIsInstance(registry_exception, RegistryException) self.assertEquals(registry_exception.response.status_code, 503) def test_exception_with_no_content(self): response = create_response_object(url="https://nocontent.docker.io", status_code=503) registry_exception = RegistryException(response) self.assertIsInstance(registry_exception, RegistryException) self.assertEquals(registry_exception.message, 'There was an issue with the request to the docker registry.') def test_exception_with_error_content(self): # TODO - grab a properly formatted error for testing response = create_response_object(url="https://errorcontent.docker.io", status_code=500, content=json.dumps({'error': 'Docker Registry Error Example'})) registry_exception = RegistryException(response) self.assertIsInstance(registry_exception, RegistryException) self.assertEquals(registry_exception.message, 'Docker Registry Error Example') # Test the class.__str__ MagicMethod self.assertEquals("{0}".format(registry_exception), 'Docker Registry Error Example') def create_response_object(url, status_code, content=None): """ The function generates a mock object that is properly formatted for the RegistryException and validates the input :param url: url to pass through for the mock request object :param status_code: status code to append to the response object :param content: **required** if not provided, this attribute will be blocked :return: Parent Mock: request.Reponse Child Mock: request - requests.PreparedRequest """ if not isinstance(url, six.string_types): raise(TypeError("incorrect type provided for url")) if not isinstance(status_code, six.integer_types): raise(TypeError("incorrect type provided for http status code")) mock_object_request = mock.MagicMock(spec=requests.PreparedRequest, url=url) mock_object_response = mock.MagicMock(spec=requests.Response, request=mock_object_request) mock_object_response.status_code = status_code if content: mock_object_response.content = content else: # this blocks the content attribute from being present del mock_object_response.content return mock_object_response def format_image_results(registry_response_dict): """ Response attribute content is formatted correctly for the Images :param response: response object with content attribute :return: dict of various images """ if not isinstance(registry_response_dict, dict): raise TypeError('registry_response_dict must be a dict.') images = 
{} results = registry_response_dict.get('results') if results: for image in results: images[image.get('name')] = image return images
__all__ = ["loss_fn"] from icevision.imports import * def loss_fn(preds, targets) -> torch.Tensor: return preds["loss"]
# Generated by Django 3.2.4 on 2021-09-11 12:44

import ckeditor_uploader.fields
from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('blog', '0007_subscriber'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='content',
            field=ckeditor_uploader.fields.RichTextUploadingField(),
        ),
    ]
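# Sketch of the model state implied by the migration above, assuming a blog app
# with a Post model; only the `content` field comes from the AlterField
# operation itself, everything else here is an assumption for illustration.
from ckeditor_uploader.fields import RichTextUploadingField
from django.db import models


class Post(models.Model):
    content = RichTextUploadingField()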
import unittest

from numpy.testing import assert_allclose

from qspectra import polarization
from qspectra.simulate import decorators


class TestGetCallArgs(unittest.TestCase):
    def test(self):
        self.assertEqual(
            decorators._get_call_args(lambda a: None, 1),
            {'a': 1})
        self.assertEqual(
            decorators._get_call_args(lambda a, **b: None, 1),
            {'a': 1})
        self.assertEqual(
            decorators._get_call_args(lambda a, **b: None, a=1, c=2),
            {'a': 1, 'c': 2})
        self.assertEqual(
            decorators._get_call_args(lambda **b: None, a=1, c=2),
            {'a': 1, 'c': 2})
        with self.assertRaises(NotImplementedError):
            decorators._get_call_args(lambda *a: None, 1, 2, 3)


class TestIsotropicAverage(unittest.TestCase):
    def test_optional_2nd_order_isotropic_average(self):
        binary = {'xx': 1, 'yy': 2, 'zz': 4}
        f = decorators.optional_2nd_order_isotropic_average(
            lambda polarization: (0, binary[polarization]))
        assert_allclose(f('xx'), (0, 1))
        assert_allclose(f('xx', exact_isotropic_average=False), (0, 1))
        assert_allclose(f('xx', exact_isotropic_average=True), (0, 7 / 3.0))
        assert_allclose(f('xy', exact_isotropic_average=True), (0, 0))
        with self.assertRaises(ValueError):
            # wrong number of polarizations
            f('xyz', exact_isotropic_average=True)

    def test_optional_4th_order_isotropic_average(self):
        binary = {'xx': 1, 'yy': 2, 'zz': 4}
        f = decorators.optional_4th_order_isotropic_average(
            lambda polarization: (0, binary[polarization[:2]] +
                                  10 * binary[polarization[2:]]))
        assert_allclose(f('xxxx'), (0, 11))
        ma = polarization.MAGIC_ANGLE
        assert_allclose(
            f([0, 0, ma, ma], exact_isotropic_average=True),
            (0, (11 + 12 + 14 + 21 + 22 + 24 + 41 + 42 + 44) / 9.0))
        with self.assertRaises(ValueError):
            # wrong number of polarizations
            f('xyz', exact_isotropic_average=True)
# proxy module
from traitsui.editors.check_list_editor import *
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 29 16:16:57 2019

@author: rakshit
"""
import os
import cv2
import argparse
import matplotlib
import numpy as np
import deepdish as dd
import scipy.io as scio

print('Extracting Santini')
parser = argparse.ArgumentParser()
parser.add_argument('--noDisp', help='Specify flag to display labelled images', type=int)
parser.add_argument('--path2ds', help='Path to dataset', type=str)
args = parser.parse_args()

if args.noDisp:
    noDisp = True
    print('No graphics')
else:
    noDisp = False
    print('Showing figures')

gui_env = ['Qt5Agg', 'WXAgg', 'TKAgg', 'GTKAgg']
for gui in gui_env:
    try:
        print("testing: {}".format(gui))
        matplotlib.use(gui, warn=False, force=True)
        from matplotlib import pyplot as plt
        break
    except:
        continue

print("Using: {}".format(matplotlib.get_backend()))
plt.ion()

args.path2ds = '/media/rakshit/tank/Dataset'
PATH_DIR = os.path.join(args.path2ds, 'Santini')
PATH_DS = os.path.join(args.path2ds, 'All')
PATH_MASTER = os.path.join(args.path2ds, 'MasterKey')
list_ds = ['1', '2', '3', '4', '5', '6']

sc = (640.0/384.0)
Image_counter = 0.0
ds_num = 24


def mypause(interval):
    backend = plt.rcParams['backend']
    if backend in matplotlib.rcsetup.interactive_bk:
        figManager = matplotlib._pylab_helpers.Gcf.get_active()
        if figManager is not None:
            canvas = figManager.canvas
            if canvas.figure.stale:
                canvas.draw()
            canvas.start_event_loop(interval)
            return


def fix_pupil_loc(p, res):
    # res: [H, W]
    p[0] = 0.5*p[0]
    p[1] = res[0] - 0.5*p[1]
    return p


def readFormattedText(path2file, ignoreLines):
    data = []
    count = 0
    f = open(path2file, 'r')
    for line in f:
        d = [int(d) for d in line.split() if d.isdigit()]
        count = count + 1
        if d and count > ignoreLines:
            data.append(d)
    f.close()
    return data


for name in list_ds:
    # Ignore the first row and column.
    # Columns: [index, p_x, p_y]
    opts = os.listdir(os.path.join(PATH_DIR, name))
    for subdir in opts:
        PATH_DATA = os.path.join(PATH_DIR, name, subdir)

        # Read pupil data
        Path2text = os.path.join(PATH_DATA, 'journal-{:04d}.txt'.format(int(subdir)-1))
        Path2vid = os.path.join(PATH_DATA, 'eye-{:04d}-0000.avi'.format(int(subdir)-1))
        PupilData = np.array(readFormattedText(Path2text, 2))
        VidObj = cv2.VideoCapture(Path2vid)

        keydict = {k: [] for k in ['pupil_loc', 'archive', 'data_type', 'resolution', 'dataset', 'subset']}

        # Generate empty dictionaries
        keydict['data_type'] = 0  # Only pupil center available
        keydict['resolution'] = []
        keydict['dataset'] = 'Santini'
        keydict['subset'] = '{}-{}'.format(name, subdir)

        # Create an empty dictionary as per agreed structure
        Data = {k: [] for k in ['Images', 'Info', 'Masks', 'Masks_noSkin', 'Fits', 'pupil_loc']}
        Data['Fits'] = {k: [] for k in ['pupil', 'pupil_norm', 'pupil_phi', 'iris', 'iris_norm', 'iris_phi']}

        if not noDisp:
            fig, plts = plt.subplots(1, 1)
        fr_num = 0
        while(VidObj.isOpened()):
            ret, I = VidObj.read()
            if ret == True:
                I = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)
                I = cv2.resize(I, (640, 480), cv2.INTER_LANCZOS4)

                Data['Images'].append(I)
                keydict['resolution'].append(I.shape)
                keydict['archive'].append(ds_num)

                pupil_loc = fix_pupil_loc(PupilData[fr_num, 10:12]*sc, I.shape)
                keydict['pupil_loc'].append(pupil_loc)
                Data['pupil_loc'].append(pupil_loc)
                Data['Info'].append(str(fr_num))
                fr_num += 1
                Image_counter += 1
                if not noDisp:
                    if fr_num == 1:
                        cI = plts.imshow(I)
                        cX = plts.scatter(pupil_loc[0], pupil_loc[1])
                        plt.show()
                        plt.pause(.01)
                    else:
                        newLoc = np.array([pupil_loc[0], pupil_loc[1]])
                        cI.set_data(I)
                        cX.set_offsets(newLoc)
                        mypause(0.01)
            else:
                # No more frames to load
                break

        Data['Images'] = np.stack(Data['Images'], axis=0)
        Data['pupil_loc'] = np.stack(Data['pupil_loc'], axis=0)
        keydict['pupil_loc'] = np.stack(keydict['pupil_loc'], axis=0)
        keydict['resolution'] = np.stack(keydict['resolution'], axis=0)
        keydict['archive'] = np.stack(keydict['archive'], axis=0)

        # Save out data
        dd.io.save(os.path.join(PATH_DS, str(ds_num)+'.h5'), Data)
        scio.savemat(os.path.join(PATH_MASTER, str(ds_num)), keydict, appendmat=True)
        ds_num = ds_num + 1
from django.http import HttpResponse from django.contrib.auth.models import User from django.contrib.auth import authenticate, login, logout from django.shortcuts import redirect from django.core.validators import URLValidator # https://stackoverflow.com/questions/7160737/python-how-to-validate-a-url-in-python-malformed-or-not from django.core.exceptions import ValidationError, ObjectDoesNotExist from django.db.models import Max, Count from app.models import Reference, Tools, Reports, Tasks, TasksStats import io import re import six import uuid import hashlib import simplejson #https://pybtex.org/ from pybtex.database import parse_string as parse_reference_string import pybtex.database.input.bibtex import pybtex.plugin # Globals pybtex_style = pybtex.plugin.find_plugin('pybtex.style.formatting', 'plain')() pybtex_html_backend = pybtex.plugin.find_plugin('pybtex.backends', 'html')() pybtex_parser = pybtex.database.input.bibtex.Parser() sep = '||' sep2 = '@@' format_time_string = '%a, %d %b %Y %H:%M:%S' # RFC 2822 Internet email standard. https://docs.python.org/2/library/time.html#time.strftime # '%Y-%m-%d, %H:%M:%S' url_validator = URLValidator() # https://stackoverflow.com/questions/7160737/python-how-to-validate-a-url-in-python-malformed-or-not class ArkalosException(Exception): pass def get_guid(): ''' Create a new guid ''' return str(uuid.uuid4()) def get_user_id(request): ''' Get id of user ''' is_authenticated = request.user.is_authenticated() if is_authenticated: return request.user.id return None def get_user(request): ''' Get user object ''' is_authenticated = request.user.is_authenticated() if is_authenticated: return request.user return None def fail(error_message=None): ''' Failed AJAX request ''' ret = {'success': False, 'error_message': error_message} json = simplejson.dumps(ret) return HttpResponse(json, content_type='application/json') def success(data={}): ''' success Ajax request ''' data['success'] = True json = simplejson.dumps(data) return HttpResponse(json, content_type='application/json') def has_data(f): ''' Decorator that passes AJAX data to a function parameters ''' def wrapper(*args, **kwargs): request = args[0] if request.method == 'POST': if len(request.POST): for k in request.POST: kwargs[k] = request.POST[k] else: POST = simplejson.loads(request.body) for k in POST: kwargs[k] = POST[k] elif request.method == 'GET': for k in request.GET: kwargs[k] = request.GET[k] print ("GET: {} == {}".format(k, kwargs[k])) return f(*args, **kwargs) return wrapper def has_field(field_names, errors): ''' Check if field names are present field_name: The field to check ''' def decorator(f): def wrapper(*args, **kwargs): for field_index, field_name in enumerate(field_names): if not field_name in kwargs: if callable(errors): kwargs['error'] = errors(field_name) elif type(errors) is list: kwargs['error'] = errors[field_index] elif type(errors) is dict: kwargs['error'] = errors[field_name] elif type(errors) is str: kwargs['error'] = errors else: # This should never happen raise ArkalosException('Unknown error type: {}'.format(type(error).__name__)) return f(*args, **kwargs) return f(*args, **kwargs) return wrapper return decorator def has_error(f): ''' Check if error in kwargs ''' def wrapper(*args, **kwargs): if 'error' in kwargs: return fail(kwargs['error']) return f(*args, **kwargs) return wrapper def username_exists(username): ''' Checks if a username exists ''' return User.objects.filter(username=username).exists() def URL_validate(url): ''' 
https://stackoverflow.com/questions/7160737/python-how-to-validate-a-url-in-python-malformed-or-not ''' try: url_validator(url) except ValidationError as e: return False return True def format_time(t): ''' Universal method to string format time vars ''' return t.strftime(format_time_string) ########################################################################### ##################DATABASE FUNCTIONS####################################### ########################################################################### def bootstrap_table_format_field(entry, value): ''' Formats the field of a bootstrap table. Values are taken from bidings ''' if type(value) is str: if type(entry) is dict: return entry[value] else: return getattr(entry, value) elif callable(value): return value(entry) def serve_boostrap_table2(model, query_f, filters, bindings, **kwargs): ''' count_f = Tools.objects.values('name', 'url').annotate(Count('name')).count() query_f = Tools.objects.values('name', 'url').annotate(Count('name')) IT DOES NOT USE count_f ! ''' #count = count_f() order = kwargs['order'] offset = kwargs['offset'] limit = kwargs['limit'] from_offset = int(offset) to_offset = from_offset + int(limit) if 'filter' in kwargs: # "read" the filter filter_ = kwargs['filter'] filter_ = simplejson.loads(filter_) print ("Filter:") print (filter_) applied_filters = {filters[f][0](): filters[f][1](f_value) for f, f_value in filter_.items() if f in filters} print ("Applied filters:") print (applied_filters) else: applied_filters = {} querySet = query_f(applied_filters) count = querySet.count() querySet = querySet[from_offset:to_offset] ret = {'total': count} ret['rows'] = [ {k: bootstrap_table_format_field(entry, v) for k, v in bindings.items()} for entry in querySet] json = simplejson.dumps(ret) return HttpResponse(json, content_type='application/json') def serve_boostrap_table(model, bindings, order_by, **kwargs): ''' http://bootstrap-table.wenzhixin.net.cn/ ''' count = model.objects.count() order = kwargs['order'] offset = kwargs['offset'] limit = kwargs['limit'] from_offset = int(offset) to_offset = from_offset + int(limit) if 'filter' in kwargs: filter_ = kwargs['filter'] filter_ = simplejson.loads(filter_) filter_ = { bindings[k] + '__icontains':v for k,v in filter_.items()} querySet = model.objects.filter(**filter_) count = querySet.count() querySet = querySet[from_offset:to_offset] else: querySet = model.objects.order_by(order_by)[from_offset:to_offset] ret = {'total': count} ret['rows'] = [ {k: bootstrap_table_format_field(entry, v) for k, v in bindings.items()} for entry in querySet] json = simplejson.dumps(ret) return HttpResponse(json, content_type='application/json') def db_exists(model, filters): ''' Does this entry exist? ''' return model.objects.filter(**filters).exists() def get_maximum_current_version(model, name): ''' Return the next available current_version ''' max_entry = model.objects.filter(name=name).aggregate(Max('current_version')) if max_entry['current_version__max'] is None: return 1 assert type(max_entry) is dict assert len(max_entry) == 1 return max_entry['current_version__max'] + 1 def build_jstree_tool_dependencies(tool, prefix='', include_original=False): ''' Build the dependency jstree of this tool include_original are we including the original tool in the jstree? 
''' def node(t): ret = { 'id': prefix + sep + t.name + sep + str(t.current_version), #Through this id we get info from jstree jandlers 'text': t.name + ' ' + str(t.current_version), 'children': [build_jstree_tool_dependencies(x, prefix, include_original=True) for x in t.dependencies.all()] + \ [{'text': x[0], 'type': 'exposed', 'value': x[1], 'description': x[2], 'id': prefix+sep+x[0]+sep+t.name+sep2+str(t.current_version)} for x in simplejson.loads(t.exposed)], 'current_version': t.current_version, 'name': t.name, 'type': 'tool', } return ret if include_original: return node(tool) else: return [node(dependent_tool) for dependent_tool in tool.dependencies.all()] def build_jstree(model, name, prefix=''): ''' Take an entry that has a previous_version and current_version Build a jstree compatible structure ''' index = {} if prefix: prefix_to_add = prefix + sep else: prefix_to_add = '' def node(o): current_version = o.current_version ret = { 'id': prefix_to_add + o.name + sep + str(o.current_version), 'text': o.name + ' ' + str(o.current_version), 'children': [], 'current_version': o.current_version, 'name': o.name } index[current_version] = ret return ret ret = [] all_objects = model.objects.filter(name=name).order_by("current_version") #ret.append(node(all_objects[0])) for o in all_objects: previous_version = o.previous_version if previous_version is None: ret.append(node(o)) else: this_node = node(o) index[previous_version]['children'].append(this_node) #print (simplejson.dumps(ret)) return ret ########################################################################### ##################END OF DATABASE####################################### ########################################################################### ########################################################################### ################## REGISTER ############################################### ########################################################################### @has_data @has_field(['username', 'password', 'password_confirm', 'email'], lambda x :'{} is required'.format(x)) @has_error def register(request, **kwargs): ''' Register ''' #print (kwargs) username = kwargs['username'] password = kwargs['password'] password_confirm = kwargs['password_confirm'] email = kwargs['email'] #Check if this user exists if username_exists(username): return fail('Username {} exists'.format(username)) #Check if password match if kwargs['password'] != kwargs['password_confirm']: return fail('Passwords do not match') #Create user user = User.objects.create_user(username, email, password) return success({}) @has_data @has_field(['username', 'password'], lambda x :'{} is required'.format(x)) @has_error def loginlocal(request, **kwargs): ''' Function called from login ''' username = kwargs['username'] password = kwargs['password'] user = authenticate(username=username, password=password) if user is None: return fail('Invalid username or password') #if user.is_active: ... 
# https://docs.djangoproject.com/en/1.9/topics/auth/default/ login(request, user) ret = {'username': username} return success(ret) def logoutlocal(request): ''' logout ''' logout(request) return redirect('/') ########################################################################### ################## END OF REGISTER ######################################## ########################################################################### ############################### ####REFERENCES################# ############################### def reference_get_fields(content): ''' Get the code of the bibtex entry ''' p = parse_reference_string(content, 'bibtex') p_len = len(p.entries) if p_len == 0: return False, 'Could not find BIBTEX entry' if p_len > 1: return False, 'More than one BIBTEX entries found' code = p.entries.keys()[0] if not 'title' in p.entries[code].fields: return False, 'Could not find title information' title = p.entries[code].fields['title'] if not hasattr(p.entries[code], 'persons'): return False, 'Could not find author information' if not 'author' in p.entries[code].persons: return False, 'Could not find author information' if len(p.entries[code].persons['author']) == 0: return False, 'Could not find author information' authors = sep.join([str(x) for x in p.entries[code].persons['author']]) return True, {'code': code, 'title': title, 'authors': authors} def bibtex_to_html(content): ''' Convert bibtex to html Adapted from: http://pybtex-docutils.readthedocs.io/en/latest/quickstart.html#overview ''' data = pybtex_parser.parse_stream(six.StringIO(content)) data_formatted = pybtex_style.format_entries(six.itervalues(data.entries)) output = io.StringIO() pybtex_html_backend.write_to_stream(data_formatted, output) html = output.getvalue() html_s = html.split('\n') html_s = html_s[9:-2] new_html = '\n'.join(html_s).replace('<dd>', '').replace('</dd>', '') return new_html @has_data @has_field(['content'], 'BIBTEX content is required') @has_error def add_reference(request, **kwargs): ''' Add reference ''' content = kwargs['content'] s, fields = reference_get_fields(content) if not s: return fail(fiels) if db_exists(Reference, {'code': fields['code']}): return fail('BIBTEX entry with code {} already exists'.format(code)) html = bibtex_to_html(content) r = Reference( user=get_user(request), code=fields['code'], title=fields['title'], authors=fields['authors'], content=content, reference_type='BIBTEX', html = html, ) r.save() return success() @has_data def get_references(request, **kwargs): ''' Serve GET Request for References bootstrap table ''' bindings = { 'id': 'code', 'content': 'html', } return serve_boostrap_table(Reference, bindings, 'id', **kwargs) @has_data @has_error def get_reference(request, **kwargs): ''' Get reference ''' codes = kwargs['codes'] ret = {'data': {}, 'html': []} c = 0 for code in codes: try: ref = Reference.objects.get(code=code) c += 1 ret['data'][code] = {'counter': c} ret['html'].append({'html': ref.html}) except ObjectDoesNotExist: pass ret['total'] = c return success(ret) @has_data def reference_suggestions(request, **kwargs): ''' Get called from tagas input ''' query = kwargs['query'] querySet = Reference.objects.filter(content__icontains = query)[:10] ret = [ {'value' : entry.code, 'html': entry.html} for entry in querySet] # We have a html representation for each Reference json = simplejson.dumps(ret) return HttpResponse(json, content_type='application/json') def get_references_from_text(text): ''' Get all reference objects from a text. 
This is useful for the report ''' ret = [] all_brackets = re.findall(r'\[[\w]+\]', text) for bracket in all_brackets: #Remove brackets code = bracket[1:-1] #Check if this a real reference try: ref = Reference.objects.get(code=code) except ObjectDoesNotExist: pass else: ret += [ref] return ret ############################### ######END OF REFERENCES######## ############################### ################################# #### REPORTS #################### ################################# @has_data def get_reports(request, **kwargs): ''' Serve bootstrap table for reports ''' bindings = { 'name': 'name', #'total_edits': lambda entry: entry['name__count'], 'content': lambda entry : '' } #return serve_boostrap_table(Reports, bindings, 'id', **kwargs) return serve_boostrap_table2( model = Reports, #count_f = lambda : Reports.objects.values('name').annotate(Count('name')).count(), query_f = lambda x : Reports.objects.filter(**x).values('name').distinct(), bindings = bindings, filters = { 'name': (lambda : 'name__icontains', lambda x : x) # name_contains = x }, **kwargs ) @has_data @has_error def get_reports_ui(request, **kwargs): name = kwargs['name'] current_version = kwargs['current_version'] report = Reports.objects.get(name=name, current_version=current_version) username = report.user.username ret = { 'name': name, 'current_version': current_version, 'username': username, 'created_at': format_time(report.created_at), 'markdown': report.markdown, 'summary': report.summary, } return success(ret) @has_data @has_error def add_report(request, **kwargs): name = kwargs['name'] previous_version = kwargs['previous_version'] markdown = kwargs['markdown'] references = kwargs['references'] user = get_user(request) #print (name) #print (previous_version) #print (markdown) #print (references) current_version = get_maximum_current_version(Reports, name) previous_version = kwargs["previous_version"] if previous_version == 'N/A': previous_version = None if current_version == 1: previous_version = None report = Reports( name=name, user=user, current_version=current_version, previous_version=previous_version, markdown=markdown, ) report.save() fetched_references = [Reference.objects.get(name=x) for x in references] report.references.add(*fetched_references) report.save() ret = { 'created_at' : format_time(report.created_at), 'current_version': current_version, 'jstree': build_jstree(Reports, report.name) } #print (ret) return success(ret) ################################# #### END OF REPORTS ############# ################################# ################################# ####TOOLS / DATA################# ################################# @has_data def get_tools(request, **kwargs): ''' Serve GET Request for Tools bootstrap table def serve_boostrap_table2(model, count_f, query_f, bindings, **kwargs): count_f = Tools.objects.values('name', 'url').annotate(Count('name')).count() query_f = Tools.objects.values('name', 'url').annotate(Count('name') ''' bindings = { 'name' : 'name', 'url': lambda entry : '<a href="{}" target="_blank">{}</a>'.format(entry['url'], entry['url']), #'total_edits': lambda entry: entry['name__count'], 'description': lambda entry: '' #'current_version': lambda entry: '{} -- {}'.format(entry.current_version, entry.previous_version), #'current_version': 'current_version', #'description': 'description', #'description': lambda entry: '{} {} -- {}'.format(entry.description, entry.current_version, entry.previous_version), } #return serve_boostrap_table(Tools, bindings, 'name', **kwargs) return 
serve_boostrap_table2( model = Tools, #count_f = lambda : Tools.objects.values('name', 'url').annotate(Count('name')).count(), query_f = lambda x : Tools.objects.values('name', 'url').annotate(Count('name')), filters = { }, bindings = bindings, **kwargs ) @has_data @has_error def get_tools_ui(request, **kwargs): ''' Called when we want an explicit tool from the UI ''' name = kwargs['name'] current_version = kwargs['current_version'] tool = Tools.objects.get(name=name, current_version=current_version) #print ('System: {}'.format(tool.system)) exposed = simplejson.loads(tool.exposed) if not len(exposed): exposed = [['', '', '']] jstree = build_jstree(Tools, tool.name) dependencies = build_jstree_tool_dependencies(tool, prefix='3', include_original=False) #print ('DEPENDENCIES:') #print (dependencies) ret = { 'name': tool.name, 'current_version': current_version, 'version' : tool.version, 'system' : simplejson.loads(tool.system), 'username': tool.user.username, 'created_at': format_time(tool.created_at), 'url': tool.url, 'description': tool.description, 'installation': tool.installation, 'validate_installation': tool.validate_installation, 'exposed': exposed, 'jstree': jstree, 'references': [x.code for x in tool.references.all()], 'summary': tool.summary, 'dependencies': dependencies } return success(ret) @has_data @has_field( ['name', 'version', 'url', 'description', 'installation'], ['Name cannot be empty', 'Version cannot be empty', 'Link cannot be empty', 'Description cannot be empty', 'Installation cannot be empty']) @has_error def add_tool(request, **kwargs): ''' Attempt to add a new Tool ''' system = kwargs['system'] system_p = simplejson.loads(system) if not len(system_p): return fail('Please select one or more systems') url = kwargs['url'] if not URL_validate(url): return fail('URL: {} does not seem to be valid'.format(url)) references = kwargs['references'] references = simplejson.loads(references) references = [Reference.objects.get(code=r) for r in references] name = kwargs['name'] current_version = get_maximum_current_version(Tools, name) previous_version = kwargs["previous_version"] if previous_version == 'N/A': previous_version = None # else: # print ('Previous version: {}'.format(previous_version)) # print ('Current version: {}'.format(current_version)) # a=1/0 # Throw exception deliberately print ('Current version: {}'.format(current_version)) user = get_user(request) version = kwargs['version'] description = kwargs['description'] installation=kwargs['installation'] validate_installation = kwargs['validate_installation'] exposed = kwargs['exposed'] #print ('Exposed: {} {}'.format(exposed, type(exposed).__name__)) # This is a list exposed = [e for e in exposed if any(e)] # Remove empty exposed = simplejson.dumps(exposed) # Serialize summary = kwargs['summary'] new_tool = Tools( user=user, name=name, version=version, system=system, current_version=current_version, previous_version=previous_version, url = url, description = description, installation = installation, validate_installation = validate_installation, exposed = exposed, summary = summary, ); new_tool.save() #Add references new_tool.references.add(*references) new_tool.save() jstree = build_jstree(Tools, new_tool.name) #Add dependencies dependencies = kwargs['dependencies'] dependencies_objects = [Tools.objects.get(name=dependency['name'], current_version=dependency['current_version']) for dependency in dependencies] new_tool.dependencies.add(*dependencies_objects) new_tool.save() #Get created at created_at = 
format_time(new_tool.created_at) #print ('Created at: {}'.format(created_at)) ret = { 'created_at': created_at, 'current_version': current_version, 'jstree': jstree } return success(ret) @has_data @has_error def jstree_tool(request, **kwargs): ''' AJAX backend to get the version jstree for a tool ''' name = kwargs['name'] prefix = kwargs['prefix'] ret = { 'jstree' : build_jstree(Tools, name, prefix=prefix), } return success(ret) @has_data @has_error def jstree_report(request, **kwargs): ''' AJAX backend to get the version jstree for a tool ''' name = kwargs['name'] prefix = kwargs['prefix'] ret = { 'jstree' : build_jstree(Reports, name, prefix=prefix), } return success(ret) @has_data @has_error def jstree_wf(request, **kwargs): ''' AJAX backend to get the version jstree for a tool ''' name = kwargs['name'] prefix = kwargs['prefix'] ret = { 'jstree' : build_jstree(Tasks, name, prefix=prefix), } return success(ret) @has_data @has_error def jstree_tool_dependencies(request, **kwargs): ''' AJAX backend to get the dependency jstree for a tool ''' name = kwargs['name'] current_version = int(kwargs['current_version']) if 'prefix' in kwargs: prefix=kwargs['prefix'] else: prefix = '3' tool = Tools.objects.get(name=name, current_version=current_version) ret = { 'jstree': build_jstree_tool_dependencies(tool, prefix=prefix, include_original=True) } #print(ret) return success(ret) @has_data @has_error def get_tool_dependencies(request, **kwargs): ''' Return ONE LEVEL dependencies of this tool ''' name = kwargs['name'] current_version = int(kwargs['current_version']) tool = Tools.objects.get(name=name, current_version=current_version) ret = { 'dependencies': [{'name': x.name, 'current_version': x.current_version} for x in tool.dependencies.all()] } return success(ret) @has_data @has_error def get_tool_variables(request, **kwargs): ''' Return the variables of this tool ''' name = kwargs['name'] current_version = int(kwargs['current_version']) tool = Tools.objects.get(name=name, current_version=current_version) ret = { 'variables': simplejson.loads(tool.exposed) } return success(ret) ######################################## ####END OF TOOLS / DATA################# ######################################## ######################################## ######### WORKFLOWS #################### ######################################## def jason_or_django(f): ''' getattr and iterate methods for JSON or DJANGO objects ''' def dec(*args, **kwargs): if type(args[0]) is dict: attr = lambda x,y : x[y] iterate = lambda x,y : (k for k in x[y]) elif type(args[0]) is Tasks: attr = lambda x,y : getattr(x,y) iterate = lambda x,y : (k for k in getattr(x,y).all()) else: raise ArkalosException('This should never happen: {}'.format(type(task))) kwargs['attr'] = attr kwargs['iterate'] = iterate return f(*args, **kwargs) return dec @jason_or_django def task_hash(task, **kwargs): ''' Creates a unique hash for this task attr: Get attribute iterate: Iterator ''' attr = kwargs['attr'] iterate = kwargs['iterate'] # Dictionary version # to_hash = [ # task['name'], # task['bash'], # task['documentation'], # '@@'.join(['&&'.join((x['name'], str(x['current_version']))) for x in task['dependencies'] if x['type'] == 'tool']), # '!!'.join(['**'.join((x['name'], str(x['current_version']) if x['is_workflow'] else 'None')) for x in task['calls']]), # '##'.join(task['inputs']), # '$$'.join(task['outputs']) # ] # This works with both dictionary and django database objects to_hash = [ attr(task, 'name'), attr(task, 'bash'), attr(task, 
'documentation'), '@@'.join(['&&'.join((attr(x, 'name'), str(attr(x, 'current_version')))) for x in iterate(task, 'dependencies')]), '!!'.join(['**'.join((attr(x, 'name'), str(attr(x, 'current_version')) if attr(x, 'current_version') else 'None')) for x in iterate(task, 'calls')]), '##'.join(attr(task, 'inputs')), '$$'.join(attr(task, 'outputs')), ] to_hash = '^^'.join(to_hash) to_hash_b = bytearray(to_hash, encoding="utf-8") return hashlib.sha256(to_hash_b).hexdigest() def save_task_or_workflow(request, workflow_or_task): ''' Saves a workflow or task ''' if workflow_or_task['is_workflow']: # This is worflow is_workflow = True if workflow_or_task['current_version'] is None: # This workflow is not saved # Get the previous_version previous_version = workflow_or_task['previous_version'] # Get the current number current_version = get_maximum_current_version(Tasks, workflow_or_task['name']) else: # This workflow is saved. Find it and return it worklfow = Tasks.objects.get(name=workflow_or_task['name'], current_version=workflow_or_task['current_version']) return worklfow else: # This is a task is_workflow = False current_version = None previous_version = None #Check if it exists in the database try: task = Tasks.objects.get(hash_field=workflow_or_task['hash_value']) except ObjectDoesNotExist: pass else: return task # It does not exist. Create it! task = Tasks( user=get_user(request), name=workflow_or_task['name'], current_version=current_version, previous_version=previous_version, bash=workflow_or_task['bash'], documentation=workflow_or_task['documentation'], hash_field=workflow_or_task['hash_value'], is_workflow=is_workflow, inputs=simplejson.dumps(workflow_or_task['inputs']), outputs=simplejson.dumps(workflow_or_task['outputs']), ) task.save() # Add dependencies tools = [] for dependency in workflow_or_task['dependencies']: if dependency['type'] != 'tool': continue tools += [Tools.objects.get(name=dependency['name'], current_version=dependency['current_version'])] task.dependencies.add(*tools) task.save() # Add references refs = get_references_from_text(workflow_or_task['documentation']) task.references.add(*refs) task.save() return task def update_TasksStats(task): ''' Update the stats of this task ''' name = task.name try: taskStat = TasksStats.objects.get(name=name) except ObjectDoesNotExist: taskStat = TasksStats( name=name, edits=1, users=1, last_edit=task, ) else: taskStat.edits += 1 taskStat.users = Tasks.objects.filter(name=name).values('user').count() taskStat.last_edit=task finally: taskStat.save() @has_data @has_error def add_workflow(request, **kwargs): ''' Add a new workflow ''' graph = kwargs['graph'] main_guid = kwargs['main_guid'] #Fix is_workflow for node in graph: node['is_workflow'] = node['type'] == 'workflow' #Take main node main_node = None for node in graph: if node['guid'] == main_guid: main_node = node break assert not (main_node is None) assert main_node['is_workflow'] # Check if there is another workflow with the same name if main_node['previous_version'] is None: # It is a new workflow! if db_exists(Tasks, {'name': main_node['name']}): return fail('Another workflow with this name exists. Please choose another name') # Check if this workflow calls another workflow which is unsaved (this is not allowed) for node in graph: if not node['is_workflow']: # It is not a workflow continue if node['guid'] == main_guid: # It is not the main workflow continue if node['current_version'] is None: # It is not saved return fail('Could not save. 
Workflow: {} calls an UNSAVED workflow: {}'.format(main_node['name'], node['name'])) #Fix the "calls" guids_to_graph = {node['guid']:node for node in graph} for node in graph: node['calls'] = [{'name': guids_to_graph[callee_guid]['name'], 'current_version': guids_to_graph[callee_guid]['current_version']} for callee_guid in node['serial_calls']] #Do the following three things: #1. Add hash_value information #2. Take the hash of the main workflow #3. Create a mapping from GUIDs to hash_values from_guid_to_hash = {} main_hash = None guids_to_hashes = {} for node in graph: #print ('======') #print(node) node['hash_value'] = task_hash(node) if node['guid'] == main_guid: main_hash = node['hash_value'] guids_to_hashes[node['guid']] = node['hash_value'] assert not (main_hash is None) # Save the graph and create a new dictionary with the saved objects hash_objects_dict = { node['hash_value']: save_task_or_workflow(request, node) for node in graph } #Add the who calls whom information for node in graph: this_node_called =[hash_objects_dict[guids_to_hashes[callee_guid]] for callee_guid in node['serial_calls']] if this_node_called: hash_objects_dict[node['hash_value']].calls.add(*this_node_called) hash_objects_dict[node['hash_value']].save() #Update TaskStats. Perhaps can be done better with signals update_TasksStats(hash_objects_dict[main_hash]) ret = { 'current_version': hash_objects_dict[main_hash].current_version, 'created_at': format_time(hash_objects_dict[main_hash].created_at), } return success(ret) def workflow_graph(workflow_or_task): ''' Create a caller--callee graph identical to the one sent from angular for a workflow ''' ret = [] all_hashes = [] def create_node(node): ret = { 'bash': node.bash, 'current_version': node.current_version, 'previous_version': node.previous_version, 'documentation': node.documentation, 'tools_jstree_data': [build_jstree_tool_dependencies(tool, prefix='5', include_original=True) for x in node.dependencies.all()], 'inputs': simplejson.loads(node.inputs), 'outputs': simplejson.loads(node.outputs), 'type': 'workflow' if node.is_workflow else 'task', 'hash_value': node.hash_field, 'children': [] } if node.is_workflow: ret['name'] = node.name + '_' + str(node.current_version) ret['workflow_name'] = node.name ret['created_at'] = format_time(node.created_at) ret['username'] = node.user.username else: ret['name'] = node.name return ret def workflow_graph_rec(node): if node.hash_field in all_hashes: return all_hashes.append(node.hash_field) ret_json = create_node(node) ret_json['serial_calls'] = [] for callee in node.calls.all(): ret_json['serial_calls'].append(callee.hash_field) workflow_graph_rec(callee) ret.append(ret_json) workflow_graph_rec(workflow_or_task) return ret @has_data def get_workflow(request, **kwargs): ''' Creates a json object EXACTTLY the same as the one saved return { "name": node.type == 'workflow' ? 
node.workflow_name : node.name, "bash": node.bash, "current_version": node.current_version, // This is always null "previous_version": node.previous_version, "documentation": node.documentation, "dependencies": node.tools_jstree_data, "serial_calls" : node.serial_calls, "inputs": node.inputs, "outputs": node.outputs, "type": node.type, "guid": node.guid }; ''' name = kwargs['name'] current_version = kwargs['current_version'] wf = Tasks.objects.get(name=name, current_version=current_version) graph = workflow_graph(wf) # print ('ret:') # print (ret) ret = { 'graph': graph, 'main_hash': wf.hash_field } return success(ret) @has_data def get_workflows(request, **kwargs): ''' Serve bootstrap table for workflows ''' def description(entry): ret = '<p>Edits: <strong>%i</strong> Users: <strong>%i</strong> Last Edit: <strong>%s</strong><br />Last documentation: %s</p>' % (entry.edits, entry.users, format_time(entry.last_edit.created_at), entry.last_edit.documentation) return ret bindings = { 'name' : 'name', 'description': description, } #return serve_boostrap_table(Tools, bindings, 'name', **kwargs) return serve_boostrap_table2( model = TasksStats, #count_f = lambda : Tasks.objects.values('name').count(), # COUNT ALL query_f = lambda x : TasksStats.objects.filter(**x), # Query function filters = { 'name': (lambda : 'name__icontains', lambda x : x) # name_contains = x }, bindings = bindings, **kwargs ) ######################################## ####### END OF WORKFLOWS ############### ########################################
""" The arraypad module contains a group of functions to pad values onto the edges of an n-dimensional array. """ from __future__ import division, absolute_import, print_function import numpy as np __all__ = ['pad'] ############################################################################### # Private utility functions. def _arange_ndarray(arr, shape, axis, reverse=False): """ Create an ndarray of `shape` with increments along specified `axis` Parameters ---------- arr : ndarray Input array of arbitrary shape. shape : tuple of ints Shape of desired array. Should be equivalent to `arr.shape` except `shape[axis]` which may have any positive value. axis : int Axis to increment along. reverse : bool If False, increment in a positive fashion from 1 to `shape[axis]`, inclusive. If True, the bounds are the same but the order reversed. Returns ------- padarr : ndarray Output array sized to pad `arr` along `axis`, with linear range from 1 to `shape[axis]` along specified `axis`. Notes ----- The range is deliberately 1-indexed for this specific use case. Think of this algorithm as broadcasting `np.arange` to a single `axis` of an arbitrarily shaped ndarray. """ initshape = tuple(1 if i != axis else shape[axis] for (i, x) in enumerate(arr.shape)) if not reverse: padarr = np.arange(1, shape[axis] + 1) else: padarr = np.arange(shape[axis], 0, -1) padarr = padarr.reshape(initshape) for i, dim in enumerate(shape): if padarr.shape[i] != dim: padarr = padarr.repeat(dim, axis=i) return padarr def _round_ifneeded(arr, dtype): """ Rounds arr inplace if destination dtype is integer. Parameters ---------- arr : ndarray Input array. dtype : dtype The dtype of the destination array. """ if np.issubdtype(dtype, np.integer): arr.round(out=arr) def _prepend_const(arr, pad_amt, val, axis=-1): """ Prepend constant `val` along `axis` of `arr`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to prepend. val : scalar Constant value to use. For best results should be of type `arr.dtype`; if not `arr.dtype` will be cast to `arr.dtype`. axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` constant `val` prepended along `axis`. """ if pad_amt == 0: return arr padshape = tuple(x if i != axis else pad_amt for (i, x) in enumerate(arr.shape)) if val == 0: return np.concatenate((np.zeros(padshape, dtype=arr.dtype), arr), axis=axis) else: return np.concatenate(((np.zeros(padshape) + val).astype(arr.dtype), arr), axis=axis) def _append_const(arr, pad_amt, val, axis=-1): """ Append constant `val` along `axis` of `arr`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to append. val : scalar Constant value to use. For best results should be of type `arr.dtype`; if not `arr.dtype` will be cast to `arr.dtype`. axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` constant `val` appended along `axis`. """ if pad_amt == 0: return arr padshape = tuple(x if i != axis else pad_amt for (i, x) in enumerate(arr.shape)) if val == 0: return np.concatenate((arr, np.zeros(padshape, dtype=arr.dtype)), axis=axis) else: return np.concatenate( (arr, (np.zeros(padshape) + val).astype(arr.dtype)), axis=axis) def _prepend_edge(arr, pad_amt, axis=-1): """ Prepend `pad_amt` to `arr` along `axis` by extending edge values. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to prepend. 
axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, extended by `pad_amt` edge values appended along `axis`. """ if pad_amt == 0: return arr edge_slice = tuple(slice(None) if i != axis else 0 for (i, x) in enumerate(arr.shape)) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) edge_arr = arr[edge_slice].reshape(pad_singleton) return np.concatenate((edge_arr.repeat(pad_amt, axis=axis), arr), axis=axis) def _append_edge(arr, pad_amt, axis=-1): """ Append `pad_amt` to `arr` along `axis` by extending edge values. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to append. axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, extended by `pad_amt` edge values prepended along `axis`. """ if pad_amt == 0: return arr edge_slice = tuple(slice(None) if i != axis else arr.shape[axis] - 1 for (i, x) in enumerate(arr.shape)) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) edge_arr = arr[edge_slice].reshape(pad_singleton) return np.concatenate((arr, edge_arr.repeat(pad_amt, axis=axis)), axis=axis) def _prepend_ramp(arr, pad_amt, end, axis=-1): """ Prepend linear ramp along `axis`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to prepend. end : scalar Constal value to use. For best results should be of type `arr.dtype`; if not `arr.dtype` will be cast to `arr.dtype`. axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values prepended along `axis`. The prepended region ramps linearly from the edge value to `end`. """ if pad_amt == 0: return arr # Generate shape for final concatenated array padshape = tuple(x if i != axis else pad_amt for (i, x) in enumerate(arr.shape)) # Generate an n-dimensional array incrementing along `axis` ramp_arr = _arange_ndarray(arr, padshape, axis, reverse=True).astype(np.float64) # Appropriate slicing to extract n-dimensional edge along `axis` edge_slice = tuple(slice(None) if i != axis else 0 for (i, x) in enumerate(arr.shape)) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract edge, reshape to original rank, and extend along `axis` edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis) # Linear ramp slope = (end - edge_pad) / float(pad_amt) ramp_arr = ramp_arr * slope ramp_arr += edge_pad _round_ifneeded(ramp_arr, arr.dtype) # Ramp values will most likely be float, cast them to the same type as arr return np.concatenate((ramp_arr.astype(arr.dtype), arr), axis=axis) def _append_ramp(arr, pad_amt, end, axis=-1): """ Append linear ramp along `axis`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to append. end : scalar Constal value to use. For best results should be of type `arr.dtype`; if not `arr.dtype` will be cast to `arr.dtype`. axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values appended along `axis`. The appended region ramps linearly from the edge value to `end`. 
""" if pad_amt == 0: return arr # Generate shape for final concatenated array padshape = tuple(x if i != axis else pad_amt for (i, x) in enumerate(arr.shape)) # Generate an n-dimensional array incrementing along `axis` ramp_arr = _arange_ndarray(arr, padshape, axis, reverse=False).astype(np.float64) # Slice a chunk from the edge to calculate stats on edge_slice = tuple(slice(None) if i != axis else -1 for (i, x) in enumerate(arr.shape)) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract edge, reshape to original rank, and extend along `axis` edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis) # Linear ramp slope = (end - edge_pad) / float(pad_amt) ramp_arr = ramp_arr * slope ramp_arr += edge_pad _round_ifneeded(ramp_arr, arr.dtype) # Ramp values will most likely be float, cast them to the same type as arr return np.concatenate((arr, ramp_arr.astype(arr.dtype)), axis=axis) def _prepend_max(arr, pad_amt, num, axis=-1): """ Prepend `pad_amt` maximum values along `axis`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to prepend. num : int Depth into `arr` along `axis` to calculate maximum. Range: [1, `arr.shape[axis]`] or None (entire axis) axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values appended along `axis`. The prepended region is the maximum of the first `num` values along `axis`. """ if pad_amt == 0: return arr # Equivalent to edge padding for single value, so do that instead if num == 1: return _prepend_edge(arr, pad_amt, axis) # Use entire array if `num` is too large if num is not None: if num >= arr.shape[axis]: num = None # Slice a chunk from the edge to calculate stats on max_slice = tuple(slice(None) if i != axis else slice(num) for (i, x) in enumerate(arr.shape)) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract slice, calculate max, reshape to add singleton dimension back max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton) # Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt` return np.concatenate((max_chunk.repeat(pad_amt, axis=axis), arr), axis=axis) def _append_max(arr, pad_amt, num, axis=-1): """ Pad one `axis` of `arr` with the maximum of the last `num` elements. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to append. num : int Depth into `arr` along `axis` to calculate maximum. Range: [1, `arr.shape[axis]`] or None (entire axis) axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values appended along `axis`. The appended region is the maximum of the final `num` values along `axis`. 
""" if pad_amt == 0: return arr # Equivalent to edge padding for single value, so do that instead if num == 1: return _append_edge(arr, pad_amt, axis) # Use entire array if `num` is too large if num is not None: if num >= arr.shape[axis]: num = None # Slice a chunk from the edge to calculate stats on end = arr.shape[axis] - 1 if num is not None: max_slice = tuple( slice(None) if i != axis else slice(end, end - num, -1) for (i, x) in enumerate(arr.shape)) else: max_slice = tuple(slice(None) for x in arr.shape) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract slice, calculate max, reshape to add singleton dimension back max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton) # Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt` return np.concatenate((arr, max_chunk.repeat(pad_amt, axis=axis)), axis=axis) def _prepend_mean(arr, pad_amt, num, axis=-1): """ Prepend `pad_amt` mean values along `axis`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to prepend. num : int Depth into `arr` along `axis` to calculate mean. Range: [1, `arr.shape[axis]`] or None (entire axis) axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values prepended along `axis`. The prepended region is the mean of the first `num` values along `axis`. """ if pad_amt == 0: return arr # Equivalent to edge padding for single value, so do that instead if num == 1: return _prepend_edge(arr, pad_amt, axis) # Use entire array if `num` is too large if num is not None: if num >= arr.shape[axis]: num = None # Slice a chunk from the edge to calculate stats on mean_slice = tuple(slice(None) if i != axis else slice(num) for (i, x) in enumerate(arr.shape)) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract slice, calculate mean, reshape to add singleton dimension back mean_chunk = arr[mean_slice].mean(axis).reshape(pad_singleton) _round_ifneeded(mean_chunk, arr.dtype) # Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt` return np.concatenate((mean_chunk.repeat(pad_amt, axis).astype(arr.dtype), arr), axis=axis) def _append_mean(arr, pad_amt, num, axis=-1): """ Append `pad_amt` mean values along `axis`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to append. num : int Depth into `arr` along `axis` to calculate mean. Range: [1, `arr.shape[axis]`] or None (entire axis) axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values appended along `axis`. The appended region is the maximum of the final `num` values along `axis`. 
""" if pad_amt == 0: return arr # Equivalent to edge padding for single value, so do that instead if num == 1: return _append_edge(arr, pad_amt, axis) # Use entire array if `num` is too large if num is not None: if num >= arr.shape[axis]: num = None # Slice a chunk from the edge to calculate stats on end = arr.shape[axis] - 1 if num is not None: mean_slice = tuple( slice(None) if i != axis else slice(end, end - num, -1) for (i, x) in enumerate(arr.shape)) else: mean_slice = tuple(slice(None) for x in arr.shape) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract slice, calculate mean, reshape to add singleton dimension back mean_chunk = arr[mean_slice].mean(axis=axis).reshape(pad_singleton) _round_ifneeded(mean_chunk, arr.dtype) # Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt` return np.concatenate( (arr, mean_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis) def _prepend_med(arr, pad_amt, num, axis=-1): """ Prepend `pad_amt` median values along `axis`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to prepend. num : int Depth into `arr` along `axis` to calculate median. Range: [1, `arr.shape[axis]`] or None (entire axis) axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values prepended along `axis`. The prepended region is the median of the first `num` values along `axis`. """ if pad_amt == 0: return arr # Equivalent to edge padding for single value, so do that instead if num == 1: return _prepend_edge(arr, pad_amt, axis) # Use entire array if `num` is too large if num is not None: if num >= arr.shape[axis]: num = None # Slice a chunk from the edge to calculate stats on med_slice = tuple(slice(None) if i != axis else slice(num) for (i, x) in enumerate(arr.shape)) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract slice, calculate median, reshape to add singleton dimension back med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton) _round_ifneeded(med_chunk, arr.dtype) # Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt` return np.concatenate( (med_chunk.repeat(pad_amt, axis).astype(arr.dtype), arr), axis=axis) def _append_med(arr, pad_amt, num, axis=-1): """ Append `pad_amt` median values along `axis`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to append. num : int Depth into `arr` along `axis` to calculate median. Range: [1, `arr.shape[axis]`] or None (entire axis) axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values appended along `axis`. The appended region is the median of the final `num` values along `axis`. 
""" if pad_amt == 0: return arr # Equivalent to edge padding for single value, so do that instead if num == 1: return _append_edge(arr, pad_amt, axis) # Use entire array if `num` is too large if num is not None: if num >= arr.shape[axis]: num = None # Slice a chunk from the edge to calculate stats on end = arr.shape[axis] - 1 if num is not None: med_slice = tuple( slice(None) if i != axis else slice(end, end - num, -1) for (i, x) in enumerate(arr.shape)) else: med_slice = tuple(slice(None) for x in arr.shape) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract slice, calculate median, reshape to add singleton dimension back med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton) _round_ifneeded(med_chunk, arr.dtype) # Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt` return np.concatenate( (arr, med_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis) def _prepend_min(arr, pad_amt, num, axis=-1): """ Prepend `pad_amt` minimum values along `axis`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to prepend. num : int Depth into `arr` along `axis` to calculate minimum. Range: [1, `arr.shape[axis]`] or None (entire axis) axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values prepended along `axis`. The prepended region is the minimum of the first `num` values along `axis`. """ if pad_amt == 0: return arr # Equivalent to edge padding for single value, so do that instead if num == 1: return _prepend_edge(arr, pad_amt, axis) # Use entire array if `num` is too large if num is not None: if num >= arr.shape[axis]: num = None # Slice a chunk from the edge to calculate stats on min_slice = tuple(slice(None) if i != axis else slice(num) for (i, x) in enumerate(arr.shape)) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract slice, calculate min, reshape to add singleton dimension back min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton) # Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt` return np.concatenate((min_chunk.repeat(pad_amt, axis=axis), arr), axis=axis) def _append_min(arr, pad_amt, num, axis=-1): """ Append `pad_amt` median values along `axis`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to append. num : int Depth into `arr` along `axis` to calculate minimum. Range: [1, `arr.shape[axis]`] or None (entire axis) axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values appended along `axis`. The appended region is the minimum of the final `num` values along `axis`. 
""" if pad_amt == 0: return arr # Equivalent to edge padding for single value, so do that instead if num == 1: return _append_edge(arr, pad_amt, axis) # Use entire array if `num` is too large if num is not None: if num >= arr.shape[axis]: num = None # Slice a chunk from the edge to calculate stats on end = arr.shape[axis] - 1 if num is not None: min_slice = tuple( slice(None) if i != axis else slice(end, end - num, -1) for (i, x) in enumerate(arr.shape)) else: min_slice = tuple(slice(None) for x in arr.shape) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract slice, calculate min, reshape to add singleton dimension back min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton) # Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt` return np.concatenate((arr, min_chunk.repeat(pad_amt, axis=axis)), axis=axis) def _pad_ref(arr, pad_amt, method, axis=-1): """ Pad `axis` of `arr` by reflection. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : tuple of ints, length 2 Padding to (prepend, append) along `axis`. method : str Controls method of reflection; options are 'even' or 'odd'. axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt[0]` values prepended and `pad_amt[1]` values appended along `axis`. Both regions are padded with reflected values from the original array. Notes ----- This algorithm does not pad with repetition, i.e. the edges are not repeated in the reflection. For that behavior, use `mode='symmetric'`. The modes 'reflect', 'symmetric', and 'wrap' must be padded with a single function, lest the indexing tricks in non-integer multiples of the original shape would violate repetition in the final iteration. 
""" # Implicit booleanness to test for zero (or None) in any scalar type if pad_amt[0] == 0 and pad_amt[1] == 0: return arr ########################################################################## # Prepended region # Slice off a reverse indexed chunk from near edge to pad `arr` before ref_slice = tuple(slice(None) if i != axis else slice(pad_amt[0], 0, -1) for (i, x) in enumerate(arr.shape)) ref_chunk1 = arr[ref_slice] # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) if pad_amt[0] == 1: ref_chunk1 = ref_chunk1.reshape(pad_singleton) # Memory/computationally more expensive, only do this if `method='odd'` if 'odd' in method and pad_amt[0] > 0: edge_slice1 = tuple(slice(None) if i != axis else 0 for (i, x) in enumerate(arr.shape)) edge_chunk = arr[edge_slice1].reshape(pad_singleton) ref_chunk1 = 2 * edge_chunk - ref_chunk1 del edge_chunk ########################################################################## # Appended region # Slice off a reverse indexed chunk from far edge to pad `arr` after start = arr.shape[axis] - pad_amt[1] - 1 end = arr.shape[axis] - 1 ref_slice = tuple(slice(None) if i != axis else slice(start, end) for (i, x) in enumerate(arr.shape)) rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1) for (i, x) in enumerate(arr.shape)) ref_chunk2 = arr[ref_slice][rev_idx] if pad_amt[1] == 1: ref_chunk2 = ref_chunk2.reshape(pad_singleton) if 'odd' in method: edge_slice2 = tuple(slice(None) if i != axis else -1 for (i, x) in enumerate(arr.shape)) edge_chunk = arr[edge_slice2].reshape(pad_singleton) ref_chunk2 = 2 * edge_chunk - ref_chunk2 del edge_chunk # Concatenate `arr` with both chunks, extending along `axis` return np.concatenate((ref_chunk1, arr, ref_chunk2), axis=axis) def _pad_sym(arr, pad_amt, method, axis=-1): """ Pad `axis` of `arr` by symmetry. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : tuple of ints, length 2 Padding to (prepend, append) along `axis`. method : str Controls method of symmetry; options are 'even' or 'odd'. axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt[0]` values prepended and `pad_amt[1]` values appended along `axis`. Both regions are padded with symmetric values from the original array. Notes ----- This algorithm DOES pad with repetition, i.e. the edges are repeated. For padding without repeated edges, use `mode='reflect'`. The modes 'reflect', 'symmetric', and 'wrap' must be padded with a single function, lest the indexing tricks in non-integer multiples of the original shape would violate repetition in the final iteration. 
""" # Implicit booleanness to test for zero (or None) in any scalar type if pad_amt[0] == 0 and pad_amt[1] == 0: return arr ########################################################################## # Prepended region # Slice off a reverse indexed chunk from near edge to pad `arr` before sym_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[0]) for (i, x) in enumerate(arr.shape)) rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1) for (i, x) in enumerate(arr.shape)) sym_chunk1 = arr[sym_slice][rev_idx] # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) if pad_amt[0] == 1: sym_chunk1 = sym_chunk1.reshape(pad_singleton) # Memory/computationally more expensive, only do this if `method='odd'` if 'odd' in method and pad_amt[0] > 0: edge_slice1 = tuple(slice(None) if i != axis else 0 for (i, x) in enumerate(arr.shape)) edge_chunk = arr[edge_slice1].reshape(pad_singleton) sym_chunk1 = 2 * edge_chunk - sym_chunk1 del edge_chunk ########################################################################## # Appended region # Slice off a reverse indexed chunk from far edge to pad `arr` after start = arr.shape[axis] - pad_amt[1] end = arr.shape[axis] sym_slice = tuple(slice(None) if i != axis else slice(start, end) for (i, x) in enumerate(arr.shape)) sym_chunk2 = arr[sym_slice][rev_idx] if pad_amt[1] == 1: sym_chunk2 = sym_chunk2.reshape(pad_singleton) if 'odd' in method: edge_slice2 = tuple(slice(None) if i != axis else -1 for (i, x) in enumerate(arr.shape)) edge_chunk = arr[edge_slice2].reshape(pad_singleton) sym_chunk2 = 2 * edge_chunk - sym_chunk2 del edge_chunk # Concatenate `arr` with both chunks, extending along `axis` return np.concatenate((sym_chunk1, arr, sym_chunk2), axis=axis) def _pad_wrap(arr, pad_amt, axis=-1): """ Pad `axis` of `arr` via wrapping. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : tuple of ints, length 2 Padding to (prepend, append) along `axis`. axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt[0]` values prepended and `pad_amt[1]` values appended along `axis`. Both regions are padded wrapped values from the opposite end of `axis`. Notes ----- This method of padding is also known as 'tile' or 'tiling'. The modes 'reflect', 'symmetric', and 'wrap' must be padded with a single function, lest the indexing tricks in non-integer multiples of the original shape would violate repetition in the final iteration. 
""" # Implicit booleanness to test for zero (or None) in any scalar type if pad_amt[0] == 0 and pad_amt[1] == 0: return arr ########################################################################## # Prepended region # Slice off a reverse indexed chunk from near edge to pad `arr` before start = arr.shape[axis] - pad_amt[0] end = arr.shape[axis] wrap_slice = tuple(slice(None) if i != axis else slice(start, end) for (i, x) in enumerate(arr.shape)) wrap_chunk1 = arr[wrap_slice] # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) if pad_amt[0] == 1: wrap_chunk1 = wrap_chunk1.reshape(pad_singleton) ########################################################################## # Appended region # Slice off a reverse indexed chunk from far edge to pad `arr` after wrap_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[1]) for (i, x) in enumerate(arr.shape)) wrap_chunk2 = arr[wrap_slice] if pad_amt[1] == 1: wrap_chunk2 = wrap_chunk2.reshape(pad_singleton) # Concatenate `arr` with both chunks, extending along `axis` return np.concatenate((wrap_chunk1, arr, wrap_chunk2), axis=axis) def _normalize_shape(ndarray, shape, cast_to_int=True): """ Private function which does some checks and normalizes the possibly much simpler representations of 'pad_width', 'stat_length', 'constant_values', 'end_values'. Parameters ---------- narray : ndarray Input ndarray shape : {sequence, array_like, float, int}, optional The width of padding (pad_width), the number of elements on the edge of the narray used for statistics (stat_length), the constant value(s) to use when filling padded regions (constant_values), or the endpoint target(s) for linear ramps (end_values). ((before_1, after_1), ... (before_N, after_N)) unique number of elements for each axis where `N` is rank of `narray`. ((before, after),) yields same before and after constants for each axis. (constant,) or val is a shortcut for before = after = constant for all axes. cast_to_int : bool, optional Controls if values in ``shape`` will be rounded and cast to int before being returned. Returns ------- normalized_shape : tuple of tuples val => ((val, val), (val, val), ...) [[val1, val2], [val3, val4], ...] => ((val1, val2), (val3, val4), ...) ((val1, val2), (val3, val4), ...) => no change [[val1, val2], ] => ((val1, val2), (val1, val2), ...) ((val1, val2), ) => ((val1, val2), (val1, val2), ...) [[val , ], ] => ((val, val), (val, val), ...) ((val , ), ) => ((val, val), (val, val), ...) 
""" ndims = ndarray.ndim # Shortcut shape=None if shape is None: return ((None, None), ) * ndims # Convert any input `info` to a NumPy array arr = np.asarray(shape) # Switch based on what input looks like if arr.ndim <= 1: if arr.shape == () or arr.shape == (1,): # Single scalar input # Create new array of ones, multiply by the scalar arr = np.ones((ndims, 2), dtype=ndarray.dtype) * arr elif arr.shape == (2,): # Apply padding (before, after) each axis # Create new axis 0, repeat along it for every axis arr = arr[np.newaxis, :].repeat(ndims, axis=0) else: fmt = "Unable to create correctly shaped tuple from %s" raise ValueError(fmt % (shape,)) elif arr.ndim == 2: if arr.shape[1] == 1 and arr.shape[0] == ndims: # Padded before and after by the same amount arr = arr.repeat(2, axis=1) elif arr.shape[0] == ndims: # Input correctly formatted, pass it on as `arr` arr = shape else: fmt = "Unable to create correctly shaped tuple from %s" raise ValueError(fmt % (shape,)) else: fmt = "Unable to create correctly shaped tuple from %s" raise ValueError(fmt % (shape,)) # Cast if necessary if cast_to_int is True: arr = np.round(arr).astype(int) # Convert list of lists to tuple of tuples return tuple(tuple(axis) for axis in arr.tolist()) def _validate_lengths(narray, number_elements): """ Private function which does some checks and reformats pad_width and stat_length using _normalize_shape. Parameters ---------- narray : ndarray Input ndarray number_elements : {sequence, int}, optional The width of padding (pad_width) or the number of elements on the edge of the narray used for statistics (stat_length). ((before_1, after_1), ... (before_N, after_N)) unique number of elements for each axis. ((before, after),) yields same before and after constants for each axis. (constant,) or int is a shortcut for before = after = constant for all axes. Returns ------- _validate_lengths : tuple of tuples int => ((int, int), (int, int), ...) [[int1, int2], [int3, int4], ...] => ((int1, int2), (int3, int4), ...) ((int1, int2), (int3, int4), ...) => no change [[int1, int2], ] => ((int1, int2), (int1, int2), ...) ((int1, int2), ) => ((int1, int2), (int1, int2), ...) [[int , ], ] => ((int, int), (int, int), ...) ((int , ), ) => ((int, int), (int, int), ...) """ normshp = _normalize_shape(narray, number_elements) for i in normshp: chk = [1 if x is None else x for x in i] chk = [1 if x >= 0 else -1 for x in chk] if (chk[0] < 0) or (chk[1] < 0): fmt = "%s cannot contain negative values." raise ValueError(fmt % (number_elements,)) return normshp ############################################################################### # Public functions def pad(array, pad_width, mode=None, **kwargs): """ Pads an array. Parameters ---------- array : array_like of rank N Input array pad_width : {sequence, array_like, int} Number of values padded to the edges of each axis. ((before_1, after_1), ... (before_N, after_N)) unique pad widths for each axis. ((before, after),) yields same before and after pad for each axis. (pad,) or int is a shortcut for before = after = pad width for all axes. mode : str or function One of the following string values or a user supplied function. 'constant' Pads with a constant value. 'edge' Pads with the edge values of array. 'linear_ramp' Pads with the linear ramp between end_value and the array edge value. 'maximum' Pads with the maximum value of all or part of the vector along each axis. 'mean' Pads with the mean value of all or part of the vector along each axis. 
'median' Pads with the median value of all or part of the vector along each axis. 'minimum' Pads with the minimum value of all or part of the vector along each axis. 'reflect' Pads with the reflection of the vector mirrored on the first and last values of the vector along each axis. 'symmetric' Pads with the reflection of the vector mirrored along the edge of the array. 'wrap' Pads with the wrap of the vector along the axis. The first values are used to pad the end and the end values are used to pad the beginning. <function> Padding function, see Notes. stat_length : sequence or int, optional Used in 'maximum', 'mean', 'median', and 'minimum'. Number of values at edge of each axis used to calculate the statistic value. ((before_1, after_1), ... (before_N, after_N)) unique statistic lengths for each axis. ((before, after),) yields same before and after statistic lengths for each axis. (stat_length,) or int is a shortcut for before = after = statistic length for all axes. Default is ``None``, to use the entire axis. constant_values : sequence or int, optional Used in 'constant'. The values to set the padded values for each axis. ((before_1, after_1), ... (before_N, after_N)) unique pad constants for each axis. ((before, after),) yields same before and after constants for each axis. (constant,) or int is a shortcut for before = after = constant for all axes. Default is 0. end_values : sequence or int, optional Used in 'linear_ramp'. The values used for the ending value of the linear_ramp and that will form the edge of the padded array. ((before_1, after_1), ... (before_N, after_N)) unique end values for each axis. ((before, after),) yields same before and after end values for each axis. (constant,) or int is a shortcut for before = after = end value for all axes. Default is 0. reflect_type : {'even', 'odd'}, optional Used in 'reflect', and 'symmetric'. The 'even' style is the default with an unaltered reflection around the edge value. For the 'odd' style, the extented part of the array is created by subtracting the reflected values from two times the edge value. Returns ------- pad : ndarray Padded array of rank equal to `array` with shape increased according to `pad_width`. Notes ----- .. versionadded:: 1.7.0 For an array with rank greater than 1, some of the padding of later axes is calculated from padding of previous axes. This is easiest to think about with a rank 2 array where the corners of the padded array are calculated by using padded values from the first axis. The padding function, if used, should return a rank 1 array equal in length to the vector argument with padded values replaced. It has the following signature:: padding_func(vector, iaxis_pad_width, iaxis, **kwargs) where vector : ndarray A rank 1 array already padded with zeros. Padded values are vector[:pad_tuple[0]] and vector[-pad_tuple[1]:]. iaxis_pad_width : tuple A 2-tuple of ints, iaxis_pad_width[0] represents the number of values padded at the beginning of vector where iaxis_pad_width[1] represents the number of values padded at the end of vector. iaxis : int The axis currently being calculated. kwargs : misc Any keyword arguments the function requires. 
Examples -------- >>> a = [1, 2, 3, 4, 5] >>> np.lib.pad(a, (2,3), 'constant', constant_values=(4, 6)) array([4, 4, 1, 2, 3, 4, 5, 6, 6, 6]) >>> np.lib.pad(a, (2, 3), 'edge') array([1, 1, 1, 2, 3, 4, 5, 5, 5, 5]) >>> np.lib.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4)) array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4]) >>> np.lib.pad(a, (2,), 'maximum') array([5, 5, 1, 2, 3, 4, 5, 5, 5]) >>> np.lib.pad(a, (2,), 'mean') array([3, 3, 1, 2, 3, 4, 5, 3, 3]) >>> np.lib.pad(a, (2,), 'median') array([3, 3, 1, 2, 3, 4, 5, 3, 3]) >>> a = [[1, 2], [3, 4]] >>> np.lib.pad(a, ((3, 2), (2, 3)), 'minimum') array([[1, 1, 1, 2, 1, 1, 1], [1, 1, 1, 2, 1, 1, 1], [1, 1, 1, 2, 1, 1, 1], [1, 1, 1, 2, 1, 1, 1], [3, 3, 3, 4, 3, 3, 3], [1, 1, 1, 2, 1, 1, 1], [1, 1, 1, 2, 1, 1, 1]]) >>> a = [1, 2, 3, 4, 5] >>> np.lib.pad(a, (2, 3), 'reflect') array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2]) >>> np.lib.pad(a, (2, 3), 'reflect', reflect_type='odd') array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8]) >>> np.lib.pad(a, (2, 3), 'symmetric') array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3]) >>> np.lib.pad(a, (2, 3), 'symmetric', reflect_type='odd') array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7]) >>> np.lib.pad(a, (2, 3), 'wrap') array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3]) >>> def padwithtens(vector, pad_width, iaxis, kwargs): ... vector[:pad_width[0]] = 10 ... vector[-pad_width[1]:] = 10 ... return vector >>> a = np.arange(6) >>> a = a.reshape((2, 3)) >>> np.lib.pad(a, 2, padwithtens) array([[10, 10, 10, 10, 10, 10, 10], [10, 10, 10, 10, 10, 10, 10], [10, 10, 0, 1, 2, 10, 10], [10, 10, 3, 4, 5, 10, 10], [10, 10, 10, 10, 10, 10, 10], [10, 10, 10, 10, 10, 10, 10]]) """ if not np.asarray(pad_width).dtype.kind == 'i': raise TypeError('`pad_width` must be of integral type.') narray = np.array(array) pad_width = _validate_lengths(narray, pad_width) allowedkwargs = { 'constant': ['constant_values'], 'edge': [], 'linear_ramp': ['end_values'], 'maximum': ['stat_length'], 'mean': ['stat_length'], 'median': ['stat_length'], 'minimum': ['stat_length'], 'reflect': ['reflect_type'], 'symmetric': ['reflect_type'], 'wrap': [], } kwdefaults = { 'stat_length': None, 'constant_values': 0, 'end_values': 0, 'reflect_type': 'even', } if isinstance(mode, str): # Make sure have allowed kwargs appropriate for mode for key in kwargs: if key not in allowedkwargs[mode]: raise ValueError('%s keyword not in allowed keywords %s' % (key, allowedkwargs[mode])) # Set kwarg defaults for kw in allowedkwargs[mode]: kwargs.setdefault(kw, kwdefaults[kw]) # Need to only normalize particular keywords. for i in kwargs: if i == 'stat_length': kwargs[i] = _validate_lengths(narray, kwargs[i]) if i in ['end_values', 'constant_values']: kwargs[i] = _normalize_shape(narray, kwargs[i], cast_to_int=False) elif mode is None: raise ValueError('Keyword "mode" must be a function or one of %s.' % (list(allowedkwargs.keys()),)) else: # Drop back to old, slower np.apply_along_axis mode for user-supplied # vector function function = mode # Create a new padded array rank = list(range(len(narray.shape))) total_dim_increase = [np.sum(pad_width[i]) for i in rank] offset_slices = [slice(pad_width[i][0], pad_width[i][0] + narray.shape[i]) for i in rank] new_shape = np.array(narray.shape) + total_dim_increase newmat = np.zeros(new_shape, narray.dtype) # Insert the original array into the padded array newmat[offset_slices] = narray # This is the core of pad ... 
for iaxis in rank: np.apply_along_axis(function, iaxis, newmat, pad_width[iaxis], iaxis, kwargs) return newmat # If we get here, use new padding method newmat = narray.copy() # API preserved, but completely new algorithm which pads by building the # entire block to pad before/after `arr` with in one step, for each axis. if mode == 'constant': for axis, ((pad_before, pad_after), (before_val, after_val)) \ in enumerate(zip(pad_width, kwargs['constant_values'])): newmat = _prepend_const(newmat, pad_before, before_val, axis) newmat = _append_const(newmat, pad_after, after_val, axis) elif mode == 'edge': for axis, (pad_before, pad_after) in enumerate(pad_width): newmat = _prepend_edge(newmat, pad_before, axis) newmat = _append_edge(newmat, pad_after, axis) elif mode == 'linear_ramp': for axis, ((pad_before, pad_after), (before_val, after_val)) \ in enumerate(zip(pad_width, kwargs['end_values'])): newmat = _prepend_ramp(newmat, pad_before, before_val, axis) newmat = _append_ramp(newmat, pad_after, after_val, axis) elif mode == 'maximum': for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ in enumerate(zip(pad_width, kwargs['stat_length'])): newmat = _prepend_max(newmat, pad_before, chunk_before, axis) newmat = _append_max(newmat, pad_after, chunk_after, axis) elif mode == 'mean': for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ in enumerate(zip(pad_width, kwargs['stat_length'])): newmat = _prepend_mean(newmat, pad_before, chunk_before, axis) newmat = _append_mean(newmat, pad_after, chunk_after, axis) elif mode == 'median': for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ in enumerate(zip(pad_width, kwargs['stat_length'])): newmat = _prepend_med(newmat, pad_before, chunk_before, axis) newmat = _append_med(newmat, pad_after, chunk_after, axis) elif mode == 'minimum': for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ in enumerate(zip(pad_width, kwargs['stat_length'])): newmat = _prepend_min(newmat, pad_before, chunk_before, axis) newmat = _append_min(newmat, pad_after, chunk_after, axis) elif mode == 'reflect': for axis, (pad_before, pad_after) in enumerate(pad_width): # Recursive padding along any axis where `pad_amt` is too large # for indexing tricks. We can only safely pad the original axis # length, to keep the period of the reflections consistent. if ((pad_before > 0) or (pad_after > 0)) and newmat.shape[axis] == 1: # Extending singleton dimension for 'reflect' is legacy # behavior; it really should raise an error. newmat = _prepend_edge(newmat, pad_before, axis) newmat = _append_edge(newmat, pad_after, axis) continue method = kwargs['reflect_type'] safe_pad = newmat.shape[axis] - 1 while ((pad_before > safe_pad) or (pad_after > safe_pad)): pad_iter_b = min(safe_pad, safe_pad * (pad_before // safe_pad)) pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad)) newmat = _pad_ref(newmat, (pad_iter_b, pad_iter_a), method, axis) pad_before -= pad_iter_b pad_after -= pad_iter_a safe_pad += pad_iter_b + pad_iter_a newmat = _pad_ref(newmat, (pad_before, pad_after), method, axis) elif mode == 'symmetric': for axis, (pad_before, pad_after) in enumerate(pad_width): # Recursive padding along any axis where `pad_amt` is too large # for indexing tricks. We can only safely pad the original axis # length, to keep the period of the reflections consistent. 
method = kwargs['reflect_type'] safe_pad = newmat.shape[axis] while ((pad_before > safe_pad) or (pad_after > safe_pad)): pad_iter_b = min(safe_pad, safe_pad * (pad_before // safe_pad)) pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad)) newmat = _pad_sym(newmat, (pad_iter_b, pad_iter_a), method, axis) pad_before -= pad_iter_b pad_after -= pad_iter_a safe_pad += pad_iter_b + pad_iter_a newmat = _pad_sym(newmat, (pad_before, pad_after), method, axis) elif mode == 'wrap': for axis, (pad_before, pad_after) in enumerate(pad_width): # Recursive padding along any axis where `pad_amt` is too large # for indexing tricks. We can only safely pad the original axis # length, to keep the period of the reflections consistent. safe_pad = newmat.shape[axis] while ((pad_before > safe_pad) or (pad_after > safe_pad)): pad_iter_b = min(safe_pad, safe_pad * (pad_before // safe_pad)) pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad)) newmat = _pad_wrap(newmat, (pad_iter_b, pad_iter_a), axis) pad_before -= pad_iter_b pad_after -= pad_iter_a safe_pad += pad_iter_b + pad_iter_a newmat = _pad_wrap(newmat, (pad_before, pad_after), axis) return newmat
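# Quick illustration (not part of the module above): when a requested pad is larger
# than the axis itself, the 'reflect'/'symmetric'/'wrap' branches apply the padding
# in several safe-sized passes instead of a single indexing step.
if __name__ == '__main__':
    a = np.arange(3)                       # axis length 3
    wrapped = pad(a, (7, 7), mode='wrap')  # pad width > axis length exercises that loop
    # The padded regions simply repeat the original period [0, 1, 2].
    print(wrapped)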
import copy import json import logging import pytest import burn_lock_functions import test_utilities from integration_env_credentials import sifchain_cli_credentials_for_test from pytest_utilities import generate_minimal_test_account from test_utilities import EthereumToSifchainTransferRequest, SifchaincliCredentials def create_new_sifaddr(): new_account_key = test_utilities.get_shell_output("uuidgen") credentials = sifchain_cli_credentials_for_test(new_account_key) new_addr = burn_lock_functions.create_new_sifaddr(credentials=credentials, keyname=new_account_key) return new_addr["address"] def create_new_sifaddr_and_key(): new_account_key = test_utilities.get_shell_output("uuidgen") credentials = sifchain_cli_credentials_for_test(new_account_key) new_addr = burn_lock_functions.create_new_sifaddr(credentials=credentials, keyname=new_account_key) return new_addr["address"], new_addr["name"] @pytest.mark.skip(reason="run manually") def test_bulk_transfers( basic_transfer_request: EthereumToSifchainTransferRequest, smart_contracts_dir, source_ethereum_address, bridgebank_address, bridgetoken_address, ethereum_network, ): n_transfers = int(test_utilities.get_optional_env_var("NTRANSFERS", 2)) ganache_delay = test_utilities.get_optional_env_var("GANACHE_DELAY", 1) # test_utilities.get_shell_output(f"{integration_dir}/ganache_start.sh {ganache_delay}") amount = "{:d}".format(5 * test_utilities.highest_gas_cost) new_addresses_and_keys = list(map(lambda x: create_new_sifaddr_and_key(), range(n_transfers))) logging.info(f"aandk: {new_addresses_and_keys}") new_addresses = list(map(lambda a: a[0], new_addresses_and_keys)) logging.debug(f"new_addresses: {new_addresses}") new_eth_addrs = test_utilities.create_ethereum_addresses(smart_contracts_dir, basic_transfer_request.ethereum_network, len(new_addresses)) logging.info(f"new eth addrs: {new_eth_addrs}") request: EthereumToSifchainTransferRequest = copy.deepcopy(basic_transfer_request) requests = list(map(lambda addr: { "amount": amount, "symbol": test_utilities.NULL_ADDRESS, "sifchain_address": addr }, new_addresses)) json_requests = json.dumps(requests) test_utilities.run_yarn_command( " ".join([ f"yarn --cwd {smart_contracts_dir}", "integrationtest:sendBulkLockTx", f"--amount {amount}", f"--symbol eth", f"--json_path {request.solidity_json_path}", f"--sifchain_address {new_addresses[0]}", f"--transactions \'{json_requests}\'", f"--ethereum_address {source_ethereum_address}", f"--bridgebank_address {bridgebank_address}", f"--ethereum_network {ethereum_network}", ]) ) requests = list(map(lambda addr: { "amount": amount, "symbol": bridgetoken_address, "sifchain_address": addr }, new_addresses)) json_requests = json.dumps(requests) yarn_result = test_utilities.run_yarn_command( " ".join([ f"yarn --cwd {smart_contracts_dir}", "integrationtest:sendBulkLockTx", f"--amount {amount}", "--lock_or_burn burn", f"--symbol {bridgetoken_address}", f"--json_path {request.solidity_json_path}", f"--sifchain_address {new_addresses[0]}", f"--transactions \'{json_requests}\'", f"--ethereum_address {source_ethereum_address}", f"--bridgebank_address {bridgebank_address}", f"--ethereum_network {ethereum_network}", ]) ) logging.info(f"bulk result: {yarn_result}") manual_advance = False if manual_advance: test_utilities.advance_n_ethereum_blocks(test_utilities.n_wait_blocks, smart_contracts_dir) test_utilities.wait_for_ethereum_block_number(yarn_result["blockNumber"] + test_utilities.n_wait_blocks, basic_transfer_request); for a in new_addresses: 
test_utilities.wait_for_sif_account(a, basic_transfer_request.sifnoded_node, 90) test_utilities.wait_for_sifchain_addr_balance(a, "ceth", amount, basic_transfer_request.sifnoded_node, 180) test_utilities.wait_for_sifchain_addr_balance(a, "rowan", amount, basic_transfer_request.sifnoded_node, 180) text_file = open("pfile.cmds", "w") simple_credentials = SifchaincliCredentials( keyring_passphrase=None, keyring_backend="test", from_key=None, sifnoded_homedir=None ) logging.info(f"all accounts are on sifchain and have the correct balance") for sifaddr, ethaddr in zip(new_addresses_and_keys, new_eth_addrs): r = copy.deepcopy(basic_transfer_request) r.sifchain_address = sifaddr[0] r.ethereum_address = ethaddr["address"] r.amount = 100 simple_credentials.from_key = sifaddr[1] c = test_utilities.send_from_sifchain_to_ethereum_cmd(r, simple_credentials) text_file.write(f"{c}\n") text_file.close() # test_utilities.get_shell_output("cat pfile.cmds | parallel --trim lr -v {}") test_utilities.get_shell_output("bash -x pfile.cmds") for sifaddr, ethaddr in zip(new_addresses_and_keys, new_eth_addrs): r = copy.deepcopy(basic_transfer_request) r.ethereum_address = ethaddr["address"] r.amount = 100 test_utilities.wait_for_eth_balance(r, 100, 300)
"""Build Environment used for isolation during sdist building """ import logging import os import sys import textwrap from distutils.sysconfig import get_python_lib from sysconfig import get_paths from pip._vendor.pkg_resources import Requirement, VersionConflict, WorkingSet from pip import __file__ as pip_location from pip._internal.utils.misc import call_subprocess from pip._internal.utils.temp_dir import TempDirectory from pip._internal.utils.ui import open_spinner logger = logging.getLogger(__name__) class BuildEnvironment(object): """Creates and manages an isolated environment to install build deps """ def __init__(self): self._temp_dir = TempDirectory(kind="build-env") self._temp_dir.create() @property def path(self): return self._temp_dir.path def __enter__(self): self.save_path = os.environ.get('PATH', None) self.save_pythonpath = os.environ.get('PYTHONPATH', None) self.save_nousersite = os.environ.get('PYTHONNOUSERSITE', None) install_scheme = 'nt' if (os.name == 'nt') else 'posix_prefix' install_dirs = get_paths(install_scheme, vars={ 'base': self.path, 'platbase': self.path, }) scripts = install_dirs['scripts'] if self.save_path: os.environ['PATH'] = scripts + os.pathsep + self.save_path else: os.environ['PATH'] = scripts + os.pathsep + os.defpath # Note: prefer distutils' sysconfig to get the # library paths so PyPy is correctly supported. purelib = get_python_lib(plat_specific=0, prefix=self.path) platlib = get_python_lib(plat_specific=1, prefix=self.path) if purelib == platlib: lib_dirs = purelib else: lib_dirs = purelib + os.pathsep + platlib if self.save_pythonpath: os.environ['PYTHONPATH'] = lib_dirs + os.pathsep + \ self.save_pythonpath else: os.environ['PYTHONPATH'] = lib_dirs os.environ['PYTHONNOUSERSITE'] = '1' # Ensure .pth files are honored. 
with open(os.path.join(purelib, 'sitecustomize.py'), 'w') as fp: fp.write(textwrap.dedent( ''' import site site.addsitedir({!r}) ''' ).format(purelib)) return self.path def __exit__(self, exc_type, exc_val, exc_tb): def restore_var(varname, old_value): if old_value is None: os.environ.pop(varname, None) else: os.environ[varname] = old_value restore_var('PATH', self.save_path) restore_var('PYTHONPATH', self.save_pythonpath) restore_var('PYTHONNOUSERSITE', self.save_nousersite) def cleanup(self): self._temp_dir.cleanup() def missing_requirements(self, reqs): """Return a list of the requirements from reqs that are not present """ missing = [] with self: ws = WorkingSet(os.environ["PYTHONPATH"].split(os.pathsep)) for req in reqs: try: if ws.find(Requirement.parse(req)) is None: missing.append(req) except VersionConflict: missing.append(req) return missing def install_requirements(self, finder, requirements, message): args = [ sys.executable, os.path.dirname(pip_location), 'install', '--ignore-installed', '--no-user', '--prefix', self.path, '--no-warn-script-location', ] if logger.getEffectiveLevel() <= logging.DEBUG: args.append('-v') for format_control in ('no_binary', 'only_binary'): formats = getattr(finder.format_control, format_control) args.extend(('--' + format_control.replace('_', '-'), ','.join(sorted(formats or {':none:'})))) if finder.index_urls: args.extend(['-i', finder.index_urls[0]]) for extra_index in finder.index_urls[1:]: args.extend(['--extra-index-url', extra_index]) else: args.append('--no-index') for link in finder.find_links: args.extend(['--find-links', link]) for _, host, _ in finder.secure_origins: args.extend(['--trusted-host', host]) if finder.allow_all_prereleases: args.append('--pre') if finder.process_dependency_links: args.append('--process-dependency-links') args.append('--') args.extend(requirements) with open_spinner(message) as spinner: call_subprocess(args, show_stdout=False, spinner=spinner) class NoOpBuildEnvironment(BuildEnvironment): """A no-op drop-in replacement for BuildEnvironment """ def __init__(self): pass def __enter__(self): pass def __exit__(self, exc_type, exc_val, exc_tb): pass def cleanup(self): pass def install_requirements(self, finder, requirements, message): raise NotImplementedError()
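# Hedged usage sketch (not part of this module; `finder` is assumed to be a configured
# pip PackageFinder created elsewhere): the environment is meant to wrap build-backend
# invocations so that PATH/PYTHONPATH point into the isolated prefix only inside `with`.
#
#   build_env = BuildEnvironment()
#   build_env.install_requirements(finder, ['setuptools>=40.8', 'wheel'],
#                                  "Installing build dependencies")
#   with build_env:
#       call_subprocess([sys.executable, 'setup.py', 'sdist'], show_stdout=False)
#   build_env.cleanup()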
import logging from random import randint import traceback from typing import cast, Dict, List, Set, Collection from geniusweb.actions.Accept import Accept from geniusweb.actions.Action import Action from geniusweb.actions.LearningDone import LearningDone from geniusweb.actions.Offer import Offer from geniusweb.actions.PartyId import PartyId from geniusweb.actions.Vote import Vote from geniusweb.actions.Votes import Votes from geniusweb.bidspace.AllBidsList import AllBidsList from geniusweb.inform.ActionDone import ActionDone from geniusweb.inform.Finished import Finished from geniusweb.inform.Inform import Inform from geniusweb.inform.OptIn import OptIn from geniusweb.inform.Settings import Settings from geniusweb.inform.Voting import Voting from geniusweb.inform.YourTurn import YourTurn from geniusweb.issuevalue.Bid import Bid from geniusweb.issuevalue.Domain import Domain from geniusweb.issuevalue.Value import Value from geniusweb.issuevalue.ValueSet import ValueSet from geniusweb.party.Capabilities import Capabilities from geniusweb.party.DefaultParty import DefaultParty from geniusweb.profile.utilityspace.UtilitySpace import UtilitySpace from geniusweb.profileconnection.ProfileConnectionFactory import ( ProfileConnectionFactory, ) from geniusweb.progress.ProgressRounds import ProgressRounds from geniusweb.utils import val class RandomAgent(DefaultParty): """ Offers random bids until a bid with sufficient utility is offered. """ def __init__(self): super().__init__() self.getReporter().log(logging.INFO, "party is initialized") self._profile = None self._lastReceivedBid: Bid = None # Override def notifyChange(self, info: Inform): # self.getReporter().log(logging.INFO,"received info:"+str(info)) if isinstance(info, Settings): self._settings: Settings = cast(Settings, info) self._me = self._settings.getID() self._protocol: str = str(self._settings.getProtocol().getURI()) self._progress = self._settings.getProgress() if "Learn" == self._protocol: self.getConnection().send(LearningDone(self._me)) # type:ignore else: self._profile = ProfileConnectionFactory.create( info.getProfile().getURI(), self.getReporter() ) elif isinstance(info, ActionDone): action: Action = cast(ActionDone, info).getAction() if isinstance(action, Offer): self._lastReceivedBid = cast(Offer, action).getBid() elif isinstance(info, YourTurn): self._myTurn() if isinstance(self._progress, ProgressRounds): self._progress = self._progress.advance() elif isinstance(info, Finished): self.terminate() elif isinstance(info, Voting): # MOPAC protocol self._lastvotes = self._vote(cast(Voting, info)) val(self.getConnection()).send(self._lastvotes) elif isinstance(info, OptIn): val(self.getConnection()).send(self._lastvotes) else: self.getReporter().log( logging.WARNING, "Ignoring unknown info " + str(info) ) # Override def getCapabilities(self) -> Capabilities: return Capabilities( set(["SAOP", "Learn", "MOPAC"]), set(["geniusweb.profile.utilityspace.LinearAdditive"]), ) # Override def getDescription(self) -> str: return "Offers random bids until a bid with sufficient utility is offered. Parameters minPower and maxPower can be used to control voting behaviour." 
# Override def terminate(self): self.getReporter().log(logging.INFO, "party is terminating:") super().terminate() if self._profile != None: self._profile.close() self._profile = None def _myTurn(self): if self._isGood(self._lastReceivedBid): action = Accept(self._me, self._lastReceivedBid) else: for _attempt in range(20): bid = self._getRandomBid(self._profile.getProfile().getDomain()) if self._isGood(bid): break action = Offer(self._me, bid) self.getConnection().send(action) def _isGood(self, bid: Bid) -> bool: if bid == None: return False profile = self._profile.getProfile() if isinstance(profile, UtilitySpace): return profile.getUtility(bid) > 0.6 raise Exception("Can not handle this type of profile") def _getRandomBid(self, domain: Domain) -> Bid: allBids = AllBidsList(domain) return allBids.get(randint(0, allBids.size() - 1)) def _vote(self, voting: Voting) -> Votes: """ @param voting the {@link Voting} object containing the options @return our next Votes. """ val = self._settings.getParameters().get("minPower") minpower: int = val if isinstance(val, int) else 2 val = self._settings.getParameters().get("maxPower") maxpower: int = val if isinstance(val, int) else 9999999 votes: Set[Vote] = set( [ Vote(self._me, offer.getBid(), minpower, maxpower) for offer in voting.getOffers() if self._isGood(offer.getBid()) ] ) return Votes(self._me, votes)
METER_TO_KM = 1e-3
ONE_TO_KILO = 1e3
KM_TO_METER = 1e3
KILO_TO_ONE = 1e3

# Average earth radius, see https://en.wikipedia.org/wiki/Earth_radius
EARTH_RADIUS_KM = 6371.0088

# In reality air density varies between about 1.14 and 1.42 kg/m^3;
# 1.225 kg/m^3 is the standard sea-level value.
AIR_DENSITY_RHO = 1.225

# Using a fixed yearly average introduces a small error for any particular year
# (leap years), but it is fine on average.
# Warning: in most cases it is better to use mean() instead of sum()/HOURS_PER_YEAR.
HOURS_PER_YEAR = 8765.812536
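# Hypothetical usage sketch (not part of the constants above): EARTH_RADIUS_KM is the
# kind of constant typically consumed by a haversine great-circle distance helper.
from math import asin, cos, radians, sin, sqrt

def haversine_km(lat1, lon1, lat2, lon2, radius_km=EARTH_RADIUS_KM):
    """Great-circle distance in km between two (lat, lon) points given in degrees."""
    phi1, phi2 = radians(lat1), radians(lat2)
    dphi = radians(lat2 - lat1)
    dlam = radians(lon2 - lon1)
    a = sin(dphi / 2) ** 2 + cos(phi1) * cos(phi2) * sin(dlam / 2) ** 2
    return 2 * radius_km * asin(sqrt(a))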
# Time:  O(n)
# Space: O(1)
class Solution(object):
    # @param a, a string
    # @param b, a string
    # @return a string
    def addBinary(self, a, b):
        result, carry, val = "", 0, 0
        for i in range(max(len(a), len(b))):
            val = carry
            if i < len(a):
                val += int(a[-(i + 1)])
            if i < len(b):
                val += int(b[-(i + 1)])
            carry, val = divmod(val, 2)
            result += str(val)
        if carry:
            result += str(carry)
        return result[::-1]


# Time:  O(n)
# Space: O(1)
from itertools import zip_longest


class Solution2(object):
    def addBinary(self, a, b):
        """
        :type a: str
        :type b: str
        :rtype: str
        """
        result = ""
        carry = 0
        for x, y in zip_longest(reversed(a), reversed(b), fillvalue="0"):
            carry, remainder = divmod(int(x) + int(y) + carry, 2)
            result += str(remainder)
        if carry:
            result += str(carry)
        return result[::-1]
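# Quick sanity check (assumed usage, not part of the original solutions):
if __name__ == "__main__":
    assert Solution().addBinary("11", "1") == "100"
    assert Solution2().addBinary("1010", "1011") == "10101"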
"""Test whether all elements of cls.args are instances of Basic. """ # NOTE: keep tests sorted by (module, class name) key. If a class can't # be instantiated, add it here anyway with @SKIP("abstract class) (see # e.g. Function). import os import re import warnings import io from sympy import Basic, S, symbols, sqrt, sin, oo, Interval, exp from sympy.core.compatibility import range from sympy.utilities.pytest import XFAIL, SKIP from sympy.utilities.exceptions import SymPyDeprecationWarning x, y, z = symbols('x,y,z') def test_all_classes_are_tested(): this = os.path.split(__file__)[0] path = os.path.join(this, os.pardir, os.pardir) sympy_path = os.path.abspath(path) prefix = os.path.split(sympy_path)[0] + os.sep re_cls = re.compile("^class ([A-Za-z][A-Za-z0-9_]*)\s*\(", re.MULTILINE) modules = {} for root, dirs, files in os.walk(sympy_path): module = root.replace(prefix, "").replace(os.sep, ".") for file in files: if file.startswith(("_", "test_", "bench_")): continue if not file.endswith(".py"): continue with io.open(os.path.join(root, file), "r", encoding='utf-8') as f: text = f.read() submodule = module + '.' + file[:-3] names = re_cls.findall(text) if not names: continue try: mod = __import__(submodule, fromlist=names) except ImportError: continue def is_Basic(name): cls = getattr(mod, name) return issubclass(cls, Basic) names = list(filter(is_Basic, names)) if names: modules[submodule] = names ns = globals() failed = [] for module, names in modules.items(): mod = module.replace('.', '__') for name in names: test = 'test_' + mod + '__' + name if test not in ns: failed.append(module + '.' + name) # reset all SymPyDeprecationWarning into errors warnings.simplefilter("error", category=SymPyDeprecationWarning) assert not failed, "Missing classes: %s. Please add tests for these to sympy/core/tests/test_args.py." 
% ", ".join(failed) def _test_args(obj): return all(isinstance(arg, Basic) for arg in obj.args) def test_sympy__assumptions__assume__AppliedPredicate(): from sympy.assumptions.assume import AppliedPredicate, Predicate assert _test_args(AppliedPredicate(Predicate("test"), 2)) def test_sympy__assumptions__assume__Predicate(): from sympy.assumptions.assume import Predicate assert _test_args(Predicate("test")) @XFAIL def test_sympy__combinatorics__graycode__GrayCode(): from sympy.combinatorics.graycode import GrayCode # an integer is given and returned from GrayCode as the arg assert _test_args(GrayCode(3, start='100')) assert _test_args(GrayCode(3, rank=1)) def test_sympy__combinatorics__subsets__Subset(): from sympy.combinatorics.subsets import Subset assert _test_args(Subset([0, 1], [0, 1, 2, 3])) assert _test_args(Subset(['c', 'd'], ['a', 'b', 'c', 'd'])) @XFAIL def test_sympy__combinatorics__permutations__Permutation(): from sympy.combinatorics.permutations import Permutation assert _test_args(Permutation([0, 1, 2, 3])) def test_sympy__combinatorics__perm_groups__PermutationGroup(): from sympy.combinatorics.permutations import Permutation from sympy.combinatorics.perm_groups import PermutationGroup assert _test_args(PermutationGroup([Permutation([0, 1])])) def test_sympy__combinatorics__polyhedron__Polyhedron(): from sympy.combinatorics.permutations import Permutation from sympy.combinatorics.polyhedron import Polyhedron from sympy.abc import w, x, y, z pgroup = [Permutation([[0, 1, 2], [3]]), Permutation([[0, 1, 3], [2]]), Permutation([[0, 2, 3], [1]]), Permutation([[1, 2, 3], [0]]), Permutation([[0, 1], [2, 3]]), Permutation([[0, 2], [1, 3]]), Permutation([[0, 3], [1, 2]]), Permutation([[0, 1, 2, 3]])] corners = [w, x, y, z] faces = [(w, x, y), (w, y, z), (w, z, x), (x, y, z)] assert _test_args(Polyhedron(corners, faces, pgroup)) @XFAIL def test_sympy__combinatorics__prufer__Prufer(): from sympy.combinatorics.prufer import Prufer assert _test_args(Prufer([[0, 1], [0, 2], [0, 3]], 4)) def test_sympy__combinatorics__partitions__Partition(): from sympy.combinatorics.partitions import Partition assert _test_args(Partition([1])) @XFAIL def test_sympy__combinatorics__partitions__IntegerPartition(): from sympy.combinatorics.partitions import IntegerPartition assert _test_args(IntegerPartition([1])) def test_sympy__concrete__products__Product(): from sympy.concrete.products import Product assert _test_args(Product(x, (x, 0, 10))) assert _test_args(Product(x, (x, 0, y), (y, 0, 10))) @SKIP("abstract Class") def test_sympy__concrete__expr_with_limits__ExprWithLimits(): from sympy.concrete.expr_with_limits import ExprWithLimits assert _test_args(ExprWithLimits(x, (x, 0, 10))) assert _test_args(ExprWithLimits(x*y, (x, 0, 10.),(y,1.,3))) @SKIP("abstract Class") def test_sympy__concrete__expr_with_limits__AddWithLimits(): from sympy.concrete.expr_with_limits import AddWithLimits assert _test_args(AddWithLimits(x, (x, 0, 10))) assert _test_args(AddWithLimits(x*y, (x, 0, 10),(y,1,3))) @SKIP("abstract Class") def test_sympy__concrete__expr_with_intlimits__ExprWithIntLimits(): from sympy.concrete.expr_with_intlimits import ExprWithIntLimits assert _test_args(ExprWithIntLimits(x, (x, 0, 10))) assert _test_args(ExprWithIntLimits(x*y, (x, 0, 10),(y,1,3))) def test_sympy__concrete__summations__Sum(): from sympy.concrete.summations import Sum assert _test_args(Sum(x, (x, 0, 10))) assert _test_args(Sum(x, (x, 0, y), (y, 0, 10))) def test_sympy__core__add__Add(): from sympy.core.add import Add assert 
_test_args(Add(x, y, z, 2)) def test_sympy__core__basic__Atom(): from sympy.core.basic import Atom assert _test_args(Atom()) def test_sympy__core__basic__Basic(): from sympy.core.basic import Basic assert _test_args(Basic()) def test_sympy__core__containers__Dict(): from sympy.core.containers import Dict assert _test_args(Dict({x: y, y: z})) def test_sympy__core__containers__Tuple(): from sympy.core.containers import Tuple assert _test_args(Tuple(x, y, z, 2)) def test_sympy__core__expr__AtomicExpr(): from sympy.core.expr import AtomicExpr assert _test_args(AtomicExpr()) def test_sympy__core__expr__Expr(): from sympy.core.expr import Expr assert _test_args(Expr()) def test_sympy__core__function__Application(): from sympy.core.function import Application assert _test_args(Application(1, 2, 3)) def test_sympy__core__function__AppliedUndef(): from sympy.core.function import AppliedUndef assert _test_args(AppliedUndef(1, 2, 3)) def test_sympy__core__function__Derivative(): from sympy.core.function import Derivative assert _test_args(Derivative(2, x, y, 3)) @SKIP("abstract class") def test_sympy__core__function__Function(): pass def test_sympy__core__function__Lambda(): from sympy.core.function import Lambda assert _test_args(Lambda((x, y), x + y + z)) def test_sympy__core__function__Subs(): from sympy.core.function import Subs assert _test_args(Subs(x + y, x, 2)) def test_sympy__core__function__WildFunction(): from sympy.core.function import WildFunction assert _test_args(WildFunction('f')) def test_sympy__core__mod__Mod(): from sympy.core.mod import Mod assert _test_args(Mod(x, 2)) def test_sympy__core__mul__Mul(): from sympy.core.mul import Mul assert _test_args(Mul(2, x, y, z)) def test_sympy__core__numbers__Catalan(): from sympy.core.numbers import Catalan assert _test_args(Catalan()) def test_sympy__core__numbers__ComplexInfinity(): from sympy.core.numbers import ComplexInfinity assert _test_args(ComplexInfinity()) def test_sympy__core__numbers__EulerGamma(): from sympy.core.numbers import EulerGamma assert _test_args(EulerGamma()) def test_sympy__core__numbers__Exp1(): from sympy.core.numbers import Exp1 assert _test_args(Exp1()) def test_sympy__core__numbers__Float(): from sympy.core.numbers import Float assert _test_args(Float(1.23)) def test_sympy__core__numbers__GoldenRatio(): from sympy.core.numbers import GoldenRatio assert _test_args(GoldenRatio()) def test_sympy__core__numbers__Half(): from sympy.core.numbers import Half assert _test_args(Half()) def test_sympy__core__numbers__ImaginaryUnit(): from sympy.core.numbers import ImaginaryUnit assert _test_args(ImaginaryUnit()) def test_sympy__core__numbers__Infinity(): from sympy.core.numbers import Infinity assert _test_args(Infinity()) def test_sympy__core__numbers__Integer(): from sympy.core.numbers import Integer assert _test_args(Integer(7)) @SKIP("abstract class") def test_sympy__core__numbers__IntegerConstant(): pass def test_sympy__core__numbers__NaN(): from sympy.core.numbers import NaN assert _test_args(NaN()) def test_sympy__core__numbers__NegativeInfinity(): from sympy.core.numbers import NegativeInfinity assert _test_args(NegativeInfinity()) def test_sympy__core__numbers__NegativeOne(): from sympy.core.numbers import NegativeOne assert _test_args(NegativeOne()) def test_sympy__core__numbers__Number(): from sympy.core.numbers import Number assert _test_args(Number(1, 7)) def test_sympy__core__numbers__NumberSymbol(): from sympy.core.numbers import NumberSymbol assert _test_args(NumberSymbol()) def 
test_sympy__core__numbers__One(): from sympy.core.numbers import One assert _test_args(One()) def test_sympy__core__numbers__Pi(): from sympy.core.numbers import Pi assert _test_args(Pi()) def test_sympy__core__numbers__Rational(): from sympy.core.numbers import Rational assert _test_args(Rational(1, 7)) @SKIP("abstract class") def test_sympy__core__numbers__RationalConstant(): pass def test_sympy__core__numbers__Zero(): from sympy.core.numbers import Zero assert _test_args(Zero()) @SKIP("abstract class") def test_sympy__core__operations__AssocOp(): pass @SKIP("abstract class") def test_sympy__core__operations__LatticeOp(): pass def test_sympy__core__power__Pow(): from sympy.core.power import Pow assert _test_args(Pow(x, 2)) def test_sympy__core__relational__Equality(): from sympy.core.relational import Equality assert _test_args(Equality(x, 2)) def test_sympy__core__relational__GreaterThan(): from sympy.core.relational import GreaterThan assert _test_args(GreaterThan(x, 2)) def test_sympy__core__relational__LessThan(): from sympy.core.relational import LessThan assert _test_args(LessThan(x, 2)) @SKIP("abstract class") def test_sympy__core__relational__Relational(): pass def test_sympy__core__relational__StrictGreaterThan(): from sympy.core.relational import StrictGreaterThan assert _test_args(StrictGreaterThan(x, 2)) def test_sympy__core__relational__StrictLessThan(): from sympy.core.relational import StrictLessThan assert _test_args(StrictLessThan(x, 2)) def test_sympy__core__relational__Unequality(): from sympy.core.relational import Unequality assert _test_args(Unequality(x, 2)) def test_sympy__sets__sets__EmptySet(): from sympy.sets.sets import EmptySet assert _test_args(EmptySet()) def test_sympy__sets__sets__UniversalSet(): from sympy.sets.sets import UniversalSet assert _test_args(UniversalSet()) def test_sympy__sets__sets__FiniteSet(): from sympy.sets.sets import FiniteSet assert _test_args(FiniteSet(x, y, z)) def test_sympy__sets__sets__Interval(): from sympy.sets.sets import Interval assert _test_args(Interval(0, 1)) def test_sympy__sets__sets__ProductSet(): from sympy.sets.sets import ProductSet, Interval assert _test_args(ProductSet(Interval(0, 1), Interval(0, 1))) @SKIP("does it make sense to test this?") def test_sympy__sets__sets__Set(): from sympy.sets.sets import Set assert _test_args(Set()) def test_sympy__sets__sets__Intersection(): from sympy.sets.sets import Intersection, Interval assert _test_args(Intersection(Interval(0, 3), Interval(2, 4), evaluate=False)) def test_sympy__sets__sets__Union(): from sympy.sets.sets import Union, Interval assert _test_args(Union(Interval(0, 1), Interval(2, 3))) def test_sympy__sets__sets__Complement(): from sympy.sets.sets import Complement assert _test_args(Complement(Interval(0, 2), Interval(0, 1))) def test_sympy__sets__sets__SymmetricDifference(): from sympy.sets.sets import FiniteSet, SymmetricDifference assert _test_args(SymmetricDifference(FiniteSet(1, 2, 3), \ FiniteSet(2, 3, 4))) def test_sympy__core__trace__Tr(): from sympy.core.trace import Tr a, b = symbols('a b') assert _test_args(Tr(a + b)) def test_sympy__sets__fancysets__Naturals(): from sympy.sets.fancysets import Naturals assert _test_args(Naturals()) def test_sympy__sets__fancysets__Naturals0(): from sympy.sets.fancysets import Naturals0 assert _test_args(Naturals0()) def test_sympy__sets__fancysets__Integers(): from sympy.sets.fancysets import Integers assert _test_args(Integers()) def test_sympy__sets__fancysets__Reals(): from sympy.sets.fancysets import Reals 
assert _test_args(Reals()) def test_sympy__sets__fancysets__ImageSet(): from sympy.sets.fancysets import ImageSet from sympy import S, Lambda, Symbol x = Symbol('x') assert _test_args(ImageSet(Lambda(x, x**2), S.Naturals)) def test_sympy__sets__fancysets__Range(): from sympy.sets.fancysets import Range assert _test_args(Range(1, 5, 1)) def test_sympy__sets__contains__Contains(): from sympy.sets.fancysets import Range from sympy.sets.contains import Contains assert _test_args(Contains(x, Range(0, 10, 2))) # STATS from sympy.stats.crv_types import NormalDistribution nd = NormalDistribution(0, 1) from sympy.stats.frv_types import DieDistribution die = DieDistribution(6) def test_sympy__stats__crv__ContinuousDomain(): from sympy.stats.crv import ContinuousDomain assert _test_args(ContinuousDomain(set([x]), Interval(-oo, oo))) def test_sympy__stats__crv__SingleContinuousDomain(): from sympy.stats.crv import SingleContinuousDomain assert _test_args(SingleContinuousDomain(x, Interval(-oo, oo))) def test_sympy__stats__crv__ProductContinuousDomain(): from sympy.stats.crv import SingleContinuousDomain, ProductContinuousDomain D = SingleContinuousDomain(x, Interval(-oo, oo)) E = SingleContinuousDomain(y, Interval(0, oo)) assert _test_args(ProductContinuousDomain(D, E)) def test_sympy__stats__crv__ConditionalContinuousDomain(): from sympy.stats.crv import (SingleContinuousDomain, ConditionalContinuousDomain) D = SingleContinuousDomain(x, Interval(-oo, oo)) assert _test_args(ConditionalContinuousDomain(D, x > 0)) def test_sympy__stats__crv__ContinuousPSpace(): from sympy.stats.crv import ContinuousPSpace, SingleContinuousDomain D = SingleContinuousDomain(x, Interval(-oo, oo)) assert _test_args(ContinuousPSpace(D, nd)) def test_sympy__stats__crv__SingleContinuousPSpace(): from sympy.stats.crv import SingleContinuousPSpace assert _test_args(SingleContinuousPSpace(x, nd)) def test_sympy__stats__crv__ProductContinuousPSpace(): from sympy.stats.crv import ProductContinuousPSpace, SingleContinuousPSpace A = SingleContinuousPSpace(x, nd) B = SingleContinuousPSpace(y, nd) assert _test_args(ProductContinuousPSpace(A, B)) @SKIP("abstract class") def test_sympy__stats__crv__SingleContinuousDistribution(): pass def test_sympy__stats__drv__SingleDiscreteDomain(): from sympy.stats.drv import SingleDiscreteDomain assert _test_args(SingleDiscreteDomain(x, S.Naturals)) def test_sympy__stats__drv__SingleDiscretePSpace(): from sympy.stats.drv import SingleDiscretePSpace from sympy.stats.drv_types import PoissonDistribution assert _test_args(SingleDiscretePSpace(x, PoissonDistribution(1))) @SKIP("abstract class") def test_sympy__stats__drv__SingleDiscreteDistribution(): pass def test_sympy__stats__rv__RandomDomain(): from sympy.stats.rv import RandomDomain from sympy.sets.sets import FiniteSet assert _test_args(RandomDomain(FiniteSet(x), FiniteSet(1, 2, 3))) def test_sympy__stats__rv__SingleDomain(): from sympy.stats.rv import SingleDomain from sympy.sets.sets import FiniteSet assert _test_args(SingleDomain(x, FiniteSet(1, 2, 3))) def test_sympy__stats__rv__ConditionalDomain(): from sympy.stats.rv import ConditionalDomain, RandomDomain from sympy.sets.sets import FiniteSet D = RandomDomain(FiniteSet(x), FiniteSet(1, 2)) assert _test_args(ConditionalDomain(D, x > 1)) def test_sympy__stats__rv__PSpace(): from sympy.stats.rv import PSpace, RandomDomain from sympy import FiniteSet D = RandomDomain(FiniteSet(x), FiniteSet(1, 2, 3, 4, 5, 6)) assert _test_args(PSpace(D, die)) @SKIP("abstract Class") def 
test_sympy__stats__rv__SinglePSpace(): pass def test_sympy__stats__rv__RandomSymbol(): from sympy.stats.rv import RandomSymbol from sympy.stats.crv import SingleContinuousPSpace A = SingleContinuousPSpace(x, nd) assert _test_args(RandomSymbol(A, x)) def test_sympy__stats__rv__ProductPSpace(): from sympy.stats.rv import ProductPSpace from sympy.stats.crv import SingleContinuousPSpace A = SingleContinuousPSpace(x, nd) B = SingleContinuousPSpace(y, nd) assert _test_args(ProductPSpace(A, B)) def test_sympy__stats__rv__ProductDomain(): from sympy.stats.rv import ProductDomain, SingleDomain D = SingleDomain(x, Interval(-oo, oo)) E = SingleDomain(y, Interval(0, oo)) assert _test_args(ProductDomain(D, E)) def test_sympy__stats__frv_types__DiscreteUniformDistribution(): from sympy.stats.frv_types import DiscreteUniformDistribution from sympy.core.containers import Tuple assert _test_args(DiscreteUniformDistribution(Tuple(*list(range(6))))) def test_sympy__stats__frv_types__DieDistribution(): from sympy.stats.frv_types import DieDistribution assert _test_args(DieDistribution(6)) def test_sympy__stats__frv_types__BernoulliDistribution(): from sympy.stats.frv_types import BernoulliDistribution assert _test_args(BernoulliDistribution(S.Half, 0, 1)) def test_sympy__stats__frv_types__BinomialDistribution(): from sympy.stats.frv_types import BinomialDistribution assert _test_args(BinomialDistribution(5, S.Half, 1, 0)) def test_sympy__stats__frv_types__HypergeometricDistribution(): from sympy.stats.frv_types import HypergeometricDistribution assert _test_args(HypergeometricDistribution(10, 5, 3)) def test_sympy__stats__frv_types__RademacherDistribution(): from sympy.stats.frv_types import RademacherDistribution assert _test_args(RademacherDistribution()) def test_sympy__stats__frv__FiniteDomain(): from sympy.stats.frv import FiniteDomain assert _test_args(FiniteDomain(set([(x, 1), (x, 2)]))) # x can be 1 or 2 def test_sympy__stats__frv__SingleFiniteDomain(): from sympy.stats.frv import SingleFiniteDomain assert _test_args(SingleFiniteDomain(x, set([1, 2]))) # x can be 1 or 2 def test_sympy__stats__frv__ProductFiniteDomain(): from sympy.stats.frv import SingleFiniteDomain, ProductFiniteDomain xd = SingleFiniteDomain(x, set([1, 2])) yd = SingleFiniteDomain(y, set([1, 2])) assert _test_args(ProductFiniteDomain(xd, yd)) def test_sympy__stats__frv__ConditionalFiniteDomain(): from sympy.stats.frv import SingleFiniteDomain, ConditionalFiniteDomain xd = SingleFiniteDomain(x, set([1, 2])) assert _test_args(ConditionalFiniteDomain(xd, x > 1)) def test_sympy__stats__frv__FinitePSpace(): from sympy.stats.frv import FinitePSpace, SingleFiniteDomain xd = SingleFiniteDomain(x, set([1, 2, 3, 4, 5, 6])) p = 1.0/6 xd = SingleFiniteDomain(x, set([1, 2])) assert _test_args(FinitePSpace(xd, {(x, 1): S.Half, (x, 2): S.Half})) def test_sympy__stats__frv__SingleFinitePSpace(): from sympy.stats.frv import SingleFinitePSpace from sympy import Symbol assert _test_args(SingleFinitePSpace(Symbol('x'), die)) def test_sympy__stats__frv__ProductFinitePSpace(): from sympy.stats.frv import SingleFinitePSpace, ProductFinitePSpace from sympy import Symbol xp = SingleFinitePSpace(Symbol('x'), die) yp = SingleFinitePSpace(Symbol('y'), die) assert _test_args(ProductFinitePSpace(xp, yp)) @SKIP("abstract class") def test_sympy__stats__frv__SingleFiniteDistribution(): pass @SKIP("abstract class") def test_sympy__stats__crv__ContinuousDistribution(): pass def test_sympy__stats__frv_types__FiniteDistributionHandmade(): from sympy.stats.frv_types 
import FiniteDistributionHandmade assert _test_args(FiniteDistributionHandmade({1: 1})) def test_sympy__stats__crv__ContinuousDistributionHandmade(): from sympy.stats.crv import ContinuousDistributionHandmade from sympy import Symbol, Interval assert _test_args(ContinuousDistributionHandmade(Symbol('x'), Interval(0, 2))) def test_sympy__stats__rv__Density(): from sympy.stats.rv import Density from sympy.stats.crv_types import Normal assert _test_args(Density(Normal('x', 0, 1))) def test_sympy__stats__crv_types__ArcsinDistribution(): from sympy.stats.crv_types import ArcsinDistribution assert _test_args(ArcsinDistribution(0, 1)) def test_sympy__stats__crv_types__BeniniDistribution(): from sympy.stats.crv_types import BeniniDistribution assert _test_args(BeniniDistribution(1, 1, 1)) def test_sympy__stats__crv_types__BetaDistribution(): from sympy.stats.crv_types import BetaDistribution assert _test_args(BetaDistribution(1, 1)) def test_sympy__stats__crv_types__BetaPrimeDistribution(): from sympy.stats.crv_types import BetaPrimeDistribution assert _test_args(BetaPrimeDistribution(1, 1)) def test_sympy__stats__crv_types__CauchyDistribution(): from sympy.stats.crv_types import CauchyDistribution assert _test_args(CauchyDistribution(0, 1)) def test_sympy__stats__crv_types__ChiDistribution(): from sympy.stats.crv_types import ChiDistribution assert _test_args(ChiDistribution(1)) def test_sympy__stats__crv_types__ChiNoncentralDistribution(): from sympy.stats.crv_types import ChiNoncentralDistribution assert _test_args(ChiNoncentralDistribution(1,1)) def test_sympy__stats__crv_types__ChiSquaredDistribution(): from sympy.stats.crv_types import ChiSquaredDistribution assert _test_args(ChiSquaredDistribution(1)) def test_sympy__stats__crv_types__DagumDistribution(): from sympy.stats.crv_types import DagumDistribution assert _test_args(DagumDistribution(1, 1, 1)) def test_sympy__stats__crv_types__ExponentialDistribution(): from sympy.stats.crv_types import ExponentialDistribution assert _test_args(ExponentialDistribution(1)) def test_sympy__stats__crv_types__FDistributionDistribution(): from sympy.stats.crv_types import FDistributionDistribution assert _test_args(FDistributionDistribution(1, 1)) def test_sympy__stats__crv_types__FisherZDistribution(): from sympy.stats.crv_types import FisherZDistribution assert _test_args(FisherZDistribution(1, 1)) def test_sympy__stats__crv_types__FrechetDistribution(): from sympy.stats.crv_types import FrechetDistribution assert _test_args(FrechetDistribution(1, 1, 1)) def test_sympy__stats__crv_types__GammaInverseDistribution(): from sympy.stats.crv_types import GammaInverseDistribution assert _test_args(GammaInverseDistribution(1, 1)) def test_sympy__stats__crv_types__GammaDistribution(): from sympy.stats.crv_types import GammaDistribution assert _test_args(GammaDistribution(1, 1)) def test_sympy__stats__crv_types__KumaraswamyDistribution(): from sympy.stats.crv_types import KumaraswamyDistribution assert _test_args(KumaraswamyDistribution(1, 1)) def test_sympy__stats__crv_types__LaplaceDistribution(): from sympy.stats.crv_types import LaplaceDistribution assert _test_args(LaplaceDistribution(0, 1)) def test_sympy__stats__crv_types__LogisticDistribution(): from sympy.stats.crv_types import LogisticDistribution assert _test_args(LogisticDistribution(0, 1)) def test_sympy__stats__crv_types__LogNormalDistribution(): from sympy.stats.crv_types import LogNormalDistribution assert _test_args(LogNormalDistribution(0, 1)) def 
test_sympy__stats__crv_types__MaxwellDistribution(): from sympy.stats.crv_types import MaxwellDistribution assert _test_args(MaxwellDistribution(1)) def test_sympy__stats__crv_types__NakagamiDistribution(): from sympy.stats.crv_types import NakagamiDistribution assert _test_args(NakagamiDistribution(1, 1)) def test_sympy__stats__crv_types__NormalDistribution(): from sympy.stats.crv_types import NormalDistribution assert _test_args(NormalDistribution(0, 1)) def test_sympy__stats__crv_types__ParetoDistribution(): from sympy.stats.crv_types import ParetoDistribution assert _test_args(ParetoDistribution(1, 1)) def test_sympy__stats__crv_types__QuadraticUDistribution(): from sympy.stats.crv_types import QuadraticUDistribution assert _test_args(QuadraticUDistribution(1, 2)) def test_sympy__stats__crv_types__RaisedCosineDistribution(): from sympy.stats.crv_types import RaisedCosineDistribution assert _test_args(RaisedCosineDistribution(1, 1)) def test_sympy__stats__crv_types__RayleighDistribution(): from sympy.stats.crv_types import RayleighDistribution assert _test_args(RayleighDistribution(1)) def test_sympy__stats__crv_types__StudentTDistribution(): from sympy.stats.crv_types import StudentTDistribution assert _test_args(StudentTDistribution(1)) def test_sympy__stats__crv_types__TriangularDistribution(): from sympy.stats.crv_types import TriangularDistribution assert _test_args(TriangularDistribution(-1, 0, 1)) def test_sympy__stats__crv_types__UniformDistribution(): from sympy.stats.crv_types import UniformDistribution assert _test_args(UniformDistribution(0, 1)) def test_sympy__stats__crv_types__UniformSumDistribution(): from sympy.stats.crv_types import UniformSumDistribution assert _test_args(UniformSumDistribution(1)) def test_sympy__stats__crv_types__VonMisesDistribution(): from sympy.stats.crv_types import VonMisesDistribution assert _test_args(VonMisesDistribution(1, 1)) def test_sympy__stats__crv_types__WeibullDistribution(): from sympy.stats.crv_types import WeibullDistribution assert _test_args(WeibullDistribution(1, 1)) def test_sympy__stats__crv_types__WignerSemicircleDistribution(): from sympy.stats.crv_types import WignerSemicircleDistribution assert _test_args(WignerSemicircleDistribution(1)) def test_sympy__stats__drv_types__PoissonDistribution(): from sympy.stats.drv_types import PoissonDistribution assert _test_args(PoissonDistribution(1)) def test_sympy__stats__drv_types__GeometricDistribution(): from sympy.stats.drv_types import GeometricDistribution assert _test_args(GeometricDistribution(.5)) def test_sympy__core__symbol__Dummy(): from sympy.core.symbol import Dummy assert _test_args(Dummy('t')) def test_sympy__core__symbol__Symbol(): from sympy.core.symbol import Symbol assert _test_args(Symbol('t')) def test_sympy__core__symbol__Wild(): from sympy.core.symbol import Wild assert _test_args(Wild('x', exclude=[x])) @SKIP("abstract class") def test_sympy__functions__combinatorial__factorials__CombinatorialFunction(): pass def test_sympy__functions__combinatorial__factorials__FallingFactorial(): from sympy.functions.combinatorial.factorials import FallingFactorial assert _test_args(FallingFactorial(2, x)) def test_sympy__functions__combinatorial__factorials__MultiFactorial(): from sympy.functions.combinatorial.factorials import MultiFactorial assert _test_args(MultiFactorial(x)) def test_sympy__functions__combinatorial__factorials__RisingFactorial(): from sympy.functions.combinatorial.factorials import RisingFactorial assert _test_args(RisingFactorial(2, x)) def 
test_sympy__functions__combinatorial__factorials__binomial(): from sympy.functions.combinatorial.factorials import binomial assert _test_args(binomial(2, x)) def test_sympy__functions__combinatorial__factorials__subfactorial(): from sympy.functions.combinatorial.factorials import subfactorial assert _test_args(subfactorial(1)) def test_sympy__functions__combinatorial__factorials__factorial(): from sympy.functions.combinatorial.factorials import factorial assert _test_args(factorial(x)) def test_sympy__functions__combinatorial__factorials__factorial2(): from sympy.functions.combinatorial.factorials import factorial2 assert _test_args(factorial2(x)) def test_sympy__functions__combinatorial__numbers__bell(): from sympy.functions.combinatorial.numbers import bell assert _test_args(bell(x, y)) def test_sympy__functions__combinatorial__numbers__bernoulli(): from sympy.functions.combinatorial.numbers import bernoulli assert _test_args(bernoulli(x)) def test_sympy__functions__combinatorial__numbers__catalan(): from sympy.functions.combinatorial.numbers import catalan assert _test_args(catalan(x)) def test_sympy__functions__combinatorial__numbers__genocchi(): from sympy.functions.combinatorial.numbers import genocchi assert _test_args(genocchi(x)) def test_sympy__functions__combinatorial__numbers__euler(): from sympy.functions.combinatorial.numbers import euler assert _test_args(euler(x)) def test_sympy__functions__combinatorial__numbers__fibonacci(): from sympy.functions.combinatorial.numbers import fibonacci assert _test_args(fibonacci(x)) def test_sympy__functions__combinatorial__numbers__harmonic(): from sympy.functions.combinatorial.numbers import harmonic assert _test_args(harmonic(x, 2)) def test_sympy__functions__combinatorial__numbers__lucas(): from sympy.functions.combinatorial.numbers import lucas assert _test_args(lucas(x)) def test_sympy__functions__elementary__complexes__Abs(): from sympy.functions.elementary.complexes import Abs assert _test_args(Abs(x)) def test_sympy__functions__elementary__complexes__adjoint(): from sympy.functions.elementary.complexes import adjoint assert _test_args(adjoint(x)) def test_sympy__functions__elementary__complexes__arg(): from sympy.functions.elementary.complexes import arg assert _test_args(arg(x)) def test_sympy__functions__elementary__complexes__conjugate(): from sympy.functions.elementary.complexes import conjugate assert _test_args(conjugate(x)) def test_sympy__functions__elementary__complexes__im(): from sympy.functions.elementary.complexes import im assert _test_args(im(x)) def test_sympy__functions__elementary__complexes__re(): from sympy.functions.elementary.complexes import re assert _test_args(re(x)) def test_sympy__functions__elementary__complexes__sign(): from sympy.functions.elementary.complexes import sign assert _test_args(sign(x)) def test_sympy__functions__elementary__complexes__polar_lift(): from sympy.functions.elementary.complexes import polar_lift assert _test_args(polar_lift(x)) def test_sympy__functions__elementary__complexes__periodic_argument(): from sympy.functions.elementary.complexes import periodic_argument assert _test_args(periodic_argument(x, y)) def test_sympy__functions__elementary__complexes__principal_branch(): from sympy.functions.elementary.complexes import principal_branch assert _test_args(principal_branch(x, y)) def test_sympy__functions__elementary__complexes__transpose(): from sympy.functions.elementary.complexes import transpose assert _test_args(transpose(x)) def 
test_sympy__functions__elementary__exponential__LambertW(): from sympy.functions.elementary.exponential import LambertW assert _test_args(LambertW(2)) @SKIP("abstract class") def test_sympy__functions__elementary__exponential__ExpBase(): pass def test_sympy__functions__elementary__exponential__exp(): from sympy.functions.elementary.exponential import exp assert _test_args(exp(2)) def test_sympy__functions__elementary__exponential__exp_polar(): from sympy.functions.elementary.exponential import exp_polar assert _test_args(exp_polar(2)) def test_sympy__functions__elementary__exponential__log(): from sympy.functions.elementary.exponential import log assert _test_args(log(2)) @SKIP("abstract class") def test_sympy__functions__elementary__hyperbolic__HyperbolicFunction(): pass @SKIP("abstract class") def test_sympy__functions__elementary__hyperbolic__ReciprocalHyperbolicFunction(): pass def test_sympy__functions__elementary__hyperbolic__acosh(): from sympy.functions.elementary.hyperbolic import acosh assert _test_args(acosh(2)) def test_sympy__functions__elementary__hyperbolic__acoth(): from sympy.functions.elementary.hyperbolic import acoth assert _test_args(acoth(2)) def test_sympy__functions__elementary__hyperbolic__asinh(): from sympy.functions.elementary.hyperbolic import asinh assert _test_args(asinh(2)) def test_sympy__functions__elementary__hyperbolic__atanh(): from sympy.functions.elementary.hyperbolic import atanh assert _test_args(atanh(2)) def test_sympy__functions__elementary__hyperbolic__cosh(): from sympy.functions.elementary.hyperbolic import cosh assert _test_args(cosh(2)) def test_sympy__functions__elementary__hyperbolic__coth(): from sympy.functions.elementary.hyperbolic import coth assert _test_args(coth(2)) def test_sympy__functions__elementary__hyperbolic__csch(): from sympy.functions.elementary.hyperbolic import csch assert _test_args(csch(2)) def test_sympy__functions__elementary__hyperbolic__sech(): from sympy.functions.elementary.hyperbolic import sech assert _test_args(sech(2)) def test_sympy__functions__elementary__hyperbolic__sinh(): from sympy.functions.elementary.hyperbolic import sinh assert _test_args(sinh(2)) def test_sympy__functions__elementary__hyperbolic__tanh(): from sympy.functions.elementary.hyperbolic import tanh assert _test_args(tanh(2)) @SKIP("does this work at all?") def test_sympy__functions__elementary__integers__RoundFunction(): from sympy.functions.elementary.integers import RoundFunction assert _test_args(RoundFunction()) def test_sympy__functions__elementary__integers__ceiling(): from sympy.functions.elementary.integers import ceiling assert _test_args(ceiling(x)) def test_sympy__functions__elementary__integers__floor(): from sympy.functions.elementary.integers import floor assert _test_args(floor(x)) def test_sympy__functions__elementary__miscellaneous__IdentityFunction(): from sympy.functions.elementary.miscellaneous import IdentityFunction assert _test_args(IdentityFunction()) def test_sympy__functions__elementary__miscellaneous__Max(): from sympy.functions.elementary.miscellaneous import Max assert _test_args(Max(x, 2)) def test_sympy__functions__elementary__miscellaneous__Min(): from sympy.functions.elementary.miscellaneous import Min assert _test_args(Min(x, 2)) @SKIP("abstract class") def test_sympy__functions__elementary__miscellaneous__MinMaxBase(): pass def test_sympy__functions__elementary__piecewise__ExprCondPair(): from sympy.functions.elementary.piecewise import ExprCondPair assert _test_args(ExprCondPair(1, True)) def 
test_sympy__functions__elementary__piecewise__Piecewise(): from sympy.functions.elementary.piecewise import Piecewise assert _test_args(Piecewise((1, x >= 0), (0, True))) @SKIP("abstract class") def test_sympy__functions__elementary__trigonometric__TrigonometricFunction(): pass @SKIP("abstract class") def test_sympy__functions__elementary__trigonometric__ReciprocalTrigonometricFunction(): pass @SKIP("abstract class") def test_sympy__functions__elementary__trigonometric__InverseTrigonometricFunction(): pass def test_sympy__functions__elementary__trigonometric__acos(): from sympy.functions.elementary.trigonometric import acos assert _test_args(acos(2)) def test_sympy__functions__elementary__trigonometric__acot(): from sympy.functions.elementary.trigonometric import acot assert _test_args(acot(2)) def test_sympy__functions__elementary__trigonometric__asin(): from sympy.functions.elementary.trigonometric import asin assert _test_args(asin(2)) def test_sympy__functions__elementary__trigonometric__asec(): from sympy.functions.elementary.trigonometric import asec assert _test_args(asec(2)) def test_sympy__functions__elementary__trigonometric__acsc(): from sympy.functions.elementary.trigonometric import acsc assert _test_args(acsc(2)) def test_sympy__functions__elementary__trigonometric__atan(): from sympy.functions.elementary.trigonometric import atan assert _test_args(atan(2)) def test_sympy__functions__elementary__trigonometric__atan2(): from sympy.functions.elementary.trigonometric import atan2 assert _test_args(atan2(2, 3)) def test_sympy__functions__elementary__trigonometric__cos(): from sympy.functions.elementary.trigonometric import cos assert _test_args(cos(2)) def test_sympy__functions__elementary__trigonometric__csc(): from sympy.functions.elementary.trigonometric import csc assert _test_args(csc(2)) def test_sympy__functions__elementary__trigonometric__cot(): from sympy.functions.elementary.trigonometric import cot assert _test_args(cot(2)) def test_sympy__functions__elementary__trigonometric__sin(): assert _test_args(sin(2)) def test_sympy__functions__elementary__trigonometric__sec(): from sympy.functions.elementary.trigonometric import sec assert _test_args(sec(2)) def test_sympy__functions__elementary__trigonometric__tan(): from sympy.functions.elementary.trigonometric import tan assert _test_args(tan(2)) @SKIP("abstract class") def test_sympy__functions__special__bessel__BesselBase(): pass @SKIP("abstract class") def test_sympy__functions__special__bessel__SphericalBesselBase(): pass def test_sympy__functions__special__bessel__besseli(): from sympy.functions.special.bessel import besseli assert _test_args(besseli(x, 1)) def test_sympy__functions__special__bessel__besselj(): from sympy.functions.special.bessel import besselj assert _test_args(besselj(x, 1)) def test_sympy__functions__special__bessel__besselk(): from sympy.functions.special.bessel import besselk assert _test_args(besselk(x, 1)) def test_sympy__functions__special__bessel__bessely(): from sympy.functions.special.bessel import bessely assert _test_args(bessely(x, 1)) def test_sympy__functions__special__bessel__hankel1(): from sympy.functions.special.bessel import hankel1 assert _test_args(hankel1(x, 1)) def test_sympy__functions__special__bessel__hankel2(): from sympy.functions.special.bessel import hankel2 assert _test_args(hankel2(x, 1)) def test_sympy__functions__special__bessel__jn(): from sympy.functions.special.bessel import jn assert _test_args(jn(0, x)) def test_sympy__functions__special__bessel__yn(): from 
sympy.functions.special.bessel import yn assert _test_args(yn(0, x)) def test_sympy__functions__special__bessel__AiryBase(): pass def test_sympy__functions__special__bessel__airyai(): from sympy.functions.special.bessel import airyai assert _test_args(airyai(2)) def test_sympy__functions__special__bessel__airybi(): from sympy.functions.special.bessel import airybi assert _test_args(airybi(2)) def test_sympy__functions__special__bessel__airyaiprime(): from sympy.functions.special.bessel import airyaiprime assert _test_args(airyaiprime(2)) def test_sympy__functions__special__bessel__airybiprime(): from sympy.functions.special.bessel import airybiprime assert _test_args(airybiprime(2)) def test_sympy__functions__special__elliptic_integrals__elliptic_k(): from sympy.functions.special.elliptic_integrals import elliptic_k as K assert _test_args(K(x)) def test_sympy__functions__special__elliptic_integrals__elliptic_f(): from sympy.functions.special.elliptic_integrals import elliptic_f as F assert _test_args(F(x, y)) def test_sympy__functions__special__elliptic_integrals__elliptic_e(): from sympy.functions.special.elliptic_integrals import elliptic_e as E assert _test_args(E(x)) assert _test_args(E(x, y)) def test_sympy__functions__special__elliptic_integrals__elliptic_pi(): from sympy.functions.special.elliptic_integrals import elliptic_pi as P assert _test_args(P(x, y)) assert _test_args(P(x, y, z)) def test_sympy__functions__special__delta_functions__DiracDelta(): from sympy.functions.special.delta_functions import DiracDelta assert _test_args(DiracDelta(x, 1)) def test_sympy__functions__special__delta_functions__Heaviside(): from sympy.functions.special.delta_functions import Heaviside assert _test_args(Heaviside(x)) def test_sympy__functions__special__error_functions__erf(): from sympy.functions.special.error_functions import erf assert _test_args(erf(2)) def test_sympy__functions__special__error_functions__erfc(): from sympy.functions.special.error_functions import erfc assert _test_args(erfc(2)) def test_sympy__functions__special__error_functions__erfi(): from sympy.functions.special.error_functions import erfi assert _test_args(erfi(2)) def test_sympy__functions__special__error_functions__erf2(): from sympy.functions.special.error_functions import erf2 assert _test_args(erf2(2, 3)) def test_sympy__functions__special__error_functions__erfinv(): from sympy.functions.special.error_functions import erfinv assert _test_args(erfinv(2)) def test_sympy__functions__special__error_functions__erfcinv(): from sympy.functions.special.error_functions import erfcinv assert _test_args(erfcinv(2)) def test_sympy__functions__special__error_functions__erf2inv(): from sympy.functions.special.error_functions import erf2inv assert _test_args(erf2inv(2, 3)) @SKIP("abstract class") def test_sympy__functions__special__error_functions__FresnelIntegral(): pass def test_sympy__functions__special__error_functions__fresnels(): from sympy.functions.special.error_functions import fresnels assert _test_args(fresnels(2)) def test_sympy__functions__special__error_functions__fresnelc(): from sympy.functions.special.error_functions import fresnelc assert _test_args(fresnelc(2)) def test_sympy__functions__special__error_functions__erfs(): from sympy.functions.special.error_functions import _erfs assert _test_args(_erfs(2)) def test_sympy__functions__special__error_functions__Ei(): from sympy.functions.special.error_functions import Ei assert _test_args(Ei(2)) def test_sympy__functions__special__error_functions__li(): from 
sympy.functions.special.error_functions import li assert _test_args(li(2)) def test_sympy__functions__special__error_functions__Li(): from sympy.functions.special.error_functions import Li assert _test_args(Li(2)) @SKIP("abstract class") def test_sympy__functions__special__error_functions__TrigonometricIntegral(): pass def test_sympy__functions__special__error_functions__Si(): from sympy.functions.special.error_functions import Si assert _test_args(Si(2)) def test_sympy__functions__special__error_functions__Ci(): from sympy.functions.special.error_functions import Ci assert _test_args(Ci(2)) def test_sympy__functions__special__error_functions__Shi(): from sympy.functions.special.error_functions import Shi assert _test_args(Shi(2)) def test_sympy__functions__special__error_functions__Chi(): from sympy.functions.special.error_functions import Chi assert _test_args(Chi(2)) def test_sympy__functions__special__error_functions__expint(): from sympy.functions.special.error_functions import expint assert _test_args(expint(y, x)) def test_sympy__functions__special__gamma_functions__gamma(): from sympy.functions.special.gamma_functions import gamma assert _test_args(gamma(x)) def test_sympy__functions__special__gamma_functions__loggamma(): from sympy.functions.special.gamma_functions import loggamma assert _test_args(loggamma(2)) def test_sympy__functions__special__gamma_functions__lowergamma(): from sympy.functions.special.gamma_functions import lowergamma assert _test_args(lowergamma(x, 2)) def test_sympy__functions__special__gamma_functions__polygamma(): from sympy.functions.special.gamma_functions import polygamma assert _test_args(polygamma(x, 2)) def test_sympy__functions__special__gamma_functions__uppergamma(): from sympy.functions.special.gamma_functions import uppergamma assert _test_args(uppergamma(x, 2)) def test_sympy__functions__special__beta_functions__beta(): from sympy.functions.special.beta_functions import beta assert _test_args(beta(x, x)) @SKIP("abstract class") def test_sympy__functions__special__hyper__TupleParametersBase(): pass @SKIP("abstract class") def test_sympy__functions__special__hyper__TupleArg(): pass def test_sympy__functions__special__hyper__hyper(): from sympy.functions.special.hyper import hyper assert _test_args(hyper([1, 2, 3], [4, 5], x)) def test_sympy__functions__special__hyper__meijerg(): from sympy.functions.special.hyper import meijerg assert _test_args(meijerg([1, 2, 3], [4, 5], [6], [], x)) @SKIP("abstract class") def test_sympy__functions__special__hyper__HyperRep(): pass def test_sympy__functions__special__hyper__HyperRep_power1(): from sympy.functions.special.hyper import HyperRep_power1 assert _test_args(HyperRep_power1(x, y)) def test_sympy__functions__special__hyper__HyperRep_power2(): from sympy.functions.special.hyper import HyperRep_power2 assert _test_args(HyperRep_power2(x, y)) def test_sympy__functions__special__hyper__HyperRep_log1(): from sympy.functions.special.hyper import HyperRep_log1 assert _test_args(HyperRep_log1(x)) def test_sympy__functions__special__hyper__HyperRep_atanh(): from sympy.functions.special.hyper import HyperRep_atanh assert _test_args(HyperRep_atanh(x)) def test_sympy__functions__special__hyper__HyperRep_asin1(): from sympy.functions.special.hyper import HyperRep_asin1 assert _test_args(HyperRep_asin1(x)) def test_sympy__functions__special__hyper__HyperRep_asin2(): from sympy.functions.special.hyper import HyperRep_asin2 assert _test_args(HyperRep_asin2(x)) def 
test_sympy__functions__special__hyper__HyperRep_sqrts1(): from sympy.functions.special.hyper import HyperRep_sqrts1 assert _test_args(HyperRep_sqrts1(x, y)) def test_sympy__functions__special__hyper__HyperRep_sqrts2(): from sympy.functions.special.hyper import HyperRep_sqrts2 assert _test_args(HyperRep_sqrts2(x, y)) def test_sympy__functions__special__hyper__HyperRep_log2(): from sympy.functions.special.hyper import HyperRep_log2 assert _test_args(HyperRep_log2(x)) def test_sympy__functions__special__hyper__HyperRep_cosasin(): from sympy.functions.special.hyper import HyperRep_cosasin assert _test_args(HyperRep_cosasin(x, y)) def test_sympy__functions__special__hyper__HyperRep_sinasin(): from sympy.functions.special.hyper import HyperRep_sinasin assert _test_args(HyperRep_sinasin(x, y)) @SKIP("abstract class") def test_sympy__functions__special__polynomials__OrthogonalPolynomial(): pass def test_sympy__functions__special__polynomials__jacobi(): from sympy.functions.special.polynomials import jacobi assert _test_args(jacobi(x, 2, 2, 2)) def test_sympy__functions__special__polynomials__gegenbauer(): from sympy.functions.special.polynomials import gegenbauer assert _test_args(gegenbauer(x, 2, 2)) def test_sympy__functions__special__polynomials__chebyshevt(): from sympy.functions.special.polynomials import chebyshevt assert _test_args(chebyshevt(x, 2)) def test_sympy__functions__special__polynomials__chebyshevt_root(): from sympy.functions.special.polynomials import chebyshevt_root assert _test_args(chebyshevt_root(3, 2)) def test_sympy__functions__special__polynomials__chebyshevu(): from sympy.functions.special.polynomials import chebyshevu assert _test_args(chebyshevu(x, 2)) def test_sympy__functions__special__polynomials__chebyshevu_root(): from sympy.functions.special.polynomials import chebyshevu_root assert _test_args(chebyshevu_root(3, 2)) def test_sympy__functions__special__polynomials__hermite(): from sympy.functions.special.polynomials import hermite assert _test_args(hermite(x, 2)) def test_sympy__functions__special__polynomials__legendre(): from sympy.functions.special.polynomials import legendre assert _test_args(legendre(x, 2)) def test_sympy__functions__special__polynomials__assoc_legendre(): from sympy.functions.special.polynomials import assoc_legendre assert _test_args(assoc_legendre(x, 0, y)) def test_sympy__functions__special__polynomials__laguerre(): from sympy.functions.special.polynomials import laguerre assert _test_args(laguerre(x, 2)) def test_sympy__functions__special__polynomials__assoc_laguerre(): from sympy.functions.special.polynomials import assoc_laguerre assert _test_args(assoc_laguerre(x, 0, y)) def test_sympy__functions__special__spherical_harmonics__Ynm(): from sympy.functions.special.spherical_harmonics import Ynm assert _test_args(Ynm(1, 1, x, y)) def test_sympy__functions__special__spherical_harmonics__Znm(): from sympy.functions.special.spherical_harmonics import Znm assert _test_args(Znm(1, 1, x, y)) def test_sympy__functions__special__tensor_functions__LeviCivita(): from sympy.functions.special.tensor_functions import LeviCivita assert _test_args(LeviCivita(x, y, 2)) def test_sympy__functions__special__tensor_functions__KroneckerDelta(): from sympy.functions.special.tensor_functions import KroneckerDelta assert _test_args(KroneckerDelta(x, y)) def test_sympy__functions__special__zeta_functions__dirichlet_eta(): from sympy.functions.special.zeta_functions import dirichlet_eta assert _test_args(dirichlet_eta(x)) def 
test_sympy__functions__special__zeta_functions__zeta(): from sympy.functions.special.zeta_functions import zeta assert _test_args(zeta(101)) def test_sympy__functions__special__zeta_functions__lerchphi(): from sympy.functions.special.zeta_functions import lerchphi assert _test_args(lerchphi(x, y, z)) def test_sympy__functions__special__zeta_functions__polylog(): from sympy.functions.special.zeta_functions import polylog assert _test_args(polylog(x, y)) def test_sympy__integrals__integrals__Integral(): from sympy.integrals.integrals import Integral assert _test_args(Integral(2, (x, 0, 1))) def test_sympy__integrals__risch__NonElementaryIntegral(): from sympy.integrals.risch import NonElementaryIntegral assert _test_args(NonElementaryIntegral(exp(-x**2), x)) @SKIP("abstract class") def test_sympy__integrals__transforms__IntegralTransform(): pass def test_sympy__integrals__transforms__MellinTransform(): from sympy.integrals.transforms import MellinTransform assert _test_args(MellinTransform(2, x, y)) def test_sympy__integrals__transforms__InverseMellinTransform(): from sympy.integrals.transforms import InverseMellinTransform assert _test_args(InverseMellinTransform(2, x, y, 0, 1)) def test_sympy__integrals__transforms__LaplaceTransform(): from sympy.integrals.transforms import LaplaceTransform assert _test_args(LaplaceTransform(2, x, y)) def test_sympy__integrals__transforms__InverseLaplaceTransform(): from sympy.integrals.transforms import InverseLaplaceTransform assert _test_args(InverseLaplaceTransform(2, x, y, 0)) @SKIP("abstract class") def test_sympy__integrals__transforms__FourierTypeTransform(): pass def test_sympy__integrals__transforms__InverseFourierTransform(): from sympy.integrals.transforms import InverseFourierTransform assert _test_args(InverseFourierTransform(2, x, y)) def test_sympy__integrals__transforms__FourierTransform(): from sympy.integrals.transforms import FourierTransform assert _test_args(FourierTransform(2, x, y)) @SKIP("abstract class") def test_sympy__integrals__transforms__SineCosineTypeTransform(): pass def test_sympy__integrals__transforms__InverseSineTransform(): from sympy.integrals.transforms import InverseSineTransform assert _test_args(InverseSineTransform(2, x, y)) def test_sympy__integrals__transforms__SineTransform(): from sympy.integrals.transforms import SineTransform assert _test_args(SineTransform(2, x, y)) def test_sympy__integrals__transforms__InverseCosineTransform(): from sympy.integrals.transforms import InverseCosineTransform assert _test_args(InverseCosineTransform(2, x, y)) def test_sympy__integrals__transforms__CosineTransform(): from sympy.integrals.transforms import CosineTransform assert _test_args(CosineTransform(2, x, y)) @SKIP("abstract class") def test_sympy__integrals__transforms__HankelTypeTransform(): pass def test_sympy__integrals__transforms__InverseHankelTransform(): from sympy.integrals.transforms import InverseHankelTransform assert _test_args(InverseHankelTransform(2, x, y, 0)) def test_sympy__integrals__transforms__HankelTransform(): from sympy.integrals.transforms import HankelTransform assert _test_args(HankelTransform(2, x, y, 0)) @XFAIL def test_sympy__liealgebras__cartan_type__CartanType_generator(): from sympy.liealgebras.cartan_type import CartanType_generator assert _test_args(CartanType_generator("A2")) @XFAIL def test_sympy__liealgebras__cartan_type__Standard_Cartan(): from sympy.liealgebras.cartan_type import Standard_Cartan assert _test_args(Standard_Cartan("A", 2)) @XFAIL def 
test_sympy__liealgebras__weyl_group__WeylGroup(): from sympy.liealgebras.weyl_group import WeylGroup assert _test_args(WeylGroup("B4")) @XFAIL def test_sympy__liealgebras__root_system__RootSystem(): from sympy.liealgebras.root_system import RootSystem assert _test_args(RootSystem("A2")) @XFAIL def test_sympy__liealgebras__type_a__TypeA(): from sympy.liealgebras.type_a import TypeA assert _test_args(TypeA(2)) @XFAIL def test_sympy__liealgebras__type_b__TypeB(): from sympy.liealgebras.type_b import TypeB assert _test_args(TypeB(4)) @XFAIL def test_sympy__liealgebras__type_c__TypeC(): from sympy.liealgebras.type_c import TypeC assert _test_args(TypeC(4)) @XFAIL def test_sympy__liealgebras__type_d__TypeD(): from sympy.liealgebras.type_d import TypeD assert _test_args(TypeD(4)) @XFAIL def test_sympy__liealgebras__type_e__TypeE(): from sympy.liealgebras.type_e import TypeE assert _test_args(TypeE(6)) @XFAIL def test_sympy__liealgebras__type_f__TypeF(): from sympy.liealgebras.type_f import TypeF assert _test_args(TypeF(4)) @XFAIL def test_sympy__liealgebras__type_g__TypeG(): from sympy.liealgebras.type_g import TypeG assert _test_args(TypeG(2)) def test_sympy__logic__boolalg__And(): from sympy.logic.boolalg import And assert _test_args(And(x, y, 2)) @SKIP("abstract class") def test_sympy__logic__boolalg__Boolean(): pass def test_sympy__logic__boolalg__BooleanFunction(): from sympy.logic.boolalg import BooleanFunction assert _test_args(BooleanFunction(1, 2, 3)) @SKIP("abstract class") def test_sympy__logic__boolalg__BooleanAtom(): pass def test_sympy__logic__boolalg__BooleanTrue(): from sympy.logic.boolalg import true assert _test_args(true) def test_sympy__logic__boolalg__BooleanFalse(): from sympy.logic.boolalg import false assert _test_args(false) def test_sympy__logic__boolalg__Equivalent(): from sympy.logic.boolalg import Equivalent assert _test_args(Equivalent(x, 2)) def test_sympy__logic__boolalg__ITE(): from sympy.logic.boolalg import ITE assert _test_args(ITE(x, y, 2)) def test_sympy__logic__boolalg__Implies(): from sympy.logic.boolalg import Implies assert _test_args(Implies(x, y)) def test_sympy__logic__boolalg__Nand(): from sympy.logic.boolalg import Nand assert _test_args(Nand(x, y, 2)) def test_sympy__logic__boolalg__Nor(): from sympy.logic.boolalg import Nor assert _test_args(Nor(x, y)) def test_sympy__logic__boolalg__Not(): from sympy.logic.boolalg import Not assert _test_args(Not(x)) def test_sympy__logic__boolalg__Or(): from sympy.logic.boolalg import Or assert _test_args(Or(x, y)) def test_sympy__logic__boolalg__Xor(): from sympy.logic.boolalg import Xor assert _test_args(Xor(x, y, 2)) def test_sympy__matrices__matrices__DeferredVector(): from sympy.matrices.matrices import DeferredVector assert _test_args(DeferredVector("X")) @SKIP("abstract class") def test_sympy__matrices__expressions__matexpr__MatrixBase(): pass def test_sympy__matrices__immutable__ImmutableMatrix(): from sympy.matrices.immutable import ImmutableMatrix m = ImmutableMatrix([[1, 2], [3, 4]]) assert _test_args(m) assert _test_args(Basic(*list(m))) m = ImmutableMatrix(1, 1, [1]) assert _test_args(m) assert _test_args(Basic(*list(m))) m = ImmutableMatrix(2, 2, lambda i, j: 1) assert m[0, 0] is S.One m = ImmutableMatrix(2, 2, lambda i, j: 1/(1 + i) + 1/(1 + j)) assert m[1, 1] is S.One # true div. 
will give 1.0 if i,j not sympified assert _test_args(m) assert _test_args(Basic(*list(m))) def test_sympy__matrices__immutable__ImmutableSparseMatrix(): from sympy.matrices.immutable import ImmutableSparseMatrix m = ImmutableSparseMatrix([[1, 2], [3, 4]]) assert _test_args(m) assert _test_args(Basic(*list(m))) m = ImmutableSparseMatrix(1, 1, {(0, 0): 1}) assert _test_args(m) assert _test_args(Basic(*list(m))) m = ImmutableSparseMatrix(1, 1, [1]) assert _test_args(m) assert _test_args(Basic(*list(m))) m = ImmutableSparseMatrix(2, 2, lambda i, j: 1) assert m[0, 0] is S.One m = ImmutableSparseMatrix(2, 2, lambda i, j: 1/(1 + i) + 1/(1 + j)) assert m[1, 1] is S.One # true div. will give 1.0 if i,j not sympified assert _test_args(m) assert _test_args(Basic(*list(m))) def test_sympy__matrices__expressions__slice__MatrixSlice(): from sympy.matrices.expressions.slice import MatrixSlice from sympy.matrices.expressions import MatrixSymbol X = MatrixSymbol('X', 4, 4) assert _test_args(MatrixSlice(X, (0, 2), (0, 2))) def test_sympy__matrices__expressions__blockmatrix__BlockDiagMatrix(): from sympy.matrices.expressions.blockmatrix import BlockDiagMatrix from sympy.matrices.expressions import MatrixSymbol X = MatrixSymbol('X', x, x) Y = MatrixSymbol('Y', y, y) assert _test_args(BlockDiagMatrix(X, Y)) def test_sympy__matrices__expressions__blockmatrix__BlockMatrix(): from sympy.matrices.expressions.blockmatrix import BlockMatrix from sympy.matrices.expressions import MatrixSymbol, ZeroMatrix X = MatrixSymbol('X', x, x) Y = MatrixSymbol('Y', y, y) Z = MatrixSymbol('Z', x, y) O = ZeroMatrix(y, x) assert _test_args(BlockMatrix([[X, Z], [O, Y]])) def test_sympy__matrices__expressions__inverse__Inverse(): from sympy.matrices.expressions.inverse import Inverse from sympy.matrices.expressions import MatrixSymbol assert _test_args(Inverse(MatrixSymbol('A', 3, 3))) def test_sympy__matrices__expressions__matadd__MatAdd(): from sympy.matrices.expressions.matadd import MatAdd from sympy.matrices.expressions import MatrixSymbol X = MatrixSymbol('X', x, y) Y = MatrixSymbol('Y', x, y) assert _test_args(MatAdd(X, Y)) def test_sympy__matrices__expressions__matexpr__Identity(): from sympy.matrices.expressions.matexpr import Identity assert _test_args(Identity(3)) @SKIP("abstract class") def test_sympy__matrices__expressions__matexpr__MatrixExpr(): pass def test_sympy__matrices__expressions__matexpr__MatrixElement(): from sympy.matrices.expressions.matexpr import MatrixSymbol, MatrixElement from sympy import S assert _test_args(MatrixElement(MatrixSymbol('A', 3, 5), S(2), S(3))) @XFAIL def test_sympy__matrices__expressions__matexpr__MatrixSymbol(): from sympy.matrices.expressions.matexpr import MatrixSymbol assert _test_args(MatrixSymbol('A', 3, 5)) def test_sympy__matrices__expressions__matexpr__ZeroMatrix(): from sympy.matrices.expressions.matexpr import ZeroMatrix assert _test_args(ZeroMatrix(3, 5)) def test_sympy__matrices__expressions__matmul__MatMul(): from sympy.matrices.expressions.matmul import MatMul from sympy.matrices.expressions import MatrixSymbol X = MatrixSymbol('X', x, y) Y = MatrixSymbol('Y', y, x) assert _test_args(MatMul(X, Y)) def test_sympy__matrices__expressions__diagonal__DiagonalMatrix(): from sympy.matrices.expressions.diagonal import DiagonalMatrix from sympy.matrices.expressions import MatrixSymbol x = MatrixSymbol('x', 10, 1) assert _test_args(DiagonalMatrix(x)) def test_sympy__matrices__expressions__diagonal__DiagonalOf(): from sympy.matrices.expressions.diagonal import DiagonalOf from 
sympy.matrices.expressions import MatrixSymbol X = MatrixSymbol('x', 10, 10) assert _test_args(DiagonalOf(X)) def test_sympy__matrices__expressions__hadamard__HadamardProduct(): from sympy.matrices.expressions.hadamard import HadamardProduct from sympy.matrices.expressions import MatrixSymbol X = MatrixSymbol('X', x, y) Y = MatrixSymbol('Y', x, y) assert _test_args(HadamardProduct(X, Y)) def test_sympy__matrices__expressions__matpow__MatPow(): from sympy.matrices.expressions.matpow import MatPow from sympy.matrices.expressions import MatrixSymbol X = MatrixSymbol('X', x, x) assert _test_args(MatPow(X, 2)) def test_sympy__matrices__expressions__transpose__Transpose(): from sympy.matrices.expressions.transpose import Transpose from sympy.matrices.expressions import MatrixSymbol assert _test_args(Transpose(MatrixSymbol('A', 3, 5))) def test_sympy__matrices__expressions__adjoint__Adjoint(): from sympy.matrices.expressions.adjoint import Adjoint from sympy.matrices.expressions import MatrixSymbol assert _test_args(Adjoint(MatrixSymbol('A', 3, 5))) def test_sympy__matrices__expressions__trace__Trace(): from sympy.matrices.expressions.trace import Trace from sympy.matrices.expressions import MatrixSymbol assert _test_args(Trace(MatrixSymbol('A', 3, 3))) def test_sympy__matrices__expressions__determinant__Determinant(): from sympy.matrices.expressions.determinant import Determinant from sympy.matrices.expressions import MatrixSymbol assert _test_args(Determinant(MatrixSymbol('A', 3, 3))) def test_sympy__matrices__expressions__funcmatrix__FunctionMatrix(): from sympy.matrices.expressions.funcmatrix import FunctionMatrix from sympy import Lambda, symbols i, j = symbols('i,j') assert _test_args(FunctionMatrix(3, 3, Lambda((i, j), i - j) )) def test_sympy__matrices__expressions__fourier__DFT(): from sympy.matrices.expressions.fourier import DFT from sympy import S assert _test_args(DFT(S(2))) def test_sympy__matrices__expressions__fourier__IDFT(): from sympy.matrices.expressions.fourier import IDFT from sympy import S assert _test_args(IDFT(S(2))) from sympy.matrices.expressions import MatrixSymbol X = MatrixSymbol('X', 10, 10) def test_sympy__matrices__expressions__factorizations__LofLU(): from sympy.matrices.expressions.factorizations import LofLU assert _test_args(LofLU(X)) def test_sympy__matrices__expressions__factorizations__UofLU(): from sympy.matrices.expressions.factorizations import UofLU assert _test_args(UofLU(X)) def test_sympy__matrices__expressions__factorizations__QofQR(): from sympy.matrices.expressions.factorizations import QofQR assert _test_args(QofQR(X)) def test_sympy__matrices__expressions__factorizations__RofQR(): from sympy.matrices.expressions.factorizations import RofQR assert _test_args(RofQR(X)) def test_sympy__matrices__expressions__factorizations__LofCholesky(): from sympy.matrices.expressions.factorizations import LofCholesky assert _test_args(LofCholesky(X)) def test_sympy__matrices__expressions__factorizations__UofCholesky(): from sympy.matrices.expressions.factorizations import UofCholesky assert _test_args(UofCholesky(X)) def test_sympy__matrices__expressions__factorizations__EigenVectors(): from sympy.matrices.expressions.factorizations import EigenVectors assert _test_args(EigenVectors(X)) def test_sympy__matrices__expressions__factorizations__EigenValues(): from sympy.matrices.expressions.factorizations import EigenValues assert _test_args(EigenValues(X)) def test_sympy__matrices__expressions__factorizations__UofSVD(): from 
sympy.matrices.expressions.factorizations import UofSVD assert _test_args(UofSVD(X)) def test_sympy__matrices__expressions__factorizations__VofSVD(): from sympy.matrices.expressions.factorizations import VofSVD assert _test_args(VofSVD(X)) def test_sympy__matrices__expressions__factorizations__SofSVD(): from sympy.matrices.expressions.factorizations import SofSVD assert _test_args(SofSVD(X)) @SKIP("abstract class") def test_sympy__matrices__expressions__factorizations__Factorization(): pass def test_sympy__physics__vector__frame__CoordinateSym(): from sympy.physics.vector import CoordinateSym from sympy.physics.vector import ReferenceFrame assert _test_args(CoordinateSym('R_x', ReferenceFrame('R'), 0)) def test_sympy__physics__paulialgebra__Pauli(): from sympy.physics.paulialgebra import Pauli assert _test_args(Pauli(1)) def test_sympy__physics__quantum__anticommutator__AntiCommutator(): from sympy.physics.quantum.anticommutator import AntiCommutator assert _test_args(AntiCommutator(x, y)) def test_sympy__physics__quantum__cartesian__PositionBra3D(): from sympy.physics.quantum.cartesian import PositionBra3D assert _test_args(PositionBra3D(x, y, z)) def test_sympy__physics__quantum__cartesian__PositionKet3D(): from sympy.physics.quantum.cartesian import PositionKet3D assert _test_args(PositionKet3D(x, y, z)) def test_sympy__physics__quantum__cartesian__PositionState3D(): from sympy.physics.quantum.cartesian import PositionState3D assert _test_args(PositionState3D(x, y, z)) def test_sympy__physics__quantum__cartesian__PxBra(): from sympy.physics.quantum.cartesian import PxBra assert _test_args(PxBra(x, y, z)) def test_sympy__physics__quantum__cartesian__PxKet(): from sympy.physics.quantum.cartesian import PxKet assert _test_args(PxKet(x, y, z)) def test_sympy__physics__quantum__cartesian__PxOp(): from sympy.physics.quantum.cartesian import PxOp assert _test_args(PxOp(x, y, z)) def test_sympy__physics__quantum__cartesian__XBra(): from sympy.physics.quantum.cartesian import XBra assert _test_args(XBra(x)) def test_sympy__physics__quantum__cartesian__XKet(): from sympy.physics.quantum.cartesian import XKet assert _test_args(XKet(x)) def test_sympy__physics__quantum__cartesian__XOp(): from sympy.physics.quantum.cartesian import XOp assert _test_args(XOp(x)) def test_sympy__physics__quantum__cartesian__YOp(): from sympy.physics.quantum.cartesian import YOp assert _test_args(YOp(x)) def test_sympy__physics__quantum__cartesian__ZOp(): from sympy.physics.quantum.cartesian import ZOp assert _test_args(ZOp(x)) def test_sympy__physics__quantum__cg__CG(): from sympy.physics.quantum.cg import CG from sympy import S assert _test_args(CG(S(3)/2, S(3)/2, S(1)/2, -S(1)/2, 1, 1)) def test_sympy__physics__quantum__cg__Wigner3j(): from sympy.physics.quantum.cg import Wigner3j assert _test_args(Wigner3j(6, 0, 4, 0, 2, 0)) def test_sympy__physics__quantum__cg__Wigner6j(): from sympy.physics.quantum.cg import Wigner6j assert _test_args(Wigner6j(1, 2, 3, 2, 1, 2)) def test_sympy__physics__quantum__cg__Wigner9j(): from sympy.physics.quantum.cg import Wigner9j assert _test_args(Wigner9j(2, 1, 1, S(3)/2, S(1)/2, 1, S(1)/2, S(1)/2, 0)) def test_sympy__physics__quantum__circuitplot__Mz(): from sympy.physics.quantum.circuitplot import Mz assert _test_args(Mz(0)) def test_sympy__physics__quantum__circuitplot__Mx(): from sympy.physics.quantum.circuitplot import Mx assert _test_args(Mx(0)) def test_sympy__physics__quantum__commutator__Commutator(): from sympy.physics.quantum.commutator import Commutator A, B = 
symbols('A,B', commutative=False) assert _test_args(Commutator(A, B)) def test_sympy__physics__quantum__constants__HBar(): from sympy.physics.quantum.constants import HBar assert _test_args(HBar()) def test_sympy__physics__quantum__dagger__Dagger(): from sympy.physics.quantum.dagger import Dagger from sympy.physics.quantum.state import Ket assert _test_args(Dagger(Dagger(Ket('psi')))) def test_sympy__physics__quantum__gate__CGate(): from sympy.physics.quantum.gate import CGate, Gate assert _test_args(CGate((0, 1), Gate(2))) def test_sympy__physics__quantum__gate__CGateS(): from sympy.physics.quantum.gate import CGateS, Gate assert _test_args(CGateS((0, 1), Gate(2))) def test_sympy__physics__quantum__gate__CNotGate(): from sympy.physics.quantum.gate import CNotGate assert _test_args(CNotGate(0, 1)) def test_sympy__physics__quantum__gate__Gate(): from sympy.physics.quantum.gate import Gate assert _test_args(Gate(0)) def test_sympy__physics__quantum__gate__HadamardGate(): from sympy.physics.quantum.gate import HadamardGate assert _test_args(HadamardGate(0)) def test_sympy__physics__quantum__gate__IdentityGate(): from sympy.physics.quantum.gate import IdentityGate assert _test_args(IdentityGate(0)) def test_sympy__physics__quantum__gate__OneQubitGate(): from sympy.physics.quantum.gate import OneQubitGate assert _test_args(OneQubitGate(0)) def test_sympy__physics__quantum__gate__PhaseGate(): from sympy.physics.quantum.gate import PhaseGate assert _test_args(PhaseGate(0)) def test_sympy__physics__quantum__gate__SwapGate(): from sympy.physics.quantum.gate import SwapGate assert _test_args(SwapGate(0, 1)) def test_sympy__physics__quantum__gate__TGate(): from sympy.physics.quantum.gate import TGate assert _test_args(TGate(0)) def test_sympy__physics__quantum__gate__TwoQubitGate(): from sympy.physics.quantum.gate import TwoQubitGate assert _test_args(TwoQubitGate(0)) def test_sympy__physics__quantum__gate__UGate(): from sympy.physics.quantum.gate import UGate from sympy.matrices.immutable import ImmutableMatrix from sympy import Integer, Tuple assert _test_args( UGate(Tuple(Integer(1)), ImmutableMatrix([[1, 0], [0, 2]]))) def test_sympy__physics__quantum__gate__XGate(): from sympy.physics.quantum.gate import XGate assert _test_args(XGate(0)) def test_sympy__physics__quantum__gate__YGate(): from sympy.physics.quantum.gate import YGate assert _test_args(YGate(0)) def test_sympy__physics__quantum__gate__ZGate(): from sympy.physics.quantum.gate import ZGate assert _test_args(ZGate(0)) @SKIP("TODO: sympy.physics") def test_sympy__physics__quantum__grover__OracleGate(): from sympy.physics.quantum.grover import OracleGate assert _test_args(OracleGate()) def test_sympy__physics__quantum__grover__WGate(): from sympy.physics.quantum.grover import WGate assert _test_args(WGate(1)) def test_sympy__physics__quantum__hilbert__ComplexSpace(): from sympy.physics.quantum.hilbert import ComplexSpace assert _test_args(ComplexSpace(x)) def test_sympy__physics__quantum__hilbert__DirectSumHilbertSpace(): from sympy.physics.quantum.hilbert import DirectSumHilbertSpace, ComplexSpace, FockSpace c = ComplexSpace(2) f = FockSpace() assert _test_args(DirectSumHilbertSpace(c, f)) def test_sympy__physics__quantum__hilbert__FockSpace(): from sympy.physics.quantum.hilbert import FockSpace assert _test_args(FockSpace()) def test_sympy__physics__quantum__hilbert__HilbertSpace(): from sympy.physics.quantum.hilbert import HilbertSpace assert _test_args(HilbertSpace()) def test_sympy__physics__quantum__hilbert__L2(): from 
sympy.physics.quantum.hilbert import L2 from sympy import oo, Interval assert _test_args(L2(Interval(0, oo))) def test_sympy__physics__quantum__hilbert__TensorPowerHilbertSpace(): from sympy.physics.quantum.hilbert import TensorPowerHilbertSpace, FockSpace f = FockSpace() assert _test_args(TensorPowerHilbertSpace(f, 2)) def test_sympy__physics__quantum__hilbert__TensorProductHilbertSpace(): from sympy.physics.quantum.hilbert import TensorProductHilbertSpace, FockSpace, ComplexSpace c = ComplexSpace(2) f = FockSpace() assert _test_args(TensorProductHilbertSpace(f, c)) def test_sympy__physics__quantum__innerproduct__InnerProduct(): from sympy.physics.quantum import Bra, Ket, InnerProduct b = Bra('b') k = Ket('k') assert _test_args(InnerProduct(b, k)) def test_sympy__physics__quantum__operator__DifferentialOperator(): from sympy.physics.quantum.operator import DifferentialOperator from sympy import Derivative, Function f = Function('f') assert _test_args(DifferentialOperator(1/x*Derivative(f(x), x), f(x))) def test_sympy__physics__quantum__operator__HermitianOperator(): from sympy.physics.quantum.operator import HermitianOperator assert _test_args(HermitianOperator('H')) def test_sympy__physics__quantum__operator__IdentityOperator(): from sympy.physics.quantum.operator import IdentityOperator assert _test_args(IdentityOperator(5)) def test_sympy__physics__quantum__operator__Operator(): from sympy.physics.quantum.operator import Operator assert _test_args(Operator('A')) def test_sympy__physics__quantum__operator__OuterProduct(): from sympy.physics.quantum.operator import OuterProduct from sympy.physics.quantum import Ket, Bra b = Bra('b') k = Ket('k') assert _test_args(OuterProduct(k, b)) def test_sympy__physics__quantum__operator__UnitaryOperator(): from sympy.physics.quantum.operator import UnitaryOperator assert _test_args(UnitaryOperator('U')) def test_sympy__physics__quantum__piab__PIABBra(): from sympy.physics.quantum.piab import PIABBra assert _test_args(PIABBra('B')) def test_sympy__physics__quantum__boson__BosonOp(): from sympy.physics.quantum.boson import BosonOp assert _test_args(BosonOp('a')) assert _test_args(BosonOp('a', False)) def test_sympy__physics__quantum__boson__BosonFockKet(): from sympy.physics.quantum.boson import BosonFockKet assert _test_args(BosonFockKet(1)) def test_sympy__physics__quantum__boson__BosonFockBra(): from sympy.physics.quantum.boson import BosonFockBra assert _test_args(BosonFockBra(1)) def test_sympy__physics__quantum__boson__BosonCoherentKet(): from sympy.physics.quantum.boson import BosonCoherentKet assert _test_args(BosonCoherentKet(1)) def test_sympy__physics__quantum__boson__BosonCoherentBra(): from sympy.physics.quantum.boson import BosonCoherentBra assert _test_args(BosonCoherentBra(1)) def test_sympy__physics__quantum__fermion__FermionOp(): from sympy.physics.quantum.fermion import FermionOp assert _test_args(FermionOp('c')) assert _test_args(FermionOp('c', False)) def test_sympy__physics__quantum__fermion__FermionFockKet(): from sympy.physics.quantum.fermion import FermionFockKet assert _test_args(FermionFockKet(1)) def test_sympy__physics__quantum__fermion__FermionFockBra(): from sympy.physics.quantum.fermion import FermionFockBra assert _test_args(FermionFockBra(1)) def test_sympy__physics__quantum__pauli__SigmaOpBase(): from sympy.physics.quantum.pauli import SigmaOpBase assert _test_args(SigmaOpBase()) def test_sympy__physics__quantum__pauli__SigmaX(): from sympy.physics.quantum.pauli import SigmaX assert _test_args(SigmaX()) def 
test_sympy__physics__quantum__pauli__SigmaY(): from sympy.physics.quantum.pauli import SigmaY assert _test_args(SigmaY()) def test_sympy__physics__quantum__pauli__SigmaZ(): from sympy.physics.quantum.pauli import SigmaZ assert _test_args(SigmaZ()) def test_sympy__physics__quantum__pauli__SigmaMinus(): from sympy.physics.quantum.pauli import SigmaMinus assert _test_args(SigmaMinus()) def test_sympy__physics__quantum__pauli__SigmaPlus(): from sympy.physics.quantum.pauli import SigmaPlus assert _test_args(SigmaPlus()) def test_sympy__physics__quantum__pauli__SigmaZKet(): from sympy.physics.quantum.pauli import SigmaZKet assert _test_args(SigmaZKet(0)) def test_sympy__physics__quantum__pauli__SigmaZBra(): from sympy.physics.quantum.pauli import SigmaZBra assert _test_args(SigmaZBra(0)) def test_sympy__physics__quantum__piab__PIABHamiltonian(): from sympy.physics.quantum.piab import PIABHamiltonian assert _test_args(PIABHamiltonian('P')) def test_sympy__physics__quantum__piab__PIABKet(): from sympy.physics.quantum.piab import PIABKet assert _test_args(PIABKet('K')) def test_sympy__physics__quantum__qexpr__QExpr(): from sympy.physics.quantum.qexpr import QExpr assert _test_args(QExpr(0)) def test_sympy__physics__quantum__qft__Fourier(): from sympy.physics.quantum.qft import Fourier assert _test_args(Fourier(0, 1)) def test_sympy__physics__quantum__qft__IQFT(): from sympy.physics.quantum.qft import IQFT assert _test_args(IQFT(0, 1)) def test_sympy__physics__quantum__qft__QFT(): from sympy.physics.quantum.qft import QFT assert _test_args(QFT(0, 1)) def test_sympy__physics__quantum__qft__RkGate(): from sympy.physics.quantum.qft import RkGate assert _test_args(RkGate(0, 1)) def test_sympy__physics__quantum__qubit__IntQubit(): from sympy.physics.quantum.qubit import IntQubit assert _test_args(IntQubit(0)) def test_sympy__physics__quantum__qubit__IntQubitBra(): from sympy.physics.quantum.qubit import IntQubitBra assert _test_args(IntQubitBra(0)) def test_sympy__physics__quantum__qubit__IntQubitState(): from sympy.physics.quantum.qubit import IntQubitState, QubitState assert _test_args(IntQubitState(QubitState(0, 1))) def test_sympy__physics__quantum__qubit__Qubit(): from sympy.physics.quantum.qubit import Qubit assert _test_args(Qubit(0, 0, 0)) def test_sympy__physics__quantum__qubit__QubitBra(): from sympy.physics.quantum.qubit import QubitBra assert _test_args(QubitBra('1', 0)) def test_sympy__physics__quantum__qubit__QubitState(): from sympy.physics.quantum.qubit import QubitState assert _test_args(QubitState(0, 1)) def test_sympy__physics__quantum__density__Density(): from sympy.physics.quantum.density import Density from sympy.physics.quantum.state import Ket assert _test_args(Density([Ket(0), 0.5], [Ket(1), 0.5])) @SKIP("TODO: sympy.physics.quantum.shor: Cmod Not Implemented") def test_sympy__physics__quantum__shor__CMod(): from sympy.physics.quantum.shor import CMod assert _test_args(CMod()) def test_sympy__physics__quantum__spin__CoupledSpinState(): from sympy.physics.quantum.spin import CoupledSpinState assert _test_args(CoupledSpinState(1, 0, (1, 1))) assert _test_args(CoupledSpinState(1, 0, (1, S(1)/2, S(1)/2))) assert _test_args(CoupledSpinState( 1, 0, (1, S(1)/2, S(1)/2), ((2, 3, S(1)/2), (1, 2, 1)) )) j, m, j1, j2, j3, j12, x = symbols('j m j1:4 j12 x') assert CoupledSpinState( j, m, (j1, j2, j3)).subs(j2, x) == CoupledSpinState(j, m, (j1, x, j3)) assert CoupledSpinState(j, m, (j1, j2, j3), ((1, 3, j12), (1, 2, j)) ).subs(j12, x) == \ CoupledSpinState(j, m, (j1, j2, j3), ((1, 3, x), (1, 
2, j)) ) def test_sympy__physics__quantum__spin__J2Op(): from sympy.physics.quantum.spin import J2Op assert _test_args(J2Op('J')) def test_sympy__physics__quantum__spin__JminusOp(): from sympy.physics.quantum.spin import JminusOp assert _test_args(JminusOp('J')) def test_sympy__physics__quantum__spin__JplusOp(): from sympy.physics.quantum.spin import JplusOp assert _test_args(JplusOp('J')) def test_sympy__physics__quantum__spin__JxBra(): from sympy.physics.quantum.spin import JxBra assert _test_args(JxBra(1, 0)) def test_sympy__physics__quantum__spin__JxBraCoupled(): from sympy.physics.quantum.spin import JxBraCoupled assert _test_args(JxBraCoupled(1, 0, (1, 1))) def test_sympy__physics__quantum__spin__JxKet(): from sympy.physics.quantum.spin import JxKet assert _test_args(JxKet(1, 0)) def test_sympy__physics__quantum__spin__JxKetCoupled(): from sympy.physics.quantum.spin import JxKetCoupled assert _test_args(JxKetCoupled(1, 0, (1, 1))) def test_sympy__physics__quantum__spin__JxOp(): from sympy.physics.quantum.spin import JxOp assert _test_args(JxOp('J')) def test_sympy__physics__quantum__spin__JyBra(): from sympy.physics.quantum.spin import JyBra assert _test_args(JyBra(1, 0)) def test_sympy__physics__quantum__spin__JyBraCoupled(): from sympy.physics.quantum.spin import JyBraCoupled assert _test_args(JyBraCoupled(1, 0, (1, 1))) def test_sympy__physics__quantum__spin__JyKet(): from sympy.physics.quantum.spin import JyKet assert _test_args(JyKet(1, 0)) def test_sympy__physics__quantum__spin__JyKetCoupled(): from sympy.physics.quantum.spin import JyKetCoupled assert _test_args(JyKetCoupled(1, 0, (1, 1))) def test_sympy__physics__quantum__spin__JyOp(): from sympy.physics.quantum.spin import JyOp assert _test_args(JyOp('J')) def test_sympy__physics__quantum__spin__JzBra(): from sympy.physics.quantum.spin import JzBra assert _test_args(JzBra(1, 0)) def test_sympy__physics__quantum__spin__JzBraCoupled(): from sympy.physics.quantum.spin import JzBraCoupled assert _test_args(JzBraCoupled(1, 0, (1, 1))) def test_sympy__physics__quantum__spin__JzKet(): from sympy.physics.quantum.spin import JzKet assert _test_args(JzKet(1, 0)) def test_sympy__physics__quantum__spin__JzKetCoupled(): from sympy.physics.quantum.spin import JzKetCoupled assert _test_args(JzKetCoupled(1, 0, (1, 1))) def test_sympy__physics__quantum__spin__JzOp(): from sympy.physics.quantum.spin import JzOp assert _test_args(JzOp('J')) def test_sympy__physics__quantum__spin__Rotation(): from sympy.physics.quantum.spin import Rotation from sympy import pi assert _test_args(Rotation(pi, 0, pi/2)) def test_sympy__physics__quantum__spin__SpinState(): from sympy.physics.quantum.spin import SpinState assert _test_args(SpinState(1, 0)) def test_sympy__physics__quantum__spin__WignerD(): from sympy.physics.quantum.spin import WignerD assert _test_args(WignerD(0, 1, 2, 3, 4, 5)) def test_sympy__physics__quantum__state__Bra(): from sympy.physics.quantum.state import Bra assert _test_args(Bra(0)) def test_sympy__physics__quantum__state__BraBase(): from sympy.physics.quantum.state import BraBase assert _test_args(BraBase(0)) def test_sympy__physics__quantum__state__Ket(): from sympy.physics.quantum.state import Ket assert _test_args(Ket(0)) def test_sympy__physics__quantum__state__KetBase(): from sympy.physics.quantum.state import KetBase assert _test_args(KetBase(0)) def test_sympy__physics__quantum__state__State(): from sympy.physics.quantum.state import State assert _test_args(State(0)) def test_sympy__physics__quantum__state__StateBase(): from 
sympy.physics.quantum.state import StateBase assert _test_args(StateBase(0)) def test_sympy__physics__quantum__state__TimeDepBra(): from sympy.physics.quantum.state import TimeDepBra assert _test_args(TimeDepBra('psi', 't')) def test_sympy__physics__quantum__state__TimeDepKet(): from sympy.physics.quantum.state import TimeDepKet assert _test_args(TimeDepKet('psi', 't')) def test_sympy__physics__quantum__state__TimeDepState(): from sympy.physics.quantum.state import TimeDepState assert _test_args(TimeDepState('psi', 't')) def test_sympy__physics__quantum__state__Wavefunction(): from sympy.physics.quantum.state import Wavefunction from sympy.functions import sin from sympy import Piecewise, pi n = 1 L = 1 g = Piecewise((0, x < 0), (0, x > L), (sqrt(2//L)*sin(n*pi*x/L), True)) assert _test_args(Wavefunction(g, x)) def test_sympy__physics__quantum__tensorproduct__TensorProduct(): from sympy.physics.quantum.tensorproduct import TensorProduct assert _test_args(TensorProduct(x, y)) def test_sympy__physics__quantum__identitysearch__GateIdentity(): from sympy.physics.quantum.gate import X from sympy.physics.quantum.identitysearch import GateIdentity assert _test_args(GateIdentity(X(0), X(0))) def test_sympy__physics__quantum__sho1d__SHOOp(): from sympy.physics.quantum.sho1d import SHOOp assert _test_args(SHOOp('a')) def test_sympy__physics__quantum__sho1d__RaisingOp(): from sympy.physics.quantum.sho1d import RaisingOp assert _test_args(RaisingOp('a')) def test_sympy__physics__quantum__sho1d__LoweringOp(): from sympy.physics.quantum.sho1d import LoweringOp assert _test_args(LoweringOp('a')) def test_sympy__physics__quantum__sho1d__NumberOp(): from sympy.physics.quantum.sho1d import NumberOp assert _test_args(NumberOp('N')) def test_sympy__physics__quantum__sho1d__Hamiltonian(): from sympy.physics.quantum.sho1d import Hamiltonian assert _test_args(Hamiltonian('H')) def test_sympy__physics__quantum__sho1d__SHOState(): from sympy.physics.quantum.sho1d import SHOState assert _test_args(SHOState(0)) def test_sympy__physics__quantum__sho1d__SHOKet(): from sympy.physics.quantum.sho1d import SHOKet assert _test_args(SHOKet(0)) def test_sympy__physics__quantum__sho1d__SHOBra(): from sympy.physics.quantum.sho1d import SHOBra assert _test_args(SHOBra(0)) def test_sympy__physics__secondquant__AnnihilateBoson(): from sympy.physics.secondquant import AnnihilateBoson assert _test_args(AnnihilateBoson(0)) def test_sympy__physics__secondquant__AnnihilateFermion(): from sympy.physics.secondquant import AnnihilateFermion assert _test_args(AnnihilateFermion(0)) @SKIP("abstract class") def test_sympy__physics__secondquant__Annihilator(): pass def test_sympy__physics__secondquant__AntiSymmetricTensor(): from sympy.physics.secondquant import AntiSymmetricTensor i, j = symbols('i j', below_fermi=True) a, b = symbols('a b', above_fermi=True) assert _test_args(AntiSymmetricTensor('v', (a, i), (b, j))) def test_sympy__physics__secondquant__BosonState(): from sympy.physics.secondquant import BosonState assert _test_args(BosonState((0, 1))) @SKIP("abstract class") def test_sympy__physics__secondquant__BosonicOperator(): pass def test_sympy__physics__secondquant__Commutator(): from sympy.physics.secondquant import Commutator assert _test_args(Commutator(x, y)) def test_sympy__physics__secondquant__CreateBoson(): from sympy.physics.secondquant import CreateBoson assert _test_args(CreateBoson(0)) def test_sympy__physics__secondquant__CreateFermion(): from sympy.physics.secondquant import CreateFermion assert 
_test_args(CreateFermion(0)) @SKIP("abstract class") def test_sympy__physics__secondquant__Creator(): pass def test_sympy__physics__secondquant__Dagger(): from sympy.physics.secondquant import Dagger from sympy import I assert _test_args(Dagger(2*I)) def test_sympy__physics__secondquant__FermionState(): from sympy.physics.secondquant import FermionState assert _test_args(FermionState((0, 1))) def test_sympy__physics__secondquant__FermionicOperator(): from sympy.physics.secondquant import FermionicOperator assert _test_args(FermionicOperator(0)) def test_sympy__physics__secondquant__FockState(): from sympy.physics.secondquant import FockState assert _test_args(FockState((0, 1))) def test_sympy__physics__secondquant__FockStateBosonBra(): from sympy.physics.secondquant import FockStateBosonBra assert _test_args(FockStateBosonBra((0, 1))) def test_sympy__physics__secondquant__FockStateBosonKet(): from sympy.physics.secondquant import FockStateBosonKet assert _test_args(FockStateBosonKet((0, 1))) def test_sympy__physics__secondquant__FockStateBra(): from sympy.physics.secondquant import FockStateBra assert _test_args(FockStateBra((0, 1))) def test_sympy__physics__secondquant__FockStateFermionBra(): from sympy.physics.secondquant import FockStateFermionBra assert _test_args(FockStateFermionBra((0, 1))) def test_sympy__physics__secondquant__FockStateFermionKet(): from sympy.physics.secondquant import FockStateFermionKet assert _test_args(FockStateFermionKet((0, 1))) def test_sympy__physics__secondquant__FockStateKet(): from sympy.physics.secondquant import FockStateKet assert _test_args(FockStateKet((0, 1))) def test_sympy__physics__secondquant__InnerProduct(): from sympy.physics.secondquant import InnerProduct from sympy.physics.secondquant import FockStateKet, FockStateBra assert _test_args(InnerProduct(FockStateBra((0, 1)), FockStateKet((0, 1)))) def test_sympy__physics__secondquant__NO(): from sympy.physics.secondquant import NO, F, Fd assert _test_args(NO(Fd(x)*F(y))) def test_sympy__physics__secondquant__PermutationOperator(): from sympy.physics.secondquant import PermutationOperator assert _test_args(PermutationOperator(0, 1)) def test_sympy__physics__secondquant__SqOperator(): from sympy.physics.secondquant import SqOperator assert _test_args(SqOperator(0)) def test_sympy__physics__secondquant__TensorSymbol(): from sympy.physics.secondquant import TensorSymbol assert _test_args(TensorSymbol(x)) def test_sympy__physics__units__Unit(): from sympy.physics.units import Unit assert _test_args(Unit("meter", "m")) def test_sympy__physics__unitsystems__dimensions__Dimension(): from sympy.physics.unitsystems.dimensions import Dimension assert _test_args(Dimension(name="length", symbol="L", length=1)) def test_sympy__physics__unitsystems__quantities__Quantity(): from sympy.physics.unitsystems.quantities import Quantity from sympy.physics.unitsystems.systems import mks assert _test_args(Quantity(10, mks["m"])) def test_sympy__physics__unitsystems__units__Constant(): from sympy.physics.unitsystems.units import Constant from sympy.physics.unitsystems.dimensions import Dimension length = Dimension(length=1) assert _test_args(Constant(length, abbrev="u", factor=10)) def test_sympy__physics__unitsystems__units__Unit(): from sympy.physics.unitsystems.units import Unit from sympy.physics.unitsystems.dimensions import Dimension length = Dimension(length=1) assert _test_args(Unit(length, abbrev="u", factor=10)) def test_sympy__core__numbers__AlgebraicNumber(): from sympy.core.numbers import AlgebraicNumber 
assert _test_args(AlgebraicNumber(sqrt(2), [1, 2, 3])) def test_sympy__polys__polytools__GroebnerBasis(): from sympy.polys.polytools import GroebnerBasis assert _test_args(GroebnerBasis([x, y, z], x, y, z)) def test_sympy__polys__polytools__Poly(): from sympy.polys.polytools import Poly assert _test_args(Poly(2, x, y)) def test_sympy__polys__polytools__PurePoly(): from sympy.polys.polytools import PurePoly assert _test_args(PurePoly(2, x, y)) def test_sympy__polys__rootoftools__RootOf(): from sympy.polys.rootoftools import RootOf assert _test_args(RootOf(x**3 + x + 1, 0)) def test_sympy__polys__rootoftools__RootSum(): from sympy.polys.rootoftools import RootSum assert _test_args(RootSum(x**3 + x + 1, sin)) def test_sympy__series__limits__Limit(): from sympy.series.limits import Limit assert _test_args(Limit(x, x, 0, dir='-')) def test_sympy__series__order__Order(): from sympy.series.order import Order assert _test_args(Order(1, x, y)) def test_sympy__simplify__hyperexpand__Hyper_Function(): from sympy.simplify.hyperexpand import Hyper_Function assert _test_args(Hyper_Function([2], [1])) def test_sympy__simplify__hyperexpand__G_Function(): from sympy.simplify.hyperexpand import G_Function assert _test_args(G_Function([2], [1], [], [])) def test_sympy__tensor__indexed__Idx(): from sympy.tensor.indexed import Idx assert _test_args(Idx('test')) assert _test_args(Idx(1, (0, 10))) def test_sympy__tensor__indexed__Indexed(): from sympy.tensor.indexed import Indexed, Idx assert _test_args(Indexed('A', Idx('i'), Idx('j'))) def test_sympy__tensor__indexed__IndexedBase(): from sympy.tensor.indexed import IndexedBase assert _test_args(IndexedBase('A', shape=(x, y))) assert _test_args(IndexedBase('A', 1)) assert _test_args(IndexedBase('A')[0, 1]) @XFAIL def test_sympy__physics__hep__gamma_matrices__GammaMatrixHead(): # This test fails, this class can be reconstructed from the *args # of an instance using `TensorHead(*args)` from sympy.physics.hep.gamma_matrices import GammaMatrixHead, Lorentz from sympy.tensor.tensor import tensor_indices i = tensor_indices('i', Lorentz) assert _test_args(GammaMatrixHead()) def test_sympy__tensor__tensor__TensorIndexType(): from sympy.tensor.tensor import TensorIndexType assert _test_args(TensorIndexType('Lorentz', metric=False)) def test_sympy__tensor__tensor__TensorSymmetry(): from sympy.tensor.tensor import TensorSymmetry, get_symmetric_group_sgs assert _test_args(TensorSymmetry(get_symmetric_group_sgs(2))) def test_sympy__tensor__tensor__TensorType(): from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, get_symmetric_group_sgs, TensorType Lorentz = TensorIndexType('Lorentz', dummy_fmt='L') sym = TensorSymmetry(get_symmetric_group_sgs(1)) assert _test_args(TensorType([Lorentz], sym)) def test_sympy__tensor__tensor__TensorHead(): from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, TensorType, get_symmetric_group_sgs, TensorHead Lorentz = TensorIndexType('Lorentz', dummy_fmt='L') sym = TensorSymmetry(get_symmetric_group_sgs(1)) S1 = TensorType([Lorentz], sym) assert _test_args(TensorHead('p', S1, 0)) def test_sympy__tensor__tensor__TensorIndex(): from sympy.tensor.tensor import TensorIndexType, TensorIndex Lorentz = TensorIndexType('Lorentz', dummy_fmt='L') assert _test_args(TensorIndex('i', Lorentz)) @SKIP("abstract class") def test_sympy__tensor__tensor__TensExpr(): pass def test_sympy__tensor__tensor__TensAdd(): from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, TensorType, get_symmetric_group_sgs, tensor_indices, TensAdd 
Lorentz = TensorIndexType('Lorentz', dummy_fmt='L') a, b = tensor_indices('a,b', Lorentz) sym = TensorSymmetry(get_symmetric_group_sgs(1)) S1 = TensorType([Lorentz], sym) p, q = S1('p,q') t1 = p(a) t2 = q(a) assert _test_args(TensAdd(t1, t2)) def test_sympy__tensor__tensor__Tensor(): from sympy.core import S from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, TensorType, get_symmetric_group_sgs, tensor_indices, TensMul, TIDS Lorentz = TensorIndexType('Lorentz', dummy_fmt='L') a, b = tensor_indices('a,b', Lorentz) sym = TensorSymmetry(get_symmetric_group_sgs(1)) S1 = TensorType([Lorentz], sym) p = S1('p') assert _test_args(p(a)) def test_sympy__tensor__tensor__TensMul(): from sympy.core import S from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, TensorType, get_symmetric_group_sgs, tensor_indices, TensMul, TIDS Lorentz = TensorIndexType('Lorentz', dummy_fmt='L') a, b = tensor_indices('a,b', Lorentz) sym = TensorSymmetry(get_symmetric_group_sgs(1)) S1 = TensorType([Lorentz], sym) p = S1('p') q = S1('q') assert _test_args(3*p(a)*q(b)) def test_as_coeff_add(): assert (7, (3*x, 4*x**2)) == (7 + 3*x + 4*x**2).as_coeff_add() def test_sympy__geometry__curve__Curve(): from sympy.geometry.curve import Curve assert _test_args(Curve((x, 1), (x, 0, 1))) def test_sympy__geometry__point__Point(): from sympy.geometry.point import Point assert _test_args(Point(0, 1)) def test_sympy__geometry__point3d__Point3D(): from sympy.geometry.point3d import Point3D assert _test_args(Point3D(0, 1, 2)) def test_sympy__geometry__ellipse__Ellipse(): from sympy.geometry.ellipse import Ellipse assert _test_args(Ellipse((0, 1), 2, 3)) def test_sympy__geometry__ellipse__Circle(): from sympy.geometry.ellipse import Circle assert _test_args(Circle((0, 1), 2)) @SKIP("abstract class") def test_sympy__geometry__line__LinearEntity(): pass def test_sympy__geometry__line__Line(): from sympy.geometry.line import Line assert _test_args(Line((0, 1), (2, 3))) def test_sympy__geometry__line__Ray(): from sympy.geometry.line import Ray assert _test_args(Ray((0, 1), (2, 3))) def test_sympy__geometry__line__Segment(): from sympy.geometry.line import Segment assert _test_args(Segment((0, 1), (2, 3))) @SKIP("abstract class") def test_sympy__geometry__line3d__LinearEntity3D(): pass def test_sympy__geometry__line3d__Line3D(): from sympy.geometry.line3d import Line3D assert _test_args(Line3D((0, 1, 1), (2, 3, 4))) def test_sympy__geometry__line3d__Segment3D(): from sympy.geometry.line3d import Segment3D assert _test_args(Segment3D((0, 1, 1), (2, 3, 4))) def test_sympy__geometry__line3d__Ray3D(): from sympy.geometry.line3d import Ray3D assert _test_args(Ray3D((0, 1, 1), (2, 3, 4))) def test_sympy__geometry__plane__Plane(): from sympy.geometry.plane import Plane assert _test_args(Plane((1, 1, 1), (-3, 4, -2), (1, 2, 3))) def test_sympy__geometry__polygon__Polygon(): from sympy.geometry.polygon import Polygon assert _test_args(Polygon((0, 1), (2, 3), (4, 5), (6, 7))) def test_sympy__geometry__polygon__RegularPolygon(): from sympy.geometry.polygon import RegularPolygon assert _test_args(RegularPolygon((0, 1), 2, 3, 4)) def test_sympy__geometry__polygon__Triangle(): from sympy.geometry.polygon import Triangle assert _test_args(Triangle((0, 1), (2, 3), (4, 5))) def test_sympy__geometry__entity__GeometryEntity(): from sympy.geometry.entity import GeometryEntity from sympy.geometry.point import Point assert _test_args(GeometryEntity(Point(1, 0), 1, [1, 2])) def test_sympy__diffgeom__diffgeom__Manifold(): from sympy.diffgeom 
import Manifold assert _test_args(Manifold('name', 3)) def test_sympy__diffgeom__diffgeom__Patch(): from sympy.diffgeom import Manifold, Patch assert _test_args(Patch('name', Manifold('name', 3))) def test_sympy__diffgeom__diffgeom__CoordSystem(): from sympy.diffgeom import Manifold, Patch, CoordSystem assert _test_args(CoordSystem('name', Patch('name', Manifold('name', 3)))) @XFAIL def test_sympy__diffgeom__diffgeom__Point(): from sympy.diffgeom import Manifold, Patch, CoordSystem, Point assert _test_args(Point( CoordSystem('name', Patch('name', Manifold('name', 3))), [x, y])) def test_sympy__diffgeom__diffgeom__BaseScalarField(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField cs = CoordSystem('name', Patch('name', Manifold('name', 3))) assert _test_args(BaseScalarField(cs, 0)) def test_sympy__diffgeom__diffgeom__BaseVectorField(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseVectorField cs = CoordSystem('name', Patch('name', Manifold('name', 3))) assert _test_args(BaseVectorField(cs, 0)) def test_sympy__diffgeom__diffgeom__Differential(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField, Differential cs = CoordSystem('name', Patch('name', Manifold('name', 3))) assert _test_args(Differential(BaseScalarField(cs, 0))) def test_sympy__diffgeom__diffgeom__Commutator(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseVectorField, Commutator cs = CoordSystem('name', Patch('name', Manifold('name', 3))) cs1 = CoordSystem('name1', Patch('name', Manifold('name', 3))) v = BaseVectorField(cs, 0) v1 = BaseVectorField(cs1, 0) assert _test_args(Commutator(v, v1)) def test_sympy__diffgeom__diffgeom__TensorProduct(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField, Differential, TensorProduct cs = CoordSystem('name', Patch('name', Manifold('name', 3))) d = Differential(BaseScalarField(cs, 0)) assert _test_args(TensorProduct(d, d)) def test_sympy__diffgeom__diffgeom__WedgeProduct(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField, Differential, WedgeProduct cs = CoordSystem('name', Patch('name', Manifold('name', 3))) d = Differential(BaseScalarField(cs, 0)) d1 = Differential(BaseScalarField(cs, 1)) assert _test_args(WedgeProduct(d, d1)) def test_sympy__diffgeom__diffgeom__LieDerivative(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField, Differential, BaseVectorField, LieDerivative cs = CoordSystem('name', Patch('name', Manifold('name', 3))) d = Differential(BaseScalarField(cs, 0)) v = BaseVectorField(cs, 0) assert _test_args(LieDerivative(v, d)) @XFAIL def test_sympy__diffgeom__diffgeom__BaseCovarDerivativeOp(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseCovarDerivativeOp cs = CoordSystem('name', Patch('name', Manifold('name', 3))) assert _test_args(BaseCovarDerivativeOp(cs, 0, [[[0, ]*3, ]*3, ]*3)) def test_sympy__diffgeom__diffgeom__CovarDerivativeOp(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseVectorField, CovarDerivativeOp cs = CoordSystem('name', Patch('name', Manifold('name', 3))) v = BaseVectorField(cs, 0) _test_args(CovarDerivativeOp(v, [[[0, ]*3, ]*3, ]*3)) def test_sympy__categories__baseclasses__Class(): from sympy.categories.baseclasses import Class assert _test_args(Class()) def test_sympy__categories__baseclasses__Object(): from sympy.categories import Object assert _test_args(Object("A")) @XFAIL def test_sympy__categories__baseclasses__Morphism(): from sympy.categories import Object, Morphism assert 
_test_args(Morphism(Object("A"), Object("B"))) def test_sympy__categories__baseclasses__IdentityMorphism(): from sympy.categories import Object, IdentityMorphism assert _test_args(IdentityMorphism(Object("A"))) def test_sympy__categories__baseclasses__NamedMorphism(): from sympy.categories import Object, NamedMorphism assert _test_args(NamedMorphism(Object("A"), Object("B"), "f")) def test_sympy__categories__baseclasses__CompositeMorphism(): from sympy.categories import Object, NamedMorphism, CompositeMorphism A = Object("A") B = Object("B") C = Object("C") f = NamedMorphism(A, B, "f") g = NamedMorphism(B, C, "g") assert _test_args(CompositeMorphism(f, g)) def test_sympy__categories__baseclasses__Diagram(): from sympy.categories import Object, NamedMorphism, Diagram A = Object("A") B = Object("B") C = Object("C") f = NamedMorphism(A, B, "f") d = Diagram([f]) assert _test_args(d) def test_sympy__categories__baseclasses__Category(): from sympy.categories import Object, NamedMorphism, Diagram, Category A = Object("A") B = Object("B") C = Object("C") f = NamedMorphism(A, B, "f") g = NamedMorphism(B, C, "g") d1 = Diagram([f, g]) d2 = Diagram([f]) K = Category("K", commutative_diagrams=[d1, d2]) assert _test_args(K) def test_sympy__ntheory__factor___totient(): from sympy.ntheory.factor_ import totient k = symbols('k', integer=True) t = totient(k) assert _test_args(t) def test_sympy__ntheory__factor___divisor_sigma(): from sympy.ntheory.factor_ import divisor_sigma k = symbols('k', integer=True) n = symbols('n', integer=True) t = divisor_sigma(n, k) assert _test_args(t) def test_sympy__ntheory__residue_ntheory__mobius(): from sympy.ntheory import mobius assert _test_args(mobius(2)) def test_sympy__physics__optics__waves__TWave(): from sympy.physics.optics import TWave A, f, phi = symbols('A, f, phi') assert _test_args(TWave(A, f, phi)) def test_sympy__physics__optics__gaussopt__BeamParameter(): from sympy.physics.optics import BeamParameter assert _test_args(BeamParameter(530e-9, 1, w=1e-3)) def test_sympy__physics__optics__medium__Medium(): from sympy.physics.optics import Medium assert _test_args(Medium('m')) def test_sympy__printing__codeprinter__Assignment(): from sympy.printing.codeprinter import Assignment assert _test_args(Assignment(x, y)) def test_sympy__vector__coordsysrect__CoordSysCartesian(): from sympy.vector.coordsysrect import CoordSysCartesian assert _test_args(CoordSysCartesian('C')) def test_sympy__vector__point__Point(): from sympy.vector.point import Point assert _test_args(Point('P')) def test_sympy__vector__basisdependent__BasisDependent(): from sympy.vector.basisdependent import BasisDependent #These classes have been created to maintain an OOP hierarchy #for Vectors and Dyadics. Are NOT meant to be initialized def test_sympy__vector__basisdependent__BasisDependentMul(): from sympy.vector.basisdependent import BasisDependentMul #These classes have been created to maintain an OOP hierarchy #for Vectors and Dyadics. Are NOT meant to be initialized def test_sympy__vector__basisdependent__BasisDependentAdd(): from sympy.vector.basisdependent import BasisDependentAdd #These classes have been created to maintain an OOP hierarchy #for Vectors and Dyadics. Are NOT meant to be initialized def test_sympy__vector__basisdependent__BasisDependentZero(): from sympy.vector.basisdependent import BasisDependentZero #These classes have been created to maintain an OOP hierarchy #for Vectors and Dyadics. 
Are NOT meant to be initialized def test_sympy__vector__vector__BaseVector(): from sympy.vector.vector import BaseVector from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') assert _test_args(BaseVector('Ci', 0, C, ' ', ' ')) def test_sympy__vector__vector__VectorAdd(): from sympy.vector.vector import VectorAdd, VectorMul from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') from sympy.abc import a, b, c, x, y, z v1 = a*C.i + b*C.j + c*C.k v2 = x*C.i + y*C.j + z*C.k assert _test_args(VectorAdd(v1, v2)) assert _test_args(VectorMul(x, v1)) def test_sympy__vector__vector__VectorMul(): from sympy.vector.vector import VectorMul from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') from sympy.abc import a assert _test_args(VectorMul(a, C.i)) def test_sympy__vector__vector__VectorZero(): from sympy.vector.vector import VectorZero assert _test_args(VectorZero()) def test_sympy__vector__vector__Vector(): from sympy.vector.vector import Vector #Vector is never to be initialized using args pass def test_sympy__vector__dyadic__Dyadic(): from sympy.vector.dyadic import Dyadic #Dyadic is never to be initialized using args pass def test_sympy__vector__dyadic__BaseDyadic(): from sympy.vector.dyadic import BaseDyadic from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') assert _test_args(BaseDyadic(C.i, C.j)) def test_sympy__vector__dyadic__DyadicMul(): from sympy.vector.dyadic import BaseDyadic, DyadicMul from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') assert _test_args(DyadicMul(3, BaseDyadic(C.i, C.j))) def test_sympy__vector__dyadic__DyadicAdd(): from sympy.vector.dyadic import BaseDyadic, DyadicAdd from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') assert _test_args(2 * DyadicAdd(BaseDyadic(C.i, C.i), BaseDyadic(C.i, C.j))) def test_sympy__vector__dyadic__DyadicZero(): from sympy.vector.dyadic import DyadicZero assert _test_args(DyadicZero()) def test_sympy__vector__deloperator__Del(): from sympy.vector.deloperator import Del from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') assert _test_args(Del(C)) def test_sympy__vector__orienters__Orienter(): from sympy.vector.orienters import Orienter #Not to be initialized def test_sympy__vector__orienters__ThreeAngleOrienter(): from sympy.vector.orienters import ThreeAngleOrienter #Not to be initialized def test_sympy__vector__orienters__AxisOrienter(): from sympy.vector.orienters import AxisOrienter from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') assert _test_args(AxisOrienter(x, C.i)) def test_sympy__vector__orienters__BodyOrienter(): from sympy.vector.orienters import BodyOrienter assert _test_args(BodyOrienter(x, y, z, '123')) def test_sympy__vector__orienters__SpaceOrienter(): from sympy.vector.orienters import SpaceOrienter assert _test_args(SpaceOrienter(x, y, z, '123')) def test_sympy__vector__orienters__QuaternionOrienter(): from sympy.vector.orienters import QuaternionOrienter a, b, c, d = symbols('a b c d') assert _test_args(QuaternionOrienter(a, b, c, d)) def test_sympy__vector__scalar__BaseScalar(): from sympy.vector.scalar import BaseScalar from sympy.vector.coordsysrect import CoordSysCartesian C = CoordSysCartesian('C') assert _test_args(BaseScalar('Cx', 0, C, ' ', ' '))
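# A hedged sketch (assumed, not copied from the SymPy sources above): every
# assertion in the preceding test chunk funnels through a helper named
# _test_args, defined earlier in sympy/core/tests/test_args.py. The invariant
# these tests rely on -- that an object can be rebuilt from .args because every
# element of .args is itself a Basic -- might be reconstructed like this; the
# upstream body may differ in detail.
def _test_args(obj):
    from sympy.core.basic import Basic
    # All constructor arguments of a Basic subclass must themselves be Basic,
    # so obj.func(*obj.args) can reconstruct an equivalent object.
    return all(isinstance(arg, Basic) for arg in obj.args)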
#ARC076e
def main():
    import sys
    input = sys.stdin.readline
    sys.setrecursionlimit(10**6)
    # solution body is not present in this snippet


if __name__ == '__main__':
    main()
from django.core.management.base import BaseCommand
from django.utils.timezone import now


class Command(BaseCommand):
    args = '[event_slug...]'
    help = 'Create missing email aliases'

    def handle(self, *args, **opts):
        from access.models import InternalEmailAlias
        InternalEmailAlias.ensure_internal_email_aliases()
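# A hedged usage sketch for the management command above. The command name is
# taken from the module's file name, which is not shown here, so
# "create_email_aliases" is a hypothetical placeholder.
from django.core.management import call_command

call_command("create_email_aliases")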
from django.contrib.auth.models import User
from rest_framework.test import APITestCase


class FVHAPITestCase(APITestCase):
    def assert_dict_contains(self, superset, subset, path=''):
        for key, expected in subset.items():
            full_path = path + key
            received = superset.get(key, None)
            if isinstance(expected, dict) and isinstance(received, dict):
                self.assert_dict_contains(superset[key], expected, full_path + '.')
            else:
                assert received == expected, 'Value mismatch for key {}: {} != {}'.format(
                    full_path, expected, received
                )

    def create_user(self):
        return User.objects.create(
            username='courier',
            first_name='Coranne',
            last_name='Courier',
            email='coranne@couriersrus.com')

    def create_and_login_user(self):
        user = self.create_user()
        self.client.force_login(user)
        return user
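# A hedged usage sketch of the base class above: a hypothetical test that logs
# in a user and checks a nested payload with assert_dict_contains. The payload
# and test name are invented for illustration and are not part of any real API.
class ExampleEndpointTests(FVHAPITestCase):
    def test_assert_dict_contains_accepts_partial_match(self):
        self.create_and_login_user()
        payload = {'user': {'username': 'courier', 'first_name': 'Coranne'}, 'extra': 1}
        # Only the keys listed in the subset are compared; 'extra' is ignored.
        self.assert_dict_contains(payload, {'user': {'username': 'courier'}})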
"ts_project rule" load("@build_bazel_rules_nodejs//:providers.bzl", "DeclarationInfo", "NpmPackageInfo", "declaration_info", "js_module_info", "run_node") _DEFAULT_TSC = ( # BEGIN-INTERNAL "@npm" + # END-INTERNAL "//typescript/bin:tsc" ) _ATTRS = { "args": attr.string_list(), "declaration_dir": attr.string(), "deps": attr.label_list(providers = [DeclarationInfo]), "extends": attr.label_list(allow_files = [".json"]), "out_dir": attr.string(), "root_dir": attr.string(), # NB: no restriction on extensions here, because tsc sometimes adds type-check support # for more file kinds (like require('some.json')) and also # if you swap out the `compiler` attribute (like with ngtsc) # that compiler might allow more sources than tsc does. "srcs": attr.label_list(allow_files = True, mandatory = True), "tsc": attr.label(default = Label(_DEFAULT_TSC), executable = True, cfg = "host"), "tsconfig": attr.label(mandatory = True, allow_single_file = [".json"]), } # tsc knows how to produce the following kinds of output files. # NB: the macro `ts_project_macro` will set these outputs based on user # telling us which settings are enabled in the tsconfig for this project. _OUTPUTS = { "buildinfo_out": attr.output(), "js_outs": attr.output_list(), "map_outs": attr.output_list(), "typing_maps_outs": attr.output_list(), "typings_outs": attr.output_list(), } _TsConfigInfo = provider( doc = """Passes tsconfig.json files to downstream compilations so that TypeScript can read them. This is needed to support Project References""", fields = { "tsconfigs": "depset of tsconfig.json files", }, ) def _join(*elements): return "/".join([f for f in elements if f]) def _ts_project_impl(ctx): arguments = ctx.actions.args() # Add user specified arguments *before* rule supplied arguments arguments.add_all(ctx.attr.args) arguments.add_all([ "--project", ctx.file.tsconfig.path, "--outDir", _join(ctx.bin_dir.path, ctx.label.package, ctx.attr.out_dir), "--rootDir", _join(ctx.label.package, ctx.attr.root_dir) if ctx.label.package else ".", ]) if len(ctx.outputs.typings_outs) > 0: declaration_dir = ctx.attr.declaration_dir if ctx.attr.declaration_dir else ctx.attr.out_dir arguments.add_all([ "--declarationDir", _join(ctx.bin_dir.path, ctx.label.package, declaration_dir), ]) # When users report problems, we can ask them to re-build with # --define=VERBOSE_LOGS=1 # so anything that's useful to diagnose rule failures belongs here if "VERBOSE_LOGS" in ctx.var.keys(): arguments.add_all([ # What files were in the ts.Program "--listFiles", # Did tsc write all outputs to the place we expect to find them? "--listEmittedFiles", # Why did module resolution fail? "--traceResolution", # Why was the build slow? "--diagnostics", "--extendedDiagnostics", ]) deps_depsets = [] for dep in ctx.attr.deps: if _TsConfigInfo in dep: deps_depsets.append(dep[_TsConfigInfo].tsconfigs) if NpmPackageInfo in dep: # TODO: we could maybe filter these to be tsconfig.json or *.d.ts only # we don't expect tsc wants to read any other files from npm packages. deps_depsets.append(dep[NpmPackageInfo].sources) if DeclarationInfo in dep: deps_depsets.append(dep[DeclarationInfo].transitive_declarations) inputs = ctx.files.srcs + depset(transitive = deps_depsets).to_list() + [ctx.file.tsconfig] if ctx.attr.extends: inputs.extend(ctx.files.extends) # We do not try to predeclare json_outs, because their output locations generally conflict with their path in the source tree. # (The exception is when out_dir is used, then the .json output is a different path than the input.) 
# However tsc will copy .json srcs to the output tree so we want to declare these outputs to include along with .js Default outs # NB: We don't have emit_declaration_only setting here, so use presence of any JS outputs as an equivalent. # tsc will only produce .json if it also produces .js if len(ctx.outputs.js_outs): json_outs = [ ctx.actions.declare_file(_join(ctx.attr.out_dir, src.short_path[len(ctx.label.package) + 1:])) for src in ctx.files.srcs if src.basename.endswith(".json") ] else: json_outs = [] outputs = json_outs + ctx.outputs.js_outs + ctx.outputs.map_outs + ctx.outputs.typings_outs + ctx.outputs.typing_maps_outs if ctx.outputs.buildinfo_out: outputs.append(ctx.outputs.buildinfo_out) runtime_outputs = depset(json_outs + ctx.outputs.js_outs + ctx.outputs.map_outs) typings_outputs = ctx.outputs.typings_outs + ctx.outputs.typing_maps_outs + [s for s in ctx.files.srcs if s.path.endswith(".d.ts")] if len(outputs) > 0: run_node( ctx, inputs = inputs, arguments = [arguments], outputs = outputs, executable = "tsc", progress_message = "Compiling TypeScript project %s [tsc -p %s]" % ( ctx.label, ctx.file.tsconfig.short_path, ), ) providers = [ # DefaultInfo is what you see on the command-line for a built library, # and determines what files are used by a simple non-provider-aware # downstream library. # Only the JavaScript outputs are intended for use in non-TS-aware # dependents. DefaultInfo( files = runtime_outputs, runfiles = ctx.runfiles( transitive_files = runtime_outputs, collect_default = True, ), ), js_module_info( sources = runtime_outputs, deps = ctx.attr.deps, ), _TsConfigInfo(tsconfigs = depset([ctx.file.tsconfig] + ctx.files.extends, transitive = [ dep[_TsConfigInfo].tsconfigs for dep in ctx.attr.deps if _TsConfigInfo in dep ])), ] # Don't provide DeclarationInfo if there are no typings to provide. # Improves error messaging if a ts_project needs declaration = True if len(typings_outputs) or len(ctx.attr.deps): providers.append(declaration_info(depset(typings_outputs), ctx.attr.deps)) providers.append(OutputGroupInfo(types = depset(typings_outputs))) return providers ts_project = rule( implementation = _ts_project_impl, attrs = dict(_ATTRS, **_OUTPUTS), ) def _validate_options_impl(ctx): # Bazel won't run our action unless its output is needed, so make a marker file # We make it a .d.ts file so we can plumb it to the deps of the ts_project compile. 
marker = ctx.actions.declare_file("%s.optionsvalid.d.ts" % ctx.label.name) arguments = ctx.actions.args() arguments.add_all([ctx.file.tsconfig.path, marker.path, ctx.attr.target, struct( declaration = ctx.attr.declaration, declaration_map = ctx.attr.declaration_map, composite = ctx.attr.composite, emit_declaration_only = ctx.attr.emit_declaration_only, source_map = ctx.attr.source_map, incremental = ctx.attr.incremental, ).to_json()]) run_node( ctx, inputs = [ctx.file.tsconfig] + ctx.files.extends, outputs = [marker], arguments = [arguments], executable = "validator", ) return [ DeclarationInfo( transitive_declarations = depset([marker]), ), ] validate_options = rule( implementation = _validate_options_impl, attrs = { "composite": attr.bool(), "declaration": attr.bool(), "declaration_map": attr.bool(), "emit_declaration_only": attr.bool(), "extends": attr.label_list(allow_files = [".json"]), "incremental": attr.bool(), "source_map": attr.bool(), "target": attr.string(), "tsconfig": attr.label(mandatory = True, allow_single_file = [".json"]), "validator": attr.label(default = Label("//packages/typescript/bin:ts_project_options_validator"), executable = True, cfg = "host"), }, ) def _out_paths(srcs, outdir, rootdir, ext): rootdir_replace_pattern = rootdir + "/" if rootdir else "" return [_join(outdir, f[:f.rindex(".")].replace(rootdir_replace_pattern, "") + ext) for f in srcs if not f.endswith(".d.ts") and not f.endswith(".json")] def ts_project_macro( name = "tsconfig", tsconfig = None, srcs = None, args = [], deps = [], extends = None, declaration = False, source_map = False, declaration_map = False, composite = False, incremental = False, emit_declaration_only = False, tsc = None, validate = True, declaration_dir = None, out_dir = None, root_dir = None, **kwargs): """Compiles one TypeScript project using `tsc --project` This is a drop-in replacement for the `tsc` rule automatically generated for the "typescript" package, typically loaded from `@npm//typescript:index.bzl`. Unlike bare `tsc`, this rule understands the Bazel interop mechanism (Providers) so that this rule works with others that produce or consume TypeScript typings (`.d.ts` files). Unlike `ts_library`, this rule is the thinnest possible layer of Bazel interoperability on top of the TypeScript compiler. It shifts the burden of configuring TypeScript into the tsconfig.json file. See https://github.com/bazelbuild/rules_nodejs/blob/master/docs/TypeScript.md#alternatives for more details about the trade-offs between the two rules. Some TypeScript options affect which files are emitted, and Bazel wants to know these ahead-of-time. So several options from the tsconfig file must be mirrored as attributes to ts_project. See https://www.typescriptlang.org/v2/en/tsconfig for a listing of the TypeScript options. Any code that works with `tsc` should work with `ts_project` with a few caveats: - Bazel requires that the `outDir` (and `declarationDir`) be set to `bazel-out/[target architecture]/bin/path/to/package` so we override whatever settings appear in your tsconfig. - Bazel expects that each output is produced by a single rule. Thus if you have two `ts_project` rules with overlapping sources (the same `.ts` file appears in more than one) then you get an error about conflicting `.js` output files if you try to build both together. Worse, if you build them separately then the output directory will contain whichever one you happened to build most recently. This is highly discouraged. 
> Note: in order for TypeScript to resolve relative references to the bazel-out folder, > we recommend that the base tsconfig contain a rootDirs section that includes all > possible locations they may appear. > > We hope this will not be needed in some future release of TypeScript. > Follow https://github.com/microsoft/TypeScript/issues/37257 for more info. > > For example, if the base tsconfig file relative to the workspace root is > `path/to/tsconfig.json` then you should configure like: > > ``` > "compilerOptions": { > "rootDirs": [ > ".", > "../../bazel-out/darwin-fastbuild/bin/path/to", > "../../bazel-out/k8-fastbuild/bin/path/to", > "../../bazel-out/x64_windows-fastbuild/bin/path/to", > "../../bazel-out/darwin-dbg/bin/path/to", > "../../bazel-out/k8-dbg/bin/path/to", > "../../bazel-out/x64_windows-dbg/bin/path/to", > ] > } > ``` ### Issues when running non-sandboxed When using a non-sandboxed spawn strategy (which is the default on Windows), you may observe these problems which require workarounds: 1) Bazel deletes outputs from the previous execution before running `tsc`. This causes a problem with TypeScript's incremental mode: if the `.tsbuildinfo` file is not known to be an output of the rule, then Bazel will leave it in the output directory, and when `tsc` runs, it may see that the outputs written by the prior invocation are up-to-date and skip the emit of these files. This will cause Bazel to intermittently fail with an error that some outputs were not written. This is why we depend on `composite` and/or `incremental` attributes to be provided, so we can tell Bazel to expect a `.tsbuildinfo` output to ensure it is deleted before a subsequent compilation. At present, we don't do anything useful with the `.tsbuildinfo` output, and this rule does not actually have incremental behavior. Deleting the file is actually counter-productive in terms of TypeScript compile performance. Follow https://github.com/bazelbuild/rules_nodejs/issues/1726 2) When using Project References, TypeScript will expect to verify that the outputs of referenced projects are up-to-date with respect to their inputs. (This is true even without using the `--build` option). When using a non-sandboxed spawn strategy, `tsc` can read the sources from other `ts_project` rules in your project, and will expect that the `tsconfig.json` file for those references will indicate where the outputs were written. However the `outDir` is determined by this Bazel rule so it cannot be known from reading the `tsconfig.json` file. This problem is manifested as a TypeScript diagnostic like `error TS6305: Output file '/path/to/execroot/a.d.ts' has not been built from source file '/path/to/execroot/a.ts'.` As a workaround, you can give the Windows "fastbuild" output directory as the `outDir` in your tsconfig file. On other platforms, the value isn't read so it does no harm. See https://github.com/bazelbuild/rules_nodejs/tree/stable/packages/typescript/test/ts_project as an example. We hope this will be fixed in a future release of TypeScript; follow https://github.com/microsoft/TypeScript/issues/37378 3) When TypeScript encounters an import statement, it adds the source file resolved by that reference to the program. However you may have included that source file in a different project, so this causes the problem mentioned above where a source file is in multiple programs. (Note, if you use Project References this is not the case, TS will know the referenced file is part of the other program.) 
This will result in duplicate emit for the same file, which produces an error since the files written to the output tree are read-only. Workarounds include using using Project References, or simply grouping the whole compilation into one program (if this doesn't exceed your time budget). Args: name: A name for the target. We recommend you use the basename (no `.json` extension) of the tsconfig file that should be compiled. srcs: List of labels of TypeScript source files to be provided to the compiler. If absent, defaults to `**/*.ts[x]` (all TypeScript files in the package). deps: List of labels of other rules that produce TypeScript typings (.d.ts files) tsconfig: Label of the tsconfig.json file to use for the compilation. By default, we add `.json` to the `name` attribute. extends: List of labels of tsconfig file(s) referenced in `extends` section of tsconfig. Must include any tsconfig files "chained" by extends clauses. args: List of strings of additional command-line arguments to pass to tsc. tsc: Label of the TypeScript compiler binary to run. For example, `tsc = "@my_deps//typescript/bin:tsc"` Or you can pass a custom compiler binary instead. validate: boolean; whether to check that the tsconfig settings match the attributes. root_dir: a string specifying a subdirectory under the input package which should be consider the root directory of all the input files. Equivalent to the TypeScript --rootDir option. By default it is '.', meaning the source directory where the BUILD file lives. out_dir: a string specifying a subdirectory under the bazel-out folder where outputs are written. Equivalent to the TypeScript --outDir option. Note that Bazel always requires outputs be written under a subdirectory matching the input package, so if your rule appears in path/to/my/package/BUILD.bazel and out_dir = "foo" then the .js files will appear in bazel-out/[arch]/bin/path/to/my/package/foo/*.js. By default the out_dir is '.', meaning the packages folder in bazel-out. declaration_dir: a string specifying a subdirectory under the bazel-out folder where generated declaration outputs are written. Equivalent to the TypeScript --declarationDir option. By default declarations are written to the out_dir. declaration: if the `declaration` bit is set in the tsconfig. Instructs Bazel to expect a `.d.ts` output for each `.ts` source. source_map: if the `sourceMap` bit is set in the tsconfig. Instructs Bazel to expect a `.js.map` output for each `.ts` source. declaration_map: if the `declarationMap` bit is set in the tsconfig. Instructs Bazel to expect a `.d.ts.map` output for each `.ts` source. composite: if the `composite` bit is set in the tsconfig. Instructs Bazel to expect a `.tsbuildinfo` output and a `.d.ts` output for each `.ts` source. incremental: if the `incremental` bit is set in the tsconfig. Instructs Bazel to expect a `.tsbuildinfo` output. emit_declaration_only: if the `emitDeclarationOnly` bit is set in the tsconfig. Instructs Bazel *not* to expect `.js` or `.js.map` outputs for `.ts` sources. **kwargs: passed through to underlying rule, allows eg. 
visibility, tags """ if srcs == None: srcs = native.glob(["**/*.ts", "**/*.tsx"]) if tsconfig == None: tsconfig = name + ".json" extra_deps = [] if validate: validate_options( name = "_validate_%s_options" % name, target = "//%s:%s" % (native.package_name(), name), declaration = declaration, source_map = source_map, declaration_map = declaration_map, composite = composite, incremental = incremental, emit_declaration_only = emit_declaration_only, tsconfig = tsconfig, extends = extends, ) extra_deps.append("_validate_%s_options" % name) typings_out_dir = declaration_dir if declaration_dir else out_dir ts_project( name = name, srcs = srcs, args = args, deps = deps + extra_deps, tsconfig = tsconfig, extends = extends, declaration_dir = declaration_dir, out_dir = out_dir, root_dir = root_dir, js_outs = _out_paths(srcs, out_dir, root_dir, ".js") if not emit_declaration_only else [], map_outs = _out_paths(srcs, out_dir, root_dir, ".js.map") if source_map and not emit_declaration_only else [], typings_outs = _out_paths(srcs, typings_out_dir, root_dir, ".d.ts") if declaration or composite else [], typing_maps_outs = _out_paths(srcs, typings_out_dir, root_dir, ".d.ts.map") if declaration_map else [], buildinfo_out = tsconfig[:-5] + ".tsbuildinfo" if composite or incremental else None, tsc = tsc, **kwargs )
# encoding: utf-8
from __future__ import unicode_literals


class TranslationError(Exception):
    """Failure to translate source."""
    pass
# Write a Python program to get the execution time of a Python function.
import time


def sum_of_n_numbers(x):
    start_time = time.time()
    s = 0
    for i in range(1, x + 1):
        s = s + i
    end_time = time.time()
    return s, end_time - start_time


n = 5
print("\nSum of 1 to", n, "and the time required to calculate it:", sum_of_n_numbers(n))
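The same measurement can also be taken without embedding the timing calls in the function body. The sketch below is an illustrative alternative, not part of the original snippet; it uses only the standard library and `time.perf_counter()`, which has higher resolution than `time.time()` for short intervals.

```python
# Minimal sketch: time any function from the outside with a decorator.
import time
from functools import wraps

def timed(func):
    """Wrap `func` so it returns (result, elapsed_seconds)."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        result = func(*args, **kwargs)
        elapsed = time.perf_counter() - start
        return result, elapsed
    return wrapper

@timed
def sum_of_n(x):
    return sum(range(1, x + 1))

print(sum_of_n(5))  # e.g. (15, 1.2e-06)
```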
''' Native support for Multitouch devices on Linux, using libmtdev. =============================================================== The Mtdev project is a part of the Ubuntu Maverick multitouch architecture. You can read more on http://wiki.ubuntu.com/Multitouch To configure MTDev, it's preferable to use probesysfs providers. Check :py:class:`~kivy.input.providers.probesysfs` for more information. Otherwise, add this to your configuration:: [input] # devicename = hidinput,/dev/input/eventXX acert230h = mtdev,/dev/input/event2 .. note:: You must have read access to the input event. You can use a custom range for the X, Y and pressure values. On some drivers, the range reported is invalid. To fix that, you can add these options to the argument line: * invert_x : 1 to invert X axis * invert_y : 1 to invert Y axis * min_position_x : X minimum * max_position_x : X maximum * min_position_y : Y minimum * max_position_y : Y maximum * min_pressure : pressure minimum * max_pressure : pressure maximum * min_touch_major : width shape minimum * max_touch_major : width shape maximum * min_touch_minor : width shape minimum * max_touch_minor : height shape maximum * rotation : 0,90,180 or 270 to rotate ''' __all__ = ('MTDMotionEventProvider', 'MTDMotionEvent') import os from kivy.input.motionevent import MotionEvent from kivy.input.shape import ShapeRect class MTDMotionEvent(MotionEvent): def depack(self, args): self.is_touch = True if 'x' in args: self.sx = args['x'] else: self.sx = -1 if 'y' in args: self.sy = args['y'] else: self.sy = -1 self.profile = ['pos'] if 'size_w' in args and 'size_h' in args: self.shape = ShapeRect() self.shape.width = args['size_w'] self.shape.height = args['size_h'] self.profile.append('shape') if 'pressure' in args: self.pressure = args['pressure'] self.profile.append('pressure') super(MTDMotionEvent, self).depack(args) def __str__(self): i, sx, sy, d = (self.id, self.sx, self.sy, self.device) return '<MTDMotionEvent id=%d pos=(%f, %f) device=%s>' % (i, sx, sy, d) if 'KIVY_DOC' in os.environ: # documentation hack MTDMotionEventProvider = None else: import threading import collections from kivy.lib.mtdev import Device, \ MTDEV_TYPE_EV_ABS, MTDEV_CODE_SLOT, MTDEV_CODE_POSITION_X, \ MTDEV_CODE_POSITION_Y, MTDEV_CODE_PRESSURE, \ MTDEV_CODE_TOUCH_MAJOR, MTDEV_CODE_TOUCH_MINOR, \ MTDEV_CODE_TRACKING_ID, MTDEV_ABS_POSITION_X, \ MTDEV_ABS_POSITION_Y, MTDEV_ABS_TOUCH_MINOR, \ MTDEV_ABS_TOUCH_MAJOR from kivy.input.provider import MotionEventProvider from kivy.input.factory import MotionEventFactory from kivy.logger import Logger class MTDMotionEventProvider(MotionEventProvider): options = ('min_position_x', 'max_position_x', 'min_position_y', 'max_position_y', 'min_pressure', 'max_pressure', 'min_touch_major', 'max_touch_major', 'min_touch_minor', 'max_touch_minor', 'invert_x', 'invert_y', 'rotation') def __init__(self, device, args): super(MTDMotionEventProvider, self).__init__(device, args) self._device = None self.input_fn = None self.default_ranges = dict() # split arguments args = args.split(',') if not args: Logger.error('MTD: No filename pass to MTD configuration') Logger.error('MTD: Use /dev/input/event0 for example') return # read filename self.input_fn = args[0] Logger.info('MTD: Read event from <%s>' % self.input_fn) # read parameters for arg in args[1:]: if arg == '': continue arg = arg.split('=') # ensure it's a key = value if len(arg) != 2: err = 'MTD: Bad parameter %s: Not in key=value format' %\ arg Logger.error(err) continue # ensure the key exist key, value = arg if 
key not in MTDMotionEventProvider.options: Logger.error('MTD: unknown %s option' % key) continue # ensure the value try: self.default_ranges[key] = int(value) except ValueError: err = 'MTD: invalid value %s for option %s' % (key, value) Logger.error(err) continue # all good! Logger.info('MTD: Set custom %s to %d' % (key, int(value))) if 'rotation' not in self.default_ranges: self.default_ranges['rotation'] = 0 elif self.default_ranges['rotation'] not in (0, 90, 180, 270): Logger.error('HIDInput: invalid rotation value ({})'.format( self.default_ranges['rotation'])) self.default_ranges['rotation'] = 0 def start(self): if self.input_fn is None: return self.uid = 0 self.queue = collections.deque() self.thread = threading.Thread( target=self._thread_run, kwargs=dict( queue=self.queue, input_fn=self.input_fn, device=self.device, default_ranges=self.default_ranges)) self.thread.daemon = True self.thread.start() def _thread_run(self, **kwargs): input_fn = kwargs.get('input_fn') queue = kwargs.get('queue') device = kwargs.get('device') drs = kwargs.get('default_ranges').get touches = {} touches_sent = [] point = {} l_points = {} def assign_coord(point, value, invert, coords): cx, cy = coords if invert: value = 1. - value if rotation == 0: point[cx] = value elif rotation == 90: point[cy] = value elif rotation == 180: point[cx] = 1. - value elif rotation == 270: point[cy] = 1. - value def process(points): for args in points: # this can happen if we have a touch going on already at # the start of the app if 'id' not in args: continue tid = args['id'] try: touch = touches[tid] except KeyError: touch = MTDMotionEvent(device, tid, args) touches[touch.id] = touch touch.move(args) action = 'update' if tid not in touches_sent: action = 'begin' touches_sent.append(tid) if 'delete' in args: action = 'end' del args['delete'] del touches[touch.id] touches_sent.remove(tid) touch.update_time_end() queue.append((action, touch)) def normalize(value, vmin, vmax): return (value - vmin) / float(vmax - vmin) # open mtdev device _fn = input_fn _slot = 0 try: _device = Device(_fn) except OSError as e: if e.errno == 13: # Permission denied Logger.warn( 'MTD: Unable to open device "{0}". 
Please ensure you' ' have the appropriate permissions.'.format(_fn)) return else: raise _changes = set() # prepare some vars to get limit of some component ab = _device.get_abs(MTDEV_ABS_POSITION_X) range_min_position_x = drs('min_position_x', ab.minimum) range_max_position_x = drs('max_position_x', ab.maximum) Logger.info('MTD: <%s> range position X is %d - %d' % (_fn, range_min_position_x, range_max_position_x)) ab = _device.get_abs(MTDEV_ABS_POSITION_Y) range_min_position_y = drs('min_position_y', ab.minimum) range_max_position_y = drs('max_position_y', ab.maximum) Logger.info('MTD: <%s> range position Y is %d - %d' % (_fn, range_min_position_y, range_max_position_y)) ab = _device.get_abs(MTDEV_ABS_TOUCH_MAJOR) range_min_major = drs('min_touch_major', ab.minimum) range_max_major = drs('max_touch_major', ab.maximum) Logger.info('MTD: <%s> range touch major is %d - %d' % (_fn, range_min_major, range_max_major)) ab = _device.get_abs(MTDEV_ABS_TOUCH_MINOR) range_min_minor = drs('min_touch_minor', ab.minimum) range_max_minor = drs('max_touch_minor', ab.maximum) Logger.info('MTD: <%s> range touch minor is %d - %d' % (_fn, range_min_minor, range_max_minor)) range_min_pressure = drs('min_pressure', 0) range_max_pressure = drs('max_pressure', 255) Logger.info('MTD: <%s> range pressure is %d - %d' % (_fn, range_min_pressure, range_max_pressure)) invert_x = int(bool(drs('invert_x', 0))) invert_y = int(bool(drs('invert_y', 0))) Logger.info('MTD: <%s> axes invertion: X is %d, Y is %d' % (_fn, invert_x, invert_y)) rotation = drs('rotation', 0) Logger.info('MTD: <%s> rotation set to %d' % (_fn, rotation)) while _device: # idle as much as we can. while _device.idle(1000): continue # got data, read all without redoing idle while True: data = _device.get() if data is None: break # set the working slot if data.type == MTDEV_TYPE_EV_ABS and \ data.code == MTDEV_CODE_SLOT: _slot = data.value continue # fill the slot if _slot not in l_points: l_points[_slot] = dict() point = l_points[_slot] ev_value = data.value ev_code = data.code if ev_code == MTDEV_CODE_POSITION_X: val = normalize(ev_value, range_min_position_x, range_max_position_x) assign_coord(point, val, invert_x, 'xy') elif ev_code == MTDEV_CODE_POSITION_Y: val = 1. - normalize(ev_value, range_min_position_y, range_max_position_y) assign_coord(point, val, invert_y, 'yx') elif ev_code == MTDEV_CODE_PRESSURE: point['pressure'] = normalize(ev_value, range_min_pressure, range_max_pressure) elif ev_code == MTDEV_CODE_TOUCH_MAJOR: point['size_w'] = normalize(ev_value, range_min_major, range_max_major) elif ev_code == MTDEV_CODE_TOUCH_MINOR: point['size_h'] = normalize(ev_value, range_min_minor, range_max_minor) elif ev_code == MTDEV_CODE_TRACKING_ID: if ev_value == -1: point['delete'] = True # force process of changes here, as the slot can be # reused. _changes.add(_slot) process([l_points[x] for x in _changes]) _changes.clear() continue else: point['id'] = ev_value else: # unrecognized command, ignore. continue _changes.add(_slot) # push all changes if _changes: process([l_points[x] for x in _changes]) _changes.clear() def update(self, dispatch_fn): # dispatch all event from threads try: while True: event_type, touch = self.queue.popleft() dispatch_fn(event_type, touch) except: pass MotionEventFactory.register('mtdev', MTDMotionEventProvider)
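The provider's option handling (normalization ranges, `invert_x`/`invert_y`, `rotation`) comes together in the `normalize` and `assign_coord` helpers inside the reader thread. The standalone sketch below restates that transform for clarity; it is not part of the provider (in the original, `rotation` is a closure variable rather than a parameter).

```python
# Sketch of the coordinate handling used by the mtdev provider: raw axis values
# are normalized to 0..1, optionally inverted, then routed to x or y per rotation.
def normalize(value, vmin, vmax):
    return (value - vmin) / float(vmax - vmin)

def assign_coord(point, value, invert, coords, rotation):
    cx, cy = coords           # 'xy' for the X event, 'yx' for the Y event
    if invert:
        value = 1.0 - value
    if rotation == 0:
        point[cx] = value
    elif rotation == 90:
        point[cy] = value
    elif rotation == 180:
        point[cx] = 1.0 - value
    elif rotation == 270:
        point[cy] = 1.0 - value

point = {}
# Raw X of 512 on a 0..1024 axis, no inversion, screen rotated 90 degrees:
assign_coord(point, normalize(512, 0, 1024), invert=False, coords='xy', rotation=90)
print(point)  # {'y': 0.5}
```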
import unittest
from unittest.mock import MagicMock

import pandas as pd
from pandas.testing import assert_frame_equal

from data_export.pipeline.dataset import Dataset


class TestDataset(unittest.TestCase):
    def setUp(self):
        example = MagicMock()
        example.to_dict.return_value = {"data": "example"}
        self.examples = MagicMock()
        self.examples.__iter__.return_value = [example]
        label = MagicMock()
        label.find_by.return_value = {"labels": ["label"]}
        self.labels = MagicMock()
        self.labels.__iter__.return_value = [label]

    def test_to_dataframe(self):
        dataset = Dataset(self.examples, self.labels)
        df = dataset.to_dataframe()
        expected = pd.DataFrame([{"data": "example", "labels": ["label"]}])
        assert_frame_equal(df, expected)
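For readers without access to `data_export.pipeline.dataset`, the following is a hypothetical sketch of a `Dataset` that would satisfy the test above; the real class may be implemented differently, and the per-example `find_by` pairing is an assumption.

```python
# Hypothetical Dataset consistent with the mocks in TestDataset, for illustration only.
import pandas as pd

class Dataset:
    def __init__(self, examples, labels):
        self.examples = examples
        self.labels = labels

    def to_dataframe(self):
        rows = []
        for example, label in zip(self.examples, self.labels):
            row = example.to_dict()
            row.update(label.find_by(example))  # assumption: labels looked up per example
            rows.append(row)
        return pd.DataFrame(rows)
```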
# Copyright 2018 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_channelz/grpc_version.py.template`!!!

VERSION = '1.23.0.dev0'
from ..proto import * from ..graph_io import * import paddle.fluid as fluid import numpy as np from paddle.fluid.core import VarDesc, AttrType def union(list_a, list_b): return list(set(list_a).union(set(list_b))) def difference(list_a, list_b): return list(set(list_a).difference(set(list_b))) class Edge_for_fluid: def __init__(self, param, target, var): self.param = param self.target = target self.var = var class Fluid_edger: def __init__(self, param = None, target = None, var = None): self.edges = [] if param is not None and target is not None: edge = Edge_for_fluid(param, target, var) self.edges.append(edge) def __call__(self): return self.all_targets() def add(self, param, target, var = None): edge = Edge_for_fluid(param, target, var) self.edges.append(edge) def rm_edges_by_param(self, param): for edge in self.edges: if edge.param == param: edge_idx = self.edges.index(edge) del self.edges[edge_idx] def rm(self, target): res = -1 for edge in self.edges: if target == edge.target: edge_idx = self.edges.index(edge) del self.edges[edge_idx] res = res + 1 if res != 0: pass def mv(self, old_target, new_target): res = -1 for edge in self.edges: if old_target == edge.target: edge.target = new_target res = res + 1 if res != 0: pass def all_params(self): params = [] for edge in self.edges: if edge.param not in params: params.append(edge.param) return params def all_targets(self): targets = [] for edge in self.edges: targets.append(edge.target) return targets def targets(self, param): targets = [] for edge in self.edges: if edge.param == param: targets.append(edge.target) return targets def target(self, param, idx = 0): return self.targets(param)[idx] def clear(self): targets_list = self.all_targets() for target in targets_list: self.rm(target) def targets_with_params(self): list_of_targets_and_params = [] for edge in self.edges: target_and_param = [edge.target, edge.param] list_of_targets_and_params.append(target_and_param) return list_of_targets_and_params def vars_by_target(self, target): vars = [] for edge in self.edges: if edge.target == target and edge.var is not None: vars.append(edge.var) return vars def __getitem__(self, idx): if idx < len(self.edges): return self.edges[idx] return None class Fluid_helper: def __init__(self, scope, block): self.scope = scope self.block = block def args_by_input_param(self, op, param_name): if param_name in op.input_names: return op.input(param_name) else: raise NameError('ERROR: param_name %s is not exists.' % ( param_name ) ) def args_by_output_param(self, op, param_name): if param_name in op.output_names: return op.output(param_name) else: raise NameError('ERROR: param_name %s is not exists.' % ( param_name ) ) def var_by_input_param(self, op, param_name, var_idx = 0): var_name = self.args_by_input_param(op, param_name)[var_idx] var = self.block.var(var_name) return var def var_by_output_param(self, op, param_name, var_idx = 0): var_name = self.args_by_output_param(op, param_name)[var_idx] var = self.block.var(var_name) return var def var_name_by_param(self, op, param_name, var_idx = 0): if param_name not in op.input_names + op.output_names: raise NameError('ERROR: param_name %s is not exists.' % ( param_name ) ) elif param_name in op.input_names: if len(op.input(param_name)) > 0: var_name_unicode = op.input(param_name)[var_idx] else: raise NameError('ERROR: param %s has not var.' 
% ( param_name ) ) elif param_name in op.output_names: if len(op.output(param_name)) > 0: var_name_unicode = op.output(param_name)[var_idx] else: raise NameError('ERROR: param %s has not var.' % ( param_name ) ) var = self.block.var(var_name_unicode) var_name = var.name return var_name def var_by_param(self, op, param_name, var_idx = 0): var_name = self.var_name_by_param(op, param_name, var_idx) var = self.block.var(var_name) return var def shape_by_var_name(self, var_name, layout = 'NCHW'): var = self.block.var(var_name) long_tuple = var.shape long_list = list(long_tuple) if layout == 'NCHW': int_list_4d = map(int, [1]*(4-len(long_list)) + long_list) return int_list_4d elif layout == 'UNMODIFIED': return long_list else: raise NameError('ERROR: layout %s is not implemented yet.' % ( layout ) ) def np_data_by_var_name(self, var_name): numpy_array = fluid.executor.fetch_var(var_name, self.scope, True) return numpy_array def dtype_by_var_name(self, var_name): var = self.block.var(var_name) fluid_var_type = var.dtype dtype = ANAKIN_TENSOR_DTYPE[fluid_var_type] return dtype def is_persistable_param(self, op, param_name, var_idx = 0): var = self.var_by_param(op, param_name, var_idx) is_persistable_var = var.persistable return is_persistable_var def var_shape_by_param(self, transpose, op, param_name, var_idx = 0, layout = 'NCHW'): if transpose is True: raise NameError('ERROR: var_shape transpose is not implemented yet.') else: var_name = self.var_name_by_param(op, param_name, var_idx) shape = self.shape_by_var_name(var_name, layout) return shape def data_with_shape_by_param(self, op, param_name, transpose = False, axes = None, var_idx = 0, is_flat_list = True, layout = 'NCHW'): np.set_printoptions(threshold=np.inf, suppress=True) var_name = self.var_name_by_param(op, param_name, var_idx) np_array = self.np_data_by_var_name(var_name) if transpose is True: np_array = np.transpose(np_array, axes) np_shape = np.shape(np_array) if layout == 'NCHW': np_shape = map(int, [1]*(4-len(np_shape)) + list(np_shape)) if is_flat_list is True: flat_list = list(np_array.flatten()) return [flat_list, np_shape] else: return [np_array, np_shape] def np_param(self, op, param_name, transpose = False, axes = None, var_idx = 0): [data, np_shape] = self.data_with_shape_by_param(op, param_name, transpose, \ axes, var_idx, False) return data def dtype_by_param(self, op, param_name, var_idx = 0): var_name = self.var_name_by_param(op, param_name, var_idx) dtype = self.dtype_by_var_name(var_name) return dtype def is_list_type(self, op, attr_name): if op.has_attr(attr_name): fluid_attr_type = op.attr_type(attr_name) if fluid_attr_type in ANAKIN_ATTR_IS_LIST.keys(): return ANAKIN_ATTR_IS_LIST[fluid_attr_type] else: return False # AttrType.LONG else: raise NameError('ERROR: attr_name %s is not exists.' % ( attr_name ) ) def dtype_of_attr(self, op, attr_name): if op.has_attr(attr_name): fluid_attr_type = op.attr_type(attr_name) if fluid_attr_type in ANAKIN_ATTR_DTYPE.keys(): return ANAKIN_ATTR_DTYPE[fluid_attr_type] else: return INT32 # AttrType.LONG else: raise NameError('ERROR: attr_name %s is not exists.' 
% ( attr_name ) ) def attr_data_required(self, op, attr_name): data = op.attr(attr_name) is_list = self.is_list_type(op, attr_name) dtype = self.dtype_of_attr(op, attr_name) if dtype not in [INT32, FLOAT, STR]: return data elif dtype == INT32: return map(int, data) if is_list else int(data) elif dtype == FLOAT: return map(float, data) if is_list else float(data) elif dtype == STR: return bytes(data) def attr_data(self, op, attr_name, default_value = 0, type = None): if op.has_attr(attr_name): return self.attr_data_required(op, attr_name) else: #raise NameError('ERROR: attr_name %s is not exists.' % ( attr_name ) ) return default_value def param_tensor_sh(self, op, param_name, transpose = False, axes = None, reshape = None, var_idx = 0, layout = 'NCHW'): tensor = TensorProtoIO() [flat_data, shape] = self.data_with_shape_by_param(op, param_name, transpose, \ axes, var_idx, True, layout) dtype = self.dtype_by_param(op, param_name, var_idx) tensor.set_data_type(dtype) if dtype in ANAKIN_TENSOR_DTYPESTR.keys(): tensor.set_data(flat_data, ANAKIN_TENSOR_DTYPESTR[dtype]) #pass #debug else: raise NameError('ERROR: Unknown data type (%s)' % ( dtype ) ) if reshape is not None: tensor.set_shape(reshape) else: tensor.set_shape(shape) return [tensor, shape] def param_tensor(self, op, param_name, transpose = False, axes = None, reshape = None, var_idx = 0, layout = 'NCHW'): [tensor, shape] = self.param_tensor_sh(op, param_name, transpose, axes, \ reshape, var_idx, layout) return tensor def create_tensor(self, data_list, data_shape, dtype): tensor = TensorProtoIO() tensor.set_data_type(dtype) tensor.set_data(data_list, ANAKIN_TENSOR_DTYPESTR[dtype]) tensor.set_shape(data_shape) return tensor def gru_tensor_convert(self, origin_h2h, origin_i2h, origin_b, offset=[2, 1, 0]): hidden_size = int(origin_b.size // 3) word_size = int(origin_i2h.size // hidden_size // 3) tar_h2h=np.array(origin_h2h.flatten().tolist()[2*hidden_size*hidden_size:]\ +np.array(origin_h2h.flatten().tolist()[:2*hidden_size*hidden_size])\ .reshape(hidden_size,2,hidden_size)[:,[1,0],:].flatten().tolist())\ .reshape(1,1,hidden_size,3*hidden_size) tar_i2h=origin_i2h.reshape(word_size,3,hidden_size)[:,offset,:]\ .reshape(1,1,word_size,3*hidden_size) tar_b=origin_b.reshape(3, hidden_size)[offset, :].reshape(1,1,1,3 * hidden_size) tar_i2h_h2h=np.concatenate([tar_i2h.flatten(),tar_h2h.flatten()])\ .reshape(1,1,1,3*hidden_size*hidden_size+3*word_size*hidden_size) return tar_i2h_h2h, tar_b def lstm_fc_tensor_merge_convert(self, origin_hidden_size, origin_lstm_w, origin_lstm_b, origin_fc_w, origin_fc_b): layer_size = int (origin_hidden_size // 4) input_size = int (origin_fc_w.size // origin_hidden_size) lstm_bias_num = int (origin_lstm_b.size // layer_size) tar_w = np.vstack((np.hstack((origin_fc_w[:, 1 * layer_size : 2 * layer_size], origin_fc_w[:, 2 * layer_size : 3 * layer_size], origin_fc_w[:, : 1 * layer_size], origin_fc_w[:, 3 * layer_size :])), np.hstack((origin_lstm_w[:, 1 * layer_size : 2 * layer_size], origin_lstm_w[:, 2 * layer_size : 3 * layer_size], origin_lstm_w[:, : 1 * layer_size], origin_lstm_w[:, 3 * layer_size : ])))) if origin_fc_b is not None: split_fc_bc = origin_fc_b.flatten()[: 1 * layer_size] split_fc_bi = origin_fc_b.flatten()[1 * layer_size : 2 * layer_size] split_fc_bf = origin_fc_b.flatten()[2 * layer_size : 3 * layer_size] split_fc_bo = origin_fc_b.flatten()[3 * layer_size : 4 * layer_size] else: split_fc_bc = np.zeros(layer_size) split_fc_bi = np.zeros(layer_size) split_fc_bf = np.zeros(layer_size) split_fc_bo = 
np.zeros(layer_size) split_lstm_bc = origin_lstm_b.flatten()[: 1 * layer_size] split_lstm_bi = origin_lstm_b.flatten()[1 * layer_size: 2 * layer_size] split_lstm_bf = origin_lstm_b.flatten()[2 * layer_size: 3 * layer_size] split_lstm_bo = origin_lstm_b.flatten()[3 * layer_size: 4 * layer_size] split_lstm_bc = np.add(split_lstm_bc, split_fc_bc) split_lstm_bi = np.add(split_lstm_bi, split_fc_bi) split_lstm_bf = np.add(split_lstm_bf, split_fc_bf) split_lstm_bo = np.add(split_lstm_bo, split_fc_bo) if lstm_bias_num == 4: tar_b = np.array(split_lstm_bi.flatten().tolist() + split_lstm_bf.flatten().tolist() + split_lstm_bc.flatten().tolist() + split_lstm_bo.flatten().tolist()) else: split_lstm_wic = origin_lstm_b.flatten()[4 * layer_size : 5 * layer_size] split_lstm_wfc = origin_lstm_b.flatten()[5 * layer_size : 6 * layer_size] split_lstm_woc = origin_lstm_b.flatten()[6 * layer_size :] tar_b = np.array(split_lstm_bi.flatten().tolist() + split_lstm_bf.flatten().tolist() + split_lstm_bc.flatten().tolist() + split_lstm_bo.flatten().tolist() + split_lstm_wic.flatten().tolist() + split_lstm_wfc.flatten().tolist() + split_lstm_woc.flatten().tolist()) return tar_w.reshape(input_size+ layer_size, 4 * layer_size, 1, 1),\ tar_b.reshape(1, origin_lstm_b.size, 1, 1) class Fluid_comparator: def __init__(self, helper): self.helper = helper self.only_list = ['feed', 'fetch'] def compare_by_param(self, op_a, op_b, param): is_weight_a = self.helper.is_persistable_param(op_a, param) is_weight_b = self.helper.is_persistable_param(op_b, param) if is_weight_a and is_weight_b: np_a = self.helper.np_param(op_a, param) np_b = self.helper.np_param(op_b, param) if (np_a == np_b).all() == True: return True else: return False elif is_weight_a is is_weight_b: return True else: return False def have_same_weights(self, op_a, op_b): is_same = True if op_a.input_names == op_b.input_names: params = op_a.input_names for param in params: if self.compare_by_param(op_a, op_b, param) is False: is_same = False return is_same else: return False def compare_by_attr(self, op_a, op_b, attr_name): data_a = self.helper.attr_data(op_a, attr_name) data_b = self.helper.attr_data(op_b, attr_name) return data_a == data_b def have_same_attrs(self, op_a, op_b): is_same = True if op_a.attr_names == op_b.attr_names: attrs = op_a.attr_names for attr in attrs: if self.compare_by_attr(op_a, op_b, attr) is False: is_same = False return is_same else: return False def brothers(self, op_list): is_same = True if len(op_list) > 1: idx = 0 for op_b in op_list[1:]: if op_b.type not in self.only_list: idx = op_list.index(op_b) op_a = op_list[idx - 1] if op_a.type not in self.only_list: same_weights = self.have_same_weights(op_a, op_b) same_attrs = self.have_same_attrs(op_a, op_b) if (same_weights and same_attrs) is False: is_same = False else: raise NameError('ERROR: %s is in only_list.' % ( op_a.type )) else: raise NameError('ERROR: %s is in only_list.' 
% ( op_b.type )) return is_same else: raise NameError('ERROR: Members of op_list must be greater than 2.') ANAKIN_TENSOR_DTYPE = { VarDesc.VarType.BOOL: BOOLEN, VarDesc.VarType.INT32: INT32, VarDesc.VarType.FP16: FLOAT16, VarDesc.VarType.FP32: FLOAT, VarDesc.VarType.FP64: DOUBLE, } ANAKIN_TENSOR_DTYPESTR = { STR: "string", INT32: "int", FLOAT: "float", BOOLEN: "bool", } ANAKIN_ATTR_DTYPE = { AttrType.INT: INT32, AttrType.INTS: INT32, AttrType.FLOAT: FLOAT, AttrType.FLOATS: FLOAT, AttrType.STRING: STR, AttrType.STRINGS: STR, AttrType.BOOL: BOOLEN, AttrType.BOOLS: BOOLEN, } ANAKIN_ATTR_IS_LIST = { AttrType.INT: False, AttrType.INTS: True, AttrType.FLOAT: False, AttrType.FLOATS: True, AttrType.STRING: False, AttrType.STRINGS: True, AttrType.BOOL: False, AttrType.BOOLS: True, } APPEND_BIAS_OP_TYPE = [ 'FC', 'mul', 'sequence_conv', 'conv2d', 'conv2d_transpose', 'depthwise_conv2d', 'elementwise_mul', ] APPEND_ACT_OP_TYPE = [ 'FC', 'mul', 'sequence_conv', 'conv2d', 'conv2d_transpose', 'batch_norm', 'layer_norm', 'row_conv', 'reshape', ]
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = r''' --- module: vcenter_folder short_description: Manage folders on given datacenter description: - This module can be used to create, delete, move and rename folder on then given datacenter. version_added: '2.5' author: - Abhijeet Kasurde (@akasurde) notes: - Tested on vSphere 6.5 requirements: - python >= 2.6 - PyVmomi options: datacenter: description: - Name of the datacenter. required: True folder_name: description: - Name of folder to be managed. - This is case sensitive parameter. - Folder name should be under 80 characters. This is a VMware restriction. required: True parent_folder: description: - Name of the parent folder under which new folder needs to be created. - This is case sensitive parameter. - Please specify unique folder name as there is no way to detect duplicate names. - "If user wants to create a folder under '/DC0/vm/vm_folder', this value will be 'vm_folder'." required: False folder_type: description: - This is type of folder. - "If set to C(vm), then 'VM and Template Folder' is created under datacenter." - "If set to C(host), then 'Host and Cluster Folder' is created under datacenter." - "If set to C(datastore), then 'Storage Folder' is created under datacenter." - "If set to C(network), then 'Network Folder' is created under datacenter." - This parameter is required, if C(state) is set to C(present) and parent_folder is absent. - This option is ignored, if C(parent_folder) is set. default: vm required: False choices: [ datastore, host, network, vm ] state: description: - State of folder. - If set to C(present) without parent folder parameter, then folder with C(folder_type) is created. - If set to C(present) with parent folder parameter, then folder in created under parent folder. C(folder_type) is ignored. - If set to C(absent), then folder is unregistered and destroyed. 
default: present choices: [ present, absent ] extends_documentation_fragment: vmware.documentation ''' EXAMPLES = r''' - name: Create a VM folder on given datacenter vcenter_folder: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' password: '{{ vcenter_password }}' datacenter: datacenter_name folder_name: sample_vm_folder folder_type: vm state: present register: vm_folder_creation_result - name: Create a datastore folder on given datacenter vcenter_folder: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' password: '{{ vcenter_password }}' datacenter: datacenter_name folder_name: sample_datastore_folder folder_type: datastore state: present register: datastore_folder_creation_result - name: Create a sub folder under VM folder on given datacenter vcenter_folder: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' password: '{{ vcenter_password }}' datacenter: datacenter_name folder_name: sample_sub_folder parent_folder: vm_folder state: present register: sub_folder_creation_result - name: Delete a VM folder on given datacenter vcenter_folder: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' password: '{{ vcenter_password }}' datacenter: datacenter_name folder_name: sample_vm_folder folder_type: vm state: absent register: vm_folder_deletion_result ''' RETURN = r''' result: description: - string stating about result returned: success type: string sample: "Folder 'sub_network_folder' of type 'vm' created under vm_folder successfully." ''' try: from pyVmomi import vim, vmodl except ImportError as e: pass from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi, find_datacenter_by_name, wait_for_task, get_all_objs from ansible.module_utils._text import to_native class VmwareFolderManager(PyVmomi): def __init__(self, module): super(VmwareFolderManager, self).__init__(module) datacenter_name = self.params.get('datacenter', None) self.datacenter_obj = find_datacenter_by_name(self.content, datacenter_name=datacenter_name) if self.datacenter_obj is None: self.module.fail_json(msg="Failed to find datacenter %s" % datacenter_name) def ensure(self): """ Function to manage internal state management Returns: """ state = self.module.params.get('state') folder_type = self.module.params.get('folder_type') folder_name = self.module.params.get('folder_name') parent_folder = self.module.params.get('parent_folder', None) results = dict(changed=False, result=dict()) if state == 'present': # Create a new folder try: if parent_folder: folder = self.get_folder_by_name(folder_name=parent_folder) if folder: folder.CreateFolder(folder_name) results['changed'] = True results['result'] = "Folder '%s' of type '%s' created under %s" \ " successfully." 
% (folder_name, folder_type, parent_folder) else: self.module.fail_json(msg="Failed to find the parent folder %s" " for folder %s" % (parent_folder, folder_name)) else: datacenter_folder_type = { 'vm': self.datacenter_obj.vmFolder, 'host': self.datacenter_obj.hostFolder, 'datastore': self.datacenter_obj.datastoreFolder, 'network': self.datacenter_obj.networkFolder, } datacenter_folder_type[folder_type].CreateFolder(folder_name) results['changed'] = True results['result'] = "Folder '%s' of type '%s' created successfully" % (folder_name, folder_type) except vim.fault.DuplicateName as duplicate_name: # To be consistent with the other vmware modules, We decided to accept this error # and the playbook should simply carry on with other tasks. # User will have to take care of this exception # https://github.com/ansible/ansible/issues/35388#issuecomment-362283078 results['changed'] = False results['result'] = "Failed to create folder as another object has same name" \ " in the same target folder : %s" % to_native(duplicate_name.msg) except vim.fault.InvalidName as invalid_name: self.module.fail_json(msg="Failed to create folder as folder name is not a valid " "entity name : %s" % to_native(invalid_name.msg)) except Exception as general_exc: self.module.fail_json(msg="Failed to create folder due to generic" " exception : %s " % to_native(general_exc)) self.module.exit_json(**results) elif state == 'absent': folder_obj = self.get_folder_by_name(folder_name=folder_name) if folder_obj: try: task = folder_obj.UnregisterAndDestroy() results['changed'], results['result'] = wait_for_task(task=task) except vim.fault.ConcurrentAccess as concurrent_access: self.module.fail_json(msg="Failed to remove folder as another client" " modified folder before this operation : %s" % to_native(concurrent_access.msg)) except vim.fault.InvalidState as invalid_state: self.module.fail_json(msg="Failed to remove folder as folder is in" " invalid state" % to_native(invalid_state.msg)) except Exception as e: self.module.fail_json(msg="Failed to remove folder due to generic" " exception %s " % to_native(e)) self.module.exit_json(**results) def get_folder_by_name(self, folder_name): """ Function to get managed object of folder by name Returns: Managed object of folder by name """ folder_objs = get_all_objs(self.content, [vim.Folder]) for folder in folder_objs: if folder.name == folder_name: return folder return None def main(): argument_spec = vmware_argument_spec() argument_spec.update( datacenter=dict(type='str', required=True), folder_name=dict(type='str', required=True), parent_folder=dict(type='str', required=False), state=dict(type='str', choices=['present', 'absent'], default='present'), folder_type=dict(type='str', default='vm', choices=['datastore', 'host', 'network', 'vm'], required=False), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=False, ) if len(module.params.get('folder_name')) > 79: module.fail_json(msg="Failed to manage folder as folder_name can only contain 80 characters.") vcenter_folder_mgr = VmwareFolderManager(module) vcenter_folder_mgr.ensure() if __name__ == "__main__": main()
import logging from django.core.mail import EmailMultiAlternatives, EmailMessage from django.utils.encoding import smart_text from django.core.urlresolvers import reverse from django.conf import settings from disturbance.components.emails.emails import TemplateEmailBase from ledger.accounts.models import EmailUser logger = logging.getLogger(__name__) SYSTEM_NAME = settings.SYSTEM_NAME_SHORT + ' Automated Message' class ApprovalExpireNotificationEmail(TemplateEmailBase): subject = 'Your Approval has expired.' html_template = 'disturbance/emails/approval_expire_notification.html' txt_template = 'disturbance/emails/approval_expire_notification.txt' class ApprovalCancelNotificationEmail(TemplateEmailBase): subject = 'Your Approval has been cancelled.' html_template = 'disturbance/emails/approval_cancel_notification.html' txt_template = 'disturbance/emails/approval_cancel_notification.txt' class ApprovalSuspendNotificationEmail(TemplateEmailBase): subject = 'Your Approval has been suspended.' html_template = 'disturbance/emails/approval_suspend_notification.html' txt_template = 'disturbance/emails/approval_suspend_notification.txt' class ApprovalSurrenderNotificationEmail(TemplateEmailBase): subject = 'Your Approval has been surrendered.' html_template = 'disturbance/emails/approval_surrender_notification.html' txt_template = 'disturbance/emails/approval_surrender_notification.txt' class ApprovalReinstateNotificationEmail(TemplateEmailBase): subject = 'Your Approval has been reinstated.' html_template = 'disturbance/emails/approval_reinstate_notification.html' txt_template = 'disturbance/emails/approval_reinstate_notification.txt' class ApprovalRenewalNotificationEmail(TemplateEmailBase): subject = 'Your Approval is due for renewal.' html_template = 'disturbance/emails/approval_renewal_notification.html' txt_template = 'disturbance/emails/approval_renewal_notification.txt' def send_approval_expire_email_notification(approval): email = ApprovalExpireNotificationEmail() proposal = approval.current_proposal context = { 'approval': approval, 'proposal': proposal } all_ccs = [] if proposal.applicant.email: cc_list = proposal.applicant.email if cc_list: all_ccs = [cc_list] msg = email.send(proposal.submitter.email,cc=all_ccs, context=context) sender = settings.DEFAULT_FROM_EMAIL try: sender_user = EmailUser.objects.get(email__icontains=sender) except: EmailUser.objects.create(email=sender, password='') sender_user = EmailUser.objects.get(email__icontains=sender) _log_approval_email(msg, approval, sender=sender_user) _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender_user) def send_approval_cancel_email_notification(approval, future_cancel=False): email = ApprovalCancelNotificationEmail() proposal = approval.current_proposal context = { 'approval': approval, 'future_cancel': future_cancel } all_ccs = [] if proposal.applicant.email: cc_list = proposal.applicant.email if cc_list: all_ccs = [cc_list] sender = settings.DEFAULT_FROM_EMAIL try: sender_user = EmailUser.objects.get(email__icontains=sender) except: EmailUser.objects.create(email=sender, password='') sender_user = EmailUser.objects.get(email__icontains=sender) msg = email.send(proposal.submitter.email, cc=all_ccs, context=context) sender = settings.DEFAULT_FROM_EMAIL _log_approval_email(msg, approval, sender=sender_user) _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender_user) def send_approval_suspend_email_notification(approval, future_suspend=False): email = ApprovalSuspendNotificationEmail() 
proposal = approval.current_proposal context = { 'approval': approval, 'details': approval.suspension_details['details'], 'from_date': approval.suspension_details['from_date'], 'to_date': approval.suspension_details['to_date'], 'future_suspend': future_suspend } all_ccs = [] if proposal.applicant.email: cc_list = proposal.applicant.email if cc_list: all_ccs = [cc_list] sender = settings.DEFAULT_FROM_EMAIL try: sender_user = EmailUser.objects.get(email__icontains=sender) except: EmailUser.objects.create(email=sender, password='') sender_user = EmailUser.objects.get(email__icontains=sender) msg = email.send(proposal.submitter.email, cc=all_ccs, context=context) sender = settings.DEFAULT_FROM_EMAIL _log_approval_email(msg, approval, sender=sender_user) _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender_user) def send_approval_surrender_email_notification(approval, future_surrender=False): email = ApprovalSurrenderNotificationEmail() proposal = approval.current_proposal context = { 'approval': approval, 'details': approval.surrender_details['details'], 'surrender_date': approval.surrender_details['surrender_date'], 'future_surrender': future_surrender } all_ccs = [] if proposal.applicant.email: cc_list = proposal.applicant.email if cc_list: all_ccs = [cc_list] sender = settings.DEFAULT_FROM_EMAIL try: sender_user = EmailUser.objects.get(email__icontains=sender) except: EmailUser.objects.create(email=sender, password='') sender_user = EmailUser.objects.get(email__icontains=sender) msg = email.send(proposal.submitter.email, cc=all_ccs, context=context) _log_approval_email(msg, approval, sender=sender_user) _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender_user) #approval renewal notice def send_approval_renewal_email_notification(approval): email = ApprovalRenewalNotificationEmail() proposal = approval.current_proposal context = { 'approval': approval, 'proposal': approval.current_proposal } all_ccs = [] if proposal.applicant.email: cc_list = proposal.applicant.email if cc_list: all_ccs = [cc_list] sender = settings.DEFAULT_FROM_EMAIL try: sender_user = EmailUser.objects.get(email__icontains=sender) except: EmailUser.objects.create(email=sender, password='') sender_user = EmailUser.objects.get(email__icontains=sender) #attach renewal notice renewal_document= approval.renewal_document._file if renewal_document is not None: file_name = approval.renewal_document.name attachment = (file_name, renewal_document.file.read(), 'application/pdf') attachment = [attachment] else: attachment = [] msg = email.send(proposal.submitter.email, cc=all_ccs, attachments=attachment, context=context) sender = settings.DEFAULT_FROM_EMAIL _log_approval_email(msg, approval, sender=sender_user) _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender_user) def send_approval_reinstate_email_notification(approval, request): email = ApprovalReinstateNotificationEmail() proposal = approval.current_proposal context = { 'approval': approval, } all_ccs = [] if proposal.applicant.email: cc_list = proposal.applicant.email if cc_list: all_ccs = [cc_list] msg = email.send(proposal.submitter.email,cc=all_ccs, context=context) sender = request.user if request else settings.DEFAULT_FROM_EMAIL _log_approval_email(msg, approval, sender=sender) _log_org_email(msg, proposal.applicant, proposal.submitter, sender=sender) def _log_approval_email(email_message, approval, sender=None): from disturbance.components.approvals.models import ApprovalLogEntry if isinstance(email_message, 
(EmailMultiAlternatives, EmailMessage,)): # TODO this will log the plain text body, should we log the html instead text = email_message.body subject = email_message.subject fromm = smart_text(sender) if sender else smart_text(email_message.from_email) # the to email is normally a list if isinstance(email_message.to, list): to = ','.join(email_message.to) else: to = smart_text(email_message.to) # we log the cc and bcc in the same cc field of the log entry as a ',' comma separated string all_ccs = [] if email_message.cc: all_ccs += list(email_message.cc) if email_message.bcc: all_ccs += list(email_message.bcc) all_ccs = ','.join(all_ccs) else: text = smart_text(email_message) subject = '' to = approval.current_proposal.submitter.email fromm = smart_text(sender) if sender else SYSTEM_NAME all_ccs = '' customer = approval.current_proposal.submitter staff = sender kwargs = { 'subject': subject, 'text': text, 'approval': approval, 'customer': customer, 'staff': staff, 'to': to, 'fromm': fromm, 'cc': all_ccs } email_entry = ApprovalLogEntry.objects.create(**kwargs) return email_entry def _log_org_email(email_message, organisation, customer ,sender=None): from disturbance.components.organisations.models import OrganisationLogEntry if isinstance(email_message, (EmailMultiAlternatives, EmailMessage,)): # TODO this will log the plain text body, should we log the html instead text = email_message.body subject = email_message.subject fromm = smart_text(sender) if sender else smart_text(email_message.from_email) # the to email is normally a list if isinstance(email_message.to, list): to = ','.join(email_message.to) else: to = smart_text(email_message.to) # we log the cc and bcc in the same cc field of the log entry as a ',' comma separated string all_ccs = [] if email_message.cc: all_ccs += list(email_message.cc) if email_message.bcc: all_ccs += list(email_message.bcc) all_ccs = ','.join(all_ccs) else: text = smart_text(email_message) subject = '' to = customer fromm = smart_text(sender) if sender else SYSTEM_NAME all_ccs = '' customer = customer staff = sender kwargs = { 'subject': subject, 'text': text, 'organisation': organisation, 'customer': customer, 'staff': staff, 'to': to, 'fromm': fromm, 'cc': all_ccs } email_entry = OrganisationLogEntry.objects.create(**kwargs) return email_entry
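Every `send_*` function above repeats the same sender lookup (`try: EmailUser.objects.get(...) except: create then re-fetch`). A small helper along the following lines, which is hypothetical and not part of the module, would remove that duplication and replace the bare `except` with the specific `DoesNotExist` exception.

```python
# Hypothetical consolidation of the repeated sender lookup in the send_* functions.
from django.conf import settings
from ledger.accounts.models import EmailUser

def get_sender_user(sender=None):
    """Return the EmailUser matching `sender`, creating it if it does not exist."""
    sender = sender or settings.DEFAULT_FROM_EMAIL
    try:
        return EmailUser.objects.get(email__icontains=sender)
    except EmailUser.DoesNotExist:
        return EmailUser.objects.create(email=sender, password='')
```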
# -*- coding: utf-8 - from iso8601 import parse_date from datetime import datetime, date, time, timedelta import dateutil.parser from pytz import timezone import os from decimal import Decimal import re TZ = timezone(os.environ['TZ'] if 'TZ' in os.environ else 'Europe/Kiev') def get_all_etender_dates(initial_tender_data): tender_period = initial_tender_data.tenderPeriod start_dt = dateutil.parser.parse(tender_period['startDate']) end_dt = dateutil.parser.parse(tender_period['endDate']) data = type('periods', (), { # dynamically creating objects instead of another dict 'tenderStart': type('date', (), {'date': start_dt.strftime("%d-%m-%Y"), 'time': start_dt.strftime("%H:%M")}), 'tenderEnd': type('date', (), {'date': end_dt.strftime("%d-%m-%Y"), 'time': end_dt.strftime("%H:%M")})}) if 'enquiryPeriod' in initial_tender_data: end_period = dateutil.parser.parse(initial_tender_data.enquiryPeriod['endDate']) data.enquiryEnd = type('date', (), {'date': end_period.strftime("%d-%m-%Y"), 'time': end_period.strftime("%H:%M")}) return data def get_procedure_type(methodType): return { 'aboveThresholdUA': 'Відкриті торги', 'belowThreshold': 'Допорогові закупівлі', 'negotiation': 'Переговорна процедура', 'aboveThresholdEU': 'Відкриті торги з публікацією англійською мовою', 'aboveThresholdUA.defense': 'Переговорна процедура для потреб оборони', 'reporting': 'Звіт про укладений договір', 'competitiveDialogueEU': 'Конкурентний діалог з публікацією англійською мовою 1-ий етап', 'competitiveDialogueUA': 'Конкурентний діалог 1-ий етап', 'open_esco': 'Відкриті торги для закупівлі енергосервісу', 'esco': 'Відкриті торги для закупівлі енергосервісу', 'closeFrameworkAgreementUA': 'Відкриті торги для укладання рамкової угоди', 'open_framework': 'Відкриті торгии для укладання рамкової угоди' }[methodType].decode('utf-8') def get_method_type(procedure_name): return { u'переговорна процедура для потреб оборони': 'aboveThresholdUA.defense', u'допорогові закупівлі': 'belowThreshold', u'відкриті торги з публікацією англійською мовою': 'aboveThresholdEU', u'переговорна процедура': 'negotiation', u'відкриті торги': 'aboveThresholdUA', u'конкурентний діалог 1-ий етап': 'competitiveDialogueUA', u'конкурентний діалог 2-ий етап': 'competitiveDialogueUA.stage2', u'звіт про укладений договір': 'reporting', u'відкриті торги для закупівлі енергосервісу': 'open_esco', u'відкриті торги для закупівлі енергосервісу': 'esco', u'конкурентний діалог з публікацією англійською мовою 1-ий етап': 'competitiveDialogueEU', u'конкурентний діалог з публікацією англійською мовою 2-ий етап': 'competitiveDialogueEU.stage2', u'відкриті торги для укладання рамкової угоди': 'closeFrameworkAgreementUA', u'відкриті торгии для укладання рамкової угоди': 'open_framework' }[procedure_name] def parse_etender_date(date, as_string=False): # converts date from ui to datetime d = datetime.strptime(date, '%d-%m-%Y, %H:%M') if as_string: return str(d) return d def cut_letters_and_parse_etender_date(date, as_string=True): # converts date from ui d = datetime.strptime(date.split(' ')[1], '%d-%m-%Y') if as_string: return str(d) return d def prepare_locator_to_scroll(locator): if locator[:3] == 'id=': return '//*[@id="{}"]'.format(locator[3:]) return locator[6:].replace("'", '"') # 6 for xpath= def to_iso(date): return date.isoformat() def convert_etender_date_to_iso_format(date): return TZ.localize(parse_etender_date(date)).isoformat() def convet_fra_to_variable(raw): b = re.findall(r'P(\d+)Y(\d+)M(\d+)D.*', raw) c, d, e = b[0] return c, d, e def 
convet_raw_to_chack(raw): raw = raw.replace(' ', '') b = re.findall(r'(\d+)р(\d+)м(\d+)д', raw) c, d, e = b[0] return c, d, e def get_year_from_full_date(string): data_as_str = string.split('T')[0] data_as_datetime = datetime.strptime(data_as_str, '%Y-%m-%d') return str(data_as_datetime.year) def convert_date_to_etender_format(isodate): iso_dt = parse_date(isodate) date_string = iso_dt.strftime("%d-%m-%Y") return date_string def convert_datetime_for_delivery(isodate): iso_dt = parse_date(isodate) date_string = iso_dt.strftime("%Y-%m-%d %H:%M") return date_string def convert_time_to_etender_format(isodate): iso_dt = parse_date(isodate) time_string = iso_dt.strftime("%H:%M") return time_string def float_to_string_2f(value): return '{:.2f}'.format(value) def float_to_string_3f(value): return '{:.3f}'.format(value) def string_to_float(string): return float(string) def change_data(initial_data): #TODO: remove redundant hardcoded values # initial_data['data']['procuringEntity']['identifier']['legalName'] = u"TenderOwner#" # initial_data['data']['procuringEntity']['identifier']['id'] = u"88008800" # initial_data['data']['procuringEntity']['name'] = u"TenderOwner#" initial_data['data']['items'][0]['deliveryAddress']['locality'] = u"м. Київ" initial_data['data']['items'][0]['deliveryAddress']['region'] = u"Київська область" initial_data['data']['procuringEntity']['address']['locality'] = u"Алупка" initial_data['data']['procuringEntity']['address']['postalCode'] = u"13531" initial_data['data']['procuringEntity']['address']['region'] = u"АР Крим" initial_data['data']['procuringEntity']['address']['streetAddress'] = u"Фрунзе, 666" initial_data['data']['procuringEntity']['contactPoint']['name'] = u"Владелец Этого Тендера" initial_data['data']['procuringEntity']['contactPoint']['telephone'] = u"613371488228" initial_data['data']['procuringEntity']['contactPoint']['url'] = u"http://e-tender.ua/" return initial_data def change_data_for_tender_owner(initial_data): initial_data['data']['procuringEntity']['identifier']['legalName'] = u"TenderOwner#" initial_data['data']['procuringEntity']['identifier']['id'] = u"88008800" initial_data['data']['procuringEntity']['name'] = u"TenderOwner#" return initial_data def change_buyers_data(initial_data): initial_data['data']['buyers'][0]['name'] = u"TenderOwner#" initial_data['data']['buyers'][0]['identifier']['id'] = u"88008800" initial_data['data']['buyers'][0]['identifier']['legalName'] = u"TenderOwner#" initial_data['data']['procuringEntity']['name'] = initial_data['data']['buyers'][0]['name'] initial_data['data']['procuringEntity']['identifier']['id'] = initial_data['data']['buyers'][0]['identifier']['id'] initial_data['data']['procuringEntity']['identifier']['legalName'] = \ initial_data['data']['buyers'][0]['identifier']['legalName'] return initial_data def convert_etender_date_to_iso_format_and_add_timezone(date): return TZ.localize(parse_etender_date(date)).isoformat() def get_time_now(): time_string = datetime.now().strftime("%H:%M") return time_string def get_date_now(): date_string = datetime.now().strftime("%d-%m-%Y") return date_string def get_date_10d_future(): date_string = (datetime.now() + timedelta(days=10)).strftime("%d-%m-%Y") return date_string def get_time_offset(add_minutes=17): _now = datetime.now() + timedelta(minutes=add_minutes) return _now.time().strftime('%H:%M') def convert_common_string_to_etender_string(string): dict = get_helper_dictionary() for key, val in dict.iteritems(): if val == string: return key return string def 
parse_currency_value_with_spaces(raw): # to convert raw values like '2 216 162,83 UAH' to string which is ready for conversion to float return ''.join(raw.split(' ')[:-1]).replace(',', '.') def get_minimalStep_currency(raw_value): # to get currency 'UAH' from raw values like '2 216 162,83 UAH' result_dic = raw_value.split(' ') result = result_dic[-1] return result def parse_currency_value_with_spaces_percentage(raw): # to convert raw values like '1,3244%' to string which is ready for conversion to float result = raw.replace('%', '') result = Decimal(result) result = (result / 100) result = float(result) return result def parse_currency_value_with_spaces_percentage_NBU(raw): # to convert raw values like 'Hi – 1,3244%' to string which is ready for conversion to float result = raw.split(' ', 4)[4] result = result.replace('%', '') result = Decimal(result) result = (result / 100) result = float(result) return result def convert_etender_string_to_common_string(string): return get_helper_dictionary().get(string, string) def get_helper_dictionary(): return { u"КЛАСИФІКАТОР ДК 021:2015 (CPV)": u"ДК021", u"кг.": u"кілограм", u"грн.": u"UAH", u"(з ПДВ)": True, u"з ПДВ": True, u"без ПДВ": False, # TODO: remove this temporary workaround, consult with quinta team about input data u"Дніпро": u"Дніпропетровськ", #tender statuses u'період уточнень': u'active.enquiries', u'очікування пропозицій': u'active.tendering', u'прекваліфікація': u'active.pre-qualification', u'оцінка пропозицій': u'active.pre-qualification', u'блокування перед аукціоном': u'active.pre-qualification.stand-still', u'проведення переговорів': u'active.pre-qualification.stand-still', u'перший проміжний етап': u'active.stage2.pending', u'період аукціону': u'active.auction', u'кваліфікація переможця': u'active.qualification', u'пропозиції розглянуто': u'active.awarded', u'завершена закупівля': u'complete', u'перший етап завершено': u'complete', u'закупівля не відбулась': u'unsuccessful', u'відмінена закупівля': u'cancelled', #bid statuses u'Пропозиція не дійсна': u'invalid', u"ст.35 ч. 2 п. 1": u"artContestIP", u"ст.35 ч. 2 п. 2": u"noCompetition", u"ст.35 ч. 2 п. 4": u"twiceUnsuccessful", u"ст.35 ч. 2 п. 5": u"additionalPurchase", u"ст.35 ч. 2 п. 6": u"additionalConstruction", u"ст.35 ч. 2 п. 
7": u"stateLegalServices", u"Договір поки що не опубліковано": u"pending", u"Договір опубліковано": u"active", u"Переможець торгів": u"active", u"учасник виграв закупівлю": u"active", u'вимога': u'claim', u'відповідь надана': u'answered', u'задоволено': u'resolved', u'не задоволено': u'declined', u'скасована скаржником': u'cancelled', u'відхилено': u'invalid', u'залишена без відповіді': u'ignored', u'очікується кваліфікація': u'pending', u'відкликається скаржником': u'stopping', u'очікує розгляду органом оскарження': u'pending', u'Співфінансування з бюджетних коштів': u'budget', u'на розгляді': u'pending', u'Пропозиція не активована': u'invalid' } def get_feature_index(i): return {0.05: '1', 0.01: '2', 0: '3'}[i] def get_doc_type_index(i): return {'financial_documents': '1', 'qualification_documents': '2', 'eligibility_documents': '3'}.get(i, i) def convert_unit_name_to_unit_code(string): return { u"блок": u"D64", u"гектар": u"HAR", u"кілограми": u"KGM", u"кілометри": u"KMT", u"літр": u"LTR", u"лот": u"LO", u"метри квадратні": u"MTK", u"метри кубічні": u"MTQ", u"метри": u"MTR", u"місяць": u"MON", u"набір": u"SET", u"пара": u"PR", u"пачка": u"RM", u"пачок": u"NMP", u"послуга": u"E48", u"рейс": u"E54", u"тони": u"TNE", u"упаковка": u"PK", u"Флакон": u"VI", u"штуки": u"H87", u"ящик": u"BX", }.get(string, string) def convert_milestone_from_text_to_code(string): return { u"Аванс": u"prepayment", u"Пiсляоплата": u"postpayment" }.get(string, string) def convert_milestone_from_text_to_title(string): return { u"Виконання робіт": "executionOfWorks", u"Поставка товару": "deliveryOfGoods", u"Надання послуг": "submittingServices", u"Підписання договору": "signingTheContract", u"Дата подання заявки": "submissionDateOfApplications", u"Дата виставлення рахунку": "dateOfInvoicing", u"Дата закінчення звітного періоду": "endDateOfTheReportingPeriod", u"Інша подія": "anotherEvent", }.get(string, string) def convert_milestone_from_text_to_day_type(string): return { u"Робочі": "working", u"Банківські": "banking", u"Календарні": "calendar" }.get(string, string) def convert_main_procurement_category(string): return { u"Товари": "goods", u"Послуги": "services", u"Роботи": "works" }.get(string, string) def get_modulus_from_number(number): if isinstance(number, int): pass elif isinstance(number, str): number = int(number) elif isinstance(number, unicode): number = int(number) return abs(number)
import os import pytest import sys import random import tempfile import requests from pathlib import Path import ray from ray.test_utils import (run_string_as_driver, run_string_as_driver_nonblocking) from ray._private.utils import (get_wheel_filename, get_master_wheel_url, get_release_wheel_url) import ray.experimental.internal_kv as kv from time import sleep driver_script = """ from time import sleep import sys import logging sys.path.insert(0, "{working_dir}") import ray import ray.util import os try: import test_module except: pass try: job_config = ray.job_config.JobConfig( runtime_env={runtime_env} ) if not job_config.runtime_env: job_config=None if os.environ.get("USE_RAY_CLIENT"): ray.client("{address}").env({runtime_env}).namespace("").connect() else: ray.init(address="{address}", job_config=job_config, logging_level=logging.DEBUG, namespace="" ) except ValueError: print("ValueError") sys.exit(0) except TypeError: print("TypeError") sys.exit(0) except: print("ERROR") sys.exit(0) if os.environ.get("EXIT_AFTER_INIT"): sys.exit(0) @ray.remote def run_test(): return test_module.one() @ray.remote def check_file(name): try: with open(name) as f: return f.read() except: return "FAILED" @ray.remote class TestActor(object): @ray.method(num_returns=1) def one(self): return test_module.one() {execute_statement} if os.environ.get("USE_RAY_CLIENT"): ray.util.disconnect() else: ray.shutdown() sleep(10) """ def create_file(p): if not p.parent.exists(): p.parent.mkdir() with p.open("w") as f: f.write("Test") @pytest.fixture(scope="function") def working_dir(): with tempfile.TemporaryDirectory() as tmp_dir: path = Path(tmp_dir) module_path = path / "test_module" module_path.mkdir(parents=True) init_file = module_path / "__init__.py" test_file = module_path / "test.py" with test_file.open(mode="w") as f: f.write(""" def one(): return 1 """) with init_file.open(mode="w") as f: f.write(""" from test_module.test import one """) old_dir = os.getcwd() os.chdir(tmp_dir) yield tmp_dir os.chdir(old_dir) def start_client_server(cluster, client_mode): from ray._private.runtime_env import PKG_DIR if not client_mode: return (cluster.address, {}, PKG_DIR) ray.worker._global_node._ray_params.ray_client_server_port = "10003" ray.worker._global_node.start_ray_client_server() return ("localhost:10003", {"USE_RAY_CLIENT": "1"}, PKG_DIR) @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") def test_travel(): import uuid with tempfile.TemporaryDirectory() as tmp_dir: dir_paths = set() file_paths = set() item_num = 0 excludes = [] root = Path(tmp_dir) / "test" def construct(path, excluded=False, depth=0): nonlocal item_num path.mkdir(parents=True) if not excluded: dir_paths.add(str(path)) if depth > 8: return if item_num > 500: return dir_num = random.randint(0, 10) file_num = random.randint(0, 10) for _ in range(dir_num): uid = str(uuid.uuid4()).split("-")[0] dir_path = path / uid exclud_sub = random.randint(0, 5) == 0 if not excluded and exclud_sub: excludes.append(str(dir_path.relative_to(root))) if not excluded: construct(dir_path, exclud_sub or excluded, depth + 1) item_num += 1 if item_num > 1000: return for _ in range(file_num): uid = str(uuid.uuid4()).split("-")[0] with (path / uid).open("w") as f: v = random.randint(0, 1000) f.write(str(v)) if not excluded: if random.randint(0, 5) == 0: excludes.append( str((path / uid).relative_to(root))) else: file_paths.add((str(path / uid), str(v))) item_num += 1 construct(root) exclude_spec = ray._private.runtime_env._get_excludes(root, 
excludes) visited_dir_paths = set() visited_file_paths = set() def handler(path): if path.is_dir(): visited_dir_paths.add(str(path)) else: with open(path) as f: visited_file_paths.add((str(path), f.read())) ray._private.runtime_env._dir_travel(root, [exclude_spec], handler) assert file_paths == visited_file_paths assert dir_paths == visited_dir_paths """ The following test cases are related with runtime env. It following these steps 1) Creating a temporary dir with fixture working_dir 2) Using a template named driver_script defined globally 3) Overwrite runtime_env and execute_statement in the template 4) Execute it as a separate driver and return the result """ @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_empty_working_dir(ray_start_cluster_head, client_mode): cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, client_mode) env["EXIT_AFTER_INIT"] = "1" with tempfile.TemporaryDirectory() as working_dir: runtime_env = f"""{{ "working_dir": r"{working_dir}", "py_modules": [r"{working_dir}"] }}""" # Execute the following cmd in driver with runtime_env execute_statement = "sys.exit(0)" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out != "ERROR" @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_invalid_working_dir(ray_start_cluster_head, working_dir, client_mode): cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, client_mode) env["EXIT_AFTER_INIT"] = "1" runtime_env = "{ 'working_dir': 10 }" # Execute the following cmd in driver with runtime_env execute_statement = "" script = driver_script.format(**locals()) out = run_string_as_driver(script, env).strip().split()[-1] assert out == "TypeError" runtime_env = "{ 'py_modules': [10] }" # Execute the following cmd in driver with runtime_env execute_statement = "" script = driver_script.format(**locals()) out = run_string_as_driver(script, env).strip().split()[-1] assert out == "TypeError" runtime_env = f"{{ 'working_dir': os.path.join(r'{working_dir}', 'na') }}" # Execute the following cmd in driver with runtime_env execute_statement = "" script = driver_script.format(**locals()) out = run_string_as_driver(script, env).strip().split()[-1] assert out == "ValueError" runtime_env = f"{{ 'py_modules': [os.path.join(r'{working_dir}', 'na')] }}" # Execute the following cmd in driver with runtime_env execute_statement = "" script = driver_script.format(**locals()) out = run_string_as_driver(script, env).strip().split()[-1] assert out == "ValueError" @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_single_node(ray_start_cluster_head, working_dir, client_mode): cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, client_mode) # Setup runtime env here runtime_env = f"""{{ "working_dir": "{working_dir}" }}""" # Execute the following cmd in driver with runtime_env execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "1000" assert len(list(Path(PKG_DIR).iterdir())) == 1 assert len(kv._internal_kv_list("gcs://")) == 0 @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") 
@pytest.mark.parametrize("client_mode", [True, False]) def test_two_node(two_node_cluster, working_dir, client_mode): cluster, _ = two_node_cluster (address, env, PKG_DIR) = start_client_server(cluster, client_mode) # Testing runtime env with working_dir runtime_env = f"""{{ "working_dir": "{working_dir}" }}""" # Execute the following cmd in driver with runtime_env execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "1000" assert len(list(Path(PKG_DIR).iterdir())) == 1 assert len(kv._internal_kv_list("gcs://")) == 0 @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_two_node_module(two_node_cluster, working_dir, client_mode): cluster, _ = two_node_cluster (address, env, PKG_DIR) = start_client_server(cluster, client_mode) # test runtime_env iwth py_modules runtime_env = """{ "py_modules": [test_module.__path__[0]] }""" # Execute the following cmd in driver with runtime_env execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "1000" assert len(list(Path(PKG_DIR).iterdir())) == 1 @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_two_node_local_file(two_node_cluster, working_dir, client_mode): with open(os.path.join(working_dir, "test_file"), "w") as f: f.write("1") cluster, _ = two_node_cluster (address, env, PKG_DIR) = start_client_server(cluster, client_mode) # test runtime_env iwth working_dir runtime_env = f"""{{ "working_dir": "{working_dir}" }}""" # Execute the following cmd in driver with runtime_env execute_statement = """ vals = ray.get([check_file.remote('test_file')] * 1000) print(sum([int(v) for v in vals])) """ script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "1000" assert len(list(Path(PKG_DIR).iterdir())) == 1 assert len(kv._internal_kv_list("gcs://")) == 0 @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_exclusion(ray_start_cluster_head, working_dir, client_mode): cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, client_mode) working_path = Path(working_dir) create_file(working_path / "tmp_dir" / "test_1") create_file(working_path / "tmp_dir" / "test_2") create_file(working_path / "tmp_dir" / "test_3") create_file(working_path / "tmp_dir" / "sub_dir" / "test_1") create_file(working_path / "tmp_dir" / "sub_dir" / "test_2") create_file(working_path / "test1") create_file(working_path / "test2") create_file(working_path / "test3") tmp_dir_test_3 = str((working_path / "tmp_dir" / "test_3").absolute()) runtime_env = f"""{{ "working_dir": r"{working_dir}", }}""" execute_statement = """ vals = ray.get([ check_file.remote('test1'), check_file.remote('test2'), check_file.remote('test3'), check_file.remote(os.path.join('tmp_dir', 'test_1')), check_file.remote(os.path.join('tmp_dir', 'test_2')), check_file.remote(os.path.join('tmp_dir', 'test_3')), check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_1')), check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_2')), ]) print(','.join(vals)) """ script = driver_script.format(**locals()) 
out = run_string_as_driver(script, env) # Test it works before assert out.strip().split("\n")[-1] == \ "Test,Test,Test,Test,Test,Test,Test,Test" runtime_env = f"""{{ "working_dir": r"{working_dir}", "excludes": [ # exclude by relative path r"test2", # exclude by dir r"{str(Path("tmp_dir") / "sub_dir")}", # exclude part of the dir r"{str(Path("tmp_dir") / "test_1")}", # exclude part of the dir r"{str(Path("tmp_dir") / "test_2")}", ] }}""" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split("\n")[-1] == \ "Test,FAILED,Test,FAILED,FAILED,Test,FAILED,FAILED" @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_exclusion_2(ray_start_cluster_head, working_dir, client_mode): cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, client_mode) working_path = Path(working_dir) def create_file(p): if not p.parent.exists(): p.parent.mkdir(parents=True) with p.open("w") as f: f.write("Test") create_file(working_path / "tmp_dir" / "test_1") create_file(working_path / "tmp_dir" / "test_2") create_file(working_path / "tmp_dir" / "test_3") create_file(working_path / "tmp_dir" / "sub_dir" / "test_1") create_file(working_path / "tmp_dir" / "sub_dir" / "test_2") create_file(working_path / "test1") create_file(working_path / "test2") create_file(working_path / "test3") create_file(working_path / "cache" / "test_1") create_file(working_path / "tmp_dir" / "cache" / "test_1") create_file(working_path / "another_dir" / "cache" / "test_1") tmp_dir_test_3 = str((working_path / "tmp_dir" / "test_3").absolute()) runtime_env = f"""{{ "working_dir": r"{working_dir}", }}""" execute_statement = """ vals = ray.get([ check_file.remote('test1'), check_file.remote('test2'), check_file.remote('test3'), check_file.remote(os.path.join('tmp_dir', 'test_1')), check_file.remote(os.path.join('tmp_dir', 'test_2')), check_file.remote(os.path.join('tmp_dir', 'test_3')), check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_1')), check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_2')), check_file.remote(os.path.join("cache", "test_1")), check_file.remote(os.path.join("tmp_dir", "cache", "test_1")), check_file.remote(os.path.join("another_dir", "cache", "test_1")), ]) print(','.join(vals)) """ script = driver_script.format(**locals()) out = run_string_as_driver(script, env) # Test it works before assert out.strip().split("\n")[-1] == \ "Test,Test,Test,Test,Test,Test,Test,Test,Test,Test,Test" with open(f"{working_dir}/.gitignore", "w") as f: f.write(""" # Comment test_[12] /test1 !/tmp_dir/sub_dir/test_1 cache/ """) script = driver_script.format(**locals()) out = run_string_as_driver(script, env) t = out.strip().split("\n")[-1] assert out.strip().split("\n")[-1] == \ "FAILED,Test,Test,FAILED,FAILED,Test,Test,FAILED,FAILED,FAILED,FAILED" @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_runtime_env_getter(ray_start_cluster_head, working_dir, client_mode): cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, client_mode) runtime_env = f"""{{ "working_dir": "{working_dir}" }}""" # Execute the following cmd in driver with runtime_env execute_statement = """ print(ray.get_runtime_context().runtime_env["working_dir"]) """ script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert 
out.strip().split()[-1] == working_dir @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_two_node_uri(two_node_cluster, working_dir, client_mode): cluster, _ = two_node_cluster (address, env, PKG_DIR) = start_client_server(cluster, client_mode) import ray._private.runtime_env as runtime_env import tempfile with tempfile.NamedTemporaryFile(suffix="zip") as tmp_file: pkg_name = runtime_env.get_project_package_name(working_dir, [], []) pkg_uri = runtime_env.Protocol.PIN_GCS.value + "://" + pkg_name runtime_env.create_project_package(working_dir, [], [], tmp_file.name) runtime_env.push_package(pkg_uri, tmp_file.name) runtime_env = f"""{{ "uris": ["{pkg_uri}"] }}""" # Execute the following cmd in driver with runtime_env execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "1000" assert len(list(Path(PKG_DIR).iterdir())) == 1 # pinned uri will not be deleted print(list(kv._internal_kv_list(""))) assert len(kv._internal_kv_list("pingcs://")) == 1 @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_regular_actors(ray_start_cluster_head, working_dir, client_mode): cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, client_mode) runtime_env = f"""{{ "working_dir": "{working_dir}" }}""" # Execute the following cmd in driver with runtime_env execute_statement = """ test_actor = TestActor.options(name="test_actor").remote() print(sum(ray.get([test_actor.one.remote()] * 1000))) """ script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "1000" assert len(list(Path(PKG_DIR).iterdir())) == 1 assert len(kv._internal_kv_list("gcs://")) == 0 @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") @pytest.mark.parametrize("client_mode", [True, False]) def test_detached_actors(ray_start_cluster_head, working_dir, client_mode): cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, client_mode) runtime_env = f"""{{ "working_dir": "{working_dir}" }}""" # Execute the following cmd in driver with runtime_env execute_statement = """ test_actor = TestActor.options(name="test_actor", lifetime="detached").remote() print(sum(ray.get([test_actor.one.remote()] * 1000))) """ script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "1000" # It's a detached actors, so it should still be there assert len(kv._internal_kv_list("gcs://")) == 1 assert len(list(Path(PKG_DIR).iterdir())) == 2 pkg_dir = [f for f in Path(PKG_DIR).glob("*") if f.is_dir()][0] import sys sys.path.insert(0, str(pkg_dir)) test_actor = ray.get_actor("test_actor") assert sum(ray.get([test_actor.one.remote()] * 1000)) == 1000 ray.kill(test_actor) from time import sleep sleep(5) assert len(list(Path(PKG_DIR).iterdir())) == 1 assert len(kv._internal_kv_list("gcs://")) == 0 @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") def test_jobconfig_compatible_1(ray_start_cluster_head, working_dir): # start job_config=None # start job_config=something cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, True) runtime_env = None # To make the first one hanging there execute_statement 
= """ sleep(600) """ script = driver_script.format(**locals()) # Have one running with job config = None proc = run_string_as_driver_nonblocking(script, env) # waiting it to be up sleep(5) runtime_env = f"""{{ "working_dir": "{working_dir}" }}""" # Execute the second one which should work because Ray Client servers. execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "1000" proc.kill() proc.wait() @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") def test_jobconfig_compatible_2(ray_start_cluster_head, working_dir): # start job_config=something # start job_config=None cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, True) runtime_env = """{ "py_modules": [test_module.__path__[0]] }""" # To make the first one hanging there execute_statement = """ sleep(600) """ script = driver_script.format(**locals()) proc = run_string_as_driver_nonblocking(script, env) sleep(5) runtime_env = None # Execute the following in the second one which should # succeed execute_statement = "print('OK')" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) assert out.strip().split()[-1] == "OK", out proc.kill() proc.wait() @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") def test_jobconfig_compatible_3(ray_start_cluster_head, working_dir): # start job_config=something # start job_config=something else cluster = ray_start_cluster_head (address, env, PKG_DIR) = start_client_server(cluster, True) runtime_env = """{ "py_modules": [test_module.__path__[0]] }""" # To make the first one hanging ther execute_statement = """ sleep(600) """ script = driver_script.format(**locals()) proc = run_string_as_driver_nonblocking(script, env) sleep(5) runtime_env = f""" {{ "working_dir": test_module.__path__[0] }}""" # noqa: F541 # Execute the following cmd in the second one and ensure that # it is able to run. 
execute_statement = "print('OK')" script = driver_script.format(**locals()) out = run_string_as_driver(script, env) proc.kill() proc.wait() assert out.strip().split()[-1] == "OK" @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") def test_util_without_job_config(shutdown_only): from ray.cluster_utils import Cluster with tempfile.TemporaryDirectory() as tmp_dir: with (Path(tmp_dir) / "lib.py").open("w") as f: f.write(""" def one(): return 1 """) old_dir = os.getcwd() os.chdir(tmp_dir) cluster = Cluster() cluster.add_node(num_cpus=1) ray.init(address=cluster.address) (address, env, PKG_DIR) = start_client_server(cluster, True) script = f""" import ray import ray.util import os ray.util.connect("{address}", job_config=None) @ray.remote def run(): from lib import one return one() print(ray.get([run.remote()])[0]) """ out = run_string_as_driver(script, env) print(out) os.chdir(old_dir) @pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.") def test_init(shutdown_only): with tempfile.TemporaryDirectory() as tmp_dir: old_dir = os.getcwd() os.chdir(tmp_dir) with open("hello", "w") as f: f.write("world") job_config = ray.job_config.JobConfig(runtime_env={"working_dir": "."}) ray.init(job_config=job_config) @ray.remote class Test: def test(self): with open("hello") as f: return f.read() t = Test.remote() assert ray.get(t.test.remote()) == "world" os.chdir(old_dir) def test_get_wheel_filename(): ray_version = "2.0.0.dev0" for sys_platform in ["darwin", "linux", "win32"]: for py_version in ["36", "37", "38"]: filename = get_wheel_filename(sys_platform, ray_version, py_version) prefix = "https://s3-us-west-2.amazonaws.com/ray-wheels/latest/" url = f"{prefix}{filename}" assert requests.head(url).status_code == 200 def test_get_master_wheel_url(): ray_version = "2.0.0.dev0" test_commit = "ba6cebe30fab6925e5b2d9e859ad064d53015246" for sys_platform in ["darwin", "linux", "win32"]: for py_version in ["36", "37", "38"]: url = get_master_wheel_url(test_commit, sys_platform, ray_version, py_version) assert requests.head(url).status_code == 200, url def test_get_release_wheel_url(): test_commits = { "1.4.0rc1": "e7c7f6371a69eb727fa469e4cd6f4fbefd143b4c", "1.3.0": "0b4b444fadcdc23226e11fef066b982175804232", "1.2.0": "1b1a2496ca51b745c07c79fb859946d3350d471b" } for sys_platform in ["darwin", "linux", "win32"]: for py_version in ["36", "37", "38"]: for version, commit in test_commits.items(): url = get_release_wheel_url(commit, sys_platform, version, py_version) assert requests.head(url).status_code == 200, url @pytest.mark.skipif( sys.platform == "win32", reason="runtime_env unsupported on Windows.") def test_decorator_task(ray_start_cluster_head): @ray.remote(runtime_env={"env_vars": {"foo": "bar"}}) def f(): return os.environ.get("foo") assert ray.get(f.remote()) == "bar" @pytest.mark.skipif( sys.platform == "win32", reason="runtime_env unsupported on Windows.") def test_decorator_actor(ray_start_cluster_head): @ray.remote(runtime_env={"env_vars": {"foo": "bar"}}) class A: def g(self): return os.environ.get("foo") a = A.remote() assert ray.get(a.g.remote()) == "bar" @pytest.mark.skipif( sys.platform == "win32", reason="runtime_env unsupported on Windows.") def test_decorator_complex(shutdown_only): ray.init( job_config=ray.job_config.JobConfig( runtime_env={"env_vars": { "foo": "job" }})) @ray.remote def env_from_job(): return os.environ.get("foo") assert ray.get(env_from_job.remote()) == "job" @ray.remote(runtime_env={"env_vars": {"foo": "task"}}) def f(): 
return os.environ.get("foo") assert ray.get(f.remote()) == "task" @ray.remote(runtime_env={"env_vars": {"foo": "actor"}}) class A: def g(self): return os.environ.get("foo") a = A.remote() assert ray.get(a.g.remote()) == "actor" # Test that runtime_env can be overridden by specifying .options(). assert ray.get( f.options(runtime_env={ "env_vars": { "foo": "new" } }).remote()) == "new" a = A.options(runtime_env={"env_vars": {"foo": "new2"}}).remote() assert ray.get(a.g.remote()) == "new2" def test_container_option_serialize(): runtime_env = { "container": { "image": "ray:latest", "run_options": ["--name=test"] } } job_config = ray.job_config.JobConfig(runtime_env=runtime_env) job_config_serialized = job_config.serialize() # job_config_serialized is JobConfig protobuf serialized string, # job_config.runtime_env.raw_json has container_option info # job_config.serialized_runtime_env also has container_option info assert job_config_serialized.count(b"image") == 2 def test_working_dir_override_failure(shutdown_only): ray.init() @ray.remote(runtime_env={"working_dir": "."}) def f(): pass with pytest.raises(NotImplementedError): f.remote() @ray.remote def g(): pass with pytest.raises(NotImplementedError): g.options(runtime_env={"working_dir": "."}).remote() @ray.remote(runtime_env={"working_dir": "."}) class A: pass with pytest.raises(NotImplementedError): A.remote() @ray.remote class B: pass with pytest.raises(NotImplementedError): B.options(runtime_env={"working_dir": "."}).remote() if __name__ == "__main__": import sys sys.exit(pytest.main(["-sv", __file__]))
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///posts.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# app.config['FLASK_RUN_PORT'] = 5002
db = SQLAlchemy(app)
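# A minimal sketch of how the `db` handle configured above is typically used:
# a hypothetical Post model plus table creation. The model name and columns
# are illustrative assumptions, not taken from the original application.
class Post(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100), nullable=False)
    content = db.Column(db.Text, nullable=False)

    def __repr__(self):
        return '<Post %r>' % self.title


if __name__ == '__main__':
    # Create posts.db with the table above, then start the development server.
    with app.app_context():
        db.create_all()
    app.run(debug=True)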
# coding: utf-8

"""
    SendinBlue API

    SendinBlue provide a RESTFul API that can be used with any languages. With this API, you will be able to : - Manage your campaigns and get the statistics - Manage your contacts - Send transactional Emails and SMS - and much more... You can download our wrappers at https://github.com/orgs/sendinblue **Possible responses** | Code | Message | | :-------------: | ------------- | | 200 | OK. Successful Request | | 201 | OK. Successful Creation | | 202 | OK. Request accepted | | 204 | OK. Successful Update/Deletion | | 400 | Error. Bad Request | | 401 | Error. Authentication Needed | | 402 | Error. Not enough credit, plan upgrade needed | | 403 | Error. Permission denied | | 404 | Error. Object does not exist | | 405 | Error. Method not allowed |  # noqa: E501

    OpenAPI spec version: 3.0.0
    Contact: contact@sendinblue.com
    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from __future__ import absolute_import

import unittest

import sib_api_v3_sdk
from sib_api_v3_sdk.models.get_account_plan import GetAccountPlan  # noqa: E501
from sib_api_v3_sdk.rest import ApiException


class TestGetAccountPlan(unittest.TestCase):
    """GetAccountPlan unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testGetAccountPlan(self):
        """Test GetAccountPlan"""
        # FIXME: construct object with mandatory attributes with example values
        # model = sib_api_v3_sdk.models.get_account_plan.GetAccountPlan()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
# coding: utf-8 import math import numpy as np import torch import torch.nn as nn from torch.autograd import Variable import torch.nn.functional as F def conv3x3(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) def conv1x1(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=1) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, se=False): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride self.se = se if(self.se): self.gap = nn.AdaptiveAvgPool2d(1) self.conv3 = conv1x1(planes, planes//16) self.conv4 = conv1x1(planes//16, planes) def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) if(self.se): w = self.gap(out) w = self.conv3(w) w = self.relu(w) w = self.conv4(w).sigmoid() out = out * w out = out + residual out = self.relu(out) return out class ResNet(nn.Module): def __init__(self, block, layers, se=False): self.inplanes = 64 super(ResNet, self).__init__() self.se = se self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) self.avgpool = nn.AdaptiveAvgPool2d(1) self.bn = nn.BatchNorm1d(512) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. 
/ n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.BatchNorm1d): m.weight.data.fill_(1) m.bias.data.zero_() def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample, se=self.se)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes, se=self.se)) return nn.Sequential(*layers) def forward(self, x): x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.avgpool(x) x = x.view(x.size(0), -1) x = self.bn(x) return x class VideoCNN(nn.Module): def __init__(self, se=False): super(VideoCNN, self).__init__() # frontend3D self.frontend3D = nn.Sequential( nn.Conv3d(1, 64, kernel_size=(5, 7, 7), stride=(1, 2, 2), padding=(2, 3, 3), bias=False), nn.BatchNorm3d(64), nn.ReLU(True), nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1)) ) # resnet self.resnet18 = ResNet(BasicBlock, [2, 2, 2, 2], se=se) self.dropout = nn.Dropout(p=0.5) # backend_gru # initialize self._initialize_weights() def visual_frontend_forward(self, x): x = x.transpose(1, 2) x = self.frontend3D(x) x = x.transpose(1, 2) x = x.contiguous() x = x.view(-1, 64, x.size(3), x.size(4)) x = self.resnet18(x) return x def forward(self, x): b, t = x.size()[:2] x = self.visual_frontend_forward(x) #x = self.dropout(x) feat = x.view(b, -1, 512) x = x.view(b, -1, 512) return x def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv3d): n = m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[2] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.Conv1d): n = m.kernel_size[0] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.BatchNorm3d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.BatchNorm1d): m.weight.data.fill_(1) m.bias.data.zero_()
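# A quick shape check for VideoCNN. The 16-frame clip length and the 88x88
# crop are assumptions chosen for illustration (a common lipreading input
# size), not values mandated by this file; only the trailing feature width of
# 512 is fixed by the network.
if __name__ == '__main__':
    model = VideoCNN(se=True)
    model.eval()
    # Input is (batch, time, channels, height, width); visual_frontend_forward
    # transposes to (batch, channels, time, height, width) for the 3D frontend.
    clip = torch.randn(2, 16, 1, 88, 88)
    with torch.no_grad():
        feats = model(clip)
    print(feats.shape)  # torch.Size([2, 16, 512])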
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.

"""
Tweepy Twitter API library
"""
__version__ = '3.2.0'
__author__ = 'Joshua Roesslein'
__license__ = 'MIT'

from tweepy.models import Status, User, DirectMessage, Friendship, SavedSearch, SearchResults, ModelFactory, Category
from tweepy.error import TweepError
from tweepy.api import API
from tweepy.cache import Cache, MemoryCache, FileCache
from tweepy.auth import OAuthHandler, AppAuthHandler
from tweepy.streaming import Stream, StreamListener
from tweepy.cursor import Cursor

# Global, unauthenticated instance of API
api = API()


def debug(enable=True, level=1):
    from six.moves.http_client import HTTPConnection
    HTTPConnection.debuglevel = level
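# A minimal sketch of the public surface re-exported above: authenticate with
# OAuthHandler, build an API client, and read the home timeline. This is not
# part of tweepy itself; the four credential strings are placeholders, not
# real keys.
if __name__ == '__main__':
    auth = OAuthHandler('CONSUMER_KEY', 'CONSUMER_SECRET')
    auth.set_access_token('ACCESS_TOKEN', 'ACCESS_TOKEN_SECRET')
    client = API(auth)
    for status in client.home_timeline(count=5):
        print(status.text)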
#!/Users/julian/.local/share/virtualenvs/great/bin/pypy
import json
import sys

from great.models import music
from great.web import engine_from_config
from pyperclip import copy
from sqlalchemy import sql
from titlecase import titlecase


e = engine_from_config()


def canonicalize(artist):
    if artist.isupper():
        return artist
    return titlecase(artist)


def spotify_uri(artist):
    return e.execute(
        sql.select(
            [
                music.artists.c.id,
                music.artists.c.name,
                music.artists.c.spotify_uri,
            ],
        ).where(music.artists.c.name.like(artist)),
    ).fetchone()


with open("/dev/tty") as tty:
    for line in sys.stdin:
        as_dict = json.loads(line)
        artist, uri = canonicalize(as_dict["name"]), as_dict["uri"]
        result = spotify_uri(artist)
        if result is None:
            print "Didn't find:", artist
        elif result.spotify_uri is None:
            e.execute(
                sql.update(music.artists).where(
                    music.artists.c.id == result.id,
                ).values(spotify_uri=as_dict["uri"]),
            )
        elif result.spotify_uri != uri:
            sys.exit(
                "Wat! {!r} has current ID {!r}, not {!r}".format(
                    artist,
                    result.spotify_uri,
                    uri,
                ),
            )
# Copyright (c) 2017 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import shutil import subprocess import uuid from nose import SkipTest import six from six.moves.urllib.parse import quote from swift.common import direct_client, utils from swift.common.manager import Manager from swift.common.memcached import MemcacheRing from swift.common.utils import ShardRange, parse_db_filename, get_db_files, \ quorum_size, config_true_value, Timestamp, md5 from swift.container.backend import ContainerBroker, UNSHARDED, SHARDING, \ SHARDED from swift.container.sharder import CleavingContext, ContainerSharder from swift.container.replicator import ContainerReplicator from swiftclient import client, get_auth, ClientException from swift.proxy.controllers.base import get_cache_key from swift.proxy.controllers.obj import num_container_updates from test import annotate_failure from test.probe import PROXY_BASE_URL from test.probe.brain import BrainSplitter from test.probe.common import ReplProbeTest, get_server_number, \ wait_for_server_to_hangup import mock MIN_SHARD_CONTAINER_THRESHOLD = 4 MAX_SHARD_CONTAINER_THRESHOLD = 100 class ShardCollector(object): """ Returns map of node to tuples of (headers, shard ranges) returned from node """ def __init__(self): self.ranges = {} def __call__(self, cnode, cpart, account, container): self.ranges[cnode['id']] = direct_client.direct_get_container( cnode, cpart, account, container, headers={'X-Backend-Record-Type': 'shard'}) class BaseTestContainerSharding(ReplProbeTest): DELIM = '-' def _maybe_skip_test(self): try: cont_configs = [ utils.readconf(p, 'container-sharder') for p in self.configs['container-sharder'].values()] except ValueError: raise SkipTest('No [container-sharder] section found in ' 'container-server configs') skip_reasons = [] auto_shard = all(config_true_value(c.get('auto_shard', False)) for c in cont_configs) if not auto_shard: skip_reasons.append( 'auto_shard must be true in all container_sharder configs') self.max_shard_size = max( int(c.get('shard_container_threshold', '1000000')) for c in cont_configs) if not (MIN_SHARD_CONTAINER_THRESHOLD <= self.max_shard_size <= MAX_SHARD_CONTAINER_THRESHOLD): skip_reasons.append( 'shard_container_threshold %d must be between %d and %d' % (self.max_shard_size, MIN_SHARD_CONTAINER_THRESHOLD, MAX_SHARD_CONTAINER_THRESHOLD)) def skip_check(reason_list, option, required): values = {int(c.get(option, required)) for c in cont_configs} if values != {required}: reason_list.append('%s must be %s' % (option, required)) skip_check(skip_reasons, 'shard_scanner_batch_size', 10) skip_check(skip_reasons, 'shard_batch_size', 2) if skip_reasons: raise SkipTest(', '.join(skip_reasons)) def _load_rings_and_configs(self): super(BaseTestContainerSharding, self)._load_rings_and_configs() # perform checks for skipping test before starting services self._maybe_skip_test() def _make_object_names(self, number, start=0): return ['obj%s%04d' % (self.DELIM, x) for x in range(start, start + number)] def 
_setup_container_name(self): # Container where we're PUTting objects self.container_name = 'container%s%s' % (self.DELIM, uuid.uuid4()) def setUp(self): client.logger.setLevel(client.logging.WARNING) client.requests.logging.getLogger().setLevel( client.requests.logging.WARNING) super(BaseTestContainerSharding, self).setUp() _, self.admin_token = get_auth( PROXY_BASE_URL + '/auth/v1.0', 'admin:admin', 'admin') self._setup_container_name() self.init_brain(self.container_name) self.sharders = Manager(['container-sharder']) self.internal_client = self.make_internal_client() self.memcache = MemcacheRing(['127.0.0.1:11211']) def init_brain(self, container_name): self.container_to_shard = container_name self.brain = BrainSplitter( self.url, self.token, self.container_to_shard, None, 'container') self.brain.put_container(policy_index=int(self.policy)) def stop_container_servers(self, node_numbers=None): if node_numbers: ipports = [] server2ipport = {v: k for k, v in self.ipport2server.items()} for number in self.brain.node_numbers[node_numbers]: self.brain.servers.stop(number=number) server = 'container%d' % number ipports.append(server2ipport[server]) else: ipports = [k for k, v in self.ipport2server.items() if v.startswith('container')] self.brain.servers.stop() for ipport in ipports: wait_for_server_to_hangup(ipport) def put_objects(self, obj_names, contents=None): conn = client.Connection(preauthurl=self.url, preauthtoken=self.token) results = [] for obj in obj_names: rdict = {} conn.put_object(self.container_name, obj, contents=contents, response_dict=rdict) results.append((obj, rdict['headers'].get('x-object-version-id'))) return results def delete_objects(self, obj_names_and_versions): conn = client.Connection(preauthurl=self.url, preauthtoken=self.token) for obj in obj_names_and_versions: if isinstance(obj, tuple): obj, version = obj conn.delete_object(self.container_name, obj, query_string='version-id=%s' % version) else: conn.delete_object(self.container_name, obj) def get_container_shard_ranges(self, account=None, container=None, include_deleted=False): account = account if account else self.account container = container if container else self.container_to_shard path = self.internal_client.make_path(account, container) headers = {'X-Backend-Record-Type': 'shard'} if include_deleted: headers['X-Backend-Include-Deleted'] = 'true' resp = self.internal_client.make_request( 'GET', path + '?format=json', headers, [200]) return [ShardRange.from_dict(sr) for sr in json.loads(resp.body)] def direct_get_container_shard_ranges(self, account=None, container=None, expect_failure=False): collector = ShardCollector() self.direct_container_op( collector, account, container, expect_failure) return collector.ranges def get_storage_dir(self, part, node, account=None, container=None): account = account or self.brain.account container = container or self.container_name server_type, config_number = get_server_number( (node['ip'], node['port']), self.ipport2server) assert server_type == 'container' repl_server = '%s-replicator' % server_type conf = utils.readconf(self.configs[repl_server][config_number], section_name=repl_server) datadir = os.path.join(conf['devices'], node['device'], 'containers') container_hash = utils.hash_path(account, container) return (utils.storage_directory(datadir, part, container_hash), container_hash) def get_db_file(self, part, node, account=None, container=None): container_dir, container_hash = self.get_storage_dir( part, node, account=account, container=container) db_file = 
os.path.join(container_dir, container_hash + '.db') self.assertTrue(get_db_files(db_file)) # sanity check return db_file def get_broker(self, part, node, account=None, container=None): return ContainerBroker( self.get_db_file(part, node, account, container)) def get_shard_broker(self, shard_range, node_index=0): shard_part, shard_nodes = self.brain.ring.get_nodes( shard_range.account, shard_range.container) return self.get_broker( shard_part, shard_nodes[node_index], shard_range.account, shard_range.container) def categorize_container_dir_content(self, account=None, container=None): account = account or self.brain.account container = container or self.container_name part, nodes = self.brain.ring.get_nodes(account, container) storage_dirs = [ self.get_storage_dir(part, node, account=account, container=container)[0] for node in nodes] result = { 'shard_dbs': [], 'normal_dbs': [], 'pendings': [], 'locks': [], 'other': [], } for storage_dir in storage_dirs: for f in os.listdir(storage_dir): path = os.path.join(storage_dir, f) if path.endswith('.db'): hash_, epoch, ext = parse_db_filename(path) if epoch: result['shard_dbs'].append(path) else: result['normal_dbs'].append(path) elif path.endswith('.db.pending'): result['pendings'].append(path) elif path.endswith('/.lock'): result['locks'].append(path) else: result['other'].append(path) if result['other']: self.fail('Found unexpected files in storage directory:\n %s' % '\n '.join(result['other'])) return result def assert_dict_contains(self, expected_items, actual_dict): ignored = set(expected_items) ^ set(actual_dict) filtered_actual = {k: actual_dict[k] for k in actual_dict if k not in ignored} self.assertEqual(expected_items, filtered_actual) def assert_shard_ranges_contiguous(self, expected_number, shard_ranges, first_lower='', last_upper=''): if shard_ranges and isinstance(shard_ranges[0], ShardRange): actual_shard_ranges = sorted(shard_ranges) else: actual_shard_ranges = sorted(ShardRange.from_dict(d) for d in shard_ranges) self.assertLengthEqual(actual_shard_ranges, expected_number) if expected_number: with annotate_failure('Ranges %s.' 
% actual_shard_ranges): self.assertEqual(first_lower, actual_shard_ranges[0].lower_str) for x, y in zip(actual_shard_ranges, actual_shard_ranges[1:]): self.assertEqual(x.upper, y.lower) self.assertEqual(last_upper, actual_shard_ranges[-1].upper_str) def assert_shard_range_equal(self, expected, actual, excludes=None): excludes = excludes or [] expected_dict = dict(expected) actual_dict = dict(actual) for k in excludes: expected_dict.pop(k, None) actual_dict.pop(k, None) self.assertEqual(expected_dict, actual_dict) def assert_shard_range_lists_equal(self, expected, actual, excludes=None): self.assertEqual(len(expected), len(actual)) for expected, actual in zip(expected, actual): self.assert_shard_range_equal(expected, actual, excludes=excludes) def assert_shard_range_state(self, expected_state, shard_ranges): if shard_ranges and not isinstance(shard_ranges[0], ShardRange): shard_ranges = [ShardRange.from_dict(data) for data in shard_ranges] self.assertEqual([expected_state] * len(shard_ranges), [sr.state for sr in shard_ranges]) def assert_total_object_count(self, expected_object_count, shard_ranges): actual = sum(sr['object_count'] for sr in shard_ranges) self.assertEqual(expected_object_count, actual) def assert_container_listing(self, expected_listing, req_hdrs=None): req_hdrs = req_hdrs if req_hdrs else {} headers, actual_listing = client.get_container( self.url, self.token, self.container_name, headers=req_hdrs) self.assertIn('x-container-object-count', headers) expected_obj_count = len(expected_listing) self.assertEqual(expected_listing, [ x['name'].encode('utf-8') if six.PY2 else x['name'] for x in actual_listing]) self.assertEqual(str(expected_obj_count), headers['x-container-object-count']) return headers, actual_listing def assert_container_object_count(self, expected_obj_count): headers = client.head_container( self.url, self.token, self.container_name) self.assertIn('x-container-object-count', headers) self.assertEqual(str(expected_obj_count), headers['x-container-object-count']) def assert_container_post_ok(self, meta_value): key = 'X-Container-Meta-Assert-Post-Works' headers = {key: meta_value} client.post_container( self.url, self.token, self.container_name, headers=headers) resp_headers = client.head_container( self.url, self.token, self.container_name) self.assertEqual(meta_value, resp_headers.get(key.lower())) def assert_container_post_fails(self, meta_value): key = 'X-Container-Meta-Assert-Post-Works' headers = {key: meta_value} with self.assertRaises(ClientException) as cm: client.post_container( self.url, self.token, self.container_name, headers=headers) self.assertEqual(404, cm.exception.http_status) def assert_container_delete_fails(self): with self.assertRaises(ClientException) as cm: client.delete_container(self.url, self.token, self.container_name) self.assertEqual(409, cm.exception.http_status) def assert_container_not_found(self): with self.assertRaises(ClientException) as cm: client.get_container(self.url, self.token, self.container_name) self.assertEqual(404, cm.exception.http_status) # check for headers leaking out while deleted resp_headers = cm.exception.http_response_headers self.assertNotIn('X-Container-Object-Count', resp_headers) self.assertNotIn('X-Container-Bytes-Used', resp_headers) self.assertNotIn('X-Timestamp', resp_headers) self.assertNotIn('X-PUT-Timestamp', resp_headers) def assert_container_has_shard_sysmeta(self): node_headers = self.direct_head_container() for node_id, headers in node_headers.items(): with annotate_failure('%s in %s' % 
(node_id, node_headers.keys())): for k, v in headers.items(): if k.lower().startswith('x-container-sysmeta-shard'): break else: self.fail('No shard sysmeta found in %s' % headers) def assert_container_state(self, node, expected_state, num_shard_ranges): headers, shard_ranges = direct_client.direct_get_container( node, self.brain.part, self.account, self.container_to_shard, headers={'X-Backend-Record-Type': 'shard'}) self.assertEqual(num_shard_ranges, len(shard_ranges)) self.assertIn('X-Backend-Sharding-State', headers) self.assertEqual( expected_state, headers['X-Backend-Sharding-State']) return [ShardRange.from_dict(sr) for sr in shard_ranges] def assert_subprocess_success(self, cmd_args): try: subprocess.check_output(cmd_args, stderr=subprocess.STDOUT) except Exception as exc: # why not 'except CalledProcessError'? because in my py3.6 tests # the CalledProcessError wasn't caught by that! despite type(exc) # being a CalledProcessError, isinstance(exc, CalledProcessError) # is False and the type has a different hash - could be # related to https://github.com/eventlet/eventlet/issues/413 try: # assume this is a CalledProcessError self.fail('%s with output:\n%s' % (exc, exc.output)) except AttributeError: raise exc def get_part_and_node_numbers(self, shard_range): """Return the partition and node numbers for a shard range.""" part, nodes = self.brain.ring.get_nodes( shard_range.account, shard_range.container) return part, [n['id'] + 1 for n in nodes] def run_sharders(self, shard_ranges): """Run the sharder on partitions for given shard ranges.""" if not isinstance(shard_ranges, (list, tuple, set)): shard_ranges = (shard_ranges,) partitions = ','.join(str(self.get_part_and_node_numbers(sr)[0]) for sr in shard_ranges) self.sharders.once(additional_args='--partitions=%s' % partitions) def run_sharder_sequentially(self, shard_range=None): """Run sharder node by node on partition for given shard range.""" if shard_range: part, node_numbers = self.get_part_and_node_numbers(shard_range) else: part, node_numbers = self.brain.part, self.brain.node_numbers for node_number in node_numbers: self.sharders.once(number=node_number, additional_args='--partitions=%s' % part) def run_custom_sharder(self, conf_index, custom_conf, **kwargs): return self.run_custom_daemon(ContainerSharder, 'container-sharder', conf_index, custom_conf, **kwargs) class TestContainerShardingNonUTF8(BaseTestContainerSharding): def test_sharding_listing(self): # verify parameterised listing of a container during sharding all_obj_names = self._make_object_names(4 * self.max_shard_size) obj_names = all_obj_names[::2] obj_content = 'testing' self.put_objects(obj_names, contents=obj_content) # choose some names approx in middle of each expected shard range markers = [ obj_names[i] for i in range(self.max_shard_size // 4, 2 * self.max_shard_size, self.max_shard_size // 2)] def check_listing(objects, req_hdrs=None, **params): req_hdrs = req_hdrs if req_hdrs else {} qs = '&'.join('%s=%s' % (k, quote(str(v))) for k, v in params.items()) headers, listing = client.get_container( self.url, self.token, self.container_name, query_string=qs, headers=req_hdrs) listing = [x['name'].encode('utf-8') if six.PY2 else x['name'] for x in listing] if params.get('reverse'): marker = params.get('marker', ShardRange.MAX) end_marker = params.get('end_marker', ShardRange.MIN) expected = [o for o in objects if end_marker < o < marker] expected.reverse() else: marker = params.get('marker', ShardRange.MIN) end_marker = params.get('end_marker', ShardRange.MAX) 
expected = [o for o in objects if marker < o < end_marker] if 'limit' in params: expected = expected[:params['limit']] self.assertEqual(expected, listing) self.assertIn('x-timestamp', headers) self.assertIn('last-modified', headers) self.assertIn('x-trans-id', headers) self.assertEqual('bytes', headers.get('accept-ranges')) self.assertEqual('application/json; charset=utf-8', headers.get('content-type')) def check_listing_fails(exp_status, **params): qs = '&'.join(['%s=%s' % param for param in params.items()]) with self.assertRaises(ClientException) as cm: client.get_container( self.url, self.token, self.container_name, query_string=qs) self.assertEqual(exp_status, cm.exception.http_status) return cm.exception def do_listing_checks(objs, hdrs=None): hdrs = hdrs if hdrs else {} check_listing(objs, hdrs) check_listing(objs, hdrs, marker=markers[0], end_marker=markers[1]) check_listing(objs, hdrs, marker=markers[0], end_marker=markers[2]) check_listing(objs, hdrs, marker=markers[1], end_marker=markers[3]) check_listing(objs, hdrs, marker=markers[1], end_marker=markers[3], limit=self.max_shard_size // 4) check_listing(objs, hdrs, marker=markers[1], end_marker=markers[3], limit=self.max_shard_size // 4) check_listing(objs, hdrs, marker=markers[1], end_marker=markers[2], limit=self.max_shard_size // 2) check_listing(objs, hdrs, marker=markers[1], end_marker=markers[1]) check_listing(objs, hdrs, reverse=True) check_listing(objs, hdrs, reverse=True, end_marker=markers[1]) check_listing(objs, hdrs, reverse=True, marker=markers[3], end_marker=markers[1], limit=self.max_shard_size // 4) check_listing(objs, hdrs, reverse=True, marker=markers[3], end_marker=markers[1], limit=0) check_listing([], hdrs, marker=markers[0], end_marker=markers[0]) check_listing([], hdrs, marker=markers[0], end_marker=markers[1], reverse=True) check_listing(objs, hdrs, prefix='obj') check_listing([], hdrs, prefix='zzz') # delimiter headers, listing = client.get_container( self.url, self.token, self.container_name, query_string='delimiter=' + quote(self.DELIM), headers=hdrs) self.assertEqual([{'subdir': 'obj' + self.DELIM}], listing) headers, listing = client.get_container( self.url, self.token, self.container_name, query_string='delimiter=j' + quote(self.DELIM), headers=hdrs) self.assertEqual([{'subdir': 'obj' + self.DELIM}], listing) limit = self.cluster_info['swift']['container_listing_limit'] exc = check_listing_fails(412, limit=limit + 1) self.assertIn(b'Maximum limit', exc.http_response_content) exc = check_listing_fails(400, delimiter='%ff') self.assertIn(b'not valid UTF-8', exc.http_response_content) # sanity checks do_listing_checks(obj_names) # Shard the container client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) # First run the 'leader' in charge of scanning, which finds all shard # ranges and cleaves first two self.sharders.once(number=self.brain.node_numbers[0], additional_args='--partitions=%s' % self.brain.part) # Then run sharder on other nodes which will also cleave first two # shard ranges for n in self.brain.node_numbers[1:]: self.sharders.once( number=n, additional_args='--partitions=%s' % self.brain.part) # sanity check shard range states for node in self.brain.nodes: self.assert_container_state(node, 'sharding', 4) shard_ranges = self.get_container_shard_ranges() self.assertLengthEqual(shard_ranges, 4) self.assert_shard_range_state(ShardRange.CLEAVED, shard_ranges[:2]) self.assert_shard_range_state(ShardRange.CREATED, shard_ranges[2:]) 
self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() # confirm no sysmeta deleted self.assert_container_post_ok('sharding') do_listing_checks(obj_names) # put some new objects spread through entire namespace; object updates # should be directed to the shard container (both the cleaved and the # created shards) new_obj_names = all_obj_names[1::4] self.put_objects(new_obj_names, obj_content) # new objects that fell into the first two cleaved shard ranges are # reported in listing; new objects in the yet-to-be-cleaved shard # ranges are not yet included in listing because listings prefer the # root over the final two shards that are not yet-cleaved exp_obj_names = [o for o in obj_names + new_obj_names if o <= shard_ranges[1].upper] exp_obj_names += [o for o in obj_names if o > shard_ranges[1].upper] exp_obj_names.sort() do_listing_checks(exp_obj_names) # run all the sharders again and the last two shard ranges get cleaved self.sharders.once(additional_args='--partitions=%s' % self.brain.part) for node in self.brain.nodes: self.assert_container_state(node, 'sharded', 4) shard_ranges = self.get_container_shard_ranges() self.assert_shard_range_state(ShardRange.ACTIVE, shard_ranges) # listings are now gathered from all four shard ranges so should have # all the specified objects exp_obj_names = obj_names + new_obj_names exp_obj_names.sort() do_listing_checks(exp_obj_names) # shard ranges may now be cached by proxy so do listings checks again # forcing backend request do_listing_checks(exp_obj_names, hdrs={'X-Newest': 'true'}) # post more metadata to the container and check that it is read back # correctly from backend (using x-newest) and cache test_headers = {'x-container-meta-test': 'testing', 'x-container-read': 'read_acl', 'x-container-write': 'write_acl', 'x-container-sync-key': 'sync_key', # 'x-container-sync-to': 'sync_to', 'x-versions-location': 'versions', 'x-container-meta-access-control-allow-origin': 'aa', 'x-container-meta-access-control-expose-headers': 'bb', 'x-container-meta-access-control-max-age': '123'} client.post_container(self.url, self.admin_token, self.container_name, headers=test_headers) headers, listing = client.get_container( self.url, self.token, self.container_name, headers={'X-Newest': 'true'}) exp_headers = dict(test_headers) exp_headers.update({ 'x-container-object-count': str(len(exp_obj_names)), 'x-container-bytes-used': str(len(exp_obj_names) * len(obj_content)) }) for k, v in exp_headers.items(): self.assertIn(k, headers) self.assertEqual(v, headers[k], dict(headers)) cache_headers, listing = client.get_container( self.url, self.token, self.container_name) for k, v in exp_headers.items(): self.assertIn(k, cache_headers) self.assertEqual(v, cache_headers[k], dict(exp_headers)) # we don't expect any of these headers to be equal... 
for k in ('x-timestamp', 'last-modified', 'date', 'x-trans-id', 'x-openstack-request-id'): headers.pop(k, None) cache_headers.pop(k, None) self.assertEqual(headers, cache_headers) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') # delete original objects self.delete_objects(obj_names) do_listing_checks(new_obj_names) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') class TestContainerShardingFunkyNames(TestContainerShardingNonUTF8): DELIM = '\n' def _make_object_names(self, number): return ['obj\n%04d%%Ff' % x for x in range(number)] def _setup_container_name(self): self.container_name = 'container\n%%Ff\n%s' % uuid.uuid4() class TestContainerShardingUTF8(TestContainerShardingNonUTF8): def _make_object_names(self, number, start=0): # override default with names that include non-ascii chars name_length = self.cluster_info['swift']['max_object_name_length'] obj_names = [] for x in range(start, start + number): name = (u'obj-\u00e4\u00ea\u00ec\u00f2\u00fb\u1234-%04d' % x) name = name.encode('utf8').ljust(name_length, b'o') if not six.PY2: name = name.decode('utf8') obj_names.append(name) return obj_names def _setup_container_name(self): # override default with max length name that includes non-ascii chars super(TestContainerShardingUTF8, self)._setup_container_name() name_length = self.cluster_info['swift']['max_container_name_length'] cont_name = \ self.container_name + u'-\u00e4\u00ea\u00ec\u00f2\u00fb\u1234' self.container_name = cont_name.encode('utf8').ljust(name_length, b'x') if not six.PY2: self.container_name = self.container_name.decode('utf8') class TestContainerShardingObjectVersioning(BaseTestContainerSharding): def _maybe_skip_test(self): super(TestContainerShardingObjectVersioning, self)._maybe_skip_test() try: vw_config = utils.readconf(self.configs['proxy-server'], 'filter:versioned_writes') except ValueError: raise SkipTest('No [filter:versioned_writes] section found in ' 'proxy-server configs') allow_object_versioning = config_true_value( vw_config.get('allow_object_versioning', False)) if not allow_object_versioning: raise SkipTest('allow_object_versioning must be true ' 'in all versioned_writes configs') def init_brain(self, container_name): client.put_container(self.url, self.token, container_name, headers={ 'X-Storage-Policy': self.policy.name, 'X-Versions-Enabled': 'true', }) self.container_to_shard = '\x00versions\x00' + container_name self.brain = BrainSplitter( self.url, self.token, self.container_to_shard, None, 'container') def test_sharding_listing(self): # verify parameterised listing of a container during sharding all_obj_names = self._make_object_names(3) * self.max_shard_size all_obj_names.extend(self._make_object_names(self.max_shard_size, start=3)) obj_names = all_obj_names[::2] obj_names_and_versions = self.put_objects(obj_names) def sort_key(obj_and_ver): obj, ver = obj_and_ver return obj, ~Timestamp(ver) obj_names_and_versions.sort(key=sort_key) # choose some names approx in middle of each expected shard range markers = [ obj_names_and_versions[i] for i in range(self.max_shard_size // 4, 2 * self.max_shard_size, self.max_shard_size // 2)] def check_listing(objects, **params): params['versions'] = '' qs = '&'.join('%s=%s' % param for param in params.items()) headers, listing = client.get_container( self.url, self.token, self.container_name, query_string=qs) listing = [(x['name'].encode('utf-8') if six.PY2 else 
x['name'], x['version_id']) for x in listing] if params.get('reverse'): marker = ( params.get('marker', ShardRange.MAX), ~Timestamp(params['version_marker']) if 'version_marker' in params else ~Timestamp('0'), ) end_marker = ( params.get('end_marker', ShardRange.MIN), Timestamp('0'), ) expected = [o for o in objects if end_marker < sort_key(o) < marker] expected.reverse() else: marker = ( params.get('marker', ShardRange.MIN), ~Timestamp(params['version_marker']) if 'version_marker' in params else Timestamp('0'), ) end_marker = ( params.get('end_marker', ShardRange.MAX), ~Timestamp('0'), ) expected = [o for o in objects if marker < sort_key(o) < end_marker] if 'limit' in params: expected = expected[:params['limit']] self.assertEqual(expected, listing) def check_listing_fails(exp_status, **params): params['versions'] = '' qs = '&'.join('%s=%s' % param for param in params.items()) with self.assertRaises(ClientException) as cm: client.get_container( self.url, self.token, self.container_name, query_string=qs) self.assertEqual(exp_status, cm.exception.http_status) return cm.exception def do_listing_checks(objects): check_listing(objects) check_listing(objects, marker=markers[0][0], version_marker=markers[0][1]) check_listing(objects, marker=markers[0][0], version_marker=markers[0][1], limit=self.max_shard_size // 10) check_listing(objects, marker=markers[0][0], version_marker=markers[0][1], limit=self.max_shard_size // 4) check_listing(objects, marker=markers[0][0], version_marker=markers[0][1], limit=self.max_shard_size // 2) check_listing(objects, marker=markers[1][0], version_marker=markers[1][1]) check_listing(objects, marker=markers[1][0], version_marker=markers[1][1], limit=self.max_shard_size // 10) check_listing(objects, marker=markers[2][0], version_marker=markers[2][1], limit=self.max_shard_size // 4) check_listing(objects, marker=markers[2][0], version_marker=markers[2][1], limit=self.max_shard_size // 2) check_listing(objects, reverse=True) check_listing(objects, reverse=True, marker=markers[1][0], version_marker=markers[1][1]) check_listing(objects, prefix='obj') check_listing([], prefix='zzz') # delimiter headers, listing = client.get_container( self.url, self.token, self.container_name, query_string='delimiter=-') self.assertEqual([{'subdir': 'obj-'}], listing) headers, listing = client.get_container( self.url, self.token, self.container_name, query_string='delimiter=j-') self.assertEqual([{'subdir': 'obj-'}], listing) limit = self.cluster_info['swift']['container_listing_limit'] exc = check_listing_fails(412, limit=limit + 1) self.assertIn(b'Maximum limit', exc.http_response_content) exc = check_listing_fails(400, delimiter='%ff') self.assertIn(b'not valid UTF-8', exc.http_response_content) # sanity checks do_listing_checks(obj_names_and_versions) # Shard the container. 
Use an internal_client so we get an implicit # X-Backend-Allow-Reserved-Names header self.internal_client.set_container_metadata( self.account, self.container_to_shard, { 'X-Container-Sysmeta-Sharding': 'True', }) # First run the 'leader' in charge of scanning, which finds all shard # ranges and cleaves first two self.sharders.once(number=self.brain.node_numbers[0], additional_args='--partitions=%s' % self.brain.part) # Then run sharder on other nodes which will also cleave first two # shard ranges for n in self.brain.node_numbers[1:]: self.sharders.once( number=n, additional_args='--partitions=%s' % self.brain.part) # sanity check shard range states for node in self.brain.nodes: self.assert_container_state(node, 'sharding', 4) shard_ranges = self.get_container_shard_ranges() self.assertLengthEqual(shard_ranges, 4) self.assert_shard_range_state(ShardRange.CLEAVED, shard_ranges[:2]) self.assert_shard_range_state(ShardRange.CREATED, shard_ranges[2:]) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() # confirm no sysmeta deleted self.assert_container_post_ok('sharding') do_listing_checks(obj_names_and_versions) # put some new objects spread through entire namespace new_obj_names = all_obj_names[1::4] new_obj_names_and_versions = self.put_objects(new_obj_names) # new objects that fell into the first two cleaved shard ranges are # reported in listing, new objects in the yet-to-be-cleaved shard # ranges are not yet included in listing exp_obj_names_and_versions = [ o for o in obj_names_and_versions + new_obj_names_and_versions if '\x00' + o[0] <= shard_ranges[1].upper] exp_obj_names_and_versions += [ o for o in obj_names_and_versions if '\x00' + o[0] > shard_ranges[1].upper] exp_obj_names_and_versions.sort(key=sort_key) do_listing_checks(exp_obj_names_and_versions) # run all the sharders again and the last two shard ranges get cleaved self.sharders.once(additional_args='--partitions=%s' % self.brain.part) for node in self.brain.nodes: self.assert_container_state(node, 'sharded', 4) shard_ranges = self.get_container_shard_ranges() self.assert_shard_range_state(ShardRange.ACTIVE, shard_ranges) exp_obj_names_and_versions = \ obj_names_and_versions + new_obj_names_and_versions exp_obj_names_and_versions.sort(key=sort_key) do_listing_checks(exp_obj_names_and_versions) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') # delete original objects self.delete_objects(obj_names_and_versions) new_obj_names_and_versions.sort(key=sort_key) do_listing_checks(new_obj_names_and_versions) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') class TestContainerSharding(BaseTestContainerSharding): def _test_sharded_listing(self, run_replicators=False): obj_names = self._make_object_names(self.max_shard_size) self.put_objects(obj_names) # Verify that we start out with normal DBs, no shards found = self.categorize_container_dir_content() self.assertLengthEqual(found['normal_dbs'], 3) self.assertLengthEqual(found['shard_dbs'], 0) for db_file in found['normal_dbs']: broker = ContainerBroker(db_file) self.assertIs(True, broker.is_root_container()) self.assertEqual('unsharded', broker.get_db_state()) self.assertLengthEqual(broker.get_shard_ranges(), 0) headers, pre_sharding_listing = client.get_container( self.url, self.token, self.container_name) self.assertEqual(obj_names, [ x['name'].encode('utf-8') if six.PY2 else x['name'] for x in 
pre_sharding_listing]) # sanity # Shard it client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) pre_sharding_headers = client.head_container( self.url, self.admin_token, self.container_name) self.assertEqual('True', pre_sharding_headers.get('x-container-sharding')) # Only run the one in charge of scanning self.sharders.once(number=self.brain.node_numbers[0], additional_args='--partitions=%s' % self.brain.part) # Verify that we have one sharded db -- though the other normal DBs # received the shard ranges that got defined found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 1) broker = self.get_broker(self.brain.part, self.brain.nodes[0]) # sanity check - the shard db is on replica 0 self.assertEqual(found['shard_dbs'][0], broker.db_file) self.assertIs(True, broker.is_root_container()) self.assertEqual('sharded', broker.get_db_state()) orig_root_shard_ranges = [dict(sr) for sr in broker.get_shard_ranges()] self.assertLengthEqual(orig_root_shard_ranges, 2) self.assert_total_object_count(len(obj_names), orig_root_shard_ranges) self.assert_shard_ranges_contiguous(2, orig_root_shard_ranges) self.assertEqual([ShardRange.ACTIVE, ShardRange.ACTIVE], [sr['state'] for sr in orig_root_shard_ranges]) # Contexts should still be there, and should be complete contexts = set([ctx.done() for ctx, _ in CleavingContext.load_all(broker)]) self.assertEqual({True}, contexts) self.direct_delete_container(expect_failure=True) self.assertLengthEqual(found['normal_dbs'], 2) for db_file in found['normal_dbs']: broker = ContainerBroker(db_file) self.assertIs(True, broker.is_root_container()) self.assertEqual('unsharded', broker.get_db_state()) shard_ranges = [dict(sr) for sr in broker.get_shard_ranges()] self.assertEqual([ShardRange.CREATED, ShardRange.CREATED], [sr['state'] for sr in shard_ranges]) # the sharded db had shard range meta_timestamps and state updated # during cleaving, so we do not expect those to be equal on other # nodes self.assert_shard_range_lists_equal( orig_root_shard_ranges, shard_ranges, excludes=['meta_timestamp', 'state', 'state_timestamp']) contexts = list(CleavingContext.load_all(broker)) self.assertEqual([], contexts) # length check if run_replicators: Manager(['container-replicator']).once() # replication doesn't change the db file names found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 1) self.assertLengthEqual(found['normal_dbs'], 2) # Now that everyone has shard ranges, run *everyone* self.sharders.once(additional_args='--partitions=%s' % self.brain.part) # Verify that we only have shard dbs now found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 3) self.assertLengthEqual(found['normal_dbs'], 0) # Shards stayed the same for db_file in found['shard_dbs']: broker = ContainerBroker(db_file) self.assertIs(True, broker.is_root_container()) self.assertEqual('sharded', broker.get_db_state()) # Well, except for meta_timestamps, since the shards each reported self.assert_shard_range_lists_equal( orig_root_shard_ranges, broker.get_shard_ranges(), excludes=['meta_timestamp', 'state_timestamp']) for orig, updated in zip(orig_root_shard_ranges, broker.get_shard_ranges()): self.assertGreaterEqual(updated.state_timestamp, orig['state_timestamp']) self.assertGreaterEqual(updated.meta_timestamp, orig['meta_timestamp']) # Contexts should still be there, and should be complete contexts = set([ctx.done() for ctx, _ in 
CleavingContext.load_all(broker)]) self.assertEqual({True}, contexts) # Check that entire listing is available headers, actual_listing = self.assert_container_listing(obj_names) # ... and check some other container properties self.assertEqual(headers['last-modified'], pre_sharding_headers['last-modified']) # It even works in reverse! headers, listing = client.get_container(self.url, self.token, self.container_name, query_string='reverse=on') self.assertEqual(pre_sharding_listing[::-1], listing) # and repeat checks to use shard ranges now cached in proxy headers, actual_listing = self.assert_container_listing(obj_names) self.assertEqual(headers['last-modified'], pre_sharding_headers['last-modified']) headers, listing = client.get_container(self.url, self.token, self.container_name, query_string='reverse=on') self.assertEqual(pre_sharding_listing[::-1], listing) # Now put some new objects into first shard, taking its count to # 3 shard ranges' worth more_obj_names = [ 'beta%03d' % x for x in range(self.max_shard_size)] self.put_objects(more_obj_names) # The listing includes new objects (shard ranges haven't changed, just # their object content, so cached shard ranges are still correct)... headers, listing = self.assert_container_listing( more_obj_names + obj_names) self.assertEqual(pre_sharding_listing, listing[len(more_obj_names):]) # ...but root object count is out of date until the sharders run and # update the root self.assert_container_object_count(len(obj_names)) # run sharders on the shard to get root updated shard_1 = ShardRange.from_dict(orig_root_shard_ranges[0]) self.run_sharders(shard_1) self.assert_container_object_count(len(more_obj_names + obj_names)) # we've added objects enough that we need to shard the first shard # *again* into three new sub-shards, but nothing happens until the root # leader identifies shard candidate... root_shard_ranges = self.direct_get_container_shard_ranges() for node, (hdrs, root_shards) in root_shard_ranges.items(): self.assertLengthEqual(root_shards, 2) with annotate_failure('node %s. ' % node): self.assertEqual( [ShardRange.ACTIVE] * 2, [sr['state'] for sr in root_shards]) # orig shards 0, 1 should be contiguous self.assert_shard_ranges_contiguous(2, root_shards) # Now run the root leader to identify shard candidate...while one of # the shard container servers is down shard_1_part, shard_1_nodes = self.get_part_and_node_numbers(shard_1) self.brain.servers.stop(number=shard_1_nodes[2]) self.sharders.once(number=self.brain.node_numbers[0], additional_args='--partitions=%s' % self.brain.part) # ... so third replica of first shard state is not moved to sharding found_for_shard = self.categorize_container_dir_content( shard_1.account, shard_1.container) self.assertLengthEqual(found_for_shard['normal_dbs'], 3) self.assertEqual( [ShardRange.SHARDING, ShardRange.SHARDING, ShardRange.ACTIVE], [ContainerBroker(db_file).get_own_shard_range().state for db_file in found_for_shard['normal_dbs']]) # ...then run first cycle of first shard sharders in order, leader # first, to get to predictable state where all nodes have cleaved 2 out # of 3 ranges...starting with first two nodes for node_number in shard_1_nodes[:2]: self.sharders.once( number=node_number, additional_args='--partitions=%s' % shard_1_part) # ... 
first two replicas start sharding to sub-shards found_for_shard = self.categorize_container_dir_content( shard_1.account, shard_1.container) self.assertLengthEqual(found_for_shard['shard_dbs'], 2) for db_file in found_for_shard['shard_dbs'][:2]: broker = ContainerBroker(db_file) with annotate_failure('shard db file %s. ' % db_file): self.assertIs(False, broker.is_root_container()) self.assertEqual('sharding', broker.get_db_state()) self.assertEqual( ShardRange.SHARDING, broker.get_own_shard_range().state) shard_shards = broker.get_shard_ranges() self.assertEqual( [ShardRange.CLEAVED, ShardRange.CLEAVED, ShardRange.CREATED], [sr.state for sr in shard_shards]) self.assert_shard_ranges_contiguous( 3, shard_shards, first_lower=orig_root_shard_ranges[0]['lower'], last_upper=orig_root_shard_ranges[0]['upper']) contexts = list(CleavingContext.load_all(broker)) self.assertEqual(len(contexts), 1) context, _lm = contexts[0] self.assertIs(context.cleaving_done, False) self.assertIs(context.misplaced_done, True) self.assertEqual(context.ranges_done, 2) self.assertEqual(context.ranges_todo, 1) self.assertEqual(context.max_row, self.max_shard_size * 3 // 2) # but third replica still has no idea it should be sharding self.assertLengthEqual(found_for_shard['normal_dbs'], 3) self.assertEqual( ShardRange.ACTIVE, ContainerBroker( found_for_shard['normal_dbs'][2]).get_own_shard_range().state) # ...but once sharder runs on third replica it will learn its state; # note that any root replica on the stopped container server also won't # know about the shards being in sharding state, so leave that server # stopped for now so that shard fetches its state from an up-to-date # root replica self.sharders.once( number=shard_1_nodes[2], additional_args='--partitions=%s' % shard_1_part) # third replica is sharding but has no sub-shard ranges yet... found_for_shard = self.categorize_container_dir_content( shard_1.account, shard_1.container) self.assertLengthEqual(found_for_shard['shard_dbs'], 2) self.assertLengthEqual(found_for_shard['normal_dbs'], 3) broker = ContainerBroker(found_for_shard['normal_dbs'][2]) self.assertEqual('unsharded', broker.get_db_state()) self.assertEqual( ShardRange.SHARDING, broker.get_own_shard_range().state) self.assertFalse(broker.get_shard_ranges()) contexts = list(CleavingContext.load_all(broker)) self.assertEqual([], contexts) # length check # ...until sub-shard ranges are replicated from another shard replica; # there may also be a sub-shard replica missing so run replicators on # all nodes to fix that if necessary self.brain.servers.start(number=shard_1_nodes[2]) self.replicators.once() # Now that the replicators have all run, third replica sees cleaving # contexts for the first two contexts = list(CleavingContext.load_all(broker)) self.assertEqual(len(contexts), 2) # now run sharder again on third replica self.sharders.once( number=shard_1_nodes[2], additional_args='--partitions=%s' % shard_1_part) sharding_broker = ContainerBroker(found_for_shard['normal_dbs'][2]) self.assertEqual('sharding', sharding_broker.get_db_state()) broker_id = broker.get_info()['id'] # Old, unsharded DB doesn't have the context... 
contexts = list(CleavingContext.load_all(broker)) self.assertEqual(len(contexts), 2) self.assertNotIn(broker_id, [ctx[0].ref for ctx in contexts]) # ...but the sharding one does contexts = list(CleavingContext.load_all(sharding_broker)) self.assertEqual(len(contexts), 3) self.assertIn(broker_id, [ctx[0].ref for ctx in contexts]) # check original first shard range state and sub-shards - all replicas # should now be in consistent state found_for_shard = self.categorize_container_dir_content( shard_1.account, shard_1.container) self.assertLengthEqual(found_for_shard['shard_dbs'], 3) self.assertLengthEqual(found_for_shard['normal_dbs'], 3) for db_file in found_for_shard['shard_dbs']: broker = ContainerBroker(db_file) with annotate_failure('shard db file %s. ' % db_file): self.assertIs(False, broker.is_root_container()) self.assertEqual('sharding', broker.get_db_state()) self.assertEqual( ShardRange.SHARDING, broker.get_own_shard_range().state) shard_shards = broker.get_shard_ranges() self.assertEqual( [ShardRange.CLEAVED, ShardRange.CLEAVED, ShardRange.CREATED], [sr.state for sr in shard_shards]) self.assert_shard_ranges_contiguous( 3, shard_shards, first_lower=orig_root_shard_ranges[0]['lower'], last_upper=orig_root_shard_ranges[0]['upper']) # check third sub-shard is in created state sub_shard = shard_shards[2] found_for_sub_shard = self.categorize_container_dir_content( sub_shard.account, sub_shard.container) self.assertFalse(found_for_sub_shard['shard_dbs']) self.assertLengthEqual(found_for_sub_shard['normal_dbs'], 3) for db_file in found_for_sub_shard['normal_dbs']: broker = ContainerBroker(db_file) with annotate_failure('sub shard db file %s. ' % db_file): self.assertIs(False, broker.is_root_container()) self.assertEqual('unsharded', broker.get_db_state()) self.assertEqual( ShardRange.CREATED, broker.get_own_shard_range().state) self.assertFalse(broker.get_shard_ranges()) # check root shard ranges root_shard_ranges = self.direct_get_container_shard_ranges() for node, (hdrs, root_shards) in root_shard_ranges.items(): self.assertLengthEqual(root_shards, 5) with annotate_failure('node %s. ' % node): # shard ranges are sorted by upper, state, lower, so expect: # sub-shards, orig shard 0, orig shard 1 self.assertEqual( [ShardRange.CLEAVED, ShardRange.CLEAVED, ShardRange.CREATED, ShardRange.SHARDING, ShardRange.ACTIVE], [sr['state'] for sr in root_shards]) # sub-shards 0, 1, 2, orig shard 1 should be contiguous self.assert_shard_ranges_contiguous( 4, root_shards[:3] + root_shards[4:]) # orig shards 0, 1 should be contiguous self.assert_shard_ranges_contiguous(2, root_shards[3:]) self.assert_container_listing(more_obj_names + obj_names) self.assert_container_object_count(len(more_obj_names + obj_names)) # Before writing, kill the cache self.memcache.delete(get_cache_key( self.account, self.container_name, shard='updating')) # add another object that lands in the first of the new sub-shards self.put_objects(['alpha']) # check that alpha object is in the first new shard shard_listings = self.direct_get_container(shard_shards[0].account, shard_shards[0].container) for node, (hdrs, listing) in shard_listings.items(): with annotate_failure(node): self.assertIn('alpha', [o['name'] for o in listing]) self.assert_container_listing(['alpha'] + more_obj_names + obj_names) # Run sharders again so things settle. 
self.run_sharders(shard_1) # Also run replicators to settle cleaving contexts self.replicators.once() # check original first shard range shards for db_file in found_for_shard['shard_dbs']: broker = ContainerBroker(db_file) with annotate_failure('shard db file %s. ' % db_file): self.assertIs(False, broker.is_root_container()) self.assertEqual('sharded', broker.get_db_state()) self.assertEqual( [ShardRange.ACTIVE] * 3, [sr.state for sr in broker.get_shard_ranges()]) # Contexts should still be there, and should be complete contexts = set([ctx.done() for ctx, _ in CleavingContext.load_all(broker)]) self.assertEqual({True}, contexts) # check root shard ranges root_shard_ranges = self.direct_get_container_shard_ranges() for node, (hdrs, root_shards) in root_shard_ranges.items(): # old first shard range should have been deleted self.assertLengthEqual(root_shards, 4) with annotate_failure('node %s. ' % node): self.assertEqual( [ShardRange.ACTIVE] * 4, [sr['state'] for sr in root_shards]) self.assert_shard_ranges_contiguous(4, root_shards) headers, final_listing = self.assert_container_listing( ['alpha'] + more_obj_names + obj_names) # check root found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 3) self.assertLengthEqual(found['normal_dbs'], 0) new_shard_ranges = None for db_file in found['shard_dbs']: broker = ContainerBroker(db_file) self.assertIs(True, broker.is_root_container()) self.assertEqual('sharded', broker.get_db_state()) if new_shard_ranges is None: new_shard_ranges = broker.get_shard_ranges( include_deleted=True) self.assertLengthEqual(new_shard_ranges, 5) # Second half is still there, and unchanged self.assertIn( dict(orig_root_shard_ranges[1], meta_timestamp=None, state_timestamp=None), [dict(sr, meta_timestamp=None, state_timestamp=None) for sr in new_shard_ranges]) # But the first half split in three, then deleted by_name = {sr.name: sr for sr in new_shard_ranges} self.assertIn(orig_root_shard_ranges[0]['name'], by_name) old_shard_range = by_name.pop( orig_root_shard_ranges[0]['name']) self.assertTrue(old_shard_range.deleted) self.assert_shard_ranges_contiguous(4, list(by_name.values())) else: # Everyone's on the same page. Well, except for # meta_timestamps, since the shards each reported other_shard_ranges = broker.get_shard_ranges( include_deleted=True) self.assert_shard_range_lists_equal( new_shard_ranges, other_shard_ranges, excludes=['meta_timestamp', 'state_timestamp']) for orig, updated in zip(orig_root_shard_ranges, other_shard_ranges): self.assertGreaterEqual(updated.meta_timestamp, orig['meta_timestamp']) self.assert_container_delete_fails() for obj in final_listing: client.delete_object( self.url, self.token, self.container_name, obj['name']) # the objects won't be listed anymore self.assert_container_listing([]) # but root container stats will not yet be aware of the deletions self.assert_container_delete_fails() # One server was down while the shard sharded its first two sub-shards, # so there may be undeleted handoff db(s) for sub-shard(s) that were # not fully replicated; run replicators now to clean up so they no # longer report bogus stats to root. self.replicators.once() # Run sharder so that shard containers update the root. Do not run # sharder on root container because that triggers shrinks which can # cause root object count to temporarily be non-zero and prevent the # final delete. 
self.run_sharders(self.get_container_shard_ranges()) # then root is empty and can be deleted self.assert_container_listing([]) self.assert_container_object_count(0) client.delete_container(self.url, self.token, self.container_name) def test_sharded_listing_no_replicators(self): self._test_sharded_listing() def test_sharded_listing_with_replicators(self): self._test_sharded_listing(run_replicators=True) def test_async_pendings(self): obj_names = self._make_object_names(self.max_shard_size * 2) # There are some updates *everyone* gets self.put_objects(obj_names[::5]) # But roll some outages so each container only get ~2/5 more object # records i.e. total of 3/5 updates per container; and async pendings # pile up for i, n in enumerate(self.brain.node_numbers, start=1): self.brain.servers.stop(number=n) self.put_objects(obj_names[i::5]) self.brain.servers.start(number=n) # But there are also 1/5 updates *no one* gets self.brain.servers.stop() self.put_objects(obj_names[4::5]) self.brain.servers.start() # Shard it client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) headers = client.head_container(self.url, self.admin_token, self.container_name) self.assertEqual('True', headers.get('x-container-sharding')) # sanity check found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 0) self.assertLengthEqual(found['normal_dbs'], 3) for db_file in found['normal_dbs']: broker = ContainerBroker(db_file) self.assertIs(True, broker.is_root_container()) self.assertEqual(len(obj_names) * 3 // 5, broker.get_info()['object_count']) # Only run the 'leader' in charge of scanning. # Each container has ~2 * max * 3/5 objects # which are distributed from obj000 to obj<2 * max - 1>, # so expect 3 shard ranges to be found: the first two will be complete # shards with max/2 objects and lower/upper bounds spaced by approx: # (2 * max - 1)/(2 * max * 3/5) * (max/2) =~ 5/6 * max # # Note that during this shard cycle the leader replicates to other # nodes so they will end up with ~2 * max * 4/5 objects. 
self.sharders.once(number=self.brain.node_numbers[0], additional_args='--partitions=%s' % self.brain.part) # Verify that we have one shard db -- though the other normal DBs # received the shard ranges that got defined found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 1) node_index_zero_db = found['shard_dbs'][0] broker = ContainerBroker(node_index_zero_db) self.assertIs(True, broker.is_root_container()) self.assertEqual(SHARDING, broker.get_db_state()) expected_shard_ranges = broker.get_shard_ranges() self.assertLengthEqual(expected_shard_ranges, 3) self.assertEqual( [ShardRange.CLEAVED, ShardRange.CLEAVED, ShardRange.CREATED], [sr.state for sr in expected_shard_ranges]) # Still have all three big DBs -- we've only cleaved 2 of the 3 shard # ranges that got defined self.assertLengthEqual(found['normal_dbs'], 3) db_states = [] for db_file in found['normal_dbs']: broker = ContainerBroker(db_file) self.assertIs(True, broker.is_root_container()) db_states.append(broker.get_db_state()) # the sharded db had shard range meta_timestamps updated during # cleaving, so we do not expect those to be equal on other nodes self.assert_shard_range_lists_equal( expected_shard_ranges, broker.get_shard_ranges(), excludes=['meta_timestamp', 'state_timestamp', 'state']) self.assertEqual(len(obj_names) * 3 // 5, broker.get_info()['object_count']) self.assertEqual([SHARDING, UNSHARDED, UNSHARDED], sorted(db_states)) # Run the other sharders so we're all in (roughly) the same state for n in self.brain.node_numbers[1:]: self.sharders.once( number=n, additional_args='--partitions=%s' % self.brain.part) found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 3) self.assertLengthEqual(found['normal_dbs'], 3) for db_file in found['normal_dbs']: broker = ContainerBroker(db_file) self.assertEqual(SHARDING, broker.get_db_state()) # no new rows self.assertEqual(len(obj_names) * 3 // 5, broker.get_info()['object_count']) # Run updaters to clear the async pendings Manager(['object-updater']).once() # Our "big" dbs didn't take updates for db_file in found['normal_dbs']: broker = ContainerBroker(db_file) self.assertEqual(len(obj_names) * 3 // 5, broker.get_info()['object_count']) # confirm that the async pending updates got redirected to the shards for sr in expected_shard_ranges: shard_listings = self.direct_get_container(sr.account, sr.container) for node, (hdrs, listing) in shard_listings.items(): shard_listing_names = [ o['name'].encode('utf-8') if six.PY2 else o['name'] for o in listing] for obj in obj_names[4::5]: if obj in sr: self.assertIn(obj, shard_listing_names) else: self.assertNotIn(obj, shard_listing_names) # The entire listing is not yet available - we have two cleaved shard # ranges, complete with async updates, but for the remainder of the # namespace only what landed in the original container headers, listing = client.get_container(self.url, self.token, self.container_name) start_listing = [ o for o in obj_names if o <= expected_shard_ranges[1].upper] self.assertEqual( [x['name'].encode('utf-8') if six.PY2 else x['name'] for x in listing[:len(start_listing)]], start_listing) # we can't assert much about the remaining listing, other than that # there should be something self.assertTrue( [x['name'].encode('utf-8') if six.PY2 else x['name'] for x in listing[len(start_listing):]]) self.assertIn('x-container-object-count', headers) self.assertEqual(str(len(listing)), headers['x-container-object-count']) headers, listing = 
client.get_container(self.url, self.token, self.container_name, query_string='reverse=on') self.assertEqual([x['name'].encode('utf-8') if six.PY2 else x['name'] for x in listing[-len(start_listing):]], list(reversed(start_listing))) self.assertIn('x-container-object-count', headers) self.assertEqual(str(len(listing)), headers['x-container-object-count']) self.assertTrue( [x['name'].encode('utf-8') if six.PY2 else x['name'] for x in listing[:-len(start_listing)]]) # Run the sharders again to get everything to settle self.sharders.once() found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 3) self.assertLengthEqual(found['normal_dbs'], 0) # now all shards have been cleaved we should get the complete listing headers, listing = client.get_container(self.url, self.token, self.container_name) self.assertEqual([x['name'].encode('utf-8') if six.PY2 else x['name'] for x in listing], obj_names) def test_shrinking(self): int_client = self.make_internal_client() def check_node_data(node_data, exp_hdrs, exp_obj_count, exp_shards, exp_sharded_root_range=False): hdrs, range_data = node_data self.assert_dict_contains(exp_hdrs, hdrs) sharded_root_range = False other_range_data = [] for data in range_data: sr = ShardRange.from_dict(data) if (sr.account == self.account and sr.container == self.container_name and sr.state == ShardRange.SHARDED): # only expect one root range self.assertFalse(sharded_root_range, range_data) sharded_root_range = True self.assertEqual(ShardRange.MIN, sr.lower, sr) self.assertEqual(ShardRange.MAX, sr.upper, sr) else: # include active root range in further assertions other_range_data.append(data) self.assertEqual(exp_sharded_root_range, sharded_root_range) self.assert_shard_ranges_contiguous(exp_shards, other_range_data) self.assert_total_object_count(exp_obj_count, other_range_data) def check_shard_nodes_data(node_data, expected_state='unsharded', expected_shards=0, exp_obj_count=0, exp_sharded_root_range=False): # checks that shard range is consistent on all nodes root_path = '%s/%s' % (self.account, self.container_name) exp_shard_hdrs = { 'X-Container-Sysmeta-Shard-Quoted-Root': quote(root_path), 'X-Backend-Sharding-State': expected_state} object_counts = [] bytes_used = [] for node_id, node_data in node_data.items(): with annotate_failure('Node id %s.' % node_id): check_node_data( node_data, exp_shard_hdrs, exp_obj_count, expected_shards, exp_sharded_root_range) hdrs = node_data[0] object_counts.append(int(hdrs['X-Container-Object-Count'])) bytes_used.append(int(hdrs['X-Container-Bytes-Used'])) if len(set(object_counts)) != 1: self.fail('Inconsistent object counts: %s' % object_counts) if len(set(bytes_used)) != 1: self.fail('Inconsistent bytes used: %s' % bytes_used) return object_counts[0], bytes_used[0] repeat = [0] def do_shard_then_shrink(): repeat[0] += 1 obj_names = ['obj-%s-%03d' % (repeat[0], x) for x in range(self.max_shard_size)] self.put_objects(obj_names) # these two object names will fall at start of first shard range... 
alpha = 'alpha-%s' % repeat[0] beta = 'beta-%s' % repeat[0] # Enable sharding client.post_container( self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) # sanity check self.assert_container_listing(obj_names) # Only run the one in charge of scanning self.sharders.once( number=self.brain.node_numbers[0], additional_args='--partitions=%s' % self.brain.part) # check root container root_nodes_data = self.direct_get_container_shard_ranges() self.assertEqual(3, len(root_nodes_data)) # nodes on which sharder has not run are still in unsharded state # but have had shard ranges replicated to them exp_obj_count = len(obj_names) exp_hdrs = {'X-Backend-Sharding-State': 'unsharded', 'X-Container-Object-Count': str(exp_obj_count)} node_id = self.brain.node_numbers[1] - 1 check_node_data( root_nodes_data[node_id], exp_hdrs, exp_obj_count, 2) node_id = self.brain.node_numbers[2] - 1 check_node_data( root_nodes_data[node_id], exp_hdrs, exp_obj_count, 2) # only one that ran sharder is in sharded state exp_hdrs['X-Backend-Sharding-State'] = 'sharded' node_id = self.brain.node_numbers[0] - 1 check_node_data( root_nodes_data[node_id], exp_hdrs, exp_obj_count, 2) orig_range_data = root_nodes_data[node_id][1] orig_shard_ranges = [ShardRange.from_dict(r) for r in orig_range_data] # check first shard shard_nodes_data = self.direct_get_container_shard_ranges( orig_shard_ranges[0].account, orig_shard_ranges[0].container) obj_count, bytes_used = check_shard_nodes_data(shard_nodes_data) total_shard_object_count = obj_count # check second shard shard_nodes_data = self.direct_get_container_shard_ranges( orig_shard_ranges[1].account, orig_shard_ranges[1].container) obj_count, bytes_used = check_shard_nodes_data(shard_nodes_data) total_shard_object_count += obj_count self.assertEqual(exp_obj_count, total_shard_object_count) # Now that everyone has shard ranges, run *everyone* self.sharders.once( additional_args='--partitions=%s' % self.brain.part) # all root container nodes should now be in sharded state root_nodes_data = self.direct_get_container_shard_ranges() self.assertEqual(3, len(root_nodes_data)) for node_id, node_data in root_nodes_data.items(): with annotate_failure('Node id %s.' 
% node_id): check_node_data(node_data, exp_hdrs, exp_obj_count, 2) # run updaters to update .sharded account; shard containers have # not updated account since having objects replicated to them self.updaters.once() shard_cont_count, shard_obj_count = int_client.get_account_info( orig_shard_ranges[0].account, [204]) self.assertEqual(2 * repeat[0], shard_cont_count) # the shards account should always have zero object count to avoid # double accounting self.assertEqual(0, shard_obj_count) # checking the listing also refreshes proxy container info cache so # that the proxy becomes aware that container is sharded and will # now look up the shard target for subsequent updates self.assert_container_listing(obj_names) # Before writing, kill the cache self.memcache.delete(get_cache_key( self.account, self.container_name, shard='updating')) # delete objects from first shard range first_shard_objects = [obj_name for obj_name in obj_names if obj_name <= orig_shard_ranges[0].upper] for obj in first_shard_objects: client.delete_object( self.url, self.token, self.container_name, obj) with self.assertRaises(ClientException): client.get_object( self.url, self.token, self.container_name, obj) second_shard_objects = [obj_name for obj_name in obj_names if obj_name > orig_shard_ranges[1].lower] self.assert_container_listing(second_shard_objects) # put a new object 'alpha' in first shard range self.put_objects([alpha]) second_shard_objects = [obj_name for obj_name in obj_names if obj_name > orig_shard_ranges[1].lower] self.assert_container_listing([alpha] + second_shard_objects) # while container servers are down, but proxy has container info in # cache from recent listing, put another object; this update will # lurk in async pending until the updaters run again; because all # the root container servers are down and therefore cannot respond # to a GET for a redirect target, the object update will default to # being targeted at the root container self.stop_container_servers() # Before writing, kill the cache self.memcache.delete(get_cache_key( self.account, self.container_name, shard='updating')) self.put_objects([beta]) self.brain.servers.start() async_pendings = self.gather_async_pendings( self.get_all_object_nodes()) num_container_replicas = len(self.brain.nodes) num_obj_replicas = self.policy.object_ring.replica_count expected_num_updates = num_container_updates( num_container_replicas, quorum_size(num_container_replicas), num_obj_replicas, self.policy.quorum) expected_num_pendings = min(expected_num_updates, num_obj_replicas) # sanity check with annotate_failure('policy %s. ' % self.policy): self.assertLengthEqual(async_pendings, expected_num_pendings) # root object count is not updated... self.assert_container_object_count(len(obj_names)) self.assert_container_listing([alpha] + second_shard_objects) root_nodes_data = self.direct_get_container_shard_ranges() self.assertEqual(3, len(root_nodes_data)) for node_id, node_data in root_nodes_data.items(): with annotate_failure('Node id %s.' 
% node_id): check_node_data(node_data, exp_hdrs, exp_obj_count, 2) range_data = node_data[1] self.assert_shard_range_lists_equal( orig_range_data, range_data, excludes=['meta_timestamp', 'state_timestamp']) # ...until the sharders run and update root; reclaim tombstones so # that the shard is shrinkable shard_0_part = self.get_part_and_node_numbers( orig_shard_ranges[0])[0] for conf_index in self.configs['container-sharder'].keys(): self.run_custom_sharder(conf_index, {'reclaim_age': 0}, override_partitions=[shard_0_part]) exp_obj_count = len(second_shard_objects) + 1 self.assert_container_object_count(exp_obj_count) self.assert_container_listing([alpha] + second_shard_objects) # root sharder finds donor, acceptor pair and pushes changes self.sharders.once( additional_args='--partitions=%s' % self.brain.part) self.assert_container_listing([alpha] + second_shard_objects) # run sharder on donor to shrink and replicate to acceptor self.run_sharders(orig_shard_ranges[0]) self.assert_container_listing([alpha] + second_shard_objects) # run sharder on acceptor to update root with stats self.run_sharders(orig_shard_ranges[1]) self.assert_container_listing([alpha] + second_shard_objects) self.assert_container_object_count(len(second_shard_objects) + 1) # check root container root_nodes_data = self.direct_get_container_shard_ranges() self.assertEqual(3, len(root_nodes_data)) exp_hdrs['X-Container-Object-Count'] = str(exp_obj_count) for node_id, node_data in root_nodes_data.items(): with annotate_failure('Node id %s.' % node_id): # NB now only *one* shard range in root check_node_data(node_data, exp_hdrs, exp_obj_count, 1) # the acceptor shard is intact.. shard_nodes_data = self.direct_get_container_shard_ranges( orig_shard_ranges[1].account, orig_shard_ranges[1].container) obj_count, bytes_used = check_shard_nodes_data(shard_nodes_data) # all objects should now be in this shard self.assertEqual(exp_obj_count, obj_count) # the donor shard is also still intact donor = orig_shard_ranges[0] shard_nodes_data = self.direct_get_container_shard_ranges( donor.account, donor.container) # the donor's shard range will have the acceptor's projected stats; # donor also has copy of root shard range that will be ignored; # note: expected_shards does not include the sharded root range obj_count, bytes_used = check_shard_nodes_data( shard_nodes_data, expected_state='sharded', expected_shards=1, exp_obj_count=len(second_shard_objects) + 1, exp_sharded_root_range=True) # but the donor is empty and so reports zero stats self.assertEqual(0, obj_count) self.assertEqual(0, bytes_used) # check the donor own shard range state part, nodes = self.brain.ring.get_nodes( donor.account, donor.container) for node in nodes: with annotate_failure(node): broker = self.get_broker( part, node, donor.account, donor.container) own_sr = broker.get_own_shard_range() self.assertEqual(ShardRange.SHRUNK, own_sr.state) self.assertTrue(own_sr.deleted) # delete all the second shard's object apart from 'alpha' for obj in second_shard_objects: client.delete_object( self.url, self.token, self.container_name, obj) self.assert_container_listing([alpha]) # run sharders: second range should not shrink away yet because it # has tombstones self.sharders.once() # second shard updates root stats self.assert_container_listing([alpha]) self.sharders.once() # root finds shrinkable shard self.assert_container_listing([alpha]) self.sharders.once() # shards shrink themselves self.assert_container_listing([alpha]) # the acceptor shard is intact... 
shard_nodes_data = self.direct_get_container_shard_ranges( orig_shard_ranges[1].account, orig_shard_ranges[1].container) obj_count, bytes_used = check_shard_nodes_data(shard_nodes_data) self.assertEqual(1, obj_count) # run sharders to reclaim tombstones so that the second shard is # shrinkable shard_1_part = self.get_part_and_node_numbers( orig_shard_ranges[1])[0] for conf_index in self.configs['container-sharder'].keys(): self.run_custom_sharder(conf_index, {'reclaim_age': 0}, override_partitions=[shard_1_part]) self.assert_container_listing([alpha]) # run sharders so second range shrinks away, requires up to 2 # cycles self.sharders.once() # root finds shrinkable shard self.assert_container_listing([alpha]) self.sharders.once() # shards shrink themselves self.assert_container_listing([alpha]) # the second shard range has sharded and is empty shard_nodes_data = self.direct_get_container_shard_ranges( orig_shard_ranges[1].account, orig_shard_ranges[1].container) check_shard_nodes_data( shard_nodes_data, expected_state='sharded', expected_shards=1, exp_obj_count=1) # check root container root_nodes_data = self.direct_get_container_shard_ranges() self.assertEqual(3, len(root_nodes_data)) exp_hdrs = {'X-Backend-Sharding-State': 'collapsed', # just the alpha object 'X-Container-Object-Count': '1'} for node_id, node_data in root_nodes_data.items(): with annotate_failure('Node id %s.' % node_id): # NB now no shard ranges in root check_node_data(node_data, exp_hdrs, 0, 0) # delete the alpha object client.delete_object( self.url, self.token, self.container_name, alpha) # should now be able to delete the *apparently* empty container client.delete_container(self.url, self.token, self.container_name) self.assert_container_not_found() self.direct_head_container(expect_failure=True) # and the container stays deleted even after sharders run and shard # send updates self.sharders.once() self.assert_container_not_found() self.direct_head_container(expect_failure=True) # now run updaters to deal with the async pending for the beta # object self.updaters.once() # and the container is revived! 
self.assert_container_listing([beta]) # finally, clear out the container client.delete_object( self.url, self.token, self.container_name, beta) do_shard_then_shrink() # repeat from starting point of a collapsed and previously deleted # container do_shard_then_shrink() def test_delete_root_reclaim(self): all_obj_names = self._make_object_names(self.max_shard_size) self.put_objects(all_obj_names) # Shard the container client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) for n in self.brain.node_numbers: self.sharders.once( number=n, additional_args='--partitions=%s' % self.brain.part) # sanity checks for node in self.brain.nodes: self.assert_container_state(node, 'sharded', 2) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') self.assert_container_listing(all_obj_names) # delete all objects - updates redirected to shards self.delete_objects(all_obj_names) self.assert_container_listing([]) self.assert_container_post_ok('has objects') # root not yet updated with shard stats self.assert_container_object_count(len(all_obj_names)) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() # run sharder on shard containers to update root stats shard_ranges = self.get_container_shard_ranges() self.assertLengthEqual(shard_ranges, 2) self.run_sharders(shard_ranges) self.assert_container_listing([]) self.assert_container_post_ok('empty') self.assert_container_object_count(0) # and now we can delete it! client.delete_container(self.url, self.token, self.container_name) self.assert_container_post_fails('deleted') self.assert_container_not_found() # see if it will reclaim Manager(['container-updater']).once() for conf_file in self.configs['container-replicator'].values(): conf = utils.readconf(conf_file, 'container-replicator') conf['reclaim_age'] = 0 ContainerReplicator(conf).run_once() # we don't expect warnings from sharder root audits for conf_index in self.configs['container-sharder'].keys(): sharder = self.run_custom_sharder(conf_index, {}) self.assertEqual([], sharder.logger.get_lines_for_level('warning')) # until the root wants to start reclaiming but we haven't shrunk yet! found_warning = False for conf_index in self.configs['container-sharder'].keys(): sharder = self.run_custom_sharder(conf_index, {'reclaim_age': 0}) warnings = sharder.logger.get_lines_for_level('warning') if warnings: self.assertTrue(warnings[0].startswith( 'Reclaimable db stuck waiting for shrinking')) self.assertEqual(1, len(warnings)) found_warning = True self.assertTrue(found_warning) # TODO: shrink empty shards and assert everything reclaims def _setup_replication_scenario(self, num_shards, extra_objs=('alpha',)): # Get cluster to state where 2 replicas are sharding or sharded but 3rd # replica is unsharded and has an object that the first 2 are missing. 
# put objects while all servers are up obj_names = self._make_object_names( num_shards * self.max_shard_size // 2) self.put_objects(obj_names) client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) node_numbers = self.brain.node_numbers # run replicators first time to get sync points set self.replicators.once() # stop the leader node and one other server self.stop_container_servers(slice(0, 2)) # ...then put one more object in first shard range namespace self.put_objects(extra_objs) # start leader and first other server, stop third server for number in node_numbers[:2]: self.brain.servers.start(number=number) self.brain.servers.stop(number=node_numbers[2]) self.assert_container_listing(obj_names) # sanity check # shard the container - first two shard ranges are cleaved for number in node_numbers[:2]: self.sharders.once( number=number, additional_args='--partitions=%s' % self.brain.part) self.assert_container_listing(obj_names) # sanity check return obj_names def test_replication_to_sharding_container(self): # verify that replication from an unsharded replica to a sharding # replica does not replicate rows but does replicate shard ranges obj_names = self._setup_replication_scenario(3) for node in self.brain.nodes[:2]: self.assert_container_state(node, 'sharding', 3) # bring third server back up, run replicator node_numbers = self.brain.node_numbers self.brain.servers.start(number=node_numbers[2]) # sanity check... self.assert_container_state(self.brain.nodes[2], 'unsharded', 0) self.replicators.once(number=node_numbers[2]) # check db files unchanged found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 2) self.assertLengthEqual(found['normal_dbs'], 3) # the 'alpha' object is NOT replicated to the two sharded nodes for node in self.brain.nodes[:2]: broker = self.get_broker(self.brain.part, node) with annotate_failure( 'Node id %s in %s' % (node['id'], self.brain.nodes[:2])): self.assertFalse(broker.get_objects()) self.assert_container_state(node, 'sharding', 3) self.brain.servers.stop(number=node_numbers[2]) self.assert_container_listing(obj_names) # all nodes now have shard ranges self.brain.servers.start(number=node_numbers[2]) node_data = self.direct_get_container_shard_ranges() for node, (hdrs, shard_ranges) in node_data.items(): with annotate_failure(node): self.assert_shard_ranges_contiguous(3, shard_ranges) # complete cleaving third shard range on first two nodes self.brain.servers.stop(number=node_numbers[2]) for number in node_numbers[:2]: self.sharders.once( number=number, additional_args='--partitions=%s' % self.brain.part) # ...and now they are in sharded state self.assert_container_state(self.brain.nodes[0], 'sharded', 3) self.assert_container_state(self.brain.nodes[1], 'sharded', 3) # ...still no 'alpha' object in listing self.assert_container_listing(obj_names) # run the sharder on the third server, alpha object is included in # shards that it cleaves self.brain.servers.start(number=node_numbers[2]) self.assert_container_state(self.brain.nodes[2], 'unsharded', 3) self.sharders.once(number=node_numbers[2], additional_args='--partitions=%s' % self.brain.part) self.assert_container_state(self.brain.nodes[2], 'sharding', 3) self.sharders.once(number=node_numbers[2], additional_args='--partitions=%s' % self.brain.part) self.assert_container_state(self.brain.nodes[2], 'sharded', 3) self.assert_container_listing(['alpha'] + obj_names) def test_replication_to_sharded_container(self): # verify 
that replication from an unsharded replica to a sharded # replica does not replicate rows but does replicate shard ranges obj_names = self._setup_replication_scenario(2) for node in self.brain.nodes[:2]: self.assert_container_state(node, 'sharded', 2) # sanity check found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 2) self.assertLengthEqual(found['normal_dbs'], 1) for node in self.brain.nodes[:2]: broker = self.get_broker(self.brain.part, node) info = broker.get_info() with annotate_failure( 'Node id %s in %s' % (node['id'], self.brain.nodes[:2])): self.assertEqual(len(obj_names), info['object_count']) self.assertFalse(broker.get_objects()) # bring third server back up, run replicator node_numbers = self.brain.node_numbers self.brain.servers.start(number=node_numbers[2]) # sanity check... self.assert_container_state(self.brain.nodes[2], 'unsharded', 0) self.replicators.once(number=node_numbers[2]) # check db files unchanged found = self.categorize_container_dir_content() self.assertLengthEqual(found['shard_dbs'], 2) self.assertLengthEqual(found['normal_dbs'], 1) # the 'alpha' object is NOT replicated to the two sharded nodes for node in self.brain.nodes[:2]: broker = self.get_broker(self.brain.part, node) with annotate_failure( 'Node id %s in %s' % (node['id'], self.brain.nodes[:2])): self.assertFalse(broker.get_objects()) self.assert_container_state(node, 'sharded', 2) self.brain.servers.stop(number=node_numbers[2]) self.assert_container_listing(obj_names) # all nodes now have shard ranges self.brain.servers.start(number=node_numbers[2]) node_data = self.direct_get_container_shard_ranges() for node, (hdrs, shard_ranges) in node_data.items(): with annotate_failure(node): self.assert_shard_ranges_contiguous(2, shard_ranges) # run the sharder on the third server, alpha object is included in # shards that it cleaves self.assert_container_state(self.brain.nodes[2], 'unsharded', 2) self.sharders.once(number=node_numbers[2], additional_args='--partitions=%s' % self.brain.part) self.assert_container_state(self.brain.nodes[2], 'sharded', 2) self.assert_container_listing(['alpha'] + obj_names) def test_sharding_requires_sufficient_replication(self): # verify that cleaving only progresses if each cleaved shard range is # sufficiently replicated # put enough objects for 4 shard ranges obj_names = self._make_object_names(2 * self.max_shard_size) self.put_objects(obj_names) client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) node_numbers = self.brain.node_numbers leader_node = self.brain.nodes[0] leader_num = node_numbers[0] # run replicators first time to get sync points set self.replicators.once() # start sharding on the leader node self.sharders.once(number=leader_num, additional_args='--partitions=%s' % self.brain.part) shard_ranges = self.assert_container_state(leader_node, 'sharding', 4) self.assertEqual([ShardRange.CLEAVED] * 2 + [ShardRange.CREATED] * 2, [sr.state for sr in shard_ranges]) # Check the current progress. It shouldn't be complete. 
recon = direct_client.direct_get_recon(leader_node, "sharding") expected_in_progress = {'all': [{'account': 'AUTH_test', 'active': 0, 'cleaved': 2, 'created': 2, 'found': 0, 'db_state': 'sharding', 'state': 'sharding', 'error': None, 'file_size': mock.ANY, 'meta_timestamp': mock.ANY, 'node_index': 0, 'object_count': len(obj_names), 'container': mock.ANY, 'path': mock.ANY, 'root': mock.ANY}]} actual = recon['sharding_stats']['sharding']['sharding_in_progress'] self.assertEqual(expected_in_progress, actual) # stop *all* container servers for third shard range sr_part, sr_node_nums = self.get_part_and_node_numbers(shard_ranges[2]) for node_num in sr_node_nums: self.brain.servers.stop(number=node_num) # attempt to continue sharding on the leader node self.sharders.once(number=leader_num, additional_args='--partitions=%s' % self.brain.part) # no cleaving progress was made for node_num in sr_node_nums: self.brain.servers.start(number=node_num) shard_ranges = self.assert_container_state(leader_node, 'sharding', 4) self.assertEqual([ShardRange.CLEAVED] * 2 + [ShardRange.CREATED] * 2, [sr.state for sr in shard_ranges]) # stop two of the servers for third shard range, not including any # server that happens to be the leader node stopped = [] for node_num in sr_node_nums: if node_num != leader_num: self.brain.servers.stop(number=node_num) stopped.append(node_num) if len(stopped) >= 2: break self.assertLengthEqual(stopped, 2) # sanity check # attempt to continue sharding on the leader node self.sharders.once(number=leader_num, additional_args='--partitions=%s' % self.brain.part) # no cleaving progress was made for node_num in stopped: self.brain.servers.start(number=node_num) shard_ranges = self.assert_container_state(leader_node, 'sharding', 4) self.assertEqual([ShardRange.CLEAVED] * 2 + [ShardRange.CREATED] * 2, [sr.state for sr in shard_ranges]) # stop just one of the servers for third shard range stopped = [] for node_num in sr_node_nums: if node_num != leader_num: self.brain.servers.stop(number=node_num) stopped.append(node_num) break self.assertLengthEqual(stopped, 1) # sanity check # attempt to continue sharding the container self.sharders.once(number=leader_num, additional_args='--partitions=%s' % self.brain.part) # this time cleaving completed self.brain.servers.start(number=stopped[0]) shard_ranges = self.assert_container_state(leader_node, 'sharded', 4) self.assertEqual([ShardRange.ACTIVE] * 4, [sr.state for sr in shard_ranges]) # Check the leader's progress again, this time is should be complete recon = direct_client.direct_get_recon(leader_node, "sharding") expected_in_progress = {'all': [{'account': 'AUTH_test', 'active': 4, 'cleaved': 0, 'created': 0, 'found': 0, 'db_state': 'sharded', 'state': 'sharded', 'error': None, 'file_size': mock.ANY, 'meta_timestamp': mock.ANY, 'node_index': 0, 'object_count': len(obj_names), 'container': mock.ANY, 'path': mock.ANY, 'root': mock.ANY}]} actual = recon['sharding_stats']['sharding']['sharding_in_progress'] self.assertEqual(expected_in_progress, actual) def test_sharded_delete(self): all_obj_names = self._make_object_names(self.max_shard_size) self.put_objects(all_obj_names) # Shard the container client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) for n in self.brain.node_numbers: self.sharders.once( number=n, additional_args='--partitions=%s' % self.brain.part) # sanity checks for node in self.brain.nodes: self.assert_container_state(node, 'sharded', 2) self.assert_container_delete_fails() 
self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') self.assert_container_listing(all_obj_names) # delete all objects - updates redirected to shards self.delete_objects(all_obj_names) self.assert_container_listing([]) self.assert_container_post_ok('has objects') # root not yet updated with shard stats self.assert_container_object_count(len(all_obj_names)) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() # run sharder on shard containers to update root stats shard_ranges = self.get_container_shard_ranges() self.assertLengthEqual(shard_ranges, 2) self.run_sharders(shard_ranges) self.assert_container_listing([]) self.assert_container_post_ok('empty') self.assert_container_object_count(0) # put a new object - update redirected to shard self.put_objects(['alpha']) self.assert_container_listing(['alpha']) self.assert_container_object_count(0) # before root learns about new object in shard, delete the container client.delete_container(self.url, self.token, self.container_name) self.assert_container_post_fails('deleted') self.assert_container_not_found() # run the sharders to update root with shard stats self.run_sharders(shard_ranges) self.assert_container_listing(['alpha']) self.assert_container_object_count(1) self.assert_container_delete_fails() self.assert_container_post_ok('revived') def test_object_update_redirection(self): all_obj_names = self._make_object_names(self.max_shard_size) self.put_objects(all_obj_names) # Shard the container client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) for n in self.brain.node_numbers: self.sharders.once( number=n, additional_args='--partitions=%s' % self.brain.part) # sanity checks for node in self.brain.nodes: self.assert_container_state(node, 'sharded', 2) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') self.assert_container_listing(all_obj_names) # delete all objects - updates redirected to shards self.delete_objects(all_obj_names) self.assert_container_listing([]) self.assert_container_post_ok('has objects') # run sharder on shard containers to update root stats; reclaim # the tombstones so that the shards appear to be shrinkable shard_ranges = self.get_container_shard_ranges() self.assertLengthEqual(shard_ranges, 2) shard_partitions = [self.get_part_and_node_numbers(sr)[0] for sr in shard_ranges] for conf_index in self.configs['container-sharder'].keys(): self.run_custom_sharder(conf_index, {'reclaim_age': 0}, override_partitions=shard_partitions) self.assert_container_object_count(0) # First, test a misplaced object moving from one shard to another. # with one shard server down, put a new 'alpha' object... 
shard_part, shard_nodes = self.get_part_and_node_numbers( shard_ranges[0]) self.brain.servers.stop(number=shard_nodes[2]) self.put_objects(['alpha']) self.assert_container_listing(['alpha']) self.assert_container_object_count(0) self.assertLengthEqual( self.gather_async_pendings(self.get_all_object_nodes()), 1) self.brain.servers.start(number=shard_nodes[2]) # run sharder on root to discover first shrink candidate self.sharders.once(additional_args='--partitions=%s' % self.brain.part) # then run sharder on the shard node without the alpha object self.sharders.once(additional_args='--partitions=%s' % shard_part, number=shard_nodes[2]) # root sees first shard has shrunk self.assertLengthEqual(self.get_container_shard_ranges(), 1) # cached shard ranges still show first shard range as active so listing # will include 'alpha' if the shard listing is fetched from node (0,1) # but not if fetched from node 2; to achieve predictability we use # x-newest to use shard ranges from the root so that only the second # shard range is used for listing, so alpha object not in listing self.assert_container_listing([], req_hdrs={'x-newest': 'true'}) self.assert_container_object_count(0) # run the updaters: the async pending update will be redirected from # shrunk shard to second shard self.updaters.once() self.assert_container_listing(['alpha']) self.assert_container_object_count(0) # root not yet updated # then run sharder on other shard nodes to complete shrinking for number in shard_nodes[:2]: self.sharders.once(additional_args='--partitions=%s' % shard_part, number=number) # and get root updated self.run_sharders(shard_ranges[1]) self.assert_container_listing(['alpha']) self.assert_container_object_count(1) self.assertLengthEqual(self.get_container_shard_ranges(), 1) # Now we have just one active shard, test a misplaced object moving # from that shard to the root. # with one shard server down, delete 'alpha' and put a 'beta' object... 
shard_part, shard_nodes = self.get_part_and_node_numbers( shard_ranges[1]) self.brain.servers.stop(number=shard_nodes[2]) # Before writing, kill the cache self.memcache.delete(get_cache_key( self.account, self.container_name, shard='updating')) self.delete_objects(['alpha']) self.put_objects(['beta']) self.assert_container_listing(['beta']) self.assert_container_object_count(1) self.assertLengthEqual( self.gather_async_pendings(self.get_all_object_nodes()), 2) self.brain.servers.start(number=shard_nodes[2]) # run sharder on root to discover second shrink candidate - root is not # yet aware of the beta object self.sharders.once(additional_args='--partitions=%s' % self.brain.part) # then run sharder on the shard node without the beta object, to shrink # it to root - note this moves stale copy of alpha to the root db self.sharders.once(additional_args='--partitions=%s' % shard_part, number=shard_nodes[2]) # now there are no active shards self.assertFalse(self.get_container_shard_ranges()) # with other two shard servers down, listing won't find beta object for number in shard_nodes[:2]: self.brain.servers.stop(number=number) self.assert_container_listing(['alpha']) self.assert_container_object_count(1) # run the updaters: the async pending update will be redirected from # shrunk shard to the root self.updaters.once() self.assert_container_listing(['beta']) self.assert_container_object_count(1) def test_misplaced_object_movement(self): def merge_object(shard_range, name, deleted=0): # it's hard to get a test to put a misplaced object into a shard, # so this hack is used force an object record directly into a shard # container db. Note: the actual object won't exist, we're just # using this to test object records in container dbs. shard_part, shard_nodes = self.brain.ring.get_nodes( shard_range.account, shard_range.container) shard_broker = self.get_broker( shard_part, shard_nodes[0], shard_range.account, shard_range.container) shard_broker.merge_items( [{'name': name, 'created_at': Timestamp.now().internal, 'size': 0, 'content_type': 'text/plain', 'etag': md5(usedforsecurity=False).hexdigest(), 'deleted': deleted, 'storage_policy_index': shard_broker.storage_policy_index}]) return shard_nodes[0] all_obj_names = self._make_object_names(self.max_shard_size) self.put_objects(all_obj_names) # Shard the container client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) for n in self.brain.node_numbers: self.sharders.once( number=n, additional_args='--partitions=%s' % self.brain.part) # sanity checks for node in self.brain.nodes: self.assert_container_state(node, 'sharded', 2) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') self.assert_container_listing(all_obj_names) # delete all objects in first shard range - updates redirected to shard shard_ranges = self.get_container_shard_ranges() self.assertLengthEqual(shard_ranges, 2) shard_0_objects = [name for name in all_obj_names if name in shard_ranges[0]] shard_1_objects = [name for name in all_obj_names if name in shard_ranges[1]] self.delete_objects(shard_0_objects) self.assert_container_listing(shard_1_objects) self.assert_container_post_ok('has objects') # run sharder on first shard container to update root stats; reclaim # the tombstones so that the shard appears to be shrinkable shard_0_part = self.get_part_and_node_numbers(shard_ranges[0])[0] for conf_index in self.configs['container-sharder'].keys(): 
self.run_custom_sharder(conf_index, {'reclaim_age': 0}, override_partitions=[shard_0_part]) self.assert_container_object_count(len(shard_1_objects)) # First, test a misplaced object moving from one shard to another. # run sharder on root to discover first shrink candidate self.sharders.once(additional_args='--partitions=%s' % self.brain.part) # then run sharder on first shard range to shrink it self.run_sharders(shard_ranges[0]) # force a misplaced object into the shrunken shard range to simulate # a client put that was in flight when it started to shrink misplaced_node = merge_object(shard_ranges[0], 'alpha', deleted=0) # root sees first shard has shrunk, only second shard range used for # listing so alpha object not in listing self.assertLengthEqual(self.get_container_shard_ranges(), 1) self.assert_container_listing(shard_1_objects) self.assert_container_object_count(len(shard_1_objects)) # until sharder runs on that node to move the misplaced object to the # second shard range shard_part, shard_nodes_numbers = self.get_part_and_node_numbers( shard_ranges[0]) self.sharders.once(additional_args='--partitions=%s' % shard_part, number=misplaced_node['id'] + 1) self.assert_container_listing(['alpha'] + shard_1_objects) # root not yet updated self.assert_container_object_count(len(shard_1_objects)) # run sharder to get root updated self.run_sharders(shard_ranges[1]) self.assert_container_listing(['alpha'] + shard_1_objects) self.assert_container_object_count(len(shard_1_objects) + 1) self.assertLengthEqual(self.get_container_shard_ranges(), 1) # Now we have just one active shard, test a misplaced object moving # from that shard to the root. # delete most objects from second shard range, reclaim the tombstones, # and run sharder on root to discover second shrink candidate self.delete_objects(shard_1_objects) shard_1_part = self.get_part_and_node_numbers(shard_ranges[1])[0] for conf_index in self.configs['container-sharder'].keys(): self.run_custom_sharder(conf_index, {'reclaim_age': 0}, override_partitions=[shard_1_part]) self.sharders.once(additional_args='--partitions=%s' % self.brain.part) # then run sharder on the shard node to shrink it to root - note this # moves alpha to the root db self.run_sharders(shard_ranges[1]) # now there are no active shards self.assertFalse(self.get_container_shard_ranges()) # force some misplaced object updates into second shrunk shard range merge_object(shard_ranges[1], 'alpha', deleted=1) misplaced_node = merge_object(shard_ranges[1], 'beta', deleted=0) # root is not yet aware of them self.assert_container_listing(['alpha']) self.assert_container_object_count(1) # until sharder runs on that node to move the misplaced object shard_part, shard_nodes_numbers = self.get_part_and_node_numbers( shard_ranges[1]) self.sharders.once(additional_args='--partitions=%s' % shard_part, number=misplaced_node['id'] + 1) self.assert_container_listing(['beta']) self.assert_container_object_count(1) self.assert_container_delete_fails() def test_misplaced_object_movement_from_deleted_shard(self): def merge_object(shard_range, name, deleted=0): # it's hard to get a test to put a misplaced object into a shard, # so this hack is used force an object record directly into a shard # container db. Note: the actual object won't exist, we're just # using this to test object records in container dbs. 
shard_part, shard_nodes = self.brain.ring.get_nodes( shard_range.account, shard_range.container) shard_broker = self.get_shard_broker(shard_range) # In this test we want to merge into a deleted container shard shard_broker.delete_db(Timestamp.now().internal) shard_broker.merge_items( [{'name': name, 'created_at': Timestamp.now().internal, 'size': 0, 'content_type': 'text/plain', 'etag': md5(usedforsecurity=False).hexdigest(), 'deleted': deleted, 'storage_policy_index': shard_broker.storage_policy_index}]) return shard_nodes[0] all_obj_names = self._make_object_names(self.max_shard_size) self.put_objects(all_obj_names) # Shard the container client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) for n in self.brain.node_numbers: self.sharders.once( number=n, additional_args='--partitions=%s' % self.brain.part) # sanity checks for node in self.brain.nodes: self.assert_container_state(node, 'sharded', 2) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') self.assert_container_listing(all_obj_names) # delete all objects in first shard range - updates redirected to shard shard_ranges = self.get_container_shard_ranges() self.assertLengthEqual(shard_ranges, 2) shard_0_objects = [name for name in all_obj_names if name in shard_ranges[0]] shard_1_objects = [name for name in all_obj_names if name in shard_ranges[1]] self.delete_objects(shard_0_objects) self.assert_container_listing(shard_1_objects) self.assert_container_post_ok('has objects') # run sharder on first shard container to update root stats shard_0_part = self.get_part_and_node_numbers(shard_ranges[0])[0] for conf_index in self.configs['container-sharder'].keys(): self.run_custom_sharder(conf_index, {'reclaim_age': 0}, override_partitions=[shard_0_part]) self.assert_container_object_count(len(shard_1_objects)) # First, test a misplaced object moving from one shard to another. 
# run sharder on root to discover first shrink candidate self.sharders.once(additional_args='--partitions=%s' % self.brain.part) # then run sharder on first shard range to shrink it self.run_sharders(shard_ranges[0]) # force a misplaced object into the shrunken shard range to simulate # a client put that was in flight when it started to shrink misplaced_node = merge_object(shard_ranges[0], 'alpha', deleted=0) # root sees first shard has shrunk, only second shard range used for # listing so alpha object not in listing self.assertLengthEqual(self.get_container_shard_ranges(), 1) self.assert_container_listing(shard_1_objects) self.assert_container_object_count(len(shard_1_objects)) # until sharder runs on that node to move the misplaced object to the # second shard range shard_part, shard_nodes_numbers = self.get_part_and_node_numbers( shard_ranges[0]) self.sharders.once(additional_args='--partitions=%s' % shard_part, number=misplaced_node['id'] + 1) self.assert_container_listing(['alpha'] + shard_1_objects) # root not yet updated self.assert_container_object_count(len(shard_1_objects)) # check the deleted shard did not push the wrong root path into the # other container for replica in 0, 1, 2: shard_x_broker = self.get_shard_broker(shard_ranges[1], replica) self.assertEqual("%s/%s" % (self.account, self.container_name), shard_x_broker.root_path) # run the sharder of the existing shard to update the root stats # to prove the misplaced object was moved to the other shard _and_ # the other shard still has the correct root because it updates root's # stats self.run_sharders(shard_ranges[1]) self.assert_container_object_count(len(shard_1_objects) + 1) def test_replication_to_sharded_container_from_unsharded_old_primary(self): primary_ids = [n['id'] for n in self.brain.nodes] handoff_node = next(n for n in self.brain.ring.devs if n['id'] not in primary_ids) # start with two sharded replicas and one unsharded with extra object obj_names = self._setup_replication_scenario(2) for node in self.brain.nodes[:2]: self.assert_container_state(node, 'sharded', 2) # Fake a ring change - copy unsharded db which has no shard ranges to a # handoff to create illusion of a new unpopulated primary node node_numbers = self.brain.node_numbers new_primary_node = self.brain.nodes[2] new_primary_node_number = node_numbers[2] new_primary_dir, container_hash = self.get_storage_dir( self.brain.part, new_primary_node) old_primary_dir, container_hash = self.get_storage_dir( self.brain.part, handoff_node) utils.mkdirs(os.path.dirname(old_primary_dir)) shutil.move(new_primary_dir, old_primary_dir) # make the cluster more or less "healthy" again self.brain.servers.start(number=new_primary_node_number) # get a db on every node... client.put_container(self.url, self.token, self.container_name) self.assertTrue(os.path.exists(os.path.join( new_primary_dir, container_hash + '.db'))) found = self.categorize_container_dir_content() self.assertLengthEqual(found['normal_dbs'], 1) # "new" primary self.assertLengthEqual(found['shard_dbs'], 2) # existing primaries # catastrophic failure! drive dies and is replaced on unchanged primary failed_node = self.brain.nodes[0] failed_dir, _container_hash = self.get_storage_dir( self.brain.part, failed_node) shutil.rmtree(failed_dir) # replicate the "old primary" to everybody except the "new primary" self.brain.servers.stop(number=new_primary_node_number) self.replicators.once(number=handoff_node['id'] + 1) # We're willing to rsync the retiring db to the failed primary. 
# This may or may not have shard ranges, depending on the order in # which we hit the primaries, but it definitely *doesn't* have an # epoch in its name yet. All objects are replicated. self.assertTrue(os.path.exists(os.path.join( failed_dir, container_hash + '.db'))) self.assertLengthEqual(os.listdir(failed_dir), 1) broker = self.get_broker(self.brain.part, failed_node) self.assertLengthEqual(broker.get_objects(), len(obj_names) + 1) # The other out-of-date primary is within usync range but objects are # not replicated to it because the handoff db learns about shard ranges broker = self.get_broker(self.brain.part, self.brain.nodes[1]) self.assertLengthEqual(broker.get_objects(), 0) # Handoff db still exists and now has shard ranges! self.assertTrue(os.path.exists(os.path.join( old_primary_dir, container_hash + '.db'))) broker = self.get_broker(self.brain.part, handoff_node) shard_ranges = broker.get_shard_ranges() self.assertLengthEqual(shard_ranges, 2) self.assert_container_state(handoff_node, 'unsharded', 2) # Replicate again, this time *including* "new primary" self.brain.servers.start(number=new_primary_node_number) self.replicators.once(number=handoff_node['id'] + 1) # Ordinarily, we would have rsync_then_merge'd to "new primary" # but instead we wait broker = self.get_broker(self.brain.part, new_primary_node) self.assertLengthEqual(broker.get_objects(), 0) shard_ranges = broker.get_shard_ranges() self.assertLengthEqual(shard_ranges, 2) # so the next time the sharder comes along, it can push rows out # and delete the big db self.sharders.once(number=handoff_node['id'] + 1, additional_args='--partitions=%s' % self.brain.part) self.assert_container_state(handoff_node, 'sharded', 2) self.assertFalse(os.path.exists(os.path.join( old_primary_dir, container_hash + '.db'))) # the sharded db hangs around until replication confirms durability # first attempt is not sufficiently successful self.brain.servers.stop(number=node_numbers[0]) self.replicators.once(number=handoff_node['id'] + 1) self.assertTrue(os.path.exists(old_primary_dir)) self.assert_container_state(handoff_node, 'sharded', 2) # second attempt is successful and handoff db is deleted self.brain.servers.start(number=node_numbers[0]) self.replicators.once(number=handoff_node['id'] + 1) self.assertFalse(os.path.exists(old_primary_dir)) # run all the sharders, get us into a consistent state self.sharders.once(additional_args='--partitions=%s' % self.brain.part) self.assert_container_listing(['alpha'] + obj_names) def test_replication_to_empty_new_primary_from_sharding_old_primary(self): primary_ids = [n['id'] for n in self.brain.nodes] handoff_node = next(n for n in self.brain.ring.devs if n['id'] not in primary_ids) num_shards = 3 obj_names = self._make_object_names( num_shards * self.max_shard_size // 2) self.put_objects(obj_names) client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) # run replicators first time to get sync points set self.replicators.once() # start sharding on only the leader node leader_node = self.brain.nodes[0] leader_node_number = self.brain.node_numbers[0] self.sharders.once(number=leader_node_number) self.assert_container_state(leader_node, 'sharding', 3) for node in self.brain.nodes[1:]: self.assert_container_state(node, 'unsharded', 3) # Fake a ring change - copy leader node db to a handoff to create # illusion of a new unpopulated primary leader node new_primary_dir, container_hash = self.get_storage_dir( self.brain.part, leader_node) 
old_primary_dir, container_hash = self.get_storage_dir( self.brain.part, handoff_node) utils.mkdirs(os.path.dirname(old_primary_dir)) shutil.move(new_primary_dir, old_primary_dir) self.assert_container_state(handoff_node, 'sharding', 3) # run replicator on handoff node to create a fresh db on new primary self.assertFalse(os.path.exists(new_primary_dir)) self.replicators.once(number=handoff_node['id'] + 1) self.assertTrue(os.path.exists(new_primary_dir)) self.assert_container_state(leader_node, 'sharded', 3) broker = self.get_broker(self.brain.part, leader_node) shard_ranges = broker.get_shard_ranges() self.assertLengthEqual(shard_ranges, 3) self.assertEqual( [ShardRange.CLEAVED, ShardRange.CLEAVED, ShardRange.CREATED], [sr.state for sr in shard_ranges]) # db still exists on handoff self.assertTrue(os.path.exists(old_primary_dir)) self.assert_container_state(handoff_node, 'sharding', 3) # continue sharding it... self.sharders.once(number=handoff_node['id'] + 1) self.assert_container_state(leader_node, 'sharded', 3) # now handoff is fully sharded the replicator will delete it self.replicators.once(number=handoff_node['id'] + 1) self.assertFalse(os.path.exists(old_primary_dir)) # all primaries now have active shard ranges but only one is in sharded # state self.assert_container_state(leader_node, 'sharded', 3) for node in self.brain.nodes[1:]: self.assert_container_state(node, 'unsharded', 3) node_data = self.direct_get_container_shard_ranges() for node_id, (hdrs, shard_ranges) in node_data.items(): with annotate_failure( 'node id %s from %s' % (node_id, node_data.keys)): self.assert_shard_range_state(ShardRange.ACTIVE, shard_ranges) # check handoff cleaved all objects before it was deleted - stop all # but leader node so that listing is fetched from shards for number in self.brain.node_numbers[1:3]: self.brain.servers.stop(number=number) self.assert_container_listing(obj_names) for number in self.brain.node_numbers[1:3]: self.brain.servers.start(number=number) self.sharders.once() self.assert_container_state(leader_node, 'sharded', 3) for node in self.brain.nodes[1:]: self.assert_container_state(node, 'sharding', 3) self.sharders.once() for node in self.brain.nodes: self.assert_container_state(node, 'sharded', 3) self.assert_container_listing(obj_names) def test_sharded_account_updates(self): # verify that .shards account updates have zero object count and bytes # to avoid double accounting all_obj_names = self._make_object_names(self.max_shard_size) self.put_objects(all_obj_names, contents='xyz') # Shard the container into 2 shards client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) for n in self.brain.node_numbers: self.sharders.once( number=n, additional_args='--partitions=%s' % self.brain.part) # sanity checks for node in self.brain.nodes: shard_ranges = self.assert_container_state(node, 'sharded', 2) self.assert_container_delete_fails() self.assert_container_has_shard_sysmeta() self.assert_container_post_ok('sharded') self.assert_container_listing(all_obj_names) # run the updaters to get account stats updated self.updaters.once() # check user account stats metadata = self.internal_client.get_account_metadata(self.account) self.assertEqual(1, int(metadata.get('x-account-container-count'))) self.assertEqual(self.max_shard_size, int(metadata.get('x-account-object-count'))) self.assertEqual(3 * self.max_shard_size, int(metadata.get('x-account-bytes-used'))) # check hidden .shards account stats metadata = 
self.internal_client.get_account_metadata( shard_ranges[0].account) self.assertEqual(2, int(metadata.get('x-account-container-count'))) self.assertEqual(0, int(metadata.get('x-account-object-count'))) self.assertEqual(0, int(metadata.get('x-account-bytes-used'))) class TestContainerShardingMoreUTF8(TestContainerSharding): def _make_object_names(self, number): # override default with names that include non-ascii chars name_length = self.cluster_info['swift']['max_object_name_length'] obj_names = [] for x in range(number): name = (u'obj-\u00e4\u00ea\u00ec\u00f2\u00fb-%04d' % x) name = name.encode('utf8').ljust(name_length, b'o') if not six.PY2: name = name.decode('utf8') obj_names.append(name) return obj_names def _setup_container_name(self): # override default with max length name that includes non-ascii chars super(TestContainerShardingMoreUTF8, self)._setup_container_name() name_length = self.cluster_info['swift']['max_container_name_length'] cont_name = \ self.container_name + u'-\u00e4\u00ea\u00ec\u00f2\u00fb\u1234' self.container_name = cont_name.encode('utf8').ljust(name_length, b'x') if not six.PY2: self.container_name = self.container_name.decode('utf8') class TestManagedContainerSharding(BaseTestContainerSharding): '''Test sharding using swift-manage-shard-ranges''' def sharders_once(self, **kwargs): # inhibit auto_sharding regardless of the config setting additional_args = kwargs.get('additional_args', []) if not isinstance(additional_args, list): additional_args = [additional_args] additional_args.append('--no-auto-shard') kwargs['additional_args'] = additional_args self.sharders.once(**kwargs) def test_manage_shard_ranges(self): obj_names = self._make_object_names(7) self.put_objects(obj_names) client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) # run replicators first time to get sync points set self.replicators.once() # sanity check: we don't have nearly enough objects for this to shard # automatically self.sharders_once(number=self.brain.node_numbers[0], additional_args='--partitions=%s' % self.brain.part) self.assert_container_state(self.brain.nodes[0], 'unsharded', 0) self.assert_subprocess_success([ 'swift-manage-shard-ranges', self.get_db_file(self.brain.part, self.brain.nodes[0]), 'find_and_replace', '3', '--enable', '--minimum-shard-size', '2']) self.assert_container_state(self.brain.nodes[0], 'unsharded', 2) # "Run container-replicator to replicate them to other nodes." self.replicators.once() # "Run container-sharder on all nodes to shard the container." 
self.sharders_once(additional_args='--partitions=%s' % self.brain.part) # Everybody's settled self.assert_container_state(self.brain.nodes[0], 'sharded', 2) self.assert_container_state(self.brain.nodes[1], 'sharded', 2) self.assert_container_state(self.brain.nodes[2], 'sharded', 2) self.assert_container_listing(obj_names) def test_manage_shard_ranges_compact(self): # verify shard range compaction using swift-manage-shard-ranges obj_names = self._make_object_names(8) self.put_objects(obj_names) client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) # run replicators first time to get sync points set, and get container # sharded into 4 shards self.replicators.once() self.assert_subprocess_success([ 'swift-manage-shard-ranges', self.get_db_file(self.brain.part, self.brain.nodes[0]), 'find_and_replace', '2', '--enable']) self.assert_container_state(self.brain.nodes[0], 'unsharded', 4) self.replicators.once() # run sharders twice to cleave all 4 shard ranges self.sharders_once(additional_args='--partitions=%s' % self.brain.part) self.sharders_once(additional_args='--partitions=%s' % self.brain.part) self.assert_container_state(self.brain.nodes[0], 'sharded', 4) self.assert_container_state(self.brain.nodes[1], 'sharded', 4) self.assert_container_state(self.brain.nodes[2], 'sharded', 4) self.assert_container_listing(obj_names) # now compact some ranges; use --max-shrinking to allow 2 shrinking # shards self.assert_subprocess_success([ 'swift-manage-shard-ranges', self.get_db_file(self.brain.part, self.brain.nodes[0]), 'compact', '--max-expanding', '1', '--max-shrinking', '2', '--yes']) shard_ranges = self.assert_container_state( self.brain.nodes[0], 'sharded', 4) self.assertEqual([ShardRange.SHRINKING] * 2 + [ShardRange.ACTIVE] * 2, [sr.state for sr in shard_ranges]) self.replicators.once() self.sharders_once() # check there's now just 2 remaining shard ranges shard_ranges = self.assert_container_state( self.brain.nodes[0], 'sharded', 2) self.assertEqual([ShardRange.ACTIVE] * 2, [sr.state for sr in shard_ranges]) self.assert_container_listing(obj_names, req_hdrs={'X-Newest': 'True'}) # root container own shard range should still be SHARDED for i, node in enumerate(self.brain.nodes): with annotate_failure('node[%d]' % i): broker = self.get_broker(self.brain.part, self.brain.nodes[0]) self.assertEqual(ShardRange.SHARDED, broker.get_own_shard_range().state) # now compact the final two shard ranges to the root; use # --max-shrinking to allow 2 shrinking shards self.assert_subprocess_success([ 'swift-manage-shard-ranges', self.get_db_file(self.brain.part, self.brain.nodes[0]), 'compact', '--yes', '--max-shrinking', '2']) shard_ranges = self.assert_container_state( self.brain.nodes[0], 'sharded', 2) self.assertEqual([ShardRange.SHRINKING] * 2, [sr.state for sr in shard_ranges]) self.replicators.once() self.sharders_once() self.assert_container_state(self.brain.nodes[0], 'collapsed', 0) self.assert_container_listing(obj_names, req_hdrs={'X-Newest': 'True'}) # root container own shard range should now be ACTIVE for i, node in enumerate(self.brain.nodes): with annotate_failure('node[%d]' % i): broker = self.get_broker(self.brain.part, self.brain.nodes[0]) self.assertEqual(ShardRange.ACTIVE, broker.get_own_shard_range().state) def test_manage_shard_ranges_repair_root(self): # provoke overlaps in root container and repair obj_names = self._make_object_names(16) self.put_objects(obj_names) client.post_container(self.url, self.admin_token, 
self.container_name, headers={'X-Container-Sharding': 'on'}) # run replicators first time to get sync points set self.replicators.once() # find 4 shard ranges on nodes[0] - let's denote these ranges 0.0, 0.1, # 0.2 and 0.3 that are installed with epoch_0 self.assert_subprocess_success([ 'swift-manage-shard-ranges', self.get_db_file(self.brain.part, self.brain.nodes[0]), 'find_and_replace', '4', '--enable']) shard_ranges_0 = self.assert_container_state(self.brain.nodes[0], 'unsharded', 4) # *Also* go find 3 shard ranges on *another node*, like a dumb-dumb - # let's denote these ranges 1.0, 1.1 and 1.2 that are installed with # epoch_1 self.assert_subprocess_success([ 'swift-manage-shard-ranges', self.get_db_file(self.brain.part, self.brain.nodes[1]), 'find_and_replace', '7', '--enable']) shard_ranges_1 = self.assert_container_state(self.brain.nodes[1], 'unsharded', 3) # Run sharder in specific order so that the replica with the older # epoch_0 starts sharding first - this will prove problematic later! # On first pass the first replica passes audit, creates shards and then # syncs shard ranges with the other replicas, so it has a mix of 0.* # shard ranges in CLEAVED state and 1.* ranges in FOUND state. It # proceeds to cleave shard 0.0, but after 0.0 cleaving stalls because # next in iteration is shard range 1.0 in FOUND state from the other # replica that it cannot yet cleave. self.sharders_once(number=self.brain.node_numbers[0], additional_args='--partitions=%s' % self.brain.part) # On first pass the second replica passes audit (it has its own found # ranges and the first replica's created shard ranges but none in the # same state overlap), creates its shards and then syncs shard ranges # with the other replicas. All of the 7 shard ranges on this replica # are now in CREATED state so it proceeds to cleave the first two shard # ranges, 0.1 and 1.0. self.sharders_once(number=self.brain.node_numbers[1], additional_args='--partitions=%s' % self.brain.part) self.replicators.once() # Uh-oh self.assert_container_state(self.brain.nodes[0], 'sharding', 7) self.assert_container_state(self.brain.nodes[1], 'sharding', 7) # There's a race: the third replica may be sharding, may be unsharded # Try it again a few times self.sharders_once(additional_args='--partitions=%s' % self.brain.part) self.replicators.once() self.sharders_once(additional_args='--partitions=%s' % self.brain.part) # It's not really fixing itself... the sharder audit will detect # overlapping ranges which prevents cleaving proceeding; expect the # shard ranges to be mostly still in created state, with one or two # possibly cleaved during first pass before the sharding got stalled shard_ranges = self.assert_container_state(self.brain.nodes[0], 'sharding', 7) self.assertEqual([ShardRange.CLEAVED] * 2 + [ShardRange.CREATED] * 5, [sr.state for sr in shard_ranges]) shard_ranges = self.assert_container_state(self.brain.nodes[1], 'sharding', 7) self.assertEqual([ShardRange.CLEAVED] * 2 + [ShardRange.CREATED] * 5, [sr.state for sr in shard_ranges]) # But hey, at least listings still work! They're just going to get # horribly out of date as more objects are added self.assert_container_listing(obj_names) # 'swift-manage-shard-ranges repair' will choose the second set of 3 # shard ranges (1.*) over the first set of 4 (0.*) because that's the # path with most cleaving progress, and so shrink shard ranges 0.*. 
db_file = self.get_db_file(self.brain.part, self.brain.nodes[0]) self.assert_subprocess_success( ['swift-manage-shard-ranges', db_file, 'repair', '--yes']) # make sure all root replicas now sync their shard ranges self.replicators.once() # Run sharder on the shrinking shards. This should not change the state # of any of the acceptors, particularly the ones that have yet to have # object cleaved from the roots, because we don't want the as yet # uncleaved acceptors becoming prematurely active and creating 'holes' # in listings. The shrinking shard ranges should however get deleted in # root container table. self.run_sharders(shard_ranges_0) shard_ranges = self.assert_container_state(self.brain.nodes[1], 'sharding', 3) self.assertEqual([ShardRange.CLEAVED] * 1 + [ShardRange.CREATED] * 2, [sr.state for sr in shard_ranges]) self.assert_container_listing(obj_names) # check the unwanted shards did shrink away... for shard_range in shard_ranges_0: with annotate_failure(shard_range): found_for_shard = self.categorize_container_dir_content( shard_range.account, shard_range.container) self.assertLengthEqual(found_for_shard['shard_dbs'], 3) actual = [] for shard_db in found_for_shard['shard_dbs']: broker = ContainerBroker(shard_db) own_sr = broker.get_own_shard_range() actual.append( (broker.get_db_state(), own_sr.state, own_sr.deleted)) self.assertEqual([(SHARDED, ShardRange.SHRUNK, True)] * 3, actual) # At this point one of the first two replicas may have done some useful # cleaving of 1.* shards, the other may have only cleaved 0.* shards, # and the third replica may have cleaved no shards. We therefore need # two more passes of the sharder to get to a predictable state where # all replicas have cleaved all three 0.* shards. self.sharders_once() self.sharders_once() # now we expect all replicas to have just the three 1.* shards, with # the 0.* shards all deleted brokers = {} exp_shard_ranges = sorted( [sr.copy(state=ShardRange.SHRUNK, deleted=True) for sr in shard_ranges_0] + [sr.copy(state=ShardRange.ACTIVE) for sr in shard_ranges_1], key=ShardRange.sort_key) for node in (0, 1, 2): with annotate_failure('node %s' % node): broker = self.get_broker(self.brain.part, self.brain.nodes[node]) brokers[node] = broker shard_ranges = broker.get_shard_ranges() self.assertEqual(shard_ranges_1, shard_ranges) shard_ranges = broker.get_shard_ranges(include_deleted=True) self.assertLengthEqual(shard_ranges, len(exp_shard_ranges)) self.maxDiff = None self.assertEqual(exp_shard_ranges, shard_ranges) self.assertEqual(ShardRange.SHARDED, broker._own_shard_range().state) # Sadly, the first replica to start sharding is still reporting its db # state to be 'unsharded' because, although it has sharded, its shard # db epoch (epoch_0) does not match its own shard range epoch # (epoch_1), and that is because the second replica (with epoch_1) # updated the own shard range and replicated it to all other replicas. # If we had run the sharder on the second replica before the first # replica, then by the time the first replica started sharding it would # have learnt the newer epoch_1 and we wouldn't see this inconsistency. 
self.assertEqual(UNSHARDED, brokers[0].get_db_state()) self.assertEqual(SHARDED, brokers[1].get_db_state()) self.assertEqual(SHARDED, brokers[2].get_db_state()) epoch_1 = brokers[1].db_epoch self.assertEqual(epoch_1, brokers[2].db_epoch) self.assertLess(brokers[0].db_epoch, epoch_1) # the root replica that thinks it is unsharded is problematic - it will # not return shard ranges for listings, but has no objects, so it's # luck of the draw whether we get a listing or not at this point :( # Run the sharders again: the first replica that is still 'unsharded' # because of the older epoch_0 in its db filename will now start to # shard again with a newer epoch_1 db, and will start to re-cleave the # 3 active shards, albeit with zero objects to cleave. self.sharders_once() for node in (0, 1, 2): with annotate_failure('node %s' % node): broker = self.get_broker(self.brain.part, self.brain.nodes[node]) brokers[node] = broker shard_ranges = broker.get_shard_ranges() self.assertEqual(shard_ranges_1, shard_ranges) shard_ranges = broker.get_shard_ranges(include_deleted=True) self.assertLengthEqual(shard_ranges, len(exp_shard_ranges)) self.assertEqual(exp_shard_ranges, shard_ranges) self.assertEqual(ShardRange.SHARDED, broker._own_shard_range().state) self.assertEqual(epoch_1, broker.db_epoch) self.assertIn(brokers[0].get_db_state(), (SHARDING, SHARDED)) self.assertEqual(SHARDED, brokers[1].get_db_state()) self.assertEqual(SHARDED, brokers[2].get_db_state()) # This cycle of the sharders also guarantees that all shards have had # their state updated to ACTIVE from the root; this was not necessarily # true at end of the previous sharder pass because a shard audit (when # the shard is updated from a root) may have happened before all roots # have had their shard ranges transitioned to ACTIVE. for shard_range in shard_ranges_1: with annotate_failure(shard_range): found_for_shard = self.categorize_container_dir_content( shard_range.account, shard_range.container) self.assertLengthEqual(found_for_shard['normal_dbs'], 3) actual = [] for shard_db in found_for_shard['normal_dbs']: broker = ContainerBroker(shard_db) own_sr = broker.get_own_shard_range() actual.append( (broker.get_db_state(), own_sr.state, own_sr.deleted)) self.assertEqual([(UNSHARDED, ShardRange.ACTIVE, False)] * 3, actual) # We may need one more pass of the sharder before all three shard # ranges are cleaved (2 per pass) and all the root replicas are # predictably in sharded state. Note: the accelerated cleaving of >2 # zero-object shard ranges per cycle is defeated if a shard happens # to exist on the same node as the root because the roots cleaving # process doesn't think that it created the shard db and will therefore # replicate it as per a normal cleave. 
self.sharders_once() for node in (0, 1, 2): with annotate_failure('node %s' % node): broker = self.get_broker(self.brain.part, self.brain.nodes[node]) brokers[node] = broker shard_ranges = broker.get_shard_ranges() self.assertEqual(shard_ranges_1, shard_ranges) shard_ranges = broker.get_shard_ranges(include_deleted=True) self.assertLengthEqual(shard_ranges, len(exp_shard_ranges)) self.assertEqual(exp_shard_ranges, shard_ranges) self.assertEqual(ShardRange.SHARDED, broker._own_shard_range().state) self.assertEqual(epoch_1, broker.db_epoch) self.assertEqual(SHARDED, broker.get_db_state()) # Finally, with all root replicas in a consistent state, the listing # will be predictably correct self.assert_container_listing(obj_names) def test_manage_shard_ranges_repair_shard(self): # provoke overlaps in a shard container and repair them obj_names = self._make_object_names(24) initial_obj_names = obj_names[::2] # put 12 objects in container self.put_objects(initial_obj_names) client.post_container(self.url, self.admin_token, self.container_name, headers={'X-Container-Sharding': 'on'}) # run replicators first time to get sync points set self.replicators.once() # find 3 shard ranges on root nodes[0] and get the root sharded self.assert_subprocess_success([ 'swift-manage-shard-ranges', self.get_db_file(self.brain.part, self.brain.nodes[0]), 'find_and_replace', '4', '--enable']) self.replicators.once() # cleave first two shards self.sharders_once(additional_args='--partitions=%s' % self.brain.part) # cleave third shard self.sharders_once(additional_args='--partitions=%s' % self.brain.part) # ensure all shards learn their ACTIVE state from root self.sharders_once() for node in (0, 1, 2): with annotate_failure('node %d' % node): shard_ranges = self.assert_container_state( self.brain.nodes[node], 'sharded', 3) for sr in shard_ranges: self.assertEqual(ShardRange.ACTIVE, sr.state) self.assert_container_listing(initial_obj_names) # add objects to second shard range so it has 8 objects; this range # has bounds (obj-0006,obj-0014] root_shard_ranges = self.get_container_shard_ranges() self.assertEqual(3, len(root_shard_ranges)) shard_1 = root_shard_ranges[1] self.assertEqual(obj_names[6], shard_1.lower) self.assertEqual(obj_names[14], shard_1.upper) more_obj_names = obj_names[7:15:2] self.put_objects(more_obj_names) expected_obj_names = sorted(initial_obj_names + more_obj_names) self.assert_container_listing(expected_obj_names) shard_1_part, shard_1_nodes = self.brain.ring.get_nodes( shard_1.account, shard_1.container) # find 3 sub-shards on one shard node; use --force-commits to ensure # the recently PUT objects are included when finding the shard range # pivot points self.assert_subprocess_success([ 'swift-manage-shard-ranges', '--force-commits', self.get_db_file(shard_1_part, shard_1_nodes[1], shard_1.account, shard_1.container), 'find_and_replace', '3', '--enable']) # ...
and mistakenly find 4 shard ranges on a different shard node :( self.assert_subprocess_success([ 'swift-manage-shard-ranges', '--force-commits', self.get_db_file(shard_1_part, shard_1_nodes[2], shard_1.account, shard_1.container), 'find_and_replace', '2', '--enable']) # replicate the muddle of shard ranges between shard replicas, merged # result is: # '' - 6 shard ACTIVE # 6 - 8 sub-shard FOUND # 6 - 9 sub-shard FOUND # 8 - 10 sub-shard FOUND # 9 - 12 sub-shard FOUND # 10 - 12 sub-shard FOUND # 12 - 14 sub-shard FOUND # 12 - 14 sub-shard FOUND # 6 - 14 shard SHARDING # 14 - '' shard ACTIVE self.replicators.once() # try hard to shard the shard... self.sharders_once(additional_args='--partitions=%s' % shard_1_part) self.sharders_once(additional_args='--partitions=%s' % shard_1_part) self.sharders_once(additional_args='--partitions=%s' % shard_1_part) # sharding hasn't completed and there's overlaps in the shard and root: # the sub-shards will have been cleaved in the order listed above, but # sub-shards (10 -12) and one of (12 - 14) will be overlooked because # the cleave cursor will have moved past their namespace before they # were yielded by the shard range iterator, so we now have: # '' - 6 shard ACTIVE # 6 - 8 sub-shard ACTIVE # 6 - 9 sub-shard ACTIVE # 8 - 10 sub-shard ACTIVE # 10 - 12 sub-shard CREATED # 9 - 12 sub-shard ACTIVE # 12 - 14 sub-shard CREATED # 12 - 14 sub-shard ACTIVE # 14 - '' shard ACTIVE sub_shard_ranges = self.get_container_shard_ranges( shard_1.account, shard_1.container) self.assertEqual(7, len(sub_shard_ranges), sub_shard_ranges) root_shard_ranges = self.get_container_shard_ranges() self.assertEqual(9, len(root_shard_ranges), root_shard_ranges) self.assertEqual([ShardRange.ACTIVE] * 4 + [ShardRange.CREATED, ShardRange.ACTIVE] * 2 + [ShardRange.ACTIVE], [sr.state for sr in root_shard_ranges]) # fix the overlaps - a set of 3 ACTIVE sub-shards will be chosen and 4 # other sub-shards will be shrunk away; apply the fix at the root # container db_file = self.get_db_file(self.brain.part, self.brain.nodes[0]) self.assert_subprocess_success( ['swift-manage-shard-ranges', db_file, 'repair', '--yes']) self.replicators.once() self.sharders_once() self.sharders_once() # check root now has just 5 shard ranges root_shard_ranges = self.get_container_shard_ranges() self.assertEqual(5, len(root_shard_ranges), root_shard_ranges) self.assertEqual([ShardRange.ACTIVE] * 5, [sr.state for sr in root_shard_ranges]) # check there are 1 sharded shard and 4 shrunk sub-shard ranges in the # root (note, shard_1's shard ranges aren't updated once it has sharded # because the sub-shards report their state to the root; we cannot make # assertions about shrunk states in shard_1's shard range table) root_shard_ranges = self.get_container_shard_ranges( include_deleted=True) self.assertEqual(10, len(root_shard_ranges), root_shard_ranges) shrunk_shard_ranges = [sr for sr in root_shard_ranges if sr.state == ShardRange.SHRUNK] self.assertEqual(4, len(shrunk_shard_ranges), root_shard_ranges) self.assertEqual([True] * 4, [sr.deleted for sr in shrunk_shard_ranges]) sharded_shard_ranges = [sr for sr in root_shard_ranges if sr.state == ShardRange.SHARDED] self.assertEqual(1, len(sharded_shard_ranges), root_shard_ranges) self.assert_container_listing(expected_obj_names)
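# ---------------------------------------------------------------------------
# Editor's note (hedged sketch, not part of the probe tests above): the
# TestManagedContainerSharding tests repeatedly drive the same
# swift-manage-shard-ranges workflow - 'find_and_replace ... --enable' to
# install shard ranges in one replica's DB, replication to spread them, the
# sharders to cleave, and 'repair --yes' when overlapping ranges have been
# created on different replicas. The helper below merely restates that CLI
# sequence with subprocess for illustration; db_file and rows_per_shard are
# placeholder arguments, and only subcommands/flags that already appear in
# the tests are used.
# ---------------------------------------------------------------------------
import subprocess

def install_and_repair_shard_ranges(db_file, rows_per_shard=4):
    # install shard ranges of ~rows_per_shard objects each and enable
    # sharding, mirroring the 'find_and_replace', '<n>', '--enable' calls
    # made via assert_subprocess_success in the tests above
    subprocess.check_call([
        'swift-manage-shard-ranges', db_file,
        'find_and_replace', str(rows_per_shard), '--enable'])
    # fix any overlapping shard ranges without prompting, mirroring the
    # 'repair', '--yes' calls used after overlaps are provoked
    subprocess.check_call([
        'swift-manage-shard-ranges', db_file, 'repair', '--yes'])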
#!/usr/bin/env python ######################################################################### # Author: Andy Ohlin (debian.user@gmx.com) # Modified by: Andrew Palmer (palmer@embl.de) # Artem Tarasov (lomereiter@gmail.com) # # Example usage: # pyisocalc('Fe(ClO3)5',plot=false,gauss=0.25,charge=-2,resolution=250) # Do "pyisocalc('--help') to find out more # ########################################################################## ver='0.2 (5 Sep. 2015)' # Version 0.2 -- Modified from v0.8 of Andy Ohlin's code # # Dependencies: # python2.7, python-numpy, python-matplotlib # pyms-mass_spectrum # # Isotopic abundances and masses were copied from Wsearch32. # Elemental oxidation states were mainly # taken from Matthew Monroe's molecular weight calculator, with some changes. ######################################################################### import re #for regular expressions import sys import time #for code execution analysis import numpy as np from numpy import shape,asarray,prod,zeros,repeat #for cartesian product from numpy import random,histogram # for binning from numpy import pi,sqrt,exp,true_divide,multiply # misc math functions from numpy import linspace #for gaussian from numpy import copysign from itertools import groupby, imap import operator from ..mass_spectrum import mass_spectrum as MassSpectrum from ..centroid_detection import gradient # values updated on 16/12/2015 from http://www.ciaaw.org/isotopic-abundances.htm / http://www.ciaaw.org/atomic-masses.htm PeriodicTable ={ 'H': [1, 1, [1.007825032, 2.014101778], [0.999855, 0.000145]], 'He': [2, 0, [3.01602932, 4.002603254], [0.000002, 0.999998]], 'Li': [3, 1, [6.015122887, 7.01600344], [0.0485, 0.9515]], 'Be': [4, 2, [9.0121831], [1.0]], 'B': [5, 3, [10.012937, 11.009305], [0.1965, 0.8035]], 'C': [6, -4, [12, 13.00335484], [0.9894, 0.0106]], 'N': [7, 5, [14.003074, 15.0001089], [0.996205, 0.003795]], 'O': [8, -2, [15.99491462, 16.99913176, 17.99915961], [0.99757, 0.0003835, 0.002045]], 'F': [9, -1, [18.99840316], [1.0]], 'Ne': [10, 0, [19.99244018, 20.9938467, 21.9913851], [0.9048, 0.0027, 0.0925]], 'Na': [11, 1, [22.98976928], [1.0]], 'Mg': [12, 2, [23.9850417, 24.985837, 25.982593], [0.78965, 0.10011, 0.11025]], 'Al': [13, 3, [26.9815385], [1.0]], 'Si': [14, 4, [27.97692654, 28.97649467, 29.97377001], [0.922545, 0.04672, 0.030735]], 'P': [15, 5, [30.973762], [1.0]], 'S': [16, -2, [31.97207117, 32.97145891, 33.967867, 35.967081], [0.9485, 0.00763, 0.04365, 0.000158]], 'Cl': [17, -1, [34.9688527, 36.9659026], [0.758, 0.242]], 'Ar': [18, 0, [35.9675451, 37.962732, 39.96238312], [0.003336, 0.000629, 0.996035]], 'K': [19, 1, [38.96370649, 39.9639982, 40.96182526], [0.932581, 0.000117, 0.067302]], 'Ca': [20, 2, [39.9625909, 41.958618, 42.958766, 43.955482, 45.95369, 47.9525228], [0.96941, 0.00647, 0.00135, 0.02086, 0.00004, 0.00187]], 'Sc': [21, 3, [44.955908], [1.0]], 'Ti': [22, 4, [45.952628, 46.951759, 47.947942, 48.947866, 49.944787], [0.0825, 0.0744, 0.7372, 0.0541, 0.0518]], 'V': [23, 5, [49.947156, 50.943957], [0.0025, 0.9975]], 'Cr': [24, 2, [49.946042, 51.940506, 52.940648, 53.938879], [0.04345, 0.83789, 0.09501, 0.02365]], 'Mn': [25, 2, [54.938044], [1.0]], 'Fe': [26, 3, [53.939609, 55.934936, 56.935393, 57.933274], [0.05845, 0.91754, 0.02119, 0.00282]], 'Co': [27, 2, [58.933194], [1.0]], 'Ni': [28, 2, [57.935342, 59.930786, 60.931056, 61.928345, 63.927967], [0.680769, 0.262231, 0.011399, 0.036345, 0.009256]], 'Cu': [29, 2, [62.929598, 64.92779], [0.6915, 0.3085]], 'Zn': [30, 2, [63.929142, 65.926034, 66.927128, 
67.924845, 69.92532], [0.4917, 0.2773, 0.0404, 0.1845, 0.0061]], 'Ga': [31, 3, [68.925574, 70.924703], [0.60108, 0.39892]], 'Ge': [32, 2, [69.924249, 71.9220758, 72.923459, 73.92117776, 75.9214027], [0.2052, 0.2745, 0.0776, 0.3652, 0.0775]], 'As': [33, 3, [74.921595], [1.0]], 'Se': [34, 4, [73.9224759, 75.9192137, 76.9199142, 77.917309, 79.916522, 81.9167], [0.0086, 0.0923, 0.076, 0.2369, 0.498, 0.0882]], 'Br': [35, -1, [78.918338, 80.91629], [0.5065, 0.4935]], 'Kr': [36, 0, [77.920365, 79.916378, 81.913483, 82.914127, 83.91149773, 85.91061063], [0.00355, 0.02286, 0.11593, 0.115, 0.56987, 0.17279]], 'Rb': [37, 1, [84.91178974, 86.90918053], [0.7217, 0.2783]], 'Sr': [38, 2, [83.913419, 85.909261, 86.908878, 87.905613], [0.0056, 0.0986, 0.07, 0.8258]], 'Y': [39, 3, [88.90584], [1.0]], 'Zr': [40, 4, [89.9047, 90.90564, 91.90503, 93.90631, 95.90827], [0.5145, 0.1122, 0.1715, 0.1738, 0.028]], 'Nb': [41, 5, [92.90637], [1.0]], 'Mo': [42, 6, [91.906808, 93.905085, 94.905839, 95.904676, 96.906018, 97.905405, 99.907472], [0.14649, 0.09187, 0.15873, 0.16673, 0.09582, 0.24292, 0.09744]], 'Tc': [43, 2, [97.90721], [1.0]], 'Ru': [44, 3, [95.90759, 97.90529, 98.905934, 99.904214, 100.905577, 101.904344, 103.90543], [0.0554, 0.0187, 0.1276, 0.126, 0.1706, 0.3155, 0.1862]], 'Rh': [45, 2, [102.9055], [1.0]], 'Pd': [46, 2, [101.9056, 103.904031, 104.90508, 105.90348, 107.903892, 109.905172], [0.0102, 0.1114, 0.2233, 0.2733, 0.2646, 0.1172]], 'Ag': [47, 1, [106.90509, 108.904755], [0.51839, 0.48161]], 'Cd': [48, 2, [105.90646, 107.904183, 109.903007, 110.904183, 111.902763, 112.904408, 113.903365, 115.904763], [0.01245, 0.00888, 0.1247, 0.12795, 0.24109, 0.12227, 0.28754, 0.07512]], 'In': [49, 3, [112.904062, 114.9038788], [0.04281, 0.95719]], 'Sn': [50, 4, [111.904824, 113.902783, 114.9033447, 115.901743, 116.902954, 117.901607, 118.903311, 119.902202, 121.90344, 123.905277], [0.0097, 0.0066, 0.0034, 0.1454, 0.0768, 0.2422, 0.0859, 0.3258, 0.0463, 0.0579]], 'Sb': [51, 3, [120.90381, 122.90421], [0.5721, 0.4279]], 'Te': [52, 4, [119.90406, 121.90304, 122.90427, 123.90282, 124.90443, 125.90331, 127.904461, 129.9062228], [0.0009, 0.0255, 0.0089, 0.0474, 0.0707, 0.1884, 0.3174, 0.3408]], 'I': [53, -1, [126.90447], [1.0]], 'Xe': [54, 0, [123.90589, 125.9043, 127.903531, 128.9047809, 129.9035094, 130.905084, 131.9041551, 133.905395, 135.9072145], [0.00095, 0.00089, 0.0191, 0.26401, 0.04071, 0.21232, 0.26909, 0.10436, 0.08857]], 'Cs': [55, 1, [132.905452], [1.0]], 'Ba': [56, 2, [129.90632, 131.905061, 133.904508, 134.905688, 135.904576, 136.905827, 137.905247], [0.0011, 0.001, 0.0242, 0.0659, 0.0785, 0.1123, 0.717]], 'La': [57, 3, [137.90712, 138.90636], [0.0008881, 0.9991119]], 'Ce': [58, 3, [135.907129, 137.90599, 139.90544, 141.90925], [0.00186, 0.00251, 0.88449, 0.11114]], 'Pr': [59, 3, [140.90766], [1.0]], 'Nd': [60, 3, [141.90773, 142.90982, 143.91009, 144.91258, 145.91312, 147.9169, 149.9209], [0.27153, 0.12173, 0.23798, 0.08293, 0.17189, 0.05756, 0.05638]], 'Pm': [61, 3, [144.91276], [1.0]], 'Sm': [62, 3, [143.91201, 146.9149, 147.91483, 148.91719, 149.91728, 151.91974, 153.92222], [0.0308, 0.15, 0.1125, 0.1382, 0.0737, 0.2674, 0.2274]], 'Eu': [63, 3, [150.91986, 152.92124], [0.4781, 0.5219]], 'Gd': [64, 3, [151.9198, 153.92087, 154.92263, 155.92213, 156.92397, 157.92411, 159.92706], [0.002, 0.0218, 0.148, 0.2047, 0.1565, 0.2484, 0.2186]], 'Tb': [65, 4, [158.92535], [1.0]], 'Dy': [66, 3, [155.92428, 157.92442, 159.9252, 160.92694, 161.92681, 162.92874, 163.92918], [0.00056, 0.00095, 0.02329, 0.18889, 
0.25475, 0.24896, 0.2826]], 'Ho': [67, 3, [164.93033], [1.0]], 'Er': [68, 3, [161.92879, 163.92921, 165.9303, 166.93205, 167.93238, 169.93547], [0.00139, 0.01601, 0.33503, 0.22869, 0.26978, 0.1491]], 'Tm': [69, 3, [168.93422], [1.0]], 'Yb': [70, 3, [167.93389, 169.93477, 170.93633, 171.93639, 172.93822, 173.93887, 175.94258], [0.00126, 0.03023, 0.14216, 0.21754, 0.16098, 0.31896, 0.12887]], 'Lu': [71, 3, [174.94078, 175.94269], [0.97401, 0.02599]], 'Hf': [72, 4, [173.94005, 175.94141, 176.94323, 177.94371, 178.94582, 179.94656], [0.0016, 0.0526, 0.186, 0.2728, 0.1362, 0.3508]], 'Ta': [73, 5, [179.94746, 180.948], [0.0001201, 0.9998799]], 'W': [74, 6, [179.94671, 181.948204, 182.950223, 183.950931, 185.95436], [0.0012, 0.265, 0.1431, 0.3064, 0.2843]], 'Re': [75, 2, [184.952955, 186.95575], [0.374, 0.626]], 'Os': [76, 4, [183.952489, 185.95384, 186.95575, 187.95584, 188.95814, 189.95844, 191.96148], [0.0002, 0.0159, 0.0196, 0.1324, 0.1615, 0.2626, 0.4078]], 'Ir': [77, 4, [190.96059, 192.96292], [0.373, 0.627]], 'Pt': [78, 4, [189.95993, 191.96104, 193.962681, 194.964792, 195.964952, 197.96789], [0.00012, 0.00782, 0.32864, 0.33775, 0.25211, 0.07356]], 'Au': [79, 3, [196.966569], [1.0]], 'Hg': [80, 2, [195.96583, 197.966769, 198.968281, 199.968327, 200.970303, 201.970643, 203.973494], [0.0015, 0.1004, 0.1694, 0.2314, 0.1317, 0.2974, 0.0682]], 'Tl': [81, 1, [202.972345, 204.974428], [0.29515, 0.70485]], 'Pb': [82, 2, [203.973044, 205.974466, 206.975897, 207.976653], [0.014, 0.241, 0.221, 0.524]], 'Bi': [83, 3, [208.9804], [1.0]], 'Po': [84, 4, [209], [1.0]], 'At': [85, 7, [210], [1.0]], 'Rn': [86, 0, [222], [1.0]], 'Fr': [87, 1, [223], [1.0]], 'Ra': [88, 2, [226], [1.0]], 'Ac': [89, 3, [227], [1.0]], 'Th': [90, 4, [230.03313, 232.03806], [0.0002, 0.9998]], 'Pa': [91, 4, [231.03588], [1.0]], 'U': [92, 6, [234.04095, 235.04393, 238.05079], [0.000054 , 0.007204, 0.992742]], 'Ee':[0,0,[0.000548597],[1.0]]} from collections import namedtuple FormulaSegment = namedtuple('FormulaSegment', ['atom', 'number']) ####################################### # Collect properties ####################################### def getAverageMass(segment): masses, ratios = PeriodicTable[segment.atom][2:4] atomic_mass = np.dot(masses, ratios) return atomic_mass * segment.number def getCharge(segment): atomic_charge = PeriodicTable[segment.atom][1] return atomic_charge * segment.number ##################################################### # Iterate over expanded formula to collect property ##################################################### def getSegments(formula): segments = re.findall('([A-Z][a-z]*)([0-9]*)',formula) for atom, number in segments: number = int(number) if number else 1 yield FormulaSegment(atom, number) def molmass(formula): return sum(imap(getAverageMass, getSegments(formula))) def molcharge(formula): return sum(imap(getCharge, getSegments(formula))) ################################################################################ #expands ((((M)N)O)P)Q to M*N*O*P*Q ################################################################################ def formulaExpander(formula): while len(re.findall('\(\w*\)',formula))>0: parenthetical=re.findall('\(\w*\)[0-9]+',formula) for i in parenthetical: p=re.findall('[0-9]+',str(re.findall('\)[0-9]+',i))) j=re.findall('[A-Z][a-z]*[0-9]*',i) oldj=j for n in range(0,len(j)): numero=re.findall('[0-9]+',j[n]) if len(numero)!=0: for k in numero: nu=re.sub(k,str(int(int(k)*int(p[0]))),j[n]) else: nu=re.sub(j[n],j[n]+p[0],j[n]) j[n]=nu newphrase="" for m in j: 
newphrase+=str(m) formula=formula.replace(i,newphrase) if (len((re.findall('\(\w*\)[0-9]+',formula)))==0) and (len(re.findall('\(\w*\)',formula))!=0): formula=formula.replace('(','') formula=formula.replace(')','') lopoff=re.findall('[A-Z][a-z]*0',formula) if lopoff!=[]: formula=formula.replace(lopoff[0],'') return formula def singleElementPattern(segment, threshold=1e-9): # see 'Efficient Calculation of Exact Fine Structure Isotope Patterns via the # Multidimensional Fourier Transform' (A. Ipsen, 2014) element, amount = segment.atom, segment.number iso_mass, iso_abundance = map(np.array, PeriodicTable[element][2:4]) if len(iso_abundance) == 1: return np.array([1.0]), iso_mass * amount if amount == 1: return iso_abundance, iso_mass dim = len(iso_abundance) - 1 abundance = np.zeros([amount + 1] * dim) abundance.flat[0] = iso_abundance[0] abundance.flat[(amount+1)**np.arange(dim)] = iso_abundance[-1:0:-1] abundance = np.real(np.fft.ifftn(np.fft.fftn(abundance) ** amount)) significant = np.where(abundance > threshold) intensities = abundance[significant] masses = amount * iso_mass[0] + (iso_mass[1:] - iso_mass[0]).dot(significant) return intensities, masses def trim(ry, my): my, inv = np.unique(my, return_inverse=True) ry = np.bincount(inv, weights=ry) return ry, my def cartesian(rx, mx, cutoff): ry, my = asarray(rx[0]), asarray(mx[0]) for i in xrange(1, len(rx)): newr = np.outer(rx[i], ry).ravel() newm = np.add.outer(mx[i], my).ravel() js = np.where(newr > cutoff)[0] ry, my = newr[js], newm[js] return trim(ry, my) def isotopes(segments, cutoff): patterns = [singleElementPattern(x, cutoff) for x in segments] ratios = [x[0] for x in patterns] masses = [x[1] for x in patterns] return cartesian(ratios,masses,cutoff) ################################################################################## # Does housekeeping to generate final intensity ratios and puts it into a dictionary ################################################################################## def genDict(m,n,charges,cutoff): m, n = np.asarray(m).round(8), np.asarray(n).round(8) filter = n > cutoff m, n = m[filter], n[filter] n *= 100.0 / max(n) m -= charges * PeriodicTable['Ee'][2][0] if charges != 0: m /= abs(charges) return dict(zip(m, n)) def genGaussian(final,sigma, pts): mzs = np.array(final.keys()) intensities = np.array(final.values()) xvector = np.linspace(min(mzs)-1,max(mzs)+1,pts) yvector = intensities.dot(exp(-0.5 * (np.add.outer(mzs, -xvector)/sigma)**2)) yvector *= 100.0 / max(yvector) return (xvector,yvector) def mz(a,b,c): if c==0: c=b if b==0: c=1 mz=a/c return mz def checkhelpcall(sf): print_help = False exit = False if sf=='--help': exit = True if sf == '': print_help = True if print_help: print " " print "\t\tThis is pyisocalc, an isotopic pattern calculator written in python (2.x)." print "\t\tGet the latest version from http://sourceforge.net/p/pyisocalc" print "\t\tThis is version",ver print "\tUsage:" print "\t-h\t--help \tYou're looking at it." print "\t-f\t--formula\tFormula enclosed in apostrophes, e.g. 'Al2(NO3)4'." print "\t-c\t--charge\tCharge, e.g. -2 or 3. Must be an integer. If not provided the charge will be calculated" print "\t \t \tbased on default oxidation states as defined in this file." print "\t-o\t--output\tFilename to save data into. The data will be saved as a tab-separated file. No output by default." print "\t-p\t--plot \tWhether to plot or not. Can be yes, YES, Yes, Y, y. Default is no." print "\t-g\t--gauss \tGaussian broadening factor (affects resolution). Default is 0.35. 
Lower value gives higher resolution." print "\t \t \tAdjust this factor to make the spectrum look like the experimentally observed one." print "\t-r\t--resolution\tNumber of points to use for the m/z axis (affects resolution). Default is 500. Higher is slower." print " " print "\t Example:" print "\t./pyisocalc.py -f 'Fe(ClO3)5' -p y -g 0.25 -o ironperchlorate.dat -c -2 -r 250" print "" exit=True return exit def resolution2pts(min_x,max_x,resolution): # turn resolving power into ft pts # resolution = fwhm/max height # turn resolution in points per mz then multipy by mz range pts = resolution/1000 * (max(max_x-min_x,1)) return pts def checkoutput(output): save = True if output == '': save = False return save ######## # main function# ######## def isodist(molecules,charges=0,output='',plot=False,sigma=0.05,resolution=50000,cutoff=0.0001,do_centroid=True,verbose=False): #exit = checkhelpcall(molecules) #save = checkoutput(output) #if exit==True: # sys.exit(0) molecules=molecules.split(',') for element in molecules: element=formulaExpander(element) if verbose: print ('The mass of %(substance)s is %(Mass)f and the calculated charge is %(Charge)d with m/z of %(Mz)f.' % {'substance': \ element, 'Mass': molmass(element), 'Charge': molcharge(element),'Mz':mz(molmass(element),molcharge(element),charges)}) segments = list(getSegments(element)) if charges==None: charges=sum(getCharge(x) for x in segments) if verbose: print "Using user-supplied charge of %d for mass spectrum" % charges ratios, masses = isotopes(segments, cutoff) final = genDict(masses, ratios, charges, cutoff) ms_output = MassSpectrum() pts = resolution2pts(min(final.keys()),max(final.keys()),resolution) xvector,yvector=genGaussian(final,sigma,pts) ms_output.add_spectrum(xvector,yvector) if do_centroid: mz_list,intensity_list,centroid_list = gradient(ms_output.get_spectrum()[0],ms_output.get_spectrum()[1],max_output=-1,weighted_bins=5) ms_output.add_centroids(mz_list,intensity_list) else: ms_output.add_centroids(np.array(final.keys()),np.array(final.values())) if plot==True: import matplotlib.pyplot as plt #for plotting plt.plot(xvector,yvector) plt.plot(mz_list,intensity_list,'rx') plt.show() #if save==True: # g=open(savefile,'w') # xs=xvector.tolist() # ys=yvector.tolist() # for i in range(0,len(xs)): # g.write(str(xs[i])+"\t"+str(ys[i])+"\n") # g.close return ms_output def str_to_el(str_in): import re atom_number = re.split('([A-Z][a-z]*)', str_in) el = {} for atom, number in zip(atom_number[1::2], atom_number[2::2]): if atom not in PeriodicTable: raise ValueError("Element not recognised: {} in {}".format(atom, str_in)) if number == '': number = '1' number = int(number) if not atom in el: el[atom] = number else: el[atom] += number return el def rm_1bracket(str_in): # find first and last brackets rb = str_in.index(')') lb = str_in[0:rb].rindex('(') # check if multiplier after last bracket if len(str_in) == rb + 1: # end of string mult = "1" mult_idx = 0 else: mult = str_in[rb + 1:] mult_idx = len(mult) if not mult.isdigit(): # not a number mult = '1' mult_idx = 0 # exband brackets str_tmp = "" for m in range(0, int(mult)): str_tmp = str_tmp + str_in[lb + 1:rb] if lb == 0: str_strt = "" else: str_strt = str_in[0:lb] if rb == len(str_in) - 1: str_end = "" else: str_end = str_in[rb + 1 + mult_idx:] return str_strt + str_tmp + str_end def strip_bracket(str_in): go = True try: while go == True: str_in = rm_1bracket(str_in) except ValueError as e: if str(e) != "substring not found": raise return str_in def process_sf(str_in): import re # 
split_sign sub_strings = re.split('([\+-])', str_in) if not sub_strings[0] in (('+', '-')): sub_strings = ["+"] + sub_strings el = {} for sign, sf in zip(sub_strings[0::2], sub_strings[1::2]): # remove brackets str_in = strip_bracket(sf) # count elements el_ = str_to_el(str_in) for atom in el_: number = int('{}1'.format(sign)) * el_[atom] if not atom in el: el[atom] = number else: el[atom] += number return el def process_complexes(str_in): """ Function splits strings at '.' and moves any preceding number to the end so A.nB -> A+(B)n :param str_in: molecular formula that may or may not contain complexes :return: reformatted string """ def _move_num_to_end(s): # move initial numbers to end alpha_idx = [ss.isalpha() for ss in s].index(True) str_re = "({}){}".format(s[alpha_idx:],s[0:alpha_idx]) return str_re if '.' not in str_in: return str_in str_in = str_in.split(".") str_out = ["{}".format(s) if s[0].isalpha() else _move_num_to_end(s) for s in str_in ] str_out = "+".join(str_out) return str_out def prep_str(str_in): str_in = process_complexes(str_in) #turn A.nB into A+(B)n str_in.split("+") return str_in def complex_to_simple(str_in): str_in = prep_str(str_in) el_dict = process_sf(str_in) if any((all([e==0 for e in el_dict.values()]),any([e<0 for e in el_dict.values()]))): return None sf_str = "".join(["{}{}".format(a,el_dict[a]) for a in el_dict if el_dict[a]>0]) return sf_str
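# --- Usage sketch (not part of the original module) ---------------------------
# A minimal, hedged example of driving the calculator defined above. The formula
# is only an illustration; the MassSpectrum class and the `gradient` centroiding
# helper referenced inside isodist() are assumed to be provided elsewhere in the
# package, so centroiding is switched off here.
if __name__ == '__main__':
    # Collapse an adduct/complex formula into a simple elemental formula:
    # "C6H12O6.2H2O" should reduce to "C6H16O8".
    sf = complex_to_simple("C6H12O6.2H2O")
    print(sf)
    # Compute the isotope envelope for that formula with a single positive charge.
    spectrum = isodist(sf, charges=1, sigma=0.01, cutoff=1e-4, do_centroid=False)
    mzs = spectrum.get_spectrum()[0]
    intensities = spectrum.get_spectrum()[1]
    print(mzs[:5])
    print(intensities[:5])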
import unittest

from code.google_search import get_people_also_ask_links


class TestGoogleSearch(unittest.TestCase):
    def setUp(self) -> None:
        pass

    def test_get_people_also_ask_links(self):
        """Test the get_people_also_ask_links method"""
        test = "principal components"
        result = get_people_also_ask_links(test)
        self.assertEqual(list, type(result))
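# Not in the original file: an optional entry point so this test module can be
# executed directly; `python -m unittest` discovery works without it.
if __name__ == "__main__":
    unittest.main()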
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import json import uuid from xml.dom import minidom import webob from cinder.api import common from cinder.api.openstack.wsgi import MetadataXMLDeserializer from cinder.api.openstack.wsgi import XMLDeserializer from cinder import db from cinder import test from cinder.tests.api import fakes from cinder import volume def fake_volume_get(*args, **kwargs): return { 'id': 'fake', 'host': 'host001', 'status': 'available', 'size': 5, 'availability_zone': 'somewhere', 'created_at': datetime.datetime.now(), 'attach_status': None, 'display_name': 'anothervolume', 'display_description': 'Just another volume!', 'volume_type_id': None, 'snapshot_id': None, 'project_id': 'fake', } def fake_volume_get_all(*args, **kwargs): return [fake_volume_get()] fake_image_metadata = { 'image_id': 'someid', 'image_name': 'fake', 'kernel_id': 'somekernel', 'ramdisk_id': 'someramdisk', } def fake_get_volume_image_metadata(*args, **kwargs): return fake_image_metadata def fake_get_volumes_image_metadata(*args, **kwargs): return {'fake': fake_image_metadata} class VolumeImageMetadataTest(test.TestCase): content_type = 'application/json' def setUp(self): super(VolumeImageMetadataTest, self).setUp() self.stubs.Set(volume.API, 'get', fake_volume_get) self.stubs.Set(volume.API, 'get_all', fake_volume_get_all) self.stubs.Set(volume.API, 'get_volume_image_metadata', fake_get_volume_image_metadata) self.stubs.Set(volume.API, 'get_volumes_image_metadata', fake_get_volumes_image_metadata) self.stubs.Set(db, 'volume_get', fake_volume_get) self.UUID = uuid.uuid4() def _make_request(self, url): req = webob.Request.blank(url) req.accept = self.content_type res = req.get_response(fakes.wsgi_app()) return res def _get_image_metadata(self, body): return json.loads(body)['volume']['volume_image_metadata'] def _get_image_metadata_list(self, body): return [ volume['volume_image_metadata'] for volume in json.loads(body)['volumes'] ] def test_get_volume(self): res = self._make_request('/v2/fake/volumes/%s' % self.UUID) self.assertEqual(res.status_int, 200) self.assertEqual(self._get_image_metadata(res.body), fake_image_metadata) def test_list_detail_volumes(self): res = self._make_request('/v2/fake/volumes/detail') self.assertEqual(res.status_int, 200) self.assertEqual(self._get_image_metadata_list(res.body)[0], fake_image_metadata) class ImageMetadataXMLDeserializer(common.MetadataXMLDeserializer): metadata_node_name = "volume_image_metadata" class VolumeImageMetadataXMLTest(VolumeImageMetadataTest): content_type = 'application/xml' def _get_image_metadata(self, body): deserializer = XMLDeserializer() volume = deserializer.find_first_child_named( minidom.parseString(body), 'volume') image_metadata = deserializer.find_first_child_named( volume, 'volume_image_metadata') return MetadataXMLDeserializer().extract_metadata(image_metadata) def _get_image_metadata_list(self, body): deserializer = XMLDeserializer() 
volumes = deserializer.find_first_child_named( minidom.parseString(body), 'volumes') volume_list = deserializer.find_children_named(volumes, 'volume') image_metadata_list = [ deserializer.find_first_child_named( volume, 'volume_image_metadata' ) for volume in volume_list] return map(MetadataXMLDeserializer().extract_metadata, image_metadata_list)
from datetime import datetime
from typing import List, Dict, Optional

from pydantic import BaseModel, validator, root_validator


class ItemModel(BaseModel):
    cve: Dict
    configurations: Optional[Dict]
    impact: Optional[Dict]
    publishedDate: datetime
    lastModifiedDate: datetime


class ResultModel(BaseModel):
    CVE_data_timestamp: datetime
    CVE_data_type: str
    CVE_Items: List[ItemModel]

    @validator('CVE_data_type')
    def fixed_type(cls, v):
        assert v == 'CVE', 'Must be of type CVE'
        return v


class ResponseModel(BaseModel):
    resultsPerPage: int
    startIndex: int
    totalResults: int
    result: ResultModel
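# --- Usage sketch (assumed, not part of the original module) ------------------
# The `validator` import above implies pydantic v1, so parse_obj() is used here.
# The payload is a trimmed, made-up NVD-style response purely for illustration.
_sample = {
    "resultsPerPage": 1,
    "startIndex": 0,
    "totalResults": 1,
    "result": {
        "CVE_data_timestamp": "2021-01-01T00:00:00+00:00",
        "CVE_data_type": "CVE",
        "CVE_Items": [
            {
                "cve": {"CVE_data_meta": {"ID": "CVE-2021-0001"}},
                "publishedDate": "2021-01-01T00:00:00+00:00",
                "lastModifiedDate": "2021-01-02T00:00:00+00:00",
            }
        ],
    },
}

if __name__ == "__main__":
    response = ResponseModel.parse_obj(_sample)
    print(response.result.CVE_Items[0].publishedDate.isoformat())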
import easygui as g

# Prompt translation: the title "账号中心" means "Account Center"; the message says
# the fields marked with * (username, real name, mobile number, e-mail) are required.
user_info = g.multenterbox(
    title='账号中心',
    msg='【*用户名】为必填项\t【*真实姓名】为必填项\t【*手机号码】为必填项\t【*E-mail】为必填项',
    # Field order: username, real name, landline phone, mobile number, QQ, e-mail
    fields=['*用户名', '*真实姓名', '固定电话', '*手机号码', 'QQ', '*E-mail'],
)
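# --- Follow-up sketch (not in the original script) -----------------------------
# multenterbox() returns the entered values as a list of strings in field order,
# or None if the dialog is cancelled; the handling below is an illustrative
# assumption about how the required fields might be checked.
if user_info is None:
    g.msgbox('Input cancelled.')
else:
    username, realname, landline, mobile, qq, email = user_info
    # The fields marked * in the prompt are required.
    required = {'username': username, 'real name': realname,
                'mobile number': mobile, 'e-mail': email}
    missing = [name for name, value in required.items() if not value.strip()]
    if missing:
        g.msgbox('Missing required fields: ' + ', '.join(missing))
    else:
        g.msgbox('Welcome, ' + username + '!')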
import re import os __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) """ Holds all the custom exceptions raised by the api """ class OrderNotFound(StandardError): """Error raised when an order is not found""" def __init__(self, orderid): """Create new OrderNotFound Args: orderid (str): The orderid that was not found """ super(OrderNotFound, self).__init__(orderid) class ItemNotFound(StandardError): """Error raised when an item is not found""" def __init__(self, orderid, itemid): """Create new ItemNotFound Args: orderid (str): The orderid of the item itemid (str): The id of the item that was not found """ super(ItemNotFound, self).__init__(orderid, itemid) class ProductNotImplemented(NotImplementedError): """Exception to be thrown when trying to instantiate an unsupported product""" def __init__(self, product_id): """Constructor for the product not implemented Keyword args: product_id -- The product id of that is not implemented Return: None """ self.product_id = product_id super(ProductNotImplemented, self).__init__(product_id) class ValidationException(Exception): """Exceptions when there is an error with validating an order example: "3 validation errors": [ "Value u'' for field '<obj>.tm5.products[0]' cannot be blank'", "Value u'' for field '<obj>.tm5.products[0]' is not in the enumeration: ['source_metadata', 'l1', 'toa', 'bt', 'cloud', 'sr', 'lst', 'swe', 'sr_ndvi', 'sr_evi', 'sr_savi', 'sr_msavi', 'sr_ndmi', 'sr_nbr', 'sr_nbr2', 'stats']", "Value [u''] for field '<obj>.tm5.products' Requested products are not available" ] """ def __init__(self, msg): err_ls = msg.split('\n') err_key = err_ls[0].replace(':', '') self.response = {err_key: []} for err in err_ls[1:]: if err: err = re.sub(r'<obj>.', '', err) self.response[err_key].append(err) super(ValidationException, self).__init__(str(self.response)) class InventoryException(Exception): """Exception for handling problems with inventory handling""" def __init__(self, msg): super(InventoryException, self).__init__(msg) self.response = {'Inputs Not Available': msg} class InventoryConnectionException(Exception): """Exception handling if input data pool is down""" def __init__(self, msg): super(InventoryConnectionException, self).__init__(msg)
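# --- Illustrative use of ValidationException (not in the original module) ------
# Per the class docstring above: the first line of the message becomes the key of
# `response`, and the remaining lines are collected with any '<obj>.' prefix
# stripped. The message text below is made up for the example and assumes the
# Python 2 environment this module targets (it relies on StandardError).
if __name__ == '__main__':
    _msg = ("2 validation errors:\n"
            "Value u'' for field '<obj>.tm5.products[0]' cannot be blank\n"
            "Value [u''] for field '<obj>.tm5.products' Requested products are not available")
    try:
        raise ValidationException(_msg)
    except ValidationException as e:
        # Expected shape: {'2 validation errors': ["Value u'' for field 'tm5.products[0]' ...", ...]}
        print(e.response)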
#Import modules and libraries from random import randint from string import ascii_uppercase, ascii_lowercase from itertools import permutations from copy import deepcopy from tail_recursion import tail_recursive, recurse #Define board mapping function def mapBoard(col, row, value): board = [[value for x in range(col)] for y in range(row)] return board #Define metaboard mapping function def mapMetaBoard(col, row): metaboard = [[[[0, 0, 0, 0], [0, 0, 0, 0]] for x in range(col)] for y in range(row)] return metaboard #Define view board function def viewBoard(board): alphabet = ascii_uppercase col = len(board[0]) row = len(board) border = "" topBorder = "#||" for i in range(col): border += "_" * 2 topBorder += alphabet[i] topBorder += " " border += "___" print(topBorder) print(border) for i in range(row): print(alphabet[i] + "||" + " ".join(board[i]) + "|") #Define mark function def mark(board, signature): alphabet = ascii_uppercase alphabet1 = ascii_lowercase dimensionY = len(board) dimensionX = len(board[0]) valid = False while (not valid): print("\n\nWhere do you want to mark?\n\n") x = input(f"Column (A - {alphabet[dimensionX - 1]})? ") y = input(f"Row (A - {alphabet[dimensionY - 1]})? ") try: x = alphabet.index(x) except ValueError: x = alphabet1.index(x) try: y = alphabet.index(y) except: y = alphabet1.index(y) if (board[y][x] == ' '): valid = True else: print('That position has already been marked. Please try again.\n') board[y][x] = signature print('\n') viewBoard(board) #Define function to find all occurences of 'X' #Value is [opponentSignature] #Return [[col1, row1], [col2, row2], ...] def locate(value, board): dimensionY = len(board) dimensionX = len(board[0]) returnList = [] for row in range(dimensionY): for col in range(dimensionX): if (board[row][col] in value): returnList.append([col, row]) return returnList #Define computer's turn -- recursive @tail_recursive def play(boardHistory, depleted, checked, iteration, winCond, forecasted, possibilities, board, selfSignature, opponentSignature, difficulty, first = True): #AI #Each of metaboard's position is a list [danger, opportunity] #Define function to update metaboard #TODO: refine to improve efficiency at detecting risks and opportunities of non-continuous streak & multi-directional streaks #REQUIREMENTS 1: resonant effect on a tile immediately next to a continuous winCond - 1 streak == risk/opportunity factor of interrupted resonance on a tile conjoining 2 aligning sub-streaks whose sum >= winCond - 1 #REQUIREMENTS 2: implement weighted resonance system on a tile conjoining multiple directional streaks > resonance system for linear streaks def meta(board, opponentSignature, selfSignature, winCond, difficulty): #Define function to sweep perimeter of a position's coordinates and add attributes to them #coord = [col, row] def sweep(metaboard, coord, keyword, opponentSignature, selfSignature, winCond): if (keyword == 'danger'): type = 0 otherType = 1 signature = opponentSignature else: type = 1 otherType = 0 signature = selfSignature coordVars = list(permutations([-1, 0, 1], 2)) coordVars.extend(((-1, -1), (1, 1))) for coordVar in coordVars: try: if (coordVar in [(-1, -1), (1, 1)]): pos = 2 elif (coordVar in [(0, -1), (0, 1)]): pos = 0 elif (coordVar in [(-1, 0), (1, 0)]): pos = 1 else: pos = 3 row = coord[1] + coordVar[0] if (row < 0 or row > len(metaboard)): raise IndexError col = coord[0] + coordVar[1] if (col < 0 or col > len(metaboard[0])): raise IndexError #Ripple effect if (not isinstance(metaboard[row][col], str)): for i in 
range(winCond - 1): if (not isinstance(metaboard[row][col], str)): metaboard[row][col][type][pos] += (1 - i/(winCond - 1)) metaboard[row][col][otherType][pos] -= (1 - i/(winCond - 1)) row += coordVar[0] if (row < 0 or row > len(metaboard)): raise IndexError col += coordVar[1] if (col < 0 or col > len(metaboard[0])): raise IndexError elif (metaboard[row][col] == signature): row += coordVar[0] if (row < 0 or row > len(metaboard)): raise IndexError col += coordVar[1] if (col < 0 or col > len(metaboard[0])): raise IndexError else: raise IndexError #alphabet = ascii_uppercase #print(f'Metaboard at column {alphabet[col]} and row {alphabet[row]} has a {keyword} level of {metaboard[row][col][type]}.') #Resonance effect if (metaboard[row][col] == signature): alignment = 0 while (metaboard[row][col] == signature): row += coordVar[0] if (row < 0 or row > len(metaboard)): raise IndexError col += coordVar[1] if (col < 0 or col > len(metaboard[0])): raise IndexError alignment += 1 if (isinstance(metaboard[row][col], list)): metaboard[row][col][type][pos] += alignment except IndexError: pass #Define function to screen entire metaboard for invalidation def screen(metaboard, selfSignature, opponentSignature, winCond): #Define function to rotate board 90 degree counter-clockwise with perspective to keeping OG board intact def rotate(board): #Define function to inverse board vertically def invertY(board): invertYBoard = [] dimensionY = len(board) for row in range(dimensionY): invertYBoard.append(board[dimensionY - row - 1]) return invertYBoard rotateBoard = [] dimensionY = len(board) dimensionX = len(board[0]) for col in range(dimensionX): column = [board[row][col] for row in range(dimensionY)] rotateBoard.append(column) return invertY(rotateBoard) #Define function to screen the top left corner of the board def screenTopLeftCorner(metaboard, winCond, pos, name): for row in range(winCond - 1): for col in range(winCond - 1 - row): if (isinstance(metaboard[row][col], list)): #print(f'nullify {row}:{col}\'s danger and potential in the {name} diagonal') metaboard[row][col][0][pos] = 0 metaboard[row][col][1][pos] = 0 #Define function to screen metaboard to invalidate 'type' from signature (e.g, invalidate dangers between two blocked self) horizontally def screenHorizontal(metaboard, signature, type, winCond, pos): dimensionX = len(metaboard[0]) if type == 'danger': type = 0 else: type = 1 #Format all selfSignature's coords found in each row #sus = [susRow1, susRow3, ...] #susRow1 = [[col1, row], [col3, row], ...] 
sus = [] for row in metaboard: susEachRow = [] for col in row: if (col == signature): susEachRow.append([row.index(col), metaboard.index(row)]) sus.append(susEachRow) sus = [susEachRow for susEachRow in sus if len(susEachRow) != 0] #Filter out all invalid segments between two blocked self horizontally for susEachRow in sus: for i in range(len(susEachRow) - 1): if (2 <= susEachRow[i + 1][0] - susEachRow[i][0] <= winCond): for k in range(0, susEachRow[i + 1][0] - susEachRow[i][0]): if (isinstance(metaboard[susEachRow[i][1]][susEachRow[i][0] + k], list)): #print(f'Due to being blocked on both ends by {signature} at coordinates {susEachRow[i][0]}:{susEachRow[i][1]} and {susEachRow[i + 1][0]}:{susEachRow[i + 1][1]}, the position with the coordinates {susEachRow[i][1]}:{susEachRow[i][0] + k} has been nullified of its {type}\'s {pos}.') metaboard[susEachRow[i][1]][susEachRow[i][0] + k][type][pos] = 0 #Filter out all invalid segments between self and border for susEachRow in sus: start = susEachRow[0] end = susEachRow[-1] if (1 <= start[0] < winCond): for k in range(0, start[0]): if (isinstance(metaboard[start[1]][k], list)): #print(f'Due to being blocked on both ends by {signature} at coordinates {start[0]}:{start[1]} and the border, the position with the coordinates {start[1]}:{k} has been nullified of its {type}\'s {pos}.') metaboard[start[1]][k][type][pos] = 0 if (1 <= dimensionX - end[0] - 1 < winCond): for k in range(0, dimensionX - end[0] - 1): if (isinstance(metaboard[end[1]][end[0] + k], list)): #print(f'Due to being blocked on both ends by {signature} at coordinates {end[0]}:{end[1]} and the border, the position with the coordinates {end[1]}:{end[0] + k} has been nullified of its {type}\'s {pos}.') metaboard[end[1]][end[0] + k][type][pos] = 0 return metaboard #Define function to screen metaboard to invalidate 'type' from signature (e.g, invalidate dangers between two blocked self) diagonally def screenDiagonal(metaboard, signature, type, winCond, pos): dimensionY = len(metaboard) dimensionX = len(metaboard[0]) if type == 'danger': type = 0 else: type = 1 #Format all selfSignature's coords found in each diagonal #susDiagDown, Up, sus = [susDiag1, susDiag3, ...] #susDiag1 = [[col1, row1], [col3, row3], ...] 
sus = [] susDiagDown = [] lenSusDiagDown = [] susDiagUp = [] lenSusDiagUp = [] susDuplicate = [] for i in range(dimensionY): susEachDiagDown = [] originalDiagLen = 0 for j in range(dimensionY): try: if (metaboard[i + j][j] == signature): susEachDiagDown.append([i + j, j]) originalDiagLen += 1 except IndexError: pass susDiagDown.append(susEachDiagDown) if (len(susEachDiagDown) != 0): lenSusDiagDown.append(originalDiagLen) else: lenSusDiagDown.append(0) for i in range(dimensionX): susEachDiagUp = [] originalDiagLen = 0 for j in range(dimensionX): try: if (metaboard[j][i + j] == signature): susEachDiagUp.append([j, i + j]) originalDiagLen += 1 except IndexError: pass susDiagUp.append(susEachDiagUp) if (len(susEachDiagUp) != 0): lenSusDiagUp.append(originalDiagLen) else: lenSusDiagUp.append(0) sus.extend(susDiagDown) sus.extend(susDiagUp) for i in range(min(dimensionX, dimensionY)): if (metaboard[i][i] == signature): susDuplicate.append([i, i]) sus.remove(susDuplicate) susDiagUp = [susEachDiag for susEachDiag in susDiagUp if len(susEachDiag) != 0] lenSusDiagUp = [eachLen for eachLen in lenSusDiagUp if eachLen != 0] susDiagDown = [susEachDiag for susEachDiag in susDiagDown if len(susEachDiag) != 0] lenSusDiagDown = [eachLen for eachLen in lenSusDiagDown if eachLen != 0] #Filter out all invalid segments between two blocked self diagontally for susEachDiag in sus: for i in range(len(susEachDiag) - 1): if (2 <= susEachDiag[i + 1][0] - susEachDiag[i][0] <= winCond): for k in range(0, susEachDiag[i + 1][0] - susEachDiag[i][0]): if (isinstance(metaboard[susEachDiag[i][0] + k][susEachDiag[i][1] + k], list)): #print(f'Due to being blocked on both ends by {signature} at coordinates {susEachDiag[i][0]}:{susEachDiag[i][1]} and {susEachDiag[i + 1][0]}:{susEachDiag[i + 1][1]}, the position with the coordinates {susEachDiag[i][0] + k}:{susEachDiag[i][1] + k} has been nullified of its {type}\'s {pos}.') metaboard[susEachDiag[i][0] + k][susEachDiag[i][1] + k][type][pos] = 0 #Filter out all invalid segments between self and border for susDiagUp for susEachDiag in susDiagUp: start = susEachDiag[0] end = susEachDiag[-1] if (1 <= min(start[0], start[1]) < winCond): for k in range(0, min(start[0], start[1]) + 1): if (isinstance(metaboard[start[0] - k][start[1] - k], list)): #print(f'Due to being blocked on both ends by {signature} at coordinates {start[0]}:{start[1]} and the corner, the position with the coordinates {start[0] + k}:{start[1] + k} has been nullified of its {type}\'s {pos}.') metaboard[start[0] - k][start[1] - k][type][pos] = 0 if (1 <= lenSusDiagUp[susDiagUp.index(susEachDiag)] - min(end[0], end[1]) <= winCond): for k in range(0, lenSusDiagUp[susDiagUp.index(susEachDiag)] - min(end[0], end[1])): if (isinstance(metaboard[end[0] + k][end[1] + k], list)): #print(f'Due to being blocked on both ends by {signature} at coordinates {end[0]}:{end[1]} and the corner, the position with the coordinates {end[0] + k}:{end[1] + k} has been nullified of its {type}\'s {pos}.') metaboard[end[0] + k][end[1] + k][type][pos] = 0 #Filter out all invalid segments between self and border for susDiagDown for susEachDiag in susDiagDown: start = susEachDiag[0] end = susEachDiag[-1] if (1 <= min(start[0], start[1]) < winCond): for k in range(0, min(start[0], start[1]) + 1): if (isinstance(metaboard[start[0] - k][start[1] - k], list)): #print(f'Due to being blocked on both ends by {signature} at coordinates {start[0]}:{start[1]} and the corner, the position with the coordinates {start[0] + k}:{start[1] + k} has been nullified of 
its {type}\'s {pos}.') metaboard[start[0] - k][start[1] - k][type][pos] = 0 if (1 <= lenSusDiagDown[susDiagDown.index(susEachDiag)] - min(end[0], end[1]) <= winCond): for k in range(0, lenSusDiagDown[susDiagDown.index(susEachDiag)] - min(end[0], end[1])): if (isinstance(metaboard[end[0] + k][end[1] + k], list)): #print(f'Due to being blocked on both ends by {signature} at coordinates {end[0]}:{end[1]} and the corner, the position with the coordinates {end[0] + k}:{end[1] + k} has been nullified of its {type}\'s {pos}.') metaboard[end[0] + k][end[1] + k][type][pos] = 0 return metaboard #pos: index of relevant value (0: horizontal, 1: vertical, 2: NW - SE, 3: NE - SW) #Screen top left corner screenTopLeftCorner(metaboard, winCond, 3, 'top left') metaboard = rotate(metaboard) #Screen top right corner screenTopLeftCorner(metaboard, winCond, 2, 'top right') metaboard = rotate(metaboard) #Screen bottom right corner screenTopLeftCorner(metaboard, winCond, 3, 'bottom right') metaboard = rotate(metaboard) #Screen bottom left corner screenTopLeftCorner(metaboard, winCond, 2, 'bottom left') metaboard = rotate(metaboard) #Screen horizontally screenHorizontal(metaboard, selfSignature, 'danger' , winCond, 0) screenHorizontal(metaboard, opponentSignature, 'opportunity' , winCond, 0) metaboard = rotate(metaboard) #Screen vertically screenHorizontal(metaboard, selfSignature, 'danger' , winCond, 1) screenHorizontal(metaboard, opponentSignature, 'opportunity' , winCond, 1) for i in range(3): metaboard = rotate(metaboard) #Screen NW-SE diagonally screenDiagonal(metaboard, selfSignature, 'danger' , winCond, 2) screenDiagonal(metaboard, opponentSignature, 'opportunity' , winCond, 2) metaboard = rotate(metaboard) #Screen NE-SW diagonally screenDiagonal(metaboard, selfSignature, 'danger' , winCond, 3) screenDiagonal(metaboard, opponentSignature, 'opportunity' , winCond, 3) for i in range(3): metaboard = rotate(metaboard) metaboard = mapMetaBoard(len(board[0]), len(board)) dangerCoords = locate([opponentSignature], board) opportunityCoords = locate([selfSignature], board) for coord in dangerCoords: metaboard[coord[1]][coord[0]] = opponentSignature for coord in opportunityCoords: metaboard[coord[1]][coord[0]] = selfSignature for coord in dangerCoords: sweep(metaboard, coord, 'danger', opponentSignature, selfSignature, winCond) for coord in opportunityCoords: sweep(metaboard, coord, 'opportunity', opponentSignature, selfSignature, winCond) #Screening applies for difficulty 2 and up if (difficulty >= 2): screen(metaboard, selfSignature, opponentSignature, winCond) return metaboard #Define function to choose between aggresive or defensive def stance(metaboard, difficulty): dangerList = [] opportunityList = [] for row in metaboard: for col in row: if (isinstance(col, list)): dangerList.append(max(col[0])) opportunityList.append(max(col[1])) pressingDanger = max(dangerList) pressingOpportunity = max(opportunityList) #print(f'Highest danger is {pressingDanger}, whilst highest opportunity is {pressingOpportunity}.') #'Tactical' playstyle applies only for difficulty 3 if (difficulty >= 3): if (pressingOpportunity > pressingDanger): return 'aggressive', pressingOpportunity elif (pressingOpportunity == pressingDanger): return 'tactical', pressingOpportunity else: return 'defensive', pressingDanger else: if (pressingOpportunity >= pressingDanger): return 'aggressive', pressingOpportunity else: return 'defensive', pressingDanger #Define function to make a play @tail_recursive def decide(forecasted, checked, style, value, 
metaboard, difficulty): if style == 'aggressive': type = 1 elif style == 'defensive': type = 0 else: type = 2 if (style in ['aggressive', 'defensive']): for row in metaboard: for col in row: if (isinstance(col, list)): if max(col[type]) == value: #print(col[type].index(value)) x, y = row.index(col), metaboard.index(row) else: returnList = [] maxTracker = [] for row in range(len(metaboard)): for col in range(len(metaboard[0])): if (isinstance(metaboard[row][col], list)): if (max(metaboard[row][col][0]) == value) or (max(metaboard[row][col][1]) == value): #print(col[type].index(value)) returnList.append([col, row]) maxTracker.append(sum(metaboard[row][col][0]) + sum(metaboard[row][col][1])) x, y = returnList[maxTracker.index(max(maxTracker))][0], returnList[maxTracker.index(max(maxTracker))][1] if [*forecasted, [x, y]] not in checked: return x, y else: #For a checked position, set metaboard value to negative metaboardTemp = deepcopy(metaboard) metaboardTemp[y][x] = [[-1, -1, -1, -1], [-1, -1, -1, -1]] style, newValue = stance(metaboardTemp, difficulty) #When all potential positions have been checked, all potential metaboard values will have been set to negative => depleted if newValue != value: raise ValueError return recurse(forecasted, checked, style, newValue, metaboardTemp, difficulty) #Define function to swap self signature and opponent signature def swap(selfSignature, opponentSignature): temp = selfSignature selfSignature = opponentSignature opponentSignature = temp return selfSignature, opponentSignature #Define function to determine if terminal node has been reached def reachedTerminal(forecasted): if len(forecasted) >= 1: last = forecasted[-1][0] return isinstance(last, bool) or isinstance(last, float) return False #Define function to evaluate value of self node def evalSelf(selfPlaying: bool, possibilities, iteration): def countExact(values, countItem): counted = 0 for value in values: if value is countItem: counted += 1 return counted #Define function to collapse all forecasted paths with same iteration count def collapse(selfPlaying: bool, possibilities, iteration): def contains(values, comparisonItem): for value in values: if value is comparisonItem: return True return False #Extract all forecasted paths with same iteration count #print("All possibilities at this stage are: ", possibilities) extracted = deepcopy([possibility for possibility in possibilities if possibility[-1][1] == iteration]) #if selfPlaying: print("Node layer ", iteration, " and maximizer is playing.") #else: print("Node layer ", iteration, " and minimizer is playing.") #print("Before collapse, all values at node layer ", iteration, " is ", extracted) tempPossibilities = deepcopy([possibility for possibility in possibilities if possibility not in extracted]) #Heuristics: if only 1 or less forecasted at current node, skip collapse if len(extracted) == 1: #print("Taking shortcut to skip collapse because only 1 forecasted detected at layer ", iteration, ": ", extracted[0]) tempPossibilities.append(extracted[0]) return tempPossibilities elif len(extracted) == 0: #print("Taking shortcut to skip collapse because no forecasted detected at layer ", iteration) return tempPossibilities values = [extraction[-1][0] for extraction in extracted] #print("Performing collapse on ", values) tieLimiter = False for value in values: if isinstance(value, float): tieLimiter = True #Prioritize boolean: if True exists, all positive possibilities can be pruned if contains(values, True) and selfPlaying: values = [value for value in 
values if not (isinstance(value, float) and value > 0)] if contains(values, False) and not selfPlaying: values = [value for value in values if not (isinstance(value, float) and value < 0)] #When both True and False exists, eliminate any in-between if contains(values, True) and contains(values, False): values = [value for value in values if not isinstance(value, float)] #print("Preliminary sifting is done. Now performing collapse on ", values) if selfPlaying: #Due to Python's max([False, 0.0]) -> False, must remove all False if 0.0 exists in maximizer's turn if tieLimiter and contains(values, False): values = [value for value in values if value is not False] returnValue = max(values) else: #Due to Python's min([0.0, False]) -> 0.0, must remove all float if False exists in minimizer's turn if contains(values, False): returnValue = False else: returnValue = min(values) #print("Collapse done, ", returnValue) #Deeper eval performed when multiple returnValue in values; choose longest steps for min; shortest steps for max #Heuristics: when multiple combinations of moves result in same state, keep only 1 if countExact(values, returnValue) > 1: #print("Multiple forecasted evaluating to the same value detected. Comparing steps for each.") extractedShortlisted = [forecasted for forecasted in extracted if forecasted[-1][0] is returnValue] lenList = [len(forecasted) for forecasted in extractedShortlisted] if selfPlaying: fullReturnValue = extractedShortlisted[lenList.index(min(lenList))] else: fullReturnValue = extractedShortlisted[lenList.index(max(lenList))] #print("From ", extractedShortlisted, " choose ", fullReturnValue) else: #Reconstruct full format of possibility holding returnValue and add back to possibilities fullReturnValue = [possibility for possibility in extracted if possibility[-1][0] is returnValue][0] #print("After collapse, all values at node layer ", iteration, " is ", fullReturnValue) tempPossibilities.append(fullReturnValue) return tempPossibilities #Define function to decrement all forecasted paths (should be 1) with iteration count matching current (bubble-up) def passUp(possibilities, iteration): for possibility in possibilities: if possibility[-1][1] == iteration: possibility[-1][1] -= 1 #Identify if a duplicated iteration count exists in possibilities, then collapse all those forecasted depending on self nature iterationList = [possibility[-1][1] for possibility in possibilities] #print(iterationList) for iterationItem in iterationList: if countExact(iterationList, iterationItem) > 1: possibilities = collapse(selfPlaying, possibilities, iteration) #print(iteration) if (iteration > 0): passUp(possibilities, iteration) return possibilities #Even iteration = machine plays; odd = human #maxDepthSearch = layer of nodes forecasted ahead by AI -- CAREFUL! O(n) time complexity = b ** m, with m being maxDepthSearch and b being branching factor = (boardDimensionX * boardDimensionY - claimed tiles) #For 3x3 board, set to 10 for full coverage if len(board) == len(board[0]) and len(board) == 3: maxDepthSearch = 10 #If game is in developing phase (i.e, number of placed marks <= 1/2 win condition) elif max(len(locate(selfSignature, board)), len(locate(opponentSignature, board))) <= winCond/2: maxDepthSearch = 2 else: maxDepthSearch = 3 #possibilities = [forecasted1, forecasted2, ...] 
#forecasted = [[x1, y1], [x2, y2], [x3, y3]..., [True, iteration]] containing moves of both players until end & boolean of win state(True when self is winner, False otherwise) #forecasted = [[x1, y1], [x2, y2], [x3, y3]..., [score: float, iteration]] containing moves of both players until maxDepthSearch reached, score is evaluated to assign to board state (0 when tie, +highestTacticalValue when it's self's turn, - otherwise) #Evaluate value of self node depending on min/max nature, run when all child nodes to maxDepthSearch are explored/ when terminal node is detected #evalSelf only sifts through forecasteds and collapses those having the same iteration value (vying to value same node) #When bubble up 1 node, take all forecasteds in possibilities with matching current iteration (if everything is right this should already be collapsed to only 1) and decrement that (to imply this value is passed upwards to parent node and is now parent node's originating value) if reachedTerminal(forecasted): selfPlaying = (iteration % 2 == 0) forecastedCopy = deepcopy(forecasted) possibilities.append(forecastedCopy) possibilities = evalSelf(selfPlaying, possibilities, iteration) iteration -= 1 #Reset back 1 node higher forecasted.pop(-1) forecasted.pop(-1) return recurse(boardHistory, depleted, checked, iteration, winCond, forecasted, possibilities, board, selfSignature, opponentSignature, difficulty, False) #Terminal node: winCond is met/maxDepthSearch reached/no possible moves left if win(board, winCond, selfSignature, opponentSignature) or win(board, winCond, opponentSignature, selfSignature) or len(locate(' ', board)) == 0 or iteration == maxDepthSearch: if forecasted not in checked: checked.append(deepcopy(forecasted)) #If self/other is winner, document move if win(board, winCond, selfSignature, opponentSignature): #If it's computer's turn, and computer wins if (iteration % 2 == 0): forecasted.append([True, iteration]) #print("Forecasted a possible win if moves are as followed: ", forecasted) #viewBoard(board) else: forecasted.append([False, iteration]) #print("Forecasted a possible loss if moves are as followed: ", forecasted) #viewBoard(board) elif win(board, winCond, opponentSignature, selfSignature): #If it's computer's turn, and computer's opponent wins if (iteration % 2 == 0): forecasted.append([False, iteration]) #print("Forecasted a possible loss if moves are as followed: ", forecasted) #viewBoard(board) else: forecasted.append([True, iteration]) #print("Forecasted a possible win if moves are as followed: ", forecasted) #viewBoard(board) elif iteration == maxDepthSearch: metaboard = meta(board, opponentSignature, selfSignature, winCond, difficulty) try: style, value = stance(metaboard, difficulty) #If self's turn if (iteration % 2 == 0): forecasted.append([float(value), iteration]) #print("Max search depth reached: ", forecasted) #viewBoard(board) else: forecasted.append([float(-value), iteration]) #print("Max search depth reached: ", forecasted) #viewBoard(board) #When maxDepthSearch is reached, but game is also tied except ValueError: forecasted.append([0.0, iteration]) #print("Forecasted a possible tie at max depth search if moves are as followed: ", forecasted) #viewBoard(board) #When tie is reached through tiles depletion, score is set to 0.0 else: forecasted.append([0.0, iteration]) #print("Forecasted a possible tie if moves are as followed: ", forecasted) #viewBoard(board) #Reset back 1 node higher boardHistory.pop(-1) board = deepcopy(boardHistory[-1]) #print("Breakpoint 2: Reset board 
back to ") #viewBoard(board) selfSignature, opponentSignature = swap(selfSignature, opponentSignature) return recurse(boardHistory, depleted, checked, iteration, winCond, forecasted, possibilities, board, selfSignature, opponentSignature, difficulty, False) #At each node layer, make a decision and "forecast" board and metaboard, then switch position with opponent and do the same #Normal case: when self node is not terminal, and all children are not depleted yet/maxDepthSearch is not reached yet #dimension = len(board) metaboard = meta(board, opponentSignature, selfSignature, winCond, difficulty) #Heuristics: if there is only one available move left, take that move if (len(locate(' ', board)) == 1): x = locate(' ', board)[0][0] y = locate(' ', board)[0][1] #For actual move; only apply when not projecting self as opponent if (len(checked) == 0 and iteration == 0): alphabet = ascii_uppercase print(f'Computer has decided to play at column {alphabet[x]} and row {alphabet[y]}.\n\n') board = boardHistory[0] board[y][x] = selfSignature viewBoard(board) return board #For a forecasted move elif [*forecasted, [x, y]] not in checked: forecasted.append([x, y]) checked.append(deepcopy(forecasted)) board[y][x] = selfSignature boardHistory.append(deepcopy(board)) iteration += 1 selfSignature, opponentSignature = swap(selfSignature, opponentSignature) return recurse(boardHistory, depleted, checked, iteration, winCond, forecasted, possibilities, board, selfSignature, opponentSignature, difficulty, False) style, value = stance(metaboard, difficulty) try: #For first move only if len(locate(selfSignature, board)) == 0 and len(locate(opponentSignature, board)) == 0: #For symmetrical board or customized board dimension smaller than twice win condition if len(board) == len(board[0]) or (len(board) < winCond * 2) or (len(board[0]) < winCond * 2): move = [int(len(board[0])/2), int(len(board)/2)] #For customized board dimension larger than twice win condition else: move = [randint(winCond, len(board[0]) - 1 - winCond), randint(winCond, len(board) - 1 - winCond)] x = move[0] y = move[1] alphabet = ascii_uppercase print(f'Computer has decided to play at column {alphabet[x]} and row {alphabet[y]}.\n\n') board = boardHistory[0] board[y][x] = selfSignature viewBoard(board) return board else: x, y = decide(forecasted, checked, style, value, metaboard, difficulty) except ValueError: depleted = True #All child nodes had been depleted (i.e, checked has been populated with all possible forecasted combinations) if depleted: depleted = False selfPlaying = (iteration % 2 == 0) possibilities = evalSelf(selfPlaying, possibilities, iteration) iteration -= 1 #If base case had been evaluated; root has been given value; iteration is negative => make a move #All child branches had been depleted if iteration < 0: #print(possibilities) move = possibilities[0][0] x = move[0] y = move[1] alphabet = ascii_uppercase print(f'Computer has decided to play at column {alphabet[x]} and row {alphabet[y]}.\n\n') board = boardHistory[0] board[y][x] = selfSignature viewBoard(board) return board forecasted.pop(-1) boardHistory.pop(-1) board = deepcopy(boardHistory[-1]) #print("Breakpoint 1: Reset board back to ") #viewBoard(board) selfSignature, opponentSignature = swap(selfSignature, opponentSignature) return recurse(boardHistory, depleted, checked, iteration, winCond, forecasted, possibilities, board, selfSignature, opponentSignature, difficulty, False) forecasted.append([x, y]) checked.append(deepcopy(forecasted)) board[y][x] = selfSignature 
#print(selfSignature, " took the move ", [x, y]) #viewBoard(board) boardHistory.append(deepcopy(board)) #print(f'Assessing risk and opportunity, taking {style} move this turn at col {x}, row {y}.') # valid = False # while (not valid): # x = randint(0, dimension - 1) # y = randint(0, dimension - 1) # if board[y][x] == ' ': valid = True iteration += 1 #Swap player each turn selfSignature, opponentSignature = swap(selfSignature, opponentSignature) return recurse(boardHistory, depleted, checked, iteration, winCond, forecasted, possibilities, board, selfSignature, opponentSignature, difficulty, False) #Define winning def win(board, winCond, signature, opponentSignature): #Define function to determine box containing played area def box(board): #Define function to find first occurence of 'X' or 'O', row-wise; if none is found, return 0 #Value is [signature, opponentSignature] def locate(value, board): dimensionY = len(board) dimensionX = len(board[0]) for row in range(dimensionY): for col in range(dimensionX): if (board[row][col] in value): return row return 0 #Define function to inverse board vertically def invertY(board): invertYBoard = [] dimensionY = len(board) for row in range(dimensionY): invertYBoard.append(board[dimensionY - row - 1]) return invertYBoard #Define function to rotate board 90 degree def rotate(board): rotateBoard = [] dimensionY = len(board) dimensionX = len(board[0]) for col in range(dimensionX): column = [board[row][col] for row in range(dimensionY)] rotateBoard.append(column) return rotateBoard dimensionY = len(board) dimensionX = len(board[0]) boundaryN = locate([signature, opponentSignature], board) boundaryS = dimensionY - locate([signature, opponentSignature], invertY(board)) - 1 boundaryW = locate([signature, opponentSignature], rotate(board)) boundaryE = dimensionX - locate([signature, opponentSignature], invertY(rotate(board))) - 1 box = [] for row in range(boundaryN, boundaryS + 1): boxRow = [board[row][col] for col in range(boundaryW, boundaryE + 1)] box.append(boxRow) return box #Create as many winCond x winCond grids as needed to cover the entire played area def grid(box, winCond): dimensionY = len(box) dimensionX = len(box[0]) gridY = dimensionY - winCond + 1 if (gridY < 1): gridY = 1 gridX = dimensionX - winCond + 1 if (gridX < 1): gridX = 1 #List of grids grids = [] for offsetX in range(gridX): for offsetY in range(gridY): grid = [] for row in range(offsetY, offsetY + winCond): rowY = [] for col in range(offsetX, offsetX + winCond): try: rowY.append(box[row][col]) except IndexError: pass grid.append(rowY) grids.append(grid) return grids for board in grid(box(board), winCond): #Within each grid: dimensionY = len(board) dimensionX = len(board[0]) #Count 'O's in a row for row in range(dimensionY): if (board[row].count(signature) >= winCond): return True #Count 'O's in a column columns = [] for col in range(dimensionX): try: columns.append([row[col] for row in board]) except IndexError: pass for col in columns: if (col.count(signature) >= winCond): return True #Count 'O's in a diagonal line dimension = min(dimensionX, dimensionY) diagonalsNW = [] diagonalsNE = [] for i in range(dimension): diagonalNW = [] diagonalNE = [] for j in range(dimension): try: diagonalNW.append(board[j][j]) except IndexError: pass try: diagonalNE.append(board[j][dimension - j - 1]) except IndexError: pass diagonalsNW.append(diagonalNW) diagonalsNE.append(diagonalNE) for diagonalNW in diagonalsNW: if (diagonalNW.count(signature) >= winCond): return True for diagonalNE in diagonalsNE: if 
(diagonalNE.count(signature) >= winCond): return True #Game loop print('Welcome to a game of Tic-tac-toe!\nThe rule is simple: block your opponent before they can get a long enough streak in a continuous row, column or diagonal to win.\n') mode = True while (mode): gamemode = input('Before we start, there are two gamemodes: custom and preset. Which one would you prefer?\n(c) for custom, (p) for preset. ') if (gamemode not in ['c', 'p']): print('Unrecognized input command. Please read the instructions carefully and try again.\n') else: mode = False print('\n\n') #Configuration settings for custom gamemode configure = True while (configure): #Set custom dimension invalid = True while (invalid and gamemode == 'c'): try: dimensionX, dimensionY = input('Input dimension for game initialization:\n(width x length): ').split('x') dimensionX = int(dimensionX) dimensionY = int(dimensionY) invalid = False except: print('Invalid input detected. Please try again.\n') #Preset dimension if (gamemode == 'p'): print('Default grid set to 26x26.') dimensionX = 26 dimensionY = 26 #Set win condition valid = False while (not valid and gamemode == 'c'): try: winCond = input('Input streak size to count as win: ') winCond = int(winCond) if (not isinstance(winCond, int) or 3 > winCond > min(dimensionX, dimensionY)): raise TypeError valid = True except: print('Invalid input detected. Please try again.\n') #Preset win condition if (gamemode == 'p'): print('Default win streak set to 5.') winCond = 5 #Set difficulty chose = False while (not chose and gamemode == 'c'): try: difficulty = int(input('Choose difficulty (easiest: 1 - hardest: 3): ')) if (3 < difficulty or difficulty < 1): raise ValueError chose = True except: print('Invalid input detected. Please try again.\n') #Preset difficulty if (gamemode == 'p'): print('Default difficulty set to 3.') difficulty = 3 #Set player's marker proper = False while (not proper and gamemode == 'c'): marker = input('Choose your prefered marker:\n(o) for \'O\', (x) for \'X\': ') if (marker not in ['x', 'o']): print('Invalid input detected. Please try again.\n') else: proper = True if (marker == 'o'): opponentSignature = 'O' selfSignature = 'X' else: opponentSignature = 'X' selfSignature = 'O' #Preset marker if (gamemode == 'p'): print('Default player marker set to \'X\'.') opponentSignature = 'X' selfSignature = 'O' #Choose who goes first ok = False while (not ok and gamemode == 'c'): playerGoesFirst = input('Do you want to go first?\n(y) for yes, (n) for no: ') if (playerGoesFirst not in ['y', 'n']): print('Invalid input detected. 
Please try again.\n') else: ok = True playerGoesFirst = (playerGoesFirst == 'y') #Preset first play if (gamemode == 'p'): print('Default: computer goes first.') playerGoesFirst = False #Replay loop replay = True while (replay): print('\n\n') board = mapBoard(int(dimensionX), int(dimensionY), ' ') viewBoard(board) while (True): try: locate([' '], board)[0] except IndexError: print('\nIt\'s a tie!') break #Player plays if (playerGoesFirst): mark(board, opponentSignature) if (win(board, winCond, opponentSignature, selfSignature)): print('Congratulations, you won!') break playerGoesFirst = True try: locate([' '], board)[0] except IndexError: print('\nIt\'s a tie!') break print('\n\nComputer is calculating...') #Computer plays board = play([deepcopy(board)], False, [], 0, winCond, [], [], board, selfSignature, opponentSignature, difficulty) if (win(board, winCond, selfSignature, opponentSignature)): print('Sorry, you lost!') break #Replay choice makingChoice = True while makingChoice: choice = input('\n\nDo you want to replay?\n(y) to replay with current configurations, (n) to quit, (p) to play with recommended configurations, or (c) to replay with different configurations.\n') if (choice == 'y'): replay = True configure = False print('\n\n') makingChoice = False elif (choice == 'n'): replay = False configure = False makingChoice = False elif (choice == 'p'): replay = False configure = True gamemode = 'p' print('\n\n') makingChoice = False elif (choice == 'c'): replay = False configure = True gamemode = 'c' print('\n\n') makingChoice = False else: print('Invalid input detected. Please try again.\n') input('\nPress ENTER to quit.')
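# --- Comparison sketch (not the author's implementation) -----------------------
# The play() routine above performs a minimax-style forecast (its comments speak
# of a "maximizer" and "minimizer") with heuristic scoring at a depth cutoff.
# For reference, this is a compact textbook minimax over the same board
# representation (' ' for empty squares), reusing the win() and locate() helpers
# defined earlier; the depth limit and the simple -1/0/+1 terminal scores are
# illustrative choices, not the author's tuning.
def minimax(board, winCond, me, opponent, maximizing=True, depth=4):
    if win(board, winCond, me, opponent):
        return 1, None
    if win(board, winCond, opponent, me):
        return -1, None
    empties = locate([' '], board)
    if not empties or depth == 0:
        return 0, None
    best_score = -2 if maximizing else 2
    best_move = None
    for x, y in empties:
        board[y][x] = me if maximizing else opponent
        score, _ = minimax(board, winCond, me, opponent, not maximizing, depth - 1)
        board[y][x] = ' '
        if (maximizing and score > best_score) or (not maximizing and score < best_score):
            best_score, best_move = score, (x, y)
    return best_score, best_move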
import os import argparse import json import pandas as pd import bilby from bilby_pipe.create_injections import InjectionCreator def main(): parser = argparse.ArgumentParser(description="Slurm files from nmma injection file") parser.add_argument( "--prior-file", type=str, required=True, help="The prior file from which to generate injections", ) parser.add_argument( "--injection-file", type=str, required=True, help="The bilby injection json file to be used", ) parser.add_argument( "--analysis-file", type=str, required=True, help="The analysis bash script to be replicated", ) parser.add_argument("-o", "--outdir", type=str, default="outdir") args = parser.parse_args() # load the injection json file if args.injection_file: if args.injection_file.endswith(".json"): with open(args.injection_file, "rb") as f: injection_data = json.load(f) datadict = injection_data["injections"]["content"] dataframe_from_inj = pd.DataFrame.from_dict(datadict) else: print("Only json supported.") exit(1) if len(dataframe_from_inj) > 0: args.n_injection = len(dataframe_from_inj) # create the injection dataframe from the prior_file injection_creator = InjectionCreator( prior_file=args.prior_file, prior_dict=None, n_injection=args.n_injection, default_prior="PriorDict", gps_file=None, trigger_time=0, generation_seed=0, ) dataframe_from_prior = injection_creator.get_injection_dataframe() # combine the dataframes dataframe = pd.DataFrame.merge( dataframe_from_inj, dataframe_from_prior, how="outer", left_index=True, right_index=True, ) for index, row in dataframe.iterrows(): with open(args.analysis_file, "r") as file: analysis = file.read() outdir = os.path.join(args.outdir, str(index)) if not os.path.isdir(outdir): os.makedirs(outdir) priors = bilby.gw.prior.PriorDict(args.prior_file) priors.to_file(outdir, label="injection") priorfile = os.path.join(outdir, "injection.prior") injfile = os.path.join(outdir, "lc.csv") analysis = analysis.replace("PRIOR", priorfile) analysis = analysis.replace("OUTDIR", outdir) analysis = analysis.replace("INJOUT", injfile) analysis = analysis.replace("INJNUM", str(index)) analysis_file = os.path.join(outdir, "inference.sh") fid = open(analysis_file, "w") fid.write(analysis) fid.close() if __name__ == "__main__": main()
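# --- Illustrative note (not part of the original script) -----------------------
# The analysis template passed via --analysis-file must contain the literal
# placeholder tokens PRIOR, OUTDIR, INJOUT and INJNUM, which the loop in main()
# rewrites per injection. The command name and paths below are made up.
def _template_substitution_example():
    template = ("some_analysis_command --prior PRIOR --outdir OUTDIR "
                "--injection-outfile INJOUT --injection-num INJNUM\n")
    return (template.replace("PRIOR", "outdir/0/injection.prior")
                    .replace("OUTDIR", "outdir/0")
                    .replace("INJOUT", "outdir/0/lc.csv")
                    .replace("INJNUM", "0"))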
from functools import partial from typing import NamedTuple, Union from flake8_annotations import Argument, Function from flake8_annotations.enums import AnnotationType class FormatTestCase(NamedTuple): """Named tuple for representing our test cases.""" test_object: Union[Argument, Function] str_output: str repr_output: str # Define partial functions to simplify object creation arg = partial(Argument, lineno=0, col_offset=0, annotation_type=AnnotationType.ARGS) func = partial(Function, name="test_func", lineno=0, col_offset=0, decorator_list=[]) formatting_test_cases = { "arg": FormatTestCase( test_object=arg(argname="test_arg"), str_output="<Argument: test_arg, Annotated: False>", repr_output=( "Argument(" "argname='test_arg', " "lineno=0, " "col_offset=0, " "annotation_type=AnnotationType.ARGS, " "has_type_annotation=False, " "has_3107_annotation=False, " "has_type_comment=False" ")" ), ), "func_no_args": FormatTestCase( test_object=func(args=[arg(argname="return")]), str_output="<Function: test_func, Args: [<Argument: return, Annotated: False>]>", repr_output=( "Function(" "name='test_func', " "lineno=0, " "col_offset=0, " "function_type=FunctionType.PUBLIC, " "is_class_method=False, " "class_decorator_type=None, " "is_return_annotated=False, " "has_type_comment=False, " "has_only_none_returns=True, " "is_nested=False, " "decorator_list=[], " "args=[Argument(argname='return', lineno=0, col_offset=0, annotation_type=AnnotationType.ARGS, " # noqa: E501 "has_type_annotation=False, has_3107_annotation=False, has_type_comment=False)]" ")" ), ), "func_has_arg": FormatTestCase( test_object=func(args=[arg(argname="foo"), arg(argname="return")]), str_output="<Function: test_func, Args: [<Argument: foo, Annotated: False>, <Argument: return, Annotated: False>]>", # noqa: E501 repr_output=( "Function(" "name='test_func', " "lineno=0, " "col_offset=0, " "function_type=FunctionType.PUBLIC, " "is_class_method=False, " "class_decorator_type=None, " "is_return_annotated=False, " "has_type_comment=False, " "has_only_none_returns=True, " "is_nested=False, " "decorator_list=[], " "args=[Argument(argname='foo', lineno=0, col_offset=0, annotation_type=AnnotationType.ARGS, " # noqa: E501 "has_type_annotation=False, has_3107_annotation=False, has_type_comment=False), " "Argument(argname='return', lineno=0, col_offset=0, annotation_type=AnnotationType.ARGS, " # noqa: E501 "has_type_annotation=False, has_3107_annotation=False, has_type_comment=False)]" ")" ), ), }
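# --- Consumption sketch (assumed; not the project's actual test module) --------
# One way the formatting_test_cases mapping above could drive parametrized
# assertions: each case checks that str() and repr() of the helper objects match
# the expected strings recorded in the named tuple.
import pytest


@pytest.mark.parametrize(
    "test_case",
    list(formatting_test_cases.values()),
    ids=list(formatting_test_cases.keys()),
)
def test_formatting(test_case: FormatTestCase) -> None:
    assert str(test_case.test_object) == test_case.str_output
    assert repr(test_case.test_object) == test_case.repr_output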
# coding: utf-8 import sys import os sys.path.insert(0, os.path.join(os.path.dirname(__file__),'../..')) import suzu.matdb.srim_compounddb as compounddb air = compounddb.Compound() air.desc = 'Air, Dry near sea level (ICRU-104) 0.00120484 O-23.2, N-75.5, Ar-1.3' air.name = '%Air, Dry (ICRU-104)' air.density = 0.00120484 air.mass_percentage = True air.elems = [(6, 0.000124), (8, 0.231781), (7, 0.755267), (18, 0.012827)] air.bonding = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] air.comment = """corrected by H. Paul, Sept. 2004 """ air.fulltext = """*Air, Dry near sea level (ICRU-104) 0.00120484 O-23.2, N-75.5, Ar-1.3 "%Air, Dry (ICRU-104)", .00120484, 4, 6, .000124, 8, .231781, 7, .755267, 18, .012827 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 $ corrected by H. Paul, Sept. 2004 $""" water = compounddb.Compound() water.desc = 'Water (liquid) 1.00 H-2, O-1' water.name = 'Water_Liquid (ICRU-276)' water.density = 1.0 water.mass_percentage = False water.elems = [(1, 2.0), (8, 1.0)] water.bonding = [0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] water.comment = b"""Chemical Formula: H \u00c4\u00c4 O \u00c4\u00c4 H There is about an 8% increase in the peak of the stopping power for ions in water vapour relative to the liquid. (The peak of the stopping occurs at an energy of about 100 keV/amu times the 2/3 power of the ion's atomic number.) Above the peak the phase difference begins to disappear. This calculation is for the LIQUID phase. """.decode('cp437') print(water.to_suzu()) print(air.to_suzu())
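# --- Inspection sketch (not in the original file) ------------------------------
# Relies only on the attributes set on the Compound objects above, not on any
# other srim_compounddb API.
for _compound in (air, water):
    _unit = "mass fraction" if _compound.mass_percentage else "atoms per molecule"
    print("{} (density {})".format(_compound.name, _compound.density))
    for _z, _fraction in _compound.elems:
        print("  Z={}: {} ({})".format(_z, _fraction, _unit))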
import numpy as np
from sklearn import metrics
from PIL import Image


def get_metrics(pred, logits, gt):
    if isinstance(logits, list):
        logits = logits[-1]
    result = {'confusion_matrix': metrics.confusion_matrix(gt.flatten(), pred.flatten(), labels=[1, 0]),
              'auc': roc(gt, logits)}
    return result


def get_metrics_without_roc(pred, gt):
    result = {'confusion_matrix': metrics.confusion_matrix(gt.flatten(), pred.flatten(), labels=[1, 0])}
    return result


def show_metrics(metrics):
    con_mat = np.zeros((2, 2))
    auc = 0.0
    for m in metrics:
        con_mat += m['confusion_matrix']
        auc += m['auc']
    auc /= len(metrics)
    result = {'confusion_matrix': con_mat.tolist(),
              'accuracy': accuracy(con_mat),
              'kappa': kappa(con_mat),
              'precision': precision(con_mat),
              'sensitivity': sensitivity(con_mat),
              'specificity': specificity(con_mat),
              'auc': auc,
              }
    return result


def show_metrics_without_roc(metrics):
    con_mat = np.zeros((2, 2))
    for m in metrics:
        con_mat += m['confusion_matrix']
    result = {'confusion_matrix': con_mat,
              'accuracy': accuracy(con_mat),
              'kappa': kappa(con_mat),
              'precision': precision(con_mat),
              'sensitivity': sensitivity(con_mat),
              'specificity': specificity(con_mat),
              }
    return result


def show_metrics_from_save_image(data):
    pred = data[:, :, 0] // 255
    gt = data[:, :, 1] // 255
    metrics = [get_metrics_without_roc(pred, gt)]
    return show_metrics_without_roc(metrics)


# The confusion matrices above are built with confusion_matrix(y_true, y_pred, labels=[1, 0]),
# so they are laid out as [[TP, FN], [FP, TN]] (rows = truth, positive class first).
def kappa(matrix):
    matrix = np.array(matrix)
    n = np.sum(matrix)
    sum_po = 0
    sum_pe = 0
    for i in range(len(matrix[0])):
        sum_po += matrix[i][i]
        row = np.sum(matrix[i, :])
        col = np.sum(matrix[:, i])
        sum_pe += row * col
    po = sum_po / n
    pe = sum_pe / (n * n)
    # print(po, pe)
    return (po - pe) / (1 - pe)


def sensitivity(matrix):
    # TP / (TP + FN); FN is matrix[0][1] in the layout above
    return matrix[0][0] / (matrix[0][0] + matrix[0][1])


def specificity(matrix):
    # TN / (TN + FP); FP is matrix[1][0] in the layout above
    return matrix[1][1] / (matrix[1][1] + matrix[1][0])


def precision(matrix):
    # TP / (TP + FP)
    return matrix[0][0] / (matrix[0][0] + matrix[1][0])


def roc(gt, logits):
    gtlist = gt.flatten()
    predlist = logits.detach().cpu().numpy()[0, 1, ...].flatten()
    fpr, tpr, thresholds = metrics.roc_curve(gtlist, predlist, pos_label=1)
    roc_auc = metrics.auc(fpr, tpr)  # auc is the area under the ROC curve
    return roc_auc


def accuracy(matrix):
    return (matrix[0][0] + matrix[1][1]) / (matrix[0][0] + matrix[0][1] + matrix[1][0] + matrix[1][1])


def error_rate(predictions, labels):
    """
    Return the error rate based on dense predictions and 1-hot labels.
    """
    return 100.0 - (
        100.0 *
        np.sum(np.argmin(predictions, 3) == np.argmin(labels, 3)) /
        (predictions.shape[0] * predictions.shape[1] * predictions.shape[2]))


def save_predict(filename, data, gt, pred):
    pred = pred * 255
    gt = gt[0, 1, :, :]
    gt = np.where(gt > 0.5, 255, 0)
    differ = np.stack([np.zeros_like(pred), gt, pred], -1)
    pred = np.stack([pred, pred, pred], -1)
    gt = np.stack([gt, gt, gt], -1)
    data = np.transpose(data, (0, 2, 3, 1))[0, ...]
    if data.shape[2] == 60:
        data = data[:, :, 10:40:10]
    elif data.shape[2] == 1:
        data = np.concatenate([data, data, data], -1)
    elif data.shape[2] == 15:
        data = data[:, :, 0:15:5]
    data -= np.min(data, axis=(0, 1))
    data /= (np.max(data, axis=(0, 1)) / 255)
    data = data.astype(np.uint8)
    img = Image.fromarray(np.concatenate([data, pred, gt, differ], axis=1).astype(np.uint8))
    img.save(filename)


def save_logits(filename, pred):
    pred = pred * 255
    pred = np.stack([pred, pred, pred], -1)
    img = Image.fromarray(pred.astype(np.uint8))
    img.save(filename)
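# --- Worked example (not in the original module) -------------------------------
# With labels=[1, 0] and y_true passed first, the confusion matrix is laid out as
# [[TP, FN], [FP, TN]]. The toy matrix below should give accuracy 0.85,
# sensitivity 0.8, specificity 0.9, precision ~0.889 and kappa 0.7.
if __name__ == '__main__':
    toy = np.array([[40, 10],
                    [5, 45]])
    print('accuracy   ', accuracy(toy))
    print('sensitivity', sensitivity(toy))
    print('specificity', specificity(toy))
    print('precision  ', precision(toy))
    print('kappa      ', kappa(toy))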
from pysys.constants import *
from apama.basetest import ApamaBaseTest
from apama.correlator import CorrelatorHelper
from GAPDemoConnected import GAPDemoConnectedHelper


class PySysTest(ApamaBaseTest):

    def __init__(self, descriptor, outsubdir, runner):
        super(PySysTest, self).__init__(descriptor, outsubdir, runner)
        self.helper = GAPDemoConnectedHelper(self, PROJECT)

    def execute(self):
        # Start application
        correlator = self.helper.startApplication()

        # Find a phone device
        (phoneId, phoneName) = self.helper.getDeviceDetails()
        self.log.info(f'Found c8y_SensorPhone device with name "{phoneName}" and id "{phoneId}"')

        # Wait for application to subscribe to measurements from the phone
        self.helper.waitForSubscription()

        # Set baseline acceleration
        self.helper.sendAcceleration(phoneId, 0.0, 0.0, 1.23)

        # Wait for all events to be processed
        self.helper.waitForBaseline()

        # Get current active alarm counts
        flipUpBefore = self.helper.countActiveAlarms("FlipUp")
        self.log.info(f'Found {flipUpBefore} active "FlipUp" alarms before sending measurements')
        flipDownBefore = self.helper.countActiveAlarms("FlipDown")
        self.log.info(f'Found {flipDownBefore} active "FlipDown" alarms before sending measurements')

        # Send acceleration measurements
        self.log.info('Sending measurements...')
        self.helper.sendAcceleration(phoneId, 0.0, 0.0, -0.9)   # Up
        self.helper.sendAcceleration(phoneId, 0.0, 0.0, 0.9)    # Down
        self.helper.sendAcceleration(phoneId, 0.0, 0.0, 0.4)
        self.helper.sendAcceleration(phoneId, 0.0, 0.0, 0.0)
        self.helper.sendAcceleration(phoneId, 0.0, 0.0, -0.4)
        self.helper.sendAcceleration(phoneId, 0.0, 0.0, -0.9)   # Up
        self.helper.sendAcceleration(phoneId, 0.0, 0.0, 0.8)
        self.helper.sendAcceleration(phoneId, 0.0, 0.0, 0.9)
        self.helper.sendAcceleration(phoneId, 0.0, 0.0, 0.85)   # Down

        # Wait for all events to be processed
        self.helper.waitForMeasurements()

        # Get latest active alarm counts and calculate delta
        flipUpAfter = self.helper.countActiveAlarms("FlipUp")
        self.log.info(f'Found {flipUpAfter} active "FlipUp" alarms after sending measurements')
        flipDownAfter = self.helper.countActiveAlarms("FlipDown")
        self.log.info(f'Found {flipDownAfter} active "FlipDown" alarms after sending measurements')
        self.flipUpDelta = flipUpAfter - flipUpBefore
        self.flipDownDelta = flipDownAfter - flipDownBefore

    def validate(self):
        self.assertEval("self.flipUpDelta=={expected}", expected=2)
        self.assertEval("self.flipDownDelta=={expected}", expected=2)
"""youtubesearch URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path, include urlpatterns = [ path('admin/', admin.site.urls), path('', include('search.urls')), ]
from cereal import car from selfdrive.car import dbc_dict Ecu = car.CarParams.Ecu class CarControllerParams: ACCEL_MAX = 2.0 ACCEL_MIN = -3.7 STEER_MAX = 384 # 409 is the max, 255 is stock STEER_DELTA_UP = 3 STEER_DELTA_DOWN = 7 STEER_DRIVER_ALLOWANCE = 50 STEER_DRIVER_MULTIPLIER = 2 STEER_DRIVER_FACTOR = 1 class CAR: # Hyundai ELANTRA_I30 = "HYUNDAI AVANTE,I30 2017~2020 (AD,PD)" ELANTRA21 = "HYUNDAI AVANTE 2021 (CN7)" ELANTRA21_HEV = "HYUNDAI AVANTE HEV 2021 (CN7)" SONATA = "HYUNDAI SONATA 2020 (DN8)" SONATA_HEV = "HYUNDAI SONATA HEV 2020 (DN8)" SONATA_LF = "HYUNDAI SONATA 2016~2019 (LF)" SONATA_LF_HEV = "HYUNDAI SONATA 2018 HEV (LF)" KONA = "HYUNDAI KONA 2019 (OS)" KONA_EV = "HYUNDAI KONA EV 2019 (OS)" KONA_HEV = "HYUNDAI KONA HEV 2019 (OS)" IONIQ_EV = "HYUNDAI IONIQ EV 2019~2020 (AE)" IONIQ_HEV = "HYUNDAI IONIQ HEV 2017 (AE)" SANTA_FE = "HYUNDAI SANTA FE 2019~2021 (TM)" SANTA_FE_HEV = "HYUNDAI SANTA FE 2021~2022 (TM)" PALISADE = "HYUNDAI PALISADE 2020 (LX2)" VELOSTER = "HYUNDAI VELOSTER 2019 (JS)" GRANDEUR = "GRANDEUR 2017~2019 (IG)" GRANDEUR_HEV = "GRANDEUR HEV 2018~2019 (IG)" GRANDEUR20 = "GRANDEUR 2020 (IG)" GRANDEUR20_HEV = "GRANDEUR HEV 2020 (IG)" NEXO = "HYUNDAI NEXO (FE)" # Kia FORTE = "KIA K3 2018 (BD)" K5 = "KIA K5 2016~2020 (JF)" K5_HEV = "KIA K5 HEV 2016~2020 (JF)" K5_DL3 = "KIA K5 2021 (DL3)" K5_DL3_HEV = "KIA K5 HEV 2021 (DL3)" K7 = "KIA K7 2016-2019 (YG)" K7_HEV = "KIA K7 HEV 2017-2019 (YG)" K9 = "KIA K9 2019-2021 (RJ)" SPORTAGE = "KIA SPORTAGE 2016~2020 (QL)" SORENTO = "KIA SORENTO 2017~2020 (UM)" MOHAVE = "KIA MOHAVE 2020 (HM)" STINGER = "KIA STINGER 2018~2021 (CK)" NIRO_EV = "KIA NIRO EV 2020 (DE)" NIRO_HEV = "KIA NIRO HEV 2018 (DE)" SOUL_EV = "KIA SOUL EV 2019 (SK3)" SELTOS = "KIA SELTOS 2019 (SP2)" # Genesis GENESIS = "GENESIS 2014-2016 (DH)" GENESIS_G70 = "GENESIS G70 2018~ (IK)" GENESIS_G80 = "GENESIS G80 2018~ (DH)" GENESIS_G90 = "GENESIS G90,EQ900 2016~2019 (HI)" # --------------------------------------------------------------------------------------- # E-CAN Signal CAR # hyundai - G80 2020(RG3), GV70 2021(JK1), GV80 2020(JX1), TUSON 2021(NX4), STARIA 2021(UX4), IONIQ5 2021(NE) # kia - CARNIVAL 2021(KA4), SORENTO 2020(MQ4), K8 2021(GL3) # --------------------------------------------------------------------------------------- class Buttons: NONE = 0 RES_ACCEL = 1 SET_DECEL = 2 GAP_DIST = 3 CANCEL = 4 FINGERPRINTS = { # Hyundai CAR.ELANTRA_I30: [{ 66: 8, 67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 512: 6, 544: 8, 546: 8, 547: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 838: 8, 844: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1087: 8, 1151: 6, 1155: 8, 1164: 8, 1168: 7, 1170: 8, 1191: 2, 1193: 8, 1253: 8, 1254: 8, 1255: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1314: 8, 1322: 8, 1331: 8, 1332: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1485: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 1532: 5, 1792: 8, 1872: 8, 1937: 8, 1952: 8, 1953: 8, 1960: 8, 1968: 8, 1988: 8, 1990: 8, 1998: 8, 2000: 8, 2001: 8, 2003: 8, 2004: 8, 2005: 8, 2008: 8, 2009: 8, 2012: 8, 2013: 8, 2015: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8 }], CAR.ELANTRA21: [{ 67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 524: 8, 544: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 8, 865: 8, 870: 7, 871: 8, 872: 8, 897: 
8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1069: 8, 1078: 4, 1102: 8, 1107: 5, 1108: 8, 1136: 8, 1145: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1170: 8, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1193: 8, 1225: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1339: 8, 1342: 8, 1343: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1379: 8, 1384: 8, 1394: 8, 1407: 8, 1419: 8, 1427: 6, 1446: 8, 1456: 4, 1470: 8, 1485: 8, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8 }], CAR.ELANTRA21_HEV: [{ }], CAR.SONATA: [{ 67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 545: 8, 546: 8, 547: 8, 548: 8, 549: 8, 550: 8, 576: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 8, 865: 8, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 908: 8, 909: 8, 912: 7, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1089: 5, 1096: 8, 1107: 5, 1108: 8, 1114: 8, 1136: 8, 1145: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1170: 8, 1173: 8, 1180: 8, 1183: 8, 1184: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1330: 8, 1339: 8, 1342: 6, 1343: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1379: 8, 1384: 8, 1394: 8, 1407: 8, 1419: 8, 1427: 6, 1446: 8, 1456: 4, 1460: 8, 1470: 8, 1485: 8, 1504: 3, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8, 2015: 8 }], CAR.SONATA_HEV: [{ 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 548: 8, 576: 8, 593: 8, 688: 6, 757: 2, 832: 8, 865: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1102: 8, 1108: 8, 1114: 8, 1136: 6, 1138: 5, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1173: 8, 1180: 8, 1184: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 8, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1330: 8, 1339: 8, 1342: 6, 1343: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1446: 8, 1448: 8, 1456: 4, 1460: 8, 1470: 8, 1476: 8, 1535: 8 }], CAR.SONATA_LF: [{ 66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 447: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1253: 8, 1254: 8, 1255: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1314: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1397: 8, 1407: 8, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 1532: 5, 2000: 8, 2001: 8, 2004: 8, 2005: 8, 2008: 8, 2009: 8, 2012: 8, 2013: 8, 2014: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8 }], CAR.SONATA_LF_HEV: [{ 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 7, 593: 8, 688: 5, 881: 8, 882: 8, 897: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1168: 7, 1173: 8, 1186: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1425: 2, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8, 2000: 8, 2004: 8, 2005: 8, 2008: 8, 2012: 8, 2013: 8 }], 
CAR.KONA: [{ 67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 354: 3, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1170: 8, 1173: 8, 1186: 2, 1191: 2, 1193: 8, 1265: 4,1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1378: 8, 1384: 8, 1394: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1988: 8, 1990: 8, 1996: 8, 1998: 8, 2000: 8, 2001: 8, 2004: 8, 2008: 8, 2009: 8, 2012: 8, 2015: 8 }], CAR.KONA_EV: [{ 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 549: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1193: 8, 1225: 8, 1260: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1307: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1378: 4, 1379: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8, 2015: 8 }], CAR.KONA_HEV: [{ 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 547: 8, 548: 8, 549: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1138: 4, 1151: 6, 1155: 8, 1157: 4, 1164: 8, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1193: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1378: 8, 1379: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8 }], CAR.IONIQ_EV: [{ 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 524: 8, 544: 7, 546: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1164: 8, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1379: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2005: 8, 2008: 8, 2012: 8, 2013: 8, 2015: 8 }], CAR.IONIQ_HEV: [{ 68:8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 524: 8, 544: 8, 576:8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1164: 8, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1379: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1473: 8, 1476: 8, 1507: 8, 1535: 8, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2005: 8, 2008: 8, 2012: 8, 2013: 8 }], CAR.SANTA_FE: [{ 67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 6, 764: 8, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 
8, 1180: 8, 1183: 8, 1186: 2, 1191: 2, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1479: 8, 1990: 8, 1998: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8, 2015: 8 }], CAR.SANTA_FE_HEV: [{ }], CAR.PALISADE: [{ 67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 549: 8, 576: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1123: 8, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 2000: 8, 2005: 8, 2008: 8 }], CAR.VELOSTER: [{ 64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 512: 6, 544: 8, 558: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1170: 8, 1181: 5, 1186: 2, 1191: 2, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1378: 4, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 1532: 5, 1872: 8, 1988: 8, 1996: 8, 2000: 8, 2001: 8, 2004: 8, 2008: 8, 2009: 8, 2012: 8, 2015: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8 }], CAR.GRANDEUR: [{ 67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 547: 8, 549: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1185: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8 }], CAR.GRANDEUR_HEV: [{ 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 516: 8, 544: 8, 546: 8, 576: 8, 593: 8, 688: 5, 832: 8, 865: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1108: 8, 1136: 6, 1138: 5, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1173: 8, 1180: 8, 1185: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1379: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8 }], CAR.GRANDEUR20: [{ 67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 516: 8, 524: 8, 528: 8, 532: 8, 544: 8, 576: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 8, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1170: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 8, 1345: 
8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1379: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8 }], CAR.GRANDEUR20_HEV: [{ 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 516: 8, 544: 8, 576: 8, 593: 8, 688: 5, 764: 8, 832: 8, 865: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1108: 8, 1136: 6, 1138: 5, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8 }], CAR.NEXO: [{ 127: 8, 145: 8, 146: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 512: 6, 544: 8, 546: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 908: 8, 909: 8, 912: 7, 916: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1173: 8, 1174: 8, 1180: 8, 1183: 8, 1186: 2, 1191: 2, 1192: 8, 1193: 8, 1210: 8, 1219: 8, 1220: 8, 1222: 6, 1223: 8, 1224: 8, 1227: 8, 1230: 6, 1231: 6, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1297: 8, 1298: 8, 1305: 8, 1312: 8, 1315: 8, 1316: 8, 1322: 8, 1324: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1437: 8, 1456: 4, 1460: 8, 1470: 8, 1484: 8, 1507: 8, 1520: 8, 1535: 8 }], # Kia CAR.FORTE: [{ 67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1078: 4, 1107: 5, 1136: 8, 1156: 8, 1170: 8, 1173: 8, 1191: 2, 1225: 8, 1265: 4, 1280: 4, 1287: 4, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1384: 8, 1394: 8, 1407: 8, 1427: 6, 1456: 4, 1470: 8 }], CAR.K5: [{ 64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 447: 8, 512: 6, 544: 8, 593: 8, 608: 8, 625: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 6, 909: 8, 912: 7, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1186: 2, 1191: 2, 1236: 2, 1253: 8, 1254: 8, 1255: 8, 1265: 4, 1268: 8, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1371: 8, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1492: 8, 1530: 8, 1532: 5, 1905: 8, 1913: 8, 1952: 8, 1960: 8, 1988: 8, 1996: 8, 2001: 8, 2004: 8, 2008: 8, 2009: 8, 2012: 8, 2015: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8 }], CAR.K5_HEV: [{ 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 6, 909: 8, 912: 7, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1168: 7, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1236: 2, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1407: 8, 1419: 8, 1420: 8, 1425: 2, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8 }], CAR.K5_DL3: [{ }], CAR.K5_DL3_HEV: [{ }], CAR.SPORTAGE: [{ 67: 8, 
68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 447: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 6, 909: 8, 916: 8, 1040: 8, 1078: 4, 1170: 8, 1191: 2, 1253: 8, 1254: 8, 1255: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1492: 8, 1530: 8 }], CAR.SORENTO: [{ 67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1384: 8, 1407: 8, 1411: 8, 1419: 8, 1425: 2, 1427: 6, 1444: 8, 1456: 4, 1470: 8, 1489: 1 }], CAR.MOHAVE: [{ 67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 8, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1123: 8, 1136: 8, 1145: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1170: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1479: 8 }], CAR.STINGER: [{ 67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 359: 8, 544: 8, 576: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1379: 8, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8, 2015: 8 }], CAR.NIRO_EV: [{ 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 516: 8, 544: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1193: 8, 1225: 8, 1260: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8, 1988: 8, 1990: 8, 1998: 8, 1996: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8, 2015: 8 }], CAR.NIRO_HEV: [{ 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1535: 8 }], CAR.SOUL_EV: [{ 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 548: 8, 549: 8, 593: 8, 688: 6, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 
1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1173: 8, 1186: 2, 1191: 2, 1193: 8, 1225: 8, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1378: 8, 1379: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8 }], CAR.SELTOS: [{ 67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 354: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 8, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 905: 8, 909: 8, 910: 5, 911: 5, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1114: 8, 1136: 8, 1145: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1170: 8, 1173: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1379: 8, 1384: 8, 1394: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1446: 8, 1456: 4, 1470: 8, 1485: 8, 1911: 8 }], CAR.K7: [{ 67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1444: 8, 1456: 4, 1470: 8 }], CAR.K7_HEV: [{ 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 549: 8, 576: 8, 593: 8, 688: 5, 832: 8, 865: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1096: 8, 1102: 8, 1108: 8, 1136: 6, 1138: 5, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1210: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1343: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1379: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8 }], CAR.K9: [{ 67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1186: 2, 1191: 2, 1227: 8, 1265: 4, 1280: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1379: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8 }], # Genesis CAR.GENESIS: [{ 67: 8, 68: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 5, 897: 8, 902: 8, 903: 6, 912: 7, 916: 8, 1024: 2, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1268: 8, 1280: 1, 1281: 3, 1287: 4, 1292: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1334: 8, 1335: 8, 1342: 6, 1345: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1379: 8, 1384: 5, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1434: 2, 1437: 8, 1456: 4 }], CAR.GENESIS_G70: [{ 67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 544: 8, 576: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 
1168: 7, 1170: 8, 1173: 8, 1184: 8, 1186: 2, 1191: 2, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1379: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8, 2015: 8 }], CAR.GENESIS_G80: [{ 67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 359: 8, 544: 8, 546: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1024: 2, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 8, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 3, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1434: 2, 1437: 8, 1456: 4, 1470: 8 }], CAR.GENESIS_G90: [{ 67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 359: 8, 544: 8, 546: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 3, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1434: 2, 1456: 4, 1470: 8, 1988: 8, 2000: 8, 2003: 8, 2004: 8, 2005: 8, 2008: 8, 2011: 8, 2012: 8, 2013: 8, 2015: 8 }], } ECU_FINGERPRINT = { Ecu.fwdCamera: [832, 1156, 1191, 1342] #832:lkas11, 1156:hda11_mfc, 1191:mfc_4a7, 1342:lkas12 } FW_VERSIONS = { # fwdRadar, fwdCamera, eps, esp, engine, transmission # hyundai CAR.ELANTRA_I30: { (Ecu.fwdRadar, 0x7d0, None): [ b'\xf1\x00PD__ SCC F-CUP 1.00 1.01 99110-G3100 ', ], (Ecu.fwdCamera, 0x7c4, None): [ b'\xf1\x00PDP LKAS AT AUS RHD 1.00 1.01 99211-G4000 v60', ], (Ecu.eps, 0x7d4, None): [ b'\xf1\x00PDu MDPS C 1.00 1.01 56310/G3690 4PDUC101', ], (Ecu.esp, 0x7d1, None): [ b'\xf1\x00PD ESC \x11 100 \a\x03 58910-G3AC0', ], (Ecu.engine, 0x7e0, None): [ b'\x01TPD-1A506F000H00', ], (Ecu.transmission, 0x7e1, None): [ b'\xf1\x816U2VA051\x00\x00\xf1\x006U2V0_C2\x00\x006U2VA051\x00\x00DPD0H16US0\x00\x00\x00\x00', ], }, CAR.ELANTRA21: { (Ecu.fwdRadar, 0x7d0, None): [ b'\xf1\x00CN7_ SCC F-CUP 1.00 1.01 99110-AA000 ', b'\xf1\x00CN7_ SCC FHCUP 1.00 1.01 99110-AA000 ', b'\xf1\x8799110AA000\xf1\x00CN7_ SCC FHCUP 1.00 1.01 99110-AA000 ', ], (Ecu.fwdCamera, 0x7c4, None): [ b'\xf1\x00CN7 MFC AT USA LHD 1.00 1.00 99210-AB000 200819' b'\xf1\x00CN7 MFC AT USA LHD 1.00 1.03 99210-AA000 200819', ], (Ecu.eps, 0x7d4, None): [ b'\xf1\x87\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf1\x00CN7 MDPS C 1.00 1.06 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 4CNDC106', b'\xf1\x8756310/AA070\xf1\x00CN7 MDPS C 1.00 1.06 56310/AA070 4CNDC106', b'\xf1\x8756310AA050\x00\xf1\x00CN7 MDPS C 1.00 1.06 56310AA050\x00 4CNDC106', ], (Ecu.esp, 0x7d1, None): [ b'\xf1\x00CN ESC \t 101 \x10\x03 58910-AB800', b'\xf1\x8758910-AA800\xf1\x00CN ESC \t 104 \x08\x03 58910-AA800', b'\xf1\x8758910-AB800\xf1\x00CN ESC \t 101 \x10\x03 58910-AB800', ], (Ecu.engine, 0x7e0, None): [ b'\xf1\x82CNCWD0AMFCXCSFFA', b'\xf1\x82CNCWD0AMFCXCSFFB', b'\xf1\x82CNCVD0AMFCXCSFFB', ], (Ecu.transmission, 0x7e1, None): [ b'\xf1\x00HT6WA280BLHT6VA640A1CCN0N20NS5\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', b'\xf1\x00HT6WA280BLHT6VA640A1CCN0N20NS5\x00\x00\x00\x00\x00\x00\xe8\xba\xce\xfa', 
b'\xf1\x87CXMQFM2135005JB2E\xb9\x89\x98W\xa9y\x97h\xa9\x98\x99wxvwh\x87\177\xffx\xff\xff\xff,,\xf1\x89HT6VA640A1\xf1\x82CCN0N20NS5\x00\x00\x00\x00\x00\x00', b'\xf1\x87CXMQFM1916035JB2\x88vvgg\x87Wuwgev\xa9\x98\x88\x98h\x99\x9f\xffh\xff\xff\xff\xa5\xee\xf1\x89HT6VA640A1\xf1\x82CCN0N20NS5\x00\x00\x00\x00\x00\x00', b'\xf1\x87CXLQF40189012JL2f\x88\x86\x88\x88vUex\xb8\x88\x88\x88\x87\x88\x89fh?\xffz\xff\xff\xff\x08z\xf1\x89HT6VA640A1\xf1\x82CCN0N20NS5\x00\x00\x00\x00\x00\x00', ], }, CAR.ELANTRA21_HEV: { (Ecu.fwdRadar, 0x7d0, None): [ b'\xf1\000CNhe SCC FHCUP 1.00 1.01 99110-BY000 ', b'\xf1\x8799110BY000\xf1\x00CNhe SCC FHCUP 1.00 1.01 99110-BY000 ', ], (Ecu.fwdCamera, 0x7c4, None): [ b'\xf1\000CN7HMFC AT USA LHD 1.00 1.03 99210-AA000 200819' ], (Ecu.eps, 0x7d4, None): [ b'\xf1\x8756310/BY050\xf1\000CN7 MDPS C 1.00 1.02 56310/BY050 4CNHC102' ], (Ecu.engine, 0x7e0, None): [ b'\xf1\x816H6G5051\000\000\000\000\000\000\000\000' ], (Ecu.transmission, 0x7e1, None): [ b'\xf1\0006U3L0_C2\000\0006U3K3051\000\000HCN0G16NS0\xb9?A\xaa', b'\xf1\0006U3L0_C2\000\0006U3K3051\000\000HCN0G16NS0\000\000\000\000', b'\xf1\x816U3K3051\000\000\xf1\0006U3L0_C2\000\0006U3K3051\000\000HCN0G16NS0\xb9?A\xaa', b'\xf1\x816U3K3051\000\000\xf1\0006U3L0_C2\000\0006U3K3051\000\000HCN0G16NS0\000\000\000\000' ], }, CAR.SONATA: { (Ecu.fwdRadar, 0x7d0, None): [ b'\xf1\x00DN8 1.00 99110-L0000 \xaa\xaa\xaa\xaa\xaa\xaa\xaa ', b'\xf1\x00DN8 1.00 99110-L0000 \xaa\xaa\xaa\xaa\xaa\xaa\xaa\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', b'\xf1\x00DN8_ SCC F-CU- 1.00 1.00 99110-L0000 ', b'\xf1\x00DN8_ SCC F-CUP 1.00 1.00 99110-L0000 ', b'\xf1\x00DN8_ SCC F-CUP 1.00 1.02 99110-L1000 ', b'\xf1\x00DN8_ SCC FHCUP 1.00 1.00 99110-L0000 ', b'\xf1\x00DN8_ SCC FHCUP 1.00 1.01 99110-L1000 ', b'\xf1\x00DN89110-L0000 \xaa\xaa\xaa\xaa\xaa\xaa\xaa ', b'\xf1\x8799110L0000\xf1\x00DN8_ SCC F-CUP 1.00 1.00 99110-L0000 ', b'\xf1\x8799110L0000\xf1\x00DN8_ SCC FHCUP 1.00 1.00 99110-L0000 ', ], (Ecu.fwdCamera, 0x7c4, None): [ b'\xf1\x00DN8 MFC AT KOR LHD 1.00 1.02 99211-L1000 190422', b'\xf1\x00DN8 MFC AT RUS LHD 1.00 1.03 99211-L1000 190705', b'\xf1\x00DN8 MFC AT USA LHD 1.00 1.00 99211-L0000 190716', b'\xf1\x00DN8 MFC AT USA LHD 1.00 1.01 99211-L0000 191016', b'\xf1\x00DN8 MFC AT USA LHD 1.00 1.03 99211-L0000 210603', b'\xf1\x00DN8 MFC AT USA LHD 1.00 1.05 99211-L1000 201109', b'\xf1\x00DN8 MFC AT USA LHD 1.00 1.06 99211-L1000 210325', ], (Ecu.eps, 0x7d4, None): [ b'\xf1\x00DN8 MDPS C 1.00 1.01 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 4DNAC101', b'\xf1\x00DN8 MDPS C 1.00 1.01 56310-L0010 4DNAC101', b'\xf1\x00DN8 MDPS C 1.00 1.01 56310L0010\x00 4DNAC101', b'\xf1\x00DN8 MDPS R 1.00 1.00 57700-L0000 4DNAP100', b'\xf1\x87\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf1\x00DN8 MDPS C 1.00 1.01 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 4DNAC101', b'\xf1\x8756310-L0010\xf1\x00DN8 MDPS C 1.00 1.01 56310-L0010 4DNAC101', b'\xf1\x8756310-L0210\xf1\x00DN8 MDPS C 1.00 1.01 56310-L0210 4DNAC101', b'\xf1\x8756310-L1010\xf1\x00DN8 MDPS C 1.00 1.03 56310-L1010 4DNDC103', b'\xf1\x8756310-L1030\xf1\x00DN8 MDPS C 1.00 1.03 56310-L1030 4DNDC103', b'\xf1\x8756310L0010\x00\xf1\x00DN8 MDPS C 1.00 1.01 56310L0010\x00 4DNAC101', b'\xf1\x8756310L0210\x00\xf1\x00DN8 MDPS C 1.00 1.01 56310L0210\x00 4DNAC101', b'\xf1\x8757700-L0000\xf1\x00DN8 MDPS R 1.00 1.00 57700-L0000 4DNAP100', ], (Ecu.esp, 0x7d1, None): [ b'\xf1\x00DN ESC \a 106 \a\x01 58910-L0100', b'\xf1\x00DN ESC \x01 102\x19\x04\x13 58910-L1300', b'\xf1\x00DN ESC \x03 100 \x08\x01 58910-L0300', b'\xf1\x00DN ESC 
\x06 104\x19\x08\x01 58910-L0100', b'\xf1\x00DN ESC \x07 104\x19\x08\x01 58910-L0100', b'\xf1\x00DN ESC \x08 103\x19\x06\x01 58910-L1300', b'\xf1\x8758910-L0100\xf1\x00DN ESC \a 106 \a\x01 58910-L0100', b'\xf1\x8758910-L0100\xf1\x00DN ESC \x06 104\x19\x08\x01 58910-L0100', b'\xf1\x8758910-L0100\xf1\x00DN ESC \x06 106 \x07\x01 58910-L0100', b'\xf1\x8758910-L0100\xf1\x00DN ESC \x07 104\x19\x08\x01 58910-L0100', b'\xf1\x8758910-L0300\xf1\x00DN ESC \x03 100 \x08\x01 58910-L0300', b'\xf1\x00DN ESC \x06 106 \x07\x01 58910-L0100', ], (Ecu.engine, 0x7e0, None): [ b'\xf1\x81HM6M1_0a0_F00', b'\xf1\x82DNBVN5GMCCXXXDCA', b'\xf1\x82DNBVN5GMCCXXXG2F', b'\xf1\x82DNBWN5TMDCXXXG2E', b'\xf1\x82DNCVN5GMCCXXXF0A', b'\xf1\x82DNCVN5GMCCXXXG2B', b'\xf1\x870\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf1\x82DNDWN5TMDCXXXJ1A', b'\xf1\x87391162M003', b'\xf1\x87391162M013', b'\xf1\x87391162M023', b'HM6M1_0a0_F00', b'HM6M1_0a0_G20', b'HM6M2_0a0_BD0', ], (Ecu.transmission, 0x7e1, None): [ b'\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v', b'\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00SDN8T16NB1\xe3\xc10\xa1', b'\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00SDN8T16NB2\n\xdd^\xbc', b'\xf1\x00HT6TA260BLHT6TA800A1TDN8C20KS4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', b'\xf1\x00HT6TA260BLHT6TA810A1TDN8M25GS0\x00\x00\x00\x00\x00\x00\xaa\x8c\xd9p', b'\xf1\x00HT6WA250BLHT6WA910A1SDN8G25NB1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', b'\xf1\x00HT6WA250BLHT6WA910A1SDN8G25NB1\x00\x00\x00\x00\x00\x00\x96\xa1\xf1\x92', b'\xf1\x00HT6WA280BLHT6WAD10A1SDN8G25NB2\x00\x00\x00\x00\x00\x00\x08\xc9O:', b'\xf1\x00T02601BL T02730A1 VDN8T25XXX730NS5\xf7_\x92\xf5', b'\xf1\x87954A02N060\x00\x00\x00\x00\x00\xf1\x81T02730A1 \xf1\x00T02601BL T02730A1 VDN8T25XXX730NS5\xf7_\x92\xf5', b'\xf1\x87SAKFBA2926554GJ2VefVww\x87xwwwww\x88\x87xww\x87wTo\xfb\xffvUo\xff\x8d\x16\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v', b'\xf1\x87SAKFBA3030524GJ2UVugww\x97yx\x88\x87\x88vw\x87gww\x87wto\xf9\xfffUo\xff\xa2\x0c\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v', b'\xf1\x87SAKFBA3356084GJ2\x86fvgUUuWgw\x86www\x87wffvf\xb6\xcf\xfc\xffeUO\xff\x12\x19\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v', b'\xf1\x87SAKFBA3474944GJ2ffvgwwwwg\x88\x86x\x88\x88\x98\x88ffvfeo\xfa\xff\x86fo\xff\t\xae\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v', b'\xf1\x87SAKFBA3475714GJ2Vfvgvg\x96yx\x88\x97\x88ww\x87ww\x88\x87xs_\xfb\xffvUO\xff\x0f\xff\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v', b'\xf1\x87SALDBA3510954GJ3ww\x87xUUuWx\x88\x87\x88\x87w\x88wvfwfc_\xf9\xff\x98wO\xffl\xe0\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00', b'\xf1\x87SALDBA3573534GJ3\x89\x98\x89\x88EUuWgwvwwwwww\x88\x87xTo\xfa\xff\x86f\x7f\xffo\x0e\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00', b'\xf1\x87SALDBA3601464GJ3\x88\x88\x88\x88ffvggwvwvw\x87gww\x87wvo\xfb\xff\x98\x88\x7f\xffjJ\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00', b'\xf1\x87SALDBA3753044GJ3UUeVff\x86hwwwwvwwgvfgfvo\xf9\xfffU_\xffC\xae\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00', b'\xf1\x87SALDBA3862294GJ3vfvgvefVxw\x87\x87w\x88\x87xwwwwc_\xf9\xff\x87w\x9f\xff\xd5\xdc\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00', 
b'\xf1\x87SALDBA3873834GJ3fefVwuwWx\x88\x97\x88w\x88\x97xww\x87wU_\xfb\xff\x86f\x8f\xffN\x04\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00', b'\xf1\x87SALDBA4525334GJ3\x89\x99\x99\x99fevWh\x88\x86\x88fwvgw\x88\x87xfo\xfa\xffuDo\xff\xd1>\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00', b'\xf1\x87SALDBA4626804GJ3wwww\x88\x87\x88xx\x88\x87\x88wwgw\x88\x88\x98\x88\x95_\xf9\xffuDo\xff|\xe7\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00', b'\xf1\x87SALDBA4803224GJ3wwwwwvwg\x88\x88\x98\x88wwww\x87\x88\x88xu\x9f\xfc\xff\x87f\x8f\xff\xea\xea\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00', b'\xf1\x87SALDBA6212564GJ3\x87wwwUTuGg\x88\x86xx\x88\x87\x88\x87\x88\x98xu?\xf9\xff\x97f\x7f\xff\xb8\n\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00', b'\xf1\x87SALDBA6347404GJ3wwwwff\x86hx\x88\x97\x88\x88\x88\x88\x88vfgf\x88?\xfc\xff\x86Uo\xff\xec/\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00', b'\xf1\x87SALDBA6901634GJ3UUuWVeVUww\x87wwwwwvUge\x86/\xfb\xff\xbb\x99\x7f\xff]2\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00', b'\xf1\x87SALDBA7077724GJ3\x98\x88\x88\x88ww\x97ygwvwww\x87ww\x88\x87x\x87_\xfd\xff\xba\x99o\xff\x99\x01\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00', b'\xf1\x87SALFBA3525114GJ2wvwgvfvggw\x86wffvffw\x86g\x85_\xf9\xff\xa8wo\xffv\xcd\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v', b'\xf1\x87SALFBA3624024GJ2\x88\x88\x88\x88wv\x87hx\x88\x97\x88x\x88\x97\x88ww\x87w\x86o\xfa\xffvU\x7f\xff\xd1\xec\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v', b'\xf1\x87SALFBA3960824GJ2wwwwff\x86hffvfffffvfwfg_\xf9\xff\xa9\x88\x8f\xffb\x99\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v', b'\xf1\x87SALFBA4011074GJ2fgvwwv\x87hw\x88\x87xww\x87wwfgvu_\xfa\xffefo\xff\x87\xc0\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v', b'\xf1\x87SALFBA4121304GJ2x\x87xwff\x86hwwwwww\x87wwwww\x84_\xfc\xff\x98\x88\x9f\xffi\xa6\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v', b'\xf1\x87SALFBA4195874GJ2EVugvf\x86hgwvwww\x87wgw\x86wc_\xfb\xff\x98\x88\x8f\xff\xe23\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v', b'\xf1\x87SALFBA4625294GJ2eVefeUeVx\x88\x97\x88wwwwwwww\xa7o\xfb\xffvw\x9f\xff\xee.\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v', b'\xf1\x87SALFBA4728774GJ2vfvg\x87vwgww\x87ww\x88\x97xww\x87w\x86_\xfb\xffeD?\xffk0\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v', b'\xf1\x87SALFBA5129064GJ2vfvgwv\x87hx\x88\x87\x88ww\x87www\x87wd_\xfa\xffvfo\xff\x1d\x00\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v', b'\xf1\x87SALFBA5454914GJ2\x98\x88\x88\x88\x87vwgx\x88\x87\x88xww\x87ffvf\xa7\x7f\xf9\xff\xa8w\x7f\xff\x1b\x90\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v', b'\xf1\x87SALFBA5987784GJ2UVugDDtGx\x88\x87\x88w\x88\x87xwwwwd/\xfb\xff\x97fO\xff\xb0h\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v', b'\xf1\x87SALFBA5987864GJ2fgvwUUuWgwvw\x87wxwwwww\x84/\xfc\xff\x97w\x7f\xff\xdf\x1d\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v', 
b'\xf1\x87SALFBA6337644GJ2vgvwwv\x87hgffvwwwwwwww\x85O\xfa\xff\xa7w\x7f\xff\xc5\xfc\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v', b'\xf1\x87SALFBA6802004GJ2UUuWUUuWgw\x86www\x87www\x87w\x96?\xf9\xff\xa9\x88\x7f\xff\x9fK\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v', b'\xf1\x87SALFBA6892284GJ233S5\x87w\x87xx\x88\x87\x88vwwgww\x87w\x84?\xfb\xff\x98\x88\x8f\xff*\x9e\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v', b'\xf1\x87SALFBA7005534GJ2eUuWfg\x86xxww\x87x\x88\x87\x88\x88w\x88\x87\x87O\xfc\xffuUO\xff\xa3k\xf1\x81U913\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00SDN8T16NB1\xe3\xc10\xa1', b'\xf1\x87SALFBA7152454GJ2gvwgFf\x86hx\x88\x87\x88vfWfffffd?\xfa\xff\xba\x88o\xff,\xcf\xf1\x81U913\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00SDN8T16NB1\xe3\xc10\xa1', b'\xf1\x87SALFBA7485034GJ2ww\x87xww\x87xfwvgwwwwvfgf\xa5/\xfc\xff\xa9w_\xff40\xf1\x81U913\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00SDN8T16NB2\n\xdd^\xbc', b'\xf1\x87SAMDBA7743924GJ3wwwwww\x87xgwvw\x88\x88\x88\x88wwww\x85_\xfa\xff\x86f\x7f\xff0\x9d\xf1\x89HT6WAD10A1\xf1\x82SDN8G25NB2\x00\x00\x00\x00\x00\x00', b'\xf1\x87SAMDBA7817334GJ3Vgvwvfvgww\x87wwwwwwfgv\x97O\xfd\xff\x88\x88o\xff\x8e\xeb\xf1\x89HT6WAD10A1\xf1\x82SDN8G25NB2\x00\x00\x00\x00\x00\x00', b'\xf1\x87SAMDBA8054504GJ3gw\x87xffvgffffwwwweUVUf?\xfc\xffvU_\xff\xddl\xf1\x89HT6WAD10A1\xf1\x82SDN8G25NB2\x00\x00\x00\x00\x00\x00', b'\xf1\x87SAMFB41553621GC7ww\x87xUU\x85Xvwwg\x88\x88\x88\x88wwgw\x86\xaf\xfb\xffuDo\xff\xaa\x8f\xf1\x81U913\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00SDN8T16NB2\n\xdd^\xbc', b'\xf1\x87SAMFB42555421GC7\x88\x88\x88\x88wvwgx\x88\x87\x88wwgw\x87wxw3\x8f\xfc\xff\x98f\x8f\xffga\xf1\x81U913\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00SDN8T16NB2\n\xdd^\xbc', b'\xf1\x87SAMFBA7978674GJ2gw\x87xgw\x97ywwwwvUGeUUeU\x87O\xfb\xff\x98w\x8f\xfffF\xf1\x81U913\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00SDN8T16NB2\n\xdd^\xbc', b'\xf1\x87SAMFBA9283024GJ2wwwwEUuWwwgwwwwwwwww\x87/\xfb\xff\x98w\x8f\xff<\xd3\xf1\x81U913\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00SDN8T16NB2\n\xdd^\xbc', b'\xf1\x87SAMFBA9708354GJ2wwwwVf\x86h\x88wx\x87xww\x87\x88\x88\x88\x88w/\xfa\xff\x97w\x8f\xff\x86\xa0\xf1\x81U913\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00SDN8T16NB2\n\xdd^\xbc', ], }, CAR.SONATA_HEV: { (Ecu.fwdRadar, 0x7d0, None): [ b'\xf1\000DNhe SCC FHCUP 1.00 1.02 99110-L5000 ', b'\xf1\x8799110L5000\xf1\000DNhe SCC FHCUP 1.00 1.02 99110-L5000 ', b'\xf1\000DNhe SCC F-CUP 1.00 1.02 99110-L5000 ', b'\xf1\x8799110L5000\xf1\000DNhe SCC F-CUP 1.00 1.02 99110-L5000 ', ], (Ecu.fwdCamera, 0x7c4, None): [ b'\xf1\000DN8HMFC AT USA LHD 1.00 1.04 99211-L1000 191016', b'\xf1\x00DN8HMFC AT USA LHD 1.00 1.05 99211-L1000 201109', b'\xf1\000DN8HMFC AT USA LHD 1.00 1.06 99211-L1000 210325', ], (Ecu.eps, 0x7d4, None): [ b'\xf1\x8756310-L5500\xf1\000DN8 MDPS C 1.00 1.02 56310-L5500 4DNHC102', b'\xf1\x8756310-L5450\xf1\x00DN8 MDPS C 1.00 1.02 56310-L5450 4DNHC102', b'\xf1\x8756310-L5450\xf1\000DN8 MDPS C 1.00 1.03 56310-L5450 4DNHC103', ], (Ecu.esp, 0x7d1, None): [ b'\xf1\x8758910-L0100\xf1\x00DN ESC \x06 104\x19\x08\x01 58910-L0100\xf1\xa01.04', ], (Ecu.engine, 0x7e0, None): [ b'\xf1\x87391062J002\xf1\xa0000P', b'\xf1\x87391162J012', b'\xf1\x87391162J013', ], 
(Ecu.transmission, 0x7e1, None): [ b'\xf1\000PSBG2333 E14\x00\x00\x00\x00\x00\x00\x00TDN2H20SA6N\xc2\xeeW', b'\xf1\x87959102T250\000\000\000\000\000\xf1\x81E09\000\000\000\000\000\000\000\xf1\000PSBG2323 E09\000\000\000\000\000\000\000TDN2H20SA5\x97R\x88\x9e', b'\xf1\000PSBG2323 E09\000\000\000\000\000\000\000TDN2H20SA5\x97R\x88\x9e', b'\xf1\000PSBG2333 E16\000\000\000\000\000\000\000TDN2H20SA7\0323\xf9\xab', b'\xf1\x87PCU\000\000\000\000\000\000\000\000\000\xf1\x81E16\000\000\000\000\000\000\000\xf1\000PSBG2333 E16\000\000\000\000\000\000\000TDN2H20SA7\0323\xf9\xab', b'\xf1\x87959102T250\x00\x00\x00\x00\x00\xf1\x81E14\x00\x00\x00\x00\x00\x00\x00\xf1\x00PSBG2333 E14\x00\x00\x00\x00\x00\x00\x00TDN2H20SA6N\xc2\xeeW', ], }, CAR.SONATA_LF: { (Ecu.fwdRadar, 0x7d0, None): [ b'\xf1\x00LF__ SCC F-CUP 1.00 1.00 96401-C2200 ', ], (Ecu.fwdCamera, 0x7c4, None): [ b'\xf1\x00LFF LKAS AT USA LHD 1.00 1.01 95740-C1000 E51', b'\xf1\x00LFF LKAS AT USA LHD 1.01 1.02 95740-C1000 E52', ], (Ecu.esp, 0x7d1, None): [ b'\xf1\x00LF ESC \f 11 \x17\x01\x13 58920-C2610', b'\xf1\x00LF ESC \t 11 \x17\x01\x13 58920-C2610', ], (Ecu.engine, 0x7e0, None): [ b'\xf1\x81606D5051\x00\x00\x00\x00\x00\x00\x00\x00', b'\xf1\x81606D5K51\x00\x00\x00\x00\x00\x00\x00\x00', b'\xf1\x81606G1051\x00\x00\x00\x00\x00\x00\x00\x00', ], (Ecu.transmission, 0x7e1, None): [ b'\xf1\x006T6H0_C2\x00\x006T6B4051\x00\x00TLF0G24NL1\xb0\x9f\xee\xf5', b'\xf1\x87\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf1\x816T6B4051\x00\x00\xf1\x006T6H0_C2\x00\x006T6B4051\x00\x00TLF0G24NL1\x00\x00\x00\x00', b'\xf1\x87\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf1\x816T6B4051\x00\x00\xf1\x006T6H0_C2\x00\x006T6B4051\x00\x00TLF0G24NL1\xb0\x9f\xee\xf5', b'\xf1\x87\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf1\x816T6B4051\x00\x00\xf1\x006T6H0_C2\x00\x006T6B4051\x00\x00TLF0G24SL2n\x8d\xbe\xd8', b'\xf1\x87LAHSGN012918KF10\x98\x88x\x87\x88\x88x\x87\x88\x88\x98\x88\x87w\x88w\x88\x88\x98\x886o\xf6\xff\x98w\x7f\xff3\x00\xf1\x816W3B1051\x00\x00\xf1\x006W351_C2\x00\x006W3B1051\x00\x00TLF0T20NL2\x00\x00\x00\x00', b'\xf1\x87LAHSGN012918KF10\x98\x88x\x87\x88\x88x\x87\x88\x88\x98\x88\x87w\x88w\x88\x88\x98\x886o\xf6\xff\x98w\x7f\xff3\x00\xf1\x816W3B1051\x00\x00\xf1\x006W351_C2\x00\x006W3B1051\x00\x00TLF0T20NL2H\r\xbdm', ], }, CAR.KONA: { (Ecu.fwdRadar, 0x7d0, None): [ b'\xf1\x00OS__ SCC F-CUP 1.00 1.00 95655-J9200 ', ], (Ecu.fwdCamera, 0x7c4, None): [ b'\xf1\x00OS9 LKAS AT USA LHD 1.00 1.00 95740-J9300 g21', ], (Ecu.eps, 0x7d4, None): [ b'\xf1\x00OS MDPS C 1.00 1.05 56310J9030\x00 4OSDC105', ], (Ecu.esp, 0x7d1, None): [ b'\xf1\x816V5RAK00018.ELF\xf1\x00\x00\x00\x00\x00\x00\x00', ], (Ecu.engine, 0x7e0, None): [ b'"\x01TOS-0NU06F301J02', ], (Ecu.transmission, 0x7e1, None): [ b'\xf1\x816U2VE051\x00\x00\xf1\x006U2V0_C2\x00\x006U2VE051\x00\x00DOS4T16NS3\x00\x00\x00\x00', ], }, CAR.KONA_EV: { (Ecu.fwdRadar, 0x7D0, None): [ b'\xf1\x00DEev SCC F-CUP 1.00 1.00 99110-Q4000 ', b'\xf1\x00OSev SCC F-CUP 1.00 1.00 99110-K4000 ', b'\xf1\x00OSev SCC F-CUP 1.00 1.00 99110-K4100 ', b'\xf1\x00OSev SCC F-CUP 1.00 1.01 99110-K4000 ', b'\xf1\x00OSev SCC FNCUP 1.00 1.01 99110-K4000 ', b'\xf1\x00DEev SCC F-CUP 1.00 1.03 96400-Q4100 ', 
b'\xf1\x8799110Q4000\xf1\x00DEev SCC F-CUP 1.00 1.00 99110-Q4000 ', b'\xf1\x8799110Q4100\xf1\x00DEev SCC F-CUP 1.00 1.00 99110-Q4100 ', b'\xf1\x8799110Q4500\xf1\000DEev SCC F-CUP 1.00 1.00 99110-Q4500 ', ], (Ecu.fwdCamera, 0x7C4, None): [ b'\xf1\x00DEE MFC AT USA LHD 1.00 1.03 95740-Q4000 180821', b'\xf1\x00DEE MFC AT EUR LHD 1.00 1.00 99211-Q4000 191211', b'\xf1\x00DEE MFC AT USA LHD 1.00 1.00 99211-Q4000 191211', b'\xf1\000DEE MFC AT EUR LHD 1.00 1.00 99211-Q4100 200706', b'\xf1\x00OSE LKAS AT EUR LHD 1.00 1.00 95740-K4100 W40', b'\xf1\x00OSE LKAS AT EUR RHD 1.00 1.00 95740-K4100 W40', b'\xf1\x00OSE LKAS AT KOR LHD 1.00 1.00 95740-K4100 W40', b'\xf1\x00OE2 LKAS AT EUR LHD 1.00 1.00 95740-K4200 200', b'\xf1\x00OSE LKAS AT USA LHD 1.00 1.00 95740-K4300 W50', ], (Ecu.eps, 0x7D4, None): [ b'\xf1\x00OS MDPS C 1.00 1.03 56310/K4550 4OEDC103', b'\xf1\x00OS MDPS C 1.00 1.04 56310K4000\x00 4OEDC104', b'\xf1\x00OS MDPS C 1.00 1.04 56310K4050\x00 4OEDC104', b'\xf1\x00DE MDPS C 1.00 1.05 56310Q4000\x00 4DEEC105', b'\xf1\x00DE MDPS C 1.00 1.05 56310Q4100\x00 4DEEC105', ], (Ecu.esp, 0x7D1, None): [ b'\xf1\x00OS IEB \r 105\x18\t\x18 58520-K4000', b'\xf1\x00OS IEB \x01 212 \x11\x13 58520-K4000', b'\xf1\x00OS IEB \x02 212 \x11\x13 58520-K4000', b'\xf1\x00OS IEB \x03 210 \x02\x14 58520-K4000', b'\xf1\x00OS IEB \x03 212 \x11\x13 58520-K4000', ], }, CAR.KONA_HEV: { (Ecu.fwdRadar, 0x7d0, None): [ b'\xf1\x00OShe SCC FNCUP 1.00 1.01 99110-CM000 ', ], (Ecu.fwdCamera, 0x7c4, None): [ b'\xf1\x00OSH LKAS AT KOR LHD 1.00 1.01 95740-CM000 l31', ], (Ecu.eps, 0x7d4, None): [ b'\xf1\x00OS MDPS C 1.00 1.00 56310CM030\x00 4OHDC100', ], (Ecu.esp, 0x7d1, None): [ b'\xf1\x00OS IEB \x01 104 \x11 58520-CM000', ], (Ecu.engine, 0x7e0, None): [ b'\xf1\x816H6F6051\x00\x00\x00\x00\x00\x00\x00\x00', ], (Ecu.transmission, 0x7e1, None): [ b'\xf1\x816U3J9051\x00\x00\xf1\x006U3H1_C2\x00\x006U3J9051\x00\x00HOS0G16DS1\x16\xc7\xb0\xd9', ], }, CAR.IONIQ_EV: { (Ecu.fwdRadar, 0x7d0, None): [ b'\xf1\x00AEev SCC F-CUP 1.00 1.00 96400-G7000 ', b'\xf1\x00AEev SCC F-CUP 1.00 1.00 96400-G7100 ', b'\xf1\x00AEev SCC F-CUP 1.00 1.01 99110-G7000 ', b'\xf1\x00AEev SCC F-CUP 1.00 1.00 99110-G7200 ', ], (Ecu.fwdCamera, 0x7c4, None): [ b'\xf1\x00AEE MFC AT EUR LHD 1.00 1.00 95740-G7200 160418', b'\xf1\x00AEE MFC AT USA LHD 1.00 1.00 95740-G2400 180222', b'\xf1\x00AEE MFC AT EUR LHD 1.00 1.03 95740-G2500 190516', b'\xf1\x00AEE MFC AT EUR RHD 1.00 1.01 95740-G2600 190819', ], (Ecu.eps, 0x7d4, None): [ b'\xf1\x00AE MDPS C 1.00 1.02 56310G7300\x00 4AEEC102', b'\xf1\x00AE MDPS C 1.00 1.04 56310/G7501 4AEEC104', b'\xf1\x00AE MDPS C 1.00 1.01 56310/G7310 4APEC101', b'\xf1\x00AE MDPS C 1.00 1.01 56310/G7560 4APEC101', ], }, CAR.IONIQ_HEV: { (Ecu.fwdRadar, 0x7d0, None): [ b'\xf1\000AEhe SCC F-CUP 1.00 1.02 99110-G2100 ', b'\xf1\x00AEhe SCC F-CUP 1.00 1.00 99110-G2200 ', b'\xf1\x00AEhe SCC H-CUP 1.01 1.01 96400-G2000 ', b'\xf1\x00AEhe SCC F-CUP 1.00 1.00 99110-G2600 ', ], (Ecu.fwdCamera, 0x7c4, None): [ b'\xf1\x00AEH MFC AT EUR LHD 1.00 1.01 95740-G2600 190819', b'\xf1\x00AEH MFC AT EUR LHD 1.00 1.00 95740-G2400 180222', b'\xf1\000AEP MFC AT USA LHD 1.00 1.01 95740-G2600 190819', b'\xf1\x00AEH MFC AT USA LHD 1.00 1.00 95740-G2700 201027', ], (Ecu.eps, 0x7D4, None): [ b'\xf1\x00AE MDPS C 1.00 1.07 56310/G2301 4AEHC107', b'\xf1\x00AE MDPS C 1.00 1.01 56310/G2310 4APHC101', b'\xf1\000AE MDPS C 1.00 1.01 56310/G2510 4APHC101', ], (Ecu.engine, 0x7e0, None): [ b'\xf1\x816H6F6051\x00\x00\x00\x00\x00\x00\x00\x00', b'\xf1\x816H6F2051\x00\x00\x00\x00\x00\x00\x00\x00', 
b'\xf1\x816H6F6051\000\000\000\000\000\000\000\000', b'\xf1\x816H6G5051\x00\x00\x00\x00\x00\x00\x00\x00', ], (Ecu.transmission, 0x7e1, None): [ b'\xf1\x816U3J8051\x00\x00\xf1\x006U3H1_C2\x00\x006U3J8051\x00\x00HAE0G16UL0Nd\xed:', b'\xf1\x816U3H1051\x00\x00\xf1\x006U3H0_C2\x00\x006U3H1051\x00\x00HAE0G16US2\x95\xa2^$', b'\xf1\x816U3J9051\000\000\xf1\0006U3H1_C2\000\0006U3J9051\000\000PAE0G16NL0\x82zT\xd2', b'\xf1\x816U3J9051\x00\x00\xf1\x006U3H1_C2\x00\x006U3J9051\x00\x00HAE0G16NL2\x00\x00\x00\x00', ], }, CAR.SANTA_FE: { (Ecu.fwdRadar, 0x7d0, None): [ b'\xf1\x00TM__ SCC F-CUP 1.00 1.01 99110-S2000 ', b'\xf1\x00TM__ SCC F-CUP 1.00 1.02 99110-S2000 ', b'\xf1\x00TM__ SCC F-CUP 1.00 1.03 99110-S2000 ', b'\xf1\x00TM__ SCC F-CUP 1.00 1.00 99110-S1500 ', b'\xf1\x8799110S1500\xf1\x00TM__ SCC F-CUP 1.00 1.00 99110-S1500 ', ], (Ecu.fwdCamera, 0x7c4, None): [ b'\xf1\x00TM MFC AT USA LHD 1.00 1.00 99211-S2000 180409', b'\xf1\x00TMA MFC AT MEX LHD 1.00 1.01 99211-S2500 210205', b'\xf1\x00TMA MFC AT USA LHD 1.00 1.00 99211-S2500 200720', ], (Ecu.eps, 0x7d4, None): [ b'\xf1\x00TM MDPS C 1.00 1.00 56340-S2000 8409', b'\xf1\x00TM MDPS C 1.00 1.00 56340-S2000 8A12', b'\xf1\x00TM MDPS C 1.00 1.01 56340-S2000 9129', b'\xf1\x00TM MDPS C 1.00 1.02 56370-S2AA0 0B19', ], (Ecu.esp, 0x7d1, None): [ b'\xf1\x00TM ESC \r 100\x18\x031 58910-S2650', b'\xf1\x00TM ESC \r 103\x18\x11\x08 58910-S2650', b'\xf1\x00TM ESC \r 104\x19\a\b 58910-S2650', b'\xf1\x00TM ESC \x02 100\x18\x030 58910-S2600', b'\xf1\x00TM ESC \x02 102\x18\x07\x01 58910-S2600', b'\xf1\x00TM ESC \x02 103\x18\x11\x07 58910-S2600', b'\xf1\x00TM ESC \x02 104\x19\x07\x07 58910-S2600', b'\xf1\x00TM ESC \x03 103\x18\x11\x07 58910-S2600', b'\xf1\x00TM ESC \x0c 103\x18\x11\x08 58910-S2650', b'\xf1\x00TM ESC \x02 101 \x08\x04 58910-S2GA0', b'\xf1\x00TM ESC \x03 101 \x08\x02 58910-S2DA0', b'\xf1\x8758910-S2DA0\xf1\x00TM ESC \x03 101 \x08\x02 58910-S2DA0', b'\xf1\x8758910-S2GA0\xf1\x00TM ESC \x02 101 \x08\x04 58910-S2GA0', ], (Ecu.engine, 0x7e0, None): [ b'\xf1\x81606EA051\x00\x00\x00\x00\x00\x00\x00\x00', b'\xf1\x81606G1051\x00\x00\x00\x00\x00\x00\x00\x00', b'\xf1\x81606G3051\x00\x00\x00\x00\x00\x00\x00\x00', b'\xf1\x82TMBZN5TMD3XXXG2E', ], (Ecu.transmission, 0x7e1, None): [ b'\xf1\x87LBJSGA7082574HG0\x87www\x98\x88\x88\x88\x99\xaa\xb9\x9afw\x86gx\x99\xa7\x89co\xf8\xffvU_\xffR\xaf\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2T20NS1\x00\xa6\xe0\x91', b'\xf1\x87LBKSGA0458404HG0vfvg\x87www\x89\x99\xa8\x99y\xaa\xa7\x9ax\x88\xa7\x88t_\xf9\xff\x86w\x8f\xff\x15x\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2T20NS1\x00\x00\x00\x00', b'\xf1\x87LDJUEA6010814HG1\x87w\x87x\x86gvw\x88\x88\x98\x88gw\x86wx\x88\x97\x88\x85o\xf8\xff\x86f_\xff\xd37\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM4T20NS0\xf8\x19\x92g', b'\xf1\x87LDJUEA6458264HG1ww\x87x\x97x\x87\x88\x88\x99\x98\x89g\x88\x86xw\x88\x97x\x86o\xf7\xffvw\x8f\xff3\x9a\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM4T20NS0\xf8\x19\x92g', b'\xf1\x87LDKUEA2045844HG1wwww\x98\x88x\x87\x88\x88\xa8\x88x\x99\x97\x89x\x88\xa7\x88U\x7f\xf8\xffvfO\xffC\x1e\xf1\x816W3E0051\x00\x00\xf1\x006W351_C2\x00\x006W3E0051\x00\x00TTM4T20NS3\x00\x00\x00\x00', b'\xf1\x87LDKUEA9993304HG1\x87www\x97x\x87\x88\x99\x99\xa9\x99x\x99\xa7\x89w\x88\x97x\x86_\xf7\xffwwO\xffl#\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM4T20NS1R\x7f\x90\n', 
b'\xf1\x87LDLUEA6061564HG1\xa9\x99\x89\x98\x87wwwx\x88\x97\x88x\x99\xa7\x89x\x99\xa7\x89sO\xf9\xffvU_\xff<\xde\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4T20NS50\xcb\xc3\xed', b'\xf1\x87LDLUEA6159884HG1\x88\x87hv\x99\x99y\x97\x89\xaa\xb8\x9ax\x99\x87\x89y\x99\xb7\x99\xa7?\xf7\xff\x97wo\xff\xf3\x05\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4T20NS5\x00\x00\x00\x00', b'\xf1\x87LDLUEA6852664HG1\x97wWu\x97www\x89\xaa\xc8\x9ax\x99\x97\x89x\x99\xa7\x89SO\xf7\xff\xa8\x88\x7f\xff\x03z\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4T20NS50\xcb\xc3\xed', b'\xf1\x87LDLUEA6898374HG1fevW\x87wwwx\x88\x97\x88h\x88\x96\x88x\x88\xa7\x88ao\xf9\xff\x98\x99\x7f\xffD\xe2\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4T20NS5\x00\x00\x00\x00', b'\xf1\x87LDLUEA6898374HG1fevW\x87wwwx\x88\x97\x88h\x88\x96\x88x\x88\xa7\x88ao\xf9\xff\x98\x99\x7f\xffD\xe2\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4T20NS50\xcb\xc3\xed', b'\xf1\x87SBJWAA5842214GG0\x88\x87\x88xww\x87x\x89\x99\xa8\x99\x88\x99\x98\x89w\x88\x87xw_\xfa\xfffU_\xff\xd1\x8d\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS1\x98{|\xe3', b'\xf1\x87SBJWAA5890864GG0\xa9\x99\x89\x98\x98\x87\x98y\x89\x99\xa8\x99w\x88\x87xww\x87wvo\xfb\xffuD_\xff\x9f\xb5\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS1\x98{|\xe3', b'\xf1\x87SBJWAA6562474GG0ffvgeTeFx\x88\x97\x88ww\x87www\x87w\x84o\xfa\xff\x87fO\xff\xc2 \xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS1\x00\x00\x00\x00', b'\xf1\x87SBJWAA6562474GG0ffvgeTeFx\x88\x97\x88ww\x87www\x87w\x84o\xfa\xff\x87fO\xff\xc2 \xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS1\x98{|\xe3', b'\xf1\x87SBJWAA7780564GG0wvwgUUeVwwwwx\x88\x87\x88wwwwd_\xfc\xff\x86f\x7f\xff\xd7*\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS2F\x84<\xc0', b'\xf1\x87SBJWAA8278284GG0ffvgUU\x85Xx\x88\x87\x88x\x88w\x88ww\x87w\x96o\xfd\xff\xa7U_\xff\xf2\xa0\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS2F\x84<\xc0', b'\xf1\x87SBLWAA4363244GG0wvwgwv\x87hgw\x86ww\x88\x87xww\x87wdo\xfb\xff\x86f\x7f\xff3$\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM2G24NS6\x00\x00\x00\x00', b'\xf1\x87SBLWAA4363244GG0wvwgwv\x87hgw\x86ww\x88\x87xww\x87wdo\xfb\xff\x86f\x7f\xff3$\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM2G24NS6x0\x17\xfe', b'\xf1\x87SBLWAA4899564GG0VfvgUU\x85Xx\x88\x87\x88vfgf\x87wxwvO\xfb\xff\x97f\xb1\xffSB\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM2G24NS7\x00\x00\x00\x00', b'\xf1\x87SBLWAA6622844GG0wwwwff\x86hwwwwx\x88\x87\x88\x88\x88\x88\x88\x98?\xfd\xff\xa9\x88\x7f\xffn\xe5\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM2G24NS7u\x1e{\x1c', b'\xf1\x87SDJXAA7656854GG1DEtWUU\x85X\x88\x88\x98\x88w\x88\x87xx\x88\x87\x88\x96o\xfb\xff\x86f\x7f\xff.\xca\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM4G24NS2\x00\x00\x00\x00', b'\xf1\x87SDJXAA7656854GG1DEtWUU\x85X\x88\x88\x98\x88w\x88\x87xx\x88\x87\x88\x96o\xfb\xff\x86f\x7f\xff.\xca\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM4G24NS2K\xdaV0', b'\xf1\x87SDKXAA2443414GG1vfvgwv\x87h\x88\x88\x88\x88ww\x87wwwww\x99_\xfc\xffvD?\xffl\xd2\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4G24NS6\x00\x00\x00\x00', b'\xf1\x00T02601BL T02730A1 VTMPT25XXX730NS2\xa6\x06\x88\xf7', 
b'\xf1\x87SDMXCA8653204GN1EVugEUuWwwwwww\x87wwwwwv/\xfb\xff\xa8\x88\x9f\xff\xa5\x9c\xf1\x89HT6WAD00A1\xf1\x82STM4G25NH1\x00\x00\x00\x00\x00\x00', b'\xf1\x87954A02N250\x00\x00\x00\x00\x00\xf1\x81T02730A1 \xf1\x00T02601BL T02730A1 VTMPT25XXX730NS2\xa6\x06\x88\xf7', ], }, CAR.SANTA_FE_HEV: { (Ecu.fwdRadar, 0x7d0, None): [ b'\xf1\x8799110CL500\xf1\x00TMhe SCC FHCUP 1.00 1.00 99110-CL500 ', ], (Ecu.fwdCamera, 0x7c4, None): [ b'\xf1\x00TMH MFC AT USA LHD 1.00 1.03 99211-S1500 210224', ], (Ecu.eps, 0x7d4, None): [ b'\xf1\x00TM MDPS C 1.00 1.02 56310-CLAC0 4TSHC102', ], (Ecu.engine, 0x7e0, None): [ b'\xf1\x87391312MTC1', ], (Ecu.transmission, 0x7e1, None): [ b'\xf1\x87959102T250\x00\x00\x00\x00\x00\xf1\x81E14\x00\x00\x00\x00\x00\x00\x00\xf1\x00PSBG2333 E14\x00\x00\x00\x00\x00\x00\x00TTM2H16SA2\x80\xd7l\xb2', ], }, CAR.PALISADE: { (Ecu.fwdRadar, 0x7d0, None): [ b'\xf1\000LX2_ SCC F-CUP 1.00 1.05 99110-S8100 ', b'\xf1\x00LX2 SCC FHCUP 1.00 1.04 99110-S8100 ', b'\xf1\x00LX2_ SCC FHCU- 1.00 1.05 99110-S8100 ', b'\xf1\x00LX2_ SCC FHCUP 1.00 1.00 99110-S8110 ', b'\xf1\x00LX2_ SCC FHCUP 1.00 1.04 99110-S8100 ', b'\xf1\x00LX2_ SCC FHCUP 1.00 1.05 99110-S8100 ', b'\xf1\x00ON__ FCA FHCUP 1.00 1.02 99110-S9100 ', ], (Ecu.fwdCamera, 0x7c4, None): [ b'\xf1\x00LX2 MFC AT USA LHD 1.00 1.03 99211-S8100 190125', b'\xf1\x00LX2 MFC AT USA LHD 1.00 1.05 99211-S8100 190909', b'\xf1\x00LX2 MFC AT USA LHD 1.00 1.07 99211-S8100 200422', b'\xf1\x00LX2 MFC AT USA LHD 1.00 1.08 99211-S8100 200903', b'\xf1\x00ON MFC AT USA LHD 1.00 1.01 99211-S9100 181105', b'\xf1\x00ON MFC AT USA LHD 1.00 1.03 99211-S9100 200720', ], (Ecu.eps, 0x7d4, None): [ b'\xf1\x00LX2 MDPS C 1,00 1,03 56310-S8020 4LXDC103', # modified firmware b'\xf1\x00LX2 MDPS C 1.00 1.03 56310-S8020 4LXDC103', b'\xf1\x00LX2 MDPS C 1.00 1.04 56310-S8020 4LXDC104', b'\xf1\x00ON MDPS C 1.00 1.00 56340-S9000 8B13', b'\xf1\x00ON MDPS C 1.00 1.01 56340-S9000 9201', ], (Ecu.esp, 0x7d1, None): [ b'\xf1\x00LX ESC \x01 103\x19\t\x10 58910-S8360', b'\xf1\x00LX ESC \x01 103\x31\t\020 58910-S8360', b'\xf1\x00LX ESC \x0b 101\x19\x03\x17 58910-S8330', b'\xf1\x00LX ESC \x0b 102\x19\x05\x07 58910-S8330', b'\xf1\x00LX ESC \x0b 103\x19\t\x07 58910-S8330', b'\xf1\x00LX ESC \x0b 103\x19\t\x10 58910-S8360', b'\xf1\x00LX ESC \x0b 104 \x10\x16 58910-S8360', b'\xf1\x00ON ESC \x0b 100\x18\x12\x18 58910-S9360', b'\xf1\x00ON ESC \x0b 101\x19\t\x08 58910-S9360', ], (Ecu.engine, 0x7e0, None): [ b'\xf1\x81640J0051\x00\x00\x00\x00\x00\x00\x00\x00', b'\xf1\x81640K0051\x00\x00\x00\x00\x00\x00\x00\x00', b'\xf1\x81640S1051\x00\x00\x00\x00\x00\x00\x00\x00', ], (Ecu.transmission, 0x7e1, None): [ b'\xf1\x00bcsh8p54 U872\x00\x00\x00\x00\x00\x00TON4G38NB1\x96z28', b'\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00TON4G38NB2[v\\\xb6', b'\xf1\x87LBLUFN591307KF25vgvw\x97wwwy\x99\xa7\x99\x99\xaa\xa9\x9af\x88\x96h\x95o\xf7\xff\x99f/\xff\xe4c\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX2G38NB2\xd7\xc1/\xd1', b'\xf1\x87LBLUFN650868KF36\xa9\x98\x89\x88\xa8\x88\x88\x88h\x99\xa6\x89fw\x86gw\x88\x97x\xaa\x7f\xf6\xff\xbb\xbb\x8f\xff+\x82\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX2G38NB3\xd1\xc3\xf8\xa8', b'\xf1\x87LBLUFN655162KF36\x98\x88\x88\x88\x98\x88\x88\x88x\x99\xa7\x89x\x99\xa7\x89x\x99\x97\x89g\x7f\xf7\xffwU_\xff\xe9!\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX2G38NB3\xd1\xc3\xf8\xa8', 
b'\xf1\x87LBLUFN731381KF36\xb9\x99\x89\x98\x98\x88\x88\x88\x89\x99\xa8\x99\x88\x99\xa8\x89\x88\x88\x98\x88V\177\xf6\xff\x99w\x8f\xff\xad\xd8\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\000bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX2G38NB3\xd1\xc3\xf8\xa8', b'\xf1\x87LDKVAA0028604HH1\xa8\x88x\x87vgvw\x88\x99\xa8\x89gw\x86ww\x88\x97x\x97o\xf9\xff\x97w\x7f\xffo\x02\xf1\x81U872\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U872\x00\x00\x00\x00\x00\x00TON4G38NB1\x96z28', b'\xf1\x87LDKVAA3068374HH1wwww\x87xw\x87y\x99\xa7\x99w\x88\x87xw\x88\x97x\x85\xaf\xfa\xffvU/\xffU\xdc\xf1\x81U872\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U872\x00\x00\x00\x00\x00\x00TON4G38NB1\x96z28', b'\xf1\x87LDKVBN382172KF26\x98\x88\x88\x88\xa8\x88\x88\x88x\x99\xa7\x89\x87\x88\x98x\x98\x99\xa9\x89\xa5_\xf6\xffDDO\xff\xcd\x16\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB2\xafL]\xe7', b'\xf1\x87LDKVBN424201KF26\xba\xaa\x9a\xa9\x99\x99\x89\x98\x89\x99\xa8\x99\x88\x99\x98\x89\x88\x99\xa8\x89v\x7f\xf7\xffwf_\xffq\xa6\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB2\xafL]\xe7', b'\xf1\x87LDKVBN540766KF37\x87wgv\x87w\x87xx\x99\x97\x89v\x88\x97h\x88\x88\x88\x88x\x7f\xf6\xffvUo\xff\xd3\x01\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB2\xafL]\xe7', b'\xf1\x87LDLVAA4225634HH1\x98\x88\x88\x88eUeVx\x88\x87\x88g\x88\x86xx\x88\x87\x88\x86o\xf9\xff\x87w\x7f\xff\xf2\xf7\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00TON4G38NB2[v\\\xb6', b'\xf1\x87LDLVAA4777834HH1\x98\x88x\x87\x87wwwx\x88\x87\x88x\x99\x97\x89x\x88\x97\x88\x86o\xfa\xff\x86fO\xff\x1d9\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00TON4G38NB2[v\\\xb6', b'\xf1\x87LDLVAA5194534HH1ffvguUUUx\x88\xa7\x88h\x99\x96\x89x\x88\x97\x88ro\xf9\xff\x98wo\xff\xaaM\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00TON4G38NB2[v\\\xb6', b'\xf1\x87LDLVAA5949924HH1\xa9\x99y\x97\x87wwwx\x99\x97\x89x\x99\xa7\x89x\x99\xa7\x89\x87_\xfa\xffeD?\xff\xf1\xfd\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00TON4G38NB2[v\\\xb6', b'\xf1\x87LDLVBN560098KF26\x86fff\x87vgfg\x88\x96xfw\x86gfw\x86g\x95\xf6\xffeU_\xff\x92c\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB2\xafL]\xe7', b'\xf1\x87LDLVBN602045KF26\xb9\x99\x89\x98\x97vwgy\xaa\xb7\x9af\x88\x96hw\x99\xa7y\xa9\x7f\xf5\xff\x99w\x7f\xff,\xd3\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08', b'\xf1\x87LDLVBN628911KF26\xa9\x99\x89\x98\x98\x88\x88\x88y\x99\xa7\x99fw\x86gw\x88\x87x\x83\x7f\xf6\xff\x98wo\xff2\xda\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08', b'\xf1\x87LDLVBN645817KF37\x87www\x98\x87xwx\x99\x97\x89\x99\x99\x99\x99g\x88\x96x\xb6_\xf7\xff\x98fo\xff\xe2\x86\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08', b'\xf1\x87LDLVBN662115KF37\x98\x88\x88\x88\xa8\x88\x88\x88x\x99\x97\x89x\x99\xa7\x89\x88\x99\xa8\x89\x88\x7f\xf7\xfffD_\xff\xdc\x84\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08', b'\xf1\x87LDLVBN667933KF37\xb9\x99\x89\x98\xb9\x99\x99\x99x\x88\x87\x88w\x88\x87x\x88\x88\x98\x88\xcbo\xf7\xffe3/\xffQ!\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08', 
b'\xf1\x87LDLVBN673087KF37\x97www\x86fvgx\x99\x97\x89\x99\xaa\xa9\x9ag\x88\x86x\xe9_\xf8\xff\x98w\x7f\xff"\xad\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08', b'\xf1\x87LDLVBN673841KF37\x98\x88x\x87\x86g\x86xy\x99\xa7\x99\x88\x99\xa8\x89w\x88\x97xdo\xf5\xff\x98\x88\x8f\xffT\xec\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08', b'\xf1\x87LDLVBN681363KF37\x98\x88\x88\x88\x97x\x87\x88y\xaa\xa7\x9a\x88\x88\x98\x88\x88\x88\x88\x88vo\xf6\xffvD\x7f\xff%v\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08', b'\xf1\x87LDLVBN713782KF37\x99\x99y\x97\x98\x88\x88\x88x\x88\x97\x88\x88\x99\x98\x89\x88\x99\xa8\x89\x87o\xf7\xffeU?\xff7,\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08', b'\xf1\x87LDLVBN713890KF26\xb9\x99\x89\x98\xa9\x99\x99\x99x\x99\x97\x89\x88\x99\xa8\x89\x88\x99\xb8\x89Do\xf7\xff\xa9\x88o\xffs\r\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08', b'\xf1\x87LDLVBN733215KF37\x99\x98y\x87\x97wwwi\x99\xa6\x99x\x99\xa7\x89V\x88\x95h\x86o\xf7\xffeDO\xff\x12\xe7\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08', b'\xf1\x87LDLVBN750044KF37\xca\xa9\x8a\x98\xa7wwwy\xaa\xb7\x9ag\x88\x96x\x88\x99\xa8\x89\xb9\x7f\xf6\xff\xa8w\x7f\xff\xbe\xde\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08', b'\xf1\x87LDLVBN752612KF37\xba\xaa\x8a\xa8\x87w\x87xy\xaa\xa7\x9a\x88\x99\x98\x89x\x88\x97\x88\x96o\xf6\xffvU_\xffh\x1b\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08', b'\xf1\x87LDLVBN755553KF37\x87xw\x87\x97w\x87xy\x99\xa7\x99\x99\x99\xa9\x99Vw\x95gwo\xf6\xffwUO\xff\xb5T\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08', b'\xf1\x87LDLVBN757883KF37\x98\x87xw\x98\x87\x88xy\xaa\xb7\x9ag\x88\x96x\x89\x99\xa8\x99e\x7f\xf6\xff\xa9\x88o\xff5\x15\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB4\xd6\xe8\xd7\xa6', b'\xf1\x87LDMVBN778156KF37\x87vWe\xa9\x99\x99\x99y\x99\xb7\x99\x99\x99\x99\x99x\x99\x97\x89\xa8\x7f\xf8\xffwf\x7f\xff\x82_\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB4\xd6\xe8\xd7\xa6', b'\xf1\x87LDMVBN780576KF37\x98\x87hv\x97x\x97\x89x\x99\xa7\x89\x88\x99\x98\x89w\x88\x97x\x98\x7f\xf7\xff\xba\x88\x8f\xff\x1e0\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB4\xd6\xe8\xd7\xa6', b'\xf1\x87LDMVBN783485KF37\x87www\x87vwgy\x99\xa7\x99\x99\x99\xa9\x99Vw\x95g\x89_\xf6\xff\xa9w_\xff\xc5\xd6\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB4\xd6\xe8\xd7\xa6', b'\xf1\x87LDMVBN811844KF37\x87vwgvfffx\x99\xa7\x89Vw\x95gg\x88\xa6xe\x8f\xf6\xff\x97wO\xff\t\x80\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB4\xd6\xe8\xd7\xa6', b'\xf1\x87LDMVBN830601KF37\xa7www\xa8\x87xwx\x99\xa7\x89Uw\x85Ww\x88\x97x\x88o\xf6\xff\x8a\xaa\x7f\xff\xe2:\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB4\xd6\xe8\xd7\xa6', b'\xf1\x87LDMVBN848789KF37\x87w\x87x\x87w\x87xy\x99\xb7\x99\x87\x88\x98x\x88\x99\xa8\x89\x87\x7f\xf6\xfffUo\xff\xe3!\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 
U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89', b'\xf1\x87LDMVBN851595KF37\x97wgvvfffx\x99\xb7\x89\x88\x99\x98\x89\x87\x88\x98x\x99\x7f\xf7\xff\x97w\x7f\xff@\xf3\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89', b'\xf1\x87LDMVBN873175KF26\xa8\x88\x88\x88vfVex\x99\xb7\x89\x88\x99\x98\x89x\x88\x97\x88f\x7f\xf7\xff\xbb\xaa\x8f\xff,\x04\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89', b'\xf1\x87LDMVBN879401KF26veVU\xa8\x88\x88\x88g\x88\xa6xVw\x95gx\x88\xa7\x88v\x8f\xf9\xff\xdd\xbb\xbf\xff\xb3\x99\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89', b'\xf1\x87LDMVBN881314KF37\xa8\x88h\x86\x97www\x89\x99\xa8\x99w\x88\x97xx\x99\xa7\x89\xca\x7f\xf8\xff\xba\x99\x8f\xff\xd8v\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89', b'\xf1\x87LDMVBN888651KF37\xa9\x99\x89\x98vfff\x88\x99\x98\x89w\x99\xa7y\x88\x88\x98\x88D\x8f\xf9\xff\xcb\x99\x8f\xff\xa5\x1e\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89', b'\xf1\x87LDMVBN889419KF37\xa9\x99y\x97\x87w\x87xx\x88\x97\x88w\x88\x97x\x88\x99\x98\x89e\x9f\xf9\xffeUo\xff\x901\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89', b'\xf1\x87LDMVBN895969KF37vefV\x87vgfx\x99\xa7\x89\x99\x99\xb9\x99f\x88\x96he_\xf7\xffxwo\xff\x14\xf9\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89', b'\xf1\x87LDMVBN899222KF37\xa8\x88x\x87\x97www\x98\x99\x99\x89\x88\x99\x98\x89f\x88\x96hdo\xf7\xff\xbb\xaa\x9f\xff\xe2U\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89', b"\xf1\x87LBLUFN622950KF36\xa8\x88\x88\x88\x87w\x87xh\x99\x96\x89\x88\x99\x98\x89\x88\x99\x98\x89\x87o\xf6\xff\x98\x88o\xffx'\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX2G38NB3\xd1\xc3\xf8\xa8", ], }, CAR.VELOSTER: { (Ecu.fwdRadar, 0x7d0, None): [ b'\xf1\x00JS__ SCC H-CUP 1.00 1.02 95650-J3200 ', b'\xf1\x00JS__ SCC HNCUP 1.00 1.02 95650-J3100 ', ], (Ecu.fwdCamera, 0x7c4, None): [ b'\xf1\x00JS LKAS AT USA LHD 1.00 1.02 95740-J3000 K32', b'\xf1\x00JS LKAS AT KOR LHD 1.00 1.03 95740-J3000 K33', ], (Ecu.eps, 0x7d4, None): [ b'\xf1\x00JSL MDPS C 1.00 1.03 56340-J3000 8308', ], (Ecu.esp, 0x7d1, None): [ b'\xf1\x00\x00\x00\x00\x00\x00\x00', ], (Ecu.engine, 0x7e0, None): [ b'\x01TJS-JNU06F200H0A', b'\x01TJS-JDK06F200H0A', ], (Ecu.transmission, 0x7e1, None): [ b'\xf1\x816U2V8051\x00\x00\xf1\x006U2V0_C2\x00\x006U2V8051\x00\x00DJS0T16NS1\xba\x02\xb8\x80', b'\xf1\x816U2V8051\x00\x00\xf1\x006U2V0_C2\x00\x006U2V8051\x00\x00DJS0T16NS1\x00\x00\x00\x00', b'\xf1\x816U2V8051\x00\x00\xf1\x006U2V0_C2\x00\x006U2V8051\x00\x00DJS0T16KS2\016\xba\036\xa2', ], }, # kia CAR.FORTE: { (Ecu.fwdRadar, 0x7d0, None): [ b'\xf1\x00BD__ SCC H-CUP 1.00 1.02 99110-M6000 ', ], (Ecu.fwdCamera, 0x7c4, None): [ b'\xf1\x00BD LKAS AT USA LHD 1.00 1.04 95740-M6000 J33', ], (Ecu.eps, 0x7d4, None): [ b'\xf1\x00BD MDPS C 1.00 1.02 56310-XX000 4BD2C102', b'\xf1\x00BD MDPS C 1.00 1.08 56310/M6300 4BDDC108', b'\xf1\x00BD MDPS C 1.00 1.08 56310M6300\x00 4BDDC108', ], (Ecu.esp, 0x7d1, None): [ b'\xf1\x816VGRAH00018.ELF\xf1\x00\x00\x00\x00\x00\x00\x00', ], (Ecu.engine, 0x7e0, None): [ b'\x01TBDM1NU06F200H01', ], (Ecu.transmission, 0x7e1, None): [ 
b'\xf1\x816U2VC051\x00\x00\xf1\x006U2V0_C2\x00\x006U2VC051\x00\x00DBD0T16SS0\x00\x00\x00\x00', b"\xf1\x816U2VC051\x00\x00\xf1\x006U2V0_C2\x00\x006U2VC051\x00\x00DBD0T16SS0\xcf\x1e'\xc3", ], }, CAR.K5: { (Ecu.fwdRadar, 0x7d0, None): [ b'\xf1\x00JF__ SCC F-CUP 1.00 1.00 96400-D4110 ', ], (Ecu.fwdCamera, 0x7c4, None): [ b'\xf1\x00JFA LKAS AT USA LHD 1.00 1.02 95895-D5000 h31', b'\xf1\x00JFA LKAS AT USA LHD 1.00 1.00 95895-D5001 h32', ], (Ecu.eps, 0x7d4, None): [ b'\xf1\x00TM MDPS C 1.00 1.00 56340-S2000 8409', ], (Ecu.esp, 0x7d1, None): [ b'\xf1\x00JF ESC \v 11 \x18\x030 58920-D5180', ], (Ecu.engine, 0x7e0, None): [ b'\x01TJFAJNU06F201H03', b'\xf1\x89F1JF600AISEIU702\xf1\x82F1JF600AISEIU702', ], (Ecu.transmission, 0x7e1, None): [ b'\xf1\x816U2V8051\x00\x00\xf1\x006U2V0_C2\x00\x006U2V8051\x00\x00DJF0T16NL0\t\xd2GW', ], }, CAR.K5_HEV: { (Ecu.fwdRadar, 0x7d0, None): [ b'\xf1\x00DEhe SCC H-CUP 1.01 1.02 96400-G5100 ', ], (Ecu.fwdCamera, 0x7c4, None): [ b'\xf1\x00DEP MFC AT USA LHD 1.00 1.01 95740-G5010 170424', ], (Ecu.eps, 0x7d4, None): [ b'\xf1\x00DE MDPS C 1.00 1.09 56310G5301\x00 4DEHC109', ], (Ecu.engine, 0x7e0, None): [ b'\xf1\x816H6F4051\x00\x00\x00\x00\x00\x00\x00\x00', ], (Ecu.transmission, 0x7e1, None): [ b"\xf1\x816U3J2051\x00\x00\xf1\x006U3H0_C2\x00\x006U3J2051\x00\x00PDE0G16NS2\xf4'\\\x91", ], }, CAR.K5_DL3: { (Ecu.fwdRadar, 0x7D0, None): [ b'\xf1\000DL3_ SCC FHCUP 1.00 1.03 99110-L2000 ', b'\xf1\x8799110L2000\xf1\000DL3_ SCC FHCUP 1.00 1.03 99110-L2000 ', b'\xf1\x8799110L2100\xf1\x00DL3_ SCC F-CUP 1.00 1.03 99110-L2100 ', b'\xf1\x8799110L2100\xf1\x00DL3_ SCC FHCUP 1.00 1.03 99110-L2100 ', ], (Ecu.fwdCamera, 0x7C4, None): [ b'\xf1\000DL3 MFC AT USA LHD 1.00 1.03 99210-L3000 200915', b'\xf1\x00DL3 MFC AT USA LHD 1.00 1.04 99210-L3000 210208', ], (Ecu.eps, 0x7D4, None): [ b'\xf1\x8756310-L3110\xf1\000DL3 MDPS C 1.00 1.01 56310-L3110 4DLAC101', b'\xf1\x8756310-L3220\xf1\x00DL3 MDPS C 1.00 1.01 56310-L3220 4DLAC101', b'\xf1\x8757700-L3000\xf1\x00DL3 MDPS R 1.00 1.02 57700-L3000 4DLAP102', ], (Ecu.esp, 0x7D1, None): [ b'\xf1\000DL ESC \006 101 \004\002 58910-L3200', b'\xf1\x8758910-L3200\xf1\000DL ESC \006 101 \004\002 58910-L3200', b'\xf1\x8758910-L3800\xf1\x00DL ESC \t 101 \x07\x02 58910-L3800', b'\xf1\x8758910-L3600\xf1\x00DL ESC \x03 100 \x08\x02 58910-L3600', ], (Ecu.engine, 0x7E0, None): [ b'\xf1\x87391212MKT0', b'\xf1\x87391212MKV0', b'\xf1\x870\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf1\x82DLDWN5TMDCXXXJ1B', ], (Ecu.transmission, 0x7E1, None): [ b'\xf1\000bcsh8p54 U913\000\000\000\000\000\000TDL2T16NB1ia\v\xb8', b'\xf1\x87SALFEA5652514GK2UUeV\x88\x87\x88xxwg\x87ww\x87wwfwvd/\xfb\xffvU_\xff\x93\xd3\xf1\x81U913\000\000\000\000\000\000\xf1\000bcsh8p54 U913\000\000\000\000\000\000TDL2T16NB1ia\v\xb8', b'\xf1\x87SALFEA6046104GK2wvwgeTeFg\x88\x96xwwwwffvfe?\xfd\xff\x86fo\xff\x97A\xf1\x81U913\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00TDL2T16NB1ia\x0b\xb8', b'\xf1\x87SCMSAA8572454GK1\x87x\x87\x88Vf\x86hgwvwvwwgvwwgT?\xfb\xff\x97fo\xffH\xb8\xf1\x81U913\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00TDL4T16NB05\x94t\x18', b'\xf1\x87954A02N300\x00\x00\x00\x00\x00\xf1\x81T02730A1 \xf1\x00T02601BL T02730A1 WDL3T25XXX730NS2b\x1f\xb8%', ], }, CAR.STINGER: { (Ecu.fwdRadar, 0x7d0, None): [ b'\xf1\x00CK__ SCC F_CUP 1.00 1.01 96400-J5100 ', b'\xf1\x00CK__ SCC F_CUP 1.00 1.03 96400-J5100 ', ], (Ecu.fwdCamera, 0x7c4, None): [ b'\xf1\x00CK MFC AT USA LHD 1.00 1.03 95740-J5000 170822', b'\xf1\x00CK MFC AT USA LHD 1.00 1.04 95740-J5000 180504', ], (Ecu.eps, 
0x7d4, None): [ b'\xf1\x00CK MDPS R 1.00 1.04 57700-J5200 4C2CL104', b'\xf1\x00CK MDPS R 1.00 1.04 57700-J5220 4C2VL104', b'\xf1\x00CK MDPS R 1.00 1.04 57700-J5420 4C4VL104', b'\xf1\x00CK MDPS R 1.00 1.06 57700-J5420 4C4VL106', b'\xf1\x00CK MDPS R 1.00 1.07 57700-J5420 4C4VL107', ], (Ecu.engine, 0x7e0, None): [ b'\xf1\x81606DE051\x00\x00\x00\x00\x00\x00\x00\x00', b'\xf1\x81640E0051\x00\x00\x00\x00\x00\x00\x00\x00', b'\xf1\x81640L0051\x00\x00\x00\x00\x00\x00\x00\x00', b'\xf1\x82CKJN3TMSDE0B\x00\x00\x00\x00', b'\xf1\x82CKKN3TMD_H0A\x00\x00\x00\x00', ], (Ecu.transmission, 0x7e1, None): [ b'\xf1\x87VCJLE17622572DK0vd6D\x99\x98y\x97vwVffUfvfC%CuT&Dx\x87o\xff{\x1c\xf1\x81E21\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E21\x00\x00\x00\x00\x00\x00\x00SCK0T33NB0\x88\xa2\xe6\xf0', b'\xf1\x87VDHLG17000192DK2xdFffT\xa5VUD$DwT\x86wveVeeD&T\x99\xba\x8f\xff\xcc\x99\xf1\x81E21\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E21\x00\x00\x00\x00\x00\x00\x00SCK0T33NB0\x88\xa2\xe6\xf0', b'\xf1\x87VDHLG17000192DK2xdFffT\xa5VUD$DwT\x86wveVeeD&T\x99\xba\x8f\xff\xcc\x99\xf1\x89E21\x00\x00\x00\x00\x00\x00\x00\xf1\x82SCK0T33NB0', b'\xf1\x87VDHLG17034412DK2vD6DfVvVTD$D\x99w\x88\x98EDEDeT6DgfO\xff\xc3=\xf1\x81E21\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E21\x00\x00\x00\x00\x00\x00\x00SCK0T33NB0\x88\xa2\xe6\xf0', b'\xf1\x87VDHLG17118862DK2\x8awWwgu\x96wVfUVwv\x97xWvfvUTGTx\x87o\xff\xc9\xed\xf1\x81E21\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E21\x00\x00\x00\x00\x00\x00\x00SCK0T33NB0\x88\xa2\xe6\xf0', b'\xf1\x87VDJLG18425192DK2xeGewfgf\x86eFeweWv\x88eVeuTGT\x89vo\xff\tJ\xf1\x81E24\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E24\x00\x00\x00\x00\x00\x00\x00SCK0T33NB1\x8a\xdcM\x90', b'\xf1\x87VDKLJ18675252DK6\x89vhgwwwwveVU\x88w\x87w\x99vgf\x97vXfgw_\xff\xc2\xfb\xf1\x89E25\x00\x00\x00\x00\x00\x00\x00\xf1\x82TCK0T33NB2', b'\xf1\x87WAJTE17552812CH4vfFffvfVeT5DwvvVVdFeegeg\x88\x88o\xff\x1a]\xf1\x81E21\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E21\x00\x00\x00\x00\x00\x00\x00TCK2T20NB1\x19\xd2\x00\x94', ], }, CAR.NIRO_EV: { (Ecu.fwdRadar, 0x7D0, None): [ b'\xf1\x00DEev SCC F-CUP 1.00 1.00 99110-Q4000 ', b'\xf1\x00DEev SCC F-CUP 1.00 1.02 96400-Q4100 ', b'\xf1\x00DEev SCC F-CUP 1.00 1.03 96400-Q4100 ', b'\xf1\x00OSev SCC F-CUP 1.00 1.01 99110-K4000 ', b'\xf1\x8799110Q4000\xf1\x00DEev SCC F-CUP 1.00 1.00 99110-Q4000 ', b'\xf1\x8799110Q4100\xf1\x00DEev SCC F-CUP 1.00 1.00 99110-Q4100 ', b'\xf1\x8799110Q4500\xf1\x00DEev SCC F-CUP 1.00 1.00 99110-Q4500 ', b'\xf1\x8799110Q4600\xf1\x00DEev SCC FNCUP 1.00 1.00 99110-Q4600 ', b'\xf1\x8799110Q4600\xf1\x00DEev SCC FHCUP 1.00 1.00 99110-Q4600 ', ], (Ecu.fwdCamera, 0x7C4, None): [ b'\xf1\x00DEE MFC AT USA LHD 1.00 1.03 95740-Q4000 180821', b'\xf1\x00DEE MFC AT EUR LHD 1.00 1.00 99211-Q4000 191211', b'\xf1\x00DEE MFC AT USA LHD 1.00 1.00 99211-Q4000 191211', b'\xf1\000DEE MFC AT EUR LHD 1.00 1.00 99211-Q4100 200706', b'\xf1\x00OSE LKAS AT EUR LHD 1.00 1.00 95740-K4100 W40', ], (Ecu.eps, 0x7D4, None): [ b'\xf1\x00OS MDPS C 1.00 1.04 56310K4050\x00 4OEDC104', b'\xf1\x00DE MDPS C 1.00 1.05 56310Q4000\x00 4DEEC105', b'\xf1\x00DE MDPS C 1.00 1.05 56310Q4100\x00 4DEEC105', ], (Ecu.esp, 0x7D1, None): [ b'\xf1\x00OS IEB \r 212 \x11\x13 58520-K4000', ], }, CAR.NIRO_HEV: { (Ecu.fwdRadar, 0x7d0, None): [ b'\xf1\x00DEhe SCC H-CUP 1.01 1.02 96400-G5100 ', b'\xf1\x00DEhe SCC FHCUP 1.00 1.00 99110-G5600 ', ], (Ecu.fwdCamera, 0x7c4, None): [ b'\xf1\x00DEP MFC AT USA LHD 1.00 1.01 95740-G5010 170424', b'\xf1\x00DEH MFC AT USA LHD 1.00 1.07 99211-G5000 201221', ], (Ecu.eps, 0x7d4, None): [ b'\xf1\000DE 
MDPS C 1.00 1.09 56310G5301\000 4DEHC109', b'\xf1\x00DE MDPS C 1.00 1.01 56310G5520\x00 4DEPC101', ], (Ecu.engine, 0x7e0, None): [ b'\xf1\x816H6F4051\000\000\000\000\000\000\000\000', b'\xf1\x816H6G5051\x00\x00\x00\x00\x00\x00\x00\x00', ], (Ecu.transmission, 0x7e1, None): [ b"\xf1\x816U3J2051\000\000\xf1\0006U3H0_C2\000\0006U3J2051\000\000PDE0G16NS2\xf4\'\\\x91", b'\xf1\x816U3J2051\000\000\xf1\0006U3H0_C2\000\0006U3J2051\000\000PDE0G16NS2\000\000\000\000', b'\xf1\x816U3J9051\x00\x00\xf1\x006U3H1_C2\x00\x006U3J9051\x00\x00HDE0G16NL3\x00\x00\x00\x00', b'\xf1\x816U3J9051\x00\x00\xf1\x006U3H1_C2\x00\x006U3J9051\x00\x00HDE0G16NL3\xb9\xd3\xfaW', ], }, CAR.SELTOS: { (Ecu.fwdRadar, 0x7d0, None): [ b'\xf1\x8799110Q5100\xf1\000SP2_ SCC FHCUP 1.01 1.05 99110-Q5100 ', ], (Ecu.fwdCamera, 0x7c4, None): [ b'\xf1\000SP2 MFC AT USA LHD 1.00 1.04 99210-Q5000 191114', b'\xf1\000SP2 MFC AT USA LHD 1.00 1.05 99210-Q5000 201012', ], (Ecu.eps, 0x7d4, None): [ b'\xf1\000SP2 MDPS C 1.00 1.04 56300Q5200 ', b'\xf1\000SP2 MDPS C 1.01 1.05 56300Q5200 ', ], (Ecu.esp, 0x7d1, None): [ b'\xf1\x8758910-Q5450\xf1\000SP ESC \a 101\031\t\005 58910-Q5450', b'\xf1\x8758910-Q5450\xf1\000SP ESC \t 101\031\t\005 58910-Q5450', ], (Ecu.engine, 0x7e0, None): [ b'\xf1\x81616D2051\000\000\000\000\000\000\000\000', b'\xf1\x81616D5051\000\000\000\000\000\000\000\000', b'\001TSP2KNL06F100J0K', b'\001TSP2KNL06F200J0K', ], (Ecu.transmission, 0x7e1, None): [ b'\xf1\x87CZLUB49370612JF7h\xa8y\x87\x99\xa7hv\x99\x97fv\x88\x87x\x89x\x96O\xff\x88\xff\xff\xff.@\xf1\x816V2C2051\000\000\xf1\0006V2B0_C2\000\0006V2C2051\000\000CSP4N20NS3\000\000\000\000', b'\xf1\x87954A22D200\xf1\x81T01950A1 \xf1\000T0190XBL T01950A1 DSP2T16X4X950NS6\xd30\xa5\xb9', b'\xf1\x87954A22D200\xf1\x81T01950A1 \xf1\000T0190XBL T01950A1 DSP2T16X4X950NS8\r\xfe\x9c\x8b', ], }, CAR.K7: { (Ecu.eps, 0x7d4, None): [b'\xf1\000YG MDPS C 1.00 1.01 56310F6350\000 4YG7C101',], }, # Genesis CAR.GENESIS_G70: { (Ecu.fwdRadar, 0x7d0, None): [ b'\xf1\x00IK__ SCC F-CUP 1.00 1.02 96400-G9100 ', b'\xf1\x00IK__ SCC F-CUP 1.00 1.02 96400-G9100 \xf1\xa01.02', ], (Ecu.fwdCamera, 0x7c4, None): [ b'\xf1\x00IK MFC AT USA LHD 1.00 1.01 95740-G9000 170920', ], (Ecu.eps, 0x7d4, None): [ b'\xf1\x00IK MDPS R 1.00 1.06 57700-G9420 4I4VL106', b'\xf1\x00IK MDPS R 1.00 1.07 57700-G9220 4I2VL107', ], (Ecu.esp, 0x7d1, None): [ b'\xf1\x00\x00\x00\x00\x00\x00\x00', ], (Ecu.engine, 0x7e0, None): [ b'\xf1\x81640F0051\x00\x00\x00\x00\x00\x00\x00\x00', b'\xf1\x81640J0051\x00\x00\x00\x00\x00\x00\x00\x00', ], (Ecu.transmission, 0x7e1, None): [ b'\xf1\x87VDJLT17895112DN4\x88fVf\x99\x88\x88\x88\x87fVe\x88vhwwUFU\x97eFex\x99\xff\xb7\x82\xf1\x81E25\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E25\x00\x00\x00\x00\x00\x00\x00SIK0T33NB2\x11\x1am\xda', b'\xf1\x87VCJLP18407832DN3\x88vXfvUVT\x97eFU\x87d7v\x88eVeveFU\x89\x98\x7f\xff\xb2\xb0\xf1\x81E25\x00\x00\x00' b'\x00\x00\x00\x00\xf1\x00bcsh8p54 E25\x00\x00\x00\x00\x00\x00\x00SIK0T33NB4\xecE\xefL', ], }, } CHECKSUM = { "crc8": [CAR.SONATA, CAR.SANTA_FE, CAR.PALISADE, CAR.SELTOS, CAR.ELANTRA21, CAR.K5_DL3, CAR.SONATA_HEV, CAR.SANTA_FE_HEV, CAR.SOUL_EV, CAR.ELANTRA21_HEV, CAR.K5_DL3_HEV], "6B": [CAR.SORENTO, CAR.GENESIS], } FEATURES = { "use_cluster_gears": # Use Cluster for Gear Selection, rather than Transmission [ CLU15 ] {CAR.ELANTRA_I30, CAR.KONA, CAR.GRANDEUR, CAR.MOHAVE, CAR.NIRO_HEV, CAR.K7}, "use_tcu_gears": # Use TCU Message for Gear Selection [ TCU12 ] {CAR.SONATA_LF, CAR.VELOSTER, CAR.K5}, "use_elect_gears": # Use Elect GEAR Message for Gear Selection [ ELECT_GEAR ] 
{CAR.KONA_EV, CAR.IONIQ_EV, CAR.NEXO, CAR.NIRO_EV, CAR.SOUL_EV, CAR.KONA_HEV, CAR.IONIQ_HEV, CAR.NIRO_HEV, CAR.SONATA_HEV, CAR.SONATA_LF_HEV, CAR.GRANDEUR_HEV, CAR.GRANDEUR20_HEV, CAR.K5_HEV, CAR.K5_DL3_HEV, CAR.K7_HEV}, # Gear not set is [ LVR12 ] # these cars use the [ FCA11 ] message for the AEB and FCW signals, all others use [ SCC12 ] # "use_fca": {}, carstate aeb_fcw / qt ui aebselect toggle set # "has_scc13": {}, # "has_scc14": {}, # new lfa car - carcontroller lfamfc / hyundaican lfamfc using qt ui mfcselect toggle set } EV_CAR = {CAR.KONA_EV, CAR.IONIQ_EV, CAR.NIRO_EV, CAR.SOUL_EV, CAR.NEXO} HYBRID_CAR = {CAR.KONA_HEV, CAR.IONIQ_HEV, CAR.NIRO_HEV, CAR.SANTA_FE_HEV, CAR.ELANTRA21_HEV, CAR.SONATA_HEV, CAR.SONATA_LF_HEV, CAR.GRANDEUR_HEV, CAR.GRANDEUR20_HEV, CAR.K5_HEV, CAR.K5_DL3_HEV, CAR.K7_HEV} EV_HYBRID_CAR = EV_CAR | HYBRID_CAR DBC = { # Hyundai CAR.ELANTRA_I30: dbc_dict('hyundai_kia_generic', None), CAR.ELANTRA21: dbc_dict('hyundai_kia_generic', None), CAR.ELANTRA21_HEV: dbc_dict('hyundai_kia_generic', None), CAR.SONATA: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'), CAR.SONATA_HEV: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'), CAR.SONATA_LF: dbc_dict('hyundai_kia_generic', None), CAR.SONATA_LF_HEV: dbc_dict('hyundai_kia_generic', None), CAR.KONA: dbc_dict('hyundai_kia_generic', None), CAR.KONA_EV: dbc_dict('hyundai_kia_generic', None), CAR.KONA_HEV: dbc_dict('hyundai_kia_generic', None), CAR.IONIQ_EV: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'), CAR.IONIQ_HEV: dbc_dict('hyundai_kia_generic', None), CAR.SANTA_FE: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'), CAR.SANTA_FE_HEV: dbc_dict('hyundai_kia_generic', None), CAR.PALISADE: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'), CAR.VELOSTER: dbc_dict('hyundai_kia_generic', None), CAR.GRANDEUR: dbc_dict('hyundai_kia_generic', None), CAR.GRANDEUR_HEV: dbc_dict('hyundai_kia_generic', None), CAR.GRANDEUR20: dbc_dict('hyundai_kia_generic', None), CAR.GRANDEUR20_HEV: dbc_dict('hyundai_kia_generic', None), CAR.NEXO: dbc_dict('hyundai_kia_generic_nexo', None), # Kia CAR.FORTE: dbc_dict('hyundai_kia_generic', None), CAR.K5: dbc_dict('hyundai_kia_generic', None), CAR.K5_HEV: dbc_dict('hyundai_kia_generic', None), CAR.K5_DL3: dbc_dict('hyundai_kia_generic', None), CAR.K5_DL3_HEV: dbc_dict('hyundai_kia_generic', None), CAR.SPORTAGE: dbc_dict('hyundai_kia_generic', None), CAR.SORENTO: dbc_dict('hyundai_kia_generic', None), CAR.MOHAVE: dbc_dict('hyundai_kia_generic', None), CAR.STINGER: dbc_dict('hyundai_kia_generic', None), CAR.NIRO_EV: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'), CAR.NIRO_HEV: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'), CAR.SOUL_EV: dbc_dict('hyundai_kia_generic', None), CAR.SELTOS: dbc_dict('hyundai_kia_generic', None), CAR.K7: dbc_dict('hyundai_kia_generic', None), CAR.K7_HEV: dbc_dict('hyundai_kia_generic', None), CAR.K9: dbc_dict('hyundai_kia_generic', None), # Genesis CAR.GENESIS: dbc_dict('hyundai_kia_generic', None), CAR.GENESIS_G70: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'), CAR.GENESIS_G80: dbc_dict('hyundai_kia_generic', None), CAR.GENESIS_G90: dbc_dict('hyundai_kia_generic', None), } STEER_THRESHOLD = 150 def main(): for member, value in vars(CAR).items(): if not member.startswith("_"): print(value) if __name__ == "__main__": main()
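# --- Illustrative usage (added sketch, not part of the original values.py) ---
# The tables above (FEATURES, CHECKSUM, EV_HYBRID_CAR) drive per-car behaviour in
# carstate/carcontroller.  The helper below is hypothetical -- it only shows how the
# tables are meant to be queried, assuming it lives in the same module so CAR and
# the dicts are in scope.
def describe_car(candidate):
    """Hypothetical helper: summarise gear source, checksum style and electrification."""
    if candidate in FEATURES["use_cluster_gears"]:
        gear_message = "CLU15"
    elif candidate in FEATURES["use_tcu_gears"]:
        gear_message = "TCU12"
    elif candidate in FEATURES["use_elect_gears"]:
        gear_message = "ELECT_GEAR"
    else:
        gear_message = "LVR12"  # per the comment above: gear not set -> LVR12
    if candidate in CHECKSUM["crc8"]:
        checksum = "crc8"
    elif candidate in CHECKSUM["6B"]:
        checksum = "6B"
    else:
        checksum = "default"
    return {
        "gear_message": gear_message,
        "checksum": checksum,
        "electrified": candidate in EV_HYBRID_CAR,
    }

# e.g. describe_car(CAR.PALISADE) -> {'gear_message': 'LVR12', 'checksum': 'crc8', 'electrified': False}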
# -*-coding:Utf-8 -* # Copyright (c) 2010 LE GOFF Vincent # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT # OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """Fichier contenant le contexte éditeur EdtBoiteEnvoi""" from primaires.interpreteur.editeur import Editeur from primaires.interpreteur.editeur.env_objet import EnveloppeObjet from primaires.communication.editeurs.medit import EdtMedit from primaires.communication.mudmail import ENVOYE from primaires.format.fonctions import couper_phrase class EdtBoiteEnvoi(Editeur): """Classe définissant le contexte-éditeur 'boîte d'envoi'. Ce contexte liste les messages envoyés et propose des options d'édition. 
""" def __init__(self, pere, objet=None, attribut=None): """Constructeur de l'éditeur""" Editeur.__init__(self, pere, objet, attribut) self.ajouter_option("l", self.opt_lire) self.ajouter_option("c", self.opt_copier) self.ajouter_option("s", self.opt_supprimer) def accueil(self): """Méthode d'accueil""" joueur = self.pere.joueur mails = type(self).importeur.communication.mails.get_mails_pour( joueur, ENVOYE) msg = "||tit| " + "Messages envoyés".ljust(76) + "|ff||\n" msg += self.opts.separateur + "\n" msg += self.aide_courte + "\n\n" if not mails: msg += "|att|Vous n'avez envoyé aucun message.|ff|" else: taille = 0 for mail in mails: t_sujet = len(couper_phrase(mail.sujet, 33)) if t_sujet > taille: taille = t_sujet taille = (taille < 5 and 5) or taille msg += "+" + "-".ljust(taille + 41, "-") + "+\n" msg += "| |tit|N°|ff| | |tit|" + "Sujet".ljust(taille) msg += "|ff| | |tit|Destinataire|ff| | |tit|" + "Date".ljust(16) msg += "|ff| |\n" i = 1 for mail in mails: msg += "| |rg|" + str(i).rjust(2) + "|ff| | " msg += "|vr|" + couper_phrase(mail.sujet, 33).ljust( \ taille) + "|ff| | |blc|" msg += couper_phrase(mail.aff_dest,12).ljust(12) + "|ff| | " msg += "|jn|" + mail.date.isoformat(" ")[:16] + "|ff| |\n" i += 1 msg += "+" + "-".ljust(taille + 41, "-") + "+" return msg def opt_lire(self, arguments): """Option lire""" if not arguments or arguments.isspace(): self.pere.joueur << "|err|Vous devez préciser le numéro d'un " \ "message.|ff|" return mails = type(self).importeur.communication.mails.get_mails_pour( self.pere.joueur, ENVOYE) try: num = int(arguments.split(" ")[0]) except ValueError: self.pere.joueur << "|err|Vous devez spécifier un nombre entier " \ "valide.|ff|" else: i = 1 l_mail = None for mail in mails: if num == i: l_mail = mail break i += 1 if l_mail is None: self.pere.joueur << "|err|Le numéro spécifié ne correspond à " \ "aucun message.|ff|" return self.pere.joueur << l_mail.afficher() def opt_copier(self, arguments): """Option copier""" if not arguments or arguments.isspace(): self.pere.joueur << "|err|Vous devez préciser le numéro d'un " \ "message.|ff|" return mails = type(self).importeur.communication.mails.get_mails_pour( self.pere.joueur, ENVOYE) try: num = int(arguments.split(" ")[0]) except ValueError: self.pere.joueur << "|err|Vous devez spécifier un nombre entier " \ "valide.|ff|" else: i = 1 c_mail = None for mail in mails: if num == i: c_mail = mail break i += 1 if c_mail is None: self.pere.joueur << "|err|Le numéro spécifié ne correspond à " \ "aucun message.|ff|" return mail = type(self).importeur.communication.mails.creer_mail( self.pere.joueur) mail.sujet = "CC:" + c_mail.sujet mail.liste_dest = c_mail.liste_dest mail.contenu.ajouter_paragraphe(str(c_mail.contenu)) enveloppe = EnveloppeObjet(EdtMedit, mail, None) enveloppe.parent = self contexte = enveloppe.construire(self.pere.joueur) self.pere.joueur.contextes.ajouter(contexte) contexte.actualiser() def opt_supprimer(self, arguments): """Option supprimer""" if not arguments or arguments.isspace(): self.pere.joueur << "|err|Vous devez préciser le numéro d'un " \ "message.|ff|" return mails = type(self).importeur.communication.mails.get_mails_pour( self.pere.joueur, ENVOYE) try: num = int(arguments.split(" ")[0]) except ValueError: self.pere.joueur << "|err|Vous devez spécifier un nombre entier " \ "valide.|ff|" else: i = 1 s_mail = None for mail in mails: if num == i: s_mail = mail break i += 1 if s_mail is None: self.pere.joueur << "|err|Le numéro spécifié ne correspond à " \ "aucun message.|ff|" return del 
type(self).importeur.communication.mails[s_mail.id] self.pere.joueur << "|att|Ce message a bien été supprimé.|ff|"
from __future__ import absolute_import, division, print_function import numbers import warnings import torch from torch.autograd import Variable import pyro import pyro.poutine as poutine from pyro.distributions.util import is_identically_zero from pyro.infer.elbo import ELBO from pyro.infer.enum import iter_discrete_traces from pyro.infer.util import torch_backward, torch_data_sum, torch_sum from pyro.poutine.util import prune_subsample_sites from pyro.util import check_model_guide_match, is_nan def check_enum_discrete_can_run(model_trace, guide_trace): """ Checks whether `enum_discrete` is supported for the given (model, guide) pair. :param Trace model: A model trace. :param Trace guide: A guide trace. :raises: NotImplementedError """ # Check that all batch_log_pdf shapes are the same, # since we currently do not correctly handle broadcasting. model_trace.compute_batch_log_pdf() guide_trace.compute_batch_log_pdf() shapes = {} for source, trace in [("model", model_trace), ("guide", guide_trace)]: for name, site in trace.nodes.items(): if site["type"] == "sample": shapes[site["batch_log_pdf"].size()] = (source, name) if len(shapes) > 1: raise NotImplementedError( "enum_discrete does not support mixture of batched and un-batched variables. " "Try rewriting your model to avoid batching or running with enum_discrete=False. " "Found the following variables of different batch shapes:\n{}".format( "\n".join(["{} {}: shape = {}".format(source, name, tuple(shape)) for shape, (source, name) in sorted(shapes.items())]))) class Trace_ELBO(ELBO): """ A trace implementation of ELBO-based SVI """ def _get_traces(self, model, guide, *args, **kwargs): """ runs the guide and runs the model against the guide with the result packaged as a trace generator """ for i in range(self.num_particles): if self.enum_discrete: # This iterates over a bag of traces, for each particle. for scale, guide_trace in iter_discrete_traces("flat", guide, *args, **kwargs): model_trace = poutine.trace(poutine.replay(model, guide_trace), graph_type="flat").get_trace(*args, **kwargs) check_model_guide_match(model_trace, guide_trace) guide_trace = prune_subsample_sites(guide_trace) model_trace = prune_subsample_sites(model_trace) check_enum_discrete_can_run(model_trace, guide_trace) guide_trace.compute_score_parts() log_r = model_trace.batch_log_pdf() - guide_trace.batch_log_pdf() weight = scale / self.num_particles yield weight, model_trace, guide_trace, log_r continue guide_trace = poutine.trace(guide).get_trace(*args, **kwargs) model_trace = poutine.trace(poutine.replay(model, guide_trace)).get_trace(*args, **kwargs) check_model_guide_match(model_trace, guide_trace) guide_trace = prune_subsample_sites(guide_trace) model_trace = prune_subsample_sites(model_trace) guide_trace.compute_score_parts() log_r = model_trace.log_pdf() - guide_trace.log_pdf() weight = 1.0 / self.num_particles yield weight, model_trace, guide_trace, log_r def _is_batched(self, weight): return self.enum_discrete and \ isinstance(weight, Variable) and \ weight.dim() > 0 and \ weight.size(0) > 1 def loss(self, model, guide, *args, **kwargs): """ :returns: returns an estimate of the ELBO :rtype: float Evaluates the ELBO with an estimator that uses num_particles many samples/particles. 
""" elbo = 0.0 for weight, model_trace, guide_trace, log_r in self._get_traces(model, guide, *args, **kwargs): elbo_particle = weight * 0 if self._is_batched(weight): log_pdf = "batch_log_pdf" else: log_pdf = "log_pdf" for name in model_trace.nodes.keys(): if model_trace.nodes[name]["type"] == "sample": if model_trace.nodes[name]["is_observed"]: elbo_particle += model_trace.nodes[name][log_pdf] else: elbo_particle += model_trace.nodes[name][log_pdf] elbo_particle -= guide_trace.nodes[name][log_pdf] # drop terms of weight zero to avoid nans if isinstance(weight, numbers.Number): if weight == 0.0: elbo_particle = torch.zeros_like(elbo_particle) else: elbo_particle[weight == 0] = 0.0 elbo += torch_data_sum(weight * elbo_particle) loss = -elbo if is_nan(loss): warnings.warn('Encountered NAN loss') return loss def loss_and_grads(self, model, guide, *args, **kwargs): """ :returns: returns an estimate of the ELBO :rtype: float Computes the ELBO as well as the surrogate ELBO that is used to form the gradient estimator. Performs backward on the latter. Num_particle many samples are used to form the estimators. """ elbo = 0.0 # grab a trace from the generator for weight, model_trace, guide_trace, log_r in self._get_traces(model, guide, *args, **kwargs): elbo_particle = weight * 0 surrogate_elbo_particle = weight * 0 batched = self._is_batched(weight) # compute elbo and surrogate elbo if batched: log_pdf = "batch_log_pdf" else: log_pdf = "log_pdf" for name, model_site in model_trace.nodes.items(): if model_site["type"] == "sample": model_log_pdf = model_site[log_pdf] if model_site["is_observed"]: elbo_particle += model_log_pdf surrogate_elbo_particle += model_log_pdf else: guide_site = guide_trace.nodes[name] guide_log_pdf, score_function_term, entropy_term = guide_site["score_parts"] if not batched: guide_log_pdf = guide_log_pdf.sum() elbo_particle += model_log_pdf - guide_log_pdf surrogate_elbo_particle += model_log_pdf if not is_identically_zero(entropy_term): if not batched: entropy_term = entropy_term.sum() surrogate_elbo_particle -= entropy_term if not is_identically_zero(score_function_term): if not batched: score_function_term = score_function_term.sum() surrogate_elbo_particle += log_r.detach() * score_function_term # drop terms of weight zero to avoid nans if isinstance(weight, numbers.Number): if weight == 0.0: elbo_particle = torch.zeros_like(elbo_particle) surrogate_elbo_particle = torch.zeros_like(surrogate_elbo_particle) else: weight_eq_zero = (weight == 0) elbo_particle[weight_eq_zero] = 0.0 surrogate_elbo_particle[weight_eq_zero] = 0.0 elbo += torch_data_sum(weight * elbo_particle) surrogate_elbo_particle = torch_sum(weight * surrogate_elbo_particle) # collect parameters to train from model and guide trainable_params = set(site["value"] for trace in (model_trace, guide_trace) for site in trace.nodes.values() if site["type"] == "param") if trainable_params: surrogate_loss_particle = -surrogate_elbo_particle torch_backward(surrogate_loss_particle) pyro.get_param_store().mark_params_active(trainable_params) loss = -elbo if is_nan(loss): warnings.warn('Encountered NAN loss') return loss
# -*- coding: utf-8 -*- """HydroMT workflows""" from .basin_mask import * from .forcing import * from .rivers import *
""" To trace the falcon web framework, install the trace middleware:: import falcon from ddtrace import tracer from ddtrace.contrib.falcon import TraceMiddleware mw = TraceMiddleware(tracer, 'my-falcon-app') falcon.API(middleware=[mw]) You can also use the autopatching functionality:: import falcon from ddtrace import tracer, patch patch(falcon=True) app = falcon.API() To disable distributed tracing when using autopatching, set the ``DATADOG_FALCON_DISTRIBUTED_TRACING`` environment variable to ``False``. **Supported span hooks** The following is a list of available tracer hooks that can be used to intercept and modify spans created by this integration. - ``request`` - Called before the response has been finished - ``def on_falcon_request(span, request, response)`` Example:: import falcon from ddtrace import config, patch_all patch_all() app = falcon.API() @config.falcon.hooks.on('request') def on_falcon_request(span, request, response): span.set_tag('my.custom', 'tag') :ref:`Headers tracing <http-headers-tracing>` is supported for this integration. """ from ...utils.importlib import require_modules required_modules = ["falcon"] with require_modules(required_modules) as missing_modules: if not missing_modules: from .middleware import TraceMiddleware from .patch import patch __all__ = ["TraceMiddleware", "patch"]
# coding: utf-8 # @author octopoulo <polluxyz@gmail.com> # @version 2020-05-01 """ Sync """ import gzip from logging import getLogger import os import re import shutil from subprocess import run from time import time from typing import Any from PIL import Image, ImageFile from common import makedirs_safe, read_text_safe, write_text_safe from css_minify import css_minify # folders, might want to edit these BASE = os.path.dirname(os.path.dirname(__file__)) COMPILER = os.path.join(BASE, 'script/closure-compiler-v20200406.jar') CSS_FOLDER = os.path.join(BASE, 'css') JAVA = 'java' JS_FOLDER = os.path.join(BASE, 'js') LOCAL = BASE # edit these files CSS_FILES = [ 'light', ] JS_FILES = { '4d': [ 'libs/three', 'libs/stats', 'libs/GLTFLoader', 'libs/DRACOLoader', 'libs/camera-controls', ], 'all': [ 'libs/socket.io', ':common', 'libs/chess-quick', ':engine', ':global', ':3d', ':xboard', ':graph', ':game', ':temp', ':network', ':startup', ':config', 'script', ], 'chart': [ 'libs/chart-quick', ], } NEED_GZIPS = { '4d_.js', 'ammo.wasm.js', 'ammo.wasm.wasm', 'chart_.js', 'chart.min.js', 'dark.css', 'dark-archive.css', 'draco_decoder.js', 'draco_decoder.wasm', 'draco_wasm_wrapper.js', 'fra.json', 'index.html', 'jpn.json', 'light-archive.css', 'manifest.json', 'pieces-draco.glb', 'rus.json', 'sea.css', 'sea-archive.css', 'ukr.json', } # don't gzip inside those folders SKIP_GZIPS = { 'archive', 'doc', 'image', 'model', 'node_modules', 'script', 'sound', 'test', 'theme', } class Sync: """Sync """ # def __init__(self, **kwargs): self.kwargs = kwargs self.clean = kwargs.get('clean') # type: bool self.host = kwargs.get('host') # type: str self.no_compress = kwargs.get('no_compress') # type: bool self.no_debug = kwargs.get('no_debug') # type: bool self.no_process = kwargs.get('no_process') # type: bool self.zip = kwargs.get('zip') # type: bool self.logger = getLogger(self.__class__.__name__) def combine_pieces(self, folder: str): """Combine chess pieces png files into 1 file """ if 'metro' in folder: height = 160 width = 160 else: height = 80 width = 80 combined = Image.new('RGBA', (width * 12, height), (0, 255, 0, 0)) output = f'{folder}.png' i = 0 pieces = 'bknpqr' for color in 'bw': for piece in pieces: name = f'{color}{piece}' image = Image.open(os.path.join(folder, f'{name}.png')) offset = (i * width, 0) combined.paste(image, offset) i += 1 combined.save(output, format='png') print('a', end='') def combine_themes(self, folder: str): """Combine all pieces of each theme """ sources = os.listdir(folder) for source in sources: filename = os.path.join(folder, source) if os.path.isdir(filename): self.combine_pieces(filename) def compress_3d(self, data: str) -> str: """Compress THREE javascript """ data = re.sub(r'\bTHREE\b', 'T', data) data = re.sub(r'console\.(error|warn)\(.+?\);', '', data, flags=re.S) return data def compress_gzip(self, filename: str): """Gzip compress a file """ output = f'{filename}.gz' with open(filename, 'rb') as f_in: with gzip.open(output, 'wb') as f_out: shutil.copyfileobj(f_in, f_out) # synchronise the date/time if os.path.isfile(output): info = os.stat(output) os.utime(filename, (info.st_atime, info.st_mtime)) print('g', end='') def compress_js(self, filename: str) -> str: """Compress javascript """ base, ext = os.path.splitext(filename) output = f'{base}_{ext}' if self.no_compress: shutil.copy(filename, output) return output args = [ JAVA, '-jar', COMPILER, '--js', filename, '--js_output_file', output, '--language_in', 'ECMASCRIPT_2018', '--language_out', 'ECMASCRIPT_2018', ] if 
self.kwargs.get('advanced'): args.extend(['--compilation_level', 'ADVANCED']) run(args) return output def gzip_files(self, folder: str, depth: int, delete: bool): """Gzip all wanted files, recursively """ queues = [] sources = os.listdir(folder) for source in sources: if source.startswith(('.', '_')): continue filename = os.path.join(folder, source) if os.path.isdir(filename): if source not in SKIP_GZIPS: queues.append(filename) continue # file if not os.path.isfile(filename): continue if source not in NEED_GZIPS: continue output = f'{filename}.gz' source_time = os.path.getmtime(filename) if os.path.isfile(output): destin_time = os.path.getmtime(output) if delete: os.unlink(output) print('d', end='') else: destin_time = 0 if not delete and source_time != destin_time: self.compress_gzip(filename) print(f"{' ' * depth}{filename}") for queue in queues: self.gzip_files(queue, depth + 1, delete) @staticmethod def import_file(match: Any) -> str: """@import {common.js} """ source = match.group(1) filename = os.path.join(JS_FOLDER, source) data = read_text_safe(filename) or '' if source.endswith('.js'): data = re.sub(r'["\']use strict["\'];?', '', data) return data def normalise_folders(self): """Add the missing / (slash) at the end of the folder """ global CSS_FOLDER, JS_FOLDER, LOCAL if CSS_FOLDER[-1] != '/': CSS_FOLDER += '/' if JS_FOLDER[-1] != '/': JS_FOLDER += '/' if LOCAL[-1] != '/': LOCAL += '/' def create_index(self): """Create the new index.html """ base = os.path.join(LOCAL, 'index_base.html') base_time = os.path.getmtime(base) index = os.path.join(LOCAL, 'index.html') index_time = os.path.getmtime(index) if os.path.isfile(index) else 0 change = 0 if base_time >= index_time: change += 1 # 1) minimise JS for js_output, js_files in JS_FILES.items(): all_js = os.path.join(JS_FOLDER, f'{js_output}.js') all_min_js = os.path.join(JS_FOLDER, f'{js_output}_.js') # common/engine changed => need to update, even though we're not using those files js_dates = [os.path.abspath(f"{JS_FOLDER}{js_file.strip(':')}.js") for js_file in js_files] js_names = [os.path.abspath(f'{JS_FOLDER}{js_file}.js') for js_file in js_files if js_file[0] != ':'] if js_output == 'all': # script_js = os.path.join(JS_FOLDER, 'script.js') extras = [] else: extras = [] # skip? 
update = True if os.path.isfile(all_min_js) and os.path.isfile(all_js): all_time = os.path.getmtime(all_min_js) update = False for js_date in js_dates + extras: update |= os.path.isfile(js_date) and os.path.getmtime(js_date) >= all_time if not update: print('J', end='') continue datas = [] for js_name in js_names: print(js_name) script_data = read_text_safe(js_name) if not script_data: continue # process the script.js if js_name.endswith('script.js'): script_data = re.sub('@import {(.*?)}', self.import_file, script_data); script_data = re.sub('// BEGIN.*?// END', '', script_data, flags=re.S) if self.no_debug: script_data = re.sub('// <<.*?// >>', '', script_data, flags=re.S) # use HOST print(f'host={self.host}') if self.host != '/': script_data = script_data.replace("HOST = '/',", f"HOST = '{self.host}',") datas.append(script_data) data = '\n'.join(datas) if '4d' in js_output: data = self.compress_3d(data) write_text_safe(all_js, data) self.compress_js(all_js) print('j', end='') change += 1 # 2) minimise CSS all_css = os.path.join(CSS_FOLDER, 'all.css') all_min_css = os.path.join(CSS_FOLDER, 'all_.css') css_names = [os.path.abspath(f'{CSS_FOLDER}{css_file}.css') for css_file in CSS_FILES] update = True if os.path.isfile(all_min_css) and os.path.isfile(all_css): all_time = os.path.getmtime(all_min_css) update = False for css_name in css_names: update |= os.path.isfile(css_name) and os.path.getmtime(css_name) >= all_time if update: datas = [] for css_name in css_names: datas.append(read_text_safe(css_name) or '') data = '\n'.join(datas) write_text_safe(all_css, data) css_data = css_minify(data) write_text_safe(all_min_css, css_data) print('c', end='') change += 1 else: css_data = read_text_safe(all_min_css) or '' print('C', end='') if not change: print('X', end='') return # 3) remove BEGIN ... END html = read_text_safe(base) html = re.sub('<!-- BEGIN -->.*?<!-- END -->', '', html, flags=re.S) html = re.sub('// BEGIN.*?// END', '', html, flags=re.S) # use the HOST if self.host != '/': replaces = { 'href="/': f'href="{self.host}', 'src="/': f'src="{self.host}', } for key, value in replaces.items(): html = html.replace(key, value) # 4) create the new index.html if not self.no_process: all_min_js = os.path.join(JS_FOLDER, 'all_.js') js_data = read_text_safe(all_min_js) or '' replaces = { '<!-- {SCRIPT} -->': f'<script>{js_data}</script>', '<!-- {STYLE} -->': f'<style>{css_data}</style>', } for key, value in replaces.items(): html = html.replace(key, value) html = re.sub('<!-- .*? -->', '', html, flags=re.S) html = re.sub(r'\n\s+', '\n', html) filename = os.path.join(LOCAL, 'index.html') write_text_safe(filename, html) def synchronise(self) -> bool: """Synchronise the files """ self.normalise_folders() self.create_index() if self.clean: self.gzip_files(LOCAL, 0, True) elif self.zip: self.gzip_files(LOCAL, 0, False) return True if __name__ == '__main__': start = time() sync = Sync() if 0: sync.combine_themes(os.path.join(BASE, 'theme')) else: sync.synchronise() end = time() print(f'\nELAPSED: {end-start:.3f} seconds')
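# --- Illustrative usage (added sketch, not part of the original script) ---
# The __main__ block above runs Sync() with defaults; this sketch shows the keyword
# options the class actually reads (see the kwargs.get() calls in __init__ and
# compress_js).  Values are examples only, and the script still assumes the project
# layout described by the BASE/CSS_FOLDER/JS_FOLDER constants at the top of the file.
sync = Sync(
    host='/',          # base URL substituted into href/src and the HOST constant
    advanced=False,    # True => closure-compiler ADVANCED compilation level
    no_compress=False, # True => skip closure-compiler, just copy foo.js -> foo_.js
    no_debug=False,    # True => strip the // << ... // >> debug blocks from script.js
    no_process=False,  # True => don't inline the minified JS/CSS into index.html
    zip=True,          # gzip the whitelisted files after building index.html
    clean=False,       # True => delete existing .gz files instead of creating them
)
sync.synchronise()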
"""Tests for the main module.""" import unittest from unittest.mock import Mock, patch from yala.main import LinterRunner class TestLinterRunner(unittest.TestCase): """Test the LinterRunner class.""" @patch('yala.main.Config') def test_chosen_not_found(self, mock_config): """Should print an error when chosen linter is not found.""" # Linter chosen by the user name = 'my linter' mock_config.user_linters = [name] _, stderr = self._path_and_run(mock_config, name) self.assertIn('Did you install', stderr[0]) @patch('yala.main.Config') def test_not_chosen_not_found(self, mock_config): """Should not print an error when chosen linter is not found.""" # No linters chosen by the user mock_config.user_linters = [] stdout, stderr = self._path_and_run(mock_config) self.assertEqual(0, len(stdout)) self.assertEqual(0, len(stderr)) def _path_and_run(self, mock_config, name='my linter'): cls = self._mock_linter_class(name) mock_config.get_linter_classes.return_value = [cls] with patch('yala.main.subprocess.run', side_effect=FileNotFoundError): linter_cfg_tgts = cls, mock_config, [] return LinterRunner.run(linter_cfg_tgts) @staticmethod def _mock_linter_class(name): linter_class = Mock() linter = linter_class.return_value linter.command_with_options = linter.name = name return linter_class
"""Platform for sensor integration.""" from __future__ import annotations import homeassistant.helpers.config_validation as cv import requests import voluptuous as vol from homeassistant.components.sensor import SensorEntity, PLATFORM_SCHEMA, SensorStateClass, SensorDeviceClass from homeassistant.const import CONF_USERNAME, CONF_PASSWORD, CONF_API_TOKEN from homeassistant.core import HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType from requests.auth import HTTPBasicAuth PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string, vol.Required(CONF_API_TOKEN): cv.string, }) def setup_platform( hass: HomeAssistant, config: ConfigType, add_entities: AddEntitiesCallback, discovery_info: DiscoveryInfoType | None = None ) -> None: """Set up the sensor platform.""" url = "https://secure.kontomierz.pl/k4/user_accounts.json?api_key=" + config.get(CONF_API_TOKEN) payload = {} headers = { 'Content-Type': 'application/json', 'Accept': 'application/json', } response = requests.get(url, auth=HTTPBasicAuth(config.get(CONF_USERNAME), config.get(CONF_PASSWORD)), headers=headers, data=payload) response_json = response.json() for x in response_json: account = x.get('user_account') add_entities( [KontomierzSensor(hass, config, account.get('bank_name') + " - " + account.get('display_name'), account.get('iban'))]) class KontomierzSensor(SensorEntity): """Representation of a Sensor.""" def __init__(self, hass, config: dict, entity_name: string, iban: string) -> None: self._attr_device_class = SensorDeviceClass.MONETARY self._attr_state_class = SensorStateClass.MEASUREMENT self._state = None self.hass = hass self.username = config.get(CONF_USERNAME) self.password = config.get(CONF_PASSWORD) self.apiToken = config.get(CONF_API_TOKEN) self.entity_name = entity_name self.iban = iban @property def unique_id(self) -> str | None: return "kontomierz_sensor" + self.entity_name @property def name(self) -> str: return self.entity_name @property def state(self): """Return the state of the sensor.""" return self._state def update(self) -> None: """Fetch new state data for the sensor. This is the only method that should fetch new data for Home Assistant. """ url = "https://secure.kontomierz.pl/k4/user_accounts.json?api_key=" + self.apiToken response = requests.get(url, auth=HTTPBasicAuth(self.username, self.password), headers={ 'Content-Type': 'application/json', 'Accept': 'application/json', }, data={}) response_json = response.json() result = 0.0 for x in response_json: user_account = x.get('user_account') if self.iban == user_account.get('iban'): result = float(user_account.get('balance')) self._attr_native_unit_of_measurement = user_account.get('currency_name') self._state = result
import torch import numpy as np def get_sigmas(config): if config.model.sigma_dist == 'geometric': sigmas = torch.tensor( np.exp(np.linspace(np.log(config.model.sigma_begin), np.log(config.model.sigma_end), config.model.num_classes))).float().to(config.device) elif config.model.sigma_dist == 'uniform': sigmas = torch.tensor( np.linspace(config.model.sigma_begin, config.model.sigma_end, config.model.num_classes) ).float().to(config.device) else: raise NotImplementedError('sigma distribution not supported') return sigmas @torch.no_grad() def anneal_Langevin_dynamics(x_mod, scorenet, sigmas, n_steps_each=200, step_lr=0.000008, final_only=False, verbose=False, denoise=True, add_noise=True): images = [] with torch.no_grad(): for c, sigma in enumerate(sigmas): labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c #dummy target 1...T depending on iteration labels = labels.long() step_size = step_lr * (sigma / sigmas[-1]) ** 2 for s in range(n_steps_each): grad = scorenet(x_mod, labels) #choose whether to add random noise during each gradient ascent step if add_noise: noise = torch.randn_like(x_mod) else: noise = torch.zeros_like(x_mod) #calculate l2 norms of gradient (score) and the additive noise for logging grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean() noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean() x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2) #core Langevin step #calc l2 norm of iterate variable for logging image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean() #calc snr as scaled version of [||s(x, \sigma_i)|| / ||z_t||] and mean of score for logging snr = np.sqrt(step_size / 2.) * grad_norm / noise_norm grad_mean_norm = torch.norm(grad.mean(dim=0).view(-1)) ** 2 * sigma ** 2 if not final_only: images.append(x_mod.to('cpu')) if verbose: print("level: {}, step_size: {}, grad_norm: {}, image_norm: {}, snr: {}, grad_mean_norm: {}".format( c, step_size, grad_norm.item(), image_norm.item(), snr.item(), grad_mean_norm.item())) #final denoising step if desired - removes the very last additive z_L if denoise: last_noise = (len(sigmas) - 1) * torch.ones(x_mod.shape[0], device=x_mod.device) last_noise = last_noise.long() x_mod = x_mod + sigmas[-1] ** 2 * scorenet(x_mod, last_noise) images.append(x_mod.to('cpu')) if final_only: return [x_mod.to('cpu')] else: return images @torch.no_grad() def langevin_Inverse(x_mod, y, A, scorenet, sigmas, n_steps_each=200, step_lr=0.000008, final_only=False, verbose=False, denoise=True, add_noise=True, decimate_sigma=None, mode=None, true_x=None): images = [] #if desired, decimate the number of noise scales to speed up inference if decimate_sigma is not None: sigmas_temp = sigmas[0:-1:decimate_sigma].tolist() #grab every decimate_sigma'th value except the last one sigmas_temp.append(sigmas[-1]) #add the last sigma value back to the list # num_sigmas = sigmas.shape[0] // decimate_sigma # sigmas_temp = [] # for i in range(num_sigmas): # sigmas_temp.append(sigmas[-1]) sigmas = sigmas_temp #swap the new decimated sigma list for the main one mse = torch.nn.MSELoss() N, C, H, W = x_mod.shape steps = np.geomspace(start=5, stop=1, num=len(sigmas)) c2 = 1 with torch.no_grad(): #outer loop over noise scales for c, sigma in enumerate(sigmas): #dummy target 1...T depending on iteration labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c labels = labels.long() #step_size = step_lr * (sigma / sigmas[-1]) ** 2 step_size = steps[c] #Inner loop over T for s in range(n_steps_each): #s(x_t) 
~= \grad_x log p(x) -- THE PRIOR grad = scorenet(x_mod, labels) prior_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean() #prior_mean_norm = torch.norm(grad.mean(dim=0).view(-1)) ** 2 * sigma ** 2 #calculate the maximum likelihood gradient - i.e. MSE gradient #A should be [N, m, C * H * W], x should be [N, C, H, W], y should be [N, m, 1] if mode=='denoising': Axt = x_mod mle_grad = (Axt - y) * (1 / N) #for denoising, y has same dimension as x else: Axt = torch.matmul(A, x_mod.view(N, -1, 1)) mle_grad = torch.matmul(torch.transpose(A, -2, -1), Axt - y).view(N, C, H, W) * c2 #MSE gradient #mle_grad = torch.matmul(torch.transpose(A, -2, -1), torch.sign(Axt - y)).view(N, C, H, W) * (1 / N) #L1 error gradient likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean() #likelihood_mean_norm = torch.norm(mle_grad.mean(dim=0).view(-1)) ** 2 if c == 0 and s == 0: c2 = prior_norm.item() / likelihood_norm.item() mle_grad = mle_grad * c2 #MSE gradient likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean() #The final gradient grad = grad - mle_grad grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean() #grad_mean_norm = torch.norm(grad.mean(dim=0).view(-1)) ** 2 #choose whether to add random noise during each gradient ascent step if add_noise: noise = torch.randn_like(x_mod) else: noise = torch.zeros_like(x_mod) x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2) #core Langevin step #calc l2 norm of iterate variable for logging image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean() noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean() snr = np.sqrt(step_size / 2.) * prior_norm / noise_norm mse_iter = mse(Axt, y) if true_x is not None: mse_true = mse(true_x, x_mod) if not final_only: images.append(x_mod.to('cpu')) if verbose: print("\nlevel: {}, step_size: {:.4f}, prior_norm: {:.4f}, likelihood_norm: {:.4f}, grad_norm: {:.4f} \ image_norm: {:.4f}, train_mse: {:.4f}".format( \ c, step_size, prior_norm.item(), likelihood_norm.item(), grad_norm.item(), image_norm.item(), \ mse_iter.item())) if true_x is not None: print("true_mse: {:.4f}".format(mse_true.item())) #final denoising step if desired - removes the very last additive z_L if denoise: last_noise = (len(sigmas) - 1) * torch.ones(x_mod.shape[0], device=x_mod.device) last_noise = last_noise.long() x_mod = x_mod + sigmas[-1] ** 2 * scorenet(x_mod, last_noise) images.append(x_mod.to('cpu')) if final_only: return [x_mod.to('cpu')] else: return images @torch.no_grad() def inverse_solver(x_mod, y, A, scorenet, sigmas, lr = [5, 1], c1=1, c2=1, auto_c2=True, final_only=False, verbose=False, likelihood_every=1, decimate_sigma=None, mode=None, true_x=None, sigma_type = 'subsample', likelihood_type="l2"): images = [] #if desired, decimate the number of noise scales to speed up inference if decimate_sigma is not None: if sigma_type == 'subsample': #grab equally-spaced sigma values sigmas_temp = sigmas[0:-1:decimate_sigma].tolist() sigmas_temp.append(sigmas[-1]) elif sigma_type == 'last': #grab just the last sigma value multiple times num_sigmas = sigmas.shape[0] // decimate_sigma sigmas_temp = [] for i in range(num_sigmas): sigmas_temp.append(sigmas[-1]) else: sigmas_temp = sigmas sigmas = sigmas_temp mse = torch.nn.MSELoss() N, C, H, W = x_mod.shape steps = np.geomspace(start=lr[0], stop=lr[1], num=len(sigmas)) likelihood_norm = 0 with torch.no_grad(): if sigma_type == 'last': labels = torch.ones(x_mod.shape[0], device=x_mod.device) * 1099 
labels = labels.long() for c, sigma in enumerate(sigmas): if sigma_type == 'subsample': labels = torch.ones(x_mod.shape[0], device=x_mod.device) * decimate_sigma * c labels = labels.long() elif sigma_type != 'last': labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c labels = labels.long() step_size = steps[c] #s(x_t) ~= \grad_x log p(x) -- THE PRIOR grad = scorenet(x_mod, labels) * c1 prior_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean() if c % likelihood_every == 0: #\grad_x log p(y | x) -- LIKELIHOOD if mode=='denoising': Axt = x_mod if likelihood_type == "l2": mle_grad = (Axt - y) * c2 elif likelihood_type == "l1": mle_grad = torch.sign(Axt - y) * c2 else: Axt = torch.matmul(A, x_mod.view(N, -1, 1)) if likelihood_type == "l2": mle_grad = torch.matmul(torch.transpose(A, -2, -1), Axt - y).view(N, C, H, W) * c2 elif likelihood_type == "l1": mle_grad = torch.matmul(torch.transpose(A, -2, -1), torch.sign(Axt - y)).view(N, C, H, W) * c2 likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean() if auto_c2 and c == 0: c2 = prior_norm.item() / likelihood_norm.item() mle_grad = mle_grad * c2 #MSE gradient likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean() grad = grad - mle_grad grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean() x_mod = x_mod + step_size * grad #x_mod = torch.clamp(x_mod, 0.0, 1.0) #calc l2 norm of iterate variable for logging image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean() mse_iter = mse(Axt, y) if true_x is not None: mse_true = mse(true_x, x_mod) if not final_only: images.append(x_mod.cpu()) if verbose: print("\n iteration: {}, sigma: {:.4f}, step_size: {:.4f}, prior_norm: {:.4f}, likelihood_norm: {:.4f}, grad_norm: {:.4f} \ image_norm: {:.4f}, train_mse: {:.4f}".format( \ c, sigma, step_size, prior_norm.item(), likelihood_norm.item(), grad_norm.item(), image_norm.item(), \ mse_iter.item())) if true_x is not None: print("true_mse: {:.4f}".format(mse_true.item())) if final_only: return [x_mod.to('cpu')] else: return images @torch.no_grad() def anneal_Langevin_dynamics_inpainting(x_mod, refer_image, scorenet, sigmas, image_size, n_steps_each=100, step_lr=0.000008): """ Currently only good for 32x32 images. Assuming the right half is missing. """ images = [] #refer_image is the untainted x (?) 
#right now this only works with 3-channel images refer_image = refer_image.unsqueeze(1).expand(-1, x_mod.shape[1], -1, -1, -1) refer_image = refer_image.contiguous().view(-1, 3, image_size, image_size) x_mod = x_mod.view(-1, 3, image_size, image_size) cols = image_size // 2 half_refer_image = refer_image[..., :cols] with torch.no_grad(): for c, sigma in enumerate(sigmas): labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c labels = labels.long() step_size = step_lr * (sigma / sigmas[-1]) ** 2 for s in range(n_steps_each): images.append(x_mod.to('cpu')) corrupted_half_image = half_refer_image + torch.randn_like(half_refer_image) * sigma x_mod[:, :, :, :cols] = corrupted_half_image noise = torch.randn_like(x_mod) * np.sqrt(step_size * 2) grad = scorenet(x_mod, labels) x_mod = x_mod + step_size * grad + noise print("class: {}, step_size: {}, mean {}, max {}".format(c, step_size, grad.abs().mean(), grad.abs().max())) return images @torch.no_grad() def anneal_Langevin_dynamics_interpolation(x_mod, scorenet, sigmas, n_interpolations, n_steps_each=200, step_lr=0.000008, final_only=False, verbose=False): images = [] n_rows = x_mod.shape[0] x_mod = x_mod[:, None, ...].repeat(1, n_interpolations, 1, 1, 1) x_mod = x_mod.reshape(-1, *x_mod.shape[2:]) for c, sigma in enumerate(sigmas): labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c labels = labels.long() step_size = step_lr * (sigma / sigmas[-1]) ** 2 for s in range(n_steps_each): grad = scorenet(x_mod, labels) noise_p = torch.randn(n_rows, x_mod.shape[1], x_mod.shape[2], x_mod.shape[3], device=x_mod.device) noise_q = torch.randn(n_rows, x_mod.shape[1], x_mod.shape[2], x_mod.shape[3], device=x_mod.device) angles = torch.linspace(0, np.pi / 2., n_interpolations, device=x_mod.device) noise = noise_p[:, None, ...] * torch.cos(angles)[None, :, None, None, None] + \ noise_q[:, None, ...] * torch.sin(angles)[None, :, None, None, None] noise = noise.reshape(-1, *noise.shape[2:]) grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean() noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean() image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean() x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2) snr = np.sqrt(step_size / 2.) * grad_norm / noise_norm if not final_only: images.append(x_mod.to('cpu')) if verbose: print( "level: {}, step_size: {}, image_norm: {}, grad_norm: {}, snr: {}".format( c, step_size, image_norm.item(), grad_norm.item(), snr.item())) if final_only: return [x_mod.to('cpu')] else: return images
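A minimal usage sketch for get_sigmas and anneal_Langevin_dynamics above; the config namespace and the ToyScore network are stand-ins invented for illustration, not part of the original training code.

import types
import torch
import torch.nn as nn

# Stand-in config exposing only the fields get_sigmas reads (values are illustrative).
config = types.SimpleNamespace(
    device='cpu',
    model=types.SimpleNamespace(sigma_dist='geometric', sigma_begin=1.0,
                                sigma_end=0.01, num_classes=10),
)
sigmas = get_sigmas(config)

# Toy score network: ignores the noise-level labels and returns -x,
# the score of a standard normal prior, purely for illustration.
class ToyScore(nn.Module):
    def forward(self, x, labels):
        return -x

x0 = torch.randn(4, 1, 8, 8)
samples = anneal_Langevin_dynamics(x0, ToyScore(), sigmas, n_steps_each=5,
                                   step_lr=1e-5, final_only=True, verbose=False)
print(samples[0].shape)  # torch.Size([4, 1, 8, 8])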
""" User Animation Card =================== Copyright (c) 2019 Ivanov Yuri For suggestions and questions: <kivydevelopment@gmail.com> This file is distributed under the terms of the same license, as the Kivy framework. Example ------- from kivymd.app import MDApp from kivy.lang import Builder from kivy.factory import Factory from kivymd.toast import toast from kivymd.theming import ThemeManager from kivymd.uix.useranimationcard import MDUserAnimationCard from kivymd.uix.button import MDIconButton from kivymd.uix.list import ILeftBodyTouch # Your content for a contact card. Builder.load_string(''' #:import get_hex_from_color kivy.utils.get_hex_from_color <TestAnimationCard@BoxLayout> orientation: 'vertical' padding: dp(10) spacing: dp(10) size_hint_y: None height: self.minimum_height BoxLayout: size_hint_y: None height: self.minimum_height Widget: MDRoundFlatButton: text: "Free call" Widget: MDRoundFlatButton: text: "Free message" Widget: OneLineIconListItem: text: "Video call" IconLeftSampleWidget: icon: 'camera-front-variant' TwoLineIconListItem: text: "Call Viber Out" secondary_text: "[color=%s]Advantageous rates for calls[/color]" % get_hex_from_color(app.theme_cls.primary_color) IconLeftSampleWidget: icon: 'phone' TwoLineIconListItem: text: "Call over mobile network" secondary_text: "[color=%s]Operator's tariffs apply[/color]" % get_hex_from_color(app.theme_cls.primary_color) IconLeftSampleWidget: icon: 'remote' ''') class IconLeftSampleWidget(ILeftBodyTouch, MDIconButton): pass class Example(MDApp): title = "Example Animation Card" def __init__(self, **kwargs): super().__init__(**kwargs) self.user_animation_card = None def build(self): def main_back_callback(): toast('Close card') if not self.user_animation_card: self.user_animation_card = MDUserAnimationCard( user_name="Lion Lion", path_to_avatar="./assets/african-lion-951778_1280.jpg", callback=main_back_callback) self.user_animation_card.box_content.add_widget( Factory.TestAnimationCard()) self.user_animation_card.open() Example().run() """ from kivy.clock import Clock from kivy.animation import Animation from kivy.core.window import Window from kivy.metrics import dp, sp from kivy.properties import ObjectProperty, StringProperty, ListProperty from kivy.lang import Builder from kivy.uix.boxlayout import BoxLayout from kivy.uix.floatlayout import FloatLayout from kivy.uix.modalview import ModalView from kivymd.uix.behaviors import SpecificBackgroundColorBehavior from kivymd.uix.button import MDIconButton from kivymd.theming import ThemableBehavior Builder.load_string( """ #:import Window kivy.core.window.Window #:import StiffScrollEffect kivymd.stiffscroll.StiffScrollEffect <ModifiedToolbar> size_hint_y: None height: root.theme_cls.standard_increment padding: [root.theme_cls.horizontal_margins - dp(12), 0] BoxLayout: id: left_actions orientation: 'horizontal' size_hint_x: None padding: [0, (self.height - dp(48))/2] BoxLayout: padding: dp(12), 0 MDLabel: font_style: 'H6' opposite_colors: root.opposite_colors theme_text_color: 'Custom' text_color: root.specific_text_color text: root.title shorten: True shorten_from: 'right' BoxLayout: id: right_actions orientation: 'horizontal' size_hint_x: None padding: [0, (self.height - dp(48))/2] <UserAnimationCard> canvas: Color: rgba: root.theme_cls.bg_dark \ if root.theme_cls.theme_style == 'Dark' \ else root.theme_cls.bg_light Rectangle: size: self.size pos: self.pos FitImage: id: image source: root.path_to_avatar size_hint: 1, None height: Window.height * 40 // 100 y: Window.height - 
self.height allow_stretch: True keep_ratio: False canvas.after: Color: rgba: root._primary_color Rectangle: size: self.size pos: self.pos MDLabel: id: user_name font_style: 'H4' theme_text_color: 'Custom' color: 1, 1, 1, 1 shorten: True shorten_from: 'right' text: root.user_name size_hint_y: None height: self.texture_size[1] ModifiedToolbar: id: toolbar md_bg_color: 0, 0, 0, 0 left_action_items: [['arrow-left', lambda x: root._callback_back()]] y: Window.height - self.height ScrollView: id: scroll y: -image.height effect_cls: StiffScrollEffect scroll_distance: 100 GridLayout: id: box_content size_hint_y: None height: self.minimum_height cols: 1 canvas: Color: rgba: root.theme_cls.bg_dark \ if root.theme_cls.theme_style == 'Dark' \ else root.theme_cls.bg_light Rectangle: size: self.size pos: self.pos """ ) class MDUserAnimationCard(ThemableBehavior, ModalView): user_name = StringProperty() path_to_avatar = StringProperty() box_content = ObjectProperty() callback = ObjectProperty() _anim_bottom = True def __init__(self, **kwargs): super().__init__(**kwargs) self._primary_color = self.theme_cls.primary_color self._primary_color[3] = 0 self.user_animation_card = UserAnimationCard( user_name=self.user_name, path_to_avatar=self.path_to_avatar, _callback_back=self._callback_back, _primary_color=self._primary_color, ) self.user_animation_card.ids.user_name.pos = ( dp(15), Window.height - self.user_animation_card.ids.image.height, ) self.box_content = self.user_animation_card.ids.box_content self.add_widget(self.user_animation_card) self._obj_avatar = self.user_animation_card.ids.image self._obj_user_name = self.user_animation_card.ids.user_name self._obj_toolbar = self.user_animation_card.ids.toolbar self._obj_scroll = self.user_animation_card.ids.scroll self._set_current_pos_objects() def _callback_back(self): self.dismiss() if self.callback: self.callback() def on_open(self): self._primary_color = self.theme_cls.primary_color self._primary_color[3] = 0 self.user_animation_card._primary_color = self._primary_color def _set_current_pos_objects(self): self._avatar_y = self._obj_avatar.y self._toolbar_y = self._obj_toolbar.y self._user_name_y = self._obj_user_name.y self._scroll_y = self._obj_scroll.y def on_touch_move(self, touch): if touch.ud["swipe_begin"] < touch.y: if self._anim_bottom: self._anim_bottom = False self.animation_to_top() else: if not self._anim_bottom: self._anim_bottom = True self.animation_to_bottom() def on_touch_down(self, touch): touch.ud["swipe_begin"] = touch.y return super().on_touch_down(touch) def on_touch_up(self, touch): touch.ud["swipe_begin"] = 0 def animation_to_bottom(self): Animation(y=self._scroll_y, d=0.4, t="in_out_cubic").start( self._obj_scroll ) Animation(y=self._user_name_y, d=0.5, x=dp(15), t="in_out_cubic").start( self._obj_user_name ) Animation(font_size=sp(36), d=0.3, t="in_out_cubic").start( self._obj_user_name ) Animation(_primary_color=[0, 0, 0, 0], d=0.3, t="in_out_cubic").start( self.user_animation_card ) Animation(y=self._avatar_y, d=0.4, t="in_out_cubic").start( self._obj_avatar ) def animation_to_top(self): user_name_y = ( Window.height - self._obj_toolbar.height + (self.theme_cls.standard_increment // 2 - dp(12)) ) user_name_x = self.theme_cls.horizontal_margins + dp(12) * 5 Animation(y=-self._obj_toolbar.height, d=0.4, t="in_out_cubic").start( self._obj_scroll ) Animation(y=user_name_y, d=0.3, x=user_name_x, t="in_out_cubic").start( self._obj_user_name ) Animation(font_size=sp(20), d=0.3, t="in_out_cubic").start( self._obj_user_name ) 
Animation( _primary_color=self.theme_cls.primary_color, d=0.3, t="in_out_cubic" ).start(self.user_animation_card) Animation(y=self._obj_avatar.y + 30, d=0.4, t="in_out_cubic").start( self._obj_avatar ) class UserAnimationCard(ThemableBehavior, FloatLayout): user_name = StringProperty() path_to_avatar = StringProperty() _callback_back = ObjectProperty() _primary_color = ListProperty() class ModifiedToolbar( ThemableBehavior, SpecificBackgroundColorBehavior, BoxLayout ): left_action_items = ListProperty() title = StringProperty() def __init__(self, **kwargs): super().__init__(**kwargs) self.bind(specific_text_color=self.update_action_bar_text_colors) Clock.schedule_once( lambda x: self.on_left_action_items(0, self.left_action_items) ) def on_left_action_items(self, instance, value): self.update_action_bar(self.ids["left_actions"], value) def update_action_bar(self, action_bar, action_bar_items): action_bar.clear_widgets() new_width = 0 for item in action_bar_items: new_width += dp(48) action_bar.add_widget( MDIconButton( icon=item[0], on_release=item[1], opposite_colors=True, text_color=self.specific_text_color, theme_text_color="Custom", ) ) action_bar.width = new_width def update_action_bar_text_colors(self, instance, value): for child in self.ids["left_actions"].children: child.text_color = self.specific_text_color
# Module defining the API validators
from marshmallow import Schema, fields, validate

fields.Email.default_error_messages['required'] = 'Email jest wymagany'
fields.Email.default_error_messages['invalid'] = 'Niepoprawny adres email'


class VUser(Schema):
    # Registration validator
    nick = fields.String(
        required=True,
        validate=validate.Length(min=4, max=30, error='Login musi mieć 4 - 30 znaków'))
    email = fields.Email(required=True)
    password = fields.String(
        required=True,
        validate=validate.Length(min=8, max=30, error='Hasło musi mieć 8 - 30 znakow'))


class VUserLogin(Schema):
    # Login validator
    email = fields.Email(required=True)
    password = fields.String(
        required=True,
        validate=validate.Length(min=8, max=30, error='Hasło jest wymagane'))


class VEmail(Schema):
    # Email address validator
    email = fields.Email(required=True)


class VUserPatch(Schema):
    # Validator for requests that change fields of a user record
    field = fields.String(required=True, validate=validate.OneOf(['nick']))
    value = fields.String(required=True)


class VEntry(Schema):
    # Diary entry validator
    value = fields.Number(required=True)
    description = fields.String()


class VDiary(Schema):
    # Diary validator
    name = fields.String(required=True)
    max = fields.Number(required=True)
    date = fields.Number()
    color = fields.String(validate=validate.Regexp("#[0-9a-fA-F]{6}"))
    entries = fields.List(fields.Nested(VEntry), required=True)


class VJson(Schema):
    # JSON payload validator
    diaries = fields.List(fields.Nested(VDiary))


class VDiaryIndex(Schema):
    # Diary index validator
    index = fields.Integer(required=True)
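A brief hedged sketch (payload values invented) of how these schemas would typically be used to validate an incoming registration request.

# Hypothetical example: validating a registration payload with VUser.
payload = {"nick": "newuser", "email": "user@example.com", "password": "s3cretpass"}
errors = VUser().validate(payload)   # returns an empty dict when the payload is valid
if errors:
    print("validation failed:", errors)
else:
    print("payload accepted")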
class Option:
    def __init__(self, option_info):
        self.option_info = option_info
        self.flag = option_info['flag']

    def mkdir(self):
        if not self.flag:
            return False
        return self.option_info['mkdir']

    def dir_name(self, problem):
        if not self.flag:
            return ''
        if not self.mkdir():
            return ''
        return self.replace_name(self.option_info['dir_name'], problem) + '/'

    def source_name(self, problem):
        if not self.flag:
            return problem['problem_id']
        return self.replace_name(self.option_info['source_name'], problem)

    def replace_name(self, value, problem):
        value = value.replace('[NO]', problem['problem_id'])
        value = value.replace('[TITLE]', problem['problem_title'])
        return value

    def get_ext(self, language):
        # Maps a judge language name to a file extension.
        # Returns (error_flag, value): (False, ext) on success, (True, message) otherwise.
        extensions = {
            'C': '.c', 'C++': '.cpp', 'C++11': '.cpp', 'C++14': '.cpp', 'C++17': '.cpp',
            'Java': '.java', 'Java (OpenJDK)': '.java', 'C11': '.c',
            'Python 2': '.py', 'Python 3': '.py', 'PyPy2': '.py', 'PyPy3': '.py',
            'Ruby2.5': '.rb', 'Kotlin': '.kt', 'Swift': '.swift', 'C# 6.0': '.cs',
            'Text': '.txt', 'node.js': '.js', 'Go': '.go', 'F#': '.fs', 'PHP': '.php',
            'Pascal': '.pas', 'Lua': '.lua', 'Perl': '.pl',
            'Objective-C': '.m', 'Objective-C++': '.mm',
            'C (Clang)': '.c', 'C++11 (Clang)': '.cpp', 'C++14 (Clang)': '.cpp', 'C++17 (Clang)': '.cpp',
            'Golfscript': '.gs', 'Bash': '.sh', 'Fortran': '.f95', 'Scheme': '.scm',
            'Ada': '.ada', 'awk': '.awk', 'OCaml': '.ml', 'Brainfuck': '.bf',
            'Whitespace': '.ws', 'Tcl': '.tcl', 'Assembly (32bit)': '.asm',
            'D': '.d', 'Clojure': '.clj', 'Rhino': '.js', 'Cobol': '.cob',
            'SpiderMonkey': '.js', 'Pike': '.pike', 'sed': '.sed', 'Rust': '.rs',
            'Boo': '.boo', 'Intercal': '.i', 'bc': '.bc', 'Nemerle': '.n',
            'Cobra': '.cobra', 'Algol 68': '.a68', 'Befunge': '.bf', 'Haxe': '.hx',
            'LOLCODE': '.lol', 'VB.NET 4.0': '.vb', '아희': '.aheui'
        }
        if language not in extensions:
            return True, 'Unknown extension'
        return False, extensions[language]
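A short hedged example of how Option composes a save path; the option_info and problem dictionaries below are hypothetical.

# Hypothetical configuration and problem metadata for illustration.
option_info = {
    'flag': True,
    'mkdir': True,
    'dir_name': '[NO]_[TITLE]',
    'source_name': '[NO]',
}
problem = {'problem_id': '1000', 'problem_title': 'A+B'}

opt = Option(option_info)
err, ext = opt.get_ext('Python 3')
if not err:
    path = opt.dir_name(problem) + opt.source_name(problem) + ext
    print(path)  # -> 1000_A+B/1000.py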
import copy import torch.nn as nn from torch.quantization.fuser_method_mappings import get_fuser_method # for backward compatiblity from torch.quantization.fuser_method_mappings import fuse_conv_bn # noqa: F401 from torch.quantization.fuser_method_mappings import fuse_conv_bn_relu # noqa: F401 from typing import List, Optional # Generalization of getattr def _get_module(model, submodule_key): tokens = submodule_key.split('.') cur_mod = model for s in tokens: cur_mod = getattr(cur_mod, s) return cur_mod # Generalization of setattr def _set_module(model, submodule_key, module): tokens = submodule_key.split('.') sub_tokens = tokens[:-1] cur_mod = model for s in sub_tokens: cur_mod = getattr(cur_mod, s) setattr(cur_mod, tokens[-1], module) def fuse_known_modules(mod_list, additional_fuser_method_mapping=None): r"""Returns a list of modules that fuses the operations specified in the input module list. Fuses only the following sequence of modules: conv, bn conv, bn, relu conv, relu linear, bn linear, relu For these sequences, the first element in the output module list performs the fused operation. The rest of the elements are set to nn.Identity() """ types = tuple(type(m) for m in mod_list) fuser_method = get_fuser_method(types, additional_fuser_method_mapping) if fuser_method is None: raise NotImplementedError("Cannot fuse modules: {}".format(types)) new_mod : List[Optional[nn.Module]] = [None] * len(mod_list) fused = fuser_method(*mod_list) # NOTE: forward hooks not processed in the two following for loops will be lost after the fusion # Move pre forward hooks of the base module to resulting fused module for handle_id, pre_hook_fn in mod_list[0]._forward_pre_hooks.items(): fused.register_forward_pre_hook(pre_hook_fn) del mod_list[0]._forward_pre_hooks[handle_id] # Move post forward hooks of the last module to resulting fused module for handle_id, hook_fn in mod_list[-1]._forward_hooks.items(): fused.register_forward_hook(hook_fn) del mod_list[-1]._forward_hooks[handle_id] new_mod[0] = fused for i in range(1, len(mod_list)): identity = nn.Identity() identity.training = mod_list[0].training new_mod[i] = identity return new_mod def _fuse_modules(model, modules_to_fuse, fuser_func=fuse_known_modules, fuse_custom_config_dict=None): if fuse_custom_config_dict is None: fuse_custom_config_dict = {} additional_fuser_method_mapping = fuse_custom_config_dict.get("additional_fuser_method_mapping", {}) mod_list = [] for item in modules_to_fuse: mod_list.append(_get_module(model, item)) # Fuse list of modules new_mod_list = fuser_func(mod_list, additional_fuser_method_mapping) # Replace original module list with fused module list for i, item in enumerate(modules_to_fuse): _set_module(model, item, new_mod_list[i]) def fuse_modules(model, modules_to_fuse, inplace=False, fuser_func=fuse_known_modules, fuse_custom_config_dict=None): r"""Fuses a list of modules into a single module Fuses only the following sequence of modules: conv, bn conv, bn, relu conv, relu linear, relu bn, relu All other sequences are left unchanged. For these sequences, replaces the first item in the list with the fused module, replacing the rest of the modules with identity. Args: model: Model containing the modules to be fused modules_to_fuse: list of list of module names to fuse. Can also be a list of strings if there is only a single list of modules to fuse. 
inplace: bool specifying if fusion happens in place on the model, by default a new model is returned fuser_func: Function that takes in a list of modules and outputs a list of fused modules of the same length. For example, fuser_func([convModule, BNModule]) returns the list [ConvBNModule, nn.Identity()] Defaults to torch.quantization.fuse_known_modules `fuse_custom_config_dict`: custom configuration for fusion .. code-block:: python # Example of fuse_custom_config_dict fuse_custom_config_dict = { # Additional fuser_method mapping "additional_fuser_method_mapping": { (torch.nn.Conv2d, torch.nn.BatchNorm2d): fuse_conv_bn }, } Returns: model with fused modules. A new copy is created if inplace=True. Examples:: >>> m = myModel() >>> # m is a module containing the sub-modules below >>> modules_to_fuse = [ ['conv1', 'bn1', 'relu1'], ['submodule.conv', 'submodule.relu']] >>> fused_m = torch.ao.quantization.fuse_modules(m, modules_to_fuse) >>> output = fused_m(input) >>> m = myModel() >>> # Alternately provide a single list of modules to fuse >>> modules_to_fuse = ['conv1', 'bn1', 'relu1'] >>> fused_m = torch.ao.quantization.fuse_modules(m, modules_to_fuse) >>> output = fused_m(input) """ if not inplace: model = copy.deepcopy(model) if all(isinstance(module_element, str) for module_element in modules_to_fuse): # Handle case of modules_to_fuse being a list _fuse_modules(model, modules_to_fuse, fuser_func, fuse_custom_config_dict) else: # Handle case of modules_to_fuse being a list of lists for module_list in modules_to_fuse: _fuse_modules(model, module_list, fuser_func, fuse_custom_config_dict) return model
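A small sketch of how the fuse_modules defined above could be exercised; TinyNet is a made-up module, and the fused types noted in the comments assume the standard conv-bn-relu fuser mapping in eval mode.

import torch
import torch.nn as nn

class TinyNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 8, kernel_size=3)
        self.bn1 = nn.BatchNorm2d(8)
        self.relu1 = nn.ReLU()

    def forward(self, x):
        return self.relu1(self.bn1(self.conv1(x)))

m = TinyNet().eval()                        # conv+bn fusion is only defined for eval mode here
fused = fuse_modules(m, ['conv1', 'bn1', 'relu1'])
print(type(fused.conv1))                    # fused Conv/ReLU module
print(type(fused.bn1), type(fused.relu1))   # both nn.Identity after fusion
out = fused(torch.randn(1, 3, 16, 16))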
################################################################################# # The Institute for the Design of Advanced Energy Systems Integrated Platform # Framework (IDAES IP) was produced under the DOE Institute for the # Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021 # by the software owners: The Regents of the University of California, through # Lawrence Berkeley National Laboratory, National Technology & Engineering # Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University # Research Corporation, et al. All rights reserved. # # Please see the files COPYRIGHT.md and LICENSE.md for full copyright and # license information. ################################################################################# """ # Institute for the Design of Advanced Energy Systems Process Systems # Engineering Framework (IDAES PSE Framework) Copyright (c) 2018, by the # software owners: The Regents of the University of California, through # Lawrence Berkeley National Laboratory, National Technology & Engineering # Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia # University Research Corporation, et al. All rights reserved. # # Please see the files COPYRIGHT.txt and LICENSE.txt for full copyright and # license information, respectively. Both files are also available online # at the URL "https://github.com/IDAES/idaes". """ __all__ = [ "ripemodel", "ems", "rspace", "sharedata", "debug", "powerlawp5", "powerlaw2", "powerlaw3", "powerlaw4", "avrami2", "avrami3", "avrami4", "avrami5", "randomnuc", "ptompkins", "jander", "antijander", "valensi", "parabolic", "gb3d", "zlt", "grain", # PYLINT-TODO-FIX: this seems to be a genuine error since "massact" is not imported from .mechs "massact", # pylint: disable=undefined-all-variable "massactm", "getmechs", ] from .main import ripemodel, ripewrite, print_results # noqa: F401 from .shared import rspace, sharedata, debug # noqa: F401 from .atermconstruct import ( makeaterm, formatinputs, checkargs, normalizefeatures, ) # noqa: F401 from .kinforms import lin, linjac, arr, arrjac, refarr, refarrjac # noqa: F401 from .mechs import ( powerlawp5, powerlaw2, powerlaw3, powerlaw4, avrami2, avrami3, avrami4, avrami5, randomnuc, ptompkins, jander, antijander, valensi, parabolic, gb3d, zlt, grain, getmechs, massactm, ) # noqa: F401 from .genpyomo import ripeomo # noqa: F401 from .targets import ( doalamo, dopwalamo, gentargets, sstargets, dynamictargets, ) # noqa: F401 from .confinv import confinv # noqa: F401 from .emsampling import constructmodel, ems # noqa: F401 from .checkoptions import checkoptions # noqa: F401 from .bounds import stoich_cons, count_neg, get_bounds # noqa: F401
"""Test component helpers.""" # pylint: disable=protected-access from collections import OrderedDict import unittest from homeassistant import helpers from tests.common import get_test_home_assistant class TestHelpers(unittest.TestCase): """Tests homeassistant.helpers module.""" # pylint: disable=invalid-name def setUp(self): """Init needed objects.""" self.hass = get_test_home_assistant() # pylint: disable=invalid-name def tearDown(self): """Stop everything that was started.""" self.hass.stop() def test_extract_domain_configs(self): """Test the extraction of domain configuration.""" config = { 'zone': None, 'zoner': None, 'zone ': None, 'zone Hallo': None, 'zone 100': None, } self.assertEqual(set(['zone', 'zone Hallo', 'zone 100']), set(helpers.extract_domain_configs(config, 'zone'))) def test_config_per_platform(self): """Test config per platform method.""" config = OrderedDict([ ('zone', {'platform': 'hello'}), ('zoner', None), ('zone Hallo', [1, {'platform': 'hello 2'}]), ('zone 100', None), ]) assert [ ('hello', config['zone']), (None, 1), ('hello 2', config['zone Hallo'][1]), ] == list(helpers.config_per_platform(config, 'zone'))
import pandas as pd import numpy as np def top_time(ind=None, gs=None): """ Selects the location (by coordinates) which was visited for the longest period during given time interval :param ind: user id :param gs: GeoDataFrame from groupby execution containing all the data in the given time interval :return: user id (if given) and the data for the longest visited location """ aggregated = [] for tstamp, g in gs: # for each record in the GeoDataFrame if len(g) > 1: # if there is more than one record diff_places = (g['geometry'].shift(-1) != g['geometry']).iloc[:-1] # checks when coordinates change if diff_places.any(): # if there is change in locations g_res = g.reset_index() # drop index diffs = g_res.shift(-1)['datetime'] - g_res['datetime'] # find time differences (spent in location) joined_dfs = g_res.join(diffs, rsuffix='a') # add them to locations joined_dfs['geometry'] = g_res['geometry'].astype(str) # copy geometry as string point_max = joined_dfs.groupby('geometry')['datetimea'].sum().idxmax() # grouping locations find the longest time sum selected = g[g['geometry'].astype(str) == point_max] # select the location with the highest total time else: selected = g # if one location visited - copy GeoDataFrame else: selected = g aggregated.append(selected) if ind is None: return pd.concat(aggregated) else: return ind, pd.concat(aggregated) def mode_geoseries(ind, gs): """ Calculates mode for GeoSeries :param ind: identifier :param gs: GeoSeries :return: identifier and a mode for GeoSeries """ aggregated = [] for g in gs: if g[1].empty: aggregated.append(None) else: selected = g[1].mode() selected = selected.set_index(g[1].index) aggregated.append(selected) return ind, pd.concat(aggregated) def rowwise_average(gs, row_count=None): """ Calculates an average for each row in each group - rowwise. :param gs: GeoSeries :param row_count: defines how much rows should be considered :return: averaged GeoSeries rowwise """ if row_count is None: row_count = gs.groupby(level=0).size().max() return pd.Series([gs.groupby(level=0).nth(n).mean() for n in range(row_count)]) def groupwise_average(gs): """ Calculates an average from each group of GeoSeries :param gs: GeoSeries :return: averaged GeoSeries """ return gs.groupby(level=0).mean() def groupwise_normalise(gs): """ Normalises each group of GeoSeries :param gs: GeoSeries :return: normalised GeoSeries """ return gs.groupby(level=0).apply(lambda x: x / x.sum()) def groupwise_expansion(gs): """ Calculates expanding mean for each group of GeoSeries :param gs: GeoSeries :return: averaged GeoSeries """ return gs.groupby(level=0).expanding().mean() def total_normalise(gs): """ Performs complete normalisation of GeoSeries :param gs: GeoSeries :return: normalised GeoSeries """ return gs / gs.sum() def start_end(trajectories_frame): """ Compresses stops in TrajectoriesFrame by adding start and end of visits in locations :param trajectories_frame: TrajectoriesFrame object class :return: compressed TrajectoriesFrame """ to_concat = [] if 'date' not in trajectories_frame.columns: trajectories_frame['date'] = trajectories_frame.index.get_level_values(1) for gs in trajectories_frame.groupby(level=0): firsts = gs[1][gs[1]['geometry'].shift() != gs[1]['geometry']] lasts = gs[1][gs[1]['geometry'].shift(-1) != gs[1]['geometry']] firsts.loc[:, 'start'] = firsts['date'] lasts = lasts.set_index(firsts.index) firsts.loc[:, 'end'] = lasts['date'] firsts = firsts[firsts['start'] != firsts['end']] to_concat.append(firsts) return pd.concat(to_concat)
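A toy, hedged example (values invented) of the kind of MultiIndex Series these aggregation helpers operate on, showing what groupwise_normalise, rowwise_average and groupwise_average return.

import pandas as pd

# Two users (index level 0) with per-location values (index level 1).
s = pd.Series(
    [3.0, 1.0, 2.0, 2.0],
    index=pd.MultiIndex.from_tuples([(1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')]),
)

print(groupwise_normalise(s))   # each user's values divided by that user's total
print(rowwise_average(s))       # mean of the n-th value across users, for n = 0, 1
print(groupwise_average(s))     # one mean per user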
import numpy as np np.show_config()
from PIL import Image as im
import numpy as np
from io import BytesIO
import csv


class outputResponse():
    def __init__(self, response):
        self.response = response

    @staticmethod
    def retrieveResult(response, returntype):
        # Dispatch on the declared return type of the HTTP response.
        if returntype == "image/png" or returntype == "image/jpeg":
            img_arr = np.array(im.open(BytesIO(response.content)))
            data = im.fromarray(img_arr)
            data.show()
        elif returntype == "text/csv":
            decoded = response.content.decode('utf-8')
            my_list = decoded.split(",")
            with open('x.csv', 'w') as file:
                writer = csv.writer(file, delimiter=',')
                writer.writerow(my_list)
        elif returntype == 1 or returntype == 0:
            print(response.content)
        else:
            print(response.content.decode('utf-8'))
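A hedged sketch of driving retrieveResult from a live HTTP response; the endpoint URL is a placeholder, and the Content-Type header is assumed to carry one of the types handled above.

import requests

resp = requests.get("https://example.com/api/result")      # placeholder endpoint
content_type = resp.headers.get("Content-Type", "")
outputResponse.retrieveResult(resp, content_type)           # dispatches on the declared type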
import pytest import os from machaon.types.file import TextFile from machaon.types.shell import Path from machaon.core.invocation import instant_return_test, instant_context def test_construct(tmp_path): FILEPATH = Path(__file__) context = instant_context() context.define_type(TextFile) f = instant_return_test(context, FILEPATH, "TextFile").value assert isinstance(f, TextFile) assert isinstance(f.path(), Path) assert f.pathstr == FILEPATH.get() p = Path(tmp_path) / "hello.txt" f = instant_return_test(context, p, "TextFile").value f.set_encoding("utf-8") assert f.encoding() == "utf-8" with f.open("w"): f.stream.write("HELLO\n") f.stream.write("WORLD") assert f.text() == "HELLO\nWORLD"
import re

# match the whole string
data1 = "aaab"
data2 = "aaaba"
pattern = r"\Aa+b\Z"
match1 = re.match(pattern, data1)
print(match1)
match2 = re.match(pattern, data2)
print(match2)

# regular expression options (case-insensitive, multiline)
data = "AaaA\n\raaaA"
pattern = r"^(a+)$"
match = re.match(pattern, data, re.I | re.M)
print(match)
print(match.group())

# search for all matches
data = "Pi = 3.14, exponent = 2.718"
pattern = r"(\d+\.\d+)"
matches = re.findall(pattern, data)
print(matches)

# replacement of the match (with a capture group)
data = re.sub(pattern, r'<f>\1</f>', data)
print(data)

# search for a match
match = re.search(pattern, data)
if match:
    print(match.group())
    print(float(match.group()))
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel( name='Rate', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('rate', models.DecimalField(null=True, verbose_name=b'Exchange rate', max_digits=8, decimal_places=4, blank=True)), ('date', models.DateField(db_index=True)), ('currency', models.CharField(default=b'USD', max_length=3, db_index=True, choices=[(b'CHF', b'CHF'), (b'EUR', b'EUR'), (b'GBP', b'GBP'), (b'USD', b'USD')])), ], options={ 'ordering': ['-date', 'currency'], }, bases=(models.Model,), ), migrations.AlterUniqueTogether( name='rate', unique_together=set([('date', 'currency')]), ), ]
DEBUG = True ALLOWED_HOSTS = ['*', ]
# coding: utf-8

# # Python crawler related
# ## 1. Class definition and usage

# In[11]:

import os
import requests
import time
import random
from lxml import etree


class Spider(object):
    def __init__(self, savePath, keyWord):
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.104 Safari/537.36",
        }
        self.keyWord = keyWord
        self.filePath = (savePath + keyWord + '/')

    def createFile(self):
        filePath = self.filePath
        if not os.path.exists(filePath):
            os.makedirs(filePath)

    def getPageNum(self):
        # Gets the total number of results returned for the search keyword and stores it in totalPageNum.
        # The count is embedded in a string such as: 1,985 Wallpapers found for "dog",
        # so a small helper extracts the digits into numList and concatenates them back into the full number.
        total = ""
        url = ("https://alpha.wallhaven.cc/search?q={}&categories=111&purity=100&sorting=relevance&order=desc").format(self.keyWord)
        html = requests.get(url)
        selector = etree.HTML(html.text)
        pageInfo = selector.xpath('//header[@class="listing-header"]/h1[1]/text()')
        string = str(pageInfo[0])
        numList = list(filter(str.isdigit, string))
        for item in numList:
            total += item
        totalPageNum = int(total)
        return totalPageNum

    def main_func(self):
        count = self.getPageNum()
        print("We have found:{} images!".format(count))


# In[13]:

s = Spider("/home/klm/work/spider/", "girl")
print(s.headers, s.filePath)
s.main_func()
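A tiny illustration of the digit-extraction trick getPageNum relies on; the header string below is a made-up example of the page header text.

# Extract 1985 from a header like: 1,985 Wallpapers found for "dog"
header_text = '1,985 Wallpapers found for "dog"'
digits = ''.join(filter(str.isdigit, header_text))
print(int(digits))  # 1985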
import logging import logging.handlers import sys import os import json import sqlite3 import signal import threading import time import difflib import vk_api from vk_api.longpoll import VkLongPoll, VkEventType import requests.exceptions cwd = os.path.dirname(os.path.abspath(__file__)) logging.basicConfig( format='%(asctime)s - %(levelname)s - %(message)s', stream=sys.stdout, level=logging.WARNING ) logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) handler = logging.handlers.RotatingFileHandler( os.path.join(cwd, 'log.txt'), maxBytes=102400 ) handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')) logger.addHandler(handler) logger.info("Запуск...") def handle_exception(exc_type, exc_value, exc_traceback): if issubclass(exc_type, requests.exceptions.RequestException): return elif issubclass(exc_type, KeyboardInterrupt): sys.__excepthook__(exc_type, exc_value, exc_traceback) return logger.error("Непойманное исключение.", exc_info=(exc_type, exc_value, exc_traceback)) sys.excepthook = handle_exception defaultConfig = { "ACCESS_TOKEN": "", "createIndex": False, "maxCacheAge": 86400, "preloadMessages": False, "customActions": False, "disableMessagesLogging": False, 'enableFlaskWebServer': False, 'useAuth': False, 'users': { 'admin':'password' }, 'port': 8080, 'https': False, 'httpsPort': 8443, 'cert': [ os.path.join(cwd, "cert.pem"), os.path.join(cwd, "key.pem") ] } def grab_token_from_args(): if len(sys.argv) > 1: defaultConfig['ACCESS_TOKEN'] = sys.argv[1] elif defaultConfig['ACCESS_TOKEN'] == "": raise Exception("Не задан ACCESS_TOKEN") if not os.path.exists(os.path.join(cwd, "config.json")): with open(os.path.join(cwd, "config.json"), 'w') as conf: grab_token_from_args() json.dump(defaultConfig, conf, indent=4) config = defaultConfig del defaultConfig else: with open(os.path.join(cwd, "config.json"), 'r') as conf: config = json.load(conf) for i in config: if i in defaultConfig: defaultConfig[i] = config[i] grab_token_from_args() if len(set(config)) - len(set(defaultConfig)) != 0: with open(os.path.join(cwd, "config.json"), 'w') as conf: json.dump(defaultConfig, conf, indent=4) config = defaultConfig del defaultConfig stop_mutex = threading.Lock() def run_flask_server(): port = config['httpsPort'] if config['https'] else config['port'] import socket ip = socket.gethostbyname(socket.gethostname()) del socket while True: try: if config['https']: logger.info("Trying to run on https://%s:%s/", ip, port) app.run( host='0.0.0.0', port=port, ssl_context=( config['cert'][0], config['cert'][1] ) ) else: logger.info("Trying to run on http://%s:%s/", ip, port) app.run(host='0.0.0.0', port=port) except OSError: port += 1 if config['enableFlaskWebServer']: from flaskWebServer import app threading.Thread(target=run_flask_server).start() if config['createIndex']: from updateIndex import indexUpdater indexUpdater() def tryAgainIfFailed(func, *args, maxRetries=5, **kwargs): c = maxRetries delay = 1 while True: try: return func(*args, **kwargs) except vk_api.exceptions.ApiError: if str(sys.exc_info()[1]).find("User authorization failed") != -1: logger.warning("Токен недействителен.") interrupt_handler(0, None) raise Warning except requests.exceptions.RequestException: if delay < 32: delay*=2 time.sleep(delay) continue except BaseException: if maxRetries == 0: logger.exception("После %s попыток %s(%s%s) завершился с ошибкой.", c, func.__name__, args, kwargs) raise Warning logger.warning("Перезапуск %s(%s%s) через %s секунд...", func.__name__, args, kwargs, delay) 
if delay < 32: delay*=2 time.sleep(delay) if maxRetries > 0: maxRetries -= 1 continue vk_session = vk_api.VkApi(token=config['ACCESS_TOKEN'],api_version='5.130') longpoll = VkLongPoll(vk_session, wait=60, mode=2) vk = vk_session.get_api() account_id = tryAgainIfFailed(vk.users.get)[0]['id'] if not config['disableMessagesLogging']: if not os.path.exists( os.path.join( cwd, "mesAct" ) ): os.makedirs( os.path.join( cwd, "mesAct" ) ) f = open( os.path.join( cwd, "mesAct", "vkGetVideoLink.html" ), 'w', encoding='utf-8' ) f.write("""<!DOCTYPE html> <html> <head> <meta charset="utf-8"> <style> html,body,iframe{ width: 100%; height: 100%; } </style> </head> <body> <p>Если видео не проигрывается, прямую ссылку можно получить через api:</p> <script> function embedLink(id) { var link = document.createElement('a'); link.href = "https://vk.com/dev/video.get?params[videos]=0_0," + id + "&params[count]=1&params[offset]=1"; link.innerText = id; link.setAttribute('target', '_blank') document.getElementsByTagName("body")[0].appendChild(link); } function embedPlayer(link) { var frame = document.createElement('iframe'); frame.src = link; frame.style = "width:100%;height:100%;"; frame.setAttribute('allowFullScreen', '') document.getElementsByTagName("body")[0].appendChild(frame); } function splitArgs(){ var args = document.location.search; var lastAmpersand = args.lastIndexOf('&'); return [args.slice(1, lastAmpersand), args.slice(lastAmpersand + 1)]; } var args = splitArgs(); embedLink(args[1]); embedPlayer(args[0]); </script> </body> </html>""") f.close() if not os.path.exists( os.path.join( cwd, "messages.db" ) ): conn = sqlite3.connect( os.path.join( cwd, "messages.db" ), check_same_thread=False, isolation_level=None, timeout=15.0 ) cursor = conn.cursor() cursor.execute("""CREATE TABLE "messages" ( "peer_id" INTEGER NOT NULL, "user_id" INTEGER NOT NULL, "message_id" INTEGER NOT NULL UNIQUE, "message" TEXT, "attachments" TEXT, "timestamp" INTEGER NOT NULL, "fwd_messages" TEXT )""") cursor.execute("""CREATE TABLE "chats_cache" ( "chat_id" INTEGER NOT NULL UNIQUE, "chat_name" TEXT NOT NULL )""") cursor.execute("""CREATE TABLE "users_cache" ( "user_id" INTEGER NOT NULL UNIQUE, "user_name" TEXT NOT NULL )""") account_name = tryAgainIfFailed( vk.users.get, user_id=account_id )[0] account_name = f"{account_name['first_name']} {account_name['last_name']}" cursor.execute( """INSERT INTO users_cache (user_id,user_name) VALUES (?,?)""", (account_id, account_name,) ) conn.commit() else: conn = sqlite3.connect( os.path.join(cwd, "messages.db"), check_same_thread=False, timeout=15.0 ) cursor = conn.cursor() if not os.path.exists( os.path.join( cwd, "mesAct", "bootstrap.css" ) ): f = open( os.path.join( cwd, "mesAct", "bootstrap.css" ), 'w', encoding='utf-8' ) f.write(':root{--blue:#007bff;--indigo:#6610f2;--purple:#6f42c1;--pink:#e83e8c;--red:#dc3545;--orange:#fd7e14;--yellow:#ffc107;--green:#28a745;--teal:#20c997;--cyan:#17a2b8;--white:#fff;--gray:#6c757d;--gray-dark:#343a40;--primary:#007bff;--secondary:#6c757d;--success:#28a745;--info:#17a2b8;--warning:#ffc107;--danger:#dc3545;--light:#f8f9fa;--dark:#343a40;--breakpoint-xs:0;--breakpoint-sm:576px;--breakpoint-md:768px;--breakpoint-lg:992px;--breakpoint-xl:1200px;--font-family-sans-serif:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--font-family-monospace:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier 
New",monospace}*,::after,::before{box-sizing:border-box}html{font-family:sans-serif;line-height:1.15;-webkit-text-size-adjust:100%;-webkit-tap-highlight-color:transparent}body{margin:0;font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";font-size:1rem;font-weight:400;line-height:1.5;color:#212529;text-align:left;background-color:#fff}dl,ol,ul{margin-top:0;margin-bottom:1rem}b,strong{font-weight:bolder}a{color:#007bff;text-decoration:none;background-color:transparent}img{vertical-align:middle;border-style:none}table{border-collapse:collapse}.table{width:100%;margin-bottom:1rem;color:#212529}.table td,.table th{padding:.75rem;vertical-align:top;border-top:1px solid #dee2e6}.table-sm td,.table-sm th{padding:.3rem}.table-bordered{border:1px solid #dee2e6}.table-bordered td,.table-bordered th{border:1px solid #dee2e6}.list-group{display:-ms-flexbox;display:flex;-ms-flex-direction:column;flex-direction:column;padding-left:0;margin-bottom:0;border-radius:.25rem}.list-group-item{position:relative;display:block;padding:.75rem 1.25rem;background-color:#fff;border:1px solid rgba(0,0,0,.125)}.list-group-item:first-child{border-top-left-radius:inherit;border-top-right-radius:inherit}.list-group-item:last-child{border-bottom-right-radius:inherit;border-bottom-left-radius:inherit}.list-group-item+.list-group-item{border-top-width:0}.stretched-link::after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;pointer-events:auto;content:"";background-color:rgba(0,0,0,0)}.mes{word-break:break-all}img,a,audio{display:block}img{max-width:100%}') f.close() if config['customActions']: from customActions import customActions cust = customActions(vk, conn, cursor) def bgWatcher(): while True: maxCacheAge = config['maxCacheAge'] with stop_mutex: logger.info("Обслуживание БД...") try: showMessagesWithDeletedAttachments() except BaseException: logger.exception("Ошибка при поиске удаленных фото") try: if maxCacheAge != -1: cursor.execute( """DELETE FROM messages WHERE timestamp < ?""", (time.time() - maxCacheAge,) ) conn.commit() cursor.execute("VACUUM") else: maxCacheAge = 86400 except BaseException: logger.exception("Ошибка при очистке базы данных") logger.info("Обслуживание БД завершено.") time.sleep(maxCacheAge) def interrupt_handler(signum, frame): conn.commit() cursor.close() try: tableWatcher.cancel() except AttributeError: pass logger.info("Завершение...") os._exit(0) signal.signal(signal.SIGINT, interrupt_handler) signal.signal(signal.SIGTERM, interrupt_handler) def eventWorker_predefinedDisabled(): global events while True: flag.wait() event = events.pop(0) with stop_mutex: try: cust.act(event) except BaseException: logger.exception("Ошибка в customActions. \n %s", vars(event)) if len(events) == 0: flag.clear() def eventWorker_customDisabled(): global events while True: flag.wait() event = events.pop(0) with stop_mutex: predefinedActions(event) if len(events) == 0: flag.clear() conn.commit() def eventWorker(): global events while True: flag.wait() event = events.pop(0) with stop_mutex: try: cust.act(event) except BaseException: logger.exception("Ошибка в customActions. 
\n %s", vars(event)) predefinedActions(event) if len(events) == 0: flag.clear() conn.commit() def predefinedActions(event): try: if event.type == VkEventType.MESSAGE_NEW: cursor.execute( """INSERT INTO messages(peer_id,user_id,message_id,message,attachments,timestamp,fwd_messages) VALUES (?,?,?,?,?,?,?)""", (event.peer_id, event.user_id, event.message_id, event.message, event.message_data[1], event.timestamp, event.message_data[2],) ) conn.commit() elif event.type == VkEventType.MESSAGE_EDIT: if event.message_data[0]: activityReport(event.message_id, event.peer_id, event.user_id, event.timestamp, True, event.message_data[1], event.message_data[2], event.text) cursor.execute( """INSERT or REPLACE INTO messages(peer_id,user_id,message_id,message,attachments,timestamp,fwd_messages) VALUES (?,?,?,?,?,?,?)""", (event.peer_id, event.user_id, event.message_id, event.message, event.message_data[1], event.timestamp, event.message_data[2],) ) conn.commit() elif event.type == VkEventType.MESSAGE_FLAGS_SET: try: activityReport(event.message_id) cursor.execute( """DELETE FROM messages WHERE message_id = ?""", (event.message_id,) ) conn.commit() except TypeError: logger.info("Удаление невозможно, сообщение отсутствует в БД.") except sqlite3.IntegrityError: logger.warning("Запущено несколько копий программы, завершение...") interrupt_handler(0, None) except Warning: pass except BaseException: logger.exception("Ошибка при сохранении сообщения. \n %s", vars(event)) def main(): logger.info("Запущен основной цикл.") global events for event in longpoll.listen(): try: if event.raw[0] == 4 or event.raw[0] == 5: if event.attachments != {}: event.message_data = getAttachments(event) else: event.message_data = True, None, None if event.from_user and event.raw[2] & 2: event.user_id = account_id elif event.from_group: if event.from_me: event.user_id = account_id else: event.user_id = event.peer_id if not event.message: event.message = None events.append(event) flag.set() elif event.raw[0] == 2 and (event.raw[2] & 131072 or event.raw[2] & 128): events.append(event) flag.set() except Warning: pass except BaseException: logger.exception("Ошибка при добавлении события в очередь. 
\n %s", vars(event)) def showMessagesWithDeletedAttachments(): cursor.execute("""SELECT message_id, attachments FROM messages WHERE attachments IS NOT NULL""") fetch_attachments = [[str(i[0]), json.loads(i[1])] for i in cursor.fetchall()] cursor.execute("""SELECT message_id, fwd_messages FROM messages WHERE fwd_messages IS NOT NULL""") fetch_fwd = [[str(i[0]), json.loads(i[1])] for i in cursor.fetchall()] c = 0 for i in range(len(fetch_attachments)): for j in fetch_attachments[i - c][1]: if j['type'] == 'photo' or j['type'] == 'video' or j['type'] == 'doc': break else: del fetch_attachments[i - c] c += 1 messages_attachments = [] messages_fwd = [] for i in [[j[0] for j in fetch_attachments[i:i + 100]] for i in range(0, len(fetch_attachments), 100)]: messages_attachments.extend(tryAgainIfFailed( vk.messages.getById, message_ids=','.join(i))['items'] ) for i in [[j[0] for j in fetch_fwd[i:i + 100]] for i in range(0, len(fetch_fwd), 100)]: messages_fwd.extend(tryAgainIfFailed( vk.messages.getById, message_ids=','.join(i))['items'] ) c = 0 for i in range(len(fetch_attachments)): if compareAttachments(messages_attachments[i - c]['attachments'], fetch_attachments[i - c][1]): del fetch_attachments[i - c] del messages_attachments[i - c] c += 1 for i in range(len(fetch_attachments)): activityReport(fetch_attachments[i][0]) if messages_attachments[i]['attachments'] == []: cursor.execute( """UPDATE messages SET attachments = ? WHERE message_id = ?""", (None, fetch_attachments[i][0],) ) else: cursor.execute( """UPDATE messages SET attachments = ? WHERE message_id = ?""", ( json.dumps(messages_attachments[i]['attachments']), fetch_attachments[i][0], ) ) c = 0 for i in range(len(fetch_fwd)): if compareFwd( messages_fwd[i - c], { 'fwd_messages': fetch_fwd[i - c][1] } ): del fetch_fwd[i - c] del messages_fwd[i - c] c += 1 for i in range(len(fetch_fwd)): activityReport(fetch_fwd[i][0]) if messages_fwd[i]['fwd_messages'] == []: cursor.execute( """UPDATE messages SET fwd_messages = ? WHERE message_id = ?""", (None, fetch_fwd[i][0],) ) else: cursor.execute( """UPDATE messages SET fwd_messages = ? 
WHERE message_id = ?""", ( json.dumps(messages_fwd[i]['fwd_messages']), fetch_fwd[i][0], ) ) conn.commit() def compareFwd(new, old): if 'reply_message' in new: new['fwd_messages'] = [new['reply_message']] if 'reply_message' in old: old['fwd_messages'] = [old['reply_message']] for i in range(len(old['fwd_messages'])): if 'fwd_messages' in old['fwd_messages'][i] and 'fwd_messages' in new['fwd_messages'][i]: if not compareFwd( new['fwd_messages'][i], old['fwd_messages'][i] ): return False if not compareAttachments( new['fwd_messages'][i]['attachments'], old['fwd_messages'][i]['attachments'] ): return False return True def compareAttachments(new, old): if len(new) < len(old): return False return True def attachmentsParse(urls): if urls is None: return "" html = """<div> """ for i in urls: urlSplit = i.split(',') if i.find('vk.com/sticker/') != -1: html += """ <img src="{}" /> """.format(i) elif i.find('.jpg') != -1 and i.find(',') == -1: html += """ <img src="{}" /> """.format(i) elif i.find('.mp3') != -1: html += """ <audio src="{}" controls></audio> """.format(i) elif i.find('https://vk.com/audio') != -1: html += """ <a href="{}" target="_blank"> {} </a> """.format(i, i[23:-11].replace('%20', ' ')) elif i.find('@') != -1: i = i.rsplit('@', 1) html += """ <a href="{}" target="_blank"> {} </a> """.format(i[1], i[0]) elif len(urlSplit) == 3: html += """ <a href="{}" target="_blank"> Видео <img src="{}"/> </a> """.format(f"./vkGetVideoLink.html?{urlSplit[1]}&{urlSplit[2]}", urlSplit[0]) else: html += """ <a href="{0}" target="_blank"> {0} </a> """.format(i) html += """</div>""" return html def getAttachments(event): message_id = event.message_id fullLoadUnNeeded = not (event.raw[0] == 5 or 'fwd' in event.attachments) count = 0 if fullLoadUnNeeded: for i in range(1,11): if f'attach{i}_type' in event.attachments: if event.attachments[f'attach{i}_type'] not in ('sticker', 'link'): fullLoadUnNeeded = False else: count = i break if fullLoadUnNeeded: attachments = [] for i in range(1,count): if event.attachments[f'attach{i}_type'] == 'sticker': attachments.append({'type':'sticker','sticker':{'images':[{'height':64,'url':f'https://vk.com/sticker/1-{event.attachments[f"attach{i}"]}-64'}]}}) else: if f'attach{i}_title' in event.attachments: title = event.attachments[f'attach{i}_title'] else: title = event.attachments[f'attach{i}_url'] attachments.append({'type':'link','link':{'title':title,'url':event.attachments[f'attach{i}_url']}}) return False, json.dumps(attachments, ensure_ascii=False,), None mes = tryAgainIfFailed( vk.messages.getById, message_ids=message_id )['items'] if not len(mes): logger.info("Не удалось запросить вложения для сообщения, message_id = %i.", event.message_id) return False, "[]", "[]" else: mes = mes[0] hasUpdateTime = 'update_time' in mes fwd_messages = None if 'reply_message' in mes: fwd_messages = json.dumps([mes['reply_message']], ensure_ascii=False,) elif mes['fwd_messages'] != []: fwd_messages = json.dumps(mes['fwd_messages'], ensure_ascii=False,) if mes['attachments'] == []: attachments = None else: attachments = json.dumps(mes['attachments'], ensure_ascii=False,) return hasUpdateTime, attachments, fwd_messages def parseUrls(attachments): urls = [] for i in attachments: if i['type'] == 'photo': maxHeight = 0 maxUrl = "" for j in i['photo']['sizes']: if j['height'] > maxHeight: maxHeight = j['height'] maxUrl = j['url'] urls.append(maxUrl) elif i['type'] == 'audio_message': urls.append(i['audio_message']['link_mp3']) elif i['type'] == 'sticker': 
urls.append(i['sticker']['images'][0]['url']) elif i['type'] == 'gift': urls.append(i['gift']['thumb_48']) elif i['type'] == 'link': urls.append(f"Ссылка: {i['link']['title']}@{i['link']['url']}") elif i['type'] == 'video': urls.append(f"{i['video']['image'][0]['url']},{i['video']['player']},{i['video']['owner_id']}_{i['video']['id']}_{i['video']['access_key']}") elif i['type'] == 'wall': urls.append(f"Пост: {i['wall']['text'][:25]}@https://vk.com/wall{i['wall']['from_id']}_{i['wall']['id']}") elif i['type'] == 'wall_reply': urls.append(f"Комментарий: {i['wall_reply']['text'][:25]}@https://vk.com/wall{i['wall_reply']['owner_id']}_{i['wall_reply']['post_id']}?reply={i['wall_reply']['id']}") elif i['type'] == 'audio': urls.append(f"https://vk.com/audio?q={i['audio']['artist'].replace(' ', '%20')}%20-%20{i['audio']['title'].replace(' ', '%20')}&tab=global") elif i['type'] == 'audio_playlist': urls.append(f"Плейлист: {i['audio_playlist']['title']}@https://vk.com/music?z=audio_playlist{i['audio_playlist']['owner_id']}_{i['audio_playlist']['id']}/{i['audio_playlist']['access_key']}") elif i['type'] == 'market': urls.append(f"https://vk.com/market?w=product{i['market']['owner_id']}_{i['market']['id']}") elif i['type'] == 'poll': urls.append(f"Голосование: {i['poll']['question'][:25]}@https://vk.com/poll{i['poll']['owner_id']}_{i['poll']['id']}") elif i['type'] == 'doc': urls.append(f"Документ: {i['doc']['title']}@{i['doc']['url']}") else: if 'url' in i[i['type']]: urls.append(i[i['type']]['url']) if urls == []: return None return urls def getPeerName(id): if id > 2000000000: cursor.execute("""SELECT chat_name FROM chats_cache WHERE chat_id = ?""", (id,)) fetch = cursor.fetchone() if fetch is None: try: name = tryAgainIfFailed( vk.messages.getChat, chat_id=id-2000000000 )['title'] cursor.execute("""INSERT INTO chats_cache (chat_id,chat_name) VALUES (?,?)""", (id, name,)) conn.commit() except Warning: name = "Секретный чат, используйте токен другого приложения" else: name = fetch[0] elif id < 0: cursor.execute("""SELECT user_name FROM users_cache WHERE user_id = ?""", (id,)) fetch = cursor.fetchone() if fetch is None: name = tryAgainIfFailed( vk.groups.getById, group_id=-id )[0]['name'] cursor.execute("""INSERT INTO users_cache (user_id,user_name) VALUES (?,?)""", (id, name,)) conn.commit() else: name = fetch[0] else: cursor.execute("""SELECT user_name FROM users_cache WHERE user_id = ?""", (id,)) fetch = cursor.fetchone() if fetch is None: name = tryAgainIfFailed( vk.users.get, user_id=id )[0] name = f"{name['first_name']} {name['last_name']}" cursor.execute("""INSERT INTO users_cache (user_id,user_name) VALUES (?,?)""", (id, name,)) conn.commit() else: name = fetch[0] return name def fwdParse(fwd): html = """<table class="table table-sm table-bordered"> """ for i in fwd: user_name = getPeerName(i['from_id']) if i['from_id'] < 0: html += """ <tr> <td> <a href='https://vk.com/public{}' target="_blank"> {} </a> </td> </tr> """.format(-i['from_id'], user_name) else: html += """ <tr> <td> <a href='https://vk.com/id{}' target="_blank"> {} </a> </td> </tr> """.format(i['from_id'], user_name) if i['text'] != "": html += """ <tr> <td> <div class='mes'> {} </div> """.format(xssFilter(i['text'])) else: html += """ <tr> <td> """ if i['attachments'] != []: html += attachmentsParse(parseUrls(i['attachments'])) if 'fwd_messages' in i: html += fwdParse(i['fwd_messages']) elif 'reply_message' in i: html += fwdParse([i['reply_message']]) html += """ </td> </tr> <tr> <td> {} </td> </tr> 
""".format(time.strftime('%H:%M:%S %d.%m.%y', time.localtime(i['date']))) html += "</table>" return html def xssFilter(s): return s\ .replace('<', '&lt;')\ .replace('>', '&gt;')\ .replace('\n', '<br />') def compareStrings(a, b): aCounter = 0 bCounter = 0 for i in difflib.SequenceMatcher(None, a, b).get_opcodes(): if i[0] == 'insert': b = f"{b[: i[3]+bCounter]}<ins>{b[i[3]+bCounter : i[4]+bCounter]}</ins>{b[i[4]+bCounter:]}" bCounter += 11 elif i[0] == 'delete': a = f"{a[: i[1]+aCounter]}<ins>{a[i[1]+aCounter : i[2]+aCounter]}</ins>{a[i[2]+aCounter:]}" aCounter += 11 elif i[0] == 'replace': a = f"{a[: i[1]+aCounter]}<ins>{a[i[1]+aCounter : i[2]+aCounter]}</ins>{a[i[2]+aCounter:]}" b = f"{b[: i[3]+bCounter]}<ins>{b[i[3]+bCounter : i[4]+bCounter]}</ins>{b[i[4]+bCounter:]}" aCounter += 11 bCounter += 11 return a, b def activityReport(message_id, peer_id=None, user_id=None, timestamp=None, isEdited=False, attachments=None, fwd=None, message=None): try: peer_name = user_name = oldMessage = oldAttachments = date = oldFwd = None cursor.execute("""SELECT * FROM messages WHERE message_id = ?""", (message_id,)) fetch = cursor.fetchone() if attachments is not None: attachments = parseUrls(json.loads(attachments)) if fwd is not None: fwd = json.loads(fwd) if fetch is None: if isEdited: logger.info("Изменение сообщения, отсутствующего в БД, message_id = %i.", message_id) fetch = [0]*7 peer_name = getPeerName(peer_id) user_name = getPeerName(user_id) oldMessage = f"⚠️ {message}" oldAttachments = attachments oldFwd = fwd date = f"<b>Доб:</b>&nbsp;{time.strftime('%H:%M:%S&nbsp;%d.%m', time.localtime(timestamp))}<br /><b>Изм:</b>&nbsp;{time.strftime('%H:%M:%S&nbsp;%d.%m', time.localtime())}" else: raise TypeError else: if fetch[3] is not None: oldMessage = str(fetch[3]) if fetch[4] is not None: oldAttachments = parseUrls(json.loads(fetch[4])) if fetch[6] is not None: oldFwd = json.loads(fetch[6]) peer_name = getPeerName(fetch[0]) user_name = getPeerName(fetch[1]) date = f"<b>Доб:</b>&nbsp;{time.strftime('%H:%M:%S&nbsp;%d.%m', time.localtime(fetch[5]))}<br /><b>Изм:</b>&nbsp;{time.strftime('%H:%M:%S&nbsp;%d.%m', time.localtime())}" peer_id = fetch[0] user_id = fetch[1] del fetch row = """ <tr><!-- {} --> <td>{} </td> <td>{} </td> {} <td> {} </td> </tr> """ messageBlock = """ <div class='mes'> {} </div>""" attachmentsBlock = """ <div> <b>Вложения</b><br /> {} </div>""" fwdBlock = """ <div> <b>Пересланное</b><br /> {} </div>""" if peer_id > 2000000000: peer_id = """ <a href='https://vk.com/im?sel=c{}' target='_blank'> {} </a>""".format(str(peer_id-2000000000), peer_name) elif peer_id < 0: peer_id = """ <a href='https://vk.com/public{}' target='_blank'> {} </a>""".format(str(-peer_id), peer_name) else: peer_id = """ <a href='https://vk.com/id{}' target='_blank'> {} </a>""".format(str(peer_id), peer_name) if user_id < 0: user_id = """ <a href='https://vk.com/public{}' target='_blank'> {} </a>""".format(str(-user_id), user_name) else: user_id = """ <a href='https://vk.com/id{}' target='_blank'> {} </a>""".format(str(user_id), user_name) if isEdited: if not (oldMessage is None or message is None): message = xssFilter(message) oldMessage = xssFilter(oldMessage) message, oldMessage = compareStrings(message, oldMessage) oldMessage = messageBlock.format(oldMessage) message = messageBlock.format(message) elif oldMessage is None: oldMessage = "" message = messageBlock.format(xssFilter(message)) else: oldMessage = messageBlock.format(xssFilter(oldMessage)) message = "" if oldAttachments is not None: oldAttachments = 
attachmentsBlock.format(attachmentsParse(oldAttachments)) else: oldAttachments = "" if oldFwd is not None: oldFwd = fwdBlock.format(fwdParse(oldFwd)) else: oldFwd = "" if attachments is not None: attachments = attachmentsBlock.format(attachmentsParse(attachments)) else: attachments = "" if fwd is not None: fwd = fwdBlock.format(fwdParse(fwd)) else: fwd = "" messageBlock = """<td width='50%'> <b>Старое</b><br />{} </td> <td width='50%'> <b>Новое</b><br />{} </td>""".format(oldMessage+oldAttachments+oldFwd, message+attachments+fwd) else: if oldMessage is not None: oldMessage = messageBlock.format(xssFilter(oldMessage)) else: oldMessage = "" if oldAttachments is not None: oldAttachments = attachmentsBlock.format(attachmentsParse(oldAttachments)) else: oldAttachments = "" if oldFwd is not None: oldFwd = fwdBlock.format(fwdParse(oldFwd)) else: oldFwd = "" messageBlock = """<td width='100%' colspan='2'> <b>Удалено</b><br />{} </td>""".format(oldMessage+oldAttachments+oldFwd) row = row.format(message_id, peer_id, user_id, messageBlock, date) if os.path.exists( os.path.join( cwd, "mesAct", f"messages_{time.strftime('%d%m%y', time.localtime())}.html" ) ): messagesActivities = open( os.path.join( cwd, "mesAct", f"messages_{time.strftime('%d%m%y',time.localtime())}.html" ), 'r', encoding='utf-8' ) messagesDump = messagesActivities.read() messagesActivities.close() messagesActivities = open( os.path.join( cwd, "mesAct", f"messages_{time.strftime('%d%m%y',time.localtime())}.html" ), 'w', encoding='utf-8' ) else: messagesDump = template messagesActivities = open( os.path.join( cwd, "mesAct", f"messages_{time.strftime('%d%m%y',time.localtime())}.html" ), 'w', encoding='utf-8' ) messagesDump = messagesDump[:offset]+row+messagesDump[offset:] messagesActivities.write(messagesDump) messagesActivities.close() except TypeError: raise TypeError except BaseException: logger.exception("Ошибка при логгировании изменений.") if not config['disableMessagesLogging']: tableWatcher = threading.Thread(target=bgWatcher) tableWatcher.start() template = """<!DOCTYPE html> <html> <head> <meta charset="utf-8"> <link rel="stylesheet" href="./bootstrap.css"> </head> <body> <table class="table table-sm"> </table> </body> </html>""" offset = template.index(""" </table>""") events = [] flag = threading.Event() def preloadMessages(): logger.info("Предзагрузка сообщений...") offset = 0 peer_ids = [] messages = [] shouldContinue = True try: while shouldContinue: shouldContinue = False dialogs = tryAgainIfFailed(vk.messages.getConversations, offset=offset, count=20) for i in range(0,len(dialogs['items'])): if dialogs['items'][i]['last_message']['date'] >= time.time() - config['maxCacheAge']: peer_ids.append(dialogs['items'][i]['conversation']['peer']['id']) if i == len(dialogs['items']) - 1: shouldContinue = True offset+=20 for i in peer_ids: offset = 0 if i > 2000000000: count = 200 else: count = 50 shouldContinue = True while shouldContinue: shouldContinue = False mes = vk.messages.getHistory(offset=offset, count=count, peer_id=i)['items'] if mes[-1]['date']>= time.time() - config['maxCacheAge']: shouldContinue = True offset+=count for j in mes: if j['date'] >= time.time() - config['maxCacheAge']: messages.append(j) for i in messages: message_id = i['id'] with stop_mutex: cursor.execute("""SELECT message_id FROM messages WHERE message_id = ?""", (message_id,)) if cursor.fetchone() is not None: continue peer_id = i['peer_id'] user_id = i['from_id'] message = i['text'] timestamp = i['date'] fwd_messages = None if 'reply_message' in i: 
fwd_messages = json.dumps([i['reply_message']], ensure_ascii=False,) elif i['fwd_messages'] != []: fwd_messages = json.dumps(i['fwd_messages'], ensure_ascii=False,) if i['attachments'] == []: attachments = None else: attachments = json.dumps(i['attachments'], ensure_ascii=False,) with stop_mutex: cursor.execute( """INSERT INTO messages(peer_id,user_id,message_id,message,attachments,timestamp,fwd_messages) VALUES (?,?,?,?,?,?,?)""", (peer_id, user_id, message_id, message, attachments, timestamp, fwd_messages,) ) conn.commit() except BaseException: logger.exception("Ошибка во время предзагрузки сообщений") logger.info("Предзагрузка сообщений завершена.") if config['customActions'] and config['disableMessagesLogging']: threading.Thread(target=eventWorker_predefinedDisabled).start() elif not config['disableMessagesLogging'] and not config['customActions']: threading.Thread(target=eventWorker_customDisabled).start() else: threading.Thread(target=eventWorker).start() if config['preloadMessages']: threading.Thread(target=preloadMessages).start() try: tryAgainIfFailed( main, maxRetries=-1 ) except Warning: pass
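# Illustrative sketch: the message logger above highlights message edits by wrapping
# changed spans in <ins> tags via difflib.SequenceMatcher opcodes, shifting later indices
# by 11 characters per wrap (len("<ins>") + len("</ins>")), as in compareStrings.
# A minimal, self-contained version of that idea; the helper name highlight_diff is
# hypothetical and not part of the original code.
import difflib

def highlight_diff(old: str, new: str):
    """Wrap changed spans of both strings in <ins> tags, mirroring compareStrings above."""
    TAG_LEN = len("<ins>") + len("</ins>")  # 11 extra characters per wrapped span
    a, b = old, new
    a_shift = b_shift = 0
    for op, a1, a2, b1, b2 in difflib.SequenceMatcher(None, old, new).get_opcodes():
        if op in ("delete", "replace"):
            a = f"{a[:a1 + a_shift]}<ins>{a[a1 + a_shift:a2 + a_shift]}</ins>{a[a2 + a_shift:]}"
            a_shift += TAG_LEN
        if op in ("insert", "replace"):
            b = f"{b[:b1 + b_shift]}<ins>{b[b1 + b_shift:b2 + b_shift]}</ins>{b[b2 + b_shift:]}"
            b_shift += TAG_LEN
    return a, b

# Example: highlight_diff("old message", "new message")
# -> ('<ins>old</ins> message', '<ins>new</ins> message')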
""" Definition of events. """ from abc import ABC EVENT_LOG = 'eLog' #Log Event EVENT_MARKETDATA = 'eMarketData' #Pushing MarketData Event EVENT_TRADE = 'eTrade' #Trade Event EVENT_BUY = 'eBuy' #Buy Event EVENT_SELL = 'eSell' #Sell Event EVENT_CANCEL = 'eCancel' #Cancel Event EVENT_POSITION = 'ePosition' #Position Query Event EVENT_STATUS = 'eStatus' #Order Status Event EVENT_ACCOUNT = 'eAccount' #Account Query Event EVENT_PROFIT_CHANGED = 'eProfitChanged' #Profit Event class StrategyEvent: def __init__(self, type_=None, even_param_=None): self.type_ = type_ self.even_param_ = even_param_ def clear(self): """ Delete unreferenced source. """ self.even_param_.clear() class EventEngine(ABC): pass
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import openerp from openerp import SUPERUSER_ID from openerp.osv import fields, osv from openerp.tools.translate import _ class sale_configuration(osv.osv_memory): _inherit = 'sale.config.settings' _columns = { 'group_invoice_deli_orders': fields.boolean('Generate invoices after and based on delivery orders', implied_group='sale_stock.group_invoice_deli_orders', help="To allow your salesman to make invoices for Delivery Orders using the menu 'Deliveries to Invoice'."), 'task_work': fields.boolean("Prepare invoices based on task's activities", help='Lets you transfer the entries under tasks defined for Project Management to ' 'the Timesheet line entries for particular date and particular user with the effect of creating, editing and deleting either ways ' 'and to automatically creates project tasks from procurement lines.\n' '-This installs the modules project_timesheet and sale_service.'), 'default_order_policy': fields.selection( [('manual', 'Invoice based on sales orders'), ('picking', 'Invoice based on deliveries')], 'The default invoicing method is', default_model='sale.order', help="You can generate invoices based on sales orders or based on shippings."), 'module_delivery': fields.boolean('Allow adding shipping costs', help='Allows you to add delivery methods in sales orders and delivery orders.\n' 'You can define your own carrier and delivery grids for prices.\n' '-This installs the module delivery.'), 'default_picking_policy' : fields.boolean("Deliver all at once when all products are available.", help = "Sales order by default will be configured to deliver all products at once instead of delivering each product when it is available. This may have an impact on the shipping price."), 'group_mrp_properties': fields.boolean('Product properties on order lines', implied_group='sale.group_mrp_properties', help="Allows you to tag sales order lines with properties."), 'module_project_timesheet': fields.boolean("Project Timesheet"), 'module_sale_service': fields.boolean("Sale Service"), 'group_route_so_lines': fields.boolean('Choose MTO, drop shipping,... 
on sales order lines', implied_group='sale_stock.group_route_so_lines', help="Allows you to choose a delivery route on sales order lines"), } _defaults = { 'default_order_policy': 'manual', } def default_get(self, cr, uid, fields, context=None): res = super(sale_configuration, self).default_get(cr, uid, fields, context) # task_work, time_unit depend on other fields res['task_work'] = res.get('module_sale_service') and res.get('module_project_timesheet') return res def get_default_sale_config(self, cr, uid, ids, context=None): ir_values = self.pool.get('ir.values') default_picking_policy = ir_values.get_default(cr, uid, 'sale.order', 'picking_policy') return { 'default_picking_policy': default_picking_policy == 'one', } def set_sale_defaults(self, cr, uid, ids, context=None): if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'): raise openerp.exceptions.AccessError(_("Only administrators can change the settings")) ir_values = self.pool.get('ir.values') wizard = self.browse(cr, uid, ids)[0] default_picking_policy = 'one' if wizard.default_picking_policy else 'direct' ir_values.set_default(cr, SUPERUSER_ID, 'sale.order', 'picking_policy', default_picking_policy) res = super(sale_configuration, self).set_sale_defaults(cr, uid, ids, context) return res def onchange_invoice_methods(self, cr, uid, ids, group_invoice_so_lines, group_invoice_deli_orders, context=None): if not group_invoice_deli_orders: return {'value': {'default_order_policy': 'manual'}} if not group_invoice_so_lines: return {'value': {'default_order_policy': 'picking'}} return {}
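# Illustrative sketch: the wizard above stores the "deliver all at once" checkbox as the
# sale.order default picking_policy via ir.values (True -> 'one', False -> 'direct') in
# set_sale_defaults, and reads it back the same way in get_default_sale_config. The
# mapping in isolation, outside of OpenERP (the helper name is hypothetical):
def picking_policy_from_flag(deliver_all_at_once):
    return 'one' if deliver_all_at_once else 'direct'

assert picking_policy_from_flag(True) == 'one'
assert picking_policy_from_flag(False) == 'direct'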
import pytest import io from cite_seq_count import preprocessing @pytest.fixture def data(): from collections import OrderedDict from itertools import islice # Test file paths pytest.correct_whitelist_path = 'tests/test_data/whitelists/correct.csv' pytest.correct_tags_path = 'tests/test_data/tags/correct.csv' pytest.correct_R1_path = 'tests/test_data/fastq/correct_R1.fastq.gz' pytest.correct_R2_path = 'tests/test_data/fastq/correct_R2.fastq.gz' pytest.corrupt_R1_path = 'tests/test_data/fastq/corrupted_R1.fastq.gz' pytest.corrupt_R2_path = 'tests/test_data/fastq/corrupted_R2.fastq.gz' # Create some variables to compare to pytest.correct_whitelist = set(['ACTGTTTTATTGGCCT','TTCATAAGGTAGGGAT']) pytest.correct_tags = { 'AGGACCATCCAA':'CITE_LEN_12_1', 'ACATGTTACCGT':'CITE_LEN_12_2', 'AGCTTACTATCC':'CITE_LEN_12_3', 'TCGATAATGCGAGTACAA':'CITE_LEN_18_1', 'GAGGCTGAGCTAGCTAGT':'CITE_LEN_18_2', 'GGCTGATGCTGACTGCTA':'CITE_LEN_18_3', 'TGTGACGTATTGCTAGCTAG':'CITE_LEN_20_1', 'ACTGTCTAACGGGTCAGTGC':'CITE_LEN_20_2', 'TATCACATCGGTGGATCCAT':'CITE_LEN_20_3'} pytest.correct_ordered_tags = OrderedDict({ 'TGTGACGTATTGCTAGCTAG':'CITE_LEN_20_1-TGTGACGTATTGCTAGCTAG', 'ACTGTCTAACGGGTCAGTGC':'CITE_LEN_20_2-ACTGTCTAACGGGTCAGTGC', 'TATCACATCGGTGGATCCAT':'CITE_LEN_20_3-TATCACATCGGTGGATCCAT', 'TCGATAATGCGAGTACAA':'CITE_LEN_18_1-TCGATAATGCGAGTACAA', 'GAGGCTGAGCTAGCTAGT':'CITE_LEN_18_2-GAGGCTGAGCTAGCTAGT', 'GGCTGATGCTGACTGCTA':'CITE_LEN_18_3-GGCTGATGCTGACTGCTA', 'AGGACCATCCAA':'CITE_LEN_12_1-AGGACCATCCAA', 'ACATGTTACCGT':'CITE_LEN_12_2-ACATGTTACCGT', 'AGCTTACTATCC':'CITE_LEN_12_3-AGCTTACTATCC'}) pytest.barcode_slice = slice(0, 16) pytest.umi_slice = slice(16, 26) pytest.barcode_umi_length = 26 @pytest.mark.dependency() def test_parse_whitelist_csv(data): assert preprocessing.parse_whitelist_csv(pytest.correct_whitelist_path, 16, 1) == (pytest.correct_whitelist,1) @pytest.mark.dependency() def test_parse_tags_csv(data): assert preprocessing.parse_tags_csv(pytest.correct_tags_path) == pytest.correct_tags @pytest.mark.dependency(depends=['test_parse_tags_csv']) def test_check_tags(data): assert preprocessing.check_tags(pytest.correct_tags, 5) == pytest.correct_ordered_tags @pytest.mark.dependency(depends=['test_check_tags']) def test_check_distance_too_big_between_tags(data): with pytest.raises(SystemExit): preprocessing.check_tags(pytest.correct_tags, 8) @pytest.mark.dependency(depends=['test_parse_whitelist_csv']) def test_check_barcodes_lengths(data): assert preprocessing.check_barcodes_lengths(26, 1, 16, 17, 26) == (pytest.barcode_slice, pytest.umi_slice, pytest.barcode_umi_length) @pytest.mark.dependency() def test_get_n_lines(data): assert preprocessing.get_n_lines(pytest.correct_R1_path) == (200 * 4) @pytest.mark.dependency(depends=['test_get_n_lines']) def test_get_n_lines_not_multiple_of_4(data): with pytest.raises(SystemExit): preprocessing.get_n_lines(pytest.corrupt_R1_path)
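# Illustrative sketch: the slices asserted above carve a 26 bp read into a 16 bp cell
# barcode and a 10 bp UMI. A small self-contained illustration; the UMI sequence is
# made up, the barcode is taken from the test whitelist.
barcode_slice = slice(0, 16)
umi_slice = slice(16, 26)

read1 = "ACTGTTTTATTGGCCT" + "AAACCCGGGT"            # 16 bp barcode + 10 bp UMI
assert len(read1) == 26
assert read1[barcode_slice] == "ACTGTTTTATTGGCCT"     # cell barcode
assert read1[umi_slice] == "AAACCCGGGT"               # UMI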
from game_data import * from hosting import ServerHandler, ClientHandler import json board = [ ["R", "K", "B", "Q", "E", "B", "K", "R"], ["P", "P", "P", "P", "P", "P", "P", "P"], [" ", " ", " ", " ", " ", " ", " ", " "], [" ", " ", " ", " ", " ", " ", " ", " "], [" ", " ", " ", " ", " ", " ", " ", " "], [" ", " ", " ", " ", " ", " ", " ", " "], ["P", "P", "P", "P", "P", "P", "P", "P"], ["R", "K", "B", "Q", "E", "B", "K", "R"] ] pieces = Initiator() pos_handler = PositionHandler(pieces[0]+pieces[1]) p1 = Player("white", pieces[0]) p2 = Player("black", pieces[1]) player_handler = PlayerHandler(p1, p2) end = False win_team = None checkmate = False try: try: net = eval(input("Enter Server IP, Port to Host: ")) except KeyboardInterrupt: exit() if type(net[0]) == str and net[1] > 5000 and net[1] < 65000: server = ServerHandler(*net) DisplayBoard(board) while True: error_msg = "" if player_handler.current.team == "white": if checkmate: error_msg = "You're in Checkmate" print(player_handler.current.give_pieces_position()) try: piece_pos = eval(input("Position of Piece: ")) piece_to_go = eval(input("Position To Go: ")) except KeyboardInterrupt: break if PositionChecks(piece_pos) and PositionChecks(piece_to_go): piece = pos_handler.get_piece(piece_pos) if piece == False or piece.team != player_handler.current.team: error_msg = "Piece Position is Incorrect" else: check, piece, n_board = player_handler.play_piece(piece, piece_to_go, board, pos_handler) if check: board = n_board if piece != " ": pieces[2].append(piece) player_handler.remove_piece(piece) pos_handler = PositionHandler(player_handler.player1.pieces + player_handler.player2.pieces) end, lose_player = player_handler.game_end() checkmate = player_handler.checkmate(board, pos_handler) player_handler.change_player() else: error_msg = "Bad Position" else: error_msg = "Bad Position" clear_screen() DisplayBoard(board) print(error_msg) if end: break win_team = "white" if lose_player.team == "black" else "black" else: if checkmate: server.send_state(server.encode_state("", "", "You're in Checkmate")) server.send_state(server.encode_state(board, player_handler.current.give_pieces_position(), "")) server.send_state("input") pos_data = server.recv_inputs() try: pos_data = json.loads(pos_data) print(pos_data) piece_pos = tuple(pos_data["piece_pos"]) piece_to_go = tuple(pos_data["piece_to_go"]) if PositionChecks(piece_pos) and PositionChecks(piece_to_go): piece = pos_handler.get_piece(piece_pos) print(piece) if piece == False or piece.team != player_handler.current.team: server.send_state(server.encode_state("", "", "Piece Position is Incorrect")) else: check, piece, n_board = player_handler.play_piece(piece, piece_to_go, board, pos_handler) if check: board = n_board if piece != " ": pieces[2].append(piece) player_handler.remove_piece(piece) pos_handler = PositionHandler(player_handler.player1.pieces + player_handler.player2.pieces) end, lose_player = player_handler.game_end() checkmate = player_handler.checkmate(board, pos_handler) player_handler.change_player() server.send_state(server.encode_state(board, "", "")) else: server.send_state(server.encode_state("", "", "Bad Position")) else: server.send_state(server.encode_state("", "", "Bad Position")) # clear_screen() if end: win_team = "white" if lose_player.team == "black" else "black" break clear_screen() DisplayBoard(board) except json.decoder.JSONDecodeError: pass server.send_state(server.encode_state("", "", f"{win_team} Won The Match")) server.close_conn("end") else: print("[-] IP/Port is not 
Correctly Specified as rules.") print("[-] Ip should be like \"127.0.0.1\" and Port Should be Between 5000 and 65000") print("[-] Enter both like this \"127.0.0.1\", 9999") print("[-] Do It Correctly Next Time Bitch :]") except ConnectionResetError: print("Client Disconnected") except SyntaxError: server.close_conn("end") print("Syntax Error")
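# Illustrative sketch: the server loop above expects each remote move as a JSON object
# with "piece_pos" and "piece_to_go" keys, which it converts back to coordinate tuples.
# A minimal example of building and parsing such a payload; the coordinates are made up,
# and the actual socket transport is handled by ServerHandler/ClientHandler in hosting.py.
import json

move = {"piece_pos": (6, 4), "piece_to_go": (4, 4)}   # e.g. advance a pawn two squares
payload = json.dumps(move)

received = json.loads(payload)
piece_pos = tuple(received["piece_pos"])
piece_to_go = tuple(received["piece_to_go"])
assert piece_pos == (6, 4) and piece_to_go == (4, 4)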
# Pydifact - a python edifact library # # Copyright (c) 2019 Christian González # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from typing import Union, List from pydifact.api import EDISyntaxError, PluginMount from pydifact.control import Characters class SegmentProvider(metaclass=PluginMount): """This is a plugin mount point for Segment plugins which represent a certain EDIFACT Segment. Classes implementing this PluginMount should provide the following attributes: """ def __str__(self): """Returns the user readable text representation of this segment.""" def validate(self) -> bool: """Validates the Segment.""" class Segment(SegmentProvider): """Represents a low-level segment of an EDI interchange. This class is used internally. read-world implementations of specialized should subclass Segment and provide the `tag` and `validate` attributes. """ # tag is not a class attribute in this case, as each Segment instance could have another tag. __omitted__ = True def __init__(self, tag: str, *elements: Union[str, List[str]]): """Create a new Segment instance. :param str tag: The code/tag of the segment. Must not be empty. :param list elements: The data elements for this segment, as (possibly empty) list. """ self.tag = tag # The data elements for this segment. # this is converted to a list (due to the fact that python creates a tuple # when passing a variable arguments list to a method) self.elements = list(elements) def __str__(self) -> str: """Returns the Segment in Python list printout""" return "'{tag}' EDI segment: {elements}".format( tag=self.tag, elements=str(self.elements) ) def __repr__(self) -> str: return "{} segment: {}".format(self.tag, str(self.elements)) def __eq__(self, other) -> bool: # FIXME the other way round too? isinstance(other, type(self))? return ( isinstance(self, type(other)) and self.tag == other.tag and list(self.elements) == list(other.elements) ) def __getitem__(self, key): return self.elements[key] def __setitem__(self, key, value): self.elements[key] = value def validate(self) -> bool: """ Segment validation. The Segment class is part of the lower level interfaces of pydifact. So it assumes that the given parameters are correct, there is no validation done here. However, in segments derived from this class, there should be validation. :return: bool True if given tag and elements are a valid EDIFACT segment, False if not. """ # FIXME: there should be a way of returning an error message - WHICH kind of validation failed. 
if not self.tag: return False return True class EDIenergySegment(Segment): def __init__(self, tag: str, *elements: Union[str, List[str]]): super().__init__(tag, *elements) def validate(self) -> bool: if not super().validate(): return False else: # TODO add validation method for EDI@Energy pass class SegmentFactory: """Factory for producing segments.""" characters = None @staticmethod def create_segment( name: str, *elements: Union[str, List[str]], validate: bool = True ) -> Segment: """Create a new instance of the relevant class type. :param name: The name of the segment :param elements: The data elements for this segment :param validate: bool if True, the created segment is validated before return """ if not SegmentFactory.characters: SegmentFactory.characters = Characters() # Basic segment type validation is done here. # The more special validation must be done in the corresponding Segment if not name: raise EDISyntaxError("The tag of a segment must not be empty.") if type(name) != str: raise EDISyntaxError( "The tag name of a segment must be a str, but is a {}: {}".format( type(name), name ) ) if not name.isalnum(): raise EDISyntaxError( "Tag '{}': A tag name must only contain alphanumeric characters.".format( name ) ) for Plugin in SegmentProvider.plugins: if getattr(Plugin, "tag", "") == name: s = Plugin(name, *elements) break else: # we don't support this kind of EDIFACT segment (yet), so # just create a generic Segment() s = Segment(name, *elements) if validate: if not s.validate(): raise EDISyntaxError( "could not create '{}' Segment. Validation failed.".format(name) ) # FIXME: characters is not used! return s
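# Illustrative sketch: minimal usage of the classes above — a plain Segment, and
# SegmentFactory.create_segment, which falls back to the generic Segment when no plugin
# declares a matching tag. The tag and element values are made-up EDIFACT-style data.
qty = Segment("QTY", ["21", "8"])
print(str(qty))            # "'QTY' EDI segment: [['21', '8']]"
assert qty.validate()      # a non-empty tag is enough for the base class

nad = SegmentFactory.create_segment("NAD", "BY", ["1234567890123", "", "9"])
assert nad.tag == "NAD"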
"""Utilities for reading real time clocks and keeping soft real time constraints.""" import gc import os import time import multiprocessing from common.clock import sec_since_boot # pylint: disable=no-name-in-module, import-error from selfdrive.hardware import PC, TICI # time step for each process DT_CTRL = 0.01 # controlsd DT_MDL = 0.05 # model DT_TRML = 0.5 # thermald and manager # driver monitoring if TICI: DT_DMON = 0.05 else: DT_DMON = 0.1 class Priority: # CORE 2 # - modeld = 55 # - camerad = 54 CTRL_LOW = 51 # plannerd & radard # CORE 3 # - boardd = 55 CTRL_HIGH = 53 def set_realtime_priority(level): if not PC: os.sched_setscheduler(0, os.SCHED_FIFO, os.sched_param(level)) def set_core_affinity(core): if not PC: os.sched_setaffinity(0, [core,]) def config_realtime_process(core, priority): gc.disable() set_realtime_priority(priority) set_core_affinity(core) class Ratekeeper(): def __init__(self, rate, print_delay_threshold=0.): """Rate in Hz for ratekeeping. print_delay_threshold must be nonnegative.""" self._interval = 1. / rate self._next_frame_time = sec_since_boot() + self._interval self._print_delay_threshold = print_delay_threshold self._frame = 0 self._remaining = 0 self._process_name = multiprocessing.current_process().name @property def frame(self): return self._frame @property def remaining(self): return self._remaining # Maintain loop rate by calling this at the end of each loop def keep_time(self): lagged = self.monitor_time() if self._remaining > 0: time.sleep(self._remaining) return lagged # this only monitor the cumulative lag, but does not enforce a rate def monitor_time(self): lagged = False remaining = self._next_frame_time - sec_since_boot() self._next_frame_time += self._interval if self._print_delay_threshold is not None and remaining < -self._print_delay_threshold: print("%s lagging by %.2f ms" % (self._process_name, -remaining * 1000)) lagged = True self._frame += 1 self._remaining = remaining return lagged
import os, json import shutil, logging import click from pyspark.sql.functions import lit, udf, explode, array, to_json from pyspark.sql.types import ArrayType, StringType, IntegerType, MapType, StructType, StructField from luna.common.CodeTimer import CodeTimer from luna.common.config import ConfigSet from luna.common.custom_logger import init_logger from luna.common.sparksession import SparkConfig from luna.common.utils import get_absolute_path from luna.pathology.common.slideviewer_client import fetch_slide_ids import luna.common.constants as const os.environ['OPENBLAS_NUM_THREADS'] = '1' def download_point_annotation(slideviewer_url, slideviewer_path, project_id, user): """Downloads point-click nuclear annotations using slideviewer API Args: slideviewer_url (string): slideviewer base url e.g. https://slideviewer-url.com slideviewer_path (string): slide path in slideviewer project_id (string): slideviewer project id user (string): username used to create the expert annotation Returns: json: point-click nuclear annotations """ from slideviewer_client import download_sv_point_annotation print (f" >>>>>>> Processing [{slideviewer_path}] <<<<<<<<") url = slideviewer_url + "/slides/" + str(user) + "@mskcc.org/projects;" + \ str(project_id) + ';' + slideviewer_path + "/getSVGLabels/nucleus" print(url) return download_sv_point_annotation(url) @click.command() @click.option('-d', '--data_config_file', default=None, type=click.Path(exists=True), help="path to yaml file containing data input and output parameters. " "See data_config.yaml.template") @click.option('-a', '--app_config_file', default='config.yaml', type=click.Path(exists=True), help="path to yaml file containing application runtime parameters. " "See config.yaml.template") def cli(data_config_file, app_config_file): """This module generates a parquet table of point-click nuclear annotation jsons. The configuration files are copied to your project/configs/table_name folder to persist the metadata used to generate the proxy table. INPUT PARAMETERS app_config_file - path to yaml file containing application runtime parameters. See config.yaml.template data_config_file - path to yaml file containing data input and output parameters. See data_config.yaml.template - ROOT_PATH: path to output data - DATA_TYPE: data type used in table name e.g. POINT_RAW_JSON - PROJECT: your project name. used in table path - DATASET_NAME: optional, dataset name to version your table - PROJECT_ID: Slideviewer project id - USERS: list of users that provide expert annotations for this project - SLIDEVIEWER_CSV_FILE: an optional path to a SlideViewer csv file to use that lists the names of the whole slide images and for which the regional annotation proxy table generator should download point annotations. If this field is left blank, then the regional annotation proxy table generator will download this file from SlideViewer. TABLE SCHEMA - slideviewer_path: path to original slide image in slideviewer platform - slide_id: id for the slide. synonymous with image_id - sv_project_id: same as the PROJECT_ID from data_config_file, refers to the SlideViewer project number. - sv_json: json annotation file downloaded from slideviewer. 
- user: username of the annotator for a given annotation - sv_json_record_uuid: hash of raw json annotation file from slideviewer, format: SVPTJSON-{json_hash} """ logger = init_logger() with CodeTimer(logger, 'generate POINT_RAW_JSON table'): logger.info('data config file: ' + data_config_file) logger.info('app config file: ' + app_config_file) # load configs cfg = ConfigSet(name=const.DATA_CFG, config_file=data_config_file) cfg = ConfigSet(name=const.APP_CFG, config_file=app_config_file) # copy app and data configuration to destination config dir config_location = const.CONFIG_LOCATION(cfg) os.makedirs(config_location, exist_ok=True) shutil.copy(app_config_file, os.path.join(config_location, "app_config.yaml")) shutil.copy(data_config_file, os.path.join(config_location, "data_config.yaml")) logger.info("config files copied to %s", config_location) create_proxy_table() def create_proxy_table(): """Create a proxy table of point annotation json files downloaded from the SlideViewer API Each row of the table is a point annotation json created by a user for a slide. Returns: None """ cfg = ConfigSet() logger = logging.getLogger(__name__) spark = SparkConfig().spark_session(config_name=const.APP_CFG, app_name="luna.pathology.point_annotation.proxy_table.generate") # load paths from configs point_table_path = const.TABLE_LOCATION(cfg) PROJECT_ID = cfg.get_value(path=const.DATA_CFG+'::PROJECT_ID') SLIDEVIEWER_URL = cfg.get_value(path=const.DATA_CFG+'::SLIDEVIEWER_URL') # Get slide list to use # Download CSV file in the project configs dir slides = fetch_slide_ids(SLIDEVIEWER_URL, PROJECT_ID, const.CONFIG_LOCATION(cfg), cfg.get_value(path=const.DATA_CFG+'::SLIDEVIEWER_CSV_FILE')) logger.info(slides) schema = StructType([StructField("slideviewer_path", StringType()), StructField("slide_id", StringType()), StructField("sv_project_id", IntegerType()) ]) df = spark.createDataFrame(slides, schema) # populate columns df = df.withColumn("users", array([lit(user) for user in cfg.get_value(const.DATA_CFG+'::USERS')])) df = df.select("slideviewer_path", "slide_id", "sv_project_id", explode("users").alias("user")) # download slide point annotation jsons # example point json: # [{"project_id":"8","image_id":"123.svs","label_type":"nucleus","x":"1440","y":"747","class":"0","classname":"Tissue 1"},{"project_id":"8","image_id":"123.svs","label_type":"nucleus","x":"1424","y":"774","class":"3","classname":"Tissue 4"}] point_json_struct = ArrayType( MapType(StringType(), StringType()) ) spark.sparkContext.addPyFile(get_absolute_path(__file__, "../../common/slideviewer_client.py")) download_point_annotation_udf = udf(download_point_annotation, point_json_struct) df = df.withColumn("sv_json", download_point_annotation_udf(lit(SLIDEVIEWER_URL), "slideviewer_path", "sv_project_id", "user"))\ .cache() # drop empty jsons that may have been created df = df.dropna(subset=["sv_json"]) # populate "date_added", "date_updated","latest", "sv_json_record_uuid" spark.sparkContext.addPyFile(get_absolute_path(__file__, "../../common/EnsureByteContext.py")) spark.sparkContext.addPyFile(get_absolute_path(__file__, "../../common/utils.py")) from luna.common.utils import generate_uuid_dict sv_json_record_uuid_udf = udf(generate_uuid_dict, StringType()) df = df.withColumn("sv_json_record_uuid", sv_json_record_uuid_udf(to_json("sv_json"), array(lit("SVPTJSON")))) df.show(10, False) df.write.format("parquet").mode("overwrite").save(point_table_path) if __name__ == "__main__": cli()
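# Illustrative sketch: the module above is a click CLI, so one way to exercise it
# programmatically is click's test runner. The config file paths below are placeholders;
# with nonexistent paths click reports a usage error (exit code 2), so point them at
# your own project configs.
from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(cli, [
    "-d", "configs/point_js_data_config.yaml",   # data_config_file (hypothetical path)
    "-a", "configs/app_config.yaml",             # app_config_file (hypothetical path)
])
print(result.exit_code)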
# Copyright 2021 Supun Nakandala. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import os import sys import numpy as np import tensorflow as tf import pandas as pd import random import math import argparse sys.path.append('./') from commons import cnn_bi_lstm_model, input_iterator # Setting random seeds tf.random.set_random_seed(2019) random.seed(2019) np.random.seed(2019) def get_train_ops(y, logits, learning_rate, n_classes, class_weights): y = tf.reshape(y, [-1]) logits = tf.reshape(logits, [-1, n_classes]) balanced_accuracy, update_op = tf.metrics.mean_per_class_accuracy(y, tf.argmax(logits, 1), n_classes) y = tf.reshape(tf.one_hot(y, depth=n_classes, axis=1), [-1, n_classes]) loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y) * tf.reduce_sum(tf.constant(class_weights, dtype=tf.float32) * y, axis=1)) optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) train_op = optimizer.minimize(loss) return train_op, update_op, balanced_accuracy, loss def window_generator(data_root, win_size_10s, subject_ids): x_segments = []; y_segments = [] for subject_id in subject_ids: for x_seq, _, y_seq in input_iterator(data_root, subject_id, train=True): x_window = []; y_window = [] for x,y in zip(x_seq, y_seq): x_window.append(x) y_window.append(y) if len(y_window) == win_size_10s: yield np.stack(x_window, axis=0), np.stack(y_window, axis=0) x_window = []; y_window = [] if __name__ == "__main__": parser = argparse.ArgumentParser(description='Argument parser for training CNN model.') optional_arguments = parser._action_groups.pop() required_arguments = parser.add_argument_group('required arguments') required_arguments.add_argument('--pre-processed-dir', help='Pre-processed data directory', required=True) optional_arguments.add_argument('--transfer-learning-model', help='Transfer learning model name (default: CHAP_ALL_ADULTS)', default=None, required=False, choices=['CHAP_ALL_ADULTS']) optional_arguments.add_argument('--learning-rate', help='Learning rate for training the model (default: 0.0001)', default=1e-4, type=float, required=False) optional_arguments.add_argument('--num-epochs', help='Number of epochs to train the model (default: 15)', default=15, type=int, required=False) optional_arguments.add_argument('--batch-size', help='Training batch size (default: 16)', default=16, type=int, required=False) optional_arguments.add_argument('--amp-factor', help='Factor to increase the number of neurons in the CNN layers (default: 2)', default=2, type=int, required=False) optional_arguments.add_argument('--cnn-window-size', help='CNN window size in seconds on which the predictions to be made (default: 10)', default=10, type=int, required=False) optional_arguments.add_argument('--bi-lstm-window-size', help='BiLSTM window size in minutes on which the predictions to be smoothed (default: 7)', default=7, type=int, required=False) 
optional_arguments.add_argument('--shuffle-buffer-size', help='Training data shuffle buffer size in terms of number of records (default: 10000)', default=10000, type=int, required=False) optional_arguments.add_argument('--training-data-fraction', help='Percentage of subjects to be used for training (default: 60)', default=60, type=int, required=False) optional_arguments.add_argument('--validation-data-fraction', help='Percentage of subjects to be used for validation (default: 20)', default=20, type=int, required=False) optional_arguments.add_argument('--testing-data-fraction', help='Percentage of subjects to be used for testing (default: 20)', default=20, type=int, required=False) optional_arguments.add_argument('--model-checkpoint-path', help='Path where the trained model will be saved (default: ./model-checkpoint)', default='./model-checkpoint', required=False) optional_arguments.add_argument('--num-classes', help='Number of classes in the training dataset (default: 2)', default=2, type=int, required=False) optional_arguments.add_argument('--class-weights', help='Class weights for loss aggregation (default: [1.0, 1.0])', default='[1.0, 1.0]', required=False) optional_arguments.add_argument('--down-sample-frequency', help='Downsample frequency in Hz for GT3X data (default: 10)', default=10, type=int, required=False) optional_arguments.add_argument('--silent', help='Whether to hide info messages', default=False, required=False, action='store_true') parser._action_groups.append(optional_arguments) args = parser.parse_args() if os.path.exists(args.model_checkpoint_path): raise Exception('Model checkpoint: {} already exists.'.format(args.model_checkpoint_path)) if args.transfer_learning_model: if args.transfer_learning_model == 'CHAP_ALL_ADULTS': args.amp_factor = 2 args.cnn_window_size = 10 args.bi_lstm_win_size = 7 else: raise Exception('Unsupported transfer learning model: {}'.format(args.transfer_learning_model)) assert (args.training_data_fraction + args.validation_data_fraction + args.testing_data_fraction) == 100, 'Train, validation,test split fractions should add up to 100%' subject_ids = [fname.split('.')[0] for fname in os.listdir(args.pre_processed_dir)] random.shuffle(subject_ids) n_train_subjects = int(math.ceil(len(subject_ids) * args.training_data_fraction / 100.)) train_subjects = subject_ids[:n_train_subjects] subject_ids = subject_ids[n_train_subjects:] test_frac = args.testing_data_fraction / (100.0 - args.training_data_fraction) * 100 n_test_subjects = int(math.ceil(len(subject_ids) * test_frac / 100.)) test_subjects = subject_ids[:n_test_subjects] valid_subjects = subject_ids[n_test_subjects:] output_shapes = ((args.bi_lstm_window_size*(60//args.cnn_window_size), args.cnn_window_size*args.down_sample_frequency, 3), (args.bi_lstm_window_size*(60//args.cnn_window_size))) bi_lstm_win_size = 60//args.down_sample_frequency * args.bi_lstm_window_size train_dataset = tf.data.Dataset.from_generator(lambda: window_generator(args.pre_processed_dir, bi_lstm_win_size, train_subjects),output_types=(tf.float32, tf.int32), output_shapes=output_shapes).shuffle(args.shuffle_buffer_size).batch(args.batch_size).prefetch(10) valid_dataset = tf.data.Dataset.from_generator(lambda: window_generator(args.pre_processed_dir, bi_lstm_win_size, valid_subjects),output_types=(tf.float32, tf.int32), output_shapes=output_shapes).batch(args.batch_size).prefetch(10) test_dataset = tf.data.Dataset.from_generator(lambda: window_generator(args.pre_processed_dir, bi_lstm_win_size, 
test_subjects),output_types=(tf.float32, tf.int32), output_shapes=output_shapes).batch(args.batch_size).prefetch(10) iterator = tf.data.Iterator.from_structure(train_dataset.output_types, train_dataset.output_shapes) train_init_op = iterator.make_initializer(train_dataset) valid_init_op = iterator.make_initializer(valid_dataset) test_init_op = iterator.make_initializer(test_dataset) x, y = iterator.get_next() x = tf.reshape(x, [-1, args.cnn_window_size*args.down_sample_frequency, 3, 1]) x = tf.identity(x, name='input') y = tf.reshape(y, [-1, bi_lstm_win_size]) learning_rate = tf.placeholder(tf.float32) logits = cnn_bi_lstm_model(x, args.amp_factor, bi_lstm_win_size, args.num_classes) output = tf.argmax(tf.reshape(logits, [-1, args.num_classes]), axis=1, name='output') prediction = tf.identity(tf.argmax(logits, axis=1), name='prediction') class_weights = eval(args.class_weights) train_op, update_op, balanced_accuracy, loss = get_train_ops(y, logits, learning_rate, args.num_classes, class_weights) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) if args.transfer_learning_model: ckpt_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pre-trained-models', '{}_CKPT'.format(args.transfer_learning_model), 'model') # Weights for the final classification layer (dense) are ignored variables = [v for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if not v.name.startswith('dense/')] restorer = tf.train.Saver(variables) restorer.restore(sess, ckpt_path) if not args.silent: print('Training subjects: {}'.format(train_subjects)) print('Validation subjects: {}'.format(valid_subjects)) print('Testing subjects: {}'.format(test_subjects)) for epoch in range(args.num_epochs): for label, init_op, subjects in zip(["Train", "Validation", "Test"], [train_init_op, valid_init_op, test_init_op], [train_subjects, valid_subjects, test_subjects]): sess.run(tf.local_variables_initializer()) sess.run(init_op) losses = [] while True: try: if label == "Train": _, _, l = sess.run([train_op, update_op, loss], feed_dict={learning_rate: args.learning_rate}) elif label == "Validation": _, l = sess.run([update_op, loss]) elif label == "Test": _, l = sess.run([update_op, loss]) losses.append(l) except tf.errors.OutOfRangeError: if not args.silent: ba = sess.run(balanced_accuracy) print("Epoch: %d, %s Loss: %f, Balanced Accuracy: %f" %(epoch, label, sum(losses), ba)) break if not os.path.exists(args.model_checkpoint_path): os.makedirs(args.model_checkpoint_path) tf.saved_model.simple_save(sess, os.path.join(args.model_checkpoint_path, 'CUSTOM_MODEL'), inputs={"input": x}, outputs={"output": output}) if not args.silent: print('Model saved in path: {}'.format(args.model_checkpoint_path))
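# Illustrative sketch: sanity check of the window shapes used above with the default
# arguments. A 7-minute BiLSTM window over 10-second CNN windows at a 10 Hz downsample
# rate gives 42 CNN windows of 100 tri-axial samples each.
cnn_window_size = 10          # seconds (default)
bi_lstm_window_size = 7       # minutes (default)
down_sample_frequency = 10    # Hz (default)

windows_per_seq = bi_lstm_window_size * (60 // cnn_window_size)   # 7 * 6 = 42
samples_per_window = cnn_window_size * down_sample_frequency      # 10 * 10 = 100
output_shapes = ((windows_per_seq, samples_per_window, 3), (windows_per_seq,))
assert output_shapes == ((42, 100, 3), (42,))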
# labplus mPython-box library # MIT license; Copyright (c) 2018 labplus # mpython-box buildin periphers drivers # history: # V1.0 zhaohuijiang from machine import Pin, UART import time import ujson from time import sleep_ms, sleep_us, sleep # touchpad class BS8112A(object): """ """ def __init__(self, i2c): self.addr = 80 # config self._i2c = i2c self.config = [0xB0, 0x00, 0x00, 0x83, 0xf3, 0x98, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x00] checksum = 0 for i in range(1, 19): checksum += self.config[i] checksum &= 0xff self.config[18] = checksum # print(self.config[18]) retry = 0 if (retry < 5): try: self._i2c.writeto(self.addr, bytearray(self.config), True) return except: retry = retry + 1 else: raise Exception("bs8112a i2c read/write error!") # i2c.writeto(self.addr, b'\xB0', False) # time.sleep_ms(10) # print(i2c.readfrom(self.addr, 17, True)) # key map: # value bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0 # bs8112a key Key8 Key7 Key6 Key5 Key4 Key3 Key2 Key1 # mpython key N O H T Y P def key_value(self): retry = 0 if (retry < 5): try: self._i2c.writeto(self.addr, b'\x08', False) time.sleep_ms(10) value = self._i2c.readfrom(self.addr, 1, True) time.sleep_ms(10) return value except: retry = retry + 1 else: raise Exception("bs8112a i2c read/write error!") class Codec_mode(): ES_MODULE_ADC_DAC = 0x00 ES_MODULE_DAC = 0x01 ES_MODULE_ADC = 0x02 class Es8388(): """ """ def __init__(self, i2c, adc_volume=0, dac_volume=0, volume=65): self._i2c = i2c self.addr = 16 self.adc_volume = adc_volume self.dac_volume = dac_volume self.volume = volume self.set_voice_mute(1) retry = 0 if (retry < 5): try: # i2c.writeto(self.addr, bytearray([0x19, 0x04])) # ES8388_DACCONTROL3 0x04 mute/0x00 unmute&ramp;DAC unmute and disabled digital volume control soft ramp # Chip Control and Power Management self._i2c.writeto(self.addr, bytearray( [0x01, 0x50])) # ES8388_CONTROL2 0x40? # ES8388_CHIPPOWER normal all and power up all self._i2c.writeto(self.addr, bytearray([0x02, 0x00])) # ES8388_MASTERMODE CODEC IN I2S SLAVE MODE 0x00: slave self._i2c.writeto(self.addr, bytearray([0x08, 0x00])) # dac setup # ES8388_DACPOWER . disable DAC and disable Lout/Rout/1/2 self._i2c.writeto(self.addr, bytearray([0x04, 0xC0])) # ES8388_CONTROL1. 
Enfr=0,Play&Record Mode,(0x17-both of mic&paly) self._i2c.writeto(self.addr, bytearray([0x00, 0x12])) # ES8388_DACCONTROL1 1a 0x18:16bit iis , 0x00:24 self._i2c.writeto(self.addr, bytearray([0x17, 0x18])) # ES8388_DACCONTROL2 DACFsMode,SINGLE SPEED; DACFsRatio,256 self._i2c.writeto(self.addr, bytearray([0x18, 0x02])) # ES8388_DACCONTROL16 0x00 audio on LIN1&RIN1, 0x09 LIN2&RIN2 self._i2c.writeto(self.addr, bytearray([0x26, 0x00])) # ES8388_DACCONTROL17 only left DAC to left mixer enable 0db self._i2c.writeto(self.addr, bytearray([0x27, 0x90])) # ES8388_DACCONTROL20 only right DAC to right mixer enable 0db self._i2c.writeto(self.addr, bytearray([0x2a, 0x90])) # ES8388_DACCONTROL21 set internal ADC and DAC use the same LRCK clock, ADC LRCK as internal LRCK self._i2c.writeto(self.addr, bytearray([0x2b, 0x80])) # ES8388_DACCONTROL23 vroi=0 self._i2c.writeto(self.addr, bytearray([0x2d, 0x00])) self.set_adc_dac_volume( Codec_mode.ES_MODULE_DAC, self.dac_volume, 0) # 0db # ES8388_DACPOWER 0x3c Enable DAC and Enable Lout/Rout/1/2 self._i2c.writeto(self.addr, bytearray([0x04, 0x3c])) # adc setup self._i2c.writeto(self.addr, bytearray( [0x03, 0xff])) # ES8388_ADCPOWER # ES8388_ADCCONTROL1 MIC Left and Right channel PGA gain self._i2c.writeto(self.addr, bytearray([0x09, 0xbb])) # ES8388_ADCCONTROL2 0x00 LINSEL & RINSEL, LIN1/RIN1 as ADC Input; DSSEL,use one DS Reg11; DSR, LINPUT1-RINPUT1 self._i2c.writeto(self.addr, bytearray([0x0a, 0x00])) # ES8388_ADCCONTROL3 clock input self._i2c.writeto(self.addr, bytearray([0x0b, 0x02])) # ES8388_ADCCONTROL4 Left/Right data, Left/Right justified mode, Bits length 16bit, I2S format 0x0c? self._i2c.writeto(self.addr, bytearray([0x0c, 0x0c])) # ES8388_ADCCONTROL5 ADCFsMode,singel SPEED,RATIO=256 self._i2c.writeto(self.addr, bytearray([0x0d, 0x02])) # ALC for Microphone self.set_adc_dac_volume( Codec_mode.ES_MODULE_ADC, self.adc_volume, 0) # 0db # ES8388_ADCPOWER Power on ADC, Enable LIN&RIN, Power off MICBIAS, set int1lp to low power mode self._i2c.writeto(self.addr, bytearray([0x03, 0x09])) # set volume self.set_volume(self.volume) self.set_voice_mute(0) # test # for i in range(0, 52): # i2c.writeto(self.addr, bytearray([i])) # print("%d: %d" % (i, i2c.readfrom(self.addr, 1)[0])) return except: retry = retry + 1 else: raise Exception("es8388 i2c read/write error!") def deinit(self): retry = 0 if (retry < 5): try: # ES8388_CHIPPOWER reset and stop es838 self._i2c.writeto(self.addr, bytearray([0x02, 0xff])) return except: retry = retry + 1 else: raise Exception("bs8112a i2c read/write error!") def set_adc_dac_volume(self, mode, volume, dot): _volume = volume if (_volume < -96): _volume = -96 else: _volume = 0 _dot = 0 if dot >= 5: _dot = 1 _volume = (-_volume << 1) + _dot retry = 0 if (retry < 5): try: if (mode == Codec_mode.ES_MODULE_ADC or mode == Codec_mode.ES_MODULE_ADC_DAC): self._i2c.writeto(self.addr, bytearray( [0x10, _volume])) # ES8388_ADCCONTROL8 self._i2c.writeto(self.addr, bytearray( [0x11, _volume])) # ES8388_ADCCONTROL9 if (mode == Codec_mode.ES_MODULE_DAC or mode == Codec_mode.ES_MODULE_ADC_DAC): self._i2c.writeto(self.addr, bytearray( [0x1b, _volume])) # ES8388_DACCONTROL5 self._i2c.writeto(self.addr, bytearray( [0x1a, _volume])) # ES8388_DACCONTROL4 return except: retry = retry + 1 else: raise Exception("bs8112a i2c read/write error!") def set_volume(self, volume): self.volume = volume if (self.volume < 0): self.volume = 0 elif (self.volume > 100): self.volume = 100 retry = 0 if (retry < 5): try: self._i2c.writeto(self.addr, bytearray( [0x2e, 
self.volume//3])) # ES8388_DACCONTROL24 self._i2c.writeto(self.addr, bytearray( [0x2f, self.volume//3])) # ES8388_DACCONTROL25 self._i2c.writeto(self.addr, bytearray( [0x30, 0])) # ES8388_DACCONTROL26 self._i2c.writeto(self.addr, bytearray( [0x31, 0])) # ES8388_DACCONTROL27 # print("volume L: %d" % (self.volume//3)) return except: retry = retry + 1 else: raise Exception("bs8112a i2c read/write error!") def get_volume(self): return self.volume def set_voice_mute(self, mute): retry = 0 if (retry < 5): try: self._i2c.writeto(self.addr, b'\x19') dac_ctr3 = self._i2c.readfrom(self.addr, 1)[0] if(mute): dac_ctr3 |= 0x04 else: dac_ctr3 &= 0xFB self._i2c.writeto(self.addr, bytearray([0x19, dac_ctr3])) except: retry = retry + 1 else: raise Exception("bs8112a i2c read/write error!") uart2 = UART(2, baudrate=1152000, rx=Pin.P8, tx=Pin.P23, timeout=50, timeout_char=1024, rxbuf=2048, txbuf=2048) class K210Error(Exception): """K210异常类""" pass class blob(): def __init__(self,*args): self.dict = args[0] def __repr__(self): return self.dict def x(self): return self.dict['x'] def y(self): return self.dict['y'] def w(self): return self.dict['w'] def h(self): return self.dict['h'] def rect(self): return(self.dict['x'], self.dict['y'], self.dict['w'], self.dict['h']) def pixels(self): return self.dict['pixels'] def cx(self): return self.dict['cx'] def cy(self): return self.dict['cy'] def rotation(self): return self.dict['rotation'] def code(self): return self.dict['code'] def count(self): return self.dict['count'] class K210(): def __init__(self): t1 = time.ticks_ms() while (time.ticks_diff(time.ticks_ms(), t1) < 10000): rsp = self.send_cmd({'GET_KEYS': 0}) # 通过发获取按键指令测试K210是否初始化成功 if rsp is not None: return raise K210Error("K210 init failed!") def send_cmd(self, command, wait=True, timeout=200): json_stream = ujson.dumps(command) uart2.write(json_stream + '\n') # print("UART_Send:%s" % (json_stream + '\n')) t1 = time.ticks_ms() while wait: if uart2.any() > 0: r=None r = uart2.readline() r= r.strip() while uart2.readline(): pass # print("UART_Recv:%s" % r) try: rsp = ujson.loads(r) except Exception as e: print(e) break else: if rsp and isinstance(rsp, dict): for key, value in rsp.items(): if key == 'ERROR': raise K210Error(value) if key == 'RESP': return value if time.ticks_diff(time.ticks_ms(), t1) > timeout: # raise K210Error("k210 not respone!") return None def get_key(self): return self.send_cmd({'GET_KEYS': 0}) def get_distance(self): resp = self.send_cmd({'GET_DISTANCE': 0}) if resp is None: resp = 340 return resp def set_cam_led(self, on_off): return self.send_cmd({'SET_CAM_LED': on_off}) def set_motor(self, speed): return self.send_cmd({'SET_MOTOR': speed}) def file_open(self, *args): return self.send_cmd({'FILE_OPEN': args}) def file_read(self, *args): return self.send_cmd({'FILE_READ': args[0]},timeout=300) def file_write(self, *args): return self.send_cmd({'FILE_WRITE': args[0]},timeout=300) def file_close(self): return self.send_cmd({'FILE_CLOSE': 0}) def reset(self): self.send_cmd({'RESET': 0},False) def select_model(self, *args): self.send_cmd({'SELE_MOD': args[0]}, timeout=3000) def load_model(self, **kws): self.send_cmd({'LOD_MOD': kws}, timeout=3000) def detect_yolo(self): return self.send_cmd({'DET_YO': 0}) def predict_net(self): return self.send_cmd({'PRE_NET': 0}) def deinit_yolo(self): return self.send_cmd({'DINT_YO': 0}) def deinit_net(self): return self.send_cmd({'DINT_NET': 0}) def camera_snapshot(self): return self.send_cmd({'SNAPSHOT': 0}) def camera_reset(self): return 
self.send_cmd({'CAM_RST': 0},timeout=3000) def camera_run(self, *arg): return self.send_cmd({'CAM_RUN': arg[0]}) def camera_set_pixformat(self, *arg): return self.send_cmd({'CAM_SET_PF': arg[0]}) def camera_set_contrast(self, *arg): return self.send_cmd({'CAM_SET_CRA': arg[0]}) def camera_set_brightness(self, *arg): return self.send_cmd({'CAM_SET_BRG': arg[0]}) def camera_set_saturation(self, *arg): return self.send_cmd({'CAM_SET_SAT': arg[0]}) def camera_set_auto_gain(self, *arg, **kw): return self.send_cmd({'CAM_AUTO_GAIN': [arg, kw]}) def camera_set_auto_whitebal(self, *arg): return self.send_cmd({'CAM_AUTO_WBAL': arg[0]}) def camera_set_windowing(self, *arg): return self.send_cmd({'CAM_SET_WIN': arg[0]}) def camera_set_hmirror(self, *arg): return self.send_cmd({'CAM_SET_HM': arg[0]}) def camera_set_vflip(self, *arg): return self.send_cmd({'CAM_SET_VF': arg[0]}) def camera_skip_frames(self, *arg, **kw): return self.send_cmd({'CAM_SKIP_FRM': [arg, kw]}) def lcd_init(self, *args, **kws): return self.send_cmd({'LCD_INT': [args, kws]},timeout=5000) def lcd_display(self, **kws): return self.send_cmd({'LCD_DISP': kws}) def lcd_clear(self, **kws): return self.send_cmd({'LCD_CLR': kws}) def lcd_draw_string(self, *args): return self.send_cmd({'LCD_STR': args}) def image_load(self, *args, **kws): self.send_cmd({'IMG_LOD': [args, kws]}) time.sleep_ms(200) def image_width(self): return self.send_cmd({'IMG_WID': 0}) def image_hight(self): return self.send_cmd({'IMG_HIG': 0}) def image_format(self): return self.send_cmd({'IMG_FRM': 0}) def image_size(self): return self.send_cmd({'IMG_SIZE': 0}) def image_get_pixel(self, *args, **kws): return self.send_cmd({'IMG_GET_PIX': [args, kws]}) def image_set_pixel(self, *args, **kws): self.send_cmd({'IMG_SET_PIX': [args, kws]}) def image_mean_pool(self, *args, **kws): self.send_cmd({'IMG_MEAN_P': [args, kws]}) def image_to_grayscale(self): self.send_cmd({'IMG_TO_GRAY': 0}) def image_to_rainbow(self): self.send_cmd({'IMG_TO_RB': 0}) def image_copy(self, *args, **kws): self.send_cmd({'IMG_CPY': [args, kws]}) def image_save(self, *args, **kws): self.send_cmd({'IMG_SAVE': [args, kws]}) time.sleep_ms(200) def image_clear(self): self.send_cmd({'IMG_CLR': 0}) def image_draw_line(self, *args, **kws): self.send_cmd({'IMG_DRW_LN': [args, kws]}) def image_draw_rectangle(self, *args, **kws): self.send_cmd({'IMG_DRW_RECTANG': [args, kws]}) def image_draw_circle(self, *args, **kws): self.send_cmd({'IMG_DRW_CIR': [args, kws]}) def image_draw_string(self, *args, **kws): self.send_cmd({'IMG_DRW_STR': [args, kws]}) def image_draw_cross(self, *args, **kws): self.send_cmd({'IMG_DRW_CRS': [args, kws]}) def image_draw_arrow(self, *args, **kws): self.send_cmd({'IMG_DRW_ARR': [args, kws]}) def image_draw_image(self, *args, **kws): self.send_cmd({'IMG_DRW_IMG': [args, kws]}) def image_binary(self, *args, **kws): self.send_cmd({'IMG_BINARY': [args, kws]}) def image_invert(self): self.send_cmd({'IMG_INVERT': 0}) def image_erode(self, *args, **kws): self.send_cmd({'IMG_ERODE': [args, kws]}) def image_dilate(self, *args, **kws): self.send_cmd({'IMG_DIL': [args, kws]}) def image_negate(self, *args, **kws): self.send_cmd({'IMG_NEG': [args, kws]}) def image_mean(self, *args, **kws): self.send_cmd({'IMG_MEAN': [args, kws]}) def image_mode(self, *args, **kws): self.send_cmd({'IMG_MODE': [args, kws]}) def image_median(self, *args, **kws): self.send_cmd({'IMG_MEDIAN': [args, kws]}) def image_midpoint(self, *args, **kws): self.send_cmd({'IMG_MIDP': [args, kws]}) def image_cartoon(self, *args, 
**kws): self.send_cmd({'IMG_CART': [args, kws]}) def image_conv3(self, *args, **kws): self.send_cmd({'IMG_CONV': [args, kws]}) def image_gaussian(self, *args, **kws): self.send_cmd({'IMG_GAUS': [args, kws]}) def image_bilateral(self, *args, **kws): self.send_cmd({'IMG_BIL': [args, kws]}) def image_linpolar(self, *args, **kws): self.send_cmd({'IMG_LINP': [args, kws]}) def image_logpolar(self, *args, **kws): self.send_cmd({'IMG_LOGP': [args, kws]}) def image_rotation_corr(self, *args, **kws): self.send_cmd({'IMG_ROT_COR': [args, kws]}) def image_find_blobs(self, *args, **kws): return [blob(i) for i in self.send_cmd({'IMG_FID_BLOB': [args, kws]})]
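# Illustrative sketch: minimal interactive use of the K210 helper above, assuming the
# K210 co-processor is wired to UART2 as configured. The drawn text and LED value are
# made up.
k210 = K210()                        # raises K210Error if the board never answers GET_KEYS
print(k210.get_distance())           # ultrasonic distance (falls back to 340 when no reply)
k210.set_cam_led(1)                  # turn the camera LED on
k210.lcd_init()
k210.lcd_draw_string(10, 10, "hello")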
import fodmc

# output_mode: PyFLOSIC, NRLMOL
# output_name: NameOfMolecule.xyz (for PyFLOSIC only)
output_mode = ['NRLMOL', 'PyFLOSIC'][1]
output_name = ['', 'test.xyz'][1]

fodmc.fodmc_mod.get_guess(output_mode, output_name)
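The literal list indices above act as an inline switch between the two output formats. A small hedged sketch of the same call with the selection made explicit; the helper name and default values are assumptions, only fodmc.fodmc_mod.get_guess comes from the snippet itself.

import fodmc

def generate_fod_guess(mode='PyFLOSIC', xyz_name='test.xyz'):
    # mode is either 'NRLMOL' or 'PyFLOSIC'; the xyz name is only used for PyFLOSIC output.
    output_name = xyz_name if mode == 'PyFLOSIC' else ''
    fodmc.fodmc_mod.get_guess(mode, output_name)

generate_fod_guess()  # equivalent to selecting index [1] in both lists above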
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import unittest import mock from parameterized import parameterized from airflow.gcp.hooks.cloud_storage_transfer_service import GcpTransferOperationStatus from airflow.gcp.sensors.cloud_storage_transfer_service import CloudDataTransferServiceJobStatusSensor class TestGcpStorageTransferOperationWaitForJobStatusSensor(unittest.TestCase): @mock.patch('airflow.gcp.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook') def test_wait_for_status_success(self, mock_tool): operations = [{'metadata': {'status': GcpTransferOperationStatus.SUCCESS}}] mock_tool.return_value.list_transfer_operations.return_value = operations mock_tool.operations_contain_expected_statuses.return_value = True op = CloudDataTransferServiceJobStatusSensor( task_id='task-id', job_name='job-name', project_id='project-id', expected_statuses=GcpTransferOperationStatus.SUCCESS, ) context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))} result = op.poke(context) mock_tool.return_value.list_transfer_operations.assert_called_once_with( request_filter={'project_id': 'project-id', 'job_names': ['job-name']} ) mock_tool.operations_contain_expected_statuses.assert_called_once_with( operations=operations, expected_statuses={GcpTransferOperationStatus.SUCCESS} ) self.assertTrue(result) @mock.patch('airflow.gcp.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook') def test_wait_for_status_success_default_expected_status(self, mock_tool): op = CloudDataTransferServiceJobStatusSensor( task_id='task-id', job_name='job-name', project_id='project-id', expected_statuses=GcpTransferOperationStatus.SUCCESS, ) context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))} result = op.poke(context) mock_tool.operations_contain_expected_statuses.assert_called_once_with( operations=mock.ANY, expected_statuses={GcpTransferOperationStatus.SUCCESS} ) self.assertTrue(result) @mock.patch('airflow.gcp.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook') def test_wait_for_status_after_retry(self, mock_tool): operations_set = [ [{'metadata': {'status': GcpTransferOperationStatus.SUCCESS}}], [{'metadata': {'status': GcpTransferOperationStatus.SUCCESS}}], ] mock_tool.return_value.list_transfer_operations.side_effect = operations_set mock_tool.operations_contain_expected_statuses.side_effect = [False, True] op = CloudDataTransferServiceJobStatusSensor( task_id='task-id', job_name='job-name', project_id='project-id', expected_statuses=GcpTransferOperationStatus.SUCCESS, ) context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))} result = op.poke(context) self.assertFalse(result) mock_tool.operations_contain_expected_statuses.assert_called_once_with( operations=operations_set[0], 
expected_statuses={GcpTransferOperationStatus.SUCCESS} ) mock_tool.operations_contain_expected_statuses.reset_mock() result = op.poke(context) self.assertTrue(result) mock_tool.operations_contain_expected_statuses.assert_called_once_with( operations=operations_set[1], expected_statuses={GcpTransferOperationStatus.SUCCESS} ) @parameterized.expand( [ (GcpTransferOperationStatus.SUCCESS, {GcpTransferOperationStatus.SUCCESS}), ({GcpTransferOperationStatus.SUCCESS}, {GcpTransferOperationStatus.SUCCESS}), ( {GcpTransferOperationStatus.SUCCESS, GcpTransferOperationStatus.SUCCESS}, {GcpTransferOperationStatus.SUCCESS, GcpTransferOperationStatus.SUCCESS}, ), ] ) @mock.patch('airflow.gcp.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook') def test_wait_for_status_normalize_status(self, expected_status, received_status, mock_tool): operations = [{'metadata': {'status': GcpTransferOperationStatus.SUCCESS}}] mock_tool.return_value.list_transfer_operations.return_value = operations mock_tool.operations_contain_expected_statuses.side_effect = [False, True] op = CloudDataTransferServiceJobStatusSensor( task_id='task-id', job_name='job-name', project_id='project-id', expected_statuses=expected_status, ) context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))} result = op.poke(context) self.assertFalse(result) mock_tool.operations_contain_expected_statuses.assert_called_once_with( operations=operations, expected_statuses=received_status )
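The tests above drive poke() directly with a mocked hook. For orientation, here is a hedged sketch of how the same sensor might be wired into a DAG; the DAG id, schedule, and poke_interval are illustrative values, and only the constructor arguments mirrored from the tests (task_id, job_name, project_id, expected_statuses) are taken from this file.

# Hedged usage sketch (not part of the test module above).
from datetime import datetime, timedelta

from airflow import DAG
from airflow.gcp.hooks.cloud_storage_transfer_service import GcpTransferOperationStatus
from airflow.gcp.sensors.cloud_storage_transfer_service import CloudDataTransferServiceJobStatusSensor

with DAG(
    dag_id='example_transfer_job_wait',      # illustrative DAG id
    start_date=datetime(2020, 1, 1),
    schedule_interval=None,
) as dag:
    wait_for_transfer = CloudDataTransferServiceJobStatusSensor(
        task_id='wait-for-transfer',
        job_name='job-name',
        project_id='project-id',
        expected_statuses={GcpTransferOperationStatus.SUCCESS},
        poke_interval=timedelta(minutes=1).total_seconds(),  # standard sensor option, value illustrative
    )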
# Generated by Django 3.1.1 on 2020-10-19 16:09
import datetime

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('maps', '0011_auto_20201019_1839'),
    ]

    operations = [
        migrations.AlterField(
            model_name='trafficsignal',
            name='timer',
            field=models.DateTimeField(default=datetime.datetime(2020, 10, 19, 21, 39, 12, 862273)),
        ),
    ]
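Note that this migration freezes the timestamp captured when makemigrations ran into the field default, so every new row receives the same fixed datetime. If a per-row save-time default was intended, the usual model-level form is a callable default; the sketch below is a hypothetical model-side change (assuming the field lives on a TrafficSignal model), not part of the generated migration.

# Hypothetical model-side alternative: a callable, timezone-aware default.
from django.db import models
from django.utils import timezone

class TrafficSignal(models.Model):
    timer = models.DateTimeField(default=timezone.now)  # note: no parentheses, the callable is stored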
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-07-25 13:13
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('band', '0002_auto_20160725_1313'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='personal',
            name='id',
        ),
        migrations.AlterField(
            model_name='personal',
            name='username',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='band.Account'),
        ),
    ]
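This migration removes the implicit id column and promotes username to a primary-key foreign key onto band.Account. A hedged sketch of the model state it corresponds to is shown below; any other fields of Personal are omitted because the migration does not describe them, and the class body is an assumption reconstructed from the operations above.

# Hedged sketch of the resulting model; only the field touched by the migration is shown.
from django.db import models

class Personal(models.Model):
    username = models.ForeignKey(
        'band.Account',
        on_delete=models.CASCADE,
        primary_key=True,
    )

Since the foreign key doubles as the primary key, each Account can back at most one Personal row; a OneToOneField would express the same constraint more explicitly, but the migration as written uses ForeignKey.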
""" Augmenters that somehow change the size of the images. List of augmenters: * :class:`Resize` * :class:`CropAndPad` * :class:`Crop` * :class:`Pad` * :class:`PadToFixedSize` * :class:`CenterPadToFixedSize` * :class:`CropToFixedSize` * :class:`CenterCropToFixedSize` * :class:`CropToMultiplesOf` * :class:`CenterCropToMultiplesOf` * :class:`PadToMultiplesOf` * :class:`CenterPadToMultiplesOf` * :class:`CropToPowersOf` * :class:`CenterCropToPowersOf` * :class:`PadToPowersOf` * :class:`CenterPadToPowersOf` * :class:`CropToAspectRatio` * :class:`CenterCropToAspectRatio` * :class:`PadToAspectRatio` * :class:`CenterPadToAspectRatio` * :class:`CropToSquare` * :class:`CenterCropToSquare` * :class:`PadToSquare` * :class:`CenterPadToSquare` * :class:`KeepSizeByResize` """ from __future__ import print_function, division, absolute_import import re import functools import numpy as np import cv2 import imgaug as ia from imgaug.imgaug import _normalize_cv2_input_arr_ from . import meta from .. import parameters as iap def _crop_trbl_to_xyxy(shape, top, right, bottom, left, prevent_zero_size=True): if prevent_zero_size: top, right, bottom, left = _crop_prevent_zero_size( shape[0], shape[1], top, right, bottom, left) height, width = shape[0:2] x1 = left x2 = width - right y1 = top y2 = height - bottom # these steps prevent negative sizes # if x2==x1 or y2==y1 then the output arr has size 0 for the respective axis # note that if height/width of arr is zero, then y2==y1 or x2==x1, which # is still valid, even if height/width is zero and results in a zero-sized # axis x2 = max(x2, x1) y2 = max(y2, y1) return x1, y1, x2, y2 def _crop_arr_(arr, top, right, bottom, left, prevent_zero_size=True): x1, y1, x2, y2 = _crop_trbl_to_xyxy(arr.shape, top, right, bottom, left, prevent_zero_size=prevent_zero_size) return arr[y1:y2, x1:x2, ...] def _crop_and_pad_arr(arr, croppings, paddings, pad_mode="constant", pad_cval=0, keep_size=False): height, width = arr.shape[0:2] image_cr = _crop_arr_(arr, *croppings) image_cr_pa = pad( image_cr, top=paddings[0], right=paddings[1], bottom=paddings[2], left=paddings[3], mode=pad_mode, cval=pad_cval) if keep_size: image_cr_pa = ia.imresize_single_image(image_cr_pa, (height, width)) return image_cr_pa def _crop_and_pad_heatmap_(heatmap, croppings_img, paddings_img, pad_mode="constant", pad_cval=0.0, keep_size=False): return _crop_and_pad_hms_or_segmaps_(heatmap, croppings_img, paddings_img, pad_mode, pad_cval, keep_size) def _crop_and_pad_segmap_(segmap, croppings_img, paddings_img, pad_mode="constant", pad_cval=0, keep_size=False): return _crop_and_pad_hms_or_segmaps_(segmap, croppings_img, paddings_img, pad_mode, pad_cval, keep_size) def _crop_and_pad_hms_or_segmaps_(augmentable, croppings_img, paddings_img, pad_mode="constant", pad_cval=None, keep_size=False): if isinstance(augmentable, ia.HeatmapsOnImage): arr_attr_name = "arr_0to1" pad_cval = pad_cval if pad_cval is not None else 0.0 else: assert isinstance(augmentable, ia.SegmentationMapsOnImage), ( "Expected HeatmapsOnImage or SegmentationMapsOnImage, got %s." 
% ( type(augmentable))) arr_attr_name = "arr" pad_cval = pad_cval if pad_cval is not None else 0 arr = getattr(augmentable, arr_attr_name) arr_shape_orig = arr.shape augm_shape = augmentable.shape croppings_proj = _project_size_changes(croppings_img, augm_shape, arr.shape) paddings_proj = _project_size_changes(paddings_img, augm_shape, arr.shape) croppings_proj = _crop_prevent_zero_size(arr.shape[0], arr.shape[1], *croppings_proj) arr_cr = _crop_arr_(arr, croppings_proj[0], croppings_proj[1], croppings_proj[2], croppings_proj[3]) arr_cr_pa = pad( arr_cr, top=paddings_proj[0], right=paddings_proj[1], bottom=paddings_proj[2], left=paddings_proj[3], mode=pad_mode, cval=pad_cval) setattr(augmentable, arr_attr_name, arr_cr_pa) if keep_size: augmentable = augmentable.resize(arr_shape_orig[0:2]) else: augmentable.shape = _compute_shape_after_crop_and_pad( augmentable.shape, croppings_img, paddings_img) return augmentable def _crop_and_pad_kpsoi_(kpsoi, croppings_img, paddings_img, keep_size): # using the trbl function instead of croppings_img has the advantage # of incorporating prevent_zero_size, dealing with zero-sized input image # axis and dealing the negative crop amounts x1, y1, _x2, _y2 = _crop_trbl_to_xyxy(kpsoi.shape, *croppings_img) crop_left = x1 crop_top = y1 shape_orig = kpsoi.shape shifted = kpsoi.shift_( x=-crop_left+paddings_img[3], y=-crop_top+paddings_img[0]) shifted.shape = _compute_shape_after_crop_and_pad( shape_orig, croppings_img, paddings_img) if keep_size: shifted = shifted.on_(shape_orig) return shifted def _compute_shape_after_crop_and_pad(old_shape, croppings, paddings): x1, y1, x2, y2 = _crop_trbl_to_xyxy(old_shape, *croppings) new_shape = list(old_shape) new_shape[0] = y2 - y1 + paddings[0] + paddings[2] new_shape[1] = x2 - x1 + paddings[1] + paddings[3] return tuple(new_shape) def _crop_prevent_zero_size(height, width, crop_top, crop_right, crop_bottom, crop_left): remaining_height = height - (crop_top + crop_bottom) remaining_width = width - (crop_left + crop_right) if remaining_height < 1: regain = abs(remaining_height) + 1 regain_top = regain // 2 regain_bottom = regain // 2 if regain_top + regain_bottom < regain: regain_top += 1 if regain_top > crop_top: diff = regain_top - crop_top regain_top = crop_top regain_bottom += diff elif regain_bottom > crop_bottom: diff = regain_bottom - crop_bottom regain_bottom = crop_bottom regain_top += diff crop_top = crop_top - regain_top crop_bottom = crop_bottom - regain_bottom if remaining_width < 1: regain = abs(remaining_width) + 1 regain_right = regain // 2 regain_left = regain // 2 if regain_right + regain_left < regain: regain_right += 1 if regain_right > crop_right: diff = regain_right - crop_right regain_right = crop_right regain_left += diff elif regain_left > crop_left: diff = regain_left - crop_left regain_left = crop_left regain_right += diff crop_right = crop_right - regain_right crop_left = crop_left - regain_left return ( max(crop_top, 0), max(crop_right, 0), max(crop_bottom, 0), max(crop_left, 0)) def _project_size_changes(trbl, from_shape, to_shape): if from_shape[0:2] == to_shape[0:2]: return trbl height_to = to_shape[0] width_to = to_shape[1] height_from = from_shape[0] width_from = from_shape[1] top = trbl[0] right = trbl[1] bottom = trbl[2] left = trbl[3] # Adding/subtracting 1e-4 here helps for the case where a heatmap/segmap # is exactly half the size of an image and the size change on an axis is # an odd value. 
Then the projected value would end up being <something>.5 # and the rounding would always round up to the next integer. If both # sides then have the same change, they are both rounded up, resulting # in more change than expected. # E.g. image height is 8, map height is 4, change is 3 at the top and 3 at # the bottom. The changes are projected to 4*(3/8) = 1.5 and both rounded # up to 2.0. Hence, the maps are changed by 4 (100% of the map height, # vs. 6 for images, which is 75% of the image height). top = _int_r(height_to * (top/height_from) - 1e-4) right = _int_r(width_to * (right/width_from) + 1e-4) bottom = _int_r(height_to * (bottom/height_from) + 1e-4) left = _int_r(width_to * (left/width_from) - 1e-4) return top, right, bottom, left def _int_r(value): return int(np.round(value)) # TODO somehow integrate this with pad() def _handle_pad_mode_param(pad_mode): pad_modes_available = { "constant", "edge", "linear_ramp", "maximum", "mean", "median", "minimum", "reflect", "symmetric", "wrap"} if pad_mode == ia.ALL: return iap.Choice(list(pad_modes_available)) if ia.is_string(pad_mode): assert pad_mode in pad_modes_available, ( "Value '%s' is not a valid pad mode. Valid pad modes are: %s." % ( pad_mode, ", ".join(pad_modes_available))) return iap.Deterministic(pad_mode) if isinstance(pad_mode, list): assert all([v in pad_modes_available for v in pad_mode]), ( "At least one in list %s is not a valid pad mode. Valid pad " "modes are: %s." % (str(pad_mode), ", ".join(pad_modes_available))) return iap.Choice(pad_mode) if isinstance(pad_mode, iap.StochasticParameter): return pad_mode raise Exception( "Expected pad_mode to be ia.ALL or string or list of strings or " "StochasticParameter, got %s." % (type(pad_mode),)) def _handle_position_parameter(position): if position == "uniform": return iap.Uniform(0.0, 1.0), iap.Uniform(0.0, 1.0) if position == "normal": return ( iap.Clip(iap.Normal(loc=0.5, scale=0.35 / 2), minval=0.0, maxval=1.0), iap.Clip(iap.Normal(loc=0.5, scale=0.35 / 2), minval=0.0, maxval=1.0) ) if position == "center": return iap.Deterministic(0.5), iap.Deterministic(0.5) if (ia.is_string(position) and re.match(r"^(left|center|right)-(top|center|bottom)$", position)): mapping = {"top": 0.0, "center": 0.5, "bottom": 1.0, "left": 0.0, "right": 1.0} return ( iap.Deterministic(mapping[position.split("-")[0]]), iap.Deterministic(mapping[position.split("-")[1]]) ) if isinstance(position, iap.StochasticParameter): return position if isinstance(position, tuple): assert len(position) == 2, ( "Expected tuple with two entries as position parameter. " "Got %d entries with types %s.." % ( len(position), str([type(item) for item in position]))) for item in position: if ia.is_single_number(item) and (item < 0 or item > 1.0): raise Exception( "Both position values must be within the value range " "[0.0, 1.0]. Got type %s with value %.8f." % ( type(item), item,)) position = [iap.Deterministic(item) if ia.is_single_number(item) else item for item in position] only_sparams = all([isinstance(item, iap.StochasticParameter) for item in position]) assert only_sparams, ( "Expected tuple with two entries that are both either " "StochasticParameter or float/int. Got types %s." 
% ( str([type(item) for item in position]) )) return tuple(position) raise Exception( "Expected one of the following as position parameter: string " "'uniform', string 'normal', string 'center', a string matching " "regex ^(left|center|right)-(top|center|bottom)$, a single " "StochasticParameter or a tuple of two entries, both being either " "StochasticParameter or floats or int. Got instead type %s with " "content '%s'." % ( type(position), (str(position) if len(str(position)) < 20 else str(position)[0:20] + "...") ) ) # TODO this is the same as in imgaug.py, make DRY def _assert_two_or_three_dims(shape): if hasattr(shape, "shape"): shape = shape.shape assert len(shape) in [2, 3], ( "Expected image with two or three dimensions, but got %d dimensions " "and shape %s." % (len(shape), shape)) def pad(arr, top=0, right=0, bottom=0, left=0, mode="constant", cval=0): """Pad an image-like array on its top/right/bottom/left side. This function is a wrapper around :func:`numpy.pad`. Supported dtypes ---------------- * ``uint8``: yes; fully tested (1) * ``uint16``: yes; fully tested (1) * ``uint32``: yes; fully tested (2) (3) * ``uint64``: yes; fully tested (2) (3) * ``int8``: yes; fully tested (1) * ``int16``: yes; fully tested (1) * ``int32``: yes; fully tested (1) * ``int64``: yes; fully tested (2) (3) * ``float16``: yes; fully tested (2) (3) * ``float32``: yes; fully tested (1) * ``float64``: yes; fully tested (1) * ``float128``: yes; fully tested (2) (3) * ``bool``: yes; tested (2) (3) - (1) Uses ``cv2`` if `mode` is one of: ``"constant"``, ``"edge"``, ``"reflect"``, ``"symmetric"``. Otherwise uses ``numpy``. - (2) Uses ``numpy``. - (3) Rejected by ``cv2``. Parameters ---------- arr : (H,W) ndarray or (H,W,C) ndarray Image-like array to pad. top : int, optional Amount of pixels to add to the top side of the image. Must be ``0`` or greater. right : int, optional Amount of pixels to add to the right side of the image. Must be ``0`` or greater. bottom : int, optional Amount of pixels to add to the bottom side of the image. Must be ``0`` or greater. left : int, optional Amount of pixels to add to the left side of the image. Must be ``0`` or greater. mode : str, optional Padding mode to use. See :func:`numpy.pad` for details. In case of mode ``constant``, the parameter `cval` will be used as the ``constant_values`` parameter to :func:`numpy.pad`. In case of mode ``linear_ramp``, the parameter `cval` will be used as the ``end_values`` parameter to :func:`numpy.pad`. cval : number or iterable of number, optional Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details. The cval is expected to match the input array's dtype and value range. If an iterable is used, it is expected to contain one value per channel. The number of values and number of channels are expected to match. Returns ------- (H',W') ndarray or (H',W',C) ndarray Padded array with height ``H'=H+top+bottom`` and width ``W'=W+left+right``. """ import imgaug.dtypes as iadt _assert_two_or_three_dims(arr) assert all([v >= 0 for v in [top, right, bottom, left]]), ( "Expected padding amounts that are >=0, but got %d, %d, %d, %d " "(top, right, bottom, left)" % (top, right, bottom, left)) is_multi_cval = ia.is_iterable(cval) if top > 0 or right > 0 or bottom > 0 or left > 0: min_value, _, max_value = iadt.get_value_range_of_dtype(arr.dtype) # without the if here there are crashes for float128, e.g. 
if # cval is an int (just using float(cval) seems to not be accurate # enough) if arr.dtype.name == "float128": cval = np.float128(cval) # pylint: disable=no-member if is_multi_cval: cval = np.clip(cval, min_value, max_value) else: cval = max(min(cval, max_value), min_value) # Note that copyMakeBorder() hangs/runs endlessly if arr has an # axis of size 0 and mode is "reflect". # Numpy also complains in these cases if mode is not "constant". has_zero_sized_axis = any([axis == 0 for axis in arr.shape]) if has_zero_sized_axis: mode = "constant" mapping_mode_np_to_cv2 = { "constant": cv2.BORDER_CONSTANT, "edge": cv2.BORDER_REPLICATE, "linear_ramp": None, "maximum": None, "mean": None, "median": None, "minimum": None, "reflect": cv2.BORDER_REFLECT_101, "symmetric": cv2.BORDER_REFLECT, "wrap": None, cv2.BORDER_CONSTANT: cv2.BORDER_CONSTANT, cv2.BORDER_REPLICATE: cv2.BORDER_REPLICATE, cv2.BORDER_REFLECT_101: cv2.BORDER_REFLECT_101, cv2.BORDER_REFLECT: cv2.BORDER_REFLECT } bad_mode_cv2 = mapping_mode_np_to_cv2.get(mode, None) is None # these datatypes all simply generate a "TypeError: src data type = X # is not supported" error bad_datatype_cv2 = ( arr.dtype.name in ["uint32", "uint64", "int64", "float16", "float128", "bool"] ) # OpenCV turns the channel axis for arrays with 0 channels to 512 # TODO add direct test for this. indirectly tested via Pad bad_shape_cv2 = (arr.ndim == 3 and arr.shape[-1] == 0) if not bad_datatype_cv2 and not bad_mode_cv2 and not bad_shape_cv2: # convert cval to expected type, as otherwise we get TypeError # for np inputs kind = arr.dtype.kind if is_multi_cval: cval = [float(cval_c) if kind == "f" else int(cval_c) for cval_c in cval] else: cval = float(cval) if kind == "f" else int(cval) if arr.ndim == 2 or arr.shape[2] <= 4: # without this, only the first channel is padded with the cval, # all following channels with 0 if arr.ndim == 3 and not is_multi_cval: cval = tuple([cval] * arr.shape[2]) arr_pad = cv2.copyMakeBorder( _normalize_cv2_input_arr_(arr), top=top, bottom=bottom, left=left, right=right, borderType=mapping_mode_np_to_cv2[mode], value=cval) if arr.ndim == 3 and arr_pad.ndim == 2: arr_pad = arr_pad[..., np.newaxis] else: result = [] channel_start_idx = 0 cval = cval if is_multi_cval else tuple([cval] * arr.shape[2]) while channel_start_idx < arr.shape[2]: arr_c = arr[..., channel_start_idx:channel_start_idx+4] cval_c = cval[channel_start_idx:channel_start_idx+4] arr_pad_c = cv2.copyMakeBorder( _normalize_cv2_input_arr_(arr_c), top=top, bottom=bottom, left=left, right=right, borderType=mapping_mode_np_to_cv2[mode], value=cval_c) arr_pad_c = np.atleast_3d(arr_pad_c) result.append(arr_pad_c) channel_start_idx += 4 arr_pad = np.concatenate(result, axis=2) else: # paddings for 2d case paddings_np = [(top, bottom), (left, right)] # add paddings for 3d case if arr.ndim == 3: paddings_np.append((0, 0)) if mode == "constant": if arr.ndim > 2 and is_multi_cval: arr_pad_chans = [ np.pad(arr[..., c], paddings_np[0:2], mode=mode, constant_values=cval[c]) for c in np.arange(arr.shape[2])] arr_pad = np.stack(arr_pad_chans, axis=-1) else: arr_pad = np.pad(arr, paddings_np, mode=mode, constant_values=cval) elif mode == "linear_ramp": if arr.ndim > 2 and is_multi_cval: arr_pad_chans = [ np.pad(arr[..., c], paddings_np[0:2], mode=mode, end_values=cval[c]) for c in np.arange(arr.shape[2])] arr_pad = np.stack(arr_pad_chans, axis=-1) else: arr_pad = np.pad(arr, paddings_np, mode=mode, end_values=cval) else: arr_pad = np.pad(arr, paddings_np, mode=mode) return arr_pad return 
np.copy(arr) def pad_to_aspect_ratio(arr, aspect_ratio, mode="constant", cval=0, return_pad_amounts=False): """Pad an image array on its sides so that it matches a target aspect ratio. See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an explanation of how the required padding amounts are distributed per image axis. Supported dtypes ---------------- See :func:`~imgaug.augmenters.size.pad`. Parameters ---------- arr : (H,W) ndarray or (H,W,C) ndarray Image-like array to pad. aspect_ratio : float Target aspect ratio, given as width/height. E.g. ``2.0`` denotes the image having twice as much width as height. mode : str, optional Padding mode to use. See :func:`~imgaug.imgaug.pad` for details. cval : number, optional Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details. return_pad_amounts : bool, optional If ``False``, then only the padded image will be returned. If ``True``, a ``tuple`` with two entries will be returned, where the first entry is the padded image and the second entry are the amounts by which each image side was padded. These amounts are again a ``tuple`` of the form ``(top, right, bottom, left)``, with each value being an ``int``. Returns ------- (H',W') ndarray or (H',W',C) ndarray Padded image as ``(H',W')`` or ``(H',W',C)`` ndarray, fulfilling the given `aspect_ratio`. tuple of int Amounts by which the image was padded on each side, given as a ``tuple`` ``(top, right, bottom, left)``. This ``tuple`` is only returned if `return_pad_amounts` was set to ``True``. """ pad_top, pad_right, pad_bottom, pad_left = \ compute_paddings_to_reach_aspect_ratio(arr, aspect_ratio) arr_padded = pad( arr, top=pad_top, right=pad_right, bottom=pad_bottom, left=pad_left, mode=mode, cval=cval ) if return_pad_amounts: return arr_padded, (pad_top, pad_right, pad_bottom, pad_left) return arr_padded def pad_to_multiples_of(arr, height_multiple, width_multiple, mode="constant", cval=0, return_pad_amounts=False): """Pad an image array until its side lengths are multiples of given values. See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an explanation of how the required padding amounts are distributed per image axis. Supported dtypes ---------------- See :func:`~imgaug.augmenters.size.pad`. Parameters ---------- arr : (H,W) ndarray or (H,W,C) ndarray Image-like array to pad. height_multiple : None or int The desired multiple of the height. The computed padding amount will reflect a padding that increases the y axis size until it is a multiple of this value. width_multiple : None or int The desired multiple of the width. The computed padding amount will reflect a padding that increases the x axis size until it is a multiple of this value. mode : str, optional Padding mode to use. See :func:`~imgaug.imgaug.pad` for details. cval : number, optional Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details. return_pad_amounts : bool, optional If ``False``, then only the padded image will be returned. If ``True``, a ``tuple`` with two entries will be returned, where the first entry is the padded image and the second entry are the amounts by which each image side was padded. These amounts are again a ``tuple`` of the form ``(top, right, bottom, left)``, with each value being an integer. Returns ------- (H',W') ndarray or (H',W',C) ndarray Padded image as ``(H',W')`` or ``(H',W',C)`` ndarray. tuple of int Amounts by which the image was padded on each side, given as a ``tuple`` ``(top, right, bottom, left)``. 
This ``tuple`` is only returned if `return_pad_amounts` was set to ``True``. """ pad_top, pad_right, pad_bottom, pad_left = \ compute_paddings_to_reach_multiples_of( arr, height_multiple, width_multiple) arr_padded = pad( arr, top=pad_top, right=pad_right, bottom=pad_bottom, left=pad_left, mode=mode, cval=cval ) if return_pad_amounts: return arr_padded, (pad_top, pad_right, pad_bottom, pad_left) return arr_padded def compute_paddings_to_reach_aspect_ratio(arr, aspect_ratio): """Compute pad amounts required to fulfill an aspect ratio. "Pad amounts" here denotes the number of pixels that have to be added to each side to fulfill the desired constraint. The aspect ratio is given as ``ratio = width / height``. Depending on which dimension is smaller (height or width), only the corresponding sides (top/bottom or left/right) will be padded. The axis-wise padding amounts are always distributed equally over the sides of the respective axis (i.e. left and right, top and bottom). For odd pixel amounts, one pixel will be left over after the equal distribution and could be added to either side of the axis. This function will always add such a left over pixel to the bottom (y-axis) or right (x-axis) side. Parameters ---------- arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int Image-like array or shape tuple for which to compute pad amounts. aspect_ratio : float Target aspect ratio, given as width/height. E.g. ``2.0`` denotes the image having twice as much width as height. Returns ------- tuple of int Required padding amounts to reach the target aspect ratio, given as a ``tuple`` of the form ``(top, right, bottom, left)``. """ _assert_two_or_three_dims(arr) assert aspect_ratio > 0, ( "Expected to get an aspect ratio >0, got %.4f." % (aspect_ratio,)) pad_top = 0 pad_right = 0 pad_bottom = 0 pad_left = 0 shape = arr.shape if hasattr(arr, "shape") else arr height, width = shape[0:2] if height == 0: height = 1 pad_bottom += 1 if width == 0: width = 1 pad_right += 1 aspect_ratio_current = width / height if aspect_ratio_current < aspect_ratio: # image is more vertical than desired, width needs to be increased diff = (aspect_ratio * height) - width pad_right += int(np.ceil(diff / 2)) pad_left += int(np.floor(diff / 2)) elif aspect_ratio_current > aspect_ratio: # image is more horizontal than desired, height needs to be increased diff = ((1/aspect_ratio) * width) - height pad_top += int(np.floor(diff / 2)) pad_bottom += int(np.ceil(diff / 2)) return pad_top, pad_right, pad_bottom, pad_left def compute_croppings_to_reach_aspect_ratio(arr, aspect_ratio): """Compute crop amounts required to fulfill an aspect ratio. "Crop amounts" here denotes the number of pixels that have to be removed from each side to fulfill the desired constraint. The aspect ratio is given as ``ratio = width / height``. Depending on which dimension is smaller (height or width), only the corresponding sides (top/bottom or left/right) will be cropped. The axis-wise padding amounts are always distributed equally over the sides of the respective axis (i.e. left and right, top and bottom). For odd pixel amounts, one pixel will be left over after the equal distribution and could be added to either side of the axis. This function will always add such a left over pixel to the bottom (y-axis) or right (x-axis) side. If an aspect ratio cannot be reached exactly, this function will return rather one pixel too few than one pixel too many. 
Parameters ---------- arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int Image-like array or shape tuple for which to compute crop amounts. aspect_ratio : float Target aspect ratio, given as width/height. E.g. ``2.0`` denotes the image having twice as much width as height. Returns ------- tuple of int Required cropping amounts to reach the target aspect ratio, given as a ``tuple`` of the form ``(top, right, bottom, left)``. """ _assert_two_or_three_dims(arr) assert aspect_ratio > 0, ( "Expected to get an aspect ratio >0, got %.4f." % (aspect_ratio,)) shape = arr.shape if hasattr(arr, "shape") else arr assert shape[0] > 0, ( "Expected to get an array with height >0, got shape %s." % (shape,)) height, width = shape[0:2] aspect_ratio_current = width / height top = 0 right = 0 bottom = 0 left = 0 if aspect_ratio_current < aspect_ratio: # image is more vertical than desired, height needs to be reduced # c = H - W/r crop_amount = height - (width / aspect_ratio) crop_amount = min(crop_amount, height - 1) top = int(np.floor(crop_amount / 2)) bottom = int(np.ceil(crop_amount / 2)) elif aspect_ratio_current > aspect_ratio: # image is more horizontal than desired, width needs to be reduced # c = W - Hr crop_amount = width - height * aspect_ratio crop_amount = min(crop_amount, width - 1) left = int(np.floor(crop_amount / 2)) right = int(np.ceil(crop_amount / 2)) return top, right, bottom, left def compute_paddings_to_reach_multiples_of(arr, height_multiple, width_multiple): """Compute pad amounts until img height/width are multiples of given values. See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an explanation of how the required padding amounts are distributed per image axis. Parameters ---------- arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int Image-like array or shape tuple for which to compute pad amounts. height_multiple : None or int The desired multiple of the height. The computed padding amount will reflect a padding that increases the y axis size until it is a multiple of this value. width_multiple : None or int The desired multiple of the width. The computed padding amount will reflect a padding that increases the x axis size until it is a multiple of this value. Returns ------- tuple of int Required padding amounts to reach multiples of the provided values, given as a ``tuple`` of the form ``(top, right, bottom, left)``. """ def _compute_axis_value(axis_size, multiple): if multiple is None: return 0, 0 if axis_size == 0: to_pad = multiple elif axis_size % multiple == 0: to_pad = 0 else: to_pad = multiple - (axis_size % multiple) return int(np.floor(to_pad/2)), int(np.ceil(to_pad/2)) _assert_two_or_three_dims(arr) if height_multiple is not None: assert height_multiple > 0, ( "Can only pad to multiples of 1 or larger, got %d." % ( height_multiple,)) if width_multiple is not None: assert width_multiple > 0, ( "Can only pad to multiples of 1 or larger, got %d." % ( width_multiple,)) shape = arr.shape if hasattr(arr, "shape") else arr height, width = shape[0:2] top, bottom = _compute_axis_value(height, height_multiple) left, right = _compute_axis_value(width, width_multiple) return top, right, bottom, left def compute_croppings_to_reach_multiples_of(arr, height_multiple, width_multiple): """Compute croppings to reach multiples of given heights/widths. See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an explanation of how the required cropping amounts are distributed per image axis. 
Parameters ---------- arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int Image-like array or shape tuple for which to compute crop amounts. height_multiple : None or int The desired multiple of the height. The computed croppings will reflect a crop operation that decreases the y axis size until it is a multiple of this value. width_multiple : None or int The desired multiple of the width. The computed croppings amount will reflect a crop operation that decreases the x axis size until it is a multiple of this value. Returns ------- tuple of int Required cropping amounts to reach multiples of the provided values, given as a ``tuple`` of the form ``(top, right, bottom, left)``. """ def _compute_axis_value(axis_size, multiple): if multiple is None: return 0, 0 if axis_size == 0: to_crop = 0 elif axis_size % multiple == 0: to_crop = 0 else: to_crop = axis_size % multiple return int(np.floor(to_crop/2)), int(np.ceil(to_crop/2)) _assert_two_or_three_dims(arr) if height_multiple is not None: assert height_multiple > 0, ( "Can only crop to multiples of 1 or larger, got %d." % ( height_multiple,)) if width_multiple is not None: assert width_multiple > 0, ( "Can only crop to multiples of 1 or larger, got %d." % ( width_multiple,)) shape = arr.shape if hasattr(arr, "shape") else arr height, width = shape[0:2] top, bottom = _compute_axis_value(height, height_multiple) left, right = _compute_axis_value(width, width_multiple) return top, right, bottom, left def compute_paddings_to_reach_powers_of(arr, height_base, width_base, allow_zero_exponent=False): """Compute paddings to reach powers of given base values. For given axis size ``S``, padded size ``S'`` (``S' >= S``) and base ``B`` this function computes paddings that fulfill ``S' = B^E``, where ``E`` is any exponent from the discrete interval ``[0 .. inf)``. See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an explanation of how the required padding amounts are distributed per image axis. Parameters ---------- arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int Image-like array or shape tuple for which to compute pad amounts. height_base : None or int The desired base of the height. width_base : None or int The desired base of the width. allow_zero_exponent : bool, optional Whether ``E=0`` in ``S'=B^E`` is a valid value. If ``True``, axes with size ``0`` or ``1`` will be padded up to size ``B^0=1`` and axes with size ``1 < S <= B`` will be padded up to ``B^1=B``. If ``False``, the minimum output axis size is always at least ``B``. Returns ------- tuple of int Required padding amounts to fulfill ``S' = B^E`` given as a ``tuple`` of the form ``(top, right, bottom, left)``. """ def _compute_axis_value(axis_size, base): if base is None: return 0, 0 if axis_size == 0: to_pad = 1 if allow_zero_exponent else base elif axis_size <= base: to_pad = base - axis_size else: # log_{base}(axis_size) in numpy exponent = np.log(axis_size) / np.log(base) to_pad = (base ** int(np.ceil(exponent))) - axis_size return int(np.floor(to_pad/2)), int(np.ceil(to_pad/2)) _assert_two_or_three_dims(arr) if height_base is not None: assert height_base > 1, ( "Can only pad to base larger than 1, got %d." % (height_base,)) if width_base is not None: assert width_base > 1, ( "Can only pad to base larger than 1, got %d." 
% (width_base,)) shape = arr.shape if hasattr(arr, "shape") else arr height, width = shape[0:2] top, bottom = _compute_axis_value(height, height_base) left, right = _compute_axis_value(width, width_base) return top, right, bottom, left def compute_croppings_to_reach_powers_of(arr, height_base, width_base, allow_zero_exponent=False): """Compute croppings to reach powers of given base values. For given axis size ``S``, cropped size ``S'`` (``S' <= S``) and base ``B`` this function computes croppings that fulfill ``S' = B^E``, where ``E`` is any exponent from the discrete interval ``[0 .. inf)``. See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an explanation of how the required cropping amounts are distributed per image axis. .. note:: For axes where ``S == 0``, this function alwayws returns zeros as croppings. For axes where ``1 <= S < B`` see parameter `allow_zero_exponent`. Parameters ---------- arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int Image-like array or shape tuple for which to compute crop amounts. height_base : None or int The desired base of the height. width_base : None or int The desired base of the width. allow_zero_exponent : bool Whether ``E=0`` in ``S'=B^E`` is a valid value. If ``True``, axes with size ``1 <= S < B`` will be cropped to size ``B^0=1``. If ``False``, axes with sizes ``S < B`` will not be changed. Returns ------- tuple of int Required cropping amounts to fulfill ``S' = B^E`` given as a ``tuple`` of the form ``(top, right, bottom, left)``. """ def _compute_axis_value(axis_size, base): if base is None: return 0, 0 if axis_size == 0: to_crop = 0 elif axis_size < base: # crop down to B^0 = 1 to_crop = axis_size - 1 if allow_zero_exponent else 0 else: # log_{base}(axis_size) in numpy exponent = np.log(axis_size) / np.log(base) to_crop = axis_size - (base ** int(exponent)) return int(np.floor(to_crop/2)), int(np.ceil(to_crop/2)) _assert_two_or_three_dims(arr) if height_base is not None: assert height_base > 1, ( "Can only crop to base larger than 1, got %d." % (height_base,)) if width_base is not None: assert width_base > 1, ( "Can only crop to base larger than 1, got %d." % (width_base,)) shape = arr.shape if hasattr(arr, "shape") else arr height, width = shape[0:2] top, bottom = _compute_axis_value(height, height_base) left, right = _compute_axis_value(width, width_base) return top, right, bottom, left @ia.deprecated(alt_func="Resize", comment="Resize has the exactly same interface as Scale.") def Scale(*args, **kwargs): """Augmenter that resizes images to specified heights and widths.""" # pylint: disable=invalid-name return Resize(*args, **kwargs) class Resize(meta.Augmenter): """Augmenter that resizes images to specified heights and widths. Supported dtypes ---------------- See :func:`~imgaug.imgaug.imresize_many_images`. Parameters ---------- size : 'keep' or int or float or tuple of int or tuple of float or list of int or list of float or imgaug.parameters.StochasticParameter or dict The new size of the images. * If this has the string value ``keep``, the original height and width values will be kept (image is not resized). * If this is an ``int``, this value will always be used as the new height and width of the images. * If this is a ``float`` ``v``, then per image the image's height ``H`` and width ``W`` will be changed to ``H*v`` and ``W*v``. * If this is a ``tuple``, it is expected to have two entries ``(a, b)``. 
If at least one of these are ``float`` s, a value will be sampled from range ``[a, b]`` and used as the ``float`` value to resize the image (see above). If both are ``int`` s, a value will be sampled from the discrete range ``[a..b]`` and used as the integer value to resize the image (see above). * If this is a ``list``, a random value from the ``list`` will be picked to resize the image. All values in the ``list`` must be ``int`` s or ``float`` s (no mixture is possible). * If this is a ``StochasticParameter``, then this parameter will first be queried once per image. The resulting value will be used for both height and width. * If this is a ``dict``, it may contain the keys ``height`` and ``width`` or the keys ``shorter-side`` and ``longer-side``. Each key may have the same datatypes as above and describes the scaling on x and y-axis or the shorter and longer axis, respectively. Both axis are sampled independently. Additionally, one of the keys may have the value ``keep-aspect-ratio``, which means that the respective side of the image will be resized so that the original aspect ratio is kept. This is useful when only resizing one image size by a pixel value (e.g. resize images to a height of ``64`` pixels and resize the width so that the overall aspect ratio is maintained). interpolation : imgaug.ALL or int or str or list of int or list of str or imgaug.parameters.StochasticParameter, optional Interpolation to use. * If ``imgaug.ALL``, then a random interpolation from ``nearest``, ``linear``, ``area`` or ``cubic`` will be picked (per image). * If ``int``, then this interpolation will always be used. Expected to be any of the following: ``cv2.INTER_NEAREST``, ``cv2.INTER_LINEAR``, ``cv2.INTER_AREA``, ``cv2.INTER_CUBIC`` * If string, then this interpolation will always be used. Expected to be any of the following: ``nearest``, ``linear``, ``area``, ``cubic`` * If ``list`` of ``int`` / ``str``, then a random one of the values will be picked per image as the interpolation. * If a ``StochasticParameter``, then this parameter will be queried per image and is expected to return an ``int`` or ``str``. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.Resize(32) Resize all images to ``32x32`` pixels. >>> aug = iaa.Resize(0.5) Resize all images to ``50`` percent of their original size. >>> aug = iaa.Resize((16, 22)) Resize all images to a random height and width within the discrete interval ``[16..22]`` (uniformly sampled per image). >>> aug = iaa.Resize((0.5, 0.75)) Resize all any input image so that its height (``H``) and width (``W``) become ``H*v`` and ``W*v``, where ``v`` is uniformly sampled from the interval ``[0.5, 0.75]``. >>> aug = iaa.Resize([16, 32, 64]) Resize all images either to ``16x16``, ``32x32`` or ``64x64`` pixels. >>> aug = iaa.Resize({"height": 32}) Resize all images to a height of ``32`` pixels and keeps the original width. >>> aug = iaa.Resize({"height": 32, "width": 48}) Resize all images to a height of ``32`` pixels and a width of ``48``. 
>>> aug = iaa.Resize({"height": 32, "width": "keep-aspect-ratio"}) Resize all images to a height of ``32`` pixels and resizes the x-axis (width) so that the aspect ratio is maintained. >>> aug = iaa.Resize( >>> {"shorter-side": 224, "longer-side": "keep-aspect-ratio"}) Resize all images to a height/width of ``224`` pixels, depending on which axis is shorter and resize the other axis so that the aspect ratio is maintained. >>> aug = iaa.Resize({"height": (0.5, 0.75), "width": [16, 32, 64]}) Resize all images to a height of ``H*v``, where ``H`` is the original height and ``v`` is a random value sampled from the interval ``[0.5, 0.75]``. The width/x-axis of each image is resized to either ``16`` or ``32`` or ``64`` pixels. >>> aug = iaa.Resize(32, interpolation=["linear", "cubic"]) Resize all images to ``32x32`` pixels. Randomly use either ``linear`` or ``cubic`` interpolation. """ def __init__(self, size, interpolation="cubic", seed=None, name=None, **old_kwargs): super(Resize, self).__init__( seed=seed, name=name, **old_kwargs) self.size, self.size_order = self._handle_size_arg(size, False) self.interpolation = self._handle_interpolation_arg(interpolation) @classmethod def _handle_size_arg(cls, size, subcall): def _dict_to_size_tuple(val1, val2): kaa = "keep-aspect-ratio" not_both_kaa = (val1 != kaa or val2 != kaa) assert not_both_kaa, ( "Expected at least one value to not be \"keep-aspect-ratio\", " "but got it two times.") size_tuple = [] for k in [val1, val2]: if k in ["keep-aspect-ratio", "keep"]: entry = iap.Deterministic(k) else: entry = cls._handle_size_arg(k, True) size_tuple.append(entry) return tuple(size_tuple) def _contains_any_key(dict_, keys): return any([key in dict_ for key in keys]) # HW = height, width # SL = shorter, longer size_order = "HW" if size == "keep": result = iap.Deterministic("keep") elif ia.is_single_number(size): assert size > 0, "Expected only values > 0, got %s" % (size,) result = iap.Deterministic(size) elif not subcall and isinstance(size, dict): if len(size.keys()) == 0: result = iap.Deterministic("keep") elif _contains_any_key(size, ["height", "width"]): height = size.get("height", "keep") width = size.get("width", "keep") result = _dict_to_size_tuple(height, width) elif _contains_any_key(size, ["shorter-side", "longer-side"]): shorter = size.get("shorter-side", "keep") longer = size.get("longer-side", "keep") result = _dict_to_size_tuple(shorter, longer) size_order = "SL" else: raise ValueError( "Expected dictionary containing no keys, " "the keys \"height\" and/or \"width\", " "or the keys \"shorter-side\" and/or \"longer-side\". " "Got keys: %s." % (str(size.keys()),)) elif isinstance(size, tuple): assert len(size) == 2, ( "Expected size tuple to contain exactly 2 values, " "got %d." % (len(size),)) assert size[0] > 0 and size[1] > 0, ( "Expected size tuple to only contain values >0, " "got %d and %d." 
% (size[0], size[1])) if ia.is_single_float(size[0]) or ia.is_single_float(size[1]): result = iap.Uniform(size[0], size[1]) else: result = iap.DiscreteUniform(size[0], size[1]) elif isinstance(size, list): if len(size) == 0: result = iap.Deterministic("keep") else: all_int = all([ia.is_single_integer(v) for v in size]) all_float = all([ia.is_single_float(v) for v in size]) assert all_int or all_float, ( "Expected to get only integers or floats.") assert all([v > 0 for v in size]), ( "Expected all values to be >0.") result = iap.Choice(size) elif isinstance(size, iap.StochasticParameter): result = size else: raise ValueError( "Expected number, tuple of two numbers, list of numbers, " "dictionary of form " "{'height': number/tuple/list/'keep-aspect-ratio'/'keep', " "'width': <analogous>}, dictionary of form " "{'shorter-side': number/tuple/list/'keep-aspect-ratio'/" "'keep', 'longer-side': <analogous>} " "or StochasticParameter, got %s." % (type(size),) ) if subcall: return result return result, size_order @classmethod def _handle_interpolation_arg(cls, interpolation): if interpolation == ia.ALL: interpolation = iap.Choice( ["nearest", "linear", "area", "cubic"]) elif ia.is_single_integer(interpolation): interpolation = iap.Deterministic(interpolation) elif ia.is_string(interpolation): interpolation = iap.Deterministic(interpolation) elif ia.is_iterable(interpolation): interpolation = iap.Choice(interpolation) elif isinstance(interpolation, iap.StochasticParameter): pass else: raise Exception( "Expected int or string or iterable or StochasticParameter, " "got %s." % (type(interpolation),)) return interpolation def _augment_batch_(self, batch, random_state, parents, hooks): nb_rows = batch.nb_rows samples = self._draw_samples(nb_rows, random_state) if batch.images is not None: batch.images = self._augment_images_by_samples(batch.images, samples) if batch.heatmaps is not None: # TODO this uses the same interpolation as for images for heatmaps # while other augmenters resort to cubic batch.heatmaps = self._augment_maps_by_samples( batch.heatmaps, "arr_0to1", samples) if batch.segmentation_maps is not None: batch.segmentation_maps = self._augment_maps_by_samples( batch.segmentation_maps, "arr", (samples[0], samples[1], [None] * nb_rows)) for augm_name in ["keypoints", "bounding_boxes", "polygons", "line_strings"]: augm_value = getattr(batch, augm_name) if augm_value is not None: func = functools.partial( self._augment_keypoints_by_samples, samples=samples) cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func) setattr(batch, augm_name, cbaois) return batch def _augment_images_by_samples(self, images, samples): input_was_array = False input_dtype = None if ia.is_np_array(images): input_was_array = True input_dtype = images.dtype samples_a, samples_b, samples_ip = samples result = [] for i, image in enumerate(images): h, w = self._compute_height_width(image.shape, samples_a[i], samples_b[i], self.size_order) image_rs = ia.imresize_single_image(image, (h, w), interpolation=samples_ip[i]) result.append(image_rs) if input_was_array: all_same_size = (len({image.shape for image in result}) == 1) if all_same_size: result = np.array(result, dtype=input_dtype) return result def _augment_maps_by_samples(self, augmentables, arr_attr_name, samples): result = [] samples_h, samples_w, samples_ip = samples for i, augmentable in enumerate(augmentables): arr = getattr(augmentable, arr_attr_name) arr_shape = arr.shape img_shape = augmentable.shape h_img, w_img = self._compute_height_width( img_shape, 
samples_h[i], samples_w[i], self.size_order) h = int(np.round(h_img * (arr_shape[0] / img_shape[0]))) w = int(np.round(w_img * (arr_shape[1] / img_shape[1]))) h = max(h, 1) w = max(w, 1) if samples_ip[0] is not None: # TODO change this for heatmaps to always have cubic or # automatic interpolation? augmentable_resize = augmentable.resize( (h, w), interpolation=samples_ip[i]) else: augmentable_resize = augmentable.resize((h, w)) augmentable_resize.shape = (h_img, w_img) + img_shape[2:] result.append(augmentable_resize) return result def _augment_keypoints_by_samples(self, kpsois, samples): result = [] samples_a, samples_b, _samples_ip = samples for i, kpsoi in enumerate(kpsois): h, w = self._compute_height_width( kpsoi.shape, samples_a[i], samples_b[i], self.size_order) new_shape = (h, w) + kpsoi.shape[2:] keypoints_on_image_rs = kpsoi.on_(new_shape) result.append(keypoints_on_image_rs) return result def _draw_samples(self, nb_images, random_state): rngs = random_state.duplicate(3) if isinstance(self.size, tuple): samples_h = self.size[0].draw_samples(nb_images, random_state=rngs[0]) samples_w = self.size[1].draw_samples(nb_images, random_state=rngs[1]) else: samples_h = self.size.draw_samples(nb_images, random_state=rngs[0]) samples_w = samples_h samples_ip = self.interpolation.draw_samples(nb_images, random_state=rngs[2]) return samples_h, samples_w, samples_ip @classmethod def _compute_height_width(cls, image_shape, sample_a, sample_b, size_order): imh, imw = image_shape[0:2] if size_order == 'SL': # size order: short, long if imh < imw: h, w = sample_a, sample_b else: w, h = sample_a, sample_b else: # size order: height, width h, w = sample_a, sample_b if ia.is_single_float(h): assert h > 0, "Expected 'h' to be >0, got %.4f" % (h,) h = int(np.round(imh * h)) h = h if h > 0 else 1 elif h == "keep": h = imh if ia.is_single_float(w): assert w > 0, "Expected 'w' to be >0, got %.4f" % (w,) w = int(np.round(imw * w)) w = w if w > 0 else 1 elif w == "keep": w = imw # at least the checks for keep-aspect-ratio must come after # the float checks, as they are dependent on the results # this is also why these are not written as elifs if h == "keep-aspect-ratio": h_per_w_orig = imh / imw h = int(np.round(w * h_per_w_orig)) if w == "keep-aspect-ratio": w_per_h_orig = imw / imh w = int(np.round(h * w_per_h_orig)) return h, w def get_parameters(self): """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.""" return [self.size, self.interpolation, self.size_order] class _CropAndPadSamplingResult(object): def __init__(self, crop_top, crop_right, crop_bottom, crop_left, pad_top, pad_right, pad_bottom, pad_left, pad_mode, pad_cval): self.crop_top = crop_top self.crop_right = crop_right self.crop_bottom = crop_bottom self.crop_left = crop_left self.pad_top = pad_top self.pad_right = pad_right self.pad_bottom = pad_bottom self.pad_left = pad_left self.pad_mode = pad_mode self.pad_cval = pad_cval @property def croppings(self): """Get absolute pixel amounts of croppings as a TRBL tuple.""" return self.crop_top, self.crop_right, self.crop_bottom, self.crop_left @property def paddings(self): """Get absolute pixel amounts of paddings as a TRBL tuple.""" return self.pad_top, self.pad_right, self.pad_bottom, self.pad_left class CropAndPad(meta.Augmenter): """Crop/pad images by pixel amounts or fractions of image sizes. Cropping removes pixels at the sides (i.e. extracts a subimage from a given full image). Padding adds pixels to the sides (e.g. black pixels). 
This augmenter will never crop images below a height or width of ``1``. .. note:: This augmenter automatically resizes images back to their original size after it has augmented them. To deactivate this, add the parameter ``keep_size=False``. Supported dtypes ---------------- if (keep_size=False): * ``uint8``: yes; fully tested * ``uint16``: yes; tested * ``uint32``: yes; tested * ``uint64``: yes; tested * ``int8``: yes; tested * ``int16``: yes; tested * ``int32``: yes; tested * ``int64``: yes; tested * ``float16``: yes; tested * ``float32``: yes; tested * ``float64``: yes; tested * ``float128``: yes; tested * ``bool``: yes; tested if (keep_size=True): minimum of ( ``imgaug.augmenters.size.CropAndPad(keep_size=False)``, :func:`~imgaug.imgaug.imresize_many_images` ) Parameters ---------- px : None or int or imgaug.parameters.StochasticParameter or tuple, optional The number of pixels to crop (negative values) or pad (positive values) on each side of the image. Either this or the parameter `percent` may be set, not both at the same time. * If ``None``, then pixel-based cropping/padding will not be used. * If ``int``, then that exact number of pixels will always be cropped/padded. * If ``StochasticParameter``, then that parameter will be used for each image. Four samples will be drawn per image (top, right, bottom, left), unless `sample_independently` is set to ``False``, as then only one value will be sampled per image and used for all sides. * If a ``tuple`` of two ``int`` s with values ``a`` and ``b``, then each side will be cropped/padded by a random amount sampled uniformly per image and side from the inteval ``[a, b]``. If however `sample_independently` is set to ``False``, only one value will be sampled per image and used for all sides. * If a ``tuple`` of four entries, then the entries represent top, right, bottom, left. Each entry may be a single ``int`` (always crop/pad by exactly that value), a ``tuple`` of two ``int`` s ``a`` and ``b`` (crop/pad by an amount within ``[a, b]``), a ``list`` of ``int`` s (crop/pad by a random value that is contained in the ``list``) or a ``StochasticParameter`` (sample the amount to crop/pad from that parameter). percent : None or number or imgaug.parameters.StochasticParameter or tuple, optional The number of pixels to crop (negative values) or pad (positive values) on each side of the image given as a *fraction* of the image height/width. E.g. if this is set to ``-0.1``, the augmenter will always crop away ``10%`` of the image's height at both the top and the bottom (both ``10%`` each), as well as ``10%`` of the width at the right and left. Expected value range is ``(-1.0, inf)``. Either this or the parameter `px` may be set, not both at the same time. * If ``None``, then fraction-based cropping/padding will not be used. * If ``number``, then that fraction will always be cropped/padded. * If ``StochasticParameter``, then that parameter will be used for each image. Four samples will be drawn per image (top, right, bottom, left). If however `sample_independently` is set to ``False``, only one value will be sampled per image and used for all sides. * If a ``tuple`` of two ``float`` s with values ``a`` and ``b``, then each side will be cropped/padded by a random fraction sampled uniformly per image and side from the interval ``[a, b]``. If however `sample_independently` is set to ``False``, only one value will be sampled per image and used for all sides. * If a ``tuple`` of four entries, then the entries represent top, right, bottom, left. 
Each entry may be a single ``float`` (always crop/pad by exactly that percent value), a ``tuple`` of two ``float`` s ``a`` and ``b`` (crop/pad by a fraction from ``[a, b]``), a ``list`` of ``float`` s (crop/pad by a random value that is contained in the list) or a ``StochasticParameter`` (sample the percentage to crop/pad from that parameter). pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional Padding mode to use. The available modes match the numpy padding modes, i.e. ``constant``, ``edge``, ``linear_ramp``, ``maximum``, ``median``, ``minimum``, ``reflect``, ``symmetric``, ``wrap``. The modes ``constant`` and ``linear_ramp`` use extra values, which are provided by ``pad_cval`` when necessary. See :func:`~imgaug.imgaug.pad` for more details. * If ``imgaug.ALL``, then a random mode from all available modes will be sampled per image. * If a ``str``, it will be used as the pad mode for all images. * If a ``list`` of ``str``, a random one of these will be sampled per image and used as the mode. * If ``StochasticParameter``, a random mode will be sampled from this parameter per image. pad_cval : number or tuple of number list of number or imgaug.parameters.StochasticParameter, optional The constant value to use if the pad mode is ``constant`` or the end value to use if the mode is ``linear_ramp``. See :func:`~imgaug.imgaug.pad` for more details. * If ``number``, then that value will be used. * If a ``tuple`` of two ``number`` s and at least one of them is a ``float``, then a random number will be uniformly sampled per image from the continuous interval ``[a, b]`` and used as the value. If both ``number`` s are ``int`` s, the interval is discrete. * If a ``list`` of ``number``, then a random value will be chosen from the elements of the ``list`` and used as the value. * If ``StochasticParameter``, a random value will be sampled from that parameter per image. keep_size : bool, optional After cropping and padding, the result image will usually have a different height/width compared to the original input image. If this parameter is set to ``True``, then the cropped/padded image will be resized to the input image's size, i.e. the augmenter's output shape is always identical to the input shape. sample_independently : bool, optional If ``False`` *and* the values for `px`/`percent` result in exactly *one* probability distribution for all image sides, only one single value will be sampled from that probability distribution and used for all sides. I.e. the crop/pad amount then is the same for all sides. If ``True``, four values will be sampled independently, one per side. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.CropAndPad(px=(-10, 0)) Crop each side by a random pixel value sampled uniformly per image and side from the discrete interval ``[-10..0]``. >>> aug = iaa.CropAndPad(px=(0, 10)) Pad each side by a random pixel value sampled uniformly per image and side from the discrete interval ``[0..10]``. The padding happens by zero-padding, i.e. it adds black pixels (default setting). 
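A further, purely illustrative example (the fraction range below is chosen arbitrarily and is not taken from the original documentation): cropping and padding can also be mixed via the `percent` parameter. >>> aug = iaa.CropAndPad(percent=(-0.1, 0.1)) Sample per image and side a fraction ``v`` uniformly from the continuous interval ``[-0.1, 0.1]``, then crop (negative ``v``) or pad (positive ``v``) that side by ``v`` times the corresponding image size.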
>>> aug = iaa.CropAndPad(px=(0, 10), pad_mode="edge") Pad each side by a random pixel value sampled uniformly per image and side from the discrete interval ``[0..10]``. The padding uses the ``edge`` mode from numpy's pad function, i.e. the pixel colors around the image sides are repeated. >>> aug = iaa.CropAndPad(px=(0, 10), pad_mode=["constant", "edge"]) Similar to the previous example, but uses zero-padding (``constant``) for half of the images and ``edge`` padding for the other half. >>> aug = iaa.CropAndPad(px=(0, 10), pad_mode=ia.ALL, pad_cval=(0, 255)) Similar to the previous example, but uses any available padding mode. In case the padding mode ends up being ``constant`` or ``linear_ramp``, and random intensity is uniformly sampled (once per image) from the discrete interval ``[0..255]`` and used as the intensity of the new pixels. >>> aug = iaa.CropAndPad(px=(0, 10), sample_independently=False) Pad each side by a random pixel value sampled uniformly once per image from the discrete interval ``[0..10]``. Each sampled value is used for *all* sides of the corresponding image. >>> aug = iaa.CropAndPad(px=(0, 10), keep_size=False) Pad each side by a random pixel value sampled uniformly per image and side from the discrete interval ``[0..10]``. Afterwards, do **not** resize the padded image back to the input image's size. This will increase the image's height and width by a maximum of ``20`` pixels. >>> aug = iaa.CropAndPad(px=((0, 10), (0, 5), (0, 10), (0, 5))) Pad the top and bottom by a random pixel value sampled uniformly from the discrete interval ``[0..10]``. Pad the left and right analogously by a random value sampled from ``[0..5]``. Each value is always sampled independently. >>> aug = iaa.CropAndPad(percent=(0, 0.1)) Pad each side by a random fraction sampled uniformly from the continuous interval ``[0.0, 0.10]``. The fraction is sampled once per image and side. E.g. a sampled fraction of ``0.1`` for the top side would pad by ``0.1*H``, where ``H`` is the height of the input image. >>> aug = iaa.CropAndPad( >>> percent=([0.05, 0.1], [0.05, 0.1], [0.05, 0.1], [0.05, 0.1])) Pads each side by either ``5%`` or ``10%``. The values are sampled once per side and image. >>> aug = iaa.CropAndPad(px=(-10, 10)) Sample uniformly per image and side a value ``v`` from the discrete range ``[-10..10]``. Then either crop (negative sample) or pad (positive sample) the side by ``v`` pixels. """ def __init__(self, px=None, percent=None, pad_mode="constant", pad_cval=0, keep_size=True, sample_independently=True, seed=None, name=None, **old_kwargs): # pylint: disable=invalid-name super(CropAndPad, self).__init__( seed=seed, name=name, **old_kwargs) self.mode, self.all_sides, self.top, self.right, self.bottom, \ self.left = self._handle_px_and_percent_args(px, percent) self.pad_mode = _handle_pad_mode_param(pad_mode) # TODO enable ALL here, like in e.g. 
Affine self.pad_cval = iap.handle_discrete_param( pad_cval, "pad_cval", value_range=None, tuple_to_uniform=True, list_to_choice=True, allow_floats=True) self.keep_size = keep_size self.sample_independently = sample_independently # set these to None to use the same values as sampled for the # images (not tested) self._pad_mode_heatmaps = "constant" self._pad_mode_segmentation_maps = "constant" self._pad_cval_heatmaps = 0.0 self._pad_cval_segmentation_maps = 0 @classmethod def _handle_px_and_percent_args(cls, px, percent): # pylint: disable=invalid-name all_sides = None top, right, bottom, left = None, None, None, None if px is None and percent is None: mode = "noop" elif px is not None and percent is not None: raise Exception("Can only pad by pixels or percent, not both.") elif px is not None: mode = "px" all_sides, top, right, bottom, left = cls._handle_px_arg(px) else: # = elif percent is not None: mode = "percent" all_sides, top, right, bottom, left = cls._handle_percent_arg( percent) return mode, all_sides, top, right, bottom, left @classmethod def _handle_px_arg(cls, px): # pylint: disable=invalid-name all_sides = None top, right, bottom, left = None, None, None, None if ia.is_single_integer(px): all_sides = iap.Deterministic(px) elif isinstance(px, tuple): assert len(px) in [2, 4], ( "Expected 'px' given as a tuple to contain 2 or 4 " "entries, got %d." % (len(px),)) def handle_param(p): if ia.is_single_integer(p): return iap.Deterministic(p) if isinstance(p, tuple): assert len(p) == 2, ( "Expected tuple of 2 values, got %d." % (len(p))) only_ints = ( ia.is_single_integer(p[0]) and ia.is_single_integer(p[1])) assert only_ints, ( "Expected tuple of integers, got %s and %s." % ( type(p[0]), type(p[1]))) return iap.DiscreteUniform(p[0], p[1]) if isinstance(p, list): assert len(p) > 0, ( "Expected non-empty list, but got empty one.") assert all([ia.is_single_integer(val) for val in p]), ( "Expected list of ints, got types %s." % ( ", ".join([str(type(v)) for v in p]))) return iap.Choice(p) if isinstance(p, iap.StochasticParameter): return p raise Exception( "Expected int, tuple of two ints, list of ints or " "StochasticParameter, got type %s." % (type(p),)) if len(px) == 2: all_sides = handle_param(px) else: # len == 4 top = handle_param(px[0]) right = handle_param(px[1]) bottom = handle_param(px[2]) left = handle_param(px[3]) elif isinstance(px, iap.StochasticParameter): top = right = bottom = left = px else: raise Exception( "Expected int, tuple of 4 " "ints/tuples/lists/StochasticParameters or " "StochasticParameter, got type %s." % (type(px),)) return all_sides, top, right, bottom, left @classmethod def _handle_percent_arg(cls, percent): all_sides = None top, right, bottom, left = None, None, None, None if ia.is_single_number(percent): assert percent > -1.0, ( "Expected 'percent' to be >-1.0, got %.4f." % (percent,)) all_sides = iap.Deterministic(percent) elif isinstance(percent, tuple): assert len(percent) in [2, 4], ( "Expected 'percent' given as a tuple to contain 2 or 4 " "entries, got %d." % (len(percent),)) def handle_param(p): if ia.is_single_number(p): return iap.Deterministic(p) if isinstance(p, tuple): assert len(p) == 2, ( "Expected tuple of 2 values, got %d." % (len(p),)) only_numbers = ( ia.is_single_number(p[0]) and ia.is_single_number(p[1])) assert only_numbers, ( "Expected tuple of numbers, got %s and %s." % ( type(p[0]), type(p[1]))) assert p[0] > -1.0 and p[1] > -1.0, ( "Expected tuple of values >-1.0, got %.4f and " "%.4f." 
% (p[0], p[1])) return iap.Uniform(p[0], p[1]) if isinstance(p, list): assert len(p) > 0, ( "Expected non-empty list, but got empty one.") assert all([ia.is_single_number(val) for val in p]), ( "Expected list of numbers, got types %s." % ( ", ".join([str(type(v)) for v in p]))) assert all([val > -1.0 for val in p]), ( "Expected list of values >-1.0, got values %s." % ( ", ".join(["%.4f" % (v,) for v in p]))) return iap.Choice(p) if isinstance(p, iap.StochasticParameter): return p raise Exception( "Expected int, tuple of two ints, list of ints or " "StochasticParameter, got type %s." % (type(p),)) if len(percent) == 2: all_sides = handle_param(percent) else: # len == 4 top = handle_param(percent[0]) right = handle_param(percent[1]) bottom = handle_param(percent[2]) left = handle_param(percent[3]) elif isinstance(percent, iap.StochasticParameter): top = right = bottom = left = percent else: raise Exception( "Expected number, tuple of 4 " "numbers/tuples/lists/StochasticParameters or " "StochasticParameter, got type %s." % (type(percent),)) return all_sides, top, right, bottom, left def _augment_batch_(self, batch, random_state, parents, hooks): shapes = batch.get_rowwise_shapes() samples = self._draw_samples(random_state, shapes) if batch.images is not None: batch.images = self._augment_images_by_samples(batch.images, samples) if batch.heatmaps is not None: batch.heatmaps = self._augment_maps_by_samples( batch.heatmaps, self._pad_mode_heatmaps, self._pad_cval_heatmaps, samples) if batch.segmentation_maps is not None: batch.segmentation_maps = self._augment_maps_by_samples( batch.segmentation_maps, self._pad_mode_segmentation_maps, self._pad_cval_segmentation_maps, samples) for augm_name in ["keypoints", "bounding_boxes", "polygons", "line_strings"]: augm_value = getattr(batch, augm_name) if augm_value is not None: func = functools.partial( self._augment_keypoints_by_samples, samples=samples) cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func) setattr(batch, augm_name, cbaois) return batch def _augment_images_by_samples(self, images, samples): result = [] for i, image in enumerate(images): samples_i = samples[i] image_cr_pa = _crop_and_pad_arr( image, samples_i.croppings, samples_i.paddings, samples_i.pad_mode, samples_i.pad_cval, self.keep_size) result.append(image_cr_pa) if ia.is_np_array(images): if self.keep_size: result = np.array(result, dtype=images.dtype) else: nb_shapes = len({image.shape for image in result}) if nb_shapes == 1: result = np.array(result, dtype=images.dtype) return result def _augment_maps_by_samples(self, augmentables, pad_mode, pad_cval, samples): result = [] for i, augmentable in enumerate(augmentables): samples_img = samples[i] augmentable = _crop_and_pad_hms_or_segmaps_( augmentable, croppings_img=samples_img.croppings, paddings_img=samples_img.paddings, pad_mode=(pad_mode if pad_mode is not None else samples_img.pad_mode), pad_cval=(pad_cval if pad_cval is not None else samples_img.pad_cval), keep_size=self.keep_size ) result.append(augmentable) return result def _augment_keypoints_by_samples(self, keypoints_on_images, samples): result = [] for i, keypoints_on_image in enumerate(keypoints_on_images): samples_i = samples[i] kpsoi_aug = _crop_and_pad_kpsoi_( keypoints_on_image, croppings_img=samples_i.croppings, paddings_img=samples_i.paddings, keep_size=self.keep_size) result.append(kpsoi_aug) return result def _draw_samples(self, random_state, shapes): nb_rows = len(shapes) if self.mode == "noop": top = right = bottom = left = np.full((nb_rows,), 0, 
dtype=np.int32) else: if self.all_sides is not None: if self.sample_independently: samples = self.all_sides.draw_samples( (nb_rows, 4), random_state=random_state) top = samples[:, 0] right = samples[:, 1] bottom = samples[:, 2] left = samples[:, 3] else: sample = self.all_sides.draw_samples( (nb_rows,), random_state=random_state) top = right = bottom = left = sample else: top = self.top.draw_samples( (nb_rows,), random_state=random_state) right = self.right.draw_samples( (nb_rows,), random_state=random_state) bottom = self.bottom.draw_samples( (nb_rows,), random_state=random_state) left = self.left.draw_samples( (nb_rows,), random_state=random_state) if self.mode == "px": # no change necessary for pixel values pass elif self.mode == "percent": # percentage values have to be transformed to pixel values shapes_arr = np.array([shape[0:2] for shape in shapes], dtype=np.float32) heights = shapes_arr[:, 0] widths = shapes_arr[:, 1] top = np.round(heights * top).astype(np.int32) right = np.round(widths * right).astype(np.int32) bottom = np.round(heights * bottom).astype(np.int32) left = np.round(widths * left).astype(np.int32) else: raise Exception("Invalid mode") def _only_above_zero(arr): arr = np.copy(arr) mask = (arr < 0) arr[mask] = 0 return arr crop_top = _only_above_zero((-1) * top) crop_right = _only_above_zero((-1) * right) crop_bottom = _only_above_zero((-1) * bottom) crop_left = _only_above_zero((-1) * left) pad_top = _only_above_zero(top) pad_right = _only_above_zero(right) pad_bottom = _only_above_zero(bottom) pad_left = _only_above_zero(left) pad_mode = self.pad_mode.draw_samples((nb_rows,), random_state=random_state) pad_cval = self.pad_cval.draw_samples((nb_rows,), random_state=random_state) # TODO vectorize this part -- especially return only one instance result = [] for i, shape in enumerate(shapes): height, width = shape[0:2] crop_top_i, crop_right_i, crop_bottom_i, crop_left_i = \ _crop_prevent_zero_size( height, width, crop_top[i], crop_right[i], crop_bottom[i], crop_left[i]) # add here any_crop_y to not warn in case of zero height/width # images any_crop_y = (crop_top_i > 0 or crop_bottom_i > 0) if any_crop_y and crop_top_i + crop_bottom_i >= height: ia.warn( "Expected generated crop amounts in CropAndPad for top and " "bottom image side to be less than the image's height, but " "got %d (top) and %d (bottom) vs. image height %d. This " "will result in an image with output height=1 (if input " "height was >=1) or output height=0 (if input height " "was 0)." % (crop_top_i, crop_bottom_i, height)) # add here any_crop_x to not warn in case of zero height/width # images any_crop_x = (crop_left_i > 0 or crop_right_i > 0) if any_crop_x and crop_left_i + crop_right_i >= width: ia.warn( "Expected generated crop amounts in CropAndPad for left " "and right image side to be less than the image's width, " "but got %d (left) and %d (right) vs. image width %d. " "This will result in an image with output width=1 (if " "input width was >=1) or output width=0 (if input width " "was 0)." 
% (crop_left_i, crop_right_i, width)) result.append( _CropAndPadSamplingResult( crop_top=crop_top_i, crop_right=crop_right_i, crop_bottom=crop_bottom_i, crop_left=crop_left_i, pad_top=pad_top[i], pad_right=pad_right[i], pad_bottom=pad_bottom[i], pad_left=pad_left[i], pad_mode=pad_mode[i], pad_cval=pad_cval[i])) return result def get_parameters(self): """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.""" return [self.all_sides, self.top, self.right, self.bottom, self.left, self.pad_mode, self.pad_cval] class Pad(CropAndPad): """Pad images, i.e. adds columns/rows of pixels to them. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.CropAndPad`. Parameters ---------- px : None or int or imgaug.parameters.StochasticParameter or tuple, optional The number of pixels to pad on each side of the image. Expected value range is ``[0, inf)``. Either this or the parameter `percent` may be set, not both at the same time. * If ``None``, then pixel-based padding will not be used. * If ``int``, then that exact number of pixels will always be padded. * If ``StochasticParameter``, then that parameter will be used for each image. Four samples will be drawn per image (top, right, bottom, left), unless `sample_independently` is set to ``False``, as then only one value will be sampled per image and used for all sides. * If a ``tuple`` of two ``int`` s with values ``a`` and ``b``, then each side will be padded by a random amount sampled uniformly per image and side from the inteval ``[a, b]``. If however `sample_independently` is set to ``False``, only one value will be sampled per image and used for all sides. * If a ``tuple`` of four entries, then the entries represent top, right, bottom, left. Each entry may be a single ``int`` (always pad by exactly that value), a ``tuple`` of two ``int`` s ``a`` and ``b`` (pad by an amount within ``[a, b]``), a ``list`` of ``int`` s (pad by a random value that is contained in the ``list``) or a ``StochasticParameter`` (sample the amount to pad from that parameter). percent : None or int or float or imgaug.parameters.StochasticParameter or tuple, optional The number of pixels to pad on each side of the image given as a *fraction* of the image height/width. E.g. if this is set to ``0.1``, the augmenter will always pad ``10%`` of the image's height at both the top and the bottom (both ``10%`` each), as well as ``10%`` of the width at the right and left. Expected value range is ``[0.0, inf)``. Either this or the parameter `px` may be set, not both at the same time. * If ``None``, then fraction-based padding will not be used. * If ``number``, then that fraction will always be padded. * If ``StochasticParameter``, then that parameter will be used for each image. Four samples will be drawn per image (top, right, bottom, left). If however `sample_independently` is set to ``False``, only one value will be sampled per image and used for all sides. * If a ``tuple`` of two ``float`` s with values ``a`` and ``b``, then each side will be padded by a random fraction sampled uniformly per image and side from the interval ``[a, b]``. If however `sample_independently` is set to ``False``, only one value will be sampled per image and used for all sides. * If a ``tuple`` of four entries, then the entries represent top, right, bottom, left. 
Each entry may be a single ``float`` (always pad by exactly that fraction), a ``tuple`` of two ``float`` s ``a`` and ``b`` (pad by a fraction from ``[a, b]``), a ``list`` of ``float`` s (pad by a random value that is contained in the list) or a ``StochasticParameter`` (sample the percentage to pad from that parameter). pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional Padding mode to use. The available modes match the numpy padding modes, i.e. ``constant``, ``edge``, ``linear_ramp``, ``maximum``, ``median``, ``minimum``, ``reflect``, ``symmetric``, ``wrap``. The modes ``constant`` and ``linear_ramp`` use extra values, which are provided by ``pad_cval`` when necessary. See :func:`~imgaug.imgaug.pad` for more details. * If ``imgaug.ALL``, then a random mode from all available modes will be sampled per image. * If a ``str``, it will be used as the pad mode for all images. * If a ``list`` of ``str``, a random one of these will be sampled per image and used as the mode. * If ``StochasticParameter``, a random mode will be sampled from this parameter per image. pad_cval : number or tuple of number list of number or imgaug.parameters.StochasticParameter, optional The constant value to use if the pad mode is ``constant`` or the end value to use if the mode is ``linear_ramp``. See :func:`~imgaug.imgaug.pad` for more details. * If ``number``, then that value will be used. * If a ``tuple`` of two ``number`` s and at least one of them is a ``float``, then a random number will be uniformly sampled per image from the continuous interval ``[a, b]`` and used as the value. If both ``number`` s are ``int`` s, the interval is discrete. * If a ``list`` of ``number``, then a random value will be chosen from the elements of the ``list`` and used as the value. * If ``StochasticParameter``, a random value will be sampled from that parameter per image. keep_size : bool, optional After padding, the result image will usually have a different height/width compared to the original input image. If this parameter is set to ``True``, then the padded image will be resized to the input image's size, i.e. the augmenter's output shape is always identical to the input shape. sample_independently : bool, optional If ``False`` *and* the values for `px`/`percent` result in exactly *one* probability distribution for all image sides, only one single value will be sampled from that probability distribution and used for all sides. I.e. the pad amount then is the same for all sides. If ``True``, four values will be sampled independently, one per side. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.Pad(px=(0, 10)) Pad each side by a random pixel value sampled uniformly per image and side from the discrete interval ``[0..10]``. The padding happens by zero-padding, i.e. it adds black pixels (default setting). >>> aug = iaa.Pad(px=(0, 10), pad_mode="edge") Pad each side by a random pixel value sampled uniformly per image and side from the discrete interval ``[0..10]``. The padding uses the ``edge`` mode from numpy's pad function, i.e. the pixel colors around the image sides are repeated. 
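An additional, illustrative combination (parameter values picked arbitrarily, not taken from the original documentation): >>> aug = iaa.Pad(percent=(0, 0.1), pad_mode="reflect", keep_size=False) Pad each side by a random fraction sampled uniformly from the continuous interval ``[0.0, 0.1]``, mirror the image content into the padded area (``reflect`` mode) and keep the enlarged image size instead of resizing back to the input size.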
>>> aug = iaa.Pad(px=(0, 10), pad_mode=["constant", "edge"]) Similar to the previous example, but uses zero-padding (``constant``) for half of the images and ``edge`` padding for the other half. >>> aug = iaa.Pad(px=(0, 10), pad_mode=ia.ALL, pad_cval=(0, 255)) Similar to the previous example, but uses any available padding mode. In case the padding mode ends up being ``constant`` or ``linear_ramp``, a random intensity is uniformly sampled (once per image) from the discrete interval ``[0..255]`` and used as the intensity of the new pixels. >>> aug = iaa.Pad(px=(0, 10), sample_independently=False) Pad each side by a random pixel value sampled uniformly once per image from the discrete interval ``[0..10]``. Each sampled value is used for *all* sides of the corresponding image. >>> aug = iaa.Pad(px=(0, 10), keep_size=False) Pad each side by a random pixel value sampled uniformly per image and side from the discrete interval ``[0..10]``. Afterwards, do **not** resize the padded image back to the input image's size. This will increase the image's height and width by a maximum of ``20`` pixels. >>> aug = iaa.Pad(px=((0, 10), (0, 5), (0, 10), (0, 5))) Pad the top and bottom by a random pixel value sampled uniformly from the discrete interval ``[0..10]``. Pad the left and right analogously by a random value sampled from ``[0..5]``. Each value is always sampled independently. >>> aug = iaa.Pad(percent=(0, 0.1)) Pad each side by a random fraction sampled uniformly from the continuous interval ``[0.0, 0.10]``. The fraction is sampled once per image and side. E.g. a sampled fraction of ``0.1`` for the top side would pad by ``0.1*H``, where ``H`` is the height of the input image. >>> aug = iaa.Pad( >>> percent=([0.05, 0.1], [0.05, 0.1], [0.05, 0.1], [0.05, 0.1])) Pads each side by either ``5%`` or ``10%``. The values are sampled once per side and image. """ def __init__(self, px=None, percent=None, pad_mode="constant", pad_cval=0, keep_size=True, sample_independently=True, seed=None, name=None, **old_kwargs): def recursive_validate(value): if value is None: return value if ia.is_single_number(value): assert value >= 0, "Expected value >=0, got %.4f" % (value,) return value if isinstance(value, iap.StochasticParameter): return value if isinstance(value, tuple): return tuple([recursive_validate(v_) for v_ in value]) if isinstance(value, list): return [recursive_validate(v_) for v_ in value] raise Exception( "Expected None or int or float or StochasticParameter or " "list or tuple, got %s." % (type(value),)) px = recursive_validate(px) percent = recursive_validate(percent) super(Pad, self).__init__( px=px, percent=percent, pad_mode=pad_mode, pad_cval=pad_cval, keep_size=keep_size, sample_independently=sample_independently, seed=seed, name=name, **old_kwargs) class Crop(CropAndPad): """Crop images, i.e. remove columns/rows of pixels at the sides of images. This augmenter allows extracting smaller-sized subimages from given full-sized input images. The number of pixels to cut off may be defined in absolute values or as fractions of the image sizes. This augmenter will never crop images below a height or width of ``1``. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.CropAndPad`. Parameters ---------- px : None or int or imgaug.parameters.StochasticParameter or tuple, optional The number of pixels to crop on each side of the image. Expected value range is ``[0, inf)``. Either this or the parameter `percent` may be set, not both at the same time.
* If ``None``, then pixel-based cropping will not be used. * If ``int``, then that exact number of pixels will always be cropped. * If ``StochasticParameter``, then that parameter will be used for each image. Four samples will be drawn per image (top, right, bottom, left), unless `sample_independently` is set to ``False``, as then only one value will be sampled per image and used for all sides. * If a ``tuple`` of two ``int`` s with values ``a`` and ``b``, then each side will be cropped by a random amount sampled uniformly per image and side from the inteval ``[a, b]``. If however `sample_independently` is set to ``False``, only one value will be sampled per image and used for all sides. * If a ``tuple`` of four entries, then the entries represent top, right, bottom, left. Each entry may be a single ``int`` (always crop by exactly that value), a ``tuple`` of two ``int`` s ``a`` and ``b`` (crop by an amount within ``[a, b]``), a ``list`` of ``int`` s (crop by a random value that is contained in the ``list``) or a ``StochasticParameter`` (sample the amount to crop from that parameter). percent : None or int or float or imgaug.parameters.StochasticParameter or tuple, optional The number of pixels to crop on each side of the image given as a *fraction* of the image height/width. E.g. if this is set to ``0.1``, the augmenter will always crop ``10%`` of the image's height at both the top and the bottom (both ``10%`` each), as well as ``10%`` of the width at the right and left. Expected value range is ``[0.0, 1.0)``. Either this or the parameter `px` may be set, not both at the same time. * If ``None``, then fraction-based cropping will not be used. * If ``number``, then that fraction will always be cropped. * If ``StochasticParameter``, then that parameter will be used for each image. Four samples will be drawn per image (top, right, bottom, left). If however `sample_independently` is set to ``False``, only one value will be sampled per image and used for all sides. * If a ``tuple`` of two ``float`` s with values ``a`` and ``b``, then each side will be cropped by a random fraction sampled uniformly per image and side from the interval ``[a, b]``. If however `sample_independently` is set to ``False``, only one value will be sampled per image and used for all sides. * If a ``tuple`` of four entries, then the entries represent top, right, bottom, left. Each entry may be a single ``float`` (always crop by exactly that fraction), a ``tuple`` of two ``float`` s ``a`` and ``b`` (crop by a fraction from ``[a, b]``), a ``list`` of ``float`` s (crop by a random value that is contained in the list) or a ``StochasticParameter`` (sample the percentage to crop from that parameter). keep_size : bool, optional After cropping, the result image will usually have a different height/width compared to the original input image. If this parameter is set to ``True``, then the cropped image will be resized to the input image's size, i.e. the augmenter's output shape is always identical to the input shape. sample_independently : bool, optional If ``False`` *and* the values for `px`/`percent` result in exactly *one* probability distribution for all image sides, only one single value will be sampled from that probability distribution and used for all sides. I.e. the crop amount then is the same for all sides. If ``True``, four values will be sampled independently, one per side. 
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.Crop(px=(0, 10)) Crop each side by a random pixel value sampled uniformly per image and side from the discrete interval ``[0..10]``. >>> aug = iaa.Crop(px=(0, 10), sample_independently=False) Crop each side by a random pixel value sampled uniformly once per image from the discrete interval ``[0..10]``. Each sampled value is used for *all* sides of the corresponding image. >>> aug = iaa.Crop(px=(0, 10), keep_size=False) Crop each side by a random pixel value sampled uniformly per image and side from the discrete interval ``[0..10]``. Afterwards, do **not** resize the cropped image back to the input image's size. This will decrease the image's height and width by a maximum of ``20`` pixels. >>> aug = iaa.Crop(px=((0, 10), (0, 5), (0, 10), (0, 5))) Crop the top and bottom by a random pixel value sampled uniformly from the discrete interval ``[0..10]``. Crop the left and right analogously by a random value sampled from ``[0..5]``. Each value is always sampled independently. >>> aug = iaa.Crop(percent=(0, 0.1)) Crop each side by a random fraction sampled uniformly from the continuous interval ``[0.0, 0.10]``. The fraction is sampled once per image and side. E.g. a sampled fraction of ``0.1`` for the top side would crop by ``0.1*H``, where ``H`` is the height of the input image. >>> aug = iaa.Crop( >>> percent=([0.05, 0.1], [0.05, 0.1], [0.05, 0.1], [0.05, 0.1])) Crops each side by either ``5%`` or ``10%``. The values are sampled once per side and image. """ def __init__(self, px=None, percent=None, keep_size=True, sample_independently=True, seed=None, name=None, **old_kwargs): def recursive_negate(value): if value is None: return value if ia.is_single_number(value): assert value >= 0, "Expected value >=0, got %.4f." % (value,) return -value if isinstance(value, iap.StochasticParameter): return iap.Multiply(value, -1) if isinstance(value, tuple): return tuple([recursive_negate(v_) for v_ in value]) if isinstance(value, list): return [recursive_negate(v_) for v_ in value] raise Exception( "Expected None or int or float or StochasticParameter or " "list or tuple, got %s." % (type(value),)) px = recursive_negate(px) percent = recursive_negate(percent) super(Crop, self).__init__( px=px, percent=percent, keep_size=keep_size, sample_independently=sample_independently, seed=seed, name=name, **old_kwargs) # TODO maybe rename this to PadToMinimumSize? # TODO this is very similar to CropAndPad, maybe add a way to generate crop # values imagewise via a callback in CropAndPad? # TODO why are padding mode and cval here called pad_mode, pad_cval but in other # cases mode/cval? class PadToFixedSize(meta.Augmenter): """Pad images to a predefined minimum width and/or height. If images are already at the minimum width/height or are larger, they will not be padded. Note that this also means that images will not be cropped if they exceed the required width/height. The augmenter randomly decides per image how to distribute the required padding amounts over the image axis. E.g.
if 2px have to be padded on the left or right to reach the required width, the augmenter will sometimes add 2px to the left and 0px to the right, sometimes add 2px to the right and 0px to the left and sometimes add 1px to both sides. Set `position` to ``center`` to prevent that. Supported dtypes ---------------- See :func:`~imgaug.augmenters.size.pad`. Parameters ---------- width : int or None Pad images up to this minimum width. If ``None``, image widths will not be altered. height : int or None Pad images up to this minimum height. If ``None``, image heights will not be altered. pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.CropAndPad.__init__`. pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.CropAndPad.__init__`. position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional Sets the center point of the padding, which determines how the required padding amounts are distributed to each side. For a ``tuple`` ``(a, b)``, both ``a`` and ``b`` are expected to be in range ``[0.0, 1.0]`` and describe the fraction of padding applied to the left/right (low/high values for ``a``) and the fraction of padding applied to the top/bottom (low/high values for ``b``). A padding position at ``(0.5, 0.5)`` would be the center of the image and distribute the padding equally to all sides. A padding position at ``(0.0, 1.0)`` would be the left-bottom and would apply 100% of the required padding to the bottom and left sides of the image so that the bottom left corner becomes more and more the new image center (depending on how much is padded). * If string ``uniform`` then the share of padding is randomly and uniformly distributed over each side. Equivalent to ``(Uniform(0.0, 1.0), Uniform(0.0, 1.0))``. * If string ``normal`` then the share of padding is distributed based on a normal distribution, leading to a focus on the center of the images. Equivalent to ``(Clip(Normal(0.5, 0.45/2), 0, 1), Clip(Normal(0.5, 0.45/2), 0, 1))``. * If string ``center`` then center point of the padding is identical to the image center. Equivalent to ``(0.5, 0.5)``. * If a string matching regex ``^(left|center|right)-(top|center|bottom)$``, e.g. ``left-top`` or ``center-bottom`` then sets the center point of the padding to the X-Y position matching that description. * If a tuple of float, then expected to have exactly two entries between ``0.0`` and ``1.0``, which will always be used as the combination the position matching (x, y) form. * If a ``StochasticParameter``, then that parameter will be queried once per call to ``augment_*()`` to get ``Nx2`` center positions in ``(x, y)`` form (with ``N`` the number of images). * If a ``tuple`` of ``StochasticParameter``, then expected to have exactly two entries that will both be queried per call to ``augment_*()``, each for ``(N,)`` values, to get the center positions. First parameter is used for ``x`` coordinates, second for ``y`` coordinates. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. 
name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.PadToFixedSize(width=100, height=100) For image sides smaller than ``100`` pixels, pad to ``100`` pixels. Do nothing for the other edges. The padding is randomly (uniformly) distributed over the sides, so that e.g. sometimes most of the required padding is applied to the left, sometimes to the right (analogous top/bottom). >>> aug = iaa.PadToFixedSize(width=100, height=100, position="center") For image sides smaller than ``100`` pixels, pad to ``100`` pixels. Do nothing for the other image sides. The padding is always equally distributed over the left/right and top/bottom sides. >>> aug = iaa.PadToFixedSize(width=100, height=100, pad_mode=ia.ALL) For image sides smaller than ``100`` pixels, pad to ``100`` pixels and use any possible padding mode for that. Do nothing for the other image sides. The padding is always equally distributed over the left/right and top/bottom sides. >>> aug = iaa.Sequential([ >>> iaa.PadToFixedSize(width=100, height=100), >>> iaa.CropToFixedSize(width=100, height=100) >>> ]) Pad images smaller than ``100x100`` until they reach ``100x100``. Analogously, crop images larger than ``100x100`` until they reach ``100x100``. The output images therefore have a fixed size of ``100x100``. """ def __init__(self, width, height, pad_mode="constant", pad_cval=0, position="uniform", seed=None, name=None, **old_kwargs): super(PadToFixedSize, self).__init__( seed=seed, name=name, **old_kwargs) self.size = (width, height) # Position of where to pad. The further to the top left this is, the # larger the share of pixels that will be added to the top and left # sides. I.e. set to (Deterministic(0.0), Deterministic(0.0)) to only # add at the top and left, (Deterministic(1.0), Deterministic(1.0)) # to only add at the bottom right. Analogously (0.5, 0.5) pads equally # on both axis, (0.0, 1.0) pads left and bottom, (1.0, 0.0) pads right # and top. self.position = _handle_position_parameter(position) self.pad_mode = _handle_pad_mode_param(pad_mode) # TODO enable ALL here like in eg Affine self.pad_cval = iap.handle_discrete_param( pad_cval, "pad_cval", value_range=None, tuple_to_uniform=True, list_to_choice=True, allow_floats=True) # set these to None to use the same values as sampled for the # images (not tested) self._pad_mode_heatmaps = "constant" self._pad_mode_segmentation_maps = "constant" self._pad_cval_heatmaps = 0.0 self._pad_cval_segmentation_maps = 0 def _augment_batch_(self, batch, random_state, parents, hooks): # Providing the whole batch to _draw_samples() would not be necessary # for this augmenter. The number of rows would be sufficient. This # formulation however enables derived augmenters to use rowwise shapes # without having to compute them here for this augmenter. 
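# The returned samples form a tuple
# (sizes, pad_xs, pad_ys, pad_modes, pad_cvals); see _draw_samples()
# further below.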
samples = self._draw_samples(batch, random_state) if batch.images is not None: batch.images = self._augment_images_by_samples(batch.images, samples) if batch.heatmaps is not None: batch.heatmaps = self._augment_maps_by_samples( batch.heatmaps, samples, self._pad_mode_heatmaps, self._pad_cval_heatmaps) if batch.segmentation_maps is not None: batch.segmentation_maps = self._augment_maps_by_samples( batch.segmentation_maps, samples, self._pad_mode_heatmaps, self._pad_cval_heatmaps) for augm_name in ["keypoints", "bounding_boxes", "polygons", "line_strings"]: augm_value = getattr(batch, augm_name) if augm_value is not None: func = functools.partial( self._augment_keypoints_by_samples, samples=samples) cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func) setattr(batch, augm_name, cbaois) return batch def _augment_images_by_samples(self, images, samples): result = [] sizes, pad_xs, pad_ys, pad_modes, pad_cvals = samples for i, (image, size) in enumerate(zip(images, sizes)): width_min, height_min = size height_image, width_image = image.shape[:2] paddings = self._calculate_paddings(height_image, width_image, height_min, width_min, pad_xs[i], pad_ys[i]) image = _crop_and_pad_arr( image, (0, 0, 0, 0), paddings, pad_modes[i], pad_cvals[i], keep_size=False) result.append(image) # TODO result is always a list. Should this be converted to an array # if possible (not guaranteed that all images have same size, # some might have been larger than desired height/width) return result def _augment_keypoints_by_samples(self, keypoints_on_images, samples): result = [] sizes, pad_xs, pad_ys, _, _ = samples for i, (kpsoi, size) in enumerate(zip(keypoints_on_images, sizes)): width_min, height_min = size height_image, width_image = kpsoi.shape[:2] paddings_img = self._calculate_paddings(height_image, width_image, height_min, width_min, pad_xs[i], pad_ys[i]) keypoints_padded = _crop_and_pad_kpsoi_( kpsoi, (0, 0, 0, 0), paddings_img, keep_size=False) result.append(keypoints_padded) return result def _augment_maps_by_samples(self, augmentables, samples, pad_mode, pad_cval): sizes, pad_xs, pad_ys, pad_modes, pad_cvals = samples for i, (augmentable, size) in enumerate(zip(augmentables, sizes)): width_min, height_min = size height_img, width_img = augmentable.shape[:2] paddings_img = self._calculate_paddings( height_img, width_img, height_min, width_min, pad_xs[i], pad_ys[i]) # TODO for the previous method (and likely the new/current one # too): # for 30x30 padded to 32x32 with 15x15 heatmaps this results # in paddings of 1 on each side (assuming # position=(0.5, 0.5)) giving 17x17 heatmaps when they should # be 16x16. Error is due to each side getting projected 0.5 # padding which is rounded to 1. This doesn't seem right. 
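# paddings_img is an absolute (top, right, bottom, left) tuple of pixel
# amounts derived from the underlying image shape; see
# _calculate_paddings().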
augmentables[i] = _crop_and_pad_hms_or_segmaps_( augmentables[i], (0, 0, 0, 0), paddings_img, pad_mode=pad_mode if pad_mode is not None else pad_modes[i], pad_cval=pad_cval if pad_cval is not None else pad_cvals[i], keep_size=False) return augmentables def _draw_samples(self, batch, random_state): nb_images = batch.nb_rows rngs = random_state.duplicate(4) if isinstance(self.position, tuple): pad_xs = self.position[0].draw_samples(nb_images, random_state=rngs[0]) pad_ys = self.position[1].draw_samples(nb_images, random_state=rngs[1]) else: pads = self.position.draw_samples((nb_images, 2), random_state=rngs[0]) pad_xs = pads[:, 0] pad_ys = pads[:, 1] pad_modes = self.pad_mode.draw_samples(nb_images, random_state=rngs[2]) pad_cvals = self.pad_cval.draw_samples(nb_images, random_state=rngs[3]) # We return here the sizes even though they are static as it allows # derived augmenters to define image-specific heights/widths. return [self.size] * nb_images, pad_xs, pad_ys, pad_modes, pad_cvals @classmethod def _calculate_paddings(cls, height_image, width_image, height_min, width_min, pad_xs_i, pad_ys_i): pad_top = 0 pad_right = 0 pad_bottom = 0 pad_left = 0 if width_min is not None and width_image < width_min: pad_total_x = width_min - width_image pad_left = int((1-pad_xs_i) * pad_total_x) pad_right = pad_total_x - pad_left if height_min is not None and height_image < height_min: pad_total_y = height_min - height_image pad_top = int((1-pad_ys_i) * pad_total_y) pad_bottom = pad_total_y - pad_top return pad_top, pad_right, pad_bottom, pad_left def get_parameters(self): """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.""" return [self.size[0], self.size[1], self.pad_mode, self.pad_cval, self.position] class CenterPadToFixedSize(PadToFixedSize): """Pad images equally on all sides up to given minimum heights/widths. This is an alias for :class:`~imgaug.augmenters.size.PadToFixedSize` with ``position="center"``. It spreads the pad amounts equally over all image sides, while :class:`~imgaug.augmenters.size.PadToFixedSize` by defaults spreads them randomly. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.PadToFixedSize`. Parameters ---------- width : int or None See :func:`PadToFixedSize.__init__`. height : int or None See :func:`PadToFixedSize.__init__`. pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional See :func:`PadToFixedSize.__init__`. pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional See :func:`PadToFixedSize.__init__`. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.CenterPadToFixedSize(height=20, width=30) Create an augmenter that pads images up to ``20x30``, with the padded rows added *equally* on the top and bottom (analogous for the padded columns). """ def __init__(self, width, height, pad_mode="constant", pad_cval=0, seed=None, name=None, **old_kwargs): super(CenterPadToFixedSize, self).__init__( width=width, height=height, pad_mode=pad_mode, pad_cval=pad_cval, position="center", seed=seed, name=name, **old_kwargs) # TODO maybe rename this to CropToMaximumSize ? 
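# Note: as illustrated in the docstrings above and below, PadToFixedSize
# and CropToFixedSize are complementary. A sketch of a fixed-output
# pipeline (the sizes are chosen only for illustration):
#     iaa.Sequential([iaa.PadToFixedSize(width=100, height=100),
#                     iaa.CropToFixedSize(width=100, height=100)])
# pads smaller images up to 100x100 and crops larger ones down to
# 100x100, so all outputs have a fixed size of 100x100.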
# TODO this is very similar to CropAndPad, maybe add a way to generate crop # values imagewise via a callback in in CropAndPad? # TODO add crop() function in imgaug, similar to pad class CropToFixedSize(meta.Augmenter): """Crop images down to a predefined maximum width and/or height. If images are already at the maximum width/height or are smaller, they will not be cropped. Note that this also means that images will not be padded if they are below the required width/height. The augmenter randomly decides per image how to distribute the required cropping amounts over the image axis. E.g. if 2px have to be cropped on the left or right to reach the required width, the augmenter will sometimes remove 2px from the left and 0px from the right, sometimes remove 2px from the right and 0px from the left and sometimes remove 1px from both sides. Set `position` to ``center`` to prevent that. Supported dtypes ---------------- * ``uint8``: yes; fully tested * ``uint16``: yes; tested * ``uint32``: yes; tested * ``uint64``: yes; tested * ``int8``: yes; tested * ``int16``: yes; tested * ``int32``: yes; tested * ``int64``: yes; tested * ``float16``: yes; tested * ``float32``: yes; tested * ``float64``: yes; tested * ``float128``: yes; tested * ``bool``: yes; tested Parameters ---------- width : int or None Crop images down to this maximum width. If ``None``, image widths will not be altered. height : int or None Crop images down to this maximum height. If ``None``, image heights will not be altered. position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional Sets the center point of the cropping, which determines how the required cropping amounts are distributed to each side. For a ``tuple`` ``(a, b)``, both ``a`` and ``b`` are expected to be in range ``[0.0, 1.0]`` and describe the fraction of cropping applied to the left/right (low/high values for ``a``) and the fraction of cropping applied to the top/bottom (low/high values for ``b``). A cropping position at ``(0.5, 0.5)`` would be the center of the image and distribute the cropping equally over all sides. A cropping position at ``(1.0, 0.0)`` would be the right-top and would apply 100% of the required cropping to the right and top sides of the image. * If string ``uniform`` then the share of cropping is randomly and uniformly distributed over each side. Equivalent to ``(Uniform(0.0, 1.0), Uniform(0.0, 1.0))``. * If string ``normal`` then the share of cropping is distributed based on a normal distribution, leading to a focus on the center of the images. Equivalent to ``(Clip(Normal(0.5, 0.45/2), 0, 1), Clip(Normal(0.5, 0.45/2), 0, 1))``. * If string ``center`` then center point of the cropping is identical to the image center. Equivalent to ``(0.5, 0.5)``. * If a string matching regex ``^(left|center|right)-(top|center|bottom)$``, e.g. ``left-top`` or ``center-bottom`` then sets the center point of the cropping to the X-Y position matching that description. * If a tuple of float, then expected to have exactly two entries between ``0.0`` and ``1.0``, which will always be used as the combination the position matching (x, y) form. * If a ``StochasticParameter``, then that parameter will be queried once per call to ``augment_*()`` to get ``Nx2`` center positions in ``(x, y)`` form (with ``N`` the number of images). 
* If a ``tuple`` of ``StochasticParameter``, then expected to have exactly two entries that will both be queried per call to ``augment_*()``, each for ``(N,)`` values, to get the center positions. First parameter is used for ``x`` coordinates, second for ``y`` coordinates. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.CropToFixedSize(width=100, height=100) For image sides larger than ``100`` pixels, crop to ``100`` pixels. Do nothing for the other sides. The cropping amounts are randomly (and uniformly) distributed over the sides of the image. >>> aug = iaa.CropToFixedSize(width=100, height=100, position="center") For sides larger than ``100`` pixels, crop to ``100`` pixels. Do nothing for the other sides. The cropping amounts are always equally distributed over the left/right sides of the image (and analogously for top/bottom). >>> aug = iaa.Sequential([ >>> iaa.PadToFixedSize(width=100, height=100), >>> iaa.CropToFixedSize(width=100, height=100) >>> ]) Pad images smaller than ``100x100`` until they reach ``100x100``. Analogously, crop images larger than ``100x100`` until they reach ``100x100``. The output images therefore have a fixed size of ``100x100``. """ def __init__(self, width, height, position="uniform", seed=None, name=None, **old_kwargs): super(CropToFixedSize, self).__init__( seed=seed, name=name, **old_kwargs) self.size = (width, height) # Position of where to crop. The further to the top left this is, # the larger the share of pixels that will be cropped from the top # and left sides. I.e. set to (Deterministic(0.0), Deterministic(0.0)) # to only crop at the top and left, # (Deterministic(1.0), Deterministic(1.0)) to only crop at the bottom # right. Analogously (0.5, 0.5) crops equally on both axis, # (0.0, 1.0) crops left and bottom, (1.0, 0.0) crops right and top. self.position = _handle_position_parameter(position) def _augment_batch_(self, batch, random_state, parents, hooks): # Providing the whole batch to _draw_samples() would not be necessary # for this augmenter. The number of rows would be sufficient. This # formulation however enables derived augmenters to use rowwise shapes # without having to compute them here for this augmenter. 
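# The returned samples form a tuple (sizes, offset_xs, offset_ys); see
# _draw_samples() further below.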
samples = self._draw_samples(batch, random_state) if batch.images is not None: batch.images = self._augment_images_by_samples(batch.images, samples) if batch.heatmaps is not None: batch.heatmaps = self._augment_maps_by_samples( batch.heatmaps, samples) if batch.segmentation_maps is not None: batch.segmentation_maps = self._augment_maps_by_samples( batch.segmentation_maps, samples) for augm_name in ["keypoints", "bounding_boxes", "polygons", "line_strings"]: augm_value = getattr(batch, augm_name) if augm_value is not None: func = functools.partial( self._augment_keypoints_by_samples, samples=samples) cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func) setattr(batch, augm_name, cbaois) return batch def _augment_images_by_samples(self, images, samples): result = [] sizes, offset_xs, offset_ys = samples for i, (image, size) in enumerate(zip(images, sizes)): w, h = size height_image, width_image = image.shape[0:2] croppings = self._calculate_crop_amounts( height_image, width_image, h, w, offset_ys[i], offset_xs[i]) image_cropped = _crop_and_pad_arr(image, croppings, (0, 0, 0, 0), keep_size=False) result.append(image_cropped) return result def _augment_keypoints_by_samples(self, kpsois, samples): result = [] sizes, offset_xs, offset_ys = samples for i, (kpsoi, size) in enumerate(zip(kpsois, sizes)): w, h = size height_image, width_image = kpsoi.shape[0:2] croppings_img = self._calculate_crop_amounts( height_image, width_image, h, w, offset_ys[i], offset_xs[i]) kpsoi_cropped = _crop_and_pad_kpsoi_( kpsoi, croppings_img, (0, 0, 0, 0), keep_size=False) result.append(kpsoi_cropped) return result def _augment_maps_by_samples(self, augmentables, samples): sizes, offset_xs, offset_ys = samples for i, (augmentable, size) in enumerate(zip(augmentables, sizes)): w, h = size height_image, width_image = augmentable.shape[0:2] croppings_img = self._calculate_crop_amounts( height_image, width_image, h, w, offset_ys[i], offset_xs[i]) augmentables[i] = _crop_and_pad_hms_or_segmaps_( augmentable, croppings_img, (0, 0, 0, 0), keep_size=False) return augmentables @classmethod def _calculate_crop_amounts(cls, height_image, width_image, height_max, width_max, offset_y, offset_x): crop_top = 0 crop_right = 0 crop_bottom = 0 crop_left = 0 if height_max is not None and height_image > height_max: crop_top = int(offset_y * (height_image - height_max)) crop_bottom = height_image - height_max - crop_top if width_max is not None and width_image > width_max: crop_left = int(offset_x * (width_image - width_max)) crop_right = width_image - width_max - crop_left return crop_top, crop_right, crop_bottom, crop_left def _draw_samples(self, batch, random_state): nb_images = batch.nb_rows rngs = random_state.duplicate(2) if isinstance(self.position, tuple): offset_xs = self.position[0].draw_samples(nb_images, random_state=rngs[0]) offset_ys = self.position[1].draw_samples(nb_images, random_state=rngs[1]) else: offsets = self.position.draw_samples((nb_images, 2), random_state=rngs[0]) offset_xs = offsets[:, 0] offset_ys = offsets[:, 1] offset_xs = 1.0 - offset_xs offset_ys = 1.0 - offset_ys # We return here the sizes even though they are static as it allows # derived augmenters to define image-specific heights/widths. return [self.size] * nb_images, offset_xs, offset_ys def get_parameters(self): """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.""" return [self.size[0], self.size[1], self.position] class CenterCropToFixedSize(CropToFixedSize): """Take a crop from the center of each image. 
This is an alias for :class:`~imgaug.augmenters.size.CropToFixedSize` with ``position="center"``. .. note:: If images already have a width and/or height below the provided width and/or height then this augmenter will do nothing for the respective axis. Hence, resulting images can be smaller than the provided axis sizes. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.CropToFixedSize`. Parameters ---------- width : int or None See :func:`CropToFixedSize.__init__`. height : int or None See :func:`CropToFixedSize.__init__`. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> crop = iaa.CenterCropToFixedSize(height=20, width=10) Create an augmenter that takes ``20x10`` sized crops from the center of images. """ def __init__(self, width, height, seed=None, name=None, **old_kwargs): super(CenterCropToFixedSize, self).__init__( width=width, height=height, position="center", seed=seed, name=name, **old_kwargs) class CropToMultiplesOf(CropToFixedSize): """Crop images down until their height/width is a multiple of a value. .. note:: For a given axis size ``A`` and multiple ``M``, if ``A`` is in the interval ``[0 .. M]``, the axis will not be changed. As a result, this augmenter can still produce axis sizes that are not multiples of the given values. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.CropToFixedSize`. Parameters ---------- width_multiple : int or None Multiple for the width. Images will be cropped down until their width is a multiple of this value. If ``None``, image widths will not be altered. height_multiple : int or None Multiple for the height. Images will be cropped down until their height is a multiple of this value. If ``None``, image heights will not be altered. position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional See :func:`CropToFixedSize.__init__`. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.CropToMultiplesOf(height_multiple=10, width_multiple=6) Create an augmenter that crops images to multiples of ``10`` along the y-axis (i.e. 10, 20, 30, ...) and to multiples of ``6`` along the x-axis (i.e. 6, 12, 18, ...). The rows to be cropped will be spread *randomly* over the top and bottom sides (analogous for the left/right sides). 
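A further, illustrative variation (the multiples are chosen arbitrarily, as above): >>> aug = iaa.CropToMultiplesOf( >>> height_multiple=10, width_multiple=6, position="center") As above, but the rows and columns to be cropped are spread *equally* over the opposing sides, matching the behaviour of ``CenterCropToMultiplesOf``.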
""" def __init__(self, width_multiple, height_multiple, position="uniform", seed=None, name=None, **old_kwargs): super(CropToMultiplesOf, self).__init__( width=None, height=None, position=position, seed=seed, name=name, **old_kwargs) self.width_multiple = width_multiple self.height_multiple = height_multiple def _draw_samples(self, batch, random_state): _sizes, offset_xs, offset_ys = super( CropToMultiplesOf, self )._draw_samples(batch, random_state) shapes = batch.get_rowwise_shapes() sizes = [] for shape in shapes: height, width = shape[0:2] croppings = compute_croppings_to_reach_multiples_of( shape, height_multiple=self.height_multiple, width_multiple=self.width_multiple) # TODO change that # note that these are not in the same order as shape tuples # in CropToFixedSize new_size = ( width - croppings[1] - croppings[3], height - croppings[0] - croppings[2] ) sizes.append(new_size) return sizes, offset_xs, offset_ys def get_parameters(self): """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.""" return [self.width_multiple, self.height_multiple, self.position] class CenterCropToMultiplesOf(CropToMultiplesOf): """Crop images equally on all sides until H/W are multiples of given values. This is the same as :class:`~imgaug.augmenters.size.CropToMultiplesOf`, but uses ``position="center"`` by default, which spreads the crop amounts equally over all image sides, while :class:`~imgaug.augmenters.size.CropToMultiplesOf` by default spreads them randomly. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.CropToFixedSize`. Parameters ---------- width_multiple : int or None See :func:`CropToMultiplesOf.__init__`. height_multiple : int or None See :func:`CropToMultiplesOf.__init__`. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.CenterCropToMultiplesOf(height_multiple=10, width_multiple=6) Create an augmenter that crops images to multiples of ``10`` along the y-axis (i.e. 10, 20, 30, ...) and to multiples of ``6`` along the x-axis (i.e. 6, 12, 18, ...). The rows to be cropped will be spread *equally* over the top and bottom sides (analogous for the left/right sides). """ def __init__(self, width_multiple, height_multiple, seed=None, name=None, **old_kwargs): super(CenterCropToMultiplesOf, self).__init__( width_multiple=width_multiple, height_multiple=height_multiple, position="center", seed=seed, name=name, **old_kwargs) class PadToMultiplesOf(PadToFixedSize): """Pad images until their height/width is a multiple of a value. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.PadToFixedSize`. Parameters ---------- width_multiple : int or None Multiple for the width. Images will be padded until their width is a multiple of this value. If ``None``, image widths will not be altered. height_multiple : int or None Multiple for the height. Images will be padded until their height is a multiple of this value. If ``None``, image heights will not be altered. pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`. 
pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`. position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional See :func:`PadToFixedSize.__init__`. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.PadToMultiplesOf(height_multiple=10, width_multiple=6) Create an augmenter that pads images to multiples of ``10`` along the y-axis (i.e. 10, 20, 30, ...) and to multiples of ``6`` along the x-axis (i.e. 6, 12, 18, ...). The rows to be padded will be spread *randomly* over the top and bottom sides (analogous for the left/right sides). """ def __init__(self, width_multiple, height_multiple, pad_mode="constant", pad_cval=0, position="uniform", seed=None, name=None, **old_kwargs): super(PadToMultiplesOf, self).__init__( width=None, height=None, pad_mode=pad_mode, pad_cval=pad_cval, position=position, seed=seed, name=name, **old_kwargs) self.width_multiple = width_multiple self.height_multiple = height_multiple def _draw_samples(self, batch, random_state): _sizes, pad_xs, pad_ys, pad_modes, pad_cvals = super( PadToMultiplesOf, self )._draw_samples(batch, random_state) shapes = batch.get_rowwise_shapes() sizes = [] for shape in shapes: height, width = shape[0:2] paddings = compute_paddings_to_reach_multiples_of( shape, height_multiple=self.height_multiple, width_multiple=self.width_multiple) # TODO change that # note that these are not in the same order as shape tuples # in PadToFixedSize new_size = ( width + paddings[1] + paddings[3], height + paddings[0] + paddings[2] ) sizes.append(new_size) return sizes, pad_xs, pad_ys, pad_modes, pad_cvals def get_parameters(self): """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.""" return [self.width_multiple, self.height_multiple, self.pad_mode, self.pad_cval, self.position] class CenterPadToMultiplesOf(PadToMultiplesOf): """Pad images equally on all sides until H/W are multiples of given values. This is the same as :class:`~imgaug.augmenters.size.PadToMultiplesOf`, but uses ``position="center"`` by default, which spreads the pad amounts equally over all image sides, while :class:`~imgaug.augmenters.size.PadToMultiplesOf` by default spreads them randomly. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.PadToFixedSize`. Parameters ---------- width_multiple : int or None See :func:`PadToMultiplesOf.__init__`. height_multiple : int or None See :func:`PadToMultiplesOf.__init__`. pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToMultiplesOf.__init__`. pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToMultiplesOf.__init__`. 
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.CenterPadToMultiplesOf(height_multiple=10, width_multiple=6) Create an augmenter that pads images to multiples of ``10`` along the y-axis (i.e. 10, 20, 30, ...) and to multiples of ``6`` along the x-axis (i.e. 6, 12, 18, ...). The rows to be padded will be spread *equally* over the top and bottom sides (analogous for the left/right sides). """ def __init__(self, width_multiple, height_multiple, pad_mode="constant", pad_cval=0, seed=None, name=None, **old_kwargs): super(CenterPadToMultiplesOf, self).__init__( width_multiple=width_multiple, height_multiple=height_multiple, pad_mode=pad_mode, pad_cval=pad_cval, position="center", seed=seed, name=name, **old_kwargs) class CropToPowersOf(CropToFixedSize): """Crop images until their height/width is a power of a base. This augmenter removes pixels from an axis with size ``S`` leading to the new size ``S'`` until ``S' = B^E`` is fulfilled, where ``B`` is a provided base (e.g. ``2``) and ``E`` is an exponent from the discrete interval ``[1 .. inf)``. .. note:: This augmenter does nothing for axes with size less than ``B^1 = B``. If you have images with ``S < B^1``, it is recommended to combine this augmenter with a padding augmenter that pads each axis up to ``B``. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.CropToFixedSize`. Parameters ---------- width_base : int or None Base for the width. Images will be cropped down until their width fulfills ``width' = width_base ^ E`` with ``E`` being any natural number. If ``None``, image widths will not be altered. height_base : int or None Base for the height. Images will be cropped down until their height fulfills ``height' = height_base ^ E`` with ``E`` being any natural number. If ``None``, image heights will not be altered. position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional See :func:`CropToFixedSize.__init__`. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.CropToPowersOf(height_base=3, width_base=2) Create an augmenter that crops each image down to powers of ``3`` along the y-axis (i.e. 3, 9, 27, ...) and powers of ``2`` along the x-axis (i.e. 2, 4, 8, 16, ...). The rows to be cropped will be spread *randomly* over the top and bottom sides (analogous for the left/right sides). 
""" def __init__(self, width_base, height_base, position="uniform", seed=None, name=None, **old_kwargs): super(CropToPowersOf, self).__init__( width=None, height=None, position=position, seed=seed, name=name, **old_kwargs) self.width_base = width_base self.height_base = height_base def _draw_samples(self, batch, random_state): _sizes, offset_xs, offset_ys = super( CropToPowersOf, self )._draw_samples(batch, random_state) shapes = batch.get_rowwise_shapes() sizes = [] for shape in shapes: height, width = shape[0:2] croppings = compute_croppings_to_reach_powers_of( shape, height_base=self.height_base, width_base=self.width_base) # TODO change that # note that these are not in the same order as shape tuples # in CropToFixedSize new_size = ( width - croppings[1] - croppings[3], height - croppings[0] - croppings[2] ) sizes.append(new_size) return sizes, offset_xs, offset_ys def get_parameters(self): """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.""" return [self.width_base, self.height_base, self.position] class CenterCropToPowersOf(CropToPowersOf): """Crop images equally on all sides until H/W is a power of a base. This is the same as :class:`~imgaug.augmenters.size.CropToPowersOf`, but uses ``position="center"`` by default, which spreads the crop amounts equally over all image sides, while :class:`~imgaug.augmenters.size.CropToPowersOf` by default spreads them randomly. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.CropToFixedSize`. Parameters ---------- width_base : int or None See :func:`CropToPowersOf.__init__`. height_base : int or None See :func:`CropToPowersOf.__init__`. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.CropToPowersOf(height_base=3, width_base=2) Create an augmenter that crops each image down to powers of ``3`` along the y-axis (i.e. 3, 9, 27, ...) and powers of ``2`` along the x-axis (i.e. 2, 4, 8, 16, ...). The rows to be cropped will be spread *equally* over the top and bottom sides (analogous for the left/right sides). """ def __init__(self, width_base, height_base, seed=None, name=None, **old_kwargs): super(CenterCropToPowersOf, self).__init__( width_base=width_base, height_base=height_base, position="center", seed=seed, name=name, **old_kwargs) class PadToPowersOf(PadToFixedSize): """Pad images until their height/width is a power of a base. This augmenter adds pixels to an axis with size ``S`` leading to the new size ``S'`` until ``S' = B^E`` is fulfilled, where ``B`` is a provided base (e.g. ``2``) and ``E`` is an exponent from the discrete interval ``[1 .. inf)``. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.PadToFixedSize`. Parameters ---------- width_base : int or None Base for the width. Images will be padded down until their width fulfills ``width' = width_base ^ E`` with ``E`` being any natural number. If ``None``, image widths will not be altered. height_base : int or None Base for the height. Images will be padded until their height fulfills ``height' = height_base ^ E`` with ``E`` being any natural number. If ``None``, image heights will not be altered. 
pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`. pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`. position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional See :func:`PadToFixedSize.__init__`. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.PadToPowersOf(height_base=3, width_base=2) Create an augmenter that pads each image to powers of ``3`` along the y-axis (i.e. 3, 9, 27, ...) and powers of ``2`` along the x-axis (i.e. 2, 4, 8, 16, ...). The rows to be padded will be spread *randomly* over the top and bottom sides (analogous for the left/right sides). """ def __init__(self, width_base, height_base, pad_mode="constant", pad_cval=0, position="uniform", seed=None, name=None, **old_kwargs): super(PadToPowersOf, self).__init__( width=None, height=None, pad_mode=pad_mode, pad_cval=pad_cval, position=position, seed=seed, name=name, **old_kwargs) self.width_base = width_base self.height_base = height_base def _draw_samples(self, batch, random_state): _sizes, pad_xs, pad_ys, pad_modes, pad_cvals = super( PadToPowersOf, self )._draw_samples(batch, random_state) shapes = batch.get_rowwise_shapes() sizes = [] for shape in shapes: height, width = shape[0:2] paddings = compute_paddings_to_reach_powers_of( shape, height_base=self.height_base, width_base=self.width_base) # TODO change that # note that these are not in the same order as shape tuples # in PadToFixedSize new_size = ( width + paddings[1] + paddings[3], height + paddings[0] + paddings[2] ) sizes.append(new_size) return sizes, pad_xs, pad_ys, pad_modes, pad_cvals def get_parameters(self): """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.""" return [self.width_base, self.height_base, self.pad_mode, self.pad_cval, self.position] class CenterPadToPowersOf(PadToPowersOf): """Pad images equally on all sides until H/W is a power of a base. This is the same as :class:`~imgaug.augmenters.size.PadToPowersOf`, but uses ``position="center"`` by default, which spreads the pad amounts equally over all image sides, while :class:`~imgaug.augmenters.size.PadToPowersOf` by default spreads them randomly. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.PadToFixedSize`. Parameters ---------- width_base : int or None See :func:`PadToPowersOf.__init__`. height_base : int or None See :func:`PadToPowersOf.__init__`. pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToPowersOf.__init__`. pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToPowersOf.__init__`. 
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.CenterPadToPowersOf(height_base=5, width_base=2) Create an augmenter that pads each image to powers of ``3`` along the y-axis (i.e. 3, 9, 27, ...) and powers of ``2`` along the x-axis (i.e. 2, 4, 8, 16, ...). The rows to be padded will be spread *equally* over the top and bottom sides (analogous for the left/right sides). """ def __init__(self, width_base, height_base, pad_mode="constant", pad_cval=0, seed=None, name=None, **old_kwargs): super(CenterPadToPowersOf, self).__init__( width_base=width_base, height_base=height_base, pad_mode=pad_mode, pad_cval=pad_cval, position="center", seed=seed, name=name, **old_kwargs) class CropToAspectRatio(CropToFixedSize): """Crop images until their width/height matches an aspect ratio. This augmenter removes either rows or columns until the image reaches the desired aspect ratio given in ``width / height``. The cropping operation is stopped once the desired aspect ratio is reached or the image side to crop reaches a size of ``1``. If any side of the image starts with a size of ``0``, the image will not be changed. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.CropToFixedSize`. Parameters ---------- aspect_ratio : number The desired aspect ratio, given as ``width/height``. E.g. a ratio of ``2.0`` denotes an image that is twice as wide as it is high. position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional See :func:`CropToFixedSize.__init__`. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.CropToAspectRatio(2.0) Create an augmenter that crops each image until its aspect ratio is as close as possible to ``2.0`` (i.e. two times as many pixels along the x-axis than the y-axis). The rows to be cropped will be spread *randomly* over the top and bottom sides (analogous for the left/right sides). 
""" def __init__(self, aspect_ratio, position="uniform", seed=None, name=None, **old_kwargs): super(CropToAspectRatio, self).__init__( width=None, height=None, position=position, seed=seed, name=name, **old_kwargs) self.aspect_ratio = aspect_ratio def _draw_samples(self, batch, random_state): _sizes, offset_xs, offset_ys = super( CropToAspectRatio, self )._draw_samples(batch, random_state) shapes = batch.get_rowwise_shapes() sizes = [] for shape in shapes: height, width = shape[0:2] if height == 0 or width == 0: croppings = (0, 0, 0, 0) else: croppings = compute_croppings_to_reach_aspect_ratio( shape, aspect_ratio=self.aspect_ratio) # TODO change that # note that these are not in the same order as shape tuples # in CropToFixedSize new_size = ( width - croppings[1] - croppings[3], height - croppings[0] - croppings[2] ) sizes.append(new_size) return sizes, offset_xs, offset_ys def get_parameters(self): """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.""" return [self.aspect_ratio, self.position] class CenterCropToAspectRatio(CropToAspectRatio): """Crop images equally on all sides until they reach an aspect ratio. This is the same as :class:`~imgaug.augmenters.size.CropToAspectRatio`, but uses ``position="center"`` by default, which spreads the crop amounts equally over all image sides, while :class:`~imgaug.augmenters.size.CropToAspectRatio` by default spreads them randomly. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.CropToFixedSize`. Parameters ---------- aspect_ratio : number See :func:`CropToAspectRatio.__init__`. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.CenterCropToAspectRatio(2.0) Create an augmenter that crops each image until its aspect ratio is as close as possible to ``2.0`` (i.e. two times as many pixels along the x-axis than the y-axis). The rows to be cropped will be spread *equally* over the top and bottom sides (analogous for the left/right sides). """ def __init__(self, aspect_ratio, seed=None, name=None, **old_kwargs): super(CenterCropToAspectRatio, self).__init__( aspect_ratio=aspect_ratio, position="center", seed=seed, name=name, **old_kwargs) class PadToAspectRatio(PadToFixedSize): """Pad images until their width/height matches an aspect ratio. This augmenter adds either rows or columns until the image reaches the desired aspect ratio given in ``width / height``. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.PadToFixedSize`. Parameters ---------- aspect_ratio : number The desired aspect ratio, given as ``width/height``. E.g. a ratio of ``2.0`` denotes an image that is twice as wide as it is high. position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional See :func:`PadToFixedSize.__init__`. pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`. 
pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.PadToAspectRatio(2.0) Create an augmenter that pads each image until its aspect ratio is as close as possible to ``2.0`` (i.e. two times as many pixels along the x-axis than the y-axis). The rows to be padded will be spread *randomly* over the top and bottom sides (analogous for the left/right sides). """ def __init__(self, aspect_ratio, pad_mode="constant", pad_cval=0, position="uniform", seed=None, name=None, **old_kwargs): super(PadToAspectRatio, self).__init__( width=None, height=None, pad_mode=pad_mode, pad_cval=pad_cval, position=position, seed=seed, name=name, **old_kwargs) self.aspect_ratio = aspect_ratio def _draw_samples(self, batch, random_state): _sizes, pad_xs, pad_ys, pad_modes, pad_cvals = super( PadToAspectRatio, self )._draw_samples(batch, random_state) shapes = batch.get_rowwise_shapes() sizes = [] for shape in shapes: height, width = shape[0:2] paddings = compute_paddings_to_reach_aspect_ratio( shape, aspect_ratio=self.aspect_ratio) # TODO change that # note that these are not in the same order as shape tuples # in PadToFixedSize new_size = ( width + paddings[1] + paddings[3], height + paddings[0] + paddings[2] ) sizes.append(new_size) return sizes, pad_xs, pad_ys, pad_modes, pad_cvals def get_parameters(self): """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.""" return [self.aspect_ratio, self.pad_mode, self.pad_cval, self.position] class CenterPadToAspectRatio(PadToAspectRatio): """Pad images equally on all sides until H/W matches an aspect ratio. This is the same as :class:`~imgaug.augmenters.size.PadToAspectRatio`, but uses ``position="center"`` by default, which spreads the pad amounts equally over all image sides, while :class:`~imgaug.augmenters.size.PadToAspectRatio` by default spreads them randomly. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.PadToFixedSize`. Parameters ---------- aspect_ratio : number See :func:`PadToAspectRatio.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToAspectRatio.__init__`. pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToAspectRatio.__init__`. deterministic : bool, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.PadToAspectRatio(2.0) Create am augmenter that pads each image until its aspect ratio is as close as possible to ``2.0`` (i.e. two times as many pixels along the x-axis than the y-axis). 
The rows to be padded will be spread *equally* over the top and bottom sides (analogous for the left/right sides). """ def __init__(self, aspect_ratio, pad_mode="constant", pad_cval=0, seed=None, name=None, **old_kwargs): super(CenterPadToAspectRatio, self).__init__( aspect_ratio=aspect_ratio, position="center", pad_mode=pad_mode, pad_cval=pad_cval, seed=seed, name=name, **old_kwargs) class CropToSquare(CropToAspectRatio): """Crop images until their width and height are identical. This is identical to :class:`~imgaug.augmenters.size.CropToAspectRatio` with ``aspect_ratio=1.0``. Images with axis sizes of ``0`` will not be altered. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.CropToFixedSize`. Parameters ---------- position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional See :func:`CropToFixedSize.__init__`. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.CropToSquare() Create an augmenter that crops each image until its square, i.e. height and width match. The rows to be cropped will be spread *randomly* over the top and bottom sides (analogous for the left/right sides). """ def __init__(self, position="uniform", seed=None, name=None, **old_kwargs): super(CropToSquare, self).__init__( aspect_ratio=1.0, position=position, seed=seed, name=name, **old_kwargs) class CenterCropToSquare(CropToSquare): """Crop images equally on all sides until their height/width are identical. In contrast to :class:`~imgaug.augmenters.size.CropToSquare`, this augmenter always tries to spread the columns/rows to remove equally over both sides of the respective axis to be cropped. :class:`~imgaug.augmenters.size.CropToAspectRatio` by default spreads the croppings randomly. This augmenter is identical to :class:`~imgaug.augmenters.size.CropToSquare` with ``position="center"``, and thereby the same as :class:`~imgaug.augmenters.size.CropToAspectRatio` with ``aspect_ratio=1.0, position="center"``. Images with axis sizes of ``0`` will not be altered. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.CropToFixedSize`. Parameters ---------- seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.CenterCropToSquare() Create an augmenter that crops each image until its square, i.e. height and width match. The rows to be cropped will be spread *equally* over the top and bottom sides (analogous for the left/right sides). 
""" def __init__(self, seed=None, name=None, **old_kwargs): super(CenterCropToSquare, self).__init__( position="center", seed=seed, name=name, **old_kwargs) class PadToSquare(PadToAspectRatio): """Pad images until their height and width are identical. This augmenter is identical to :class:`~imgaug.augmenters.size.PadToAspectRatio` with ``aspect_ratio=1.0``. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.PadToFixedSize`. Parameters ---------- position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional See :func:`PadToFixedSize.__init__`. pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`. pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.PadToSquare() Create an augmenter that pads each image until its square, i.e. height and width match. The rows to be padded will be spread *randomly* over the top and bottom sides (analogous for the left/right sides). """ def __init__(self, pad_mode="constant", pad_cval=0, position="uniform", seed=None, name=None, **old_kwargs): super(PadToSquare, self).__init__( aspect_ratio=1.0, pad_mode=pad_mode, pad_cval=pad_cval, position=position, seed=seed, name=name, **old_kwargs) class CenterPadToSquare(PadToSquare): """Pad images equally on all sides until their height & width are identical. This is the same as :class:`~imgaug.augmenters.size.PadToSquare`, but uses ``position="center"`` by default, which spreads the pad amounts equally over all image sides, while :class:`~imgaug.augmenters.size.PadToSquare` by default spreads them randomly. This augmenter is thus also identical to :class:`~imgaug.augmenters.size.PadToAspectRatio` with ``aspect_ratio=1.0, position="center"``. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.PadToFixedSize`. Parameters ---------- name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToAspectRatio.__init__`. pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToAspectRatio.__init__`. deterministic : bool, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.CenterPadToSquare() Create an augmenter that pads each image until its square, i.e. height and width match. 
The rows to be padded will be spread *equally* over the top and bottom sides (analogous for the left/right sides). """ def __init__(self, pad_mode="constant", pad_cval=0, seed=None, name=None, **old_kwargs): super(CenterPadToSquare, self).__init__( pad_mode=pad_mode, pad_cval=pad_cval, position="center", seed=seed, name=name, **old_kwargs) class KeepSizeByResize(meta.Augmenter): """Resize images back to their input sizes after applying child augmenters. Combining this with e.g. a cropping augmenter as the child will lead to images being resized back to the input size after the crop operation was applied. Some augmenters have a ``keep_size`` argument that achieves the same goal (if set to ``True``), though this augmenter offers control over the interpolation mode and which augmentables to resize (images, heatmaps, segmentation maps). Supported dtypes ---------------- See :func:`~imgaug.imgaug.imresize_many_images`. Parameters ---------- children : Augmenter or list of imgaug.augmenters.meta.Augmenter or None, optional One or more augmenters to apply to images. These augmenters may change the image size. interpolation : KeepSizeByResize.NO_RESIZE or {'nearest', 'linear', 'area', 'cubic'} or {cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC} or list of str or list of int or StochasticParameter, optional The interpolation mode to use when resizing images. Can take any value that :func:`~imgaug.imgaug.imresize_single_image` accepts, e.g. ``cubic``. * If this is ``KeepSizeByResize.NO_RESIZE`` then images will not be resized. * If this is a single ``str``, it is expected to have one of the following values: ``nearest``, ``linear``, ``area``, ``cubic``. * If this is a single integer, it is expected to have a value identical to one of: ``cv2.INTER_NEAREST``, ``cv2.INTER_LINEAR``, ``cv2.INTER_AREA``, ``cv2.INTER_CUBIC``. * If this is a ``list`` of ``str`` or ``int``, it is expected that each ``str``/``int`` is one of the above mentioned valid ones. A random one of these values will be sampled per image. * If this is a ``StochasticParameter``, it will be queried once per call to ``_augment_images()`` and must return ``N`` ``str`` s or ``int`` s (matching the above mentioned ones) for ``N`` images. interpolation_heatmaps : KeepSizeByResize.SAME_AS_IMAGES or KeepSizeByResize.NO_RESIZE or {'nearest', 'linear', 'area', 'cubic'} or {cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC} or list of str or list of int or StochasticParameter, optional The interpolation mode to use when resizing heatmaps. Meaning and valid values are similar to `interpolation`. This parameter may also take the value ``KeepSizeByResize.SAME_AS_IMAGES``, which will lead to copying the interpolation modes used for the corresponding images. The value may also be returned on a per-image basis if `interpolation_heatmaps` is provided as a ``StochasticParameter`` or may be one possible value if it is provided as a ``list`` of ``str``. interpolation_segmaps : KeepSizeByResize.SAME_AS_IMAGES or KeepSizeByResize.NO_RESIZE or {'nearest', 'linear', 'area', 'cubic'} or {cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC} or list of str or list of int or StochasticParameter, optional The interpolation mode to use when resizing segmentation maps. Similar to `interpolation_heatmaps`. **Note**: For segmentation maps, only ``NO_RESIZE`` or nearest neighbour interpolation (i.e. ``nearest``) make sense in the vast majority of all cases. 
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.KeepSizeByResize( >>> iaa.Crop((20, 40), keep_size=False) >>> ) Apply random cropping to input images, then resize them back to their original input sizes. The resizing is done using this augmenter instead of the corresponding internal resizing operation in ``Crop``. >>> aug = iaa.KeepSizeByResize( >>> iaa.Crop((20, 40), keep_size=False), >>> interpolation="nearest" >>> ) Same as in the previous example, but images are now always resized using nearest neighbour interpolation. >>> aug = iaa.KeepSizeByResize( >>> iaa.Crop((20, 40), keep_size=False), >>> interpolation=["nearest", "cubic"], >>> interpolation_heatmaps=iaa.KeepSizeByResize.SAME_AS_IMAGES, >>> interpolation_segmaps=iaa.KeepSizeByResize.NO_RESIZE >>> ) Similar to the previous example, but images are now sometimes resized using linear interpolation and sometimes using nearest neighbour interpolation. Heatmaps are resized using the same interpolation as was used for the corresponding image. Segmentation maps are not resized and will therefore remain at their size after cropping. """ NO_RESIZE = "NO_RESIZE" SAME_AS_IMAGES = "SAME_AS_IMAGES" def __init__(self, children, interpolation="cubic", interpolation_heatmaps=SAME_AS_IMAGES, interpolation_segmaps="nearest", seed=None, name=None, **old_kwargs): super(KeepSizeByResize, self).__init__( seed=seed, name=name, **old_kwargs) self.children = children def _validate_param(val, allow_same_as_images): valid_ips_and_resize = ia.IMRESIZE_VALID_INTERPOLATIONS \ + [KeepSizeByResize.NO_RESIZE] if allow_same_as_images and val == self.SAME_AS_IMAGES: return self.SAME_AS_IMAGES if val in valid_ips_and_resize: return iap.Deterministic(val) if isinstance(val, list): assert len(val) > 0, ( "Expected a list of at least one interpolation method. " "Got an empty list.") valid_ips_here = valid_ips_and_resize if allow_same_as_images: valid_ips_here = valid_ips_here \ + [KeepSizeByResize.SAME_AS_IMAGES] only_valid_ips = all([ip in valid_ips_here for ip in val]) assert only_valid_ips, ( "Expected each interpolations to be one of '%s', got " "'%s'." % (str(valid_ips_here), str(val))) return iap.Choice(val) if isinstance(val, iap.StochasticParameter): return val raise Exception( "Expected interpolation to be one of '%s' or a list of " "these values or a StochasticParameter. Got type %s." 
% ( str(ia.IMRESIZE_VALID_INTERPOLATIONS), type(val))) self.children = meta.handle_children_list(children, self.name, "then") self.interpolation = _validate_param(interpolation, False) self.interpolation_heatmaps = _validate_param(interpolation_heatmaps, True) self.interpolation_segmaps = _validate_param(interpolation_segmaps, True) def _augment_batch_(self, batch, random_state, parents, hooks): with batch.propagation_hooks_ctx(self, hooks, parents): images_were_array = None if batch.images is not None: images_were_array = ia.is_np_array(batch.images) shapes_orig = self._get_shapes(batch) samples = self._draw_samples(batch.nb_rows, random_state) batch = self.children.augment_batch_( batch, parents=parents + [self], hooks=hooks) if batch.images is not None: batch.images = self._keep_size_images( batch.images, shapes_orig["images"], images_were_array, samples) if batch.heatmaps is not None: # dont use shapes_orig["images"] because they might be None batch.heatmaps = self._keep_size_maps( batch.heatmaps, shapes_orig["heatmaps"], shapes_orig["heatmaps_arr"], samples[1]) if batch.segmentation_maps is not None: # dont use shapes_orig["images"] because they might be None batch.segmentation_maps = self._keep_size_maps( batch.segmentation_maps, shapes_orig["segmentation_maps"], shapes_orig["segmentation_maps_arr"], samples[2]) for augm_name in ["keypoints", "bounding_boxes", "polygons", "line_strings"]: augm_value = getattr(batch, augm_name) if augm_value is not None: func = functools.partial( self._keep_size_keypoints, shapes_orig=shapes_orig[augm_name], interpolations=samples[0]) cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func) setattr(batch, augm_name, cbaois) return batch @classmethod def _keep_size_images(cls, images, shapes_orig, images_were_array, samples): interpolations, _, _ = samples gen = zip(images, interpolations, shapes_orig) result = [] for image, interpolation, input_shape in gen: if interpolation == KeepSizeByResize.NO_RESIZE: result.append(image) else: result.append( ia.imresize_single_image(image, input_shape[0:2], interpolation)) if images_were_array: # note here that NO_RESIZE can have led to different shapes nb_shapes = len({image.shape for image in result}) if nb_shapes == 1: result = np.array(result, dtype=images.dtype) return result @classmethod def _keep_size_maps(cls, augmentables, shapes_orig_images, shapes_orig_arrs, interpolations): result = [] gen = zip(augmentables, interpolations, shapes_orig_arrs, shapes_orig_images) for augmentable, interpolation, arr_shape_orig, img_shape_orig in gen: if interpolation == "NO_RESIZE": result.append(augmentable) else: augmentable = augmentable.resize( arr_shape_orig[0:2], interpolation=interpolation) augmentable.shape = img_shape_orig result.append(augmentable) return result @classmethod def _keep_size_keypoints(cls, kpsois_aug, shapes_orig, interpolations): result = [] gen = zip(kpsois_aug, interpolations, shapes_orig) for kpsoi_aug, interpolation, input_shape in gen: if interpolation == KeepSizeByResize.NO_RESIZE: result.append(kpsoi_aug) else: result.append(kpsoi_aug.on_(input_shape)) return result @classmethod def _get_shapes(cls, batch): result = dict() for column in batch.columns: result[column.name] = [cell.shape for cell in column.value] if batch.heatmaps is not None: result["heatmaps_arr"] = [ cell.arr_0to1.shape for cell in batch.heatmaps] if batch.segmentation_maps is not None: result["segmentation_maps_arr"] = [ cell.arr.shape for cell in batch.segmentation_maps] return result def _draw_samples(self, 
nb_images, random_state): rngs = random_state.duplicate(3) interpolations = self.interpolation.draw_samples((nb_images,), random_state=rngs[0]) if self.interpolation_heatmaps == KeepSizeByResize.SAME_AS_IMAGES: interpolations_heatmaps = np.copy(interpolations) else: interpolations_heatmaps = self.interpolation_heatmaps.draw_samples( (nb_images,), random_state=rngs[1] ) # Note that `interpolations_heatmaps == self.SAME_AS_IMAGES` # works here only if the datatype of the array is such that it # may contain strings. It does not work properly for e.g. # integer arrays and will produce a single bool output, even # for arrays with more than one entry. same_as_imgs_idx = [ip == self.SAME_AS_IMAGES for ip in interpolations_heatmaps] interpolations_heatmaps[same_as_imgs_idx] = \ interpolations[same_as_imgs_idx] if self.interpolation_segmaps == KeepSizeByResize.SAME_AS_IMAGES: interpolations_segmaps = np.copy(interpolations) else: # TODO This used previously the same seed as the heatmaps part # leading to the same sampled values. Was that intentional? # Doesn't look like it should be that way. interpolations_segmaps = self.interpolation_segmaps.draw_samples( (nb_images,), random_state=rngs[2] ) # Note that `interpolations_heatmaps == self.SAME_AS_IMAGES` # works here only if the datatype of the array is such that it # may contain strings. It does not work properly for e.g. # integer arrays and will produce a single bool output, even # for arrays with more than one entry. same_as_imgs_idx = [ip == self.SAME_AS_IMAGES for ip in interpolations_segmaps] interpolations_segmaps[same_as_imgs_idx] = \ interpolations[same_as_imgs_idx] return interpolations, interpolations_heatmaps, interpolations_segmaps def _to_deterministic(self): aug = self.copy() aug.children = aug.children.to_deterministic() aug.deterministic = True aug.random_state = self.random_state.derive_rng_() return aug def get_parameters(self): """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.""" return [self.interpolation, self.interpolation_heatmaps] def get_children_lists(self): """See :func:`~imgaug.augmenters.meta.Augmenter.get_children_lists`.""" return [self.children] def __str__(self): pattern = ( "%s(" "interpolation=%s, " "interpolation_heatmaps=%s, " "name=%s, " "children=%s, " "deterministic=%s" ")") return pattern % ( self.__class__.__name__, self.interpolation, self.interpolation_heatmaps, self.name, self.children, self.deterministic)
from .common import * __all__ = ["TestReadWriteMemory"] class TestReadWriteMemory(MCPTestCase): def test_read_flash_ok(self): self.mcp.dev.read.return_value = self.xb0_00 self.assertEqual(self.mcp._read_flash(FlashDataSubcode.ChipSettings), self.xb0_00[4:14]) def test_read_sram_ok(self): self.mcp.dev.read.return_value = self.x61 self.assertEqual(self.mcp._read_sram(SramDataSubcode.ChipSettings), self.x61[4:22]) self.assertEqual(self.mcp._read_sram(SramDataSubcode.GPSettings), self.x61[22:26]) def test_read_flash_byte_ok(self): self.mcp.dev.read.return_value = self.xb0_00 for n in range(0,9): result = self.mcp._read_flash_byte(FlashDataSubcode.ChipSettings, n, range(8)) value = int("".join(["1" if x else "0" for x in reversed(result)]),2) self.assertEqual(value, self.xb0_00[4+n]) def test_read_sram_byte_ok(self): self.mcp.dev.read.return_value = self.x61 for n in range(0,9): result = self.mcp._read_sram_byte(SramDataSubcode.ChipSettings, n, range(8)) value = int("".join(["1" if x else "0" for x in reversed(result)]),2) self.assertEqual(value, self.x61[4+n]) def test_write_flash_byte_ok(self): # tests that 'write_flash_byte' sends the right data to hid write command xb1_00 = bytearray(64) xb1_00[0] = 0xb1 with patch.object(self.mcp, "_read_response", return_value = self.xb0_00): for byte in range(9): for bit in range(8): xb1_00[2:12] = self.xb0_00[4:14] xb1_00[2+byte] = self.mcp._MCP2221__and(xb1_00[2+byte], 0xff - (1<<bit)) self.mcp._write_flash_byte(FlashDataSubcode.ChipSettings, byte, [bit], [False]) self.assertEqual(self.mcp.dev.write.call_args[0][0], xb1_00) def test_write_sram_ok(self): # tests that 'write_sram' sends the right data to hid write command with patch.object(self.mcp, "_read_response", return_value = self.x61): v = 0xff for byte in range(9): self.mcp._write_sram(SramDataSubcode.ChipSettings, byte, v) self.assertEqual(self.mcp.dev.write.call_args[0][0][2+byte], v)
#!/usr/bin/env python from decimal import Decimal, getcontext from fractions import Fraction digits = 500 getcontext().prec = digits def leibnitz(n): """ Parameters ---------- n : int Returns ------- Fraction Approximation of pi. """ pi = Fraction(0) sign = 1 for k in range(1, n, 2): pi = pi + sign*Fraction(4, k) sign *= -1 return pi def calc_pi(n): """ Calculate PI. Parameters ---------- n : int Number of fractions. Returns ------- Fraction Approximation of pi. """ pi = Fraction(0) for k in range(n): # print(Fraction(-1,4)**k) pi += (Fraction(-1, 4)**k * (Fraction(1, 1+2*k) + Fraction(2, 1+4*k) + Fraction(1, 3+4*k))) return pi def get_correct_digits(approx): """ Get how many digits were correct. Parameters ---------- approx : str String representation of an approximation of pi. Returns ------- int The number of correct digits. If the number has too many correct digits, -1 is returned. """ pi = ("3.14159265358979323846264338327950288419716939937510582097494459230" "78164062862089986280348253421170679") for i, el in enumerate(pi): if len(approx) <= i: return i-1 if el != approx[i]: return i return -1 # Very good! if __name__ == "__main__": # for n in range(1,180): # approx = calc_pi(n) # dec =Decimal(approx.numerator) / Decimal(approx.denominator) # #print(dec) # print("correct digits: %s (n=%i)" % (get_correct_digits(str(dec)),n)) n = digits approx = calc_pi(n) dec = Decimal(approx.numerator) / Decimal(approx.denominator) print(dec)
import os.path from app.data.database import init_db, db_path, get_expected_pathname, set_path def db_exists(): return os.path.isfile(db_path) def check_db(): global db_path if (db_path != get_expected_pathname()): print('DB Check: Running backup') backup_database_to(get_expected_pathname()) init_db() if (not db_exists()): print('DB Check: No database found. Making a new one...') init_db() from app.data.camper_editing import reset_locs reset_locs() def backup_database_to(filename): global db_path from shutil import copy2 s = open('data/BACKUPDATA', 'a+') s.seek(0) prev_path = s.read() set_path(filename) db_path = filename #this line is a crude fix for some messy scoping s.truncate(0) s.seek(0) s.write(filename) if (prev_path == ""): print("No previous database found, a new one will be generated. This may happen if the BACKUPDATA file is missing or corrupt.") return False elif (prev_path == filename): print("Tried to back up to the same file!") else: print ("backing up & copying") from app.data.camper_editing import reset_locs copy2(prev_path, filename) reset_locs() return filename
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . import outputs from ._enums import * __all__ = [ 'GetGroupResult', 'AwaitableGetGroupResult', 'get_group', 'get_group_output', ] @pulumi.output_type class GetGroupResult: def __init__(__self__, arn=None, configuration=None, description=None, resource_query=None, resources=None, tags=None): if arn and not isinstance(arn, str): raise TypeError("Expected argument 'arn' to be a str") pulumi.set(__self__, "arn", arn) if configuration and not isinstance(configuration, list): raise TypeError("Expected argument 'configuration' to be a list") pulumi.set(__self__, "configuration", configuration) if description and not isinstance(description, str): raise TypeError("Expected argument 'description' to be a str") pulumi.set(__self__, "description", description) if resource_query and not isinstance(resource_query, dict): raise TypeError("Expected argument 'resource_query' to be a dict") pulumi.set(__self__, "resource_query", resource_query) if resources and not isinstance(resources, list): raise TypeError("Expected argument 'resources' to be a list") pulumi.set(__self__, "resources", resources) if tags and not isinstance(tags, list): raise TypeError("Expected argument 'tags' to be a list") pulumi.set(__self__, "tags", tags) @property @pulumi.getter def arn(self) -> Optional[str]: """ The Resource Group ARN. """ return pulumi.get(self, "arn") @property @pulumi.getter def configuration(self) -> Optional[Sequence['outputs.GroupConfigurationItem']]: return pulumi.get(self, "configuration") @property @pulumi.getter def description(self) -> Optional[str]: """ The description of the resource group """ return pulumi.get(self, "description") @property @pulumi.getter(name="resourceQuery") def resource_query(self) -> Optional['outputs.GroupResourceQuery']: return pulumi.get(self, "resource_query") @property @pulumi.getter def resources(self) -> Optional[Sequence[str]]: return pulumi.get(self, "resources") @property @pulumi.getter def tags(self) -> Optional[Sequence['outputs.GroupTag']]: return pulumi.get(self, "tags") class AwaitableGetGroupResult(GetGroupResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetGroupResult( arn=self.arn, configuration=self.configuration, description=self.description, resource_query=self.resource_query, resources=self.resources, tags=self.tags) def get_group(name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGroupResult: """ Schema for ResourceGroups::Group :param str name: The name of the resource group """ __args__ = dict() __args__['name'] = name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('aws-native:resourcegroups:getGroup', __args__, opts=opts, typ=GetGroupResult).value return AwaitableGetGroupResult( arn=__ret__.arn, configuration=__ret__.configuration, description=__ret__.description, resource_query=__ret__.resource_query, resources=__ret__.resources, tags=__ret__.tags) @_utilities.lift_output_func(get_group) def get_group_output(name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGroupResult]: """ Schema for 
ResourceGroups::Group :param str name: The name of the resource group """ ...
import numpy as np def partition(arr, low, high): i = (low-1) # index of smaller element pivot = arr[high] # pivot for j in range(low, high): # If current element is smaller than the pivot if arr[j] < pivot: # increment index of smaller element i = i+1 arr[i], arr[j] = arr[j], arr[i] arr[i+1], arr[high] = arr[high], arr[i+1] return (i + 1) def quickSort(arr, low, high): if low < high: # pi is partitioning index, arr[p] is now # at right place pi = partition(arr, low, high) # Separately sort elements before # partition and after partition quickSort(arr, low, pi-1) quickSort(arr, pi + 1, high) # Driver code to test above # arr = [10, 7, 8, 9, 1, 5] arr = np.random.randint(0, 1000000, 200000) n = len(arr) quickSort(arr, 0, n-1) # print(f"Sorted array is: {arr}")
#!/bin/env python3 # Steps requried to use # install requried libraries # (root)# dnf install python3-ldap3 # # Create python virtual environment directory # (user)$ python3 -m venv ./venv3 # # Enable virtual environment # (user)$ source ./venv3/bin/activate # # Update pip and then install needed libary # (user-venv3)$ pip install --upgrade pip # (user-venv3)$ pip install python-freeipa # (user-venv3)$ pip install ldap3 # # Execute Script: # (user-venv3)$ ./load_test.py -h # -- not required, saved as a note # dnf install python3-requests-kerberos python3-requests-gssapi import sys import time from datetime import datetime import re import argparse import logging #from linetimer import CodeTimer import itertools import pprint import subprocess import socket import dns.resolver import urllib3 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) # from ldap3 import Server, Connection, ALL, MODIFY_ADD import ldap3 from python_freeipa import ClientMeta # import requests #from requests_kerberos import HTTPKerberosAuth # generate a 4 digit randomizer from the current time # randomizer = int(time.time()) % 10000 randomizer = datetime.now().strftime("%d%H%M") start_timestr = datetime.now().strftime("%Y%m%d %H:%M") start_time = time.time() uid_template = "tuser{}_{{seq}}".format(randomizer) pp=pprint.PrettyPrinter(indent=2) class LogFilter(object): def __init__(self,level,type='ge'): self.__level = level self.__type = type def filter(self, logRecord): if self.__type == 'ge': return logRecord.levelno >= self.__level elif self.__type == 'eq': return logRecord.levelno == self.__level else: return logRecord.levelno <= self.__level class MyLogger(logging.getLoggerClass()): _PERF = 21 def __init__(self, name, **kwargs ): super().__init__(name, **kwargs) logging.addLevelName(self._PERF, 'PERF') def perf(self, message, *args, **kwargs): if self.isEnabledFor(self._PERF): self._log(self._PERF, message, args, **kwargs) logging.setLoggerClass(MyLogger) logger = logging.getLogger('IDM_user_load_tester') logger.setLevel(logging.INFO) _stout_handler = logging.StreamHandler() _stout_handler.setLevel(logging.INFO) logger.addHandler(_stout_handler) def iter_timer(iterable, step=10, label=""): start = time.time() last_t = start loop_tag = "loop {}{}{{}}".format(label, " "*bool(label)) logger.perf(loop_tag.format("start")) pos = 0 # step_count=len(iterable)//step for item in iterable: pos = pos + 1 if pos != 0 and pos % step == 0: logger.perf("{}: {:4.3f} {:4.3f}".format(item,time.time() - start, time.time() - last_t)) last_t = time.time() yield item logger.perf("{}: {:4.3f} {:4.3f}".format(pos,time.time() - start, time.time() - last_t)) logger.perf(loop_tag.format("end")) def loop_timer(count,step=10,label=""): start = time.time() last_t = start loop_tag = "loop {}{}{{}}".format(label, " "*bool(label)) logger.perf(loop_tag.format("start")) for item in range(count): if item != 0 and item % step == 0: logger.perf("{}: {:4.3f} {:4.3f}".format(item,time.time() - start, time.time() - last_t)) last_t = time.time() yield item logger.perf("{}: {:4.3f} {:4.3f}".format(count,time.time() - start, time.time() - last_t)) logger.perf(loop_tag.format("end")) # creates a generator to iterate through a list in chunks # returns an iterator chunk of the iterable of up to the given size. 
def chunker(iterable, size): it = iter(iterable) while True: chunk = tuple(itertools.islice(it,size)) if not chunk: return yield chunk def dump_ldap_stats(reset=True): logger.debug(ldap_conn.usage) if reset: ldap_conn.usage.reset() def generate_user(seq_num, ldif_out=False, dc_dn=None): #create a list/dict of user entries to use for passing to a function user = {} user["a_uid"] = uid_template.format(seq=seq_num) user["o_givenname"] = str(seq_num) user["o_sn"] = "tuser_{}".format(randomizer) user["o_cn"] = "{} {}".format(user["o_givenname"], user["o_sn"]) user["o_preferredlanguage"]='EN' user["o_employeetype"]="Created via load_test.py. Run started at: {}".format(start_timestr) # if the user is to be used for LDIF, strip the first two prepended chars if ldif_out: clean_rex = r"^._" keylist = list(user.keys()) user['attributes']={} for key in keylist: new_key = re.sub(clean_rex,'',key) user['attributes'][new_key]=user[key] del user[key] if dc_dn is not None: user['dn']="uid={},cn=staged users,cn=accounts,cn=provisioning,{}".format(user['attributes']['uid'],dc_dn) user['object_class']=['top','inetorgperson'] return user def add_users_api(total): users=[] for i in loop_timer(args.count,args.count//10,label="user_add_api"): user = generate_user(i) users.append(user["a_uid"]) logger.debug(user) if args.stage: user_out = client.stageuser_add(**user) else: user_out = client.user_add(**user) logger.debug(user_out) return users def add_users_stage(total): users=[] if args.ldap_stage: for i in loop_timer(args.count,args.count//10,label="user_add_stage_ldap"): user = generate_user(i, ldif_out=True, dc_dn=dom_dn) users.append(user['attributes']['uid']) user_dn=user['dn'] del user['dn'] ldap_conn.add(user_dn,**user) else: for i in loop_timer(args.count,args.count//10,label="user_add_stage"): user = generate_user(i) users.append(user["a_uid"]) logger.debug(user) user_out = client.stageuser_add(**user) logger.debug(user_out) for i in iter_timer(users,args.count//10,label="user_activate"): activate_out = client.stageuser_activate(i) logger.debug(activate_out) return users def get_users(template): logger.perf("Checking for user template '{}'".format(template)) if client.user_find(template,o_sizelimit=1)['count'] > 0: users = [ user['uid'][0] for user in client.user_find(template,o_sizelimit=0,o_timelimit=0)['result']] logger.perf("Found {} users".format(len(users))) else: logger.perf("Unable to find user template") exit(1) return users def get_users_ldap(template): logger.perf("Checking for user template '{}'".format(template)) results = client.user_find(template,o_sizelimit=1) if results['count'] > 0: result=results['result'][0] uid = result['uid'][0] user_dn=result['dn'] base_dn = re.sub("uid={},".format(uid),'',user_dn) entry_gen = ldap_conn.extend.standard.paged_search(search_base = base_dn, search_filter = "(uid={}*)".format(template), search_scope = ldap3.SUBTREE, attributes = '*', paged_size=1000, generator=True) total = 0 users=[] for entry in entry_gen: # print(entry) total += 1 if total % 10000 == 0: logger.perf("Loaded {} users".format(total)) dump_ldap_stats() # extract user uid. 
For some reason uid is a list, we only need the first users.append(entry['attributes']['uid'][0]) if args.user_limit>-1 and total >= args.user_limit: break logger.perf("Loaded {} users".format(len(users))) dump_ldap_stats() else: logger.perf("Unable to find user template") exit(1) return users def create_group_add_users_api(i,users): group_name = "group{}_{}".format(randomizer,i) group_desc = "Test group for load_test.py. Run started at: {}".format(start_timestr) logger.info("Creating group: {}".format(group_name)) result = client.group_add(group_name, o_description=group_desc) if result["value"]==group_name: logger.info("Success") logger.debug(result) logger.perf("Group: {}".format(group_name)) logger.info("Adding {} users".format(len(users))) result = client.group_add_member(group_name, o_user=users) logger.info("Done") logger.debug(result) def create_group_add_users_ldap(i,users,ldap_conn,base_user_dn,chunk=-1): group_name = "group{}_{}".format(randomizer,i) group_desc = "Test group for load_test.py. Run started at: {}".format(start_timestr) logger.info("Creating group: {}".format(group_name)) result = client.group_add(group_name, o_description=group_desc,o_raw=True) group_dn=result['result']['dn'] logger.debug(result) mod_group_users_ldap(users, ldap_conn, base_user_dn, group_dn, ldap3.MODIFY_ADD, chunk) def remove_group_users_ldap(users, ldap_conn, base_user_dn, group_name, group_dn, chunk=-1): logger.info("Group to delete: {}".format(group_dn)) start = time.time() mod_group_users_ldap(users, ldap_conn, base_user_dn, group_dn, ldap3.MODIFY_DELETE, chunk) logger.perf("Removing users from group took: {:4.3f}".format(time.time() - start)) result = client.group_show(group_name) logger.info("Group show: {}".format(result)) logger.info("Delete group from IDM: {}".format(group_dn)) start = time.time() result = client.group_del(group_name) logger.perf("Delete group using API took: {:4.3f}".format(time.time() - start)) logger.info("Group del result: {}".format(result)) def ldap_modify_retry(*fargs, **kwargs): for retry_num in range(args.max_retries+1): try: return(ldap_conn.modify(*fargs,**kwargs)) except Exception as e: logger.perf("Exception occurred") logger.perf("'{}'".format(e)) logger.perf("{} retries left".format(args.max_retries-retry_num)) ldap_conn.unbind() ldap_conn.bind() logger.info("LDAP Connection rebound") def mod_group_users_ldap(users, ldap_conn, base_user_dn, group_dn, ldap_mod_op, chunk=-1): if chunk==-1: chunk=len(users) user_dn_list = [base_user_dn.format(user) for user in users] for user_dn_chunk in chunker(user_dn_list,chunk): # print(user_dn_chunk) logger.perf("Chunk ({})".format(len(user_dn_chunk))) logger.debug("Showing first 20 of user_dn_chunk: {}".format(user_dn_chunk[:20])) # result = ldap_conn.modify(group_dn,{"member":[(ldap_mod_op, user_dn_chunk)]}) result = ldap_modify_retry(group_dn,{"member":[(ldap_mod_op, user_dn_chunk)]}) dump_ldap_stats() logger.debug("LDAP Modify result: {}".format(result)) if args.rebind: logger.perf("rebinding LDAP connection") ldap_conn.unbind() ldap_conn.bind() if args.delay>0: logger.perf("Sleeping {} seconds".format(args.delay)) time.sleep(args.delay) def check_dns_record(server, domain, record): resolver = dns.resolver.Resolver() resolver.nameservers=[socket.gethostbyname(server)] try: rdata = resolver.query(record + "." + domain) logger.perf("Server [{}] answered with [{}]".format(server, rdata[0].address)) return 1 except dns.resolver.NXDOMAIN: logger.perf("Record [{}] doesn't exist on server [{}]".format(record + "."
+ domain, server)) return 0 parser = argparse.ArgumentParser(description="Generate load test data for IdM", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('-v', dest='verbosity', action='count', default=0, help="Increase Verbosity, default is errors only. Only effective up to 3 levels.") parser.add_argument('-c', type=int, dest='count', help="Total count of users to add") parser.add_argument('-g', dest='group_count', default=1, type=int, help="Number of groups to create") parser.add_argument('-S', dest='server', type=str, help="Server to connect to") parser.add_argument('-U', dest='user', type=str, help="User account to use for connect") parser.add_argument('-P', dest='password', type=str, help="Password for connection") parser.add_argument('--stage', dest='stage', action='store_true', default=False, help="Create user in stage not active") parser.add_argument('--stage-ldap', dest='ldap_stage', default=False, action='store_true', help='Create stage users via ldap not API') parser.add_argument('--ldap-group', dest='ldap_group', default=False, action='store_true', help="Add users to group using LDAP directly") parser.add_argument('--ldap-group-remove', dest='ldap_group_del', type=str, help="Remove users from group using LDAP directly") parser.add_argument('-C', dest='chunk', type=int, default=-1, help="Chunk size for batching user adds to groups, -1 means all users given in count") parser.add_argument('-r', dest='reuse_template', type=str, help="Reuse existing users for group add using given user naming template") parser.add_argument('-D', dest='delay',type=int, default=0, help="Delay N seconds between chunks") parser.add_argument('--rebind', dest='rebind',default=False,action='store_true', help="Perform an unbind/bind operation between ldap operations.") parser.add_argument('-l', dest='user_limit', type=int, default=-1, help="Limit the number of users returned by reuse") parser.add_argument('--max-retries',dest='max_retries', type=int, default=0, help="Maximum number of retries for a failed chunk operation") parser.add_argument('--check-repl', dest='check_repl',default=False,action='store_true', help="Check when replication is finished by adding a DNS record") args=parser.parse_args() # setting up logger here to prevent log files being generated when showing help perf_logfile = "perf_{}".format(randomizer) _perf_handler = logging.FileHandler(perf_logfile) _perf_formatter = logging.Formatter("%(asctime)s; %(message)s") _perf_handler.setFormatter(_perf_formatter) _perf_handler.addFilter(LogFilter(MyLogger._PERF,type='eq')) logger.addHandler(_perf_handler) if args.verbosity: # Error is a level of 40.
level=30-(args.verbosity*10) if level<0: level=0 logger.setLevel(level) levels={ 5: "CRITICAL", 4: "ERROR", 3: "WARNING", 2: "INFO", 1: "DEBUG", 0: "ALL" } if level!=30: log_file = "log_{}".format(randomizer) _file_handler = logging.FileHandler(log_file) _file_formatter = logging.Formatter('%(asctime)s %(levelname)s :: %(message)s') _file_handler.setFormatter(_file_formatter) _file_handler.addFilter(LogFilter(level)) logger.addHandler(_file_handler) logger.info("Logging to file '{}'".format(log_file)) logger.info("Debug level: {0} ({1})".format(levels[level // 10],level)) # client = ClientMeta('ipaserver0.example.com',False) # client.login('admin', 'admin123') # kerberos seems broken using OS rpms on RHEL 8 #client.login_kerberos() # user = client.user_add('test4', 'John', 'Doe', 'John Doe', o_preferredlanguage='EN') # Output some data to the user about the script options passed in # Not working as expected when git not found try: commit_info = str(subprocess.check_output(['git', 'log', '-n', '1', '--pretty=tformat:"%ci %H"']),"utf-8").strip() logger.perf("Commit Info: {}".format(commit_info)) except: logger.perf("No git info found") pass logger.perf("Start Time: {}".format(start_timestr)) logger.perf("User count: {} Group count: {}".format(args.count,args.group_count)) logger.perf("Server: {}".format(args.server)) logger.perf("Perf Log file: {}".format(perf_logfile)) if args.stage: if args.ldap_stage: logger.perf("Creating Stage users via ldap") else: logger.perf("Creating Stage users via API") else: logger.perf("Creating active users via API") if args.ldap_group: logger.perf("Adding users to groups via LDAP") if args.chunk>-1: logger.perf(" Using a chunk size of {}".format(args.chunk)) else: logger.perf("Adding users to groups via API") if args.reuse_template: logger.perf("Reusing users starting with: '{}'".format(args.reuse_template)) if args.user_limit>-1: logger.perf(" Limiting reuse to first {} users found".format(args.user_limit)) logger.debug(args) logger.perf('----') # end start header client = ClientMeta(args.server,False) client.login(args.user, args.password) dnszone = client.dnszone_find(o_forward_only=True)['result'][0] servers = dnszone['nsrecord'] domain = dnszone['idnsname'][0]['__dns_name__'] logger.info("Found servers: {} for domain: [{}]".format(servers, domain)) if args.ldap_group or args.ldap_stage: user_dn=client.user_show(args.user,o_all=True)['result']['dn'] base_user_dn = re.sub("^uid={}".format(args.user),'uid={}',user_dn) dom_dn = re.search("(dc=.*)",user_dn, re.IGNORECASE).group(1) ldap_server = ldap3.Server(args.server, get_info=ldap3.ALL) ldap_conn = ldap3.Connection(ldap_server,user=user_dn, password=args.password, auto_bind=True, collect_usage=True) if args.reuse_template: user_dn=client.user_show(args.user,o_all=True)['result']['dn'] base_user_dn = re.sub("^uid={},".format(args.user),'',user_dn) logger.debug("base_user_dn: {}".format(base_user_dn)) ldap_server = ldap3.Server(args.server, get_info=ldap3.ALL) ldap_conn = ldap3.Connection(ldap_server,user=user_dn, password=args.password, auto_bind=True, collect_usage=True) users=get_users_ldap(args.reuse_template) else: logger.info("Creating {} users".format(args.count)) logger.info("template: {}".format(uid_template)) logger.info("Checking for existing templated users") user_check=client.user_find(uid_template.format(seq=0)) if user_check["count"]>0: sec_to_wait = 61 - datetime.now().second logger.error("Existing users found please wait {} seconds".format(sec_to_wait)) exit(1) else: logger.info("Proceeding") 
if args.stage: users = add_users_stage(args.count) else: users = add_users_api(args.count) if args.ldap_group: # print(ldap_server.info) # for i in iter_timer(range(args.group_count),step=1,label="group_add_user_ldap"): # create_group_add_users_ldap(i,users,ldap_conn,base_user_dn,chunk=args.chunk) for i in loop_timer(args.group_count,1,label="group_add_user_ldap"): create_group_add_users_ldap(i,users,ldap_conn,base_user_dn,chunk=args.chunk) elif args.ldap_group_del is not None: user_dn=client.user_show(args.user,o_all=True)['result']['dn'] group_dn=client.group_show(args.ldap_group_del,o_all=True)['result']['dn'] base_user_dn = re.sub("^uid={}".format(args.user),'uid={}',user_dn) ldap_server = ldap3.Server(args.server, get_info=ldap3.ALL) ldap_conn = ldap3.Connection(ldap_server,user=user_dn, password=args.password, auto_bind=True) remove_group_users_ldap(users, ldap_conn, base_user_dn, args.ldap_group_del, group_dn, chunk=args.chunk) else: for i in loop_timer(args.group_count,1,label="group_add_user_api"): create_group_add_users_api(i,users) logger.perf('----') logger.perf("End Time: {}".format(datetime.now().strftime("%Y%m%d %H:%M"))) run_time=time.time() - start_time logger.perf("Total Run Time: {:.3f}sec".format(run_time)) logger.perf("Total Run time: {:d}min {:.3f}sec".format(int(run_time//60),run_time%60)) if args.check_repl: record = "trecord{}".format(randomizer) client.dnsrecord_add(a_dnszoneidnsname=domain, a_idnsname=record, o_a_part_ip_address='1.1.1.1') check_result = 0 itr_ctr = 0 while check_result < len(servers) and itr_ctr < 600: time.sleep(1) check_result = 0 logger.perf("---- Iteration [{}] ----".format(itr_ctr)) for server in servers: check_result += check_dns_record(server, domain, record) itr_ctr += 1 logger.perf('----') logger.perf("End Time with replication: {}".format(datetime.now().strftime("%Y%m%d %H:%M"))) run_time=time.time() - start_time logger.perf("Total Run Time with replication: {:.3f}sec".format(run_time)) logger.perf("Total Run time with replication: {:d}min {:.3f}sec".format(int(run_time//60),run_time%60))
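As a quick illustration of the chunker() helper defined earlier in this script, the standalone sketch below (with made-up DN strings) shows the batching behaviour that mod_group_users_ldap relies on: tuples of at most `size` items, with the final partial chunk yielded cleanly before the generator stops.

import itertools

def chunker(iterable, size):
    # Same generator as in load_test.py: yields tuples of up to `size` items.
    it = iter(iterable)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk

# Made-up DNs purely for illustration.
user_dns = ["uid=tuser_{},cn=users,cn=accounts,dc=example,dc=test".format(i) for i in range(7)]
for chunk in chunker(user_dns, 3):
    print(len(chunk))
# Prints 3, 3, 1; in the real script each chunk becomes one
# ldap_conn.modify(group_dn, {"member": [(MODIFY_ADD, chunk)]}) call.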
"""Test UPnP/IGD config flow.""" from datetime import timedelta from unittest.mock import AsyncMock, patch from homeassistant import config_entries, data_entry_flow from homeassistant.components import ssdp from homeassistant.components.upnp.const import ( CONFIG_ENTRY_SCAN_INTERVAL, CONFIG_ENTRY_ST, CONFIG_ENTRY_UDN, DEFAULT_SCAN_INTERVAL, DISCOVERY_LOCATION, DISCOVERY_NAME, DISCOVERY_ST, DISCOVERY_UDN, DISCOVERY_UNIQUE_ID, DISCOVERY_USN, DOMAIN, DOMAIN_COORDINATORS, ) from homeassistant.components.upnp.device import Device from homeassistant.helpers.typing import HomeAssistantType from homeassistant.setup import async_setup_component from .mock_device import MockDevice from tests.common import MockConfigEntry async def test_flow_ssdp_discovery(hass: HomeAssistantType): """Test config flow: discovered + configured through ssdp.""" udn = "uuid:device_1" location = "dummy" mock_device = MockDevice(udn) discoveries = [ { DISCOVERY_LOCATION: location, DISCOVERY_NAME: mock_device.name, DISCOVERY_ST: mock_device.device_type, DISCOVERY_UDN: mock_device.udn, DISCOVERY_UNIQUE_ID: mock_device.unique_id, DISCOVERY_USN: mock_device.usn, } ] with patch.object( Device, "async_create_device", AsyncMock(return_value=mock_device) ), patch.object( Device, "async_discover", AsyncMock(return_value=discoveries) ), patch.object( Device, "async_supplement_discovery", AsyncMock(return_value=discoveries[0]) ): # Discovered via step ssdp. result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data={ ssdp.ATTR_SSDP_LOCATION: location, ssdp.ATTR_SSDP_ST: mock_device.device_type, ssdp.ATTR_SSDP_USN: mock_device.usn, ssdp.ATTR_UPNP_UDN: mock_device.udn, }, ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "ssdp_confirm" # Confirm via step ssdp_confirm. result = await hass.config_entries.flow.async_configure( result["flow_id"], user_input={}, ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == mock_device.name assert result["data"] == { CONFIG_ENTRY_ST: mock_device.device_type, CONFIG_ENTRY_UDN: mock_device.udn, } async def test_flow_ssdp_discovery_incomplete(hass: HomeAssistantType): """Test config flow: incomplete discovery through ssdp.""" udn = "uuid:device_1" location = "dummy" mock_device = MockDevice(udn) # Discovered via step ssdp. result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data={ ssdp.ATTR_SSDP_ST: mock_device.device_type, # ssdp.ATTR_UPNP_UDN: mock_device.udn, # Not provided. ssdp.ATTR_SSDP_LOCATION: location, }, ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "incomplete_discovery" async def test_flow_user(hass: HomeAssistantType): """Test config flow: discovered + configured through user.""" udn = "uuid:device_1" location = "dummy" mock_device = MockDevice(udn) discoveries = [ { DISCOVERY_LOCATION: location, DISCOVERY_NAME: mock_device.name, DISCOVERY_ST: mock_device.device_type, DISCOVERY_UDN: mock_device.udn, DISCOVERY_UNIQUE_ID: mock_device.unique_id, DISCOVERY_USN: mock_device.usn, } ] with patch.object( Device, "async_create_device", AsyncMock(return_value=mock_device) ), patch.object( Device, "async_discover", AsyncMock(return_value=discoveries) ), patch.object( Device, "async_supplement_discovery", AsyncMock(return_value=discoveries[0]) ): # Discovered via step user. 
result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" # Confirmed via step user. result = await hass.config_entries.flow.async_configure( result["flow_id"], user_input={"unique_id": mock_device.unique_id}, ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == mock_device.name assert result["data"] == { CONFIG_ENTRY_ST: mock_device.device_type, CONFIG_ENTRY_UDN: mock_device.udn, } async def test_flow_import(hass: HomeAssistantType): """Test config flow: discovered + configured through configuration.yaml.""" udn = "uuid:device_1" mock_device = MockDevice(udn) location = "dummy" discoveries = [ { DISCOVERY_LOCATION: location, DISCOVERY_NAME: mock_device.name, DISCOVERY_ST: mock_device.device_type, DISCOVERY_UDN: mock_device.udn, DISCOVERY_UNIQUE_ID: mock_device.unique_id, DISCOVERY_USN: mock_device.usn, } ] with patch.object( Device, "async_create_device", AsyncMock(return_value=mock_device) ), patch.object( Device, "async_discover", AsyncMock(return_value=discoveries) ), patch.object( Device, "async_supplement_discovery", AsyncMock(return_value=discoveries[0]) ): # Discovered via step import. result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_IMPORT} ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == mock_device.name assert result["data"] == { CONFIG_ENTRY_ST: mock_device.device_type, CONFIG_ENTRY_UDN: mock_device.udn, } async def test_flow_import_already_configured(hass: HomeAssistantType): """Test config flow: discovered, but already configured.""" udn = "uuid:device_1" mock_device = MockDevice(udn) # Existing entry. config_entry = MockConfigEntry( domain=DOMAIN, data={ CONFIG_ENTRY_UDN: mock_device.udn, CONFIG_ENTRY_ST: mock_device.device_type, }, options={CONFIG_ENTRY_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL}, ) config_entry.add_to_hass(hass) # Discovered via step import. result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_IMPORT} ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_flow_import_incomplete(hass: HomeAssistantType): """Test config flow: incomplete discovery, configured through configuration.yaml.""" udn = "uuid:device_1" mock_device = MockDevice(udn) location = "dummy" discoveries = [ { DISCOVERY_LOCATION: location, DISCOVERY_NAME: mock_device.name, # DISCOVERY_ST: mock_device.device_type, DISCOVERY_UDN: mock_device.udn, DISCOVERY_UNIQUE_ID: mock_device.unique_id, DISCOVERY_USN: mock_device.usn, } ] with patch.object(Device, "async_discover", AsyncMock(return_value=discoveries)): # Discovered via step import. result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_IMPORT} ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "incomplete_discovery" async def test_options_flow(hass: HomeAssistantType): """Test options flow.""" # Set up config entry. 
udn = "uuid:device_1" location = "http://192.168.1.1/desc.xml" mock_device = MockDevice(udn) discoveries = [ { DISCOVERY_LOCATION: location, DISCOVERY_NAME: mock_device.name, DISCOVERY_ST: mock_device.device_type, DISCOVERY_UDN: mock_device.udn, DISCOVERY_UNIQUE_ID: mock_device.unique_id, DISCOVERY_USN: mock_device.usn, } ] config_entry = MockConfigEntry( domain=DOMAIN, data={ CONFIG_ENTRY_UDN: mock_device.udn, CONFIG_ENTRY_ST: mock_device.device_type, }, options={CONFIG_ENTRY_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL}, ) config_entry.add_to_hass(hass) config = { # no upnp, ensures no import-flow is started. } with patch.object( Device, "async_create_device", AsyncMock(return_value=mock_device) ), patch.object(Device, "async_discover", AsyncMock(return_value=discoveries)): # Initialisation of component. await async_setup_component(hass, "upnp", config) await hass.async_block_till_done() # DataUpdateCoordinator gets a default of 30 seconds for updates. coordinator = hass.data[DOMAIN][DOMAIN_COORDINATORS][mock_device.udn] assert coordinator.update_interval == timedelta(seconds=DEFAULT_SCAN_INTERVAL) # Options flow with no input results in form. result = await hass.config_entries.options.async_init( config_entry.entry_id, ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM # Options flow with input results in update to entry. result2 = await hass.config_entries.options.async_configure( result["flow_id"], user_input={CONFIG_ENTRY_SCAN_INTERVAL: 60}, ) assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert config_entry.options == { CONFIG_ENTRY_SCAN_INTERVAL: 60, } # Also updates DataUpdateCoordinator. assert coordinator.update_interval == timedelta(seconds=60)
from .BSD500 import BSD500 __all__ = ('BSD500',)
week = ["SUN", "MON", "TUE", "WED", "THU", "FRI", "SAT"] print(7-week.index(input()))
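Read literally, the one-liner above prints 7 minus the index of the entered weekday in a SUN-first list, which amounts to the number of days until the next Sunday (7 for SUN itself). A slightly more explicit sketch of the same computation, under that interpretation:

WEEK = ["SUN", "MON", "TUE", "WED", "THU", "FRI", "SAT"]

def days_until_next_sunday(day: str) -> int:
    # "SAT" -> 1, "SUN" -> 7 (a full week), matching 7 - WEEK.index(day)
    return 7 - WEEK.index(day)

assert days_until_next_sunday("WED") == 4
assert days_until_next_sunday("SAT") == 1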
""" priorityqueue.py Priority Queue Implementation with a O(log n) Remove Method This file implements min- amd max-oriented priority queues based on binary heaps. I found the need for a priority queue with a O(log n) remove method. This can't be achieved with any of Python's built in collections including the heapq module, so I built my own. The heap is arranged according to a given key function. Usage: >>> from priorityqueue import MinHeapPriorityQueue >>> items = [4, 0, 1, 3, 2] >>> pq = MinHeapPriorityQueue(items) >>> pq.pop() 0 A priority queue accepts an optional key function. >>> items = ['yy', 'ttttttt', 'z', 'wwww', 'uuuuuu', 'vvvvv', 'xxx'] >>> pq = MinHeapPriorityQueue(items, key=len) >>> pq.pop() 'z' >>> pq.pop() 'yy' Internally, the queue is a list of tokens of type 'Locator', which contain the priority value, the item itself, and its current index in the heap. The index field is updated whenever the heap is modified. This is what allows us to remove in O(log n). Appending an item returns it's Locator. >>> token = pq.append('a') >>> token Locator(value=1, item='a', index=0) >>> pq.remove(token) 'a' If we want to be able to remove any item in the list we can maintain an auxiliary dictionary mapping items to their Locators. Here's a simple example with unique items: >>> items = [12, 46, 89, 101, 72, 81] >>> pq = MinHeapPriorityQueue() >>> locs = {} >>> for item in items: ... locs[item] = pq.append(item) >>> locs[46] Locator(value=46, item=46, index=1) >>> pq.remove(locs[46]) 46 Iterating with 'for item in pq' or iter() will produce the items, not the Locator instances used in the internal representation. The items will be generated in sorted order. >>> items = [3, 1, 0, 2, 4] >>> pq = MinHeapPriorityQueue(items) >>> for item in pq: ... print(item) 0 1 2 3 4 """ # Inspired by: # - AdaptableHeapPriorityQueue in 'Data Structures and Algorithms in Python' # - the Go Standard library's heap package # - Python's heapq module # - Raymond Hettinger's SortedCollection on ActiveState # - Peter Norvig's PriorityQueue in the Python AIMA repo class MinHeapPriorityQueue(): """A locator-based min-oriented priority queue implemented with a binary heap, arranged according to a key function. 
Operation Running Time len(P), P.peek() O(1) P.update(loc, value, item) O(log n) P.append(item) O(log n)* P.pop() O(log n)* P.remove(loc) O(log n)* *amortized due to occasional resizing of the underlying python list """ def __init__(self, iterable=(), key=lambda x: x): self._key = key decorated = [(key(item), item) for item in iterable] self._pq = [self.Locator(value, item, i) for i, (value, item) in enumerate(decorated)] if len(self._pq) > 1: self._heapify() class Locator: """Token for locating an entry of the priority queue.""" __slots__ = '_value', '_item', '_index' def __init__(self, value, item, i): self._value = value self._item = item self._index = i def __eq__(self, other): return self._value == other._value def __lt__(self, other): return self._value < other._value def __le__(self, other): return self._value <= other._value def __repr__(self): return '{}(value={!r}, item={!r}, index={})'.format( self.__class__.__name__, self._value, self._item, self._index ) #------------------------------------------------------------------------------ # non-public def _parent(self, j): return (j-1) // 2 def _left(self, j): return 2*j + 1 def _right(self, j): return 2*j + 2 def _swap(self, i, j): """Swap the elements at indices i and j of array.""" self._pq[i], self._pq[j] = self._pq[j], self._pq[i] # Update the indices in the Locator instances. self._pq[i]._index = i self._pq[j]._index = j def _upheap(self, i): parent = self._parent(i) if i > 0 and self._pq[i] < self._pq[parent]: self._swap(i, parent) self._upheap(parent) def _downheap(self, i): n = len(self._pq) left, right = self._left(i), self._right(i) if left < n: child = left if right < n and self._pq[right] < self._pq[left]: child = right if self._pq[child] < self._pq[i]: self._swap(i, child) self._downheap(child) def _fix(self, i): self._upheap(i) self._downheap(i) def _heapify(self): start = self._parent(len(self) - 1) # Start at parent of last leaf for j in range(start, -1, -1): # going to and including the root. self._downheap(j) #------------------------------------------------------------------------------ # public def append(self, item): """Add an item to the heap""" token = self.Locator(self._key(item), item, len(self._pq)) self._pq.append(token) self._upheap(len(self._pq) - 1) # Upheap newly added position.
return token def update(self, loc, newval, newitem): """Update the priority value and item for the entry identified by Locator loc.""" j = loc._index if not (0 <= j < len(self) and self._pq[j] is loc): raise ValueError('Invalid locator') loc._value = newval loc._item = newitem self._fix(j) def remove(self, loc): """Remove and return the item identified by Locator loc.""" j = loc._index if not (0 <= j < len(self) and self._pq[j] is loc): raise ValueError('Invalid locator') if j == len(self) - 1: self._pq.pop() else: self._swap(j, len(self) - 1) self._pq.pop() self._fix(j) return loc._item def peek(self): """Return but do not remove item with minimum priority value.""" loc = self._pq[0] return loc._item def pop(self): """Remove and return item with minimum priority value.""" self._swap(0, len(self._pq) - 1) loc = self._pq.pop() self._downheap(0) return loc._item @property def items(self): return [token._item for token in self._pq] def __len__(self): return len(self._pq) def __contains__(self, item): return item in self.items def __iter__(self): return iter(sorted(self.items)) def __repr__(self): return '{}({})'.format(self.__class__.__name__, self._pq) class MaxHeapPriorityQueue(MinHeapPriorityQueue): """A locator-based max-oriented priority queue implemented with a binary heap, arranged according to a key function. Operation Running Time len(P), P.peek() O(1) P.update(loc, value, item) O(log n) P.append(item) O(log n)* P.pop() O(log n)* P.remove(loc) O(log n)* *amortized due to occasional resizing of the underlying python list """ # Override all relevant private methods of MinHeapPriorityQueue # with max-oriented versions. def _upheap(self, i): parent = self._parent(i) if i > 0 and self._pq[parent] < self._pq[i]: self._swap(i, parent) self._upheap(parent) def _downheap(self, i): n = len(self._pq) left, right = self._left(i), self._right(i) if left < n: child = left if right < n and self._pq[left] < self._pq[right]: child = right if self._pq[i] < self._pq[child]: self._swap(i, child) self._downheap(child) def __iter__(self): return iter(sorted(self.items, reverse=True)) __doc__ += """ >>> import random; random.seed(42) >>> from priorityqueue import MinHeapPriorityQueue, MaxHeapPriorityQueue Function to verify the min-heap invariant is true for all elements of pq. >>> def verify(pq): ... n = len(pq._pq) ... for i in range(n): ... left, right = 2*i + 1, 2*i + 2 ... if left < n: ... assert pq._pq[i] <= pq._pq[left] ... if right < n: ... assert pq._pq[i] <= pq._pq[right] Function to verify the max-heap invariant is true for all elements of pq. >>> def verify_max(pq): ... n = len(pq._pq) ... for i in range(n): ... left, right = 2*i + 1, 2*i + 2 ... if left < n: ... assert pq._pq[i] >= pq._pq[left] ... if right < n: ... assert pq._pq[i] >= pq._pq[right] >>> items = [random.randint(1, 100) for _ in range(10000)] >>> pq = MinHeapPriorityQueue(items) >>> verify(pq) >>> pq = MaxHeapPriorityQueue(items) >>> verify_max(pq) Check multiple signs for priority values. >>> items = list(range(100, -100, -1)) >>> random.shuffle(items) >>> pq = MinHeapPriorityQueue(items) >>> verify(pq) >>> pq = MaxHeapPriorityQueue(items) >>> verify_max(pq) Test pop, peek, append, remove, update, __len__, and __contains__ operations. >>> items = ['jjjjjjjjjj', 'iiiiiiiii', 'hhhhhhhh', ... 'ggggggg', 'ffffff', 'eeeee', ... 
'dddd', 'ccc', 'bb', 'a'] >>> pq = MinHeapPriorityQueue(items, key=len) >>> verify(pq) >>> pq.pop() 'a' >>> pq.pop() 'bb' >>> pq.peek() 'ccc' >>> pq.pop() 'ccc' >>> pq.pop() 'dddd' >>> pq.peek() 'eeeee' >>> pq.pop() 'eeeee' >>> _ = pq.append('a') >>> _ = pq.append('bb') >>> verify(pq) >>> pq = MaxHeapPriorityQueue(key=len) >>> pq.append([1, 2, 3]) Locator(value=3, item=[1, 2, 3], index=0) >>> pq.append([1, 2, 3, 4, 5, 6]) Locator(value=6, item=[1, 2, 3, 4, 5, 6], index=0) >>> pq.append([1]) Locator(value=1, item=[1], index=2) >>> pq.append([1, 2, 3, 4, 5, 6, 7, 8, 9]) Locator(value=9, item=[1, 2, 3, 4, 5, 6, 7, 8, 9], index=0) >>> len(pq) 4 >>> [1] in pq True >>> [1, 2, 3, 4, 5] in pq False >>> items = list(range(1, 10001)) >>> random.shuffle(items) >>> pq = MinHeapPriorityQueue(items) >>> verify(pq) >>> len(pq) == 10000 True >>> for i in range(1, 10001): ... x = pq.pop() ... assert x == i >>> pq = MinHeapPriorityQueue() >>> locs = {} >>> for x in items: ... locs[x] = pq.append(x) >>> pq.remove(locs[1]) 1 >>> pq.remove(locs[2]) 2 >>> pq.pop() 3 >>> for i in range(4, 100): ... _ = pq.remove(locs[i]) >>> pq.pop() 100 >>> verify(pq) >>> pq.update(locs[999], 1, 'test') >>> 999 in pq False >>> pq.pop() 'test' >>> 998 in pq True Test the items and __repr__ methods. >>> items = ['a', 'b', 'c'] >>> pq = MinHeapPriorityQueue(items) >>> pq MinHeapPriorityQueue([Locator(value='a', item='a', index=0), Locator(value='b', item='b', index=1), Locator(value='c', item='c', index=2)]) >>> pq.items == ['a', 'b', 'c'] True Check that __iter__ generates items in sorted order. >>> items = list(range(1000)) >>> pq = MinHeapPriorityQueue(items) >>> for i, x in enumerate(pq): ... assert i == x >>> pq = MaxHeapPriorityQueue(items) >>> for i, x in enumerate(pq): ... assert 999 - i == x """ if __name__ == "__main__": import doctest doctest.testmod()
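The Locator tokens make the classic decrease-key pattern straightforward, which heapq can only approximate by pushing duplicate stale entries. A minimal Dijkstra sketch built on MinHeapPriorityQueue, assuming a graph given as a dict of node -> list of (neighbor, weight) pairs; the sample graph is made up for illustration:

from priorityqueue import MinHeapPriorityQueue

def dijkstra(graph, source):
    # graph: {node: [(neighbor, weight), ...]}
    dist = {node: float("inf") for node in graph}
    dist[source] = 0
    pq = MinHeapPriorityQueue(key=dist.__getitem__)  # priority = current tentative distance
    locs = {node: pq.append(node) for node in graph}
    while len(pq):
        u = pq.pop()
        del locs[u]                         # u is now settled
        for v, w in graph[u]:
            if v in locs and dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
                pq.update(locs[v], dist[v], v)  # decrease-key in O(log n)
    return dist

graph = {
    "a": [("b", 4), ("c", 1)],
    "b": [("d", 1)],
    "c": [("b", 2), ("d", 5)],
    "d": [],
}
print(dijkstra(graph, "a"))  # {'a': 0, 'b': 3, 'c': 1, 'd': 4}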
from selenium import webdriver from fixture.session import SessionHelper from fixture.group import GroupHelper from fixture.contact import ContactHelper class Application: def __init__(self, browser, base_url): if browser == "firefox": self.wd = webdriver.Firefox() elif browser == "chrome": self.wd = webdriver.Chrome() elif browser == "ie": self.wd = webdriver.Ie() else: raise ValueError("Unrecognized browser %s" % browser) self.wd.implicitly_wait(5) self.session = SessionHelper(self) self.group = GroupHelper(self) self.contact = ContactHelper(self) self.base_url=base_url def is_valid(self): try: self.wd.current_url return True except: return False def open_home_page(self): wd = self.wd wd.get(self.base_url) def destroy(self): self.wd.quit()
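Wrappers like this Application class are usually wired up through a pytest fixture so that a single browser session is reused across tests and torn down at the end of the run. A hypothetical conftest.py sketch for it; the module path, command-line option names, and URL below are assumptions, not taken from the file above:

# Hypothetical conftest.py for the Application wrapper above.
import pytest
from fixture.application import Application  # assumed module path

fixture = None  # module-level cache so the browser is reused across tests

@pytest.fixture
def app(request):
    global fixture
    browser = request.config.getoption("--browser", default="firefox")
    base_url = request.config.getoption("--baseUrl", default="http://localhost/addressbook/")
    # Recreate the wrapper if the previous browser session has died.
    if fixture is None or not fixture.is_valid():
        fixture = Application(browser=browser, base_url=base_url)
    fixture.open_home_page()
    return fixture

@pytest.fixture(scope="session", autouse=True)
def stop(request):
    def fin():
        if fixture is not None:
            fixture.destroy()
    request.addfinalizer(fin)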
import matplotlib.pyplot as plt from shapely.geometry import MultiLineString from .route_iterator import RouteIterator from .graphconverter import GraphConverter class TramLine(object): """Class representing a single tram line, for example '33: from Pilczyce to Sępolno' """ def __init__(self, number, direction_to, dl): """ Basic requirements to unambiguously define a line :param number: number of line as str :param direction_to: :param dl: DataLoader object """ self.number = number # Stored as str self.direction_to = direction_to self.default_route = dl.load_single_line(number, direction_to) # As you can see, default_route is of type LineString self.stops = dl.load_tram_stops(self.default_route) # List of shapely.Point objects self.current_route = self.default_route self.route_in_order = GraphConverter.find_route_in_order(dl, self) """ def show(self, with_stops=True): # Development tool. Plot line if isinstance(self.current_route, MultiLineString): for line in self.current_route: plt.plot(line.xy[0], line.xy[1]) else: plt.plot(self.current_route.xy[0], self.current_route.xy[1]) if with_stops: plt.scatter([p.x for p in self.stops], [p.y for p in self.stops]) plt.show() """
"""Hermes MQTT service for Rhasspy wakeword with snowboy""" import argparse import asyncio import dataclasses import itertools import json import logging import os import sys import typing from pathlib import Path import paho.mqtt.client as mqtt import rhasspyhermes.cli as hermes_cli from . import SnowboyModel, WakeHermesMqtt _DIR = Path(__file__).parent _LOGGER = logging.getLogger("rhasspywake_snowboy_hermes") # ----------------------------------------------------------------------------- def main(): """Main method.""" parser = argparse.ArgumentParser(prog="rhasspy-wake-snowboy-hermes") parser.add_argument( "--model", required=True, action="append", nargs="+", help="Snowboy model settings (model, sensitivity, audio_gain, apply_frontend)", ) parser.add_argument( "--model-dir", action="append", default=[], help="Directories with snowboy models", ) parser.add_argument( "--wakeword-id", action="append", help="Wakeword IDs of each keyword (default: use file name)", ) parser.add_argument( "--stdin-audio", action="store_true", help="Read WAV audio from stdin" ) parser.add_argument( "--udp-audio", nargs=3, action="append", help="Host/port/siteId for UDP audio input", ) parser.add_argument("--lang", help="Set lang in hotword detected message") hermes_cli.add_hermes_args(parser) args = parser.parse_args() hermes_cli.setup_logging(args) _LOGGER.debug(args) if args.model_dir: args.model_dir = [Path(d) for d in args.model_dir] # Use embedded models too args.model_dir.append(_DIR / "models") # Load model settings models: typing.List[SnowboyModel] = [] for model_settings in args.model: model_path = Path(model_settings[0]) if not model_path.is_file(): # Resolve relative to model directories for model_dir in args.model_dir: maybe_path = model_dir / model_path.name if maybe_path.is_file(): model_path = maybe_path break _LOGGER.debug("Loading model from %s", str(model_path)) model = SnowboyModel(model_path=model_path) if len(model_settings) > 1: model.sensitivity = model_settings[1] if len(model_settings) > 2: model.audio_gain = float(model_settings[2]) if len(model_settings) > 3: model.apply_frontend = model_settings[3].strip().lower() == "true" models.append(model) wakeword_ids = [ kn[1] for kn in itertools.zip_longest( args.model, args.wakeword_id or [], fillvalue="" ) ] if args.stdin_audio: # Read WAV from stdin, detect, and exit client = None hermes = WakeHermesMqtt(client, models, wakeword_ids) for site_id in args.site_id: hermes.load_detectors(site_id) if os.isatty(sys.stdin.fileno()): print("Reading WAV data from stdin...", file=sys.stderr) wav_bytes = sys.stdin.buffer.read() # Print results as JSON for result in hermes.handle_audio_frame(wav_bytes): result_dict = dataclasses.asdict(result) json.dump(result_dict, sys.stdout, ensure_ascii=False) return udp_audio = [] if args.udp_audio: udp_audio = [ (host, int(port), site_id) for host, port, site_id in args.udp_audio ] # Listen for messages client = mqtt.Client() hermes = WakeHermesMqtt( client, models, wakeword_ids, model_dirs=args.model_dir, udp_audio=udp_audio, site_ids=args.site_id, lang=args.lang, ) for site_id in args.site_id: hermes.load_detectors(site_id) _LOGGER.debug("Connecting to %s:%s", args.host, args.port) hermes_cli.connect(client, args) client.loop_start() try: # Run event loop asyncio.run(hermes.handle_messages_async()) except KeyboardInterrupt: pass finally: _LOGGER.debug("Shutting down") client.loop_stop() # ----------------------------------------------------------------------------- if __name__ == "__main__": main()