from setuptools import setup
requirements = ["requests>=2.20.0,<3.0"]
with open("README.md") as f:
readme = f.read()
with open("CHANGELOG.md") as f:
changelog = f.read()
setup(
name="googlemaps",
version="4.4.4",
description="Python client library for Google Maps Platform",
long_description=readme + changelog,
long_description_content_type="text/markdown",
scripts=[],
url="https://github.com/googlemaps/google-maps-services-python",
packages=["googlemaps"],
license="Apache 2.0",
platforms="Posix; MacOS X; Windows",
setup_requires=requirements,
install_requires=requirements,
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Internet",
],
python_requires='>=3.5'
)
|
#!/usr/bin/python
"""
Auto-mount encrypted drives using credentials held on an external device.
@Author: oliver.blakeman@carbonprojectfinance.co.uk
@Date: 2018-07-25
Shebangs: (amend the #! path at the top to match the environment and application)
ferret: #!/usr/bin/python
"""
# Standard import
import sys
import os
import pwd
import time
# other
from subprocess import call, STDOUT, PIPE, Popen
FNULL = open(os.devnull, 'w') # write to /dev/null
import Tkinter as tk
# logging
import logging
logfile = "/tmp/auto_enc_test.log"
logging.basicConfig(filename=logfile,level=logging.DEBUG)
#logging.basicConfig(filename=logfile,level=logging.ERROR)
################## env #################################### env #################################### env ##################
# path
current_env = os.environ['HOME']
base_dir = os.path.join(current_env, 'dev','auto_encrypted')
sys.path.append(base_dir)
# get user credentials
user_details = pwd.getpwuid(os.getuid())
user_name = user_details[0]
UID = user_details[2]
GID = user_details[3]
logging.debug('%s:%s: Script run as: %s (UID %s, GID %s)' % (time.strftime('%Y-%m-%d %H:%M:%S'), 'config', user_name, UID, GID))
# cli passed args
try:
action = os.path.basename(sys.argv[1])
try:
device = os.path.basename(sys.argv[2])
logging.debug('%s:%s: Search for volumes on device: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), 'config', device))
except IndexError as e: # no second arg passed
device = False
logging.debug('%s:%s: Search for volumes on ALL external devices.' % (time.strftime('%Y-%m-%d %H:%M:%S'), 'config'))
except IndexError as e:
logging.debug('%s:%s: No arguments passed to script' % (time.strftime('%Y-%m-%d %H:%M:%S'), 'config'))
action = False
################## modules #################################### modules #################################### modules ##################
from crypt.secure import test_keys, secure_config, get_config
################## vars #################################### vars #################################### vars ##################
import config as config
mnt_ids = config.MNT_IDS.format(uid=UID,gid=GID) # format mount ids for user
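# NOTE (illustrative assumption): MNT_IDS lives in the external config module;
# it is expected to be a mount-option template along the lines of
# "uid={uid},gid={gid}", so the USB volume is mounted as the invoking user.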
################## functions #################################### functions #################################### functions ##################
def getpwd():
"""Password pop up dialogue."""
func_name = sys._getframe().f_code.co_name
logging.debug('%s:%s: Running password dialogue script.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
global password
password = True
# main screen
root = tk.Tk()
root.title("Mount Encrypted")
root.eval('tk::PlaceWindow %s center' % root.winfo_pathname(root.winfo_id()))
# text
tk.Label(root, text = 'Enter Password').pack(side = 'top', padx=60, pady=10)
# password box
pwdbox = tk.Entry(root, show = '*')
pwdbox.pack(side = 'top', padx=60, pady=10)
pwdbox.focus_set() # put cursor in pw box
def onpwdentry(evt):
global password
pw_retrieve = pwdbox.get()
if pw_retrieve:
password = pw_retrieve
root.destroy()
def onokclick():
global password
pw_retrieve = pwdbox.get()
if pw_retrieve:
password = pw_retrieve
root.destroy()
def oncancelclick():
global password
password = False
root.destroy()
# actions
pwdbox.bind('<Return>', onpwdentry)
tk.Button(root, command=onokclick, text = 'OK').pack(side = 'left', padx=20, pady=10)
tk.Button(root, command=oncancelclick, text = 'Cancel').pack(side = 'right', padx=20, pady=10)
root.mainloop()
return password
def confirm_mount(header,message):
"""Confirmation pop up dialogue."""
func_name = sys._getframe().f_code.co_name
logging.debug('%s:%s: Running confirmation dialogue script.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
# main screen
root = tk.Tk()
root.title(header)
root.eval('tk::PlaceWindow %s center' % root.winfo_pathname(root.winfo_id()))
# text
tk.Label(root, text = message).pack(side = 'top', padx=60, pady=10)
def onokclick():
root.destroy()
# actions
tk.Button(root, command=onokclick, text = 'OK').pack(side = 'top', padx=60, pady=10)
root.mainloop()
return True
def auth_device(private_key):
"""Authorize public / private keypair on device."""
func_name = sys._getframe().f_code.co_name
logging.debug('%s:%s: Running script to find and auth device private key.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
# traverse auth device for public key
for dir_name, subdirs_name, file_names in os.walk(config.MNT_DIR, topdown=True):
for file_name in file_names:
if config.PUB_KF in file_name:
# get public_key
with open(os.path.join(dir_name, file_name), "r") as pub_file:
public_key = pub_file.read()
authed = test_keys(private_key,public_key)
if authed :
return True
return False
def get_mnt_devs():
"""Get list of eligible devices to mount - excluding config.MNT_EXC list."""
func_name = sys._getframe().f_code.co_name
logging.debug('%s:%s: Running script to find ALL available device volumes.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
mount_list =[]
# find devices to mount
for dir_name, subdirs_name, file_names in os.walk(config.DEV_DIR):
for file_name in file_names :
# get only eligible volumes
if config.DEV in file_name and file_name[:3] not in config.MNT_EXC and len(file_name) == 4:
mount_dir = os.path.join(dir_name, file_name)
mount_list.append(mount_dir)
return mount_list
def get_base_mnt_devs():
"""Get list of eligible volumes to mount for given base device."""
func_name = sys._getframe().f_code.co_name
logging.debug('%s:%s: Running script to find volumes for device from base device: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, device))
mount_list = []
# find devices to mount
for dir_name, subdirs_name, file_names in os.walk(config.DEV_DIR):
for file_name in file_names :
# get only eligible volumes
if device in file_name and len(file_name) > len(device):
mount_dir = os.path.join(dir_name, file_name)
mount_list.append(mount_dir)
return mount_list
def usb_unmount():
"""Unmount device from mount dir"""
func_name = sys._getframe().f_code.co_name
logging.debug('%s:%s: Running script to unmount device.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
u_command = "sudo umount %s" % (config.MNT_DIR) # unmount command using mount dir
success = call(u_command, stdout=FNULL, stderr=STDOUT, shell=True)
logging.debug('%s:%s: Device %s unmounted %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, config.MNT_DIR, success))
return success
def usb_mount(private_key):
"""Mount and verify external devices
1. Mount available drives.
2. Authorize using public / private key pair if required by config.MNT_AUTH, return true if Authed
3. Dismount if not authed
4. Return False if no authed devices
> dev: mount device
< True, False
"""
func_name = sys._getframe().f_code.co_name
## mount and auth
logging.debug('%s:%s: Running script to mount & auth device.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
if device : # get volumes from device
mount_list = get_base_mnt_devs()
else: # get all device volumes
mount_list = get_mnt_devs()
## iterate devices
for dev in mount_list:
logging.debug('%s:%s: Testing device volume: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, dev))
# define mount commands
u_command = "sudo umount %s" % (dev)
m_command = "sudo mount -r -o %s --source %s --target %s" % (mnt_ids, dev, config.MNT_DIR)
#m_command = 'sudo mount -o %s,context="system_u:object_r:samba_share_t:s0" --source %s --target %s' % (mnt_ids, dev, config.MNT_DIR)
# call unmount - in case already mounted
success = call(u_command, stdout=FNULL, stderr=STDOUT, shell=True)
logging.debug('%s:%s: %s dismounted %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, dev, success))
time.sleep(config.SYS_SLEEP)
# call mount
success = call(m_command, stdout=FNULL, stderr=STDOUT, shell=True)
logging.debug('%s:%s: %s mounted %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, dev, success))
# Auth device
authed = auth_device(private_key)
# check if authed
if authed :
return True
else:
# call unmount
success = call(u_command, stdout=FNULL, stderr=STDOUT, shell=True)
logging.debug('%s:%s: %s dismounted %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, dev, success))
return False
def get_configs(private_key):
"""Get list of encrypted mount configurations."""
func_name = sys._getframe().f_code.co_name
logging.debug('%s:%s: Running script to decrypt encrypted mount configs.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
enc_list = []
# find devices to mount
for dir_name, subdirs_name, file_names in os.walk(config.MNT_DIR):
for file_name in file_names :
# iter required keyfiles
for enc_cfg in list(config.ENC_VOL_CFE):  # iterate a copy so removing items below does not skip entries
# match key to file
if enc_cfg == file_name :
# prevent duplicates
config.ENC_VOL_CFE.remove(enc_cfg)
# decrypt config
enc_config = get_config(private_key, os.path.join(dir_name, file_name))
if enc_config:
enc_list.append(enc_config)
if config.ENC_VOL_CFE :
logging.error('%s:%s: Could not retrieve all configs, remaining: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, config.ENC_VOL_CFE))
return enc_list
def get_keyfiles(keyfiles):
"""Get list of keyfiles for mount."""
func_name = sys._getframe().f_code.co_name
logging.debug('%s:%s: Running script to identify and return keyfiles.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
kf_list = []
# find devices to mount
for dir_name, subdirs_name, file_names in os.walk(config.MNT_DIR):
for file_name in file_names :
# iter required keyfiles
for key in list(keyfiles):  # iterate a copy so removing items below does not skip entries
# match key to file
if key == file_name :
# prevent duplicates
keyfiles.remove(key)
kf_path = os.path.join(dir_name, file_name)
kf_list.append(kf_path)
if keyfiles :
logging.error('%s:%s: Could not retrieve all keyfiles, remaining: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, keyfiles))
return kf_list
def dismount_encrypted():
"""Dismount encrypted volumes."""
func_name = sys._getframe().f_code.co_name
logging.debug('%s:%s: Running encrypted volume dismount ALL script.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
denc_command = "sudo {vc} --force --dismount".format(vc=config.VC)
proc = Popen(denc_command, stdout=PIPE, stderr=STDOUT, shell=True)
for line in proc.stdout:
logging.debug('%s:%s: veracrypt report: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, line))
proc.wait()
logging.debug('%s:%s: Veracrypt dismount ALL, reported: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, proc.returncode))
return True
def mount_encrypted():
"""Mount encrypted volumes."""
func_name = sys._getframe().f_code.co_name
logging.debug('%s:%s: Running encrypted volume mount script.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
## get private_key
try:
pkf = os.path.join(config.PRV_KEY_DIR.format(home=current_env), config.PRV_KF)
with open(pkf, "r") as prv_file:
private_key = prv_file.read()
except IOError as e:
logging.error('%s:%s: Private key not present: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, pkf))
return False
## mount and ID device (pb/pk)
mounted = usb_mount(private_key)
if not mounted:
logging.error('%s:%s: No device mounted.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
return False
## get configuration files
enc_cfg_list = get_configs(private_key)
if not enc_cfg_list:
logging.error('%s:%s: No configurations present.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
return False
# incrementing & control
slot = 10
abort_mount = False
## iterate configured volumes
for enc_vol in enc_cfg_list :
## Get keyfiles
keyfiles = get_keyfiles(enc_vol.get('keyfiles',[]))
logging.debug('%s:%s: Retrieved keyfiles' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
## password
pw = enc_vol.get('pw',True)
# password retrieval logic
if isinstance(pw,(bool,type(None))):
if pw:
# get password from dialogue
password = getpwd()
if not password:
logging.debug('%s:%s: Dialogue yielded no password - abort.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
unmounted = usb_unmount()
return True # prevent encrypt dismount
else:
password = None
else:
password = pw
logging.debug('%s:%s: Retrieved password: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, '****'))
## get volume data
try:
volume = enc_vol['volume']
mount_point = enc_vol['mount_point']
except KeyError as e:  # missing dict keys raise KeyError, not IndexError
logging.error('%s:%s: Could not retrieve volume information: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, e))
return False
# get interactive mode
interactive = enc_vol.get('interactive',False)
if interactive:
interactive = ''
else:
interactive = '-t --non-interactive'
## check if volume is mounted on mount_point
mount_point_taken = os.path.ismount(mount_point) # returns boolean
if mount_point_taken :
## unmount usb
logging.debug('%s:%s: Calling unmount for device' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
unmounted = usb_unmount()
return True
## build veracrypt command
# keyfiles and password
if keyfiles and password:
kf_string = ','.join(keyfiles)
enc_command = "{vc} {ia} --keyfiles={kf} --password='{pw}' --slot={sl} {vo} {mt}".format(vc=config.VC,
ia=interactive,
kf=kf_string,
pw=password,
sl=slot,
vo=volume,
mt=mount_point)
# keyfiles only
elif keyfiles:
kf_string = ','.join(keyfiles)
enc_command = "{vc} {ia} --keyfiles={kf} --slot={sl} {vo} {mt}".format(vc=config.VC,
ia=interactive,
kf=kf_string,
sl=slot,
vo=volume,
mt=mount_point)
# password only
elif password:
enc_command = """{vc} {ia} --password='{pw}' --slot={sl} {vo} {mt}""".format(vc=config.VC,
ia=interactive,
pw=password,
sl=slot,
vo=volume,
mt=mount_point)
# no password or keyfiles ??
else:
enc_command = """{vc} {ia} --slot={sl} {vo} {mt}""".format(vc=config.VC,
ia=interactive,
sl=slot,
vo=volume,
mt=mount_point)
## make veracrypt call
logging.debug('%s:%s: Calling veracrypt mount: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, enc_command))
proc = Popen(enc_command, stdout=PIPE, stderr=STDOUT, shell=True)
for line in proc.stdout:
logging.debug('%s:%s: veracrypt mount output: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, line))
proc.wait()
logging.debug('%s:%s: veracrypt mount success: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, proc.returncode))
# attempt dismount volume if reported error on mount, e.g. already mounted
if proc.returncode > 0 :
enc_command = "{vc} -t --non-interactive --dismount {vo}".format(vc=config.VC, vo=volume)
success = call(enc_command, stdout=FNULL, stderr=STDOUT, shell=True)
logging.debug('%s:%s: Veracrypt attempted dismount of volume %s, reported: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, volume, success))
return False
slot += 1
## unmount usb
logging.debug('%s:%s: Calling unmount for device' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
unmounted = usb_unmount()
# report mounted volumes
enc_list = "{vc} -t -lv".format(vc=config.VC) # verbose list
proc = Popen(enc_list, stdout=PIPE, stderr=STDOUT, shell=True)
for line in proc.stdout:
logging.debug('%s:%s: veracrypt report: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, line.rstrip()))
proc.wait()
return True
################## script #################################### script #################################### script ##################
# run script if called directly
if __name__ == "__main__":
func_name = 'auto_encrypted.__main__'
logging.debug('%s:%s: Running script as main.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
if action == 'mount' : # mount encrypted files
# sleep to avoid mount conflicts
time.sleep(config.SYS_SLEEP)
# perform mount
mounted = mount_encrypted()
logging.debug('%s:%s: Mounted encrypted volumes: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, mounted))
# attempt to dismount all
if not mounted:
dismount_encrypted()
usb_unmount()
# dialogue
confirmed = confirm_mount('No Mounted Volumes','No credentials available. \nAll encrypted volumes have been dismounted.')
exit(0)
elif action == 'config' : # generate encrypted configs
config_secured = secure_config(current_env)
logging.debug('%s:%s: Secured config files: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, config_secured))
# dialogue
confirmed = confirm_mount('Config Encrypted','Config file successfully encrypted.')
exit(0)
elif not action: # dismount all encrypted drives
dismount_encrypted()
usb_unmount()
# dialogue
confirmed = confirm_mount('Dismounted','All encrypted volumes have been dismounted.')
exit(0)
logging.debug('%s:%s: Argument not recognised: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, action))
exit(1)
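# Example invocations (illustrative; the script file name is assumed):
#   python auto_encrypted.py mount sdb   # mount eligible volumes on /dev/sdb*
#   python auto_encrypted.py mount       # search ALL eligible external devices
#   python auto_encrypted.py config      # encrypt the mount configuration files
#   python auto_encrypted.py             # dismount all encrypted volumes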
|
# For each test case, read an amount and count how many days it takes to fall
# to 1 or below when the amount is halved once per day ("dias" is Portuguese
# for "days"; the output text is kept as the judge expects it).
for n in range(int(input())):
    c = float(input())
    days = 0
    while c > 1:
        c /= 2
        days += 1
    print(f"{days} dias")
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import difflib
import json
import os
import sys
import warnings
from copy import deepcopy
from ansible import constants as C
from ansible.module_utils.common._collections_compat import MutableMapping
from ansible.module_utils.six import PY3
from ansible.module_utils._text import to_text
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins import AnsiblePlugin, get_plugin_class
from ansible.utils.color import stringc
from ansible.utils.display import Display
from ansible.vars.clean import strip_internal_keys, module_response_deepcopy
if PY3:
# OrderedDict is needed for a backwards compat shim on Python3.x only
# https://github.com/ansible/ansible/pull/49512
from collections import OrderedDict
else:
OrderedDict = None
global_display = Display()
__all__ = ["CallbackBase"]
_DEBUG_ALLOWED_KEYS = frozenset(('msg', 'exception', 'warnings', 'deprecations'))
class CallbackBase(AnsiblePlugin):
'''
This is a base ansible callback class that does nothing. New callbacks should
use this class as a base and override any callback methods they wish to execute
custom actions.
'''
def __init__(self, display=None, options=None):
if display:
self._display = display
else:
self._display = global_display
if self._display.verbosity >= 4:
name = getattr(self, 'CALLBACK_NAME', 'unnamed')
ctype = getattr(self, 'CALLBACK_TYPE', 'old')
version = getattr(self, 'CALLBACK_VERSION', '1.0')
self._display.vvvv('Loading callback plugin %s of type %s, v%s from %s' % (name, ctype, version, sys.modules[self.__module__].__file__))
self.disabled = False
self._plugin_options = {}
if options is not None:
self.set_options(options)
self._hide_in_debug = ('changed', 'failed', 'skipped', 'invocation', 'skip_reason')
''' helper for callbacks, so they don't all have to include deepcopy '''
_copy_result = deepcopy
def set_option(self, k, v):
self._plugin_options[k] = v
def get_option(self, k):
return self._plugin_options[k]
def set_options(self, task_keys=None, var_options=None, direct=None):
''' This is different than the normal plugin method as callbacks get called early and really don't accept keywords.
Also _options was already taken for CLI args and callbacks use _plugin_options instead.
'''
# load from config
self._plugin_options = C.config.get_plugin_options(get_plugin_class(self), self._load_name, keys=task_keys, variables=var_options, direct=direct)
def _run_is_verbose(self, result, verbosity=0):
return ((self._display.verbosity > verbosity or result._result.get('_ansible_verbose_always', False) is True)
and result._result.get('_ansible_verbose_override', False) is False)
def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
if not indent and (result.get('_ansible_verbose_always') or self._display.verbosity > 2):
indent = 4
# All result keys starting with _ansible_ are internal, so remove them from the result before we output anything.
abridged_result = strip_internal_keys(module_response_deepcopy(result))
# remove invocation unless specifically wanting it
if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result:
del abridged_result['invocation']
# remove diff information from screen output
if self._display.verbosity < 3 and 'diff' in result:
del abridged_result['diff']
# remove exception from screen output
if 'exception' in abridged_result:
del abridged_result['exception']
try:
jsonified_results = json.dumps(abridged_result, cls=AnsibleJSONEncoder, indent=indent, ensure_ascii=False, sort_keys=sort_keys)
except TypeError:
# Python3 bug: throws an exception when keys are non-homogenous types:
# https://bugs.python.org/issue25457
# sort into an OrderedDict and then json.dumps() that instead
if not OrderedDict:
raise
jsonified_results = json.dumps(OrderedDict(sorted(abridged_result.items(), key=to_text)),
cls=AnsibleJSONEncoder, indent=indent,
ensure_ascii=False, sort_keys=False)
return jsonified_results
def _handle_warnings(self, res):
''' display warnings, if enabled and any exist in the result '''
if C.ACTION_WARNINGS:
if 'warnings' in res and res['warnings']:
for warning in res['warnings']:
self._display.warning(warning)
del res['warnings']
if 'deprecations' in res and res['deprecations']:
for warning in res['deprecations']:
self._display.deprecated(**warning)
del res['deprecations']
def _handle_exception(self, result, use_stderr=False):
if 'exception' in result:
msg = "An exception occurred during task execution. "
if self._display.verbosity < 3:
# extract just the actual error message from the exception text
error = result['exception'].strip().split('\n')[-1]
msg += "To see the full traceback, use -vvv. The error was: %s" % error
else:
msg = "The full traceback is:\n" + result['exception']
del result['exception']
self._display.display(msg, color=C.COLOR_ERROR, stderr=use_stderr)
def _serialize_diff(self, diff):
return json.dumps(diff, sort_keys=True, indent=4, separators=(u',', u': ')) + u'\n'
def _get_diff(self, difflist):
if not isinstance(difflist, list):
difflist = [difflist]
ret = []
for diff in difflist:
if 'dst_binary' in diff:
ret.append(u"diff skipped: destination file appears to be binary\n")
if 'src_binary' in diff:
ret.append(u"diff skipped: source file appears to be binary\n")
if 'dst_larger' in diff:
ret.append(u"diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
if 'src_larger' in diff:
ret.append(u"diff skipped: source file size is greater than %d\n" % diff['src_larger'])
if 'before' in diff and 'after' in diff:
# format complex structures into 'files'
for x in ['before', 'after']:
if isinstance(diff[x], MutableMapping):
diff[x] = self._serialize_diff(diff[x])
elif diff[x] is None:
diff[x] = ''
if 'before_header' in diff:
before_header = u"before: %s" % diff['before_header']
else:
before_header = u'before'
if 'after_header' in diff:
after_header = u"after: %s" % diff['after_header']
else:
after_header = u'after'
before_lines = diff['before'].splitlines(True)
after_lines = diff['after'].splitlines(True)
if before_lines and not before_lines[-1].endswith(u'\n'):
before_lines[-1] += u'\n\\ No newline at end of file\n'
if after_lines and not after_lines[-1].endswith('\n'):
after_lines[-1] += u'\n\\ No newline at end of file\n'
differ = difflib.unified_diff(before_lines,
after_lines,
fromfile=before_header,
tofile=after_header,
fromfiledate=u'',
tofiledate=u'',
n=C.DIFF_CONTEXT)
difflines = list(differ)
if len(difflines) >= 3 and sys.version_info[:2] == (2, 6):
# difflib in Python 2.6 adds trailing spaces after
# filenames in the -- before/++ after headers.
difflines[0] = difflines[0].replace(u' \n', u'\n')
difflines[1] = difflines[1].replace(u' \n', u'\n')
# it also treats empty files differently
difflines[2] = difflines[2].replace(u'-1,0', u'-0,0').replace(u'+1,0', u'+0,0')
has_diff = False
for line in difflines:
has_diff = True
if line.startswith(u'+'):
line = stringc(line, C.COLOR_DIFF_ADD)
elif line.startswith(u'-'):
line = stringc(line, C.COLOR_DIFF_REMOVE)
elif line.startswith(u'@@'):
line = stringc(line, C.COLOR_DIFF_LINES)
ret.append(line)
if has_diff:
ret.append('\n')
if 'prepared' in diff:
ret.append(diff['prepared'])
return u''.join(ret)
def _get_item_label(self, result):
''' retrieves the value to be displayed as a label for an item entry from a result object'''
if result.get('_ansible_no_log', False):
item = "(censored due to no_log)"
else:
item = result.get('_ansible_item_label', result.get('item'))
return item
def _get_item(self, result):
''' here for backwards compat, really should have always been named: _get_item_label'''
cback = getattr(self, 'NAME', os.path.basename(__file__))
self._display.deprecated("The %s callback plugin should be updated to use the _get_item_label method instead" % cback,
version="2.11", collection_name='ansible.builtin')
return self._get_item_label(result)
def _process_items(self, result):
# just remove them as now they get handled by individual callbacks
del result._result['results']
def _clean_results(self, result, task_name):
''' removes data from results for display '''
# mostly controls that debug only outputs what it was meant to
if task_name in C._ACTION_DEBUG:
if 'msg' in result:
# msg should be alone
for key in list(result.keys()):
if key not in _DEBUG_ALLOWED_KEYS and not key.startswith('_'):
result.pop(key)
else:
# 'var' value as field, so eliminate others and what is left should be varname
for hidme in self._hide_in_debug:
result.pop(hidme, None)
def set_play_context(self, play_context):
pass
def on_any(self, *args, **kwargs):
pass
def runner_on_failed(self, host, res, ignore_errors=False):
pass
def runner_on_ok(self, host, res):
pass
def runner_on_skipped(self, host, item=None):
pass
def runner_on_unreachable(self, host, res):
pass
def runner_on_no_hosts(self):
pass
def runner_on_async_poll(self, host, res, jid, clock):
pass
def runner_on_async_ok(self, host, res, jid):
pass
def runner_on_async_failed(self, host, res, jid):
pass
def playbook_on_start(self):
pass
def playbook_on_notify(self, host, handler):
pass
def playbook_on_no_hosts_matched(self):
pass
def playbook_on_no_hosts_remaining(self):
pass
def playbook_on_task_start(self, name, is_conditional):
pass
def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):
pass
def playbook_on_setup(self):
pass
def playbook_on_import_for_host(self, host, imported_file):
pass
def playbook_on_not_import_for_host(self, host, missing_file):
pass
def playbook_on_play_start(self, name):
pass
def playbook_on_stats(self, stats):
pass
def on_file_diff(self, host, diff):
pass
# V2 METHODS, by default they call v1 counterparts if possible
def v2_on_any(self, *args, **kwargs):
self.on_any(args, kwargs)
def v2_runner_on_failed(self, result, ignore_errors=False):
host = result._host.get_name()
self.runner_on_failed(host, result._result, ignore_errors)
def v2_runner_on_ok(self, result):
host = result._host.get_name()
self.runner_on_ok(host, result._result)
def v2_runner_on_skipped(self, result):
if C.DISPLAY_SKIPPED_HOSTS:
host = result._host.get_name()
self.runner_on_skipped(host, self._get_item_label(getattr(result._result, 'results', {})))
def v2_runner_on_unreachable(self, result):
host = result._host.get_name()
self.runner_on_unreachable(host, result._result)
# FIXME: not called
def v2_runner_on_async_poll(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
# FIXME, get real clock
clock = 0
self.runner_on_async_poll(host, result._result, jid, clock)
# FIXME: not called
def v2_runner_on_async_ok(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
self.runner_on_async_ok(host, result._result, jid)
# FIXME: not called
def v2_runner_on_async_failed(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
self.runner_on_async_failed(host, result._result, jid)
def v2_playbook_on_start(self, playbook):
self.playbook_on_start()
def v2_playbook_on_notify(self, handler, host):
self.playbook_on_notify(host, handler)
def v2_playbook_on_no_hosts_matched(self):
self.playbook_on_no_hosts_matched()
def v2_playbook_on_no_hosts_remaining(self):
self.playbook_on_no_hosts_remaining()
def v2_playbook_on_task_start(self, task, is_conditional):
self.playbook_on_task_start(task.name, is_conditional)
# FIXME: not called
def v2_playbook_on_cleanup_task_start(self, task):
pass # no v1 correspondence
def v2_playbook_on_handler_task_start(self, task):
pass # no v1 correspondence
def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):
self.playbook_on_vars_prompt(varname, private, prompt, encrypt, confirm, salt_size, salt, default, unsafe)
# FIXME: not called
def v2_playbook_on_import_for_host(self, result, imported_file):
host = result._host.get_name()
self.playbook_on_import_for_host(host, imported_file)
# FIXME: not called
def v2_playbook_on_not_import_for_host(self, result, missing_file):
host = result._host.get_name()
self.playbook_on_not_import_for_host(host, missing_file)
def v2_playbook_on_play_start(self, play):
self.playbook_on_play_start(play.name)
def v2_playbook_on_stats(self, stats):
self.playbook_on_stats(stats)
def v2_on_file_diff(self, result):
if 'diff' in result._result:
host = result._host.get_name()
self.on_file_diff(host, result._result['diff'])
def v2_playbook_on_include(self, included_file):
pass # no v1 correspondence
def v2_runner_item_on_ok(self, result):
pass
def v2_runner_item_on_failed(self, result):
pass
def v2_runner_item_on_skipped(self, result):
pass
def v2_runner_retry(self, result):
pass
def v2_runner_on_start(self, host, task):
"""Event used when host begins execution of a task
.. versionadded:: 2.8
"""
pass
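# A minimal sketch (not part of the original module) of how a custom callback
# plugin typically builds on CallbackBase: subclass it, declare the CALLBACK_*
# metadata, and override only the v2_* events of interest.
class ExampleCallbackModule(CallbackBase):
    """Illustrative callback that prints one line per task result."""

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'example'

    def v2_runner_on_ok(self, result):
        host = result._host.get_name()
        self._display.display('ok: %s' % host)

    def v2_runner_on_failed(self, result, ignore_errors=False):
        host = result._host.get_name()
        self._display.display('failed: %s' % host, color=C.COLOR_ERROR)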
|
# -*- coding: utf-8 -*-
#
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
#extensions = ['sphinx.ext.doctest','rst2pdf.pdfbuilder']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Swifty'
copyright = u'2017 The Swifty Authors'
author = u'The Swifty Authors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = 'classic'
#html_theme = 'sphinxdoc'
#html_theme = 'scrolls'
#html_theme = 'agogo'
#html_theme = 'traditional'
#html_theme = 'nature'
#html_theme = 'haiku'
#html_theme = 'pyramid'
#html_theme = 'bizstyle'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
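# For example (illustrative values for the 'classic' theme):
#
# html_theme_options = {
#     'rightsidebar': False,
#     'collapsiblesidebar': True,
# }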
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
|
from collections import deque, defaultdict
from itertools import islice
from functools import wraps
from fuzzysearch.common import FuzzySearchBase, Match, \
count_differences_with_maximum, get_best_match_in_group, group_matches
from fuzzysearch.compat import text_type
from fuzzysearch.search_exact import search_exact
def _check_arguments(subsequence, sequence, max_substitutions):
if not subsequence:
raise ValueError('Given subsequence is empty!')
if max_substitutions is None or max_substitutions < 0:
raise ValueError('Maximum number of substitutions must be >= 0!')
def has_near_match_substitutions(subsequence, sequence, max_substitutions):
_check_arguments(subsequence, sequence, max_substitutions)
if max_substitutions == 0:
for start_index in search_exact(subsequence, sequence):
return True
return False
elif len(subsequence) // (max_substitutions + 1) >= 3:
return has_near_match_substitutions_ngrams(
subsequence, sequence, max_substitutions,
)
else:
return has_near_match_substitutions_lp(
subsequence, sequence, max_substitutions,
)
def find_near_matches_substitutions(subsequence, sequence, max_substitutions):
"""Find near-matches of the subsequence in the sequence.
This chooses a suitable fuzzy search implementation according to the given
parameters.
Returns a list of fuzzysearch.Match objects describing the matching parts
of the sequence.
"""
_check_arguments(subsequence, sequence, max_substitutions)
if max_substitutions == 0:
return [
Match(start_index, start_index + len(subsequence), 0,
sequence[start_index:start_index + len(subsequence)])
for start_index in search_exact(subsequence, sequence)
]
elif len(subsequence) // (max_substitutions + 1) >= 3:
return find_near_matches_substitutions_ngrams(
subsequence, sequence, max_substitutions,
)
else:
return find_near_matches_substitutions_lp(
subsequence, sequence, max_substitutions,
)
def find_near_matches_substitutions_lp(subsequence, sequence,
max_substitutions):
"""search for near-matches of subsequence in sequence
This searches for near-matches, where the nearly-matching parts of the
sequence must meet the following limitations (relative to the subsequence):
* the number of character substitutions must be at most max_substitutions
* no deletions or insertions are allowed
"""
_check_arguments(subsequence, sequence, max_substitutions)
return list(_find_near_matches_substitutions_lp(subsequence, sequence,
max_substitutions))
def _find_near_matches_substitutions_lp(subsequence, sequence,
max_substitutions):
# simple optimization: prepare some often used things in advance
_SUBSEQ_LEN = len(subsequence)
_SUBSEQ_LEN_MINUS_ONE = _SUBSEQ_LEN - 1
def make_match(start, end, dist):
return Match(start, end, dist, matched=sequence[start:end])
# prepare quick lookup of where a character appears in the subsequence
char_indexes_in_subsequence = defaultdict(list)
for (index, char) in enumerate(subsequence):
char_indexes_in_subsequence[char].append(index)
# we'll iterate over the sequence once, but the iteration is split into two
# for loops; therefore we prepare an iterator in advance which will be used
# in both of the loops
sequence_enum_iter = enumerate(sequence)
# We'll count the number of matching characters assuming various attempted
# alignments of the subsequence to the sequence. At any point in the
# sequence there will be N such alignments to update. We'll keep
# these in a "circular array" (a.k.a. a ring) which we'll rotate after each
# iteration to re-align the indexing.
# Initialize the candidate counts by iterating over the first N-1 items in
# the sequence. No possible matches in this step!
candidates = deque([0], maxlen=_SUBSEQ_LEN)
for (index, char) in islice(sequence_enum_iter, _SUBSEQ_LEN_MINUS_ONE):
for subseq_index in [idx for idx in char_indexes_in_subsequence[char] if idx <= index]:
candidates[subseq_index] += 1
candidates.appendleft(0)
# From the N-th item onwards, we'll update the candidate counts exactly as
# above, and additionally check if the part of the sequence which began N-1
# items before the current index was a near enough match to the given
# sub-sequence.
for (index, char) in sequence_enum_iter:
for subseq_index in char_indexes_in_subsequence[char]:
candidates[subseq_index] += 1
# rotate the ring of candidate counts
candidates.rotate(1)
# fetch the count for the candidate which started N-1 items ago
n_substitutions = _SUBSEQ_LEN - candidates[0]
# set the count for the next index to zero
candidates[0] = 0
# if the candidate had few enough mismatches, yield a match
if n_substitutions <= max_substitutions:
yield make_match(
start=index - _SUBSEQ_LEN_MINUS_ONE,
end=index + 1,
dist=n_substitutions,
)
def has_near_match_substitutions_lp(subsequence, sequence, max_substitutions):
_check_arguments(subsequence, sequence, max_substitutions)
for match in _find_near_matches_substitutions_lp(subsequence, sequence,
max_substitutions):
return True
return False
def find_near_matches_substitutions_ngrams(subsequence, sequence,
max_substitutions):
"""search for near-matches of subsequence in sequence
This searches for near-matches, where the nearly-matching parts of the
sequence must meet the following limitations (relative to the subsequence):
* the number of character substitutions must be at most max_substitutions
* no deletions or insertions are allowed
"""
_check_arguments(subsequence, sequence, max_substitutions)
match_starts = set()
matches = []
for match in _find_near_matches_substitutions_ngrams(subsequence, sequence,
max_substitutions):
if match.start not in match_starts:
match_starts.add(match.start)
matches.append(match)
return sorted(matches, key=lambda match: match.start)
def _find_near_matches_substitutions_ngrams(subsequence, sequence,
max_substitutions):
subseq_len = len(subsequence)
seq_len = len(sequence)
def make_match(start, end, dist):
return Match(start, end, dist, matched=sequence[start:end])
ngram_len = subseq_len // (max_substitutions + 1)
if ngram_len == 0:
raise ValueError(
"The subsequence's length must be greater than max_substitutions!"
)
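# Pigeonhole reasoning behind the n-gram pivot: splitting the subsequence into
# (max_substitutions + 1) non-overlapping n-grams guarantees that any match
# with at most max_substitutions substitutions contains at least one n-gram
# verbatim, so an exact search for each n-gram finds every candidate alignment.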
for ngram_start in range(0, len(subsequence) - ngram_len + 1, ngram_len):
ngram_end = ngram_start + ngram_len
subseq_before = subsequence[:ngram_start]
subseq_after = subsequence[ngram_end:]
for index in search_exact(
subsequence[ngram_start:ngram_end], sequence,
ngram_start, seq_len - (subseq_len - ngram_end),
):
n_substitutions = 0
seq_before = sequence[index - ngram_start:index]
if subseq_before != seq_before:
n_substitutions += count_differences_with_maximum(
seq_before, subseq_before,
max_substitutions - n_substitutions + 1)
if n_substitutions > max_substitutions:
continue
seq_after = sequence[index + ngram_len:index - ngram_start + subseq_len]
if subseq_after != seq_after:
if n_substitutions == max_substitutions:
continue
n_substitutions += count_differences_with_maximum(
seq_after, subseq_after,
max_substitutions - n_substitutions + 1)
if n_substitutions > max_substitutions:
continue
yield make_match(
start=index - ngram_start,
end=index - ngram_start + subseq_len,
dist=n_substitutions,
)
def has_near_match_substitutions_ngrams(subsequence, sequence,
max_substitutions):
"""search for near-matches of subsequence in sequence
This searches for near-matches, where the nearly-matching parts of the
sequence must meet the following limitations (relative to the subsequence):
* the number of character substitutions must be at most max_substitutions
* no deletions or insertions are allowed
"""
_check_arguments(subsequence, sequence, max_substitutions)
for match in _find_near_matches_substitutions_ngrams(subsequence, sequence,
max_substitutions):
return True
return False
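# If the optional C extension is importable, the block below wraps the pure-
# Python n-gram functions so that byteslike (non-text) inputs are dispatched to
# the faster native implementations, falling back to Python otherwise.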
try:
from fuzzysearch._substitutions_only import \
substitutions_only_has_near_matches_ngrams_byteslike, \
substitutions_only_find_near_matches_ngrams_byteslike as \
_subs_only_fnm_ngram_byteslike
except ImportError:
pass
else:
py_has_near_match_substitutions_ngrams = has_near_match_substitutions_ngrams
@wraps(py_has_near_match_substitutions_ngrams)
def has_near_match_substitutions_ngrams(subsequence, sequence,
max_substitutions):
if not (
isinstance(subsequence, text_type) or
isinstance(sequence, text_type)
):
try:
return substitutions_only_has_near_matches_ngrams_byteslike(
subsequence, sequence, max_substitutions)
except TypeError:
pass
return py_has_near_match_substitutions_ngrams(
subsequence, sequence, max_substitutions)
py_find_near_matches_substitutions_ngrams = \
find_near_matches_substitutions_ngrams
@wraps(py_find_near_matches_substitutions_ngrams)
def find_near_matches_substitutions_ngrams(subsequence, sequence,
max_substitutions):
if not (
isinstance(subsequence, text_type) or
isinstance(sequence, text_type)
):
try:
results = _subs_only_fnm_ngram_byteslike(
subsequence, sequence, max_substitutions)
except TypeError:
pass
else:
matches = [
Match(
index,
index + len(subsequence),
count_differences_with_maximum(
sequence[index:index+len(subsequence)],
subsequence,
max_substitutions + 1,
),
matched=sequence[index:index + len(subsequence)],
)
for index in results
]
return [
get_best_match_in_group(group)
for group in group_matches(matches)
]
return py_find_near_matches_substitutions_ngrams(
subsequence, sequence, max_substitutions)
class SubstitutionsOnlySearch(FuzzySearchBase):
@classmethod
def search(cls, subsequence, sequence, search_params):
actual_max_subs = min(
x for x in [search_params.max_l_dist,
search_params.max_substitutions]
if x is not None
)
return find_near_matches_substitutions(subsequence, sequence,
actual_max_subs)
@classmethod
def extra_items_for_chunked_search(cls, subsequence, search_params):
return 0
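# A small usage sketch (assuming the fuzzysearch package is importable as
# above): searching for 'abc' with at most one substitution in 'xxabcxxabdxx'
# should yield an exact match at [2:5] and a one-substitution match at [7:10].
if __name__ == '__main__':
    for match in find_near_matches_substitutions('abc', 'xxabcxxabdxx', 1):
        print(match)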
|
import json
from banal import ensure_list
from functools import cache
from pantomime.types import JSON
from requests.exceptions import RequestException
from opensanctions.core import Dataset, Context
from opensanctions import helpers as h
FORMATS = ["%d %b %Y", "%d %B %Y", "%Y", "%b %Y", "%B %Y"]
@cache
def deref_url(context: Context, url):
try:
res = context.fetch_response(url)
return str(res.url)
except RequestException:
return url
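# functools.cache memoizes deref_url per (context, url) pair, so a repeated
# source_information_url is only fetched once per crawl (this assumes Context
# instances are hashable).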
def parse_result(context: Context, result):
type_ = result.pop("type", None)
schema = context.lookup_value("type", type_)
if schema is None:
context.log.error("Unknown result type", type=type_)
return
entity = context.make(schema)
entity.id = context.make_slug(result.pop("id"))
entity_number = result.pop("entity_number", None)
if entity_number is not None:
assert int(entity_number)
entity.id = context.make_slug(entity_number, dataset="us_ofac_sdn")
name = result.pop("name", None)
if name is not None:
    name = name.replace("and any successor, sub-unit, or subsidiary thereof", "")
entity.add("name", name)
for alias in ensure_list(result.pop("alt_names", "")):
entity.add("alias", alias.split("; "))
entity.add("notes", result.pop("remarks", None))
entity.add("country", result.pop("country", None))
if entity.schema.is_a("Person"):
entity.add("position", result.pop("title", None))
entity.add("nationality", result.pop("nationalities", None))
entity.add("nationality", result.pop("citizenships", None))
for dob in result.pop("dates_of_birth", []):
entity.add("birthDate", h.parse_date(dob, FORMATS))
entity.add("birthPlace", result.pop("places_of_birth", None))
elif entity.schema.is_a("Vessel"):
entity.add("flag", result.pop("vessel_flag", None))
entity.add("callSign", result.pop("call_sign", None))
entity.add("type", result.pop("vessel_type", None))
grt = result.pop("gross_registered_tonnage", None)
entity.add("grossRegisteredTonnage", grt)
gt = result.pop("gross_tonnage", None)
entity.add("tonnage", gt)
# TODO: make adjacent owner entity
result.pop("vessel_owner", None)
assert result.pop("title", None) is None
assert not len(result.pop("nationalities", []))
assert not len(result.pop("citizenships", []))
assert not len(result.pop("dates_of_birth", []))
assert not len(result.pop("places_of_birth", []))
for address in result.pop("addresses", []):
obj = h.make_address(
context,
street=address.get("address"),
city=address.get("city"),
postal_code=address.get("postal_code"),
region=address.get("state"),
country=address.get("country"),
)
h.apply_address(context, entity, obj)
for ident in result.pop("ids", []):
country = ident.pop("country")
entity.add("country", country)
h.apply_feature(
context,
entity,
ident.pop("type"),
ident.pop("number"),
country=country,
date_formats=FORMATS,
start_date=ident.pop("issue_date", None),
end_date=ident.pop("expiration_date", None),
)
sanction = context.make("Sanction")
sanction.id = context.make_id(entity.id, "Sanction")
sanction.add("entity", entity)
sanction.add("program", result.pop("programs", []))
sanction.add("provisions", result.pop("license_policy", []))
sanction.add("reason", result.pop("license_requirement", []))
sanction.add("authorityId", result.pop("federal_register_notice", None))
sanction.add("startDate", result.pop("start_date", None))
sanction.add("endDate", result.pop("end_date", None))
sanction.add("country", "us")
sanction.add("authority", result.pop("source", None))
# TODO: deref
source_url = deref_url(context, result.pop("source_information_url"))
sanction.add("sourceUrl", source_url)
result.pop("source_list_url")
context.emit(sanction)
context.emit(entity, target=True)
h.audit_data(result, ignore=["standard_order"])
def crawl(context: Context):
path = context.fetch_resource("source.json", context.dataset.data.url)
context.export_resource(path, JSON, title=context.SOURCE_TITLE)
with open(path, "r") as file:
data = json.load(file)
for result in data.get("results"):
parse_result(context, result)
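# Illustrative shape of a single `result` record handled by parse_result above
# (field names mirror the pops in that function; the values are invented):
# {
#     "id": 123,
#     "type": "Entity",
#     "name": "Example Trading LLC",
#     "alt_names": ["Example Trading"],
#     "country": "US",
#     "programs": ["EXAMPLE"],
#     "source": "Example Screening List",
#     "source_information_url": "https://example.com/entry/123",
#     "source_list_url": "https://example.com/list",
#     "addresses": [],
#     "ids": [],
#     "standard_order": True,
# }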
|
# Copyright (C) 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A function that trains a network on a dataset."""
from lottery_ticket.foundations import paths
from lottery_ticket.foundations import save_restore
import tensorflow as tf
def train(sess, dataset, model, optimizer_fn, training_len, output_dir,
**params):
"""Train a model on a dataset.
Training continues until training_len iterations or epochs have taken place.
Args:
sess: A tensorflow session
dataset: The dataset on which to train (a child of dataset_base.DatasetBase)
model: The model to train (a child of model_base.ModelBase)
optimizer_fn: A function that, when called, returns an instance of an
optimizer object to be used to optimize the network.
training_len: A tuple whose first value is the unit of measure
("epochs" or "iterations") and whose second value is the number of
units for which the network should be trained.
output_dir: The directory to which any output should be saved.
**params: Other parameters.
save_summaries is whether to save summary data.
save_network is whether to save the network before and after training.
test_interval is None if the test set should not be evaluated; otherwise,
frequency (in iterations) at which the test set should be run.
validate_interval is analogous to test_interval.
Returns:
A tuple of (initial_weights, final_weights, final_train_acc): the network's
weights before training, its weights after training, and the final training
accuracy.
"""
# Create initial session parameters.
optimize = optimizer_fn().minimize(model.loss)
sess.run(tf.global_variables_initializer())
initial_weights = model.get_current_weights(sess)
train_handle = dataset.get_train_handle(sess)
test_handle = dataset.get_test_handle(sess)
validate_handle = dataset.get_validate_handle(sess)
# Optional operations to perform before training.
if params.get('save_summaries', False):
writer = tf.summary.FileWriter(paths.summaries(output_dir))
train_file = tf.gfile.GFile(paths.log(output_dir, 'train'), 'w')
test_file = tf.gfile.GFile(paths.log(output_dir, 'test'), 'w')
validate_file = tf.gfile.GFile(paths.log(output_dir, 'validate'), 'w')
if params.get('save_network', False):
save_restore.save_network(paths.initial(output_dir), initial_weights)
save_restore.save_network(paths.masks(output_dir), model.masks)
# Helper functions to collect and record summaries.
def record_summaries(iteration, records, fp):
"""Records summaries obtained from evaluating the network.
Args:
iteration: The current training iteration as an integer.
records: A list of records to be written.
fp: A file to which the records should be logged in an easier-to-parse
format than the tensorflow summary files.
"""
if params.get('save_summaries', False):
log = ['iteration', str(iteration)]
for record in records:
# Log to tensorflow summaries for tensorboard.
writer.add_summary(record, iteration)
# Log to text file for convenience.
summary_proto = tf.Summary()
summary_proto.ParseFromString(record)
value = summary_proto.value[0]
log += [value.tag, str(value.simple_value)]
fp.write(','.join(log) + '\n')
def collect_test_summaries(iteration):
if (params.get('save_summaries', False) and
'test_interval' in params and
iteration % params['test_interval'] == 0):
sess.run(dataset.test_initializer)
records = sess.run(model.test_summaries, {dataset.handle: test_handle})
record_summaries(iteration, records, test_file)
def collect_validate_summaries(iteration):
if (params.get('save_summaries', False) and
'validate_interval' in params and
iteration % params['validate_interval'] == 0):
sess.run(dataset.validate_initializer)
records = sess.run(model.validate_summaries,
{dataset.handle: validate_handle})
record_summaries(iteration, records, validate_file)
# Train for the specified number of epochs. This behavior is encapsulated
# in a function so that it is possible to break out of multiple loops
# simultaneously.
def training_loop():
"""The main training loop encapsulated in a function."""
iteration = 0
epoch = 0
last_train_acc = None
while True:
sess.run(dataset.train_initializer)
epoch += 1
# End training if we have passed the epoch limit.
if training_len[0] == 'epochs' and epoch > training_len[1]:
return last_train_acc
# One training epoch.
while True:
try:
iteration += 1
# End training if we have passed the iteration limit.
if training_len[0] == 'iterations' and iteration > training_len[1]:
return last_train_acc
# Train.
results = sess.run([optimize, model.accuracy] + model.train_summaries,
{dataset.handle: train_handle})
last_train_acc = results[1]
records = results[2:]
record_summaries(iteration, records, train_file)
# Collect test and validation data if applicable.
collect_test_summaries(iteration)
collect_validate_summaries(iteration)
# End of epoch handling.
except tf.errors.OutOfRangeError:
break
# Run the training loop.
final_train_acc = training_loop()
# Clean up.
if params.get('save_summaries', False):
train_file.close()
test_file.close()
validate_file.close()
# Retrieve the final weights of the model.
final_weights = model.get_current_weights(sess)
if params.get('save_network', False):
save_restore.save_network(paths.final(output_dir), final_weights)
return initial_weights, final_weights, final_train_acc
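# Hedged usage sketch (the dataset and model classes named here are
# placeholders, not part of this module):
#
#   with tf.Session() as sess:
#       dataset = MyDataset(...)   # a child of dataset_base.DatasetBase
#       model = MyModel(...)       # a child of model_base.ModelBase
#       initial, final, acc = train(
#           sess, dataset, model,
#           optimizer_fn=lambda: tf.train.GradientDescentOptimizer(0.1),
#           training_len=('iterations', 1000),
#           output_dir='/tmp/lottery_ticket_run',
#           save_summaries=True, save_network=True, test_interval=100)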
|
# Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for windowed sampling."""
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.experimental import distribute
from tensorflow_probability.python.experimental.mcmc import windowed_sampling
from tensorflow_probability.python.internal import callable_util
from tensorflow_probability.python.internal import distribute_test_lib
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.internal import unnest
JAX_MODE = False
tfb = tfp.bijectors
tfd = tfp.distributions
Root = tfd.JointDistributionCoroutine.Root
NUM_SCHOOLS = 8 # number of schools
TREATMENT_EFFECTS = [28., 8, -3, 7, -1, 1, 18, 12]
TREATMENT_STDDEVS = [15., 10, 16, 11, 9, 11, 10, 18]
def eight_schools_coroutine():
@tfd.JointDistributionCoroutine
def model():
avg_effect = yield Root(tfd.Normal(0., 5., name='avg_effect'))
avg_stddev = yield Root(tfd.HalfNormal(5., name='avg_stddev'))
school_effects_std = yield Root(
tfd.Sample(tfd.Normal(0., 1.), NUM_SCHOOLS, name='school_effects_std'))
yield tfd.Independent(
tfd.Normal(loc=(avg_effect[..., tf.newaxis] +
avg_stddev[..., tf.newaxis] * school_effects_std),
scale=tf.constant(TREATMENT_STDDEVS)),
reinterpreted_batch_ndims=1,
name='treatment_effects')
return model
def eight_schools_sequential():
model = tfd.JointDistributionSequential([
tfd.Normal(0., 5., name='avg_effect'),
tfd.HalfNormal(5., name='avg_stddev'),
tfd.Sample(tfd.Normal(0., 1.), NUM_SCHOOLS, name='school_effects_std'),
# pylint: disable=g-long-lambda
lambda school_effects_std, avg_stddev, avg_effect: tfd.Independent(
tfd.Normal(loc=(avg_effect[..., tf.newaxis] +
avg_stddev[..., tf.newaxis] * school_effects_std),
scale=tf.constant(TREATMENT_STDDEVS)),
reinterpreted_batch_ndims=1,
name='treatment_effects')])
# pylint: enable=g-long-lambda
return model
def eight_schools_named():
model = tfd.JointDistributionNamed(
dict(
avg_effect=tfd.Normal(0., 5., name='avg_effect'),
avg_stddev=tfd.HalfNormal(5., name='avg_stddev'),
school_effects_std=tfd.Sample(
tfd.Normal(0., 1.), NUM_SCHOOLS, name='school_effects_std'),
# pylint: disable=g-long-lambda
treatment_effects=lambda school_effects_std, avg_stddev, avg_effect:
tfd.Independent(
tfd.Normal(loc=(avg_effect[..., tf.newaxis] +
avg_stddev[..., tf.newaxis] * school_effects_std),
scale=tf.constant(TREATMENT_STDDEVS)),
reinterpreted_batch_ndims=1,
name='treatment_effects')))
# pylint: enable=g-long-lambda
return model
def eight_schools_nested():
model = tfd.JointDistributionNamed(
dict(
effect_and_stddev=tfd.JointDistributionSequential([
tfd.Normal(0., 5., name='avg_effect'),
tfd.HalfNormal(5., name='avg_stddev')], name='effect_and_stddev'),
school_effects_std=tfd.Sample(
tfd.Normal(0., 1.), NUM_SCHOOLS, name='school_effects_std'),
# pylint: disable=g-long-lambda
treatment_effects=lambda school_effects_std, effect_and_stddev:
tfd.Independent(
tfd.Normal(loc=(effect_and_stddev[0][..., tf.newaxis] +
effect_and_stddev[1][..., tf.newaxis] *
school_effects_std),
scale=tf.constant(TREATMENT_STDDEVS)),
reinterpreted_batch_ndims=1,
name='treatment_effects')))
# pylint: enable=g-long-lambda
return model
def _gen_gaussian_updating_example(x_dim, y_dim, seed):
"""An implementation of section 2.3.3 from [1].
We initialize a joint distribution
x ~ N(mu, Lambda^{-1})
y ~ N(Ax, L^{-1})
Then condition the model on an observation for y. We can test to confirm that
Cov(p(x | y_obs)) is near to
Sigma = (Lambda + A^T L A)^{-1}
This test can actually check whether the posterior samples have the proper
covariance, and whether the windowed tuning recovers 1 / diag(Sigma) as the
diagonal scaling factor.
References:
[1] Bishop, Christopher M. Pattern Recognition and Machine Learning.
Springer, 2006.
Args:
x_dim: int
y_dim: int
seed: PRNG seed; see `tfp.random.sanitize_seed` for details.
Returns:
(tfd.JointDistribution, tf.Tensor), representing the joint distribution
above, and the posterior variance.
"""
seeds = samplers.split_seed(seed, 6)
x_mean = samplers.normal((x_dim,), seed=seeds[0])
x_scale_diag = samplers.normal((x_dim,), seed=seeds[1])
y_scale_diag = samplers.normal((y_dim,), seed=seeds[2])
scale_mat = samplers.normal((y_dim, x_dim), seed=seeds[3])
y_shift = samplers.normal((y_dim,), seed=seeds[4])
@tfd.JointDistributionCoroutine
def model():
x = yield Root(tfd.MultivariateNormalDiag(
x_mean, scale_diag=x_scale_diag, name='x'))
yield tfd.MultivariateNormalDiag(
tf.linalg.matvec(scale_mat, x) + y_shift,
scale_diag=y_scale_diag,
name='y')
dists, _ = model.sample_distributions(seed=seeds[5])
precision_x = tf.linalg.inv(dists.x.covariance())
precision_y = tf.linalg.inv(dists.y.covariance())
true_cov = tf.linalg.inv(precision_x +
tf.linalg.matmul(
tf.linalg.matmul(scale_mat, precision_y,
transpose_a=True),
scale_mat))
return model, tf.linalg.diag_part(true_cov)
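# Illustrative sketch (never called by the tests): the closed-form posterior
# covariance quoted in the docstring above, Sigma = (Lambda + A^T L A)^{-1},
# can be checked with plain NumPy. The helper name and the random scales below
# are hypothetical and exist only for this sketch.
def _numpy_posterior_cov_sketch(x_dim=3, y_dim=12, seed=0):
  """Returns diag((Lambda + A^T L A)^{-1}) for random diagonal Lambda and L."""
  rng = np.random.RandomState(seed)
  x_scale = rng.rand(x_dim) + 0.5          # diagonal scale of p(x)
  y_scale = rng.rand(y_dim) + 0.5          # diagonal scale of p(y | x)
  a_mat = rng.randn(y_dim, x_dim)          # the matrix A in y ~ N(Ax, L^{-1})
  lambda_mat = np.diag(1. / x_scale ** 2)  # prior precision Lambda
  l_mat = np.diag(1. / y_scale ** 2)       # likelihood precision L
  sigma = np.linalg.inv(lambda_mat + a_mat.T @ l_mat @ a_mat)
  return np.diag(sigma)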
@test_util.test_graph_and_eager_modes
class WindowedSamplingTest(test_util.TestCase):
@parameterized.named_parameters(
dict(testcase_name='_' + fn.__name__, model_fn=fn) for fn in
[eight_schools_coroutine, eight_schools_named, eight_schools_sequential,
eight_schools_nested])
def test_hmc_type_checks(self, model_fn):
model = model_fn()
pins = {'treatment_effects': tf.constant(TREATMENT_EFFECTS)}
@tf.function(autograph=False)
def do_sample(seed):
return tfp.experimental.mcmc.windowed_adaptive_hmc(
3, model, num_leapfrog_steps=2, num_adaptation_steps=21,
seed=seed, **pins)
draws, _ = do_sample(test_util.test_seed())
self.evaluate(draws)
@parameterized.named_parameters(
dict(testcase_name='_' + fn.__name__, model_fn=fn) for fn in
[eight_schools_coroutine, eight_schools_named, eight_schools_sequential,
eight_schools_nested])
def test_nuts_type_checks(self, model_fn):
model = model_fn()
pins = {'treatment_effects': tf.constant(TREATMENT_EFFECTS)}
@tf.function
def do_sample(seed):
return tfp.experimental.mcmc.windowed_adaptive_nuts(
3, model, max_tree_depth=2, num_adaptation_steps=50,
seed=seed, **pins)
draws, _ = do_sample(test_util.test_seed())
self.evaluate(draws)
def test_hmc_samples_well(self):
model = eight_schools_named()
pins = {'treatment_effects': tf.constant(TREATMENT_EFFECTS)}
@tf.function
def do_sample(seed):
return tfp.experimental.mcmc.windowed_adaptive_hmc(
400, model, num_leapfrog_steps=12, seed=seed,
**pins)
draws, _ = do_sample(test_util.test_seed())
flat_draws = tf.nest.flatten(
model.experimental_pin(**pins)._model_flatten(draws))
max_scale_reduction = tf.reduce_max(
tf.nest.map_structure(tf.reduce_max,
tfp.mcmc.potential_scale_reduction(flat_draws)))
self.assertLess(self.evaluate(max_scale_reduction), 1.5)
def test_nuts_samples_well(self):
model = eight_schools_named()
pins = {'treatment_effects': tf.constant(TREATMENT_EFFECTS)}
@tf.function
def do_sample():
return tfp.experimental.mcmc.windowed_adaptive_nuts(
200, model, max_tree_depth=5, seed=test_util.test_seed(),
**pins)
draws, _ = do_sample()
flat_draws = tf.nest.flatten(
model.experimental_pin(**pins)._model_flatten(draws))
max_scale_reduction = tf.reduce_max(
tf.nest.map_structure(tf.reduce_max,
tfp.mcmc.potential_scale_reduction(flat_draws)))
self.assertLess(self.evaluate(max_scale_reduction), 1.05)
@parameterized.named_parameters(
dict(testcase_name=f'_{num_draws}', num_draws=num_draws)
for num_draws in [0, 1, 500, 499, 100, 10000])
def test_get_window_sizes(self, num_draws):
[first_window,
slow_window,
last_window] = windowed_sampling._get_window_sizes(num_draws)
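    # The windowed scheme mirrors Stan-style warmup: a fast first window, a
    # run of doubling slow windows (1x, 2x, 4x, 8x the base slow window), and
    # a fast last window, which together account for every adaptation draw.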
self.assertEqual(first_window +
slow_window +
2 * slow_window +
4 * slow_window +
8 * slow_window +
last_window, num_draws)
if num_draws == 500:
self.assertEqual(slow_window, 25)
self.assertEqual(first_window, 75)
self.assertEqual(last_window, 50)
def test_explicit_init(self):
sample_dist = tfd.JointDistributionSequential(
[tfd.HalfNormal(1., name=f'dist_{idx}') for idx in range(4)])
explicit_init = [tf.ones(20) for _ in range(3)]
_, init, bijector, _, _, _ = windowed_sampling._setup_mcmc(
model=sample_dist,
n_chains=[20],
init_position=explicit_init,
seed=test_util.test_seed(),
dist_3=1.)
self.assertAllEqual(self.evaluate(init),
tf.convert_to_tensor(bijector(explicit_init)))
def test_explicit_init_samples(self):
stream = test_util.test_seed_stream()
# Compute everything in a function so it is consistent in graph mode
@tf.function
def do_sample():
jd_model = tfd.JointDistributionNamed({
'x': tfd.HalfNormal(1.),
'y': lambda x: tfd.Normal(0., x)})
init = {'x': tf.ones(64)}
return tfp.experimental.mcmc.windowed_adaptive_hmc(
10,
jd_model,
num_adaptation_steps=200,
current_state=init,
num_leapfrog_steps=5,
discard_tuning=False,
y=tf.constant(1.),
seed=stream(),
trace_fn=None)
self.evaluate(do_sample())
def test_valid_init(self):
class _HalfNormal(tfd.HalfNormal):
def _default_event_space_bijector(self):
# This bijector is intentionally mis-specified so that ~50% of
        # initializations will fail.
return tfb.Identity(validate_args=self.validate_args)
tough_dist = tfd.JointDistributionSequential(
[_HalfNormal(scale=1., name=f'dist_{idx}') for idx in range(4)])
# Twenty chains with three parameters gives a 1 / 2^60 chance of
# initializing with a finite log probability by chance.
_, init, _, _, _, _ = windowed_sampling._setup_mcmc(
model=tough_dist,
n_chains=[20],
seed=test_util.test_seed(),
dist_3=1.)
self.assertAllGreater(self.evaluate(init), 0.)
def test_extra_pins_not_required(self):
model = tfd.JointDistributionSequential([
tfd.Normal(0., 1., name='x'),
lambda x: tfd.Normal(x, 1., name='y')
])
pinned = model.experimental_pin(y=4.2)
# No explicit pins are passed, since the model is already pinned.
_, init, _, _, _, _ = windowed_sampling._setup_mcmc(
model=pinned, n_chains=[20],
seed=test_util.test_seed())
self.assertLen(init, 1)
def test_hmc_fitting_gaussian(self):
# See docstring to _gen_gaussian_updating_example
x_dim = 3
y_dim = 12
stream = test_util.test_seed_stream()
# Compute everything in a function so it is consistent in graph mode
@tf.function
def do_sample():
jd_model, true_var = _gen_gaussian_updating_example(
x_dim, y_dim, stream())
y_val = jd_model.sample(seed=stream()).y
_, trace = tfp.experimental.mcmc.windowed_adaptive_hmc(
1,
jd_model,
n_chains=1,
num_adaptation_steps=10000,
num_leapfrog_steps=16,
discard_tuning=False,
y=y_val,
seed=stream())
# Get the final scaling used for the mass matrix - this is a measure
# of how well the windowed adaptation recovered the true variance
final_scaling = 1. / trace['variance_scaling'][0][-1, 0, :]
return final_scaling, true_var
final_scaling, true_var = do_sample()
self.assertAllClose(true_var, final_scaling, rtol=0.15)
def test_nuts_fitting_gaussian(self):
# See docstring to _gen_gaussian_updating_example
x_dim = 3
y_dim = 12
stream = test_util.test_seed_stream()
# Compute everything in a function so it is consistent in graph mode
@tf.function
def do_sample():
jd_model, true_var = _gen_gaussian_updating_example(
x_dim, y_dim, stream())
y_val = jd_model.sample(seed=stream()).y
_, trace = tfp.experimental.mcmc.windowed_adaptive_nuts(
1,
jd_model,
n_chains=1,
num_adaptation_steps=10000,
max_tree_depth=5,
discard_tuning=False,
y=y_val,
seed=stream())
# Get the final scaling used for the mass matrix - this is a measure
# of how well the windowed adaptation recovered the true variance
final_scaling = 1. / trace['variance_scaling'][0][-1, 0, :]
return final_scaling, true_var
final_scaling, true_var = do_sample()
self.assertAllClose(true_var, final_scaling, rtol=0.1, atol=1e-3)
def test_f64_step_size(self):
dist = tfd.JointDistributionSequential([
tfd.Normal(
tf.constant(0., dtype=tf.float64),
tf.constant(1., dtype=tf.float64))
])
(target_log_prob_fn, initial_transformed_position, _, _, _, _
) = windowed_sampling._setup_mcmc(
dist, n_chains=[5], init_position=None, seed=test_util.test_seed())
init_step_size = windowed_sampling._get_step_size(
initial_transformed_position, target_log_prob_fn)
self.assertDTypeEqual(init_step_size, np.float64)
self.assertAllFinite(init_step_size)
def test_batch_of_problems_autobatched(self):
def model_fn():
x = yield tfd.MultivariateNormalDiag(
tf.zeros([10, 3]), tf.ones(3), name='x')
yield tfd.Multinomial(
logits=tfb.Pad([(0, 1)])(x), total_count=10, name='y')
model = tfd.JointDistributionCoroutineAutoBatched(model_fn, batch_ndims=1)
samp = model.sample(seed=test_util.test_seed())
self.assertEqual((10, 3), samp.x.shape)
self.assertEqual((10, 4), samp.y.shape)
states, trace = self.evaluate(tfp.experimental.mcmc.windowed_adaptive_hmc(
2, model.experimental_pin(y=samp.y), num_leapfrog_steps=3,
num_adaptation_steps=100, init_step_size=tf.ones([10, 1]),
seed=test_util.test_seed()))
self.assertEqual((2, 64, 10, 3), states.x.shape)
self.assertEqual((2, 10, 1), trace['step_size'].shape)
def test_batch_of_problems_named(self):
def mk_y(x):
return tfd.Multinomial(logits=tfb.Pad([(0, 1)])(x), total_count=10)
model = tfd.JointDistributionNamed(dict(
x=tfd.MultivariateNormalDiag(tf.zeros([10, 3]), tf.ones(3)),
y=mk_y))
samp = model.sample(seed=test_util.test_seed())
self.assertEqual((10, 3), samp['x'].shape)
self.assertEqual((10, 4), samp['y'].shape)
states, trace = self.evaluate(
tfp.experimental.mcmc.windowed_adaptive_hmc(
2,
model.experimental_pin(y=samp['y']),
num_leapfrog_steps=3,
num_adaptation_steps=100,
init_step_size=tf.ones([10, 1]),
seed=test_util.test_seed()))
self.assertEqual((2, 64, 10, 3), states['x'].shape)
self.assertEqual((2, 10, 1), trace['step_size'].shape)
def test_bijector(self):
dist = tfd.JointDistributionSequential([tfd.Dirichlet(tf.ones(2))])
bij, _ = windowed_sampling._get_flat_unconstraining_bijector(dist)
draw = dist.sample(seed=test_util.test_seed())
self.assertAllCloseNested(bij.inverse(bij(draw)), draw)
@parameterized.named_parameters(*(
(f'{kind}_{n_chains}', kind, n_chains) # pylint: disable=g-complex-comprehension
for kind in ('hmc', 'nuts') for n_chains in ([], 3, [2, 1], [2, 2, 2])))
def test_batches_of_chains(self, kind, n_chains):
def model_fn():
x = yield tfd.MultivariateNormalDiag(
tf.zeros(3), tf.ones(3), name='x')
yield tfd.Multinomial(
logits=tfb.Pad([(0, 1)])(x), total_count=10, name='y')
model = tfd.JointDistributionCoroutineAutoBatched(model_fn, batch_ndims=1)
samp = model.sample(seed=test_util.test_seed())
states, trace = self.evaluate(tfp.experimental.mcmc.windowed_adaptive_hmc(
5, model.experimental_pin(y=samp.y), n_chains=n_chains,
num_leapfrog_steps=3, num_adaptation_steps=100,
seed=test_util.test_seed()))
if isinstance(n_chains, int):
n_chains = [n_chains]
self.assertEqual((5, *n_chains, 3), states.x.shape)
self.assertEqual((5,), trace['step_size'].shape)
def test_dynamic_batch_shape(self):
"""Test correct handling of `TensorShape(None)`."""
if JAX_MODE:
self.skipTest('b/203858802')
n_features = 5
n_timepoints = 100
features = tfd.Normal(0., 1.).sample([100, n_features],
test_util.test_seed())
ar_sigma = 1.
rho = .25
@tfd.JointDistributionCoroutine
def jd_model():
beta = yield Root(tfd.Sample(tfd.Normal(0., 1.), n_features))
yhat = tf.einsum('ij,...j->...i', features, beta)
def ar_fun(y):
loc = tf.concat([tf.zeros_like(y[..., :1]), y[..., :-1]], axis=-1)
return tfd.Independent(
tfd.Normal(loc=loc * rho, scale=ar_sigma),
reinterpreted_batch_ndims=1)
      # The autoregressive distribution defined below introduces a batch shape
      # of TensorShape(None).
yield tfd.Autoregressive(
distribution_fn=ar_fun,
sample0=tf.zeros_like(yhat),
num_steps=yhat.shape[-1],
name='y')
states, _ = self.evaluate(
tfp.experimental.mcmc.windowed_adaptive_nuts(
2,
jd_model,
num_adaptation_steps=25,
n_chains=3,
seed=test_util.test_seed()))
self.assertEqual((2, 3, n_timepoints), states.y.shape)
@parameterized.named_parameters(
('_nuts', tfp.experimental.mcmc.windowed_adaptive_nuts, {}),
('_hmc', tfp.experimental.mcmc.windowed_adaptive_hmc, {
'num_leapfrog_steps': 1
}),
)
def test_f64_state(self, method, method_kwargs):
states, _ = callable_util.get_output_spec(lambda: method( # pylint: disable=g-long-lambda
5,
tfd.Normal(tf.constant(0., tf.float64), 1.),
n_chains=2,
num_adaptation_steps=100,
seed=test_util.test_seed(),
**method_kwargs))
self.assertEqual(tf.float64, states.dtype)
@test_util.test_graph_and_eager_modes
class WindowedSamplingStepSizeTest(test_util.TestCase):
def test_supply_full_step_size(self):
stream = test_util.test_seed_stream()
jd_model = tfd.JointDistributionNamed({
'a': tfd.Normal(0., 1.),
'b': tfd.MultivariateNormalDiag(
loc=tf.zeros(3), scale_diag=tf.constant([1., 2., 3.]))
})
init_step_size = {'a': tf.reshape(tf.linspace(1., 2., 3), (3, 1)),
'b': tf.reshape(tf.linspace(1., 2., 9), (3, 3))}
_, actual_step_size = tfp.experimental.mcmc.windowed_adaptive_hmc(
1,
jd_model,
num_adaptation_steps=25,
n_chains=3,
init_step_size=init_step_size,
num_leapfrog_steps=5,
discard_tuning=False,
trace_fn=lambda *args: unnest.get_innermost(args[-1], 'step_size'),
seed=stream(),
)
# Gets a newaxis because step size needs to have an event dimension.
self.assertAllCloseNested([init_step_size['a'],
init_step_size['b']],
[j[0] for j in actual_step_size])
def test_supply_partial_step_size(self):
stream = test_util.test_seed_stream()
jd_model = tfd.JointDistributionNamed({
'a': tfd.Normal(0., 1.),
'b': tfd.MultivariateNormalDiag(
loc=tf.zeros(3), scale_diag=tf.constant([1., 2., 3.]))
})
init_step_size = {'a': 1., 'b': 2.}
_, actual_step_size = tfp.experimental.mcmc.windowed_adaptive_hmc(
1,
jd_model,
num_adaptation_steps=25,
n_chains=3,
init_step_size=init_step_size,
num_leapfrog_steps=5,
discard_tuning=False,
trace_fn=lambda *args: unnest.get_innermost(args[-1], 'step_size'),
seed=stream(),
)
actual_step = [j[0] for j in actual_step_size]
expected_step = [1., 2.]
self.assertAllCloseNested(expected_step, actual_step)
def test_supply_single_step_size(self):
stream = test_util.test_seed_stream()
jd_model = tfd.JointDistributionNamed({
'a': tfd.Normal(0., 1.),
'b': tfd.MultivariateNormalDiag(
loc=tf.zeros(3), scale_diag=tf.constant([1., 2., 3.]))
})
init_step_size = 1.
_, traced_step_size = self.evaluate(
tfp.experimental.mcmc.windowed_adaptive_hmc(
1,
jd_model,
num_adaptation_steps=25,
n_chains=20,
init_step_size=init_step_size,
num_leapfrog_steps=5,
discard_tuning=False,
trace_fn=lambda *args: unnest.get_innermost(args[-1], 'step_size'),
seed=stream()))
self.assertEqual((25 + 1,), traced_step_size.shape)
self.assertAllClose(1., traced_step_size[0])
def test_sequential_step_size(self):
stream = test_util.test_seed_stream()
jd_model = tfd.JointDistributionSequential(
[tfd.HalfNormal(scale=1., name=f'dist_{idx}') for idx in range(4)])
init_step_size = [1., 2., 3.]
_, actual_step_size = tfp.experimental.mcmc.windowed_adaptive_nuts(
1,
jd_model,
num_adaptation_steps=25,
n_chains=3,
init_step_size=init_step_size,
discard_tuning=False,
trace_fn=lambda *args: unnest.get_innermost(args[-1], 'step_size'),
dist_3=tf.constant(1.),
seed=stream(),
)
self.assertAllCloseNested(init_step_size,
[j[0] for j in actual_step_size])
def _beta_binomial(trials):
"""Returns a function that constructs a beta binomial distribution."""
def _beta_binomial_distribution(mean, inverse_concentration):
"""Returns a beta binomial distribution with the given parameters."""
# Mean and inverse concentration are broadcast across days.
mean = mean[..., tf.newaxis]
inverse_concentration = inverse_concentration[..., tf.newaxis]
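    # With concentration1 = mean / ic and concentration0 = (1 - mean) / ic,
    # the underlying Beta has mean c1 / (c0 + c1) = mean and total
    # concentration c0 + c1 = 1 / ic, so `inverse_concentration` directly
    # controls the dispersion of the counts.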
beta_binomial = tfd.BetaBinomial(
total_count=trials,
concentration0=(1 - mean) / inverse_concentration,
concentration1=mean / inverse_concentration)
return tfd.Independent(beta_binomial, reinterpreted_batch_ndims=2)
return _beta_binomial_distribution
def get_joint_distribution(
trials,
mean_prior=lambda: tfd.Uniform(0., 1.),
inverse_concentration_prior=lambda: tfd.HalfNormal(5.)):
"""Returns a joint distribution over parameters and successes."""
param_shape = ps.shape(trials)[:1]
mean = tfd.Sample(mean_prior(), param_shape)
inverse_concentration = tfd.Sample(inverse_concentration_prior(), param_shape)
return tfd.JointDistributionNamed(
dict(mean=mean,
inverse_concentration=inverse_concentration,
successes=_beta_binomial(trials)),
name='jd')
class PrecompiledTest(test_util.TestCase):
def setUp(self):
super().setUp()
arms = 2
days = 3
seed = test_util.test_seed()
trial_seed, value_seed = tfp.random.split_seed(seed)
self.trials = tfd.Poisson(100.).sample([arms, days], seed=trial_seed)
dist = get_joint_distribution(self.trials)
self.true_values = dist.sample(seed=value_seed)
def nuts_kwargs(self):
return {'max_tree_depth': 2}
def hmc_kwargs(self):
return {'num_leapfrog_steps': 3, 'store_parameters_in_results': True}
@parameterized.named_parameters(('hmc_jit_sig', 'hmc'),
('nuts_jit_sig', 'nuts'))
def test_base_kernel(self, kind):
self.skip_if_no_xla()
self.skipTest('b/195070752') # Test is broken by cl/393807414.
if JAX_MODE:
input_signature = None
else:
input_signature = (
tf.TensorSpec(
shape=[None, None], dtype=tf.float32, name='trials'),
tf.TensorSpec(
shape=[None, None], dtype=tf.float32, name='successes'),
tf.TensorSpec(
shape=[2], dtype=tf.int32, name='seed'))
@tf.function(jit_compile=True, input_signature=input_signature)
def do(trials, successes, seed):
if kind == 'hmc':
proposal_kernel_kwargs = self.hmc_kwargs()
else:
proposal_kernel_kwargs = self.nuts_kwargs()
return windowed_sampling._windowed_adaptive_impl(
n_draws=9,
joint_dist=get_joint_distribution(trials),
kind=kind,
n_chains=11,
proposal_kernel_kwargs=proposal_kernel_kwargs,
num_adaptation_steps=50,
current_state=None,
dual_averaging_kwargs={'target_accept_prob': 0.76},
trace_fn=None,
return_final_kernel_results=False,
discard_tuning=True,
chain_axis_names=None,
seed=seed,
successes=successes)
self.evaluate(do(self.trials + 0., self.true_values['successes'],
test_util.test_seed(sampler_type='stateless')))
if JAX_MODE:
# TF runs into the `merge_call` error here (b/181800108).
@test_util.disable_test_for_backend(
disable_numpy=True,
reason='Sharding not available for NumPy backend.')
class DistributedTest(distribute_test_lib.DistributedTest):
def setUp(self):
super().setUp()
arms = 2
days = 3
seed = test_util.test_seed()
trial_seed, value_seed = tfp.random.split_seed(seed)
self.trials = tfd.Poisson(100.).sample([arms, days], seed=trial_seed)
dist = get_joint_distribution(self.trials)
self.true_values = dist.sample(seed=value_seed)
def nuts_kwargs(self):
return {'max_tree_depth': 2}
def hmc_kwargs(self):
return {'num_leapfrog_steps': 3, 'store_parameters_in_results': True}
def test_can_extract_shard_axis_names_from_model(self):
joint_dist = distribute.JointDistributionNamed(dict(
x=tfd.Normal(0., 1.),
y=lambda x: distribute.Sharded(tfd.Normal(x, 1.), self.axis_name),
z=lambda y: distribute.Sharded(tfd.Normal(y, 1.), self.axis_name)
))
def do():
_, _, _, _, _, shard_axis_names = windowed_sampling._setup_mcmc(
model=joint_dist,
n_chains=[20],
seed=test_util.test_seed(), z=1.)
# _setup_mcmc will flatten the distribution
self.assertListEqual(shard_axis_names, [[], ['i']])
self.strategy_run(do, args=(), in_axes=None)
@parameterized.named_parameters(('hmc_jit_sig', 'hmc'),
('nuts_jit_sig', 'nuts'))
def test_data_sharding(self, kind):
self.skip_if_no_xla()
joint_dist = distribute.JointDistributionNamed(dict(
x=tfd.Normal(0., 1.),
y=lambda x: distribute.Sharded(tfd.Normal(x, 1.), self.axis_name),
z=lambda y: distribute.Sharded(tfd.Normal(y, 1.), self.axis_name)
))
def do(seed, z):
if kind == 'hmc':
proposal_kernel_kwargs = self.hmc_kwargs()
else:
proposal_kernel_kwargs = self.nuts_kwargs()
return windowed_sampling._windowed_adaptive_impl(
n_draws=10,
joint_dist=joint_dist,
kind=kind,
n_chains=2,
proposal_kernel_kwargs=proposal_kernel_kwargs,
num_adaptation_steps=21,
current_state=None,
dual_averaging_kwargs={'target_accept_prob': 0.76},
trace_fn=None,
return_final_kernel_results=False,
discard_tuning=True,
seed=seed,
chain_axis_names=None,
z=z)
self.evaluate(self.strategy_run(
do,
in_axes=(None, 0),
args=(samplers.zeros_seed(), self.shard_values(
tf.ones(distribute_test_lib.NUM_DEVICES)))))
@parameterized.named_parameters(('hmc_jit_sig', 'hmc'),
('nuts_jit_sig', 'nuts'))
def test_chain_sharding(self, kind):
self.skip_if_no_xla()
joint_dist = tfd.JointDistributionNamed(dict(
x=tfd.Normal(0., 1.),
y=lambda x: tfd.Sample(tfd.Normal(x, 1.), 4),
z=lambda y: tfd.Independent(tfd.Normal(y, 1.), 1)
))
def do(seed, z):
if kind == 'hmc':
proposal_kernel_kwargs = self.hmc_kwargs()
else:
proposal_kernel_kwargs = self.nuts_kwargs()
return windowed_sampling._windowed_adaptive_impl(
n_draws=10,
joint_dist=joint_dist,
kind=kind,
n_chains=2,
proposal_kernel_kwargs=proposal_kernel_kwargs,
num_adaptation_steps=21,
current_state=None,
dual_averaging_kwargs={'target_accept_prob': 0.76},
trace_fn=None,
return_final_kernel_results=False,
discard_tuning=True,
seed=seed,
chain_axis_names=self.axis_name,
z=z)
self.evaluate(self.strategy_run(
do,
in_axes=None,
args=(samplers.zeros_seed(),
tf.ones(distribute_test_lib.NUM_DEVICES))))
if __name__ == '__main__':
test_util.main()
|
# -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ expert.py ]
# Synopsis [ the voxceleb1 speaker classification downstream wrapper ]
# Author [ S3PRL ]
# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import os
import math
import torch
import random
import pathlib
#-------------#
import torch.nn as nn
from torch.utils.data import DataLoader, DistributedSampler
from torch.distributed import is_initialized
from torch.nn.utils.rnn import pad_sequence
#-------------#
from ..model import *
from .dataset import SpeakerClassifiDataset
from argparse import Namespace
from pathlib import Path
class DownstreamExpert(nn.Module):
"""
Used to handle downstream-specific operations
    e.g. downstream forward, metric computation, contents to log
"""
def __init__(self, upstream_dim, downstream_expert, expdir, **kwargs):
super(DownstreamExpert, self).__init__()
self.upstream_dim = upstream_dim
self.downstream = downstream_expert
self.datarc = downstream_expert['datarc']
self.modelrc = downstream_expert['modelrc']
root_dir = Path(self.datarc['file_path'])
self.train_dataset = SpeakerClassifiDataset('train', root_dir, self.datarc['meta_data'], self.datarc['max_timestep'])
self.dev_dataset = SpeakerClassifiDataset('dev', root_dir, self.datarc['meta_data'])
self.test_dataset = SpeakerClassifiDataset('test', root_dir, self.datarc['meta_data'])
model_cls = eval(self.modelrc['select'])
model_conf = self.modelrc.get(self.modelrc['select'], {})
self.projector = nn.Linear(upstream_dim, self.modelrc['projector_dim'])
self.model = model_cls(
input_dim = self.modelrc['projector_dim'],
output_dim = self.train_dataset.speaker_num,
**model_conf,
)
self.objective = nn.CrossEntropyLoss()
self.logging = os.path.join(expdir, 'log.log')
self.register_buffer('best_score', torch.zeros(1))
def _get_train_dataloader(self, dataset):
sampler = DistributedSampler(dataset) if is_initialized() else None
return DataLoader(
dataset, batch_size=self.datarc['train_batch_size'],
shuffle=(sampler is None), sampler=sampler,
num_workers=self.datarc['num_workers'],
collate_fn=dataset.collate_fn
)
def _get_eval_dataloader(self, dataset):
return DataLoader(
dataset, batch_size=self.datarc['eval_batch_size'],
shuffle=False, num_workers=self.datarc['num_workers'],
collate_fn=dataset.collate_fn
)
def get_train_dataloader(self):
return self._get_train_dataloader(self.train_dataset)
def get_dev_dataloader(self):
return self._get_eval_dataloader(self.dev_dataset)
def get_test_dataloader(self):
return self._get_eval_dataloader(self.test_dataset)
# Interface
def get_dataloader(self, mode):
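        # `mode` is expected to be one of 'train' / 'dev' / 'test'; this
        # dispatches to the matching getter defined above (equivalent to
        # getattr(self, f'get_{mode}_dataloader')()).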
return eval(f'self.get_{mode}_dataloader')()
# Interface
def forward(self, mode, features, labels, records, **kwargs):
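        # `features` is a list of variable-length upstream feature tensors;
        # they are padded into a batch, projected, classified, and the loss
        # and per-utterance accuracy are accumulated into `records`.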
device = features[0].device
features_len = torch.IntTensor([len(feat) for feat in features]).to(device=device)
features = pad_sequence(features, batch_first=True)
features = self.projector(features)
predicted, _ = self.model(features, features_len)
labels = torch.LongTensor(labels).to(features.device)
loss = self.objective(predicted, labels)
predicted_classid = predicted.max(dim=-1).indices
records['acc'] += (predicted_classid == labels).view(-1).cpu().float().tolist()
records['loss'].append(loss.item())
return loss
# interface
def log_records(self, mode, records, logger, global_step, **kwargs):
save_names = []
for key, values in records.items():
average = torch.FloatTensor(values).mean().item()
logger.add_scalar(
f'voxceleb1/{mode}-{key}',
average,
global_step=global_step
)
with open(self.logging, 'a') as f:
if key == 'acc':
f.write(f'{mode} at step {global_step}: {average}\n')
if mode == 'dev' and average > self.best_score:
self.best_score = torch.ones(1) * average
f.write(f'New best on {mode} at step {global_step}: {average}\n')
save_names.append(f'{mode}-best.ckpt')
return save_names
|
# Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'refs_to_shard_external_lib',
'type': 'static_library',
'dependencies': [
# Make sure references in other files are updated correctly.
'shard.gyp:shard',
],
'sources': [
'hello.cc',
],
},
{
'target_name': 'refs_to_shard_external_exe',
'type': 'executable',
'dependencies': [
# Make sure references in other files are updated correctly.
'shard.gyp:shard',
],
'sources': [
'hello.cc',
],
},
{
'target_name': 'refs_to_shard_external_dll',
'type': 'shared_library',
'dependencies': [
# Make sure references in other files are updated correctly.
'shard.gyp:shard',
],
'sources': [
'hello.cc',
],
},
]
}
|
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.lr_scheduler import CosineAnnealingLR
class GradualWarmupScheduler(_LRScheduler):
""" Gradually warm-up(increasing) learning rate in optimizer.
Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.
Args:
optimizer (Optimizer): Wrapped optimizer.
multiplier: target learning rate = base lr * multiplier
warmup_epoch: target learning rate is linearly reached at the warmup_epoch
scheduler: scheduler used after warmup_epoch (eg. ReduceLROnPlateau)
"""
def __init__(self, optimizer, warmup_epoch, multiplier=1.0, scheduler=None):
assert multiplier > 1., 'multiplier should be greater than 1.'
self.multiplier = multiplier
self.warmup_epoch = warmup_epoch
self.scheduler = scheduler
self.finish_warmup = False
super().__init__(optimizer)
def get_lr(self):
if self.last_epoch > self.warmup_epoch:
if self.scheduler:
if not self.finish_warmup:
self.scheduler.base_lrs = [base_lr * self.multiplier for base_lr in self.base_lrs]
self.finish_warmup = True
return self.scheduler.get_lr()
return [base_lr * self.multiplier for base_lr in self.base_lrs]
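        # Linear warmup: scales each base_lr from base_lr at epoch 0 up to
        # base_lr * multiplier at warmup_epoch.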
return [base_lr*((self.multiplier-1.)*self.last_epoch/self.warmup_epoch+1.) for base_lr in self.base_lrs]
def step(self, epoch=None, metrics=None):
if self.finish_warmup and self.scheduler:
if epoch is None:
self.scheduler.step(None)
else:
self.scheduler.step(epoch - self.warmup_epoch)
else:
return super(GradualWarmupScheduler, self).step(epoch)
if __name__ == '__main__':
import torch
v = torch.zeros(10, requires_grad=True)
optim = torch.optim.SGD([v], lr=0.01)
scheduler = CosineAnnealingLR(optim, 95)
scheduler = GradualWarmupScheduler(optim, multiplier=10, warmup_epoch=5, scheduler=scheduler)
for epoch in range(0, 100):
scheduler.step(epoch)
print(epoch, optim.param_groups[0]['lr'])
|
# coding: utf-8
from __future__ import division, print_function, unicode_literals, \
absolute_import
import glob
from pymatgen.analysis.elasticity.strain import Strain
from pymatgen.io.vasp import Vasprun, zpath
"""
This module defines tasks that act as glue between other VASP Firetasks to allow communication
between different Firetasks and Fireworks. This module also contains tasks that affect the control
flow of the workflow, e.g. tasks that check stability or whether the band gap is within a certain range.
"""
import gzip
import os
import re
from pymatgen import MPRester
from pymatgen.io.vasp.sets import get_vasprun_outcar
from pymatgen.core.structure import Structure
from fireworks import explicit_serialize, FiretaskBase, FWAction
from atomate.utils.utils import env_chk, get_logger
from atomate.common.firetasks.glue_tasks import get_calc_loc, PassResult, \
CopyFiles, CopyFilesFromCalcLoc
logger = get_logger(__name__)
__author__ = 'Anubhav Jain, Kiran Mathew'
__email__ = 'ajain@lbl.gov, kmathew@lbl.gov'
@explicit_serialize
class CopyVaspOutputs(CopyFiles):
"""
Copy files from a previous VASP run directory to the current directory.
By default, copies 'INCAR', 'POSCAR' (default: via 'CONTCAR'), 'KPOINTS',
'POTCAR', 'OUTCAR', and 'vasprun.xml'. Additional files, e.g. 'CHGCAR',
can also be specified. Automatically handles files that have a ".gz"
extension (copies and unzips).
Note that you must specify either "calc_loc" or "calc_dir" to indicate
the directory containing the previous VASP run.
Required params:
(none) - but you must specify either "calc_loc" OR "calc_dir"
Optional params:
        calc_loc (str OR bool): if True, uses the most recent calc_loc. If a
            str, searches for the most recent calc_loc with the matching name
calc_dir (str): path to dir that contains VASP output files.
filesystem (str): remote filesystem. e.g. username@host
additional_files ([str]): additional files to copy,
e.g. ["CHGCAR", "WAVECAR"]. Use $ALL if you just want to copy
everything
contcar_to_poscar(bool): If True (default), will move CONTCAR to
POSCAR (original POSCAR is not copied).
"""
optional_params = ["calc_loc", "calc_dir", "filesystem", "additional_files",
"contcar_to_poscar"]
def run_task(self, fw_spec):
calc_loc = get_calc_loc(self["calc_loc"],
fw_spec["calc_locs"]) if self.get(
"calc_loc") else {}
# determine what files need to be copied
files_to_copy = None
if not "$ALL" in self.get("additional_files", []):
files_to_copy = ['INCAR', 'POSCAR', 'KPOINTS', 'POTCAR', 'OUTCAR',
'vasprun.xml']
if self.get("additional_files"):
files_to_copy.extend(self["additional_files"])
# decide between poscar and contcar
contcar_to_poscar = self.get("contcar_to_poscar", True)
if contcar_to_poscar and "CONTCAR" not in files_to_copy:
files_to_copy.append("CONTCAR")
files_to_copy = [f for f in files_to_copy if
f != 'POSCAR'] # remove POSCAR
# setup the copy
self.setup_copy(self.get("calc_dir", None),
filesystem=self.get("filesystem", None),
files_to_copy=files_to_copy, from_path_dict=calc_loc)
# do the copying
self.copy_files()
def copy_files(self):
all_files = self.fileclient.listdir(self.from_dir)
# start file copy
for f in self.files_to_copy:
prev_path_full = os.path.join(self.from_dir, f)
dest_fname = 'POSCAR' if f == 'CONTCAR' and self.get(
"contcar_to_poscar", True) else f
dest_path = os.path.join(self.to_dir, dest_fname)
relax_ext = ""
relax_paths = sorted(
self.fileclient.glob(prev_path_full + ".relax*"))
if relax_paths:
if len(relax_paths) > 9:
raise ValueError(
"CopyVaspOutputs doesn't properly handle >9 relaxations!")
                m = re.search(r'\.relax\d*', relax_paths[-1])
relax_ext = m.group(0)
# detect .gz extension if needed - note that monty zpath() did not seem useful here
gz_ext = ""
if not (f + relax_ext) in all_files:
for possible_ext in [".gz", ".GZ"]:
if (f + relax_ext + possible_ext) in all_files:
gz_ext = possible_ext
if not (f + relax_ext + gz_ext) in all_files:
raise ValueError("Cannot find file: {}".format(f))
# copy the file (minus the relaxation extension)
self.fileclient.copy(prev_path_full + relax_ext + gz_ext,
dest_path + gz_ext)
# unzip the .gz if needed
if gz_ext in ['.gz', ".GZ"]:
# unzip dest file
                with gzip.open(dest_path + gz_ext, 'rt') as f_zipped:
                    file_content = f_zipped.read()
                with open(dest_path, 'w') as f_out:
                    f_out.write(file_content)
os.remove(dest_path + gz_ext)
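# Illustrative sketch (not used elsewhere in this module): a typical way to
# place CopyVaspOutputs inside a Firework, copying from the most recent
# calc_loc. The extra file list and the Firework name are hypothetical
# examples, not part of this module's API.
def _example_copy_outputs_firework():
    from fireworks import Firework
    copy_task = CopyVaspOutputs(calc_loc=True,
                                additional_files=["CHGCAR"],
                                contcar_to_poscar=True)
    return Firework([copy_task], name="copy_vasp_outputs_example")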
@explicit_serialize
class CheckStability(FiretaskBase):
"""
Checks the stability of the entry against the Materials Project database.
    If the energy above hull exceeds the cutoff (default 0.05 eV/atom), then
    the task will return a FWAction that defuses all remaining tasks.
Required params:
(none) - but your MAPI key must be set as an environ var in this case
Optional params:
ehull_cutoff: (float) energy in eV/atom to use as ehull cutoff. Default
is 0.05 eV/atom.
MAPI_KEY: (str) set MAPI key directly. Supports env_chk.
        calc_dir: (str) path to the directory containing vasprun.xml (default: current directory)
"""
required_params = []
optional_params = ["ehull_cutoff", "MAPI_KEY", "calc_dir"]
def run_task(self, fw_spec):
mpr = MPRester(env_chk(self.get("MAPI_KEY"), fw_spec))
vasprun, outcar = get_vasprun_outcar(self.get("calc_dir", "."),
parse_dos=False,
parse_eigen=False)
my_entry = vasprun.get_computed_entry(inc_structure=False)
stored_data = mpr.get_stability([my_entry])[0]
if stored_data["e_above_hull"] > self.get("ehull_cutoff", 0.05):
logger.info("CheckStability: failed test!")
return FWAction(stored_data=stored_data, exit=True,
defuse_workflow=True)
else:
return FWAction(stored_data=stored_data)
@explicit_serialize
class CheckBandgap(FiretaskBase):
"""
    Checks the band gap of an entry. If the band gap is < min_gap or > max_gap,
    then the task will return a FWAction that defuses all remaining tasks.
Required params:
(none) - but you should set either min_gap or max_gap
Optional params:
min_gap: (float) minimum gap energy in eV to proceed
max_gap: (float) maximum gap energy in eV to proceed
vasprun_path: (str) path to vasprun.xml file
"""
required_params = []
optional_params = ["min_gap", "max_gap", "vasprun_path"]
def run_task(self, fw_spec):
vr_path = zpath(self.get("vasprun_path", "vasprun.xml"))
min_gap = self.get("min_gap", None)
max_gap = self.get("max_gap", None)
if not os.path.exists(vr_path):
relax_paths = sorted(glob.glob(vr_path + ".relax*"))
if relax_paths:
if len(relax_paths) > 9:
raise ValueError(
"CheckBandgap doesn't properly handle >9 relaxations!")
vr_path = relax_paths[-1]
logger.info("Checking the gap of file: {}".format(vr_path))
vr = Vasprun(vr_path)
gap = vr.get_band_structure().get_band_gap()["energy"]
stored_data = {"band_gap": gap}
logger.info(
"The gap is: {}. Min gap: {}. Max gap: {}".format(gap, min_gap,
max_gap))
if (min_gap and gap < min_gap) or (max_gap and gap > max_gap):
logger.info("CheckBandgap: failed test!")
return FWAction(stored_data=stored_data, exit=True,
defuse_workflow=True)
return FWAction(stored_data=stored_data)
@explicit_serialize
class GetInterpolatedPOSCAR(FiretaskBase):
"""
Grabs CONTCARS from two previous calculations to create interpolated
structure.
The code gets the CONTCAR locations using get_calc_loc of two calculations
indicated by the start and end params, creates a folder named "interpolate"
in the current FireWork directory, and copies the two CONTCARs to this folder.
The two CONTCARs are then used to create nimages interpolated structures using
pymatgen.core.structure.Structure.interpolate. Finally, the structure indicated
by this_image is written as a POSCAR file.
Required params:
start (str): name of fw for start of interpolation.
end (str): name of fw for end of interpolation.
this_image (int): which interpolation this is.
nimages (int) : number of interpolations.
Optional params:
autosort_tol (float): parameter used by Structure.interpolate.
a distance tolerance in angstrom in which to automatically
sort end_structure to match to the closest
points in this particular structure. Default is 0.0.
"""
required_params = ["start", "end", "this_image", "nimages"]
optional_params = ["autosort_tol"]
def run_task(self, fw_spec):
structure = self.interpolate_poscar(fw_spec)
structure.to(fmt="POSCAR", filename=os.path.join(os.getcwd(), "POSCAR"))
def interpolate_poscar(self, fw_spec):
# make folder for poscar interpolation start and end structure files.
interpolate_folder = 'interpolate'
if not os.path.exists(os.path.join(os.getcwd(), interpolate_folder)):
os.makedirs(os.path.join(os.getcwd(), interpolate_folder))
# use method of GrabFilesFromCalcLoc to grab files from previous locations.
CopyFilesFromCalcLoc(calc_dir=None, calc_loc=self["start"],
filenames=["CONTCAR"],
name_prepend=interpolate_folder + os.sep,
name_append="_0").run_task(fw_spec=fw_spec)
CopyFilesFromCalcLoc(calc_dir=None, calc_loc=self["end"],
filenames=["CONTCAR"],
name_prepend=interpolate_folder + os.sep,
name_append="_1").run_task(fw_spec=fw_spec)
# assuming first calc_dir is polar structure for ferroelectric search
s1 = Structure.from_file(os.path.join(interpolate_folder, "CONTCAR_0"))
s2 = Structure.from_file(os.path.join(interpolate_folder, "CONTCAR_1"))
structs = s1.interpolate(s2, self["nimages"], interpolate_lattices=True,
autosort_tol=self.get("autosort_tol", 0.0))
# save only the interpolation needed for this run
i = self.get("this_image")
return structs[i]
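# Illustrative sketch of the interpolation step used above, assuming two local
# CONTCAR files exist; the file names and the helper name are hypothetical.
def _example_interpolation(nimages=4, this_image=2):
    s_start = Structure.from_file("CONTCAR_0")
    s_end = Structure.from_file("CONTCAR_1")
    # Structure.interpolate returns the sequence of interpolated structures
    # (the two endpoints included).
    images = s_start.interpolate(s_end, nimages, interpolate_lattices=True,
                                 autosort_tol=0.0)
    return images[this_image]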
def pass_vasp_result(pass_dict=None, calc_dir='.', filename="vasprun.xml.gz",
parse_eigen=False,
parse_dos=False, **kwargs):
"""
Function that gets a PassResult firework corresponding to output from a Vasprun. Covers
most use cases in which user needs to pass results from a vasp run to child FWs
(e. g. analysis FWs)
pass_vasp_result(pass_dict={'stress': ">>ionic_steps.-1.stress"})
Args:
pass_dict (dict): dictionary designating keys and values to pass
to child fireworks. If value is a string beginning with '>>',
the firework will search the parsed VASP output dictionary
for the designated property by following the sequence of keys
separated with periods, e. g. ">>ionic_steps.-1.stress" is used
to designate the stress from the last ionic_step. If the value
is not a string or does not begin with ">>" or "a>>" (for an
object attribute, rather than nested key of .as_dict() conversion),
it is passed as is. Defaults to pass the computed entry of
the Vasprun.
calc_dir (str): path to dir that contains VASP output files, defaults
to '.', e. g. current directory
filename (str): filename for vasp xml file to parse, defaults to
"vasprun.xml.gz"
parse_eigen (bool): flag on whether or not to parse eigenvalues,
defaults to false
        parse_dos (bool): flag on whether or not to parse dos,
defaults to false
**kwargs (keyword args): other keyword arguments passed to PassResult
e.g. mod_spec_key or mod_spec_cmd
"""
pass_dict = pass_dict or {"computed_entry": "a>>get_computed_entry"}
parse_kwargs = {"filename": filename, "parse_eigen": parse_eigen,
"parse_dos": parse_dos}
return PassResult(pass_dict=pass_dict, calc_dir=calc_dir,
parse_kwargs=parse_kwargs,
parse_class="pymatgen.io.vasp.outputs.Vasprun", **kwargs)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
CONFIGS = {
'astar_ipdb': [
'--search',
'astar(ipdb())'],
'astar_pdb': [
'--search',
'astar(pdb())'],
'astar_gapdb': [
'--search',
'astar(gapdb())'],
}
exp = common_setup.IssueExperiment(
search_revisions=["issue488-base", "issue488-v1"],
configs=CONFIGS,
suite=suites.suite_optimal_with_ipc11(),
)
exp.add_comparison_table_step()
exp()
|
from unittest import TestCase
from unittest.mock import patch
from django.utils import timezone
from django.core import signing, mail
from django.contrib.sites.shortcuts import get_current_site
from django.contrib.auth.models import AnonymousUser
from django.shortcuts import reverse
from comment.conf import settings
from comment.messages import EmailInfo
from comment.utils import (
get_model_obj, has_valid_profile, get_comment_context_data, id_generator, get_comment_from_key,
get_user_for_request, send_email_confirmation_request, process_anonymous_commenting, CommentFailReason,
get_gravatar_img, get_profile_instance)
from comment.tests.base import BaseCommentUtilsTest, Comment, RequestFactory
class CommentUtilsTest(BaseCommentUtilsTest):
def test_get_model_object(self):
data = {
'app_name': 'post',
'model_name': 'Post',
'model_id': self.post_1.id
}
model_object = get_model_obj(**data)
self.assertIsNotNone(model_object)
self.assertIsInstance(model_object, self.post_1.__class__)
@patch.object(settings, 'COMMENT_USE_GRAVATAR', True)
def test_get_gravatar_img(self):
# email is not provided
self.assertEqual(get_gravatar_img(''), '/static/img/default.png')
# email is provided
self.assertTrue(get_gravatar_img('test').startswith('https://www.gravatar.com/avatar/'))
# gravatar is disabled
        patch.object(settings, 'COMMENT_USE_GRAVATAR', False).start()
self.assertEqual(get_gravatar_img(''), '/static/img/default.png')
def test_get_profile_instance(self):
# wrong content type
patch.object(settings, 'PROFILE_MODEL_NAME', 'wrong').start()
self.assertIsNone(get_profile_instance(self.user_1))
# correct data
patch.object(settings, 'PROFILE_MODEL_NAME', 'userprofile').start()
self.assertIsNotNone(get_profile_instance(self.user_1))
# profile model has no user related model
patch.object(settings, 'PROFILE_MODEL_NAME', None).start()
self.assertIsNone(get_profile_instance(self.user_1))
@patch.object(settings, 'COMMENT_USE_GRAVATAR', False)
def test_has_valid_profile(self):
patch.object(settings, 'PROFILE_APP_NAME', 'user_profile').start()
patch.object(settings, 'PROFILE_MODEL_NAME', 'userprofile').start()
self.assertTrue(has_valid_profile())
# one of settings attribute is missing
patch.object(settings, 'PROFILE_MODEL_NAME', '').start()
self.assertFalse(has_valid_profile())
# settings attr provided with wrong value
patch.object(settings, 'PROFILE_MODEL_NAME', 'wrong_value').start()
self.assertFalse(has_valid_profile())
# settings attr provided, profile model has no image
patch.object(settings, 'PROFILE_MODEL_NAME', 'userprofile').start()
mocked_hasattr = patch('comment.utils.hasattr').start()
mocked_hasattr.return_value = False
self.assertFalse(has_valid_profile())
patch.object(settings, 'COMMENT_USE_GRAVATAR', True).start()
self.assertTrue(has_valid_profile())
def test_get_comment_context_data(self):
comment_per_page = 'COMMENT_PER_PAGE'
login_url = 'LOGIN_URL'
current_login_url = getattr(settings, login_url, '/profile/login/')
comment_allow_anonymous = 'COMMENT_ALLOW_ANONYMOUS'
comment_allow_translation = 'COMMENT_ALLOW_TRANSLATION'
oauth = 'oauth'
patch.object(settings, login_url, current_login_url).start()
patch.object(settings, comment_allow_anonymous, False).start()
patch.object(settings, comment_per_page, 0).start()
data = {
'model_object': self.post_1,
'model_name': 'post',
'model_id': self.post_1.id,
'app_name': 'post',
'user': self.post_1.author,
'page': 10,
oauth: 'True'
}
request = self.factory.post('/', data=data)
request.user = self.post_1.author
if current_login_url.startswith('/'):
patch.object(settings, login_url, current_login_url[1:]).start()
comment_context_data = get_comment_context_data(request)
self.assertEqual(comment_context_data['comments'].count(), self.increment)
# test inserting '/' to the beginning of login url
self.assertEqual(comment_context_data['login_url'], '/' + settings.LOGIN_URL)
self.assertEqual(comment_context_data['is_anonymous_allowed'], settings.COMMENT_ALLOW_ANONYMOUS)
self.assertEqual(comment_context_data['is_translation_allowed'], settings.COMMENT_ALLOW_TRANSLATION)
self.assertEqual(comment_context_data['oauth'], True)
patch.object(settings, login_url, current_login_url).start()
patch.object(settings, comment_allow_anonymous, True).start()
patch.object(settings, comment_allow_translation, False).start()
patch.object(settings, comment_per_page, 2).start()
request = self.factory.post('/', data=data)
request.user = self.post_1.author
comment_context_data = get_comment_context_data(request)
self.assertEqual(comment_context_data['comments'].paginator.per_page, 2)
self.assertTrue(comment_context_data['comments'].has_previous())
self.assertEqual(comment_context_data['login_url'], settings.LOGIN_URL)
self.assertEqual(comment_context_data['is_anonymous_allowed'], settings.COMMENT_ALLOW_ANONYMOUS)
self.assertEqual(comment_context_data['is_translation_allowed'], settings.COMMENT_ALLOW_TRANSLATION)
data.update({'page': 'not integer', oauth: 'False'})
request = self.factory.post('/', data=data)
request.user = self.post_1.author
comment_context_data = get_comment_context_data(request)
self.assertEqual(comment_context_data['comments'].paginator.per_page, 2)
self.assertTrue(comment_context_data['comments'].has_next())
self.assertEqual(comment_context_data[oauth], False)
def test_user_for_request(self):
request = self.factory.get('/')
request.user = AnonymousUser()
# test unauthenticated user
self.assertIsNone(get_user_for_request(request))
# test authenticated user
request.user = self.user_1
self.assertEqual(get_user_for_request(request), self.user_1)
class BaseAnonymousCommentTest(BaseCommentUtilsTest):
def setUp(self):
super().setUp()
self.time_posted = timezone.now()
_email = 'test-1@acme.edu'
_content = 'posting anonymous comment'
_parent = None
_factory = RequestFactory()
self.comment_obj = Comment(
content_object=self.post_1,
content=_content,
user=None,
parent=_parent,
email=_email,
posted=self.time_posted
)
self.key = signing.dumps(self.comment_obj.to_dict(), compress=True)
self.request = _factory.get('/')
self.site = get_current_site(self.request)
class TestGetCommentFromKey(BaseAnonymousCommentTest, BaseCommentUtilsTest):
def test_bad_signature(self):
key = self.key + 'invalid'
response = get_comment_from_key(key)
self.assertEqual(response.is_valid, False)
self.assertEqual(response.why_invalid, CommentFailReason.BAD)
self.assertIsNone(response.obj)
def test_key_error(self):
comment_dict = self.comment_obj.to_dict().copy()
comment_dict.pop('model_name')
key = signing.dumps(comment_dict)
response = get_comment_from_key(key)
self.assertEqual(response.is_valid, False)
self.assertEqual(response.why_invalid, CommentFailReason.BAD)
self.assertIsNone(response.obj)
def test_attribute_error(self):
comment_dict = self.comment_obj.to_dict().copy()
comment_dict['model_name'] = 1
key = signing.dumps(comment_dict)
response = get_comment_from_key(key)
self.assertEqual(response.is_valid, False)
self.assertEqual(response.why_invalid, CommentFailReason.BAD)
self.assertIsNone(response.obj)
def test_value_error(self):
comment_dict = self.comment_obj.to_dict().copy()
comment_dict['user'] = 1
key = signing.dumps(comment_dict)
response = get_comment_from_key(key)
self.assertEqual(response.is_valid, False)
self.assertEqual(response.why_invalid, CommentFailReason.BAD)
self.assertIsNone(response.obj)
def test_comment_exists(self):
comment_dict = self.comment_obj.to_dict().copy()
comment = self.create_anonymous_comment(posted=timezone.now(), email='a@a.com')
comment_dict.update({
'posted': str(comment.posted),
'email': comment.email
})
key = signing.dumps(comment_dict)
response = get_comment_from_key(key)
self.assertEqual(response.is_valid, False)
self.assertEqual(response.why_invalid, CommentFailReason.EXISTS)
self.assertIsNone(response.obj)
def test_success(self):
response = get_comment_from_key(self.key)
self.assertEqual(response.is_valid, True)
self.assertEqual(response.why_invalid, None)
self.assertIsInstance(response.obj, Comment)
# comment is saved
self.assertIsNotNone(response.obj.id)
self.assertEqual(response.obj.posted, self.time_posted)
@patch.object(settings, 'COMMENT_ALLOW_ANONYMOUS', True)
class TestSendEmailConfirmationRequest(BaseAnonymousCommentTest, BaseCommentUtilsTest):
def setUp(self):
super().setUp()
settings.COMMENT_CONTACT_EMAIL = 'contact@domain'
settings.COMMENT_FROM_EMAIL = 'no-reply@domain'
self.len_mailbox = len(mail.outbox)
self.confirmation_url = reverse('comment:confirm-comment', args=[self.key])
self.confirmation_url_drf = f'/api/comments/confirm/{self.key}/'
self.contact_email = settings.COMMENT_CONTACT_EMAIL
self.receivers = [self.comment_obj.to_dict()['email']]
self.sender = settings.COMMENT_FROM_EMAIL
self.subject = EmailInfo.SUBJECT
self.content_object_url = f'http://{self.site.domain}{self.comment_obj.content_object.get_absolute_url()}'
def email_contents_test(self, contents, api=False):
if not api:
confirmation_url = self.confirmation_url
else:
confirmation_url = self.confirmation_url_drf
        # message context contains comment content, confirmation url, contact email,
        # site name, and the content object's absolute url.
self.assertEqual(True, self.comment_obj.content in contents)
self.assertEqual(True, confirmation_url in contents)
self.assertEqual(True, self.contact_email in contents)
self.assertEqual(True, self.site.name in contents)
self.assertEqual(True, self.content_object_url in contents)
def email_metadata_test(self, email, html=False):
self.assertEqual(email.from_email, self.sender)
self.assertEqual(email.to, self.receivers)
self.assertEqual(email.subject, self.subject)
if html:
self.assertEqual(email.alternatives[0][1], 'text/html')
else:
self.assertEqual(email.alternatives, [])
@patch.object(settings, 'COMMENT_SEND_HTML_EMAIL', False)
def test_sending_only_text_template_with_django(self):
receiver = self.comment_obj.to_dict()['email']
len_mailbox = self.len_mailbox
response = send_email_confirmation_request(self.comment_obj, receiver, self.key, self.site)
self.assertIsNone(response)
self.assertEqual(len(mail.outbox), len_mailbox + 1)
sent_email = mail.outbox[0]
self.email_metadata_test(sent_email)
self.email_contents_test(sent_email.body)
@patch.object(settings, 'COMMENT_SEND_HTML_EMAIL', False)
def test_sending_only_text_template_with_drf(self):
receiver = self.comment_obj.to_dict()['email']
len_mailbox = self.len_mailbox
response = send_email_confirmation_request(self.comment_obj, receiver, self.key, self.site, api=True)
self.assertIsNone(response)
self.assertEqual(len(mail.outbox), len_mailbox + 1)
sent_email = mail.outbox[0]
self.email_metadata_test(sent_email)
self.email_contents_test(sent_email.body, api=True)
@patch.object(settings, 'COMMENT_SEND_HTML_EMAIL', True)
def test_sending_both_text_and_html_template_with_django(self):
receiver = self.comment_obj.to_dict()['email']
len_mailbox = self.len_mailbox
response = send_email_confirmation_request(self.comment_obj, receiver, self.key, self.site)
self.assertIsNone(response)
self.assertEqual(len(mail.outbox), len_mailbox + 1)
sent_email = mail.outbox[0]
self.email_metadata_test(sent_email, html=True)
self.email_contents_test(sent_email.body)
@patch.object(settings, 'COMMENT_SEND_HTML_EMAIL', True)
def test_sending_both_text_and_html_template_with_drf(self):
receiver = self.comment_obj.to_dict()['email']
len_mailbox = self.len_mailbox
response = send_email_confirmation_request(self.comment_obj, receiver, self.key, self.site, api=True)
self.assertIsNone(response)
self.assertEqual(len(mail.outbox), len_mailbox + 1)
sent_email = mail.outbox[0]
self.email_metadata_test(sent_email, html=True)
self.email_contents_test(sent_email.body, api=True)
class TestProcessAnonymousCommenting(BaseAnonymousCommentTest, BaseCommentUtilsTest):
def setUp(self):
super().setUp()
self.request.user = AnonymousUser()
def test_for_django(self):
response = process_anonymous_commenting(self.request, self.comment_obj)
self.assertEqual(EmailInfo.CONFIRMATION_SENT, response)
def test_for_drf(self):
response = process_anonymous_commenting(self.request, self.comment_obj, api=True)
self.assertEqual(EmailInfo.CONFIRMATION_SENT, response)
class UtilsTest(TestCase):
"""Test general purpose utilities that aren't necessarily related to a comment"""
def setUp(self):
self.len_id = 6
def test_id_generator_length(self):
self.assertEqual(self.len_id, len(id_generator()))
def test_id_generator_generates_different_ids(self):
self.assertNotEqual(id_generator(), id_generator())
def test_id_generator_prefix(self):
prefix = 'comment'
output = id_generator(prefix=prefix)
self.assertEqual(True, output.startswith(prefix))
self.assertEqual(self.len_id + len(prefix), len(output))
def test_id_generator_suffix(self):
suffix = 'comment'
output = id_generator(suffix=suffix)
self.assertEqual(True, output.endswith(suffix))
self.assertEqual(self.len_id + len(suffix), len(output))
def test_id_generator_chars(self):
        import string  # noqa
chars = string.ascii_uppercase
output = id_generator(chars=chars)
self.assertEqual(output, output.upper())
def test_id_generator_len(self):
len_id = 8
self.assertEqual(len_id, len(id_generator(len_id=len_id)))
|
from django.contrib import admin
from .models import Cart, CartItem
class CartItemInline(admin.TabularInline):
model = CartItem
@admin.register(Cart)
class CartAdmin(admin.ModelAdmin):
inlines = [CartItemInline]
|
import numpy as np
from pyflux.arma import ARIMA
from pyflux.families import Laplace
noise = np.random.normal(0,1,100)
data = np.zeros(100)
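# Simulate a zero-mean AR(1) series: data[t] = 0.9 * data[t-1] + noise[t].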
for i in range(1,len(data)):
data[i] = 0.9*data[i-1] + noise[i]
def test_no_terms():
"""
Tests an ARIMA model with no AR or MA terms, and that
the latent variable list length is correct, and that the estimated
latent variables are not nan
"""
model = ARIMA(data=data, ar=0, ma=0, family=Laplace())
x = model.fit()
assert(len(model.latent_variables.z_list) == 2)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_couple_terms():
"""
Tests an ARIMA model with 1 AR and 1 MA term and that
the latent variable list length is correct, and that the estimated
latent variables are not nan
"""
model = ARIMA(data=data, ar=1, ma=1, family=Laplace())
x = model.fit()
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_couple_terms_integ():
"""
Tests an ARIMA model with 1 AR and 1 MA term, integrated once, and that
the latent variable list length is correct, and that the estimated
latent variables are not nan
"""
model = ARIMA(data=data, ar=1, ma=1, integ=1, family=Laplace())
x = model.fit()
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_predict_length():
"""
Tests that the prediction dataframe length is equal to the number of steps h
"""
model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
x = model.fit()
assert(model.predict(h=5).shape[0] == 5)
def test_predict_is_length():
"""
Tests that the prediction IS dataframe length is equal to the number of steps h
"""
model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
x = model.fit()
assert(model.predict_is(h=5).shape[0] == 5)
def test_predict_nans():
"""
Tests that the predictions are not nans
"""
model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
x = model.fit()
assert(len(model.predict(h=5).values[np.isnan(model.predict(h=5).values)]) == 0)
def test_predict_is_nans():
"""
Tests that the in-sample predictions are not nans
"""
model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
x = model.fit()
assert(len(model.predict_is(h=5).values[np.isnan(model.predict_is(h=5).values)]) == 0)
def test_predict_nonconstant():
"""
We should not really have predictions that are constant (should be some difference)...
This captures bugs with the predict function not iterating forward
"""
model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
x = model.fit()
predictions = model.predict(h=10, intervals=False)
assert(not np.all(predictions.values==predictions.values[0]))
def test_predict_is_nonconstant():
"""
We should not really have predictions that are constant (should be some difference)...
This captures bugs with the predict function not iterating forward
"""
model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
x = model.fit()
predictions = model.predict_is(h=10, intervals=False)
assert(not np.all(predictions.values==predictions.values[0]))
def test_predict_intervals():
"""
Tests prediction intervals are ordered correctly
"""
model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
x = model.fit()
predictions = model.predict(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions[model.data_name].values))
assert(np.all(predictions[model.data_name].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test_predict_is_intervals():
"""
Tests prediction intervals are ordered correctly
"""
model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
x = model.fit()
predictions = model.predict_is(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions[model.data_name].values))
assert(np.all(predictions[model.data_name].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test_predict_intervals_bbvi():
"""
Tests prediction intervals are ordered correctly
"""
model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
x = model.fit('BBVI', iterations=100, quiet_progress=True)
predictions = model.predict(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions[model.data_name].values))
assert(np.all(predictions[model.data_name].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test_predict_is_intervals_bbvi():
"""
Tests prediction intervals are ordered correctly
"""
model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
x = model.fit('BBVI', iterations=100, quiet_progress=True)
predictions = model.predict_is(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions[model.data_name].values))
assert(np.all(predictions[model.data_name].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test_predict_intervals_mh():
"""
Tests prediction intervals are ordered correctly
"""
model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
x = model.fit('M-H', nsims=200, quiet_progress=True)
predictions = model.predict(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions[model.data_name].values))
assert(np.all(predictions[model.data_name].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test_predict_is_intervals_mh():
"""
Tests prediction intervals are ordered correctly
"""
model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
x = model.fit('M-H', nsims=200, quiet_progress=True)
predictions = model.predict_is(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions[model.data_name].values))
assert(np.all(predictions[model.data_name].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test_sample_model():
"""
Tests sampling function
"""
model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
x = model.fit('BBVI', iterations=100, quiet_progress=True)
sample = model.sample(nsims=100)
assert(sample.shape[0]==100)
assert(sample.shape[1]==len(data)-2)
def test_ppc():
"""
Tests PPC value
"""
model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
x = model.fit('BBVI', iterations=100, quiet_progress=True)
p_value = model.ppc()
assert(0.0 <= p_value <= 1.0)
|
import pymel.core as pm
s = pm.polySphere()[0] # second in list is the history node, if construction history is on
c = pm.polyCube()[0]
print c, s
c.setTranslation( [0,2,0] )
s.setTranslation( [1,-2,0] )
g = pm.group( s, c, n='newGroup' )
print "The children of %s are %s" % (g, g.getChildren())
#print g.getChildren()[0].getShape()
print "difference =", c.translate.get() - s.translate.get() # basic vector operation
s2 = s.duplicate()[0]
# move the new sphere relatively along the z axis
s2.setTranslation([0,0,-2], relative=1)
# cycle through and move some verts.
# we're moving each vert a relative amount based on its vertex number
num = s2.numVertices()
for i, vert in enumerate(s2.verts):
pm.move( vert, [ i / float(num), 0, 0 ], r=1)
# save the current scene
currScene = pm.saveAs( 'pymel_test_main.ma')
# the parent property gives the parent directory of the current scene.
# the / (slash or divide) operator serves as an os independent way of concatenating paths
# it is a shortcut to os.path.join
exportScene = currScene.parent / 'pymel_test_ref.ma'
# if a file already exists where we want to export, delete it first
if exportScene.exists():
print "removing existing pymel export scene"
exportScene.remove()
print "exporting new scene:", exportScene
pm.exportSelected( exportScene, f=1 )
# delete the original group
pm.delete(g)
# reference it in a few times
for i in range(1,4):
ref = pm.createReference( exportScene, namespace=('foo%02d' % i) )
# offset each newly created reference:
# first we list all the nodes in the new reference, and get the first in the list.
# this will be the 'newGroup' node.
allRefNodes = ref.nodes()
print "moving" , allRefNodes[0]
allRefNodes[0].tx.set( 2*i )
# print out some information about our newly created references
allRefs = pm.listReferences()
for r in allRefs:
print r.namespace, r.refNode, r.withCopyNumber()
# the namespace property of the FileReference class can be used to set the namespace as well as to get it.
allRefs[2].namespace = 'super'
# but if we have to change the namespace of the objects after they have been imported,
# there is a different, albeit more complicated, way
ns = allRefs[0].namespace
allRefs[0].importContents()
# here's one way to change the namespace
try:
pm.namespace( add = 'bar' )
except: pass
for node in pm.ls( ns + ':*', type='transform'):
newname = node.swapNamespace( 'bar')
print "renaming %s to %s" % (node, newname)
node.rename( newname )
# unload the other one
allRefs[1].unload()
|
# Copyright 2021 Tianmian Tech. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common.python.utils import log_utils
from kernel.components.boosting.param import BoostingTreeParam
from kernel.examples.handler.component.component_base import Component
from kernel.examples.handler.interface import Input
from kernel.examples.handler.interface import Output
LOGGER = log_utils.get_logger()
class VertSecureBoost(Component, BoostingTreeParam):
def __init__(self, **kwargs):
Component.__init__(self, **kwargs)
# print(self.name)
LOGGER.debug(f"{self.name} component created")
new_kwargs = self.erase_component_base_param(**kwargs)
BoostingTreeParam.__init__(self, **new_kwargs)
self.input = Input(self.name, data_type="multi")
self.output = Output(self.name)
self._module_name = "VertSecureBoost"
self._param_name = "BoostingTreeParam"
|
# -*- coding: utf-8 -*-
import sys

from PyQt5.QtWidgets import QApplication
from Page.page import Page
class Attendance(Page):
def __init__(self, parent=None):
        super(Attendance, self).__init__(parent)
self.setupUi(self)
self.getDataFromDB()
self.setRowHeader(self.row_sum)
        self.field = ['编号', '姓名', '迟到', '早退', '病假', '事假', '旷工']  # No., Name, Late, Left early, Sick leave, Personal leave, Absence
self.setColumnHeader(self.field)
self.col_sum = self.tableWidget.columnCount()
self.setItemColorAlignment()
self.initFormDate()
self.initSearchField()
self.setNumNameUneditable()
self.setFormStyleSheet()
self.createContextMenu()
self.history_record = {'add': [], 'del': [], 'update': {}}
self.submit.setEnabled(False)
        # Initialize the cell-changed signal flag
self.cell_changed_flag = False
        # Initialize the current page number to 1
self.form_cur_page_num = 1
        # Work out how many pages the configured number of rows splits into
row_sum = self.tableWidget.rowCount()
if row_sum%10:
self.form_page_total = int(row_sum/10) + 1
else:
self.form_page_total = int(row_sum/10)
        # Initialize the pagination bar
self.initFormPageBar()
        # Display the table page by page
self.pageBlockDisplay()
        # Initialize signal connections
self.signalConnection()
    '''Fetch the data from the database'''
def getDataFromDB(self):
try:
self.connectDB()
self.cursor.execute('''
select Eno,Ename,Esex,Eage,Etel,Eedu,Dname,Pname,Eid,Intime,Gradu,Eaddr,Resume
from Employee,Department,Post
where Employee.Dno=Department.Dno
and Employee.Pno=Post.Pno
''')
self.row = self.cursor.fetchall()
self.row_sum = len(self.row)
except Exception as e:
print('getDataFromDB():\n'+repr(e))
sys.exit(-1)
    '''Initialize the table data'''
def initFormDate(self):
for each_row in range(self.row_sum):
for each_col in range(self.col_sum):
if self.row[each_row][each_col]:
item_text = str(self.row[each_row][each_col])
self.tableWidget.item(each_row, each_col).setText(item_text)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
attendance = Attendance()
attendance.show()
sys.exit(app.exec_())
|
__version__ = "1.3.2"
|
import functools
import time
def timer(function):
@functools.wraps(function)
    def wrapper(*args, **kwargs) -> list:
        time_list = []
        ans = None
        # Run the wrapped function ten times and average the runtimes
        for _ in range(10):
            start = time.perf_counter()
            ans = function(*args, **kwargs)
            end = time.perf_counter()
            time_list.append(end - start)
        # Return [average runtime in milliseconds, result of the last run]
        return [round((sum(time_list) / len(time_list)) * 1000, 2), ans]
return wrapper
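
# Illustrative usage of the decorator above: @timer runs the wrapped function
# ten times and returns [average_milliseconds, last_result]. The helper below is
# hypothetical and exists only to show the call pattern.
@timer
def _example_add(a: int, b: int) -> int:
    return a + b

_example_ms, _example_sum = _example_add(2, 3)  # _example_sum == 5
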
@timer
def sol_a(input: list, num: int):
m = num
last =input[len(input)-1]
turns = dict()
for i, val in enumerate(input):
turns[str(i)]= val
def get_turn_diff(val) -> int:
rev = [val for val in val["data"].values()][::-1]
ans = []
for i,ansa in enumerate(rev):
if ansa == val["val"]:
ans.append(len(rev)-i)
if len(ans) == 2:
break
return ans[0] - ans[1]
for i in range(len(input),m):
prev = last
cc = [val for val in turns.values()].count(prev)
if 1 == cc:
turns[str(i)] = 0
last = 0
else:
dtp = {"val": prev, "data": turns}
out = get_turn_diff(dtp)
turns[str(i)] = out
last = out
return turns[str(m-1)]
@timer
def sol_b(input: list,times: int):
indexer = dict()
last = input[len(input)-1]
def set_index(n,d):
try:
indexer[n]["count"] += 1
indexer[n]["index"].append(d)
except KeyError:
indexer[n] = {"index": [d], "count": 1}
for i,val in enumerate(input):
try:
l = indexer[str(val)]
continue
except KeyError:
indexer[str(val)] = {"index": [i], "count": 1}
for i in range(len(input),times):
try:
if indexer[str(last)]["count"] == 1:
set_index(str(0),i)
last = 0
else:
indexes = indexer[str(last)]["index"][::-1]
last = indexes[0] - indexes[1]
set_index(str(last),i)
except KeyError:
set_index(str(last),i)
#print(indexer)
return last
@timer
def sol_c(input: list, times: int):
indexer = dict()
last = input[len(input)-1]
def set_index(n,d):
try:
indexer[n] = (indexer[n][1],d)
except KeyError:
indexer[n] = (None,d)
for i,val in enumerate(input):
indexer[str(val)] = (None,i)
for i in range(len(input),times):
try:
if indexer[str(last)][0] == None:
set_index(str(0),i)
last = 0
else:
indexes = indexer[str(last)]
last = indexes[1] - indexes[0]
set_index(str(last),i)
except KeyError:
set_index(str(last),i)
#print(indexer)
return last
input = [0, 13, 1, 8, 6, 15]
print(f"Sol A: {sol_a(input, 2020)[0]}ms")
print(f"Sol B: {sol_b(input, 2020)[0]}ms")
print(f"Sol C: {sol_c(input, 2020)[0]}ms")
print(f"2020 answer: {sol_c(input,2020)[1]}")
print(f"30000000 answer: {sol_c(input,30000000)[1]}")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple script to plot waveforms in one or more files.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
from argparse import ArgumentParser
from obspy import Stream, __version__, read
from obspy.core.util.base import ENTRY_POINTS
from obspy.core.util.misc import MatplotlibBackend
def main(argv=None):
parser = ArgumentParser(prog='obspy-plot', description=__doc__.strip())
parser.add_argument('-V', '--version', action='version',
version='%(prog)s ' + __version__)
parser.add_argument('-f', '--format', choices=ENTRY_POINTS['waveform'],
help='Waveform format.')
parser.add_argument('-o', '--outfile',
help='Output filename.')
parser.add_argument('-n', '--no-automerge', dest='automerge',
action='store_false',
help='Disable automatic merging of matching channels.')
parser.add_argument('--full', dest='full', action='store_true',
help='Disable min/max-plot, i.e. always plot every '
'single sample (Stream.plot(..., method="full"), '
'for interactive zooming).')
parser.add_argument('files', nargs='+',
help='Files to plot.')
args = parser.parse_args(argv)
if args.outfile is not None:
MatplotlibBackend.switch_backend("AGG", sloppy=False)
st = Stream()
for f in args.files:
st += read(f, format=args.format)
kwargs = {"outfile": args.outfile,
"automerge": args.automerge}
if args.full:
kwargs['method'] = "full"
st.plot(**kwargs)
if __name__ == "__main__":
main()
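
# Example invocations (the waveform file names below are placeholders):
#   obspy-plot waveform1.mseed waveform2.mseed
#   obspy-plot -f MSEED --full -o waveforms.png waveform1.mseed
# The second form forces the MiniSEED reader, plots every single sample, and
# writes the figure to disk instead of opening an interactive window.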
|
#!/usr/bin/env python3
#
# Plots the power spectra and Fourier-space biases for the HI.
#
import numpy as np
import os, sys
import matplotlib.pyplot as plt
from pmesh.pm import ParticleMesh
from scipy.interpolate import InterpolatedUnivariateSpline as ius
from nbodykit.lab import BigFileMesh, BigFileCatalog, FFTPower
from nbodykit.cosmology import Planck15, EHPower, Cosmology
sys.path.append('../utils/')
sys.path.append('../recon/')
sys.path.append('../recon/cosmo4d/')
from lab import mapbias as mapp
from lab import report as rp
from lab import dg
from getbiasparams import getbias
import tools
#
from matplotlib import rc, rcParams, font_manager
rcParams['font.family'] = 'serif'
fsize = 12
fontmanage = font_manager.FontProperties(family='serif', style='normal',
size=fsize, weight='normal', stretch='normal')
font = {'family': fontmanage.get_family()[0],
'style': fontmanage.get_style(),
'weight': fontmanage.get_weight(),
'size': fontmanage.get_size(),
}
print(font)
#
import argparse
parser = argparse.ArgumentParser()
#parser.add_argument('-m', '--model', help='model name to use')
parser.add_argument('-a', '--aa', help='scale factor', default=0.3333, type=float)
parser.add_argument('-l', '--bs', help='boxsize', default=256, type=float)
parser.add_argument('-n', '--nmesh', help='nmesh', default=128, type=int)
parser.add_argument('-t', '--angle', help='angle of the wedge', default=50, type=float)
parser.add_argument('-k', '--kmin', help='kmin of the wedge', default=0.01, type=float)
args = parser.parse_args()
figpath = './figs/'
bs, nc, aa = args.bs, args.nmesh, args.aa
zz = 1/aa- 1
kmin = args.kmin
ang = args.angle
pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc])
rank = pm.comm.rank
dpath = '/global/cscratch1/sd/chmodi/m3127/21cm_cleaning/recon/fastpm_%0.4f/wedge_kmin%0.2f_ang%0.1f/'%(aa, kmin, ang)
dpath += 'L%04d-N%04d/'%(bs, nc)
################
def make_rep_plot():
"""Does the work of making the real-space xi(r) and b(r) figure."""
noises = np.loadtxt('/global/u1/c/chmodi/Programs/21cm/21cm_cleaning/data/summaryHI.txt').T
for i in range(noises[0].size):
if noises[0][i] == np.round(1/aa-1, 2): noise = noises[3][i]
print(noise)
datap = mapp.Observable.load(dpath+'ZA/opt_s999_h1massA_fourier/datap')
dataprsd = mapp.Observable.load(dpath+'ZA/opt_s999_h1massA_fourier_rsdpos/datap')
try:
datapup = mapp.Observable.load(dpath+'ZA/opt_s999_h1massA_fourier/datap_up')
dataprsdup = mapp.Observable.load(dpath+'ZA/opt_s999_h1massA_fourier_rsdpos/datap_up')
except Exception as e: print(e)
fig, ax = plt.subplots(1, 2, figsize=(9, 4))
def makeplot(bfit, datapp, lss, lww, cc, lbl=None):
rpfit = rp.evaluate1(bfit, datapp, field='mapp')[:-2]
ax[0].plot(rpfit[0]['k'], rpfit[0]['power']/(rpfit[1]['power']*rpfit[2]['power'])**0.5, ls=lss, lw=lww, color=cc, label=lbl)
ax[1].plot(rpfit[0]['k'], (rpfit[1]['power']/rpfit[2]['power'])**0.5, ls=lss, lw=lww, color=cc)
#fits
try:
basepath = dpath+'ZA/opt_s999_h1massA_fourier/%d-0.00/'%(nc)
bpaths = [basepath+'/best-fit'] + [basepath + '/%04d/fit_p/'%i for i in range(100, -1, -20)]
print(bpaths)
for path in bpaths:
if os.path.isdir(path): break
print(path)
bfit = mapp.Observable.load(path)
datapp = datap
lss, lww, cc, lbl = '-', 2, 'C0', 'Fid'
makeplot(bfit, datapp, lss, lww, cc, lbl)
print('%s done'%lbl)
except Exception as e: print(e)
try:
basepath = dpath+'ZA/opt_s999_h1massA_fourier/upsample1/%d-0.00/'%(2*nc)
bpaths = [basepath+'/best-fit'] + [basepath + '/%04d/fit_p/'%i for i in range(100, -1, -20)]
for path in bpaths:
if os.path.isdir(path): break
print(path)
bfit = mapp.Observable.load(path)
datapp = datapup
lss, lww, cc, lbl = '-', 2, 'C1', 'Up1'
makeplot(bfit, datapp, lss, lww, cc, lbl)
print('%s done'%lbl)
except Exception as e: print(e)
try:
basepath = dpath+'ZA/opt_s999_h1massA_fourier/upsample2/%d-0.00/'%(2*nc)
bpaths = [basepath+'/best-fit'] + [basepath + '/%04d/fit_p/'%i for i in range(100, -1, -20)]
for path in bpaths:
if os.path.isdir(path): break
print(path)
bfit = mapp.Observable.load(path)
datapp = datapup
lss, lww, cc, lbl = '-', 2, 'C2', 'Up2'
makeplot(bfit, datapp, lss, lww, cc, lbl)
print('%s done'%lbl)
except Exception as e: print(e)
#rsd
try:
basepath = dpath+'ZA/opt_s999_h1massA_fourier_rsdpos/%d-0.00/'%(nc)
bpaths = [basepath+'/best-fit'] + [basepath + '/%04d/fit_p/'%i for i in range(100, -1, -20)]
for path in bpaths:
if os.path.isdir(path): break
print(path)
bfit = mapp.Observable.load(path)
datapp = dataprsd
lss, lww, cc, lbl = '--', 2, 'C0', 'rsd'
makeplot(bfit, datapp, lss, lww, cc, lbl)
print('%s done'%lbl)
except Exception as e: print(e)
try:
basepath = dpath+'ZA/opt_s999_h1massA_fourier_rsdpos/upsample1/%d-0.00/'%(2*nc)
bpaths = [basepath+'/best-fit'] + [basepath + '/%04d/fit_p/'%i for i in range(100, -1, -20)]
for path in bpaths:
if os.path.isdir(path): break
print(path)
bfit = mapp.Observable.load(path)
datapp = dataprsdup
lss, lww, cc, lbl = '--', 2, 'C1', 'rsd up'
makeplot(bfit, datapp, lss, lww, cc, lbl)
print('%s done'%lbl)
except Exception as e: print(e)
try:
basepath = dpath+'ZA/opt_s999_h1massA_fourier_rsdpos/upsample2/%d-0.00/'%(2*nc)
bpaths = [basepath+'/best-fit'] + [basepath + '/%04d/fit_p/'%i for i in range(100, -1, -20)]
for path in bpaths:
if os.path.isdir(path): break
print(path)
bfit = mapp.Observable.load(path)
datapp = dataprsdup
lss, lww, cc, lbl = '--', 2, 'C2', 'rsd up2'
makeplot(bfit, datapp, lss, lww, cc, lbl)
print('%s done'%lbl)
except Exception as e: print(e)
ax[0].set_ylabel('$r_{cc}$', fontdict=font)
ax[1].set_ylabel(r'$\sqrt{P_{\rm mod}/P_{hh}}$', fontdict=font)
for axis in ax:
axis.set_xlabel(r'$k\quad [h\,{\rm Mpc}^{-1}]$', fontdict=font)
axis.set_xscale('log')
axis.grid(which='both', lw=0.2, alpha=0.2, color='gray')
axis.legend(prop=fontmanage)
# Put on some more labels.
for axis in ax:
axis.set_xscale('log')
for tick in axis.xaxis.get_major_ticks():
tick.label.set_fontproperties(fontmanage)
for tick in axis.yaxis.get_major_ticks():
tick.label.set_fontproperties(fontmanage)
##and finish
plt.tight_layout(rect=[0, 0, 1, 0.95])
if rank == 0: plt.savefig(figpath + '/rep_L%04d_%04d.pdf'%(bs, aa*10000))
################
if __name__=="__main__":
make_rep_plot()
#
|
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Artom Lifshitz <artom.lifshitz@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import socket
import ssl
import eventlet
import fixtures
from mock import MagicMock
from designate import exceptions
from designate import objects
from designate.tests.test_backend import BackendTestCase
from designate.tests import resources
from designate.backend import impl_nsd4
class NSD4ServerStub:
recved_command = None
response = 'ok'
keyfile = os.path.join(resources.path, 'ssl', 'nsd_server.key')
certfile = os.path.join(resources.path, 'ssl', 'nsd_server.pem')
def handle(self, client_sock, client_addr):
stream = client_sock.makefile()
self.recved_command = stream.readline()
stream.write(self.response)
stream.flush()
def start(self):
self.port = 1025
while True:
try:
eventlet.spawn_n(eventlet.serve,
eventlet.wrap_ssl(
eventlet.listen(('127.0.0.1', self.port)),
keyfile=self.keyfile,
certfile=self.certfile,
server_side=True),
self.handle)
break
except socket.error:
self.port = self.port + 1
def stop(self):
eventlet.StopServe()
class NSD4Fixture(fixtures.Fixture):
def setUp(self):
super(NSD4Fixture, self).setUp()
self.server = NSD4ServerStub()
self.server.start()
self.addCleanup(self.tearDown)
def tearDown(self):
self.server.stop()
# NOTE: We'll only test the specifics to the nsd4 backend here.
# Rest is handled via scenarios
class NSD4BackendTestCase(BackendTestCase):
def setUp(self):
super(NSD4BackendTestCase, self).setUp()
self.server_fixture = NSD4Fixture()
self.useFixture(self.server_fixture)
keyfile = os.path.join(resources.path, 'ssl', 'nsd_control.key')
certfile = os.path.join(resources.path, 'ssl', 'nsd_control.pem')
self.target = objects.PoolTarget.from_dict({
'id': '4588652b-50e7-46b9-b688-a9bad40a873e',
'type': 'nsd4',
'masters': [{'host': '192.0.2.1', 'port': 53},
{'host': '192.0.2.2', 'port': 35}],
'options': [
{'key': 'keyfile', 'value': keyfile},
{'key': 'certfile', 'value': certfile},
{'key': 'pattern', 'value': 'test-pattern'},
{'key': 'port', 'value': self.server_fixture.server.port}
],
})
self.backend = impl_nsd4.NSD4Backend(self.target)
def test_create_domain(self):
context = self.get_context()
domain = self.get_domain_fixture()
self.backend.create_domain(context, domain)
command = 'NSDCT1 addzone %s test-pattern\n' % domain['name']
self.assertEqual(command, self.server_fixture.server.recved_command)
def test_delete_domain(self):
context = self.get_context()
domain = self.get_domain_fixture()
self.backend.delete_domain(context, domain)
command = 'NSDCT1 delzone %s\n' % domain['name']
self.assertEqual(command, self.server_fixture.server.recved_command)
def test_server_not_ok(self):
self.server_fixture.server.response = 'goat'
context = self.get_context()
domain = self.get_domain_fixture()
self.assertRaises(exceptions.Backend,
self.backend.create_domain,
context, domain)
def test_ssl_error(self):
self.backend._command = MagicMock(side_effect=ssl.SSLError)
context = self.get_context()
domain = self.get_domain_fixture()
self.assertRaises(exceptions.Backend,
self.backend.create_domain,
context, domain)
def test_socket_error(self):
self.backend._command = MagicMock(side_effect=socket.error)
context = self.get_context()
domain = self.get_domain_fixture()
self.assertRaises(exceptions.Backend,
self.backend.create_domain,
context, domain)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='GenericLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('where', models.CharField(default=b'', max_length=200, blank=True)),
('url', models.URLField(max_length=255)),
('created', models.DateTimeField(auto_now_add=True)),
('show_in_admin', models.BooleanField(default=True)),
('rotate', models.CharField(max_length=100, blank=True)),
('object_id', models.PositiveIntegerField(null=True, blank=True)),
('content_type', models.ForeignKey(blank=True, to='contenttypes.ContentType', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='GenericLinkClick',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ip', models.GenericIPAddressField()),
('created', models.DateTimeField(auto_now_add=True)),
('link', models.ForeignKey(to='generic_link_tracking.GenericLink')),
],
options={
},
bases=(models.Model,),
),
]
|
"""Support for monitoring the local system for anomalous events."""
from __future__ import annotations
import asyncio
import time
from dataclasses import dataclass
import datetime
import logging
from typing import Any, Dict, Optional, List
import pprint
import voluptuous as vol
from homeassistant.components import persistent_notification
from homeassistant.components.binary_sensor import (
PLATFORM_SCHEMA,
BinarySensorEntity
)
from homeassistant.const import (
CONF_ICON,
CONF_SENSORS,
CONF_ID, CONF_NAME, EVENT_STATE_CHANGED, EVENT_HOMEASSISTANT_STARTED,
)
from homeassistant.core import HomeAssistant, Event
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_call_later
from homeassistant.helpers.typing import ConfigType
_LOGGER = logging.getLogger(__name__)
CONF_RELATED_ENTITY_ID = "related_entity_id"
CONF_PULSE_MINUTES = "pulse_minutes"
DEFAULT_ICON = "mdi:alarm"
SCAN_INTERVAL_MINUTES = 1
SIGNAL_HEARTBEAT_UPDATE = "heartbeat_update"
# TODO: Make id & name unique
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_SENSORS): vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_ID): cv.string,
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_RELATED_ENTITY_ID): cv.entity_id,
vol.Required(CONF_PULSE_MINUTES): cv.positive_int,
vol.Required(CONF_ICON, default=DEFAULT_ICON):
cv.icon
}
)
]
)
}
)
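
# A minimal sketch of a platform config this schema accepts; the entity id and
# values below are hypothetical and only illustrate the required keys.
_EXAMPLE_SENSOR_CONFIG = {
    CONF_SENSORS: [
        {
            CONF_ID: "washer_pulse",
            CONF_NAME: "Washer pulse missing",
            CONF_RELATED_ENTITY_ID: "sensor.washer_power",
            CONF_PULSE_MINUTES: 15,
            CONF_ICON: "mdi:alarm",
        }
    ]
}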
@dataclass
class PulseState:
"""Data for a missing pulse sensor."""
# The current state - true => pulse missing, false => pulse present
pulse_missing: bool
# Time by which, if no pulse has been received, the pulse will be
# considered missing.
receipt_deadline: Optional[datetime.datetime]
# Minutes between expected pulses.
pulse_minutes: int
# Related entity that is being monitored.
related_entity_id: str
# Time the state was changed last.
update_time: Optional[datetime.datetime]
# Last exception, if any.
last_exception: Optional[BaseException]
def set_next_deadline(self):
"""Set the next deadline by adding the number of minutes a pulse is
expected in, to the current date/time.
"""
self.receipt_deadline = datetime.datetime.now() + \
datetime.timedelta(minutes=self.pulse_minutes)
# noinspection PyUnusedLocal
# (discovery_info parameter)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: Optional[Any] = None
) -> None:
"""Set up the monitor condition sensors."""
entities: List[BinarySensorEntity] = []
sensor_registry: Dict[str, PulseState] = {}
for sensor_config in config[CONF_SENSORS]:
pulse_minutes = sensor_config[CONF_PULSE_MINUTES]
sensor_id = sensor_config[CONF_ID]
related_entity_id = sensor_config[CONF_RELATED_ENTITY_ID]
sensor_registry[sensor_id] = PulseState(
pulse_missing=False,
receipt_deadline=None,
pulse_minutes=pulse_minutes,
related_entity_id=related_entity_id,
update_time=None,
last_exception=None
)
_LOGGER.debug("Added sensor to registry: %s", sensor_id)
entities.append(PulseMissingSensor(
sensor_config[CONF_ID],
sensor_config[CONF_NAME],
sensor_config[CONF_ICON],
sensor_registry[sensor_id]
))
_LOGGER.debug("Created entity for sensor: %s", sensor_id)
async_add_entities(entities)
await async_manage_sensor_registry_updates(
hass,
sensor_registry
)
async def async_manage_sensor_registry_updates(
hass: HomeAssistant,
sensor_registry: Dict[str, PulseState]
) -> None:
"""Update the registry and create polling."""
_pulse_data_lock = asyncio.Lock()
_timeout_scheduled = False
def _handle_missing_pulse(sensor_id: str, pulse_state: PulseState) -> bool:
""" Called when pulse goes missing. Returns true if the pulse went
missing since the last time it was received -- i.e. it happened since
the last time it was updated.
"""
_LOGGER.debug(
"Handling missing pulse: "
"sensor=%s, related_entity_id=%s, current_state=%s",
sensor_id,
pulse_state.related_entity_id,
pulse_state.pulse_missing
)
if pulse_state.pulse_missing:
return False
pulse_state.pulse_missing = True
entity_id = pulse_state.related_entity_id
minutes = pulse_state.pulse_minutes
persistent_notification.async_create(
hass,
f"No updates received from '{entity_id}' in {minutes} minutes. ",
title=f"Pulse missing: {sensor_id}",
notification_id=sensor_id + '.' + str(int(time.time()))
)
return True
def _handle_pulse_event(sensor_id: str, pulse_state: PulseState) -> bool:
""" Update a pulse's state when a pulse event is received. Returns
True if the state goes from missing to present.
"""
_LOGGER.debug(
"Handling pulse event received: entity=%s; current_state=%s",
pulse_state.related_entity_id,
pulse_state.pulse_missing
)
state_changed = pulse_state.pulse_missing
pulse_state.pulse_missing = False
now = datetime.datetime.now()
pulse_state.update_time = now
pulse_state.last_exception = None
pulse_state.set_next_deadline()
entity_id = pulse_state.related_entity_id
if state_changed:
persistent_notification.async_create(
hass,
f"Missing pulse from '{entity_id}' resumed. ",
title=f"Pulse resumed: {sensor_id}",
notification_id=sensor_id + str(int(time.time()))
)
return state_changed
async def _set_next_deadline():
"""If a timeout has not been scheduled, schedule one for the closest
receipt_deadline in the future. Does not schedule a timeout if all the
pulses have gone missing.
Note that the callback timer's resolution is seconds, so 1 is added to
the timeout to avoid timeout times of zero.
"""
async with _pulse_data_lock:
nonlocal _timeout_scheduled
if _timeout_scheduled:
return
next_timeout: Optional[datetime.datetime] = None
now = datetime.datetime.now()
for sensor_id, pulse_state in sensor_registry.items():
if pulse_state.receipt_deadline < now:
continue
if next_timeout is None:
next_timeout = pulse_state.receipt_deadline
continue
if pulse_state.receipt_deadline < next_timeout:
next_timeout = pulse_state.receipt_deadline
if next_timeout is None:
_LOGGER.debug("No next timeout found")
return
_LOGGER.debug(
"Setting next pulse timeout: scheduled=%s",
next_timeout
)
_timeout_scheduled = True
next_timeout_seconds = int((next_timeout - now).total_seconds()) + 1
async_call_later(hass, next_timeout_seconds, _pulse_timeout)
# noinspection PyUnusedLocal
# timestamp ignored
async def _pulse_timeout(timestamp: datetime.datetime) -> None:
"""Given the current time, examines each of the sensors, and, if its
receipt_deadline is in the past, handles it as a missing pulse. Then,
        sets the next timeout.
"""
_LOGGER.debug("Pulse timeout!")
state_changed = False
async with _pulse_data_lock:
nonlocal _timeout_scheduled
_timeout_scheduled = False
now = datetime.datetime.now()
for sensor_id, pulse_state in sensor_registry.items():
_LOGGER.debug(
"State: sensor=%s; entity=%s, now=%s; deadline=%s",
sensor_id,
pulse_state.related_entity_id,
now,
pulse_state.receipt_deadline
)
if pulse_state.receipt_deadline < now:
state_changed |= _handle_missing_pulse(
sensor_id,
pulse_state
)
if state_changed:
async_dispatcher_send(hass, SIGNAL_HEARTBEAT_UPDATE)
await _set_next_deadline()
async def _event_to_pulse(event: Event):
"""Event listener, that, when the event's entity corresponds to one
of the sensors' related entities, resets that sensor's timeout. Also
calls _set_next_deadline() to handle the case where all the pulses
        have gone missing, and the pulse timeout has to be restarted.
"""
_LOGGER.debug("Event listener triggered!")
pp = pprint.PrettyPrinter()
pp.pprint(event)
state_changed: bool = False
async with _pulse_data_lock:
for sensor_id, sensor_data in sensor_registry.items():
_LOGGER.debug(
"Matching event: related_entity_id=%s; event_entity_id=%s",
sensor_data.related_entity_id,
event.data['entity_id']
)
if sensor_data.related_entity_id == event.data['entity_id']:
state_changed |= _handle_pulse_event(sensor_id, sensor_data)
_LOGGER.debug(
"Pulse received: entity_id=%s; state_changed=%s",
event.data['entity_id'],
state_changed
)
if state_changed:
async_dispatcher_send(hass, SIGNAL_HEARTBEAT_UPDATE)
await _set_next_deadline()
# For event_time, passed in by HASS, but not used.
# noinspection PyUnusedLocal
async def _start_pulse_monitor(event_time: datetime.datetime):
"""Start monitoring pulses, and set up the first pulse deadline."""
for sensor_id, pulse_state in sensor_registry.items():
pulse_state.set_next_deadline()
remove_listener = hass.bus.async_listen(
EVENT_STATE_CHANGED,
_event_to_pulse
)
# TODO: Remove
_LOGGER.debug("Event listener installed!")
pp = pprint.PrettyPrinter()
pp.pprint(remove_listener)
await _set_next_deadline()
# Start working once HASS is up.
hass.bus.async_listen(EVENT_HOMEASSISTANT_STARTED, _start_pulse_monitor)
class PulseMissingSensor(BinarySensorEntity):
"""A sensor that turns on when activity was not sensed within a given
time frame.
"""
def __init__(
self,
id_: str,
name: str,
icon: Optional[str],
pulse_state: PulseState
) -> None:
"""Initialize the sensor, with an id, name, and pulse period. Also,
give it access to the sensor data that is collected out of band.
"""
self._name: str = name
self._unique_id: str = id_
self._pulse_state: PulseState = pulse_state
self._icon: str = icon
@property
def name(self) -> str:
"""Return the name of the sensor."""
return self._name
@property
def unique_id(self) -> str:
"""Return the unique ID."""
return self._unique_id
@property
def device_class(self) -> Optional[str]:
"""Return the class of this sensor."""
return None
@property
def icon(self) -> Optional[str]:
"""Icon to use in the frontend."""
return self._icon
@property
def available(self) -> bool:
"""Return True if entity is available."""
return True
@property
def should_poll(self) -> bool:
"""Entity does not poll."""
return False
@property
def data(self) -> PulseState:
"""Return registry entry for the data."""
return self._pulse_state
|
import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""Django command to pause execution until the database is avaialable"""
def handle(self, *args, **options):
"""Handle the command"""
self.stdout.write('Waiting for database...')
db_conn = None
while not db_conn:
try:
db_conn = connections['default']
except OperationalError:
self.stdout.write('Database unavailable, waiting 1 second...')
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Database Available!'))
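
# Typical invocation, assuming this module lives at
# <app>/management/commands/wait_for_db.py (the command name comes from the file
# name):
#   python manage.py wait_for_db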
|
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Embedding, Dot, Add, Flatten
from tensorflow.keras.regularizers import l2
from tensorflow.keras.optimizers import Adam
# df = pd.read_csv("./data/processed_rating.csv")
# N = df["user_idx"].max() + 1
# M = df["isbn_idx"].max() + 1
# df = shuffle(df)
# cut_off = int(0.8 * len(df))
# df_train = df.iloc[:cut_off]
# df_test = df.iloc[cut_off:]
# K = 15
# mu = df_train["Book-Rating"].mean()
# epochs = 15
# reg_penalty = 0.0
# u = Input(shape=(1, ))
# b = Input(shape=(1, ))
# u_embedding = Embedding(N, K, embeddings_regularizer=l2(reg_penalty))(u)
# b_embedding = Embedding(M, K, embeddings_regularizer=l2(reg_penalty))(b)
# u_bias = Embedding(N, 1, embeddings_regularizer=l2(reg_penalty))(u)
# b_bias = Embedding(M, 1, embeddings_regularizer=l2(reg_penalty))(b)
# x = Dot(axes=2)([u_embedding, b_embedding])
# x = Add()([x, u_bias, b_bias])
# x = Flatten()(x)
# model = Model(inputs=[u, b], outputs=x)
# model.compile(loss='mse', optimizer=Adam(lr=0.01), metrics=["mse"])
# r = model.fit(
# x=[df_train["user_idx"].values, df_train["isbn_idx"].values],
# y=df_train["Book-Rating"].values - mu,
# epochs=epochs,
# batch_size=128,
# validation_data=([df_test["user_idx"].values,
# df_test["isbn_idx"].values], df_test["Book-Rating"].values - mu))
# plt.plot(r.history['loss'], label="train loss")
# plt.plot(r.history['val_loss'], label="test loss")
# plt.legend()
# plt.show()
df = pd.read_csv("./data/archive/ratings.csv")
# N = len(set(df["user_id"].values)) + 1
# M = len(set(df["book_id"].values)) + 1
# df = shuffle(df)
# cut_off = int(0.8 * len(df))
# df_train = df.iloc[:cut_off]
# df_test = df.iloc[cut_off:]
# K = 15
# mu = df_train["rating"].mean()
# epochs = 15
# reg_penalty = 0.0
# u = Input(shape=(1, ))
# b = Input(shape=(1, ))
# u_embedding = Embedding(N, K, embeddings_regularizer=l2(reg_penalty))(u)
# b_embedding = Embedding(M, K, embeddings_regularizer=l2(reg_penalty))(b)
# u_bias = Embedding(N, 1, embeddings_regularizer=l2(reg_penalty))(u)
# b_bias = Embedding(M, 1, embeddings_regularizer=l2(reg_penalty))(b)
# x = Dot(axes=2)([u_embedding, b_embedding])
# x = Add()([x, u_bias, b_bias])
# x = Flatten()(x)
# model = Model(inputs=[u, b], outputs=x)
# model.compile(loss='mse', optimizer=Adam(lr=0.01), metrics=["mse"])
# r = model.fit(x=[df_train["user_id"].values, df_train["book_id"].values],
# y=df_train["rating"].values - mu,
# epochs=epochs,
# batch_size=128,
# validation_data=([
# df_test["user_id"].values, df_test["book_id"].values
# ], df_test["rating"].values - mu))
# model.save('regression_model.h5')
# plt.plot(r.history['loss'], label="train loss")
# plt.plot(r.history['val_loss'], label="test loss")
# plt.legend()
# plt.show()
def predict(user_id):
model = keras.models.load_model('regression_model.h5')
book_data = np.array(list(set(df.book_id)))
user = np.array([user_id for i in range(len(book_data))])
predictions = model.predict([user, book_data])
predictions = np.array([a[0] for a in predictions])
recommended_book_ids = (-predictions).argsort()[:5]
print(recommended_book_ids)
print(predictions[recommended_book_ids])
predict(1)
|
#coding:utf-8
class Config(object):
init_scale = 0.04
learning_rate = 0.001
max_grad_norm = 15
num_layers = 3
num_steps = 25 # number of steps to unroll the RNN for
hidden_size = 1000 # size of hidden layer of neurons
iteration = 40
save_freq = 5 #The step (counted by the number of iterations) at which the model is saved to hard disk.
keep_prob = 0.5
batch_size = 32
    model_path = './model/Model'  # path of the model to save or load
    # parameters for generation
    save_time = 40  # load the model checkpoint saved at iteration save_time
    is_sample = True  # True means sampling from the distribution; otherwise take the argmax
    is_beams = True  # whether or not to use beam search
    beam_size = 4  # size of the beam search
    len_of_generation = 10  # number of characters to generate
    start_sentence = u'如果'  # the seed sentence (Chinese for "if") used to start generation
|
# -*- coding:utf-8 -*-
__author__ = 'Randolph'
import os
import sys
import time
import logging
sys.path.append('../')
logging.getLogger('tensorflow').disabled = True
import tensorflow as tf
from utils import checkmate as cm
from utils import data_helpers as dh
from utils import param_parser as parser
from sklearn.metrics import mean_squared_error, r2_score
args = parser.parameter_parser()
MODEL = dh.get_model_name()
logger = dh.logger_fn("tflog", "logs/Test-{0}.log".format(time.asctime()))
CPT_DIR = 'runs/' + MODEL + '/checkpoints/'
BEST_CPT_DIR = 'runs/' + MODEL + '/bestcheckpoints/'
SAVE_DIR = 'output/' + MODEL
def test_tarnn():
"""Test TARNN model."""
# Print parameters used for the model
dh.tab_printer(args, logger)
# Load data
logger.info("Loading data...")
logger.info("Data processing...")
test_data = dh.load_data_and_labels(args.test_file, args.word2vec_file, data_aug_flag=False)
logger.info("Data padding...")
x_test_content, x_test_question, x_test_option, y_test = dh.pad_data(test_data, args.pad_seq_len)
# Load tarnn model
OPTION = dh.option(pattern=1)
if OPTION == 'B':
logger.info("Loading best model...")
checkpoint_file = cm.get_best_checkpoint(BEST_CPT_DIR, select_maximum_value=True)
else:
logger.info("Loading latest model...")
checkpoint_file = tf.train.latest_checkpoint(CPT_DIR)
logger.info(checkpoint_file)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=args.allow_soft_placement,
log_device_placement=args.log_device_placement)
session_conf.gpu_options.allow_growth = args.gpu_options_allow_growth
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{0}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_x_content = graph.get_operation_by_name("input_x_content").outputs[0]
input_x_question = graph.get_operation_by_name("input_x_question").outputs[0]
input_x_option = graph.get_operation_by_name("input_x_option").outputs[0]
input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
is_training = graph.get_operation_by_name("is_training").outputs[0]
# Tensors we want to evaluate
scores = graph.get_operation_by_name("output/scores").outputs[0]
loss = graph.get_operation_by_name("loss/loss").outputs[0]
# Split the output nodes name by '|' if you have several output nodes
output_node_names = "output/scores"
# Save the .pb model file
output_graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def,
output_node_names.split("|"))
tf.train.write_graph(output_graph_def, "graph", "graph-tarnn-{0}.pb".format(MODEL), as_text=False)
# Generate batches for one epoch
batches = dh.batch_iter(list(zip(x_test_content, x_test_question, x_test_option, y_test)),
args.batch_size, 1, shuffle=False)
test_counter, test_loss = 0, 0.0
# Collect the predictions here
true_labels = []
predicted_scores = []
for batch_test in batches:
x_batch_content, x_batch_question, x_batch_option, y_batch = zip(*batch_test)
feed_dict = {
input_x_content: x_batch_content,
input_x_question: x_batch_question,
input_x_option: x_batch_option,
input_y: y_batch,
dropout_keep_prob: 1.0,
is_training: False
}
batch_scores, cur_loss = sess.run([scores, loss], feed_dict)
# Prepare for calculating metrics
for i in y_batch:
true_labels.append(i)
for j in batch_scores:
predicted_scores.append(j)
test_loss = test_loss + cur_loss
test_counter = test_counter + 1
# Calculate PCC & DOA
pcc, doa = dh.evaluation(true_labels, predicted_scores)
# Calculate RMSE
rmse = mean_squared_error(true_labels, predicted_scores) ** 0.5
r2 = r2_score(true_labels, predicted_scores)
test_loss = float(test_loss / test_counter)
logger.info("All Test Dataset: Loss {0:g} | PCC {1:g} | DOA {2:g} | RMSE {3:g} | R2 {4:g}"
.format(test_loss, pcc, doa, rmse, r2))
# Save the prediction result
if not os.path.exists(SAVE_DIR):
os.makedirs(SAVE_DIR)
dh.create_prediction_file(output_file=SAVE_DIR + "/predictions.json", all_id=test_data.id,
all_labels=true_labels, all_predict_scores=predicted_scores)
logger.info("All Done.")
if __name__ == '__main__':
test_tarnn()
|
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import zipfile
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# script repurposed from sentdex's edits and TensorFlow's example script. Pretty messy as not all unnecessary
# parts of the original have been removed
# # Model preparation
# ## Variables
#
# Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_CKPT` to point to a new .pb file.
#
# By default we use an "SSD with Mobilenet" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
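# For example, after exporting a custom model one would only change the path below
# (the directory name here is hypothetical):
#   PATH_TO_CKPT = 'my_exported_model/frozen_inference_graph.pb'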
# What model to download.
MODEL_NAME = 'trained_model' # change to whatever folder has the new graph
# MODEL_FILE = MODEL_NAME + '.tar.gz' # these lines not needed as we are using our own model
# DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('training', 'label.pbtxt') # our labels are in training/object-detection.pbkt
NUM_CLASSES = 3  # number of classes defined in the label map
# ## Download Model
# opener = urllib.request.URLopener() # we don't need to download model since we have our own
# opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
# tar_file = tarfile.open(MODEL_FILE)
# for file in tar_file.getmembers():
# file_name = os.path.basename(file.name)
# if 'frozen_inference_graph.pb' in file_name:
# tar_file.extract(file, os.getcwd())
# ## Load a (frozen) Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# ## Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine
# In[7]:
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
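# As noted above, any dict mapping class indices to label entries works in place of
# the utilities. A hand-built equivalent (class names are hypothetical, since
# label.pbtxt is not shown here) would look like:
#   category_index = {
#       1: {'id': 1, 'name': 'class_1'},
#       2: {'id': 2, 'name': 'class_2'},
#       3: {'id': 3, 'name': 'class_3'},
#   }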
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
# For the sake of simplicity we will use only 2 images:
# image1.jpg
# image2.jpg
# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.
PATH_TO_TEST_IMAGES_DIR = 'test'
TEST_IMAGE_PATHS = [os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(0, 60)] # adjust range for # of images in folder
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
i = 0
for image_path in TEST_IMAGE_PATHS:
image = Image.open(image_path)
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
image_np = load_image_into_numpy_array(image)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            # Each score represents the level of confidence for each of the objects.
            # The score is shown on the result image, together with the class label.
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8)
plt.figure(figsize=IMAGE_SIZE)
plt.imshow(image_np) # matplotlib is configured for command line only so we save the outputs instead
plt.savefig("outputs/detection_output{}.png".format(i)) # create an outputs folder for the images to be saved
i = i+1 # this was a quick fix for iteration, create a pull request if you'd like
|
# Databricks notebook source
import pandas as pd
from os import listdir
from os.path import join, basename
import struct
import pickle
import json
import os
from scipy import misc
import datetime as dt
from pyspark.sql.types import *
from pyspark.sql.functions import udf
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# import matplotlib.pyplot as plt
# %matplotlib inline
# COMMAND ----------
# %pylab inline
from bigdl.nn.layer import *
from bigdl.nn.criterion import *
from bigdl.optim.optimizer import *
from bigdl.util.common import *
from bigdl.dataset.transformer import *
from bigdl.dataset import mnist
from bigdl.transform.vision.image import *
from zoo.pipeline.nnframes.nn_image_reader import *
from zoo.pipeline.nnframes.nn_image_transformer import *
from zoo.pipeline.nnframes.nn_classifier import *
from zoo.common.nncontext import *
import urllib
# COMMAND ----------
def scala_T(input_T):
"""
Helper function for building Inception layers. Transforms a list of numbers to a dictionary with ascending keys
and 0 appended to the front. Ignores dictionary inputs.
:param input_T: either list or dict
:return: dictionary with ascending keys and 0 appended to front {0: 0, 1: realdata_1, 2: realdata_2, ...}
"""
if type(input_T) is list:
# insert 0 into first index spot, such that the real data starts from index 1
temp = [0]
temp.extend(input_T)
return dict(enumerate(temp))
# if dictionary, return it back
return input_T
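# Quick sanity check of the transform described in the docstring above: a list
# gains a leading 0 at key 0, while dict inputs pass through unchanged.
assert scala_T([64, 128]) == {0: 0, 1: 64, 2: 128}
assert scala_T({1: 64, 2: 128}) == {1: 64, 2: 128}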
# COMMAND ----------
def Inception_Layer_v1(input_size, config, name_prefix=""):
"""
Builds the inception-v1 submodule, a local network, that is stacked in the entire architecture when building
the full model.
:param input_size: dimensions of input coming into the local network
:param config: ?
:param name_prefix: string naming the layers of the particular local network
    :return: concat container object with all of the Sequential layers' output concatenated depthwise
"""
'''
    Concat is a container that concatenates the output of its submodules along the provided dimension: all submodules
    take the same inputs, and their outputs are concatenated.
'''
concat = Concat(2)
"""
In the above code, we first create a container Sequential. Then add the layers into the container one by one. The
order of the layers in the model is same with the insertion order.
"""
conv1 = Sequential()
    # Adding layers to the conv1 model we just created
    # SpatialConvolution is a module that applies a 2D convolution over an input image.
conv1.add(SpatialConvolution(input_size, config[1][1], 1, 1, 1, 1).set_name(name_prefix + "1x1"))
conv1.add(ReLU(True).set_name(name_prefix + "relu_1x1"))
concat.add(conv1)
conv3 = Sequential()
conv3.add(SpatialConvolution(input_size, config[2][1], 1, 1, 1, 1).set_name(name_prefix + "3x3_reduce"))
conv3.add(ReLU(True).set_name(name_prefix + "relu_3x3_reduce"))
conv3.add(SpatialConvolution(config[2][1], config[2][2], 3, 3, 1, 1, 1, 1).set_name(name_prefix + "3x3"))
conv3.add(ReLU(True).set_name(name_prefix + "relu_3x3"))
concat.add(conv3)
conv5 = Sequential()
conv5.add(SpatialConvolution(input_size,config[3][1], 1, 1, 1, 1).set_name(name_prefix + "5x5_reduce"))
conv5.add(ReLU(True).set_name(name_prefix + "relu_5x5_reduce"))
conv5.add(SpatialConvolution(config[3][1], config[3][2], 5, 5, 1, 1, 2, 2).set_name(name_prefix + "5x5"))
conv5.add(ReLU(True).set_name(name_prefix + "relu_5x5"))
concat.add(conv5)
pool = Sequential()
pool.add(SpatialMaxPooling(3, 3, 1, 1, 1, 1, to_ceil=True).set_name(name_prefix + "pool"))
pool.add(SpatialConvolution(input_size, config[4][1], 1, 1, 1, 1).set_name(name_prefix + "pool_proj"))
pool.add(ReLU(True).set_name(name_prefix + "relu_pool_proj"))
concat.add(pool).set_name(name_prefix + "output")
return concat
# COMMAND ----------
def Inception_v1(class_num):
model = Sequential()
model.add(SpatialConvolution(3, 64, 7, 7, 2, 2, 3, 3, 1, False).set_name("conv1/7x7_s2"))
model.add(ReLU(True).set_name("conv1/relu_7x7"))
model.add(SpatialMaxPooling(3, 3, 2, 2, to_ceil=True).set_name("pool1/3x3_s2"))
model.add(SpatialCrossMapLRN(5, 0.0001, 0.75).set_name("pool1/norm1"))
model.add(SpatialConvolution(64, 64, 1, 1, 1, 1).set_name("conv2/3x3_reduce"))
model.add(ReLU(True).set_name("conv2/relu_3x3_reduce"))
model.add(SpatialConvolution(64, 192, 3, 3, 1, 1, 1, 1).set_name("conv2/3x3"))
model.add(ReLU(True).set_name("conv2/relu_3x3"))
model.add(SpatialCrossMapLRN(5, 0.0001, 0.75).set_name("conv2/norm2"))
model.add(SpatialMaxPooling(3, 3, 2, 2, to_ceil=True).set_name("pool2/3x3_s2"))
model.add(Inception_Layer_v1(192, scala_T([scala_T([64]), scala_T(
[96, 128]), scala_T([16, 32]), scala_T([32])]), "inception_3a/"))
model.add(Inception_Layer_v1(256, scala_T([scala_T([128]), scala_T(
[128, 192]), scala_T([32, 96]), scala_T([64])]), "inception_3b/"))
model.add(SpatialMaxPooling(3, 3, 2, 2, to_ceil=True))
model.add(Inception_Layer_v1(480, scala_T([scala_T([192]), scala_T(
[96, 208]), scala_T([16, 48]), scala_T([64])]), "inception_4a/"))
model.add(Inception_Layer_v1(512, scala_T([scala_T([160]), scala_T(
[112, 224]), scala_T([24, 64]), scala_T([64])]), "inception_4b/"))
model.add(Inception_Layer_v1(512, scala_T([scala_T([128]), scala_T(
[128, 256]), scala_T([24, 64]), scala_T([64])]), "inception_4c/"))
model.add(Inception_Layer_v1(512, scala_T([scala_T([112]), scala_T(
[144, 288]), scala_T([32, 64]), scala_T([64])]), "inception_4d/"))
model.add(Inception_Layer_v1(528, scala_T([scala_T([256]), scala_T(
[160, 320]), scala_T([32, 128]), scala_T([128])]), "inception_4e/"))
model.add(SpatialMaxPooling(3, 3, 2, 2, to_ceil=True))
model.add(Inception_Layer_v1(832, scala_T([scala_T([256]), scala_T(
[160, 320]), scala_T([32, 128]), scala_T([128])]), "inception_5a/"))
model.add(Inception_Layer_v1(832, scala_T([scala_T([384]), scala_T(
[192, 384]), scala_T([48, 128]), scala_T([128])]), "inception_5b/"))
model.add(SpatialAveragePooling(7, 7, 1, 1).set_name("pool5/7x7_s1"))
model.add(Dropout(0.4).set_name("pool5/drop_7x7_s1"))
model.add(View([1024], num_input_dims=3))
model.add(Linear(1024, class_num).set_name("loss3/classifier"))
model.add(LogSoftMax().set_name("loss3/loss3"))
model.reset()
return model
# COMMAND ----------
# MAGIC %md ## Download the images from Amazon s3
# MAGIC
# MAGIC Make sure you have AWS command line interface to recursively download all images in s3 folder. You can set up aws cli from this link: http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html
# COMMAND ----------
import urllib
from os import path
MODEL_ROOT = "/mnt/nobigdl/few-inceptionv1"
# dbutils.fs.mkdirs(MODEL_ROOT)
#local_folder = DATA_ROOT + '/vegnonveg-samples'
checkpoint_path = path.join(MODEL_ROOT, "checkpoints")
# if not path.isdir(local_folder):
# os.system('aws s3 cp --recursive s3://vegnonveg/vegnonveg-fewsamples %s' % local_folder)
# COMMAND ----------
# MAGIC %md ## Save images and load to Spark as BigDL ImageFrame
# MAGIC
# MAGIC save data to parquet files and load to spark. Add label to each image.
# COMMAND ----------
DATA_ROOT = "/data/worldbank/"
sample_path = DATA_ROOT + 'samples/'
# sample_path = DATA_ROOT + 'imagenet_samples/'
# sample_path = '/mnt/nobigdl/vegnonveg-samples100/'
label_path = DATA_ROOT + 'vegnonveg-samples_labels.csv'
parquet_path = DATA_ROOT + 'sample_parquet/'
# dbutils.fs.rm(parquet_path, True)
# COMMAND ----------
sparkConf = create_spark_conf().setMaster("local[2]").setAppName("test_validation")
sc = get_spark_context(sparkConf)
sqlContext = SQLContext(sc)
# initialize BigDL
init_engine()
redire_spark_logs()
# This only needs to run the first time, to generate the Parquet files
image_frame = NNImageReader.readImages(sample_path, sc, minPartitions=32)
# save dataframe to parquet files
# image_frame.write.parquet(parquet_path)
# ImageFrame.write_parquet(sample_path, parquet_path, sc, partition_num=32)
# COMMAND ----------
# load parquet file into spark cluster
import time
start = time.time()
image_raw_DF = sqlContext.read.parquet(parquet_path)
end = time.time()
print("Load data time is: " + str(end-start) + " seconds")
# COMMAND ----------
# create dict from item_name to label
labels_csv = pd.read_csv(label_path)
unique_labels = labels_csv['item_name'].unique().tolist()
label_dict = dict(zip(unique_labels, range(1,len(unique_labels)+1)))
class_num = len(label_dict)
# COMMAND ----------
# create label dataframe
label_raw_DF = sqlContext.read.format("com.databricks.spark.csv")\
.option("header", "true")\
.option("mode", "DROPMALFORMED")\
.load(label_path)
get_label = udf(lambda item_name: float(label_dict[item_name]), FloatType())
change_name = udf(lambda uid: uid+".jpg", StringType())
labelDF = label_raw_DF.withColumn("label", get_label("item_name")).withColumn("image_name", change_name("obs_uid"))
labelDF.show(truncate=False)
# COMMAND ----------
get_name = udf(lambda row: row[0].split("/")[-1], StringType())
imageDF = image_raw_DF.withColumn("image_name", get_name("image"))
imageDF.show(truncate=False)
dataDF = imageDF.join(labelDF, "image_name", "inner").select("image", "image_name", "label")
dataDF.show(truncate=False)
# COMMAND ----------
# MAGIC %md ## Do Train/Test Split and preprocessing
# MAGIC Split the data into train and test sets with a fixed ratio and preprocess the images.
# COMMAND ----------
data = dataDF.randomSplit([0.8, 0.2], seed=10)
train_image = data[0]
val_image = data[1]
type(train_image)
# COMMAND ----------
IMAGE_SIZE = 224
train_transformer = NNImageTransformer(
Pipeline([Resize(256, 256), RandomCrop(IMAGE_SIZE, IMAGE_SIZE),
ChannelNormalize(123.0, 117.0, 104.0, 1.0, 1.0, 1.0),
MatToTensor()])
).setInputCol("image").setOutputCol("features")
train_data = train_transformer.transform(train_image)
# COMMAND ----------
train_size = train_image.count()
# COMMAND ----------
print(train_size)
# COMMAND ----------
val_transformer = NNImageTransformer(
Pipeline([Resize(256,256),
CenterCrop(IMAGE_SIZE, IMAGE_SIZE),
ChannelNormalize(123.0, 117.0, 104.0, 1.0, 1.0, 1.0),
MatToTensor(to_rgb=True)]
)
).setInputCol("image").setOutputCol("features")
# COMMAND ----------
test_data = val_transformer.transform(val_image)
# COMMAND ----------
# MAGIC %md ## Define Model
# COMMAND ----------
# Network Parameters
n_classes = len(label_dict)  # number of item_name categories
model = Inception_v1(n_classes)
# COMMAND ----------
# Parameters
learning_rate = 0.2
# training parameters
batch_size = 2  # depends on the dataset
no_epochs = 1  # stop when validation accuracy doesn't improve anymore
# COMMAND ----------
criterion = ClassNLLCriterion()
classifier = NNClassifier(model, criterion, [3,IMAGE_SIZE,IMAGE_SIZE])\
.setBatchSize(batch_size)\
.setMaxEpoch(no_epochs)\
.setLearningRate(learning_rate)
start = time.time()
trained_model = classifier.fit(train_data)
end = time.time()
print("Optimization Done.")
print("Training time is: %s seconds" % str(end-start))
# + dt.datetime.now().strftime("%Y%m%d-%H%M%S")
# COMMAND ----------
throughput = train_size * no_epochs / (end - start)
print("Average throughput is: %s" % str(throughput))
# COMMAND ----------
#predict
predict_model = trained_model.setBatchSize(batch_size)
predictionDF = predict_model.transform(test_data)
predictionDF.show()
# COMMAND ----------
num_preds = 1
preds = predictionDF.select("label", "prediction").take(num_preds)
for idx in range(num_preds):
# true_label = str(map_to_label(map_groundtruth_label(truth[idx].label)))
true_label = preds[idx][0]
pred_label = preds[idx][1]
print(idx + 1, ')', 'Ground Truth label: ', true_label)
print(idx + 1, ')', 'Predicted label: ', pred_label)
print("correct" if true_label == pred_label else "wrong")
# COMMAND ----------
'''
Measure test accuracy on the test set
'''
evaluator = MulticlassClassificationEvaluator(labelCol="label", predictionCol="prediction", metricName="accuracy")
accuracy = evaluator.evaluate(predictionDF)
# expected error should be less than 10%
print("Accuracy = %g " % accuracy)
|
from typing import Tuple, Optional, List, Union
import torch
from torch.nn import *
import math
def gmm(x: torch.Tensor, w: torch.Tensor) -> torch.Tensor:
return torch.einsum('ndo,bnd->bno', w, x)
class GraphLinear(Module):
def __init__(self, in_features: int, out_features: int):
super().__init__()
self.in_features = in_features
self.out_features = out_features
def reset_parameters(self) -> None:
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
#stdv = 1. / math.sqrt(self.weight.size(1))
#self.weight.data.uniform_(-stdv, stdv)
#if self.learn_influence:
# self.G.data.uniform_(-stdv, stdv)
if len(self.weight.shape) == 3:
self.weight.data[1:] = self.weight.data[0]
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input: torch.Tensor, g: Optional[torch.Tensor] = None) -> torch.Tensor:
if g is None and self.learn_influence:
g = torch.nn.functional.normalize(self.G, p=1., dim=1)
#g = torch.softmax(self.G, dim=1)
elif g is None:
g = self.G
w = self.weight[self.node_type_index]
output = self.mm(input, w.transpose(-2, -1))
if self.bias is not None:
bias = self.bias[self.node_type_index]
output += bias
output = g.matmul(output)
return output
class DynamicGraphLinear(GraphLinear):
def __init__(self, num_node_types: int = 1, *args):
super().__init__(*args)
def forward(self, input: torch.Tensor, g: torch.Tensor = None, t: torch.Tensor = None) -> torch.Tensor:
assert g is not None or t is not None, "Either Graph Influence Matrix or Node Type Vector is needed"
if g is None:
g = self.G[t][:, t]
return super().forward(input, g)
class StaticGraphLinear(GraphLinear):
def __init__(self, *args, bias: bool = True, num_nodes: int = None, graph_influence: Union[torch.Tensor, Parameter] = None,
learn_influence: bool = False, node_types: torch.Tensor = None, weights_per_type: bool = False):
"""
:param in_features: Size of each input sample
:param out_features: Size of each output sample
:param num_nodes: Number of nodes.
:param graph_influence: Graph Influence Matrix
        :param learn_influence: If set to ``False``, the layer will not learn the Graph Influence Matrix.
        :param node_types: List of types, one for each node. All nodes of the same type will share weights.
                            Default: All nodes have unique types.
        :param weights_per_type: If set to ``True``, the layer will learn separate weights for each node type.
:param bias: If set to ``False``, the layer will not learn an additive bias.
"""
super().__init__(*args)
self.learn_influence = learn_influence
if graph_influence is not None:
assert num_nodes == graph_influence.shape[0] or num_nodes is None, 'Number of Nodes or Graph Influence Matrix has to be given.'
num_nodes = graph_influence.shape[0]
if type(graph_influence) is Parameter:
assert learn_influence, "Graph Influence Matrix is a Parameter, therefore it must be learnable."
self.G = graph_influence
elif learn_influence:
self.G = Parameter(graph_influence)
else:
self.register_buffer('G', graph_influence)
else:
assert num_nodes, 'Number of Nodes or Graph Influence Matrix has to be given.'
eye_influence = torch.eye(num_nodes, num_nodes)
if learn_influence:
self.G = Parameter(eye_influence)
else:
self.register_buffer('G', eye_influence)
if weights_per_type and node_types is None:
node_types = torch.tensor([i for i in range(num_nodes)])
if node_types is not None:
num_node_types = node_types.max() + 1
self.weight = Parameter(torch.Tensor(num_node_types, self.out_features, self.in_features))
self.mm = gmm
self.node_type_index = node_types
else:
self.weight = Parameter(torch.Tensor(self.out_features, self.in_features))
self.mm = torch.matmul
self.node_type_index = None
if bias:
if node_types is not None:
self.bias = Parameter(torch.Tensor(num_node_types, self.out_features))
else:
self.bias = Parameter(torch.Tensor(self.out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
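# A minimal usage sketch for ``StaticGraphLinear`` (not part of the original module);
# the layer sizes and tensor shapes below are hypothetical and only illustrate the
# (batch, nodes, features) layout the layer expects.
def _demo_static_graph_linear() -> torch.Tensor:
    layer = StaticGraphLinear(8, 16, num_nodes=3, learn_influence=True)
    x = torch.randn(4, 3, 8)  # (batch, nodes, in_features)
    return layer(x)           # shape (4, 3, 16); outputs are mixed across nodes by G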
GraphLSTMState = Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]
class BN(Module):
def __init__(self, num_nodes, num_features):
super().__init__()
self.num_nodes = num_nodes
self.num_features = num_features
self.bn = BatchNorm1d(num_nodes * num_features)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.bn(x.view(-1, self.num_nodes * self.num_features)).view(-1, self.num_nodes, self.num_features)
class LinearX(Module):
def __init__(self):
super().__init__()
def forward(self, input: torch.Tensor) -> torch.Tensor:
return input
class StaticGraphLSTMCell_(Module):
def __init__(self, input_size: int, hidden_size: int, num_nodes: int = None, dropout: float = 0.,
recurrent_dropout: float = 0., graph_influence: Union[torch.Tensor, Parameter] = None,
learn_influence: bool = False, additive_graph_influence: Union[torch.Tensor, Parameter] = None,
learn_additive_graph_influence: bool = False, node_types: torch.Tensor = None,
weights_per_type: bool = False, clockwork: bool = False, bias: bool = True):
"""
:param input_size: The number of expected features in the input `x`
:param hidden_size: The number of features in the hidden state `h`
:param num_nodes:
:param dropout:
:param recurrent_dropout:
:param graph_influence:
:param learn_influence:
:param additive_graph_influence:
:param learn_additive_graph_influence:
:param node_types:
:param weights_per_type:
:param bias:
"""
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.learn_influence = learn_influence
self.learn_additive_graph_influence = learn_additive_graph_influence
if graph_influence is not None:
assert num_nodes == graph_influence.shape[0] or num_nodes is None, 'Number of Nodes or Graph Influence Matrix has to be given.'
num_nodes = graph_influence.shape[0]
if type(graph_influence) is Parameter:
assert learn_influence, "Graph Influence Matrix is a Parameter, therefore it must be learnable."
self.G = graph_influence
elif learn_influence:
self.G = Parameter(graph_influence)
else:
self.register_buffer('G', graph_influence)
else:
assert num_nodes, 'Number of Nodes or Graph Influence Matrix has to be given.'
eye_influence = torch.eye(num_nodes, num_nodes)
if learn_influence:
self.G = Parameter(eye_influence)
else:
self.register_buffer('G', eye_influence)
if additive_graph_influence is not None:
if type(additive_graph_influence) is Parameter:
self.G_add = additive_graph_influence
elif learn_additive_graph_influence:
self.G_add = Parameter(additive_graph_influence)
else:
self.register_buffer('G_add', additive_graph_influence)
else:
if learn_additive_graph_influence:
self.G_add = Parameter(torch.zeros_like(self.G))
else:
self.G_add = 0.
if weights_per_type and node_types is None:
node_types = torch.tensor([i for i in range(num_nodes)])
if node_types is not None:
num_node_types = node_types.max() + 1
self.weight_ih = Parameter(torch.Tensor(num_node_types, 4 * hidden_size, input_size))
self.weight_hh = Parameter(torch.Tensor(num_node_types, 4 * hidden_size, hidden_size))
self.mm = gmm
self.register_buffer('node_type_index', node_types)
else:
self.weight_ih = Parameter(torch.Tensor(4 * hidden_size, input_size))
self.weight_hh = Parameter(torch.Tensor(4 * hidden_size, hidden_size))
self.mm = torch.matmul
self.register_buffer('node_type_index', None)
if bias:
if node_types is not None:
self.bias_ih = Parameter(torch.Tensor(num_node_types, 4 * hidden_size))
self.bias_hh = Parameter(torch.Tensor(num_node_types, 4 * hidden_size))
else:
self.bias_ih = Parameter(torch.Tensor(4 * hidden_size))
self.bias_hh = Parameter(torch.Tensor(4 * hidden_size))
else:
self.bias_ih = None
self.bias_hh = None
self.clockwork = clockwork
if clockwork:
phase = torch.arange(0., hidden_size)
phase = phase - phase.min()
phase = (phase / phase.max()) * 8.
phase += 1.
phase = torch.floor(phase)
self.register_buffer('phase', phase)
else:
phase = torch.ones(hidden_size)
self.register_buffer('phase', phase)
self.dropout = Dropout(dropout)
self.r_dropout = Dropout(recurrent_dropout)
self.num_nodes = num_nodes
self.init_weights()
def init_weights(self):
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
if weight is self.G:
continue
if weight is self.G_add:
continue
weight.data.uniform_(-stdv, stdv)
            if (weight is self.weight_hh or weight is self.weight_ih) and len(self.weight_ih.shape) == 3:
                weight.data[1:] = weight.data[0]
def forward(self, input: torch.Tensor, state: GraphLSTMState, t: int = 0) -> Tuple[torch.Tensor, GraphLSTMState]:
hx, cx, gx = state
if hx is None:
hx = torch.zeros(input.shape[0], self.num_nodes, self.hidden_size, dtype=input.dtype, device=input.device)
if cx is None:
cx = torch.zeros(input.shape[0], self.num_nodes, self.hidden_size, dtype=input.dtype, device=input.device)
if gx is None and self.learn_influence:
gx = torch.nn.functional.normalize(self.G, p=1., dim=1)
#gx = torch.softmax(self.G, dim=1)
elif gx is None:
gx = self.G
hx = self.r_dropout(hx)
weight_ih = self.weight_ih[self.node_type_index]
weight_hh = self.weight_hh[self.node_type_index]
if self.bias_hh is not None:
bias_hh = self.bias_hh[self.node_type_index]
else:
bias_hh = 0.
c_mask = (torch.remainder(torch.tensor(t + 1., device=input.device), self.phase) < 0.01).type_as(cx)
gates = (self.dropout(self.mm(input, weight_ih.transpose(-2, -1))) +
self.mm(hx, weight_hh.transpose(-2, -1)) + bias_hh)
gates = torch.matmul(gx, gates)
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 2)
ingate = torch.sigmoid(ingate)
forgetgate = torch.sigmoid(forgetgate)
cellgate = torch.tanh(cellgate)
outgate = torch.sigmoid(outgate)
cy = c_mask * ((forgetgate * cx) + (ingate * cellgate)) + (1 - c_mask) * cx
hy = outgate * torch.tanh(cy)
gx = gx + self.G_add
if self.learn_influence or self.learn_additive_graph_influence:
gx = torch.nn.functional.normalize(gx, p=1., dim=1)
#gx = torch.softmax(gx, dim=1)
return hy, (hy, cy, gx)
class StaticGraphLSTM_(Module):
def __init__(self, input_size: int, hidden_size: int, num_layers: int = 1, layer_dropout: float = 0.0, **kwargs):
super().__init__()
self.layers = ModuleList([StaticGraphLSTMCell_(input_size, hidden_size, **kwargs)]
+ [StaticGraphLSTMCell_(hidden_size, hidden_size, **kwargs) for _ in range(num_layers - 1)])
self.dropout = Dropout(layer_dropout)
def forward(self, input: torch.Tensor, states: Optional[List[GraphLSTMState]] = None, t_i: int = 0) -> Tuple[torch.Tensor, List[GraphLSTMState]]:
if states is None:
n: Optional[torch.Tensor] = None
states = [(n, n, n)] * len(self.layers)
output_states: List[GraphLSTMState] = []
output = input
i = 0
for rnn_layer in self.layers:
state = states[i]
inputs = output.unbind(1)
outputs: List[torch.Tensor] = []
for t, input in enumerate(inputs):
out, state = rnn_layer(input, state, t_i+t)
outputs += [out]
output = torch.stack(outputs, dim=1)
output = self.dropout(output)
output_states += [state]
i += 1
return output, output_states
def StaticGraphLSTM(*args, **kwargs):
return torch.jit.script(StaticGraphLSTM_(*args, **kwargs))
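# A minimal usage sketch for the graph LSTM (not part of the original module); it
# instantiates the eager class directly so the example does not depend on TorchScript.
# Shapes are hypothetical: the input is laid out as (batch, time, nodes, features).
def _demo_static_graph_lstm() -> torch.Tensor:
    lstm = StaticGraphLSTM_(input_size=8, hidden_size=16, num_layers=2, num_nodes=3)
    x = torch.randn(4, 5, 3, 8)   # (batch, time steps, nodes, input_size)
    output, _states = lstm(x)     # per-layer (h, c, g) states are returned as well
    return output                 # shape (4, 5, 3, 16)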
GraphGRUState = Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]
class StaticGraphGRUCell_(Module):
def __init__(self, input_size: int, hidden_size: int, num_nodes: int = None, dropout: float = 0.,
recurrent_dropout: float = 0., graph_influence: Union[torch.Tensor, Parameter] = None,
learn_influence: bool = False, additive_graph_influence: Union[torch.Tensor, Parameter] = None,
learn_additive_graph_influence: bool = False, node_types: torch.Tensor = None,
weights_per_type: bool = False, clockwork: bool = False, bias: bool = True):
"""
:param input_size: The number of expected features in the input `x`
:param hidden_size: The number of features in the hidden state `h`
:param num_nodes:
:param dropout:
:param recurrent_dropout:
:param graph_influence:
:param learn_influence:
:param additive_graph_influence:
:param learn_additive_graph_influence:
:param node_types:
:param weights_per_type:
:param bias:
"""
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.learn_influence = learn_influence
self.learn_additive_graph_influence = learn_additive_graph_influence
if graph_influence is not None:
assert num_nodes == graph_influence.shape[0] or num_nodes is None, 'Number of Nodes or Graph Influence Matrix has to be given.'
num_nodes = graph_influence.shape[0]
if type(graph_influence) is Parameter:
assert learn_influence, "Graph Influence Matrix is a Parameter, therefore it must be learnable."
self.G = graph_influence
elif learn_influence:
self.G = Parameter(graph_influence)
else:
self.register_buffer('G', graph_influence)
else:
assert num_nodes, 'Number of Nodes or Graph Influence Matrix has to be given.'
eye_influence = torch.eye(num_nodes, num_nodes)
if learn_influence:
self.G = Parameter(eye_influence)
else:
self.register_buffer('G', eye_influence)
if additive_graph_influence is not None:
if type(additive_graph_influence) is Parameter:
self.G_add = additive_graph_influence
elif learn_additive_graph_influence:
self.G_add = Parameter(additive_graph_influence)
else:
self.register_buffer('G_add', additive_graph_influence)
else:
if learn_additive_graph_influence:
self.G_add = Parameter(torch.zeros_like(self.G))
else:
self.G_add = 0.
if weights_per_type and node_types is None:
node_types = torch.tensor([i for i in range(num_nodes)])
if node_types is not None:
num_node_types = node_types.max() + 1
self.weight_ih = Parameter(torch.Tensor(num_node_types, 3 * hidden_size, input_size))
self.weight_hh = Parameter(torch.Tensor(num_node_types, 3 * hidden_size, hidden_size))
self.mm = gmm
self.register_buffer('node_type_index', node_types)
else:
self.weight_ih = Parameter(torch.Tensor(3 * hidden_size, input_size))
self.weight_hh = Parameter(torch.Tensor(3 * hidden_size, hidden_size))
self.mm = torch.matmul
self.register_buffer('node_type_index', None)
if bias:
if node_types is not None:
self.bias_ih = Parameter(torch.Tensor(num_node_types, 3 * hidden_size))
self.bias_hh = Parameter(torch.Tensor(num_node_types, 3 * hidden_size))
else:
self.bias_ih = Parameter(torch.Tensor(3 * hidden_size))
self.bias_hh = Parameter(torch.Tensor(3 * hidden_size))
else:
self.bias_ih = None
self.bias_hh = None
self.clockwork = clockwork
if clockwork:
phase = torch.arange(0., hidden_size)
phase = phase - phase.min()
phase = (phase / phase.max()) * 8.
phase += 1.
phase = torch.floor(phase)
self.register_buffer('phase', phase)
else:
phase = torch.ones(hidden_size)
self.register_buffer('phase', phase)
self.dropout = Dropout(dropout)
self.r_dropout = Dropout(recurrent_dropout)
self.num_nodes = num_nodes
self.init_weights()
def init_weights(self):
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
if weight is self.G:
continue
if weight is self.G_add:
continue
weight.data.uniform_(-stdv, stdv)
#if weight is self.weight_hh or weight is self.weight_ih and len(self.weight_ih.shape) == 3:
# weight.data[1:] = weight.data[0]
def forward(self, input: torch.Tensor, state: GraphGRUState, t: int = 0) -> Tuple[torch.Tensor, GraphGRUState]:
hx, gx = state
if hx is None:
hx = torch.zeros(input.shape[0], self.num_nodes, self.hidden_size, dtype=input.dtype, device=input.device)
if gx is None and self.learn_influence:
gx = torch.nn.functional.normalize(self.G, p=1., dim=1)
#gx = torch.softmax(self.G, dim=1)
elif gx is None:
gx = self.G
hx = self.r_dropout(hx)
weight_ih = self.weight_ih[self.node_type_index]
weight_hh = self.weight_hh[self.node_type_index]
if self.bias_hh is not None:
bias_hh = self.bias_hh[self.node_type_index]
else:
bias_hh = 0.
if self.bias_ih is not None:
bias_ih = self.bias_ih[self.node_type_index]
else:
bias_ih = 0.
c_mask = (torch.remainder(torch.tensor(t + 1., device=input.device), self.phase) < 0.01).type_as(hx)
x_results = self.dropout(self.mm(input, weight_ih.transpose(-2, -1))) + bias_ih
h_results = self.mm(hx, weight_hh.transpose(-2, -1)) + bias_hh
x_results = torch.matmul(gx, x_results)
h_results = torch.matmul(gx, h_results)
i_r, i_z, i_n = x_results.chunk(3, 2)
h_r, h_z, h_n = h_results.chunk(3, 2)
r = torch.sigmoid(i_r + h_r)
z = torch.sigmoid(i_z + h_z)
n = torch.tanh(i_n + r * h_n)
hy = n - torch.mul(n, z) + torch.mul(z, hx)
hy = c_mask * hy + (1 - c_mask) * hx
gx = gx + self.G_add
if self.learn_influence or self.learn_additive_graph_influence:
gx = torch.nn.functional.normalize(gx, p=1., dim=1)
#gx = torch.softmax(gx, dim=1)
return hy, (hy, gx)
class StaticGraphGRU_(Module):
def __init__(self, input_size: int, hidden_size: int, num_layers: int = 1, layer_dropout: float = 0.0, **kwargs):
super().__init__()
self.layers = ModuleList([StaticGraphGRUCell_(input_size, hidden_size, **kwargs)]
+ [StaticGraphGRUCell_(hidden_size, hidden_size, **kwargs) for _ in range(num_layers - 1)])
self.dropout = Dropout(layer_dropout)
def forward(self, input: torch.Tensor, states: Optional[List[GraphGRUState]] = None, t_i: int = 0) -> Tuple[torch.Tensor, List[GraphGRUState]]:
if states is None:
n: Optional[torch.Tensor] = None
states = [(n, n)] * len(self.layers)
output_states: List[GraphGRUState] = []
output = input
i = 0
for rnn_layer in self.layers:
state = states[i]
inputs = output.unbind(1)
outputs: List[torch.Tensor] = []
for t, input in enumerate(inputs):
out, state = rnn_layer(input, state, t_i+t)
outputs += [out]
output = torch.stack(outputs, dim=1)
output = self.dropout(output)
output_states += [state]
i += 1
return output, output_states
def StaticGraphGRU(*args, **kwargs):
return torch.jit.script(StaticGraphGRU_(*args, **kwargs))
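# A matching sketch for the GRU variant (not part of the original module); usage
# mirrors the LSTM demo above, with (h, g) states instead of (h, c, g).
def _demo_static_graph_gru() -> torch.Tensor:
    gru = StaticGraphGRU_(input_size=8, hidden_size=16, num_layers=1, num_nodes=3)
    x = torch.randn(4, 5, 3, 8)   # (batch, time steps, nodes, input_size)
    output, _states = gru(x)
    return output                 # shape (4, 5, 3, 16)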
|
from htmltreediff.diff_core import Differ
from htmltreediff.edit_script_runner import EditScriptRunner
from htmltreediff.changes import (
split_text_nodes,
sort_del_before_ins,
_strip_changes_new,
_strip_changes_old,
)
from htmltreediff.util import (
minidom_tostring,
node_compare,
parse_minidom,
remove_dom_attributes,
walk_dom,
)
def reverse_edit_script(edit_script):
if edit_script is None:
return None
def opposite_action(action):
if action == 'delete':
return 'insert'
elif action == 'insert':
return 'delete'
reverse_script = []
for action, location, node_properties in reversed(edit_script):
reverse_script.append(
(opposite_action(action), location, node_properties),
)
return reverse_script
def reverse_changes_html(changes):
dom = parse_minidom(changes)
reverse_changes(dom)
return minidom_tostring(dom)
def reverse_changes(dom):
nodes = dom.getElementsByTagName('del') + dom.getElementsByTagName('ins')
for node in nodes:
if node.tagName == 'del':
node.tagName = 'ins'
elif node.tagName == 'ins':
node.tagName = 'del'
sort_del_before_ins(dom)
def get_edit_script(old_html, new_html):
old_dom = parse_minidom(old_html)
new_dom = parse_minidom(new_html)
split_text_nodes(old_dom)
split_text_nodes(new_dom)
differ = Differ(old_dom, new_dom)
return differ.get_edit_script()
def html_patch(old_html, edit_script):
old_dom = parse_minidom(old_html)
split_text_nodes(old_dom)
runner = EditScriptRunner(old_dom, edit_script)
return minidom_tostring(runner.run_edit_script())
def strip_changes_old(html):
dom = parse_minidom(html)
_strip_changes_old(dom)
return minidom_tostring(dom)
def strip_changes_new(html):
dom = parse_minidom(html)
_strip_changes_new(dom)
return minidom_tostring(dom)
def remove_attributes(html):
dom = parse_minidom(html)
remove_dom_attributes(dom)
return minidom_tostring(dom)
def collapse(html):
"""Remove any indentation and newlines from the html."""
return ''.join([line.strip() for line in html.split('\n')]).strip()
class Case(object):
pass
def parse_cases(cases):
for args in cases:
case = Case()
if len(args) == 4:
case.name, case.old_html, case.new_html, case.target_changes = args
case.edit_script = None
elif len(args) == 5:
(
case.name,
case.old_html,
case.new_html,
case.target_changes,
case.edit_script,
) = args
else:
raise ValueError('Invalid test spec: %r' % (args,))
yield case
def test_node_compare():
del_node = list(walk_dom(parse_minidom('<del/>')))[-1]
ins_node = list(walk_dom(parse_minidom('<ins/>')))[-1]
assert -1 == node_compare(del_node, ins_node)
assert 1 == node_compare(ins_node, del_node)
|
""" module SimPEG.electromagnetics.natural_source
SimPEG implementation of the natural source problem
(including magnetotellurics, tipper and ZTEM)
"""
from . import utils
from . import sources as Src
from . import receivers as Rx
from .survey import Survey, Data
from .fields import Fields1DPrimarySecondary, Fields3DPrimarySecondary
from .simulation import Simulation1DPrimarySecondary, Simulation3DPrimarySecondary
from . import sources
from . import receivers
from .simulation_1d import Simulation1DRecursive
|
#!/usr/bin/env python
# Copyright (c) 2016 The UUV Simulator Authors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import time
import sys, select, termios, tty
import rospy
import numpy as np
from std_msgs.msg import Bool
from geometry_msgs.msg import Twist, Accel, Vector3
class KeyBoardVehicleTeleop:
def __init__(self):
# Class Variables
self.settings = termios.tcgetattr(sys.stdin)
# Speed setting
self.speed = 1 # 1 = Slow, 2 = Fast
self.l = Vector3(0, 0, 0) # Linear Velocity for Publish
self.a = Vector3(0, 0, 0) # Angular Velocity for publishing
        self.linear_increment = 0.05  # How much to increment linear velocities by, to avoid jerkiness
self.linear_limit = 0.2 # Linear velocity limit = self.linear_limit * self.speed
self.angular_increment = 0.05
self.angular_limit = 0.25
# User Interface
self.msg = """
Control Your Vehicle!
---------------------------
Moving around:
W/S: X-Axis
A/D: Y-Axis
X/Z: Z-Axis
Q/E: Yaw
I/K: Pitch
J/L: Roll
Slow / Fast: 1 / 2
CTRL-C to quit
"""
# Default message remains as twist
self._msg_type = 'twist'
if rospy.has_param('~type'):
self._msg_type = rospy.get_param('~type')
if self._msg_type not in ['twist', 'accel']:
raise rospy.ROSException('Teleoperation output must be either '
'twist or accel')
# Name Publisher topics accordingly
if self._msg_type == 'twist':
self._output_pub = rospy.Publisher('output', Twist, queue_size=1)
# self._output_pub = rospy.Publisher('/rexrov2/cmd_vel', Twist, queue_size=1)
else:
self._output_pub = rospy.Publisher('output', Accel, queue_size=1)
print(self.msg)
# Ros Spin
rate = rospy.Rate(50) # 50hz
while not rospy.is_shutdown():
rate.sleep()
self._parse_keyboard()
    # Every spin this function will return the key being pressed
    # Currently only one key per spin is handled, which limits control; alternative input methods are being explored
def _get_key(self):
tty.setraw(sys.stdin.fileno())
rlist, _, _ = select.select([sys.stdin], [], [], 0.1)
if rlist:
key = sys.stdin.read(1)
else:
key = ''
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.settings)
return key
    # Function to gradually build up the speed and avoid jerkiness #
def _speed_windup(self, speed, increment, limit, reverse):
        if reverse:
speed -= increment * self.speed
if speed < -limit * self.speed:
speed = -limit * self.speed
else:
speed += increment * self.speed
if speed > limit * self.speed:
speed = limit * self.speed
return speed
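    # For example, starting from rest with speed=1 and holding "w", self.l.x ramps up
    # as 0.05 -> 0.10 -> 0.15 -> 0.20 and then saturates at linear_limit * self.speed.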
def _parse_keyboard(self):
        # Save the key being pressed
key_press = self._get_key()
# Set Vehicle Speed #
if key_press == "1":
self.speed = 1
if key_press == "2":
self.speed = 2
# Choose ros message accordingly
if self._msg_type == 'twist':
cmd = Twist()
else:
cmd = Accel()
        # If a key is pressed assign the relevant linear / angular velocity
if key_press!='':
# Linear velocities:
# Forward
if key_press == "w":
self.l.x = self._speed_windup(self.l.x, self.linear_increment, self.linear_limit, False)
# Backwards
if key_press == "s":
self.l.x = self._speed_windup(self.l.x, self.linear_increment, self.linear_limit, True)
# Left
if key_press == "a":
self.l.y = self._speed_windup(self.l.y, self.linear_increment, self.linear_limit, False)
# Right
if key_press == "d":
self.l.y = self._speed_windup(self.l.y, self.linear_increment, self.linear_limit, True)
# Up
if key_press == "x":
self.l.z = self._speed_windup(self.l.z, self.linear_increment, self.linear_limit, False)
# Down
if key_press == "z":
self.l.z = self._speed_windup(self.l.z, self.linear_increment, self.linear_limit, True)
# Angular Velocities
# Roll Left
if key_press == "j":
self.a.x = self._speed_windup(self.a.x, self.linear_increment, self.linear_limit, True)
# Roll Right
if key_press == "l":
self.a.x = self._speed_windup(self.a.x, self.linear_increment, self.linear_limit, False)
# Pitch Down
if key_press == "i":
self.a.y = self._speed_windup(self.a.y, self.linear_increment, self.linear_limit, False)
# Pitch Up
if key_press == "k":
self.a.y = self._speed_windup(self.a.y, self.linear_increment, self.linear_limit, True)
# Yaw Left
if key_press == "q":
self.a.z = self._speed_windup(self.a.z, self.angular_increment, self.angular_limit, False)
# Yaw Right
if key_press == "e":
self.a.z = self._speed_windup(self.a.z, self.angular_increment, self.angular_limit, True)
else:
# If no button is pressed reset velocities to 0
self.l = Vector3(0, 0, 0)
self.a = Vector3(0, 0, 0)
# Store velocity message into Twist format
cmd.angular = self.a
cmd.linear = self.l
# If ctrl+c kill node
if (key_press == '\x03'):
rospy.loginfo('Keyboard Interrupt Pressed')
rospy.loginfo('Shutting down [%s] node' % node_name)
# Set twists to 0
cmd.angular = Vector3(0, 0, 0)
cmd.linear = Vector3(0, 0, 0)
self._output_pub.publish(cmd)
exit(-1)
# Publish message
self._output_pub.publish(cmd)
if __name__ == '__main__':
# Wait for 5 seconds, so the instructions are the last thing to print in terminal
time.sleep(5)
# Start the node
node_name = os.path.splitext(os.path.basename(__file__))[0]
rospy.init_node(node_name)
rospy.loginfo('Starting [%s] node' % node_name)
teleop = KeyBoardVehicleTeleop()
    termios.tcsetattr(sys.stdin, termios.TCSADRAIN, teleop.settings)
rospy.loginfo('Shutting down [%s] node' % node_name)
|
# coding: utf-8
"""
InfluxDB OSS API Service.
The InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class VariableLinks(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'_self': 'str',
'org': 'str',
'labels': 'str'
}
attribute_map = {
'_self': 'self',
'org': 'org',
'labels': 'labels'
}
def __init__(self, _self=None, org=None, labels=None): # noqa: E501,D401,D403
"""VariableLinks - a model defined in OpenAPI.""" # noqa: E501
self.__self = None
self._org = None
self._labels = None
self.discriminator = None
if _self is not None:
self._self = _self
if org is not None:
self.org = org
if labels is not None:
self.labels = labels
@property
def _self(self):
"""Get the _self of this VariableLinks.
:return: The _self of this VariableLinks.
:rtype: str
""" # noqa: E501
return self.__self
@_self.setter
def _self(self, _self):
"""Set the _self of this VariableLinks.
:param _self: The _self of this VariableLinks.
:type: str
""" # noqa: E501
self.__self = _self
@property
def org(self):
"""Get the org of this VariableLinks.
:return: The org of this VariableLinks.
:rtype: str
""" # noqa: E501
return self._org
@org.setter
def org(self, org):
"""Set the org of this VariableLinks.
:param org: The org of this VariableLinks.
:type: str
""" # noqa: E501
self._org = org
@property
def labels(self):
"""Get the labels of this VariableLinks.
:return: The labels of this VariableLinks.
:rtype: str
""" # noqa: E501
return self._labels
@labels.setter
def labels(self, labels):
"""Set the labels of this VariableLinks.
:param labels: The labels of this VariableLinks.
:type: str
""" # noqa: E501
self._labels = labels
def to_dict(self):
"""Return the model properties as a dict."""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Return the string representation of the model."""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`."""
return self.to_str()
def __eq__(self, other):
"""Return true if both objects are equal."""
if not isinstance(other, VariableLinks):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Return true if both objects are not equal."""
return not self == other
|
from default_types_fwrap import *
__doc__ = u'''
>>> bar(100,200,300) == (1, 2.0, 3.0)
True
'''
|
from django.db import models
class CapitalizeField(models.CharField):
def __init__(self, *args, **kwargs):
super(CapitalizeField, self).__init__(*args, **kwargs)
def pre_save(self, model_instance, add):
value = getattr(model_instance, self.attname, None)
if value:
value = value.capitalize()
setattr(model_instance, self.attname, value)
return value
else:
return super(CapitalizeField, self).pre_save(model_instance, add)
class CustomManager(models.Manager):
"""
Custom manager so as not to return deleted objects
"""
def get_queryset(self):
return super(CustomManager, self).get_queryset().filter(deleted=False)
class AbstractBase(models.Model):
"""
This contains all common object attributes
Every model will inherit this class to avoid repetition
    It's abstract, hence it can't be instantiated
"""
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
deleted = models.BooleanField(
default=False,
help_text="This is to make sure deletes are not actual deletes"
)
    # `everything` can be used to query all objects, including deleted ones, e.g. Model.everything.all()
everything = models.Manager()
objects = CustomManager()
def delete(self, *args, **kwargs):
self.deleted = True
self.save()
class Meta:
ordering = ['-updated_at', '-created_at']
abstract = True
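# A minimal sketch (hypothetical ``Book`` model, not part of this app) of how the
# soft-delete pattern above behaves; shown as comments because declaring a real
# model here would register an extra table for the app.
#
#   class Book(AbstractBase):
#       title = models.CharField(max_length=128)
#
#   book = Book.objects.create(title='example')
#   book.delete()           # sets deleted=True; the row stays in the database
#   Book.objects.all()      # excludes soft-deleted rows (CustomManager)
#   Book.everything.all()   # includes them (plain models.Manager)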
|
# -*- coding: utf-8 -*-
"""
@Time : 2020/10/21 10:39
@Auth : Qi
@IDE : PyCharm
@Title: 6. Zigzag Conversion
@Link : https://leetcode-cn.com/problems/zigzag-conversion/
"""
class Solution:
def convert(self, s: str, numRows: int) -> str:
if numRows <= 0:
return ''
if numRows == 1:
return s
ret = ''
for i in range(numRows):
tmp = i
time = numRows * 2 - 2
while tmp < len(s):
if i == 0 or i == numRows - 1 and tmp:
ret += s[tmp]
tmp += time
else:
ret += s[tmp]
if tmp + time - i * 2 < len(s):
ret += s[tmp + time - i * 2]
else:
break
tmp += time
return ret
if __name__ == '__main__':
    # Test cases
s = Solution()
print(s.convert('ABCDE', 4))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Based on Kenneth Reitz's setup.py:
# https://github.com/kennethreitz/setup.py
# Note: To use the 'upload' functionality of this file, you must:
# $ pip install twine
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'nwswx'
DESCRIPTION = 'A Python 3 client for retrieving data from the NWS Weather Forecast API'
URL = 'https://github.com/stacybrock/nws-wx-client'
EMAIL = 'kalrnux@gmail.com'
AUTHOR = 'Stacy Brock'
REQUIRES_PYTHON = '>=3.4.0'
VERSION = None
# What packages are required for this module to be executed?
REQUIRED = [
'requests',
]
# What packages are optional?
EXTRAS = {
# 'fancy feature': ['django'],
}
# ------------------------------------------------
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
with open(os.path.join(here, NAME, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
# Where the magic happens:
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=['nwswx'],
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license='Apache-2.0',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
|
from threading import Event
class Message:
def __init__(self, timeout=10):
self._ready = Event()
self._timeout = timeout
self._response = None
@property
def result(self):
received = self._ready.wait(timeout=self._timeout)
if not received:
raise MqttError("CONNECTION", "No Response Received")
if not self._response['ok']:
raise MqttError(self._response['errorCode'], self._response['error'])
return self._response['data']
@result.setter
def result(self, dato):
self._response = dato
self._ready.set()
def __len__(self):
return len(self.result)
def __getitem__(self, key):
return self.result[key]
def __iter__(self):
return self.result.__iter__()
def __contains__(self, key):
return key in self.result
class MqttError(Exception):
def __init__(self, error_code, description):
self.error_code = error_code
self.description = description
|
import numpy as np
# from scipy.misc import imread, imresize
from scipy import misc
def preprocess_input(x, v2=True):
x = x.astype('float32')
x = x / 255.0
if v2:
x = x - 0.5
x = x * 2.0
return x
def _imread(image_name):
return misc.imread(image_name)
def _imresize(image_array, size):
return misc.imresize(image_array, size)
def to_categorical(integer_classes, num_classes=2):
integer_classes = np.asarray(integer_classes, dtype='int')
num_samples = integer_classes.shape[0]
categorical = np.zeros((num_samples, num_classes))
categorical[np.arange(num_samples), integer_classes] = 1
return categorical
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyNeurolab(PythonPackage):
"""Simple and powerfull neural network library for python"""
homepage = "http://neurolab.googlecode.com/"
pypi = "neurolab/neurolab-0.3.5.tar.gz"
version('0.3.5', sha256='96ec311988383c63664f3325668f27c30561cf4349e3bc5420665c042a3b9191')
depends_on('py-setuptools', type='build')
depends_on('py-numpy', type=('build', 'run'))
|
import unittest
from conans.client.conf import get_default_settings_yml
from conans.client.generators.pkg_config import PkgConfigGenerator
from conans.model.build_info import CppInfo
from conans.model.conan_file import ConanFile
from conans.model.env_info import EnvValues
from conans.model.ref import ConanFileReference
from conans.model.settings import Settings
from conans.test.utils.mocks import TestBufferConanOutput
class PkgGeneratorTest(unittest.TestCase):
def variables_setup_test(self):
conanfile = ConanFile(TestBufferConanOutput(), None)
conanfile.initialize(Settings({}), EnvValues())
ref = ConanFileReference.loads("MyPkg/0.1@lasote/stables")
cpp_info = CppInfo(ref.name, "dummy_root_folder1")
cpp_info.name = "my_pkg"
cpp_info.defines = ["MYDEFINE1"]
cpp_info.cflags.append("-Flag1=23")
cpp_info.version = "1.3"
cpp_info.description = "My cool description"
conanfile.deps_cpp_info.add(ref.name, cpp_info)
ref = ConanFileReference.loads("MyPkg1/0.1@lasote/stables")
cpp_info = CppInfo(ref.name, "dummy_root_folder1")
cpp_info.name = "MYPKG1"
cpp_info.defines = ["MYDEFINE11"]
cpp_info.cflags.append("-Flag1=21")
cpp_info.version = "1.7"
cpp_info.description = "My other cool description"
cpp_info.public_deps = ["MyPkg"]
conanfile.deps_cpp_info.add(ref.name, cpp_info)
ref = ConanFileReference.loads("MyPkg2/0.1@lasote/stables")
cpp_info = CppInfo(ref.name, "dummy_root_folder2")
cpp_info.defines = ["MYDEFINE2"]
cpp_info.version = "2.3"
cpp_info.exelinkflags = ["-exelinkflag"]
cpp_info.sharedlinkflags = ["-sharedlinkflag"]
cpp_info.cxxflags = ["-cxxflag"]
cpp_info.public_deps = ["MyPkg"]
conanfile.deps_cpp_info.add(ref.name, cpp_info)
generator = PkgConfigGenerator(conanfile)
files = generator.content
self.assertEqual(files["MyPkg2.pc"], """prefix=dummy_root_folder2
libdir=${prefix}/lib
includedir=${prefix}/include
Name: MyPkg2
Description: Conan package: MyPkg2
Version: 2.3
Libs: -L${libdir} -sharedlinkflag -exelinkflag
Cflags: -I${includedir} -cxxflag -DMYDEFINE2
Requires: my_pkg
""")
self.assertEqual(files["mypkg1.pc"], """prefix=dummy_root_folder1
libdir=${prefix}/lib
includedir=${prefix}/include
Name: mypkg1
Description: My other cool description
Version: 1.7
Libs: -L${libdir}
Cflags: -I${includedir} -Flag1=21 -DMYDEFINE11
Requires: my_pkg
""")
self.assertEqual(files["my_pkg.pc"], """prefix=dummy_root_folder1
libdir=${prefix}/lib
includedir=${prefix}/include
Name: my_pkg
Description: My cool description
Version: 1.3
Libs: -L${libdir}
Cflags: -I${includedir} -Flag1=23 -DMYDEFINE1
""")
def pkg_config_custom_names_test(self):
conanfile = ConanFile(TestBufferConanOutput(), None)
conanfile.initialize(Settings({}), EnvValues())
ref = ConanFileReference.loads("MyPkg/0.1@lasote/stables")
cpp_info = CppInfo(ref.name, "dummy_root_folder1")
cpp_info.name = "my_pkg"
cpp_info.names["pkg_config"] = "my_pkg_custom_name"
cpp_info.defines = ["MYDEFINE1"]
cpp_info.cflags.append("-Flag1=23")
cpp_info.version = "1.3"
cpp_info.description = "My cool description"
conanfile.deps_cpp_info.add(ref.name, cpp_info)
ref = ConanFileReference.loads("MyPkg1/0.1@lasote/stables")
cpp_info = CppInfo(ref.name, "dummy_root_folder1")
cpp_info.name = "MYPKG1"
cpp_info.names["pkg_config"] = "my_pkg1_custom_name"
cpp_info.defines = ["MYDEFINE11"]
cpp_info.cflags.append("-Flag1=21")
cpp_info.version = "1.7"
cpp_info.description = "My other cool description"
conanfile.deps_cpp_info.add(ref.name, cpp_info)
ref = ConanFileReference.loads("MyPkg2/0.1@lasote/stables")
cpp_info = CppInfo(ref.name, "dummy_root_folder2")
cpp_info.names["pkg_config"] = "my_pkg2_custom_name"
cpp_info.defines = ["MYDEFINE2"]
cpp_info.version = "2.3"
cpp_info.exelinkflags = ["-exelinkflag"]
cpp_info.sharedlinkflags = ["-sharedlinkflag"]
cpp_info.cxxflags = ["-cxxflag"]
cpp_info.public_deps = ["MyPkg", "MyPkg1"]
conanfile.deps_cpp_info.add(ref.name, cpp_info)
ref = ConanFileReference.loads("zlib/1.2.11@lasote/stable")
cpp_info = CppInfo(ref.name, "dummy_root_folder_zlib")
cpp_info.name = "ZLIB"
cpp_info.defines = ["MYZLIBDEFINE2"]
cpp_info.version = "1.2.11"
conanfile.deps_cpp_info.add(ref.name, cpp_info)
ref = ConanFileReference.loads("bzip2/0.1@lasote/stables")
cpp_info = CppInfo(ref.name, "dummy_root_folder2")
cpp_info.name = "BZip2"
cpp_info.names["pkg_config"] = "BZip2"
cpp_info.defines = ["MYDEFINE2"]
cpp_info.version = "2.3"
cpp_info.exelinkflags = ["-exelinkflag"]
cpp_info.sharedlinkflags = ["-sharedlinkflag"]
cpp_info.cxxflags = ["-cxxflag"]
cpp_info.public_deps = ["MyPkg", "MyPkg1", "zlib"]
conanfile.deps_cpp_info.add(ref.name, cpp_info)
generator = PkgConfigGenerator(conanfile)
files = generator.content
self.assertEqual(files["my_pkg2_custom_name.pc"], """prefix=dummy_root_folder2
libdir=${prefix}/lib
includedir=${prefix}/include
Name: my_pkg2_custom_name
Description: Conan package: my_pkg2_custom_name
Version: 2.3
Libs: -L${libdir} -sharedlinkflag -exelinkflag
Cflags: -I${includedir} -cxxflag -DMYDEFINE2
Requires: my_pkg_custom_name my_pkg1_custom_name
""")
self.assertEqual(files["my_pkg1_custom_name.pc"], """prefix=dummy_root_folder1
libdir=${prefix}/lib
includedir=${prefix}/include
Name: my_pkg1_custom_name
Description: My other cool description
Version: 1.7
Libs: -L${libdir}
Cflags: -I${includedir} -Flag1=21 -DMYDEFINE11
""")
self.assertEqual(files["my_pkg_custom_name.pc"], """prefix=dummy_root_folder1
libdir=${prefix}/lib
includedir=${prefix}/include
Name: my_pkg_custom_name
Description: My cool description
Version: 1.3
Libs: -L${libdir}
Cflags: -I${includedir} -Flag1=23 -DMYDEFINE1
""")
self.assertEqual(files["BZip2.pc"], """prefix=dummy_root_folder2
libdir=${prefix}/lib
includedir=${prefix}/include
Name: BZip2
Description: Conan package: BZip2
Version: 2.3
Libs: -L${libdir} -sharedlinkflag -exelinkflag
Cflags: -I${includedir} -cxxflag -DMYDEFINE2
Requires: my_pkg_custom_name my_pkg1_custom_name zlib
""")
def apple_frameworks_test(self):
settings = Settings.loads(get_default_settings_yml())
settings.compiler = "apple-clang"
settings.os = "Macos"
conanfile = ConanFile(TestBufferConanOutput(), None)
conanfile.initialize(Settings({}), EnvValues())
conanfile.settings = settings
ref = ConanFileReference.loads("MyPkg/0.1@lasote/stables")
cpp_info = CppInfo(ref.name, "dummy_root_folder1")
cpp_info.frameworks = ['AudioUnit', 'AudioToolbox']
cpp_info.version = "1.3"
cpp_info.description = "My cool description"
conanfile.deps_cpp_info.add(ref.name, cpp_info)
generator = PkgConfigGenerator(conanfile)
files = generator.content
self.assertEqual(files["MyPkg.pc"], """prefix=dummy_root_folder1
libdir=${prefix}/lib
includedir=${prefix}/include
Name: MyPkg
Description: My cool description
Version: 1.3
Libs: -L${libdir} -Wl,-rpath,"${libdir}" -framework AudioUnit -framework AudioToolbox
Cflags: -I${includedir}
""")
|
from decisionengine.framework.modules import Source, SourceProxy
BillingInfoSourceProxy = SourceProxy.SourceProxy
Source.describe(BillingInfoSourceProxy)
|
"""Serialization tests."""
|
from dataclasses import dataclass, field
from typing import Dict
@dataclass
class FooType:
class Meta:
name = "fooType"
value: str = field(
default="",
metadata={
"required": True,
}
)
any_attributes: Dict[str, str] = field(
default_factory=dict,
metadata={
"type": "Attributes",
"namespace": "##any",
}
)
@dataclass
class Root(FooType):
class Meta:
name = "root"
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from abc import ABC, abstractmethod
from contextlib import contextmanager
from typing import Any, List, Optional
import torch
from torch.nn.parallel import DistributedDataParallel
import pytorch_lightning as pl
from pytorch_lightning.overrides.base import unwrap_lightning_module
from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment
from pytorch_lightning.plugins.training_type.training_type_plugin import TrainingTypePlugin
from pytorch_lightning.utilities import _XLA_AVAILABLE
from pytorch_lightning.utilities.distributed import all_gather_ddp_if_available, ReduceOp
class ParallelPlugin(TrainingTypePlugin, ABC):
""" Plugin for training with multiple processes in parallel. """
def __init__(
self,
parallel_devices: Optional[List[torch.device]] = None,
cluster_environment: Optional[ClusterEnvironment] = None,
):
super().__init__()
self.parallel_devices = parallel_devices
self.cluster_environment = cluster_environment
@property
@abstractmethod
def root_device(self) -> torch.device:
raise NotImplementedError
@property
def on_gpu(self) -> bool:
return self.root_device.type == "cuda" and torch.cuda.is_available()
@property
def on_tpu(self) -> bool:
return self.root_device.type == "xla" and _XLA_AVAILABLE
@property
def lightning_module(self):
return unwrap_lightning_module(self._model)
@property
def global_rank(self) -> int:
return self.cluster_environment.global_rank() if self.cluster_environment is not None else 0
@property
def local_rank(self) -> int:
return self.cluster_environment.local_rank() if self.cluster_environment is not None else 0
@property
def node_rank(self) -> int:
return self.cluster_environment.node_rank() if self.cluster_environment is not None else 0
@property
def world_size(self) -> int:
return self.cluster_environment.world_size() if self.cluster_environment is not None else 1
@property
def is_global_zero(self) -> bool:
return self.global_rank == 0
@property
def distributed_sampler_kwargs(self):
distributed_sampler_kwargs = dict(num_replicas=len(self.parallel_devices), rank=self.global_rank)
return distributed_sampler_kwargs
def reconciliate_processes(self, trace: str):
"""
        Function to reconcile processes on failure
"""
def all_gather(self, tensor: torch.Tensor, group: Optional[Any] = None, sync_grads: bool = False) -> torch.Tensor:
"""Perform a all_gather on all processes """
return all_gather_ddp_if_available(tensor, group=group, sync_grads=sync_grads)
def reduce_boolean_decision(self, decision: bool) -> bool:
decision = torch.tensor(int(decision), device=self.lightning_module.device)
decision = self.reduce(decision, reduce_op=ReduceOp.SUM)
decision = bool(decision == self.world_size)
return decision
@property
def torch_distributed_backend(self):
torch_backend = os.getenv("PL_TORCH_DISTRIBUTED_BACKEND")
if torch_backend is None:
torch_backend = "nccl" if self.on_gpu else "gloo"
return torch_backend
@staticmethod
def configure_sync_batchnorm(model: 'pl.LightningModule') -> 'pl.LightningModule':
"""
Add global batchnorm for a model spread across multiple GPUs and nodes.
Override to synchronize batchnorm between specific process groups instead
of the whole world or use a different sync_bn like `apex`'s version.
Args:
model: pointer to current :class:`LightningModule`.
Return:
LightningModule with batchnorm layers synchronized between process groups
"""
return torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
@contextmanager
def block_backward_sync(self):
"""
        Blocks DDP gradient synchronization behaviour on the backwards pass.
        This is useful for skipping sync when accumulating gradients, reducing communication overhead.
Returns: context manager with sync behaviour off
"""
if isinstance(self.model, DistributedDataParallel):
with self.model.no_sync():
yield None
else:
yield None
def teardown(self) -> None:
# Un-reference the wrapper if any was used.
# todo (tchaton): Add support for all plugins.
if isinstance(self.model, DistributedDataParallel):
self.model = self.lightning_module
if self.on_gpu:
# GPU teardown
self.lightning_module.cpu()
# clean up memory
torch.cuda.empty_cache()
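# A hypothetical helper (not part of Lightning) sketching how ``block_backward_sync``
# is typically used together with gradient accumulation: the DDP all-reduce is skipped
# on every micro-batch except the last one of each accumulation window. The ``plugin``,
# ``model`` and ``batches`` arguments are assumed to be supplied by the caller.
def _demo_block_backward_sync(plugin: ParallelPlugin, model: DistributedDataParallel,
                              batches: List[torch.Tensor], accumulate_grad_batches: int = 2) -> None:
    for i, batch in enumerate(batches):
        if (i + 1) % accumulate_grad_batches != 0:
            with plugin.block_backward_sync():
                model(batch).sum().backward()  # gradients accumulate locally, no sync
        else:
            model(batch).sum().backward()      # gradients are all-reduced across processes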
|
{
"targets": [
{
"target_name": "modsecurity",
"sources": [ "modsecurity_wrap.cxx" ],
"include_dirs": ['/usr/include/modsecurity/',],
"libraries": ['/usr/lib/libmodsecurity.a',
'/usr/lib/libmodsecurity.so',
'/usr/lib/libmodsecurity.a',
'/usr/lib/libmodsecurity.so.3.0.0',
'/usr/lib/x86_64-linux-gnu/libxml2.so',
'/usr/lib/x86_64-linux-gnu/libcurl.so',
'/lib/x86_64-linux-gnu/libpcre.so.3',
'/usr/lib/x86_64-linux-gnu/libyajl.so',
'/usr/lib/x86_64-linux-gnu/libGeoIP.so',
'/usr/lib/x86_64-linux-gnu/liblmdb.so'],
"cflags" : [ "-std=c++11" ],
'cflags!': [ '-fno-exceptions' ],
'cflags_cc!': [ '-fno-exceptions' ]
}
]
}
|
import tempfile
import webbrowser
import time
import os
import pygtk
import gtk
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from grab import Grab
from .base import CaptchaBackend
pygtk.require('2.0')
class CaptchaWindow(object):
def __init__(self, path, solution):
self.solution = solution
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.show()
self.window.connect('destroy', self.destroy)
self.box = gtk.HBox()
self.image = gtk.Image()
self.image.set_from_file(path)
self.entry = gtk.Entry()
self.entry.connect('activate', self.solve)
self.button = gtk.Button('Go')
self.button.connect('clicked', self.solve)
self.window.add(self.box)
self.box.pack_start(self.image)
self.box.pack_start(self.entry)
self.box.pack_start(self.button)
self.box.show()
self.image.show()
self.button.show()
self.entry.show()
self.entry.grab_focus()
def destroy(self, *args):
gtk.main_quit()
def solve(self, *args):
self.solution.append(self.entry.get_text())
self.window.hide()
gtk.main_quit()
def main(self):
gtk.main()
class GuiBackend(CaptchaBackend):
def get_submit_captcha_request(self, data):
fd, path = tempfile.mkstemp()
with open(path, 'w') as out:
out.write(data)
url = 'file://' + path
g = Grab()
g.setup(url=url)
return g
def parse_submit_captcha_response(self, res):
return res.url.replace('file://', '')
def get_check_solution_request(self, captcha_id):
url = 'file://' + captcha_id
g = Grab()
g.setup(url=url)
return g
def parse_check_solution_response(self, res):
path = res.url.replace('file://', '')
solution = []
window = CaptchaWindow(path, solution)
window.main()
os.unlink(path)
return solution[0]
|
supported_archs = ["x86"]
supported_bits = [32, 64]
|
#%%
import numpy as np
import pandas as pd
import futileprot.viz
import altair as alt
import altair_saver
import scipy.stats
colors, palette = futileprot.viz.altair_style()
# Add metadata
DATE = '2021-08-14'
RUN_NO = 1
STRAINS = 'DoubleKO'
MEDIUM = 'acetate'
# Load the measurement data
data = pd.read_csv(f'./output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_exponential_phase.csv')
# Perform a simplistic inference of the growth rate to get a sense of what
# the result is.
# data = data.groupby(['strain', 'elapsed_time_hr']).mean().reset_index()
data = data[['strain', 'elapsed_time_hr', 'od_600nm']]
# For each strain, infer the growth rate and compute the fit
layout = False
for g, d in data.groupby(['strain']):
time_range = np.linspace(0, 1.25 * d['elapsed_time_hr'].max(), 10)
# Perform the regression
popt = scipy.stats.linregress(d['elapsed_time_hr'], np.log(d['od_600nm']))
slope, intercept, err = popt[0], popt[1], popt[-1]
print(f'{g}, {MEDIUM}: µ = {slope:0.3f} ± {err:0.3f} per hr.')
# Compute the fit
fit = np.exp(intercept + slope * time_range)
fit_df = pd.DataFrame([])
fit_df['elapsed_time_hr'] = time_range
fit_df['od_600nm'] = fit
# Generate the plot
points = alt.Chart(
data=d,
width=300,
height=150
).mark_point(
color=colors['primary_blue']
).encode(
x=alt.X('elapsed_time_hr:Q', title='elapsed time [hr]'),
y=alt.Y('od_600nm:Q', title='optical density [a.u]',
scale=alt.Scale(type='log'))
)
fit = alt.Chart(data=fit_df,
title=f'{g}, {MEDIUM}: µ = {slope:0.3f} ± {err:0.3f} per hr.'
).mark_line(
color=colors['primary_blue']
).encode(
x='elapsed_time_hr:Q',
y='od_600nm:Q'
)
merge = points + fit
    if layout is None:
layout = merge
else:
layout &= merge
altair_saver.save(layout, f'output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_fits.png',
scale_factor=2)
# %%
|
# -*- coding: utf-8 -*-
from . import wizard_wxwork_contacts_sync
from . import wizard_wxwork_sync_tag
from . import wizard_wxwork_sync_user
|
from __future__ import unicode_literals
from django.db import models
from django import forms
from django.template.defaultfilters import slugify
from django.contrib.auth.models import User
from django.utils import timezone
class Category(models.Model):
name = models.CharField(max_length=128, unique=True)
views = models.IntegerField(default=0)
likes = models.IntegerField(default=0)
slug = models.SlugField(unique=True)
def save(self, *args, **kwargs):
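        # Regenerate the slug from the name on every save and clamp negative view counts to zero.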
self.slug = slugify(self.name)
if self.views < 0:
self.views = 0
super(Category, self).save(*args, **kwargs)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'categories'
class Page(models.Model):
category = models.ForeignKey(Category)
title = models.CharField(max_length=128)
url = models.URLField()
views = models.IntegerField(default=0)
first_visit = models.DateTimeField(default=timezone.now)
last_visit = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.title
class UserProfile(models.Model):
# This line is required. Links UserProfile to a User model instance.
user = models.OneToOneField(User)
# The additional attributes we wish to include.
website = models.URLField(blank=True)
picture = models.ImageField(upload_to='profile_images', blank=True)
    # Override the __str__() method to return something meaningful!
def __str__(self):
return self.user.username
|
import matplotlib.colors
import matplotlib.pyplot as plt
import numpy as np
from pcl_helper import *
print('run features.py')
def rgb_to_hsv(rgb_list):
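    """Convert an [R, G, B] triplet in 0-255 to matplotlib's normalized HSV representation."""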
rgb_normalized = [1.0 * rgb_list[0] / 255, 1.0 * rgb_list[1] / 255, 1.0 * rgb_list[2] / 255]
hsv_normalized = matplotlib.colors.rgb_to_hsv([[rgb_normalized]])[0][0]
return hsv_normalized
def compute_color_histograms(cloud, using_hsv=False):
# Compute histograms for the clusters
point_colors_list = []
# Step through each point in the point cloud
for point in pc2.read_points(cloud, skip_nans=True):
rgb_list = float_to_rgb(point[3])
if using_hsv:
point_colors_list.append(rgb_to_hsv(rgb_list) * 255)
else:
point_colors_list.append(rgb_list)
# Populate lists with color values
channel_1_vals = []
channel_2_vals = []
channel_3_vals = []
for color in point_colors_list:
channel_1_vals.append(color[0])
channel_2_vals.append(color[1])
channel_3_vals.append(color[2])
# TODO: Compute histograms
nbins = 32
bins_range = (0, 256)
# TODO: Concatenate and normalize the histograms
channel_1_hist = np.histogram(channel_1_vals, bins=nbins, range=bins_range)
channel_2_hist = np.histogram(channel_2_vals, bins=nbins, range=bins_range)
channel_3_hist = np.histogram(channel_3_vals, bins=nbins, range=bins_range)
hist_features = np.concatenate((channel_1_hist[0], channel_2_hist[0], channel_3_hist[0])).astype(np.float64)
normed_features = hist_features / np.sum(hist_features)
# Generate random features for demo mode.
    # Replace normed_features with your feature vector
# normed_features = np.random.random(96)
# print('run normed_features finished')
return normed_features
def compute_normal_histograms(normal_cloud):
norm_x_vals = []
norm_y_vals = []
norm_z_vals = []
nbins = 32
bins_range = (-1, 1)
for norm_component in pc2.read_points(normal_cloud,
field_names=('normal_x', 'normal_y', 'normal_z'),
skip_nans=True):
norm_x_vals.append(norm_component[0])
norm_y_vals.append(norm_component[1])
norm_z_vals.append(norm_component[2])
# TODO: Compute histograms of normal values (just like with color)
norm_x_hist = np.histogram(norm_x_vals, bins=nbins, range=bins_range)
norm_y_hist = np.histogram(norm_y_vals, bins=nbins, range=bins_range)
norm_z_hist = np.histogram(norm_z_vals, bins=nbins, range=bins_range)
# TODO: Concatenate and normalize the histograms
norm_hist_features = np.concatenate((norm_x_hist[0], norm_y_hist[0], norm_z_hist[0])).astype(np.float64)
normed_features = norm_hist_features / np.sum(norm_hist_features)
# Generate random features for demo mode.
# Replace normed_features with your feature vector
# normed_feature = np.random.random(96)
# print('run compute_normal_histograms function finished')
return normed_features
|
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext as _
from django.http import HttpResponseRedirect
from django.conf import settings
from django.contrib import messages
from django.core.context_processors import csrf
from form_designer.forms import DesignedForm
from form_designer.models import FormDefinition
def process_form(request, form_definition, context=None, is_cms_plugin=False):
    if context is None:  # avoid a shared mutable default dict across calls
        context = {}
    success_message = form_definition.success_message or _('Thank you, the data was submitted successfully.')
error_message = form_definition.error_message or _('The data could not be submitted, please try again.')
message = None
form_error = False
form_success = False
is_submit = False
# If the form has been submitted...
if request.method == 'POST' and request.POST.get(form_definition.submit_flag_name):
form = DesignedForm(form_definition, None, request.POST)
is_submit = True
if request.method == 'GET' and request.GET.get(form_definition.submit_flag_name):
form = DesignedForm(form_definition, None, request.GET)
is_submit = True
if is_submit:
if form.is_valid():
# Successful submission
messages.success(request, success_message)
message = success_message
form_success = True
if form_definition.log_data:
form_definition.log(form)
if form_definition.mail_to:
form_definition.send_mail(form)
if form_definition.success_redirect and not is_cms_plugin:
# TODO Redirection does not work for cms plugin
return HttpResponseRedirect(form_definition.action or '?')
if form_definition.success_clear:
form = DesignedForm(form_definition) # clear form
else:
form_error = True
messages.error(request, error_message)
message = error_message
else:
if form_definition.allow_get_initial:
form = DesignedForm(form_definition, initial_data=request.GET)
else:
form = DesignedForm(form_definition)
context.update({
'message': message,
'form_error': form_error,
'form_success': form_success,
'form': form,
'form_definition': form_definition
})
context.update(csrf(request))
return context
def detail(request, object_name):
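    # Look up the form definition by name, let process_form() handle any submission,
    # and render the detail template (passing through a redirect if one is returned).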
form_definition = get_object_or_404(FormDefinition, name=object_name)
result = process_form(request, form_definition)
if isinstance(result, HttpResponseRedirect):
return result
result.update({
'form_template': form_definition.form_template_name or settings.DEFAULT_FORM_TEMPLATE
})
return render_to_response('html/formdefinition/detail.html', result,
context_instance=RequestContext(request))
|
#!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the blackdiamondcoin-Qt.app contains the right plist (including the right version)
# fix made because of several bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
bitcoinDir = "./"
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "blackdiamondcoin-Qt.app/Contents/Info.plist"
version = "unknown"
fileForGrabbingVersion = bitcoinDir+"bitcoin-qt.pro"
for line in open(fileForGrabbingVersion):
    lineArr = line.replace(" ", "").split("=")
    if lineArr[0].startswith("VERSION"):
        version = lineArr[1].replace("\n", "")
fIn = open(inFile, "r")
fileContent = fIn.read()
fIn.close()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version, YEAR=date.today().year)
fOut = open(outFile, "w")
fOut.write(newFileContent)
fOut.close()
print("Info.plist freshly created")
|
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Particl Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import os
import json
import configparser
from test_framework.test_falcon import (
FalconTestFramework,
isclose,
getIndexAtProperty,
)
from test_framework.test_framework import SkipTest
from test_framework.util import assert_raises_rpc_error
from test_framework.authproxy import JSONRPCException
class USBDeviceTest(FalconTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [ ['-debug','-noacceptnonstdtxn','-reservebalance=10000000', '-txindex'] for i in range(self.num_nodes)]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self, split=False):
self.add_nodes(self.num_nodes, extra_args=self.extra_args)
self.start_nodes()
self.connect_nodes_bi(0, 1)
self.connect_nodes_bi(0, 2)
self.connect_nodes_bi(1, 2)
self.sync_all()
def run_test(self):
# Check that falcon has been built with USB device enabled
config = configparser.ConfigParser()
if not self.options.configfile:
self.options.configfile = os.path.dirname(__file__) + "/../config.ini"
config.read_file(open(self.options.configfile))
if not config["components"].getboolean("ENABLE_USBDEVICE"):
raise SkipTest("falcond has not been built with usb device enabled.")
nodes = self.nodes
self.import_genesis_coins_a(nodes[0])
ro = nodes[1].listdevices()
assert(len(ro) == 1)
assert(ro[0]['vendor'] == 'Debug')
assert(ro[0]['product'] == 'Device')
ro = nodes[1].getdeviceinfo()
assert(ro['device'] == 'debug')
ro = nodes[1].getdevicepublickey('0')
assert(ro['address'] == 'praish9BVxVdhykpqBYEs6L65AQ7iKd9z1')
assert(ro['path'] == "m/44'/1'/0'/0")
ro = nodes[1].getdevicepublickey('0/1')
assert(ro['address'] == 'peWvjy33QptC2Gz3ww7jTTLPjC2QJmifBR')
assert(ro['path'] == "m/44'/1'/0'/0/1")
ro = nodes[1].getdevicexpub("m/44'/1'/0'", "")
assert(ro == 'pparszKXPyRegWYwPacdPduNPNEryRbZDCAiSyo8oZYSsbTjc6FLP4TCPEX58kAeCB6YW9cSdR6fsbpeWDBTgjbkYjXCoD9CNoFVefbkg3exzpQE')
message = 'This is just a test message'
sig = nodes[1].devicesignmessage('0/1', message)
assert(True == nodes[1].verifymessage('peWvjy33QptC2Gz3ww7jTTLPjC2QJmifBR', sig, message))
ro = nodes[1].initaccountfromdevice('test_acc')
assert(ro['extkey'] == 'pparszKXPyRegWYwPacdPduNPNEryRbZDCAiSyo8oZYSsbTjc6FLP4TCPEX58kAeCB6YW9cSdR6fsbpeWDBTgjbkYjXCoD9CNoFVefbkg3exzpQE')
assert(ro['path'] == "m/44'/1'/0'")
ro = nodes[1].extkey('list', 'true')
assert(len(ro) == 1)
assert(ro[0]['path'] == "m/44h/1h/0h")
assert(ro[0]['epkey'] == 'pparszKXPyRegWYwPacdPduNPNEryRbZDCAiSyo8oZYSsbTjc6FLP4TCPEX58kAeCB6YW9cSdR6fsbpeWDBTgjbkYjXCoD9CNoFVefbkg3exzpQE')
assert(ro[0]['label'] == 'test_acc')
assert(ro[0]['hardware_device'] == '0xffff 0x0001')
ro = nodes[1].extkey('account')
n = getIndexAtProperty(ro['chains'], 'use_type', 'stealth_spend')
assert(n > -1)
assert(ro['chains'][n]['path'] == "m/0h/444445h")
addr1_0 = nodes[1].getnewaddress('lbl1_0')
ro = nodes[1].filteraddresses()
assert(len(ro) == 1)
assert(ro[0]['path'] == 'm/0/0')
assert(ro[0]['owned'] == 'true')
assert(ro[0]['label'] == 'lbl1_0')
va_addr1_0 = nodes[1].getaddressinfo(addr1_0)
assert(va_addr1_0['ismine'] == True)
assert(va_addr1_0['iswatchonly'] == False)
assert(va_addr1_0['isondevice'] == True)
assert(va_addr1_0['path'] == 'm/0/0')
try:
nodes[1].getnewstealthaddress()
raise AssertionError('Should have failed.')
except JSONRPCException as e:
pass
extaddr1_0 = nodes[1].getnewextaddress()
txnid0 = nodes[0].sendtoaddress(addr1_0, 6)
txnid1 = nodes[0].sendtoaddress(extaddr1_0, 6)
self.stakeBlocks(1)
block_txns = nodes[0].getblock(nodes[0].getblockhash(nodes[0].getblockcount()))['tx']
assert(txnid0 in block_txns)
assert(txnid1 in block_txns)
ro = nodes[1].getwalletinfo()
assert(isclose(ro['balance'], 12.0))
addr0_0 = nodes[0].getnewaddress()
hexRaw = nodes[1].createrawtransaction([], {addr0_0:10})
hexFunded = nodes[1].fundrawtransaction(hexRaw)['hex']
txDecoded = nodes[1].decoderawtransaction(hexFunded)
ro = nodes[1].devicesignrawtransactionwithwallet(hexFunded)
assert(ro['complete'] == True)
txnid1 = nodes[1].sendrawtransaction(ro['hex'])
self.sync_all()
self.stakeBlocks(1)
ro = nodes[1].devicesignrawtransactionwithwallet(hexFunded)
assert(ro['errors'][0]['error'] == 'Input not found or already spent')
prevtxns = []
for vin in txDecoded['vin']:
rtx = nodes[1].getrawtransaction(vin['txid'], True)
prev_out = rtx['vout'][vin['vout']]
prevtxns.append({'txid': vin['txid'], 'vout': vin['vout'], 'scriptPubKey': prev_out['scriptPubKey']['hex'], 'amount': prev_out['value']})
ro = nodes[1].devicesignrawtransaction(hexFunded, prevtxns, ['0/0', '2/0'])
assert(ro['complete'] == True)
ro = nodes[1].listunspent()
assert(ro[0]['ondevice'] == True)
txnid2 = nodes[1].sendtoaddress(addr0_0, 0.1)
self.sync_all()
nodes[0].syncwithvalidationinterfacequeue()
assert(nodes[0].filtertransactions()[0]['txid'] == txnid2)
hwsxaddr = nodes[1].devicegetnewstealthaddress()
assert(hwsxaddr == 'tps1qqpdwu7gqjqz9s9wfek843akvkzvw0xq3tkzs93sj4ceq60cp54mvzgpqf4tp6d7h0nza2xe362am697dax24hcr33yxqwvq58l5cf6j6q5hkqqqgykgrc')
hwsxaddr2 = nodes[1].devicegetnewstealthaddress('lbl2 4bits', '4', '0xaaaa', True)
assert(hwsxaddr2 == 'tps1qqpewyspjp93axk82zahx5xfjyprpvypfgnp95n9aynxxw3w0qs63acpq0s5z2rwk0raczg8jszl9qy5stncud76ahr5etn9hqmp30e3e86w2qqypgh9sgv0')
ro = nodes[1].getaddressinfo(hwsxaddr2)
assert(ro['prefix_num_bits'] == 4)
assert(ro['prefix_bitfield'] == '0x000a')
assert(ro['isondevice'] == True)
ro = nodes[1].liststealthaddresses()
assert(len(ro[0]['Stealth Addresses']) == 2)
ro = nodes[1].filteraddresses()
assert(len(ro) == 3)
txnid3 = nodes[0].sendtoaddress(hwsxaddr, 0.1, '', '', False, 'test msg')
self.stakeBlocks(1)
ro = nodes[1].listtransactions()
assert(len(ro) == 5)
assert('test msg' in self.dumpj(ro[4]))
ro = nodes[1].listunspent()
inputs = []
for output in ro:
if output['txid'] == txnid3:
inputs.append({'txid' : txnid3, 'vout' : output['vout']})
break
assert(len(inputs) > 0)
hexRaw = nodes[1].createrawtransaction(inputs, {addr0_0:0.09})
ro = nodes[1].devicesignrawtransactionwithwallet(hexRaw)
assert(ro['complete'] == True)
# import privkey in node2
rootkey = nodes[2].extkeyaltversion('xparFdrwJK7K2nfYzrkEqAKr5EcJNdY4c6ZNoLFFx1pMXQSQpo5MAufjogrS17RkqsLAijZJaBDHhG3G7SuJjtsTmRRTEKZDzGMnVCeX59cQCiR')
ro = nodes[2].extkey('import', rootkey, 'master key', True)
ro = nodes[2].extkey('setmaster', ro['id'])
assert(ro['result'] == 'Success.')
ro = nodes[2].extkey('deriveaccount', 'test account')
ro = nodes[2].extkey('setdefaultaccount', ro['account'])
assert(ro['result'] == 'Success.')
ro = nodes[1].extkey('account')
n = getIndexAtProperty(ro['chains'], 'use_type', 'stealth_spend')
assert(n > -1)
assert(ro['chains'][n]['path'] == "m/0h/444445h")
addrtest = nodes[2].getnewaddress()
ro = nodes[1].getdevicepublickey('0/0')
assert(addrtest == ro['address'])
addrtest = nodes[2].getnewstealthaddress('', '0', '', True, True)
assert(addrtest == hwsxaddr)
addrtest2 = nodes[2].getnewstealthaddress('lbl2 4bits', '4', '0xaaaa', True, True)
assert(addrtest2 == hwsxaddr2)
extaddr2_0 = nodes[2].getnewextaddress()
assert(extaddr1_0 == extaddr2_0)
# Ensure account matches after node restarts
account1 = nodes[1].extkey('account')
self.restart_node(1, extra_args=self.extra_args[1] + ['-wallet=default_wallet',])
account1_r = nodes[1].extkey('account')
assert(json.dumps(account1) == json.dumps(account1_r))
# Test for coverage
assert(nodes[1].promptunlockdevice()['sent'] is True)
assert(nodes[1].unlockdevice('123')['unlocked'] is True)
assert_raises_rpc_error(-8, 'Neither a pin nor a passphraseword was provided.', nodes[1].unlockdevice)
assert('complete' in nodes[1].devicebackup())
assert('complete' in nodes[1].deviceloadmnemonic())
if __name__ == '__main__':
USBDeviceTest().main()
|
from mmdet.apis import init_detector, inference_detector, show_result, draw_poly_detections
import mmcv
from mmcv import Config
from mmdet.datasets import get_dataset
import cv2
import os
import numpy as np
from tqdm import tqdm
import DOTA_devkit.polyiou as polyiou
import math
import pdb
def py_cpu_nms_poly_fast_np(dets, thresh):
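    """Polygon NMS: prefilter candidates by axis-aligned bounding-box overlap,
    refine with exact polygon IoU (DOTA_devkit.polyiou), and keep the indices of
    detections whose IoU with every higher-scoring detection is <= thresh."""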
obbs = dets[:, 0:-1]
x1 = np.min(obbs[:, 0::2], axis=1)
y1 = np.min(obbs[:, 1::2], axis=1)
x2 = np.max(obbs[:, 0::2], axis=1)
y2 = np.max(obbs[:, 1::2], axis=1)
scores = dets[:, 8]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
polys = []
for i in range(len(dets)):
tm_polygon = polyiou.VectorDouble([dets[i][0], dets[i][1],
dets[i][2], dets[i][3],
dets[i][4], dets[i][5],
dets[i][6], dets[i][7]])
polys.append(tm_polygon)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
ovr = []
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1)
h = np.maximum(0.0, yy2 - yy1)
hbb_inter = w * h
hbb_ovr = hbb_inter / (areas[i] + areas[order[1:]] - hbb_inter)
h_inds = np.where(hbb_ovr > 0)[0]
tmp_order = order[h_inds + 1]
for j in range(tmp_order.size):
iou = polyiou.iou_poly(polys[i], polys[tmp_order[j]])
hbb_ovr[h_inds[j]] = iou
        # Leftover debugging hook: `ovr` is never populated, so isnan() never runs here.
        try:
            if math.isnan(ovr[0]):
                pdb.set_trace()
        except IndexError:
            pass
inds = np.where(hbb_ovr <= thresh)[0]
order = order[inds + 1]
return keep
class DetectorModel():
def __init__(self,
config_file,
checkpoint_file):
# init RoITransformer
self.config_file = config_file
self.checkpoint_file = checkpoint_file
self.cfg = Config.fromfile(self.config_file)
self.data_test = self.cfg.data['test']
self.dataset = get_dataset(self.data_test)
# self.classnames = self.dataset.CLASSES
self.classnames = ('1', '2', '3', '4', '5')
self.model = init_detector(config_file, checkpoint_file, device='cuda:0')
def inference_single(self, imagname):
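        # Run the detector on a single image and apply per-class polygon NMS (IoU threshold 0.1).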
img = mmcv.imread(imagname)
height, width, channel = img.shape
# slide_h, slide_w = slide_size
# hn, wn = chip_size
# TODO: check the corner case
# import pdb; pdb.set_trace()
total_detections = [np.zeros((0, 9)) for _ in range(len(self.classnames))]
# print(self.classnames)
chip_detections = inference_detector(self.model, img)
# nms
for i in range(5):
keep = py_cpu_nms_poly_fast_np(chip_detections[i], 0.1)
chip_detections[i] = chip_detections[i][keep]
return chip_detections
def inference_single_vis(self, srcpath, dstpath):
detections = self.inference_single(srcpath)
print(detections)
img = draw_poly_detections(srcpath, detections, self.classnames, scale=1, threshold=0.3)
cv2.imwrite(dstpath, img)
if __name__ == '__main__':
import tqdm
roitransformer = DetectorModel(r'configs/Huojianjun/faster_rcnn_RoITrans_r101x_fpn_1x_anchors_augs_augfpn.py',
r'work_dirs/faster_rcnn_RoITrans_r101_all_aug_rote_1333_crop_rote/epoch_278.pth')
# roitransformer.inference_single_vis(r'demo/48.tif',
# r'demo/48_out.tif',
# (1024, 1024),
# (1024, 1024))
threshold=0.0001
class_names=('1', '2', '3', '4', '5')
import os
path="/media/ubuntu/data/huojianjun/科目四/科目四/test2"
file_img_name=os.listdir(path)
result_file=open("./科目四_莘莘学子.txt",'w')
# print(file_img_name)
count=0
def filer(x):
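        # Clamp a polygon coordinate to the [0, 1024] image bounds before writing it out.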
x=int(x)
if x>1024:
return 1024
if x<0:
return 0
else:
return x
for name in tqdm.tqdm(file_img_name):
# count+=1
path_img=os.path.join(path,name)
detection_result=roitransformer.inference_single(path_img)
for j, name_cls in enumerate(class_names):
dets = detection_result[j]
for det in dets:
bbox = det[:8]
score = round(det[-1],2)
if score < threshold:
continue
bbox = list(map(filer, bbox))
# print(bbox)
# print(score)
# print(name_cls)
                result_file.write(name + " " + str(name_cls) + " " + str(score) + " "
                                  + str(bbox[0])
                                  + " " + str(bbox[1]) + " " + str(bbox[2]) + " " + str(bbox[3])
                                  + " " + str(bbox[4]) + " " + str(bbox[5]) + " " + str(bbox[6])
                                  + " " + str(bbox[7]) + "\n")
count+=1
# if name=="3.tif":
# print(count)
# if count==3:
# break
# print(path_img)
|
from __future__ import unicode_literals
import ast
from collections import OrderedDict
import json
import logging
from django.contrib.auth import get_user_model
from rest_framework import fields, serializers
from rest_framework_bulk import BulkSerializerMixin, BulkListSerializer
from . import auth
from .. import exc, models, validators
from ..util import get_field_attr
log = logging.getLogger(__name__)
###############
# Custom Fields
###############
class JSONDataField(fields.Field):
"""
Base field used to represent attributes as JSON <-> ``field_type``.
It is an error if ``field_type`` is not defined in a subclass.
"""
field_type = None
def to_representation(self, value):
return value
def to_internal_value(self, data):
log.debug('JSONDictField.to_internal_value() data = %r', data)
if self.field_type is None:
raise NotImplementedError(
'You must subclass JSONDataField and define field_type'
)
if not data:
data = self.field_type()
if isinstance(data, self.field_type):
return data
# Try it as a regular JSON object
try:
return json.loads(data)
except ValueError:
# Or try it as a Python object
try:
return ast.literal_eval(data)
except (SyntaxError, ValueError) as err:
raise exc.ValidationError(err)
except Exception as err:
raise exc.ValidationError(err)
return data
class JSONDictField(JSONDataField):
"""Field used to represent attributes as JSON <-> Dict."""
field_type = dict
class JSONListField(JSONDataField):
"""Field used to represent attributes as JSON <-> List."""
field_type = list
class MACAddressField(fields.Field):
"""Field used to validate MAC address objects as integer or string."""
def to_representation(self, value):
return value
def to_internal_value(self, value):
return validators.validate_mac_address(value)
###################
# Base Serializer #
###################
class NsotSerializer(serializers.ModelSerializer):
"""Base serializer that logs change events."""
def to_internal_value(self, data):
"""Inject site_pk from view's kwargs if it's not already in data."""
kwargs = self.context['view'].kwargs
log.debug(
'NsotSerializer.to_internal_value() data [before] = %r', data
)
if 'site_id' not in data and 'site_pk' in kwargs:
data['site_id'] = kwargs['site_pk']
log.debug('NsotSerializer.to_internal_value() data [after] = %r', data)
return super(NsotSerializer, self).to_internal_value(data)
def to_representation(self, obj):
"""Always return the dict representation."""
if isinstance(obj, OrderedDict):
return obj
return obj.to_dict()
######
# User
######
class UserSerializer(serializers.ModelSerializer):
"""
UserProxy model serializer that takes optional `with_secret_key` argument
that controls whether the secret_key for the user should be displayed.
"""
def __init__(self, *args, **kwargs):
# Don't pass `with_secret_key` up to the superclass
self.with_secret_key = kwargs.pop('with_secret_key', None)
super(UserSerializer, self).__init__(*args, **kwargs)
# If we haven't passed `with_secret_key`, don't show the secret_key
# field.
if self.with_secret_key is None:
self.fields.pop('secret_key')
permissions = fields.ReadOnlyField(source='get_permissions')
class Meta:
model = get_user_model()
fields = ('id', 'email', 'permissions', 'secret_key')
######
# Site
######
class SiteSerializer(serializers.ModelSerializer):
class Meta:
model = models.Site
fields = '__all__'
#########
# Changes
#########
class ChangeSerializer(NsotSerializer):
"""Used for displaying Change events."""
class Meta:
model = models.Change
fields = '__all__'
###########
# Attribute
###########
class AttributeSerializer(NsotSerializer):
"""Used for GET, DELETE on Attributes."""
class Meta:
model = models.Attribute
fields = '__all__'
class AttributeCreateSerializer(AttributeSerializer):
"""Used for POST on Attributes."""
constraints = JSONDictField(
required=False,
label=get_field_attr(models.Attribute, 'constraints', 'verbose_name'),
help_text=get_field_attr(models.Attribute, 'constraints', 'help_text')
)
site_id = fields.IntegerField(
label=get_field_attr(models.Attribute, 'site', 'verbose_name'),
help_text=get_field_attr(models.Attribute, 'site', 'help_text')
)
class Meta:
model = models.Attribute
fields = ('name', 'description', 'resource_name', 'required',
'display', 'multi', 'constraints', 'site_id')
class AttributeUpdateSerializer(BulkSerializerMixin,
AttributeCreateSerializer):
"""
Used for PUT, PATCH, on Attributes.
Currently because Attributes have only one required field (name), and it
may not be updated, there is not much functional difference between PUT and
PATCH.
"""
class Meta:
model = models.Attribute
list_serializer_class = BulkListSerializer
fields = ('id', 'description', 'required', 'display', 'multi',
'constraints')
#######
# Value
#######
class ValueSerializer(serializers.ModelSerializer):
"""Used for GET, DELETE on Values."""
class Meta:
model = models.Value
fields = ('id', 'name', 'value', 'attribute', 'resource_name',
'resource_id')
# Not sure if we want to view an attribute value w/ so much context just
# yet.
# def to_representation(self, obj):
# return obj.to_dict()
class ValueCreateSerializer(ValueSerializer):
"""Used for POST on Values."""
class Meta:
model = models.Value
read_only_fields = ('id', 'name', 'resource_name')
fields = ('id', 'name', 'value', 'attribute', 'resource_name',
'resource_id')
###########
# Resources
###########
class ResourceSerializer(NsotSerializer):
"""For any object that can have attributes."""
attributes = JSONDictField(
required=False,
help_text='Dictionary of attributes to set.'
)
def create(self, validated_data, commit=True):
"""Create that is aware of attributes."""
# Remove the related fields before we write the object
attributes = validated_data.pop('attributes', {})
# Save the base object to the database.
obj = super(ResourceSerializer, self).create(validated_data)
# Try to populate the related fields and if there are any validation
# problems, delete the object and re-raise the error. If not, save the
# changes.
try:
obj.set_attributes(attributes)
except exc.ValidationError:
obj.delete()
raise
else:
if commit:
obj.save()
return obj
def update(self, instance, validated_data, commit=True):
"""
Update that is aware of attributes.
This will not set attributes if they are not provided during a partial
update.
"""
# Remove related fields before we write the object
attributes = validated_data.pop('attributes', None)
# Save the object to the database.
obj = super(ResourceSerializer, self).update(
instance, validated_data
)
# If attributes have been provided, populate them and save the object,
# allowing any validation errors to raise before saving.
obj.set_attributes(attributes, partial=self.partial)
if commit:
obj.save()
return obj
########
# Device
########
class DeviceSerializer(ResourceSerializer):
"""Used for GET, DELETE on Devices."""
class Meta:
model = models.Device
fields = '__all__'
class DeviceCreateSerializer(DeviceSerializer):
"""Used for POST on Devices."""
site_id = fields.IntegerField(
label=get_field_attr(models.Device, 'site', 'verbose_name'),
help_text=get_field_attr(models.Device, 'site', 'help_text')
)
class Meta:
model = models.Device
fields = ('hostname', 'attributes', 'site_id')
class DeviceUpdateSerializer(BulkSerializerMixin, DeviceCreateSerializer):
"""Used for PUT on Devices."""
attributes = JSONDictField(
required=True,
help_text='Dictionary of attributes to set.'
)
class Meta:
model = models.Device
list_serializer_class = BulkListSerializer
fields = ('id', 'hostname', 'attributes')
class DevicePartialUpdateSerializer(BulkSerializerMixin,
DeviceCreateSerializer):
"""Used for PATCH on Devices."""
class Meta:
model = models.Device
list_serializer_class = BulkListSerializer
fields = ('id', 'hostname', 'attributes')
#########
# Network
#########
class NetworkSerializer(ResourceSerializer):
"""Used for GET, DELETE on Networks."""
class Meta:
model = models.Network
fields = '__all__'
class NetworkCreateSerializer(NetworkSerializer):
"""Used for POST on Networks."""
cidr = fields.CharField(
write_only=True, required=False, label='CIDR',
help_text=(
'IPv4/IPv6 CIDR address. If provided, this overrides the value of '
'network_address & prefix_length. If not provided, '
'network_address & prefix_length are required.'
)
)
network_address = fields.ModelField(
model_field=models.Network._meta.get_field('network_address'),
required=False,
label=get_field_attr(
models.Network, 'network_address', 'verbose_name'
),
help_text=get_field_attr(
models.Network, 'network_address', 'help_text'
),
)
prefix_length = fields.IntegerField(
required=False,
label=get_field_attr(models.Network, 'prefix_length', 'verbose_name'),
help_text=get_field_attr(models.Network, 'prefix_length', 'help_text'),
)
site_id = fields.IntegerField(
label=get_field_attr(models.Network, 'site', 'verbose_name'),
help_text=get_field_attr(models.Network, 'site', 'help_text')
)
class Meta:
model = models.Network
fields = ('cidr', 'network_address', 'prefix_length', 'attributes',
'state', 'site_id')
class NetworkUpdateSerializer(BulkSerializerMixin, NetworkCreateSerializer):
"""Used for PUT on Networks."""
attributes = JSONDictField(
required=True,
help_text='Dictionary of attributes to set.'
)
class Meta:
model = models.Network
list_serializer_class = BulkListSerializer
fields = ('id', 'attributes', 'state')
class NetworkPartialUpdateSerializer(BulkSerializerMixin,
NetworkCreateSerializer):
"""Used for PATCH on Networks."""
class Meta:
model = models.Network
list_serializer_class = BulkListSerializer
fields = ('id', 'attributes', 'state')
###########
# Interface
###########
class InterfaceSerializer(ResourceSerializer):
"""Used for GET, DELETE on Interfaces."""
parent_id = fields.IntegerField(
required=False, allow_null=True,
label=get_field_attr(models.Interface, 'parent', 'verbose_name'),
help_text=get_field_attr(models.Interface, 'parent', 'help_text'),
)
class Meta:
model = models.Interface
fields = '__all__'
def create(self, validated_data):
log.debug('InterfaceCreateSerializer.create() validated_data = %r',
validated_data)
# Remove the related fields before we write the object
addresses = validated_data.pop('addresses', [])
# Create the base object to the database, but don't save attributes
# yet.
obj = super(InterfaceSerializer, self).create(
validated_data, commit=False
)
# Try to populate the related fields and if there are any validation
# problems, delete the object and re-raise the error. If not, save the
# changes.
try:
obj.set_addresses(addresses)
except exc.ValidationError:
obj.delete()
raise
else:
obj.save()
return obj
def update(self, instance, validated_data):
log.debug('InterfaceUpdateSerializer.update() validated_data = %r',
validated_data)
# Remove related fields before we write the object. Attributes are
# handled by the parent.
addresses = validated_data.pop('addresses', None)
# Update the attributes in the database, but don't save them yet.
obj = super(InterfaceSerializer, self).update(
instance, validated_data, commit=False
)
# Assign the address objects to the Interface.
obj.set_addresses(addresses, overwrite=True, partial=self.partial)
obj.save()
return obj
class InterfaceCreateSerializer(InterfaceSerializer):
"""Used for POST on Interfaces."""
addresses = JSONListField(
required=False, help_text='List of host addresses to assign.'
)
mac_address = MACAddressField(
required=False, allow_null=True,
label=get_field_attr(models.Interface, 'mac_address', 'verbose_name'),
help_text=get_field_attr(models.Interface, 'mac_address', 'help_text'),
)
class Meta:
model = models.Interface
fields = ('device', 'name', 'description', 'type', 'mac_address',
'speed', 'parent_id', 'addresses', 'attributes')
class InterfaceUpdateSerializer(BulkSerializerMixin,
InterfaceCreateSerializer):
"Used for PUT on Interfaces."""
addresses = JSONListField(
required=True, help_text='List of host addresses to assign.'
)
attributes = JSONDictField(
required=True,
help_text='Dictionary of attributes to set.'
)
class Meta:
model = models.Interface
list_serializer_class = BulkListSerializer
fields = ('id', 'name', 'description', 'type', 'mac_address', 'speed',
'parent_id', 'addresses', 'attributes')
class InterfacePartialUpdateSerializer(BulkSerializerMixin,
InterfaceCreateSerializer):
"Used for PATCH on Interfaces."""
class Meta:
model = models.Interface
list_serializer_class = BulkListSerializer
fields = ('id', 'name', 'description', 'type', 'mac_address', 'speed',
'parent_id', 'addresses', 'attributes')
#########
# Circuit
#########
class CircuitSerializer(ResourceSerializer):
"""Used for GET, DELETE on Circuits"""
class Meta:
model = models.Circuit
fields = '__all__'
class CircuitCreateSerializer(CircuitSerializer):
"""Used for POST on Circuits."""
class Meta:
model = models.Circuit
# Display name and site are auto-generated, don't include them here
fields = ('endpoint_a', 'endpoint_z', 'name', 'attributes')
class CircuitUpdateSerializer(BulkSerializerMixin, CircuitCreateSerializer):
"""Used for PUT on Circuits."""
attributes = JSONDictField(
required=True, help_text='Dictionary of attributes to set.'
)
class Meta:
model = models.Circuit
list_serializer_class = BulkListSerializer
fields = ('id', 'endpoint_a', 'endpoint_z', 'name', 'attributes')
class CircuitPartialUpdateSerializer(BulkSerializerMixin,
CircuitCreateSerializer):
"""Used for PATCH on Circuits."""
class Meta:
model = models.Circuit
list_serializer_class = BulkListSerializer
fields = ('id', 'endpoint_a', 'endpoint_z', 'name', 'attributes')
###########
# AuthToken
###########
class AuthTokenSerializer(serializers.Serializer):
"""
AuthToken authentication serializer to validate username/secret_key inputs.
"""
email = serializers.CharField(help_text='Email address of the user.')
secret_key = serializers.CharField(
label='Secret Key', help_text='Secret key of the user.'
)
def validate(self, attrs):
email = attrs.get('email')
secret_key = attrs.get('secret_key')
if email and secret_key:
auth_func = auth.SecretKeyAuthentication().authenticate_credentials
user, secret_key = auth_func(email, secret_key)
if user:
if not user.is_active:
msg = 'User account is disabled.'
raise exc.ValidationError(msg)
attrs['user'] = user
return attrs
else:
msg = 'Unable to login with provided credentials.'
raise exc.ValidationError(msg)
else:
msg = 'Must include "email" and "secret_key"'
raise exc.ValidationError(msg)
|
# coding: utf-8
import datetime
import pytest
import numpy as np
from ...models.transition.linear import ConstantVelocity
from ...predictor.kalman import (
KalmanPredictor, ExtendedKalmanPredictor, UnscentedKalmanPredictor,
SqrtKalmanPredictor)
from ...types.prediction import GaussianStatePrediction
from ...types.state import GaussianState, SqrtGaussianState
from ...types.track import Track
@pytest.mark.parametrize(
"PredictorClass, transition_model, prior_mean, prior_covar",
[
( # Standard Kalman
KalmanPredictor,
ConstantVelocity(noise_diff_coeff=0.1),
np.array([[-6.45], [0.7]]),
np.array([[4.1123, 0.0013],
[0.0013, 0.0365]])
),
( # Extended Kalman
ExtendedKalmanPredictor,
ConstantVelocity(noise_diff_coeff=0.1),
np.array([[-6.45], [0.7]]),
np.array([[4.1123, 0.0013],
[0.0013, 0.0365]])
),
( # Unscented Kalman
UnscentedKalmanPredictor,
ConstantVelocity(noise_diff_coeff=0.1),
np.array([[-6.45], [0.7]]),
np.array([[4.1123, 0.0013],
[0.0013, 0.0365]])
)
],
ids=["standard", "extended", "unscented"]
)
def test_kalman(PredictorClass, transition_model,
prior_mean, prior_covar):
# Define time related variables
timestamp = datetime.datetime.now()
timediff = 2 # 2sec
new_timestamp = timestamp + datetime.timedelta(seconds=timediff)
time_interval = new_timestamp - timestamp
# Define prior state
prior = GaussianState(prior_mean,
prior_covar,
timestamp=timestamp)
transition_model_matrix = transition_model.matrix(time_interval=time_interval)
transition_model_covar = transition_model.covar(time_interval=time_interval)
# Calculate evaluation variables
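    # Standard Kalman prediction step: x_pred = F @ x and P_pred = F @ P @ F.T + Q,
    # with F and Q taken from the transition model over the elapsed time interval.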
eval_prediction = GaussianStatePrediction(
transition_model_matrix @ prior.mean,
transition_model_matrix@prior.covar@transition_model_matrix.T + transition_model_covar)
# Initialise a kalman predictor
predictor = PredictorClass(transition_model=transition_model)
# Perform and assert state prediction
prediction = predictor.predict(prior=prior,
timestamp=new_timestamp)
assert np.allclose(prediction.mean,
eval_prediction.mean, 0, atol=1.e-14)
assert np.allclose(prediction.covar,
eval_prediction.covar, 0, atol=1.e-14)
assert prediction.timestamp == new_timestamp
# TODO: Test with Control Model
def test_lru_cache():
predictor = KalmanPredictor(ConstantVelocity(noise_diff_coeff=0))
timestamp = datetime.datetime.now()
state = GaussianState([[0.], [1.]], np.diag([1., 1.]), timestamp)
track = Track([state])
prediction_time = timestamp + datetime.timedelta(seconds=1)
prediction1 = predictor.predict(track, prediction_time)
assert np.array_equal(prediction1.state_vector, np.array([[1.], [1.]]))
prediction2 = predictor.predict(track, prediction_time)
assert prediction2 is prediction1
track.append(GaussianState([[1.], [1.]], np.diag([1., 1.]), prediction_time))
prediction3 = predictor.predict(track, prediction_time)
assert prediction3 is not prediction1
def test_sqrt_kalman():
# Define time related variables
timestamp = datetime.datetime.now()
timediff = 2 # 2sec
new_timestamp = timestamp + datetime.timedelta(seconds=timediff)
# Define prior state
prior_mean = np.array([[-6.45], [0.7]])
prior_covar = np.array([[4.1123, 0.0013],
[0.0013, 0.0365]])
prior = GaussianState(prior_mean,
prior_covar,
timestamp=timestamp)
sqrt_prior_covar = np.linalg.cholesky(prior_covar)
sqrt_prior = SqrtGaussianState(prior_mean, sqrt_prior_covar,
timestamp=timestamp)
transition_model = ConstantVelocity(noise_diff_coeff=0.1)
# Initialise a kalman predictor
predictor = KalmanPredictor(transition_model=transition_model)
sqrt_predictor = SqrtKalmanPredictor(transition_model=transition_model)
# Can swap out this method
sqrt_predictor = SqrtKalmanPredictor(transition_model=transition_model, qr_method=True)
# Perform and assert state prediction
prediction = predictor.predict(prior=prior, timestamp=new_timestamp)
sqrt_prediction = sqrt_predictor.predict(prior=sqrt_prior,
timestamp=new_timestamp)
assert np.allclose(prediction.mean, sqrt_prediction.mean, 0, atol=1.e-14)
assert np.allclose(prediction.covar,
sqrt_prediction.sqrt_covar@sqrt_prediction.sqrt_covar.T, 0,
atol=1.e-14)
assert np.allclose(prediction.covar, sqrt_prediction.covar, 0, atol=1.e-14)
assert prediction.timestamp == sqrt_prediction.timestamp
|
import numpy as np
import cv2
import imageMarker
lucas_kanade_params = dict(
winSize= (4, 4),
maxLevel= 3, #level of pyramids used
criteria= (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)
)
def mark_features_on_all_images(images, features_coordinates):
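    """Track the given seed coordinates through `images` with pyramidal Lucas-Kanade
    optical flow, drawing a marker at each tracked point; returns the marked frames,
    the per-frame coordinates, and the status arrays from calcOpticalFlowPyrLK."""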
marked_images = []
marked_frame_coordinates = []
last_gs_img = cv2.cvtColor(images[0], cv2.COLOR_BGR2GRAY)
p0 = []
for coordinate in features_coordinates:
p0.append([coordinate,])
p0 = np.float32(p0)
mask = np.zeros_like(images[0])
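    # `mask` stays empty here, so cv2.add(frame, mask) below returns the frame unchanged;
    # it could hold drawn track lines as in the OpenCV Lucas-Kanade demo.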
status_arr = []
for fr in range(1, len(images)):
marked_coordinates = []
if images[fr] is None:
print('change detection problematic frame', fr)
print('len of given images', len(images))
frame = images[fr].copy()
gs_img = cv2.cvtColor(images[fr], cv2.COLOR_BGR2GRAY)
p1, st, err = cv2.calcOpticalFlowPyrLK(last_gs_img, gs_img, p0, None, **lucas_kanade_params)
status_arr.append(st)
if p1 is None:
marked_images.append(frame)
            marked_frame_coordinates.append(features_coordinates if not marked_frame_coordinates else marked_frame_coordinates[-1])
continue
new_points = []
for index in range(len(p1)):
if st[index] == 1:
new_points.append(p1[index])
else:
new_points.append(p0[index])
new_points = np.array(new_points)
for index, point in enumerate(new_points):
x, y = point.ravel()
marked_coordinates.append([x,y])
imageMarker.mark_image_at_point(frame, int(y), int(x), 9, imageMarker.colors[index])
marked_frame_coordinates.append(marked_coordinates)
img = cv2.add(frame,mask)
marked_images.append(img)
# update last frame and point
last_gs_img = gs_img.copy()
p0 = new_points.reshape(-1,1,2)
return marked_images, marked_frame_coordinates, status_arr
|
def handle(foo, **args, <error descr="multiple ** parameters are not allowed">**moreargs</error>):
print(foo, args, moreargs)
def handle(foo, **args: int, <error descr="multiple ** parameters are not allowed">**moreargs: int</error>):
print(foo, args, moreargs)
|
"""
ANN for evaluating model biases, differences, and other thresholds using
explainable AI (add warmth/cool GFDL-CM3 model only)
Reference : Barnes et al. [2020, JAMES]
Author : Zachary M. Labe
Date : 20 July 2021
Version : 4 - subsamples random weight class (#8) for mmmean
"""
### Import packages
import sys
import math
import time
import matplotlib.pyplot as plt
import numpy as np
import keras.backend as K
from keras.layers import Dense, Activation
from keras import regularizers
from keras import metrics
from keras import optimizers
from keras.models import Sequential
import tensorflow.keras as keras
import tensorflow as tf
import pandas as pd
import random
import scipy.stats as stats
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import palettable.cubehelix as cm
import cmocean as cmocean
import calc_Utilities as UT
import calc_dataFunctions as df
import calc_Stats as dSS
import calc_LRPclass as LRP
import innvestigate
from sklearn.metrics import accuracy_score
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=DeprecationWarning)
### Prevent tensorflow 2.+ deprecation warnings
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
### LRP param
DEFAULT_NUM_BWO_ITERATIONS = 200
DEFAULT_BWO_LEARNING_RATE = .001
### Plotting defaults
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
###############################################################################
###############################################################################
###############################################################################
### Data preliminaries
directorydataLLL = '/Users/zlabe/Data/LENS/monthly'
directorydataENS = '/Users/zlabe/Data/SMILE/'
directorydataBB = '/Users/zlabe/Data/BEST/'
directorydataEE = '/Users/zlabe/Data/ERA5/'
directoryoutput = '/Users/zlabe/Documents/Research/ModelComparison/Data/'
###############################################################################
###############################################################################
modelGCMs = ['CCCma_canesm2','MPI','CSIRO_MK3.6','KNMI_ecearth',
'GFDL_CM3','GFDL_ESM2M','lens']
datasetsingle = ['SMILE']
dataset_obs = 'ERA5BE'
seasons = ['annual']
variq = 'T2M'
reg_name = 'LowerArctic'
timeper = 'historical'
###############################################################################
###############################################################################
# pickSMILE = ['CCCma_canesm2','CSIRO_MK3.6','KNMI_ecearth',
# 'GFDL_ESM2M','lens']
# pickSMILE = ['CCCma_canesm2','MPI','lens']
pickSMILE = []
if len(pickSMILE) >= 1:
lenOfPicks = len(pickSMILE)
else:
lenOfPicks = len(modelGCMs)
###############################################################################
###############################################################################
land_only = False
ocean_only = False
if land_only == True:
maskNoiseClass = 'land'
elif ocean_only == True:
maskNoiseClass = 'ocean'
else:
maskNoiseClass = 'none'
###############################################################################
###############################################################################
rm_merid_mean = False
rm_annual_mean = False
###############################################################################
###############################################################################
rm_ensemble_mean = False
rm_observational_mean = False
###############################################################################
###############################################################################
calculate_anomalies = False
if calculate_anomalies == True:
if timeper == 'historical':
baseline = np.arange(1951,1980+1,1)
elif timeper == 'future':
baseline = np.arange(2021,2050+1,1)
else:
print(ValueError('WRONG TIMEPER!'))
###############################################################################
###############################################################################
window = 0
ensTypeExperi = 'ENS'
# shuffletype = 'TIMEENS'
# shuffletype = 'ALLENSRAND'
# shuffletype = 'ALLENSRANDrmmean'
shuffletype = 'RANDGAUSS'
sizeOfTwin = 4 # name of experiment for adding noise class #8
if sizeOfTwin > 0:
sizeOfTwinq = 1
else:
sizeOfTwinq = sizeOfTwin
###############################################################################
###############################################################################
factorObs = 10 # factor to add to obs
###############################################################################
###############################################################################
if ensTypeExperi == 'ENS':
if window == 0:
rm_standard_dev = False
if timeper == 'historical':
yearsall = np.arange(1950,2019+1,1)
elif timeper == 'future':
yearsall = np.arange(2020,2099+1,1)
else:
print(ValueError('WRONG TIMEPER!'))
sys.exit()
ravel_modelens = False
ravelmodeltime = False
else:
rm_standard_dev = True
if timeper == 'historical':
yearsall = np.arange(1950+window,2019+1,1)
elif timeper == 'future':
yearsall = np.arange(2020+window,2099+1,1)
else:
print(ValueError('WRONG TIMEPER!'))
sys.exit()
ravelmodeltime = False
ravel_modelens = True
elif ensTypeExperi == 'GCM':
if window == 0:
rm_standard_dev = False
yearsall = np.arange(1950,2019+1,1)
ravel_modelens = False
ravelmodeltime = False
else:
rm_standard_dev = True
if timeper == 'historical':
yearsall = np.arange(1950,2019+1,1)
elif timeper == 'future':
yearsall = np.arange(2020,2099+1,1)
else:
print(ValueError('WRONG TIMEPER!'))
sys.exit()
ravelmodeltime = False
ravel_modelens = True
###############################################################################
###############################################################################
numOfEns = 16
lensalso = True
if len(pickSMILE) == 0:
if modelGCMs[-1] == 'RANDOM':
randomalso = True
else:
randomalso = False
elif len(pickSMILE) != 0:
if pickSMILE[-1] == 'RANDOM':
randomalso = True
else:
randomalso = False
lentime = len(yearsall)
###############################################################################
###############################################################################
ravelyearsbinary = False
ravelbinary = False
num_of_class = lenOfPicks + sizeOfTwinq
###############################################################################
###############################################################################
lrpRule = 'z'
normLRP = True
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Picking experiment to save
typeOfAnalysis = 'issueWithExperiment'
# Experiment #1
if rm_ensemble_mean == True:
if window > 1:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-1'
# Experiment #2
if rm_ensemble_mean == True:
if window == 0:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-2'
# Experiment #3 (raw data)
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-3'
if variq == 'T2M':
integer = 20 # random noise value to add/subtract from each grid point
elif variq == 'P':
integer = 20 # random noise value to add/subtract from each grid point
elif variq == 'SLP':
integer = 20 # random noise value to add/subtract from each grid point
# Experiment #4
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == True:
typeOfAnalysis = 'Experiment-4'
if variq == 'T2M':
integer = 25 # random noise value to add/subtract from each grid point
elif variq == 'P':
integer = 15 # random noise value to add/subtract from each grid point
elif variq == 'SLP':
integer = 5 # random noise value to add/subtract from each grid point
# Experiment #5
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == True:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-5'
# Experiment #6
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == True:
if rm_annual_mean == True:
typeOfAnalysis = 'Experiment-6'
# Experiment #7
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == True:
if rm_merid_mean == False:
if rm_observational_mean == True:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-7'
# Experiment #8
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == True:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-8'
if variq == 'T2M':
integer = 1 # random noise value to add/subtract from each grid point
elif variq == 'P':
integer = 1 # random noise value to add/subtract from each grid point
elif variq == 'SLP':
integer = 5 # random noise value to add/subtract from each grid point
# Experiment #9
if rm_ensemble_mean == False:
if window > 1:
if calculate_anomalies == True:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-9'
print('\n<<<<<<<<<<<< Analysis == %s (%s) ! >>>>>>>>>>>>>>>\n' % (typeOfAnalysis,timeper))
if typeOfAnalysis == 'issueWithExperiment':
sys.exit('Wrong parameters selected to analyze')
### Select how to save files
if land_only == True:
saveData = timeper + '_' + seasons[0] + '_LAND' + '_NoiseTwinSingleMODDIF4_AddingWARMTH-toGFDL%s_' % (factorObs) + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
elif ocean_only == True:
saveData = timeper + '_' + seasons[0] + '_OCEAN' + '_NoiseTwinSingleMODDIF4_AddingWARMTH-toGFDL%s_' % (factorObs) + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
else:
saveData = timeper + '_' + seasons[0] + '_NoiseTwinSingleMODDIF4_AddingWARMTH-toGFDL%s_' % (factorObs) + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
print('*Filename == < %s >' % saveData)
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Create sample class labels for each model for my own testing
### Appends a twin set of classes for the random noise class
if seasons != 'none':
classesl = np.empty((lenOfPicks,numOfEns,len(yearsall)))
for i in range(lenOfPicks):
classesl[i,:,:] = np.full((numOfEns,len(yearsall)),i)
if sizeOfTwin > 0:
### Add random noise models
randomNoiseClass = np.full((sizeOfTwinq,numOfEns,len(yearsall)),i+1)
classesl = np.append(classesl,randomNoiseClass,axis=0)
if ensTypeExperi == 'ENS':
classeslnew = np.swapaxes(classesl,0,1)
elif ensTypeExperi == 'GCM':
classeslnew = classesl
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Begin ANN and the entire script
for sis,singlesimulation in enumerate(datasetsingle):
lrpsns = []
for seas in range(len(seasons)):
###############################################################################
###############################################################################
###############################################################################
### ANN preliminaries
simuqq = datasetsingle[0]
monthlychoice = seasons[seas]
lat_bounds,lon_bounds = UT.regions(reg_name)
directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'
experiment_result = pd.DataFrame(columns=['actual iters','hiddens','cascade',
'RMSE Train','RMSE Test',
'ridge penalty','zero mean',
'zero merid mean','land only?','ocean only?'])
### Define primary dataset to use
dataset = singlesimulation
modelType = dataset
### Whether to test and plot the results using obs data
if dataset_obs == '20CRv3':
year_obsall = np.arange(yearsall[sis].min(),2015+1,1)
elif dataset_obs == 'ERA5':
year_obsall = np.arange(1979+window,2019+1,1)
if rm_standard_dev == False:
year_obsall = np.arange(1979,2019+1,1)
elif dataset_obs == 'ERA5BE':
year_obsall = np.arange(1950+window,2019+1,1)
if rm_standard_dev == False:
year_obsall = np.arange(1950,2019+1,1)
if monthlychoice == 'DJF':
obsyearstart = year_obsall.min()+1
year_obs = year_obsall[1:]
else:
obsyearstart = year_obsall.min()
year_obs = year_obsall
### Remove the annual mean? True to subtract it from dataset ##########
if rm_annual_mean == True:
directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'
        ### Remove the ensemble mean? True to subtract it from dataset ##########
if rm_ensemble_mean == True:
directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'
### Split the data into training and testing sets? value of 1 will use all
### data as training
segment_data_factor = .75
### Hiddens corresponds to the number of hidden layers the nnet will use - 0
### for linear model, or a list [10, 20, 5] for multiple layers of nodes
### (10 nodes in first layer, 20 in second, etc); The "loop" part
### allows you to loop through multiple architectures. For example,
### hiddens_loop = [[2,4],[0],[1 1 1]] would produce three separate NNs, the
### first with 2 hidden layers of 2 and 4 nodes, the next the linear model,
### and the next would be 3 hidden layers of 1 node each.
### Set useGPU to True to use the GPU, but only if you selected the GPU
### Runtime in the menu at the top of this page
useGPU = False
### Set Cascade to True to utilize the nnet's cascade function
cascade = False
### Plot within the training loop - may want to set to False when testing out
        ### larger sets of parameters
plot_in_train = False
###############################################################################
###############################################################################
###############################################################################
### Read in model and observational/reanalysis data
def read_primary_dataset(variq,dataset,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds):
data,lats,lons = df.readFiles(variq,dataset,monthlychoice,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper)
datar,lats,lons = df.getRegion(data,lats,lons,lat_bounds,lon_bounds)
print('\nOur dataset: ',dataset,' is shaped',data.shape)
return datar,lats,lons
def read_obs_dataset(variq,dataset_obs,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds):
data_obs,lats_obs,lons_obs = df.readFiles(variq,dataset_obs,monthlychoice,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper)
data_obs,lats_obs,lons_obs = df.getRegion(data_obs,lats_obs,lons_obs,
lat_bounds,lon_bounds)
print('our OBS dataset: ',dataset_obs,' is shaped',data_obs.shape)
return data_obs,lats_obs,lons_obs
###############################################################################
###############################################################################
###############################################################################
### Select data to test, train on
def segment_data(data,classesl,ensTypeExperi,fac = segment_data_factor):
global random_segment_seed,trainIndices,testIndices
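            # Use (or draw and record) a seed so the train/test split of ensemble members is reproducible.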
            if random_segment_seed is None:
                random_segment_seed = int(np.random.randint(1, 100000))
np.random.seed(random_segment_seed)
###############################################################################
###############################################################################
###############################################################################
###################################################################
### Large Ensemble experiment
if ensTypeExperi == 'ENS':
### Flip GCM and ensemble member axes
datanew = np.swapaxes(data,0,1)
classeslnew = np.swapaxes(classesl,0,1)
if fac < 1 :
nrows = datanew.shape[0]
segment_train = int(np.round(nrows * fac))
segment_test = nrows - segment_train
print('Training on',segment_train,'ensembles, testing on',segment_test)
### Picking out random ensembles
i = 0
trainIndices = list()
while i < segment_train:
line = np.random.randint(0, nrows)
if line not in trainIndices:
trainIndices.append(line)
i += 1
else:
pass
i = 0
testIndices = list()
while i < segment_test:
line = np.random.randint(0, nrows)
if line not in trainIndices:
if line not in testIndices:
testIndices.append(line)
i += 1
else:
pass
### Training segment----------
data_train = np.empty((len(trainIndices),datanew.shape[1],
datanew.shape[2],datanew.shape[3],
datanew.shape[4]))
Ytrain = np.empty((len(trainIndices),classeslnew.shape[1],
classeslnew.shape[2]))
for index,ensemble in enumerate(trainIndices):
data_train[index,:,:,:,:] = datanew[ensemble,:,:,:,:]
Ytrain[index,:,:] = classeslnew[ensemble,:,:]
### Random ensembles are picked
if debug:
print('\nTraining on ensembles: ',trainIndices)
print('Testing on ensembles: ',testIndices)
print('\norg data - shape', datanew.shape)
print('training data - shape', data_train.shape)
### Reshape into X and Y
Xtrain = data_train.reshape((data_train.shape[0]*data_train.shape[1]*data_train.shape[2]),(data_train.shape[3]*data_train.shape[4]))
Ytrain = Ytrain.reshape((Ytrain.shape[0]*Ytrain.shape[1]*Ytrain.shape[2]))
Xtrain_shape = (data_train.shape[0])
### Testing segment----------
data_test = np.empty((len(testIndices),datanew.shape[1],
datanew.shape[2],datanew.shape[3],
datanew.shape[4]))
Ytest = np.empty((len(testIndices),classeslnew.shape[1],
classeslnew.shape[2]))
for index,ensemble in enumerate(testIndices):
data_test[index,:,:,:,:] = datanew[ensemble,:,:,:,:]
Ytest[index,:,:] = classeslnew[ensemble,:,:]
### Random ensembles are picked
if debug:
print('Training on ensembles: %s' % len(trainIndices))
print('Testing on ensembles: %s' % len(testIndices))
print('\norg data - shape', datanew.shape)
print('testing data - shape', data_test.shape)
### Reshape into X and Y
Xtest= data_test.reshape((data_test.shape[0]*data_test.shape[1]*data_test.shape[2]),(data_test.shape[3]*data_test.shape[4]))
Ytest = Ytest.reshape((Ytest.shape[0]*Ytest.shape[1]*Ytest.shape[2]))
Xtest_shape = (data_test.shape[0], data_test.shape[1])
data_train_shape = data_train.shape[0]
data_test_shape = data_test.shape[0]
### 'unlock' the random seed
np.random.seed(None)
### One-hot vectors
Ytrain = keras.utils.to_categorical(Ytrain)
Ytest = keras.utils.to_categorical(Ytest)
### Class weights
class_weight = class_weight_creator(Ytrain)
###############################################################################
###############################################################################
###############################################################################
###################################################################
### GCM type experiments without ensembles
elif ensTypeExperi == 'GCM':
if data.ndim == 5:
datanew = np.reshape(data,(data.shape[0]*data.shape[1],data.shape[2],data.shape[3],data.shape[4]))
classeslnew = np.reshape(classesl,(classesl.shape[0]*classesl.shape[1],classesl.shape[2]))
else:
datanew = data
classeslnew = classesl
if fac < 1 :
nrows = datanew.shape[1]
segment_train = int(np.floor(nrows * fac))
segment_test = nrows - segment_train
print('Training on',segment_train,'years, testing on',segment_test)
### Picking out contiguous years for training and testing
firstyears = int(np.floor(segment_test/2))
lastyears = -int(np.floor(segment_test/2))
trainIndices = np.arange(firstyears,firstyears+segment_train,1)
testIndices = np.append(np.arange(firstyears),np.arange(trainIndices[-1]+1,nrows,1),axis=0)
### Training segment----------
data_train = np.empty((datanew.shape[0],len(trainIndices),
datanew.shape[2],datanew.shape[3]))
Ytrain = np.empty((classeslnew.shape[0],len(trainIndices)))
for index,ensemble in enumerate(trainIndices):
data_train[:,index,:,:] = datanew[:,ensemble,:,:]
Ytrain[:,index] = classeslnew[:,ensemble]
### Years have been selected for training
if debug:
print('\nTraining on years: ',trainIndices)
print('Testing on years: ',testIndices)
print('\norg data - shape', datanew.shape)
print('training data - shape', data_train.shape)
### Reshape into X and Y
Xtrain = data_train.reshape((data_train.shape[0]*data_train.shape[1]),(data_train.shape[2]*data_train.shape[3]))
Ytrain = Ytrain.reshape((Ytrain.shape[0]*Ytrain.shape[1]))
Xtrain_shape = (data_train.shape[0])
### Testing segment----------
data_test = np.empty((datanew.shape[0],len(testIndices),
datanew.shape[2],datanew.shape[3]))
Ytest = np.empty((classeslnew.shape[0],len(testIndices)))
for index,ensemble in enumerate(testIndices):
data_test[:,index,:,:] = datanew[:,ensemble,:,:]
Ytest[:,index] = classeslnew[:,ensemble]
### Years have been selected for testing
if debug:
print('Training on years: %s' % len(trainIndices))
print('Testing on years: %s' % len(testIndices))
print('\norg data - shape', datanew.shape)
print('testing data - shape', data_test.shape)
### Reshape into X and Y
Xtest= data_test.reshape((data_test.shape[0]*data_test.shape[1]),(data_test.shape[2]*data_test.shape[3]))
Ytest = Ytest.reshape((Ytest.shape[0]*Ytest.shape[1]))
Xtest_shape = (data_test.shape[0], data_test.shape[1])
data_train_shape = data_train.shape[0]
data_test_shape = data_test.shape[0]
### 'unlock' the random seed
np.random.seed(None)
### One-hot vectors
Ytrain = keras.utils.to_categorical(Ytrain)
Ytest = keras.utils.to_categorical(Ytest)
### Class weights
class_weight = class_weight_creator(Ytrain)
else:
raise ValueError('WRONG EXPERIMENT!')
return Xtrain,Ytrain,Xtest,Ytest,Xtest_shape,Xtrain_shape,data_train_shape,data_test_shape,testIndices,trainIndices,class_weight
###############################################################################
###############################################################################
###############################################################################
### Plotting functions
def adjust_spines(ax, spines):
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', 5))
else:
spine.set_color('none')
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks([])
###############################################################################
###############################################################################
###############################################################################
### Create a class weight dictionary to help if the classes are unbalanced
def class_weight_creator(Y):
class_dict = {}
weights = np.max(np.sum(Y, axis=0)) / np.sum(Y, axis=0)
for i in range( Y.shape[-1] ):
class_dict[i] = weights[i]
return class_dict
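### Minimal usage sketch for class_weight_creator (hypothetical toy labels):
# Ytoy = keras.utils.to_categorical([0, 0, 1])  # two samples of class 0, one of class 1
# class_weight_creator(Ytoy)                    # -> {0: 1.0, 1: 2.0}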
###############################################################################
###############################################################################
###############################################################################
### Neural Network Creation & Training
class TimeHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.times = []
def on_epoch_begin(self, epoch, logs={}):
self.epoch_time_start = time.time()
def on_epoch_end(self, epoch, logs={}):
self.times.append(time.time() - self.epoch_time_start)
def defineNN(hidden, input_shape, output_shape, ridgePenalty):
model = Sequential()
### Initialize first layer
### Model is a single node with activation function
model.add(Dense(hidden[0],input_shape=(input_shape,),
activation=actFun, use_bias=True,
kernel_regularizer=regularizers.l1_l2(l1=0.00,l2=ridgePenalty),
bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),
kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))
### Initialize other layers
for layer in hidden[1:]:
model.add(Dense(layer,activation=actFun,
use_bias=True,
kernel_regularizer=regularizers.l1_l2(l1=0.00,l2=0.00),
bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),
kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))
print('\nTHIS IS AN ANN!\n')
#### Initialize output layer
model.add(Dense(output_shape,activation=None,use_bias=True,
kernel_regularizer=regularizers.l1_l2(l1=0.00, l2=0.00),
bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),
kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))
### Add softmax layer at the end
model.add(Activation('softmax'))
return model
def trainNN(model, Xtrain, Ytrain, niter, class_weight, verbose):
global lr_here, batch_size
lr_here = 0.001
model.compile(optimizer=optimizers.SGD(lr=lr_here,
momentum=0.9,nesterov=True),
loss = 'categorical_crossentropy',
metrics=[metrics.categorical_accuracy])
# model.compile(optimizer=optimizers.Nadam(lr=lr_here),
# loss = 'categorical_crossentropy',
# metrics=[metrics.categorical_accuracy])
### Declare the relevant model parameters
batch_size = 24
print('----ANN Training: learning rate = '+str(lr_here)+'; activation = '+actFun+'; batch = '+str(batch_size) + '----')
### Callbacks
time_callback = TimeHistory()
early_stopping = keras.callbacks.EarlyStopping(monitor='loss',
patience=2,
verbose=1,
mode='auto')
history = model.fit(Xtrain,Ytrain,batch_size=batch_size,epochs=niter,
shuffle=True,verbose=verbose,
callbacks=[time_callback,early_stopping],
validation_split=0.)
print('******** done training ***********')
return model, history
def test_train_loopClass(Xtrain,Ytrain,Xtest,Ytest,iterations,ridge_penalty,hiddens,class_weight,plot_in_train=True):
"""or loops to iterate through training iterations, ridge penalty,
and hidden layer list
"""
results = {}
global nnet,random_network_seed
for niter in iterations:
for penalty in ridge_penalty:
for hidden in hiddens:
### Check / use random seed
if random_network_seed == None:
np.random.seed(None)
random_network_seed = int(np.random.randint(1, 100000))
np.random.seed(random_network_seed)
random.seed(random_network_seed)
tf.set_random_seed(0)
### Standardize the data
Xtrain,Xtest,stdVals = dSS.standardize_data(Xtrain,Xtest)
Xmean,Xstd = stdVals
### Define the model
model = defineNN(hidden,
input_shape=np.shape(Xtrain)[1],
output_shape=np.shape(Ytrain)[1],
ridgePenalty=penalty)
### Train the net
model, history = trainNN(model,Xtrain,
Ytrain,niter,class_weight,verbose=1)
### After training, use the network with training data to
### check that we don't have any errors and output RMSE
rmse_train = dSS.rmse(Ytrain,model.predict(Xtrain))
if type(Ytest) != bool:
rmse_test = dSS.rmse(Ytest,model.predict(Xtest))
else:
rmse_test = False
this_result = {'iters': niter,
'hiddens' : hidden,
'RMSE Train' : rmse_train,
'RMSE Test' : rmse_test,
'ridge penalty': penalty,
'zero mean' : rm_annual_mean,
'zero merid mean' : rm_merid_mean,
'land only?' : land_only,
'ocean only?' : ocean_only,
'Segment Seed' : random_segment_seed,
'Network Seed' : random_network_seed }
results.update(this_result)
global experiment_result
experiment_result = experiment_result.append(results,
ignore_index=True)
### If plot_in_train is True, plot each iteration's training loss
if plot_in_train == True:
plt.figure()
plt.subplot(1,1,1)
plt.plot(history.history['loss'],label = 'training')
plt.title(history.history['loss'][-1])
plt.xlabel('epoch')
plt.xlim(2,len(history.history['loss'])-1)
plt.legend()
plt.grid(True)
plt.show()
#'unlock' the random seed
np.random.seed(None)
random.seed(None)
tf.set_random_seed(None)
return experiment_result, model
###############################################################################
###############################################################################
###############################################################################
### Results
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
K.clear_session()
### Parameters
debug = True
NNType = 'ANN'
avgHalfChunk = 0
option4 = True
biasBool = False
hiddensList = [[10,10]]
ridge_penalty = [0.1]
# hiddensList = [[8,8]]
# ridge_penalty = [0.2]
actFun = 'relu'
if any([maskNoiseClass=='land',maskNoiseClass=='ocean']):
debug = True
NNType = 'ANN'
avgHalfChunk = 0
option4 = True
biasBool = False
hiddensList = [[8,8]]
ridge_penalty = [0.10]
actFun = 'relu'
expList = [(0)] # (0,1)
expN = np.size(expList)
iterations = [100]
random_segment = True
foldsN = 1
for avgHalfChunk in (0,):
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
K.clear_session()
for loop in ([0]):
### Get info about the region
lat_bounds,lon_bounds = UT.regions(reg_name)
data_all,lats,lons = read_primary_dataset(variq,dataset,
numOfEns,lensalso,
randomalso,
ravelyearsbinary,
ravelbinary,
shuffletype,
lat_bounds,
lon_bounds)
data_obs_all,lats_obs,lons_obs = read_obs_dataset(variq,
dataset_obs,
numOfEns,
lensalso,
randomalso,
ravelyearsbinary,
ravelbinary,
shuffletype,
lat_bounds,
lon_bounds)
###############################################################################
###############################################################################
###############################################################################
for exp in expList:
### Get the data together
data, data_obs = data_all, data_obs_all
###############################################################################
if len(pickSMILE) >= 1:
data = dSS.pickSmileModels(data,modelGCMs,pickSMILE)
print('\n*Pick models to analyze from %s*\n' % pickSMILE)
###############################################################################
if calculate_anomalies == True:
data, data_obs = dSS.calculate_anomalies(data,data_obs,
lats,lons,baseline,yearsall)
print('\n*Calculate anomalies for %s-%s*\n' % (baseline.min(),baseline.max()))
###############################################################################
if rm_annual_mean == True:
data, data_obs = dSS.remove_annual_mean(data,data_obs,
lats,lons,
lats_obs,lons_obs)
print('\n*Removed annual mean*\n')
###############################################################################
if rm_merid_mean == True:
data, data_obs = dSS.remove_merid_mean(data,data_obs,
lats,lons,
lats_obs,lons_obs)
print('\n*Removed meridional mean*\n')
###############################################################################
if rm_ensemble_mean == True:
data = dSS.remove_ensemble_mean(data,ravel_modelens,
ravelmodeltime,
rm_standard_dev,
numOfEns)
print('\n*Removed ensemble mean*')
###############################################################################
if rm_standard_dev == True:
data = dSS.rm_standard_dev(data,window,ravelmodeltime,
numOfEns)
print('\n*Removed standard deviation*')
###############################################################################
if rm_observational_mean == True:
data = dSS.remove_observations_mean(data,data_obs,lats,lons)
print('\n*Removed observational data*')
###############################################################################
if land_only == True:
data, data_obs = dSS.remove_ocean(data,data_obs,
lat_bounds,
lon_bounds)
print('\n*Removed ocean data*')
###############################################################################
if ocean_only == True:
data, data_obs = dSS.remove_land(data,data_obs,
lat_bounds,
lon_bounds)
print('\n*Removed land data*')
###############################################################################
### Adding random data
if sizeOfTwin > 0:
random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/ModelComparison/Data/SelectedSegmentSeed.txt',unpack=True))
data = dSS.addNoiseTwinSingle(data,data_obs,integer,sizeOfTwin,random_segment_seed,maskNoiseClass,lat_bounds,lon_bounds)
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Modify only the GFDL-CM3 model, warming or cooling that single model
print('\n <<< FACTOR FOR OBS IS %s! >>>\n' % factorObs)
if factorObs == 0:
data = data
elif factorObs == 1: # warm its mean state
GFDL = data[4,:,:,:,:]
GFDLwarmer = GFDL + 3
data[4,:,:,:,:] = GFDLwarmer
elif factorObs == 2: # cool its mean state
GFDL = data[4,:,:,:,:]
GFDLcooler = GFDL - 3
data[4,:,:,:,:] = GFDLcooler
elif factorObs == 3: # warm recent 10 years
GFDL = data[4,:,:,:,:]
GFDLbefore = GFDL[:,:-10,:,:]
GFDLafter = GFDL[:,-10:,:,:] + 3
GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)
data[4,:,:,:,:] = GFDLq
elif factorObs == 4: # cool recent 10 years
GFDL = data[4,:,:,:,:]
GFDLbefore = GFDL[:,:-10,:,:]
GFDLafter = GFDL[:,-10:,:,:] - 3
GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)
data[4,:,:,:,:] = GFDLq
elif factorObs == 5: # warm the North Pole
sizeofNP = 10
GFDL = data[4,:,:,:,:]
warmerNP = np.zeros((GFDL.shape[0],GFDL.shape[1],GFDL.shape[2]-sizeofNP,GFDL.shape[3])) + 5
addtoclimoNP = GFDL[:,:,sizeofNP:,:] + warmerNP
GFDL[:,:,sizeofNP:,:] = addtoclimoNP
data[4,:,:,:,:] = GFDL
elif factorObs == 6: # cool the North Pole
sizeofNP = 10
GFDL = data[4,:,:,:,:]
coolerNP = np.zeros((GFDL.shape[0],GFDL.shape[1],GFDL.shape[2]-sizeofNP,GFDL.shape[3])) - 5
addtoclimoNP = GFDL[:,:,sizeofNP:,:] + coolerNP
GFDL[:,:,sizeofNP:,:] = addtoclimoNP
data[4,:,:,:,:] = GFDL
elif factorObs == 7: # warm the Lower Arctic
sizeofLA = 5
GFDL = data[4,:,:,:,:]
warmerLA = np.zeros((GFDL.shape[0],GFDL.shape[1],sizeofLA,GFDL.shape[3])) + 5
addtoclimoLA = GFDL[:,:,:sizeofLA,:] + warmerLA
GFDL[:,:,:sizeofLA,:] = addtoclimoLA
data[4,:,:,:,:] = GFDL
elif factorObs == 8: # cool the Lower Arctic
sizeofLA = 5
GFDL = data[4,:,:,:,:]
coolerLA = np.zeros((GFDL.shape[0],GFDL.shape[1],sizeofLA,GFDL.shape[3])) - 5
addtoclimoLA = GFDL[:,:,:sizeofLA,:] + coolerLA
GFDL[:,:,:sizeofLA,:] = addtoclimoLA
data[4,:,:,:,:] = GFDL
elif factorObs == 9: # warm early 50 years
GFDL = data[4,:,:,:,:]
GFDLafter = GFDL[:,50:,:,:]
GFDLbefore = GFDL[:,:50,:,:] + 3
GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)
data[4,:,:,:,:] = GFDLq
elif factorObs == 10: # cool early 50 years
GFDL = data[4,:,:,:,:]
GFDLafter = GFDL[:,50:,:,:]
GFDLbefore = GFDL[:,:50,:,:] - 3
GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)
data[4,:,:,:,:] = GFDLq
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Loop over folds
for loop in np.arange(0,foldsN):
K.clear_session()
#---------------------------
# random_segment_seed = 34515
random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/ModelComparison/Data/SelectedSegmentSeed.txt',unpack=True))
#---------------------------
Xtrain,Ytrain,Xtest,Ytest,Xtest_shape,Xtrain_shape,data_train_shape,data_test_shape,testIndices,trainIndices,class_weight = segment_data(data,classesl,ensTypeExperi,segment_data_factor)
YtrainClassMulti = Ytrain
YtestClassMulti = Ytest
# For use later
XtrainS,XtestS,stdVals = dSS.standardize_data(Xtrain,Xtest)
Xmean, Xstd = stdVals
#---------------------------
random_network_seed = 87750
#---------------------------
# Create and train network
exp_result,model = test_train_loopClass(Xtrain,
YtrainClassMulti,
Xtest,
YtestClassMulti,
iterations=iterations,
ridge_penalty=ridge_penalty,
hiddens=hiddensList,class_weight=class_weight,
plot_in_train = True)
model.summary()
################################################################################################################################################
# save the model
dirname = '/Users/zlabe/Desktop/ModelComparison_v1/'
savename = modelType+'_'+variq+'_kerasMultiClassBinaryOption4'+'_' + NNType + '_L2_'+ str(ridge_penalty[0])+ '_LR_' + str(lr_here)+ '_Batch'+ str(batch_size)+ '_Iters' + str(iterations[0]) + '_' + str(hiddensList[0][0]) + 'x' + str(hiddensList[0][-1]) + '_SegSeed' + str(random_segment_seed) + '_NetSeed'+ str(random_network_seed)
savenameModelTestTrain = modelType+'_'+variq+'_modelTrainTest_SegSeed'+str(random_segment_seed)+'_NetSeed'+str(random_network_seed)
if(reg_name=='Globe'):
regSave = ''
else:
regSave = '_' + reg_name
if(rm_annual_mean==True):
savename = savename + '_AnnualMeanRemoved'
savenameModelTestTrain = savenameModelTestTrain + '_AnnualMeanRemoved'
if(rm_ensemble_mean==True):
savename = savename + '_EnsembleMeanRemoved'
savenameModelTestTrain = savenameModelTestTrain + '_EnsembleMeanRemoved'
savename = savename + regSave
# model.save(dirname + savename + '.h5')
# np.savez(dirname + savenameModelTestTrain + '.npz',trainModels=trainIndices,testModels=testIndices,Xtrain=Xtrain,Ytrain=Ytrain,Xtest=Xtest,Ytest=Ytest,Xmean=Xmean,Xstd=Xstd,lats=lats,lons=lons)
print('saving ' + savename)
###############################################################
### Make final plot
### Get obs
dataOBSERVATIONS = data_obs
latsOBSERVATIONS = lats_obs
lonsOBSERVATIONS = lons_obs
Xobs = dataOBSERVATIONS.reshape(dataOBSERVATIONS.shape[0],dataOBSERVATIONS.shape[1]*dataOBSERVATIONS.shape[2])
annType = 'class'
if monthlychoice == 'DJF':
startYear = yearsall[sis].min()+1
endYear = yearsall[sis].max()
else:
startYear = yearsall[sis].min()
endYear = yearsall[sis].max()
years = np.arange(startYear,endYear+1,1)
Xmeanobs = np.nanmean(Xobs,axis=0)
Xstdobs = np.nanstd(Xobs,axis=0)
XobsS = (Xobs-Xmeanobs)/Xstdobs
XobsS[np.isnan(XobsS)] = 0
xtrainpred = (Xtrain-Xmean)/Xstd
xtrainpred[np.isnan(xtrainpred)] = 0
xtestpred = (Xtest-Xmean)/Xstd
xtestpred[np.isnan(xtestpred)] = 0
if(annType=='class'):
YpredObs = model.predict(XobsS)
YpredTrain = model.predict(xtrainpred)
YpredTest = model.predict(xtestpred)
#######################################################
#######################################################
#######################################################
### Check null hypothesis of random data!
randarray,latsra,lonsra = read_primary_dataset(variq,'RANDOM',
numOfEns,lensalso,
randomalso,
ravelyearsbinary,
ravelbinary,
shuffletype,
lat_bounds,
lon_bounds)
randarrayn = randarray.reshape(randarray.shape[0],randarray.shape[1]*randarray.shape[2])
randarraymean = np.nanmean(randarrayn,axis=0)
randarraystd = np.nanstd(randarrayn,axis=0)
randarrayS = (randarrayn-randarraymean)/randarraystd
### Prediction on random data
YpredRand = model.predict(randarrayS)
#######################################################
#######################################################
#######################################################
### Get output from model
trainingout = YpredTrain
testingout = YpredTest
if ensTypeExperi == 'ENS':
classesltrain = classeslnew[trainIndices,:,:].ravel()
classesltest = classeslnew[testIndices,:,:].ravel()
elif ensTypeExperi == 'GCM':
classesltrain = classeslnew[:,:,trainIndices].ravel()
classesltest = classeslnew[:,:,testIndices].ravel()
### Random data tests
randout = YpredRand
labelsrand = np.argmax(randout,axis=1)
uniquerand,countrand = np.unique(labelsrand,return_counts=True)
np.savetxt(directoryoutput + 'RandLabels_' + saveData + '.txt',labelsrand)
np.savetxt(directoryoutput + 'RandConfid_' + saveData + '.txt',randout)
### Observations
obsout = YpredObs
labelsobs = np.argmax(obsout,axis=1)
uniqueobs,countobs = np.unique(labelsobs,return_counts=True)
print(labelsobs)
np.savetxt(directoryoutput + 'obsLabels_' + saveData + '.txt',labelsobs)
np.savetxt(directoryoutput + 'obsConfid_' + saveData + '.txt',obsout)
def truelabel(data):
"""
Calculate argmax
"""
maxindexdata= np.argmax(data[:,:],axis=1)
return maxindexdata
def accuracyTotalTime(data_pred,data_true):
"""
Compute accuracy for the entire time series
"""
data_truer = data_true
data_predr = data_pred
accdata_pred = accuracy_score(data_truer,data_predr)
return accdata_pred
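### Minimal sketch (hypothetical arrays) of how the two helpers combine:
# preds = np.array([[0.1, 0.9], [0.8, 0.2]])    # two softmax outputs
# truelabel(preds)                              # -> array([1, 0])
# accuracyTotalTime(truelabel(preds), [1, 1])   # -> 0.5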
##############################################################################
##############################################################################
##############################################################################
indextrain = truelabel(trainingout)
acctrain = accuracyTotalTime(indextrain,classesltrain)
indextest = truelabel(testingout)
acctest = accuracyTotalTime(indextest,classesltest)
print('\n\nAccuracy Training == ',acctrain)
print('Accuracy Testing == ',acctest)
## Save the output for plotting
np.savetxt(directoryoutput + 'trainingEnsIndices_' + saveData + '.txt',trainIndices)
np.savetxt(directoryoutput + 'testingEnsIndices_' + saveData + '.txt',testIndices)
np.savetxt(directoryoutput + 'trainingTrueLabels_' + saveData + '.txt',classesltrain)
np.savetxt(directoryoutput + 'testingTrueLabels_' + saveData + '.txt',classesltest)
np.savetxt(directoryoutput + 'trainingPredictedLabels_' + saveData + '.txt',indextrain)
np.savetxt(directoryoutput + 'testingPredictedLabels_' + saveData + '.txt',indextest)
### See more details
model.layers[0].get_config()
## Define variable for analysis
print('\n\n------------------------')
print(variq,'= Variable!')
print(monthlychoice,'= Time!')
print(reg_name,'= Region!')
print(lat_bounds,lon_bounds)
print(dataset,'= Model!')
print(dataset_obs,'= Observations!\n')
print(rm_annual_mean,'= rm_annual_mean')
print(rm_merid_mean,'= rm_merid_mean')
print(rm_ensemble_mean,'= rm_ensemble_mean')
print(land_only,'= land_only')
print(ocean_only,'= ocean_only')
## Variables for plotting
lons2,lats2 = np.meshgrid(lons,lats)
observations = data_obs
modeldata = data
modeldatamean = np.nanmean(modeldata,axis=1)
spatialmean_obs = UT.calc_weightedAve(observations,lats2)
spatialmean_mod = UT.calc_weightedAve(modeldata,lats2)
spatialmean_modmean = np.nanmean(spatialmean_mod,axis=1)
plt.figure()
plt.plot(yearsall,spatialmean_modmean.transpose())
plt.plot(yearsall,spatialmean_modmean.transpose()[:,4],linewidth=3,color='red',label=r'GFDL-CM3 - %s-Experiment' % factorObs)
plt.xlabel('Years')
plt.ylabel('Average Arctic Temperature')
plt.legend()
plt.ylim([-14.5,-1])
plt.savefig('/Users/zlabe/Desktop/factor-%s.png' % factorObs,dpi=300)
plt.figure()
plt.plot(spatialmean_obs)
##############################################################################
##############################################################################
##############################################################################
## Visualizing through LRP
numLats = lats.shape[0]
numLons = lons.shape[0]
numDim = 3
##############################################################################
##############################################################################
##############################################################################
lrpall = LRP.calc_LRPModel(model,np.append(XtrainS,XtestS,axis=0),
np.append(Ytrain,Ytest,axis=0),
biasBool,annType,num_of_class,
yearsall,lrpRule,normLRP,
numLats,numLons,numDim)
meanlrp = np.nanmean(lrpall,axis=0)
fig=plt.figure()
plt.contourf(meanlrp,300,cmap=cmocean.cm.thermal)
### For training data only
lrptrain = LRP.calc_LRPModel(model,XtrainS,Ytrain,biasBool,
annType,num_of_class,
yearsall,lrpRule,normLRP,
numLats,numLons,numDim)
### For testing data only
lrptest = LRP.calc_LRPModel(model,XtestS,Ytest,biasBool,
annType,num_of_class,
yearsall,lrpRule,normLRP,
numLats,numLons,numDim)
### For observations data only
lrpobservations = LRP.calc_LRPObs(model,XobsS,biasBool,annType,
num_of_class,yearsall,lrpRule,
normLRP,numLats,numLons,numDim)
### For random data only
lrprandom = LRP.calc_LRPObs(model,randarrayS,biasBool,annType,
num_of_class,yearsall,lrpRule,
normLRP,numLats,numLons,numDim)
##############################################################################
##############################################################################
##############################################################################
def netcdfLRP(lats,lons,var,directory,typemodel,saveData):
print('\n>>> Using netcdfLRP function!')
from netCDF4 import Dataset
import numpy as np
name = 'LRPMap' + typemodel + '_' + saveData + '.nc'
filename = directory + name
ncfile = Dataset(filename,'w',format='NETCDF4')
ncfile.description = 'LRP maps for the selected seed'
### Dimensions
ncfile.createDimension('years',var.shape[0])
ncfile.createDimension('lat',var.shape[1])
ncfile.createDimension('lon',var.shape[2])
### Variables
years = ncfile.createVariable('years','f4',('years'))
latitude = ncfile.createVariable('lat','f4',('lat'))
longitude = ncfile.createVariable('lon','f4',('lon'))
varns = ncfile.createVariable('LRP','f4',('years','lat','lon'))
### Units
varns.units = 'unitless relevance'
ncfile.title = 'LRP relevance'
ncfile.institution = 'Colorado State University'
ncfile.references = 'Barnes et al. [2020]'
### Data
years[:] = np.arange(var.shape[0])
latitude[:] = lats
longitude[:] = lons
varns[:] = var
ncfile.close()
print('*Completed: Created netCDF4 File!')
netcdfLRP(lats,lons,lrpall,directoryoutput,'AllData',saveData)
netcdfLRP(lats,lons,lrptrain,directoryoutput,'Training',saveData)
netcdfLRP(lats,lons,lrptest,directoryoutput,'Testing',saveData)
netcdfLRP(lats,lons,lrpobservations,directoryoutput,'Obs',saveData)
|
#!/usr/bin/python
# (c) 2013, Paul Durivage <paul.durivage@rackspace.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module; it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_files_objects
short_description: Upload, download, and delete objects in Rackspace Cloud Files
description:
- Upload, download, and delete objects in Rackspace Cloud Files
version_added: "1.5"
options:
clear_meta:
description:
- Optionally clear existing metadata when applying metadata to existing objects.
Selecting this option is only appropriate when setting type=meta
choices:
- "yes"
- "no"
default: "no"
container:
description:
- The container to use for file object operations.
required: true
default: null
dest:
description:
- The destination of a "get" operation; i.e. a local directory, "/home/user/myfolder".
Used to specify the destination of an operation on a remote object; i.e. a file name,
"file1", or a comma-separated list of remote objects, "file1,file2,file17"
expires:
description:
- Used to set an expiration on a file or folder uploaded to Cloud Files.
Requires an integer, specifying expiration in seconds
default: null
meta:
description:
- A hash of items to set as metadata values on an uploaded file or folder
default: null
method:
description:
- The method of operation to be performed. For example, put to upload files
to Cloud Files, get to download files from Cloud Files or delete to delete
remote objects in Cloud Files
choices:
- get
- put
- delete
default: get
src:
description:
- Source from which to upload files. Used to specify a remote object as a source for
an operation, i.e. a file name, "file1", or a comma-separated list of remote objects,
"file1,file2,file17". src and dest are mutually exclusive on remote-only object operations
default: null
structure:
description:
- Used to specify whether to maintain nested directory structure when downloading objects
from Cloud Files. Setting to false downloads the contents of a container to a single,
flat directory
choices:
- yes
- "no"
default: "yes"
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
type:
description:
- Type of object to do work on
- Metadata object or a file object
choices:
- file
- meta
default: file
author: "Paul Durivage (@angstwad)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: "Test Cloud Files Objects"
hosts: local
gather_facts: False
tasks:
- name: "Get objects from test container"
rax_files_objects:
container: testcont
dest: ~/Downloads/testcont
- name: "Get single object from test container"
rax_files_objects:
container: testcont
src: file1
dest: ~/Downloads/testcont
- name: "Get several objects from test container"
rax_files_objects:
container: testcont
src: file1,file2,file3
dest: ~/Downloads/testcont
- name: "Delete one object in test container"
rax_files_objects:
container: testcont
method: delete
dest: file1
- name: "Delete several objects in test container"
rax_files_objects:
container: testcont
method: delete
dest: file2,file3,file4
- name: "Delete all objects in test container"
rax_files_objects:
container: testcont
method: delete
- name: "Upload all files to test container"
rax_files_objects:
container: testcont
method: put
src: ~/Downloads/onehundred
- name: "Upload one file to test container"
rax_files_objects:
container: testcont
method: put
src: ~/Downloads/testcont/file1
- name: "Upload one file to test container with metadata"
rax_files_objects:
container: testcont
src: ~/Downloads/testcont/file2
method: put
meta:
testkey: testdata
who_uploaded_this: someuser@example.com
- name: "Upload one file to test container with TTL of 60 seconds"
rax_files_objects:
container: testcont
method: put
src: ~/Downloads/testcont/file3
expires: 60
- name: "Attempt to get remote object that does not exist"
rax_files_objects:
container: testcont
method: get
src: FileThatDoesNotExist.jpg
dest: ~/Downloads/testcont
ignore_errors: yes
- name: "Attempt to delete remote object that does not exist"
rax_files_objects:
container: testcont
method: delete
dest: FileThatDoesNotExist.jpg
ignore_errors: yes
- name: "Test Cloud Files Objects Metadata"
hosts: local
gather_facts: false
tasks:
- name: "Get metadata on one object"
rax_files_objects:
container: testcont
type: meta
dest: file2
- name: "Get metadata on several objects"
rax_files_objects:
container: testcont
type: meta
src: file2,file1
- name: "Set metadata on an object"
rax_files_objects:
container: testcont
type: meta
dest: file17
method: put
meta:
key1: value1
key2: value2
clear_meta: true
- name: "Verify metadata is set"
rax_files_objects:
container: testcont
type: meta
src: file17
- name: "Delete metadata"
rax_files_objects:
container: testcont
type: meta
dest: file17
method: delete
meta:
key1: ''
key2: ''
- name: "Get metadata on all objects"
rax_files_objects:
container: testcont
type: meta
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
EXIT_DICT = dict(success=False)
META_PREFIX = 'x-object-meta-'
def _get_container(module, cf, container):
try:
return cf.get_container(container)
except pyrax.exc.NoSuchContainer as e:
module.fail_json(msg=e.message)
def _upload_folder(cf, folder, container, ttl=None, headers=None):
""" Uploads a folder to Cloud Files.
"""
total_bytes = 0
for root, dirs, files in os.walk(folder):
for fname in files:
full_path = os.path.join(root, fname)
obj_name = os.path.relpath(full_path, folder)
obj_size = os.path.getsize(full_path)
cf.upload_file(container, full_path,
obj_name=obj_name, return_none=True, ttl=ttl, headers=headers)
total_bytes += obj_size
return total_bytes
def upload(module, cf, container, src, dest, meta, expires):
""" Uploads a single object or a folder to Cloud Files Optionally sets an
metadata, TTL value (expires), or Content-Disposition and Content-Encoding
headers.
"""
if not src:
module.fail_json(msg='src must be specified when uploading')
c = _get_container(module, cf, container)
src = os.path.abspath(os.path.expanduser(src))
is_dir = os.path.isdir(src)
if not is_dir and not os.path.isfile(src) or not os.path.exists(src):
module.fail_json(msg='src must be a file or a directory')
if dest and is_dir:
module.fail_json(msg='dest cannot be set when whole '
'directories are uploaded')
cont_obj = None
total_bytes = 0
if dest and not is_dir:
try:
cont_obj = c.upload_file(src, obj_name=dest, ttl=expires, headers=meta)
except Exception as e:
module.fail_json(msg=e.message)
elif is_dir:
try:
total_bytes = _upload_folder(cf, src, c, ttl=expires, headers=meta)
except Exception as e:
module.fail_json(msg=e.message)
else:
try:
cont_obj = c.upload_file(src, ttl=expires, headers=meta)
except Exception as e:
module.fail_json(msg=e.message)
EXIT_DICT['success'] = True
EXIT_DICT['container'] = c.name
EXIT_DICT['msg'] = "Uploaded %s to container: %s" % (src, c.name)
if cont_obj or total_bytes > 0:
EXIT_DICT['changed'] = True
if meta:
EXIT_DICT['meta'] = dict(updated=True)
if cont_obj:
EXIT_DICT['bytes'] = cont_obj.total_bytes
EXIT_DICT['etag'] = cont_obj.etag
else:
EXIT_DICT['bytes'] = total_bytes
module.exit_json(**EXIT_DICT)
def download(module, cf, container, src, dest, structure):
""" Download objects from Cloud Files to a local path specified by "dest".
Optionally disable maintaining a directory structure by passing a
false value to "structure".
"""
# Looking for an explicit destination
if not dest:
module.fail_json(msg='dest is a required argument when '
'downloading from Cloud Files')
# Attempt to fetch the container by name
c = _get_container(module, cf, container)
# Accept a single object name or a comma-separated list of objs
# If not specified, get the entire container
if src:
objs = src.split(',')
objs = map(str.strip, objs)
else:
objs = c.get_object_names()
dest = os.path.abspath(os.path.expanduser(dest))
is_dir = os.path.isdir(dest)
if not is_dir:
module.fail_json(msg='dest must be a directory')
results = []
for obj in objs:
try:
c.download_object(obj, dest, structure=structure)
except Exception as e:
module.fail_json(msg=e.message)
else:
results.append(obj)
len_results = len(results)
len_objs = len(objs)
EXIT_DICT['container'] = c.name
EXIT_DICT['requested_downloaded'] = results
if results:
EXIT_DICT['changed'] = True
if len_results == len_objs:
EXIT_DICT['success'] = True
EXIT_DICT['msg'] = "%s objects downloaded to %s" % (len_results, dest)
else:
EXIT_DICT['msg'] = "Error: only %s of %s objects were " \
"downloaded" % (len_results, len_objs)
module.exit_json(**EXIT_DICT)
def delete(module, cf, container, src, dest):
""" Delete specific objects by proving a single file name or a
comma-separated list to src OR dest (but not both). Omitting file name(s)
assumes the entire container is to be deleted.
"""
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
"have been specified on both src and dest args")
elif dest:
objs = dest
else:
objs = src
c = _get_container(module, cf, container)
if objs:
objs = objs.split(',')
objs = map(str.strip, objs)
else:
objs = c.get_object_names()
num_objs = len(objs)
results = []
for obj in objs:
try:
result = c.delete_object(obj)
except Exception as e:
module.fail_json(msg=e.message)
else:
results.append(result)
num_deleted = results.count(True)
EXIT_DICT['container'] = c.name
EXIT_DICT['deleted'] = num_deleted
EXIT_DICT['requested_deleted'] = objs
if num_deleted:
EXIT_DICT['changed'] = True
if num_objs == num_deleted:
EXIT_DICT['success'] = True
EXIT_DICT['msg'] = "%s objects deleted" % num_deleted
else:
EXIT_DICT['msg'] = ("Error: only %s of %s objects "
"deleted" % (num_deleted, num_objs))
module.exit_json(**EXIT_DICT)
def get_meta(module, cf, container, src, dest):
""" Get metadata for a single file, comma-separated list, or entire
container
"""
c = _get_container(module, cf, container)
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
"have been specified on both src and dest args")
elif dest:
objs = dest
else:
objs = src
if objs:
objs = objs.split(',')
objs = map(str.strip, objs)
else:
objs = c.get_object_names()
results = dict()
for obj in objs:
try:
meta = c.get_object(obj).get_metadata()
except Exception as e:
module.fail_json(msg=e.message)
else:
results[obj] = dict()
for k, v in meta.items():
meta_key = k.split(META_PREFIX)[-1]
results[obj][meta_key] = v
EXIT_DICT['container'] = c.name
if results:
EXIT_DICT['meta_results'] = results
EXIT_DICT['success'] = True
module.exit_json(**EXIT_DICT)
def put_meta(module, cf, container, src, dest, meta, clear_meta):
""" Set metadata on a container, single file, or comma-separated list.
Passing a true value to clear_meta clears the metadata stored in Cloud
Files before setting the new metadata to the value of "meta".
"""
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; files to set meta"
" have been specified on both src and dest args")
elif dest:
objs = dest
else:
objs = src
objs = objs.split(',')
objs = map(str.strip, objs)
c = _get_container(module, cf, container)
results = []
for obj in objs:
try:
result = c.get_object(obj).set_metadata(meta, clear=clear_meta)
except Exception as e:
module.fail_json(msg=e.message)
else:
results.append(result)
EXIT_DICT['container'] = c.name
EXIT_DICT['success'] = True
if results:
EXIT_DICT['changed'] = True
EXIT_DICT['num_changed'] = len(results)
module.exit_json(**EXIT_DICT)
def delete_meta(module, cf, container, src, dest, meta):
""" Removes metadata keys and values specified in meta, if any. Deletes on
all objects specified by src or dest (but not both), if any; otherwise it
deletes keys on all objects in the container
"""
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; meta keys to be "
"deleted have been specified on both src and dest"
" args")
elif dest:
objs = dest
else:
objs = src
objs = objs.split(',')
objs = map(str.strip, objs)
c = _get_container(module, cf, container)
results = [] # Num of metadata keys removed, not objects affected
for obj in objs:
if meta:
for k, v in meta.items():
try:
result = c.get_object(obj).remove_metadata_key(k)
except Exception as e:
module.fail_json(msg=e.message)
else:
results.append(result)
else:
try:
o = c.get_object(obj)
except pyrax.exc.NoSuchObject as e:
module.fail_json(msg=e.message)
for k, v in o.get_metadata().items():
try:
result = o.remove_metadata_key(k)
except Exception as e:
module.fail_json(msg=e.message)
results.append(result)
EXIT_DICT['container'] = c.name
EXIT_DICT['success'] = True
if results:
EXIT_DICT['changed'] = True
EXIT_DICT['num_deleted'] = len(results)
module.exit_json(**EXIT_DICT)
def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta,
structure, expires):
""" Dispatch from here to work with metadata or file objects """
cf = pyrax.cloudfiles
if cf is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if typ == "file":
if method == 'put':
upload(module, cf, container, src, dest, meta, expires)
elif method == 'get':
download(module, cf, container, src, dest, structure)
elif method == 'delete':
delete(module, cf, container, src, dest)
else:
if method == 'get':
get_meta(module, cf, container, src, dest)
if method == 'put':
put_meta(module, cf, container, src, dest, meta, clear_meta)
if method == 'delete':
delete_meta(module, cf, container, src, dest, meta)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
container=dict(required=True),
src=dict(),
dest=dict(),
method=dict(default='get', choices=['put', 'get', 'delete']),
type=dict(default='file', choices=['file', 'meta']),
meta=dict(type='dict', default=dict()),
clear_meta=dict(default=False, type='bool'),
structure=dict(default=True, type='bool'),
expires=dict(type='int'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
container = module.params.get('container')
src = module.params.get('src')
dest = module.params.get('dest')
method = module.params.get('method')
typ = module.params.get('type')
meta = module.params.get('meta')
clear_meta = module.params.get('clear_meta')
structure = module.params.get('structure')
expires = module.params.get('expires')
if clear_meta and not typ == 'meta':
module.fail_json(msg='clear_meta can only be used when setting metadata')
setup_rax_module(module, pyrax)
cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires)
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
if __name__ == '__main__':
main()
|
class _OBJECT_ATTRIBUTES(_OBJECT_ATTRIBUTES):
@classmethod
def from_string(cls, path, attributes=OBJ_CASE_INSENSITIVE): # Directly on constructor ?
self = cls()
self.Length = ctypes.sizeof(self)
self.RootDirectory = 0
self.ObjectName = ctypes.pointer(LSA_UNICODE_STRING.from_string(path))
self.Attributes = attributes
self.SecurityDescriptor = 0
self.SecurityQualityOfService = 0
return self
def __repr__(self):
if not self.ObjectName:
return super(_OBJECT_ATTRIBUTES, self).__repr__()
return """<{0} ObjectName="{1}">""".format(type(self).__name__, self.ObjectName[0].str)
|
import pytest
from bitgesellx.lib.script import OpCodes, is_unspendable_legacy, is_unspendable_genesis
@pytest.mark.parametrize("script, iug", (
(bytes([OpCodes.OP_RETURN]), False),
(bytes([OpCodes.OP_RETURN]) + bytes([2, 28, 50]), False),
(bytes([OpCodes.OP_0, OpCodes.OP_RETURN]), True),
(bytes([OpCodes.OP_0, OpCodes.OP_RETURN]) + bytes([2, 28, 50]), True)
))
def test_op_return_legacy(script, iug):
assert is_unspendable_legacy(script)
assert is_unspendable_genesis(script) is iug
@pytest.mark.parametrize("script", (
bytes([]),
bytes([OpCodes.OP_1, OpCodes.OP_RETURN]) + bytes([2, 28, 50]),
bytes([OpCodes.OP_0]),
bytes([OpCodes.OP_0, OpCodes.OP_1]),
bytes([OpCodes.OP_HASH160]),
))
def test_not_op_return(script):
assert not is_unspendable_legacy(script)
assert not is_unspendable_genesis(script)
|
# -*- coding: utf-8 -*-
import aiounittest
from datetime import datetime
from .fixtures_aio import fixture_data, Worker, Workers, Account
from graphenecommon import exceptions
class Testcases(aiounittest.AsyncTestCase):
def setUp(self):
fixture_data()
async def test_worker(self):
w = await Worker("1.14.139")
self.assertIsInstance(w["work_end_date"], datetime)
self.assertIsInstance(w["work_begin_date"], datetime)
self.assertIsInstance(w["daily_pay"], int)
account = await w.account
self.assertIsInstance(account, Account)
self.assertEqual(account["id"], "1.2.100")
await Worker(w)
async def test_nonexist(self):
with self.assertRaises(exceptions.WorkerDoesNotExistsException):
await Worker("foobar")
async def test_workers(self):
ws = await Workers()
self.assertEqual(len(ws), 2)
|
# pylint: disable=unused-import
import os
import docker
import pytest
from dagster_celery_k8s.launcher import CeleryK8sRunLauncher
from dagster_k8s_test_infra.helm import TEST_AWS_CONFIGMAP_NAME
from dagster_k8s_test_infra.integration_utils import image_pull_policy
from dagster_test.test_project import build_and_tag_test_image, get_test_project_docker_image
from dagster_k8s_test_infra.cluster import ( # isort:skip
dagster_instance,
dagster_instance_for_user_deployments_subchart_disabled,
dagster_instance_for_daemon,
define_cluster_provider_fixture,
helm_postgres_url,
helm_postgres_url_for_user_deployments_subchart_disabled,
helm_postgres_url_for_daemon,
)
pytest_plugins = ["dagster_k8s_test_infra.helm"]
cluster_provider = define_cluster_provider_fixture()
IS_BUILDKITE = os.getenv("BUILDKITE") is not None
@pytest.fixture(scope="session")
def dagster_docker_image():
docker_image = get_test_project_docker_image()
if not IS_BUILDKITE:
try:
client = docker.from_env()
client.images.get(docker_image)
print( # pylint: disable=print-call
"Found existing image tagged {image}, skipping image build. To rebuild, first run: "
"docker rmi {image}".format(image=docker_image)
)
except docker.errors.ImageNotFound:
build_and_tag_test_image(docker_image)
return docker_image
# See: https://stackoverflow.com/a/31526934/324449
def pytest_addoption(parser):
# We catch the ValueError to support cases where we are loading multiple test suites, e.g., in
# the VSCode test explorer. When pytest tries to add an option twice, we get, e.g.
#
# ValueError: option names {'--cluster-provider'} already added
# Use kind or some other cluster provider?
try:
parser.addoption("--cluster-provider", action="store", default="kind")
except ValueError:
pass
# Specify an existing kind cluster name to use
try:
parser.addoption("--kind-cluster", action="store")
except ValueError:
pass
# Keep resources around after tests are done
try:
parser.addoption("--no-cleanup", action="store_true", default=False)
except ValueError:
pass
# Use existing Helm chart/namespace
try:
parser.addoption("--existing-helm-namespace", action="store")
except ValueError:
pass
|
"""
File: __init__.py
Author: Panyi Dong
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_legacy/__init__.py
File Created: Thursday, 7th April 2022 3:59:55 pm
Author: Panyi Dong (panyid2@illinois.edu)
-----
Last Modified: Friday, 8th April 2022 10:25:42 pm
Modified By: Panyi Dong (panyid2@illinois.edu)
-----
MIT License
Copyright (c) 2022 - 2022, Panyi Dong
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from My_AutoML._hpo._legacy import (
AutoTabular,
AutoTabularClassifier,
AutoTabularRegressor,
)
|
from bokeh.io import show, output_notebook
from bokeh.models import (CDSView, ColorBar, ColumnDataSource,
CustomJS, CustomJSFilter,
GeoJSONDataSource, HoverTool,
LinearColorMapper, Slider)
from bokeh.layouts import column, row, widgetbox
# pylint: disable=no-name-in-module
from bokeh.palettes import brewer
from bokeh.plotting import figure
def plot(ny):
# Input GeoJSON source that contains features for plotting
ny_source = GeoJSONDataSource(geojson = ny.to_json())
# Define color palettes
palette = brewer['OrRd'][8]
palette = palette[::-1] # reverse order of colors so higher values have darker colors
# Instantiate LinearColorMapper, which linearly maps numbers in a range onto a sequence of colors.
color_mapper = LinearColorMapper(palette = palette, low = ny['Points'].min(), high = ny['Points'].max())
# Create color bar.
color_bar = ColorBar(color_mapper = color_mapper,
label_standoff = 8,
width = 500, height = 20,
border_line_color = None,
location = (0,0),
orientation = 'horizontal')
# Create figure object.
p = figure(title = 'Calculated Weighted Points',
plot_height = 650 ,
plot_width = 950,
toolbar_location = 'below',
tools = "pan, wheel_zoom, box_zoom, reset",
output_backend="webgl")
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
# Add patch renderer to figure.
states = p.patches('xs','ys', source = ny_source,
fill_color = {'field' :'Points',
'transform' : color_mapper},
line_color = "gray",
line_width = 0.25,
fill_alpha = 1)
# Create hover tool
p.add_tools(HoverTool(renderers = [states],
tooltips = [('PO Name','@PO_NAME'),
('Points','@Points')
]))
color_bar = ColorBar(color_mapper = color_mapper,
label_standoff = 8,
width = 950, height = 20,
border_line_color = None,
location = (0,0),
orientation = 'horizontal')
p.add_layout(color_bar, 'below')
show(p)
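# Usage sketch (assumption: `ny` is a GeoDataFrame with 'Points' and 'PO_NAME' columns):
# import geopandas as gpd
# ny = gpd.read_file("ny_post_offices.geojson")  # hypothetical file with a precomputed 'Points' column
# output_notebook()
# plot(ny)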
|
# Copyright 2014 ksyun.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Protocol input serializes.
This module contains classes that implement input serialization
for the various KSYUN protocol types.
These classes essentially take user input, a model object that
represents what the expected input should look like, and it returns
a dictionary that contains the various parts of a request. A few
high level design decisions:
* Each protocol type maps to a separate class, all inherit from
``Serializer``.
* The return value for ``serialize_to_request`` (the main entry
point) returns a dictionary that represents a request. This
will have keys like ``url_path``, ``query_string``, etc. This
is done so that it's a) easy to test and b) not tied to a
particular HTTP library. See the ``serialize_to_request`` docstring
for more details.
Unicode
-------
The input to the serializers should be text (str/unicode), not bytes,
with the exception of blob types. Those are assumed to be binary,
and if a str/unicode type is passed in, it will be encoded as utf-8.
"""
import re
import base64
from xml.etree import ElementTree
import calendar
from kscore.compat import six
from kscore.compat import json, formatdate
from kscore.utils import parse_to_aware_datetime
from kscore.utils import percent_encode
from kscore import validate
# From the spec, the default timestamp format if not specified is iso8601.
DEFAULT_TIMESTAMP_FORMAT = 'iso8601'
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
# Same as ISO8601, but with microsecond precision.
ISO8601_MICRO = '%Y-%m-%dT%H:%M:%S.%fZ'
def create_serializer(protocol_name, include_validation=True):
# TODO: Unknown protocols.
serializer = SERIALIZERS[protocol_name]()
if include_validation:
validator = validate.ParamValidator()
serializer = validate.ParamValidationDecorator(validator, serializer)
return serializer
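# Minimal usage sketch (assumes 'query' is a key registered in SERIALIZERS and that an
# operation_model has been built elsewhere by kscore):
# serializer = create_serializer('query')
# request = serializer.serialize_to_request({'Foo': 'val1'}, operation_model)
# request['body']  # -> {'Action': ..., 'Version': ..., 'Foo': 'val1'}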
class Serializer(object):
DEFAULT_METHOD = 'POST'
# Clients can change this to a different MutableMapping
# (i.e OrderedDict) if they want. This is used in the
# compliance test to match the hash ordering used in the
# tests.
MAP_TYPE = dict
DEFAULT_ENCODING = 'utf-8'
def serialize_to_request(self, parameters, operation_model):
"""Serialize parameters into an HTTP request.
This method takes user provided parameters and a shape
model and serializes the parameters to an HTTP request.
More specifically, this method returns information about
parts of the HTTP request, it does not enforce a particular
interface or standard for an HTTP request. It instead returns
a dictionary of:
* 'url_path'
* 'query_string'
* 'headers'
* 'body'
* 'method'
It is then up to consumers to decide how to map this to a Request
object of their HTTP library of choice. Below is an example
return value::
{'body': {'Action': 'OperationName',
'Bar': 'val2',
'Foo': 'val1',
'Version': '2014-01-01'},
'headers': {},
'method': 'POST',
'query_string': '',
'url_path': '/'}
:param parameters: The dictionary input parameters for the
operation (i.e the user input).
:param operation_model: The OperationModel object that describes
the operation.
"""
raise NotImplementedError("serialize_to_request")
def _create_default_request(self):
# Creates a boilerplate default request dict that subclasses
# can use as a starting point.
serialized = {
'url_path': '/',
'query_string': '',
'method': self.DEFAULT_METHOD,
'headers': self.headers,
# An empty body is represented as an empty byte string.
'body': b''
}
return serialized
def _serialize_not_shape(self, data, parameters):
pass
def _serialize_data(self, serialized, data):
serialized['body'] = data
return serialized
@property
def headers(self):
return {}
# Some extra utility methods subclasses can use.
def _timestamp_iso8601(self, value):
if value.microsecond > 0:
timestamp_format = ISO8601_MICRO
else:
timestamp_format = ISO8601
return value.strftime(timestamp_format)
def _timestamp_unixtimestamp(self, value):
return int(calendar.timegm(value.timetuple()))
def _timestamp_rfc822(self, value):
return formatdate(value, usegmt=True)
def _convert_timestamp_to_str(self, value):
datetime_obj = parse_to_aware_datetime(value)
converter = getattr(
self, '_timestamp_%s' % self.TIMESTAMP_FORMAT.lower())
final_value = converter(datetime_obj)
return final_value
def _get_serialized_name(self, shape, default_name):
# Returns the serialized name for the shape if it exists.
# Otherwise it will return the passed in default_name.
return shape.serialization.get('name', default_name)
def _get_base64(self, value):
        # Returns the base64-encoded version of value, handling
        # both strings and bytes. The returned value is a str,
        # decoded using the default encoding.
if isinstance(value, six.text_type):
value = value.encode(self.DEFAULT_ENCODING)
return base64.b64encode(value).strip().decode(
self.DEFAULT_ENCODING)
class QuerySerializer(Serializer):
"""
BASE HTTP QUERY REQUEST
"""
TIMESTAMP_FORMAT = 'iso8601'
def serialize_to_request(self, parameters, operation_model):
shape = operation_model.input_shape
serialized = self._create_default_request()
serialized['method'] = operation_model.http.get('method',
self.DEFAULT_METHOD)
        # The query serializer only deals with body params so
        # that's what we hand off to the _serialize_* methods.
serialized['headers'].update(
{
'X-Action': operation_model.name,
'X-Version': operation_model.metadata['apiVersion'],
}
)
if 'requestUri' in operation_model.http:
serialized['url_path'] = operation_model.http['requestUri']
body_params = self.MAP_TYPE()
body_params['Action'] = operation_model.name
body_params['Version'] = operation_model.metadata['apiVersion']
if shape is not None:
self._serialize(body_params, parameters, shape)
else:
self._serialize_not_shape(body_params, parameters)
return self._serialize_data(serialized, body_params)
def _serialize_not_shape(self, data, parameters):
pass
def _serialize_data(self, serialized, data):
serialized['body'] = data
return serialized
def _serialize(self, serialized, value, shape, prefix=''):
# serialized: The dict that is incrementally added to with the
# final serialized parameters.
# value: The current user input value.
# shape: The shape object that describes the structure of the
# input.
        # prefix: The incrementally built up prefix for the serialized
        # key (i.e. Foo.bar.members.1).
method = getattr(self, '_serialize_type_%s' % shape.type_name,
self._default_serialize)
method(serialized, value, shape, prefix=prefix)
def _serialize_type_structure(self, serialized, value, shape, prefix=''):
members = shape.members
for key, value in value.items():
member_shape = members[key]
member_prefix = self._get_serialized_name(member_shape, key)
if prefix:
member_prefix = '%s.%s' % (prefix, member_prefix)
self._serialize(serialized, value, member_shape, member_prefix)
def _serialize_type_list(self, serialized, value, shape, prefix=''):
if not value:
# The query protocol serializes empty lists.
serialized[prefix] = ''
return
if self._is_shape_flattened(shape):
list_prefix = prefix
if shape.member.serialization.get('name'):
name = self._get_serialized_name(shape.member, default_name='')
# Replace '.Original' with '.{name}'.
list_prefix = '.'.join(prefix.split('.')[:-1] + [name])
else:
list_name = shape.member.serialization.get('name', 'member')
list_prefix = '%s.%s' % (prefix, list_name)
for i, element in enumerate(value, 1):
element_prefix = '%s.%s' % (list_prefix, i)
element_shape = shape.member
self._serialize(serialized, element, element_shape, element_prefix)
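    # Key-naming sketch for the list serialization above (names are
    # illustrative only): a non-flattened list member 'Foo' with two items
    # produces the keys 'Foo.member.1' and 'Foo.member.2', while a flattened
    # list produces 'Foo.1' and 'Foo.2' (or uses the member's serialized name
    # if one is set).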
def _serialize_type_map(self, serialized, value, shape, prefix=''):
if self._is_shape_flattened(shape):
full_prefix = prefix
else:
full_prefix = '%s.entry' % prefix
template = full_prefix + '.{i}.{suffix}'
key_shape = shape.key
value_shape = shape.value
key_suffix = self._get_serialized_name(key_shape, default_name='key')
value_suffix = self._get_serialized_name(value_shape, 'value')
for i, key in enumerate(value, 1):
key_prefix = template.format(i=i, suffix=key_suffix)
value_prefix = template.format(i=i, suffix=value_suffix)
self._serialize(serialized, key, key_shape, key_prefix)
self._serialize(serialized, value[key], value_shape, value_prefix)
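    # Key-naming sketch for the map serialization above (names are
    # illustrative only): a non-flattened map member 'Attributes' with one
    # entry produces 'Attributes.entry.1.key' and 'Attributes.entry.1.value';
    # a flattened map drops the '.entry' segment, producing 'Attributes.1.key'
    # and 'Attributes.1.value'.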
def _serialize_type_blob(self, serialized, value, shape, prefix=''):
# Blob args must be base64 encoded.
serialized[prefix] = self._get_base64(value)
def _serialize_type_timestamp(self, serialized, value, shape, prefix=''):
serialized[prefix] = self._convert_timestamp_to_str(value)
def _serialize_type_boolean(self, serialized, value, shape, prefix=''):
if value:
serialized[prefix] = 'true'
else:
serialized[prefix] = 'false'
def _default_serialize(self, serialized, value, shape, prefix=''):
serialized[prefix] = value
def _is_shape_flattened(self, shape):
return shape.serialization.get('flattened')
class EC2Serializer(QuerySerializer):
"""EC2 specific customizations to the query protocol serializers.
The EC2 model is almost, but not exactly, similar to the query protocol
serializer. This class encapsulates those differences. The model
will have be marked with a ``protocol`` of ``ec2``, so you don't need
to worry about wiring this class up correctly.
"""
def _get_serialized_name(self, shape, default_name):
# Returns the serialized name for the shape if it exists.
# Otherwise it will return the passed in default_name.
if 'queryName' in shape.serialization:
return shape.serialization['queryName']
elif 'name' in shape.serialization:
# A locationName is always capitalized
# on input for the ec2 protocol.
name = shape.serialization['name']
return name[0].upper() + name[1:]
else:
return default_name
def _serialize_type_list(self, serialized, value, shape, prefix=''):
for i, element in enumerate(value, 1):
element_prefix = '%s.%s' % (prefix, i)
element_shape = shape.member
self._serialize(serialized, element, element_shape, element_prefix)
class QueryAcceptJsonSerializer(QuerySerializer):
@property
def headers(self):
return {"Accept": 'application/json'}
def _serialize_not_shape(self, data, parameters):
data.update(parameters)
def _serialize_data(self, serialized, data):
if serialized['method'].lower() == "get":
serialized['body'] = {}
serialized['query_string'] = data
else:
serialized['body'] = data
return serialized
class KCSSerializer(QueryAcceptJsonSerializer):
def _serialize_data(self, serialized, data):
serialized['body'] = {}
serialized['query_string'] = data
return serialized
class CustomBodySerializer(QueryAcceptJsonSerializer):
def serialize_to_request(self, parameters, operation_model):
shape = operation_model.input_shape
serialized = self._create_default_request()
serialized['method'] = operation_model.http.get('method',
self.DEFAULT_METHOD)
# The query serializer only deals with body params so
# that's what we hand off the _serialize_* methods.
serialized['headers'].update(
{
'X-Action': operation_model.name,
'X-Version': operation_model.metadata['apiVersion'],
}
)
if 'requestUri' in operation_model.http:
serialized['url_path'] = operation_model.http['requestUri']
body_params = self.MAP_TYPE()
custom_body = None
if 'Body' in parameters:
custom_body = parameters.pop('Body')
if shape is not None:
self._serialize(body_params, parameters, shape)
else:
self._serialize_not_shape(body_params, parameters)
return self._serialize_data(serialized, body_params, custom_body)
def _serialize_data(self, serialized, data, body=None):
if body is not None:
serialized['body'] = json.dumps(body).encode(self.DEFAULT_ENCODING)
serialized['query_string'] = data
return serialized
class JSONSerializer(Serializer):
"""
    Serializer for the base JSON protocol: every HTTP method sends a JSON body.
"""
TIMESTAMP_FORMAT = 'unixtimestamp'
def serialize_to_request(self, parameters, operation_model):
target = '%s.%s' % (operation_model.metadata['targetPrefix'],
operation_model.name)
serialized = self._create_default_request()
serialized['method'] = operation_model.http.get('method',
self.DEFAULT_METHOD)
if 'requestUri' in operation_model.http:
serialized['url_path'] = operation_model.http['requestUri']
serialized['query_string'] = self.MAP_TYPE()
serialized['headers'] = {
'X-Amz-Target': target,
'Content-Type': 'application/json',
'Accept': 'application/json',
'X-Action': operation_model.name,
'X-Version': operation_model.metadata['apiVersion']
}
body = self.MAP_TYPE()
input_shape = operation_model.input_shape
if input_shape is not None:
self._serialize(body, parameters, input_shape)
else:
self._serialize_not_shape(body, parameters)
return self._serialize_data(serialized, body)
def _serialize_not_shape(self, data, parameters):
data.update(parameters)
def _serialize_data(self, serialized, data):
serialized['body'] = json.dumps(data).encode(self.DEFAULT_ENCODING)
return serialized
def _serialize(self, serialized, value, shape, key=None):
method = getattr(self, '_serialize_type_%s' % shape.type_name,
self._default_serialize)
method(serialized, value, shape, key)
def _serialize_type_structure(self, serialized, value, shape, key):
if key is not None:
# If a key is provided, this is a result of a recursive
# call so we need to add a new child dict as the value
# of the passed in serialized dict. We'll then add
# all the structure members as key/vals in the new serialized
# dictionary we just created.
new_serialized = self.MAP_TYPE()
serialized[key] = new_serialized
serialized = new_serialized
members = shape.members
for member_key, member_value in value.items():
member_shape = members[member_key]
if 'name' in member_shape.serialization:
member_key = member_shape.serialization['name']
self._serialize(serialized, member_value, member_shape, member_key)
def _serialize_type_map(self, serialized, value, shape, key):
map_obj = self.MAP_TYPE()
serialized[key] = map_obj
for sub_key, sub_value in value.items():
self._serialize(map_obj, sub_value, shape.value, sub_key)
def _serialize_type_list(self, serialized, value, shape, key):
list_obj = []
serialized[key] = list_obj
for list_item in value:
wrapper = {}
# The JSON list serialization is the only case where we aren't
# setting a key on a dict. We handle this by using
# a __current__ key on a wrapper dict to serialize each
# list item before appending it to the serialized list.
self._serialize(wrapper, list_item, shape.member, "__current__")
list_obj.append(wrapper["__current__"])
def _default_serialize(self, serialized, value, shape, key):
serialized[key] = value
def _serialize_type_timestamp(self, serialized, value, shape, key):
serialized[key] = self._convert_timestamp_to_str(value)
def _serialize_type_blob(self, serialized, value, shape, key):
serialized[key] = self._get_base64(value)
class NotGetJsonSerializer(JSONSerializer):
def _serialize_data(self, serialized, data):
if serialized['method'].lower() == "get":
serialized['body'] = {}
serialized['query_string'].update(data)
else:
serialized['body'] = json.dumps(data).encode(self.DEFAULT_ENCODING)
return serialized
class BaseRestSerializer(Serializer):
"""Base class for rest protocols.
    The only variance between the various rest protocols is the
    way that the body is serialized. All other aspects (headers, uri, etc.)
    are the same, and the logic for serializing those aspects lives here.
Subclasses must implement the ``_serialize_body_params`` method.
"""
# This is a list of known values for the "location" key in the
# serialization dict. The location key tells us where on the request
# to put the serialized value.
KNOWN_LOCATIONS = ['uri', 'querystring', 'header', 'headers']
def serialize_to_request(self, parameters, operation_model):
serialized = self._create_default_request()
serialized['headers'] = {
'X-Action': operation_model.name,
'X-Version': operation_model.metadata['apiVersion']
}
serialized['method'] = operation_model.http.get('method',
self.DEFAULT_METHOD)
shape = operation_model.input_shape
if shape is None:
serialized['url_path'] = operation_model.http['requestUri']
return serialized
shape_members = shape.members
# While the ``serialized`` key holds the final serialized request
# data, we need interim dicts for the various locations of the
# request. We need this for the uri_path_kwargs and the
# query_string_kwargs because they are templated, so we need
# to gather all the needed data for the string template,
# then we render the template. The body_kwargs is needed
# because once we've collected them all, we run them through
# _serialize_body_params, which for rest-json, creates JSON,
# and for rest-xml, will create XML. This is what the
# ``partitioned`` dict below is for.
partitioned = {
'uri_path_kwargs': self.MAP_TYPE(),
'query_string_kwargs': self.MAP_TYPE(),
'body_kwargs': self.MAP_TYPE(),
'headers': self.MAP_TYPE(),
}
for param_name, param_value in parameters.items():
if param_value is None:
# Don't serialize any parameter with a None value.
continue
self._partition_parameters(partitioned, param_name, param_value,
shape_members)
serialized['url_path'] = self._render_uri_template(
operation_model.http['requestUri'],
partitioned['uri_path_kwargs'])
# Note that we lean on the http implementation to handle the case
# where the requestUri path already has query parameters.
# The bundled http client, requests, already supports this.
serialized['query_string'] = partitioned['query_string_kwargs']
if partitioned['headers']:
serialized['headers'] = partitioned['headers']
self._serialize_payload(partitioned, parameters,
serialized, shape, shape_members)
return serialized
def _render_uri_template(self, uri_template, params):
# We need to handle two cases::
#
# /{Bucket}/foo
# /{Key+}/bar
# A label ending with '+' is greedy. There can only
# be one greedy key.
encoded_params = {}
for template_param in re.findall(r'{(.*?)}', uri_template):
if template_param.endswith('+'):
encoded_params[template_param] = percent_encode(
params[template_param[:-1]], safe='/~')
else:
encoded_params[template_param] = percent_encode(
params[template_param])
return uri_template.format(**encoded_params)
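    # Rendering sketch for the URI template handling above (paths are
    # illustrative, and ``percent_encode`` is assumed to behave like standard
    # URL quoting): given '/{Bucket}/{Key+}' with
    # {'Bucket': 'my bucket', 'Key': 'a/b c'}, the non-greedy label encodes
    # the space ('my%20bucket') while the greedy 'Key+' label keeps its '/'
    # characters, yielding '/my%20bucket/a/b%20c'.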
def _serialize_payload(self, partitioned, parameters,
serialized, shape, shape_members):
# partitioned - The user input params partitioned by location.
# parameters - The user input params.
# serialized - The final serialized request dict.
# shape - Describes the expected input shape
# shape_members - The members of the input struct shape
payload_member = shape.serialization.get('payload')
if payload_member is not None and \
shape_members[payload_member].type_name in ['blob', 'string']:
# If it's streaming, then the body is just the
# value of the payload.
body_payload = parameters.get(payload_member, b'')
body_payload = self._encode_payload(body_payload)
serialized['body'] = body_payload
elif payload_member is not None:
            # If there's a payload member, we serialize that
            # member to the body.
body_params = parameters.get(payload_member)
if body_params is not None:
serialized['body'] = self._serialize_body_params(
body_params,
shape_members[payload_member])
elif partitioned['body_kwargs']:
serialized['body'] = self._serialize_body_params(
partitioned['body_kwargs'], shape)
def _encode_payload(self, body):
if isinstance(body, six.text_type):
return body.encode(self.DEFAULT_ENCODING)
return body
def _partition_parameters(self, partitioned, param_name,
param_value, shape_members):
        # This takes a user-provided input parameter (name and value)
        # and figures out where it goes in the request dict.
        # Some params are HTTP headers, some are used in the URI, some
        # are in the request body. This method deals with all of that.
member = shape_members[param_name]
location = member.serialization.get('location')
key_name = member.serialization.get('name', param_name)
if location == 'uri':
partitioned['uri_path_kwargs'][key_name] = param_value
elif location == 'querystring':
if isinstance(param_value, dict):
partitioned['query_string_kwargs'].update(param_value)
else:
partitioned['query_string_kwargs'][key_name] = param_value
elif location == 'header':
shape = shape_members[param_name]
value = self._convert_header_value(shape, param_value)
partitioned['headers'][key_name] = str(value)
elif location == 'headers':
            # 'headers' is a bit of an oddball. The ``key_name``
            # is really a prefix for the header names:
header_prefix = key_name
# The value provided by the user is a dict so we'll be
# creating multiple header key/val pairs. The key
# name to use for each header is the header_prefix (``key_name``)
# plus the key provided by the user.
self._do_serialize_header_map(header_prefix,
partitioned['headers'],
param_value)
else:
partitioned['body_kwargs'][param_name] = param_value
def _do_serialize_header_map(self, header_prefix, headers, user_input):
for key, val in user_input.items():
full_key = header_prefix + key
headers[full_key] = val
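    # Header-map sketch for the 'headers' location above (names are
    # illustrative only): with a header prefix of 'x-custom-meta-' and user
    # input {'color': 'blue'}, a single header 'x-custom-meta-color' with
    # value 'blue' is produced.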
def _serialize_body_params(self, params, shape):
raise NotImplementedError('_serialize_body_params')
def _convert_header_value(self, shape, value):
if shape.type_name == 'timestamp':
datetime_obj = parse_to_aware_datetime(value)
timestamp = calendar.timegm(datetime_obj.utctimetuple())
return self._timestamp_rfc822(timestamp)
else:
return value
class RestJSONSerializer(BaseRestSerializer, JSONSerializer):
def _serialize_body_params(self, params, shape):
serialized_body = self.MAP_TYPE()
self._serialize(serialized_body, params, shape)
return json.dumps(serialized_body).encode(self.DEFAULT_ENCODING)
class RestXMLSerializer(BaseRestSerializer):
TIMESTAMP_FORMAT = 'iso8601'
def _serialize_body_params(self, params, shape):
root_name = shape.serialization['name']
pseudo_root = ElementTree.Element('')
self._serialize(shape, params, pseudo_root, root_name)
real_root = list(pseudo_root)[0]
return ElementTree.tostring(real_root, encoding=self.DEFAULT_ENCODING)
def _serialize(self, shape, params, xmlnode, name):
method = getattr(self, '_serialize_type_%s' % shape.type_name,
self._default_serialize)
method(xmlnode, params, shape, name)
def _serialize_type_structure(self, xmlnode, params, shape, name):
structure_node = ElementTree.SubElement(xmlnode, name)
if 'xmlNamespace' in shape.serialization:
namespace_metadata = shape.serialization['xmlNamespace']
attribute_name = 'xmlns'
if namespace_metadata.get('prefix'):
attribute_name += ':%s' % namespace_metadata['prefix']
structure_node.attrib[attribute_name] = namespace_metadata['uri']
for key, value in params.items():
member_shape = shape.members[key]
member_name = member_shape.serialization.get('name', key)
# We need to special case member shapes that are marked as an
# xmlAttribute. Rather than serializing into an XML child node,
# we instead serialize the shape to an XML attribute of the
# *current* node.
if value is None:
# Don't serialize any param whose value is None.
return
if member_shape.serialization.get('xmlAttribute'):
# xmlAttributes must have a serialization name.
xml_attribute_name = member_shape.serialization['name']
structure_node.attrib[xml_attribute_name] = value
continue
self._serialize(member_shape, value, structure_node, member_name)
def _serialize_type_list(self, xmlnode, params, shape, name):
member_shape = shape.member
if shape.serialization.get('flattened'):
element_name = name
list_node = xmlnode
else:
element_name = member_shape.serialization.get('name', 'member')
list_node = ElementTree.SubElement(xmlnode, name)
for item in params:
self._serialize(member_shape, item, list_node, element_name)
def _serialize_type_map(self, xmlnode, params, shape, name):
# Given the ``name`` of MyMap, and input of {"key1": "val1"}
# we serialize this as:
# <MyMap>
# <entry>
# <key>key1</key>
# <value>val1</value>
# </entry>
# </MyMap>
node = ElementTree.SubElement(xmlnode, name)
# TODO: handle flattened maps.
for key, value in params.items():
entry_node = ElementTree.SubElement(node, 'entry')
key_name = self._get_serialized_name(shape.key, default_name='key')
val_name = self._get_serialized_name(shape.value,
default_name='value')
self._serialize(shape.key, key, entry_node, key_name)
self._serialize(shape.value, value, entry_node, val_name)
def _serialize_type_boolean(self, xmlnode, params, shape, name):
        # For scalar types, the 'params' attr is actually just a scalar
        # value representing the data we need to serialize as a boolean.
        # The serialized text will be either 'true' or 'false'.
node = ElementTree.SubElement(xmlnode, name)
if params:
str_value = 'true'
else:
str_value = 'false'
node.text = str_value
def _serialize_type_blob(self, xmlnode, params, shape, name):
node = ElementTree.SubElement(xmlnode, name)
node.text = self._get_base64(params)
def _serialize_type_timestamp(self, xmlnode, params, shape, name):
node = ElementTree.SubElement(xmlnode, name)
node.text = self._convert_timestamp_to_str(params)
def _default_serialize(self, xmlnode, params, shape, name):
node = ElementTree.SubElement(xmlnode, name)
node.text = str(params)
SERIALIZERS = {
'kcs': KCSSerializer,
'ec2': EC2Serializer,
'query': QuerySerializer,
'query-json': QueryAcceptJsonSerializer,
'json': JSONSerializer,
'json2': NotGetJsonSerializer,
'rest-json': RestJSONSerializer,
'rest-xml': RestXMLSerializer,
'custom-body': CustomBodySerializer,
}
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration_async import ComputeManagementClientConfiguration
from .operations_async import Operations
from .operations_async import AvailabilitySetsOperations
from .operations_async import ProximityPlacementGroupsOperations
from .operations_async import VirtualMachineExtensionImagesOperations
from .operations_async import VirtualMachineExtensionsOperations
from .operations_async import VirtualMachineImagesOperations
from .operations_async import UsageOperations
from .operations_async import VirtualMachinesOperations
from .operations_async import VirtualMachineSizesOperations
from .operations_async import ImagesOperations
from .operations_async import VirtualMachineScaleSetsOperations
from .operations_async import VirtualMachineScaleSetExtensionsOperations
from .operations_async import VirtualMachineScaleSetRollingUpgradesOperations
from .operations_async import VirtualMachineScaleSetVMsOperations
from .operations_async import LogAnalyticsOperations
from .operations_async import VirtualMachineRunCommandsOperations
from .operations_async import DisksOperations
from .operations_async import SnapshotsOperations
from .. import models
class ComputeManagementClient(object):
"""Compute Client.
:ivar operations: Operations operations
:vartype operations: azure.mgmt.compute.v2018_04_01.aio.operations_async.Operations
:ivar availability_sets: AvailabilitySetsOperations operations
:vartype availability_sets: azure.mgmt.compute.v2018_04_01.aio.operations_async.AvailabilitySetsOperations
:ivar proximity_placement_groups: ProximityPlacementGroupsOperations operations
:vartype proximity_placement_groups: azure.mgmt.compute.v2018_04_01.aio.operations_async.ProximityPlacementGroupsOperations
:ivar virtual_machine_extension_images: VirtualMachineExtensionImagesOperations operations
:vartype virtual_machine_extension_images: azure.mgmt.compute.v2018_04_01.aio.operations_async.VirtualMachineExtensionImagesOperations
:ivar virtual_machine_extensions: VirtualMachineExtensionsOperations operations
:vartype virtual_machine_extensions: azure.mgmt.compute.v2018_04_01.aio.operations_async.VirtualMachineExtensionsOperations
:ivar virtual_machine_images: VirtualMachineImagesOperations operations
:vartype virtual_machine_images: azure.mgmt.compute.v2018_04_01.aio.operations_async.VirtualMachineImagesOperations
:ivar usage: UsageOperations operations
:vartype usage: azure.mgmt.compute.v2018_04_01.aio.operations_async.UsageOperations
:ivar virtual_machines: VirtualMachinesOperations operations
:vartype virtual_machines: azure.mgmt.compute.v2018_04_01.aio.operations_async.VirtualMachinesOperations
:ivar virtual_machine_sizes: VirtualMachineSizesOperations operations
:vartype virtual_machine_sizes: azure.mgmt.compute.v2018_04_01.aio.operations_async.VirtualMachineSizesOperations
:ivar images: ImagesOperations operations
:vartype images: azure.mgmt.compute.v2018_04_01.aio.operations_async.ImagesOperations
:ivar virtual_machine_scale_sets: VirtualMachineScaleSetsOperations operations
:vartype virtual_machine_scale_sets: azure.mgmt.compute.v2018_04_01.aio.operations_async.VirtualMachineScaleSetsOperations
:ivar virtual_machine_scale_set_extensions: VirtualMachineScaleSetExtensionsOperations operations
:vartype virtual_machine_scale_set_extensions: azure.mgmt.compute.v2018_04_01.aio.operations_async.VirtualMachineScaleSetExtensionsOperations
:ivar virtual_machine_scale_set_rolling_upgrades: VirtualMachineScaleSetRollingUpgradesOperations operations
:vartype virtual_machine_scale_set_rolling_upgrades: azure.mgmt.compute.v2018_04_01.aio.operations_async.VirtualMachineScaleSetRollingUpgradesOperations
:ivar virtual_machine_scale_set_vms: VirtualMachineScaleSetVMsOperations operations
:vartype virtual_machine_scale_set_vms: azure.mgmt.compute.v2018_04_01.aio.operations_async.VirtualMachineScaleSetVMsOperations
:ivar log_analytics: LogAnalyticsOperations operations
:vartype log_analytics: azure.mgmt.compute.v2018_04_01.aio.operations_async.LogAnalyticsOperations
:ivar virtual_machine_run_commands: VirtualMachineRunCommandsOperations operations
:vartype virtual_machine_run_commands: azure.mgmt.compute.v2018_04_01.aio.operations_async.VirtualMachineRunCommandsOperations
:ivar disks: DisksOperations operations
:vartype disks: azure.mgmt.compute.v2018_04_01.aio.operations_async.DisksOperations
:ivar snapshots: SnapshotsOperations operations
:vartype snapshots: azure.mgmt.compute.v2018_04_01.aio.operations_async.SnapshotsOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = ComputeManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.availability_sets = AvailabilitySetsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.proximity_placement_groups = ProximityPlacementGroupsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_machine_extension_images = VirtualMachineExtensionImagesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_machine_extensions = VirtualMachineExtensionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_machine_images = VirtualMachineImagesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.usage = UsageOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_machines = VirtualMachinesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_machine_sizes = VirtualMachineSizesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.images = ImagesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_machine_scale_sets = VirtualMachineScaleSetsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_machine_scale_set_extensions = VirtualMachineScaleSetExtensionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_machine_scale_set_rolling_upgrades = VirtualMachineScaleSetRollingUpgradesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_machine_scale_set_vms = VirtualMachineScaleSetVMsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.log_analytics = LogAnalyticsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_machine_run_commands = VirtualMachineRunCommandsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.disks = DisksOperations(
self._client, self._config, self._serialize, self._deserialize)
self.snapshots = SnapshotsOperations(
self._client, self._config, self._serialize, self._deserialize)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "ComputeManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
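# A minimal usage sketch (assumes the ``azure-identity`` package provides the
# credential; the subscription id is a placeholder):
#
#     from azure.identity.aio import DefaultAzureCredential
#
#     async def example():
#         async with ComputeManagementClient(
#             credential=DefaultAzureCredential(),
#             subscription_id="<subscription-id>",
#         ) as client:
#             ...  # e.g. use client.virtual_machines or client.disks operations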
|
import datetime
from dimagi.ext.couchdbkit import (Document, StringProperty,
ListProperty, DictProperty, DateProperty)
from corehq.apps.groups.models import Group
from .constants import *
class LegacyWeeklyReport(Document):
"""
This doc stores the aggregate weekly results per site.
Example:
domain: 'mikesproject',
site: 'Pennsylvania State Elementary School',
week_end_date: Saturday Sept 28, 2013,
site_strategy: [3, -1, 0, 4, 2],
site_game: [2, 4, 3, 1, 0],
individual: {
'mikeo': {
'strategy': [2, 4, 0, 1, 3],
'game': [1, 2, 4, 1, 0],
'weekly_totals': [
['Sept 9', 3],
['Sept 16', 2],
['Sept 23', 5], # current week
],
},
},
'weekly_totals': [
['Sept 9', 11],
['Sept 16', 6],
['Sept 23', 9], # current week
],
    Where each week is a five-element list: 0 indicates that
    no strategies/games were recorded, and -1 indicates an off
    day (nothing recorded, but that's okay).
"""
domain = StringProperty()
site = StringProperty()
week_end_date = DateProperty()
site_strategy = ListProperty()
site_game = ListProperty()
individual = DictProperty()
weekly_totals = ListProperty()
@classmethod
def by_site(cls, site, date=None):
if isinstance(site, Group):
site = site.name
if date is None:
# get the most recent saturday (isoweekday==6)
days = [6, 7, 1, 2, 3, 4, 5]
today = datetime.date.today()
date = today - datetime.timedelta(
days=days.index(today.isoweekday())
)
report = cls.view(
'penn_state/smiley_weekly_reports',
key=[DOMAIN, site, str(date)],
reduce=False,
include_docs=True,
).first()
return report
@classmethod
def by_user(cls, user, date=None):
# Users should only have one group, and it should be a report group
groups = Group.by_user(user).all()
# if len(groups) != 1 or not groups[0].reporting:
if len(groups) == 0 or not groups[0].reporting:
return
site = groups[0].name
return cls.by_site(site, date)
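# A minimal usage sketch (names are illustrative; assumes the couch view and
# reporting groups referenced above are set up for the domain):
#
#     report = LegacyWeeklyReport.by_user(user)  # defaults to the most recent Saturday
#     if report is not None:
#         strategies = report.site_strategy      # e.g. [3, -1, 0, 4, 2]
#         totals = report.weekly_totals          # e.g. [['Sept 9', 11], ...]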
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.gaming_v1beta.services.game_server_clusters_service import (
GameServerClustersServiceAsyncClient,
)
from google.cloud.gaming_v1beta.services.game_server_clusters_service import (
GameServerClustersServiceClient,
)
from google.cloud.gaming_v1beta.services.game_server_clusters_service import pagers
from google.cloud.gaming_v1beta.services.game_server_clusters_service import transports
from google.cloud.gaming_v1beta.types import common
from google.cloud.gaming_v1beta.types import game_server_clusters
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
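# For example, if client.DEFAULT_ENDPOINT were "localhost:7469", the patched
# default endpoint becomes "foo.googleapis.com"; any non-localhost endpoint is
# returned unchanged.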
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert GameServerClustersServiceClient._get_default_mtls_endpoint(None) is None
assert (
GameServerClustersServiceClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
GameServerClustersServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
GameServerClustersServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
GameServerClustersServiceClient._get_default_mtls_endpoint(
sandbox_mtls_endpoint
)
== sandbox_mtls_endpoint
)
assert (
GameServerClustersServiceClient._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
@pytest.mark.parametrize(
"client_class",
[GameServerClustersServiceClient, GameServerClustersServiceAsyncClient,],
)
def test_game_server_clusters_service_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "gameservices.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.GameServerClustersServiceGrpcTransport, "grpc"),
(transports.GameServerClustersServiceGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_game_server_clusters_service_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class",
[GameServerClustersServiceClient, GameServerClustersServiceAsyncClient,],
)
def test_game_server_clusters_service_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "gameservices.googleapis.com:443"
def test_game_server_clusters_service_client_get_transport_class():
transport = GameServerClustersServiceClient.get_transport_class()
available_transports = [
transports.GameServerClustersServiceGrpcTransport,
]
assert transport in available_transports
transport = GameServerClustersServiceClient.get_transport_class("grpc")
assert transport == transports.GameServerClustersServiceGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
GameServerClustersServiceClient,
transports.GameServerClustersServiceGrpcTransport,
"grpc",
),
(
GameServerClustersServiceAsyncClient,
transports.GameServerClustersServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
GameServerClustersServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(GameServerClustersServiceClient),
)
@mock.patch.object(
GameServerClustersServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(GameServerClustersServiceAsyncClient),
)
def test_game_server_clusters_service_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(
GameServerClustersServiceClient, "get_transport_class"
) as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(
GameServerClustersServiceClient, "get_transport_class"
) as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
GameServerClustersServiceClient,
transports.GameServerClustersServiceGrpcTransport,
"grpc",
"true",
),
(
GameServerClustersServiceAsyncClient,
transports.GameServerClustersServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(
GameServerClustersServiceClient,
transports.GameServerClustersServiceGrpcTransport,
"grpc",
"false",
),
(
GameServerClustersServiceAsyncClient,
transports.GameServerClustersServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
GameServerClustersServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(GameServerClustersServiceClient),
)
@mock.patch.object(
GameServerClustersServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(GameServerClustersServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_game_server_clusters_service_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class",
[GameServerClustersServiceClient, GameServerClustersServiceAsyncClient],
)
@mock.patch.object(
GameServerClustersServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(GameServerClustersServiceClient),
)
@mock.patch.object(
GameServerClustersServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(GameServerClustersServiceAsyncClient),
)
def test_game_server_clusters_service_client_get_mtls_endpoint_and_cert_source(
client_class,
):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
GameServerClustersServiceClient,
transports.GameServerClustersServiceGrpcTransport,
"grpc",
),
(
GameServerClustersServiceAsyncClient,
transports.GameServerClustersServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_game_server_clusters_service_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
GameServerClustersServiceClient,
transports.GameServerClustersServiceGrpcTransport,
"grpc",
),
(
GameServerClustersServiceAsyncClient,
transports.GameServerClustersServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_game_server_clusters_service_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_game_server_clusters_service_client_client_options_from_dict():
with mock.patch(
"google.cloud.gaming_v1beta.services.game_server_clusters_service.transports.GameServerClustersServiceGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = GameServerClustersServiceClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"request_type", [game_server_clusters.ListGameServerClustersRequest, dict,]
)
def test_list_game_server_clusters(request_type, transport: str = "grpc"):
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_game_server_clusters), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = game_server_clusters.ListGameServerClustersResponse(
next_page_token="next_page_token_value", unreachable=["unreachable_value"],
)
response = client.list_game_server_clusters(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.ListGameServerClustersRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListGameServerClustersPager)
assert response.next_page_token == "next_page_token_value"
assert response.unreachable == ["unreachable_value"]
def test_list_game_server_clusters_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_game_server_clusters), "__call__"
) as call:
client.list_game_server_clusters()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.ListGameServerClustersRequest()
@pytest.mark.asyncio
async def test_list_game_server_clusters_async(
transport: str = "grpc_asyncio",
request_type=game_server_clusters.ListGameServerClustersRequest,
):
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_game_server_clusters), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
game_server_clusters.ListGameServerClustersResponse(
next_page_token="next_page_token_value",
unreachable=["unreachable_value"],
)
)
response = await client.list_game_server_clusters(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.ListGameServerClustersRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListGameServerClustersAsyncPager)
assert response.next_page_token == "next_page_token_value"
assert response.unreachable == ["unreachable_value"]
@pytest.mark.asyncio
async def test_list_game_server_clusters_async_from_dict():
await test_list_game_server_clusters_async(request_type=dict)
def test_list_game_server_clusters_field_headers():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.ListGameServerClustersRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_game_server_clusters), "__call__"
) as call:
call.return_value = game_server_clusters.ListGameServerClustersResponse()
client.list_game_server_clusters(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_game_server_clusters_field_headers_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.ListGameServerClustersRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_game_server_clusters), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
game_server_clusters.ListGameServerClustersResponse()
)
await client.list_game_server_clusters(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_game_server_clusters_flattened():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_game_server_clusters), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = game_server_clusters.ListGameServerClustersResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_game_server_clusters(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
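# Illustrative note (not generated code): "flattened" fields are convenience
# keyword arguments that the client copies into a freshly constructed request
# message, which is what the argument assertions above verify. Passing both a
# request object and flattened fields is ambiguous, so the client raises
# ValueError, as the next test checks.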
def test_list_game_server_clusters_flattened_error():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_game_server_clusters(
game_server_clusters.ListGameServerClustersRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_game_server_clusters_flattened_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_game_server_clusters), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
game_server_clusters.ListGameServerClustersResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_game_server_clusters(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_game_server_clusters_flattened_error_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_game_server_clusters(
game_server_clusters.ListGameServerClustersRequest(), parent="parent_value",
)
def test_list_game_server_clusters_pager(transport_name: str = "grpc"):
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_game_server_clusters), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[
game_server_clusters.GameServerCluster(),
game_server_clusters.GameServerCluster(),
game_server_clusters.GameServerCluster(),
],
next_page_token="abc",
),
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[], next_page_token="def",
),
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[game_server_clusters.GameServerCluster(),],
next_page_token="ghi",
),
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[
game_server_clusters.GameServerCluster(),
game_server_clusters.GameServerCluster(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_game_server_clusters(request={})
assert pager._metadata == metadata
results = list(pager)
assert len(results) == 6
assert all(
isinstance(i, game_server_clusters.GameServerCluster) for i in results
)
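# --- Illustrative sketch (not part of the generated suite) ---
# The pager tests rely on mock's side_effect semantics: when side_effect is a
# sequence, each stub call returns the next item, and an exception class in the
# sequence is raised once reached, which is what bounds the pager above at four
# pages. A standalone illustration (the function name is hypothetical):
def _sketch_side_effect_sequence():
    stub = mock.Mock(side_effect=["page1", "page2", RuntimeError])
    assert stub() == "page1"
    assert stub() == "page2"
    with pytest.raises(RuntimeError):
        stub()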
def test_list_game_server_clusters_pages(transport_name: str = "grpc"):
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_game_server_clusters), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[
game_server_clusters.GameServerCluster(),
game_server_clusters.GameServerCluster(),
game_server_clusters.GameServerCluster(),
],
next_page_token="abc",
),
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[], next_page_token="def",
),
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[game_server_clusters.GameServerCluster(),],
next_page_token="ghi",
),
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[
game_server_clusters.GameServerCluster(),
game_server_clusters.GameServerCluster(),
],
),
RuntimeError,
)
pages = list(client.list_game_server_clusters(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_game_server_clusters_async_pager():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_game_server_clusters),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[
game_server_clusters.GameServerCluster(),
game_server_clusters.GameServerCluster(),
game_server_clusters.GameServerCluster(),
],
next_page_token="abc",
),
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[], next_page_token="def",
),
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[game_server_clusters.GameServerCluster(),],
next_page_token="ghi",
),
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[
game_server_clusters.GameServerCluster(),
game_server_clusters.GameServerCluster(),
],
),
RuntimeError,
)
async_pager = await client.list_game_server_clusters(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(
isinstance(i, game_server_clusters.GameServerCluster) for i in responses
)
@pytest.mark.asyncio
async def test_list_game_server_clusters_async_pages():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_game_server_clusters),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[
game_server_clusters.GameServerCluster(),
game_server_clusters.GameServerCluster(),
game_server_clusters.GameServerCluster(),
],
next_page_token="abc",
),
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[], next_page_token="def",
),
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[game_server_clusters.GameServerCluster(),],
next_page_token="ghi",
),
game_server_clusters.ListGameServerClustersResponse(
game_server_clusters=[
game_server_clusters.GameServerCluster(),
game_server_clusters.GameServerCluster(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_game_server_clusters(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
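# --- Illustrative sketch (not part of the generated suite) ---
# The async pager tests above patch the stub with new_callable=mock.AsyncMock,
# so the plain response objects in side_effect come back from an awaitable
# call; the other async tests keep the default mock and therefore wrap their
# return values in grpc_helpers_async.FakeUnaryUnaryCall. A minimal sketch of
# the AsyncMock behavior (function name is hypothetical):
def _sketch_async_mock_awaitable():
    import asyncio

    stub = mock.AsyncMock(return_value="page")
    # Calling an AsyncMock yields a coroutine, so the value can be awaited directly.
    assert asyncio.run(stub()) == "page"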
@pytest.mark.parametrize(
"request_type", [game_server_clusters.GetGameServerClusterRequest, dict,]
)
def test_get_game_server_cluster(request_type, transport: str = "grpc"):
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = game_server_clusters.GameServerCluster(
name="name_value", etag="etag_value", description="description_value",
)
response = client.get_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.GetGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, game_server_clusters.GameServerCluster)
assert response.name == "name_value"
assert response.etag == "etag_value"
assert response.description == "description_value"
def test_get_game_server_cluster_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_game_server_cluster), "__call__"
) as call:
client.get_game_server_cluster()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.GetGameServerClusterRequest()
@pytest.mark.asyncio
async def test_get_game_server_cluster_async(
transport: str = "grpc_asyncio",
request_type=game_server_clusters.GetGameServerClusterRequest,
):
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
game_server_clusters.GameServerCluster(
name="name_value", etag="etag_value", description="description_value",
)
)
response = await client.get_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.GetGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, game_server_clusters.GameServerCluster)
assert response.name == "name_value"
assert response.etag == "etag_value"
assert response.description == "description_value"
@pytest.mark.asyncio
async def test_get_game_server_cluster_async_from_dict():
await test_get_game_server_cluster_async(request_type=dict)
def test_get_game_server_cluster_field_headers():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.GetGameServerClusterRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_game_server_cluster), "__call__"
) as call:
call.return_value = game_server_clusters.GameServerCluster()
client.get_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_game_server_cluster_field_headers_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.GetGameServerClusterRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_game_server_cluster), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
game_server_clusters.GameServerCluster()
)
await client.get_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_game_server_cluster_flattened():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = game_server_clusters.GameServerCluster()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_game_server_cluster(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_game_server_cluster_flattened_error():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_game_server_cluster(
game_server_clusters.GetGameServerClusterRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_game_server_cluster_flattened_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
game_server_clusters.GameServerCluster()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_game_server_cluster(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_game_server_cluster_flattened_error_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_game_server_cluster(
game_server_clusters.GetGameServerClusterRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [game_server_clusters.CreateGameServerClusterRequest, dict,]
)
def test_create_game_server_cluster(request_type, transport: str = "grpc"):
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.create_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.CreateGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
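# Illustrative note (not generated code): Create, Update and Delete return a
# long-running operation wrapper, which is why the assertion above only checks
# for future.Future. Outside of this mocked setup, callers would typically
# resolve the operation with operation.result() or poll it with
# operation.done(); neither path is exercised here because only the initial RPC
# is mocked.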
def test_create_game_server_cluster_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_game_server_cluster), "__call__"
) as call:
client.create_game_server_cluster()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.CreateGameServerClusterRequest()
@pytest.mark.asyncio
async def test_create_game_server_cluster_async(
transport: str = "grpc_asyncio",
request_type=game_server_clusters.CreateGameServerClusterRequest,
):
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.create_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.CreateGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_game_server_cluster_async_from_dict():
await test_create_game_server_cluster_async(request_type=dict)
def test_create_game_server_cluster_field_headers():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.CreateGameServerClusterRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_game_server_cluster), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.create_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_game_server_cluster_field_headers_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.CreateGameServerClusterRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_game_server_cluster), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.create_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_game_server_cluster_flattened():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_game_server_cluster(
parent="parent_value",
game_server_cluster=game_server_clusters.GameServerCluster(
name="name_value"
),
game_server_cluster_id="game_server_cluster_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].game_server_cluster
mock_val = game_server_clusters.GameServerCluster(name="name_value")
assert arg == mock_val
arg = args[0].game_server_cluster_id
mock_val = "game_server_cluster_id_value"
assert arg == mock_val
def test_create_game_server_cluster_flattened_error():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_game_server_cluster(
game_server_clusters.CreateGameServerClusterRequest(),
parent="parent_value",
game_server_cluster=game_server_clusters.GameServerCluster(
name="name_value"
),
game_server_cluster_id="game_server_cluster_id_value",
)
@pytest.mark.asyncio
async def test_create_game_server_cluster_flattened_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_game_server_cluster(
parent="parent_value",
game_server_cluster=game_server_clusters.GameServerCluster(
name="name_value"
),
game_server_cluster_id="game_server_cluster_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].game_server_cluster
mock_val = game_server_clusters.GameServerCluster(name="name_value")
assert arg == mock_val
arg = args[0].game_server_cluster_id
mock_val = "game_server_cluster_id_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_game_server_cluster_flattened_error_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_game_server_cluster(
game_server_clusters.CreateGameServerClusterRequest(),
parent="parent_value",
game_server_cluster=game_server_clusters.GameServerCluster(
name="name_value"
),
game_server_cluster_id="game_server_cluster_id_value",
)
@pytest.mark.parametrize(
"request_type", [game_server_clusters.PreviewCreateGameServerClusterRequest, dict,]
)
def test_preview_create_game_server_cluster(request_type, transport: str = "grpc"):
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_create_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = game_server_clusters.PreviewCreateGameServerClusterResponse(
etag="etag_value",
)
response = client.preview_create_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.PreviewCreateGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, game_server_clusters.PreviewCreateGameServerClusterResponse
)
assert response.etag == "etag_value"
def test_preview_create_game_server_cluster_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_create_game_server_cluster), "__call__"
) as call:
client.preview_create_game_server_cluster()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.PreviewCreateGameServerClusterRequest()
@pytest.mark.asyncio
async def test_preview_create_game_server_cluster_async(
transport: str = "grpc_asyncio",
request_type=game_server_clusters.PreviewCreateGameServerClusterRequest,
):
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_create_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
game_server_clusters.PreviewCreateGameServerClusterResponse(
etag="etag_value",
)
)
response = await client.preview_create_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.PreviewCreateGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, game_server_clusters.PreviewCreateGameServerClusterResponse
)
assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_preview_create_game_server_cluster_async_from_dict():
await test_preview_create_game_server_cluster_async(request_type=dict)
def test_preview_create_game_server_cluster_field_headers():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.PreviewCreateGameServerClusterRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_create_game_server_cluster), "__call__"
) as call:
call.return_value = (
game_server_clusters.PreviewCreateGameServerClusterResponse()
)
client.preview_create_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_preview_create_game_server_cluster_field_headers_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.PreviewCreateGameServerClusterRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_create_game_server_cluster), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
game_server_clusters.PreviewCreateGameServerClusterResponse()
)
await client.preview_create_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.parametrize(
"request_type", [game_server_clusters.DeleteGameServerClusterRequest, dict,]
)
def test_delete_game_server_cluster(request_type, transport: str = "grpc"):
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.DeleteGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_game_server_cluster_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_game_server_cluster), "__call__"
) as call:
client.delete_game_server_cluster()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.DeleteGameServerClusterRequest()
@pytest.mark.asyncio
async def test_delete_game_server_cluster_async(
transport: str = "grpc_asyncio",
request_type=game_server_clusters.DeleteGameServerClusterRequest,
):
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.DeleteGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_game_server_cluster_async_from_dict():
await test_delete_game_server_cluster_async(request_type=dict)
def test_delete_game_server_cluster_field_headers():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.DeleteGameServerClusterRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_game_server_cluster), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_game_server_cluster_field_headers_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.DeleteGameServerClusterRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_game_server_cluster), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_game_server_cluster_flattened():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_game_server_cluster(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_game_server_cluster_flattened_error():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_game_server_cluster(
game_server_clusters.DeleteGameServerClusterRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_game_server_cluster_flattened_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_game_server_cluster(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_game_server_cluster_flattened_error_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_game_server_cluster(
game_server_clusters.DeleteGameServerClusterRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [game_server_clusters.PreviewDeleteGameServerClusterRequest, dict,]
)
def test_preview_delete_game_server_cluster(request_type, transport: str = "grpc"):
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_delete_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = game_server_clusters.PreviewDeleteGameServerClusterResponse(
etag="etag_value",
)
response = client.preview_delete_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.PreviewDeleteGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, game_server_clusters.PreviewDeleteGameServerClusterResponse
)
assert response.etag == "etag_value"
def test_preview_delete_game_server_cluster_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_delete_game_server_cluster), "__call__"
) as call:
client.preview_delete_game_server_cluster()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.PreviewDeleteGameServerClusterRequest()
@pytest.mark.asyncio
async def test_preview_delete_game_server_cluster_async(
transport: str = "grpc_asyncio",
request_type=game_server_clusters.PreviewDeleteGameServerClusterRequest,
):
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_delete_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
game_server_clusters.PreviewDeleteGameServerClusterResponse(
etag="etag_value",
)
)
response = await client.preview_delete_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.PreviewDeleteGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, game_server_clusters.PreviewDeleteGameServerClusterResponse
)
assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_preview_delete_game_server_cluster_async_from_dict():
await test_preview_delete_game_server_cluster_async(request_type=dict)
def test_preview_delete_game_server_cluster_field_headers():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.PreviewDeleteGameServerClusterRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_delete_game_server_cluster), "__call__"
) as call:
call.return_value = (
game_server_clusters.PreviewDeleteGameServerClusterResponse()
)
client.preview_delete_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_preview_delete_game_server_cluster_field_headers_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.PreviewDeleteGameServerClusterRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_delete_game_server_cluster), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
game_server_clusters.PreviewDeleteGameServerClusterResponse()
)
await client.preview_delete_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.parametrize(
"request_type", [game_server_clusters.UpdateGameServerClusterRequest, dict,]
)
def test_update_game_server_cluster(request_type, transport: str = "grpc"):
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.update_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.UpdateGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_update_game_server_cluster_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_game_server_cluster), "__call__"
) as call:
client.update_game_server_cluster()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.UpdateGameServerClusterRequest()
@pytest.mark.asyncio
async def test_update_game_server_cluster_async(
transport: str = "grpc_asyncio",
request_type=game_server_clusters.UpdateGameServerClusterRequest,
):
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.update_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.UpdateGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_game_server_cluster_async_from_dict():
await test_update_game_server_cluster_async(request_type=dict)
def test_update_game_server_cluster_field_headers():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.UpdateGameServerClusterRequest()
request.game_server_cluster.name = "game_server_cluster.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_game_server_cluster), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.update_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"game_server_cluster.name=game_server_cluster.name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_game_server_cluster_field_headers_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.UpdateGameServerClusterRequest()
request.game_server_cluster.name = "game_server_cluster.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_game_server_cluster), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.update_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"game_server_cluster.name=game_server_cluster.name/value",
) in kw["metadata"]
def test_update_game_server_cluster_flattened():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_game_server_cluster(
game_server_cluster=game_server_clusters.GameServerCluster(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].game_server_cluster
mock_val = game_server_clusters.GameServerCluster(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_game_server_cluster_flattened_error():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_game_server_cluster(
game_server_clusters.UpdateGameServerClusterRequest(),
game_server_cluster=game_server_clusters.GameServerCluster(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_game_server_cluster_flattened_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_game_server_cluster(
game_server_cluster=game_server_clusters.GameServerCluster(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].game_server_cluster
mock_val = game_server_clusters.GameServerCluster(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_game_server_cluster_flattened_error_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_game_server_cluster(
game_server_clusters.UpdateGameServerClusterRequest(),
game_server_cluster=game_server_clusters.GameServerCluster(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize(
"request_type", [game_server_clusters.PreviewUpdateGameServerClusterRequest, dict,]
)
def test_preview_update_game_server_cluster(request_type, transport: str = "grpc"):
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_update_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = game_server_clusters.PreviewUpdateGameServerClusterResponse(
etag="etag_value",
)
response = client.preview_update_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.PreviewUpdateGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, game_server_clusters.PreviewUpdateGameServerClusterResponse
)
assert response.etag == "etag_value"
def test_preview_update_game_server_cluster_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_update_game_server_cluster), "__call__"
) as call:
client.preview_update_game_server_cluster()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.PreviewUpdateGameServerClusterRequest()
@pytest.mark.asyncio
async def test_preview_update_game_server_cluster_async(
transport: str = "grpc_asyncio",
request_type=game_server_clusters.PreviewUpdateGameServerClusterRequest,
):
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_update_game_server_cluster), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
game_server_clusters.PreviewUpdateGameServerClusterResponse(
etag="etag_value",
)
)
response = await client.preview_update_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == game_server_clusters.PreviewUpdateGameServerClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, game_server_clusters.PreviewUpdateGameServerClusterResponse
)
assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_preview_update_game_server_cluster_async_from_dict():
await test_preview_update_game_server_cluster_async(request_type=dict)
def test_preview_update_game_server_cluster_field_headers():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.PreviewUpdateGameServerClusterRequest()
request.game_server_cluster.name = "game_server_cluster.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_update_game_server_cluster), "__call__"
) as call:
call.return_value = (
game_server_clusters.PreviewUpdateGameServerClusterResponse()
)
client.preview_update_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"game_server_cluster.name=game_server_cluster.name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_preview_update_game_server_cluster_field_headers_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = game_server_clusters.PreviewUpdateGameServerClusterRequest()
request.game_server_cluster.name = "game_server_cluster.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.preview_update_game_server_cluster), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
game_server_clusters.PreviewUpdateGameServerClusterResponse()
)
await client.preview_update_game_server_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"game_server_cluster.name=game_server_cluster.name/value",
) in kw["metadata"]
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.GameServerClustersServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.GameServerClustersServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = GameServerClustersServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.GameServerClustersServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = GameServerClustersServiceClient(
client_options=options, transport=transport,
)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = GameServerClustersServiceClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.GameServerClustersServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = GameServerClustersServiceClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.GameServerClustersServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = GameServerClustersServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.GameServerClustersServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.GameServerClustersServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.GameServerClustersServiceGrpcTransport,
transports.GameServerClustersServiceGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport, transports.GameServerClustersServiceGrpcTransport,
)
def test_game_server_clusters_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.GameServerClustersServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_game_server_clusters_service_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.gaming_v1beta.services.game_server_clusters_service.transports.GameServerClustersServiceTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.GameServerClustersServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"list_game_server_clusters",
"get_game_server_cluster",
"create_game_server_cluster",
"preview_create_game_server_cluster",
"delete_game_server_cluster",
"preview_delete_game_server_cluster",
"update_game_server_cluster",
"preview_update_game_server_cluster",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
def test_game_server_clusters_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.gaming_v1beta.services.game_server_clusters_service.transports.GameServerClustersServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.GameServerClustersServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_game_server_clusters_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.gaming_v1beta.services.game_server_clusters_service.transports.GameServerClustersServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.GameServerClustersServiceTransport()
adc.assert_called_once()
def test_game_server_clusters_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
GameServerClustersServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.GameServerClustersServiceGrpcTransport,
transports.GameServerClustersServiceGrpcAsyncIOTransport,
],
)
def test_game_server_clusters_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.GameServerClustersServiceGrpcTransport, grpc_helpers),
(transports.GameServerClustersServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_game_server_clusters_service_transport_create_channel(
transport_class, grpc_helpers
):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"gameservices.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="gameservices.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[
transports.GameServerClustersServiceGrpcTransport,
transports.GameServerClustersServiceGrpcAsyncIOTransport,
],
)
def test_game_server_clusters_service_grpc_transport_client_cert_source_for_mtls(
transport_class,
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_game_server_clusters_service_host_no_port():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="gameservices.googleapis.com"
),
)
assert client.transport._host == "gameservices.googleapis.com:443"
def test_game_server_clusters_service_host_with_port():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="gameservices.googleapis.com:8000"
),
)
assert client.transport._host == "gameservices.googleapis.com:8000"
def test_game_server_clusters_service_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.GameServerClustersServiceGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_game_server_clusters_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.GameServerClustersServiceGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.GameServerClustersServiceGrpcTransport,
transports.GameServerClustersServiceGrpcAsyncIOTransport,
],
)
def test_game_server_clusters_service_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.GameServerClustersServiceGrpcTransport,
transports.GameServerClustersServiceGrpcAsyncIOTransport,
],
)
def test_game_server_clusters_service_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_game_server_clusters_service_grpc_lro_client():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
# Ensure that we have a api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_game_server_clusters_service_grpc_lro_async_client():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
# Ensure that we have a api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_game_server_cluster_path():
project = "squid"
location = "clam"
realm = "whelk"
cluster = "octopus"
expected = "projects/{project}/locations/{location}/realms/{realm}/gameServerClusters/{cluster}".format(
project=project, location=location, realm=realm, cluster=cluster,
)
actual = GameServerClustersServiceClient.game_server_cluster_path(
project, location, realm, cluster
)
assert expected == actual
def test_parse_game_server_cluster_path():
expected = {
"project": "oyster",
"location": "nudibranch",
"realm": "cuttlefish",
"cluster": "mussel",
}
path = GameServerClustersServiceClient.game_server_cluster_path(**expected)
# Check that the path construction is reversible.
actual = GameServerClustersServiceClient.parse_game_server_cluster_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "winkle"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = GameServerClustersServiceClient.common_billing_account_path(
billing_account
)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nautilus",
}
path = GameServerClustersServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = GameServerClustersServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "scallop"
expected = "folders/{folder}".format(folder=folder,)
actual = GameServerClustersServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "abalone",
}
path = GameServerClustersServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = GameServerClustersServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "squid"
expected = "organizations/{organization}".format(organization=organization,)
actual = GameServerClustersServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "clam",
}
path = GameServerClustersServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = GameServerClustersServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "whelk"
expected = "projects/{project}".format(project=project,)
actual = GameServerClustersServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "octopus",
}
path = GameServerClustersServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = GameServerClustersServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "oyster"
location = "nudibranch"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = GameServerClustersServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "cuttlefish",
"location": "mussel",
}
path = GameServerClustersServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = GameServerClustersServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.GameServerClustersServiceTransport, "_prep_wrapped_messages"
) as prep:
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.GameServerClustersServiceTransport, "_prep_wrapped_messages"
) as prep:
transport_class = GameServerClustersServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = GameServerClustersServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = GameServerClustersServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(
GameServerClustersServiceClient,
transports.GameServerClustersServiceGrpcTransport,
),
(
GameServerClustersServiceAsyncClient,
transports.GameServerClustersServiceGrpcAsyncIOTransport,
),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
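# Note on running these tests: they are ordinary pytest tests, and the
# coroutines above are marked with @pytest.mark.asyncio, so a pytest asyncio
# plugin (e.g. pytest-asyncio) is assumed to be installed. A sketch of an
# invocation (the file path is hypothetical):
#
#   pytest -q tests/unit/gapic/gaming_v1beta/test_game_server_clusters_service.py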
|
__version__ = "0.7.1"
from .vault_settings import VaultParameterError, vault_config_settings_source
__all__ = ["vault_config_settings_source", "VaultParameterError"]
|
# Auto-generated at 2021-09-27T17:01:26.691956+08:00
# from: Justice Lobby Service (1.33.0)
# Copyright (c) 2018 - 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HttpResponse
from ...models import ModelFreeFormNotificationRequest
from ...models import RestapiErrorResponseBody
class FreeFormNotification(Operation):
"""send freeform notification to a user (freeFormNotification)
Properties:
url: /notification/namespaces/{namespace}/freeform
method: POST
tags: notification
consumes: ["application/json"]
produces: ["application/json"]
security: bearer
body: (body) REQUIRED ModelFreeFormNotificationRequest in body
namespace: (namespace) REQUIRED str in path
Responses:
202: Accepted - (Accepted)
400: Bad Request - RestapiErrorResponseBody (Bad Request)
401: Unauthorized - RestapiErrorResponseBody (Unauthorized)
403: Forbidden - RestapiErrorResponseBody (Forbidden)
404: Not Found - RestapiErrorResponseBody (Not Found)
"""
# region fields
_url: str = "/notification/namespaces/{namespace}/freeform"
_method: str = "POST"
_consumes: List[str] = ["application/json"]
_produces: List[str] = ["application/json"]
_security: Optional[str] = "bearer"
    _location_query: Optional[str] = None
body: ModelFreeFormNotificationRequest # REQUIRED in [body]
namespace: str # REQUIRED in [path]
# endregion fields
# region properties
@property
def url(self) -> str:
return self._url
@property
def method(self) -> str:
return self._method
@property
def consumes(self) -> List[str]:
return self._consumes
@property
def produces(self) -> List[str]:
return self._produces
@property
def security(self) -> Optional[str]:
return self._security
@property
    def location_query(self) -> Optional[str]:
return self._location_query
# endregion properties
# region get methods
def get_full_url(self, base_url: Union[None, str] = None) -> str:
result = base_url if base_url is not None else ""
# path params
url = self.url
for k, v in self.get_path_params().items():
url = url.replace(f"{{{k}}}", v)
result += url
return result
# noinspection PyMethodMayBeStatic
def get_all_required_fields(self) -> List[str]:
return [
"body",
"namespace",
]
# endregion get methods
# region get_x_params methods
def get_all_params(self) -> dict:
return {
"body": self.get_body_params(),
"path": self.get_path_params(),
}
def get_body_params(self) -> Any:
return self.body.to_dict()
def get_path_params(self) -> dict:
result = {}
if hasattr(self, "namespace"):
result["namespace"] = self.namespace
return result
# endregion get_x_params methods
# region is/has methods
def is_valid(self) -> bool:
if not hasattr(self, "body") or self.body is None:
return False
if not hasattr(self, "namespace") or self.namespace is None:
return False
return True
# endregion is/has methods
# region with_x methods
def with_body(self, value: ModelFreeFormNotificationRequest) -> FreeFormNotification:
self.body = value
return self
def with_namespace(self, value: str) -> FreeFormNotification:
self.namespace = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result = {}
if hasattr(self, "body") and self.body:
result["body"] = self.body.to_dict(include_empty=include_empty)
elif include_empty:
result["body"] = ModelFreeFormNotificationRequest()
if hasattr(self, "namespace") and self.namespace:
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = str()
return result
# endregion to methods
# region response methods
# noinspection PyMethodMayBeStatic
def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[Union[None, HttpResponse], Union[None, RestapiErrorResponseBody]]:
"""Parse the given response.
202: Accepted - (Accepted)
400: Bad Request - RestapiErrorResponseBody (Bad Request)
401: Unauthorized - RestapiErrorResponseBody (Unauthorized)
403: Forbidden - RestapiErrorResponseBody (Forbidden)
404: Not Found - RestapiErrorResponseBody (Not Found)
"""
if code == 202:
return HttpResponse.create(code, "Accepted"), None
if code == 400:
return None, RestapiErrorResponseBody.create_from_dict(content)
if code == 401:
return None, RestapiErrorResponseBody.create_from_dict(content)
if code == 403:
return None, RestapiErrorResponseBody.create_from_dict(content)
if code == 404:
return None, RestapiErrorResponseBody.create_from_dict(content)
was_handled, undocumented_response = HttpResponse.try_create_undocumented_response(code, content)
if was_handled:
return None, undocumented_response
return None, HttpResponse.create_unhandled_error()
# endregion response methods
# region static methods
@classmethod
def create(
cls,
body: ModelFreeFormNotificationRequest,
namespace: str,
) -> FreeFormNotification:
instance = cls()
instance.body = body
instance.namespace = namespace
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> FreeFormNotification:
instance = cls()
if "body" in dict_ and dict_["body"] is not None:
instance.body = ModelFreeFormNotificationRequest.create_from_dict(dict_["body"], include_empty=include_empty)
elif include_empty:
instance.body = ModelFreeFormNotificationRequest()
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = str()
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"body": "body",
"namespace": "namespace",
}
# endregion static methods
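# A minimal usage sketch (not part of the generated operation): it assumes the
# surrounding SDK exposes some way to execute an Operation; "run_request" below
# is a hypothetical helper, and the body fields are assumptions about
# ModelFreeFormNotificationRequest rather than its real schema.
def _example_free_form_notification(run_request, namespace: str):
    body = ModelFreeFormNotificationRequest.create_from_dict(
        {"message": "Server maintenance at 02:00 UTC", "topic": "announcement"}
    )
    operation = FreeFormNotification.create(body=body, namespace=namespace)
    # run_request(operation) is expected to perform the HTTP POST and return
    # whatever Operation.parse_response() yields for the given status code.
    return run_request(operation)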
|
#!/usr/bin/env python
from Bio import SeqIO
import argparse
import pathlib
def get_arguments():
parser = argparse.ArgumentParser(description='Extract CDS from a genbank to output a fasta',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('input', type=str,
help='Path to input genbank file')
parser.add_argument('output', type=str,help='Path to put file/folder output')
parser.add_argument('-i', '--ignore', type=str, metavar = 'KEY', default=None, help="if 'key' matches a CDS name it won't be included in the output")
parser.add_argument('-m', '--multi', action ='store_true', help = "Specify if the input file is a multigenbank, in which case the CDS of each entry would be extracted in a different fasta file in an output directory in the specified output path")
args = parser.parse_args()
return args
def get_features(record, key):
cds = {}
    if key is None:
for i,ft in enumerate(record.features):
if ft.type == "CDS":
if "gene" in ft.qualifiers.keys():
gene = ft.qualifiers["gene"][0]
cds[gene] = ft.extract(record)
else:
for i,ft in enumerate(record.features):
if ft.type == "CDS":
if "gene" in ft.qualifiers.keys():
if key not in ft.qualifiers["gene"][0]:
gene = ft.qualifiers["gene"][0]
cds[gene] = ft.extract(record)
return cds
def reformat(cds):
for gene, record in cds.items():
record.id = gene
record.description = ""
return cds
def main():
args = get_arguments()
#if args.ignore == None:
# args.ignore == ""
if args.multi is True:
recs = SeqIO.parse(args.input,"gb")
taxa = {}
for rec in recs:
specie = rec.annotations["organism"].replace(" ","_")
taxa[specie] = reformat(get_features(rec, args.ignore))
## Create directory
pathlib.Path(args.output.rstrip("/")+'/extract_cds_output').mkdir(parents=True, exist_ok=True)
## Write fastas
for specie, genes in taxa.items():
filepath = args.output.rstrip("/")+'/extract_cds_output'+"/"+specie+".fasta"
SeqIO.write(genes.values(),filepath,"fasta")
else:
rec = SeqIO.read(args.input, "gb")
aux = get_features(rec, args.ignore)
cds = reformat(aux)
## Write filenames
filename = args.output.strip("/")
# filename = args.output.strip("/") + "/" + rec.annotations["organism"].replace(" ","_") + ".fasta"
SeqIO.write(cds.values(), filename, "fasta")
if __name__ == '__main__':
main()
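# Example invocations (a sketch; "extract_cds.py" and the file names are
# placeholders):
#
#   # single GenBank record -> one fasta containing every named CDS
#   python extract_cds.py record.gb output.fasta
#
#   # multi-record GenBank -> one fasta per organism under
#   # <output>/extract_cds_output/, skipping CDS whose gene name contains "orf"
#   python extract_cds.py records.gb output_dir -m -i orf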
|
import logging
import pandas as pd
from stock.models import MyStock
from stock.models import ValuationRatio
from yahooquery import Ticker
logger = logging.getLogger("stock")
class MyValuationRatio:
def __init__(self, symbol):
self.stock = MyStock.objects.get(symbol=symbol)
def get(self):
s = Ticker(self.stock.symbol, timeout=15)
# all numbers convert to million
df = s.valuation_measures
if "unavailable" in df or "error" in df:
logger.error("{}: {}".format(self.stock.symbol, df))
return
# DB doesn't like NaN
df = df.where(pd.notnull(df), 0)
mapping = {
"forward_pe": "ForwardPeRatio",
"pb": "PbRatio",
"pe": "PeRatio",
"peg": "PegRatio",
"ps": "PsRatio",
}
# enumerate data frame
for row in df.itertuples(index=False):
i, created = ValuationRatio.objects.get_or_create(
stock=self.stock, on=row.asOfDate.date()
)
for key, val in mapping.items():
try:
tmp = float(getattr(row, val))
except AttributeError:
tmp = 0
# set value
setattr(i, key, tmp)
i.save()
# if all values are 0, discard the record
ValuationRatio.objects.filter(
forward_pe=0, pb=0, pe=0, peg=0, ps=0
).delete()
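# A rough usage sketch (assumes Django is configured and a MyStock row with
# this symbol already exists; "MSFT" is only an example):
#
#   ratio = MyValuationRatio("MSFT")
#   ratio.get()  # pulls Ticker.valuation_measures and upserts ValuationRatio rows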
|
from six.moves.urllib_parse import urlparse
import chardet
import itertools
import json
import os
import re
import six
from collections import OrderedDict, namedtuple
from itertools import chain
from scrapely.htmlpage import HtmlPage, HtmlTagType
from scrapy.utils.misc import load_object
from w3lib.encoding import html_body_declared_encoding
TAGID = u"data-tagid"
GENERATEDTAGID = u"data-genid"
OPEN_TAG = HtmlTagType.OPEN_TAG
CLOSE_TAG = HtmlTagType.CLOSE_TAG
UNPAIRED_TAG = HtmlTagType.UNPAIRED_TAG
# Encodings: https://w3techs.com/technologies/overview/character_encoding/all
ENCODINGS = ['UTF-8', 'ISO-8859-1', 'Windows-1251', 'Shift JIS',
'Windows-1252', 'GB2312', 'EUC-KR', 'EUC-JP', 'GBK', 'ISO-8859-2',
'Windows-1250', 'ISO-8859-15', 'Windows-1256', 'ISO-8859-9',
'Big5', 'Windows-1254', 'Windows-874']
MimeType = namedtuple('MimeType', ['type', 'maintype', 'subtype', 'params'])
def content_type(response):
full_content_type = decode(response.headers.get('Content-Type') or u'')
type_ = full_content_type.split(';', 1)
split = type_[0].split('/', 1)
if len(split) < 2:
        maintype = type_[0]
subtype = ''
else:
maintype, subtype = split
# Parse params if needed
return MimeType(full_content_type, maintype, subtype, [])
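# Illustrative result for a typical header (a sketch; note that parameters
# after ";" are currently never parsed and params stays an empty list):
#
#   >>> class _FakeResponse:
#   ...     headers = {'Content-Type': b'text/html; charset=utf-8'}
#   >>> content_type(_FakeResponse())
#   MimeType(type='text/html; charset=utf-8', maintype='text', subtype='html', params=[])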
def encode(html, default=None):
if isinstance(html, six.binary_type):
return html
return _encode_or_decode_string(html, type(html).encode, default)
def decode(html, default=None):
if isinstance(html, six.text_type):
return html
return _encode_or_decode_string(html, type(html).decode, default)
def _encode_or_decode_string(html, method, default):
if not default:
encoding = html_body_declared_encoding(html)
if encoding:
default = [encoding]
else:
default = []
elif isinstance(default, six.string_types):
default = [default]
for encoding in itertools.chain(default, ENCODINGS):
try:
return method(html, encoding)
except (UnicodeDecodeError, UnicodeEncodeError, LookupError):
pass
except AttributeError:
return html
encoding = chardet.detect(html).get('encoding')
return method(html, encoding)
def iter_unique_scheme_hostname(urls):
"""Return an iterator of tuples (scheme, hostname) over the given urls,
filtering dupes
"""
scheme_hostname = set()
for x in urls:
p = urlparse(x)
scheme_hostname.add((p.scheme, p.hostname))
return list(scheme_hostname)
def open_project_from_dir(project_dir):
storage = Storage(project_dir)
specs = {"spiders": SpiderLoader(storage)}
for name in ['project', 'items', 'extractors']:
try:
specs[name] = storage.open('{}.json'.format(name))
except IOError:
specs[name] = {}
return specs
def read(fp, encoding='utf-8'):
content = fp.read()
if hasattr(content, 'decode'):
        content = content.decode(encoding)
return content
def _build_sample(sample, legacy=False):
from slybot.plugins.scrapely_annotations.builder import Annotations
Annotations(sample, legacy=legacy).build()
sample['annotated'] = True
return sample
def htmlpage_from_response(response, _add_tagids=False):
body = response.body_as_unicode()
if _add_tagids:
body = add_tagids(body)
return HtmlPage(response.url, response.headers, body,
encoding=response.encoding)
def load_plugins(settings):
if settings.get('LOADED_PLUGINS', None):
return settings.get('LOADED_PLUGINS', None)
plugins = settings['PLUGINS']
if plugins:
return [load_object(p) if isinstance(p, str) else p for p in plugins]
else:
from slybot.plugins.scrapely_annotations import Annotations
return [Annotations]
def load_plugin_names(settings):
"""
Generate a unique name for a plugin based on the class name module name
and path
>>> settings = {'PLUGINS': ['a', 'b.c', 'a.c']}
>>> load_plugin_names(settings)
['a', 'c', 'a.c']
"""
seen = set()
def generate_name(path, maxsplit=0, splits=None):
if splits is None:
splits = len(path.split('.')) - 1
name = '.'.join(path.split('.', splits - maxsplit)[-1].rsplit('.',
maxsplit))
if name not in seen or maxsplit >= splits:
seen.add(name)
return name
return generate_name(path, maxsplit + 1, splits)
if settings['PLUGINS']:
return [generate_name(path) for path in settings['PLUGINS']]
else:
return ['Annotations']
def include_exclude_filter(include_patterns, exclude_patterns):
filterf = None
includef = None
if include_patterns:
pattern = include_patterns[0] if len(include_patterns) == 1 else \
"(?:%s)" % '|'.join(include_patterns)
includef = re.compile(pattern).search
filterf = includef
if exclude_patterns:
pattern = exclude_patterns[0] if len(exclude_patterns) == 1 else \
"(?:%s)" % '|'.join(exclude_patterns)
excludef = re.compile(pattern).search
if not includef:
filterf = lambda x: not excludef(x)
else:
filterf = lambda x: includef(x) and not excludef(x)
return filterf if filterf else bool
class IndexedDict(OrderedDict):
"""
Ordered dictionary where values can also be obtained by their index as if
they were in a list
>>> idd = IndexedDict([('spam', 1), ('eggs', 2), ('bacon', 3)])
>>> idd['spam']
1
>>> idd[0]
1
>>> idd['bacon']
3
>>> idd[2]
3
>>> idd[2] = 'ham'
Traceback (most recent call last):
...
    TypeError: keys must not be integers
>>> idd[3]
Traceback (most recent call last):
...
IndexError: index out of range
"""
def __setitem__(self, key, value):
if isinstance(key, int):
            raise TypeError("keys must not be integers")
super(IndexedDict, self).__setitem__(key, value)
def __getitem__(self, key):
if isinstance(key, int):
if key >= len(self):
raise IndexError('index out of range')
for i, k in enumerate(self):
if i == key:
key = k
break
return super(IndexedDict, self).__getitem__(key)
def _quotify(mystr):
"""
quotifies an html tag attribute value.
Assumes then, that any ocurrence of ' or " in the
string is escaped if original string was quoted
with it.
So this function does not altere the original string
except for quotation at both ends, and is limited just
to guess if string must be quoted with '"' or "'"
"""
quote = '"'
l = len(mystr)
for i in range(l):
if mystr[i] == "\\" and i + 1 < l and mystr[i + 1] == "'":
quote = "'"
break
elif mystr[i] == "\\" and i + 1 < l and mystr[i + 1] == '"':
quote = '"'
break
elif mystr[i] == "'":
quote = '"'
break
elif mystr[i] == '"':
quote = "'"
break
return quote + mystr + quote
def serialize_tag(tag):
"""
Converts a tag into a string when a slice [tag.start:tag.end]
over the source can't be used because tag has been modified
"""
out = "<"
if tag.tag_type == HtmlTagType.CLOSE_TAG:
out += "/"
out += tag.tag
attributes = []
for key, val in tag.attributes.items():
aout = key
if val is not None:
aout += "=" + _quotify(val)
attributes.append(aout)
if attributes:
out += " " + " ".join(attributes)
if tag.tag_type == HtmlTagType.UNPAIRED_TAG:
out += "/"
return out + ">"
def _must_add_tagid(element):
return (hasattr(element, 'tag_type') and
hasattr(element, 'tag') and
element.tag_type != CLOSE_TAG and
element.tag != 'ins')
def _modify_tagids(source, add=True):
"""Add or remove tags ids to/from HTML document"""
output = []
tagcount = 0
if not isinstance(source, HtmlPage):
source = HtmlPage(body=source)
for element in source.parsed_body:
if _must_add_tagid(element):
if add:
element.attributes[TAGID] = str(tagcount)
tagcount += 1
else: # Remove previously added tagid
element.attributes.pop(TAGID, None)
output.append(serialize_tag(element))
else:
output.append(source.body[element.start:element.end])
return u''.join(output)
def add_tagids(source):
"""
Applies a unique attribute code number for each tag element in order to be
identified later in the process of apply annotation"""
return _modify_tagids(source, True)
def remove_tagids(source):
"""remove from the given page, all tagids previously added by add_tagids()
"""
return _modify_tagids(source, False)
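# Rough illustration of the tagid round-trip (the exact serialization of
# attributes may differ slightly from this sketch):
#
#   >>> add_tagids(u'<div><p>hi</p></div>')
#   u'<div data-tagid="0"><p data-tagid="1">hi</p></div>'
#   >>> remove_tagids(add_tagids(u'<div><p>hi</p></div>'))
#   u'<div><p>hi</p></div>'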
class Storage(object):
def __init__(self, base_path):
self.base_path = os.path.abspath(base_path)
def rel_path(self, *args):
return os.sep.join(args)
def _path(self, *args):
return os.path.join(self.base_path, self.rel_path(*args))
def isdir(self, *args, **kwargs):
return os.path.isdir(self._path(*args), **kwargs)
def listdir(self, *args, **kwargs):
return os.listdir(self._path(*args), **kwargs)
def open(self, *args, **kwargs):
"""Open files from filesystem."""
raw = kwargs.pop('raw', False)
        with open(self._path(*args), encoding='utf-8') as f:
return decode(f.read()) if raw else json.load(f)
class SpiderLoader(object):
def __init__(self, storage):
if isinstance(storage, six.string_types):
self.storage = Storage(storage)
else:
fsattrs = ['isdir', 'listdir', 'open', 'rel_path']
if any(not hasattr(storage, attr) for attr in fsattrs):
raise TypeError('Storage class must have "{}" methods'.format(
'", "'.join(fsattrs)))
self.storage = storage
self.spider_dir = self.storage.rel_path('spiders')
self.spider_names = {
s[:-len('.json')] for s in self.storage.listdir(self.spider_dir)
if s.endswith('.json')
}
self._spiders = {}
def __getitem__(self, key):
if key not in self.spider_names:
raise KeyError('The spider "{}" does not exist'.format(key))
if key not in self._spiders:
self._spiders[key] = self.load_spider(key)
return self._spiders[key]
def load_spider(self, spider_name):
spec = self.storage.open(self.spider_dir,
'{}.json'.format(spider_name))
try:
if spec.get('templates'):
templates = []
for template in spec.get('templates', []):
if template.get('version', '') < '0.13.0':
templates.append(template)
else:
templates.append(_build_sample(template))
spec['templates'] = templates
else:
templates = self.load_external_templates(self.spider_dir,
spider_name)
spec.setdefault("templates", []).extend(templates)
return spec
except ValueError as e:
raise ValueError(
"Error parsing spider (invalid JSON): %s: %s" %
(spider_name, e)
)
def keys(self):
for spider_name in self.spider_names:
yield spider_name
def items(self):
spiders = chain(self._spiders, self.spider_names - set(self._spiders))
for spider_name in spiders:
yield spider_name, self[spider_name]
def values(self):
for _, spider in self.items():
yield spider
def load_external_templates(self, spec_base, spider_name):
"""A generator yielding the content of all passed `template_names` for
`spider_name`.
"""
spider_dir = self.storage.rel_path('spiders', spider_name)
if not self.storage.isdir(spider_dir):
            return
for name in self.storage.listdir(spider_dir):
if not name.endswith('.json'):
continue
path = self.storage.rel_path(spider_dir, name)
sample = self.storage.open(path)
if not sample:
continue
sample_dir = path[:-len('.json')]
if self.storage.isdir(sample_dir):
for fname in self.storage.listdir(sample_dir):
if fname.endswith('.html'):
attr = fname[:-len('.html')]
html = self.storage.open(sample_dir, fname, raw=1)
sample[attr] = html
if 'original_body' not in sample:
sample['original_body'] = u'<html></html>'
version = sample.get('version', '')
yield _build_sample(sample, legacy=version < '0.13.0')
|
# author: Justus Schock (justus.schock@rwth-aachen.de)
import torch
class CustomGroupNorm(torch.nn.Module):
"""
Custom Group Norm which adds n_groups=2 as default parameter
"""
def __init__(self, n_features, n_groups=2):
"""
Parameters
----------
n_features : int
number of input features
n_groups : int
number of normalization groups
"""
super().__init__()
self.norm = torch.nn.GroupNorm(n_groups, n_features)
def forward(self, x):
"""
Forward batch through network
Parameters
----------
x : :class:`torch.Tensor`
batch to forward
Returns
-------
:class:`torch.Tensor`
normalized results
"""
return self.norm(x)
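# A small usage sketch (not part of the original module): normalize a batch of
# 8 feature maps with 16 channels, split into the default 2 groups.
def _example_custom_group_norm():
    x = torch.randn(8, 16, 32, 32)
    norm = CustomGroupNorm(n_features=16)
    return norm(x)  # same shape as the input: torch.Size([8, 16, 32, 32])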
|
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from subprocess import check_call, CalledProcessError
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource,
filter_installed_packages,
apt_install,
)
if filter_installed_packages(['git']) != []:
apt_install(['git'])
if filter_installed_packages(['git']) != []:
raise NotImplementedError('Unable to install git')
class GitUrlFetchHandler(BaseFetchHandler):
"""Handler for git branches via generic and github URLs"""
def can_handle(self, source):
url_parts = self.parse_url(source)
# TODO (mattyw) no support for ssh git@ yet
if url_parts.scheme not in ('http', 'https', 'git', ''):
return False
elif not url_parts.scheme:
return os.path.exists(os.path.join(source, '.git'))
else:
return True
def clone(self, source, dest, branch="master", depth=None):
if not self.can_handle(source):
raise UnhandledSource("Cannot handle {}".format(source))
if os.path.exists(dest):
cmd = ['git', '-C', dest, 'pull', source, branch]
else:
cmd = ['git', 'clone', source, dest, '--branch', branch]
if depth:
                cmd.extend(['--depth', str(depth)])
check_call(cmd)
def install(self, source, branch="master", dest=None, depth=None):
url_parts = self.parse_url(source)
branch_name = url_parts.path.strip("/").split("/")[-1]
if dest:
dest_dir = os.path.join(dest, branch_name)
else:
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
branch_name)
try:
self.clone(source, dest_dir, branch, depth)
except CalledProcessError as e:
raise UnhandledSource(e)
except OSError as e:
raise UnhandledSource(e.strerror)
return dest_dir
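# Usage sketch (the repository URL is only an example; when dest is omitted,
# install() clones into $CHARM_DIR/fetched/<branch_name>):
#
#   handler = GitUrlFetchHandler()
#   if handler.can_handle("https://github.com/juju/charm-helpers"):
#       path = handler.install("https://github.com/juju/charm-helpers",
#                              branch="master", depth=1)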
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Timed calls to subprocess, so that real execution times can be obtained.
"""
__author__ = "Miguel Hernández Cabronero <miguel.hernandez@uab.cat>"
__date__ = "23/05/2020"
import os
import subprocess
import re
import time
import platform
import shutil
class InvocationError(Exception):
"""Raised when an invocation fails.
"""
pass
def get_status_output_time(invocation, expected_status_value=0, wall=False):
"""Run invocation, and return its status, output, and total
(wall or user+system) time in seconds.
:param expected_status_value: if not None, status must be equal to this value or
an InvocationError is raised.
:param wall: if True, execution wall time is returned. Otherwise, user+system CPU time is returned.
(both in seconds).
:return: status, output, time
"""
if "Darwin" in platform.system():
time_command = "/usr/local/bin/gtime"
else:
time_command = "/usr/bin/time"
if os.path.isfile(time_command):
invocation = f"{time_command} -f 'u%U@s%S' {invocation}"
else:
invocation = f"{invocation}"
wall = True
wall_time_before = time.time()
status, output = subprocess.getstatusoutput(invocation)
wall_time_after = time.time()
output_lines = output.splitlines()
output = "\n".join(output_lines[:-1] if not wall else output_lines)
if expected_status_value is not None and status != expected_status_value:
        raise InvocationError(
            f"status={status} != {expected_status_value}.\n"
            f"Input=[{invocation}].\nOutput=[{output}]")
if wall:
measured_time = wall_time_after - wall_time_before
else:
m = re.fullmatch(r"u(\d+\.\d+)@s(\d+\.\d+)", output_lines[-1])
if m is not None:
measured_time = float(m.group(1)) + float(m.group(2))
else:
raise InvocationError(f"Output {output_lines} did not contain a valid time signature")
return status, output, measured_time
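# Example (a sketch; the measured time depends on the machine and on whether
# GNU time is available at the paths probed above):
#
#   status, output, seconds = get_status_output_time("sleep 1", wall=True)
#   assert status == 0 and seconds >= 1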
|
def _close_(fh):
"""Implementation of perl close"""
global AUTODIE, TRACEBACK, OS_ERROR, TRACE_RUN
try:
if hasattr(fh, '_sp'): # issue 72: subprocess
fh.flush()
fh._sp.communicate()
if TRACE_RUN:
sp = subprocess.CompletedProcess(f"open({fh._file})", fh._sp.returncode)
_carp(f'trace close({fh._file}): {repr(sp)}', skip=2)
fh.close()
if fh._sp.returncode:
raise IOError(f"close({fh._file}): failed with {fh._sp.returncode}")
return 1
if fh is None:
raise TypeError(f"close(None): failed")
#if WARNING and fh.closed:
#_carp(f"close failed: Filehandle is already closed", skip=2)
fh.close()
return 1
except Exception as _e:
OS_ERROR = str(_e)
if TRACEBACK:
_cluck(OS_ERROR,skip=2)
if AUTODIE:
raise
return 0
|
import win32com.client as wc
from utils import vstr
from utils import vshort
from utils import vstrarr
from utils import check_error
from bc import SiebelBusObject
from ps import SiebelPropertySet
from bs import SiebelService
PROGID = 'SiebelDataServer.ApplicationObject'
class SiebelApplication(object):
def __init__(self, conf):
self._sa = wc.Dispatch(PROGID)
self._sa.LoadObjects(vstr(conf), vshort(0))
def getLastErrText(self):
return self._sa.GetLastErrText
@check_error
def getBusObject(self, name):
return SiebelBusObject(self._sa.GetBusObject(vstr(name), vshort(0)),
self._sa)
@check_error
def getProfileAttr(self, name):
return self._sa.GetProfileAttr(vstr(name), vshort(0))
@check_error
def getService(self, name):
return SiebelService(self._sa.GetService(vstr(name), vshort(0)),
self._sa)
@check_error
def getSharedGlobal(self, name):
return self._sa.GetSharedGlobal(vstr(name), vshort(0))
@check_error
def invokeMethod(self, methodName, *methodArgs):
return self._sa.InvokeMethod(vstr(methodName),
vstrarr(list(methodArgs)),
vshort(0))
@check_error
def currencyCode(self):
return self._sa.CurrencyCode(vshort(0))
@check_error
def login(self, login, password):
self._sa.Login(vstr(login), vstr(password), vshort(0))
@check_error
def loginId(self):
return self._sa.LoginId(vshort(0))
@check_error
def loginName(self):
return self._sa.LoginName(vshort(0))
@check_error
def newPropertySet(self):
return SiebelPropertySet(self._sa.NewPropertySet(vshort(0)), self._sa)
@check_error
def positionId(self):
return self._sa.PositionId(vshort(0))
@check_error
def positionName(self):
return self._sa.PositionName(vshort(0))
@check_error
def setPositionId(self, value):
self._sa.SetPositionId(vstr(value), vshort(0))
@check_error
def setPositionName(self, value):
self._sa.SetPositionName(vstr(value), vshort(0))
@check_error
def setProfileAttr(self, name, value):
self._sa.SetProfileAttr(vstr(name), vstr(value), vshort(0))
@check_error
def setSharedGlobal(self, name, value):
self._sa.SetSharedGlobal(vstr(name), vstr(value), vshort(0))
@check_error
def trace(self, msg):
self._sa.Trace(vstr(msg), vshort(0))
@check_error
def traceOff(self):
self._sa.TraceOff(vshort(0))
@check_error
def traceOn(self, file_name, category, source):
self._sa.TraceOn(vstr(file_name), vstr(
category), vstr(source), vshort(0))
def evalExpr(self, expr):
bo = self.getBusObject('Employee')
bc = bo.getBusComp('Employee')
return bc.invokeMethod('EvalExpr', expr)
def repositoryId(self):
return self.evalExpr("RepositoryId()")
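# Usage sketch (requires a Windows host with the Siebel data server COM object
# registered; the .cfg path and credentials below are placeholders):
#
#   app = SiebelApplication(r"C:\siebel\bin\enu\siebel.cfg")
#   app.login("SADMIN", "SADMIN")
#   bo = app.getBusObject("Contact")
#   print(app.repositoryId())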
|
from lxml import html
import requests
url = 'https://www.transfermarkt.com/ac-mailand/transfers/verein/5/saison_id/2017'
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:45.0) Gecko/20100101 Firefox/45.0'
}
page = requests.get(url, headers=headers)
tree = html.fromstring(page.content)
players = tree.xpath('//a[@class="spielprofil_tooltip"]/text()')
print('Players: ', players)
|
def for_O():
"""printing capital 'O' using for loop"""
for row in range(5):
for col in range(5):
            if col == 0 and row not in (0, 4) or col == 4 and row not in (0, 4) or row == 0 and col in (1, 2, 3) or row == 4 and col in (1, 2, 3):
print("*",end=" ")
else:
print(" ",end=" ")
print()
def while_O():
"""printing capital 'O' using while loop"""
i=0
while i<5:
j=0
while j<5:
            if j == 0 and i not in (0, 4) or i == 0 and j not in (0, 4) or i == 4 and j not in (0, 4) or j == 4 and i not in (0, 4):
print("*",end=" ")
else:
print(" ",end=" ")
j+=1
i+=1
print()
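# Both functions print roughly the following 5x5 pattern (each cell is padded
# to two characters by end=" ", so spacing below is approximate):
#
#     * * *
#   *       *
#   *       *
#   *       *
#     * * *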
|
#!/usr/bin/env python2
# -*- mode: python -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2016 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from bitcoinnano.plugins import BasePlugin, hook
from bitcoinnano.i18n import _
class HW_PluginBase(BasePlugin):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.device = self.keystore_class.device
self.keystore_class.plugin = self
def is_enabled(self):
return True
def device_manager(self):
return self.parent.device_manager
@hook
def close_wallet(self, wallet):
for keystore in wallet.get_keystores():
if isinstance(keystore, self.keystore_class):
self.device_manager().unpair_xpub(keystore.xpub)
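# A minimal sketch of what a derived hardware-wallet plugin is expected to
# provide (the names below are purely illustrative, not an actual plugin
# shipped with this module):
#
#   class ExampleKeyStore:
#       device = "Example Device"
#       plugin = None   # set by HW_PluginBase.__init__
#
#   class ExamplePlugin(HW_PluginBase):
#       keystore_class = ExampleKeyStore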
|
from __future__ import print_function
import os
import sys
import argparse
import torch
import torch.backends.cudnn as cudnn
import numpy as np
from data import cfg_mnet, cfg_re50
from layers.functions.prior_box import PriorBox
from utils.nms.py_cpu_nms import py_cpu_nms
import cv2
from models.retinaface import RetinaFace
from utils.box_utils import decode, decode_landm
from utils.timer import Timer
parser = argparse.ArgumentParser(description='Retinaface')
parser.add_argument('-m', '--trained_model', default='./weights/Resnet50_Final.pth',
type=str, help='Trained state_dict file path to open')
parser.add_argument('--network', default='resnet50', help='Backbone network mobile0.25 or resnet50')
parser.add_argument('--origin_size', default=True, type=str, help='Whether use origin image size to evaluate')
parser.add_argument('--save_folder', default='./widerface_evaluate/widerface_txt/', type=str, help='Dir to save txt results')
parser.add_argument('--cpu', action="store_true", default=False, help='Use cpu inference')
parser.add_argument('--dataset_folder', default='./data/widerface/widerface/val/images/', type=str, help='dataset path')
parser.add_argument('--confidence_threshold', default=0.02, type=float, help='confidence_threshold')
parser.add_argument('--top_k', default=5000, type=int, help='top_k')
parser.add_argument('--nms_threshold', default=0.4, type=float, help='nms_threshold')
parser.add_argument('--keep_top_k', default=750, type=int, help='keep_top_k')
parser.add_argument('-s', '--save_image', action="store_true", default=False, help='show detection results')
parser.add_argument('--vis_thres', default=0.5, type=float, help='visualization_threshold')
args = parser.parse_args()
def check_keys(model, pretrained_state_dict):
ckpt_keys = set(pretrained_state_dict.keys())
model_keys = set(model.state_dict().keys())
used_pretrained_keys = model_keys & ckpt_keys
unused_pretrained_keys = ckpt_keys - model_keys
missing_keys = model_keys - ckpt_keys
print('Missing keys:{}'.format(len(missing_keys)))
print('Unused checkpoint keys:{}'.format(len(unused_pretrained_keys)))
print('Used keys:{}'.format(len(used_pretrained_keys)))
assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint'
return True
def remove_prefix(state_dict, prefix):
''' Old style model is stored with all names of parameters sharing common prefix 'module.' '''
print('remove prefix \'{}\''.format(prefix))
f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
return {f(key): value for key, value in state_dict.items()}
def load_model(model, pretrained_path, load_to_cpu):
print('Loading pretrained model from {}'.format(pretrained_path))
if load_to_cpu:
pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
else:
device = torch.cuda.current_device()
pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device))
if "state_dict" in pretrained_dict.keys():
pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')
else:
pretrained_dict = remove_prefix(pretrained_dict, 'module.')
check_keys(model, pretrained_dict)
model.load_state_dict(pretrained_dict, strict=False)
return model
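# Example invocation of this evaluation script (a sketch; the script filename
# and paths are assumptions that follow the argparse defaults above):
#
#   python test_widerface.py --trained_model ./weights/Resnet50_Final.pth \
#       --network resnet50 \
#       --dataset_folder ./data/widerface/widerface/val/images/ \
#       --save_folder ./widerface_evaluate/widerface_txt/ --cpu -s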
if __name__ == '__main__':
torch.set_grad_enabled(False)
cfg = None
if args.network == "mobile0.25":
cfg = cfg_mnet
elif args.network == "resnet50":
cfg = cfg_re50
# net and model
net = RetinaFace(cfg=cfg, phase = 'test')
net = load_model(net, args.trained_model, args.cpu)
net.eval()
print('Finished loading model!')
print(net)
cudnn.benchmark = True
device = torch.device("cpu" if args.cpu else "cuda")
net = net.to(device)
# testing dataset
testset_folder = args.dataset_folder
    print(testset_folder)
testset_list = args.dataset_folder + "test_list.txt"
test_dataset = []
#print (testset_list)
with open(testset_list, 'r') as fr:
content = fr.readlines()
test_dataset = [line.strip() for line in content]
num_images = len(test_dataset)
    print(num_images)
_t = {'forward_pass': Timer(), 'misc': Timer()}
# testing begin
for i, img_name in enumerate(test_dataset):
image_path = testset_folder + img_name
        print(image_path)
img_raw = cv2.imread(image_path, cv2.IMREAD_COLOR)
img = np.float32(img_raw)
# testing scale
target_size = 1600
max_size = 2150
im_shape = img.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
resize = float(target_size) / float(im_size_min)
# prevent bigger axis from being more than max_size:
if np.round(resize * im_size_max) > max_size:
resize = float(max_size) / float(im_size_max)
if args.origin_size:
resize = 1
if resize != 1:
img = cv2.resize(img, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
im_height, im_width, _ = img.shape
scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
img -= (104, 117, 123)
img = img.transpose(2, 0, 1)
img = torch.from_numpy(img).unsqueeze(0)
img = img.to(device)
scale = scale.to(device)
_t['forward_pass'].tic()
loc, conf, landms = net(img) # forward pass
_t['forward_pass'].toc()
_t['misc'].tic()
priorbox = PriorBox(cfg, image_size=(im_height, im_width))
priors = priorbox.forward()
priors = priors.to(device)
prior_data = priors.data
boxes = decode(loc.data.squeeze(0), prior_data, cfg['variance'])
boxes = boxes * scale / resize
boxes = boxes.cpu().numpy()
scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
landms = decode_landm(landms.data.squeeze(0), prior_data, cfg['variance'])
scale1 = torch.Tensor([img.shape[3], img.shape[2], img.shape[3], img.shape[2],
img.shape[3], img.shape[2], img.shape[3], img.shape[2],
img.shape[3], img.shape[2]])
scale1 = scale1.to(device)
landms = landms * scale1 / resize
landms = landms.cpu().numpy()
# ignore low scores
inds = np.where(scores > args.confidence_threshold)[0]
boxes = boxes[inds]
landms = landms[inds]
scores = scores[inds]
# keep top-K before NMS
order = scores.argsort()[::-1]
# order = scores.argsort()[::-1][:args.top_k]
boxes = boxes[order]
landms = landms[order]
scores = scores[order]
# do NMS
dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
keep = py_cpu_nms(dets, args.nms_threshold)
# keep = nms(dets, args.nms_threshold,force_cpu=args.cpu)
dets = dets[keep, :]
landms = landms[keep]
# keep top-K faster NMS
# dets = dets[:args.keep_top_k, :]
# landms = landms[:args.keep_top_k, :]
dets = np.concatenate((dets, landms), axis=1)
_t['misc'].toc()
# --------------------------------------------------------------------
save_name = args.save_folder + img_name[:-4] + ".txt"
dirname = os.path.dirname(save_name)
if not os.path.isdir(dirname):
os.makedirs(dirname)
with open(save_name, "w") as fd:
bboxs = dets
file_name = os.path.basename(save_name)[:-4] + "\n"
bboxs_num = str(len(bboxs)) + "\n"
fd.write(file_name)
fd.write(bboxs_num)
for box in bboxs:
x = int(box[0])
y = int(box[1])
w = int(box[2]) - int(box[0])
h = int(box[3]) - int(box[1])
confidence = str(box[4])
line = str(x) + " " + str(y) + " " + str(w) + " " + str(h) + " " + confidence + " \n"
fd.write(line)
print('im_detect: {:d}/{:d} forward_pass_time: {:.4f}s misc: {:.4f}s'.format(i + 1, num_images, _t['forward_pass'].average_time, _t['misc'].average_time))
# save image
if args.save_image:
for b in dets:
if b[4] < args.vis_thres:
continue
text = "{:.4f}".format(b[4])
b = list(map(int, b))
cv2.rectangle(img_raw, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2)
cx = b[0]
cy = b[1] + 12
cv2.putText(img_raw, text, (cx, cy),
cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))
# landms
cv2.circle(img_raw, (b[5], b[6]), 1, (0, 0, 255), 4)
cv2.circle(img_raw, (b[7], b[8]), 1, (0, 255, 255), 4)
cv2.circle(img_raw, (b[9], b[10]), 1, (255, 0, 255), 4)
cv2.circle(img_raw, (b[11], b[12]), 1, (0, 255, 0), 4)
cv2.circle(img_raw, (b[13], b[14]), 1, (255, 0, 0), 4)
# save image
if not os.path.exists("./results_handtask/"):
os.makedirs("./results_handtask/")
name = "./results_handtask/%05d.jpg" % i
cv2.imwrite(name, img_raw)
|
import os
import hashlib
from PyQt5 import uic
from PyQt5.QtWidgets import QMainWindow, QMessageBox
from internationalization import LANGUAGE
from logic import Hash
from windows.message import Message
from databaseAccess import DbMethods
class AddWindow(QMainWindow):
def __init__(self, lang):
QMainWindow.__init__(self)
uic.loadUi("windows/AddUser.ui", self)
self.lang = lang
self.reload_text()
self.back_button.clicked.connect(self.go_to_back)
self.add_button.clicked.connect(self.add_user)
def reload_text(self):
"""Change the language of the window according to the chosen previously"""
self.language = LANGUAGE.get(self.lang)
self.setWindowTitle(self.language["add_user"])
self.user_name_label.setText(self.language["username"])
self.pass_label.setText(self.language["password"])
self.confirm_pass_label.setText(self.language["confirm_pass"])
self.add_button.setText(self.language["add_user"])
self.back_button.setText(self.language["back"])
def add_user(self):
"""Add a new user to the game"""
if len(self.user_name_text.text()) < 4:
message = Message(self.language["inv_username"], self.language["user_not_long"])
warning_message = message.create_iw_message(self.language["ok"], "warning")
warning_message.exec()
elif len(self.password_text.text()) < 8:
message = Message(self.language["inv_pass"], self.language["pass_not_long"])
warning_message = message.create_iw_message(self.language["ok"], "warning")
warning_message.exec()
else:
if self.password_text.text() == self.confirm_pass_text.text():
                data_access = DbMethods()
                response = data_access.add_player(self.user_name_text.text(), Hash.encrypt(self.password_text.text()))
if response == True:
message = Message(self.language["registered"], self.language["welcome"])
information_message = message.create_iw_message(self.language["ok"], "information")
information_message.exec()
elif response == False:
message = Message(self.language["other_name"], self.language["existing_user"])
warning_message = message.create_iw_message(self.language["ok"], "warning")
warning_message.exec()
self.user_name_text.clear()
self.password_text.clear()
self.confirm_pass_text.clear()
else:
message = Message(self.language["pass_problem"], self.language["pass_dont_match"])
warning_message = message.create_iw_message(self.language["ok"], "warning")
warning_message.exec()
def go_to_back(self):
"""Return to administration window"""
from adminWindow import AdminWindow
self.admin = AdminWindow(self.lang)
self.admin.show()
self.close()
|
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
from tests import pyunit_utils
import h2o
from h2o.utils.typechecks import assert_is_type
from h2o.frame import H2OFrame
def h2oparse_raw():
"""
Python API test: h2o.parse_raw(setup, id=None, first_line_is_header=0)
copied from pyunit_hexdev_29_parse_false.py
"""
fraw = h2o.import_file(pyunit_utils.locate("smalldata/jira/hexdev_29.csv"), parse=False)
assert isinstance(fraw, list)
fhex = h2o.parse_raw(h2o.parse_setup(fraw), id='hexdev_29.hex', first_line_is_header=0)
fhex.summary()
assert_is_type(fhex, H2OFrame)
if __name__ == "__main__":
pyunit_utils.standalone_test(h2oparse_raw)
else:
h2oparse_raw()
|
import json
import boto3
import os
client = boto3.client('dynamodb')
CURRENT_COUNTS_TABLE_NAME = os.environ['CURRENT_COUNTS_TABLE_NAME']
AVERAGE_COUNTS_TABLE_NAME = os.environ['AVERAGE_COUNTS_TABLE_NAME']
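# The handler expects an event containing the key "getCurrentCounts" or
# "getAverageCounts" and returns a full scan of the matching DynamoDB table;
# any other event yields an empty body.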
def lambda_handler(event, context):
if "getCurrentCounts" in event:
response = client.scan(TableName=CURRENT_COUNTS_TABLE_NAME)
return {
'statusCode': 200,
'body': response
}
if "getAverageCounts" in event:
response = client.scan(TableName=AVERAGE_COUNTS_TABLE_NAME)
return {
'statusCode': 200,
'body': response
}
response = {}
return {
'statusCode': 200,
'body': response
}
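# Local smoke test: a hedged example invocation (it assumes AWS credentials and
# both table-name environment variables are configured); the block is skipped
# when the module is imported by the Lambda runtime.
if __name__ == "__main__":
    print(lambda_handler({"getCurrentCounts": True}, None))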
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import List, Tuple, Union, Optional
from overrides import overrides, EnforceOverrides
from torch.utils.data.dataset import Dataset
import torchvision
from torchvision.transforms import transforms
from torch.utils.data import ConcatDataset
from archai.datasets.dataset_provider import DatasetProvider, register_dataset_provider, TrainTestDatasets
from archai.common.config import Config
from archai.common import utils
class SvhnProvider(DatasetProvider):
def __init__(self, conf_dataset:Config):
super().__init__(conf_dataset)
self._dataroot = utils.full_path(conf_dataset['dataroot'])
@overrides
def get_datasets(self, load_train:bool, load_test:bool,
transform_train, transform_test)->TrainTestDatasets:
trainset, testset = None, None
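        # SVHN ships an additional 'extra' split; it is concatenated with 'train'
        # so training uses both labelled sets.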
if load_train:
trainset = torchvision.datasets.SVHN(root=self._dataroot, split='train',
download=True, transform=transform_train)
extraset = torchvision.datasets.SVHN(root=self._dataroot, split='extra',
download=True, transform=transform_train)
trainset = ConcatDataset([trainset, extraset])
if load_test:
testset = torchvision.datasets.SVHN(root=self._dataroot, split='test',
download=True, transform=transform_test)
return trainset, testset
@overrides
def get_transforms(self)->tuple:
MEAN = [0.4914, 0.4822, 0.4465]
STD = [0.2023, 0.1994, 0.20100]
transf = [
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip()
]
normalize = [
transforms.ToTensor(),
transforms.Normalize(MEAN, STD)
]
train_transform = transforms.Compose(transf + normalize)
test_transform = transforms.Compose(normalize)
return train_transform, test_transform
register_dataset_provider('svhn', SvhnProvider)
|
# coding: utf-8
"""
InsightVM API
# Overview This guide documents the InsightVM Application Programming Interface (API) Version 3. This API supports the Representation State Transfer (REST) design pattern. Unless noted otherwise this API accepts and produces the `application/json` media type. This API uses Hypermedia as the Engine of Application State (HATEOAS) and is hypermedia friendly. All API connections must be made to the security console using HTTPS. ## Versioning Versioning is specified in the URL and the base path of this API is: `https://<host>:<port>/api/3/`. ## Specification An <a target=\"_blank\" href=\"https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md\">OpenAPI v2</a> specification (also known as Swagger 2) of this API is available. Tools such as <a target=\"_blank\" href=\"https://github.com/swagger-api/swagger-codegen\">swagger-codegen</a> can be used to generate an API client in the language of your choosing using this specification document. <p class=\"openapi\">Download the specification: <a class=\"openapi-button\" target=\"_blank\" download=\"\" href=\"/api/3/json\"> Download </a></p> ## Authentication Authorization to the API uses HTTP Basic Authorization (see <a target=\"_blank\" href=\"https://www.ietf.org/rfc/rfc2617.txt\">RFC 2617</a> for more information). Requests must supply authorization credentials in the `Authorization` header using a Base64 encoded hash of `\"username:password\"`. <!-- ReDoc-Inject: <security-definitions> --> ### 2FA This API supports two-factor authentication (2FA) by supplying an authentication token in addition to the Basic Authorization. The token is specified using the `Token` request header. To leverage two-factor authentication, this must be enabled on the console and be configured for the account accessing the API. ## Resources ### Naming Resource names represent nouns and identify the entity being manipulated or accessed. All collection resources are pluralized to indicate to the client they are interacting with a collection of multiple resources of the same type. Singular resource names are used when there exists only one resource available to interact with. The following naming conventions are used by this API: | Type | Case | | --------------------------------------------- | ------------------------ | | Resource names | `lower_snake_case` | | Header, body, and query parameters parameters | `camelCase` | | JSON fields and property names | `camelCase` | #### Collections A collection resource is a parent resource for instance resources, but can itself be retrieved and operated on independently. Collection resources use a pluralized resource name. The resource path for collection resources follow the convention: ``` /api/3/{resource_name} ``` #### Instances An instance resource is a \"leaf\" level resource that may be retrieved, optionally nested within a collection resource. Instance resources are usually retrievable with opaque identifiers. The resource path for instance resources follows the convention: ``` /api/3/{resource_name}/{instance_id}... ``` ## Verbs The following HTTP operations are supported throughout this API. The general usage of the operation and both its failure and success status codes are outlined below. | Verb | Usage | Success | Failure | | --------- | ------------------------------------------------------------------------------------- | ----------- | -------------------------------------------------------------- | | `GET` | Used to retrieve a resource by identifier, or a collection of resources by type. 
| `200` | `400`, `401`, `402`, `404`, `405`, `408`, `410`, `415`, `500` | | `POST` | Creates a resource with an application-specified identifier. | `201` | `400`, `401`, `404`, `405`, `408`, `413`, `415`, `500` | | `POST` | Performs a request to queue an asynchronous job. | `202` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Creates a resource with a client-specified identifier. | `200` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Performs a full update of a resource with a specified identifier. | `201` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `DELETE` | Deletes a resource by identifier or an entire collection of resources. | `204` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `OPTIONS` | Requests what operations are available on a resource. | `200` | `401`, `404`, `405`, `408`, `500` | ### Common Operations #### OPTIONS All resources respond to the `OPTIONS` request, which allows discoverability of available operations that are supported. The `OPTIONS` response returns the acceptable HTTP operations on that resource within the `Allow` header. The response is always a `200 OK` status. ### Collection Resources Collection resources can support the `GET`, `POST`, `PUT`, and `DELETE` operations. #### GET The `GET` operation invoked on a collection resource indicates a request to retrieve all, or some, of the entities contained within the collection. This also includes the optional capability to filter or search resources during the request. The response from a collection listing is a paginated document. See [hypermedia links](#section/Overview/Paging) for more information. #### POST The `POST` is a non-idempotent operation that allows for the creation of a new resource when the resource identifier is not provided by the system during the creation operation (i.e. the Security Console generates the identifier). The content of the `POST` request is sent in the request body. The response to a successful `POST` request should be a `201 CREATED` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. The `POST` to a collection resource can also be used to interact with asynchronous resources. In this situation, instead of a `201 CREATED` response, the `202 ACCEPTED` response indicates that processing of the request is not fully complete but has been accepted for future processing. This request will respond similarly with a `Location` header with link to the job-oriented asynchronous resource that was created and/or queued. #### PUT The `PUT` is an idempotent operation that either performs a create with user-supplied identity, or a full replace or update of a resource by a known identifier. The response to a `PUT` operation to create an entity is a `201 Created` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. `PUT` on a collection resource replaces all values in the collection. The typical response to a `PUT` operation that updates an entity is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. #### DELETE The `DELETE` is an idempotent operation that physically deletes a resource, or removes an association between resources. The typical response to a `DELETE` operation is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. 
### Instance Resources Instance resources can support the `GET`, `PUT`, `POST`, `PATCH` and `DELETE` operations. #### GET Retrieves the details of a specific resource by its identifier. The details retrieved can be controlled through property selection and property views. The content of the resource is returned within the body of the response in the acceptable media type. #### PUT Allows for and idempotent \"full update\" (complete replacement) on a specific resource. If the resource does not exist, it will be created; if it does exist, it is completely overwritten. Any omitted properties in the request are assumed to be undefined/null. For \"partial updates\" use `POST` or `PATCH` instead. The content of the `PUT` request is sent in the request body. The identifier of the resource is specified within the URL (not the request body). The response to a successful `PUT` request is a `201 CREATED` to represent the created status, with a valid `Location` header field set to the URI that can be used to access to the newly created (or fully replaced) resource. #### POST Performs a non-idempotent creation of a new resource. The `POST` of an instance resource most commonly occurs with the use of nested resources (e.g. searching on a parent collection resource). The response to a `POST` of an instance resource is typically a `200 OK` if the resource is non-persistent, and a `201 CREATED` if there is a resource created/persisted as a result of the operation. This varies by endpoint. #### PATCH The `PATCH` operation is used to perform a partial update of a resource. `PATCH` is a non-idempotent operation that enforces an atomic mutation of a resource. Only the properties specified in the request are to be overwritten on the resource it is applied to. If a property is missing, it is assumed to not have changed. #### DELETE Permanently removes the individual resource from the system. If the resource is an association between resources, only the association is removed, not the resources themselves. A successful deletion of the resource should return `204 NO CONTENT` with no response body. This operation is not fully idempotent, as follow-up requests to delete a non-existent resource should return a `404 NOT FOUND`. ## Requests Unless otherwise indicated, the default request body media type is `application/json`. ### Headers Commonly used request headers include: | Header | Example | Purpose | | ------------------ | --------------------------------------------- | ---------------------------------------------------------------------------------------------- | | `Accept` | `application/json` | Defines what acceptable content types are allowed by the client. For all types, use `*/*`. | | `Accept-Encoding` | `deflate, gzip` | Allows for the encoding to be specified (such as gzip). | | `Accept-Language` | `en-US` | Indicates to the server the client's locale (defaults `en-US`). | | `Authorization ` | `Basic Base64(\"username:password\")` | Basic authentication | | `Token ` | `123456` | Two-factor authentication token (if enabled) | ### Dates & Times Dates and/or times are specified as strings in the ISO 8601 format(s). 
The following formats are supported as input: | Value | Format | Notes | | --------------------------- | ------------------------------------------------------ | ----------------------------------------------------- | | Date | YYYY-MM-DD | Defaults to 12 am UTC (if used for a date & time | | Date & time only | YYYY-MM-DD'T'hh:mm:ss[.nnn] | Defaults to UTC | | Date & time in UTC | YYYY-MM-DD'T'hh:mm:ss[.nnn]Z | | | Date & time w/ offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm | | | Date & time w/ zone-offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm[<zone-id>] | | ### Timezones Timezones are specified in the regional zone format, such as `\"America/Los_Angeles\"`, `\"Asia/Tokyo\"`, or `\"GMT\"`. ### Paging Pagination is supported on certain collection resources using a combination of two query parameters, `page` and `size`. As these are control parameters, they are prefixed with the underscore character. The page parameter dictates the zero-based index of the page to retrieve, and the `size` indicates the size of the page. For example, `/resources?page=2&size=10` will return page 3, with 10 records per page, giving results 21-30. The maximum page size for a request is 500. ### Sorting Sorting is supported on paginated resources with the `sort` query parameter(s). The sort query parameter(s) supports identifying a single or multi-property sort with a single or multi-direction output. The format of the parameter is: ``` sort=property[,ASC|DESC]... ``` Therefore, the request `/resources?sort=name,title,DESC` would return the results sorted by the name and title descending, in that order. The sort directions are either ascending `ASC` or descending `DESC`. With single-order sorting, all properties are sorted in the same direction. To sort the results with varying orders by property, multiple sort parameters are passed. For example, the request `/resources?sort=name,ASC&sort=title,DESC` would sort by name ascending and title descending, in that order. ## Responses The following response statuses may be returned by this API. | Status | Meaning | Usage | | ------ | ------------------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `200` | OK | The operation performed without error according to the specification of the request, and no more specific 2xx code is suitable. | | `201` | Created | A create request has been fulfilled and a resource has been created. The resource is available as the URI specified in the response, including the `Location` header. | | `202` | Accepted | An asynchronous task has been accepted, but not guaranteed, to be processed in the future. | | `400` | Bad Request | The request was invalid or cannot be otherwise served. The request is not likely to succeed in the future without modifications. | | `401` | Unauthorized | The user is unauthorized to perform the operation requested, or does not maintain permissions to perform the operation on the resource specified. | | `403` | Forbidden | The resource exists to which the user has access, but the operating requested is not permitted. | | `404` | Not Found | The resource specified could not be located, does not exist, or an unauthenticated client does not have permissions to a resource. | | `405` | Method Not Allowed | The operations may not be performed on the specific resource. Allowed operations are returned and may be performed on the resource. 
| | `408` | Request Timeout | The client has failed to complete a request in a timely manner and the request has been discarded. | | `413` | Request Entity Too Large | The request being provided is too large for the server to accept processing. | | `415` | Unsupported Media Type | The media type is not supported for the requested resource. | | `500` | Internal Server Error | An internal and unexpected error has occurred on the server at no fault of the client. | ### Security The response statuses 401, 403 and 404 need special consideration for security purposes. As necessary, error statuses and messages may be obscured to strengthen security and prevent information exposure. The following is a guideline for privileged resource response statuses: | Use Case | Access | Resource | Permission | Status | | ------------------------------------------------------------------ | ------------------ |------------------- | ------------ | ------------ | | Unauthenticated access to an unauthenticated resource. | Unauthenticated | Unauthenticated | Yes | `20x` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Authenticated | No | `401` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Non-existent | No | `401` | | Authenticated access to a unauthenticated resource. | Authenticated | Unauthenticated | Yes | `20x` | | Authenticated access to an authenticated, unprivileged resource. | Authenticated | Authenticated | No | `404` | | Authenticated access to an authenticated, privileged resource. | Authenticated | Authenticated | Yes | `20x` | | Authenticated access to an authenticated, non-existent resource | Authenticated | Non-existent | Yes | `404` | ### Headers Commonly used response headers include: | Header | Example | Purpose | | -------------------------- | --------------------------------- | --------------------------------------------------------------- | | `Allow` | `OPTIONS, GET` | Defines the allowable HTTP operations on a resource. | | `Cache-Control` | `no-store, must-revalidate` | Disables caching of resources (as they are all dynamic). | | `Content-Encoding` | `gzip` | The encoding of the response body (if any). | | `Location` | | Refers to the URI of the resource created by a request. | | `Transfer-Encoding` | `chunked` | Specified the encoding used to transform response. | | `Retry-After` | 5000 | Indicates the time to wait before retrying a request. | | `X-Content-Type-Options` | `nosniff` | Disables MIME type sniffing. | | `X-XSS-Protection` | `1; mode=block` | Enables XSS filter protection. | | `X-Frame-Options` | `SAMEORIGIN` | Prevents rendering in a frame from a different origin. | | `X-UA-Compatible` | `IE=edge,chrome=1` | Specifies the browser mode to render in. | ### Format When `application/json` is returned in the response body it is always pretty-printed (indented, human readable output). Additionally, gzip compression/encoding is supported on all responses. #### Dates & Times Dates or times are returned as strings in the ISO 8601 'extended' format. When a date and time is returned (instant) the value is converted to UTC. For example: | Value | Format | Example | | --------------- | ------------------------------ | --------------------- | | Date | `YYYY-MM-DD` | 2017-12-03 | | Date & Time | `YYYY-MM-DD'T'hh:mm:ss[.nnn]Z` | 2017-12-03T10:15:30Z | #### Content In some resources a Content data type is used. This allows for multiple formats of representation to be returned within resource, specifically `\"html\"` and `\"text\"`. 
The `\"text\"` property returns a flattened representation suitable for output in textual displays. The `\"html\"` property returns an HTML fragment suitable for display within an HTML element. Note, the HTML returned is not a valid stand-alone HTML document. #### Paging The response to a paginated request follows the format: ```json { resources\": [ ... ], \"page\": { \"number\" : ..., \"size\" : ..., \"totalResources\" : ..., \"totalPages\" : ... }, \"links\": [ \"first\" : { \"href\" : \"...\" }, \"prev\" : { \"href\" : \"...\" }, \"self\" : { \"href\" : \"...\" }, \"next\" : { \"href\" : \"...\" }, \"last\" : { \"href\" : \"...\" } ] } ``` The `resources` property is an array of the resources being retrieved from the endpoint, each which should contain at minimum a \"self\" relation hypermedia link. The `page` property outlines the details of the current page and total possible pages. The object for the page includes the following properties: - number - The page number (zero-based) of the page returned. - size - The size of the pages, which is less than or equal to the maximum page size. - totalResources - The total amount of resources available across all pages. - totalPages - The total amount of pages. The last property of the paged response is the `links` array, which contains all available hypermedia links. For paginated responses, the \"self\", \"next\", \"previous\", \"first\", and \"last\" links are returned. The \"self\" link must always be returned and should contain a link to allow the client to replicate the original request against the collection resource in an identical manner to that in which it was invoked. The \"next\" and \"previous\" links are present if either or both there exists a previous or next page, respectively. The \"next\" and \"previous\" links have hrefs that allow \"natural movement\" to the next page, that is all parameters required to move the next page are provided in the link. The \"first\" and \"last\" links provide references to the first and last pages respectively. Requests outside the boundaries of the pageable will result in a `404 NOT FOUND`. Paginated requests do not provide a \"stateful cursor\" to the client, nor does it need to provide a read consistent view. Records in adjacent pages may change while pagination is being traversed, and the total number of pages and resources may change between requests within the same filtered/queries resource collection. #### Property Views The \"depth\" of the response of a resource can be configured using a \"view\". All endpoints supports two views that can tune the extent of the information returned in the resource. The supported views are `summary` and `details` (the default). View are specified using a query parameter, in this format: ```bash /<resource>?view={viewName} ``` #### Error Any error responses can provide a response body with a message to the client indicating more information (if applicable) to aid debugging of the error. All 40x and 50x responses will return an error response in the body. The format of the response is as follows: ```json { \"status\": <statusCode>, \"message\": <message>, \"links\" : [ { \"rel\" : \"...\", \"href\" : \"...\" } ] } ``` The `status` property is the same as the HTTP status returned in the response, to ease client parsing. The message property is a localized message in the request client's locale (if applicable) that articulates the nature of the error. The last property is the `links` property. 
This may contain additional [hypermedia links](#section/Overview/Authentication) to troubleshoot. #### Search Criteria <a section=\"section/Responses/SearchCriteria\"></a> Multiple resources make use of search criteria to match assets. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The operator is a type and property-specific operating performed on the filtered property. The valid values for fields and operators are outlined in the table below. Every filter also defines one or more values that are supplied to the operator. The valid values vary by operator and are outlined below. ##### Fields The following table outlines the search criteria fields and the available operators: | Field | Operators | | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | | `alternate-address-type` | `in` | | `container-image` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is like` ` not like` | | `container-status` | `is` ` is not` | | `containers` | `are` | | `criticality-tag` | `is` ` is not` ` is greater than` ` is less than` ` is applied` ` is not applied` | | `custom-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `cve` | `is` ` is not` ` contains` ` does not contain` | | `cvss-access-complexity` | `is` ` is not` | | `cvss-authentication-required` | `is` ` is not` | | `cvss-access-vector` | `is` ` is not` | | `cvss-availability-impact` | `is` ` is not` | | `cvss-confidentiality-impact` | `is` ` is not` | | `cvss-integrity-impact` | `is` ` is not` | | `cvss-v3-confidentiality-impact` | `is` ` is not` | | `cvss-v3-integrity-impact` | `is` ` is not` | | `cvss-v3-availability-impact` | `is` ` is not` | | `cvss-v3-attack-vector` | `is` ` is not` | | `cvss-v3-attack-complexity` | `is` ` is not` | | `cvss-v3-user-interaction` | `is` ` is not` | | `cvss-v3-privileges-required` | `is` ` is not` | | `host-name` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is empty` ` is not empty` ` is like` ` not like` | | `host-type` | `in` ` not in` | | `ip-address` | `is` ` is not` ` in range` ` not in range` ` is like` ` not like` | | `ip-address-type` | `in` ` not in` | | `last-scan-date` | `is-on-or-before` ` is on or after` ` is between` ` is earlier than` ` is within the last` | | `location-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `mobile-device-last-sync-time` | `is-within-the-last` ` is earlier than` | | `open-ports` | `is` ` is not` ` in range` | | `operating-system` | `contains` ` does not contain` ` is empty` ` is not empty` | | `owner-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `pci-compliance` | `is` | | `risk-score` | `is` ` is not` ` in range` ` greater than` ` less than` | | `service-name` | `contains` ` does not contain` | | `site-id` | `in` ` not in` | | `software` | `contains` ` does not contain` | | `vAsset-cluster` | `is` ` is not` ` contains` ` does not contain` ` starts with` | | 
`vAsset-datacenter` | `is` ` is not` | | `vAsset-host-name` | `is` ` is not` ` contains` ` does not contain` ` starts with` | | `vAsset-power-state` | `in` ` not in` | | `vAsset-resource-pool-path` | `contains` ` does not contain` | | `vulnerability-assessed` | `is-on-or-before` ` is on or after` ` is between` ` is earlier than` ` is within the last` | | `vulnerability-category` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` | | `vulnerability-cvss-v3-score` | `is` ` is not` | | `vulnerability-cvss-score` | `is` ` is not` ` in range` ` is greater than` ` is less than` | | `vulnerability-exposures` | `includes` ` does not include` | | `vulnerability-title` | `contains` ` does not contain` ` is` ` is not` ` starts with` ` ends with` | | `vulnerability-validated-status` | `are` | ##### Enumerated Properties The following fields have enumerated values: | Field | Acceptable Values | | ----------------------------------------- | ------------------------------------------------------------------------------------------------------------- | | `alternate-address-type` | 0=IPv4, 1=IPv6 | | `containers` | 0=present, 1=not present | | `container-status` | `created` `running` `paused` `restarting` `exited` `dead` `unknown` | | `cvss-access-complexity` | <ul><li><code>L</code> = Low</li><li><code>M</code> = Medium</li><li><code>H</code> = High</li></ul> | | `cvss-integrity-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-confidentiality-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-availability-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-access-vector` | <ul><li><code>L</code> = Local</li><li><code>A</code> = Adjacent</li><li><code>N</code> = Network</li></ul> | | `cvss-authentication-required` | <ul><li><code>N</code> = None</li><li><code>S</code> = Single</li><li><code>M</code> = Multiple</li></ul> | | `cvss-v3-confidentiality-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-integrity-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-availability-impact` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-attack-vector` | <ul><li><code>N</code> = Network</li><li><code>A</code> = Adjacent</li><li><code>L</code> = Local</li><li><code>P</code> = Physical</li></ul> | | `cvss-v3-attack-complexity` | <ul><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-user-interaction` | <ul><li><code>N</code> = None</li><li><code>R</code> = Required</li></ul> | | `cvss-v3-privileges-required` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `host-type` | 0=Unknown, 1=Guest, 2=Hypervisor, 3=Physical, 4=Mobile | | `ip-address-type` | 0=IPv4, 1=IPv6 | | `pci-compliance` | 0=fail, 1=pass | | `vulnerability-validated-status` | 0=present, 1=not present | ##### Operator Properties <a section=\"section/Responses/SearchCriteria/OperatorProperties\"></a> The following table outlines which properties are required for each operator and the appropriate data type(s): | Operator | `value` | `lower` | `upper` | | 
----------------------|-----------------------|-----------------------|-----------------------| | `are` | `string` | | | | `contains` | `string` | | | | `does-not-contain` | `string` | | | | `ends with` | `string` | | | | `in` | `Array[ string ]` | | | | `in-range` | | `numeric` | `numeric` | | `includes` | `Array[ string ]` | | | | `is` | `string` | | | | `is-applied` | | | | | `is-between` | | `numeric` | `numeric` | | `is-earlier-than` | `numeric` | | | | `is-empty` | | | | | `is-greater-than` | `numeric` | | | | `is-on-or-after` | `string` (yyyy-MM-dd) | | | | `is-on-or-before` | `string` (yyyy-MM-dd) | | | | `is-not` | `string` | | | | `is-not-applied` | | | | | `is-not-empty` | | | | | `is-within-the-last` | `string` | | | | `less-than` | `string` | | | | `like` | `string` | | | | `not-contains` | `string` | | | | `not-in` | `Array[ string ]` | | | | `not-in-range` | | `numeric` | `numeric` | | `not-like` | `string` | | | | `starts-with` | `string` | | | #### Discovery Connection Search Criteria <a section=\"section/Responses/DiscoverySearchCriteria\"></a> Dynamic sites make use of search criteria to match assets from a discovery connection. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The list of supported fields vary depending on the type of discovery connection configured for the dynamic site (e.g vSphere, ActiveSync, etc.). The operator is a type and property-specific operating performed on the filtered property. The valid values for fields outlined in the tables below and are grouped by the type of connection. Every filter also defines one or more values that are supplied to the operator. See <a href=\"#section/Responses/SearchCriteria/OperatorProperties\">Search Criteria Operator Properties</a> for more information on the valid values for each operator. ##### Fields (ActiveSync) This section documents search criteria information for ActiveSync discovery connections. The discovery connections must be one of the following types: `\"activesync-ldap\"`, `\"activesync-office365\"`, or `\"activesync-powershell\"`. The following table outlines the search criteria fields and the available operators for ActiveSync connections: | Field | Operators | | --------------------------------- | ------------------------------------------------------------- | | `last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `operating-system` | `contains` ` does-not-contain` | | `user` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (AWS) This section documents search criteria information for AWS discovery connections. The discovery connections must be the type `\"aws\"`. 
The following table outlines the search criteria fields and the available operators for AWS connections: | Field | Operators | | ----------------------- | ------------------------------------------------------------- | | `availability-zone` | `contains` ` does-not-contain` | | `guest-os-family` | `contains` ` does-not-contain` | | `instance-id` | `contains` ` does-not-contain` | | `instance-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `instance-state` | `in` ` not-in` | | `instance-type` | `in` ` not-in` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `region` | `in` ` not-in` | | `vpc-id` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (DHCP) This section documents search criteria information for DHCP discovery connections. The discovery connections must be the type `\"dhcp\"`. The following table outlines the search criteria fields and the available operators for DHCP connections: | Field | Operators | | --------------- | ------------------------------------------------------------- | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `mac-address` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (Sonar) This section documents search criteria information for Sonar discovery connections. The discovery connections must be the type `\"sonar\"`. The following table outlines the search criteria fields and the available operators for Sonar connections: | Field | Operators | | ------------------- | -------------------- | | `search-domain` | `contains` ` is` | | `ip-address` | `in-range` ` is` | | `sonar-scan-date` | `is-within-the-last` | ##### Fields (vSphere) This section documents search criteria information for vSphere discovery connections. The discovery connections must be the type `\"vsphere\"`. The following table outlines the search criteria fields and the available operators for vSphere connections: | Field | Operators | | -------------------- | ------------------------------------------------------------------------------------------ | | `cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `data-center` | `is` ` is-not` | | `discovered-time` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `guest-os-family` | `contains` ` does-not-contain` | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `power-state` | `in` ` not-in` | | `resource-pool-path` | `contains` ` does-not-contain` | | `last-time-seen` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vm` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Enumerated Properties (vSphere) The following fields have enumerated values: | Field | Acceptable Values | | ------------- | ------------------------------------ | | `power-state` | `poweredOn` `poweredOff` `suspended` | ## HATEOAS This API follows Hypermedia as the Engine of Application State (HATEOAS) principals and is therefore hypermedia friendly. Hyperlinks are returned in the `links` property of any given resource and contain a fully-qualified hyperlink to the corresponding resource. 
The format of the hypermedia link adheres to both the <a target=\"_blank\" href=\"http://jsonapi.org\">{json:api} v1</a> <a target=\"_blank\" href=\"http://jsonapi.org/format/#document-links\">\"Link Object\"</a> and <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html\">JSON Hyper-Schema</a> <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html#rfc.section.5.2\">\"Link Description Object\"</a> formats. For example: ```json \"links\": [{ \"rel\": \"<relation>\", \"href\": \"<href>\" ... }] ``` Where appropriate link objects may also contain additional properties than the `rel` and `href` properties, such as `id`, `type`, etc. See the [Root](#tag/Root) resources for the entry points into API discovery. # noqa: E501
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.resources_site_shared_credential import ResourcesSiteSharedCredential # noqa: E501
from swagger_client.rest import ApiException
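# ---------------------------------------------------------------------------
# Illustrative sketch (not produced by swagger-codegen): how the Basic
# Authorization, paging and sorting conventions described in the overview above
# could be exercised directly with the `requests` library. The host, the
# credentials, the `assets` resource name and the sort field are assumptions
# made for the example; the generated swagger_client remains the supported way
# to call the API.
# ---------------------------------------------------------------------------
def _example_paged_request(host="https://localhost:3780", page=0, size=10):
    """Fetch one page of a collection resource and return its `resources` array."""
    import requests  # imported locally so the generated test module keeps its dependencies unchanged

    response = requests.get(
        "{0}/api/3/assets".format(host),  # collection resources live under /api/3/{resource_name}
        params={"page": page, "size": size, "sort": "id,ASC"},
        auth=("username", "password"),  # HTTP Basic Authorization, as described above
        verify=False,  # consoles often use self-signed certificates (assumption)
    )
    response.raise_for_status()
    return response.json()["resources"]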
class TestResourcesSiteSharedCredential(unittest.TestCase):
"""ResourcesSiteSharedCredential unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testResourcesSiteSharedCredential(self):
"""Test ResourcesSiteSharedCredential"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.resources_site_shared_credential.ResourcesSiteSharedCredential() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
import sys
from setuptools import setup
if sys.version_info < (3, 5):
print('rets requires Python 3.5 or later')
sys.exit(1)
long_desc = 'Python 3 client for the Real Estate Transaction Standard (RETS) Version 1.7.2'
install_requires = [
'requests>=2.12.3',
'requests-toolbelt>=0.7.0,!=0.9.0',
'udatetime==0.0.16',
'docopts',
'lxml>=4.3.0',
]
setup_requires = [
'pytest-runner',
]
tests_requires = [
'flake8',
'pytest',
]
packages = [
'rets',
'rets.client',
'rets.http',
'rets.http.parsers',
]
setup(
name='rets-python',
version='0.4.2',
description='rets-python',
long_description=long_desc,
author='Martin Liu',
author_email='martin@opendoor.com',
url='https://github.com/opendoor-labs/rets',
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Financial and Insurance Industry',
'Intended Audience :: Information Technology',
'Intended Audience :: Other Audience',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
],
license='MIT License',
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_requires,
packages=packages,
)
|
from logging import debug, info, warning, error, exception
import re
from datetime import datetime, timedelta
from .. import AbstractServiceHandler
from data.models import Episode, UnprocessedStream
class ServiceHandler(AbstractServiceHandler):
_show_url = "http://crunchyroll.com/{id}"
	_show_re = re.compile(r"crunchyroll.com/([\w-]+)", re.I)
_episode_rss = "http://crunchyroll.com/{id}.rss"
_backup_rss = "http://crunchyroll.com/rss/anime"
_season_url = "http://crunchyroll.com/lineup"
def __init__(self):
super().__init__("crunchyroll", "Crunchyroll", False)
# Episode finding
def get_all_episodes(self, stream, **kwargs):
info("Getting live episodes for Crunchyroll/{}".format(stream.show_key))
episode_datas = self._get_feed_episodes(stream.show_key, **kwargs)
# Check data validity and digest
episodes = []
for episode_data in episode_datas:
if _is_valid_episode(episode_data, stream.show_key):
try:
episodes.append(_digest_episode(episode_data))
				except Exception:
exception("Problem digesting episode for Crunchyroll/{}".format(stream.show_key))
if len(episode_datas) > 0:
debug(" {} episodes found, {} valid".format(len(episode_datas), len(episodes)))
else:
debug(" No episodes found")
return episodes
def _get_feed_episodes(self, show_key, **kwargs):
"""
Always returns a list.
"""
info("Getting episodes for Crunchyroll/{}".format(show_key))
url = self._get_feed_url(show_key)
# Send request
response = self.request(url, rss=True, **kwargs)
if response is None:
error("Cannot get latest show for Crunchyroll/{}".format(show_key))
return list()
# Parse RSS feed
if not _verify_feed(response):
warning("Parsed feed could not be verified, may have unexpected results")
return response.get("entries", list())
@classmethod
def _get_feed_url(cls, show_key):
# Sometimes shows don't have an RSS feed
# Use the backup global feed when it doesn't
if show_key is not None:
return cls._episode_rss.format(id=show_key)
else:
debug(" Using backup feed")
return cls._backup_rss
# Remote info getting
_title_fix = re.compile("(.*) Episodes", re.I)
def get_stream_info(self, stream, **kwargs):
info("Getting stream info for Crunchyroll/{}".format(stream.show_key))
url = self._get_feed_url(stream.show_key)
response = self.request(url, rss=True, **kwargs)
if response is None:
error("Cannot get feed")
return None
if not _verify_feed(response):
warning("Parsed feed could not be verified, may have unexpected results")
stream.name = response.feed.title
match = self._title_fix.match(stream.name)
if match:
stream.name = match.group(1)
return stream
def get_seasonal_streams(self, **kwargs):
debug("Getting season shows")
# Request page
response = self.request(self._season_url, html=True, **kwargs)
if response is None:
error("Failed to get seasonal streams page")
return list()
# Find sections (continuing simulcast, new simulcast, new catalog)
lists = response.find_all(class_="lineup-grid")
if len(lists) < 2:
error("Unsupported structure of lineup page")
return list()
		elif len(lists) > 3:
warning("Unexpected number of lineup grids")
# Parse individual shows
# WARNING: Some may be dramas and there's nothing distinguishing them from anime
show_elements = lists[1].find_all(class_="element-lineup-anime")
raw_streams = list()
for show in show_elements:
title = show["title"]
if "to be announced" not in title.lower():
debug(" Show: {}".format(title))
url = show["href"]
debug(" URL: {}".format(url))
url_match = self._show_re.search(url)
if not url_match:
error("Failed to parse show URL: {}".format(url))
continue
key = url_match.group(1)
debug(" Key: {}".format(key))
remote_offset, display_offset = self._get_stream_info(key)
raw_stream = UnprocessedStream(self.key, key, None, title, remote_offset, display_offset)
raw_streams.append(raw_stream)
return raw_streams
def _get_stream_info(self, show_key):
#TODO: load show page and figure out offsets based on contents
return 0, 0
# Local info formatting
def get_stream_link(self, stream):
# Just going to assume it's the correct service
return self._show_url.format(id=stream.show_key)
def extract_show_key(self, url):
match = self._show_re.search(url)
if match:
return match.group(1)
return None
# Episode feeds
def _verify_feed(feed):
debug("Verifying feed")
if feed.bozo:
debug(" Feed was malformed")
return False
if "crunchyroll" not in feed.namespaces or feed.namespaces["crunchyroll"] != "http://www.crunchyroll.com/rss":
debug(" Crunchyroll namespace not found or invalid")
return False
if feed.feed.language != "en-us":
debug(" Language not en-us")
return False
debug(" Feed verified")
return True
def _is_valid_episode(feed_episode, show_id):
# We don't want non-episodes (PVs, VA interviews, etc.)
if feed_episode.get("crunchyroll_isclip", False) or not hasattr(feed_episode, "crunchyroll_episodenumber"):
debug("Is PV, ignoring")
return False
# Sanity check
if _get_slug(feed_episode.link) != show_id:
debug("Wrong ID")
return False
# Don't check really old episodes
episode_date = datetime(*feed_episode.published_parsed[:6])
date_diff = datetime.utcnow() - episode_date
if date_diff >= timedelta(days=2):
debug(" Episode too old")
return False
return True
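# Feed entry titles look like "Episode 12 - Name"; the first regex strips that
# prefix, and the second reduces episode numbers such as "12a" to their leading
# integer.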
_episode_name_correct = re.compile(r"Episode \d+ - (.*)")
_episode_count_fix = re.compile("([0-9]+)[abc]?", re.I)
def _digest_episode(feed_episode):
debug("Digesting episode")
# Get data
num_match = _episode_count_fix.match(feed_episode.crunchyroll_episodenumber)
if num_match:
num = int(num_match.group(1))
else:
warning("Unknown episode number format \"{}\"".format(feed_episode.crunchyroll_episodenumber))
num = 0
debug(" num={}".format(num))
name = feed_episode.title
match = _episode_name_correct.match(name)
if match:
debug(" Corrected title from \"{}\"".format(name))
name = match.group(1)
debug(" name={}".format(name))
link = feed_episode.link
debug(" link={}".format(link))
date = feed_episode.published_parsed
debug(" date={}".format(date))
return Episode(num, name, link, date)
_slug_regex = re.compile("crunchyroll.com/([a-z0-9-]+)/", re.I)
def _get_slug(episode_link):
match = _slug_regex.search(episode_link)
if match:
return match.group(1)
return None
# Season page
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RMultcomp(RPackage):
"""Simultaneous tests and confidence intervals for general linear
hypotheses in parametric models, including linear, generalized linear,
linear mixed effects, and survival models. The package includes demos
    reproducing analyses presented in the book "Multiple Comparisons Using R"
(Bretz, Hothorn, Westfall, 2010, CRC Press)."""
homepage = "http://multcomp.r-forge.r-project.org/"
url = "https://cloud.r-project.org/src/contrib/multcomp_1.4-6.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/multcomp"
version('1.4-10', sha256='29bcc635c0262e304551b139cd9ee655ab25a908d9693e1cacabfc2a936df5cf')
version('1.4-8', sha256='a20876619312310e9523d67e9090af501383ce49dc6113c6b4ca30f9c943a73a')
version('1.4-6', sha256='fe9efbe671416a49819cbdb9137cc218faebcd76e0f170fd1c8d3c84c42eeda2')
depends_on('r-mvtnorm@1.0-10:', type=('build', 'run'))
depends_on('r-survival@2.39-4:', type=('build', 'run'))
depends_on('r-th-data@1.0-2:', type=('build', 'run'))
depends_on('r-sandwich@2.3-0:', type=('build', 'run'))
depends_on('r-codetools', type=('build', 'run'))
|
"""
Copyright (c) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from extensions.middle.Reduce import ReduceReplacer
from mo.middle.passes.eliminate_test import build_graph
from mo.middle.passes.fusing.fuse_linear_ops_test import compare_graphs
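# These tests check that ReduceReplacer rewrites a Reduce (Mean) operation into
# an equivalent Reshape -> Pooling -> Reshape sub-graph for several axis and
# keep_dims combinations.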
# The dictionary with nodes attributes used to build various graphs. A key is the name of the node and the value is the
# dictionary with node attributes.
nodes_attributes = {
# Placeholder layers
'placeholder_1': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
'placeholder_3_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
'placeholder_4_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
    # Reduce layers
'reduce_1': {'type': 'Reduce', 'kind': 'op', 'op': 'Reduce'},
'reduce_1_data': {'value': None, 'shape': None, 'kind': 'data'},
# Reshape layers
'reshape_1': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
'reshape_1_data': {'value': None, 'shape': None, 'kind': 'data'},
'reshape_2': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
'reshape_2_data': {'value': None, 'shape': None, 'kind': 'data'},
# Pooling
'pooling': {'type': 'Pooling', 'kind': 'op', 'op': 'Pooling'},
'pooling_data': {'value': None, 'shape': None, 'kind': 'data'},
# Power
'power': {'type': 'Power', 'kind': 'op', 'op': 'Power'},
'power_data': {'value': None, 'shape': None, 'kind': 'data'},
# Concat
'concat': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'},
}
class ReduceReplacerTest(unittest.TestCase):
def test1(self):
# Original graph
# data(1,64,1)-->Reduce(axis=1,keep_dims=True)-->data(1,1,1)
#
# Reference graph
        # data(1,64,1)->Reshape(1,1,64,1)->Pool(1,1,1,1)->Reshape(1,1,1)
#
graph = build_graph(nodes_attributes,
[('placeholder_1_data', 'reduce_1'),
('reduce_1', 'reduce_1_data'),
('reduce_1_data', 'concat'),
],
{'placeholder_1_data': {'shape': np.array([1, 64, 1])},
'reduce_1': {'axis': np.array([1]), 'keep_dims': True, 'reduce_type': 'Mean'},
'reduce_1_data': {'shape': np.array([1, 1, 1])},
}, nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref = build_graph(nodes_attributes,
[('placeholder_1_data', 'reshape_1'),
('reshape_1', 'reshape_1_data'),
('reshape_1_data', 'pooling'),
('pooling', 'pooling_data'),
('pooling_data', 'reshape_2'),
('reshape_2', 'reshape_2_data'),
('reshape_2_data', 'concat'),
],
{'placeholder_1_data': {'shape': np.array([1, 64, 1])},
'reshape_1': {'dim': np.array([1, 1, 64, 1])},
'reshape_1_data': {'shape': np.array([1, 1, 64, 1])},
'pooling': {'window': np.array([1, 1, 64, 1])},
'pooling_data': {'shape': np.array([1, 1, 1, 1])},
'reshape_2': {'dim': np.array([1, 1, 1])},
'reshape_2_data': {'shape': np.array([1, 1, 1])},
}, nodes_with_edges_only=True)
pattern = ReduceReplacer()
pattern.find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
self.assertTrue(flag, resp)
def test2(self):
# Original graph
# data(1,3,64,64)-->Reduce(axis=2,keep_dims=True)-->data(1,3,1,64)
#
# Reference graph
        # data(1,3,64,64)->Reshape(1,3,64,64)->Pool(1,3,1,64)->Reshape(1,3,1,64)
#
graph = build_graph(nodes_attributes,
[('placeholder_1_data', 'reduce_1'),
('reduce_1', 'reduce_1_data'),
('reduce_1_data', 'concat'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])},
'reduce_1': {'axis': np.array([2]), 'keep_dims': True, 'reduce_type': 'Mean'},
'reduce_1_data': {'shape': np.array([1, 3, 1, 64])},
}, nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref = build_graph(nodes_attributes,
[('placeholder_1_data', 'reshape_1'),
('reshape_1', 'reshape_1_data'),
('reshape_1_data', 'pooling'),
('pooling', 'pooling_data'),
('pooling_data', 'reshape_2'),
('reshape_2', 'reshape_2_data'),
('reshape_2_data', 'concat'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])},
'reshape_1': {'dim': np.array([1, 3, 64, 64])},
'reshape_1_data': {'shape': np.array([1, 3, 64, 64])},
'pooling': {'window': np.array([1, 1, 64, 1])},
'pooling_data': {'shape': np.array([1, 3, 1, 64])},
'reshape_2': {'dim': np.array([1, 3, 1, 64])},
'reshape_2_data': {'shape': np.array([1, 3, 1, 64])},
}, nodes_with_edges_only=True)
pattern = ReduceReplacer()
pattern.find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
self.assertTrue(flag, resp)
def test3(self):
# Original graph
# data(1,3,64,64)-->Reduce(axis=[2,3],keep_dims=True)-->data(1,3,1,1)
#
# Reference graph
        # data(1,3,64,64)->Reshape(1,3,64*64,1)->Pool(1,3,1,1)->Reshape(1,3,1,1)
#
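        # The two adjacent reduced axes [2, 3] are expected to be flattened into a single
        # dimension of 64 * 64 = 4096 elements, which the pooling window then covers entirely.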
graph = build_graph(nodes_attributes,
[('placeholder_1_data', 'reduce_1'),
('reduce_1', 'reduce_1_data'),
('reduce_1_data', 'concat'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])},
'reduce_1': {'axis': np.array([2, 3]), 'keep_dims': True, 'reduce_type': 'Mean'},
'reduce_1_data': {'shape': np.array([1, 3, 1, 1])},
}, nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref = build_graph(nodes_attributes,
[('placeholder_1_data', 'reshape_1'),
('reshape_1', 'reshape_1_data'),
('reshape_1_data', 'pooling'),
('pooling', 'pooling_data'),
('pooling_data', 'reshape_2'),
('reshape_2', 'reshape_2_data'),
('reshape_2_data', 'concat'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])},
'reshape_1': {'dim': np.array([1, 3, 64 * 64, 1])},
'reshape_1_data': {'shape': np.array([1, 3, 64 * 64, 1])},
'pooling': {'window': np.array([1, 1, 64 * 64, 1])},
'pooling_data': {'shape': np.array([1, 3, 1, 1])},
'reshape_2': {'dim': np.array([1, 3, 1, 1])},
'reshape_2_data': {'shape': np.array([1, 3, 1, 1])},
}, nodes_with_edges_only=True)
pattern = ReduceReplacer()
pattern.find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
self.assertTrue(flag, resp)
def test4(self):
# Original graph
# data(2,3,64,64)-->Reduce(axis=[1,2,3],keep_dims=False)-->data(2)
#
# Reference graph
# data(2,3,64,64)->Reshape(2,1,3*64*64,1)->Pool(2,1,1,1)->Reshape(2)
#
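        # All of axes [1, 2, 3] (3 * 64 * 64 = 12288 elements) collapse into one pooling
        # dimension; with keep_dims=False the final Reshape drops the reduced axes, leaving (2,).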
graph = build_graph(nodes_attributes,
[('placeholder_1_data', 'reduce_1'),
('reduce_1', 'reduce_1_data'),
('reduce_1_data', 'concat'),
],
{'placeholder_1_data': {'shape': np.array([2, 3, 64, 64])},
'reduce_1': {'axis': np.array([1, 2, 3]), 'keep_dims': False, 'reduce_type': 'Mean'},
'reduce_1_data': {'shape': np.array([2])},
}, nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref = build_graph(nodes_attributes,
[('placeholder_1_data', 'reshape_1'),
('reshape_1', 'reshape_1_data'),
('reshape_1_data', 'pooling'),
('pooling', 'pooling_data'),
('pooling_data', 'reshape_2'),
('reshape_2', 'reshape_2_data'),
('reshape_2_data', 'concat'),
],
{'placeholder_1_data': {'shape': np.array([2, 3, 64, 64])},
'reshape_1': {'dim': np.array([2, 1, 3 * 64 * 64, 1])},
'reshape_1_data': {'shape': np.array([2, 1, 3 * 64 * 64, 1])},
'pooling': {'window': np.array([1, 1, 3 * 64 * 64, 1])},
'pooling_data': {'shape': np.array([2, 1, 1, 1])},
'reshape_2': {'dim': np.array([2])},
'reshape_2_data': {'shape': np.array([2])},
}, nodes_with_edges_only=True)
pattern = ReduceReplacer()
pattern.find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
self.assertTrue(flag, resp)
def test5(self):
# Original graph
# data(1, 16, 64, 64, 64, 4)-->Reduce(axis=[5],keep_dims=False)-->data(1, 16, 64, 64, 64)
#
# Reference graph
        # data(1, 16, 64, 64, 64, 4)->Reshape(1*16*64*64, 64, 4, 1)->Pool(1*16*64*64, 64, 1, 1)->Reshape(1, 16, 64, 64, 64)
#
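        # The non-reduced leading dims are folded into the first reshape dimension
        # (1 * 16 * 64 * 64 = 65536), so the pooling window only spans the 4 reduced elements.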
graph = build_graph(nodes_attributes,
[('placeholder_1_data', 'reduce_1'),
('reduce_1', 'reduce_1_data'),
('reduce_1_data', 'concat'),
],
{'placeholder_1_data': {'shape': np.array([1, 16, 64, 64, 64, 4])},
'reduce_1': {'axis': np.array([5]), 'keep_dims': False, 'reduce_type': 'max'},
'reduce_1_data': {'shape': np.array([1, 16, 64, 64, 64])},
}, nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref = build_graph(nodes_attributes,
[('placeholder_1_data', 'reshape_1'),
('reshape_1', 'reshape_1_data'),
('reshape_1_data', 'pooling'),
('pooling', 'pooling_data'),
('pooling_data', 'reshape_2'),
('reshape_2', 'reshape_2_data'),
('reshape_2_data', 'concat'),
],
{'placeholder_1_data': {'shape': np.array([1, 16, 64, 64, 64, 4])},
'reshape_1': {'dim': np.array([65536, 64, 4, 1])},
'reshape_1_data': {'shape': np.array([65536, 64, 4, 1])},
'pooling': {'window': np.array([1, 1, 4, 1])},
'pooling_data': {'shape': np.array([65536, 64, 1, 1])},
'reshape_2': {'dim': np.array([1, 16, 64, 64, 64])},
'reshape_2_data': {'shape': np.array([1, 16, 64, 64, 64])},
}, nodes_with_edges_only=True)
pattern = ReduceReplacer()
pattern.find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
self.assertTrue(flag, resp)
def test6(self):
# Original graph
# data(1,64,1)-->Reduce(axis=-2,keep_dims=True, reduce_type=Sum)-->data(1,1,1)
#
# Reference graph
        # data(1,64,1)->Reshape(1,1,64,1)->Pool(1,1,1,1)->Reshape(1,1,1)->Power(scale=64)
#
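        # A Sum reduce is expressed here as average pooling followed by a Power node whose
        # scale equals the number of reduced elements: sum = mean * 64, hence scale=64.0.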
graph = build_graph(nodes_attributes,
[('placeholder_1_data', 'reduce_1'),
('reduce_1', 'reduce_1_data'),
('reduce_1_data', 'concat'),
],
{'placeholder_1_data': {'shape': np.array([1, 64, 1])},
'reduce_1': {'axis': np.array([-2]), 'keep_dims': True, 'reduce_type': 'Sum'},
'reduce_1_data': {'shape': np.array([1, 1, 1])},
}, nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref = build_graph(nodes_attributes,
[('placeholder_1_data', 'reshape_1'),
('reshape_1', 'reshape_1_data'),
('reshape_1_data', 'pooling'),
('pooling', 'pooling_data'),
('pooling_data', 'reshape_2'),
('reshape_2', 'reshape_2_data'),
('reshape_2_data', 'power'),
('power', 'power_data'),
('power_data', 'concat'),
],
{'placeholder_1_data': {'shape': np.array([1, 64, 1])},
'reshape_1': {'dim': np.array([1, 1, 64, 1])},
'reshape_1_data': {'shape': np.array([1, 1, 64, 1])},
'pooling': {'window': np.array([1, 1, 64, 1])},
'pooling_data': {'shape': np.array([1, 1, 1, 1])},
'reshape_2': {'dim': np.array([1, 1, 1])},
'reshape_2_data': {'shape': np.array([1, 1, 1])},
'power': {'scale': 64.0},
'power_data': {'shape': np.array([1, 1, 1])},
}, nodes_with_edges_only=True)
pattern = ReduceReplacer()
pattern.find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
self.assertTrue(flag, resp)