import json
import os, errno
import sys
import time
import shutil
import subprocess
from subprocess import Popen, PIPE
EXECUTABLE = 'hcbr_learning'
BUILD_FOLDER = '../build'
DATA_FOLDER = '../data'
KFOLD_SCRIPT = 'kfold_validation.py'
ACCURACY_ROW = 4
#METAOPTIMIZATION = '../tuning/hyperopt_wrapper.py'
#METAOPTIMIZATION_TIMEOUT = 60
METAOPTIMIZATION = '../script/genetic_algorithm.py'
def convert_paramILS_to_HCBR_params(paramILS):
convert_map = {
'e': 'eta',
'd': 'delta',
'g': 'gamma',
'i': 'online',
'p': 'learning_phases',
'z': 'heuristic'
}
def if_exists(k, v):
if k in convert_map:
return convert_map[k], v
else:
return None, None
params = {}
    for k, v in paramILS.items():
key, val = if_exists(k, v)
if key is not None:
params[key] = val
return params
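# Illustrative usage (hypothetical values): convert_paramILS_to_HCBR_params({'e': 0.05, 'i': 1, 'x': 3})
# keeps only the recognised short keys and returns {'eta': 0.05, 'online': 1}.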
def read_outcomes(path):
    """Read the outcomes file: one integer outcome (e.g. 0 or 1) per line."""
    cases = []
    with open(path, 'r') as csvfile:
        for row in csvfile.readlines():
            cases.append(int(row))
    return cases
def main():
executable_path = os.path.join(BUILD_FOLDER, EXECUTABLE)
k = int(sys.argv[1])
l = float(sys.argv[2])
instance_name = sys.argv[3]
seed = None
if len(sys.argv) > 4:
seed = sys.argv[4]
    only_analysis = False
    if len(sys.argv) > 5:
        only_analysis = sys.argv[5] == 'True'
    nested_CV = False
    if len(sys.argv) > 6:
        nested_CV = sys.argv[6] == 'True'
suffix = ""
if len(sys.argv) > 7:
suffix = "_" + sys.argv[7]
path = instance_name
file_name = path.split('/')[-1].split('.')[0]
base_name = file_name.split('.')[0]
# Check build, executable and paths
base_output_path = "{}{}".format(instance_name, suffix)
if not only_analysis:
try:
shutil.rmtree(base_output_path)
except:
pass
try:
os.makedirs(base_output_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
# Create the casebase
print('# Create casebase and outcome files...')
process_script = os.path.join(DATA_FOLDER, "process_{}.py".format(instance_name))
data_location = os.path.join(DATA_FOLDER, "{}.txt".format(instance_name))
cmd = "python {} {}".format(process_script, data_location)
rc = subprocess.call(cmd, shell=True)
print('CMD: {}'.format(cmd))
print('RC: {}'.format(rc))
if rc:
exit(1)
path_casebase = os.path.join("{}_casebase.txt".format(instance_name))
path_outcomes = os.path.join("{}_outcomes.txt".format(instance_name))
try:
outcomes = read_outcomes(path_outcomes)
except Exception as e:
print(e)
exit(1)
n = len(outcomes)
# Create the k-folds
print('# Create k-folds files for validation...')
fold_creation_output = os.path.join(base_output_path, 'kfold_creation.log')
cmd_fold_validation = "python {} {} {} {} {} {} > {}".format(
KFOLD_SCRIPT,
k,
path_casebase,
path_outcomes,
os.path.join(base_output_path, "input_data"),
seed if seed is not None else "",
fold_creation_output
)
print('CMD: {}'.format(cmd_fold_validation))
rc = subprocess.call(cmd_fold_validation, shell=True)
print('RC: {}'.format(rc))
if rc:
exit(1)
# Read configuration
print('# Read configuration for this instance...')
examples = int(round(n * l))
parameters_path = os.path.join(DATA_FOLDER, "parameters", "{}.params.json".format(instance_name))
default_params = {
# TODO
}
parameters = None
try:
with open(parameters_path) as json_data:
parameters = json.load(json_data)
except Exception as e:
print('[ERROR] Could not retrieve parameters. Use default parameters.')
print(e)
if parameters is None:
parameters = default_params
else:
        for key, v in default_params.items():
if key not in parameters:
print('# - Add {}={} as parameter because value not found'.format(key, v))
parameters[key] = v
print('# Configuration: {}'.format(parameters))
# Start validation runs
print('# Start validation runs...')
average_accuracy = 0
for i in range(0, k):
print('\n#########################')
print('# - Run {}'.format(i))
print('#########################')
run_nb = 'run_{}'.format(i)
fold_casebase = os.path.join("../experiments", base_output_path, "input_data", "{}_casebase.fold_{}.txt".format(instance_name, i))
fold_outcomes = os.path.join("../experiments", base_output_path, "input_data", "{}_outcomes.fold_{}.txt".format(instance_name, i))
fold_output_path = os.path.join("../experiments", base_output_path, run_nb)
parameters_path = os.path.join(DATA_FOLDER, "parameters", "{}.params.json".format(instance_name))
default_params = {
# TODO
}
parameters = None
try:
with open(parameters_path) as json_data:
parameters = json.load(json_data)
except Exception as e:
print('[ERROR] Could not retrieve parameters. Use default parameters.')
print(e)
parameters["input"]["casebase"] = fold_casebase
parameters["input"]["outcomes"] = fold_outcomes
parameters["parameters"]["limit"] = examples
parameters["parameters"]["run_id"] = i
if not only_analysis:
try:
shutil.rmtree(fold_output_path)
except:
pass
try:
os.makedirs(fold_output_path)
except OSError as e:
if e.errno != errno.EEXIST:
print('[ERROR] Could not create output path for {}'.format(run_nb))
continue
if(nested_CV):
print('# Start Meta-optimization for Model Selection')
print('# Preliminary run')
fold_param_file = os.path.join(fold_output_path, 'params_{}.init.json'.format(run_nb))
with open(fold_param_file, 'w') as f:
f.write(json.dumps(parameters, indent=4))
print('# Initial configuration: {}'.format(parameters))
cmd = "{} --params {} > {} 2> {}".format(executable_path,
fold_param_file,
os.path.join(fold_output_path, 'output_{}.init.txt'.format(run_nb)),
os.path.join(fold_output_path, 'log_{}.init.txt'.format(run_nb))
)
'''
cmd = "{} -c {} -o {} -l {} -s -p {} -e {} -d {} -g {} {} {} -b {} > {} 2> {}".format(
executable_path,
fold_casebase,
fold_outcomes,
examples,
parameters['learning_phases'],
parameters['eta'],
parameters['delta'],
parameters['gamma'],
'-i' if int(parameters['online']) == 1 else "",
'-z' if int(parameters['heuristic']) == 1 else "",
i,
os.path.join(fold_output_path, 'output_{}.txt'.format(run_nb)),
os.path.join(fold_output_path, 'log_{}.txt'.format(run_nb))
)
'''
print('# CMD: {}'.format(cmd))
rc = subprocess.call(cmd, shell=True)
p = Popen(['tail', '-n', '1', os.path.join(fold_output_path, 'output_{}.init.txt'.format(run_nb))], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = p.communicate()
prun_accuracy = float(output.split()[ACCURACY_ROW])
print('# Preliminary run accuracy: {}'.format(prun_accuracy))
cmd = "python {} \
--weights ../experiments/W.txt \
--mu0 ../experiments/Mu_0_post_training.txt \
--mu1 ../experiments/Mu_1_post_training.txt \
--outcomes {}".format(METAOPTIMIZATION, fold_outcomes)
print('# CMD: {}'.format(cmd))
p = Popen(cmd.split(), stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = p.communicate()
parameters_path = os.path.join(DATA_FOLDER, "parameters", "{}.optimized.params.json".format(instance_name))
parameters = json.load(open(parameters_path))
parameters["deserialization"]["mu0_file"] = "../experiments/Mu_0_optimized.txt"
parameters["deserialization"]["mu1_file"] = "../experiments/Mu_1_optimized.txt"
parameters["input"]["casebase"] = fold_casebase
parameters["input"]["outcomes"] = fold_outcomes
parameters["parameters"]["limit"] = examples
parameters["parameters"]["run_id"] = i
fold_param_file = os.path.join(fold_output_path, 'params_{}.json'.format(run_nb))
with open(fold_param_file, 'w') as f:
f.write(json.dumps(parameters, indent=4))
print('# Final configuration: {}'.format(parameters))
cmd = "{} --params {} > {} 2> {}".format(executable_path,
fold_param_file,
os.path.join(fold_output_path, 'output_{}.txt'.format(run_nb)),
os.path.join(fold_output_path, 'log_{}.txt'.format(run_nb))
)
print('# CMD: {}'.format(cmd))
rc = subprocess.call(cmd, shell=True)
try:
shutil.move("training.run_{}.log.csv".format(i), os.path.join(base_output_path, "run_{}".format(i), "training.run_{}.log.csv".format(i)))
shutil.move("prediction.run_{}.log.csv".format(i), os.path.join(base_output_path, "run_{}".format(i), "prediction.run_{}.log.csv".format(i)))
shutil.move("overlap.run_{}.log.csv".format(i), os.path.join(base_output_path, "run_{}".format(i), "overlap.run_{}.log.csv".format(i)))
shutil.move("strength.run_{}.log.csv".format(i), os.path.join(base_output_path, "run_{}".format(i), "strength.run_{}.log.csv".format(i)))
except Exception as e:
pass
p = Popen(['tail', '-n', '1', os.path.join(fold_output_path, 'output_{}.txt'.format(run_nb))], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = p.communicate()
run_accuracy = float(output.split()[ACCURACY_ROW])
average_accuracy += run_accuracy
print("# Accuracy: {}".format(run_accuracy))
print('# Analyze the results...')
try:
# Confusion matrix
cmd_confusion_matrix = "python ../utils/confusion_matrix.py {}".format(os.path.join(fold_output_path, 'output_{}.txt'.format(run_nb)))
cmd_cm_gp = "gnuplot {}".format('output_{}_confusion_matrix.gp'.format(run_nb))
rc = subprocess.call(cmd_confusion_matrix, shell=True)
rc = subprocess.call(cmd_cm_gp, shell=True)
shutil.move('output_{}_confusion_matrix.gp'.format(run_nb), os.path.join(base_output_path, "run_{}".format(i), 'output_{}_confusion_matrix.gp'.format(run_nb)))
shutil.move('output_{}_confusion_matrix.txt'.format(run_nb), os.path.join(base_output_path, "run_{}".format(i), 'output_{}_confusion_matrix.txt'.format(run_nb)))
shutil.move('output_{}_confusion_matrix_0.png'.format(run_nb), os.path.join(base_output_path, "run_{}".format(i), 'output_{}_confusion_matrix_0.png'.format(run_nb)))
shutil.move('output_{}_confusion_matrix_1.png'.format(run_nb), os.path.join(base_output_path, "run_{}".format(i), 'output_{}_confusion_matrix_1.png'.format(run_nb)))
shutil.move('output_{}_confusion_matrix_2.png'.format(run_nb), os.path.join(base_output_path, "run_{}".format(i), 'output_{}_confusion_matrix_2.png'.format(run_nb)))
shutil.move('output_{}_confusion_matrix_0.svg'.format(run_nb), os.path.join(base_output_path, "run_{}".format(i), 'output_{}_confusion_matrix_0.svg'.format(run_nb)))
shutil.move('output_{}_confusion_matrix_1.svg'.format(run_nb), os.path.join(base_output_path, "run_{}".format(i), 'output_{}_confusion_matrix_1.svg'.format(run_nb)))
shutil.move('output_{}_confusion_matrix_2.svg'.format(run_nb), os.path.join(base_output_path, "run_{}".format(i), 'output_{}_confusion_matrix_2.svg'.format(run_nb)))
# Prediction analysis
cmd_prediction_analysis ="python ../utils/prediction_analysis.py {path} ".format(
path=os.path.join(fold_output_path, 'output_{}.txt'.format(run_nb))
)
cmd_pa_gp = "gnuplot {}".format('output_{}_diff_pred.gp'.format(run_nb))
rc = subprocess.call(cmd_prediction_analysis, shell=True)
rc = subprocess.call(cmd_pa_gp, shell=True)
shutil.move('output_{}_diff_bad_pred.txt'.format(run_nb), os.path.join(base_output_path, "run_{}".format(i), 'output_{}_diff_bad_pred.txt'.format(run_nb)))
shutil.move('output_{}_diff_negative_bad_pred.txt'.format(run_nb), os.path.join(base_output_path, "run_{}".format(i), 'output_{}_diff_negative_bad_pred.txt'.format(run_nb)))
shutil.move('output_{}_diff_negative_pred.txt'.format(run_nb), os.path.join(base_output_path, "run_{}".format(i), 'output_{}_diff_negative_pred.txt'.format(run_nb)))
shutil.move('output_{}_diff_positive_bad_pred.txt'.format(run_nb), os.path.join(base_output_path, "run_{}".format(i), 'output_{}_diff_positive_bad_pred.txt'.format(run_nb)))
shutil.move('output_{}_diff_pred.txt'.format(run_nb), os.path.join(base_output_path, "run_{}".format(i), 'output_{}_diff_pred.txt'.format(run_nb)))
shutil.move('output_{}_positive_diff_pred.txt'.format(run_nb), os.path.join(base_output_path, "run_{}".format(i), 'output_{}_positive_diff_pred.txt'.format(run_nb)))
shutil.move('output_{}_diff_pred.gp'.format(run_nb), os.path.join(base_output_path, "run_{}".format(i), 'output_{}_diff_pred.gp'.format(run_nb)))
shutil.move('output_{}_diff_pred_0.png'.format(run_nb), os.path.join(base_output_path, "run_{}".format(i), 'output_{}_diff_pred_0.png'.format(run_nb)))
            shutil.move('output_{}_diff_pred_1.png'.format(run_nb), os.path.join(base_output_path, "run_{}".format(i), 'output_{}_diff_pred_1.png'.format(run_nb)))
shutil.move('output_{}_diff_pred_0.svg'.format(run_nb), os.path.join(base_output_path, "run_{}".format(i), 'output_{}_diff_pred_0.svg'.format(run_nb)))
            shutil.move('output_{}_diff_pred_1.svg'.format(run_nb), os.path.join(base_output_path, "run_{}".format(i), 'output_{}_diff_pred_1.svg'.format(run_nb)))
# ROC
cmd_roc ="python ../utils/roc.py {path} ".format(
path=os.path.join(fold_output_path, 'output_{}.txt'.format(run_nb))
)
print('CMD: {}'.format(cmd_roc))
rc = subprocess.call(cmd_roc, shell=True)
shutil.move('roc.png', os.path.join(base_output_path, "run_{}".format(i), 'roc.png'))
# Time
cmd_time ="python ../utils/time_analysis.py {path} ".format(
path=os.path.join(os.path.join(fold_output_path, 'output_{}.txt'.format(run_nb)))
)
cmd_time_gp = "gnuplot {}".format(os.path.join(base_output_path, "run_{}".format(i), 'output_{}_time.gp'.format(run_nb)).format(run_nb))
#rc = subprocess.call(cmd_time, shell=True)
#rc = subprocess.call(cmd_time_gp, shell=True)
except Exception as e:
print(e)
print('# Analyze all runs...')
try:
cmd_analyze_runs ="python ../utils/analyze_runs.py {path} {instance} {k} {instance} 'table:{instance}' '{caption}'".format(
instance=instance_name,
path="hcbr.global.log.csv" if not only_analysis else os.path.join(base_output_path, "hcbr.global.log.csv"),
k=k,
caption="Confusion matrix and performances indicators for the \\texttt{" + instance_name +"} dataset."
)
rc = subprocess.call(cmd_analyze_runs, shell=True)
print('CMD: {}'.format(cmd_analyze_runs))
cmd_confusion_matrix = "python ../utils/confusion_matrix.py {}".format(os.path.join(base_output_path, 'output.average.txt'))
cmd_cm_gp = "gnuplot {}".format('output_confusion_matrix.gp')
rc = subprocess.call(cmd_confusion_matrix, shell=True)
rc = subprocess.call(cmd_cm_gp, shell=True)
shutil.move('output_confusion_matrix.gp', os.path.join(base_output_path, 'output_confusion_matrix.gp'))
shutil.move('output_confusion_matrix.txt', os.path.join(base_output_path, 'output_confusion_matrix.txt'))
shutil.move('output_confusion_matrix_0.png', os.path.join(base_output_path, 'output_confusion_matrix_0.png'))
shutil.move('output_confusion_matrix_1.png', os.path.join(base_output_path, 'output_confusion_matrix_1.png'))
shutil.move('output_confusion_matrix_2.png', os.path.join(base_output_path, 'output_confusion_matrix_2.png'))
shutil.move('output_confusion_matrix_0.svg', os.path.join(base_output_path, 'output_confusion_matrix_0.svg'))
shutil.move('output_confusion_matrix_1.svg', os.path.join(base_output_path, 'output_confusion_matrix_1.svg'))
shutil.move('output_confusion_matrix_2.svg', os.path.join(base_output_path, 'output_confusion_matrix_2.svg'))
# Prediction analysis
cmd_prediction_analysis ="python ../utils/prediction_analysis.py {path} ".format(
path=os.path.join(base_output_path, 'output.average.txt')
)
cmd_pa_gp = "gnuplot {}".format('output.average_diff_pred.gp')
rc = subprocess.call(cmd_prediction_analysis, shell=True)
rc = subprocess.call(cmd_pa_gp, shell=True)
shutil.move('output.average_diff_bad_pred.txt', os.path.join(base_output_path, 'output.average_diff_bad_pred.txt'))
shutil.move('output.average_diff_negative_bad_pred.txt', os.path.join(base_output_path, 'output.average_diff_negative_bad_pred.txt'))
shutil.move('output.average_diff_negative_pred.txt', os.path.join(base_output_path, 'output.average_diff_negative_pred.txt'))
shutil.move('output.average_diff_positive_bad_pred.txt', os.path.join(base_output_path, 'output.average_diff_positive_bad_pred.txt'))
shutil.move('output.average_diff_pred.txt', os.path.join(base_output_path, 'output.average_diff_pred.txt'))
shutil.move('output.average_positive_diff_pred.txt', os.path.join(base_output_path, 'output.average_positive_diff_pred.txt'))
shutil.move('output.average_diff_pred.gp', os.path.join(base_output_path, 'output.average_diff_pred.gp'))
shutil.move('output.average_diff_pred_0.png', os.path.join(base_output_path, 'output.average_diff_pred_0.png'))
shutil.move('output.average_diff_pred_1.png', os.path.join(base_output_path, 'output.average_diff_pred_1.png'))
shutil.move('output.average_diff_pred_0.svg', os.path.join(base_output_path, 'output.average_diff_pred_0.svg'))
shutil.move('output.average_diff_pred_1.svg', os.path.join(base_output_path, 'output.average_diff_pred_1.svg'))
# ROC
cmd_roc ="python ../utils/roc.py {path} ".format(
path=os.path.join(base_output_path, 'output.average.txt')
)
print('CMD: {}'.format(cmd_roc))
rc = subprocess.call(cmd_roc, shell=True)
shutil.move('roc.png', os.path.join(base_output_path, 'roc.png'))
# Time
cmd_time ="python ../utils/time_analysis.py {path} {column}".format(
path=os.path.join(base_output_path, 'output.average.txt'),
column=10
)
cmd_time_gp = "gnuplot {}".format(os.path.join(base_output_path, 'output.average_time.gp'))
rc = subprocess.call(cmd_time, shell=True)
rc = subprocess.call(cmd_time_gp, shell=True)
cmd_time ="python ../utils/time_analysis.py {path} {column}".format(
path=os.path.join(base_output_path, 'overlap.average.log.csv'),
column=3
)
cmd_time_gp = "gnuplot {}".format(os.path.join(base_output_path, 'overlap.average.log_time.gp'))
rc = subprocess.call(cmd_time, shell=True)
rc = subprocess.call(cmd_time_gp, shell=True)
cmd_time ="python ../utils/time_analysis.py {path} {column}".format(
path=os.path.join(base_output_path, 'strength.average.log.csv'),
column=5
)
cmd_time_gp = "gnuplot {}".format(os.path.join(base_output_path, 'strength.average.log_time.gp'))
rc = subprocess.call(cmd_time, shell=True)
rc = subprocess.call(cmd_time_gp, shell=True)
cmd_time ="python ../utils/time_analysis.py {path} {column}".format(
path=os.path.join(base_output_path, 'training.average.log.csv'),
column=1
)
cmd_time_gp = "gnuplot {}".format(os.path.join(base_output_path, 'training.average.log_time.gp'))
#rc = subprocess.call(cmd_time, shell=True)
#rc = subprocess.call(cmd_time_gp, shell=True)
except Exception as e:
print(e)
if not only_analysis:
print('# Copy the results...')
shutil.move("hcbr.global.log.csv", os.path.join(base_output_path, "hcbr.global.log.csv"))
shutil.move("{}_casebase.txt".format(instance_name), os.path.join(base_output_path, "{}_casebase.txt".format(instance_name)))
shutil.move("{}_outcomes.txt".format(instance_name), os.path.join(base_output_path, "{}_outcomes.txt".format(instance_name)))
msg = "{} {} {}\n".format(instance_name, seed, average_accuracy / float(k))
sys.stderr.write(msg)
print(msg)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: DIYer22@github
@mail: ylxx@live.com
Created on Thu Jan 16 18:17:20 2020
"""
from boxx import *
from boxx import deg2rad, np, pi
import bpy
import random
def set_cam_pose(cam_radius=1, cam_deg=45, cam_x_deg=None, cam=None):
cam_rad = deg2rad(cam_deg)
if cam_x_deg is None:
cam_x_deg = random.uniform(0, 360)
cam_x_rad = deg2rad(cam_x_deg)
z = cam_radius * np.sin(cam_rad)
xy = (cam_radius ** 2 - z ** 2) ** 0.5
x = xy * np.cos(cam_x_rad)
y = xy * np.sin(cam_x_rad)
cam = cam or bpy.data.objects["Camera"]
cam.location = x, y, z
cam.rotation_euler = pi / 2 - cam_rad, 0.1, pi / 2 + cam_x_rad
cam.scale = (0.1,) * 3
return cam
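# Minimal usage sketch (values are illustrative): place the camera on a sphere of
# radius 2 at 30 degrees elevation and a random azimuth, oriented roughly towards the origin.
#   cam = set_cam_pose(cam_radius=2, cam_deg=30)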
def set_cam_intrinsic(cam, intrinsic_K, hw=None):
"""
K = [[f_x, 0, c_x],
[0, f_y, c_y],
[0, 0, 1]]
    Reference: https://www.rojtberg.net/1601/from-blender-to-opencv-camera-and-back/
"""
    scene = bpy.context.scene
    if hw is None:
        hw = scene.render.resolution_y, scene.render.resolution_x
near = lambda x, y=0, eps=1e-5: abs(x - y) < eps
assert near(intrinsic_K[0][1], 0)
assert near(intrinsic_K[1][0], 0)
h, w = hw
f_x = intrinsic_K[0][0]
f_y = intrinsic_K[1][1]
c_x = intrinsic_K[0][2]
c_y = intrinsic_K[1][2]
cam = cam.data
cam.shift_x = -(c_x / w - 0.5)
cam.shift_y = (c_y - 0.5 * h) / w
cam.lens = f_x / w * cam.sensor_width
pixel_aspect = f_y / f_x
scene.render.pixel_aspect_x = 1.0
scene.render.pixel_aspect_y = pixel_aspect
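# Minimal usage sketch (K values are illustrative): apply a pinhole intrinsic matrix
# to the default camera for a 512x512 render.
#   K = [[512.0, 0, 256.0], [0, 512.0, 256.0], [0, 0, 1]]
#   set_cam_intrinsic(bpy.data.objects["Camera"], K, hw=(512, 512))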
def remove_useless_data():
"""
remove all data and release RAM
"""
for block in bpy.data.meshes:
if block.users == 0:
bpy.data.meshes.remove(block)
for block in bpy.data.materials:
if block.users == 0:
bpy.data.materials.remove(block)
for block in bpy.data.textures:
if block.users == 0:
bpy.data.textures.remove(block)
for block in bpy.data.images:
if block.users == 0:
bpy.data.images.remove(block)
def clear_all():
[
bpy.data.objects.remove(obj)
for obj in bpy.data.objects
if obj.type in ("MESH", "LIGHT", "CURVE")
]
remove_useless_data()
def set_shading_mode(mode="SOLID", screens=[]):
"""
Performs an action analogous to clicking on the display/shade button of
the 3D view. Mode is one of "RENDERED", "MATERIAL", "SOLID", "WIREFRAME".
The change is applied to the given collection of bpy.data.screens.
If none is given, the function is applied to bpy.context.screen (the
active screen) only. E.g. set all screens to rendered mode:
set_shading_mode("RENDERED", bpy.data.screens)
"""
screens = screens if screens else [bpy.context.screen]
for s in screens:
for spc in s.areas:
if spc.type == "VIEW_3D":
spc.spaces[0].shading.type = mode
break # we expect at most 1 VIEW_3D space
def add_stage(size=2, transparency=False):
"""
    add a PASSIVE rigidbody cube as a physics stage or depth background
Parameters
----------
size : float, optional
size of stage. The default is 2.
    transparency : bool, optional
        make the stage transparent in RGB output while still bounding depth. The default is False.
"""
import bpycv
bpy.ops.mesh.primitive_cube_add(size=size, location=(0, 0, -size / 2))
stage = bpy.context.active_object
stage.name = "stage"
with bpycv.activate_obj(stage):
bpy.ops.rigidbody.object_add()
stage.rigid_body.type = "PASSIVE"
if transparency:
stage.rigid_body.use_margin = True
stage.rigid_body.collision_margin = 0.04
stage.location.z -= stage.rigid_body.collision_margin
material = bpy.data.materials.new("transparency_stage_bpycv")
material.use_nodes = True
material.node_tree.nodes.clear()
with bpycv.activate_node_tree(material.node_tree):
bpycv.Node("ShaderNodeOutputMaterial").Surface = bpycv.Node(
"ShaderNodeBsdfPrincipled", Alpha=0
).BSDF
stage.data.materials.append(material)
return stage
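# Minimal usage sketch (sizes are illustrative): build a transparent physics stage and
# place the camera before rendering.
#   stage = add_stage(size=4, transparency=True)
#   set_cam_pose(cam_radius=2, cam_deg=45)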
if __name__ == "__main__":
pass
|
#
#
# Copyright (C) University of Melbourne 2013
#
#
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#
#
"""Module subclassing TxMultiGeneratorBase that provides an implementation for
multi-site generators.
"""
from tools import mureilexception, mureilbuilder
import copy
import numpy
from generator import txmultigeneratorbase
import logging
logger = logging.getLogger(__name__)
class TxMultiGeneratorMultiSite(txmultigeneratorbase.TxMultiGeneratorBase):
"""Module subclassing TxMultiGeneratorBase that provides an implementation of
state_handle and related handling functions for multi-site generators.
The 'capacity' term in state_handle is implemented as a dict with one item per site.
Each site item is a list of tuples containing (site_index,build_period,decommissioning_period),
describing the set of installed capacity.
"""
def __init__(self):
"""Initialise as for the base class, and also initialise the params_to_site map.
"""
txmultigeneratorbase.TxMultiGeneratorBase.__init__(self)
# params_to_site maps the index in the params list to the site indices.
self.params_to_site = []
def get_config_spec(self):
"""Return a list of tuples of format (name, conversion function, default),
e.g. ('capex', float, 2.0). Put None if no conversion required, or if no
default value, e.g. ('name', None, None)
Configuration:
time_period_yrs: float - the length of the time period in years
time_scale_up_mult: float - the value to multiply non-discounted items,
such as carbon emissions, by to account for a shorter dataset than the
calculation period length.
variable_cost_mult: as for time_scale_up_mult, but may include a factor for
cost discounting.
size: float, optional - relates param to new capacity
carbon_price_m: float - carbon price in $M/tonne
startup_data_name: string, optional - the name of the data array that contains
data on startup capacities.
startup_data_string: string, optional - a python format data array suitable for
input into set_startup_state, all on a single line.
params_to_site_data_name: string, optional - the name of the data array that
contains a list of how the input params list maps to site indices.
params_to_site_data_string: list of integers, optional - the site indices,
listed separated by spaces, defining the site index corresponding to
each optimisation param, in order.
vom: float, default 0 - variable operating and maintenance cost, in $/MWh, same for all sites
capital_cost: float, default 0 - cost in $M per MW for new capacity.
install_cost: float, default 0 - cost in $M per site, when site has an
installation from this generator for the first time.
decommissioning_cost: float, optional (default 0) - cost in $M per MW for
decommissioning.
lifetime_yrs: float, default 20 - the time in years that new capacity lasts
"""
return txmultigeneratorbase.TxMultiGeneratorBase.get_config_spec(self) + [
('variable_cost_mult', float, 1.0),
('time_scale_up_mult', float, 1.0),
('carbon_price_m', float, 0.0),
('startup_data_name', None, ''),
('startup_data_string', mureilbuilder.python_eval, 'None'),
('params_to_site_data_name', None, ''),
('params_to_site_data_string', mureilbuilder.make_int_list, ''),
('decommissioning_cost', float, 0),
('vom', float, 0),
('capital_cost', float, 0),
('install_cost', float, 0),
('time_period_yrs', float, None),
('lifetime_yrs', float, 20),
('size', float, 1.0),
('start_min_param', int, 1e20),
('start_max_param', int, 1e20),
('timestep_hrs', float, None)
]
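    # Illustrative (hypothetical) configuration consistent with the spec above:
    #   time_period_yrs: 5, lifetime_yrs: 20, capital_cost: 1.5, install_cost: 10,
    #   size: 100, params_to_site_data_string: "1 2 3"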
def complete_configuration_pre_expand(self):
"""Complete the configuration prior to expanding the
period configs.
This implementation checks that the lifetime_yrs is a multiple
of time_period_yrs, and sets the startup state and params_to_site from the
configuration strings.
"""
time_period_yrs = self.config['time_period_yrs']
lifetime_yrs = self.config['lifetime_yrs']
error = None
if isinstance(lifetime_yrs, dict):
for value in lifetime_yrs.itervalues():
div = value / time_period_yrs
if not (float(int(div)) == div):
error = value
else:
div = lifetime_yrs / time_period_yrs
if not (float(int(div)) == div):
error = lifetime_yrs
if error is not None:
msg = ('In section ' + self.config['section'] + ', lifetime_yrs = ' +
str(error) + ' which is required to be a multiple of time_period_yrs of ' +
str(time_period_yrs))
raise mureilexception.ConfigException(msg, {})
# Set the startup state and the params to site from the configuration strings.
if self.config['startup_data_string'] is not None:
self.set_startup_state(self.config['startup_data_string'])
if len(self.config['params_to_site_data_string']) > 0:
self.params_to_site = self.config['params_to_site_data_string']
def get_data_types(self):
"""Return a list of keys for each type of
data required, for example ts_wind, ts_demand.
Outputs:
data_type: list of strings - each a key name
describing the data required for this generator.
"""
data_types = []
if len(self.config['startup_data_name']) > 0:
data_types.append(self.config['startup_data_name'])
if len(self.config['params_to_site_data_name']) > 0:
data_types.append(self.config['params_to_site_data_name'])
return data_types
def set_data(self, data):
"""Set the data dict with the data series required
for the generator.
This implementation looks for the data types:
self.config['startup_data_name']: Interpets this into
the startup state, using the set_startup_state function.
self.config['params_to_site_data_name']: Sets self.params_to_site
to this.
Inputs:
data: dict - with keys matching those requested by
get_data_types.
"""
startup_data_name = self.config['startup_data_name']
if (len(startup_data_name) > 0) and (startup_data_name in data):
self.set_startup_state(data[startup_data_name])
params_to_site_name = self.config['params_to_site_data_name']
if (len(params_to_site_name) > 0) and (params_to_site_name in data):
self.params_to_site = data[params_to_site_name]
def set_startup_state(self, startup_data):
"""Set the startup state from the data provided. Sets
self.startup_state from this.
Inputs:
startup_data: An array of generators * 4:
[[site_index, capacity, build_date, decommissioning_period],
...]
"""
# Check if the startup data is empty. If so, just return.
if len(startup_data) == 0:
return
# Find out which build periods are covered.
startup_data = numpy.array(startup_data)
if not (len(startup_data.shape) == 2):
raise mureilexception.ConfigException('startup data array for module ' +
self.config['section'] + ' is not rectangular.', {})
if not (startup_data.shape[1] == 4):
raise mureilexception.ConfigException('startup data array for module ' +
self.config['section'] + ' shape ' + str(startup_data.shape) +
' but (n, 4) is required.', {})
self.extra_periods = map(int,
(list(set(startup_data[:,2].tolist() + self.extra_periods))))
self.extra_periods.sort()
# And insert each existing generator into the starting state.
cap_list = self.startup_state['capacity']
hist_list = self.startup_state['history']
for i in range(startup_data.shape[0]):
site_index = int(startup_data[i, 0])
new_cap = startup_data[i, 1]
period = int(startup_data[i, 2])
decomm_date = int(startup_data[i, 3])
new_entry = (new_cap, period, decomm_date)
if decomm_date < self.run_periods[0]:
                logger.warning('Model in section ' + self.config['section'] +
                    ' adds startup capacity decommissioned at end of ' + str(decomm_date) +
                    ' but the first run period is ' + str(self.run_periods[0]) +
                    ' so it has been removed from the startup state.')
if site_index not in hist_list:
hist_list[site_index] = []
hist_list[site_index].append(new_entry)
else:
new_entry = (new_cap, period, decomm_date)
if site_index not in cap_list:
cap_list[site_index] = []
cap_list[site_index].append(new_entry)
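    # Illustrative (hypothetical) startup_data rows, following the format described above:
    #   [[3, 100.0, 2010, 2030], [7, 50.0, 2015, 2035]]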
def get_param_count(self):
"""Return the number of parameters that this generator,
as configured, requires to be optimised, per time period.
Outputs:
param_count: non-negative integer - the number of
parameters required per time period.
"""
return len(self.params_to_site)
def get_param_starts(self):
"""Return two nested lists - one for min, one max, for starting values for the
params. Must be either [[]] or [len(run_periods),param_count].
Outputs:
min_start_list: list of param integers, or [[]]
max_start_list: list of param integers, or [[]]
"""
param_count = self.get_param_count()
period_count = len(self.run_periods)
if param_count > 0:
if (self.config['start_min_param'] == 1e20):
start_mins = [[]]
else:
start_mins = (numpy.ones((period_count, param_count)) * self.config['start_min_param']).tolist()
if (self.config['start_max_param'] == 1e20):
start_maxs = [[]]
else:
start_maxs = (numpy.ones((period_count, param_count)) * self.config['start_max_param']).tolist()
else:
start_mins = [[]]
start_maxs = [[]]
return start_mins, start_maxs
def update_state_new_period_list(self, state_handle, period, new_capacity):
"""Implements update_state_new_period_list as defined in txmultigeneratorbase,
for the state_handle format for this multi-site implementation.
"""
state_handle['curr_period'] = period
cap_list = state_handle['capacity']
for site_index, new_cap, decomm_date in new_capacity:
site_index = int(site_index)
new_entry = (new_cap, period, int(decomm_date))
if site_index not in cap_list:
cap_list[site_index] = []
cap_list[site_index].append(new_entry)
return None
def update_state_new_period_params(self, state_handle, period, new_params):
"""Implements update_state_new_period_params as defined in txmultigeneratorbase,
for the state_handle format for this multi-site implementation.
Filters any negative new_params values to 0.
"""
state_handle['curr_period'] = period
curr_conf = self.period_configs[period]
decomm_date = int(curr_conf['lifetime_yrs'] - curr_conf['time_period_yrs'] + period)
cap_list = state_handle['capacity']
new_cap = numpy.array(new_params).clip(0) * curr_conf['size']
for i in (numpy.nonzero(new_cap)[0]):
site_index = self.params_to_site[i]
new_entry = (new_cap[i], period, decomm_date)
if site_index not in cap_list:
cap_list[site_index] = []
cap_list[site_index].append(new_entry)
return None
def calculate_update_decommission(self, state_handle):
"""Implements update_decommission as defined in txmultigeneratorbase,
for the state_handle format for this multi-site implementation.
"""
period = state_handle['curr_period']
cap_list = state_handle['capacity']
hist_list = state_handle['history']
total_cost = 0.0
sites = []
cost = []
decommissioned = []
fully_decommissioned = []
decomm_cost = self.period_configs[period]['decommissioning_cost']
for site, site_caps in cap_list.iteritems():
decomm = [tup for tup in site_caps if (tup[2] == period)]
if len(decomm) > 0:
sites.append(site)
decom_cap = sum([tup[0] for tup in decomm])
decommissioned.append(decom_cap)
this_cost = decom_cap * decomm_cost
cost.append(this_cost)
total_cost += this_cost
# add the decommissioned capacity to the 'history' list
if not site in hist_list:
hist_list[site] = []
hist_list[site] += decomm
# and rebuild the list of what's left
# note that the expression in here is the complement of that to compute
# decomm above.
new_list = [tup for tup in site_caps if not (tup[2] == period)]
# if all capacity is gone from this site
if len(new_list) == 0:
fully_decommissioned.append(site)
else:
cap_list[site] = new_list
for site in fully_decommissioned:
del cap_list[site]
return total_cost, zip(sites, decommissioned, cost)
def calculate_new_capacity_cost(self, state_handle):
"""Implements calculate_new_capacity_cost as defined in TxMultiGeneratorBase,
for the state_handle format for this multi-site implementation. Calculates
the cost as a simple multiple of the new capacity size.
"""
period = state_handle['curr_period']
cap_list = state_handle['capacity']
hist_list = state_handle['history']
total_cost = 0.0
sites = []
cost = []
new_capacity = []
for site, value in cap_list.iteritems():
try:
hist = hist_list[site]
except KeyError:
hist = []
this_cost, new_cap = self.calculate_capital_cost_site(
(value, hist), period, site)
if new_cap > 0:
sites.append(site)
new_capacity.append(new_cap)
cost.append(this_cost)
total_cost += this_cost
return total_cost, zip(sites, new_capacity, cost)
def calculate_capital_cost_site(self, site_data, period, site):
""""Calculate the incremental capital cost incurred in this
period by the new capacity, for this site.
This is a useful function for generators to override to implement
cost functions that depend on the existing installed capacity.
This function charges a per-MW cost plus an install figure if all
the current capacity is new, and the site has not been used before
for this type of generator.
Inputs:
site_data: a pair of lists - (current_capacity, history), each
a list of tuples of (capacity, build, decom) from the
state_handle.
period: the current period, an integer
site: the site index
Outputs:
cost: the cost in $M of this new capacity
new_capacity: the total new capacity installed at this site
"""
new_cap_list = [tup[0] for tup in site_data[0] if (tup[1] == period)]
new_cap = sum(new_cap_list)
capacity_cost = self.period_configs[period]['capital_cost']
this_cost = new_cap * capacity_cost
install_cost = self.period_configs[period]['install_cost']
if install_cost > 0:
# check if all the current capacity is new
if len(new_cap_list) == len(site_data[0]):
# and check if the site has been used before, ever
if len(site_data[1]) == 0:
# the site is new, so charge the 'install' as well
this_cost += install_cost
return this_cost, new_cap
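    # Worked example (illustrative numbers): 100 MW of new capacity at capital_cost 1.5 $M/MW
    # on a never-used site with install_cost 10 $M gives this_cost = 100 * 1.5 + 10 = 160 $M.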
def get_capacity(self, state_handle):
"""Implement the get_capacity function as defined in TxMultiGeneratorBase, for this
multi-site implementation.
"""
index_list = self.get_site_indices(state_handle)
cap_list = state_handle['capacity']
capacity = []
for site in index_list:
capacity.append(sum([tup[0] for tup in cap_list[site]]))
return capacity
def get_site_indices(self, state_handle):
"""Implement the get_site_indices function as defined in TxMultiGeneratorBase, for this
multi-site implementation.
"""
site_indices = state_handle['capacity'].keys()
site_indices.sort()
return site_indices
def calculate_time_period_simple(self, state_handle, period, new_params,
supply_request, full_results=False):
"""Implement calculate_time_period_simple as defined in TxMultiGeneratorBase for
the multi-site generator model.
"""
curr_config = self.period_configs[period]
# Update the state and get the calculations for each site
self.update_state_new_period_params(state_handle, period, new_params)
site_indices = self.get_site_indices(state_handle)
capital_cost, new_capacity = self.calculate_new_capacity_cost(state_handle)
supply_list, variable_cost_list, carbon_emissions_list, other_list = (
self.calculate_outputs_and_costs(state_handle, supply_request))
if full_results:
capacity = self.get_capacity(state_handle)
# Compute the total supply
supply = numpy.sum(supply_list, axis=0)
# Compute the total variable costs, including carbon cost, for the timeseries, scaled up
cost = ((numpy.sum(variable_cost_list, axis=0) +
(numpy.sum(carbon_emissions_list, axis=0) * curr_config['carbon_price_m'])) * (
curr_config['variable_cost_mult']))
# Do the decommissioning
decomm_cost, decommissioned = self.calculate_update_decommission(state_handle)
# Add the capital and decommissioning costs
cost += decomm_cost
cost += capital_cost
if not full_results:
return site_indices, cost, supply
if full_results:
results = {}
results['site_indices'] = site_indices
results['cost'] = cost
results['aggregate_supply'] = supply
results['capacity'] = capacity
results['decommissioned'] = decommissioned
results['new_capacity'] = new_capacity
results['supply'] = supply_list
results['variable_cost_period'] = variable_cost_list * curr_config['variable_cost_mult']
results['carbon_emissions_period'] = (carbon_emissions_list *
curr_config['time_scale_up_mult'])
results['total_supply_period'] = (curr_config['time_scale_up_mult'] * numpy.sum(supply) *
curr_config['timestep_hrs'])
results['other'] = other_list
results['desc_string'] = self.get_simple_desc_string(results, state_handle)
return site_indices, cost, supply, results
def calculate_time_period_full(self, state_handle, period, new_params, supply_request,
max_supply=[], price=[], make_string=False, do_decommissioning=True):
"""Implement calculate_time_period_full as defined in TxMultiGeneratorBase for
the multi-site generator model.
"""
results = {}
self.update_state_new_period_params(state_handle, period, new_params)
results['site_indices'] = self.get_site_indices(state_handle)
results['capacity'] = self.get_capacity(state_handle)
dummy, results['new_capacity'] = self.calculate_new_capacity_cost(state_handle)
results['supply'], results['variable_cost_ts'], results['carbon_emissions_ts'], results['other'] = (
self.calculate_outputs_and_costs(state_handle, supply_request, max_supply, price))
if do_decommissioning:
            dummy, results['decommissioned'] = (
                self.calculate_update_decommission(state_handle))
else:
results['decommissioned'] = []
if make_string:
results['desc_string'] = self.get_full_desc_string(results, state_handle)
return results
def recalculate_time_period_full(self, state_handle, results, supply_request, max_supply=[], price=[], make_string=False):
"""Implement recalculate_time_period_full as defined in TxMultiGeneratorBase for
the multi-site generator model.
"""
results['supply'], results['variable_cost_ts'], results['carbon_emissions_ts'], results['other'] = (
self.calculate_outputs_and_costs(state_handle, supply_request, max_supply, price))
if make_string:
results['desc_string'] = self.get_full_desc_string(results, state_handle)
return results
else:
return results
def calculate_costs_from_schedule_and_finalise(self, state_handle, schedule, make_string=False):
"""Calculate the costs, given the schedule from the dispatcher.
Finalise the decommissioning for that period.
This assumes that update_state_new_period_params has been called previously,
and the offer quantities have been determined for the active sites.
Inputs:
state_handle:
as for calculate_time_period_full in txmultigeneratorbase.py
schedule: a set of timeseries for each active site, as previously
listed in the call to get_offers_*
Outputs:
as for calculate_time_period_full in txmultigeneratorbase.py
"""
results = {}
site_indices = self.get_site_indices(state_handle)
results['site_indices'] = site_indices
results['capacity'] = self.get_capacity(state_handle)
results['new_capacity_total_cost'], results['new_capacity'] = self.calculate_new_capacity_cost(state_handle)
results['supply'] = schedule
results['variable_cost_ts'], results['carbon_emissions_ts'], results['other'] = (
self.calculate_variable_costs(state_handle, site_indices, schedule))
results['decomm_total_cost'], results['decommissioned'] = (
self.calculate_update_decommission(state_handle))
if make_string:
results['desc_string'] = self.get_full_desc_string(results, state_handle)
return results
|
import pandas as pd
from .entity import CatalogEntity
from .repository.dataset_repo import get_dataset_repo
from .repository.variable_repo import get_variable_repo
from .repository.constants import VARIABLE_FILTER
from .summary import variable_describe, head, tail, counts, quantiles, top_values, histogram
_DESCRIPTION_LENGTH_LIMIT = 50
class Variable(CatalogEntity):
"""This class represents a :py:class:`Variable <cartoframes.data.observatory.Variable>`
of datasets in the :py:class:`Catalog <cartoframes.data.observatory.Catalog>`.
Variables contain column names, description, data type, aggregation method, and some other metadata that is
useful to understand the underlying data inside a :obj:`Dataset`
Examples:
List the variables of a :py:class:`Dataset <cartoframes.data.observatory.Dataset>`
in combination with nested filters (categories, countries, etc.)
>>> dataset = Dataset.get('mbi_retail_turn_705247a')
>>> dataset.variables
[<Variable.get('RT_CI_95050c10')> #'Retail Turnover: index (country eq.100)', ...]
"""
_entity_repo = get_variable_repo()
@property
def datasets(self):
"""Get the list of datasets related to this variable.
Returns:
:py:class:`CatalogList <cartoframes.data.observatory.entity.CatalogList>` List of Dataset instances.
Raises:
CatalogError: if there's a problem when connecting to the catalog or no datasets are found.
"""
return get_dataset_repo().get_all({VARIABLE_FILTER: self.id})
@property
def name(self):
"""Name of this variable."""
return self.data['name']
@property
def description(self):
"""Description of this variable."""
return self.data['description']
@property
def column_name(self):
"""Column name of the actual table related to the variable in the :obj:`Dataset`."""
return self.data['column_name']
@property
def db_type(self):
"""Type in the database.
Returns:
str
Examples: INTEGER, STRING, FLOAT, GEOGRAPHY, JSON, BOOL, etc.
"""
return self.data['db_type']
@property
def dataset(self):
"""ID of the :obj:`Dataset` to which this variable belongs."""
return self.data['dataset_id']
@property
def agg_method(self):
"""Text representing a description of the aggregation method used to compute the values in this `Variable`"""
return self.data['agg_method']
@property
def variable_group(self):
"""If any, ID of the variable group to which this variable belongs."""
return self.data['variable_group_id']
@property
def summary(self):
"""JSON object with extra metadata that summarizes different properties of this variable."""
return self.data['summary_json']
@property
def project_name(self):
project, _, _, _ = self.id.split('.')
return project
@property
def schema_name(self):
_, schema, _, _ = self.id.split('.')
return schema
@property
def dataset_name(self):
_, _, dataset, _ = self.id.split('.')
return dataset
def describe(self, autoformat=True):
"""Shows a summary of the actual stats of the variable (column) of the dataset.
Some of the stats provided per variable are: avg, max, min, sum, range,
stdev, q1, q3, median and interquartile_range
Args:
autoformat (boolean): set automatic format for values. Default is True.
Example:
.. code::
# avg average value
# max max value
# min min value
# sum sum of all values
# range
# stdev standard deviation
# q1 first quantile
# q3 third quantile
# median median value
# interquartile_range
"""
FLOAT_FORMAT = 'display.float_format'
if autoformat:
pd.set_option(FLOAT_FORMAT, lambda x: '%.3f' % x)
data = self.data['summary_json']
return variable_describe(data)
def head(self):
"""Returns a sample of the 10 first values of the variable data.
        For datasets with fewer than 10 rows
        (e.g. zip codes of small countries), this method won't return anything.
        """
data = self.data['summary_json']
return head(self.__class__, data)
def tail(self):
"""Returns a sample of the 10 last values of the variable data.
        For datasets with fewer than 10 rows
        (e.g. zip codes of small countries), this method won't return anything.
        """
data = self.data['summary_json']
return tail(self.__class__, data)
def counts(self):
"""Returns a summary of different counts over the actual variable values.
Example:
.. code::
# all total number of values
# null total number of null values
# zero number of zero-valued entries
            # extreme number of values 3 stdev outside the interquartile range
            # distinct number of distinct (unique) entries
            # outliers number of outliers (outside 1.5 stdev of the interquartile range)
# zero_percent percent of values that are zero
# distinct_percent percent of values that are distinct
"""
data = self.data['summary_json']
return counts(data)
def quantiles(self):
"""Returns the quantiles of the variable data."""
data = self.data['summary_json']
return quantiles(data)
def top_values(self):
"""Returns information about the top values of the variable data."""
data = self.data['summary_json']
return top_values(data)
def histogram(self):
"""Plots an histogram with the variable data."""
data = self.data['summary_json']
return histogram(data)
def __repr__(self):
descr = self.description
if descr and len(descr) > _DESCRIPTION_LENGTH_LIMIT:
descr = descr[0:_DESCRIPTION_LENGTH_LIMIT] + '...'
return "<{classname}.get('{entity_id}')> #'{descr}'" \
.format(classname=self.__class__.__name__, entity_id=self._get_print_id(), descr=descr)
|
from libcrypto import hamming_distance
from libcrypto import split_blocks
from libcrypto import xor
from libcrypto import freq_score
from base64 import b64decode
from operator import itemgetter
def main():
file64 = ""
for line in open("../assets/inputS1C6.txt","r"):
file64 += line.rstrip()
file = bytearray(b64decode(file64))
distances = []
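    # Keysize guess: for the correct keysize, bytes in consecutive ciphertext blocks are
    # XORed with the same key bytes, so their normalised Hamming distance tends to be lower.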
for keysize in range(2,40):
dist = 0
sample_size = 10
for ctr in range(0, sample_size):
b1 = bytearray(file[(keysize*ctr):(keysize*(ctr+1))])
b2 = bytearray(file[(keysize*(ctr+1)):(keysize*(ctr+2))])
dist += hamming_distance(b1, b2) / float(keysize)
dist /= sample_size
distances.append([keysize, dist])
distances = sorted(distances,key=itemgetter(1))[:1]
print("Possible Solutions...\n")
for key in distances:
passphrase = ""
key = key[0]
blocks = split_blocks(key,file)
transposed_blocks = []
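        # Transpose: collect every key-th byte into one block, so each transposed block
        # was XORed with a single key byte and can be solved as single-byte XOR below.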
for idx in range(0,key):
tblock = bytearray()
for block in blocks:
try:
tblock.append(block[idx])
except IndexError:
pass
transposed_blocks.append(tblock)
for block in transposed_blocks:
bytekeys = []
            for i in range(1, 256):  # try every possible single-byte key value
                xor_bytes = xor(bytearray(bytes([i])), block)
                try:
                    xor_string = xor_bytes.decode("ascii")
                    bytekeys.append([i, xor_string, freq_score(xor_string)])
                except UnicodeDecodeError:
                    continue
bytekeys.sort(key=lambda x: x[2], reverse=True)
bkey = bytekeys[:1][0]
passphrase += chr(bkey[0])
print("Key:{0}\n".format(passphrase))
print(xor(bytearray(passphrase.encode()),bytearray(file)).decode())
if __name__ == "__main__":
main()
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Routine for decoding the CIFAR-10 binary file format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Process images of this size. Note that this differs from the original CIFAR
# image size of 32 x 32. If one alters this number, then the entire model
# architecture will change and any model would need to be retrained.
IMAGE_SIZE = 24
# Global constants describing the CIFAR-10 data set.
NUM_CLASSES = 10
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000
def read_cifar10(filename_queue):
"""Reads and parses examples from CIFAR10 data files.
Recommendation: if you want N-way read parallelism, call this function
N times. This will give you N independent Readers reading different
files & positions within those files, which will give better mixing of
examples.
Args:
filename_queue: A queue of strings with the filenames to read from.
Returns:
An object representing a single example, with the following fields:
height: number of rows in the result (32)
width: number of columns in the result (32)
depth: number of color channels in the result (3)
key: a scalar string Tensor describing the filename & record number
for this example.
label: an int32 Tensor with the label in the range 0..9.
uint8image: a [height, width, depth] uint8 Tensor with the image data
"""
class CIFAR10Record(object):
pass
result = CIFAR10Record()
# Dimensions of the images in the CIFAR-10 dataset.
# See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
# input format.
label_bytes = 1 # 2 for CIFAR-100
result.height = 32
result.width = 32
result.depth = 3
image_bytes = result.height * result.width * result.depth
# Every record consists of a label followed by the image, with a
# fixed number of bytes for each.
record_bytes = label_bytes + image_bytes
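  # For CIFAR-10 this is 1 label byte + 3 * 32 * 32 = 3072 image bytes = 3073 bytes per record.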
# Read a record, getting filenames from the filename_queue. No
# header or footer in the CIFAR-10 format, so we leave header_bytes
# and footer_bytes at their default of 0.
reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
result.key, value = reader.read(filename_queue)
# Convert from a string to a vector of uint8 that is record_bytes long.
record_bytes = tf.decode_raw(value, tf.uint8)
# The first bytes represent the label, which we convert from uint8->int32.
result.label = tf.cast(
tf.strided_slice(record_bytes, [0], [label_bytes], [1]), tf.int32)
# The remaining bytes after the label represent the image, which we reshape
# from [depth * height * width] to [depth, height, width].
depth_major = tf.reshape(
tf.strided_slice(record_bytes, [label_bytes],
[label_bytes + image_bytes], [1]),
[result.depth, result.height, result.width])
# Convert from [depth, height, width] to [height, width, depth].
result.uint8image = tf.transpose(depth_major, [1, 2, 0])
return result
def _generate_image_and_label_batch(image, label, min_queue_examples,
batch_size, shuffle):
"""Construct a queued batch of images and labels.
Args:
image: 3-D Tensor of [height, width, 3] of type.float32.
label: 1-D Tensor of type.int32
min_queue_examples: int32, minimum number of samples to retain
in the queue that provides of batches of examples.
batch_size: Number of images per batch.
shuffle: boolean indicating whether to use a shuffling queue.
Returns:
images: Images. 4D tensor of [batch_size, height, width, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
"""
# Create a queue that shuffles the examples, and then
# read 'batch_size' images + labels from the example queue.
num_preprocess_threads = 16
if shuffle:
images, label_batch = tf.train.shuffle_batch(
[image, label],
batch_size=batch_size,
num_threads=num_preprocess_threads,
capacity=min_queue_examples + 3 * batch_size,
min_after_dequeue=min_queue_examples)
else:
images, label_batch = tf.train.batch(
[image, label],
batch_size=batch_size,
num_threads=num_preprocess_threads,
capacity=min_queue_examples + 3 * batch_size)
# Display the training images in the visualizer.
tf.summary.image('images', images)
return images, tf.reshape(label_batch, [batch_size])
def distorted_inputs(data_dir, batch_size):
"""Construct distorted input for CIFAR training using the Reader ops.
Args:
data_dir: Path to the CIFAR-10 data directory.
batch_size: Number of images per batch.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
"""
filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)
for i in xrange(1, 6)]
for f in filenames:
if not tf.gfile.Exists(f):
raise ValueError('Failed to find file: ' + f)
# Create a queue that produces the filenames to read.
filename_queue = tf.train.string_input_producer(filenames)
# Read examples from files in the filename queue.
read_input = read_cifar10(filename_queue)
reshaped_image = tf.cast(read_input.uint8image, tf.float32)
height = IMAGE_SIZE
width = IMAGE_SIZE
# Image processing for training the network. Note the many random
# distortions applied to the image.
# Randomly crop a [height, width] section of the image.
distorted_image = tf.random_crop(reshaped_image, [height, width, 3])
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
  # Because these operations are not commutative, consider randomizing
  # the order of their operations.
distorted_image = tf.image.random_brightness(distorted_image,
max_delta=63)
distorted_image = tf.image.random_contrast(distorted_image,
lower=0.2, upper=1.8)
# Subtract off the mean and divide by the variance of the pixels.
float_image = tf.image.per_image_standardization(distorted_image)
# Set the shapes of tensors.
float_image.set_shape([height, width, 3])
read_input.label.set_shape([1])
# Ensure that the random shuffling has good mixing properties.
min_fraction_of_examples_in_queue = 0.4
min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
min_fraction_of_examples_in_queue)
print ('Filling queue with %d CIFAR images before starting to train. '
'This will take a few minutes.' % min_queue_examples)
# Generate a batch of images and labels by building up a queue of examples.
return _generate_image_and_label_batch(float_image, read_input.label,
min_queue_examples, batch_size,
shuffle=True)
def inputs(eval_data, data_dir, batch_size):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
data_dir: Path to the CIFAR-10 data directory.
batch_size: Number of images per batch.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
"""
if not eval_data:
filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)
for i in xrange(1, 6)]
num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
else:
filenames = [os.path.join(data_dir, 'test_batch.bin')]
num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
for f in filenames:
if not tf.gfile.Exists(f):
raise ValueError('Failed to find file: ' + f)
# Create a queue that produces the filenames to read.
filename_queue = tf.train.string_input_producer(filenames)
# Read examples from files in the filename queue.
read_input = read_cifar10(filename_queue)
reshaped_image = tf.cast(read_input.uint8image, tf.float32)
height = IMAGE_SIZE
width = IMAGE_SIZE
# Image processing for evaluation.
# Crop the central [height, width] of the image.
resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,
width, height)
# Subtract off the mean and divide by the variance of the pixels.
float_image = tf.image.per_image_standardization(resized_image)
# Ensure that the random shuffling has good mixing properties.
min_fraction_of_examples_in_queue = 0.4
min_queue_examples = int(num_examples_per_epoch *
min_fraction_of_examples_in_queue)
# Generate a batch of images and labels by building up a queue of examples.
if eval_data:
read_input.label.set_shape((1,))
return _generate_image_and_label_batch(float_image, read_input.label,
min_queue_examples, batch_size,
shuffle=False)
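# Hedged usage sketch (not part of the original module): how the queue-based
# input functions above are typically consumed in a TF1 graph/session. The
# data_dir path and batch size below are placeholder assumptions.
def _example_input_usage():
  images, labels = distorted_inputs('/tmp/cifar10_data/cifar-10-batches-bin',
                                    batch_size=128)
  with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    # Pull one preprocessed batch of images and labels off the queue.
    image_batch, label_batch = sess.run([images, labels])
    coord.request_stop()
    coord.join(threads)
  return image_batch, label_batch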
|
# A simple CLI runner for LSF that can be used when running Galaxy from a
# non-submit host and submitting to an LSF cluster.
from logging import getLogger
try:
from galaxy.model import Job
job_states = Job.states
except ImportError:
# Not in Galaxy, map Galaxy job states to Pulsar ones.
from pulsar.util import enum
job_states = enum(RUNNING='running', OK='complete', QUEUED='queued', ERROR="failed")
from ..job import BaseJobExec
log = getLogger(__name__)
argmap = {
'memory': '-M', # There is code in job_script_kwargs relying on this name's setting
'cores': '-n',
'queue': '-q',
'working_dir': '-cwd',
'project': '-P'
}
class LSF(BaseJobExec):
def __init__(self, **params):
self.params = {}
for k, v in params.items():
self.params[k] = v
def job_script_kwargs(self, ofile, efile, job_name):
scriptargs = {'-o': ofile,
'-e': efile,
'-J': job_name}
# Map arguments using argmap.
for k, v in self.params.items():
if k == 'plugin':
continue
try:
if k == 'memory':
                    # Memory requires both -M and -R rusage[mem=v] requests
scriptargs['-R'] = "\"rusage[mem=%s]\"" % v
if not k.startswith('-'):
k = argmap[k]
scriptargs[k] = v
except Exception:
log.warning('Unrecognized long argument passed to LSF CLI plugin: %s' % k)
# Generated template.
template_scriptargs = ''
for k, v in scriptargs.items():
template_scriptargs += '#BSUB %s %s\n' % (k, v)
return dict(headers=template_scriptargs)
def submit(self, script_file):
# bsub returns Job <9147983> is submitted to default queue <research-rh7>.
        # This should really be handled outside with something like
        # parse_external. Currently the CLI runner expects the job id to be
        # the last token of the returned string.
return "bsub <%s | awk '{ print $2}' | sed 's/[<>]//g'" % script_file
def delete(self, job_id):
return 'bkill %s' % job_id
def get_status(self, job_ids=None):
return "bjobs -a -o \"id stat\" -noheader" # check this
def get_single_status(self, job_id):
return "bjobs -o stat -noheader " + job_id
def parse_status(self, status, job_ids):
# Get status for each job, skipping header.
rval = {}
for line in status.splitlines():
job_id, state = line.split()
if job_id in job_ids:
# map job states to Galaxy job states.
rval[job_id] = self._get_job_state(state)
return rval
def parse_single_status(self, status, job_id):
if not status:
# Job not found in LSF, most probably finished and forgotten.
# lsf outputs: Job <num> is not found -- but that is on the stderr
            # Note: a very old failed job will not be shown here either,
            # which would be badly handled here. So this only works well when Galaxy
            # is constantly monitoring the jobs. The logic here is that DONE jobs get
            # forgotten faster than failed jobs.
            log.warning("Job id '%s' not found in LSF status check" % job_id)
return job_states.OK
return self._get_job_state(status)
def get_failure_reason(self, job_id):
return "bjobs -l " + job_id
def parse_failure_reason(self, reason, job_id):
# LSF will produce the following in the job output file:
# TERM_MEMLIMIT: job killed after reaching LSF memory usage limit.
# Exited with exit code 143.
for line in reason.splitlines():
if "TERM_MEMLIMIT" in line:
from galaxy.jobs import JobState
return JobState.runner_states.MEMORY_LIMIT_REACHED
return None
def _get_job_state(self, state):
# based on:
# https://www.ibm.com/support/knowledgecenter/en/SSETD4_9.1.3/lsf_admin/job_state_lsf.html
# https://www.ibm.com/support/knowledgecenter/en/SSETD4_9.1.2/lsf_command_ref/bjobs.1.html
try:
return {
'EXIT': job_states.ERROR,
'RUN': job_states.RUNNING,
'PEND': job_states.QUEUED,
'DONE': job_states.OK,
'PSUSP': job_states.ERROR,
'USUSP': job_states.ERROR,
'SSUSP': job_states.ERROR,
'UNKWN': job_states.ERROR,
'WAIT': job_states.QUEUED,
'ZOMBI': job_states.ERROR
            }[state]
except KeyError:
raise KeyError("Failed to map LSF status code [%s] to job state." % state)
__all__ = ('LSF',)
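# Hedged usage sketch (not part of the upstream plugin): how parse_status maps
# `bjobs -a -o "id stat" -noheader` output to job states. The job ids and the
# bjobs text below are hypothetical.
def _example_parse_status():
    lsf = LSF(plugin='LSF')
    fake_bjobs_output = "1001 RUN\n1002 PEND\n1003 DONE\n"
    # Expected mapping: 1001 -> running, 1002 -> queued, 1003 -> complete.
    return lsf.parse_status(fake_bjobs_output, job_ids=['1001', '1002', '1003'])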
|
"""DenseNet models for Keras.
# Reference paper
- [Densely Connected Convolutional Networks]
(https://arxiv.org/abs/1608.06993) (CVPR 2017 Best Paper Award)
# Reference implementation
- [Torch DenseNets]
(https://github.com/liuzhuang13/DenseNet/blob/master/models/densenet.lua)
- [TensorNets]
(https://github.com/taehoonlee/tensornets/blob/master/tensornets/densenets.py)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from keras import backend as K
from keras.layers import Input, Add, Dense, Activation, Flatten, Convolution2D, MaxPooling2D, ZeroPadding2D, \
AveragePooling2D, TimeDistributed, BatchNormalization, Dropout
from keras import layers
from keras_frcnn.RoiPoolingConv import RoiPoolingConv
"""
couple of functions for frcnn..
"""
def get_weight_path():
return os.path.join("pretrain", 'densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5')
def get_img_output_length(width, height):
def get_output_length(input_length):
# zero_pad
input_length += 6
# apply 4 strided convolutions
filter_sizes = [7, 3, 1, 1]
stride = 2
for filter_size in filter_sizes:
input_length = (input_length - filter_size + stride) // stride
return input_length
return get_output_length(width), get_output_length(height)
BASE_WEIGTHS_PATH = (
'https://github.com/keras-team/keras-applications/'
'releases/download/densenet/')
DENSENET121_WEIGHT_PATH = (
BASE_WEIGTHS_PATH +
'densenet121_weights_tf_dim_ordering_tf_kernels.h5')
DENSENET121_WEIGHT_PATH_NO_TOP = (
BASE_WEIGTHS_PATH +
'densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5')
DENSENET169_WEIGHT_PATH = (
BASE_WEIGTHS_PATH +
'densenet169_weights_tf_dim_ordering_tf_kernels.h5')
DENSENET169_WEIGHT_PATH_NO_TOP = (
BASE_WEIGTHS_PATH +
'densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5')
DENSENET201_WEIGHT_PATH = (
BASE_WEIGTHS_PATH +
'densenet201_weights_tf_dim_ordering_tf_kernels.h5')
DENSENET201_WEIGHT_PATH_NO_TOP = (
BASE_WEIGTHS_PATH +
'densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5')
def dense_block(x, blocks, name):
"""A dense block.
# Arguments
x: input tensor.
blocks: integer, the number of building blocks.
name: string, block label.
# Returns
output tensor for the block.
"""
for i in range(blocks):
x = conv_block(x, 32, name=name + '_block' + str(i + 1))
return x
def transition_block(x, reduction, name):
"""A transition block.
# Arguments
x: input tensor.
reduction: float, compression rate at transition layers.
name: string, block label.
# Returns
output tensor for the block.
"""
bn_axis = 3 if K.image_data_format() == 'channels_last' else 1
x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
name=name + '_bn')(x)
x = layers.Activation('relu', name=name + '_relu')(x)
x = layers.Conv2D(int(K.int_shape(x)[bn_axis] * reduction), 1,
use_bias=False,
name=name + '_conv')(x)
x = layers.AveragePooling2D(2, strides=2, name=name + '_pool', padding='same')(x)
return x
def conv_block(x, growth_rate, name):
"""A building block for a dense block.
# Arguments
x: input tensor.
growth_rate: float, growth rate at dense layers.
name: string, block label.
# Returns
Output tensor for the block.
"""
bn_axis = 3 if K.image_data_format() == 'channels_last' else 1
x1 = layers.BatchNormalization(axis=bn_axis,
epsilon=1.001e-5,
name=name + '_0_bn')(x)
x1 = layers.Activation('relu', name=name + '_0_relu')(x1)
x1 = layers.Conv2D(4 * growth_rate, 1,
use_bias=False,
name=name + '_1_conv')(x1)
x1 = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
name=name + '_1_bn')(x1)
x1 = layers.Activation('relu', name=name + '_1_relu')(x1)
x1 = layers.Conv2D(growth_rate, 3,
padding='same',
use_bias=False,
name=name + '_2_conv')(x1)
x = layers.Concatenate(axis=bn_axis, name=name + '_concat')([x, x1])
return x
def nn_base(input_tensor=None,
blocks=[6, 12, 24, 16],
include_top=False,
weights='imagenet',
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
"""Instantiates the DenseNet architecture.
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
# Arguments
blocks: numbers of building blocks for the four dense layers.
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `'channels_last'` data format)
or `(3, 224, 224)` (with `'channels_first'` data format).
            It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
if K.image_dim_ordering() == 'th':
input_shape = (3, None, None)
else:
input_shape = (None, None, 3)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if K.image_dim_ordering() == 'tf':
bn_axis = 3
else:
bn_axis = 1
x = ZeroPadding2D((3, 3))(img_input)
x = layers.Conv2D(64, 7, strides=2, use_bias=False, name='conv1/conv')(x)
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name='conv1/bn')(x)
x = layers.Activation('relu', name='conv1/relu')(x)
# x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
x = layers.MaxPooling2D(3, strides=2, name='pool1')(x)
x = dense_block(x, blocks[0], name='conv2')
x = transition_block(x, 0.5, name='pool2')
x = dense_block(x, blocks[1], name='conv3')
x = transition_block(x, 0.5, name='pool3')
x = dense_block(x, blocks[2], name='conv4')
# here, the output size is similar to resnet50. stop here.
# x = transition_block(x, 0.5, name='pool4')
# x = dense_block(x, blocks[3], name='conv5')
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name='bn')(x)
x = layers.Activation('relu', name='relu')(x)
return x
def rpn(base_layers,num_anchors):
x = Convolution2D(512, (3, 3), padding='same', activation='relu', kernel_initializer='normal', name='rpn_conv1')(base_layers)
x_class = Convolution2D(num_anchors, (1, 1), padding="same", activation='sigmoid', kernel_initializer='uniform', name='rpn_out_class')(x)
x_regr = Convolution2D(num_anchors * 4, (1, 1), activation='linear', kernel_initializer='zero', name='rpn_out_regress')(x)
return [x_class, x_regr, base_layers]
def classifier(base_layers, input_rois, num_rois, nb_classes = 21, trainable=False):
    # compile times on theano tend to be very high, so we use smaller ROI pooling regions as a workaround
if K.backend() == 'tensorflow':
pooling_regions = 14
input_shape = (num_rois,14,14,1024) # densenet output channels are 1024..
elif K.backend() == 'theano':
pooling_regions = 7
input_shape = (num_rois,4096,7,7)
# from vgg version..
out_roi_pool = RoiPoolingConv(pooling_regions, num_rois)([base_layers, input_rois])
out_roi_pool = TimeDistributed(AveragePooling2D((7, 7)), name='avg_pool')(out_roi_pool)
out = TimeDistributed(Flatten(name='flatten'))(out_roi_pool)
out = TimeDistributed(Dense(4096, activation='relu', name='fc1'))(out)
out = TimeDistributed(Dropout(0.5))(out)
out = TimeDistributed(Dense(4096, activation='relu', name='fc2'))(out)
out = TimeDistributed(Dropout(0.5))(out)
out_class = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'), name='dense_class_{}'.format(nb_classes))(out)
# note: no regression target for bg class
out_regr = TimeDistributed(Dense(4 * (nb_classes-1), activation='linear', kernel_initializer='zero'), name='dense_regress_{}'.format(nb_classes))(out)
return [out_class, out_regr]
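# Hedged assembly sketch (assumption, not taken from the original file): how
# nn_base, rpn and classifier are typically wired into Keras models in
# keras_frcnn-style training code. num_anchors, num_rois and the input shapes
# below are placeholders.
def _example_build_models(num_anchors=9, num_rois=32, nb_classes=21):
    from keras.models import Model
    img_input = Input(shape=(None, None, 3))
    roi_input = Input(shape=(num_rois, 4))
    shared_layers = nn_base(img_input)
    rpn_class, rpn_regr, _ = rpn(shared_layers, num_anchors)
    cls_out, regr_out = classifier(shared_layers, roi_input, num_rois, nb_classes=nb_classes)
    model_rpn = Model(img_input, [rpn_class, rpn_regr])
    model_classifier = Model([img_input, roi_input], [cls_out, regr_out])
    return model_rpn, model_classifier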
|
#!/usr/bin/env python3
import gevent.monkey
gevent.monkey.patch_all()
import argparse
import json
import re
import string
import socket
import urllib.parse
import webbrowser
import flask
import gevent.pool
import gevent.pywsgi
import requests
app = flask.Flask(__name__, template_folder='.')
config = {
'host': '127.0.0.1',
'port': 48230,
'blur': 60,
'no_censor': False,
'no_colors': False,
'no_browser': False,
'width': 5,
'height': 3,
}
@app.route('/')
def index():
return flask.render_template('main.html.tpl', config=config)
@app.route('/users')
def list_users():
page = requests.get('https://chaturbate.com/').text
user_names = re.findall(r'<div class="details">\s*<div class="title">\s*<a\s*href=\s*"/(.+?)/">', page)
pool = gevent.pool.Pool(10)
stream_urls = pool.map(get_user_stream, user_names)
users = [{'name': name, 'stream': stream} for name, stream in zip(user_names, stream_urls) if stream]
return flask.Response(json.dumps(users), mimetype='application/json')
def get_user_stream(user):
page = requests.get('https://chaturbate.com/{}/?use_html_chat=1'.format(user)).text
match = re.search('<video .*src="(.+?)"', page)
if not match:
return ''
playlist = match.group(1).replace(' ', '%20')
playlist = urllib.parse.urlparse(playlist)
server_number = re.sub('[^\d]', '', playlist.netloc.split('.')[0])
return '/streams/{}{}'.format(server_number, playlist.path)
@app.route('/streams/<int:server>/<path:path>')
def get_stream_file(server, path):
full_url = 'http://edge{}.stream.highwebmedia.com:1935/{}'.format(server, path)
resp = requests.get(full_url, stream=True)
content = resp.iter_content(chunk_size=2 ** 16)
status_code = resp.status_code
content_type = resp.headers.get('content-type', 'application/octet-stream')
return flask.Response(content, status=status_code, mimetype=content_type)
def get_args_config():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--host', default=config['host'], help=
'The host the server will bind to. Use 0.0.0.0 for all interfaces.')
parser.add_argument('--port', type=int, default=config['port'], help=
'The port the server will bind to.')
parser.add_argument('--width', type=int, default=config['width'], help=
'Number of elements used from left to right.')
parser.add_argument('--height', type=int, default=config['height'], help=
'Number of elements used from top to bottom.')
parser.add_argument('--blur', type=int, default=config['blur'], help=
'Pixels used in the gaussian blur.')
parser.add_argument('--no-censor', action='store_true', help=
'Disables gaussian blur.')
parser.add_argument('--no-colors', action='store_true', help=
'Disables hue rotation.')
return vars(parser.parse_args())
def make_socket():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((config['host'], config['port']))
sock.listen(5)
return sock
def start_wsgi_server():
sock = make_socket()
server = gevent.pywsgi.WSGIServer(sock, app)
server.serve_forever()
if __name__ == '__main__':
try:
config = get_args_config()
print('Listening on http://{}:{}'.format(config['host'], config['port']))
start_wsgi_server()
except Exception as e:
print(repr(e))
print('Press enter to exit')
_ = input()
|
'''
System of linear equations - 2
'''
a = float(input())
b = float(input())
c = float(input())
d = float(input())
e = float(input())
f = float(input())
if a == 0 and b == 0 and c == 0 and d == 0 and e == 0 and f == 0:
print(5)
elif a * d == b * c and a * f != c * e:
print(0)
elif a == 0 and b == 0 and e != 0:
print(0)
elif c == 0 and d == 0 and f != 0:
print(0)
elif a == 0 and c == 0 and b * f != d * e:
print(0)
elif b == 0 and d == 0 and a * f != c * e:
print(0)
elif a * d == b * c and a * f == c * e:
if b == 0 and d == 0:
if a != 0 and c != 0:
print(3, e / a)
elif a == 0:
if e == 0:
print(3, f / c)
elif c == 0:
if f == 0:
print(3, e / a)
elif a == 0 and c == 0:
if b != 0:
print(4, e / b)
elif d != 0:
print(4, f / d)
elif b != 0:
print(1, -a / b, e / b)
elif d != 0:
print(1, -c / d, f / d)
else:
x = (e * d - b * f) / (a * d - b * c)
y = (a * f - e * c) / (a * d - b * c)
print(2, x, y)
|
# -*- coding: utf-8 -*-
"""
Author
------
Bo Zhang
Email
-----
bozhang@nao.cas.cn
Created on
----------
- Fri Jul 3 13:13:06 2015 read_spectrum
Modifications
-------------
- Fri Nov 20 10:16:59 2015 reformatting code
- Sun Feb 28 14:39:16 2016 migrated to bopy.spec.lamost
- Fri Jul 15 16:08:00 2016 migrate read_spectrum to read_spectrum.py
Aims
----
- generate LAMOST spectra file name/path
"""
# from __future__ import print_function
import os
import numpy as np
# from astropy.io import fits
# from astropy.table import Table, Column
def lamost_filepath(planid, mjd, spid, fiberid, dirpath="", extname=".fits"):
""" generate file path of a LAMOST spectrum
Parameters
----------
planid: string
planid
mjd: 5-digit integer
mjd (use lmjd rather than mjd for DR3 and after!)
spid: 2-digit integer
        spid, the number of the spectrograph
fiberid: 3-digit integer
fiberid
dirpath: string
the root directory for storing spectra.
Returns
--------
filepath: string
        the full path of the spectrum file (dirpath is used as the prefix).
        if dirpath is un-specified, only the file name is returned.
"""
# pre-processing: strip
if np.isscalar(planid):
planid = planid.strip()
else:
planid = [_.strip() for _ in planid]
if dirpath == "" or dirpath is None:
# return file name
if np.isscalar(mjd):
# if only input one item
return "spec-%05d-%s_sp%02d-%03d%s" \
% (mjd, planid, spid, fiberid, extname)
else:
# if input a list of items
return np.array(["spec-%05d-%s_sp%02d-%03d%s" %
(mjd[i], planid[i], spid[i], fiberid[i], extname)
for i in range(len(mjd))])
else:
# return file path
if not dirpath[-1] == os.path.sep:
dirpath += os.path.sep
if np.isscalar(mjd):
# if only input one item
return "%s%s%sspec-%05d-%s_sp%02d-%03d%s" \
% (dirpath, planid, os.path.sep,
mjd, planid, spid, fiberid, extname)
else:
# if input a list of items
return np.array(["%s%s%sspec-%05d-%s_sp%02d-%03d%s" %
(dirpath, planid[i], os.path.sep, mjd[i],
planid[i], spid[i], fiberid[i], extname)
for i in range(len(mjd))])
def _test_lamost_filepath():
"""test function **lamost_filepath**
"""
print(lamost_filepath("GAC_061N46_V3", 55939, 7, 78))
print(lamost_filepath("GAC_061N46_V3", 55939, 7, 78, "/"))
print(lamost_filepath("GAC_061N46_V3", 55939, 7, 78, "/pool"))
print(lamost_filepath("GAC_061N46_V3", 55939, 7, 78, "/pool/"))
def sdss_filepath(plate, mjd, fiberid, dirpath="", extname=".fits"):
""" generate file path of a LAMOST spectrum
Parameters
----------
plate: string
plate
mjd: 5-digit integer
mjd (use lmjd rather than mjd for DR3 and after!)
fiberid: 4-digit integer
fiberid
dirpath: string
the root directory for storing spectra.
extname: string
in case that users want to synthesize other data format
Returns
--------
filepath: string
        the full path of the spectrum file (dirpath is used as the prefix).
        if dirpath is un-specified, only the file name is returned.
"""
if dirpath == "" or dirpath is None:
# return file name
if np.isscalar(mjd):
# if only input one item
return "spec-%04d-%05d-%04d%s" % (plate, mjd, fiberid, extname)
else:
# if input a list of items
return np.array(["spec-%04d-%05d-%04d%s" %
(plate[i], mjd[i], fiberid[i], extname)
for i in range(len(mjd))])
else:
# return file path
if not dirpath[-1] == os.path.sep:
dirpath += os.path.sep
if np.isscalar(mjd):
# if only input one item
return "%s%04d%sspec-%04d-%05d-%04d%s" \
% (dirpath, plate, os.path.sep,
plate, mjd, fiberid, extname)
else:
# if input a list of items
return np.array(["%s%04d%sspec-%04d-%05d-%04d%s" %
(dirpath, plate[i], os.path.sep, plate[i],
mjd[i], fiberid[i], extname)
for i in range(len(mjd))])
def _test_sdss_filepath():
print(sdss_filepath(2238, 52059, 1, "/"))
if __name__ == "__main__":
print("")
print("@Cham: start to test the module ...")
print("")
print("@Cham: testing ""lamost_filepath"" ...")
_test_lamost_filepath()
_test_sdss_filepath()
print("@Cham: OK")
|
class BaseDownsizing:
def __init__(self, raw_file_f, raw_file_r=None):
        self.raw_file_f = raw_file_f
        self._downsized_f = None
        if raw_file_r:
            self.raw_file_r = raw_file_r
            self._downsized_r = None
def downsize_single(self):
"""Overridden in child classes to perform specified downsizing of fragment reads"""
return self.raw_file_f
def downsize_pair_uncompressed(self):
"""Overridden in child classes to perform specified downsizing of paired-ends reads"""
return self.raw_file_f, self.raw_file_r
def downsize_pair_gzip(self):
"""Overridden in child classes to perform specified downsizing of gzip compressed paired-ends reads"""
return self.raw_file_f, self.raw_file_r
@property
def downsized_pair_uncompressed(self):
if getattr(self, "._downsized_f", None) is None:
self._downsized_f, self_downsized_r = self.downsize_pair()
self.raw_file_f = self._downsized_f
self.raw_file_r = self._downsized_r
return self._downsized_f, self._downsized_r
@property
def downsized_pair_gzip(self):
if getattr(self, "._downsized_f", None) is None:
self._downsized_f, self_downsized_r = self.downsize_pair()
self.raw_file_f = self._downsized_f
self.raw_file_r = self._downsized_r
return self._downsized_f, self._downsized_r
@property
def downsized_single(self):
if getattr(self, "._downsized_f", None) is None:
self._downsized_f = self.downsize_single()
self.raw_file_f = self._downsized_f
return self._downsized_f
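# Hedged sketch (assumption, not part of the original module): a minimal child
# class showing how the downsize_* hooks are meant to be overridden. The
# head-based FASTQ truncation below is illustrative only.
class HeadDownsizing(BaseDownsizing):
    """Keep only the first `n_reads` records of a FASTQ file (illustrative)."""
    def __init__(self, raw_file_f, raw_file_r=None, n_reads=1000):
        super().__init__(raw_file_f, raw_file_r)
        self.n_reads = n_reads
    def downsize_single(self):
        out_path = self.raw_file_f + ".downsized"
        with open(self.raw_file_f) as src, open(out_path, "w") as dst:
            for i, line in enumerate(src):
                if i >= self.n_reads * 4:  # 4 lines per FASTQ record
                    break
                dst.write(line)
        return out_path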
|
from collections import OrderedDict
from django.conf import settings
from django.db.models import Count, F
from django.http import HttpResponseForbidden, HttpResponse
from django.shortcuts import get_object_or_404
from drf_yasg import openapi
from drf_yasg.openapi import Parameter
from drf_yasg.utils import swagger_auto_schema
from rest_framework.decorators import action
from rest_framework.mixins import ListModelMixin
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet, ViewSet
from circuits.models import Circuit
from dcim import filters
from dcim.models import (
Cable, ConsolePort, ConsolePortTemplate, ConsoleServerPort, ConsoleServerPortTemplate, Device, DeviceBay,
DeviceBayTemplate, DeviceRole, DeviceType, FrontPort, FrontPortTemplate, Interface, InterfaceTemplate,
Manufacturer, InventoryItem, Platform, PowerFeed, PowerOutlet, PowerOutletTemplate, PowerPanel, PowerPort,
PowerPortTemplate, Rack, RackGroup, RackReservation, RackRole, RearPort, RearPortTemplate, Region, Site,
VirtualChassis,
)
from extras.api.serializers import RenderedGraphSerializer
from extras.api.views import CustomFieldModelViewSet
from extras.models import Graph
from ipam.models import Prefix, VLAN
from utilities.api import (
get_serializer_for_model, IsAuthenticatedOrLoginNotRequired, ModelViewSet, ServiceUnavailable,
)
from utilities.utils import get_subquery
from virtualization.models import VirtualMachine
from . import serializers
from .exceptions import MissingFilterException
# Mixins
class CableTraceMixin(object):
@action(detail=True, url_path='trace')
def trace(self, request, pk):
"""
Trace a complete cable path and return each segment as a three-tuple of (termination, cable, termination).
"""
obj = get_object_or_404(self.queryset.model, pk=pk)
# Initialize the path array
path = []
for near_end, cable, far_end in obj.trace()[0]:
# Serialize each object
serializer_a = get_serializer_for_model(near_end, prefix='Nested')
x = serializer_a(near_end, context={'request': request}).data
if cable is not None:
y = serializers.TracedCableSerializer(cable, context={'request': request}).data
else:
y = None
if far_end is not None:
serializer_b = get_serializer_for_model(far_end, prefix='Nested')
z = serializer_b(far_end, context={'request': request}).data
else:
z = None
path.append((x, y, z))
return Response(path)
#
# Regions
#
class RegionViewSet(ModelViewSet):
queryset = Region.objects.annotate(
site_count=Count('sites')
)
serializer_class = serializers.RegionSerializer
filterset_class = filters.RegionFilterSet
#
# Sites
#
class SiteViewSet(CustomFieldModelViewSet):
queryset = Site.objects.prefetch_related(
'region', 'tenant', 'tags'
).annotate(
device_count=get_subquery(Device, 'site'),
rack_count=get_subquery(Rack, 'site'),
prefix_count=get_subquery(Prefix, 'site'),
vlan_count=get_subquery(VLAN, 'site'),
circuit_count=get_subquery(Circuit, 'terminations__site'),
virtualmachine_count=get_subquery(VirtualMachine, 'cluster__site'),
)
serializer_class = serializers.SiteSerializer
filterset_class = filters.SiteFilterSet
@action(detail=True)
def graphs(self, request, pk):
"""
A convenience method for rendering graphs for a particular site.
"""
site = get_object_or_404(Site, pk=pk)
queryset = Graph.objects.filter(type__model='site')
serializer = RenderedGraphSerializer(queryset, many=True, context={'graphed_object': site})
return Response(serializer.data)
#
# Rack groups
#
class RackGroupViewSet(ModelViewSet):
queryset = RackGroup.objects.prefetch_related('site').annotate(
rack_count=Count('racks')
)
serializer_class = serializers.RackGroupSerializer
filterset_class = filters.RackGroupFilterSet
#
# Rack roles
#
class RackRoleViewSet(ModelViewSet):
queryset = RackRole.objects.annotate(
rack_count=Count('racks')
)
serializer_class = serializers.RackRoleSerializer
filterset_class = filters.RackRoleFilterSet
#
# Racks
#
class RackViewSet(CustomFieldModelViewSet):
queryset = Rack.objects.prefetch_related(
'site', 'group__site', 'role', 'tenant', 'tags'
).annotate(
device_count=get_subquery(Device, 'rack'),
powerfeed_count=get_subquery(PowerFeed, 'rack')
)
serializer_class = serializers.RackSerializer
filterset_class = filters.RackFilterSet
@swagger_auto_schema(
responses={200: serializers.RackUnitSerializer(many=True)},
query_serializer=serializers.RackElevationDetailFilterSerializer
)
@action(detail=True)
def elevation(self, request, pk=None):
"""
Rack elevation representing the list of rack units. Also supports rendering the elevation as an SVG.
"""
rack = get_object_or_404(Rack, pk=pk)
serializer = serializers.RackElevationDetailFilterSerializer(data=request.GET)
if not serializer.is_valid():
return Response(serializer.errors, 400)
data = serializer.validated_data
if data['render'] == 'svg':
# Render and return the elevation as an SVG drawing with the correct content type
drawing = rack.get_elevation_svg(
face=data['face'],
unit_width=data['unit_width'],
unit_height=data['unit_height'],
legend_width=data['legend_width'],
include_images=data['include_images'],
base_url=request.build_absolute_uri('/')
)
return HttpResponse(drawing.tostring(), content_type='image/svg+xml')
else:
# Return a JSON representation of the rack units in the elevation
elevation = rack.get_rack_units(
face=data['face'],
exclude=data['exclude'],
expand_devices=data['expand_devices']
)
# Enable filtering rack units by ID
q = data['q']
if q:
elevation = [u for u in elevation if q in str(u['id']) or q in str(u['name'])]
page = self.paginate_queryset(elevation)
if page is not None:
rack_units = serializers.RackUnitSerializer(page, many=True, context={'request': request})
return self.get_paginated_response(rack_units.data)
#
# Rack reservations
#
class RackReservationViewSet(ModelViewSet):
queryset = RackReservation.objects.prefetch_related('rack', 'user', 'tenant')
serializer_class = serializers.RackReservationSerializer
filterset_class = filters.RackReservationFilterSet
# Assign user from request
def perform_create(self, serializer):
serializer.save(user=self.request.user)
#
# Manufacturers
#
class ManufacturerViewSet(ModelViewSet):
queryset = Manufacturer.objects.annotate(
devicetype_count=get_subquery(DeviceType, 'manufacturer'),
inventoryitem_count=get_subquery(InventoryItem, 'manufacturer'),
platform_count=get_subquery(Platform, 'manufacturer')
)
serializer_class = serializers.ManufacturerSerializer
filterset_class = filters.ManufacturerFilterSet
#
# Device types
#
class DeviceTypeViewSet(CustomFieldModelViewSet):
queryset = DeviceType.objects.prefetch_related('manufacturer').prefetch_related('tags').annotate(
device_count=Count('instances')
)
serializer_class = serializers.DeviceTypeSerializer
filterset_class = filters.DeviceTypeFilterSet
#
# Device type components
#
class ConsolePortTemplateViewSet(ModelViewSet):
queryset = ConsolePortTemplate.objects.prefetch_related('device_type__manufacturer')
serializer_class = serializers.ConsolePortTemplateSerializer
filterset_class = filters.ConsolePortTemplateFilterSet
class ConsoleServerPortTemplateViewSet(ModelViewSet):
queryset = ConsoleServerPortTemplate.objects.prefetch_related('device_type__manufacturer')
serializer_class = serializers.ConsoleServerPortTemplateSerializer
filterset_class = filters.ConsoleServerPortTemplateFilterSet
class PowerPortTemplateViewSet(ModelViewSet):
queryset = PowerPortTemplate.objects.prefetch_related('device_type__manufacturer')
serializer_class = serializers.PowerPortTemplateSerializer
filterset_class = filters.PowerPortTemplateFilterSet
class PowerOutletTemplateViewSet(ModelViewSet):
queryset = PowerOutletTemplate.objects.prefetch_related('device_type__manufacturer')
serializer_class = serializers.PowerOutletTemplateSerializer
filterset_class = filters.PowerOutletTemplateFilterSet
class InterfaceTemplateViewSet(ModelViewSet):
queryset = InterfaceTemplate.objects.prefetch_related('device_type__manufacturer')
serializer_class = serializers.InterfaceTemplateSerializer
filterset_class = filters.InterfaceTemplateFilterSet
class FrontPortTemplateViewSet(ModelViewSet):
queryset = FrontPortTemplate.objects.prefetch_related('device_type__manufacturer')
serializer_class = serializers.FrontPortTemplateSerializer
filterset_class = filters.FrontPortTemplateFilterSet
class RearPortTemplateViewSet(ModelViewSet):
queryset = RearPortTemplate.objects.prefetch_related('device_type__manufacturer')
serializer_class = serializers.RearPortTemplateSerializer
filterset_class = filters.RearPortTemplateFilterSet
class DeviceBayTemplateViewSet(ModelViewSet):
queryset = DeviceBayTemplate.objects.prefetch_related('device_type__manufacturer')
serializer_class = serializers.DeviceBayTemplateSerializer
filterset_class = filters.DeviceBayTemplateFilterSet
#
# Device roles
#
class DeviceRoleViewSet(ModelViewSet):
queryset = DeviceRole.objects.annotate(
device_count=get_subquery(Device, 'device_role'),
virtualmachine_count=get_subquery(VirtualMachine, 'role')
)
serializer_class = serializers.DeviceRoleSerializer
filterset_class = filters.DeviceRoleFilterSet
#
# Platforms
#
class PlatformViewSet(ModelViewSet):
queryset = Platform.objects.annotate(
device_count=get_subquery(Device, 'platform'),
virtualmachine_count=get_subquery(VirtualMachine, 'platform')
)
serializer_class = serializers.PlatformSerializer
filterset_class = filters.PlatformFilterSet
#
# Devices
#
class DeviceViewSet(CustomFieldModelViewSet):
queryset = Device.objects.prefetch_related(
'device_type__manufacturer', 'device_role', 'tenant', 'platform', 'site', 'rack', 'parent_bay',
'virtual_chassis__master', 'primary_ip4__nat_outside', 'primary_ip6__nat_outside', 'tags',
)
filterset_class = filters.DeviceFilterSet
def get_serializer_class(self):
"""
Select the specific serializer based on the request context.
If the `brief` query param equates to True, return the NestedDeviceSerializer
If the `exclude` query param includes `config_context` as a value, return the DeviceSerializer
Else, return the DeviceWithConfigContextSerializer
"""
request = self.get_serializer_context()['request']
if request.query_params.get('brief', False):
return serializers.NestedDeviceSerializer
elif 'config_context' in request.query_params.get('exclude', []):
return serializers.DeviceSerializer
return serializers.DeviceWithConfigContextSerializer
@action(detail=True)
def graphs(self, request, pk):
"""
A convenience method for rendering graphs for a particular Device.
"""
device = get_object_or_404(Device, pk=pk)
queryset = Graph.objects.filter(type__model='device')
serializer = RenderedGraphSerializer(queryset, many=True, context={'graphed_object': device})
return Response(serializer.data)
@swagger_auto_schema(
manual_parameters=[
Parameter(
name='method',
in_='query',
required=True,
type=openapi.TYPE_STRING
)
],
responses={'200': serializers.DeviceNAPALMSerializer}
)
@action(detail=True, url_path='napalm')
def napalm(self, request, pk):
"""
Execute a NAPALM method on a Device
"""
device = get_object_or_404(Device, pk=pk)
if not device.primary_ip:
raise ServiceUnavailable("This device does not have a primary IP address configured.")
if device.platform is None:
raise ServiceUnavailable("No platform is configured for this device.")
if not device.platform.napalm_driver:
raise ServiceUnavailable("No NAPALM driver is configured for this device's platform ().".format(
device.platform
))
# Check that NAPALM is installed
try:
import napalm
from napalm.base.exceptions import ModuleImportError
except ImportError:
raise ServiceUnavailable("NAPALM is not installed. Please see the documentation for instructions.")
# Validate the configured driver
try:
driver = napalm.get_network_driver(device.platform.napalm_driver)
except ModuleImportError:
raise ServiceUnavailable("NAPALM driver for platform {} not found: {}.".format(
device.platform, device.platform.napalm_driver
))
# Verify user permission
if not request.user.has_perm('dcim.napalm_read'):
return HttpResponseForbidden()
# Connect to the device
napalm_methods = request.GET.getlist('method')
response = OrderedDict([(m, None) for m in napalm_methods])
ip_address = str(device.primary_ip.address.ip)
username = settings.NAPALM_USERNAME
password = settings.NAPALM_PASSWORD
optional_args = settings.NAPALM_ARGS.copy()
if device.platform.napalm_args is not None:
optional_args.update(device.platform.napalm_args)
# Update NAPALM parameters according to the request headers
for header in request.headers:
if header[:9].lower() != 'x-napalm-':
continue
key = header[9:]
if key.lower() == 'username':
username = request.headers[header]
elif key.lower() == 'password':
password = request.headers[header]
elif key:
optional_args[key.lower()] = request.headers[header]
d = driver(
hostname=ip_address,
username=username,
password=password,
timeout=settings.NAPALM_TIMEOUT,
optional_args=optional_args
)
try:
d.open()
except Exception as e:
raise ServiceUnavailable("Error connecting to the device at {}: {}".format(ip_address, e))
# Validate and execute each specified NAPALM method
for method in napalm_methods:
if not hasattr(driver, method):
response[method] = {'error': 'Unknown NAPALM method'}
continue
if not method.startswith('get_'):
response[method] = {'error': 'Only get_* NAPALM methods are supported'}
continue
try:
response[method] = getattr(d, method)()
except NotImplementedError:
response[method] = {'error': 'Method {} not implemented for NAPALM driver {}'.format(method, driver)}
except Exception as e:
response[method] = {'error': 'Method {} failed: {}'.format(method, e)}
d.close()
return Response(response)
#
# Device components
#
class ConsolePortViewSet(CableTraceMixin, ModelViewSet):
queryset = ConsolePort.objects.prefetch_related('device', 'connected_endpoint__device', 'cable', 'tags')
serializer_class = serializers.ConsolePortSerializer
filterset_class = filters.ConsolePortFilterSet
class ConsoleServerPortViewSet(CableTraceMixin, ModelViewSet):
queryset = ConsoleServerPort.objects.prefetch_related('device', 'connected_endpoint__device', 'cable', 'tags')
serializer_class = serializers.ConsoleServerPortSerializer
filterset_class = filters.ConsoleServerPortFilterSet
class PowerPortViewSet(CableTraceMixin, ModelViewSet):
queryset = PowerPort.objects.prefetch_related(
'device', '_connected_poweroutlet__device', '_connected_powerfeed', 'cable', 'tags'
)
serializer_class = serializers.PowerPortSerializer
filterset_class = filters.PowerPortFilterSet
class PowerOutletViewSet(CableTraceMixin, ModelViewSet):
queryset = PowerOutlet.objects.prefetch_related('device', 'connected_endpoint__device', 'cable', 'tags')
serializer_class = serializers.PowerOutletSerializer
filterset_class = filters.PowerOutletFilterSet
class InterfaceViewSet(CableTraceMixin, ModelViewSet):
queryset = Interface.objects.prefetch_related(
'device', '_connected_interface', '_connected_circuittermination', 'cable', 'ip_addresses', 'tags'
).filter(
device__isnull=False
)
serializer_class = serializers.InterfaceSerializer
filterset_class = filters.InterfaceFilterSet
@action(detail=True)
def graphs(self, request, pk):
"""
A convenience method for rendering graphs for a particular interface.
"""
interface = get_object_or_404(Interface, pk=pk)
queryset = Graph.objects.filter(type__model='interface')
serializer = RenderedGraphSerializer(queryset, many=True, context={'graphed_object': interface})
return Response(serializer.data)
class FrontPortViewSet(CableTraceMixin, ModelViewSet):
queryset = FrontPort.objects.prefetch_related('device__device_type__manufacturer', 'rear_port', 'cable', 'tags')
serializer_class = serializers.FrontPortSerializer
filterset_class = filters.FrontPortFilterSet
class RearPortViewSet(CableTraceMixin, ModelViewSet):
queryset = RearPort.objects.prefetch_related('device__device_type__manufacturer', 'cable', 'tags')
serializer_class = serializers.RearPortSerializer
filterset_class = filters.RearPortFilterSet
class DeviceBayViewSet(ModelViewSet):
queryset = DeviceBay.objects.prefetch_related('installed_device').prefetch_related('tags')
serializer_class = serializers.DeviceBaySerializer
filterset_class = filters.DeviceBayFilterSet
class InventoryItemViewSet(ModelViewSet):
queryset = InventoryItem.objects.prefetch_related('device', 'manufacturer').prefetch_related('tags')
serializer_class = serializers.InventoryItemSerializer
filterset_class = filters.InventoryItemFilterSet
#
# Connections
#
class ConsoleConnectionViewSet(ListModelMixin, GenericViewSet):
queryset = ConsolePort.objects.prefetch_related(
'device', 'connected_endpoint__device'
).filter(
connected_endpoint__isnull=False
)
serializer_class = serializers.ConsolePortSerializer
filterset_class = filters.ConsoleConnectionFilterSet
class PowerConnectionViewSet(ListModelMixin, GenericViewSet):
queryset = PowerPort.objects.prefetch_related(
'device', 'connected_endpoint__device'
).filter(
_connected_poweroutlet__isnull=False
)
serializer_class = serializers.PowerPortSerializer
filterset_class = filters.PowerConnectionFilterSet
class InterfaceConnectionViewSet(ListModelMixin, GenericViewSet):
queryset = Interface.objects.prefetch_related(
'device', '_connected_interface__device'
).filter(
# Avoid duplicate connections by only selecting the lower PK in a connected pair
_connected_interface__isnull=False,
pk__lt=F('_connected_interface')
)
serializer_class = serializers.InterfaceConnectionSerializer
filterset_class = filters.InterfaceConnectionFilterSet
#
# Cables
#
class CableViewSet(ModelViewSet):
queryset = Cable.objects.prefetch_related(
'termination_a', 'termination_b'
)
serializer_class = serializers.CableSerializer
filterset_class = filters.CableFilterSet
#
# Virtual chassis
#
class VirtualChassisViewSet(ModelViewSet):
queryset = VirtualChassis.objects.prefetch_related('tags').annotate(
member_count=Count('members')
)
serializer_class = serializers.VirtualChassisSerializer
filterset_class = filters.VirtualChassisFilterSet
#
# Power panels
#
class PowerPanelViewSet(ModelViewSet):
queryset = PowerPanel.objects.prefetch_related(
'site', 'rack_group'
).annotate(
powerfeed_count=Count('powerfeeds')
)
serializer_class = serializers.PowerPanelSerializer
filterset_class = filters.PowerPanelFilterSet
#
# Power feeds
#
class PowerFeedViewSet(CustomFieldModelViewSet):
queryset = PowerFeed.objects.prefetch_related('power_panel', 'rack', 'tags')
serializer_class = serializers.PowerFeedSerializer
filterset_class = filters.PowerFeedFilterSet
#
# Miscellaneous
#
class ConnectedDeviceViewSet(ViewSet):
"""
This endpoint allows a user to determine what device (if any) is connected to a given peer device and peer
interface. This is useful in a situation where a device boots with no configuration, but can detect its neighbors
via a protocol such as LLDP. Two query parameters must be included in the request:
* `peer_device`: The name of the peer device
* `peer_interface`: The name of the peer interface
"""
permission_classes = [IsAuthenticatedOrLoginNotRequired]
_device_param = Parameter(
name='peer_device',
in_='query',
description='The name of the peer device',
required=True,
type=openapi.TYPE_STRING
)
_interface_param = Parameter(
name='peer_interface',
in_='query',
description='The name of the peer interface',
required=True,
type=openapi.TYPE_STRING
)
def get_view_name(self):
return "Connected Device Locator"
@swagger_auto_schema(
manual_parameters=[_device_param, _interface_param],
responses={'200': serializers.DeviceSerializer}
)
def list(self, request):
peer_device_name = request.query_params.get(self._device_param.name)
peer_interface_name = request.query_params.get(self._interface_param.name)
if not peer_device_name or not peer_interface_name:
raise MissingFilterException(detail='Request must include "peer_device" and "peer_interface" filters.')
# Determine local interface from peer interface's connection
peer_interface = get_object_or_404(Interface, device__name=peer_device_name, name=peer_interface_name)
local_interface = peer_interface._connected_interface
if local_interface is None:
return Response()
return Response(serializers.DeviceSerializer(local_interface.device, context={'request': request}).data)
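# Hedged usage sketch (comments only; the API route, host and token below are
# assumptions, not defined in this module): querying the connected-device
# endpoint described in the ConnectedDeviceViewSet docstring.
#
#   import requests
#   resp = requests.get(
#       "https://netbox.example.com/api/dcim/connected-device/",
#       params={"peer_device": "switch1", "peer_interface": "GigabitEthernet0/1"},
#       headers={"Authorization": "Token 0123456789abcdef"},
#   )
#   resp.json()  # serialized Device connected to that peer interface, if any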
|
# ---------------------------------------------------------------------
# Angtel.Topaz.get_interface_status
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetinterfacestatus import IGetInterfaceStatus
class Script(BaseScript):
name = "Angtel.Topaz.get_interface_status"
interface = IGetInterfaceStatus
cache = True
rx_port = re.compile(
r"^(?P<port>(?:Fa|Gi|Te|Po)\S+)\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+"
r"(?P<oper_status>Up|Down|Not Present)",
re.MULTILINE | re.IGNORECASE,
)
def execute_cli(self, interface=None):
r = []
v = self.cli("show interfaces status", cached=True)
for match in self.rx_port.finditer(v):
if (interface is not None) and (interface == match.group("port")):
return [
{"interface": match.group("port"), "status": match.group("oper_status") == "Up"}
]
r += [{"interface": match.group("port"), "status": match.group("oper_status") == "Up"}]
return r
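# Hedged example (hypothetical CLI output, constructed only to match rx_port
# above; real "show interfaces status" formatting on Angtel Topaz may differ):
#   gi1/0/1  1G-Copper  Full  1000  Enabled  Off  On  Up
# would be parsed as {"interface": "gi1/0/1", "status": True}.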
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.diff
~~~~~~~~~~~~~~~~~~~~
Lexers for diff/patch formats.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Text, Comment, Operator, Keyword, Name, Generic, \
Literal
__all__ = ['DiffLexer', 'DarcsPatchLexer', 'WDiffLexer']
class DiffLexer(RegexLexer):
"""
Lexer for unified or context-style diffs or patches.
"""
name = 'Diff'
aliases = ['diff', 'udiff']
filenames = ['*.diff', '*.patch']
mimetypes = ['text/x-diff', 'text/x-patch']
tokens = {
'root': [
(r' .*\n', Text),
(r'\+.*\n', Generic.Inserted),
(r'-.*\n', Generic.Deleted),
(r'!.*\n', Generic.Strong),
(r'@.*\n', Generic.Subheading),
(r'([Ii]ndex|diff).*\n', Generic.Heading),
(r'=.*\n', Generic.Heading),
(r'.*\n', Text),
]
}
def analyse_text(text):
if text[:7] == 'Index: ':
return True
if text[:5] == 'diff ':
return True
if text[:4] == '--- ':
return 0.9
class DarcsPatchLexer(RegexLexer):
"""
DarcsPatchLexer is a lexer for the various versions of the darcs patch
format. Examples of this format are derived by commands such as
``darcs annotate --patch`` and ``darcs send``.
.. versionadded:: 0.10
"""
name = 'Darcs Patch'
aliases = ['dpatch']
filenames = ['*.dpatch', '*.darcspatch']
DPATCH_KEYWORDS = ('hunk', 'addfile', 'adddir', 'rmfile', 'rmdir', 'move',
'replace')
tokens = {
'root': [
(r'<', Operator),
(r'>', Operator),
(r'\{', Operator),
(r'\}', Operator),
(r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)(\])',
bygroups(Operator, Keyword, Name, Text, Name, Operator,
Literal.Date, Text, Operator)),
(r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)',
bygroups(Operator, Keyword, Name, Text, Name, Operator,
Literal.Date, Text), 'comment'),
(r'New patches:', Generic.Heading),
(r'Context:', Generic.Heading),
(r'Patch bundle hash:', Generic.Heading),
(r'(\s*)(%s)(.*\n)' % '|'.join(DPATCH_KEYWORDS),
bygroups(Text, Keyword, Text)),
(r'\+', Generic.Inserted, "insert"),
(r'-', Generic.Deleted, "delete"),
(r'.*\n', Text),
],
'comment': [
(r'[^\]].*\n', Comment),
(r'\]', Operator, "#pop"),
],
'specialText': [ # darcs add [_CODE_] special operators for clarity
(r'\n', Text, "#pop"), # line-based
(r'\[_[^_]*_]', Operator),
],
'insert': [
include('specialText'),
(r'\[', Generic.Inserted),
(r'[^\n\[]+', Generic.Inserted),
],
'delete': [
include('specialText'),
(r'\[', Generic.Deleted),
(r'[^\n\[]+', Generic.Deleted),
],
}
class WDiffLexer(RegexLexer):
"""
A `wdiff <https://www.gnu.org/software/wdiff/>`_ lexer.
Note that:
    * it only handles normal output (without options like -l).
    * if the target files of wdiff contain "[-", "-]", "{+", "+}",
      especially if they are unbalanced, this lexer will get confused.
.. versionadded:: 2.2
"""
name = 'WDiff'
aliases = ['wdiff']
filenames = ['*.wdiff']
mimetypes = []
flags = re.MULTILINE | re.DOTALL
# We can only assume "[-" after "[-" before "-]" is `nested`,
# for instance wdiff to wdiff outputs. We have no way to
# distinct these marker is of wdiff output from original text.
ins_op = r"\{\+"
ins_cl = r"\+\}"
del_op = r"\[\-"
del_cl = r"\-\]"
normal = r'[^{}[\]+-]+' # for performance
tokens = {
'root': [
(ins_op, Generic.Inserted, 'inserted'),
(del_op, Generic.Deleted, 'deleted'),
(normal, Text),
(r'.', Text),
],
'inserted': [
(ins_op, Generic.Inserted, '#push'),
(del_op, Generic.Inserted, '#push'),
(del_cl, Generic.Inserted, '#pop'),
(ins_cl, Generic.Inserted, '#pop'),
(normal, Generic.Inserted),
(r'.', Generic.Inserted),
],
'deleted': [
(del_op, Generic.Deleted, '#push'),
(ins_op, Generic.Deleted, '#push'),
(ins_cl, Generic.Deleted, '#pop'),
(del_cl, Generic.Deleted, '#pop'),
(normal, Generic.Deleted),
(r'.', Generic.Deleted),
],
}
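# Hedged usage sketch (not part of the upstream module): highlighting a small
# unified diff with DiffLexer. The choice of formatter is an assumption.
def _example_highlight_diff():
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    sample = (
        "--- a/hello.py\n"
        "+++ b/hello.py\n"
        "@@ -1 +1 @@\n"
        "-print('hi')\n"
        "+print('hello')\n"
    )
    return highlight(sample, DiffLexer(), TerminalFormatter())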
|
from disco import Disco
class Config:
def __init__(self):
        self._numero_discos = int(input("\nEnter the number of discs: "))
def adiciona_discos(self, torre_inicial):
discos = self.add_disco()
for ix in range(self._numero_discos):
torre_inicial.empilha(discos[ix])
def add_disco(self):
discos = []
arquivo = open('disco.txt', 'r')
for linha in arquivo:
discos.append(Disco(int(linha)))
return discos
def numero_discos(self):
return self._numero_discos
def status_torres(self, torres):
        print('\nNumber of discs: ' + str(self._numero_discos))
for torre in torres:
torre.to_string()
|
import os
import platform
import socket
import copy
import json
import numpy as np
from datetime import datetime
import time
from .metadata import acdd
import flopy
# globals
FILLVALUE = -99999.9
ITMUNI = {
0: "undefined",
1: "seconds",
2: "minutes",
3: "hours",
4: "days",
5: "years",
}
PRECISION_STRS = ["f4", "f8", "i4"]
STANDARD_VARS = ["longitude", "latitude", "layer", "elevation", "time"]
path = os.path.split(__file__)[0]
with open(path + "/longnames.json") as f:
NC_LONG_NAMES = json.load(f)
class Logger(object):
"""
Basic class for logging events during the linear analysis calculations
if filename is passed, then an file handle is opened
Parameters
----------
filename : bool or string
if string, it is the log file to write. If a bool, then log is
written to the screen. echo (bool): a flag to force screen output
Attributes
----------
items : dict
tracks when something is started. If a log entry is
not in items, then it is treated as a new entry with the string
being the key and the datetime as the value. If a log entry is
in items, then the end time and delta time are written and
the item is popped from the keys
"""
def __init__(self, filename, echo=False):
self.items = {}
self.echo = bool(echo)
        if filename is True:
            self.echo = True
            self.filename = None
        elif filename:
            self.filename = filename
            self.f = open(filename, "w", 1)  # line-buffered; text mode cannot be unbuffered
            self.t = datetime.now()
            self.log("opening " + str(filename) + " for logging")
        else:
            self.filename = None
def log(self, phrase):
"""
log something that happened
Parameters
----------
phrase : str
the thing that happened
"""
t = datetime.now()
if phrase in self.items.keys():
s = (
str(t)
+ " finished: "
+ str(phrase)
+ ", took: "
+ str(t - self.items[phrase])
+ "\n"
)
if self.echo:
                print(s, end="")
if self.filename:
self.f.write(s)
self.items.pop(phrase)
else:
s = str(t) + " starting: " + str(phrase) + "\n"
if self.echo:
                print(s, end="")
if self.filename:
self.f.write(s)
self.items[phrase] = copy.deepcopy(t)
def warn(self, message):
"""
Write a warning to the log file
Parameters
----------
message : str
the warning text
"""
s = str(datetime.now()) + " WARNING: " + message + "\n"
if self.echo:
            print(s, end="")
if self.filename:
self.f.write(s)
return
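# Hedged usage sketch (assumption, not part of the original module): the
# start/finish bookkeeping described in the Logger docstring above.
def _example_logger_usage():
    log = Logger(True)          # echo to screen only, no log file
    log.log("building netcdf")  # first call records the start time
    # ... do some work ...
    log.log("building netcdf")  # second call prints the elapsed time and pops the entry
    log.warn("model grid has no CRS; a default proj4 string will be used")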
class NetCdf(object):
"""
Support for writing a netCDF4 compliant file from a flopy model
Parameters
----------
output_filename : str
Name of the .nc file to write
model : flopy model instance
time_values : the entries for the time dimension
if not None, the constructor will initialize
the file. If None, the perlen array of ModflowDis
will be used
z_positive : str ('up' or 'down')
Positive direction of vertical coordinates written to NetCDF file.
(default 'down')
verbose : if True, stdout is verbose. If str, then a log file
is written to the verbose file
forgive : what to do if a duplicate variable name is being created. If
True, then the newly requested var is skipped. If False, then
an exception is raised.
**kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied model grid which will be used in lieu of the model
object modelgrid for netcdf production
Notes
-----
This class relies heavily on the grid and modeltime objects,
including these attributes: lenuni, itmuni, start_datetime, and proj4.
Make sure these attributes have meaningful values.
"""
def __init__(
self,
output_filename,
model,
time_values=None,
z_positive="up",
verbose=None,
prj=None,
logger=None,
forgive=False,
**kwargs
):
assert output_filename.lower().endswith(".nc")
if verbose is None:
verbose = model.verbose
if logger is not None:
self.logger = logger
else:
self.logger = Logger(verbose)
self.var_attr_dict = {}
self.log = self.logger.log
if os.path.exists(output_filename):
self.logger.warn("removing existing nc file: " + output_filename)
os.remove(output_filename)
self.output_filename = output_filename
self.forgive = bool(forgive)
self.model = model
self.model_grid = model.modelgrid
if "modelgrid" in kwargs:
self.model_grid = kwargs.pop("modelgrid")
self.model_time = model.modeltime
if prj is not None:
self.model_grid.proj4 = prj
if self.model_grid.grid_type == "structured":
self.dimension_names = ("layer", "y", "x")
STANDARD_VARS.extend(["delc", "delr"])
# elif self.model_grid.grid_type == 'vertex':
# self.dimension_names = ('layer', 'ncpl')
else:
raise Exception(
"Grid type {} not supported.".format(self.model_grid.grid_type)
)
self.shape = self.model_grid.shape
try:
import dateutil.parser
except:
print(
"python-dateutil is not installed\n"
+ "try pip install python-dateutil"
)
return
self.start_datetime = self._dt_str(
dateutil.parser.parse(self.model_time.start_datetime)
)
self.logger.warn("start datetime:{0}".format(str(self.start_datetime)))
proj4_str = self.model_grid.proj4
if proj4_str is None:
proj4_str = "epsg:4326"
self.log(
"Warning: model has no coordinate reference system specified. "
"Using default proj4 string: {}".format(proj4_str)
)
self.proj4_str = proj4_str
self.grid_units = self.model_grid.units
self.z_positive = z_positive
if self.grid_units is None:
self.grid_units = "undefined"
assert self.grid_units in ["feet", "meters", "undefined"], (
"unsupported length units: " + self.grid_units
)
self.time_units = self.model_time.time_units
# this gives us confidence that every NetCdf instance
# has the same attributes
self.log("initializing attributes")
self._initialize_attributes()
self.log("initializing attributes")
self.time_values_arg = time_values
self.log("initializing file")
self.initialize_file(time_values=self.time_values_arg)
self.log("initializing file")
def __add__(self, other):
new_net = NetCdf.zeros_like(self)
if np.isscalar(other) or isinstance(other, np.ndarray):
for vname in self.var_attr_dict.keys():
new_net.nc.variables[vname][:] = (
self.nc.variables[vname][:] + other
)
elif isinstance(other, NetCdf):
for vname in self.var_attr_dict.keys():
new_net.nc.variables[vname][:] = (
self.nc.variables[vname][:] + other.nc.variables[vname][:]
)
else:
raise Exception(
"NetCdf.__add__(): unrecognized other:{0}".format(
str(type(other))
)
)
return new_net
def __sub__(self, other):
new_net = NetCdf.zeros_like(self)
if np.isscalar(other) or isinstance(other, np.ndarray):
for vname in self.var_attr_dict.keys():
new_net.nc.variables[vname][:] = (
self.nc.variables[vname][:] - other
)
elif isinstance(other, NetCdf):
for vname in self.var_attr_dict.keys():
new_net.nc.variables[vname][:] = (
self.nc.variables[vname][:] - other.nc.variables[vname][:]
)
else:
raise Exception(
"NetCdf.__sub__(): unrecognized other:{0}".format(
str(type(other))
)
)
return new_net
def __mul__(self, other):
new_net = NetCdf.zeros_like(self)
if np.isscalar(other) or isinstance(other, np.ndarray):
for vname in self.var_attr_dict.keys():
new_net.nc.variables[vname][:] = (
self.nc.variables[vname][:] * other
)
elif isinstance(other, NetCdf):
for vname in self.var_attr_dict.keys():
new_net.nc.variables[vname][:] = (
self.nc.variables[vname][:] * other.nc.variables[vname][:]
)
else:
raise Exception(
"NetCdf.__mul__(): unrecognized other:{0}".format(
str(type(other))
)
)
return new_net
def __div__(self, other):
return self.__truediv__(other)
def __truediv__(self, other):
new_net = NetCdf.zeros_like(self)
with np.errstate(invalid="ignore"):
if np.isscalar(other) or isinstance(other, np.ndarray):
for vname in self.var_attr_dict.keys():
new_net.nc.variables[vname][:] = (
self.nc.variables[vname][:] / other
)
elif isinstance(other, NetCdf):
for vname in self.var_attr_dict.keys():
new_net.nc.variables[vname][:] = (
self.nc.variables[vname][:]
/ other.nc.variables[vname][:]
)
else:
raise Exception(
"NetCdf.__sub__(): unrecognized other:{0}".format(
str(type(other))
)
)
return new_net
def append(self, other, suffix="_1"):
assert isinstance(other, NetCdf) or isinstance(other, dict)
if isinstance(other, NetCdf):
for vname in other.var_attr_dict.keys():
attrs = other.var_attr_dict[vname].copy()
var = other.nc.variables[vname]
new_vname = vname
if vname in self.nc.variables.keys():
if vname not in STANDARD_VARS:
new_vname = vname + suffix
if "long_name" in attrs:
attrs["long_name"] += " " + suffix
else:
continue
assert (
new_vname not in self.nc.variables.keys()
), "var already exists:{0} in {1}".format(
new_vname, ",".join(self.nc.variables.keys())
)
attrs["max"] = var[:].max()
attrs["min"] = var[:].min()
new_var = self.create_variable(
new_vname, attrs, var.dtype, dimensions=var.dimensions
)
new_var[:] = var[:]
else:
for vname, array in other.items():
vname_norm = self.normalize_name(vname)
assert (
vname_norm in self.nc.variables.keys()
), "dict var not in " "self.vars:{0}-->".format(
vname
) + ",".join(
self.nc.variables.keys()
)
new_vname = vname_norm + suffix
assert new_vname not in self.nc.variables.keys()
attrs = self.var_attr_dict[vname_norm].copy()
attrs["max"] = np.nanmax(array)
attrs["min"] = np.nanmin(array)
attrs["name"] = new_vname
attrs["long_name"] = attrs["long_name"] + " " + suffix
var = self.nc.variables[vname_norm]
# assert var.shape == array.shape,\
# "{0} shape ({1}) doesn't make array shape ({2})".\
# format(new_vname,str(var.shape),str(array.shape))
new_var = self.create_variable(
new_vname, attrs, var.dtype, dimensions=var.dimensions
)
try:
new_var[:] = array
except:
new_var[:, 0] = array
return
def copy(self, output_filename):
new_net = NetCdf.zeros_like(self, output_filename=output_filename)
for vname in self.var_attr_dict.keys():
new_net.nc.variables[vname][:] = self.nc.variables[vname][:]
return new_net
@classmethod
def zeros_like(
cls, other, output_filename=None, verbose=None, logger=None
):
new_net = NetCdf.empty_like(
other,
output_filename=output_filename,
verbose=verbose,
logger=logger,
)
# add the vars to the instance
for vname in other.var_attr_dict.keys():
if new_net.nc.variables.get(vname) is not None:
new_net.logger.warn(
"variable {0} already defined, skipping".format(vname)
)
continue
new_net.log("adding variable {0}".format(vname))
var = other.nc.variables[vname]
data = var[:]
try:
mask = data.mask
data = np.array(data)
except:
mask = None
            new_data = np.zeros_like(data)
            if mask is not None:
                new_data[mask] = FILLVALUE
new_var = new_net.create_variable(
vname,
other.var_attr_dict[vname],
var.dtype,
dimensions=var.dimensions,
)
new_var[:] = new_data
new_net.log("adding variable {0}".format(vname))
global_attrs = {}
for attr in other.nc.ncattrs():
if attr not in new_net.nc.ncattrs():
global_attrs[attr] = other.nc[attr]
new_net.add_global_attributes(global_attrs)
return new_net
@classmethod
def empty_like(
cls, other, output_filename=None, verbose=None, logger=None
):
if output_filename is None:
output_filename = (
str(time.mktime(datetime.now().timetuple())) + ".nc"
)
while os.path.exists(output_filename):
print("{}...already exists".format(output_filename))
output_filename = (
str(time.mktime(datetime.now().timetuple())) + ".nc"
)
print(
"creating temporary netcdf file..."
+ "{}".format(output_filename)
)
new_net = cls(
output_filename,
other.model,
time_values=other.time_values_arg,
verbose=verbose,
logger=logger,
)
return new_net
def difference(
self, other, minuend="self", mask_zero_diff=True, onlydiff=True
):
"""
make a new NetCDF instance that is the difference with another
netcdf file
Parameters
----------
        other : either a str filename of a netcdf file or
a netCDF4 instance
minuend : (optional) the order of the difference operation.
Default is self (e.g. self - other). Can be "self" or "other"
mask_zero_diff : bool flag to mask differences that are zero. If
True, positions in the difference array that are zero will be set
to self.fillvalue
        onlydiff : bool flag to only add non-zero diffs to output file
        Returns
        -------
        new NetCdf instance
Notes
-----
assumes the current NetCDF instance has been populated. The
variable names and dimensions between the two files must match
exactly. The name of the new .nc file is
<self.output_filename>.diff.nc. The masks from both self and
other are carried through to the new instance
"""
assert self.nc is not None, (
"can't call difference() if nc " + "hasn't been populated"
)
try:
import netCDF4
except Exception as e:
mess = "error import netCDF4: {0}".format(str(e))
self.logger.warn(mess)
raise Exception(mess)
if isinstance(other, str):
assert os.path.exists(
other
), "filename 'other' not found:" + "{0}".format(other)
other = netCDF4.Dataset(other, "r")
assert isinstance(other, netCDF4.Dataset)
# check for similar variables
self_vars = set(self.nc.variables.keys())
other_vars = set(other.variables)
diff = self_vars.symmetric_difference(other_vars)
if len(diff) > 0:
self.logger.warn(
"variables are not the same between the two "
+ "nc files: "
+ ",".join(diff)
)
return
# check for similar dimensions
self_dimens = self.nc.dimensions
other_dimens = other.dimensions
for d in self_dimens.keys():
if d not in other_dimens:
self.logger.warn("missing dimension in other:{0}".format(d))
return
if len(self_dimens[d]) != len(other_dimens[d]):
self.logger.warn(
"dimension not consistent: "
+ "{0}:{1}".format(self_dimens[d], other_dimens[d])
)
return
# should be good to go
time_values = self.nc.variables.get("time")[:]
new_net = NetCdf(
self.output_filename.replace(".nc", ".diff.nc"),
self.model,
time_values=time_values,
)
# add the vars to the instance
for vname in self_vars:
if (
vname not in self.var_attr_dict
or new_net.nc.variables.get(vname) is not None
):
self.logger.warn("skipping variable: {0}".format(vname))
continue
self.log("processing variable {0}".format(vname))
s_var = self.nc.variables[vname]
o_var = other.variables[vname]
s_data = s_var[:]
o_data = o_var[:]
o_mask, s_mask = None, None
# keep the masks to apply later
if isinstance(s_data, np.ma.MaskedArray):
self.logger.warn("masked array for {0}".format(vname))
s_mask = s_data.mask
s_data = np.array(s_data)
s_data[s_mask] = 0.0
else:
                s_data = np.nan_to_num(s_data)
if isinstance(o_data, np.ma.MaskedArray):
o_mask = o_data.mask
o_data = np.array(o_data)
o_data[o_mask] = 0.0
else:
                o_data = np.nan_to_num(o_data)
# difference with self
if minuend.lower() == "self":
d_data = s_data - o_data
elif minuend.lower() == "other":
d_data = o_data - s_data
else:
mess = "unrecognized minuend {0}".format(minuend)
self.logger.warn(mess)
raise Exception(mess)
# check for non-zero diffs
if onlydiff and d_data.sum() == 0.0:
self.logger.warn(
"var {0} has zero differences, skipping...".format(vname)
)
continue
self.logger.warn(
"resetting diff attrs max,min:{0},{1}".format(
d_data.min(), d_data.max()
)
)
attrs = self.var_attr_dict[vname].copy()
attrs["max"] = np.nanmax(d_data)
attrs["min"] = np.nanmin(d_data)
# reapply masks
if s_mask is not None:
self.log("applying self mask")
s_mask[d_data != 0.0] = False
d_data[s_mask] = FILLVALUE
self.log("applying self mask")
if o_mask is not None:
self.log("applying other mask")
o_mask[d_data != 0.0] = False
d_data[o_mask] = FILLVALUE
self.log("applying other mask")
d_data[np.isnan(d_data)] = FILLVALUE
if mask_zero_diff:
d_data[np.where(d_data == 0.0)] = FILLVALUE
var = new_net.create_variable(
vname, attrs, s_var.dtype, dimensions=s_var.dimensions
)
var[:] = d_data
self.log("processing variable {0}".format(vname))
def _dt_str(self, dt):
""" for datetime to string for year < 1900
"""
dt_str = "{0:04d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02}Z".format(
dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second
)
return dt_str
def write(self):
"""write the nc object to disk"""
self.log("writing nc file")
assert (
self.nc is not None
), "netcdf.write() error: nc file not initialized"
# write any new attributes that have been set since
# initializing the file
for k, v in self.global_attributes.items():
            try:
                self.nc.setncattr(k, v)
except Exception:
self.logger.warn(
"error setting global attribute {0}".format(k)
)
self.nc.sync()
self.nc.close()
self.log("writing nc file")
def _initialize_attributes(self):
"""private method to initial the attributes
of the NetCdf instance
"""
assert (
"nc" not in self.__dict__.keys()
), "NetCdf._initialize_attributes() error: nc attribute already set"
self.nc_epsg_str = "epsg:4326"
self.nc_crs_longname = "http://www.opengis.net/def/crs/EPSG/0/4326"
self.nc_semi_major = float(6378137.0)
self.nc_inverse_flat = float(298.257223563)
self.global_attributes = {}
self.global_attributes["namefile"] = self.model.namefile
self.global_attributes["model_ws"] = self.model.model_ws
self.global_attributes["exe_name"] = self.model.exe_name
self.global_attributes["modflow_version"] = self.model.version
self.global_attributes["create_hostname"] = socket.gethostname()
self.global_attributes["create_platform"] = platform.system()
self.global_attributes["create_directory"] = os.getcwd()
htol, rtol = -999, -999
try:
htol, rtol = self.model.solver_tols()
except Exception as e:
self.logger.warn(
"unable to get solver tolerances:" + "{0}".format(str(e))
)
self.global_attributes["solver_head_tolerance"] = htol
self.global_attributes["solver_flux_tolerance"] = rtol
spatial_attribs = {
"xll": self.model_grid.xoffset,
"yll": self.model_grid.yoffset,
"rotation": self.model_grid.angrot,
"proj4_str": self.model_grid.proj4,
}
for n, v in spatial_attribs.items():
self.global_attributes["flopy_sr_" + n] = v
self.global_attributes[
"start_datetime"
] = self.model_time.start_datetime
self.fillvalue = FILLVALUE
# initialize attributes
self.grid_crs = None
self.zs = None
self.ys = None
self.xs = None
self.nc = None
def initialize_geometry(self):
""" initialize the geometric information
needed for the netcdf file
"""
try:
import pyproj
except ImportError as e:
raise ImportError(
"NetCdf error importing pyproj module:\n" + str(e)
)
from distutils.version import LooseVersion
# Check if using newer pyproj version conventions
pyproj220 = LooseVersion(pyproj.__version__) >= LooseVersion("2.2.0")
proj4_str = self.proj4_str
print("initialize_geometry::proj4_str = {}".format(proj4_str))
self.log("building grid crs using proj4 string: {}".format(proj4_str))
if pyproj220:
self.grid_crs = pyproj.CRS(proj4_str)
else:
self.grid_crs = pyproj.Proj(proj4_str, preserve_units=True)
print("initialize_geometry::self.grid_crs = {}".format(self.grid_crs))
vmin, vmax = self.model_grid.botm.min(), self.model_grid.top.max()
if self.z_positive == "down":
vmin, vmax = vmax, vmin
else:
self.zs = self.model_grid.xyzcellcenters[2].copy()
ys = self.model_grid.xyzcellcenters[1].copy()
xs = self.model_grid.xyzcellcenters[0].copy()
# Transform to a known CRS
if pyproj220:
nc_crs = pyproj.CRS(self.nc_epsg_str)
self.transformer = pyproj.Transformer.from_crs(
self.grid_crs, nc_crs, always_xy=True
)
else:
nc_crs = pyproj.Proj(self.nc_epsg_str)
self.transformer = None
print("initialize_geometry::nc_crs = {}".format(nc_crs))
if pyproj220:
print(
"transforming coordinates using = {}".format(self.transformer)
)
self.log("projecting grid cell center arrays")
if pyproj220:
self.xs, self.ys = self.transformer.transform(xs, ys)
else:
self.xs, self.ys = pyproj.transform(self.grid_crs, nc_crs, xs, ys)
# get transformed bounds and record to check against ScienceBase later
xmin, xmax, ymin, ymax = self.model_grid.extent
bbox = np.array(
[[xmin, ymin], [xmin, ymax], [xmax, ymax], [xmax, ymin]]
)
if pyproj220:
x, y = self.transformer.transform(*bbox.transpose())
else:
x, y = pyproj.transform(self.grid_crs, nc_crs, *bbox.transpose())
self.bounds = x.min(), y.min(), x.max(), y.max()
self.vbounds = vmin, vmax
def initialize_file(self, time_values=None):
"""
initialize the netcdf instance, including global attributes,
dimensions, and grid information
Parameters
----------
time_values : list of times to use as time dimension
entries. If none, then use the times in
self.model.dis.perlen and self.start_datetime
"""
if self.nc is not None:
raise Exception("nc file already initialized")
if self.grid_crs is None:
self.log("initializing geometry")
self.initialize_geometry()
self.log("initializing geometry")
try:
import netCDF4
except Exception as e:
self.logger.warn("error importing netCDF module")
msg = "NetCdf error importing netCDF4 module:\n" + str(e)
raise Exception(msg)
# open the file for writing
try:
self.nc = netCDF4.Dataset(self.output_filename, "w")
except Exception as e:
msg = "error creating netcdf dataset:\n{}".format(str(e))
raise Exception(msg)
# write some attributes
self.log("setting standard attributes")
self.nc.setncattr(
"Conventions",
"CF-1.6, ACDD-1.3, flopy {}".format(flopy.__version__),
)
self.nc.setncattr(
"date_created", datetime.utcnow().strftime("%Y-%m-%dT%H:%M:00Z")
)
self.nc.setncattr(
"geospatial_vertical_positive", "{}".format(self.z_positive)
)
min_vertical = np.min(self.zs)
max_vertical = np.max(self.zs)
self.nc.setncattr("geospatial_vertical_min", min_vertical)
self.nc.setncattr("geospatial_vertical_max", max_vertical)
self.nc.setncattr("geospatial_vertical_resolution", "variable")
self.nc.setncattr("featureType", "Grid")
for k, v in self.global_attributes.items():
try:
self.nc.setncattr(k, v)
except:
self.logger.warn(
"error setting global attribute {0}".format(k)
)
self.global_attributes = {}
self.log("setting standard attributes")
# spatial dimensions
self.log("creating dimensions")
# time
if time_values is None:
time_values = np.cumsum(self.model_time.perlen)
self.nc.createDimension("time", len(time_values))
for name, length in zip(self.dimension_names, self.shape):
self.nc.createDimension(name, length)
self.log("creating dimensions")
self.log("setting CRS info")
# Metadata variables
crs = self.nc.createVariable("crs", "i4")
crs.long_name = self.nc_crs_longname
crs.epsg_code = self.nc_epsg_str
crs.semi_major_axis = self.nc_semi_major
crs.inverse_flattening = self.nc_inverse_flat
self.log("setting CRS info")
attribs = {
"units": "{} since {}".format(
self.time_units, self.start_datetime
),
"standard_name": "time",
"long_name": NC_LONG_NAMES.get("time", "time"),
"calendar": "gregorian",
"_CoordinateAxisType": "Time",
}
time = self.create_variable(
"time", attribs, precision_str="f8", dimensions=("time",)
)
self.logger.warn("time_values:{0}".format(str(time_values)))
time[:] = np.asarray(time_values)
# Elevation
attribs = {
"units": self.model_grid.units,
"standard_name": "elevation",
"long_name": NC_LONG_NAMES.get("elevation", "elevation"),
"axis": "Z",
"valid_min": min_vertical,
"valid_max": max_vertical,
"positive": self.z_positive,
}
elev = self.create_variable(
"elevation",
attribs,
precision_str="f8",
dimensions=self.dimension_names,
)
elev[:] = self.zs
# Longitude
attribs = {
"units": "degrees_east",
"standard_name": "longitude",
"long_name": NC_LONG_NAMES.get("longitude", "longitude"),
"axis": "X",
"_CoordinateAxisType": "Lon",
}
lon = self.create_variable(
"longitude",
attribs,
precision_str="f8",
dimensions=self.dimension_names[1:],
)
lon[:] = self.xs
self.log("creating longitude var")
# Latitude
self.log("creating latitude var")
attribs = {
"units": "degrees_north",
"standard_name": "latitude",
"long_name": NC_LONG_NAMES.get("latitude", "latitude"),
"axis": "Y",
"_CoordinateAxisType": "Lat",
}
lat = self.create_variable(
"latitude",
attribs,
precision_str="f8",
dimensions=self.dimension_names[1:],
)
lat[:] = self.ys
# x
self.log("creating x var")
attribs = {
"units": self.model_grid.units,
"standard_name": "projection_x_coordinate",
"long_name": NC_LONG_NAMES.get("x", "x coordinate of projection"),
"axis": "X",
}
x = self.create_variable(
"x_proj",
attribs,
precision_str="f8",
dimensions=self.dimension_names[1:],
)
x[:] = self.model_grid.xyzcellcenters[0]
# y
self.log("creating y var")
attribs = {
"units": self.model_grid.units,
"standard_name": "projection_y_coordinate",
"long_name": NC_LONG_NAMES.get("y", "y coordinate of projection"),
"axis": "Y",
}
y = self.create_variable(
"y_proj",
attribs,
precision_str="f8",
dimensions=self.dimension_names[1:],
)
y[:] = self.model_grid.xyzcellcenters[1]
# grid mapping variable
crs = flopy.utils.reference.crs(
prj=self.model_grid.prj, epsg=self.model_grid.epsg
)
attribs = crs.grid_mapping_attribs
if attribs is not None:
self.log("creating grid mapping variable")
self.create_variable(
attribs["grid_mapping_name"], attribs, precision_str="f8"
)
# layer
self.log("creating layer var")
attribs = {
"units": "",
"standard_name": "layer",
"long_name": NC_LONG_NAMES.get("layer", "layer"),
"positive": "down",
"axis": "Z",
}
lay = self.create_variable("layer", attribs, dimensions=("layer",))
lay[:] = np.arange(0, self.shape[0])
self.log("creating layer var")
if self.model_grid.grid_type == "structured":
# delc
attribs = {
"units": self.model_grid.units.strip("s"),
"long_name": NC_LONG_NAMES.get(
"delc", "Model grid cell spacing along a column"
),
}
delc = self.create_variable("delc", attribs, dimensions=("y",))
delc[:] = self.model_grid.delc[::-1]
if self.model_grid.angrot != 0:
delc.comments = (
"This is the row spacing that applied to the UNROTATED grid. "
+ "This grid HAS been rotated before being saved to NetCDF. "
+ "To compute the unrotated grid, use the origin point and this array."
)
# delr
attribs = {
"units": self.model_grid.units.strip("s"),
"long_name": NC_LONG_NAMES.get(
"delr", "Model grid cell spacing along a row"
),
}
delr = self.create_variable("delr", attribs, dimensions=("x",))
delr[:] = self.model_grid.delr[::-1]
if self.model_grid.angrot != 0:
delr.comments = (
"This is the col spacing that applied to the UNROTATED grid. "
+ "This grid HAS been rotated before being saved to NetCDF. "
+ "To compute the unrotated grid, use the origin point and this array."
)
# else:
# vertices
# attribs = {"units": self.model_grid.lenuni.strip('s'),
# "long_name": NC_LONG_NAMES.get("vertices",
# "List of vertices used in the model by cell"),
# }
# vertices = self.create_variable('vertices', attribs, dimensions=('ncpl',))
# vertices[:] = self.model_grid.vertices
# Workaround for CF/CDM.
# http://www.unidata.ucar.edu/software/thredds/current/netcdf-java/
# reference/StandardCoordinateTransforms.html
# "explicit_field"
exp = self.nc.createVariable("VerticalTransform", "S1")
exp.transform_name = "explicit_field"
exp.existingDataField = "elevation"
exp._CoordinateTransformType = "vertical"
exp._CoordinateAxes = "layer"
return
def initialize_group(
self,
group="timeseries",
dimensions=("time",),
attributes=None,
dimension_data=None,
):
"""
Method to initialize a new group within a netcdf file. This group
can have independent dimensions from the global dimensions
Parameters:
----------
name : str
name of the netcdf group
dimensions : tuple
data dimension names for group
dimension_shape : tuple
tuple of data dimension lengths
attributes : dict
nested dictionary of {dimension : {attributes}} for each netcdf
group dimension
dimension_data : dict
dictionary of {dimension : [data]} for each netcdf group dimension
"""
if attributes is None:
attributes = {}
if dimension_data is None:
dimension_data = {}
if self.nc is None:
self.initialize_file()
if group in self.nc.groups:
raise AttributeError("{} group already initialized".format(group))
self.log("creating netcdf group {}".format(group))
self.nc.createGroup(group)
self.log("{} group created".format(group))
self.log("creating {} group dimensions".format(group))
for dim in dimensions:
if dim == "time":
if "time" not in dimension_data:
time_values = np.cumsum(self.model_time.perlen)
else:
time_values = dimension_data["time"]
self.nc.groups[group].createDimension(dim, len(time_values))
else:
if dim not in dimension_data:
raise AssertionError(
"{} information must be supplied "
"to dimension data".format(dim)
)
else:
self.nc.groups[group].createDimension(
dim, len(dimension_data[dim])
)
self.log("created {} group dimensions".format(group))
dim_names = tuple([i for i in dimensions if i != "time"])
for dim in dimensions:
if dim.lower() == "time":
if "time" not in attributes:
unit_value = "{} since {}".format(
self.time_units, self.start_datetime
)
attribs = {
"units": unit_value,
"standard_name": "time",
"long_name": NC_LONG_NAMES.get("time", "time"),
"calendar": "gregorian",
"Axis": "Y",
"_CoordinateAxisType": "Time",
}
else:
attribs = attributes["time"]
time = self.create_group_variable(
group,
"time",
attribs,
precision_str="f8",
dimensions=("time",),
)
time[:] = np.asarray(time_values)
elif dim.lower() == "zone":
if "zone" not in attributes:
attribs = {
"units": "N/A",
"standard_name": "zone",
"long_name": "zonebudget zone",
"Axis": "X",
"_CoordinateAxisType": "Zone",
}
else:
attribs = attributes["zone"]
zone = self.create_group_variable(
group,
"zone",
attribs,
precision_str="i4",
dimensions=("zone",),
)
zone[:] = np.asarray(dimension_data["zone"])
else:
attribs = attributes[dim]
var = self.create_group_variable(
group,
dim,
attribs,
precision_str="f8",
dimensions=dim_names,
)
var[:] = np.asarray(dimension_data[dim])
@staticmethod
def normalize_name(name):
return name.replace(".", "_").replace(" ", "_").replace("-", "_")
def create_group_variable(
self, group, name, attributes, precision_str, dimensions=("time",)
):
"""
Create a new group variable in the netcdf object
Parameters
----------
name : str
the name of the variable
attributes : dict
attributes to add to the new variable
precision_str : str
netcdf-compliant string. e.g. f4
dimensions : tuple
which dimensions the variable applies to
default : ("time","layer","x","y")
group : str
which netcdf group the variable goes in
default : None which creates the variable in root
Returns
-------
nc variable
Raises
------
AssertionError if precision_str not right
AssertionError if variable name already in netcdf object
        AssertionError if one or more dimensions do not exist
"""
name = self.normalize_name(name)
if (
name in STANDARD_VARS
and name in self.nc.groups[group].variables.keys()
):
return
if name in self.nc.groups[group].variables.keys():
if self.forgive:
self.logger.warn(
"skipping duplicate {} group variable: {}".format(
group, name
)
)
return
else:
raise Exception(
"duplicate {} group variable name: {}".format(group, name)
)
self.log("creating group {} variable: {}".format(group, name))
if precision_str not in PRECISION_STRS:
raise AssertionError(
"netcdf.create_variable() error: precision "
"string {} not in {}".format(precision_str, PRECISION_STRS)
)
if group not in self.nc.groups:
raise AssertionError(
"netcdf group `{}` must be created before "
"variables can be added to it".format(group)
)
self.var_attr_dict["{}/{}".format(group, name)] = attributes
var = self.nc.groups[group].createVariable(
name,
precision_str,
dimensions,
fill_value=self.fillvalue,
zlib=True,
)
for k, v in attributes.items():
try:
var.setncattr(k, v)
except:
self.logger.warn(
"error setting attribute"
+ "{} for group {} variable {}".format(k, group, name)
)
self.log("creating group {} variable: {}".format(group, name))
return var
def create_variable(
self,
name,
attributes,
precision_str="f4",
dimensions=("time", "layer"),
group=None,
):
"""
Create a new variable in the netcdf object
Parameters
----------
name : str
the name of the variable
attributes : dict
attributes to add to the new variable
precision_str : str
netcdf-compliant string. e.g. f4
dimensions : tuple
which dimensions the variable applies to
default : ("time","layer","x","y")
group : str
which netcdf group the variable goes in
default : None which creates the variable in root
Returns
-------
nc variable
Raises
------
AssertionError if precision_str not right
AssertionError if variable name already in netcdf object
        AssertionError if one or more dimensions do not exist
"""
# Normalize variable name
name = self.normalize_name(name)
# if this is a core var like a dimension...
# long_name = attributes.pop("long_name",name)
if name in STANDARD_VARS and name in self.nc.variables.keys():
return
if (
name not in self.var_attr_dict.keys()
and name in self.nc.variables.keys()
):
if self.forgive:
self.logger.warn(
"skipping duplicate variable: {0}".format(name)
)
return
else:
raise Exception("duplicate variable name: {0}".format(name))
if name in self.nc.variables.keys():
raise Exception("duplicate variable name: {0}".format(name))
self.log("creating variable: " + str(name))
assert (
precision_str in PRECISION_STRS
), "netcdf.create_variable() error: precision string {0} not in {1}".format(
precision_str, PRECISION_STRS
)
if self.nc is None:
self.initialize_file()
# check that the requested dimension exists and
# build up the chuck sizes
# chunks = []
# for dimension in dimensions:
# assert self.nc.dimensions.get(dimension) is not None, \
# "netcdf.create_variable() dimension not found:" + dimension
# chunk = self.chunks[dimension]
# assert chunk is not None, \
# "netcdf.create_variable() chunk size of {0} is None in self.chunks". \
# format(dimension)
# chunks.append(chunk)
self.var_attr_dict[name] = attributes
var = self.nc.createVariable(
name,
precision_str,
dimensions,
fill_value=self.fillvalue,
zlib=True,
) # ,
# chunksizes=tuple(chunks))
for k, v in attributes.items():
try:
var.setncattr(k, v)
except:
self.logger.warn(
"error setting attribute"
+ "{0} for variable {1}".format(k, name)
)
self.log("creating variable: " + str(name))
return var
def add_global_attributes(self, attr_dict):
""" add global attribute to an initialized file
Parameters
----------
attr_dict : dict(attribute name, attribute value)
Returns
-------
None
Raises
------
        Exception if self.nc is None (initialize_file()
has not been called)
"""
if self.nc is None:
# self.initialize_file()
mess = (
"NetCDF.add_global_attributes() should only "
+ "be called after the file has been initialized"
)
self.logger.warn(mess)
raise Exception(mess)
self.log("setting global attributes")
self.nc.setncatts(attr_dict)
self.log("setting global attributes")
def add_sciencebase_metadata(self, id, check=True):
"""Add metadata from ScienceBase using the
flopy.export.metadata.acdd class.
Returns
-------
metadata : flopy.export.metadata.acdd object
"""
md = acdd(id, model=self.model)
if md.sb is not None:
if check:
self._check_vs_sciencebase(md)
# get set of public attributes
attr = {n for n in dir(md) if "_" not in n[0]}
# skip some convenience attributes
skip = {
"bounds",
"creator",
"sb",
"xmlroot",
"time_coverage",
"get_sciencebase_xml_metadata",
"get_sciencebase_metadata",
}
towrite = sorted(list(attr.difference(skip)))
for k in towrite:
v = md.__getattribute__(k)
if v is not None:
# convert everything to strings
if not isinstance(v, str):
if isinstance(v, list):
v = ",".join(v)
else:
v = str(v)
self.global_attributes[k] = v
self.nc.setncattr(k, v)
self.write()
return md
def _check_vs_sciencebase(self, md):
"""Check that model bounds read from flopy are consistent with those in ScienceBase."""
xmin, ymin, xmax, ymax = self.bounds
tol = 1e-5
assert md.geospatial_lon_min - xmin < tol
assert md.geospatial_lon_max - xmax < tol
assert md.geospatial_lat_min - ymin < tol
assert md.geospatial_lat_max - ymax < tol
assert md.geospatial_vertical_min - self.vbounds[0] < tol
assert md.geospatial_vertical_max - self.vbounds[1] < tol
def get_longnames_from_docstrings(self, outfile="longnames.json"):
"""
This is experimental.
Scrape Flopy module docstrings and return docstrings for parameters
included in the list of variables added to NetCdf object. Create
a dictionary of longnames keyed by the NetCdf variable names; make each
longname from the first sentence of the docstring for that parameter.
One major limitation is that variables from mflists often aren't described
in the docstrings.
"""
def startstop(ds):
"""Get just the Parameters section of the docstring."""
start, stop = 0, -1
for i, l in enumerate(ds):
if "Parameters" in l and "----" in ds[i + 1]:
start = i + 2
if l.strip() in ["Attributes", "Methods", "Returns", "Notes"]:
stop = i - 1
break
if i >= start and "----" in l:
stop = i - 2
break
return start, stop
def get_entries(ds):
"""Parse docstring entries into dictionary."""
stuff = {}
k = None
for line in ds:
if (
len(line) >= 5
and line[:4] == " " * 4
and line[4] != " "
and ":" in line
):
k = line.split(":")[0].strip()
stuff[k] = ""
# lines with parameter descriptions
elif k is not None and len(line) > 10: # avoid orphans
stuff[k] += line.strip() + " "
return stuff
# get a list of the flopy classes
# packages = inspect.getmembers(flopy.modflow, inspect.isclass)
packages = [(pp.name[0], pp) for pp in self.model.packagelist]
# get a list of the NetCDF variables
attr = [v.split("_")[-1] for v in self.nc.variables]
# parse docstrings to get long names
longnames = {}
for pkg in packages:
# parse the docstring
obj = pkg[-1]
ds = obj.__doc__.split("\n")
start, stop = startstop(ds)
txt = ds[start:stop]
if stop - start > 0:
params = get_entries(txt)
for k, v in params.items():
if k in attr:
longnames[k] = v.split(". ")[0]
# add in any variables that weren't found
for var in attr:
if var not in longnames.keys():
longnames[var] = ""
with open(outfile, "w") as output:
json.dump(longnames, output, sort_keys=True, indent=2)
return longnames
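# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). It shows how the
# arithmetic operators defined above (__add__, __sub__, __mul__, __truediv__),
# copy() and difference() are meant to be used: each builds a zero-filled copy
# via NetCdf.zeros_like() and fills it variable by variable. The `nc` argument
# is assumed to be an already-populated NetCdf instance, and "backup.nc" /
# "previous_run.nc" are illustrative filenames; nothing here runs at import time.
# --------------------------------------------------------------------------
def _example_netcdf_arithmetic(nc):
    scaled = nc * 2.0        # a scalar is broadcast over every exported variable
    delta = nc - scaled      # variable-by-variable difference of two NetCdf objects
    backup = nc.copy("backup.nc")   # deep copy backed by a new file
    # per-variable diff against an existing file, zero diffs masked to the fill value
    diff = nc.difference("previous_run.nc", minuend="self", mask_zero_diff=True)
    return scaled, delta, backup, diff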
|
# -*- coding: utf-8 -*-
from .Enviopack import Enviopack
from .Auth.Auth import Auth
from .Quote.Quote import Quote
from .Pickings.Pickings import Pickings
from .Orders.Orders import Orders
__version__ = "0.4.6"
__author__ = "Federico Gobea"
|
"""
WSGI config for billsengine_31836 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'billsengine_31836.settings')
application = get_wsgi_application()
|
import requests
import threading
import random
import json
usernames = json.loads(open("usernames.json", "r").read())
password = '%4B%65%6E%79%6F%6E%35%25' # A URL-encoded (percent-encoded) password
siteurl = '192.168.122.61'
def run():
username = random.choice(usernames)
token = requests.get('http://' + siteurl + '/login/token.php?username=' + username + '&password=' + password + '&service=moodle_mobile_app').json()["token"]
print(f'{token}')
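# Hedged follow-up sketch (not part of the original script): once login/token.php
# issues a token, Moodle's standard REST endpoint accepts it as `wstoken`. The
# function below follows the stock Moodle web-service API and is shown for
# illustration only; it is never called by the loop further down.
def get_site_info(token):
    response = requests.get(
        'http://' + siteurl + '/webservice/rest/server.php',
        params={
            'wstoken': token,
            'wsfunction': 'core_webservice_get_site_info',
            'moodlewsrestformat': 'json',
        },
    )
    return response.json()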
while True:
#run()
#"""
numthreads = 200
threads = []
for i in range(numthreads):
t = threading.Thread(target = run)
t.daemon = True
threads.append(t)
for i in range(numthreads):
threads[i].start()
for i in range(numthreads):
threads[i].join()
#"""
|
import pytest
from autogluon.core.space import Categorical
from autogluon.vision._gluoncv import ObjectDetection
def get_dataset(path):
return ObjectDetection.Dataset.from_voc(path)
@pytest.mark.skip(reason="ObjectDetector is not stable to test, and fails due to transient errors occasionally.")
def test_object_detection_estimator():
dataset = get_dataset('https://autogluon.s3.amazonaws.com/datasets/tiny_motorbike.zip')
train_data, val_data, test_data = dataset.random_split(val_size=0.3, test_size=0.2, random_state=0)
task = ObjectDetection({'num_trials': 1, 'epochs': 1, 'batch_size': 4})
detector = task.fit(train_data)
assert task.fit_summary().get('valid_map', 0) > 0
test_result = detector.predict(test_data)
@pytest.mark.skip(reason="ObjectDetector is not stable to test, and fails due to transient errors occasionally.")
def test_object_detection_estimator_transfer():
dataset = get_dataset('https://autogluon.s3.amazonaws.com/datasets/tiny_motorbike.zip')
train_data, val_data, test_data = dataset.random_split(val_size=0.3, test_size=0.2, random_state=0)
task = ObjectDetection({'num_trials': 1, 'epochs': 1, 'transfer': Categorical('yolo3_darknet53_coco', 'ssd_512_resnet50_v1_voc'), 'estimator': 'ssd', 'batch_size': 4})
detector = task.fit(train_data)
assert task.fit_summary().get('valid_map', 0) > 0
test_result = detector.predict(test_data)
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import asyncio
import pyppeteer
import time
import os
import random
from exe_js import js1, js3, js4, js5
# http://www.mamicode.com/info-detail-2302923.html
# https://segmentfault.com/a/1190000011627343
"""
{
proxy: "127.0.0.1:1234",
proxy-auth: "userx:passx",
proxy-type: "meh"
}
"""
def input_time_random():
return random.randint(300, 500)
async def main():
print("in main ")
print(os.environ.get('PYPPETEER_CHROMIUM_REVISION'))
browser = await pyppeteer.launch(
executablePath=r"D:\A\Desktop\项目+更新\node_project\chrome-win\chrome-win\chrome.exe",
headless=False,
args=[
'--proxy-server=118.24.156.214:8118'
],
timeout=30000)
page = await browser.newPage()
await page.setViewport({"width": 1000, "height": 780})
await page.setUserAgent("Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36")
await page.goto('http://httpbin.net/ip')
# await page.waitForNavigation({'waitUntil': 'load'}) # 有时候不需要
content = await page.content()
cookies = await page.cookies()
await page.screenshot({'path': 'example.png'})
dimensions = await page.evaluate('''() => {
return {
width: document.documentElement.clientWidth,
height: document.documentElement.clientHeight,
deviceScaleFactor: window.devicePixelRatio,
}
}''')
print(dimensions)
await browser.close()
return {'content': content, 'cookies': cookies}
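# Hedged sketch (not part of the original script): the commented proxy block near
# the top mentions a "proxy-auth" entry. If the --proxy-server used above required
# credentials, pyppeteer can supply them per page with page.authenticate() before
# navigating. The username/password values are placeholders taken from that comment.
async def open_with_proxy_auth(page):
    await page.authenticate({'username': 'userx', 'password': 'passx'})
    await page.goto('http://httpbin.net/ip')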
asyncio.get_event_loop().run_until_complete(main())
|
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'api_yamdb.settings')
application = get_wsgi_application()
|
"""
Django settings for bingo project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(BASE_DIR,'templates')
STATIC_DIR=os.path.join(BASE_DIR,'static')
MEDIA_ROOT=os.path.join(BASE_DIR,'static')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@k0#p3kidu)yaaa3u1hplxz)f@^6xiy384*(+n@@s5x#1bx@m5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'quiz',
'teacher',
'student',
'widget_tweaks',
'channels',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CSRF_COOKIE_SECURE=False
ROOT_URLCONF = 'bingo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bingo.wsgi.application'
ASGI_APPLICATION = 'bingo.asgi.application'
CHANNEL_LAYERS = {
"default": {
"BACKEND": "channels_redis.core.RedisChannelLayer",
"CONFIG": {
"hosts": [("localhost", 6379)],
},
},
}
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS=[
STATIC_DIR,
]
LOGIN_REDIRECT_URL='/afterlogin'
# for the contact us feature, provide your gmail id and password below
EMAIL_BACKEND ='django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'xyz.gmail.com'
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_HOST_USER = 'from@gmail.com' # this email will be used to send emails
EMAIL_HOST_PASSWORD = 'xyz' # host email password required
# now sign in to the host gmail account in your browser
# open the following link and turn the setting ON
# https://myaccount.google.com/lesssecureapps
# otherwise you will get SMTPAuthenticationError at /contactus
# this step is required because google blocks app authentication by default
EMAIL_RECEIVING_USER = ['to@gmail.com'] # email on which you will receive messages sent from website
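# Hedged usage sketch (not part of the original settings): with the SMTP values
# above, a contact-us view would typically send mail along these lines
# (subject/message are placeholder names):
# from django.core.mail import send_mail
# send_mail(subject, message, EMAIL_HOST_USER, EMAIL_RECEIVING_USER)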
|
from typing import Optional
from zenml.steps import BaseStepConfig
class PreTrainingConfigs(BaseStepConfig):
# The configuration for the pre-training of the agent
ENV_NAME: str = "BreakoutDeterministic-v4"
WRITE_TENSORBOARD: bool = True
TENSORBOARD_DIR: str = "tensorboard/"
LEARNING_RATE: float = 0.00001
INPUT_SHAPE: tuple = (84, 84)
BATCH_SIZE: int = 32
SAVE_PATH = "breakout-saves"
USE_PER: bool = False
MEM_SIZE: int = 100
    LOAD_FROM: Optional[str] = None
LOAD_REPLAY_BUFFER: bool = True
MAX_NOOP_STEPS: int = 2000
TOTAL_FRAMES: int = 3000
FRAMES_BETWEEN_EVAL: int = 100000
MAX_EPISODE_LENGTH: int = 18000
EVAL_LENGTH: int = 10000
UPDATE_FREQ: int = 10000
PRIORITY_SCALE: float = 0.7 # How much the replay buffer should sample based on priorities. 0 = complete random samples, 1 = completely aligned with priorities
CLIP_REWARD: bool = True # Any positive reward is +1, and negative reward is -1, 0 is unchanged
UPDATE_FREQ: int = 4 # Number of actions between gradient descent steps
DISCOUNT_FACTOR: float = 0.99 # Gamma, how much to discount future rewards
BATCH_SIZE: int = 32 # Batch size for training
    MIN_REPLAY_BUFFER_SIZE: int = 50000 # The minimum size the replay buffer must be before we start to update the agent
WRITE_TENSORBOARD: bool = True
EVAL_LENGTH: int = 10000 # Number of frames to evaluate for
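# Hedged illustration (not part of the original file): how a training step might
# apply the CLIP_REWARD flag described above. Any positive reward becomes +1,
# any negative reward becomes -1, and 0 is left unchanged. The helper is only a
# sketch and is not wired into any pipeline here.
def clip_reward(reward: float, config: PreTrainingConfigs) -> float:
    if not config.CLIP_REWARD:
        return reward
    return float((reward > 0) - (reward < 0))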
|
import educative.course1.stacks_queues.stack as s
input_data = [23, 60, 12, 42, 4, 97, 2]
expected_output_data = [2, 4, 12, 23, 42, 60, 97]
# This solution uses a second stack
# 1. while the input stack is not empty, we pop the top value and compare it
# with the top value of the second stack
# 2. if value >= top of stack 2, we insert the popped value in stack 2
# 3. else while popped value < top of stack 2, we keep pushing top of stack 2 to stack 1
# 4. finally when stack 2 is empty we push the popped value and start over again
# 5. The output will be a sorted stack
# ---------------------------------------------
# NOTE - This can also be done by recursion (see the sketch after sort_stack_1) ---
# ---------------------------------------------
def sort_stack_1(stack):
result = s.Stack(stack.capacity, True) # suppress_printing = True
while not stack.is_empty():
value = stack.pop()
if not result.is_empty() and value >= int(result.peek()):
result.push(value)
else:
while not result.is_empty() and value < int(result.peek()):
stack.push(result.pop())
result.push(value)
return result.prettify()
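# Hedged sketch of the recursive variant mentioned in the NOTE above (not part of
# the original solution): pop every element, sort the remainder recursively, then
# push each popped value back into its sorted position. It assumes the same Stack
# interface used by sort_stack_1 (push, pop, peek, is_empty).
def sort_stack_recursive(stack):
    if stack.is_empty():
        return stack
    value = stack.pop()
    sort_stack_recursive(stack)
    _insert_sorted(stack, value)
    return stack
def _insert_sorted(stack, value):
    if stack.is_empty() or value >= int(stack.peek()):
        stack.push(value)
        return
    top = stack.pop()
    _insert_sorted(stack, value)
    stack.push(top)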
def main():
input_stack = s.Stack(len(input_data), True) # suppress_printing = True
[input_stack.push(x) for x in input_data]
expected_output_stack = s.Stack(len(expected_output_data), True) # suppress_printing = True
[expected_output_stack.push(x) for x in expected_output_data]
print("Input: \n" + str(input_stack.prettify()))
print("Expected: \n" + str(expected_output_stack.prettify()))
print("Output: \n" + str(sort_stack_1(input_stack)))
if __name__ == '__main__':
main()
|
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from binascii import unhexlify
from bisect import bisect_left
from collections import defaultdict
from functools import total_ordering
from hashlib import md5
import json
import logging
import re
import six
from six.moves import zip
import sys
from threading import RLock
import struct
import random
murmur3 = None
try:
from cassandra.murmur3 import murmur3
except ImportError as e:
pass
from cassandra import SignatureDescriptor, ConsistencyLevel, InvalidRequest, Unauthorized
import cassandra.cqltypes as types
from cassandra.encoder import Encoder
from cassandra.marshal import varint_unpack
from cassandra.protocol import QueryMessage
from cassandra.query import dict_factory, bind_params
from cassandra.util import OrderedDict, Version
from cassandra.pool import HostDistance
from cassandra.connection import EndPoint
from cassandra.compat import Mapping
log = logging.getLogger(__name__)
cql_keywords = set((
'add', 'aggregate', 'all', 'allow', 'alter', 'and', 'apply', 'as', 'asc', 'ascii', 'authorize', 'batch', 'begin',
'bigint', 'blob', 'boolean', 'by', 'called', 'clustering', 'columnfamily', 'compact', 'contains', 'count',
'counter', 'create', 'custom', 'date', 'decimal', 'delete', 'desc', 'describe', 'deterministic', 'distinct', 'double', 'drop',
'entries', 'execute', 'exists', 'filtering', 'finalfunc', 'float', 'from', 'frozen', 'full', 'function',
'functions', 'grant', 'if', 'in', 'index', 'inet', 'infinity', 'initcond', 'input', 'insert', 'int', 'into', 'is', 'json',
'key', 'keys', 'keyspace', 'keyspaces', 'language', 'limit', 'list', 'login', 'map', 'materialized', 'modify', 'monotonic', 'nan', 'nologin',
'norecursive', 'nosuperuser', 'not', 'null', 'of', 'on', 'options', 'or', 'order', 'password', 'permission',
'permissions', 'primary', 'rename', 'replace', 'returns', 'revoke', 'role', 'roles', 'schema', 'select', 'set',
'sfunc', 'smallint', 'static', 'storage', 'stype', 'superuser', 'table', 'text', 'time', 'timestamp', 'timeuuid',
'tinyint', 'to', 'token', 'trigger', 'truncate', 'ttl', 'tuple', 'type', 'unlogged', 'update', 'use', 'user',
'users', 'using', 'uuid', 'values', 'varchar', 'varint', 'view', 'where', 'with', 'writetime',
# DSE specifics
"node", "nodes", "plan", "active", "application", "applications", "java", "executor", "executors", "std_out", "std_err",
"renew", "delegation", "no", "redact", "token", "lowercasestring", "cluster", "authentication", "schemes", "scheme",
"internal", "ldap", "kerberos", "remote", "object", "method", "call", "calls", "search", "schema", "config", "rows",
"columns", "profiles", "commit", "reload", "unset", "rebuild", "field", "workpool", "any", "submission", "indices",
"restrict", "unrestrict"
))
"""
Set of keywords in CQL.
Derived from .../cassandra/src/java/org/apache/cassandra/cql3/Cql.g
"""
cql_keywords_unreserved = set((
'aggregate', 'all', 'as', 'ascii', 'bigint', 'blob', 'boolean', 'called', 'clustering', 'compact', 'contains',
'count', 'counter', 'custom', 'date', 'decimal', 'deterministic', 'distinct', 'double', 'exists', 'filtering', 'finalfunc', 'float',
'frozen', 'function', 'functions', 'inet', 'initcond', 'input', 'int', 'json', 'key', 'keys', 'keyspaces',
'language', 'list', 'login', 'map', 'monotonic', 'nologin', 'nosuperuser', 'options', 'password', 'permission', 'permissions',
'returns', 'role', 'roles', 'sfunc', 'smallint', 'static', 'storage', 'stype', 'superuser', 'text', 'time',
'timestamp', 'timeuuid', 'tinyint', 'trigger', 'ttl', 'tuple', 'type', 'user', 'users', 'uuid', 'values', 'varchar',
'varint', 'writetime'
))
"""
Set of unreserved keywords in CQL.
Derived from .../cassandra/src/java/org/apache/cassandra/cql3/Cql.g
"""
cql_keywords_reserved = cql_keywords - cql_keywords_unreserved
"""
Set of reserved keywords in CQL.
"""
_encoder = Encoder()
class Metadata(object):
"""
Holds a representation of the cluster schema and topology.
"""
cluster_name = None
""" The string name of the cluster. """
keyspaces = None
"""
A map from keyspace names to matching :class:`~.KeyspaceMetadata` instances.
"""
partitioner = None
"""
The string name of the partitioner for the cluster.
"""
token_map = None
""" A :class:`~.TokenMap` instance describing the ring topology. """
dbaas = False
""" A boolean indicating if connected to a DBaaS cluster """
def __init__(self):
self.keyspaces = {}
self.dbaas = False
self._hosts = {}
self._hosts_lock = RLock()
def export_schema_as_string(self):
"""
Returns a string that can be executed as a query in order to recreate
the entire schema. The string is formatted to be human readable.
"""
return "\n\n".join(ks.export_as_string() for ks in self.keyspaces.values())
def refresh(self, connection, timeout, target_type=None, change_type=None, **kwargs):
server_version = self.get_host(connection.endpoint).release_version
dse_version = self.get_host(connection.endpoint).dse_version
parser = get_schema_parser(connection, server_version, dse_version, timeout)
if not target_type:
self._rebuild_all(parser)
return
tt_lower = target_type.lower()
try:
parse_method = getattr(parser, 'get_' + tt_lower)
meta = parse_method(self.keyspaces, **kwargs)
if meta:
update_method = getattr(self, '_update_' + tt_lower)
if tt_lower == 'keyspace' and connection.protocol_version < 3:
# we didn't have 'type' target in legacy protocol versions, so we need to query those too
user_types = parser.get_types_map(self.keyspaces, **kwargs)
self._update_keyspace(meta, user_types)
else:
update_method(meta)
else:
drop_method = getattr(self, '_drop_' + tt_lower)
drop_method(**kwargs)
except AttributeError:
raise ValueError("Unknown schema target_type: '%s'" % target_type)
def _rebuild_all(self, parser):
current_keyspaces = set()
for keyspace_meta in parser.get_all_keyspaces():
current_keyspaces.add(keyspace_meta.name)
old_keyspace_meta = self.keyspaces.get(keyspace_meta.name, None)
self.keyspaces[keyspace_meta.name] = keyspace_meta
if old_keyspace_meta:
self._keyspace_updated(keyspace_meta.name)
else:
self._keyspace_added(keyspace_meta.name)
# remove not-just-added keyspaces
removed_keyspaces = [name for name in self.keyspaces.keys()
if name not in current_keyspaces]
self.keyspaces = dict((name, meta) for name, meta in self.keyspaces.items()
if name in current_keyspaces)
for ksname in removed_keyspaces:
self._keyspace_removed(ksname)
def _update_keyspace(self, keyspace_meta, new_user_types=None):
ks_name = keyspace_meta.name
old_keyspace_meta = self.keyspaces.get(ks_name, None)
self.keyspaces[ks_name] = keyspace_meta
if old_keyspace_meta:
keyspace_meta.tables = old_keyspace_meta.tables
keyspace_meta.user_types = new_user_types if new_user_types is not None else old_keyspace_meta.user_types
keyspace_meta.indexes = old_keyspace_meta.indexes
keyspace_meta.functions = old_keyspace_meta.functions
keyspace_meta.aggregates = old_keyspace_meta.aggregates
keyspace_meta.views = old_keyspace_meta.views
if (keyspace_meta.replication_strategy != old_keyspace_meta.replication_strategy):
self._keyspace_updated(ks_name)
else:
self._keyspace_added(ks_name)
def _drop_keyspace(self, keyspace):
if self.keyspaces.pop(keyspace, None):
self._keyspace_removed(keyspace)
def _update_table(self, meta):
try:
keyspace_meta = self.keyspaces[meta.keyspace_name]
# this is unfortunate, but protocol v4 does not differentiate
# between events for tables and views. <parser>.get_table will
# return one or the other based on the query results.
# Here we deal with that.
if isinstance(meta, TableMetadata):
keyspace_meta._add_table_metadata(meta)
else:
keyspace_meta._add_view_metadata(meta)
except KeyError:
# can happen if keyspace disappears while processing async event
pass
def _drop_table(self, keyspace, table):
try:
keyspace_meta = self.keyspaces[keyspace]
keyspace_meta._drop_table_metadata(table) # handles either table or view
except KeyError:
# can happen if keyspace disappears while processing async event
pass
def _update_type(self, type_meta):
try:
self.keyspaces[type_meta.keyspace].user_types[type_meta.name] = type_meta
except KeyError:
# can happen if keyspace disappears while processing async event
pass
def _drop_type(self, keyspace, type):
try:
self.keyspaces[keyspace].user_types.pop(type, None)
except KeyError:
# can happen if keyspace disappears while processing async event
pass
def _update_function(self, function_meta):
try:
self.keyspaces[function_meta.keyspace].functions[function_meta.signature] = function_meta
except KeyError:
# can happen if keyspace disappears while processing async event
pass
def _drop_function(self, keyspace, function):
try:
self.keyspaces[keyspace].functions.pop(function.signature, None)
except KeyError:
pass
def _update_aggregate(self, aggregate_meta):
try:
self.keyspaces[aggregate_meta.keyspace].aggregates[aggregate_meta.signature] = aggregate_meta
except KeyError:
pass
def _drop_aggregate(self, keyspace, aggregate):
try:
self.keyspaces[keyspace].aggregates.pop(aggregate.signature, None)
except KeyError:
pass
def _keyspace_added(self, ksname):
if self.token_map:
self.token_map.rebuild_keyspace(ksname, build_if_absent=False)
def _keyspace_updated(self, ksname):
if self.token_map:
self.token_map.rebuild_keyspace(ksname, build_if_absent=False)
def _keyspace_removed(self, ksname):
if self.token_map:
self.token_map.remove_keyspace(ksname)
def rebuild_token_map(self, partitioner, token_map):
"""
Rebuild our view of the topology from fresh rows from the
system topology tables.
For internal use only.
"""
self.partitioner = partitioner
if partitioner.endswith('RandomPartitioner'):
token_class = MD5Token
elif partitioner.endswith('Murmur3Partitioner'):
token_class = Murmur3Token
elif partitioner.endswith('ByteOrderedPartitioner'):
token_class = BytesToken
else:
self.token_map = None
return
token_to_host_owner = {}
ring = []
for host, token_strings in six.iteritems(token_map):
for token_string in token_strings:
token = token_class.from_string(token_string)
ring.append(token)
token_to_host_owner[token] = host
all_tokens = sorted(ring)
self.token_map = TokenMap(
token_class, token_to_host_owner, all_tokens, self)
def get_replicas(self, keyspace, key):
"""
Returns a list of :class:`.Host` instances that are replicas for a given
partition key.
"""
t = self.token_map
if not t:
return []
try:
return t.get_replicas(keyspace, t.token_class.from_key(key))
except NoMurmur3:
return []
def can_support_partitioner(self):
if self.partitioner.endswith('Murmur3Partitioner') and murmur3 is None:
return False
else:
return True
def add_or_return_host(self, host):
"""
Returns a tuple (host, new), where ``host`` is a Host
instance, and ``new`` is a bool indicating whether
the host was newly added.
"""
with self._hosts_lock:
try:
return self._hosts[host.endpoint], False
except KeyError:
self._hosts[host.endpoint] = host
return host, True
def remove_host(self, host):
with self._hosts_lock:
return bool(self._hosts.pop(host.endpoint, False))
def get_host(self, endpoint_or_address):
"""
Find a host in the metadata for a specific endpoint. If a string inet address is passed,
iterate all hosts to match the :attr:`~.pool.Host.broadcast_rpc_address` attribute.
"""
if not isinstance(endpoint_or_address, EndPoint):
return self._get_host_by_address(endpoint_or_address)
return self._hosts.get(endpoint_or_address)
def _get_host_by_address(self, address):
for host in six.itervalues(self._hosts):
if host.broadcast_rpc_address == address:
return host
return None
def all_hosts(self):
"""
Returns a list of all known :class:`.Host` instances in the cluster.
"""
with self._hosts_lock:
return list(self._hosts.values())
REPLICATION_STRATEGY_CLASS_PREFIX = "org.apache.cassandra.locator."
def trim_if_startswith(s, prefix):
if s.startswith(prefix):
return s[len(prefix):]
return s
_replication_strategies = {}
class ReplicationStrategyTypeType(type):
def __new__(metacls, name, bases, dct):
dct.setdefault('name', name)
cls = type.__new__(metacls, name, bases, dct)
if not name.startswith('_'):
_replication_strategies[name] = cls
return cls
@six.add_metaclass(ReplicationStrategyTypeType)
class _ReplicationStrategy(object):
options_map = None
@classmethod
def create(cls, strategy_class, options_map):
if not strategy_class:
return None
strategy_name = trim_if_startswith(strategy_class, REPLICATION_STRATEGY_CLASS_PREFIX)
rs_class = _replication_strategies.get(strategy_name, None)
if rs_class is None:
rs_class = _UnknownStrategyBuilder(strategy_name)
_replication_strategies[strategy_name] = rs_class
try:
rs_instance = rs_class(options_map)
except Exception as exc:
log.warning("Failed creating %s with options %s: %s", strategy_name, options_map, exc)
return None
return rs_instance
def make_token_replica_map(self, token_to_host_owner, ring):
raise NotImplementedError()
def export_for_schema(self):
raise NotImplementedError()
ReplicationStrategy = _ReplicationStrategy
class _UnknownStrategyBuilder(object):
def __init__(self, name):
self.name = name
def __call__(self, options_map):
strategy_instance = _UnknownStrategy(self.name, options_map)
return strategy_instance
class _UnknownStrategy(ReplicationStrategy):
def __init__(self, name, options_map):
self.name = name
self.options_map = options_map.copy() if options_map is not None else dict()
self.options_map['class'] = self.name
def __eq__(self, other):
return (isinstance(other, _UnknownStrategy) and
self.name == other.name and
self.options_map == other.options_map)
def export_for_schema(self):
"""
Returns a string version of these replication options which are
suitable for use in a CREATE KEYSPACE statement.
"""
if self.options_map:
return dict((str(key), str(value)) for key, value in self.options_map.items())
return "{'class': '%s'}" % (self.name, )
def make_token_replica_map(self, token_to_host_owner, ring):
return {}
class SimpleStrategy(ReplicationStrategy):
replication_factor = None
"""
The replication factor for this keyspace.
"""
def __init__(self, options_map):
try:
self.replication_factor = int(options_map['replication_factor'])
except Exception:
raise ValueError("SimpleStrategy requires an integer 'replication_factor' option")
def make_token_replica_map(self, token_to_host_owner, ring):
replica_map = {}
for i in range(len(ring)):
j, hosts = 0, list()
while len(hosts) < self.replication_factor and j < len(ring):
token = ring[(i + j) % len(ring)]
host = token_to_host_owner[token]
if host not in hosts:
hosts.append(host)
j += 1
replica_map[ring[i]] = hosts
return replica_map
def export_for_schema(self):
"""
Returns a string version of these replication options which are
suitable for use in a CREATE KEYSPACE statement.
"""
return "{'class': 'SimpleStrategy', 'replication_factor': '%d'}" \
% (self.replication_factor,)
def __eq__(self, other):
if not isinstance(other, SimpleStrategy):
return False
return self.replication_factor == other.replication_factor
class NetworkTopologyStrategy(ReplicationStrategy):
dc_replication_factors = None
"""
A map of datacenter names to the replication factor for that DC.
"""
def __init__(self, dc_replication_factors):
self.dc_replication_factors = dict(
(str(k), int(v)) for k, v in dc_replication_factors.items())
def make_token_replica_map(self, token_to_host_owner, ring):
dc_rf_map = dict((dc, int(rf))
for dc, rf in self.dc_replication_factors.items() if rf > 0)
# build a map of DCs to lists of indexes into `ring` for tokens that
# belong to that DC
dc_to_token_offset = defaultdict(list)
dc_racks = defaultdict(set)
hosts_per_dc = defaultdict(set)
for i, token in enumerate(ring):
host = token_to_host_owner[token]
dc_to_token_offset[host.datacenter].append(i)
if host.datacenter and host.rack:
dc_racks[host.datacenter].add(host.rack)
hosts_per_dc[host.datacenter].add(host)
# A map of DCs to an index into the dc_to_token_offset value for that dc.
# This is how we keep track of advancing around the ring for each DC.
dc_to_current_index = defaultdict(int)
replica_map = defaultdict(list)
for i in range(len(ring)):
replicas = replica_map[ring[i]]
# go through each DC and find the replicas in that DC
for dc in dc_to_token_offset.keys():
if dc not in dc_rf_map:
continue
# advance our per-DC index until we're up to at least the
# current token in the ring
token_offsets = dc_to_token_offset[dc]
index = dc_to_current_index[dc]
num_tokens = len(token_offsets)
while index < num_tokens and token_offsets[index] < i:
index += 1
dc_to_current_index[dc] = index
replicas_remaining = dc_rf_map[dc]
replicas_this_dc = 0
skipped_hosts = []
racks_placed = set()
racks_this_dc = dc_racks[dc]
hosts_this_dc = len(hosts_per_dc[dc])
for token_offset_index in six.moves.range(index, index+num_tokens):
if token_offset_index >= len(token_offsets):
token_offset_index = token_offset_index - len(token_offsets)
token_offset = token_offsets[token_offset_index]
host = token_to_host_owner[ring[token_offset]]
if replicas_remaining == 0 or replicas_this_dc == hosts_this_dc:
break
if host in replicas:
continue
if host.rack in racks_placed and len(racks_placed) < len(racks_this_dc):
skipped_hosts.append(host)
continue
replicas.append(host)
replicas_this_dc += 1
replicas_remaining -= 1
racks_placed.add(host.rack)
if len(racks_placed) == len(racks_this_dc):
for host in skipped_hosts:
if replicas_remaining == 0:
break
replicas.append(host)
replicas_remaining -= 1
del skipped_hosts[:]
return replica_map
def export_for_schema(self):
"""
Returns a string version of these replication options which are
suitable for use in a CREATE KEYSPACE statement.
"""
ret = "{'class': 'NetworkTopologyStrategy'"
for dc, repl_factor in sorted(self.dc_replication_factors.items()):
ret += ", '%s': '%d'" % (dc, repl_factor)
return ret + "}"
def __eq__(self, other):
if not isinstance(other, NetworkTopologyStrategy):
return False
return self.dc_replication_factors == other.dc_replication_factors
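# A minimal sketch (not part of the driver's public API): NetworkTopologyStrategy
# places replicas per datacenter, spreading them across racks before reusing one.
# The namedtuple hosts below are stand-ins exposing only the two attributes the
# algorithm reads, `datacenter` and `rack`.
def _network_topology_sketch():
    from collections import namedtuple
    MockHost = namedtuple('MockHost', ('datacenter', 'rack'))
    h1, h2, h3 = MockHost('dc1', 'r1'), MockHost('dc1', 'r2'), MockHost('dc2', 'r1')
    ring = ['t0', 't1', 't2']
    owners = {'t0': h1, 't1': h2, 't2': h3}
    strategy = NetworkTopologyStrategy({'dc1': 2, 'dc2': 1})
    replica_map = strategy.make_token_replica_map(owners, ring)
    # every token maps to two dc1 hosts (one per rack) plus the single dc2 host
    return replica_map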
class LocalStrategy(ReplicationStrategy):
def __init__(self, options_map):
pass
def make_token_replica_map(self, token_to_host_owner, ring):
return {}
def export_for_schema(self):
"""
Returns a string version of these replication options which are
suitable for use in a CREATE KEYSPACE statement.
"""
return "{'class': 'LocalStrategy'}"
def __eq__(self, other):
return isinstance(other, LocalStrategy)
class KeyspaceMetadata(object):
"""
A representation of the schema for a single keyspace.
"""
name = None
""" The string name of the keyspace. """
durable_writes = True
"""
A boolean indicating whether durable writes are enabled for this keyspace
or not.
"""
replication_strategy = None
"""
A :class:`.ReplicationStrategy` subclass object.
"""
tables = None
"""
A map from table names to instances of :class:`~.TableMetadata`.
"""
indexes = None
"""
A dict mapping index names to :class:`.IndexMetadata` instances.
"""
user_types = None
"""
A map from user-defined type names to instances of :class:`~cassandra.metadata.UserType`.
.. versionadded:: 2.1.0
"""
functions = None
"""
A map from user-defined function signatures to instances of :class:`~cassandra.metadata.Function`.
.. versionadded:: 2.6.0
"""
aggregates = None
"""
A map from user-defined aggregate signatures to instances of :class:`~cassandra.metadata.Aggregate`.
.. versionadded:: 2.6.0
"""
views = None
"""
A dict mapping view names to :class:`.MaterializedViewMetadata` instances.
"""
virtual = False
"""
A boolean indicating if this is a virtual keyspace or not. Always ``False``
for clusters running Cassandra pre-4.0 and DSE pre-6.7 versions.
.. versionadded:: 3.15
"""
_exc_info = None
""" set if metadata parsing failed """
def __init__(self, name, durable_writes, strategy_class, strategy_options):
self.name = name
self.durable_writes = durable_writes
self.replication_strategy = ReplicationStrategy.create(strategy_class, strategy_options)
self.tables = {}
self.indexes = {}
self.user_types = {}
self.functions = {}
self.aggregates = {}
self.views = {}
def export_as_string(self):
"""
Returns a CQL query string that can be used to recreate the entire keyspace,
including user-defined types and tables.
"""
cql = "\n\n".join([self.as_cql_query() + ';'] +
self.user_type_strings() +
[f.export_as_string() for f in self.functions.values()] +
[a.export_as_string() for a in self.aggregates.values()] +
[t.export_as_string() for t in self.tables.values()])
if self._exc_info:
import traceback
ret = "/*\nWarning: Keyspace %s is incomplete because of an error processing metadata.\n" % \
(self.name)
for line in traceback.format_exception(*self._exc_info):
ret += line
ret += "\nApproximate structure, for reference:\n(this should not be used to reproduce this schema)\n\n%s\n*/" % cql
return ret
if self.virtual:
return ("/*\nWarning: Keyspace {ks} is a virtual keyspace and cannot be recreated with CQL.\n"
"Structure, for reference:*/\n"
"{cql}\n"
"").format(ks=self.name, cql=cql)
return cql
def as_cql_query(self):
"""
Returns a CQL query string that can be used to recreate just this keyspace,
not including user-defined types and tables.
"""
if self.virtual:
return "// VIRTUAL KEYSPACE {}".format(protect_name(self.name))
ret = "CREATE KEYSPACE %s WITH replication = %s " % (
protect_name(self.name),
self.replication_strategy.export_for_schema())
return ret + (' AND durable_writes = %s' % ("true" if self.durable_writes else "false"))
def user_type_strings(self):
user_type_strings = []
user_types = self.user_types.copy()
keys = sorted(user_types.keys())
for k in keys:
if k in user_types:
self.resolve_user_types(k, user_types, user_type_strings)
return user_type_strings
def resolve_user_types(self, key, user_types, user_type_strings):
user_type = user_types.pop(key)
for type_name in user_type.field_types:
for sub_type in types.cql_types_from_string(type_name):
if sub_type in user_types:
self.resolve_user_types(sub_type, user_types, user_type_strings)
user_type_strings.append(user_type.export_as_string())
def _add_table_metadata(self, table_metadata):
old_indexes = {}
old_meta = self.tables.get(table_metadata.name, None)
if old_meta:
# views are not queried along with the table, so they must be carried over to the new metadata
table_metadata.views = old_meta.views
# indexes will be updated with what is on the new metadata
old_indexes = old_meta.indexes
# note the intentional order: add before remove
# this makes sure the maps are never missing something that existed before this update
for index_name, index_metadata in six.iteritems(table_metadata.indexes):
self.indexes[index_name] = index_metadata
for index_name in (n for n in old_indexes if n not in table_metadata.indexes):
self.indexes.pop(index_name, None)
self.tables[table_metadata.name] = table_metadata
def _drop_table_metadata(self, table_name):
table_meta = self.tables.pop(table_name, None)
if table_meta:
for index_name in table_meta.indexes:
self.indexes.pop(index_name, None)
for view_name in table_meta.views:
self.views.pop(view_name, None)
return
# we can't tell table drops from views, so drop both
# (name is unique among them, within a keyspace)
view_meta = self.views.pop(table_name, None)
if view_meta:
try:
self.tables[view_meta.base_table_name].views.pop(table_name, None)
except KeyError:
pass
def _add_view_metadata(self, view_metadata):
try:
self.tables[view_metadata.base_table_name].views[view_metadata.name] = view_metadata
self.views[view_metadata.name] = view_metadata
except KeyError:
pass
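# A minimal sketch (not part of the driver's public API): as_cql_query() combines
# the keyspace name, the strategy's export_for_schema() output and durable_writes
# into a CREATE KEYSPACE statement; ReplicationStrategy.create() (defined earlier
# in this module) resolves the strategy class from its name.
def _keyspace_cql_sketch():
    ks = KeyspaceMetadata('app', True, 'SimpleStrategy', {'replication_factor': '3'})
    return ks.as_cql_query()
    # roughly: CREATE KEYSPACE app WITH replication =
    #   {'class': 'SimpleStrategy', 'replication_factor': '3'} AND durable_writes = true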
class UserType(object):
"""
A user defined type, as created by ``CREATE TYPE`` statements.
User-defined types were introduced in Cassandra 2.1.
.. versionadded:: 2.1.0
"""
keyspace = None
"""
The string name of the keyspace in which this type is defined.
"""
name = None
"""
The name of this type.
"""
field_names = None
"""
An ordered list of the names for each field in this user-defined type.
"""
field_types = None
"""
An ordered list of the types for each field in this user-defined type.
"""
def __init__(self, keyspace, name, field_names, field_types):
self.keyspace = keyspace
self.name = name
# non-frozen collections can return None
self.field_names = field_names or []
self.field_types = field_types or []
def as_cql_query(self, formatted=False):
"""
Returns a CQL query that can be used to recreate this type.
If `formatted` is set to :const:`True`, extra whitespace will
be added to make the query more readable.
"""
ret = "CREATE TYPE %s.%s (%s" % (
protect_name(self.keyspace),
protect_name(self.name),
"\n" if formatted else "")
if formatted:
field_join = ",\n"
padding = " "
else:
field_join = ", "
padding = ""
fields = []
for field_name, field_type in zip(self.field_names, self.field_types):
fields.append("%s %s" % (protect_name(field_name), field_type))
ret += field_join.join("%s%s" % (padding, field) for field in fields)
ret += "\n)" if formatted else ")"
return ret
def export_as_string(self):
return self.as_cql_query(formatted=True) + ';'
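# A minimal sketch (not part of the driver's public API): field names and types
# are zipped pairwise when a UserType is rendered back to CQL.
def _user_type_cql_sketch():
    udt = UserType('ks', 'address', ['street', 'zip_code'], ['text', 'int'])
    return udt.as_cql_query(formatted=False)
    # -> "CREATE TYPE ks.address (street text, zip_code int)"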
class Aggregate(object):
"""
A user defined aggregate function, as created by ``CREATE AGGREGATE`` statements.
Aggregate functions were introduced in Cassandra 2.2
.. versionadded:: 2.6.0
"""
keyspace = None
"""
The string name of the keyspace in which this aggregate is defined
"""
name = None
"""
The name of this aggregate
"""
argument_types = None
"""
An ordered list of the types for each argument to the aggregate
"""
final_func = None
"""
Name of a final function
"""
initial_condition = None
"""
Initial condition of the aggregate
"""
return_type = None
"""
Return type of the aggregate
"""
state_func = None
"""
Name of a state function
"""
state_type = None
"""
Type of the aggregate state
"""
deterministic = None
"""
Flag indicating if this function is guaranteed to produce the same result
for a particular input and state. This is available only with DSE >=6.0.
"""
def __init__(self, keyspace, name, argument_types, state_func,
state_type, final_func, initial_condition, return_type,
deterministic):
self.keyspace = keyspace
self.name = name
self.argument_types = argument_types
self.state_func = state_func
self.state_type = state_type
self.final_func = final_func
self.initial_condition = initial_condition
self.return_type = return_type
self.deterministic = deterministic
def as_cql_query(self, formatted=False):
"""
Returns a CQL query that can be used to recreate this aggregate.
If `formatted` is set to :const:`True`, extra whitespace will
be added to make the query more readable.
"""
sep = '\n ' if formatted else ' '
keyspace = protect_name(self.keyspace)
name = protect_name(self.name)
type_list = ', '.join([types.strip_frozen(arg_type) for arg_type in self.argument_types])
state_func = protect_name(self.state_func)
state_type = types.strip_frozen(self.state_type)
ret = "CREATE AGGREGATE %(keyspace)s.%(name)s(%(type_list)s)%(sep)s" \
"SFUNC %(state_func)s%(sep)s" \
"STYPE %(state_type)s" % locals()
ret += ''.join((sep, 'FINALFUNC ', protect_name(self.final_func))) if self.final_func else ''
ret += ''.join((sep, 'INITCOND ', self.initial_condition)) if self.initial_condition is not None else ''
ret += '{}DETERMINISTIC'.format(sep) if self.deterministic else ''
return ret
def export_as_string(self):
return self.as_cql_query(formatted=True) + ';'
@property
def signature(self):
return SignatureDescriptor.format_signature(self.name, self.argument_types)
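# A minimal sketch (not part of the driver's public API): as_cql_query() always
# emits the SFUNC/STYPE core and appends FINALFUNC and INITCOND only when set.
# The function and type names below are made up purely to show the shape.
def _aggregate_cql_sketch():
    agg = Aggregate('ks', 'my_avg', ['int'], 'avg_state', 'tuple<int, bigint>',
                    'avg_final', '(0, 0)', 'double', False)
    return agg.as_cql_query(formatted=False)
    # roughly: CREATE AGGREGATE ks.my_avg(int) SFUNC avg_state
    #   STYPE tuple<int, bigint> FINALFUNC avg_final INITCOND (0, 0)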
class Function(object):
"""
A user defined function, as created by ``CREATE FUNCTION`` statements.
User-defined functions were introduced in Cassandra 2.2
.. versionadded:: 2.6.0
"""
keyspace = None
"""
The string name of the keyspace in which this function is defined
"""
name = None
"""
The name of this function
"""
argument_types = None
"""
An ordered list of the types for each argument to the function
"""
argument_names = None
"""
An ordered list of the names of each argument to the function
"""
return_type = None
"""
Return type of the function
"""
language = None
"""
Language of the function body
"""
body = None
"""
Function body string
"""
called_on_null_input = None
"""
Flag indicating whether this function should be called for rows with null values
(convenience function to avoid handling nulls explicitly if the result will just be null)
"""
deterministic = None
"""
Flag indicating if this function is guaranteed to produce the same result
for a particular input. This is available only for DSE >=6.0.
"""
monotonic = None
"""
Flag indicating if this function is guaranteed to increase or decrease
monotonically on any of its arguments. This is available only for DSE >=6.0.
"""
monotonic_on = None
"""
A list containing the argument or arguments over which this function is
monotonic. This is available only for DSE >=6.0.
"""
def __init__(self, keyspace, name, argument_types, argument_names,
return_type, language, body, called_on_null_input,
deterministic, monotonic, monotonic_on):
self.keyspace = keyspace
self.name = name
self.argument_types = argument_types
# argument_types (frozen<list<>>) will always be a list
# argument_names is not frozen in C* < 3.0 and may return None
self.argument_names = argument_names or []
self.return_type = return_type
self.language = language
self.body = body
self.called_on_null_input = called_on_null_input
self.deterministic = deterministic
self.monotonic = monotonic
self.monotonic_on = monotonic_on
def as_cql_query(self, formatted=False):
"""
Returns a CQL query that can be used to recreate this function.
If `formatted` is set to :const:`True`, extra whitespace will
be added to make the query more readable.
"""
sep = '\n ' if formatted else ' '
keyspace = protect_name(self.keyspace)
name = protect_name(self.name)
arg_list = ', '.join(["%s %s" % (protect_name(n), types.strip_frozen(t))
for n, t in zip(self.argument_names, self.argument_types)])
typ = self.return_type
lang = self.language
body = self.body
on_null = "CALLED" if self.called_on_null_input else "RETURNS NULL"
deterministic_token = ('DETERMINISTIC{}'.format(sep)
if self.deterministic else
'')
monotonic_tokens = '' # default for nonmonotonic function
if self.monotonic:
# monotonic on all arguments; ignore self.monotonic_on
monotonic_tokens = 'MONOTONIC{}'.format(sep)
elif self.monotonic_on:
# if monotonic == False and monotonic_on is nonempty, we know that
# monotonicity was specified with MONOTONIC ON <arg>, so there's
# exactly 1 value there
monotonic_tokens = 'MONOTONIC ON {}{}'.format(self.monotonic_on[0],
sep)
return "CREATE FUNCTION %(keyspace)s.%(name)s(%(arg_list)s)%(sep)s" \
"%(on_null)s ON NULL INPUT%(sep)s" \
"RETURNS %(typ)s%(sep)s" \
"%(deterministic_token)s" \
"%(monotonic_tokens)s" \
"LANGUAGE %(lang)s%(sep)s" \
"AS $$%(body)s$$" % locals()
def export_as_string(self):
return self.as_cql_query(formatted=True) + ';'
@property
def signature(self):
return SignatureDescriptor.format_signature(self.name, self.argument_types)
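# A minimal sketch (not part of the driver's public API): as_cql_query() pairs
# argument names with argument types and chooses CALLED/RETURNS NULL from
# called_on_null_input; the java body below is a made-up one-liner.
def _function_cql_sketch():
    fn = Function('ks', 'plus_one', ['int'], ['x'], 'int', 'java',
                  'return x + 1;', True, False, False, ())
    return fn.as_cql_query(formatted=False)
    # roughly: CREATE FUNCTION ks.plus_one(x int) CALLED ON NULL INPUT
    #   RETURNS int LANGUAGE java AS $$return x + 1;$$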
class TableMetadata(object):
"""
A representation of the schema for a single table.
"""
keyspace_name = None
""" String name of this Table's keyspace """
name = None
""" The string name of the table. """
partition_key = None
"""
A list of :class:`.ColumnMetadata` instances representing the columns in
the partition key for this table. This will always hold at least one
column.
"""
clustering_key = None
"""
A list of :class:`.ColumnMetadata` instances representing the columns
in the clustering key for this table. These are all of the
:attr:`.primary_key` columns that are not in the :attr:`.partition_key`.
Note that a table may have no clustering keys, in which case this will
be an empty list.
"""
@property
def primary_key(self):
"""
A list of :class:`.ColumnMetadata` representing the components of
the primary key for this table.
"""
return self.partition_key + self.clustering_key
columns = None
"""
A dict mapping column names to :class:`.ColumnMetadata` instances.
"""
indexes = None
"""
A dict mapping index names to :class:`.IndexMetadata` instances.
"""
is_compact_storage = False
options = None
"""
A dict mapping table option names to their specific settings for this
table.
"""
compaction_options = {
"min_compaction_threshold": "min_threshold",
"max_compaction_threshold": "max_threshold",
"compaction_strategy_class": "class"}
triggers = None
"""
A dict mapping trigger names to :class:`.TriggerMetadata` instances.
"""
views = None
"""
A dict mapping view names to :class:`.MaterializedViewMetadata` instances.
"""
_exc_info = None
""" set if metadata parsing failed """
virtual = False
"""
A boolean indicating if this is a virtual table or not. Always ``False``
for clusters running Cassandra pre-4.0 and DSE pre-6.7 versions.
.. versionadded:: 3.15
"""
@property
def is_cql_compatible(self):
"""
A boolean indicating if this table can be represented as CQL in export
"""
if self.virtual:
return False
comparator = getattr(self, 'comparator', None)
if comparator:
# no compact storage with more than one column beyond PK if there
# are clustering columns
incompatible = (self.is_compact_storage and
len(self.columns) > len(self.primary_key) + 1 and
len(self.clustering_key) >= 1)
return not incompatible
return True
extensions = None
"""
Metadata describing configuration for table extensions
"""
def __init__(self, keyspace_name, name, partition_key=None, clustering_key=None, columns=None, triggers=None, options=None, virtual=False):
self.keyspace_name = keyspace_name
self.name = name
self.partition_key = [] if partition_key is None else partition_key
self.clustering_key = [] if clustering_key is None else clustering_key
self.columns = OrderedDict() if columns is None else columns
self.indexes = {}
self.options = {} if options is None else options
self.comparator = None
self.triggers = OrderedDict() if triggers is None else triggers
self.views = {}
self.virtual = virtual
def export_as_string(self):
"""
Returns a string of CQL queries that can be used to recreate this table
along with all indexes on it. The returned string is formatted to
be human readable.
"""
if self._exc_info:
import traceback
ret = "/*\nWarning: Table %s.%s is incomplete because of an error processing metadata.\n" % \
(self.keyspace_name, self.name)
for line in traceback.format_exception(*self._exc_info):
ret += line
ret += "\nApproximate structure, for reference:\n(this should not be used to reproduce this schema)\n\n%s\n*/" % self._all_as_cql()
elif not self.is_cql_compatible:
# If we can't produce this table with CQL, comment inline
ret = "/*\nWarning: Table %s.%s omitted because it has constructs not compatible with CQL (was created via legacy API).\n" % \
(self.keyspace_name, self.name)
ret += "\nApproximate structure, for reference:\n(this should not be used to reproduce this schema)\n\n%s\n*/" % self._all_as_cql()
elif self.virtual:
ret = ('/*\nWarning: Table {ks}.{tab} is a virtual table and cannot be recreated with CQL.\n'
'Structure, for reference:\n'
'{cql}\n*/').format(ks=self.keyspace_name, tab=self.name, cql=self._all_as_cql())
else:
ret = self._all_as_cql()
return ret
def _all_as_cql(self):
ret = self.as_cql_query(formatted=True)
ret += ";"
for index in self.indexes.values():
ret += "\n%s;" % index.as_cql_query()
for trigger_meta in self.triggers.values():
ret += "\n%s;" % (trigger_meta.as_cql_query(),)
for view_meta in self.views.values():
ret += "\n\n%s;" % (view_meta.as_cql_query(formatted=True),)
if self.extensions:
registry = _RegisteredExtensionType._extension_registry
for k in six.viewkeys(registry) & self.extensions: # no viewkeys on OrderedMapSerializeKey
ext = registry[k]
cql = ext.after_table_cql(self, k, self.extensions[k])
if cql:
ret += "\n\n%s" % (cql,)
return ret
def as_cql_query(self, formatted=False):
"""
Returns a CQL query that can be used to recreate this table (index
creations are not included). If `formatted` is set to :const:`True`,
extra whitespace will be added to make the query human readable.
"""
ret = "%s TABLE %s.%s (%s" % (
('VIRTUAL' if self.virtual else 'CREATE'),
protect_name(self.keyspace_name),
protect_name(self.name),
"\n" if formatted else "")
if formatted:
column_join = ",\n"
padding = " "
else:
column_join = ", "
padding = ""
columns = []
for col in self.columns.values():
columns.append("%s %s%s" % (protect_name(col.name), col.cql_type, ' static' if col.is_static else ''))
if len(self.partition_key) == 1 and not self.clustering_key:
columns[0] += " PRIMARY KEY"
ret += column_join.join("%s%s" % (padding, col) for col in columns)
# primary key
if len(self.partition_key) > 1 or self.clustering_key:
ret += "%s%sPRIMARY KEY (" % (column_join, padding)
if len(self.partition_key) > 1:
ret += "(%s)" % ", ".join(protect_name(col.name) for col in self.partition_key)
else:
ret += protect_name(self.partition_key[0].name)
if self.clustering_key:
ret += ", %s" % ", ".join(protect_name(col.name) for col in self.clustering_key)
ret += ")"
# properties
ret += "%s) WITH " % ("\n" if formatted else "")
ret += self._property_string(formatted, self.clustering_key, self.options, self.is_compact_storage)
return ret
@classmethod
def _property_string(cls, formatted, clustering_key, options_map, is_compact_storage=False):
properties = []
if is_compact_storage:
properties.append("COMPACT STORAGE")
if clustering_key:
cluster_str = "CLUSTERING ORDER BY "
inner = []
for col in clustering_key:
ordering = "DESC" if col.is_reversed else "ASC"
inner.append("%s %s" % (protect_name(col.name), ordering))
cluster_str += "(%s)" % ", ".join(inner)
properties.append(cluster_str)
properties.extend(cls._make_option_strings(options_map))
join_str = "\n AND " if formatted else " AND "
return join_str.join(properties)
@classmethod
def _make_option_strings(cls, options_map):
ret = []
options_copy = dict(options_map.items())
actual_options = json.loads(options_copy.pop('compaction_strategy_options', '{}'))
value = options_copy.pop("compaction_strategy_class", None)
actual_options.setdefault("class", value)
compaction_option_strings = ["'%s': '%s'" % (k, v) for k, v in actual_options.items()]
ret.append('compaction = {%s}' % ', '.join(compaction_option_strings))
for system_table_name in cls.compaction_options.keys():
options_copy.pop(system_table_name, None) # delete if present
options_copy.pop('compaction_strategy_option', None)
if not options_copy.get('compression'):
params = json.loads(options_copy.pop('compression_parameters', '{}'))
param_strings = ["'%s': '%s'" % (k, v) for k, v in params.items()]
ret.append('compression = {%s}' % ', '.join(param_strings))
for name, value in options_copy.items():
if value is not None:
if name == "comment":
value = value or ""
ret.append("%s = %s" % (name, protect_value(value)))
return list(sorted(ret))
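# A minimal sketch (not part of the driver's public API): building a tiny
# TableMetadata by hand and rendering it. ColumnMetadata is defined further down
# in this module, which is fine because the name is resolved when the sketch is
# called; the legacy compaction option gives _make_option_strings() something to
# translate.
def _table_cql_sketch():
    table = TableMetadata('ks', 'users',
                          options={'comment': 'user table',
                                   'compaction_strategy_class': 'SizeTieredCompactionStrategy'})
    id_col = ColumnMetadata(table, 'id', 'uuid')
    name_col = ColumnMetadata(table, 'name', 'text')
    table.columns['id'] = id_col
    table.columns['name'] = name_col
    table.partition_key.append(id_col)
    return table.as_cql_query()
    # roughly: CREATE TABLE ks.users (id uuid PRIMARY KEY, name text)
    #   WITH comment = 'user table' AND compaction = {...} AND compression = {}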
class TableExtensionInterface(object):
"""
Defines CQL/DDL for Cassandra table extensions.
"""
# limited API for now. Could be expanded as new extension types materialize -- "extend_option_strings", for example
@classmethod
def after_table_cql(cls, ext_key, ext_blob):
"""
Called to produce CQL/DDL to follow the table definition.
Should contain requisite terminating semicolon(s).
"""
pass
class _RegisteredExtensionType(type):
_extension_registry = {}
def __new__(mcs, name, bases, dct):
cls = super(_RegisteredExtensionType, mcs).__new__(mcs, name, bases, dct)
if name != 'RegisteredTableExtension':
mcs._extension_registry[cls.name] = cls
return cls
@six.add_metaclass(_RegisteredExtensionType)
class RegisteredTableExtension(TableExtensionInterface):
"""
Extending this class registers it by name (associated by key in the `system_schema.tables.extensions` map).
"""
name = None
"""
Name of the extension (key in the map)
"""
def protect_name(name):
return maybe_escape_name(name)
def protect_names(names):
return [protect_name(n) for n in names]
def protect_value(value):
if value is None:
return 'NULL'
if isinstance(value, (int, float, bool)):
return str(value).lower()
return "'%s'" % value.replace("'", "''")
valid_cql3_word_re = re.compile(r'^[a-z][0-9a-z_]*$')
def is_valid_name(name):
if name is None:
return False
if name.lower() in cql_keywords_reserved:
return False
return valid_cql3_word_re.match(name) is not None
def maybe_escape_name(name):
if is_valid_name(name):
return name
return escape_name(name)
def escape_name(name):
return '"%s"' % (name.replace('"', '""'),)
class ColumnMetadata(object):
"""
A representation of a single column in a table.
"""
table = None
""" The :class:`.TableMetadata` this column belongs to. """
name = None
""" The string name of this column. """
cql_type = None
"""
The CQL type for the column.
"""
is_static = False
"""
If this column is static (available in Cassandra 2.1+), this will
be :const:`True`, otherwise :const:`False`.
"""
is_reversed = False
"""
If this column is reversed (DESC) as in clustering order
"""
_cass_type = None
def __init__(self, table_metadata, column_name, cql_type, is_static=False, is_reversed=False):
self.table = table_metadata
self.name = column_name
self.cql_type = cql_type
self.is_static = is_static
self.is_reversed = is_reversed
def __str__(self):
return "%s %s" % (self.name, self.cql_type)
class IndexMetadata(object):
"""
A representation of a secondary index on a column.
"""
keyspace_name = None
""" A string name of the keyspace. """
table_name = None
""" A string name of the table this index is on. """
name = None
""" A string name for the index. """
kind = None
""" A string representing the kind of index (COMPOSITE, CUSTOM,...). """
index_options = {}
""" A dict of index options. """
def __init__(self, keyspace_name, table_name, index_name, kind, index_options):
self.keyspace_name = keyspace_name
self.table_name = table_name
self.name = index_name
self.kind = kind
self.index_options = index_options
def as_cql_query(self):
"""
Returns a CQL query that can be used to recreate this index.
"""
options = dict(self.index_options)
index_target = options.pop("target")
if self.kind != "CUSTOM":
return "CREATE INDEX %s ON %s.%s (%s)" % (
protect_name(self.name),
protect_name(self.keyspace_name),
protect_name(self.table_name),
index_target)
else:
class_name = options.pop("class_name")
ret = "CREATE CUSTOM INDEX %s ON %s.%s (%s) USING '%s'" % (
protect_name(self.name),
protect_name(self.keyspace_name),
protect_name(self.table_name),
index_target,
class_name)
if options:
# PYTHON-1008: `ret` will always be a unicode
opts_cql_encoded = _encoder.cql_encode_all_types(options, as_text_type=True)
ret += " WITH OPTIONS = %s" % opts_cql_encoded
return ret
def export_as_string(self):
"""
Returns a CQL query string that can be used to recreate this index.
"""
return self.as_cql_query() + ';'
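# A minimal sketch (not part of the driver's public API): for non-CUSTOM indexes,
# as_cql_query() simply wraps the 'target' option in a CREATE INDEX statement;
# CUSTOM indexes additionally emit USING and any remaining options.
def _index_cql_sketch():
    idx = IndexMetadata('ks', 'users', 'users_email_idx', 'COMPOSITES',
                        {'target': 'email'})
    return idx.as_cql_query()
    # -> "CREATE INDEX users_email_idx ON ks.users (email)"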
class TokenMap(object):
"""
Information about the layout of the ring.
"""
token_class = None
"""
A subclass of :class:`.Token`, depending on what partitioner the cluster uses.
"""
token_to_host_owner = None
"""
A map of :class:`.Token` objects to the :class:`.Host` that owns that token.
"""
tokens_to_hosts_by_ks = None
"""
A map of keyspace names to a nested map of :class:`.Token` objects to
sets of :class:`.Host` objects.
"""
ring = None
"""
An ordered list of :class:`.Token` instances in the ring.
"""
_metadata = None
def __init__(self, token_class, token_to_host_owner, all_tokens, metadata):
self.token_class = token_class
self.ring = all_tokens
self.token_to_host_owner = token_to_host_owner
self.tokens_to_hosts_by_ks = {}
self._metadata = metadata
self._rebuild_lock = RLock()
def rebuild_keyspace(self, keyspace, build_if_absent=False):
with self._rebuild_lock:
try:
current = self.tokens_to_hosts_by_ks.get(keyspace, None)
if (build_if_absent and current is None) or (not build_if_absent and current is not None):
ks_meta = self._metadata.keyspaces.get(keyspace)
if ks_meta:
replica_map = self.replica_map_for_keyspace(self._metadata.keyspaces[keyspace])
self.tokens_to_hosts_by_ks[keyspace] = replica_map
except Exception:
# should not happen normally, but we don't want to blow up queries because of unexpected meta state
# bypass until new map is generated
self.tokens_to_hosts_by_ks[keyspace] = {}
log.exception("Failed creating a token map for keyspace '%s' with %s. PLEASE REPORT THIS: https://datastax-oss.atlassian.net/projects/PYTHON", keyspace, self.token_to_host_owner)
def replica_map_for_keyspace(self, ks_metadata):
strategy = ks_metadata.replication_strategy
if strategy:
return strategy.make_token_replica_map(self.token_to_host_owner, self.ring)
else:
return None
def remove_keyspace(self, keyspace):
self.tokens_to_hosts_by_ks.pop(keyspace, None)
def get_replicas(self, keyspace, token):
"""
Get a set of :class:`.Host` instances representing all of the
replica nodes for a given :class:`.Token`.
"""
tokens_to_hosts = self.tokens_to_hosts_by_ks.get(keyspace, None)
if tokens_to_hosts is None:
self.rebuild_keyspace(keyspace, build_if_absent=True)
tokens_to_hosts = self.tokens_to_hosts_by_ks.get(keyspace, None)
if tokens_to_hosts:
# The values in self.ring correspond to the end of the
# token range up to and including the value listed.
point = bisect_left(self.ring, token)
if point == len(self.ring):
return tokens_to_hosts[self.ring[0]]
else:
return tokens_to_hosts[self.ring[point]]
return []
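# A minimal sketch (not part of the driver's public API) of the bisect lookup used
# in get_replicas() above: each ring entry owns the token range ending at (and
# including) its value, and lookups past the last entry wrap to the first.
# Plain integers stand in for Token objects.
def _ring_lookup_sketch(token, ring=(10, 20, 30)):
    point = bisect_left(ring, token)
    return ring[0] if point == len(ring) else ring[point]
    # _ring_lookup_sketch(25) -> 30; _ring_lookup_sketch(35) -> 10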
@total_ordering
class Token(object):
"""
Abstract class representing a token.
"""
def __init__(self, token):
self.value = token
@classmethod
def hash_fn(cls, key):
return key
@classmethod
def from_key(cls, key):
return cls(cls.hash_fn(key))
@classmethod
def from_string(cls, token_string):
raise NotImplementedError()
def __eq__(self, other):
return self.value == other.value
def __lt__(self, other):
return self.value < other.value
def __hash__(self):
return hash(self.value)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.value)
__str__ = __repr__
MIN_LONG = -(2 ** 63)
MAX_LONG = (2 ** 63) - 1
class NoMurmur3(Exception):
pass
class HashToken(Token):
@classmethod
def from_string(cls, token_string):
""" `token_string` should be the string representation from the server. """
# The hash partitioners just store the decimal value
return cls(int(token_string))
class Murmur3Token(HashToken):
"""
A token for ``Murmur3Partitioner``.
"""
@classmethod
def hash_fn(cls, key):
if murmur3 is not None:
h = int(murmur3(key))
return h if h != MIN_LONG else MAX_LONG
else:
raise NoMurmur3()
def __init__(self, token):
""" `token` is an int or string representing the token. """
self.value = int(token)
class MD5Token(HashToken):
"""
A token for ``RandomPartitioner``.
"""
@classmethod
def hash_fn(cls, key):
if isinstance(key, six.text_type):
key = key.encode('UTF-8')
return abs(varint_unpack(md5(key).digest()))
class BytesToken(Token):
"""
A token for ``ByteOrderedPartitioner``.
"""
@classmethod
def from_string(cls, token_string):
""" `token_string` should be the string representation from the server. """
# unhexlify works fine with unicode input in everything but pypy3, where it raises "TypeError: 'str' does not support the buffer interface"
if isinstance(token_string, six.text_type):
token_string = token_string.encode('ascii')
# The BOP stores a hex string
return cls(unhexlify(token_string))
class TriggerMetadata(object):
"""
A representation of a trigger for a table.
"""
table = None
""" The :class:`.TableMetadata` this trigger belongs to. """
name = None
""" The string name of this trigger. """
options = None
"""
A dict mapping trigger option names to their specific settings for this
table.
"""
def __init__(self, table_metadata, trigger_name, options=None):
self.table = table_metadata
self.name = trigger_name
self.options = options
def as_cql_query(self):
ret = "CREATE TRIGGER %s ON %s.%s USING %s" % (
protect_name(self.name),
protect_name(self.table.keyspace_name),
protect_name(self.table.name),
protect_value(self.options['class'])
)
return ret
def export_as_string(self):
return self.as_cql_query() + ';'
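# A minimal sketch (not part of the driver's public API): TriggerMetadata only
# needs the owning table's names and its 'class' option to rebuild the CREATE
# TRIGGER statement; the trigger class below is made up.
def _trigger_cql_sketch():
    table = TableMetadata('ks', 'users')
    trigger = TriggerMetadata(table, 'audit', {'class': 'com.example.AuditTrigger'})
    return trigger.as_cql_query()
    # -> "CREATE TRIGGER audit ON ks.users USING 'com.example.AuditTrigger'"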
class _SchemaParser(object):
def __init__(self, connection, timeout):
self.connection = connection
self.timeout = timeout
def _handle_results(self, success, result, expected_failures=tuple()):
"""
Given a bool and a ResultSet (the form returned per result from
Connection.wait_for_responses), return the rows as a list of dicts
(via dict_factory). Used to process results from asynchronous queries to system
tables.
``expected_failures`` will usually be used to allow callers to ignore
``InvalidRequest`` errors caused by a missing system keyspace. For
example, some DSE versions report a 4.X server version, but do not have
virtual tables. Thus, running against 4.X servers, SchemaParserV4 uses
expected_failures to make a best-effort attempt to read those
keyspaces, but treat them as empty if they're not found.
:param success: A boolean representing whether or not the query
succeeded
:param result: The resultset in question.
:param expected_failures: An Exception class or an iterable thereof. If the
query failed, but raised an instance of an expected failure class, this
will ignore the failure and return an empty list.
"""
if not success and isinstance(result, expected_failures):
return []
elif success:
return dict_factory(result.column_names, result.parsed_rows) if result else []
else:
raise result
def _query_build_row(self, query_string, build_func):
result = self._query_build_rows(query_string, build_func)
return result[0] if result else None
def _query_build_rows(self, query_string, build_func):
query = QueryMessage(query=query_string, consistency_level=ConsistencyLevel.ONE)
responses = self.connection.wait_for_responses((query), timeout=self.timeout, fail_on_error=False)
(success, response) = responses[0]
if success:
result = dict_factory(response.column_names, response.parsed_rows)
return [build_func(row) for row in result]
elif isinstance(response, InvalidRequest):
log.debug("user types table not found")
return []
else:
raise response
class SchemaParserV22(_SchemaParser):
"""
For C* 2.2+
"""
_SELECT_KEYSPACES = "SELECT * FROM system.schema_keyspaces"
_SELECT_COLUMN_FAMILIES = "SELECT * FROM system.schema_columnfamilies"
_SELECT_COLUMNS = "SELECT * FROM system.schema_columns"
_SELECT_TRIGGERS = "SELECT * FROM system.schema_triggers"
_SELECT_TYPES = "SELECT * FROM system.schema_usertypes"
_SELECT_FUNCTIONS = "SELECT * FROM system.schema_functions"
_SELECT_AGGREGATES = "SELECT * FROM system.schema_aggregates"
_table_name_col = 'columnfamily_name'
_function_agg_arument_type_col = 'signature'
recognized_table_options = (
"comment",
"read_repair_chance",
"dclocal_read_repair_chance", # kept to be safe, but see _build_table_options()
"local_read_repair_chance",
"replicate_on_write",
"gc_grace_seconds",
"bloom_filter_fp_chance",
"caching",
"compaction_strategy_class",
"compaction_strategy_options",
"min_compaction_threshold",
"max_compaction_threshold",
"compression_parameters",
"min_index_interval",
"max_index_interval",
"index_interval",
"speculative_retry",
"rows_per_partition_to_cache",
"memtable_flush_period_in_ms",
"populate_io_cache_on_flush",
"compression",
"default_time_to_live")
def __init__(self, connection, timeout):
super(SchemaParserV22, self).__init__(connection, timeout)
self.keyspaces_result = []
self.tables_result = []
self.columns_result = []
self.triggers_result = []
self.types_result = []
self.functions_result = []
self.aggregates_result = []
self.keyspace_table_rows = defaultdict(list)
self.keyspace_table_col_rows = defaultdict(lambda: defaultdict(list))
self.keyspace_type_rows = defaultdict(list)
self.keyspace_func_rows = defaultdict(list)
self.keyspace_agg_rows = defaultdict(list)
self.keyspace_table_trigger_rows = defaultdict(lambda: defaultdict(list))
def get_all_keyspaces(self):
self._query_all()
for row in self.keyspaces_result:
keyspace_meta = self._build_keyspace_metadata(row)
try:
for table_row in self.keyspace_table_rows.get(keyspace_meta.name, []):
table_meta = self._build_table_metadata(table_row)
keyspace_meta._add_table_metadata(table_meta)
for usertype_row in self.keyspace_type_rows.get(keyspace_meta.name, []):
usertype = self._build_user_type(usertype_row)
keyspace_meta.user_types[usertype.name] = usertype
for fn_row in self.keyspace_func_rows.get(keyspace_meta.name, []):
fn = self._build_function(fn_row)
keyspace_meta.functions[fn.signature] = fn
for agg_row in self.keyspace_agg_rows.get(keyspace_meta.name, []):
agg = self._build_aggregate(agg_row)
keyspace_meta.aggregates[agg.signature] = agg
except Exception:
log.exception("Error while parsing metadata for keyspace %s. Metadata model will be incomplete.", keyspace_meta.name)
keyspace_meta._exc_info = sys.exc_info()
yield keyspace_meta
def get_table(self, keyspaces, keyspace, table):
cl = ConsistencyLevel.ONE
where_clause = bind_params(" WHERE keyspace_name = %%s AND %s = %%s" % (self._table_name_col,), (keyspace, table), _encoder)
cf_query = QueryMessage(query=self._SELECT_COLUMN_FAMILIES + where_clause, consistency_level=cl)
col_query = QueryMessage(query=self._SELECT_COLUMNS + where_clause, consistency_level=cl)
triggers_query = QueryMessage(query=self._SELECT_TRIGGERS + where_clause, consistency_level=cl)
(cf_success, cf_result), (col_success, col_result), (triggers_success, triggers_result) \
= self.connection.wait_for_responses(cf_query, col_query, triggers_query, timeout=self.timeout, fail_on_error=False)
table_result = self._handle_results(cf_success, cf_result)
col_result = self._handle_results(col_success, col_result)
# the triggers table doesn't exist in C* 1.2
triggers_result = self._handle_results(triggers_success, triggers_result,
expected_failures=InvalidRequest)
if table_result:
return self._build_table_metadata(table_result[0], col_result, triggers_result)
def get_type(self, keyspaces, keyspace, type):
where_clause = bind_params(" WHERE keyspace_name = %s AND type_name = %s", (keyspace, type), _encoder)
return self._query_build_row(self._SELECT_TYPES + where_clause, self._build_user_type)
def get_types_map(self, keyspaces, keyspace):
where_clause = bind_params(" WHERE keyspace_name = %s", (keyspace,), _encoder)
types = self._query_build_rows(self._SELECT_TYPES + where_clause, self._build_user_type)
return dict((t.name, t) for t in types)
def get_function(self, keyspaces, keyspace, function):
where_clause = bind_params(" WHERE keyspace_name = %%s AND function_name = %%s AND %s = %%s" % (self._function_agg_arument_type_col,),
(keyspace, function.name, function.argument_types), _encoder)
return self._query_build_row(self._SELECT_FUNCTIONS + where_clause, self._build_function)
def get_aggregate(self, keyspaces, keyspace, aggregate):
where_clause = bind_params(" WHERE keyspace_name = %%s AND aggregate_name = %%s AND %s = %%s" % (self._function_agg_arument_type_col,),
(keyspace, aggregate.name, aggregate.argument_types), _encoder)
return self._query_build_row(self._SELECT_AGGREGATES + where_clause, self._build_aggregate)
def get_keyspace(self, keyspaces, keyspace):
where_clause = bind_params(" WHERE keyspace_name = %s", (keyspace,), _encoder)
return self._query_build_row(self._SELECT_KEYSPACES + where_clause, self._build_keyspace_metadata)
@classmethod
def _build_keyspace_metadata(cls, row):
try:
ksm = cls._build_keyspace_metadata_internal(row)
except Exception:
name = row["keyspace_name"]
ksm = KeyspaceMetadata(name, False, 'UNKNOWN', {})
ksm._exc_info = sys.exc_info() # capture exc_info before log because nose (test) logging clears it in certain circumstances
log.exception("Error while parsing metadata for keyspace %s row(%s)", name, row)
return ksm
@staticmethod
def _build_keyspace_metadata_internal(row):
name = row["keyspace_name"]
durable_writes = row["durable_writes"]
strategy_class = row["strategy_class"]
strategy_options = json.loads(row["strategy_options"])
return KeyspaceMetadata(name, durable_writes, strategy_class, strategy_options)
@classmethod
def _build_user_type(cls, usertype_row):
field_types = list(map(cls._schema_type_to_cql, usertype_row['field_types']))
return UserType(usertype_row['keyspace_name'], usertype_row['type_name'],
usertype_row['field_names'], field_types)
@classmethod
def _build_function(cls, function_row):
return_type = cls._schema_type_to_cql(function_row['return_type'])
deterministic = function_row.get('deterministic', False)
monotonic = function_row.get('monotonic', False)
monotonic_on = function_row.get('monotonic_on', ())
return Function(function_row['keyspace_name'], function_row['function_name'],
function_row[cls._function_agg_arument_type_col], function_row['argument_names'],
return_type, function_row['language'], function_row['body'],
function_row['called_on_null_input'],
deterministic, monotonic, monotonic_on)
@classmethod
def _build_aggregate(cls, aggregate_row):
cass_state_type = types.lookup_casstype(aggregate_row['state_type'])
initial_condition = aggregate_row['initcond']
if initial_condition is not None:
initial_condition = _encoder.cql_encode_all_types(cass_state_type.deserialize(initial_condition, 3))
state_type = _cql_from_cass_type(cass_state_type)
return_type = cls._schema_type_to_cql(aggregate_row['return_type'])
return Aggregate(aggregate_row['keyspace_name'], aggregate_row['aggregate_name'],
aggregate_row['signature'], aggregate_row['state_func'], state_type,
aggregate_row['final_func'], initial_condition, return_type,
aggregate_row.get('deterministic', False))
def _build_table_metadata(self, row, col_rows=None, trigger_rows=None):
keyspace_name = row["keyspace_name"]
cfname = row[self._table_name_col]
col_rows = col_rows or self.keyspace_table_col_rows[keyspace_name][cfname]
trigger_rows = trigger_rows or self.keyspace_table_trigger_rows[keyspace_name][cfname]
if not col_rows: # CASSANDRA-8487
log.warning("Building table metadata with no column meta for %s.%s",
keyspace_name, cfname)
table_meta = TableMetadata(keyspace_name, cfname)
try:
comparator = types.lookup_casstype(row["comparator"])
table_meta.comparator = comparator
is_dct_comparator = issubclass(comparator, types.DynamicCompositeType)
is_composite_comparator = issubclass(comparator, types.CompositeType)
column_name_types = comparator.subtypes if is_composite_comparator else (comparator,)
num_column_name_components = len(column_name_types)
last_col = column_name_types[-1]
column_aliases = row.get("column_aliases", None)
clustering_rows = [r for r in col_rows
if r.get('type', None) == "clustering_key"]
if len(clustering_rows) > 1:
clustering_rows = sorted(clustering_rows, key=lambda row: row.get('component_index'))
if column_aliases is not None:
column_aliases = json.loads(column_aliases)
if not column_aliases: # json load failed or column_aliases empty PYTHON-562
column_aliases = [r.get('column_name') for r in clustering_rows]
if is_composite_comparator:
if issubclass(last_col, types.ColumnToCollectionType):
# collections
is_compact = False
has_value = False
clustering_size = num_column_name_components - 2
elif (len(column_aliases) == num_column_name_components - 1 and
issubclass(last_col, types.UTF8Type)):
# aliases?
is_compact = False
has_value = False
clustering_size = num_column_name_components - 1
else:
# compact table
is_compact = True
has_value = column_aliases or not col_rows
clustering_size = num_column_name_components
# Some thrift tables define names in composite types (see PYTHON-192)
if not column_aliases and hasattr(comparator, 'fieldnames'):
column_aliases = filter(None, comparator.fieldnames)
else:
is_compact = True
if column_aliases or not col_rows or is_dct_comparator:
has_value = True
clustering_size = num_column_name_components
else:
has_value = False
clustering_size = 0
# partition key
partition_rows = [r for r in col_rows
if r.get('type', None) == "partition_key"]
if len(partition_rows) > 1:
partition_rows = sorted(partition_rows, key=lambda row: row.get('component_index'))
key_aliases = row.get("key_aliases")
if key_aliases is not None:
key_aliases = json.loads(key_aliases) if key_aliases else []
else:
# In 2.0+, we can use the 'type' column. In 3.0+, we have to use it.
key_aliases = [r.get('column_name') for r in partition_rows]
key_validator = row.get("key_validator")
if key_validator is not None:
key_type = types.lookup_casstype(key_validator)
key_types = key_type.subtypes if issubclass(key_type, types.CompositeType) else [key_type]
else:
key_types = [types.lookup_casstype(r.get('validator')) for r in partition_rows]
for i, col_type in enumerate(key_types):
if len(key_aliases) > i:
column_name = key_aliases[i]
elif i == 0:
column_name = "key"
else:
column_name = "key%d" % i
col = ColumnMetadata(table_meta, column_name, col_type.cql_parameterized_type())
table_meta.columns[column_name] = col
table_meta.partition_key.append(col)
# clustering key
for i in range(clustering_size):
if len(column_aliases) > i:
column_name = column_aliases[i]
else:
column_name = "column%d" % (i + 1)
data_type = column_name_types[i]
cql_type = _cql_from_cass_type(data_type)
is_reversed = types.is_reversed_casstype(data_type)
col = ColumnMetadata(table_meta, column_name, cql_type, is_reversed=is_reversed)
table_meta.columns[column_name] = col
table_meta.clustering_key.append(col)
# value alias (if present)
if has_value:
value_alias_rows = [r for r in col_rows
if r.get('type', None) == "compact_value"]
if not key_aliases: # TODO are we checking the right thing here?
value_alias = "value"
else:
value_alias = row.get("value_alias", None)
if value_alias is None and value_alias_rows: # CASSANDRA-8487
# In 2.0+, we can use the 'type' column. In 3.0+, we have to use it.
value_alias = value_alias_rows[0].get('column_name')
default_validator = row.get("default_validator")
if default_validator:
validator = types.lookup_casstype(default_validator)
else:
if value_alias_rows: # CASSANDRA-8487
validator = types.lookup_casstype(value_alias_rows[0].get('validator'))
cql_type = _cql_from_cass_type(validator)
col = ColumnMetadata(table_meta, value_alias, cql_type)
if value_alias: # CASSANDRA-8487
table_meta.columns[value_alias] = col
# other normal columns
for col_row in col_rows:
column_meta = self._build_column_metadata(table_meta, col_row)
if column_meta.name is not None:
table_meta.columns[column_meta.name] = column_meta
index_meta = self._build_index_metadata(column_meta, col_row)
if index_meta:
table_meta.indexes[index_meta.name] = index_meta
for trigger_row in trigger_rows:
trigger_meta = self._build_trigger_metadata(table_meta, trigger_row)
table_meta.triggers[trigger_meta.name] = trigger_meta
table_meta.options = self._build_table_options(row)
table_meta.is_compact_storage = is_compact
except Exception:
table_meta._exc_info = sys.exc_info()
log.exception("Error while parsing metadata for table %s.%s row(%s) columns(%s)", keyspace_name, cfname, row, col_rows)
return table_meta
def _build_table_options(self, row):
""" Setup the mostly-non-schema table options, like caching settings """
options = dict((o, row.get(o)) for o in self.recognized_table_options if o in row)
# the option name when creating tables is "dclocal_read_repair_chance",
# but the column name in system.schema_columnfamilies is
# "local_read_repair_chance". We'll store this as dclocal_read_repair_chance,
# since that's probably what users are expecting (and we need it for the
# CREATE TABLE statement anyway).
if "local_read_repair_chance" in options:
val = options.pop("local_read_repair_chance")
options["dclocal_read_repair_chance"] = val
return options
@classmethod
def _build_column_metadata(cls, table_metadata, row):
name = row["column_name"]
type_string = row["validator"]
data_type = types.lookup_casstype(type_string)
cql_type = _cql_from_cass_type(data_type)
is_static = row.get("type", None) == "static"
is_reversed = types.is_reversed_casstype(data_type)
column_meta = ColumnMetadata(table_metadata, name, cql_type, is_static, is_reversed)
column_meta._cass_type = data_type
return column_meta
@staticmethod
def _build_index_metadata(column_metadata, row):
index_name = row.get("index_name")
kind = row.get("index_type")
if index_name or kind:
options = row.get("index_options")
options = json.loads(options) if options else {}
options = options or {} # if the json parsed to None, init empty dict
# generate a CQL index identity string
target = protect_name(column_metadata.name)
if kind != "CUSTOM":
if "index_keys" in options:
target = 'keys(%s)' % (target,)
elif "index_values" in options:
# don't use any "function" for collection values
pass
else:
# it might be a "full" index on a frozen collection, but
# we need to check the data type to verify that, because
# there is no special index option for full-collection
# indexes.
data_type = column_metadata._cass_type
collection_types = ('map', 'set', 'list')
if data_type.typename == "frozen" and data_type.subtypes[0].typename in collection_types:
# no index option for full-collection index
target = 'full(%s)' % (target,)
options['target'] = target
return IndexMetadata(column_metadata.table.keyspace_name, column_metadata.table.name, index_name, kind, options)
@staticmethod
def _build_trigger_metadata(table_metadata, row):
name = row["trigger_name"]
options = row["trigger_options"]
trigger_meta = TriggerMetadata(table_metadata, name, options)
return trigger_meta
def _query_all(self):
cl = ConsistencyLevel.ONE
queries = [
QueryMessage(query=self._SELECT_KEYSPACES, consistency_level=cl),
QueryMessage(query=self._SELECT_COLUMN_FAMILIES, consistency_level=cl),
QueryMessage(query=self._SELECT_COLUMNS, consistency_level=cl),
QueryMessage(query=self._SELECT_TYPES, consistency_level=cl),
QueryMessage(query=self._SELECT_FUNCTIONS, consistency_level=cl),
QueryMessage(query=self._SELECT_AGGREGATES, consistency_level=cl),
QueryMessage(query=self._SELECT_TRIGGERS, consistency_level=cl)
]
((ks_success, ks_result),
(table_success, table_result),
(col_success, col_result),
(types_success, types_result),
(functions_success, functions_result),
(aggregates_success, aggregates_result),
(triggers_success, triggers_result)) = (
self.connection.wait_for_responses(*queries, timeout=self.timeout,
fail_on_error=False)
)
self.keyspaces_result = self._handle_results(ks_success, ks_result)
self.tables_result = self._handle_results(table_success, table_result)
self.columns_result = self._handle_results(col_success, col_result)
# if we're connected to Cassandra < 2.0, the triggers table will not exist
if triggers_success:
self.triggers_result = dict_factory(triggers_result.column_names, triggers_result.parsed_rows)
else:
if isinstance(triggers_result, InvalidRequest):
log.debug("triggers table not found")
elif isinstance(triggers_result, Unauthorized):
log.warning("this version of Cassandra does not allow access to schema_triggers metadata with authorization enabled (CASSANDRA-7967); "
"The driver will operate normally, but will not reflect triggers in the local metadata model, or schema strings.")
else:
raise triggers_result
# if we're connected to Cassandra < 2.1, the usertypes table will not exist
if types_success:
self.types_result = dict_factory(types_result.column_names, types_result.parsed_rows)
else:
if isinstance(types_result, InvalidRequest):
log.debug("user types table not found")
self.types_result = {}
else:
raise types_result
# functions were introduced in Cassandra 2.2
if functions_success:
self.functions_result = dict_factory(functions_result.column_names, functions_result.parsed_rows)
else:
if isinstance(functions_result, InvalidRequest):
log.debug("user functions table not found")
else:
raise functions_result
# aggregates were introduced in Cassandra 2.2
if aggregates_success:
self.aggregates_result = dict_factory(aggregates_result.column_names, aggregates_result.parsed_rows)
else:
if isinstance(aggregates_result, InvalidRequest):
log.debug("user aggregates table not found")
else:
raise aggregates_result
self._aggregate_results()
def _aggregate_results(self):
m = self.keyspace_table_rows
for row in self.tables_result:
m[row["keyspace_name"]].append(row)
m = self.keyspace_table_col_rows
for row in self.columns_result:
ksname = row["keyspace_name"]
cfname = row[self._table_name_col]
m[ksname][cfname].append(row)
m = self.keyspace_type_rows
for row in self.types_result:
m[row["keyspace_name"]].append(row)
m = self.keyspace_func_rows
for row in self.functions_result:
m[row["keyspace_name"]].append(row)
m = self.keyspace_agg_rows
for row in self.aggregates_result:
m[row["keyspace_name"]].append(row)
m = self.keyspace_table_trigger_rows
for row in self.triggers_result:
ksname = row["keyspace_name"]
cfname = row[self._table_name_col]
m[ksname][cfname].append(row)
@staticmethod
def _schema_type_to_cql(type_string):
cass_type = types.lookup_casstype(type_string)
return _cql_from_cass_type(cass_type)
class SchemaParserV3(SchemaParserV22):
"""
For C* 3.0+
"""
_SELECT_KEYSPACES = "SELECT * FROM system_schema.keyspaces"
_SELECT_TABLES = "SELECT * FROM system_schema.tables"
_SELECT_COLUMNS = "SELECT * FROM system_schema.columns"
_SELECT_INDEXES = "SELECT * FROM system_schema.indexes"
_SELECT_TRIGGERS = "SELECT * FROM system_schema.triggers"
_SELECT_TYPES = "SELECT * FROM system_schema.types"
_SELECT_FUNCTIONS = "SELECT * FROM system_schema.functions"
_SELECT_AGGREGATES = "SELECT * FROM system_schema.aggregates"
_SELECT_VIEWS = "SELECT * FROM system_schema.views"
_table_name_col = 'table_name'
_function_agg_arument_type_col = 'argument_types'
recognized_table_options = (
'bloom_filter_fp_chance',
'caching',
'cdc',
'comment',
'compaction',
'compression',
'crc_check_chance',
'dclocal_read_repair_chance',
'default_time_to_live',
'gc_grace_seconds',
'max_index_interval',
'memtable_flush_period_in_ms',
'min_index_interval',
'read_repair_chance',
'speculative_retry')
def __init__(self, connection, timeout):
super(SchemaParserV3, self).__init__(connection, timeout)
self.indexes_result = []
self.keyspace_table_index_rows = defaultdict(lambda: defaultdict(list))
self.keyspace_view_rows = defaultdict(list)
def get_all_keyspaces(self):
for keyspace_meta in super(SchemaParserV3, self).get_all_keyspaces():
for row in self.keyspace_view_rows[keyspace_meta.name]:
view_meta = self._build_view_metadata(row)
keyspace_meta._add_view_metadata(view_meta)
yield keyspace_meta
def get_table(self, keyspaces, keyspace, table):
cl = ConsistencyLevel.ONE
where_clause = bind_params(" WHERE keyspace_name = %%s AND %s = %%s" % (self._table_name_col), (keyspace, table), _encoder)
cf_query = QueryMessage(query=self._SELECT_TABLES + where_clause, consistency_level=cl)
col_query = QueryMessage(query=self._SELECT_COLUMNS + where_clause, consistency_level=cl)
indexes_query = QueryMessage(query=self._SELECT_INDEXES + where_clause, consistency_level=cl)
triggers_query = QueryMessage(query=self._SELECT_TRIGGERS + where_clause, consistency_level=cl)
# in protocol v4 we don't know if this event is a view or a table, so we look for both
where_clause = bind_params(" WHERE keyspace_name = %s AND view_name = %s", (keyspace, table), _encoder)
view_query = QueryMessage(query=self._SELECT_VIEWS + where_clause,
consistency_level=cl)
((cf_success, cf_result), (col_success, col_result),
(indexes_success, indexes_result), (triggers_success, triggers_result),
(view_success, view_result)) = (
self.connection.wait_for_responses(
cf_query, col_query, indexes_query, triggers_query,
view_query, timeout=self.timeout, fail_on_error=False)
)
table_result = self._handle_results(cf_success, cf_result)
col_result = self._handle_results(col_success, col_result)
if table_result:
indexes_result = self._handle_results(indexes_success, indexes_result)
triggers_result = self._handle_results(triggers_success, triggers_result)
return self._build_table_metadata(table_result[0], col_result, triggers_result, indexes_result)
view_result = self._handle_results(view_success, view_result)
if view_result:
return self._build_view_metadata(view_result[0], col_result)
@staticmethod
def _build_keyspace_metadata_internal(row):
name = row["keyspace_name"]
durable_writes = row["durable_writes"]
strategy_options = dict(row["replication"])
strategy_class = strategy_options.pop("class")
return KeyspaceMetadata(name, durable_writes, strategy_class, strategy_options)
@staticmethod
def _build_aggregate(aggregate_row):
return Aggregate(aggregate_row['keyspace_name'], aggregate_row['aggregate_name'],
aggregate_row['argument_types'], aggregate_row['state_func'], aggregate_row['state_type'],
aggregate_row['final_func'], aggregate_row['initcond'], aggregate_row['return_type'],
aggregate_row.get('deterministic', False))
def _build_table_metadata(self, row, col_rows=None, trigger_rows=None, index_rows=None, virtual=False):
keyspace_name = row["keyspace_name"]
table_name = row[self._table_name_col]
col_rows = col_rows or self.keyspace_table_col_rows[keyspace_name][table_name]
trigger_rows = trigger_rows or self.keyspace_table_trigger_rows[keyspace_name][table_name]
index_rows = index_rows or self.keyspace_table_index_rows[keyspace_name][table_name]
table_meta = TableMetadataV3(keyspace_name, table_name, virtual=virtual)
try:
table_meta.options = self._build_table_options(row)
flags = row.get('flags', set())
if flags:
is_dense = 'dense' in flags
compact_static = not is_dense and 'super' not in flags and 'compound' not in flags
table_meta.is_compact_storage = is_dense or 'super' in flags or 'compound' not in flags
elif virtual:
compact_static = False
table_meta.is_compact_storage = False
is_dense = False
else:
compact_static = True
table_meta.is_compact_storage = True
is_dense = False
self._build_table_columns(table_meta, col_rows, compact_static, is_dense, virtual)
for trigger_row in trigger_rows:
trigger_meta = self._build_trigger_metadata(table_meta, trigger_row)
table_meta.triggers[trigger_meta.name] = trigger_meta
for index_row in index_rows:
index_meta = self._build_index_metadata(table_meta, index_row)
if index_meta:
table_meta.indexes[index_meta.name] = index_meta
table_meta.extensions = row.get('extensions', {})
except Exception:
table_meta._exc_info = sys.exc_info()
log.exception("Error while parsing metadata for table %s.%s row(%s) columns(%s)", keyspace_name, table_name, row, col_rows)
return table_meta
def _build_table_options(self, row):
""" Setup the mostly-non-schema table options, like caching settings """
return dict((o, row.get(o)) for o in self.recognized_table_options if o in row)
def _build_table_columns(self, meta, col_rows, compact_static=False, is_dense=False, virtual=False):
# partition key
partition_rows = [r for r in col_rows
if r.get('kind', None) == "partition_key"]
if len(partition_rows) > 1:
partition_rows = sorted(partition_rows, key=lambda row: row.get('position'))
for r in partition_rows:
# we have to add meta here (and not in the later loop) because TableMetadata.columns is an
# OrderedDict, and CQL export assumes the primary key columns were inserted first, in order
column_meta = self._build_column_metadata(meta, r)
meta.columns[column_meta.name] = column_meta
meta.partition_key.append(meta.columns[r.get('column_name')])
# clustering key
if not compact_static:
clustering_rows = [r for r in col_rows
if r.get('kind', None) == "clustering"]
if len(clustering_rows) > 1:
clustering_rows = sorted(clustering_rows, key=lambda row: row.get('position'))
for r in clustering_rows:
column_meta = self._build_column_metadata(meta, r)
meta.columns[column_meta.name] = column_meta
meta.clustering_key.append(meta.columns[r.get('column_name')])
for col_row in (r for r in col_rows
if r.get('kind', None) not in ('partition_key', 'clustering_key')):
column_meta = self._build_column_metadata(meta, col_row)
if is_dense and column_meta.cql_type == types.cql_empty_type:
continue
if compact_static and not column_meta.is_static:
# for compact static tables, we omit the clustering key and value, and only add the logical columns.
# They are marked not static so that it generates appropriate CQL
continue
if compact_static:
column_meta.is_static = False
meta.columns[column_meta.name] = column_meta
def _build_view_metadata(self, row, col_rows=None):
keyspace_name = row["keyspace_name"]
view_name = row["view_name"]
base_table_name = row["base_table_name"]
include_all_columns = row["include_all_columns"]
where_clause = row["where_clause"]
col_rows = col_rows or self.keyspace_table_col_rows[keyspace_name][view_name]
view_meta = MaterializedViewMetadata(keyspace_name, view_name, base_table_name,
include_all_columns, where_clause, self._build_table_options(row))
self._build_table_columns(view_meta, col_rows)
view_meta.extensions = row.get('extensions', {})
return view_meta
@staticmethod
def _build_column_metadata(table_metadata, row):
name = row["column_name"]
cql_type = row["type"]
is_static = row.get("kind", None) == "static"
is_reversed = row["clustering_order"].upper() == "DESC"
column_meta = ColumnMetadata(table_metadata, name, cql_type, is_static, is_reversed)
return column_meta
@staticmethod
def _build_index_metadata(table_metadata, row):
index_name = row.get("index_name")
kind = row.get("kind")
if index_name or kind:
index_options = row.get("options")
return IndexMetadata(table_metadata.keyspace_name, table_metadata.name, index_name, kind, index_options)
else:
return None
@staticmethod
def _build_trigger_metadata(table_metadata, row):
name = row["trigger_name"]
options = row["options"]
trigger_meta = TriggerMetadata(table_metadata, name, options)
return trigger_meta
def _query_all(self):
cl = ConsistencyLevel.ONE
queries = [
QueryMessage(query=self._SELECT_KEYSPACES, consistency_level=cl),
QueryMessage(query=self._SELECT_TABLES, consistency_level=cl),
QueryMessage(query=self._SELECT_COLUMNS, consistency_level=cl),
QueryMessage(query=self._SELECT_TYPES, consistency_level=cl),
QueryMessage(query=self._SELECT_FUNCTIONS, consistency_level=cl),
QueryMessage(query=self._SELECT_AGGREGATES, consistency_level=cl),
QueryMessage(query=self._SELECT_TRIGGERS, consistency_level=cl),
QueryMessage(query=self._SELECT_INDEXES, consistency_level=cl),
QueryMessage(query=self._SELECT_VIEWS, consistency_level=cl)
]
((ks_success, ks_result),
(table_success, table_result),
(col_success, col_result),
(types_success, types_result),
(functions_success, functions_result),
(aggregates_success, aggregates_result),
(triggers_success, triggers_result),
(indexes_success, indexes_result),
(views_success, views_result)) = self.connection.wait_for_responses(
*queries, timeout=self.timeout, fail_on_error=False
)
self.keyspaces_result = self._handle_results(ks_success, ks_result)
self.tables_result = self._handle_results(table_success, table_result)
self.columns_result = self._handle_results(col_success, col_result)
self.triggers_result = self._handle_results(triggers_success, triggers_result)
self.types_result = self._handle_results(types_success, types_result)
self.functions_result = self._handle_results(functions_success, functions_result)
self.aggregates_result = self._handle_results(aggregates_success, aggregates_result)
self.indexes_result = self._handle_results(indexes_success, indexes_result)
self.views_result = self._handle_results(views_success, views_result)
self._aggregate_results()
def _aggregate_results(self):
super(SchemaParserV3, self)._aggregate_results()
m = self.keyspace_table_index_rows
for row in self.indexes_result:
ksname = row["keyspace_name"]
cfname = row[self._table_name_col]
m[ksname][cfname].append(row)
m = self.keyspace_view_rows
for row in self.views_result:
m[row["keyspace_name"]].append(row)
@staticmethod
def _schema_type_to_cql(type_string):
return type_string
class SchemaParserDSE60(SchemaParserV3):
"""
For DSE 6.0+
"""
recognized_table_options = (SchemaParserV3.recognized_table_options +
("nodesync",))
class SchemaParserV4(SchemaParserV3):
recognized_table_options = (
'additional_write_policy',
'bloom_filter_fp_chance',
'caching',
'cdc',
'comment',
'compaction',
'compression',
'crc_check_chance',
'default_time_to_live',
'gc_grace_seconds',
'max_index_interval',
'memtable_flush_period_in_ms',
'min_index_interval',
'read_repair',
'speculative_retry')
_SELECT_VIRTUAL_KEYSPACES = 'SELECT * from system_virtual_schema.keyspaces'
_SELECT_VIRTUAL_TABLES = 'SELECT * from system_virtual_schema.tables'
_SELECT_VIRTUAL_COLUMNS = 'SELECT * from system_virtual_schema.columns'
def __init__(self, connection, timeout):
super(SchemaParserV4, self).__init__(connection, timeout)
self.virtual_keyspaces_rows = defaultdict(list)
self.virtual_tables_rows = defaultdict(list)
self.virtual_columns_rows = defaultdict(lambda: defaultdict(list))
def _query_all(self):
cl = ConsistencyLevel.ONE
# todo: this duplicates V3; we should find a way for _query_all methods
# to extend each other.
queries = [
# copied from V3
QueryMessage(query=self._SELECT_KEYSPACES, consistency_level=cl),
QueryMessage(query=self._SELECT_TABLES, consistency_level=cl),
QueryMessage(query=self._SELECT_COLUMNS, consistency_level=cl),
QueryMessage(query=self._SELECT_TYPES, consistency_level=cl),
QueryMessage(query=self._SELECT_FUNCTIONS, consistency_level=cl),
QueryMessage(query=self._SELECT_AGGREGATES, consistency_level=cl),
QueryMessage(query=self._SELECT_TRIGGERS, consistency_level=cl),
QueryMessage(query=self._SELECT_INDEXES, consistency_level=cl),
QueryMessage(query=self._SELECT_VIEWS, consistency_level=cl),
# V4-only queries
QueryMessage(query=self._SELECT_VIRTUAL_KEYSPACES, consistency_level=cl),
QueryMessage(query=self._SELECT_VIRTUAL_TABLES, consistency_level=cl),
QueryMessage(query=self._SELECT_VIRTUAL_COLUMNS, consistency_level=cl)
]
responses = self.connection.wait_for_responses(
*queries, timeout=self.timeout, fail_on_error=False)
(
# copied from V3
(ks_success, ks_result),
(table_success, table_result),
(col_success, col_result),
(types_success, types_result),
(functions_success, functions_result),
(aggregates_success, aggregates_result),
(triggers_success, triggers_result),
(indexes_success, indexes_result),
(views_success, views_result),
# V4-only responses
(virtual_ks_success, virtual_ks_result),
(virtual_table_success, virtual_table_result),
(virtual_column_success, virtual_column_result)
) = responses
# copied from V3
self.keyspaces_result = self._handle_results(ks_success, ks_result)
self.tables_result = self._handle_results(table_success, table_result)
self.columns_result = self._handle_results(col_success, col_result)
self.triggers_result = self._handle_results(triggers_success, triggers_result)
self.types_result = self._handle_results(types_success, types_result)
self.functions_result = self._handle_results(functions_success, functions_result)
self.aggregates_result = self._handle_results(aggregates_success, aggregates_result)
self.indexes_result = self._handle_results(indexes_success, indexes_result)
self.views_result = self._handle_results(views_success, views_result)
# V4-only results
# These tables don't exist in some DSE versions reporting 4.X so we can
# ignore them if we got an error
self.virtual_keyspaces_result = self._handle_results(
virtual_ks_success, virtual_ks_result,
expected_failures=InvalidRequest
)
self.virtual_tables_result = self._handle_results(
virtual_table_success, virtual_table_result,
expected_failures=InvalidRequest
)
self.virtual_columns_result = self._handle_results(
virtual_column_success, virtual_column_result,
expected_failures=InvalidRequest
)
self._aggregate_results()
def _aggregate_results(self):
super(SchemaParserV4, self)._aggregate_results()
m = self.virtual_tables_rows
for row in self.virtual_tables_result:
m[row["keyspace_name"]].append(row)
m = self.virtual_columns_rows
for row in self.virtual_columns_result:
ks_name = row['keyspace_name']
tab_name = row[self._table_name_col]
m[ks_name][tab_name].append(row)
def get_all_keyspaces(self):
for x in super(SchemaParserV4, self).get_all_keyspaces():
yield x
for row in self.virtual_keyspaces_result:
ks_name = row['keyspace_name']
keyspace_meta = self._build_keyspace_metadata(row)
keyspace_meta.virtual = True
for table_row in self.virtual_tables_rows.get(ks_name, []):
table_name = table_row[self._table_name_col]
col_rows = self.virtual_columns_rows[ks_name][table_name]
keyspace_meta._add_table_metadata(
self._build_table_metadata(table_row,
col_rows=col_rows,
virtual=True)
)
yield keyspace_meta
@staticmethod
def _build_keyspace_metadata_internal(row):
        # necessary fields that aren't in virtual keyspaces
row["durable_writes"] = row.get("durable_writes", None)
row["replication"] = row.get("replication", {})
row["replication"]["class"] = row["replication"].get("class", None)
return super(SchemaParserV4, SchemaParserV4)._build_keyspace_metadata_internal(row)
class SchemaParserDSE67(SchemaParserV4):
"""
For DSE 6.7+
"""
recognized_table_options = (SchemaParserV4.recognized_table_options +
("nodesync",))
class TableMetadataV3(TableMetadata):
"""
For C* 3.0+. `option_maps` take a superset of map names, so if nothing
changes structurally, new option maps can just be appended to the list.
"""
compaction_options = {}
option_maps = [
'compaction', 'compression', 'caching',
'nodesync' # added DSE 6.0
]
@property
def is_cql_compatible(self):
return True
@classmethod
def _make_option_strings(cls, options_map):
ret = []
options_copy = dict(options_map.items())
for option in cls.option_maps:
value = options_copy.get(option)
if isinstance(value, Mapping):
del options_copy[option]
params = ("'%s': '%s'" % (k, v) for k, v in value.items())
ret.append("%s = {%s}" % (option, ', '.join(params)))
for name, value in options_copy.items():
if value is not None:
if name == "comment":
value = value or ""
ret.append("%s = %s" % (name, protect_value(value)))
return list(sorted(ret))
class MaterializedViewMetadata(object):
"""
A representation of a materialized view on a table
"""
keyspace_name = None
""" A string name of the view."""
name = None
""" A string name of the view."""
base_table_name = None
""" A string name of the base table for this view."""
partition_key = None
"""
A list of :class:`.ColumnMetadata` instances representing the columns in
the partition key for this view. This will always hold at least one
column.
"""
clustering_key = None
"""
A list of :class:`.ColumnMetadata` instances representing the columns
in the clustering key for this view.
Note that a table may have no clustering keys, in which case this will
be an empty list.
"""
columns = None
"""
A dict mapping column names to :class:`.ColumnMetadata` instances.
"""
include_all_columns = None
""" A flag indicating whether the view was created AS SELECT * """
where_clause = None
""" String WHERE clause for the view select statement. From server metadata """
options = None
"""
A dict mapping table option names to their specific settings for this
view.
"""
extensions = None
"""
Metadata describing configuration for table extensions
"""
def __init__(self, keyspace_name, view_name, base_table_name, include_all_columns, where_clause, options):
self.keyspace_name = keyspace_name
self.name = view_name
self.base_table_name = base_table_name
self.partition_key = []
self.clustering_key = []
self.columns = OrderedDict()
self.include_all_columns = include_all_columns
self.where_clause = where_clause
self.options = options or {}
def as_cql_query(self, formatted=False):
"""
        Returns a CQL query that can be used to recreate this materialized view.
If `formatted` is set to :const:`True`, extra whitespace will
be added to make the query more readable.
"""
sep = '\n ' if formatted else ' '
keyspace = protect_name(self.keyspace_name)
name = protect_name(self.name)
selected_cols = '*' if self.include_all_columns else ', '.join(protect_name(col.name) for col in self.columns.values())
base_table = protect_name(self.base_table_name)
where_clause = self.where_clause
part_key = ', '.join(protect_name(col.name) for col in self.partition_key)
if len(self.partition_key) > 1:
pk = "((%s)" % part_key
else:
pk = "(%s" % part_key
if self.clustering_key:
pk += ", %s" % ', '.join(protect_name(col.name) for col in self.clustering_key)
pk += ")"
properties = TableMetadataV3._property_string(formatted, self.clustering_key, self.options)
ret = ("CREATE MATERIALIZED VIEW %(keyspace)s.%(name)s AS%(sep)s"
"SELECT %(selected_cols)s%(sep)s"
"FROM %(keyspace)s.%(base_table)s%(sep)s"
"WHERE %(where_clause)s%(sep)s"
"PRIMARY KEY %(pk)s%(sep)s"
"WITH %(properties)s") % locals()
if self.extensions:
registry = _RegisteredExtensionType._extension_registry
for k in six.viewkeys(registry) & self.extensions: # no viewkeys on OrderedMapSerializeKey
ext = registry[k]
cql = ext.after_table_cql(self, k, self.extensions[k])
if cql:
ret += "\n\n%s" % (cql,)
return ret
def export_as_string(self):
return self.as_cql_query(formatted=True) + ";"
def get_schema_parser(connection, server_version, dse_version, timeout):
version = Version(server_version)
if dse_version:
v = Version(dse_version)
if v >= Version('6.7.0'):
return SchemaParserDSE67(connection, timeout)
elif v >= Version('6.0.0'):
return SchemaParserDSE60(connection, timeout)
if version >= Version('4-a'):
return SchemaParserV4(connection, timeout)
elif version >= Version('3.0.0'):
return SchemaParserV3(connection, timeout)
else:
# we could further specialize by version. Right now just refactoring the
# multi-version parser we have as of C* 2.2.0rc1.
return SchemaParserV22(connection, timeout)
def _cql_from_cass_type(cass_type):
"""
A string representation of the type for this column, such as "varchar"
or "map<string, int>".
"""
if issubclass(cass_type, types.ReversedType):
return cass_type.subtypes[0].cql_parameterized_type()
else:
return cass_type.cql_parameterized_type()
class RLACTableExtension(RegisteredTableExtension):
name = "DSE_RLACA"
@classmethod
def after_table_cql(cls, table_meta, ext_key, ext_blob):
return "RESTRICT ROWS ON %s.%s USING %s;" % (protect_name(table_meta.keyspace_name),
protect_name(table_meta.name),
protect_name(ext_blob.decode('utf-8')))
NO_VALID_REPLICA = object()
def group_keys_by_replica(session, keyspace, table, keys):
"""
Returns a :class:`dict` with the keys grouped per host. This can be
used to more accurately group by IN clause or to batch the keys per host.
If a valid replica is not found for a particular key it will be grouped under
:class:`~.NO_VALID_REPLICA`
Example usage::
result = group_keys_by_replica(
session, "system", "peers",
(("127.0.0.1", ), ("127.0.0.2", ))
)
"""
cluster = session.cluster
partition_keys = cluster.metadata.keyspaces[keyspace].tables[table].partition_key
serializers = list(types._cqltypes[partition_key.cql_type] for partition_key in partition_keys)
keys_per_host = defaultdict(list)
distance = cluster._default_load_balancing_policy.distance
for key in keys:
serialized_key = [serializer.serialize(pk, cluster.protocol_version)
for serializer, pk in zip(serializers, key)]
if len(serialized_key) == 1:
routing_key = serialized_key[0]
else:
routing_key = b"".join(struct.pack(">H%dsB" % len(p), len(p), p, 0) for p in serialized_key)
all_replicas = cluster.metadata.get_replicas(keyspace, routing_key)
# First check if there are local replicas
valid_replicas = [host for host in all_replicas if
host.is_up and distance(host) == HostDistance.LOCAL]
if not valid_replicas:
valid_replicas = [host for host in all_replicas if host.is_up]
if valid_replicas:
keys_per_host[random.choice(valid_replicas)].append(key)
else:
# We will group under this statement all the keys for which
# we haven't found a valid replica
keys_per_host[NO_VALID_REPLICA].append(key)
return dict(keys_per_host)
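# A small sketch of consuming the grouping (names as in the docstring example):
#
#   groups = group_keys_by_replica(session, "system", "peers", keys)
#   unroutable = groups.pop(NO_VALID_REPLICA, [])
#   for host, host_keys in groups.items():
#       ...  # e.g. issue one IN-clause query or one batch per host
#
# Keys left under NO_VALID_REPLICA had no live replica at grouping time.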
|
from __future__ import print_function
from __future__ import division
|
# Author: Denys Makogon
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glanceclient.v2 import client as glanceclient
from keystoneauth1 import loading
from keystoneauth1 import session
from keystoneclient import client as keystoneclient
from novaclient import client as novaclient
from neutronclient.v2_0 import client as neutronclient
class OpenStackClients(object):
__keystone = None
__nova = None
__neutron = None
__glance = None
def __password_session_setup(self, node):
creds = node.runtime_properties['auth_properties']
if 'region_name' in creds:
del creds['region_name']
loader = loading.get_plugin_loader('password')
auth = loader.load_from_options(**creds)
sess = session.Session(auth=auth)
return sess
def keystone(self, node):
if self.__keystone is None:
self.__keystone = keystoneclient.Client(**node.properties)
self.__keystone.authenticate()
return self.__keystone
def nova(self, node):
if self.__nova is None:
version = node.properties['compute_api_version']
use_connection_pool = node.properties['use_connection_pool']
self.__nova = novaclient.Client(
version, session=self.__password_session_setup(node),
connection_pool=use_connection_pool)
return self.__nova
def neutron(self, node):
if self.__neutron is None:
self.__neutron = neutronclient.Client(
session=self.__password_session_setup(node))
return self.__neutron
def glance(self, node):
if self.__glance is None:
self.__glance = glanceclient.Client(
session=self.__password_session_setup(node))
return self.__glance
openstack = OpenStackClients()
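# Usage sketch (hypothetical `node` object carrying Keystone credentials in
# node.properties / node.runtime_properties['auth_properties']):
#
#   servers = openstack.nova(node).servers.list()
#   networks = openstack.neutron(node).list_networks()
#
# Clients are created lazily on first access and cached on this module-level instance.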
|
from abc import abstractmethod
from .apr_fetcher import APRFetcher
from typing import Dict, List, Union, Any
from .dapp_apr_fetcher import DappAPRFetcher
from .utils.utils import (
calculate_lp_token_price,
get_block_average_time,
get_token_price_from_dexs,
open_contract,
usdt_address,
platform_name_mapping,
decimals_mapping,
symbol_mapping
)
class MasterchefAPRFetcher(DappAPRFetcher):
"""
    Interface for APR fetchers backed by a Masterchef-style staking contract
"""
@abstractmethod
def masterchef_address(self):
raise NotImplementedError()
@abstractmethod
def dapp_token_address_field(self):
raise NotImplementedError()
@abstractmethod
def dapp_token_per_block_or_per_second_field(self, per_block: bool) -> str:
raise NotImplementedError()
@abstractmethod
    def _total_staked(self, pool_index, pool_info):
        raise NotImplementedError()
    @abstractmethod
    def _pool_address(self, pool_index, pool_info):
        raise NotImplementedError()
    @abstractmethod
    def _alloc_point(self, pool_index, pool_info):
raise NotImplementedError()
def dapp_token_address(self, web3) -> str:
masterchef_contract = open_contract(self._web3, self._blockchain, self.masterchef_address())
return getattr(masterchef_contract.functions, self.dapp_token_address_field())().call()
def dapp_pools_infos(self, web3) -> List[Dict[str, Union[str, float]]]:
masterchef_contract = open_contract(self._web3, self._blockchain, self.masterchef_address())
d = []
for i in range(masterchef_contract.functions.poolLength().call()):
pool_info = masterchef_contract.functions.poolInfo(i).call()
d.append({
"total_staked": self._total_staked(i, pool_info),
"pool_address": self._pool_address(i, pool_info),
"alloc_point": self._alloc_point(i, pool_info),
})
return d
def dapp_token_per_year(self, web3) -> float:
field_per_second = self.dapp_token_per_block_or_per_second_field(per_block=False)
masterchef_contract = open_contract(self._web3, self._blockchain, self.masterchef_address())
token_contract = open_contract(web3, self._blockchain, self.dapp_token_address(web3))
decimals = token_contract.functions.decimals().call()
if field_per_second is None or field_per_second == "":
average_time_per_block_seconds = get_block_average_time(web3, span=100)
block_per_seconds = 1.0 / average_time_per_block_seconds
block_per_year = block_per_seconds * 3600 * 24 * 365
            token_per_block = getattr(masterchef_contract.functions, self.dapp_token_per_block_or_per_second_field(per_block=True))().call()
annual_token_emission = block_per_year * (token_per_block/(10**decimals))
else:
annual_token_emission = getattr(masterchef_contract.functions, field_per_second)().call() * 10**(-decimals) * 3600 * 24 * 365
return annual_token_emission
def dapp_token_total_alloc(self, web3) -> int:
total_alloc = sum([p["alloc_point"] for p in self.dapp_pools_infos(web3)])
return total_alloc
def dapp_token_price(self, web3) -> float:
return get_token_price_from_dexs(web3, self._blockchain, self.dapp_token_address(web3))
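# A minimal sketch of a concrete fetcher. The contract address, function names
# and poolInfo tuple layout below are hypothetical placeholders, not a real
# dapp; they only illustrate how the abstract hooks are meant to be filled in.
class ExampleMasterchefAPRFetcher(MasterchefAPRFetcher):
    def masterchef_address(self):
        return "0x0000000000000000000000000000000000000000"  # placeholder address
    def dapp_token_address_field(self):
        return "rewardToken"  # view function returning the reward token address
    def dapp_token_per_block_or_per_second_field(self, per_block: bool) -> str:
        # this hypothetical contract defines its emission per block only
        return "rewardPerBlock" if per_block else ""
    def _total_staked(self, pool_index, pool_info):
        return pool_info[-1]  # assumed last slot of the poolInfo tuple
    def _pool_address(self, pool_index, pool_info):
        return pool_info[0]  # assumed first slot: staked LP token address
    def _alloc_point(self, pool_index, pool_info):
        return pool_info[1]  # assumed second slot: allocation points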
|
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
QUIZZES_URL = reverse('questionary:quiz-list')
class PublicQuizzesApiTests(TestCase):
"""Test the publicly available tags API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""Test that login required for retrieving quizzes"""
res = self.client.get(QUIZZES_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
|
###############################################################################
# Author: CallMeCCLemon
# Date: 2019
# Copyright: 2019 Thomas Littlejohn (@CallMeCCLemon) - Modified BSD License
###############################################################################
from enum import Enum
from PythonApp.pillar.MessageClient import MessageClient
from PythonApp.pillar.PillarMessageTransformer import PillarMessageTransformer
from PythonApp.qc_serial.SerialDao import SerialDao
from PythonApp.qc_serial.SerialUtil import SerialUtil
from PythonApp.qc_serial.model.HeaderMessage import HeaderMessage
from PythonApp.qc_serial.model.OpCode import OpCode
from PythonApp.qc_serial.model.PayloadMessage import PayloadMessage
from PythonApp.util.Config import Config
class States(Enum):
DISCONNECTED = 0
CONNECTED = 1
class SerialStateMachine:
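    """
    Two-state serial protocol driver: while DISCONNECTED it sends HELO headers
    and waits for an ACK; once CONNECTED it issues a DUMPQ request and, when a
    DUMPA response arrives, forwards the payload to the message queue and stops.
    """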
def __init__(self, serial_dao: SerialDao):
self.active_state = States.DISCONNECTED
self.config = Config()
self.states = {
States.DISCONNECTED: self.disconnected,
States.CONNECTED: self.connected,
}
self.serial_dao = serial_dao
self.message_client = MessageClient()
self.header_message_length = 11
self.done = False
def run(self):
while not self.done:
self.states[self.active_state]()
def disconnected(self):
        # Send HELO messages, waiting for an ACK.
hello_message = HeaderMessage(
OpCode.HELO,
0,
int(self.config.get_master_config_value("PillarID")),
0)
self.serial_dao.write(hello_message.to_serial_payload())
message = self.serial_dao.read(self.header_message_length)
try:
SerialUtil.validate_message_header(message)
except TimeoutError as ex:
return
except ValueError as ex:
print(ex)
return
header_message = HeaderMessage.build_header_object(message[1:])
if header_message.opcode == OpCode.ACK:
print("Received ACK! Now connected to badge {}!".format(header_message.from_id))
self.active_state = States.CONNECTED
else:
print("Received unknown message! Skipping..")
def connected(self):
# Send DUMPQ messages waiting for a DUMPA.
dump_q_message = HeaderMessage(
OpCode.DUMPQ,
1,
int(self.config.get_master_config_value("PillarID")),
0)
dump_q_payload = PayloadMessage(int(self.config.get_master_config_value("PillarType")))
print("Sending dump Q message!")
print("Dump Q Header: {}".format(dump_q_message.to_serial_payload(dump_q_payload)))
self.serial_dao.write(dump_q_message.to_serial_payload(dump_q_payload))
print("Dump q payload: {}".format(dump_q_payload.to_serial_payload()))
self.serial_dao.write_no_sync(dump_q_payload.to_serial_payload())
message = self.serial_dao.read(self.header_message_length)
try:
SerialUtil.validate_message_header(message)
header_message = HeaderMessage.build_header_object(message[1:])
if header_message.opcode == OpCode.DUMPA:
print("Received DUMPA! Sending update to cloud!")
message = self.serial_dao.read(header_message.payload_len)
payload_message = PayloadMessage.build_payload_object(message)
pillar_message = PillarMessageTransformer\
.transform_serial_message_to_pillar_message(header_message, payload_message)
self.message_client.send_message_to_queue(pillar_message)
self.done = True
else:
print("Unexpected message type!")
except TimeoutError as ex:
print(ex)
except ValueError as ex:
print(ex)
self.active_state = States.DISCONNECTED
|
from __future__ import unicode_literals
import Queue
import datetime
import errno
import gettext
import itertools
import json
import locale
import os
import subprocess
import sys
import threading
import wx
import openslides
from openslides.utils.main import (
detect_openslides_type,
filesystem2unicode,
unicode2filesystem,
get_default_user_data_path,
get_port,
PortableDirNotWritable,
)
# NOTE: django's translation module can't be used here since it requires
# a defined settings module
_translations = gettext.NullTranslations()
_ = lambda text: _translations.ugettext(text)
ungettext = lambda msg1, msg2, n: _translations.ungettext(msg1, msg2, n)
def get_data_path(*args):
path = filesystem2unicode(__file__)
return os.path.join(os.path.dirname(path), "data", *args)
class RunCmdEvent(wx.PyCommandEvent):
def __init__(self, evt_type, evt_id):
super(RunCmdEvent, self).__init__(evt_type, evt_id)
self.running = False
self.exitcode = None
EVT_RUN_CMD_ID = wx.NewEventType()
EVT_RUN_CMD = wx.PyEventBinder(EVT_RUN_CMD_ID, 1)
class RunCommandControl(wx.Panel):
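    """
    Panel that runs ``python -m openslides <args>`` in a child process, reads
    its output on a background thread into a queue, and periodically flushes
    that queue into a read-only text control via a wx.Timer.
    """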
UPDATE_INTERVAL = 500
def __init__(self, parent):
super(RunCommandControl, self).__init__(parent)
self.child_process = None
self.output_queue = Queue.Queue()
self.output_read_thread = None
self.canceled = False
self.output_mutex = threading.RLock()
vbox = wx.BoxSizer(wx.VERTICAL)
self.te_output = wx.TextCtrl(
self, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.HSCROLL)
vbox.Add(self.te_output, 1, wx.EXPAND)
self.update_timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.on_update_timer, self.update_timer)
self.SetSizerAndFit(vbox)
def _read_output(self):
while True:
# NOTE: don't use iterator interface since it uses an
# internal buffer and we don't see output in a timely fashion
line = self.child_process.stdout.readline()
if not line:
break
self.output_queue.put(line)
def is_alive(self):
if self.child_process is None:
return False
return self.child_process.poll() is None
def run_command(self, *args):
if self.is_alive():
raise ValueError("already running a command")
cmd = [sys.executable, "-u", "-m", "openslides"]
cmd.extend(args)
# XXX: subprocess on windows only handles byte strings
# with python3 this will hopefully no longer be the case
cmd = [unicode2filesystem(x) for x in cmd]
creationflags = getattr(subprocess, "CREATE_NEW_PROCESS_GROUP", 0)
self.child_process = subprocess.Popen(
cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, creationflags=creationflags)
self.child_process.stdin.close()
self.output_read_thread = threading.Thread(target=self._read_output)
self.output_read_thread.start()
self.update_timer.Start(self.UPDATE_INTERVAL)
evt = RunCmdEvent(EVT_RUN_CMD_ID, self.GetId())
evt.running = True
self.GetEventHandler().ProcessEvent(evt)
def cancel_command(self):
if not self.is_alive():
return
# TODO: try sigint first, then get more aggressive if user insists
self.child_process.kill()
self.canceled = True
def on_update_timer(self, evt):
is_alive = self.is_alive()
if not is_alive:
# join thread to make sure everything was read
self.output_read_thread.join()
self.output_read_thread = None
for line_no in itertools.count():
try:
data = self.output_queue.get(block=False)
except Queue.Empty:
break
else:
# XXX: check whether django uses utf-8 or locale for
                # its CLI output
text = data.decode("utf-8", errors="replace")
with self.output_mutex:
self.te_output.AppendText(text)
# avoid waiting too long here if child is still alive
if is_alive and line_no > 10:
break
if not is_alive:
exitcode = self.child_process.returncode
self.update_timer.Stop()
self.child_process = None
evt = RunCmdEvent(EVT_RUN_CMD_ID, self.GetId())
evt.running = False
evt.exitcode = exitcode
self.GetEventHandler().ProcessEvent(evt)
def append_message(self, text, newline="\n"):
with self.output_mutex:
self.te_output.AppendText(text + newline)
class SettingsDialog(wx.Dialog):
def __init__(self, parent):
super(SettingsDialog, self).__init__(parent, wx.ID_ANY, _("Settings"))
grid = wx.GridBagSizer(5, 5)
row = 0
lb_host = wx.StaticText(self, label=_("&Host:"))
grid.Add(lb_host, pos=(row, 0))
self.tc_host = wx.TextCtrl(self)
grid.Add(self.tc_host, pos=(row, 1), flag=wx.EXPAND)
row += 1
lb_port = wx.StaticText(self, label=_("&Port:"))
grid.Add(lb_port, pos=(row, 0))
self.tc_port = wx.TextCtrl(self)
grid.Add(self.tc_port, pos=(row, 1), flag=wx.EXPAND)
row += 1
sizer = self.CreateButtonSizer(wx.OK | wx.CANCEL)
if not sizer is None:
grid.Add((0, 0), pos=(row, 0), span=(1, 2))
row += 1
grid.Add(sizer, pos=(row, 0), span=(1, 2))
box = wx.BoxSizer(wx.VERTICAL)
box.Add(
grid, flag=wx.EXPAND | wx.ALL | wx.ALIGN_CENTER_VERTICAL,
border=5, proportion=1)
self.SetSizerAndFit(box)
@property
def host(self):
return self.tc_host.GetValue()
@host.setter
def host(self, host):
self.tc_host.SetValue(host)
@property
def port(self):
return self.tc_port.GetValue()
@port.setter
def port(self, port):
self.tc_port.SetValue(port)
class BackupSettingsDialog(wx.Dialog):
# NOTE: keep order in sync with _update_interval_choices()
_INTERVAL_UNITS = ["second", "minute", "hour"]
def __init__(self, parent):
super(BackupSettingsDialog, self).__init__(
parent, wx.ID_ANY, _("Database backup"))
self._interval_units = {}
grid = wx.GridBagSizer(5, 5)
row = 0
self.cb_backup = wx.CheckBox(
self, label=_("&Regularly backup database"))
self.cb_backup.SetValue(True)
self.cb_backup.Bind(wx.EVT_CHECKBOX, self.on_backup_checked)
grid.Add(self.cb_backup, pos=(row, 0), span=(1, 3))
row += 1
lb_dest = wx.StaticText(self, label=_("&Destination:"))
grid.Add(lb_dest, pos=(row, 0))
style = wx.FLP_SAVE | wx.FLP_USE_TEXTCTRL
self.fp_dest = wx.FilePickerCtrl(self, style=style)
grid.Add(self.fp_dest, pos=(row, 1), span=(1, 2), flag=wx.EXPAND)
row += 1
lb_interval = wx.StaticText(self, label=_("&Every"))
grid.Add(lb_interval, pos=(row, 0))
self.sb_interval = wx.SpinCtrl(self, min=1, initial=1)
self.sb_interval.Bind(wx.EVT_SPINCTRL, self.on_interval_changed)
grid.Add(self.sb_interval, pos=(row, 1))
self.ch_interval_unit = wx.Choice(self)
grid.Add(self.ch_interval_unit, pos=(row, 2))
row += 1
grid.AddGrowableCol(1)
sizer = self.CreateButtonSizer(wx.OK | wx.CANCEL)
if not sizer is None:
grid.Add((0, 0), pos=(row, 0), span=(1, 3))
row += 1
grid.Add(sizer, pos=(row, 0), span=(1, 3))
box = wx.BoxSizer(wx.VERTICAL)
box.Add(
grid, flag=wx.EXPAND | wx.ALL | wx.ALIGN_CENTER_VERTICAL,
border=5, proportion=1)
self.SetSizerAndFit(box)
self._update_interval_choices()
self._update_backup_enabled()
@property
def backupdb_enabled(self):
return self.cb_backup.GetValue()
@backupdb_enabled.setter
def backupdb_enabled(self, enabled):
self.cb_backup.SetValue(enabled)
self._update_backup_enabled()
@property
def backupdb_destination(self):
return self.fp_dest.GetPath()
@backupdb_destination.setter
def backupdb_destination(self, path):
self.fp_dest.SetPath(path)
@property
def interval(self):
return self.sb_interval.GetValue()
@interval.setter
def interval(self, value):
self.sb_interval.SetValue(value)
self._update_interval_choices()
@property
def interval_unit(self):
return self._INTERVAL_UNITS[self.ch_interval_unit.GetSelection()]
@interval_unit.setter
def interval_unit(self, unit):
try:
idx = self._INTERVAL_UNITS.index(unit)
        except ValueError:
raise ValueError("Unknown unit {0}".format(unit))
self.ch_interval_unit.SetSelection(idx)
def _update_interval_choices(self):
count = self.sb_interval.GetValue()
choices = [
ungettext("second", "seconds", count),
ungettext("minute", "minutes", count),
ungettext("hour", "hours", count),
]
current = self.ch_interval_unit.GetSelection()
if current == wx.NOT_FOUND:
current = 2 # default to hour
self.ch_interval_unit.Clear()
self.ch_interval_unit.AppendItems(choices)
self.ch_interval_unit.SetSelection(current)
def _update_backup_enabled(self):
checked = self.cb_backup.IsChecked()
self.fp_dest.Enable(checked)
self.sb_interval.Enable(checked)
self.ch_interval_unit.Enable(checked)
def on_backup_checked(self, evt):
self._update_backup_enabled()
def on_interval_changed(self, evt):
self._update_interval_choices()
# TODO: validate settings on close (e.g. non-empty path if backup is
# enabled)
class MainWindow(wx.Frame):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent, title="OpenSlides")
icons = wx.IconBundleFromFile(
get_data_path("openslides.ico"),
wx.BITMAP_TYPE_ICO)
self.SetIcons(icons)
self.server_running = False
self.gui_settings_path = None
self.gui_initialized = False
self.backupdb_enabled = False
self.backupdb_destination = ""
self.backupdb_interval = 15
self.backupdb_interval_unit = "minute"
self.last_backup = None
self.backup_timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.on_backup_timer, self.backup_timer)
spacing = 5
panel = wx.Panel(self)
grid = wx.GridBagSizer(spacing, spacing)
# logo & about button
logo_box = wx.BoxSizer(wx.HORIZONTAL)
grid.Add(logo_box, pos=(0, 0), flag=wx.EXPAND)
row = 0
fp = get_data_path("openslides-logo_wide.png")
with open(fp, "rb") as f:
logo_wide_bmp = wx.ImageFromStream(f).ConvertToBitmap()
logo_wide = wx.StaticBitmap(panel, wx.ID_ANY, logo_wide_bmp)
logo_box.AddSpacer(2 * spacing)
logo_box.Add(logo_wide)
logo_box.AddStretchSpacer()
version_str = _("Version {0}").format(openslides.get_version())
lb_version = wx.StaticText(panel, label=version_str)
font = lb_version.GetFont()
font.SetPointSize(8)
lb_version.SetFont(font)
logo_box.Add(lb_version, flag=wx.ALIGN_CENTER_VERTICAL)
self.bt_about = wx.Button(panel, label=_("&About..."))
self.bt_about.Bind(wx.EVT_BUTTON, self.on_about_clicked)
grid.Add(self.bt_about, pos=(row, 1), flag=wx.ALIGN_CENTER_VERTICAL)
row += 1
grid.Add((0, spacing), pos=(row, 0), span=(1, 2))
row += 1
# server settings
server_settings = wx.StaticBox(panel, wx.ID_ANY, _("Server Settings"))
server_box = wx.StaticBoxSizer(server_settings, wx.VERTICAL)
grid.Add(server_box, pos=(row, 0), flag=wx.EXPAND)
self._host = None
self._port = None
hbox = wx.BoxSizer(wx.HORIZONTAL)
server_box.Add(hbox, flag=wx.EXPAND)
self.lb_host = wx.StaticText(panel)
hbox.Add(self.lb_host, flag=wx.ALIGN_CENTER_VERTICAL)
hbox.AddStretchSpacer()
self.lb_port = wx.StaticText(panel)
hbox.Add(self.lb_port, flag=wx.ALIGN_CENTER_VERTICAL)
hbox.AddStretchSpacer()
self.bt_settings = wx.Button(panel, label=_("S&ettings..."))
self.bt_settings.Bind(wx.EVT_BUTTON, self.on_settings_clicked)
hbox.Add(self.bt_settings)
server_box.AddSpacer(spacing)
self.cb_start_browser = wx.CheckBox(
panel, label=_("Automatically open &browser"))
self.cb_start_browser.SetValue(True)
server_box.Add(self.cb_start_browser)
server_box.AddStretchSpacer()
server_box.AddSpacer(spacing)
self.bt_server = wx.Button(panel, label=_("&Start server"))
self.bt_server.Bind(wx.EVT_BUTTON, self.on_start_server_clicked)
server_box.Add(self.bt_server, flag=wx.EXPAND)
self.host = "0.0.0.0"
self.port = unicode(get_port(self.host, 80))
# "action" buttons
action_vbox = wx.BoxSizer(wx.VERTICAL)
action_vbox.AddSpacer(3 * spacing)
grid.Add(action_vbox, pos=(row, 1))
self.bt_backup = wx.Button(panel, label=_("&Backup database..."))
self.bt_backup.Bind(wx.EVT_BUTTON, self.on_backup_clicked)
action_vbox.Add(self.bt_backup)
action_vbox.AddSpacer(spacing)
self.bt_sync_db = wx.Button(panel, label=_("S&ync database"))
self.bt_sync_db.Bind(wx.EVT_BUTTON, self.on_syncdb_clicked)
action_vbox.Add(self.bt_sync_db)
action_vbox.AddSpacer(spacing)
self.bt_reset_admin = wx.Button(panel, label=_("&Reset admin"))
self.bt_reset_admin.Bind(wx.EVT_BUTTON, self.on_reset_admin_clicked)
action_vbox.Add(self.bt_reset_admin)
row += 1
# command output
self.cmd_run_ctrl = RunCommandControl(panel)
self.cmd_run_ctrl.Bind(EVT_RUN_CMD, self.on_run_cmd_changed)
grid.Add(
self.cmd_run_ctrl,
pos=(row, 0), span=(1, 2),
flag=wx.EXPAND)
grid.AddGrowableCol(0)
grid.AddGrowableRow(3)
box = wx.BoxSizer(wx.VERTICAL)
box.Add(
grid, flag=wx.EXPAND | wx.ALL | wx.ALIGN_CENTER_VERTICAL,
border=spacing, proportion=1)
panel.SetSizerAndFit(box)
self.Fit()
self.SetMinSize(self.ClientToWindowSize(box.GetMinSize()))
self.SetInitialSize(wx.Size(500, 400))
self.Bind(wx.EVT_CLOSE, self.on_close)
def initialize_gui(self):
if self.gui_initialized:
return True
# Set path for gui settings to default user data according to the
# OpenSlides type. This does not depend on any argument the user might
# type in.
openslides_type = detect_openslides_type()
try:
default_user_data_path = get_default_user_data_path(openslides_type)
except PortableDirNotWritable:
wx.MessageBox(
_("The portable directory is not writable. Please copy the "
"openslides portable to a writeable location and start it "
"again from there"),
_("Error: Portable directory not writable"),
wx.OK | wx.ICON_ERROR)
return False
self.gui_settings_path = os.path.join(
default_user_data_path, 'openslides', 'gui_settings.json')
self.load_gui_settings()
self.apply_backup_settings()
self.gui_initialized = True
return True
@property
def backup_interval_seconds(self):
if self.backupdb_interval_unit == "second":
factor = 1
elif self.backupdb_interval_unit == "minute":
factor = 60
elif self.backupdb_interval_unit == "hour":
factor = 3600
return self.backupdb_interval * factor
@property
def host(self):
return self._host
@host.setter
def host(self, host):
self._host = host
self.lb_host.SetLabel(_("Host: {0}").format(host))
@property
def port(self):
return self._port
@port.setter
def port(self, port):
self._port = port
self.lb_port.SetLabel(_("Port: {0}").format(port))
def load_gui_settings(self):
if self.gui_settings_path is None:
return
try:
f = open(self.gui_settings_path, "rb")
except IOError as e:
if e.errno == errno.ENOENT:
return
raise
with f:
settings = json.load(f)
def setattr_unless_none(attr, value):
if not value is None:
setattr(self, attr, value)
backup_settings = settings.get("database_backup", {})
setattr_unless_none("backupdb_enabled", backup_settings.get("enabled"))
setattr_unless_none(
"backupdb_destination", backup_settings.get("destination"))
setattr_unless_none(
"backupdb_interval", backup_settings.get("interval"))
setattr_unless_none(
"backupdb_interval_unit", backup_settings.get("interval_unit"))
last_backup = backup_settings.get("last_backup")
if not last_backup is None:
self.last_backup = datetime.datetime.strptime(
last_backup, "%Y-%m-%d %H:%M:%S")
server_settings = settings.get("server_settings", {})
setattr_unless_none("host", server_settings.get("host"))
setattr_unless_none("port", server_settings.get("port"))
def save_gui_settings(self):
if self.last_backup is None:
last_backup = None
else:
last_backup = self.last_backup.strftime("%Y-%m-%d %H:%M:%S")
settings = {
"database_backup": {
"enabled": self.backupdb_enabled,
"destination": self.backupdb_destination,
"internal": self.backupdb_interval,
"interval_unit": self.backupdb_interval_unit,
"last_backup": last_backup
},
"server_settings": {
"host": self.host,
"port": self.port,
},
}
dp = os.path.dirname(self.gui_settings_path)
if not os.path.exists(dp):
os.makedirs(dp)
with open(self.gui_settings_path, "wb") as f:
json.dump(settings, f, ensure_ascii=False, indent=4)
def apply_backup_settings(self):
if self.backupdb_enabled and self.server_running:
now = datetime.datetime.utcnow()
delta = datetime.timedelta(seconds=self.backup_interval_seconds)
ref = self.last_backup
if ref is None:
ref = now
ref += delta
d = ref - now
seconds = d.days * 86400 + d.seconds
if seconds < 1:
                seconds = 30  # avoid backup immediately after start
self.backup_timer.Start(seconds * 1000, True)
else:
self.backup_timer.Stop()
def do_backup(self):
cmd = [
sys.executable, "-u", "-m", "openslides", "backupdb",
self.backupdb_destination,
]
p = subprocess.Popen(
cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.stdin.close()
output = p.stdout.read().strip()
exitcode = p.wait()
if output:
self.cmd_run_ctrl.append_message(output)
time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
if exitcode == 0:
self.cmd_run_ctrl.append_message(
_("{0}: Database backup successful.").format(time))
else:
self.cmd_run_ctrl.append_message(
_("{0}: Database backup failed!").format(time))
self.last_backup = datetime.datetime.utcnow()
def on_syncdb_clicked(self, evt):
self.cmd_run_ctrl.append_message(_("Syncing database..."))
self.cmd_run_ctrl.run_command("syncdb")
def on_reset_admin_clicked(self, evt):
self.cmd_run_ctrl.append_message(_("Resetting admin user..."))
self.cmd_run_ctrl.run_command("createsuperuser")
def on_about_clicked(self, evt):
info = wx.AboutDialogInfo()
info.SetName("OpenSlides")
info.SetVersion(openslides.get_version())
info.SetDescription(_(
"OpenSlides is a free web based presentation and "
"assembly system.\n"
"OpenSlides is free software; licensed under the MIT license."
).replace(u" ", u"\u00a0"))
info.SetCopyright(_(u"\u00a9 2011-2014 by OpenSlides team"))
info.SetWebSite(("http://www.openslides.org/", "www.openslides.org"))
# XXX: at least on wxgtk this has no effect
info.SetIcon(self.GetIcon())
wx.AboutBox(info)
def on_start_server_clicked(self, evt):
if self.server_running:
self.cmd_run_ctrl.cancel_command()
return
if self._host == "0.0.0.0":
args = ["--port", self._port]
else:
args = ["--address", self._host, "--port", self._port]
if not self.cb_start_browser.GetValue():
args.append("--no-browser")
self.server_running = True
self.cmd_run_ctrl.run_command("start", *args)
# initiate backup_timer if backup is enabled
self.apply_backup_settings()
self.bt_server.SetLabel(_("&Stop server"))
def on_settings_clicked(self, evt):
dlg = SettingsDialog(self)
dlg.host = self._host
dlg.port = self._port
if dlg.ShowModal() == wx.ID_OK:
self.host = dlg.host
self.port = dlg.port
def on_backup_clicked(self, evt):
dlg = BackupSettingsDialog(self)
dlg.backupdb_enabled = self.backupdb_enabled
dlg.backupdb_destination = self.backupdb_destination
dlg.interval = self.backupdb_interval
dlg.interval_unit = self.backupdb_interval_unit
if dlg.ShowModal() == wx.ID_OK:
self.backupdb_enabled = dlg.backupdb_enabled
self.backupdb_destination = dlg.backupdb_destination
self.backupdb_interval = dlg.interval
self.backupdb_interval_unit = dlg.interval_unit
self.apply_backup_settings()
def on_run_cmd_changed(self, evt):
show_completion_msg = not evt.running
if self.server_running and not evt.running:
self.bt_server.SetLabel(_("&Start server"))
self.server_running = False
self.backup_timer.Stop()
if self.backupdb_enabled:
self.do_backup()
# no operation completed msg when stopping server
show_completion_msg = False
self.bt_settings.Enable(not evt.running)
self.bt_backup.Enable(not evt.running)
self.bt_sync_db.Enable(not evt.running)
self.bt_reset_admin.Enable(not evt.running)
self.bt_server.Enable(self.server_running or not evt.running)
if show_completion_msg:
if evt.exitcode == 0:
text = _("Operation successfully completed.")
else:
text = _("Operation failed (exit code = {0})").format(
evt.exitcode)
self.cmd_run_ctrl.append_message(text)
def on_backup_timer(self, evt):
if not self.backupdb_enabled:
return
self.do_backup()
self.backup_timer.Start(1000 * self.backup_interval_seconds, True)
def on_close(self, ev):
self.cmd_run_ctrl.cancel_command()
self.save_gui_settings()
self.Destroy()
class OpenslidesApp(wx.App):
def __init__(self):
super(OpenslidesApp, self).__init__(False)
def OnInit(self):
window = MainWindow()
self.SetTopWindow(window)
if not window.initialize_gui():
self.Exit()
return False
window.Show()
return True
def main():
locale.setlocale(locale.LC_ALL, "")
lang = locale.getdefaultlocale()[0]
if lang:
global _translations
localedir = filesystem2unicode(openslides.__file__)
localedir = os.path.dirname(localedir)
localedir = os.path.join(localedir, "locale")
_translations = gettext.translation(
"django", localedir, [lang], fallback=True)
app = OpenslidesApp()
app.MainLoop()
if __name__ == "__main__":
main()
|
#-*- coding: utf-8 -*-
from __future__ import unicode_literals
import aldryn_apphooks_config.fields
import app_data.fields
import djangocms_text_ckeditor.fields
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '__first__'),
('djangocms_blog', '0009_latestpostsplugin_tags_new'),
]
operations = [
migrations.CreateModel(
name='BlogConfig',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('type', models.CharField(verbose_name='type', max_length=100)),
('namespace', models.CharField(default=None, verbose_name='instance namespace', unique=True, max_length=100)),
('app_data', app_data.fields.AppDataField(editable=False, default='{}')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='BlogConfigTranslation',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('language_code', models.CharField(db_index=True, verbose_name='Language', max_length=15)),
('app_title', models.CharField(verbose_name='application title', max_length=234)),
('master', models.ForeignKey(editable=False, to='djangocms_blog.BlogConfig', related_name='translations', null=True)),
],
options={
'verbose_name': 'blog config Translation',
'db_table': 'djangocms_blog_blogconfig_translation',
'default_permissions': (),
'db_tablespace': '',
'managed': True,
},
),
migrations.CreateModel(
name='GenericBlogPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, serialize=False, primary_key=True, auto_created=True, to='cms.CMSPlugin')),
('app_config', aldryn_apphooks_config.fields.AppHookConfigField(verbose_name='app. config', blank=True, to='djangocms_blog.BlogConfig', help_text='When selecting a value, the form is reloaded to get the updated default')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.AlterField(
model_name='posttranslation',
name='abstract',
field=djangocms_text_ckeditor.fields.HTMLField(default='', verbose_name='abstract', blank=True),
),
migrations.AddField(
model_name='authorentriesplugin',
name='app_config',
field=aldryn_apphooks_config.fields.AppHookConfigField(default=None, blank=True, verbose_name='app. config', to='djangocms_blog.BlogConfig', help_text='When selecting a value, the form is reloaded to get the updated default', null=True),
preserve_default=False,
),
migrations.AddField(
model_name='blogcategory',
name='app_config',
field=aldryn_apphooks_config.fields.AppHookConfigField(default=None, verbose_name='app. config', to='djangocms_blog.BlogConfig', help_text='When selecting a value, the form is reloaded to get the updated default', null=True),
preserve_default=False,
),
migrations.AddField(
model_name='latestpostsplugin',
name='app_config',
field=aldryn_apphooks_config.fields.AppHookConfigField(default=None, blank=True, verbose_name='app. config', to='djangocms_blog.BlogConfig', help_text='When selecting a value, the form is reloaded to get the updated default', null=True),
preserve_default=False,
),
migrations.AddField(
model_name='post',
name='app_config',
field=aldryn_apphooks_config.fields.AppHookConfigField(default=None, verbose_name='app. config', to='djangocms_blog.BlogConfig', help_text='When selecting a value, the form is reloaded to get the updated default', null=True),
preserve_default=False,
),
migrations.AlterUniqueTogether(
name='blogconfigtranslation',
unique_together=set([('language_code', 'master')]),
),
migrations.AlterField(
model_name='post',
name='sites',
field=models.ManyToManyField(to='sites.Site', help_text='Select sites in which to show the post. If none is set it will be visible in all the configured sites.', blank=True, verbose_name='Site(s)'),
),
]
|
import copy
import re
from collections import defaultdict
from typing import List, Dict
from .substitution_augmenter import SubstitutionAugmenter
from ..actions import Chemical
from ..utils import extract_chemicals
from paragraph2actions.misc import TextWithActions
class CompoundNameAugmenter(SubstitutionAugmenter):
"""
Augments data by substituting compound names.
"""
def __init__(self, probability: float, compounds: List[str]):
"""
Args:
probability: probability with which to switch the compound name
compounds: list of names to use for substitution
"""
super().__init__(probability=probability, values=compounds)
def augment(self, sample: TextWithActions) -> TextWithActions:
sample = copy.deepcopy(sample)
chemicals = extract_chemicals(sample.actions)
# Build a dictionary of compound names and associated chemicals
# (necessary if the same chemical is present twice)
cpd_dict: Dict[str, List[Chemical]] = defaultdict(list)
for c in chemicals:
cpd_dict[c.name].append(c)
        # remove compound names that are contained in others; with this, if both '3-ethyltoluene' and
# '2-bromo-3-ethyltoluene' are present as compounds, we will never substitute the short one.
for chemical_name in list(cpd_dict.keys()):
if any(chemical_name in cpd for cpd in cpd_dict.keys() if chemical_name != cpd):
cpd_dict.pop(chemical_name)
# For each chemical name, try substitution
for cpd_name in cpd_dict:
if not self.random_draw_passes() or cpd_name not in sample.text:
continue
new_name = self.draw_value()
sample.text = self.replace_in_text(
text=sample.text, compound=cpd_name, new_name=new_name
)
for c in cpd_dict[cpd_name]:
c.name = new_name
return sample
def replace_in_text(self, text: str, compound: str, new_name: str) -> str:
# We replace only at word boundaries, to avoid things like 'H2SO4 -> waterSO4' when replacing 'H2' by 'water'
pattern = re.compile(rf'\b{re.escape(compound)}\b')
return pattern.sub(new_name, text)
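# A minimal usage sketch (hypothetical compound list; `sample` is a
# TextWithActions instance whose actions contain Chemical entries):
#
#   augmenter = CompoundNameAugmenter(probability=0.5, compounds=["toluene", "benzene"])
#   augmented = augmenter.augment(sample)
#
# Each compound name found in both the actions and the text is replaced, with
# the given probability, by a name drawn at random from `compounds`, using
# word-boundary matching so that substrings of longer names are left intact.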
|
# Generated by Django 3.1.13 on 2021-09-02 17:31
import django.contrib.postgres.fields
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('stac_api', '0015_data_collection_summaries'),
]
operations = [
migrations.AddField(
model_name='collection',
name='summaries_geoadmin_lang',
field=django.contrib.postgres.fields.ArrayField(
base_field=models.CharField(max_length=2),
blank=True,
default=list,
editable=False,
size=None
),
),
migrations.AlterField(
model_name='asset',
name='media_type',
field=models.CharField(
choices=[
(
'application/x.ascii-grid+zip',
'Zipped ESRI ASCII raster format (.asc) (application/x.ascii-grid+zip)'
),
(
'application/x.ascii-xyz+zip',
'Zipped XYZ (.xyz) (application/x.ascii-xyz+zip)'
), ('application/x.e00+zip', 'Zipped e00 (application/x.e00+zip)'), (
'image/tiff; application=geotiff',
'GeoTIFF (image/tiff; application=geotiff)'
), ('application/x.geotiff+zip', 'Zipped GeoTIFF (application/x.geotiff+zip)'),
('application/x.tiff+zip', 'Zipped TIFF (application/x.tiff+zip)'),
('application/x.png+zip', 'Zipped PNG (application/x.png+zip)'),
('application/x.jpeg+zip', 'Zipped JPEG (application/x.jpeg+zip)'),
(
'application/vnd.google-earth.kml+xml',
'KML (application/vnd.google-earth.kml+xml)'
),
(
'application/vnd.google-earth.kmz',
'Zipped KML (application/vnd.google-earth.kmz)'
), ('application/x.dxf+zip', 'Zipped DXF (application/x.dxf+zip)'),
('application/gml+xml', 'GML (application/gml+xml)'),
('application/x.gml+zip', 'Zipped GML (application/x.gml+zip)'),
('application/vnd.las', 'LIDAR (application/vnd.las)'),
('application/vnd.laszip', 'Zipped LIDAR (application/vnd.laszip)'), (
'application/x.shapefile+zip',
'Zipped Shapefile (application/x.shapefile+zip)'
),
(
'application/x.filegdb+zip',
'Zipped File Geodatabase (application/x.filegdb+zip)'
),
(
'application/x.ms-access+zip',
'Zipped Personal Geodatabase (application/x.ms-access+zip)'
), ('application/x.ms-excel+zip', 'Zipped Excel (application/x.ms-excel+zip)'),
('application/x.tab+zip', 'Zipped Mapinfo-TAB (application/x.tab+zip)'),
(
'application/x.tab-raster+zip',
'Zipped Mapinfo-Raster-TAB (application/x.tab-raster+zip)'
), ('application/x.csv+zip',
'Zipped CSV (application/x.csv+zip)'), ('text/csv', 'CSV (text/csv)'), (
'application/geopackage+sqlite3',
'Geopackage (application/geopackage+sqlite3)'
),
(
'application/x.geopackage+zip',
'Zipped Geopackage (application/x.geopackage+zip)'
), ('application/geo+json', 'GeoJSON (application/geo+json)'),
('application/x.geojson+zip', 'Zipped GeoJSON (application/x.geojson+zip)'),
(
'application/x.interlis; version=2.3',
'Interlis 2 (application/x.interlis; version=2.3)'
),
(
'application/x.interlis+zip; version=2.3',
'Zipped XTF (2.3) (application/x.interlis+zip; version=2.3)'
),
(
'application/x.interlis; version=1',
'Interlis 1 (application/x.interlis; version=1)'
),
(
'application/x.interlis+zip; version=1',
'Zipped ITF (application/x.interlis+zip; version=1)'
),
(
'image/tiff; application=geotiff; profile=cloud-optimized',
'Cloud Optimized GeoTIFF (COG) (image/tiff; application=geotiff; profile=cloud-optimized)'
), ('application/pdf', 'PDF (application/pdf)'),
('application/x.pdf+zip', 'Zipped PDF (application/x.pdf+zip)'),
('application/json', 'JSON (application/json)'),
('application/x.json+zip', 'Zipped JSON (application/x.json+zip)'),
('application/x-netcdf', 'NetCDF (application/x-netcdf)'),
('application/x.netcdf+zip', 'Zipped NetCDF (application/x.netcdf+zip)'),
('application/xml', 'XML (application/xml)'),
('application/x.xml+zip', 'Zipped XML (application/x.xml+zip)'),
(
'application/vnd.mapbox-vector-tile',
'mbtiles (application/vnd.mapbox-vector-tile)'
), ('text/plain', 'Text (text/plain)'),
('text/x.plain+zip', 'Zipped text (text/x.plain+zip)'),
('application/x.dwg+zip', 'Zipped DWG (application/x.dwg+zip)')
],
max_length=200
),
),
]
|
SOURCE_DOC_PATH = ''
|
import numpy as np
from i3Deep import utils
from tqdm import tqdm
import os
# name = "KGU-53317EB91645"
# load_mask = "D:/Datasets/medical_data/ExportKGU/3D Slicer 2/" + name + "/mask.nii.gz"
# load_label_table = "D:/Datasets/medical_data/ExportKGU/3D Slicer 2/" + name + "/label_table.txt"
# save_mask = "D:/Datasets/medical_data/ExportKGU/3D Slicer 2/" + name + "/mask2.nii.gz"
load_path = "D:/Datasets/medical_data/ExportKGU/3D Slicer 2/"
def rename(case_path):
filenames = utils.load_filenames(case_path + "/", extensions=None)
for filename in filenames:
name = os.path.basename(filename)
if "label" in name and ".nii.gz" in name:
os.rename(filename, case_path + "/mask.nii.gz")
elif ".txt" in name:
os.rename(filename, case_path + "/label_table.txt")
elif ".nii.gz" in name:
os.rename(filename, case_path + "/image.nii.gz")
def get_labels(load_label_table):
with open(load_label_table) as f:
label_table = f.readlines()
label_table = np.asarray(label_table)
ggo = []
cons = []
pe = []
for line in label_table:
label = line.split()[0]
if label.isnumeric():
if "Background" in line or "background" in line:
continue
infection = line.split("_")[1]
keywords = ["ggo", "gg"]
if any(x in infection.lower() for x in keywords):
ggo.append(int(label))
keywords = ["cons", "cns", "con", "cos", "co"]
if any(x in infection.lower() for x in keywords):
cons.append(int(label))
keywords = ["pe", "pes"]
if any(x in infection.lower() for x in keywords):
pe.append(int(label))
return ggo, cons, pe
def merge_labels(load_mask, save_mask, load_label_table):
mask, affine, spacing, header = utils.load_nifty(load_mask)
mask = mask.astype(int)
ggo, cons, pe = get_labels(load_label_table)
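    # Two-pass relabeling: negate every source label first so that the final
    # target values (1 = GGO, 2 = consolidation, 3 = pleural effusion) cannot
    # collide with source labels that happen to use the same numbers.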
for label in tqdm(np.concatenate((ggo, cons, pe), axis=0), disable=True):
mask[mask == label] = -label
for label in tqdm(ggo, disable=True):
mask[mask == -label] = 1
for label in tqdm(cons, disable=True):
mask[mask == -label] = 2
for label in tqdm(pe, disable=True):
mask[mask == -label] = 3
mask = np.rint(mask)
mask = mask.astype(int)
utils.save_nifty(save_mask, mask, affine, spacing, header)
def round_mask(filename):
mask, affine, spacing, header = utils.load_nifty(filename)
mask = np.rint(mask)
mask = mask.astype(int)
utils.save_nifty(filename, mask, affine, spacing, header)
def tmp2(filename):
mask, affine, spacing, header = utils.load_nifty(filename)
print(mask[46-1][155-1][116-1])
if __name__ == '__main__':
# filenames = utils.load_filenames(load_path, extensions=None)
# for filename in tqdm(filenames):
# if os.path.isfile(filename + "/mask2.nii.gz"):
# continue
# rename(filename)
# load_mask = filename + "/mask.nii.gz"
# save_mask = filename + "/mask2.nii.gz"
# load_label_table = filename + "/label_table.txt"
# merge_labels(load_mask, save_mask, load_label_table)
# for filename in tqdm(filenames):
# old_mask = filename + "/mask.nii.gz"
# new_mask = filename + "/mask2.nii.gz"
# label_table = filename + "/label_table.txt"
# if os.path.exists(new_mask):
# os.remove(old_mask)
# os.rename(new_mask, old_mask)
# os.remove(label_table)
# filenames = utils.load_filenames("/gris/gris-f/homelv/kgotkows/datasets/nnUnet_datasets/nnUNet_raw_data/Task79_frankfurt3/labelsTr/", extensions=None)
# for filename in tqdm(filenames):
# mask, affine, spacing, header = utils.load_nifty(filename)
# mask = np.rint(mask)
# mask = mask.astype(np.uint8)
# utils.save_nifty(filename, mask, affine, spacing, header)
# filename = "/gris/gris-f/homelv/kgotkows/datasets/covid19/UK_Frankfurt3/KGU-E9EC0F06F1D6/mask.nii.gz"
# mask, affine, spacing, header = utils.load_nifty(filename)
# mask[mask == 5] = 2
# mask[mask == 6] = 2
# utils.save_nifty(filename, mask, affine, spacing, header)
#tmp("/gris/gris-f/homelv/kgotkows/datasets/nnUnet_datasets/nnUNet_raw_data/nnUNet_raw_data/Task077_frankfurt3Guided/imagesTr/0001_0001.nii.gz")
tmp2("/gris/gris-f/homelv/kgotkows/datasets/nnUnet_datasets/nnUNet_raw_data/Task77_frankfurt3Guided/tmp/900.nii.gz")
|
import base64
import json
import requests
import urllib3
from contextlib import contextmanager
from packaging.version import Version
from requests.adapters import HTTPAdapter
from urllib3.util import Retry
from requests.exceptions import HTTPError
from mlflow import __version__
from mlflow.protos import databricks_pb2
from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE, ENDPOINT_NOT_FOUND, ErrorCode
from mlflow.utils.proto_json_utils import parse_dict
from mlflow.utils.string_utils import strip_suffix
from mlflow.exceptions import MlflowException, RestException
RESOURCE_DOES_NOT_EXIST = "RESOURCE_DOES_NOT_EXIST"
_REST_API_PATH_PREFIX = "/api/2.0"
_DEFAULT_HEADERS = {"User-Agent": "mlflow-python-client/%s" % __version__}
# Response codes that generally indicate transient network failures and merit client retries,
# based on guidance from cloud service providers
# (https://docs.microsoft.com/en-us/azure/architecture/best-practices/retry-service-specific#general-rest-and-retry-guidelines)
_TRANSIENT_FAILURE_RESPONSE_CODES = frozenset(
[
408, # Request Timeout
429, # Too Many Requests
500, # Internal Server Error
502, # Bad Gateway
503, # Service Unavailable
504, # Gateway Timeout
]
)
def _get_http_response_with_retries(
method, url, max_retries, backoff_factor, retry_codes, **kwargs
):
"""
Performs an HTTP request using Python's `requests` module with an automatic retry policy.
:param method: a string indicating the method to use, e.g. "GET", "POST", "PUT".
:param url: the target URL address for the HTTP request.
:param max_retries: Maximum total number of retries.
:param backoff_factor: a time factor for exponential backoff. e.g. value 5 means the HTTP
request will be retried with interval 5, 10, 20... seconds. A value of 0 turns off the
exponential backoff.
:param retry_codes: a list of HTTP response error codes that qualifies for retry.
:param kwargs: Additional keyword arguments to pass to `requests.Session.request()`
:return: requests.Response object.
"""
assert 0 <= max_retries < 10
assert 0 <= backoff_factor < 120
retry_kwargs = {
"total": max_retries,
"connect": max_retries,
"read": max_retries,
"redirect": max_retries,
"status": max_retries,
"status_forcelist": retry_codes,
"backoff_factor": backoff_factor,
}
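    # urllib3 1.26 renamed Retry's "method_whitelist" option to "allowed_methods"; passing None
    # removes the method restriction so every HTTP method may be retried.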
if Version(urllib3.__version__) >= Version("1.26.0"):
retry_kwargs["allowed_methods"] = None
else:
retry_kwargs["method_whitelist"] = None
retry = Retry(**retry_kwargs)
adapter = HTTPAdapter(max_retries=retry)
with requests.Session() as http:
http.mount("https://", adapter)
http.mount("http://", adapter)
response = http.request(method, url, **kwargs)
return response
def http_request(
host_creds,
endpoint,
method,
max_retries=5,
backoff_factor=2,
retry_codes=_TRANSIENT_FAILURE_RESPONSE_CODES,
timeout=120,
**kwargs,
):
"""
Makes an HTTP request with the specified method to the specified hostname/endpoint. Transient
errors such as Rate-limited (429), service unavailable (503) and internal error (500) are
retried with an exponential back off with backoff_factor * (1, 2, 4, ... seconds).
    The raw ``requests.Response`` object is returned; parsing the (typically JSON) body is
    left to the caller.
:param host_creds: A :py:class:`mlflow.rest_utils.MlflowHostCreds` object containing
hostname and optional authentication.
:param endpoint: a string for service endpoint, e.g. "/path/to/object".
:param method: a string indicating the method to use, e.g. "GET", "POST", "PUT".
:param max_retries: maximum number of retries before throwing an exception.
:param backoff_factor: a time factor for exponential backoff. e.g. value 5 means the HTTP
request will be retried with interval 5, 10, 20... seconds. A value of 0 turns off the
exponential backoff.
:param retry_codes: a list of HTTP response error codes that qualifies for retry.
:param timeout: wait for timeout seconds for response from remote server for connect and
read request.
:param kwargs: Additional keyword arguments to pass to `requests.Session.request()`
:return: requests.Response object.
"""
hostname = host_creds.host
auth_str = None
if host_creds.username and host_creds.password:
basic_auth_str = ("%s:%s" % (host_creds.username, host_creds.password)).encode("utf-8")
auth_str = "Basic " + base64.standard_b64encode(basic_auth_str).decode("utf-8")
elif host_creds.token:
auth_str = "Bearer %s" % host_creds.token
from mlflow.tracking.request_header.registry import resolve_request_headers
headers = dict({**_DEFAULT_HEADERS, **resolve_request_headers()})
if auth_str:
headers["Authorization"] = auth_str
if host_creds.server_cert_path is None:
verify = not host_creds.ignore_tls_verification
else:
verify = host_creds.server_cert_path
if host_creds.client_cert_path is not None:
kwargs["cert"] = host_creds.client_cert_path
cleaned_hostname = strip_suffix(hostname, "/")
url = "%s%s" % (cleaned_hostname, endpoint)
try:
return _get_http_response_with_retries(
method,
url,
max_retries,
backoff_factor,
retry_codes,
headers=headers,
verify=verify,
timeout=timeout,
**kwargs,
)
except Exception as e:
raise MlflowException("API request to %s failed with exception %s" % (url, e))
def _can_parse_as_json(string):
try:
json.loads(string)
return True
except Exception:
return False
def http_request_safe(host_creds, endpoint, method, **kwargs):
"""
Wrapper around ``http_request`` that also verifies that the request succeeds with code 200.
"""
response = http_request(host_creds=host_creds, endpoint=endpoint, method=method, **kwargs)
return verify_rest_response(response, endpoint)
def verify_rest_response(response, endpoint):
"""Verify the return code and format, raise exception if the request was not successful."""
if response.status_code != 200:
if _can_parse_as_json(response.text):
raise RestException(json.loads(response.text))
else:
base_msg = "API request to endpoint %s failed with error code " "%s != 200" % (
endpoint,
response.status_code,
)
raise MlflowException("%s. Response body: '%s'" % (base_msg, response.text))
# Skip validation for endpoints (e.g. DBFS file-download API) which may return a non-JSON
# response
if endpoint.startswith(_REST_API_PATH_PREFIX) and not _can_parse_as_json(response.text):
base_msg = (
"API request to endpoint was successful but the response body was not "
"in a valid JSON format"
)
raise MlflowException("%s. Response body: '%s'" % (base_msg, response.text))
return response
def augmented_raise_for_status(response):
"""Wrap the standard `requests.response.raise_for_status()` method and return reason"""
try:
response.raise_for_status()
except HTTPError as e:
if response.text:
raise HTTPError(f"{e}. Response text: {response.text}")
else:
raise e
def _get_path(path_prefix, endpoint_path):
return "{}{}".format(path_prefix, endpoint_path)
def extract_api_info_for_service(service, path_prefix):
"""Return a dictionary mapping each API method to a tuple (path, HTTP method)"""
service_methods = service.DESCRIPTOR.methods
res = {}
for service_method in service_methods:
endpoints = service_method.GetOptions().Extensions[databricks_pb2.rpc].endpoints
endpoint = endpoints[0]
endpoint_path = _get_path(path_prefix, endpoint.path)
res[service().GetRequestClass(service_method)] = (endpoint_path, endpoint.method)
return res
def extract_all_api_info_for_service(service, path_prefix):
"""Return a dictionary mapping each API method to a list of tuples [(path, HTTP method)]"""
service_methods = service.DESCRIPTOR.methods
res = {}
for service_method in service_methods:
endpoints = service_method.GetOptions().Extensions[databricks_pb2.rpc].endpoints
res[service().GetRequestClass(service_method)] = [
(_get_path(path_prefix, endpoint.path), endpoint.method) for endpoint in endpoints
]
return res
def call_endpoint(host_creds, endpoint, method, json_body, response_proto):
# Convert json string to json dictionary, to pass to requests
if json_body:
json_body = json.loads(json_body)
if method == "GET":
response = http_request(
host_creds=host_creds, endpoint=endpoint, method=method, params=json_body
)
else:
response = http_request(
host_creds=host_creds, endpoint=endpoint, method=method, json=json_body
)
response = verify_rest_response(response, endpoint)
js_dict = json.loads(response.text)
parse_dict(js_dict=js_dict, message=response_proto)
return response_proto
def call_endpoints(host_creds, endpoints, json_body, response_proto):
# The order that the endpoints are called in is defined by the order
# specified in ModelRegistryService in model_registry.proto
for i, (endpoint, method) in enumerate(endpoints):
try:
return call_endpoint(host_creds, endpoint, method, json_body, response_proto)
except RestException as e:
if e.error_code != ErrorCode.Name(ENDPOINT_NOT_FOUND) or i == len(endpoints) - 1:
raise e
@contextmanager
def cloud_storage_http_request(
method,
url,
max_retries=5,
backoff_factor=2,
retry_codes=_TRANSIENT_FAILURE_RESPONSE_CODES,
timeout=None,
**kwargs,
):
"""
Performs an HTTP PUT/GET request using Python's `requests` module with automatic retry.
:param method: string of 'PUT' or 'GET', specify to do http PUT or GET
:param url: the target URL address for the HTTP request.
:param max_retries: maximum number of retries before throwing an exception.
:param backoff_factor: a time factor for exponential backoff. e.g. value 5 means the HTTP
request will be retried with interval 5, 10, 20... seconds. A value of 0 turns off the
exponential backoff.
:param retry_codes: a list of HTTP response error codes that qualifies for retry.
:param timeout: wait for timeout seconds for response from remote server for connect and
read request. Default to None owing to long duration operation in read / write.
:param kwargs: Additional keyword arguments to pass to `requests.Session.request()`
    :return: requests.Response object.
"""
if method.lower() not in ("put", "get"):
raise ValueError("Illegal http method: " + method)
try:
with _get_http_response_with_retries(
method, url, max_retries, backoff_factor, retry_codes, timeout=timeout, **kwargs
) as response:
yield response
except Exception as e:
raise MlflowException("API request failed with exception %s" % e)
class MlflowHostCreds(object):
"""
Provides a hostname and optional authentication for talking to an MLflow tracking server.
:param host: Hostname (e.g., http://localhost:5000) to MLflow server. Required.
:param username: Username to use with Basic authentication when talking to server.
If this is specified, password must also be specified.
:param password: Password to use with Basic authentication when talking to server.
If this is specified, username must also be specified.
:param token: Token to use with Bearer authentication when talking to server.
If provided, user/password authentication will be ignored.
:param ignore_tls_verification: If true, we will not verify the server's hostname or TLS
certificate. This is useful for certain testing situations, but should never be
true in production.
If this is set to true ``server_cert_path`` must not be set.
:param client_cert_path: Path to ssl client cert file (.pem).
Sets the cert param of the ``requests.request``
function (see https://requests.readthedocs.io/en/master/api/).
:param server_cert_path: Path to a CA bundle to use.
Sets the verify param of the ``requests.request``
function (see https://requests.readthedocs.io/en/master/api/).
If this is set ``ignore_tls_verification`` must be false.
"""
def __init__(
self,
host,
username=None,
password=None,
token=None,
ignore_tls_verification=False,
client_cert_path=None,
server_cert_path=None,
):
if not host:
raise MlflowException(
message="host is a required parameter for MlflowHostCreds",
error_code=INVALID_PARAMETER_VALUE,
)
if ignore_tls_verification and (server_cert_path is not None):
raise MlflowException(
message=(
"When 'ignore_tls_verification' is true then 'server_cert_path' "
"must not be set! This error may have occurred because the "
"'MLFLOW_TRACKING_INSECURE_TLS' and 'MLFLOW_TRACKING_SERVER_CERT_PATH' "
"environment variables are both set - only one of these environment "
"variables may be set."
),
error_code=INVALID_PARAMETER_VALUE,
)
self.host = host
self.username = username
self.password = password
self.token = token
self.ignore_tls_verification = ignore_tls_verification
self.client_cert_path = client_cert_path
self.server_cert_path = server_cert_path
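# Illustrative usage sketch (assumptions: a reachable tracking server behind the placeholder
# URL below; the endpoint path is only an example):
#     creds = MlflowHostCreds(host="http://localhost:5000")
#     response = http_request_safe(creds, endpoint="/api/2.0/mlflow/experiments/list", method="GET")
#     payload = json.loads(response.text)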
|
"""
sampling.py
We sample Metropolis-Hastings:
* Random walk proposals
* Langevin proposals
* Langevin proposals with preconditioning
* Hamiltonian MC
* Hamiltonian MC with preconditioning
NOTE:
The functionality of this module is restricted to log-densities,
i.e. densities of the form p(s) = exp(-E(s)). We work with E(s) only.
The reason is that in Bayesian inference, evaluations of exp(-E(s))
are numerically too unstable.
"""
import collections
from abc import ABC, abstractmethod
import numpy as np
from difflikelihoods import logdensity
def metropolishastings_rw(logpdf, nsamps, initstate, pwidth, ninits):
"""
Convenience function for Metropolis-Hastings sampling with
random walk proposal kernel.
"""
logdens = logdensity.LogDensity(logpdf)
rwmh = RandomWalkMH(logdens)
return rwmh.sample_nd(nsamps, initstate, pwidth, ninits)
def metropolishastings_lang(logpdf, loggrad, nsamps, initstate, pwidth, ninits):
"""
Convenience function for Metropolis-Hastings sampling with
Langevin dynamics proposal kernel.
"""
logdens = logdensity.LogDensity(logpdf, loggrad)
langmh = LangevinMH(logdens)
return langmh.sample_nd(nsamps, initstate, pwidth, ninits)
def metropolishastings_plang(
logpdf, loggrad, loghess, nsamps, initstate, pwidth, ninits
):
"""
Convenience function for Metropolis-Hastings sampling with
Riemannian (preconditioned) Langevin dynamics proposal kernel.
"""
logdens = logdensity.LogDensity(logpdf, loggrad, loghess)
plangmh = PrecondLangevinMH(logdens)
return plangmh.sample_nd(nsamps, initstate, pwidth, ninits)
def metropolishastings_ham(
logpdf, loggrad, nsamps, initstate, stepsize, nsteps, ninits
):
"""
Convenience function for Hamiltonian MCMC.
"""
logdens = logdensity.LogDensity(logpdf, loggrad)
hmc = HamiltonianMC(logdens, nsteps)
return hmc.sample_nd(nsamps, initstate, stepsize, ninits)
def metropolishastings_pham(
logpdf, loggrad, loghess, nsamps, initstate, stepsize, nsteps, ninits
):
"""
Convenience function for preconditioned Hamiltonian MCMC.
"""
logdens = logdensity.LogDensity(logpdf, loggrad, loghess)
phmc = PrecondHamiltonianMC(logdens, nsteps)
return phmc.sample_nd(nsamps, initstate, stepsize, ninits)
# Convenience data structure.
MCMCState = collections.namedtuple("MCMCState", "state logdens loggrad loghess")
class MetropolisHastings(ABC):
"""
Abstract Metropolis-Hastings class. Contains everything but the
proposal kernels.
"""
def __init__(self, logdens):
"""
Initialise MH sampler with a log-density function.
Args:
logdens: LogDensity object, evaluations of a negative log-
density and derivatives
"""
self.logdens = logdens
def sample_nd(self, nsamps, init_state, pwidth, ninits=None, *optional):
"""
"""
assert init_state_is_array(
init_state
), "Please enter a (d,) dimensional initial state"
states, logprobs = np.zeros((nsamps, len(init_state))), np.zeros(nsamps)
accepted = 0
if ninits is None:
ninits = 0
currstate = self.evaluate_logdens(init_state)
states[0], logprobs[0] = currstate.state, currstate.logdens
for idx in range(1, nsamps):
if idx < ninits:
proposal, corrfact = self.generate_proposal(currstate, pwidth)
else:
proposal, corrfact = self.generate_proposal(currstate, 0.2 * pwidth)
currstate, is_accept = self.accept_or_reject(
currstate, proposal, corrfact, idx, ninits
)
states[idx], logprobs[idx] = (
currstate.state.copy(),
currstate.logdens.copy(),
)
if idx >= ninits:
accepted = accepted + int(is_accept)
ratio = accepted / nsamps
return states, logprobs, ratio
def evaluate_logdens(self, loc):
"""
"""
logdenseval = self.logdens.eval(loc)
if self.logdens.has_gradient:
gradeval = self.logdens.gradeval(loc)
else:
gradeval = 0
if self.logdens.has_hessian:
hesseval = self.logdens.hesseval(loc)
else:
hesseval = 0
return MCMCState(
state=loc, logdens=logdenseval, loggrad=gradeval, loghess=hesseval
)
def accept_or_reject(self, currstate, proposal, corrfact, idx, ninits):
"""
"""
logaccprob = self.get_logaccprob(currstate, proposal, corrfact, idx, ninits)
if logaccprob < 0 or logaccprob < -np.log(np.random.rand()):
state = proposal
is_accept = True
else:
state = currstate
is_accept = False
return state, is_accept
def get_logaccprob(self, currstate, proposal, corrfact, idx, ninits):
"""
Returns NEGATIVE log acceptance probability, i.e.
corrected proposal - corrected currstate
"""
if idx < ninits:
corrfact = -corrfact
return (corrfact) + (proposal.logdens - currstate.logdens)
@abstractmethod
def generate_proposal(self, *args):
"""
"""
pass
def init_state_is_array(init_state):
"""
Checks whether init_state is compliant with an Nd algorithm.
That is, whether init_state is an (d,) np.ndarray.
"""
assert isinstance(init_state, np.ndarray), "Please enter init_state of shape (d,)"
assert len(init_state.shape) == 1, "Please enter init_state of shape (d,)"
return True
class RandomWalkMH(MetropolisHastings):
"""
"""
def __init__(self, logdens):
"""
"""
MetropolisHastings.__init__(self, logdens)
def generate_proposal(self, currstate, pwidth):
"""
"""
newloc = self.sample_randomwalk(currstate.state, pwidth)
proposal = self.evaluate_logdens(newloc)
corrfact = 0
return proposal, corrfact
def sample_randomwalk(self, mean, var):
"""
"""
return mean + np.sqrt(var) * np.random.randn(len(mean))
class LangevinMH(MetropolisHastings):
"""
"""
def __init__(self, logdens):
"""
"""
MetropolisHastings.__init__(self, logdens)
def generate_proposal(self, currstate, pwidth):
"""
"""
newloc = self.sample_langevin(currstate, pwidth)
proposal = self.evaluate_logdens(newloc)
corrfact = self.compute_corrfact_langevin(currstate, proposal, pwidth)
return proposal, corrfact
def sample_langevin(self, currstate, pwidth):
"""
"""
noise = np.random.randn(len(currstate.state))
return (
currstate.state - pwidth * currstate.loggrad + np.sqrt(2 * pwidth) * noise
)
def compute_corrfact_langevin(self, currstate, proposal, pwidth):
"""
"""
lognomin = self.kernel_langevin(currstate, proposal, pwidth)
logdenom = self.kernel_langevin(proposal, currstate, pwidth)
return lognomin - logdenom
def kernel_langevin(self, state1, state2, pwidth):
"""
"""
state2_dyn = state2.state - pwidth * state2.loggrad
dist = np.linalg.norm(state1.state - state2_dyn) ** 2
return 0.5 * dist / (2 * pwidth)
class PrecondLangevinMH(MetropolisHastings):
"""
Preconditioning with (inverse) Hessian.
"""
def __init__(self, logdens):
"""
precondeval returns M (and not M^{-1}) as used in Cald&Gir
"""
MetropolisHastings.__init__(self, logdens)
def generate_proposal(self, currstate, pwidth):
"""
"""
newloc = self.sample_langevin(currstate, pwidth)
proposal = self.evaluate_logdens(newloc)
corrfact = self.compute_corrfact_langevin(currstate, proposal, pwidth)
return proposal, corrfact
def sample_langevin(self, currstate, pwidth):
"""
"""
noise = np.random.multivariate_normal(
np.zeros(len(currstate.loghess)), np.linalg.inv(currstate.loghess)
)
prec_dyn = np.linalg.solve(currstate.loghess, currstate.loggrad)
return currstate.state - pwidth * prec_dyn + np.sqrt(2 * pwidth) * noise
def compute_corrfact_langevin(self, currstate, proposal, pwidth):
"""
"""
lognomin = self.kernel_langevin(currstate, proposal, pwidth)
logdenom = self.kernel_langevin(proposal, currstate, pwidth)
return lognomin - logdenom
def kernel_langevin(self, state1, state2, pwidth):
"""
"""
prec_dyn = np.linalg.solve(state2.loghess, state2.loggrad)
state2_dyn = state2.state - pwidth * prec_dyn
difference = state1.state - state2_dyn
return 0.5 * difference.dot(np.dot(state2.loghess, difference)) / (2 * pwidth)
class HamiltonianMC(MetropolisHastings):
"""
"""
def __init__(self, logdens, nsteps):
"""
"""
MetropolisHastings.__init__(self, logdens)
self.nsteps = nsteps
def generate_proposal(self, currstate, pwidth):
"""
pwidth is used as stepsize for self.nsteps leapfrog steps.
The correction factor is the quotient of the hamiltonian terms.
"""
momentum = np.random.multivariate_normal(
np.zeros(len(currstate.state)), np.eye(len(currstate.state))
)
# hamilt = self.evaluate_hamiltonian(momentum, currstate)
momentum_new, proposal = self.leapfrog_dynamics(momentum, currstate, pwidth)
# prop_hamilt = self.evaluate_hamiltonian(momentum_new, proposal)
corrfact = self.get_corrfact(momentum, momentum_new)
return proposal, corrfact
def leapfrog_dynamics(self, momentum, currstate, pwidth):
"""
"""
proposal = currstate
for idx in range(self.nsteps):
momentum, proposal = self.compute_next_lfstep(momentum, proposal, pwidth)
return momentum, proposal
def compute_next_lfstep(self, momentum, proposal, pwidth):
"""
"""
momentum = momentum - 0.5 * pwidth * proposal.loggrad
pstate = proposal.state + pwidth * momentum
proposal = self.evaluate_logdens(pstate)
momentum = momentum - 0.5 * pwidth * proposal.loggrad
return momentum, proposal
    def get_corrfact(self, mom, mom_new):
        """
        Kinetic-energy difference (new minus old momentum) entering the acceptance probability.
        """
        return 0.5 * (mom_new.T @ mom_new - mom.T @ mom)
class PrecondHamiltonianMC(MetropolisHastings):
"""
In fact, the true name would be either
* Riemannian-Gaussian HMC: if the preconditioner depends on the state
* Euclidean-Gaussian HMC: if the preconditioner is constant
[Girolami and Calderhead, 2011; Betancourt, 2018]
"""
def __init__(self, logdens, nsteps):
"""
evalprecond returns M (and not M^{-1}) as used in Cald&Gir.
M is the Hessian
"""
MetropolisHastings.__init__(self, logdens)
self.nsteps = nsteps
def generate_proposal(self, currstate, pwidth):
"""
pwidth is used as stepsize for self.nsteps leapfrog steps.
The correction factor is the quotient of the hamiltonian terms.
"""
momentum = np.random.multivariate_normal(
np.zeros(len(currstate.state)), currstate.loghess
)
momentum_new, proposal = self.leapfrog_dynamics(momentum, currstate, pwidth)
corrfact = self.get_corrfact(momentum, momentum_new, currstate, proposal)
return proposal, corrfact
def leapfrog_dynamics(self, momentum, currstate, pwidth):
"""
"""
proposal = currstate
for idx in range(self.nsteps):
momentum, proposal = self.compute_next_lfstep(momentum, proposal, pwidth)
return momentum, proposal
def compute_next_lfstep(self, momentum, proposal, pwidth):
"""
"""
momentum = momentum - 0.5 * pwidth * proposal.loggrad
pstate = proposal.state + pwidth * np.linalg.solve(proposal.loghess, momentum)
proposal = self.evaluate_logdens(pstate)
momentum = momentum - 0.5 * pwidth * proposal.loggrad
return momentum, proposal
def get_corrfact(self, mom, mom_new, currstate, proposal):
"""
"""
return 0.5 * (
mom_new.T @ np.linalg.solve(proposal.loghess, mom_new)
+ np.log(np.linalg.det(proposal.loghess))
- mom.T @ np.linalg.solve(currstate.loghess, mom)
- np.log(np.linalg.det(currstate.loghess))
)
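if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the original module): random-walk
    # Metropolis-Hastings on a 2d standard Gaussian. The sampler expects the NEGATIVE
    # log-density E(s); see the module docstring.
    def _neg_log_gauss(s):
        return 0.5 * s @ s
    _states, _neglogdens, _ratio = metropolishastings_rw(
        _neg_log_gauss, nsamps=1000, initstate=np.zeros(2), pwidth=0.5, ninits=100
    )
    print("acceptance ratio:", _ratio)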
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing RandomCropAndResize op in DE
"""
import numpy as np
import cv2
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.py_transforms as py_vision
import mindspore.dataset.vision.utils as mode
import mindspore.dataset as ds
from mindspore import log as logger
from util import diff_mse, save_and_check_md5, visualize_list, \
config_get_set_seed, config_get_set_num_parallel_workers
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
GENERATE_GOLDEN = False
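# GENERATE_GOLDEN presumably controls whether save_and_check_md5 regenerates the golden .npz
# files (True) or compares against them (False).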
def test_random_crop_and_resize_op_c(plot=False):
"""
Test RandomCropAndResize op in c transforms
"""
logger.info("test_random_crop_and_resize_op_c")
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
# With these inputs we expect the code to crop the whole image
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), (2, 2), (1, 3))
data1 = data1.map(input_columns=["image"], operations=decode_op)
data1 = data1.map(input_columns=["image"], operations=random_crop_and_resize_op)
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data2 = data2.map(input_columns=["image"], operations=decode_op)
num_iter = 0
crop_and_resize_images = []
original_images = []
for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1), data2.create_dict_iterator(num_epochs=1)):
crop_and_resize = item1["image"]
original = item2["image"]
# Note: resize the original image with the same size as the one applied RandomResizedCrop()
original = cv2.resize(original, (512, 256))
mse = diff_mse(crop_and_resize, original)
assert mse == 0
logger.info("random_crop_and_resize_op_{}, mse: {}".format(num_iter + 1, mse))
num_iter += 1
crop_and_resize_images.append(crop_and_resize)
original_images.append(original)
if plot:
visualize_list(original_images, crop_and_resize_images)
def test_random_crop_and_resize_op_py(plot=False):
"""
Test RandomCropAndResize op in py transforms
"""
logger.info("test_random_crop_and_resize_op_py")
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# With these inputs we expect the code to crop the whole image
transforms1 = [
py_vision.Decode(),
py_vision.RandomResizedCrop((256, 512), (2, 2), (1, 3)),
py_vision.ToTensor()
]
transform1 = mindspore.dataset.transforms.py_transforms.Compose(transforms1)
data1 = data1.map(input_columns=["image"], operations=transform1)
    # Second dataset for comparison
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms2 = [
py_vision.Decode(),
py_vision.ToTensor()
]
transform2 = mindspore.dataset.transforms.py_transforms.Compose(transforms2)
data2 = data2.map(input_columns=["image"], operations=transform2)
num_iter = 0
crop_and_resize_images = []
original_images = []
for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1), data2.create_dict_iterator(num_epochs=1)):
crop_and_resize = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
original = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
original = cv2.resize(original, (512, 256))
mse = diff_mse(crop_and_resize, original)
# Due to rounding error the mse for Python is not exactly 0
assert mse <= 0.05
logger.info("random_crop_and_resize_op_{}, mse: {}".format(num_iter + 1, mse))
num_iter += 1
crop_and_resize_images.append(crop_and_resize)
original_images.append(original)
if plot:
visualize_list(original_images, crop_and_resize_images)
def test_random_crop_and_resize_01():
"""
Test RandomCropAndResize with md5 check, expected to pass
"""
logger.info("test_random_crop_and_resize_01")
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), (0.5, 0.5), (1, 1))
data1 = data1.map(input_columns=["image"], operations=decode_op)
data1 = data1.map(input_columns=["image"], operations=random_crop_and_resize_op)
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
py_vision.Decode(),
py_vision.RandomResizedCrop((256, 512), (0.5, 0.5), (1, 1)),
py_vision.ToTensor()
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
data2 = data2.map(input_columns=["image"], operations=transform)
filename1 = "random_crop_and_resize_01_c_result.npz"
filename2 = "random_crop_and_resize_01_py_result.npz"
save_and_check_md5(data1, filename1, generate_golden=GENERATE_GOLDEN)
save_and_check_md5(data2, filename2, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_and_resize_02():
"""
    Test RandomCropAndResize with md5 check: Image interpolation mode is Inter.NEAREST,
expected to pass
"""
logger.info("test_random_crop_and_resize_02")
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), interpolation=mode.Inter.NEAREST)
data1 = data1.map(input_columns=["image"], operations=decode_op)
data1 = data1.map(input_columns=["image"], operations=random_crop_and_resize_op)
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
py_vision.Decode(),
py_vision.RandomResizedCrop((256, 512), interpolation=mode.Inter.NEAREST),
py_vision.ToTensor()
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
data2 = data2.map(input_columns=["image"], operations=transform)
filename1 = "random_crop_and_resize_02_c_result.npz"
filename2 = "random_crop_and_resize_02_py_result.npz"
save_and_check_md5(data1, filename1, generate_golden=GENERATE_GOLDEN)
save_and_check_md5(data2, filename2, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_and_resize_03():
"""
Test RandomCropAndResize with md5 check: max_attempts is 1, expected to pass
"""
logger.info("test_random_crop_and_resize_03")
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), max_attempts=1)
data1 = data1.map(input_columns=["image"], operations=decode_op)
data1 = data1.map(input_columns=["image"], operations=random_crop_and_resize_op)
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
py_vision.Decode(),
py_vision.RandomResizedCrop((256, 512), max_attempts=1),
py_vision.ToTensor()
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
data2 = data2.map(input_columns=["image"], operations=transform)
filename1 = "random_crop_and_resize_03_c_result.npz"
filename2 = "random_crop_and_resize_03_py_result.npz"
save_and_check_md5(data1, filename1, generate_golden=GENERATE_GOLDEN)
save_and_check_md5(data2, filename2, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_and_resize_04_c():
"""
    Test RandomCropAndResize with c_transforms: invalid range of scale (max<min),
expected to raise ValueError
"""
logger.info("test_random_crop_and_resize_04_c")
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
try:
# If input range of scale is not in the order of (min, max), ValueError will be raised.
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), (1, 0.5), (0.5, 0.5))
data = data.map(input_columns=["image"], operations=decode_op)
data = data.map(input_columns=["image"], operations=random_crop_and_resize_op)
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Input is not within the required interval of (0 to 16777216)." in str(e)
def test_random_crop_and_resize_04_py():
"""
Test RandomCropAndResize with py_transforms: invalid range of scale (max<min),
expected to raise ValueError
"""
logger.info("test_random_crop_and_resize_04_py")
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
try:
transforms = [
py_vision.Decode(),
# If input range of scale is not in the order of (min, max), ValueError will be raised.
py_vision.RandomResizedCrop((256, 512), (1, 0.5), (0.5, 0.5)),
py_vision.ToTensor()
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
data = data.map(input_columns=["image"], operations=transform)
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Input is not within the required interval of (0 to 16777216)." in str(e)
def test_random_crop_and_resize_05_c():
"""
Test RandomCropAndResize with c_transforms: invalid range of ratio (max<min),
expected to raise ValueError
"""
logger.info("test_random_crop_and_resize_05_c")
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
try:
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), (1, 1), (1, 0.5))
# If input range of ratio is not in the order of (min, max), ValueError will be raised.
data = data.map(input_columns=["image"], operations=decode_op)
data = data.map(input_columns=["image"], operations=random_crop_and_resize_op)
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Input is not within the required interval of (0 to 16777216)." in str(e)
def test_random_crop_and_resize_05_py():
"""
Test RandomCropAndResize with py_transforms: invalid range of ratio (max<min),
expected to raise ValueError
"""
logger.info("test_random_crop_and_resize_05_py")
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
try:
transforms = [
py_vision.Decode(),
# If input range of ratio is not in the order of (min, max), ValueError will be raised.
py_vision.RandomResizedCrop((256, 512), (1, 1), (1, 0.5)),
py_vision.ToTensor()
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
data = data.map(input_columns=["image"], operations=transform)
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Input is not within the required interval of (0 to 16777216)." in str(e)
def test_random_crop_and_resize_comp(plot=False):
"""
Test RandomCropAndResize and compare between python and c image augmentation
"""
logger.info("test_random_crop_and_resize_comp")
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
random_crop_and_resize_op = c_vision.RandomResizedCrop(512, (1, 1), (0.5, 0.5))
data1 = data1.map(input_columns=["image"], operations=decode_op)
data1 = data1.map(input_columns=["image"], operations=random_crop_and_resize_op)
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
py_vision.Decode(),
py_vision.RandomResizedCrop(512, (1, 1), (0.5, 0.5)),
py_vision.ToTensor()
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
data2 = data2.map(input_columns=["image"], operations=transform)
image_c_cropped = []
image_py_cropped = []
for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1), data2.create_dict_iterator(num_epochs=1)):
c_image = item1["image"]
py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
image_c_cropped.append(c_image)
image_py_cropped.append(py_image)
mse = diff_mse(c_image, py_image)
assert mse < 0.02 # rounding error
if plot:
visualize_list(image_c_cropped, image_py_cropped, visualize_mode=2)
def test_random_crop_and_resize_06():
"""
Test RandomCropAndResize with c_transforms: invalid values for scale,
    expected to raise TypeError
"""
logger.info("test_random_crop_and_resize_05_c")
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
try:
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), scale="", ratio=(1, 0.5))
data = data.map(input_columns=["image"], operations=decode_op)
data.map(input_columns=["image"], operations=random_crop_and_resize_op)
except TypeError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Argument scale with value \"\" is not of type (<class 'tuple'>,)" in str(e)
try:
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), scale=(1, "2"), ratio=(1, 0.5))
data = data.map(input_columns=["image"], operations=decode_op)
data.map(input_columns=["image"], operations=random_crop_and_resize_op)
except TypeError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Argument scale[1] with value 2 is not of type (<class 'float'>, <class 'int'>)." in str(e)
if __name__ == "__main__":
test_random_crop_and_resize_op_c(True)
test_random_crop_and_resize_op_py(True)
test_random_crop_and_resize_01()
test_random_crop_and_resize_02()
test_random_crop_and_resize_03()
test_random_crop_and_resize_04_c()
test_random_crop_and_resize_04_py()
test_random_crop_and_resize_05_c()
test_random_crop_and_resize_05_py()
test_random_crop_and_resize_06()
test_random_crop_and_resize_comp(True)
|
import pytest
import torch
from torch.testing import assert_allclose
from kornia.augmentation.random_generator import (
random_prob_generator,
random_color_jitter_generator,
random_perspective_generator,
random_affine_generator,
random_rotation_generator,
random_crop_generator,
random_crop_size_generator,
random_rectangles_params_generator,
center_crop_generator,
random_motion_blur_generator,
random_solarize_generator,
random_posterize_generator,
random_sharpness_generator,
random_mixup_generator,
random_cutmix_generator,
)
class RandomGeneratorBaseTests():
def test_valid_param_combinations(self, device, dtype):
raise NotImplementedError
def test_invalid_param_combinations(self, device, dtype):
raise NotImplementedError
def test_random_gen(self, device, dtype):
raise NotImplementedError
def test_same_on_batch(self, device, dtype):
raise NotImplementedError
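# Each concrete generator test below implements four checks: accepted parameter combinations,
# rejected parameter combinations, the parameters sampled for a fixed seed, and the behaviour
# of same_on_batch.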
class TestRandomProbGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('p', [0., 0.5, 1.])
@pytest.mark.parametrize('batch_size', [0, 1, 8])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(self, p, batch_size, same_on_batch, device, dtype):
random_prob_generator(batch_size=batch_size, p=p, same_on_batch=same_on_batch)
@pytest.mark.parametrize(
'p',
[
# Should be failed if p > 1. or p < 0.
(-1.),
(2.)
]
)
def test_invalid_param_combinations(self, p, device, dtype):
with pytest.raises(Exception):
random_prob_generator(batch_size=8, p=p)
@pytest.mark.parametrize(
'p,expected',
[(0., [False] * 8), (0.5, [False, False, True, False, True, False, True, False]), (1., [True] * 8)]
)
def test_random_gen(self, p, expected, device, dtype):
torch.manual_seed(42)
batch_size = 8
res = random_prob_generator(batch_size=batch_size, p=p)
assert (res == torch.tensor(expected)).long().sum() == batch_size
@pytest.mark.parametrize("seed,expected", [
(42, [False] * 8),
(0, [True] * 8),
])
def test_same_on_batch(self, seed, expected, device, dtype):
torch.manual_seed(seed)
batch_size = 8
res = random_prob_generator(batch_size=batch_size, p=.5, same_on_batch=True)
assert (res == torch.tensor(expected)).long().sum() == batch_size
class TestColorJitterGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('brightness', [None, torch.tensor([0.8, 1.2])])
@pytest.mark.parametrize('contrast', [None, torch.tensor([0.8, 1.2])])
@pytest.mark.parametrize('saturation', [None, torch.tensor([0.8, 1.2])])
@pytest.mark.parametrize('hue', [None, torch.tensor([-0.1, 0.1])])
@pytest.mark.parametrize('batch_size', [0, 1, 8])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(
self, brightness, contrast, saturation, hue, batch_size, same_on_batch, device, dtype
):
random_color_jitter_generator(
batch_size,
brightness.to(device=device, dtype=dtype) if brightness is not None else None,
contrast.to(device=device, dtype=dtype) if contrast is not None else None,
saturation.to(device=device, dtype=dtype) if saturation is not None else None,
hue.to(device=device, dtype=dtype) if hue is not None else None, same_on_batch
)
@pytest.mark.parametrize(
'brightness,contrast,saturation,hue',
[
# Should be failed if value out of bounds or tensor.shape != [1, 2]
(torch.tensor([-1., 2.]), None, None, None),
(torch.tensor([0., 3.]), None, None, None),
(torch.tensor(0.), None, None, None),
(torch.tensor([0.]), None, None, None),
(torch.tensor([0., 1., 2.]), None, None, None),
(None, torch.tensor([-1., 2.]), None, None),
(None, torch.tensor(0.), None, None),
(None, torch.tensor([0.]), None, None),
(None, torch.tensor([0., 1., 2.]), None, None),
(None, None, torch.tensor([-1., 2.]), None),
(None, None, torch.tensor(0.), None),
(None, None, torch.tensor([0.]), None),
(None, None, torch.tensor([0., 1., 2.]), None),
(None, None, None, torch.tensor([-1., 0.])),
(None, None, None, torch.tensor([0, 1.])),
(None, None, None, torch.tensor(0.)),
(None, None, None, torch.tensor([0.])),
(None, None, None, torch.tensor([0., 1., 2.])),
]
)
def test_invalid_param_combinations(self, brightness, contrast, saturation, hue, device, dtype):
with pytest.raises(Exception):
random_color_jitter_generator(
8,
brightness.to(device=device, dtype=dtype) if brightness is not None else None,
contrast.to(device=device, dtype=dtype) if contrast is not None else None,
saturation.to(device=device, dtype=dtype) if saturation is not None else None,
hue.to(device=device, dtype=dtype) if hue is not None else None
)
def test_random_gen(self, device, dtype):
torch.manual_seed(42)
batch_size = 8
jitter_params = random_color_jitter_generator(
batch_size,
brightness=torch.tensor([0.8, 1.2], device=device, dtype=dtype),
contrast=torch.tensor([0.7, 1.3], device=device, dtype=dtype),
saturation=torch.tensor([0.6, 1.4], device=device, dtype=dtype),
hue=torch.tensor([-0.1, 0.1], device=device, dtype=dtype)
)
expected_jitter_params = {
'brightness_factor': torch.tensor(
[1.1529, 1.1660, 0.9531, 1.1837, 0.9562, 1.0404, 0.9026, 1.1175], device=device, dtype=dtype
),
'contrast_factor': torch.tensor(
[1.2645, 0.7799, 1.2608, 1.0561, 1.2216, 1.0406, 1.1447, 0.9576], device=device, dtype=dtype
),
'hue_factor': torch.tensor(
[0.0771, 0.0148, -0.0467, 0.0255, -0.0461, -0.0117, -0.0406, 0.0663], device=device, dtype=dtype
),
'saturation_factor': torch.tensor(
[0.6843, 0.8156, 0.8871, 0.7595, 1.0378, 0.6049, 1.3612, 0.6602], device=device, dtype=dtype
),
'order': torch.tensor([3, 2, 0, 1], device=device, dtype=dtype)
}
assert set(list(jitter_params.keys())) == set([
'brightness_factor', 'contrast_factor', 'hue_factor', 'saturation_factor', 'order']), \
"Redundant keys found apart from \
'brightness_factor', 'contrast_factor', 'hue_factor', 'saturation_factor', 'order'"
assert_allclose(
jitter_params['brightness_factor'], expected_jitter_params['brightness_factor'], rtol=1e-4, atol=1e-4
)
assert_allclose(
jitter_params['contrast_factor'], expected_jitter_params['contrast_factor'], rtol=1e-4, atol=1e-4
)
assert_allclose(jitter_params['hue_factor'], expected_jitter_params['hue_factor'], rtol=1e-4, atol=1e-4)
assert_allclose(
jitter_params['saturation_factor'], expected_jitter_params['saturation_factor'], rtol=1e-4, atol=1e-4
)
assert_allclose(jitter_params['order'].to(dtype), expected_jitter_params['order'], rtol=1e-4, atol=1e-4)
def test_same_on_batch(self, device, dtype):
torch.manual_seed(42)
batch_size = 8
jitter_params = random_color_jitter_generator(
batch_size,
brightness=torch.tensor([0.8, 1.2], device=device, dtype=dtype),
contrast=torch.tensor([0.7, 1.3], device=device, dtype=dtype),
saturation=torch.tensor([0.6, 1.4], device=device, dtype=dtype),
hue=torch.tensor([-0.1, 0.1], device=device, dtype=dtype),
same_on_batch=True
)
expected_res = {
'brightness_factor': torch.tensor([1.1529] * batch_size, device=device, dtype=dtype),
'contrast_factor': torch.tensor([1.2490] * batch_size, device=device, dtype=dtype),
'hue_factor': torch.tensor([-0.0234] * batch_size, device=device, dtype=dtype),
'saturation_factor': torch.tensor([1.3674] * batch_size, device=device, dtype=dtype),
'order': torch.tensor([2, 3, 0, 1], device=device, dtype=dtype)
}
assert_allclose(jitter_params['brightness_factor'], expected_res['brightness_factor'], rtol=1e-4, atol=1e-4)
assert_allclose(jitter_params['contrast_factor'], expected_res['contrast_factor'], rtol=1e-4, atol=1e-4)
assert_allclose(jitter_params['hue_factor'], expected_res['hue_factor'], rtol=1e-4, atol=1e-4)
assert_allclose(jitter_params['saturation_factor'], expected_res['saturation_factor'], rtol=1e-4, atol=1e-4)
assert_allclose(jitter_params['order'].to(dtype), expected_res['order'], rtol=1e-4, atol=1e-4)
class TestRandomPerspectiveGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('height,width', [(200, 200)])
@pytest.mark.parametrize('distortion_scale', [torch.tensor(0.), torch.tensor(0.5), torch.tensor(1.)])
@pytest.mark.parametrize('batch_size', [0, 1, 8])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(self, height, width, distortion_scale, batch_size, same_on_batch, device, dtype):
random_perspective_generator(
batch_size=8,
height=height,
width=width,
distortion_scale=distortion_scale.to(device=device, dtype=dtype),
same_on_batch=same_on_batch
)
@pytest.mark.parametrize(
'height,width,distortion_scale',
[
# Should be failed if distortion_scale > 1. or distortion_scale < 0.
(-100, 100, torch.tensor(0.5)),
(100, -100, torch.tensor(0.5)),
(100, 100, torch.tensor(-0.5)),
(100, 100, torch.tensor(1.5)),
(100, 100, torch.tensor([0., 0.5])),
]
)
def test_invalid_param_combinations(self, height, width, distortion_scale, device, dtype):
with pytest.raises(Exception):
random_perspective_generator(
batch_size=8,
height=height,
width=width,
distortion_scale=distortion_scale.to(device=device, dtype=dtype)
)
def test_random_gen(self, device, dtype):
torch.manual_seed(42)
batch_size = 2
res = random_perspective_generator(batch_size, 200, 200, torch.tensor(0.5, device=device, dtype=dtype))
expected = dict(
start_points=torch.tensor(
[[[0., 0.], [199., 0.], [199., 199.], [0., 199.]], [[0., 0.], [199., 0.], [199., 199.], [0., 199.]]],
device=device,
dtype=dtype
),
end_points=torch.tensor(
[
[[44.1135, 45.7502], [179.8568, 47.9653], [179.4776, 168.9552], [12.8286, 159.3179]],
[[47.0386, 6.6593], [152.2701, 29.6790], [155.5298, 170.6142], [37.0547, 177.5298]]
],
device=device,
dtype=dtype
),
)
assert res.keys() == expected.keys()
assert_allclose(res['start_points'], expected['start_points'])
assert_allclose(res['end_points'], expected['end_points'])
def test_same_on_batch(self, device, dtype):
torch.manual_seed(42)
batch_size = 2
res = random_perspective_generator(
batch_size, 200, 200, torch.tensor(0.5, device=device, dtype=dtype), same_on_batch=True
)
expected = dict(
start_points=torch.tensor([[[0., 0.], [199., 0.], [199., 199.], [0., 199.]]], device=device,
dtype=dtype).repeat(2, 1, 1),
end_points=torch.tensor(
[[[44.1135, 45.7502], [179.8568, 47.9653], [179.4776, 168.9552], [12.8286, 159.3179]]],
device=device,
dtype=dtype
).repeat(2, 1, 1),
)
assert res.keys() == expected.keys()
assert_allclose(res['start_points'], expected['start_points'])
assert_allclose(res['end_points'], expected['end_points'])
class TestRandomAffineGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('batch_size', [0, 1, 4])
@pytest.mark.parametrize('height', [200])
@pytest.mark.parametrize('width', [300])
@pytest.mark.parametrize('degrees', [torch.tensor([0, 30])])
@pytest.mark.parametrize('translate', [None, torch.tensor([0.1, 0.1])])
@pytest.mark.parametrize('scale', [None, torch.tensor([0.7, 1.2])])
@pytest.mark.parametrize('shear', [None, torch.tensor([[0, 20], [0, 20]])])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(
self, batch_size, height, width, degrees, translate, scale, shear, same_on_batch, device, dtype
):
random_affine_generator(
batch_size=batch_size,
height=height,
width=width,
degrees=degrees.to(device=device, dtype=dtype),
translate=translate.to(device=device, dtype=dtype) if translate is not None else None,
scale=scale.to(device=device, dtype=dtype) if scale is not None else None,
shear=shear.to(device=device, dtype=dtype) if shear is not None else None,
same_on_batch=same_on_batch
)
@pytest.mark.parametrize(
'height,width,degrees,translate,scale,shear', [
(-100, 100, torch.tensor([10, 20]), None, None, None),
(100, -100, torch.tensor([10, 20]), None, None, None),
(100, 100, 0.5, None, None, None),
(100, 100, torch.tensor([10, 20, 30]), None, None, None),
(100, 100, torch.tensor([10, 20]), torch.tensor([0.1]), None, None),
(10, 10, torch.tensor([1, 2]), torch.tensor([0.1, 0.2, 0.3]), None, None),
(100, 100, torch.tensor([10, 20]), None, torch.tensor([1]), None),
(100, 100, torch.tensor([10, 20]), None, torch.tensor([1, 2, 3]), None),
(100, 100, torch.tensor([10, 20]), None, None, torch.tensor([1])),
(100, 100, torch.tensor([10, 20]), None, None, torch.tensor([1, 2])),
(10, 10, torch.tensor([1, 2]), None, None, torch.tensor([1, 2, 3])),
(10, 10, torch.tensor([1, 2]), None, None, torch.tensor([1, 2, 3, 4])),
(10, 10, torch.tensor([1, 2]), None, None, torch.tensor([1, 2, 3, 4, 5])),
]
)
def test_invalid_param_combinations(self, height, width, degrees, translate, scale, shear, device, dtype):
with pytest.raises(Exception):
random_affine_generator(
batch_size=8,
height=height,
width=width,
degrees=degrees.to(device=device, dtype=dtype),
translate=translate.to(device=device, dtype=dtype) if translate is not None else None,
scale=scale.to(device=device, dtype=dtype) if scale is not None else None,
shear=shear.to(device=device, dtype=dtype) if shear is not None else None
)
def test_random_gen(self, device, dtype):
torch.manual_seed(42)
degrees = torch.tensor([10, 20], device=device, dtype=dtype)
translate = torch.tensor([0.1, 0.1], device=device, dtype=dtype)
scale = torch.tensor([0.7, 1.2], device=device, dtype=dtype)
shear = torch.tensor([[10, 20], [10, 20]], device=device, dtype=dtype)
res = random_affine_generator(
batch_size=2,
height=200,
width=200,
degrees=degrees,
translate=translate,
scale=scale,
shear=shear,
same_on_batch=False
)
expected = dict(
translations=torch.tensor([[-4.3821, -9.7371], [4.0358, 11.7457]], device=device, dtype=dtype),
center=torch.tensor([[99.5000, 99.5000], [99.5000, 99.5000]], device=device, dtype=dtype),
scale=torch.tensor([[0.8914, 0.8914], [1.1797, 1.1797]], device=device, dtype=dtype),
angle=torch.tensor([18.8227, 19.1500], device=device, dtype=dtype),
sx=torch.tensor([19.4077, 11.3319], device=device, dtype=dtype),
sy=torch.tensor([19.3460, 15.9358], device=device, dtype=dtype)
)
assert res.keys() == expected.keys()
assert_allclose(res['translations'], expected['translations'], rtol=1e-4, atol=1e-4)
assert_allclose(res['center'], expected['center'], rtol=1e-4, atol=1e-4)
assert_allclose(res['scale'], expected['scale'], rtol=1e-4, atol=1e-4)
assert_allclose(res['angle'], expected['angle'], rtol=1e-4, atol=1e-4)
assert_allclose(res['sx'], expected['sx'], rtol=1e-4, atol=1e-4)
assert_allclose(res['sy'], expected['sy'], rtol=1e-4, atol=1e-4)
def test_same_on_batch(self, device, dtype):
torch.manual_seed(42)
degrees = torch.tensor([10, 20], device=device, dtype=dtype)
translate = torch.tensor([0.1, 0.1], device=device, dtype=dtype)
scale = torch.tensor([0.7, 1.2], device=device, dtype=dtype)
shear = torch.tensor([[10, 20], [10, 20]], device=device, dtype=dtype)
res = random_affine_generator(
batch_size=2,
height=200,
width=200,
degrees=degrees,
translate=translate,
scale=scale,
shear=shear,
same_on_batch=True
)
expected = dict(
translations=torch.tensor([[-4.6854, 18.3722], [-4.6854, 18.3722]], device=device, dtype=dtype),
center=torch.tensor([[99.5000, 99.5000], [99.5000, 99.5000]], device=device, dtype=dtype),
scale=torch.tensor([[1.1575, 1.1575], [1.1575, 1.1575]], device=device, dtype=dtype),
angle=torch.tensor([18.8227, 18.8227], device=device, dtype=dtype),
sx=torch.tensor([13.9045, 13.9045], device=device, dtype=dtype),
sy=torch.tensor([16.0090, 16.0090], device=device, dtype=dtype)
)
assert res.keys() == expected.keys()
assert_allclose(res['translations'], expected['translations'], rtol=1e-4, atol=1e-4)
assert_allclose(res['center'], expected['center'], rtol=1e-4, atol=1e-4)
assert_allclose(res['scale'], expected['scale'], rtol=1e-4, atol=1e-4)
assert_allclose(res['angle'], expected['angle'], rtol=1e-4, atol=1e-4)
assert_allclose(res['sx'], expected['sx'], rtol=1e-4, atol=1e-4)
assert_allclose(res['sy'], expected['sy'], rtol=1e-4, atol=1e-4)
class TestRandomRotationGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('batch_size', [0, 1, 8])
@pytest.mark.parametrize('degrees', [torch.tensor([0, 30])])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(self, batch_size, degrees, same_on_batch, device, dtype):
random_rotation_generator(
batch_size=batch_size, degrees=degrees.to(device=device, dtype=dtype), same_on_batch=same_on_batch
)
@pytest.mark.parametrize('degrees', [(torch.tensor(10)), (torch.tensor([10])), (torch.tensor([10, 20, 30]))])
def test_invalid_param_combinations(self, degrees, device, dtype):
batch_size = 8
with pytest.raises(Exception):
random_rotation_generator(batch_size=batch_size, degrees=degrees.to(device=device, dtype=dtype))
def test_random_gen(self, device, dtype):
torch.manual_seed(42)
degrees = torch.tensor([10, 20])
res = random_rotation_generator(
batch_size=2, degrees=degrees.to(device=device, dtype=dtype), same_on_batch=False
)
expected = dict(degrees=torch.tensor([18.8227, 19.1500], device=device, dtype=dtype))
assert res.keys() == expected.keys()
assert_allclose(res['degrees'], expected['degrees'])
def test_same_on_batch(self, device, dtype):
torch.manual_seed(42)
degrees = torch.tensor([10, 20])
res = random_rotation_generator(
batch_size=2, degrees=degrees.to(device=device, dtype=dtype), same_on_batch=True
)
expected = dict(degrees=torch.tensor([18.8227, 18.8227], device=device, dtype=dtype))
assert res.keys() == expected.keys()
assert_allclose(res['degrees'], expected['degrees'])
class TestRandomCropGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('batch_size', [0, 2])
@pytest.mark.parametrize('input_size', [(200, 200)])
@pytest.mark.parametrize('size', [(100, 100), torch.tensor([50, 50])])
@pytest.mark.parametrize('resize_to', [None, (100, 100)])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(self, batch_size, input_size, size, resize_to, same_on_batch, device, dtype):
if isinstance(size, torch.Tensor):
size = size.repeat(batch_size, 1).to(device=device, dtype=dtype)
random_crop_generator(
batch_size=batch_size,
input_size=input_size,
size=size,
resize_to=resize_to,
same_on_batch=same_on_batch,
device=device,
dtype=dtype
)
@pytest.mark.parametrize(
'input_size,size,resize_to', [
((-300, 300), (200, 200), (100, 100)),
((200, 200), torch.tensor([50, 50]), (100, 100)),
]
)
def test_invalid_param_combinations(self, input_size, size, resize_to, device, dtype):
batch_size = 2
with pytest.raises(Exception):
random_crop_generator(
batch_size=batch_size,
input_size=input_size,
size=size.to(device=device, dtype=dtype) if isinstance(size, torch.Tensor) else size,
resize_to=resize_to
)
def test_random_gen(self, device, dtype):
torch.manual_seed(42)
degrees = torch.tensor([10, 20], device=device, dtype=dtype)
res = random_crop_generator(
batch_size=2,
input_size=(100, 100),
size=torch.tensor([[50, 60], [70, 80]], device=device, dtype=dtype),
resize_to=(200, 200)
)
expected = dict(
src=torch.tensor(
[[[36, 19], [95, 19], [95, 68], [36, 68]], [[19, 29], [98, 29], [98, 98], [19, 98]]],
device=device,
dtype=dtype
),
dst=torch.tensor(
[[[0, 0], [199, 0], [199, 199], [0, 199]], [[0, 0], [199, 0], [199, 199], [0, 199]]],
device=device,
dtype=dtype
),
input_size=torch.tensor([[100, 100], [100, 100]], device=device, dtype=torch.long)
)
assert res.keys() == expected.keys()
assert_allclose(res['src'], expected['src'])
assert_allclose(res['dst'], expected['dst'])
def test_same_on_batch(self, device, dtype):
torch.manual_seed(42)
degrees = torch.tensor([10, 20], device=device, dtype=dtype)
res = random_crop_generator(
batch_size=2,
input_size=(100, 100),
size=torch.tensor([[50, 60], [70, 80]], device=device, dtype=dtype),
resize_to=(200, 200),
same_on_batch=True
)
expected = dict(
src=torch.tensor(
[[[36, 46], [95, 46], [95, 95], [36, 95]], [[36, 46], [115, 46], [115, 115], [36, 115]]],
device=device,
dtype=dtype
),
dst=torch.tensor(
[[[0, 0], [199, 0], [199, 199], [0, 199]], [[0, 0], [199, 0], [199, 199], [0, 199]]],
device=device,
dtype=dtype
),
input_size=torch.tensor([[100, 100], [100, 100]], device=device, dtype=torch.long)
)
assert res.keys() == expected.keys()
assert_allclose(res['src'], expected['src'])
assert_allclose(res['dst'], expected['dst'])
class TestRandomCropSizeGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('batch_size', [0, 1, 8])
@pytest.mark.parametrize('size', [(200, 200)])
@pytest.mark.parametrize('scale', [torch.tensor([.7, 1.3])])
@pytest.mark.parametrize('ratio', [torch.tensor([.9, 1.1])])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(self, batch_size, size, scale, ratio, same_on_batch, device, dtype):
random_crop_size_generator(
batch_size=batch_size,
size=size,
scale=scale.to(device=device, dtype=dtype),
ratio=ratio.to(device=device, dtype=dtype),
same_on_batch=same_on_batch
)
@pytest.mark.parametrize(
'size,scale,ratio', [
((100), torch.tensor([.7, 1.3]), torch.tensor([.9, 1.1])),
((100, 100, 100), torch.tensor([.7, 1.3]), torch.tensor([.9, 1.1])),
((100, 100), torch.tensor([.7]), torch.tensor([.9, 1.1])),
((100, 100), torch.tensor([.7, 1.3, 1.5]), torch.tensor([.9, 1.1])),
((100, 100), torch.tensor([.7, 1.3]), torch.tensor([.9])),
((100, 100), torch.tensor([.7, 1.3]), torch.tensor([.9, 1.1, 1.3])),
]
)
def test_invalid_param_combinations(self, size, scale, ratio, device, dtype):
batch_size = 2
with pytest.raises(Exception):
random_crop_size_generator(
batch_size=batch_size,
size=size,
scale=scale.to(device=device, dtype=dtype),
ratio=ratio.to(device=device, dtype=dtype),
                same_on_batch=True
)
def test_random_gen(self, device, dtype):
torch.manual_seed(42)
res = random_crop_size_generator(
batch_size=8,
size=(100, 100),
scale=torch.tensor([0.7, 1.3], device=device, dtype=dtype),
ratio=torch.tensor([0.9, 1.1], device=device, dtype=dtype),
same_on_batch=False
)
expected = dict(
size=torch.tensor(
[[99, 94], [91, 95], [90, 96], [87, 86], [94, 98], [87, 81], [85, 93], [83, 90]],
device=device,
dtype=dtype
)
)
assert res.keys() == expected.keys()
assert_allclose(res['size'], expected['size'])
res = random_crop_size_generator(
batch_size=100,
size=(100, 100),
scale=torch.tensor([0.999, 1.], device=device, dtype=dtype),
ratio=torch.tensor([1., 1.], device=device, dtype=dtype),
same_on_batch=False
)
expected = dict(size=torch.tensor([[100, 100]], device=device, dtype=dtype).repeat(100, 1))
assert res.keys() == expected.keys()
assert_allclose(res['size'], expected['size'])
def test_same_on_batch(self, device, dtype):
torch.manual_seed(42)
degrees = torch.tensor([10, 20])
res = random_crop_size_generator(
batch_size=8,
size=(100, 100),
scale=torch.tensor([0.7, 1.3], device=device, dtype=dtype),
ratio=torch.tensor([0.9, 1.1], device=device, dtype=dtype),
same_on_batch=True
)
expected = dict(
size=torch.tensor(
[[99, 95], [99, 95], [99, 95], [99, 95], [99, 95], [99, 95], [99, 95], [99, 95]],
device=device,
dtype=dtype
),
)
assert res.keys() == expected.keys()
assert_allclose(res['size'], expected['size'])
class TestRandomRectangleGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('batch_size', [0, 1, 8])
@pytest.mark.parametrize('height', [200])
@pytest.mark.parametrize('width', [300])
@pytest.mark.parametrize('scale', [torch.tensor([.7, 1.1])])
@pytest.mark.parametrize('ratio', [torch.tensor([.7, 1.1])])
@pytest.mark.parametrize('value', [0])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(
self, batch_size, height, width, scale, ratio, value, same_on_batch, device, dtype
):
random_rectangles_params_generator(
batch_size=batch_size,
height=height,
width=width,
scale=scale.to(device=device, dtype=dtype),
ratio=ratio.to(device=device, dtype=dtype),
value=value,
same_on_batch=same_on_batch
)
@pytest.mark.parametrize(
'height,width,scale,ratio,value', [
(-100, 100, torch.tensor([0.7, 1.3]), torch.tensor([0.7, 1.3]), 0),
(100, -100, torch.tensor([0.7, 1.3]), torch.tensor([0.7, 1.3]), 0),
(100, -100, torch.tensor([0.7]), torch.tensor([0.7, 1.3]), 0),
(100, 100, torch.tensor([0.7, 1.3, 1.5]), torch.tensor([0.7, 1.3]), 0),
(100, 100, torch.tensor([0.7, 1.3]), torch.tensor([0.7]), 0),
(100, 100, torch.tensor([0.7, 1.3]), torch.tensor([0.7, 1.3, 1.5]), 0),
(100, 100, torch.tensor([0.7, 1.3]), torch.tensor([0.7, 1.3]), -1),
(100, 100, torch.tensor([0.7, 1.3]), torch.tensor([0.7, 1.3]), 2),
(100, 100, torch.tensor([.5, .7]), torch.tensor([.7, .9]), torch.tensor(0.5)),
]
)
def test_invalid_param_combinations(self, height, width, scale, ratio, value, device, dtype):
batch_size = 8
with pytest.raises(Exception):
random_rectangles_params_generator(
batch_size=batch_size,
height=height,
width=width,
scale=scale.to(device=device, dtype=dtype),
ratio=ratio.to(device=device, dtype=dtype),
value=value,
                same_on_batch=True
)
def test_random_gen(self, device, dtype):
torch.manual_seed(42)
width, height = 100, 150
scale = torch.tensor([0.7, 1.3], device=device, dtype=dtype)
ratio = torch.tensor([0.7, 1.3], device=device, dtype=dtype)
value = 0.5
res = random_rectangles_params_generator(
batch_size=2, height=height, width=width, scale=scale, ratio=ratio, value=value, same_on_batch=False
)
expected = dict(
widths=torch.tensor([100, 100], device=device, dtype=dtype),
heights=torch.tensor([0, 0], device=device, dtype=dtype),
xs=torch.tensor([0, 0], device=device, dtype=dtype),
ys=torch.tensor([6, 8], device=device, dtype=dtype),
values=torch.tensor([0.5000, 0.5000], device=device, dtype=dtype)
)
assert res.keys() == expected.keys()
assert_allclose(res['widths'], expected['widths'])
        assert_allclose(res['heights'], expected['heights'])
assert_allclose(res['xs'], expected['xs'])
assert_allclose(res['ys'], expected['ys'])
assert_allclose(res['values'], expected['values'])
def test_same_on_batch(self, device, dtype):
torch.manual_seed(42)
width, height = 100, 150
scale = torch.tensor([0.7, 1.3], device=device, dtype=dtype)
ratio = torch.tensor([0.7, 1.3], device=device, dtype=dtype)
value = 0.5
res = random_rectangles_params_generator(
batch_size=2, height=height, width=width, scale=scale, ratio=ratio, value=value, same_on_batch=True
)
expected = dict(
widths=torch.tensor([100, 100], device=device, dtype=dtype),
heights=torch.tensor([0, 0], device=device, dtype=dtype),
xs=torch.tensor([0, 0], device=device, dtype=dtype),
ys=torch.tensor([10, 10], device=device, dtype=dtype),
values=torch.tensor([0.5000, 0.5000], device=device, dtype=dtype)
)
assert res.keys() == expected.keys()
assert_allclose(res['widths'], expected['widths'])
        assert_allclose(res['heights'], expected['heights'])
assert_allclose(res['xs'], expected['xs'])
assert_allclose(res['ys'], expected['ys'])
assert_allclose(res['values'], expected['values'])
class TestCenterCropGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('batch_size', [0, 2])
@pytest.mark.parametrize('height', [200])
@pytest.mark.parametrize('width', [200])
@pytest.mark.parametrize('size', [(100, 100)])
def test_valid_param_combinations(self, batch_size, height, width, size, device, dtype):
center_crop_generator(batch_size=batch_size, height=height, width=width, size=size)
@pytest.mark.parametrize(
'height,width,size', [
(200, -200, (100, 100)),
(-200, 200, (100, 100)),
(100, 100, (120, 120)),
(150, 100, (120, 120)),
(100, 150, (120, 120)),
]
)
def test_invalid_param_combinations(self, height, width, size, device, dtype):
batch_size = 2
with pytest.raises(Exception):
center_crop_generator(batch_size=batch_size, height=height, width=width, size=size)
def test_random_gen(self, device, dtype):
torch.manual_seed(42)
res = center_crop_generator(batch_size=2, height=200, width=200, size=(120, 150))
expected = dict(
src=torch.tensor(
[[[25, 40], [174, 40], [174, 159], [25, 159]], [[25, 40], [174, 40], [174, 159], [25, 159]]],
device=device,
dtype=torch.long
),
dst=torch.tensor(
[[[0, 0], [149, 0], [149, 119], [0, 119]], [[0, 0], [149, 0], [149, 119], [0, 119]]],
device=device,
dtype=torch.long
),
input_size=torch.tensor([[200, 200], [200, 200]], device=device, dtype=torch.long)
)
assert res.keys() == expected.keys()
assert_allclose(res['src'].to(device=device), expected['src'])
assert_allclose(res['dst'].to(device=device), expected['dst'])
def test_same_on_batch(self, device, dtype):
pass
class TestRandomMotionBlur(RandomGeneratorBaseTests):
@pytest.mark.parametrize('batch_size', [0, 1, 8])
@pytest.mark.parametrize('kernel_size', [3, (3, 5)])
@pytest.mark.parametrize('angle', [torch.tensor([10, 30])])
@pytest.mark.parametrize('direction', [torch.tensor([-1, -1]), torch.tensor([1, 1])])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(self, batch_size, kernel_size, angle, direction, same_on_batch, device, dtype):
random_motion_blur_generator(
batch_size=batch_size,
kernel_size=kernel_size,
angle=angle.to(device=device, dtype=dtype),
direction=direction.to(device=device, dtype=dtype),
same_on_batch=same_on_batch
)
@pytest.mark.parametrize(
'kernel_size,angle,direction', [
(4, torch.tensor([30, 100]), torch.tensor([-1, 1])),
(1, torch.tensor([30, 100]), torch.tensor([-1, 1])),
((1, 2, 3), torch.tensor([30, 100]), torch.tensor([-1, 1])),
(3, torch.tensor([30, 100]), torch.tensor([-2, 1])),
(3, torch.tensor([30, 100]), torch.tensor([-1, 2])),
]
)
def test_invalid_param_combinations(self, kernel_size, angle, direction, device, dtype):
with pytest.raises(Exception):
random_motion_blur_generator(
batch_size=8,
kernel_size=kernel_size,
angle=angle.to(device=device, dtype=dtype),
direction=direction.to(device=device, dtype=dtype)
)
def test_random_gen(self, device, dtype):
torch.manual_seed(42)
angle = torch.tensor([30, 90])
direction = torch.tensor([-1, 1])
res = random_motion_blur_generator(
batch_size=2,
kernel_size=3,
angle=angle.to(device=device, dtype=dtype),
direction=direction.to(device=device, dtype=dtype),
same_on_batch=False
)
expected = dict(
ksize_factor=torch.tensor([3, 3], device=device, dtype=torch.int32),
angle_factor=torch.tensor([82.9362, 84.9002], device=device, dtype=dtype),
direction_factor=torch.tensor([-0.2343, 0.9186], device=device, dtype=dtype)
)
assert res.keys() == expected.keys()
assert_allclose(res['ksize_factor'], expected['ksize_factor'], rtol=1e-4, atol=1e-4)
assert_allclose(res['angle_factor'], expected['angle_factor'], rtol=1e-4, atol=1e-4)
assert_allclose(res['direction_factor'], expected['direction_factor'], rtol=1e-4, atol=1e-4)
def test_same_on_batch(self, device, dtype):
torch.manual_seed(42)
angle = torch.tensor([30, 90])
direction = torch.tensor([-1, 1])
res = random_motion_blur_generator(
batch_size=2,
kernel_size=3,
angle=angle.to(device=device, dtype=dtype),
direction=direction.to(device=device, dtype=dtype),
same_on_batch=True
)
expected = dict(
ksize_factor=torch.tensor([3, 3], device=device, dtype=torch.int32),
angle_factor=torch.tensor([82.9362, 82.9362], device=device, dtype=dtype),
direction_factor=torch.tensor([0.8300, 0.8300], device=device, dtype=dtype)
)
assert res.keys() == expected.keys()
assert_allclose(res['ksize_factor'], expected['ksize_factor'], rtol=1e-4, atol=1e-4)
assert_allclose(res['angle_factor'], expected['angle_factor'], rtol=1e-4, atol=1e-4)
assert_allclose(res['direction_factor'], expected['direction_factor'], rtol=1e-4, atol=1e-4)
class TestRandomSolarizeGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('batch_size', [0, 1, 8])
@pytest.mark.parametrize('thresholds', [torch.tensor([0, 1]), torch.tensor([0.4, 0.6])])
@pytest.mark.parametrize('additions', [torch.tensor([-0.5, 0.5])])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(self, batch_size, thresholds, additions, same_on_batch, device, dtype):
random_solarize_generator(
batch_size=batch_size,
thresholds=thresholds.to(device=device, dtype=dtype),
additions=additions.to(device=device, dtype=dtype),
same_on_batch=same_on_batch
)
@pytest.mark.parametrize(
'thresholds,additions', [
(torch.tensor([0, 2]), torch.tensor([-0.5, 0.5])),
(torch.tensor([-1, 1]), torch.tensor([-0.5, 0.5])),
([0, 1], torch.tensor([-0.5, 0.5])),
(torch.tensor([0, 1]), torch.tensor([-0.5, 1])),
(torch.tensor([0, 1]), torch.tensor([-1, 0.5])),
(torch.tensor([0, 1]), [-0.5, 0.5]),
]
)
def test_invalid_param_combinations(self, thresholds, additions, device, dtype):
with pytest.raises(Exception):
            random_solarize_generator(
                batch_size=8,
                thresholds=thresholds.to(device=device, dtype=dtype)
                if isinstance(thresholds, torch.Tensor) else thresholds,
                additions=additions.to(device=device, dtype=dtype)
                if isinstance(additions, torch.Tensor) else additions
            )
def test_random_gen(self, device, dtype):
torch.manual_seed(42)
batch_size = 8
res = random_solarize_generator(
batch_size=batch_size,
thresholds=torch.tensor([0, 1], device=device, dtype=dtype),
additions=torch.tensor([-0.5, 0.5], device=device, dtype=dtype),
same_on_batch=False
)
expected = dict(
thresholds_factor=torch.tensor(
[0.8823, 0.9150, 0.3829, 0.9593, 0.3904, 0.6009, 0.2566, 0.7936], device=device, dtype=dtype
),
additions_factor=torch.tensor(
[0.4408, -0.3668, 0.4346, 0.0936, 0.3694, 0.0677, 0.2411, -0.0706], device=device, dtype=dtype
),
)
assert res.keys() == expected.keys()
assert_allclose(res['thresholds_factor'], expected['thresholds_factor'], rtol=1e-4, atol=1e-4)
assert_allclose(res['additions_factor'], expected['additions_factor'], rtol=1e-4, atol=1e-4)
def test_same_on_batch(self, device, dtype):
torch.manual_seed(42)
batch_size = 8
res = random_solarize_generator(
batch_size=batch_size,
thresholds=torch.tensor([0, 1], device=device, dtype=dtype),
additions=torch.tensor([-0.5, 0.5], device=device, dtype=dtype),
same_on_batch=True
)
expected = dict(
thresholds_factor=torch.tensor(
[0.8823, 0.8823, 0.8823, 0.8823, 0.8823, 0.8823, 0.8823, 0.8823], device=device, dtype=dtype
),
additions_factor=torch.tensor(
[0.4150, 0.4150, 0.4150, 0.4150, 0.4150, 0.4150, 0.4150, 0.4150], device=device, dtype=dtype
),
)
assert res.keys() == expected.keys()
assert_allclose(res['thresholds_factor'], expected['thresholds_factor'], rtol=1e-4, atol=1e-4)
assert_allclose(res['additions_factor'], expected['additions_factor'], rtol=1e-4, atol=1e-4)
class TestRandomPosterizeGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('batch_size', [0, 1, 8])
@pytest.mark.parametrize('bits', [torch.tensor([0, 8])])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(self, batch_size, bits, same_on_batch, device, dtype):
random_posterize_generator(
batch_size=batch_size, bits=bits.to(device=device, dtype=dtype), same_on_batch=same_on_batch
)
@pytest.mark.parametrize(
'bits', [
(torch.tensor([-1, 1])),
(torch.tensor([0, 9])),
(torch.tensor([3])),
([0, 8]),
]
)
def test_invalid_param_combinations(self, bits, device, dtype):
with pytest.raises(Exception):
            random_posterize_generator(
                batch_size=8, bits=bits.to(device=device, dtype=dtype) if isinstance(bits, torch.Tensor) else bits
            )
def test_random_gen(self, device, dtype):
torch.manual_seed(9)
batch_size = 8
res = random_posterize_generator(
batch_size=batch_size, bits=torch.tensor([0, 8], device=device, dtype=dtype), same_on_batch=False
)
expected = dict(bits_factor=torch.tensor([5, 2, 3, 6, 7, 7, 2, 7], device=device, dtype=torch.int32))
assert res.keys() == expected.keys()
assert_allclose(res['bits_factor'], expected['bits_factor'], rtol=1e-4, atol=1e-4)
def test_same_on_batch(self, device, dtype):
torch.manual_seed(9)
batch_size = 8
res = random_posterize_generator(
batch_size=batch_size, bits=torch.tensor([0, 8], device=device, dtype=dtype), same_on_batch=True
)
expected = dict(bits_factor=torch.tensor([5, 5, 5, 5, 5, 5, 5, 5], device=device, dtype=torch.int32))
assert res.keys() == expected.keys()
assert_allclose(res['bits_factor'], expected['bits_factor'], rtol=1e-4, atol=1e-4)
class TestRandomSharpnessGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('batch_size', [0, 1, 8])
@pytest.mark.parametrize('sharpness', [torch.tensor([0., 1.])])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(self, batch_size, sharpness, same_on_batch, device, dtype):
random_sharpness_generator(
batch_size=batch_size, sharpness=sharpness.to(device=device, dtype=dtype), same_on_batch=same_on_batch
)
@pytest.mark.parametrize('sharpness', [
(torch.tensor([-1, 5])),
(torch.tensor([3])),
([0, 1.]),
])
def test_invalid_param_combinations(self, sharpness, device, dtype):
with pytest.raises(Exception):
            random_sharpness_generator(
                batch_size=8,
                sharpness=sharpness.to(device=device, dtype=dtype)
                if isinstance(sharpness, torch.Tensor) else sharpness
            )
def test_random_gen(self, device, dtype):
torch.manual_seed(42)
batch_size = 8
res = random_sharpness_generator(
batch_size=batch_size, sharpness=torch.tensor([0., 1.], device=device, dtype=dtype), same_on_batch=False
)
expected = dict(
            sharpness_factor=torch.tensor(
                [0.8823, 0.9150, 0.3829, 0.9593, 0.3904, 0.6009, 0.2566, 0.7936], device=device, dtype=dtype
            )
)
assert res.keys() == expected.keys()
assert_allclose(res['sharpness_factor'], expected['sharpness_factor'], rtol=1e-4, atol=1e-4)
def test_same_on_batch(self, device, dtype):
torch.manual_seed(42)
batch_size = 8
res = random_sharpness_generator(
batch_size=batch_size, sharpness=torch.tensor([0., 1.], device=device, dtype=dtype), same_on_batch=True
)
expected = dict(
            sharpness_factor=torch.tensor(
                [0.8823, 0.8823, 0.8823, 0.8823, 0.8823, 0.8823, 0.8823, 0.8823], device=device, dtype=dtype
            )
)
assert res.keys() == expected.keys()
assert_allclose(res['sharpness_factor'], expected['sharpness_factor'], rtol=1e-4, atol=1e-4)
class TestRandomMixUpGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('batch_size', [0, 1, 8])
@pytest.mark.parametrize('p', [0., 0.5, 1.])
@pytest.mark.parametrize('lambda_val', [None, torch.tensor([0., 1.])])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(self, batch_size, p, lambda_val, same_on_batch, device, dtype):
random_mixup_generator(
batch_size=batch_size,
p=p,
            lambda_val=lambda_val.to(device=device, dtype=dtype)
            if isinstance(lambda_val, torch.Tensor) else lambda_val,
same_on_batch=same_on_batch
)
@pytest.mark.parametrize(
'lambda_val', [
(torch.tensor([-1, 1])),
(torch.tensor([0, 2])),
(torch.tensor([0, 0.5, 1])),
([0., 1.]),
]
)
def test_invalid_param_combinations(self, lambda_val, device, dtype):
with pytest.raises(Exception):
random_mixup_generator(batch_size=8, lambda_val=lambda_val.to(device=device, dtype=dtype))
def test_random_gen(self, device, dtype):
torch.manual_seed(42)
batch_size = 8
res = random_mixup_generator(
batch_size=batch_size,
p=0.5,
lambda_val=torch.tensor([0., 1.], device=device, dtype=dtype),
same_on_batch=False
)
expected = dict(
mixup_pairs=torch.tensor([6, 1, 0, 7, 2, 5, 3, 4], device=device, dtype=torch.long),
mixup_lambdas=torch.tensor(
[0.0000, 0.0000, 0.5739, 0.0000, 0.6274, 0.0000, 0.4414, 0.0000], device=device, dtype=dtype
)
)
assert res.keys() == expected.keys()
assert_allclose(res['mixup_pairs'], expected['mixup_pairs'], rtol=1e-4, atol=1e-4)
assert_allclose(res['mixup_lambdas'], expected['mixup_lambdas'], rtol=1e-4, atol=1e-4)
def test_same_on_batch(self, device, dtype):
torch.manual_seed(9)
batch_size = 8
res = random_mixup_generator(
batch_size=batch_size,
p=.9999,
lambda_val=torch.tensor([0., 1.], device=device, dtype=dtype),
same_on_batch=True
)
expected = dict(
mixup_pairs=torch.tensor([4, 6, 7, 5, 0, 1, 3, 2], device=device, dtype=torch.long),
mixup_lambdas=torch.tensor(
[0.3804, 0.3804, 0.3804, 0.3804, 0.3804, 0.3804, 0.3804, 0.3804], device=device, dtype=dtype
)
)
assert res.keys() == expected.keys()
assert_allclose(res['mixup_pairs'], expected['mixup_pairs'], rtol=1e-4, atol=1e-4)
assert_allclose(res['mixup_lambdas'], expected['mixup_lambdas'], rtol=1e-4, atol=1e-4)
class TestRandomCutMixGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('batch_size', [0, 1, 8])
@pytest.mark.parametrize('p', [0, 0.5, 1.])
@pytest.mark.parametrize('width,height', [(200, 200)])
@pytest.mark.parametrize('num_mix', [1, 3])
@pytest.mark.parametrize('beta', [None, torch.tensor(1e-15), torch.tensor(1.)])
@pytest.mark.parametrize('cut_size', [None, torch.tensor([0., 1.]), torch.tensor([0.3, 0.6])])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(
self, batch_size, p, width, height, num_mix, beta, cut_size, same_on_batch, device, dtype
):
random_cutmix_generator(
batch_size=batch_size,
p=p,
width=width,
height=height,
num_mix=num_mix,
beta=beta.to(device=device, dtype=dtype) if isinstance(beta, (torch.Tensor)) else beta,
cut_size=cut_size.to(device=device, dtype=dtype) if isinstance(cut_size, (torch.Tensor)) else cut_size,
same_on_batch=same_on_batch
)
@pytest.mark.parametrize(
'width,height,num_mix,beta,cut_size', [
(200, -200, 1, None, None),
(-200, 200, 1, None, None),
(200, 200, 0, None, None),
(200, 200, 1.5, None, None),
(200, 200, 1, torch.tensor([0., 1.]), None),
(200, 200, 1, None, torch.tensor([-1., 1.])),
(200, 200, 1, None, torch.tensor([0., 2.])),
]
)
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_invalid_param_combinations(self, width, height, num_mix, beta, cut_size, same_on_batch, device, dtype):
with pytest.raises(Exception):
random_cutmix_generator(
batch_size=8,
p=0.5,
width=width,
height=height,
num_mix=num_mix,
beta=beta.to(device=device, dtype=dtype) if isinstance(beta, (torch.Tensor)) else beta,
                cut_size=cut_size.to(device=device, dtype=dtype) if isinstance(cut_size, (torch.Tensor)) else cut_size,
same_on_batch=same_on_batch
)
def test_random_gen(self, device, dtype):
torch.manual_seed(42)
batch_size = 2
res = random_cutmix_generator(
batch_size=batch_size,
width=200,
height=200,
p=0.5,
num_mix=1,
beta=torch.tensor(1., device=device, dtype=dtype),
cut_size=torch.tensor([0., 1.], device=device, dtype=dtype),
same_on_batch=False
)
expected = dict(
mix_pairs=torch.tensor([[0, 1]], device=device, dtype=torch.long),
crop_src=torch.tensor(
[[[[71, 108], [70, 108], [70, 107], [71, 107]], [[39, 1], [38, 1], [38, 0], [39, 0]]]],
device=device,
dtype=dtype
)
)
assert res.keys() == expected.keys()
assert_allclose(res['mix_pairs'], expected['mix_pairs'], rtol=1e-4, atol=1e-4)
assert_allclose(res['crop_src'], expected['crop_src'], rtol=1e-4, atol=1e-4)
def test_same_on_batch(self, device, dtype):
torch.manual_seed(42)
batch_size = 2
res = random_cutmix_generator(
batch_size=batch_size,
width=200,
height=200,
p=0.5,
num_mix=1,
beta=torch.tensor(1., device=device, dtype=dtype),
cut_size=torch.tensor([0., 1.], device=device, dtype=dtype),
same_on_batch=True
)
expected = dict(
mix_pairs=torch.tensor([[1, 0]], device=device, dtype=torch.long),
crop_src=torch.tensor(
[[[[114, 53], [113, 53], [113, 52], [114, 52]], [[114, 53], [113, 53], [113, 52], [114, 52]]]],
device=device,
dtype=dtype
)
)
assert res.keys() == expected.keys()
assert_allclose(res['mix_pairs'], expected['mix_pairs'], rtol=1e-4, atol=1e-4)
assert_allclose(res['crop_src'], expected['crop_src'], rtol=1e-4, atol=1e-4)
|
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
from ask_sdk_model.interfaces.alexa.presentation.apl.command import Command
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union, Any
from datetime import datetime
from ask_sdk_model.interfaces.alexa.presentation.apl.command import Command as Command_bc5ff832
class ParallelCommand(Command):
"""
    Execute a series of commands in parallel. The parallel command starts executing all child commands simultaneously. The parallel command is considered finished when all of its child commands have finished. When the parallel command is terminated early, all currently executing commands are terminated.
:param delay: The delay in milliseconds before this command starts executing; must be non-negative. Defaults to 0.
:type delay: (optional) int
:param description: A user-provided description of this command.
:type description: (optional) str
:param when: If false, the execution of the command is skipped. Defaults to true.
:type when: (optional) bool
    :param commands: An unordered array of commands to execute in parallel. Once all commands have finished executing, the parallel command finishes. Note that the delay of the parallel command and the delay of each child command are additive.
:type commands: (optional) list[ask_sdk_model.interfaces.alexa.presentation.apl.command.Command]
"""
deserialized_types = {
'object_type': 'str',
'delay': 'int',
'description': 'str',
'when': 'bool',
'commands': 'list[ask_sdk_model.interfaces.alexa.presentation.apl.command.Command]'
} # type: Dict
attribute_map = {
'object_type': 'type',
'delay': 'delay',
'description': 'description',
'when': 'when',
'commands': 'commands'
} # type: Dict
supports_multiple_types = False
def __init__(self, delay=None, description=None, when=None, commands=None):
# type: (Union[int, str, None], Optional[str], Optional[bool], Optional[List[Command_bc5ff832]]) -> None
"""Execute a series of commands in parallel. The parallel command starts executing all child command simultaneously. The parallel command is considered finished when all of its child commands have finished. When the parallel command is terminated early, all currently executing commands are terminated.
:param delay: The delay in milliseconds before this command starts executing; must be non-negative. Defaults to 0.
:type delay: (optional) int
:param description: A user-provided description of this command.
:type description: (optional) str
:param when: If false, the execution of the command is skipped. Defaults to true.
:type when: (optional) bool
        :param commands: An unordered array of commands to execute in parallel. Once all commands have finished executing, the parallel command finishes. Note that the delay of the parallel command and the delay of each child command are additive.
:type commands: (optional) list[ask_sdk_model.interfaces.alexa.presentation.apl.command.Command]
"""
self.__discriminator_value = "Parallel" # type: str
self.object_type = self.__discriminator_value
super(ParallelCommand, self).__init__(object_type=self.__discriminator_value, delay=delay, description=description, when=when)
self.commands = commands
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, ParallelCommand):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
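# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the generated SDK model). The values below
# are illustrative only, and the empty list stands in for real child commands;
# it simply shows that to_dict() serializes the attributes declared above,
# including the "Parallel" discriminator stored in object_type.
if __name__ == "__main__":
    example = ParallelCommand(delay=500, description="run children together", when=True, commands=[])
    print(example.to_dict())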
|
#!/usr/bin/env python3
# Copyright (c) 2016-2018 The Test Coin Super Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various net timeouts.
- Create three testcoinsuperd nodes:
no_verack_node - we never send a verack in response to their version
no_version_node - we never send a version (only a ping)
no_send_node - we never send any P2P message.
- Start all three nodes
- Wait 1 second
- Assert that we're connected
- Send a ping to no_verack_node and no_version_node
- Wait 30 seconds
- Assert that we're still connected
- Send a ping to no_verack_node and no_version_node
- Wait 31 seconds
- Assert that we're no longer connected (timeout to receive version/verack is 60 seconds)
"""
from time import sleep
from test_framework.messages import msg_ping
from test_framework.mininode import P2PInterface
from test_framework.test_framework import TestCoinSuperTestFramework
class TestP2PConn(P2PInterface):
def on_version(self, message):
# Don't send a verack in response
pass
class TimeoutsTest(TestCoinSuperTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
# Setup the p2p connections
no_verack_node = self.nodes[0].add_p2p_connection(TestP2PConn())
no_version_node = self.nodes[0].add_p2p_connection(TestP2PConn(), send_version=False, wait_for_verack=False)
no_send_node = self.nodes[0].add_p2p_connection(TestP2PConn(), send_version=False, wait_for_verack=False)
sleep(1)
assert no_verack_node.is_connected
assert no_version_node.is_connected
assert no_send_node.is_connected
no_verack_node.send_message(msg_ping())
no_version_node.send_message(msg_ping())
sleep(30)
assert "version" in no_verack_node.last_message
assert no_verack_node.is_connected
assert no_version_node.is_connected
assert no_send_node.is_connected
no_verack_node.send_message(msg_ping())
no_version_node.send_message(msg_ping())
sleep(31)
assert not no_verack_node.is_connected
assert not no_version_node.is_connected
assert not no_send_node.is_connected
if __name__ == '__main__':
TimeoutsTest().main()
|
MRPYTHON_VERSION_MAJOR = 3
MRPYTHON_VERSION_MINOR = 0
MRPYTHON_VERSION_PATCH = 9
MRPYTHON_VERSION_TAG = "beta"
def version_string():
return "{}.{}.{}{}".format(MRPYTHON_VERSION_MAJOR,
MRPYTHON_VERSION_MINOR,
MRPYTHON_VERSION_PATCH,
"" if MRPYTHON_VERSION_TAG == "" else ("-" + MRPYTHON_VERSION_TAG))
|
import torch
import torch.optim as optim
def set_optimizer(model, cfg):
    r"""Builds the optimizer selected by ``cfg.optimizer`` ('SGD' or 'Adam')
    for the given model's parameters.
    """
    if cfg.optimizer == 'SGD':
        optimizer = optim.SGD(model.parameters(), lr=cfg.lr,
                              momentum=cfg.momentum, weight_decay=cfg.weight_decay,
                              nesterov=cfg.nesterov)
    elif cfg.optimizer == 'Adam':
        optimizer = optim.Adam(model.parameters(), lr=cfg.lr,
                               betas=(cfg.momentum, 0.999),
                               weight_decay=cfg.weight_decay)
    else:
        raise ValueError("Unsupported optimizer: {}".format(cfg.optimizer))
    return optimizer
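# Hedged usage sketch (not part of the original module): any object exposing the
# attributes read above can serve as `cfg`; an argparse.Namespace and arbitrary
# hyperparameter values are used here purely for illustration.
if __name__ == "__main__":
    from argparse import Namespace
    import torch.nn as nn

    demo_model = nn.Linear(4, 2)
    demo_cfg = Namespace(optimizer='SGD', lr=0.1, momentum=0.9,
                         weight_decay=5e-4, nesterov=True)
    print(set_optimizer(demo_model, demo_cfg))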
|
#!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from datetime import datetime
from typing import Any, Dict, Generic, List, Optional, Tuple, TypeVar, Union
from uuid import UUID, uuid4
from pydantic import BaseModel, Field, root_validator, validator
from pydantic.dataclasses import dataclass
from .consts import ONE_HOUR, SEVEN_DAYS
from .enums import (
OS,
Architecture,
Compare,
ContainerPermission,
ContainerType,
ErrorCode,
GithubIssueSearchMatch,
GithubIssueState,
HeartbeatType,
JobState,
NodeState,
NodeTaskState,
PoolState,
ScalesetState,
StatsFormat,
TaskDebugFlag,
TaskFeature,
TaskState,
TaskType,
VmState,
)
from .primitives import Container, PoolName, Region
class UserInfo(BaseModel):
application_id: Optional[UUID]
object_id: Optional[UUID]
upn: Optional[str]
# Stores the address of a secret
class SecretAddress(BaseModel):
# keyvault address of a secret
url: str
T = TypeVar("T")
# Stores data that is intended to be secret. The `secret` field holds either
# the raw value or the address of that value, which lets us keep backward
# compatibility with existing NotificationTemplate classes.
@dataclass
class SecretData(Generic[T]):
secret: Union[T, SecretAddress]
def __init__(self, secret: Union[T, SecretAddress]):
if isinstance(secret, dict):
self.secret = SecretAddress.parse_obj(secret)
else:
self.secret = secret
def __str__(self) -> str:
return self.__repr__()
def __repr__(self) -> str:
if isinstance(self.secret, SecretAddress):
return str(self.secret)
else:
return "[REDACTED]"
class EnumModel(BaseModel):
@root_validator(pre=True)
def exactly_one(cls: Any, values: Any) -> Any:
some = []
for field, val in values.items():
if val is not None:
some.append(field)
if not some:
raise ValueError("no variant set for enum")
if len(some) > 1:
raise ValueError("multiple values set for enum: %s" % some)
return values
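# Hedged illustration using a subclass defined later in this file: a call like
# NodeCommand(stop=StopNodeCommand()) satisfies exactly_one, while NodeCommand()
# (no variant set) or NodeCommand(stop=..., stop_task=...) (two variants set)
# both raise a ValueError.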
class Error(BaseModel):
code: ErrorCode
errors: List[str]
OkType = TypeVar("OkType")
Result = Union[OkType, Error]
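# Hedged note on the Result alias: it is a parameterized Union, so a helper (the
# name below is hypothetical) can be annotated to return either the Ok value or
# an Error describing the failure:
#   def fetch_pool(name: PoolName) -> Result["Pool"]: ...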
class FileEntry(BaseModel):
container: Container
filename: str
sas_url: Optional[str]
class Authentication(BaseModel):
password: str
public_key: str
private_key: str
class JobConfig(BaseModel):
project: str
name: str
build: str
duration: int
@validator("duration", allow_reuse=True)
def check_duration(cls, value: int) -> int:
if value < ONE_HOUR or value > SEVEN_DAYS:
raise ValueError("invalid duration")
return value
class ReproConfig(BaseModel):
container: Container
path: str
duration: int
@validator("duration", allow_reuse=True)
def check_duration(cls, value: int) -> int:
if value < ONE_HOUR or value > SEVEN_DAYS:
raise ValueError("invalid duration")
return value
class TaskDetails(BaseModel):
type: TaskType
duration: int
target_exe: Optional[str]
target_env: Optional[Dict[str, str]]
target_options: Optional[List[str]]
target_workers: Optional[int]
target_options_merge: Optional[bool]
check_asan_log: Optional[bool]
check_debugger: Optional[bool] = Field(default=True)
check_retry_count: Optional[int]
check_fuzzer_help: Optional[bool]
expect_crash_on_failure: Optional[bool]
rename_output: Optional[bool]
supervisor_exe: Optional[str]
supervisor_env: Optional[Dict[str, str]]
supervisor_options: Optional[List[str]]
supervisor_input_marker: Optional[str]
generator_exe: Optional[str]
generator_env: Optional[Dict[str, str]]
generator_options: Optional[List[str]]
analyzer_exe: Optional[str]
analyzer_env: Optional[Dict[str, str]]
analyzer_options: Optional[List[str]]
wait_for_files: Optional[ContainerType]
stats_file: Optional[str]
stats_format: Optional[StatsFormat]
reboot_after_setup: Optional[bool]
target_timeout: Optional[int]
ensemble_sync_delay: Optional[int]
preserve_existing_outputs: Optional[bool]
report_list: Optional[List[str]]
minimized_stack_depth: Optional[int]
@validator("check_retry_count", allow_reuse=True)
def validate_check_retry_count(cls, value: int) -> int:
if value is not None:
if value < 0:
raise ValueError("invalid check_retry_count")
return value
@validator("target_timeout", allow_reuse=True)
def check_target_timeout(cls, value: Optional[int]) -> Optional[int]:
if value is not None:
if value < 1:
raise ValueError("invalid target_timeout")
return value
@validator("duration", allow_reuse=True)
def check_duration(cls, value: int) -> int:
if value < ONE_HOUR or value > SEVEN_DAYS:
raise ValueError("invalid duration")
return value
class TaskPool(BaseModel):
count: int
pool_name: PoolName
class TaskVm(BaseModel):
region: Region
sku: str
image: str
count: int = Field(default=1)
spot_instances: bool = Field(default=False)
reboot_after_setup: Optional[bool]
@validator("count", allow_reuse=True)
def check_count(cls, value: int) -> int:
if value <= 0:
raise ValueError("invalid count")
return value
class TaskContainers(BaseModel):
type: ContainerType
name: Container
class TaskConfig(BaseModel):
job_id: UUID
prereq_tasks: Optional[List[UUID]]
task: TaskDetails
vm: Optional[TaskVm]
pool: Optional[TaskPool]
containers: List[TaskContainers]
tags: Dict[str, str]
debug: Optional[List[TaskDebugFlag]]
colocate: Optional[bool]
class BlobRef(BaseModel):
account: str
container: Container
name: str
class Report(BaseModel):
input_url: Optional[str]
input_blob: Optional[BlobRef]
executable: str
crash_type: str
crash_site: str
call_stack: List[str]
call_stack_sha256: str
input_sha256: str
asan_log: Optional[str]
task_id: UUID
job_id: UUID
scariness_score: Optional[int]
scariness_description: Optional[str]
minimized_stack: Optional[List[str]]
minimized_stack_sha256: Optional[str]
minimized_stack_function_names: Optional[List[str]]
minimized_stack_function_names_sha256: Optional[str]
class NoReproReport(BaseModel):
input_sha256: str
input_blob: Optional[BlobRef]
executable: str
task_id: UUID
job_id: UUID
tries: int
error: Optional[str]
class CrashTestResult(BaseModel):
crash_report: Optional[Report]
no_repro: Optional[NoReproReport]
class RegressionReport(BaseModel):
crash_test_result: CrashTestResult
original_crash_test_result: Optional[CrashTestResult]
class ADODuplicateTemplate(BaseModel):
increment: List[str]
comment: Optional[str]
set_state: Dict[str, str]
ado_fields: Dict[str, str]
class ADOTemplate(BaseModel):
base_url: str
auth_token: SecretData[str]
project: str
type: str
unique_fields: List[str]
comment: Optional[str]
ado_fields: Dict[str, str]
on_duplicate: ADODuplicateTemplate
# validator needed for backward compatibility
@validator("auth_token", pre=True, always=True)
def validate_auth_token(cls, v: Any) -> SecretData:
if isinstance(v, str):
return SecretData(secret=v)
elif isinstance(v, SecretData):
return v
elif isinstance(v, dict):
return SecretData(secret=v["secret"])
else:
raise TypeError(f"invalid datatype {type(v)}")
class TeamsTemplate(BaseModel):
url: SecretData[str]
# validator needed for backward compatibility
@validator("url", pre=True, always=True)
def validate_url(cls, v: Any) -> SecretData:
if isinstance(v, str):
return SecretData(secret=v)
elif isinstance(v, SecretData):
return v
elif isinstance(v, dict):
return SecretData(secret=v["secret"])
else:
raise TypeError(f"invalid datatype {type(v)}")
class ContainerDefinition(BaseModel):
type: ContainerType
compare: Compare
value: int
permissions: List[ContainerPermission]
class VmDefinition(BaseModel):
compare: Compare
value: int
class TaskDefinition(BaseModel):
features: List[TaskFeature]
containers: List[ContainerDefinition]
monitor_queue: Optional[ContainerType]
vm: VmDefinition
# TODO: service shouldn't pass SyncedDir, but just the url and let the agent
# come up with paths
class SyncedDir(BaseModel):
path: str
url: str
CONTAINER_DEF = Optional[Union[SyncedDir, List[SyncedDir]]]
class ClientCredentials(BaseModel):
client_id: UUID
client_secret: str
class AgentConfig(BaseModel):
client_credentials: Optional[ClientCredentials]
onefuzz_url: str
pool_name: PoolName
heartbeat_queue: Optional[str]
instance_telemetry_key: Optional[str]
microsoft_telemetry_key: Optional[str]
multi_tenant_domain: Optional[str]
instance_id: UUID
class TaskUnitConfig(BaseModel):
instance_id: UUID
job_id: UUID
task_id: UUID
task_type: TaskType
instance_telemetry_key: Optional[str]
microsoft_telemetry_key: Optional[str]
heartbeat_queue: str
# command_queue: str
input_queue: Optional[str]
supervisor_exe: Optional[str]
supervisor_env: Optional[Dict[str, str]]
supervisor_options: Optional[List[str]]
supervisor_input_marker: Optional[str]
target_exe: Optional[str]
target_env: Optional[Dict[str, str]]
target_options: Optional[List[str]]
target_timeout: Optional[int]
target_options_merge: Optional[bool]
target_workers: Optional[int]
check_asan_log: Optional[bool]
check_debugger: Optional[bool]
check_retry_count: Optional[int]
check_fuzzer_help: Optional[bool]
expect_crash_on_failure: Optional[bool]
rename_output: Optional[bool]
generator_exe: Optional[str]
generator_env: Optional[Dict[str, str]]
generator_options: Optional[List[str]]
wait_for_files: Optional[str]
analyzer_exe: Optional[str]
analyzer_env: Optional[Dict[str, str]]
analyzer_options: Optional[List[str]]
stats_file: Optional[str]
stats_format: Optional[StatsFormat]
ensemble_sync_delay: Optional[int]
report_list: Optional[List[str]]
minimized_stack_depth: Optional[int]
# from here forwards are Container definitions. These need to be inline
# with TaskDefinitions and ContainerTypes
analysis: CONTAINER_DEF
coverage: CONTAINER_DEF
crashes: CONTAINER_DEF
inputs: CONTAINER_DEF
no_repro: CONTAINER_DEF
readonly_inputs: CONTAINER_DEF
reports: CONTAINER_DEF
tools: CONTAINER_DEF
unique_inputs: CONTAINER_DEF
unique_reports: CONTAINER_DEF
regression_reports: CONTAINER_DEF
class Forward(BaseModel):
src_port: int
dst_ip: str
dst_port: int
class ProxyConfig(BaseModel):
url: str
notification: str
region: Region
forwards: List[Forward]
instance_telemetry_key: Optional[str]
microsoft_telemetry_key: Optional[str]
instance_id: UUID
class ProxyHeartbeat(BaseModel):
region: Region
forwards: List[Forward]
timestamp: datetime = Field(default_factory=datetime.utcnow)
class Files(BaseModel):
files: List[str]
class WorkUnit(BaseModel):
job_id: UUID
task_id: UUID
task_type: TaskType
# JSON-serialized `TaskUnitConfig`.
config: str
class WorkSet(BaseModel):
reboot: bool
setup_url: str
script: bool
work_units: List[WorkUnit]
class WorkUnitSummary(BaseModel):
job_id: UUID
task_id: UUID
task_type: TaskType
class WorkSetSummary(BaseModel):
work_units: List[WorkUnitSummary]
class GithubIssueDuplicate(BaseModel):
comment: Optional[str]
labels: List[str]
reopen: bool
class GithubIssueSearch(BaseModel):
author: Optional[str]
state: Optional[GithubIssueState]
field_match: List[GithubIssueSearchMatch]
string: str
class GithubAuth(BaseModel):
user: str
personal_access_token: str
class GithubIssueTemplate(BaseModel):
auth: SecretData[GithubAuth]
organization: str
repository: str
title: str
body: str
unique_search: GithubIssueSearch
assignees: List[str]
labels: List[str]
on_duplicate: GithubIssueDuplicate
# validator needed for backward compatibility
@validator("auth", pre=True, always=True)
def validate_auth(cls, v: Any) -> SecretData:
if isinstance(v, str):
return SecretData(secret=v)
elif isinstance(v, SecretData):
return v
elif isinstance(v, dict):
try:
return SecretData(GithubAuth.parse_obj(v))
except Exception:
return SecretData(secret=v["secret"])
else:
raise TypeError(f"invalid datatype {type(v)}")
NotificationTemplate = Union[ADOTemplate, TeamsTemplate, GithubIssueTemplate]
class Notification(BaseModel):
container: Container
notification_id: UUID = Field(default_factory=uuid4)
config: NotificationTemplate
class JobTaskInfo(BaseModel):
task_id: UUID
type: TaskType
state: TaskState
class Job(BaseModel):
timestamp: Optional[datetime] = Field(alias="Timestamp")
job_id: UUID = Field(default_factory=uuid4)
state: JobState = Field(default=JobState.init)
config: JobConfig
error: Optional[str]
end_time: Optional[datetime] = None
task_info: Optional[List[JobTaskInfo]]
user_info: Optional[UserInfo]
class TaskHeartbeatEntry(BaseModel):
task_id: UUID
job_id: Optional[UUID]
machine_id: UUID
data: List[Dict[str, HeartbeatType]]
class NodeHeartbeatEntry(BaseModel):
node_id: UUID
data: List[Dict[str, HeartbeatType]]
class Node(BaseModel):
timestamp: Optional[datetime] = Field(alias="Timestamp")
pool_name: PoolName
machine_id: UUID
state: NodeState = Field(default=NodeState.init)
scaleset_id: Optional[UUID] = None
tasks: Optional[List[Tuple[UUID, NodeTaskState]]] = None
heartbeat: Optional[datetime]
version: str = Field(default="1.0.0")
reimage_requested: bool = Field(default=False)
delete_requested: bool = Field(default=False)
debug_keep_node: bool = Field(default=False)
class ScalesetSummary(BaseModel):
scaleset_id: UUID
state: ScalesetState
class NodeTasks(BaseModel):
machine_id: UUID
task_id: UUID
state: NodeTaskState = Field(default=NodeTaskState.init)
class AutoScaleConfig(BaseModel):
image: str
max_size: Optional[int] # max size of pool
min_size: int = Field(default=0) # min size of pool
region: Optional[Region]
scaleset_size: int # Individual scaleset size
spot_instances: bool = Field(default=False)
ephemeral_os_disks: bool = Field(default=False)
vm_sku: str
@validator("scaleset_size", allow_reuse=True)
def check_scaleset_size(cls, value: int) -> int:
if value < 1 or value > 1000:
raise ValueError("invalid scaleset size")
return value
@root_validator()
def check_data(cls, values: Any) -> Any:
if (
"max_size" in values
and values.get("max_size")
and values.get("min_size") > values.get("max_size")
):
raise ValueError("The pool min_size is greater than max_size")
return values
@validator("max_size", allow_reuse=True)
def check_max_size(cls, value: Optional[int]) -> Optional[int]:
if value and value < 1:
raise ValueError("Autoscale sizes are not defined properly")
return value
@validator("min_size", allow_reuse=True)
def check_min_size(cls, value: int) -> int:
if value < 0 or value > 1000:
raise ValueError("Invalid pool min_size")
return value
class Pool(BaseModel):
timestamp: Optional[datetime] = Field(alias="Timestamp")
name: PoolName
pool_id: UUID = Field(default_factory=uuid4)
os: OS
managed: bool
autoscale: Optional[AutoScaleConfig]
arch: Architecture
state: PoolState = Field(default=PoolState.init)
client_id: Optional[UUID]
nodes: Optional[List[Node]]
config: Optional[AgentConfig]
# work_queue is explicitly not saved to Tables (see save_exclude). This is
# intended to be used to pass the information to the CLI when the CLI asks
# for information about what work is in the queue for the pool.
work_queue: Optional[List[WorkSetSummary]]
# explicitly excluded from Tables
scaleset_summary: Optional[List[ScalesetSummary]]
class ScalesetNodeState(BaseModel):
machine_id: UUID
instance_id: str
state: Optional[NodeState]
class Scaleset(BaseModel):
timestamp: Optional[datetime] = Field(alias="Timestamp")
pool_name: PoolName
scaleset_id: UUID = Field(default_factory=uuid4)
state: ScalesetState = Field(default=ScalesetState.init)
auth: Optional[Authentication]
vm_sku: str
image: str
region: Region
size: int
spot_instances: bool
ephemeral_os_disks: bool = Field(default=False)
needs_config_update: bool = Field(default=False)
error: Optional[Error]
nodes: Optional[List[ScalesetNodeState]]
client_id: Optional[UUID]
client_object_id: Optional[UUID]
tags: Dict[str, str] = Field(default_factory=lambda: {})
@validator("size", allow_reuse=True)
def check_size(cls, value: int) -> int:
if value < 0:
raise ValueError("Invalid scaleset size")
return value
class NotificationConfig(BaseModel):
config: NotificationTemplate
class Repro(BaseModel):
timestamp: Optional[datetime] = Field(alias="Timestamp")
vm_id: UUID = Field(default_factory=uuid4)
task_id: UUID
config: ReproConfig
state: VmState = Field(default=VmState.init)
auth: Optional[Authentication]
os: OS
error: Optional[Error]
ip: Optional[str]
end_time: Optional[datetime]
user_info: Optional[UserInfo]
class ExitStatus(BaseModel):
code: Optional[int]
signal: Optional[int]
success: bool
class ProcessOutput(BaseModel):
exit_status: ExitStatus
stderr: str
stdout: str
class WorkerRunningEvent(BaseModel):
task_id: UUID
class WorkerDoneEvent(BaseModel):
task_id: UUID
exit_status: ExitStatus
stderr: str
stdout: str
class WorkerEvent(EnumModel):
done: Optional[WorkerDoneEvent]
running: Optional[WorkerRunningEvent]
class NodeSettingUpEventData(BaseModel):
tasks: List[UUID]
class NodeDoneEventData(BaseModel):
error: Optional[str]
script_output: Optional[ProcessOutput]
NodeStateData = Union[NodeSettingUpEventData, NodeDoneEventData]
class NodeStateUpdate(BaseModel):
state: NodeState
data: Optional[NodeStateData]
@root_validator(pre=False, skip_on_failure=True)
def check_data(cls, values: Any) -> Any:
data = values.get("data")
if data:
state = values["state"]
if state == NodeState.setting_up:
if isinstance(data, NodeSettingUpEventData):
return values
if state == NodeState.done:
if isinstance(data, NodeDoneEventData):
return values
raise ValueError(
"data for node state update event does not match state = %s" % state
)
else:
# For now, `data` is always optional.
return values
class NodeEvent(EnumModel):
state_update: Optional[NodeStateUpdate]
worker_event: Optional[WorkerEvent]
# Temporary shim type to support hot upgrade of 1.0.0 nodes.
#
# We want future variants to use an externally-tagged repr.
NodeEventShim = Union[NodeStateUpdate, NodeEvent, WorkerEvent]
class NodeEventEnvelope(BaseModel):
machine_id: UUID
event: NodeEventShim
class StopNodeCommand(BaseModel):
pass
class StopTaskNodeCommand(BaseModel):
task_id: UUID
class NodeCommandAddSshKey(BaseModel):
public_key: str
class NodeCommand(EnumModel):
stop: Optional[StopNodeCommand]
stop_task: Optional[StopTaskNodeCommand]
add_ssh_key: Optional[NodeCommandAddSshKey]
class NodeCommandEnvelope(BaseModel):
command: NodeCommand
message_id: str
class TaskEvent(BaseModel):
timestamp: Optional[datetime] = Field(alias="Timestamp")
task_id: UUID
machine_id: UUID
event_data: WorkerEvent
class TaskEventSummary(BaseModel):
timestamp: Optional[datetime]
event_data: str
event_type: str
class NodeAssignment(BaseModel):
node_id: UUID
scaleset_id: Optional[UUID]
state: NodeTaskState
class Task(BaseModel):
timestamp: Optional[datetime] = Field(alias="Timestamp")
job_id: UUID
task_id: UUID = Field(default_factory=uuid4)
state: TaskState = Field(default=TaskState.init)
os: OS
config: TaskConfig
error: Optional[Error]
auth: Optional[Authentication]
heartbeat: Optional[datetime]
end_time: Optional[datetime]
events: Optional[List[TaskEventSummary]]
nodes: Optional[List[NodeAssignment]]
user_info: Optional[UserInfo]
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'test.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(873, 697)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.graphicsView = QtWidgets.QGraphicsView(self.centralwidget)
self.graphicsView.setObjectName("graphicsView")
self.gridLayout.addWidget(self.graphicsView, 0, 0, 1, 1)
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setEnabled(True)
self.tabWidget.setMinimumSize(QtCore.QSize(251, 489))
self.tabWidget.setMaximumSize(QtCore.QSize(251, 16777215))
self.tabWidget.setTabPosition(QtWidgets.QTabWidget.North)
self.tabWidget.setObjectName("tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.groupSetting = QtWidgets.QGroupBox(self.tab)
self.groupSetting.setGeometry(QtCore.QRect(10, 10, 221, 110))
self.groupSetting.setMinimumSize(QtCore.QSize(221, 110))
self.groupSetting.setMaximumSize(QtCore.QSize(221, 110))
self.groupSetting.setObjectName("groupSetting")
self.horizontalLayoutWidget = QtWidgets.QWidget(self.groupSetting)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(10, 20, 201, 31))
self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.labelFruit = QtWidgets.QLabel(self.horizontalLayoutWidget)
self.labelFruit.setObjectName("labelFruit")
self.horizontalLayout.addWidget(self.labelFruit)
self.comboBox = QtWidgets.QComboBox(self.horizontalLayoutWidget)
self.comboBox.setObjectName("comboBox")
self.comboBox.addItem("")
self.comboBox.addItem("")
self.horizontalLayout.addWidget(self.comboBox)
self.horizontalLayoutWidget_3 = QtWidgets.QWidget(self.groupSetting)
self.horizontalLayoutWidget_3.setGeometry(QtCore.QRect(10, 60, 199, 31))
self.horizontalLayoutWidget_3.setObjectName("horizontalLayoutWidget_3")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_3)
self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.pushWifi = QtWidgets.QPushButton(self.horizontalLayoutWidget_3)
self.pushWifi.setObjectName("pushWifi")
self.horizontalLayout_3.addWidget(self.pushWifi)
self.labelWifi = QtWidgets.QLabel(self.horizontalLayoutWidget_3)
self.labelWifi.setObjectName("labelWifi")
self.horizontalLayout_3.addWidget(self.labelWifi)
self.groupCurve = QtWidgets.QGroupBox(self.tab)
self.groupCurve.setGeometry(QtCore.QRect(10, 130, 221, 211))
self.groupCurve.setMinimumSize(QtCore.QSize(221, 211))
self.groupCurve.setMaximumSize(QtCore.QSize(221, 211))
self.groupCurve.setObjectName("groupCurve")
self.verticalLayoutWidget = QtWidgets.QWidget(self.groupCurve)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 30, 201, 168))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.labelScanTimes = QtWidgets.QLabel(self.verticalLayoutWidget)
self.labelScanTimes.setMaximumSize(QtCore.QSize(16777215, 28))
self.labelScanTimes.setObjectName("labelScanTimes")
self.horizontalLayout_2.addWidget(self.labelScanTimes)
self.spinBox = QtWidgets.QSpinBox(self.verticalLayoutWidget)
self.spinBox.setMinimum(1)
self.spinBox.setMaximum(20)
self.spinBox.setProperty("value", 3)
self.spinBox.setObjectName("spinBox")
self.horizontalLayout_2.addWidget(self.spinBox)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.pushDetection = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushDetection.setObjectName("pushDetection")
self.verticalLayout_2.addWidget(self.pushDetection)
self.pushOriginal = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushOriginal.setObjectName("pushOriginal")
self.verticalLayout_2.addWidget(self.pushOriginal)
self.pushDerivative = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushDerivative.setObjectName("pushDerivative")
self.verticalLayout_2.addWidget(self.pushDerivative)
self.pushIntegral = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushIntegral.setObjectName("pushIntegral")
self.verticalLayout_2.addWidget(self.pushIntegral)
self.tableWidget = QtWidgets.QTableWidget(self.tab)
self.tableWidget.setGeometry(QtCore.QRect(10, 350, 221, 261))
self.tableWidget.setMinimumSize(QtCore.QSize(221, 0))
self.tableWidget.setMaximumSize(QtCore.QSize(221, 16777215))
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(1)
self.tableWidget.setRowCount(6)
item = QtWidgets.QTableWidgetItem()
font = QtGui.QFont()
font.setPointSize(7)
item.setFont(font)
self.tableWidget.setVerticalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
font = QtGui.QFont()
font.setPointSize(7)
item.setFont(font)
self.tableWidget.setVerticalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
font = QtGui.QFont()
font.setPointSize(7)
item.setFont(font)
self.tableWidget.setVerticalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
font = QtGui.QFont()
font.setPointSize(7)
item.setFont(font)
self.tableWidget.setVerticalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
font = QtGui.QFont()
font.setPointSize(7)
item.setFont(font)
self.tableWidget.setVerticalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
font = QtGui.QFont()
font.setPointSize(7)
item.setFont(font)
self.tableWidget.setVerticalHeaderItem(5, item)
item = QtWidgets.QTableWidgetItem()
font = QtGui.QFont()
font.setPointSize(7)
item.setFont(font)
self.tableWidget.setHorizontalHeaderItem(0, item)
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.groupLine_1 = QtWidgets.QGroupBox(self.tab_2)
self.groupLine_1.setGeometry(QtCore.QRect(10, 10, 221, 141))
self.groupLine_1.setObjectName("groupLine_1")
self.formLayoutWidget = QtWidgets.QWidget(self.groupLine_1)
self.formLayoutWidget.setGeometry(QtCore.QRect(20, 20, 181, 101))
self.formLayoutWidget.setObjectName("formLayoutWidget")
self.formLayout = QtWidgets.QFormLayout(self.formLayoutWidget)
self.formLayout.setContentsMargins(0, 0, 0, 0)
self.formLayout.setObjectName("formLayout")
self.labelLineWidth_1 = QtWidgets.QLabel(self.formLayoutWidget)
self.labelLineWidth_1.setObjectName("labelLineWidth_1")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.labelLineWidth_1)
self.horizontalSlider_1 = QtWidgets.QSlider(self.formLayoutWidget)
self.horizontalSlider_1.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider_1.setObjectName("horizontalSlider_1")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.horizontalSlider_1)
self.labelColor_1 = QtWidgets.QLabel(self.formLayoutWidget)
self.labelColor_1.setObjectName("labelColor_1")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.labelColor_1)
self.comboColor_1 = QtWidgets.QComboBox(self.formLayoutWidget)
self.comboColor_1.setObjectName("comboColor_1")
self.comboColor_1.addItem("")
self.comboColor_1.addItem("")
self.comboColor_1.addItem("")
self.comboColor_1.addItem("")
self.comboColor_1.addItem("")
self.comboColor_1.addItem("")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.comboColor_1)
self.checkVisible_1 = QtWidgets.QCheckBox(self.formLayoutWidget)
self.checkVisible_1.setChecked(True)
self.checkVisible_1.setObjectName("checkVisible_1")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.checkVisible_1)
self.groupLine_2 = QtWidgets.QGroupBox(self.tab_2)
self.groupLine_2.setGeometry(QtCore.QRect(10, 170, 221, 141))
self.groupLine_2.setObjectName("groupLine_2")
self.formLayoutWidget_3 = QtWidgets.QWidget(self.groupLine_2)
self.formLayoutWidget_3.setGeometry(QtCore.QRect(20, 20, 181, 101))
self.formLayoutWidget_3.setObjectName("formLayoutWidget_3")
self.formLayout_3 = QtWidgets.QFormLayout(self.formLayoutWidget_3)
self.formLayout_3.setContentsMargins(0, 0, 0, 0)
self.formLayout_3.setObjectName("formLayout_3")
self.labelLineWidth_2 = QtWidgets.QLabel(self.formLayoutWidget_3)
self.labelLineWidth_2.setObjectName("labelLineWidth_2")
self.formLayout_3.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.labelLineWidth_2)
self.horizontalSlider_3 = QtWidgets.QSlider(self.formLayoutWidget_3)
self.horizontalSlider_3.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider_3.setObjectName("horizontalSlider_3")
self.formLayout_3.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.horizontalSlider_3)
self.labelColor_2 = QtWidgets.QLabel(self.formLayoutWidget_3)
self.labelColor_2.setObjectName("labelColor_2")
self.formLayout_3.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.labelColor_2)
self.comboColor_2 = QtWidgets.QComboBox(self.formLayoutWidget_3)
self.comboColor_2.setObjectName("comboColor_2")
self.comboColor_2.addItem("")
self.comboColor_2.addItem("")
self.comboColor_2.addItem("")
self.comboColor_2.addItem("")
self.comboColor_2.addItem("")
self.comboColor_2.addItem("")
self.formLayout_3.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.comboColor_2)
self.checkVisible_2 = QtWidgets.QCheckBox(self.formLayoutWidget_3)
self.checkVisible_2.setChecked(True)
self.checkVisible_2.setObjectName("checkVisible_2")
self.formLayout_3.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.checkVisible_2)
self.groupLine_3 = QtWidgets.QGroupBox(self.tab_2)
self.groupLine_3.setGeometry(QtCore.QRect(10, 330, 221, 141))
self.groupLine_3.setObjectName("groupLine_3")
self.formLayoutWidget_4 = QtWidgets.QWidget(self.groupLine_3)
self.formLayoutWidget_4.setGeometry(QtCore.QRect(20, 20, 181, 101))
self.formLayoutWidget_4.setObjectName("formLayoutWidget_4")
self.formLayout_4 = QtWidgets.QFormLayout(self.formLayoutWidget_4)
self.formLayout_4.setContentsMargins(0, 0, 0, 0)
self.formLayout_4.setObjectName("formLayout_4")
self.labelLineWidth_3 = QtWidgets.QLabel(self.formLayoutWidget_4)
self.labelLineWidth_3.setObjectName("labelLineWidth_3")
self.formLayout_4.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.labelLineWidth_3)
self.horizontalSlider_4 = QtWidgets.QSlider(self.formLayoutWidget_4)
self.horizontalSlider_4.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider_4.setObjectName("horizontalSlider_4")
self.formLayout_4.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.horizontalSlider_4)
self.labelColor_3 = QtWidgets.QLabel(self.formLayoutWidget_4)
self.labelColor_3.setObjectName("labelColor_3")
self.formLayout_4.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.labelColor_3)
self.comboColor_3 = QtWidgets.QComboBox(self.formLayoutWidget_4)
self.comboColor_3.setObjectName("comboColor_3")
self.comboColor_3.addItem("")
self.comboColor_3.addItem("")
self.comboColor_3.addItem("")
self.comboColor_3.addItem("")
self.comboColor_3.addItem("")
self.comboColor_3.addItem("")
self.formLayout_4.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.comboColor_3)
self.checkVisible_3 = QtWidgets.QCheckBox(self.formLayoutWidget_4)
self.checkVisible_3.setChecked(True)
self.checkVisible_3.setObjectName("checkVisible_3")
self.formLayout_4.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.checkVisible_3)
self.tabWidget.addTab(self.tab_2, "")
self.gridLayout.addWidget(self.tabWidget, 0, 1, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 873, 26))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuSave = QtWidgets.QMenu(self.menuFile)
self.menuSave.setObjectName("menuSave")
self.menuSettings = QtWidgets.QMenu(self.menubar)
self.menuSettings.setObjectName("menuSettings")
self.menuView = QtWidgets.QMenu(self.menubar)
self.menuView.setObjectName("menuView")
self.menuHelp = QtWidgets.QMenu(self.menubar)
self.menuHelp.setObjectName("menuHelp")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionNew = QtWidgets.QAction(MainWindow)
self.actionNew.setObjectName("actionNew")
self.actionOpen = QtWidgets.QAction(MainWindow)
self.actionOpen.setObjectName("actionOpen")
self.actionSave_Data = QtWidgets.QAction(MainWindow)
self.actionSave_Data.setObjectName("actionSave_Data")
self.actionSave_Graph = QtWidgets.QAction(MainWindow)
self.actionSave_Graph.setObjectName("actionSave_Graph")
self.actionLine = QtWidgets.QAction(MainWindow)
self.actionLine.setObjectName("actionLine")
self.actionUsage = QtWidgets.QAction(MainWindow)
self.actionUsage.setObjectName("actionUsage")
self.actionAbout = QtWidgets.QAction(MainWindow)
self.actionAbout.setObjectName("actionAbout")
self.actionCopyright = QtWidgets.QAction(MainWindow)
self.actionCopyright.setObjectName("actionCopyright")
self.actionWi_Fi_Setting = QtWidgets.QAction(MainWindow)
self.actionWi_Fi_Setting.setObjectName("actionWi_Fi_Setting")
self.menuSave.addAction(self.actionSave_Data)
self.menuSave.addAction(self.actionSave_Graph)
self.menuFile.addAction(self.actionNew)
self.menuFile.addAction(self.actionOpen)
self.menuFile.addAction(self.menuSave.menuAction())
self.menuSettings.addAction(self.actionLine)
self.menuSettings.addAction(self.actionWi_Fi_Setting)
self.menuHelp.addAction(self.actionUsage)
self.menuHelp.addAction(self.actionAbout)
self.menuHelp.addAction(self.actionCopyright)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuSettings.menuAction())
self.menubar.addAction(self.menuView.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(1)
self.checkVisible_1.clicked.connect(self.checkVisible_1.click)
self.checkVisible_2.clicked.connect(self.checkVisible_2.click)
self.checkVisible_3.clicked.connect(self.checkVisible_3.click)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "水果光谱检测"))
self.groupSetting.setTitle(_translate("MainWindow", "Setting"))
self.labelFruit.setText(_translate("MainWindow", "Fruit"))
self.comboBox.setItemText(0, _translate("MainWindow", "None"))
self.comboBox.setItemText(1, _translate("MainWindow", "Apple"))
self.pushWifi.setText(_translate("MainWindow", "Wi-Fi"))
self.labelWifi.setText(_translate("MainWindow", "unconnected"))
self.groupCurve.setTitle(_translate("MainWindow", "Curve"))
self.labelScanTimes.setText(_translate("MainWindow", "ScanTimes"))
self.pushDetection.setText(_translate("MainWindow", "Spectral Detection"))
self.pushOriginal.setText(_translate("MainWindow", "Original Time"))
self.pushDerivative.setText(_translate("MainWindow", "Derivative Time"))
self.pushIntegral.setText(_translate("MainWindow", "Integral Time"))
item = self.tableWidget.verticalHeaderItem(0)
item.setText(_translate("MainWindow", "Energy"))
item = self.tableWidget.verticalHeaderItem(1)
item.setText(_translate("MainWindow", "Carbohydrates"))
item = self.tableWidget.verticalHeaderItem(2)
item.setText(_translate("MainWindow", "-Sugars"))
item = self.tableWidget.verticalHeaderItem(3)
item.setText(_translate("MainWindow", "Protein"))
item = self.tableWidget.verticalHeaderItem(4)
item.setText(_translate("MainWindow", "New Row"))
item = self.tableWidget.verticalHeaderItem(5)
item.setText(_translate("MainWindow", "Sodium"))
item = self.tableWidget.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "Per 100g"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "检测"))
self.groupLine_1.setTitle(_translate("MainWindow", "Line1"))
self.labelLineWidth_1.setText(_translate("MainWindow", "Width"))
self.labelColor_1.setText(_translate("MainWindow", "Color"))
self.comboColor_1.setItemText(0, _translate("MainWindow", "Black"))
self.comboColor_1.setItemText(1, _translate("MainWindow", "Gray"))
self.comboColor_1.setItemText(2, _translate("MainWindow", "White"))
self.comboColor_1.setItemText(3, _translate("MainWindow", "Red"))
self.comboColor_1.setItemText(4, _translate("MainWindow", "Green"))
self.comboColor_1.setItemText(5, _translate("MainWindow", "Blue"))
self.checkVisible_1.setText(_translate("MainWindow", "Visible"))
self.groupLine_2.setTitle(_translate("MainWindow", "Line2"))
self.labelLineWidth_2.setText(_translate("MainWindow", "Width"))
self.labelColor_2.setText(_translate("MainWindow", "Color"))
self.comboColor_2.setItemText(0, _translate("MainWindow", "Green"))
self.comboColor_2.setItemText(1, _translate("MainWindow", "Black"))
self.comboColor_2.setItemText(2, _translate("MainWindow", "Gray"))
self.comboColor_2.setItemText(3, _translate("MainWindow", "White"))
self.comboColor_2.setItemText(4, _translate("MainWindow", "Red"))
self.comboColor_2.setItemText(5, _translate("MainWindow", "Blue"))
self.checkVisible_2.setText(_translate("MainWindow", "Visible"))
self.groupLine_3.setTitle(_translate("MainWindow", "Line3"))
self.labelLineWidth_3.setText(_translate("MainWindow", "Width"))
self.labelColor_3.setText(_translate("MainWindow", "Color"))
self.comboColor_3.setItemText(0, _translate("MainWindow", "Red"))
self.comboColor_3.setItemText(1, _translate("MainWindow", "Black"))
self.comboColor_3.setItemText(2, _translate("MainWindow", "Gray"))
self.comboColor_3.setItemText(3, _translate("MainWindow", "White"))
self.comboColor_3.setItemText(4, _translate("MainWindow", "Green"))
self.comboColor_3.setItemText(5, _translate("MainWindow", "Blue"))
self.checkVisible_3.setText(_translate("MainWindow", "Visible"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "设置"))
self.menuFile.setTitle(_translate("MainWindow", "File"))
self.menuSave.setTitle(_translate("MainWindow", "Save"))
self.menuSettings.setTitle(_translate("MainWindow", "Settings"))
self.menuView.setTitle(_translate("MainWindow", "View"))
self.menuHelp.setTitle(_translate("MainWindow", "Help"))
self.actionNew.setText(_translate("MainWindow", "New"))
self.actionOpen.setText(_translate("MainWindow", "Open"))
self.actionSave_Data.setText(_translate("MainWindow", "Save Data"))
self.actionSave_Graph.setText(_translate("MainWindow", "Save Graph"))
self.actionLine.setText(_translate("MainWindow", "Line Setting"))
self.actionUsage.setText(_translate("MainWindow", "Usage"))
self.actionAbout.setText(_translate("MainWindow", "About"))
self.actionCopyright.setText(_translate("MainWindow", "Copyright"))
self.actionWi_Fi_Setting.setText(_translate("MainWindow", "Wi-Fi Setting"))
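# A minimal usage sketch (not part of the generated module): pyuic5-generated UI classes are
# normally instantiated from a small launcher script. The class name Ui_MainWindow is assumed
# to be the usual name produced by pyuic5; verify it against the class definition above.
#
#     import sys
#     from PyQt5 import QtWidgets
#
#     app = QtWidgets.QApplication(sys.argv)
#     window = QtWidgets.QMainWindow()
#     ui = Ui_MainWindow()
#     ui.setupUi(window)
#     window.show()
#     sys.exit(app.exec_())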
|
from .hn import NewsClient
|
from functools import reduce
from itertools import chain
from operator import add
from typing import Iterable, Optional, TypeVar
from lhotse.audio import Recording, RecordingSet
from lhotse.cut import Cut, CutSet, MixedCut
from lhotse.features import FeatureSet, Features
from lhotse.supervision import SupervisionSegment, SupervisionSet
from lhotse.utils import Pathlike, load_yaml
ManifestItem = TypeVar('ManifestItem', Recording, SupervisionSegment, Features, Cut, MixedCut)
Manifest = TypeVar('Manifest', RecordingSet, SupervisionSet, FeatureSet, CutSet)
def combine(*manifests: Manifest) -> Manifest:
"""Combine multiple manifests of the same type into one."""
return reduce(add, manifests)
def to_manifest(items: Iterable[ManifestItem]) -> Optional[Manifest]:
"""
Take an iterable of data types in Lhotse such as Recording, SupervisionSegment or Cut, and create the manifest of the
corresponding type. When the iterable is empty, returns None.
"""
items = iter(items)
try:
first_item = next(items)
except StopIteration:
return None
items = chain([first_item], items)
if isinstance(first_item, Recording):
return RecordingSet.from_recordings(items)
if isinstance(first_item, SupervisionSegment):
return SupervisionSet.from_segments(items)
if isinstance(first_item, (Cut, MixedCut)):
return CutSet.from_cuts(items)
if isinstance(first_item, Features):
raise ValueError("FeatureSet generic construction from iterable is not possible, as the config information "
"would have been lost. Call FeatureSet.from_features() directly instead.")
raise ValueError(f"Unknown type of manifest item: {first_item}")
def load_manifest(path: Pathlike) -> Manifest:
"""Generic utility for reading an arbitrary manifest."""
raw_data = load_yaml(path)
data_set = None
for manifest_type in [RecordingSet, SupervisionSet, FeatureSet, CutSet]:
try:
data_set = manifest_type.from_dicts(raw_data)
except Exception:
pass
if data_set is None:
raise ValueError(f'Unknown type of manifest: {path}')
return data_set
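# A hedged usage sketch (file paths are illustrative; assumes the YAML manifests already exist on disk):
#
#     recordings_a = load_manifest('data/recordings_a.yml')
#     recordings_b = load_manifest('data/recordings_b.yml')
#     combined = combine(recordings_a, recordings_b)   # single RecordingSet
#     rebuilt = to_manifest(iter(combined))            # manifest type inferred from the items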
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Melody-over-chords RNN generation code as a SequenceGenerator interface."""
from functools import partial
# internal imports
from magenta.models.improv_rnn import improv_rnn_model
import magenta.music as mm
class ImprovRnnSequenceGenerator(mm.BaseSequenceGenerator):
"""Improv RNN generation code as a SequenceGenerator interface."""
def __init__(self, model, details, steps_per_quarter=4, checkpoint=None,
bundle=None):
"""Creates an ImprovRnnSequenceGenerator.
Args:
model: Instance of ImprovRnnModel.
details: A generator_pb2.GeneratorDetails for this generator.
steps_per_quarter: What precision to use when quantizing the melody and
chords. How many steps per quarter note.
checkpoint: Where to search for the most recent model checkpoint. Mutually
exclusive with `bundle`.
bundle: A GeneratorBundle object that includes both the model checkpoint
and metagraph. Mutually exclusive with `checkpoint`.
"""
super(ImprovRnnSequenceGenerator, self).__init__(
model, details, checkpoint, bundle)
self.steps_per_quarter = steps_per_quarter
def _generate(self, input_sequence, generator_options):
if len(generator_options.input_sections) > 1:
raise mm.SequenceGeneratorException(
'This model supports at most one input_sections message, but got %s' %
len(generator_options.input_sections))
if len(generator_options.generate_sections) != 1:
raise mm.SequenceGeneratorException(
'This model supports only 1 generate_sections message, but got %s' %
len(generator_options.generate_sections))
qpm = (input_sequence.tempos[0].qpm
if input_sequence and input_sequence.tempos
else mm.DEFAULT_QUARTERS_PER_MINUTE)
steps_per_second = mm.steps_per_quarter_to_steps_per_second(
self.steps_per_quarter, qpm)
generate_section = generator_options.generate_sections[0]
if generator_options.input_sections:
# Use primer melody from input section only. Take backing chords from
# beginning of input section through end of generate section.
input_section = generator_options.input_sections[0]
primer_sequence = mm.trim_note_sequence(
input_sequence, input_section.start_time, input_section.end_time)
backing_sequence = mm.trim_note_sequence(
input_sequence, input_section.start_time, generate_section.end_time)
input_start_step = mm.quantize_to_step(
input_section.start_time, steps_per_second, quantize_cutoff=0.0)
else:
# No input section. Take primer melody from the beginning of the sequence
# up until the start of the generate section.
primer_sequence = mm.trim_note_sequence(
input_sequence, 0.0, generate_section.start_time)
backing_sequence = mm.trim_note_sequence(
input_sequence, 0.0, generate_section.end_time)
input_start_step = 0
last_end_time = (max(n.end_time for n in primer_sequence.notes)
if primer_sequence.notes else 0)
if last_end_time >= generate_section.start_time:
raise mm.SequenceGeneratorException(
'Got GenerateSection request for section that is before or equal to '
'the end of the input section. This model can only extend melodies. '
'Requested start time: %s, Final note end time: %s' %
(generate_section.start_time, last_end_time))
# Quantize the priming and backing sequences.
quantized_primer_sequence = mm.quantize_note_sequence(
primer_sequence, self.steps_per_quarter)
quantized_backing_sequence = mm.quantize_note_sequence(
backing_sequence, self.steps_per_quarter)
# Setting gap_bars to infinite ensures that the entire input will be used.
extracted_melodies, _ = mm.extract_melodies(
quantized_primer_sequence, search_start_step=input_start_step,
min_bars=0, min_unique_pitches=1, gap_bars=float('inf'),
ignore_polyphonic_notes=True)
assert len(extracted_melodies) <= 1
start_step = mm.quantize_to_step(
generate_section.start_time, steps_per_second, quantize_cutoff=0.0)
# Note that when quantizing end_step, we set quantize_cutoff to 1.0 so it
# always rounds down. This avoids generating a sequence that ends at 5.0
# seconds when the requested end time is 4.99.
end_step = mm.quantize_to_step(
generate_section.end_time, steps_per_second, quantize_cutoff=1.0)
if extracted_melodies and extracted_melodies[0]:
melody = extracted_melodies[0]
else:
# If no melody could be extracted, create an empty melody that starts 1
# step before the request start_step. This will result in 1 step of
# silence when the melody is extended below.
steps_per_bar = int(
mm.steps_per_bar_in_quantized_sequence(quantized_primer_sequence))
melody = mm.Melody([],
start_step=max(0, start_step - 1),
steps_per_bar=steps_per_bar,
steps_per_quarter=self.steps_per_quarter)
extracted_chords, _ = mm.extract_chords(quantized_backing_sequence)
chords = extracted_chords[0]
# Make sure that chords and melody start on the same step.
if chords.start_step < melody.start_step:
chords.set_length(len(chords) - melody.start_step + chords.start_step)
assert chords.end_step == end_step
# Ensure that the melody extends up to the step we want to start generating.
melody.set_length(start_step - melody.start_step)
# Extract generation arguments from generator options.
arg_types = {
'temperature': lambda arg: arg.float_value,
'beam_size': lambda arg: arg.int_value,
'branch_factor': lambda arg: arg.int_value,
'steps_per_iteration': lambda arg: arg.int_value
}
args = dict((name, value_fn(generator_options.args[name]))
for name, value_fn in arg_types.items()
if name in generator_options.args)
generated_melody = self._model.generate_melody(melody, chords, **args)
generated_lead_sheet = mm.LeadSheet(generated_melody, chords)
generated_sequence = generated_lead_sheet.to_sequence(qpm=qpm)
assert (generated_sequence.total_time - generate_section.end_time) <= 1e-5
return generated_sequence
def get_generator_map():
"""Returns a map from the generator ID to a SequenceGenerator class creator.
Binds the `config` argument so that the arguments match the
BaseSequenceGenerator class constructor.
Returns:
Map from the generator ID to its SequenceGenerator class creator with a
bound `config` argument.
"""
def create_sequence_generator(config, **kwargs):
return ImprovRnnSequenceGenerator(
improv_rnn_model.ImprovRnnModel(config), config.details,
steps_per_quarter=config.steps_per_quarter, **kwargs)
return {key: partial(create_sequence_generator, config)
for (key, config) in improv_rnn_model.default_configs.items()}
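# A hedged usage sketch (not part of the module): the returned map keys SequenceGenerator
# creators by config name, so a generator could be built roughly as below. The config key and
# the bundle object are illustrative assumptions, not values defined in this file.
#
#     generator_map = get_generator_map()
#     create_fn = generator_map['chord_pitches_improv']   # assumed config key
#     generator = create_fn(bundle=bundle)                 # bundle: a GeneratorBundle loaded elsewhere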
|
from csrv.model import actions
from csrv.model.actions import play_run_event
from csrv.model import cost
from csrv.model import events
from csrv.model import timing_phases
from csrv.model.cards import card_info
from csrv.model.cards import event
class TrashForFree(actions.TrashOnAccess):
COST_CLASS = cost.NullCost
def is_usable(self):
return actions.TrashOnAccess.is_usable(self) and self.card.is_being_accessed
class Card01003Action(play_run_event.PlayRunEvent):
def resolve(self, response=None, ignore_clicks=False, ignore_all_costs=False):
play_run_event.PlayRunEvent.resolve(
self, response, ignore_clicks=ignore_clicks,
ignore_all_costs=ignore_all_costs)
self.game.register_choice_provider(
timing_phases.AccessCard, self, 'access_card_actions')
self.game.register_listener(events.RunEnds, self)
def access_card_actions(self):
card = self.game.current_phase().card # blech
return [TrashForFree(self.game, self.player, card)]
def on_run_ends(self, sender, event):
self.game.deregister_choice_provider(
timing_phases.AccessCard, self, 'access_card_actions')
self.game.deregister_listener(events.RunEnds, self)
class Card01003(event.Event):
NAME = u'Card01003'
SET = card_info.CORE
NUMBER = 3
SIDE = card_info.RUNNER
FACTION = card_info.ANARCH
INFLUENCE = 2
UNIQUE = False
KEYWORDS = set([
card_info.RUN,
card_info.SABOTAGE,
])
COST = 2
IMAGE_SRC = '01003.png'
def build_actions(self):
event.Event.build_actions(self)
self._play_event_action = Card01003Action(self.game, self.player, self)
|
from __future__ import print_function
import copy
import time
from pprint import pformat
import uuid
import ast
import urllib
import urllib3
import requests.exceptions
from demisto_client.demisto_api.rest import ApiException
import demisto_client
import json
from Tests.test_utils import print_error, print_warning, print_color, LOG_COLORS, Docker
from Tests.scripts.constants import PB_Status
# Disable insecure warnings
urllib3.disable_warnings()
# ----- Constants ----- #
DEFAULT_TIMEOUT = 60
DEFAULT_INTERVAL = 20
ENTRY_TYPE_ERROR = 4
# ----- Functions ----- #
# get integration configuration
def __get_integration_config(client, integration_name, prints_manager, thread_index=0):
body = {
'page': 0, 'size': 100, 'query': 'name:' + integration_name
}
try:
res_raw = demisto_client.generic_request_func(self=client, path='/settings/integration/search',
method='POST', body=body)
except ApiException as conn_error:
prints_manager.add_print_job(conn_error, print, thread_index)
return None
res = ast.literal_eval(res_raw[0])
TIMEOUT = 180
SLEEP_INTERVAL = 5
total_sleep = 0
while 'configurations' not in res:
if total_sleep == TIMEOUT:
error_message = "Timeout - failed to get integration {} configuration. Error: {}".format(integration_name,
res)
prints_manager.add_print_job(error_message, print_error, thread_index)
return None
time.sleep(SLEEP_INTERVAL)
total_sleep += SLEEP_INTERVAL
# Re-issue the search; without a fresh request the loop keeps inspecting the same stale response.
try:
res_raw = demisto_client.generic_request_func(self=client, path='/settings/integration/search',
method='POST', body=body)
res = ast.literal_eval(res_raw[0])
except ApiException as conn_error:
prints_manager.add_print_job(conn_error, print, thread_index)
all_configurations = res['configurations']
match_configurations = [x for x in all_configurations if x['name'] == integration_name]
if not match_configurations or len(match_configurations) == 0:
prints_manager.add_print_job('integration was not found', print_error, thread_index)
return None
return match_configurations[0]
# __test_integration_instance
def __test_integration_instance(client, module_instance, prints_manager, thread_index=0):
connection_retries = 3
response_code = 0
prints_manager.add_print_job("trying to connect.", print_warning, thread_index)
for i in range(connection_retries):
try:
response_data, response_code, _ = demisto_client.generic_request_func(self=client, method='POST',
path='/settings/integration/test',
body=module_instance,
_request_timeout=120)
break
except ApiException as conn_err:
error_msg = 'Failed to test integration instance, error trying to communicate with demisto ' \
'server: {} '.format(conn_err)
prints_manager.add_print_job(error_msg, print_error, thread_index)
return False, None
except urllib3.exceptions.ReadTimeoutError:
warning_msg = "Could not connect. Trying to connect for the {} time".format(i + 1)
prints_manager.add_print_job(warning_msg, print_warning, thread_index)
if int(response_code) != 200:
test_failed_msg = 'Integration-instance test ("Test" button) failed.\nBad status code: ' + str(
response_code)
prints_manager.add_print_job(test_failed_msg, print_error, thread_index)
return False, None
result_object = ast.literal_eval(response_data)
success, failure_message = bool(result_object.get('success')), result_object.get('message')
if not success:
if failure_message:
test_failed_msg = 'Test integration failed.\nFailure message: {}'.format(failure_message)
prints_manager.add_print_job(test_failed_msg, print_error, thread_index)
else:
test_failed_msg = 'Test integration failed\nNo failure message.'
prints_manager.add_print_job(test_failed_msg, print_error, thread_index)
return success, failure_message
# return instance name if succeed, None otherwise
def __create_integration_instance(client, integration_name, integration_instance_name,
integration_params, is_byoi, prints_manager, validate_test=True, thread_index=0):
start_message = 'Configuring instance for {} (instance name: {}, ' \
'validate "Test": {})'.format(integration_name, integration_instance_name, validate_test)
prints_manager.add_print_job(start_message, print, thread_index)
# get configuration config (used for later rest api
configuration = __get_integration_config(client, integration_name, prints_manager,
thread_index=thread_index)
if not configuration:
return None, 'No configuration', None
module_configuration = configuration['configuration']
if not module_configuration:
module_configuration = []
instance_name = '{}_test_{}'.format(integration_instance_name.replace(' ', '_'),
str(uuid.uuid4()))
# define module instance
module_instance = {
'brand': configuration['name'],
'category': configuration['category'],
'configuration': configuration,
'data': [],
'enabled': "true",
'engine': '',
'id': '',
'isIntegrationScript': is_byoi,
'name': instance_name,
'passwordProtected': False,
'version': 0
}
# set module params
for param_conf in module_configuration:
if param_conf['display'] in integration_params or param_conf['name'] in integration_params:
# param defined in conf
key = param_conf['display'] if param_conf['display'] in integration_params else param_conf['name']
if key == 'credentials':
credentials = integration_params[key]
param_value = {
'credential': '',
'identifier': credentials['identifier'],
'password': credentials['password'],
'passwordChanged': False
}
else:
param_value = integration_params[key]
param_conf['value'] = param_value
param_conf['hasvalue'] = True
elif param_conf['defaultValue']:
# param is required - take default value
param_conf['value'] = param_conf['defaultValue']
module_instance['data'].append(param_conf)
try:
res = demisto_client.generic_request_func(self=client, method='PUT',
path='/settings/integration',
body=module_instance)
except ApiException as conn_err:
error_message = 'Error trying to create instance for integration: {0}:\n {1}'.format(
integration_name, conn_err
)
prints_manager.add_print_job(error_message, print_error, thread_index)
return None, error_message, None
if res[1] != 200:
error_message = 'create instance failed with status code ' + str(res[1])
prints_manager.add_print_job(error_message, print_error, thread_index)
prints_manager.add_print_job(pformat(res[0]), print_error, thread_index)
return None, error_message, None
integration_config = ast.literal_eval(res[0])
module_instance['id'] = integration_config['id']
# test integration
if validate_test:
test_succeed, failure_message = __test_integration_instance(client, module_instance, prints_manager,
thread_index=thread_index)
else:
print_warning(
"Skipping test validation for integration: {} (it has test_validate set to false)".format(integration_name)
)
test_succeed = True
if not test_succeed:
__disable_integrations_instances(client, [module_instance], prints_manager, thread_index=thread_index)
return None, failure_message, None
docker_image = Docker.get_integration_image(integration_config)
return module_instance, '', docker_image
def __disable_integrations_instances(client, module_instances, prints_manager, thread_index=0):
for configured_instance in module_instances:
# tested with POSTMAN, this is the minimum required fields for the request.
module_instance = {
key: configured_instance[key] for key in ['id', 'brand', 'name', 'data', 'isIntegrationScript', ]
}
module_instance['enable'] = "false"
module_instance['version'] = -1
try:
res = demisto_client.generic_request_func(self=client, method='PUT',
path='/settings/integration',
body=module_instance)
except ApiException as conn_err:
error_message = 'Failed to disable integration instance, error trying to communicate with demisto ' \
'server: {} '.format(conn_err)
prints_manager.add_print_job(error_message, print_error, thread_index)
continue  # 'res' is undefined after a failed request, so skip the status-code check below
if res[1] != 200:
error_message = 'disable instance failed with status code ' + str(res[1])
prints_manager.add_print_job(error_message, print_error, thread_index)
prints_manager.add_print_job(pformat(res), print_error, thread_index)
def __enable_integrations_instances(client, module_instances):
for configured_instance in module_instances:
# tested with POSTMAN, this is the minimum required fields for the request.
module_instance = {
key: configured_instance[key] for key in ['id', 'brand', 'name', 'data', 'isIntegrationScript', ]
}
module_instance['enable'] = "true"
module_instance['version'] = -1
try:
res = demisto_client.generic_request_func(self=client, method='PUT',
path='/settings/integration',
body=module_instance)
except ApiException as conn_err:
print_error(
'Failed to enable integration instance, error trying to communicate with demisto '
'server: {} '.format(conn_err)
)
continue  # 'res' is undefined after a failed request, so skip the status-code check below
if res[1] != 200:
print_error('Enabling instance failed with status code ' + str(res[1]) + '\n' + pformat(res))
# create incident with given name & playbook, and then fetch & return the incident
def __create_incident_with_playbook(client, name, playbook_id, integrations, prints_manager, thread_index=0):
# create incident
create_incident_request = demisto_client.demisto_api.CreateIncidentRequest()
create_incident_request.create_investigation = True
create_incident_request.playbook_id = playbook_id
create_incident_request.name = name
try:
response = client.create_incident(create_incident_request=create_incident_request)
except ApiException as err:
prints_manager.add_print_job(str(err), print_error, thread_index)
try:
inc_id = response.id
except: # noqa: E722
inc_id = 'incCreateErr'
# inc_id = response_json.get('id', 'incCreateErr')
if inc_id == 'incCreateErr':
integration_names = [integration['name'] for integration in integrations if
'name' in integration]
error_message = 'Failed to create incident for integration names: {} and playbookID: {}.' \
'Possible reasons are:\nMismatch between playbookID in conf.json and ' \
'the id of the real playbook you were trying to use,' \
'or schema problems in the TestPlaybook.'.format(str(integration_names), playbook_id)
prints_manager.add_print_job(error_message, print_error, thread_index)
return False, -1
# get incident
search_filter = demisto_client.demisto_api.SearchIncidentsData()
inc_filter = demisto_client.demisto_api.IncidentFilter()
inc_filter.query = 'id:' + str(inc_id)
# inc_filter.query
search_filter.filter = inc_filter
try:
incidents = client.search_incidents(filter=search_filter)
except ApiException as err:
prints_manager.add_print_job(err, print, thread_index)
incidents = {'total': 0}
# poll the incidents queue for a max time of 120 seconds
timeout = time.time() + 120
while incidents['total'] != 1:
try:
incidents = client.search_incidents(filter=search_filter)
except ApiException as err:
prints_manager.add_print_job(err, print, thread_index)
if time.time() > timeout:
error_message = 'Got timeout for searching incident with id {}, ' \
'got {} incidents in the search'.format(inc_id, incidents['total'])
prints_manager.add_print_job(error_message, print_error, thread_index)
return False, -1
time.sleep(1)
return incidents['data'][0], inc_id
# returns current investigation playbook state - 'inprogress'/'failed'/'completed'
def __get_investigation_playbook_state(client, inv_id, prints_manager, thread_index=0):
try:
investigation_playbook_raw = demisto_client.generic_request_func(self=client, method='GET',
path='/inv-playbook/' + inv_id)
investigation_playbook = ast.literal_eval(investigation_playbook_raw[0])
except requests.exceptions.RequestException as conn_err:
error_message = 'Failed to get investigation playbook state, error trying to communicate with demisto ' \
'server: {} '.format(conn_err)
prints_manager.add_print_job(error_message, print_error, thread_index)
return PB_Status.FAILED
try:
state = investigation_playbook['state']
return state
except: # noqa: E722
return PB_Status.NOT_SUPPORTED_VERSION
# return True if delete-incident succeeded, False otherwise
def __delete_incident(client, incident, prints_manager, thread_index=0):
try:
body = {
'ids': [incident['id']],
'filter': {},
'all': False
}
res = demisto_client.generic_request_func(self=client, method='POST',
path='/incident/batchDelete', body=body)
except requests.exceptions.RequestException as conn_err:
error_message = 'Failed to delete incident, error trying to communicate with demisto server: {} ' \
''.format(conn_err)
prints_manager.add_print_job(error_message, print_error, thread_index)
return False
if int(res[1]) != 200:
error_message = 'delete incident failed\nStatus code' + str(res[1])
prints_manager.add_print_job(error_message, print_error, thread_index)
prints_manager.add_print_job(pformat(res), print_error, thread_index)
return False
return True
# return True if delete-integration-instance succeeded, False otherwise
def __delete_integration_instance(client, instance_id, prints_manager, thread_index=0):
try:
res = demisto_client.generic_request_func(self=client, method='DELETE',
path='/settings/integration/' + urllib.quote(
instance_id))
except requests.exceptions.RequestException as conn_err:
error_message = 'Failed to delete integration instance, error trying to communicate with demisto ' \
'server: {} '.format(conn_err)
prints_manager.add_print_job(error_message, print_error, thread_index)
return False
if int(res[1]) != 200:
error_message = 'delete integration instance failed\nStatus code' + str(res[1])
prints_manager.add_print_job(error_message, print_error, thread_index)
prints_manager.add_print_job(pformat(res), print_error, thread_index)
return False
return True
# delete all integration instances, return True if all succeed delete all
def __delete_integrations_instances(client, module_instances, prints_manager, thread_index=0):
succeed = True
for module_instance in module_instances:
succeed = __delete_integration_instance(client, module_instance['id'], thread_index=thread_index,
prints_manager=prints_manager) and succeed
return succeed
def __print_investigation_error(client, playbook_id, investigation_id, prints_manager, color=LOG_COLORS.RED,
thread_index=0):
try:
empty_json = {"pageSize": 1000}
res = demisto_client.generic_request_func(self=client, method='POST',
path='/investigation/' + urllib.quote(
investigation_id), body=empty_json)
except requests.exceptions.RequestException as conn_err:
error_message = 'Failed to print investigation error, error trying to communicate with demisto ' \
'server: {} '.format(conn_err)
prints_manager.add_print_job(error_message, print_error, thread_index)
if res and int(res[1]) == 200:
resp_json = ast.literal_eval(res[0])
entries = resp_json['entries']
prints_manager.add_print_job('Playbook ' + playbook_id + ' has failed:', print_color, thread_index,
message_color=color)
for entry in entries:
if entry['type'] == ENTRY_TYPE_ERROR and entry['parentContent']:
prints_manager.add_print_job('- Task ID: ' + entry['taskId'].encode('utf-8'), print_color, thread_index,
message_color=color)
prints_manager.add_print_job(' Command: ' + entry['parentContent'].encode('utf-8'), print_color,
thread_index, message_color=color)
body_contents_str = ' Body:\n' + entry['contents'].encode('utf-8') + '\n'
prints_manager.add_print_job(body_contents_str, print_color,
thread_index, message_color=color)
# Configure integrations to work with mock
def configure_proxy_unsecure(integration_params):
"""Copies the integration parameters dictionary.
Set proxy and insecure integration parameters to true.
Args:
integration_params: dict of the integration parameters.
"""
integration_params_copy = copy.deepcopy(integration_params)
for param in ('proxy', 'useProxy', 'insecure', 'unsecure'):
integration_params[param] = True
return integration_params_copy
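# A minimal illustration of the behaviour above (parameter names are illustrative): the input
# dict is mutated in place, while the returned value is a snapshot taken before the mutation.
#
#     params = {'url': 'https://example.com', 'insecure': False}
#     snapshot = configure_proxy_unsecure(params)
#     # params['insecure'] is now True; snapshot['insecure'] is still False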
# 1. create integrations instances
# 2. create incident with playbook
# 3. wait for playbook to finish run
# 4. if test pass - delete incident & instance
# return playbook status
def test_integration(client, server_url, integrations, playbook_id, prints_manager, options=None, is_mock_run=False,
thread_index=0):
options = options if options is not None else {}
# create integrations instances
module_instances = []
test_docker_images = set()
with open("./Tests/conf.json", 'r') as conf_file:
docker_thresholds = json.load(conf_file).get('docker_thresholds', {}).get('images', {})
for integration in integrations:
integration_name = integration.get('name', None)
integration_instance_name = integration.get('instance_name', '')
integration_params = integration.get('params', None)
is_byoi = integration.get('byoi', True)
validate_test = integration.get('validate_test', True)
if is_mock_run:
configure_proxy_unsecure(integration_params)
module_instance, failure_message, docker_image = __create_integration_instance(client, integration_name,
integration_instance_name,
integration_params,
is_byoi, prints_manager,
validate_test=validate_test,
thread_index=thread_index)
if module_instance is None:
failure_message = failure_message if failure_message else 'No failure message could be found'
msg = 'Failed to create instance: {}'.format(failure_message)
prints_manager.add_print_job(msg, print_error, thread_index) # disable-secrets-detection
__delete_integrations_instances(client, module_instances, prints_manager, thread_index=thread_index)
return False, -1
module_instances.append(module_instance)
if docker_image:
test_docker_images.update(docker_image)
prints_manager.add_print_job('Create integration {} succeed'.format(integration_name), print, thread_index)
# create incident with playbook
incident, inc_id = __create_incident_with_playbook(client, 'inc_{}'.format(playbook_id, ),
playbook_id, integrations, prints_manager,
thread_index=thread_index)
if not incident:
return False, -1
investigation_id = incident['investigationId']
if investigation_id is None or len(investigation_id) == 0:
incident_id_not_found_msg = 'Failed to get investigation id of incident: {}'.format(incident)
prints_manager.add_print_job(incident_id_not_found_msg, print_error, thread_index) # disable-secrets-detection
return False, -1
prints_manager.add_print_job('Investigation URL: {}/#/WorkPlan/{}'.format(server_url, investigation_id), print,
thread_index)
timeout_amount = options['timeout'] if 'timeout' in options else DEFAULT_TIMEOUT
timeout = time.time() + timeout_amount
i = 1
# wait for playbook to finish run
while True:
# give playbook time to run
time.sleep(1)
# fetch status
playbook_state = __get_investigation_playbook_state(client, investigation_id, prints_manager,
thread_index=thread_index)
if playbook_state in (PB_Status.COMPLETED, PB_Status.NOT_SUPPORTED_VERSION):
break
if playbook_state == PB_Status.FAILED:
if is_mock_run:
prints_manager.add_print_job(playbook_id + ' failed with error/s', print_warning, thread_index)
__print_investigation_error(client, playbook_id, investigation_id, prints_manager,
LOG_COLORS.YELLOW, thread_index=thread_index)
else:
prints_manager.add_print_job(playbook_id + ' failed with error/s', print_error, thread_index)
__print_investigation_error(client, playbook_id, investigation_id, prints_manager,
thread_index=thread_index)
break
if time.time() > timeout:
prints_manager.add_print_job(playbook_id + ' failed on timeout', print_error, thread_index)
break
if i % DEFAULT_INTERVAL == 0:
loop_number_message = 'loop no. {}, playbook state is {}'.format(
i / DEFAULT_INTERVAL, playbook_state)
prints_manager.add_print_job(loop_number_message, print, thread_index)
i = i + 1
__disable_integrations_instances(client, module_instances, prints_manager, thread_index=thread_index)
if test_docker_images:
memory_threshold = options.get('memory_threshold', Docker.DEFAULT_CONTAINER_MEMORY_USAGE)
pids_threshold = options.get('pid_threshold', Docker.DEFAULT_CONTAINER_PIDS_USAGE)
error_message = Docker.check_resource_usage(server_url=server_url,
docker_images=test_docker_images,
def_memory_threshold=memory_threshold,
def_pid_threshold=pids_threshold,
docker_thresholds=docker_thresholds)
if error_message:
prints_manager.add_print_job(error_message, print_error, thread_index)
return PB_Status.FAILED_DOCKER_TEST, inc_id
else:
prints_manager.add_print_job("Skipping docker container memory resource check for test {}".format(playbook_id),
print_warning, thread_index)
test_pass = playbook_state in (PB_Status.COMPLETED, PB_Status.NOT_SUPPORTED_VERSION)
if test_pass:
# delete incident
__delete_incident(client, incident, prints_manager, thread_index=thread_index)
# delete integration instance
__delete_integrations_instances(client, module_instances, prints_manager, thread_index=thread_index)
return playbook_state, inc_id
def disable_all_integrations(demisto_api_key, server, prints_manager, thread_index=0):
"""
Disable all enabled integrations. Should be called at start of test loop to start out clean
Arguments:
client -- demisto py client
"""
client = demisto_client.configure(base_url=server, api_key=demisto_api_key, verify_ssl=False)
try:
body = {'size': 1000}
int_resp = demisto_client.generic_request_func(self=client, method='POST',
path='/settings/integration/search',
body=body)
int_instances = ast.literal_eval(int_resp[0])
except requests.exceptions.RequestException as conn_err:
error_message = 'Failed to disable all integrations, error trying to communicate with demisto server: ' \
'{} '.format(conn_err)
prints_manager.add_print_job(error_message, print_error, thread_index)
return
if int(int_resp[1]) != 200:
error_message = 'Get all integration instances failed with status code: {}'.format(int_resp[1])
prints_manager.add_print_job(error_message, print_error, thread_index)
return
if 'instances' not in int_instances:
prints_manager.add_print_job("No integrations instances found to disable all", print, thread_index)
return
to_disable = []
for instance in int_instances['instances']:
if instance.get('enabled') == 'true' and instance.get("isIntegrationScript"):
add_to_disable_message = "Adding to disable list. Name: {}. Brand: {}".format(instance.get("name"),
instance.get("brand"))
prints_manager.add_print_job(add_to_disable_message, print, thread_index)
to_disable.append(instance)
if len(to_disable) > 0:
__disable_integrations_instances(client, to_disable, prints_manager, thread_index=thread_index)
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = "PyData Sphinx Theme"
copyright = "2019, PyData Community"
author = "PyData Community"
# The full version, including alpha/beta/rc tags
release = "0.0.1dev0"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"numpydoc",
"recommonmark",
"jupyter_sphinx",
]
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
html_sidebars = {
"contributing": ["sidebar-search-bs.html", "custom-template.html"],
"changelog": [],
}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "pydata_sphinx_theme"
html_logo = "_static/pandas.svg"
html_theme_options = {
"external_links": [
{"url": "https://pandas.pydata.org/pandas-docs/stable/", "name": "Pandas Docs"}
],
"github_url": "https://github.com/pydata/pydata-sphinx-theme",
"twitter_url": "https://twitter.com/pandas_dev",
"icon_links": [
{
"name": "PyPI",
"url": "https://pypi.org/project/pydata-sphinx-theme",
"icon": "fas fa-box",
}
],
"use_edit_page_button": True,
"show_toc_level": 1,
# "navbar_align": "right", # For testing that the navbar items align properly
}
html_context = {
"github_user": "pandas-dev",
"github_repo": "pydata-sphinx-theme",
"github_version": "master",
"doc_path": "docs",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Auto-convert markdown pages to demo --------------------------------------
import recommonmark
from recommonmark.transform import AutoStructify
def setup(app):
app.add_transform(AutoStructify)
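# A hedged configuration sketch (not in the original file): recommonmark documents registering
# an AutoStructify config dict before adding the transform, if finer control is ever needed.
# Verify the option names against the installed recommonmark version.
#
#     def setup(app):
#         app.add_config_value("recommonmark_config", {"enable_eval_rst": True}, True)
#         app.add_transform(AutoStructify)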
|
from charms.reactive import Endpoint, when, set_flag, clear_flag
import charmhelpers.core.hookenv as hookenv
from charmhelpers.core.hookenv import log
class GearmanRequires(Endpoint):
@when('endpoint.{endpoint_name}.joined')
def joined(self):
# if any(unit.received['port'] for unit in self.all_joined_units):
set_flag(self.expand_name('available'))
@when('endpoint.{endpoint_name}.changed')
def changed(self):
# if any(unit.received['port'] for unit in self.all_joined_units):
set_flag(self.expand_name('available'))
def address(self):
"""Get the address to access Gearman over."""
for relation in self.relations:
for unit in relation.joined_units:
log("Unit: {}".format(unit.received))
address = unit.received['ingress-address']
if address is not None:
return address
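# A hedged usage sketch from the consuming charm's side (the flag and endpoint names are
# illustrative and depend on how this interface is bound in the charm's metadata/layer):
#
#     from charms.reactive import when, endpoint_from_flag
#
#     @when('endpoint.gearman.available')
#     def configure_worker():
#         gearman = endpoint_from_flag('endpoint.gearman.available')
#         address = gearman.address()
#         if address:
#             hookenv.log('Gearman reachable at {}'.format(address))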
|
import csv
import sys
from flee import flee
from flee import SimulationSettings
class InputGeography:
"""
Class which reads in Geographic information.
"""
def __init__(self):
self.locations = []
self.links = []
def ReadLocationsFromCSV(self, csv_name, columns=["name", "region", "country", "gps_x", "gps_y", "location_type", "conflict_date", "pop/cap"]):
"""
Converts a CSV file to a locations information table
"""
self.locations = []
c = {} # column map
c["location_type"] = 0
c["conflict_date"] = 0
c["country"] = 0
c["region"] = 0
for i in range(0, len(columns)):
c[columns[i]] = i
with open(csv_name, newline='') as csvfile:
values = csv.reader(csvfile)
for row in values:
if row[0][0] == "#":
pass
else:
# print(row)
self.locations.append([row[c["name"]], row[c["pop/cap"]], row[c["gps_x"]], row[c["gps_y"]], row[
c["location_type"]], row[c["conflict_date"]], row[c["region"]], row[c["country"]]])
def ReadLinksFromCSV(self, csv_name, name1_col=0, name2_col=1, dist_col=2):
"""
Converts a CSV file to a links information table
"""
self.links = []
with open(csv_name, newline='') as csvfile:
values = csv.reader(csvfile)
for row in values:
if row[0][0] == "#":
pass
else:
# print(row)
self.links.append(
[row[name1_col], row[name2_col], row[dist_col]])
def ReadClosuresFromCSV(self, csv_name):
"""
Read the closures.csv file. Format is:
closure_type,name1,name2,closure_start,closure_end
"""
self.closures = []
with open(csv_name, newline='') as csvfile:
values = csv.reader(csvfile)
for row in values:
if row[0][0] == "#":
pass
else:
# print(row)
self.closures.append(row)
def StoreInputGeographyInEcosystem(self, e):
"""
Store the geographic information in this class in a FLEE simulation,
overwriting existing entries.
"""
lm = {}
for l in self.locations:
# if population field is empty, just set it to 0.
if len(l[1]) < 1:
l[1] = "0"
# if country field is empty, just set it to "unknown".
if len(l[7]) < 1:
l[7] = "unknown"
#print(l, file=sys.stderr)
movechance = l[4]
if "conflict" in l[4].lower() and int(l[5]) > 0:
movechance = "town"
if "camp" in l[4].lower():
lm[l[0]] = e.addLocation(l[0], movechance=movechance, capacity=int(
l[1]), x=l[2], y=l[3], country=l[7], region=l[6])
else:
lm[l[0]] = e.addLocation(l[0], movechance=movechance, pop=int(
l[1]), x=l[2], y=l[3], country=l[7], region=l[6])
for l in self.links:
if (len(l) > 3):
if int(l[3]) == 1:
e.linkUp(l[0], l[1], int(l[2]), True)
if int(l[3]) == 2:
e.linkUp(l[1], l[0], int(l[2]), True)
else:
e.linkUp(l[0], l[1], int(l[2]), False)
else:
e.linkUp(l[0], l[1], int(l[2]), False)
e.closures = []
for l in self.closures:
e.closures.append([l[0], l[1], l[2], int(l[3]), int(l[4])])
return e, lm
def AddNewConflictZones(self, e, time):
for l in self.locations:
if "conflict" in l[4].lower() and int(l[5]) == time:
print("Time = %s. Adding a new conflict zone [%s]" % (
time, l[0]), file=sys.stderr)
e.add_conflict_zone(l[0])
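# A hedged usage sketch (CSV file names are illustrative; assumes flee.Ecosystem provides the
# addLocation/linkUp/add_conflict_zone calls used by the methods above):
#
#     e = flee.Ecosystem()
#     ig = InputGeography()
#     ig.ReadLocationsFromCSV("input_csv/locations.csv")
#     ig.ReadLinksFromCSV("input_csv/routes.csv")
#     ig.ReadClosuresFromCSV("input_csv/closures.csv")
#     e, location_map = ig.StoreInputGeographyInEcosystem(e)
#     ig.AddNewConflictZones(e, time=0)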
|
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
# NOTE: Make sure that the outcome column is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1)
training_features, testing_features, training_target, testing_target = \
train_test_split(features, tpot_data['target'], random_state=42)
# Average CV score on the training set was: 0.9996457287206185
exported_pipeline = KNeighborsClassifier(n_neighbors=2, p=1, weights="distance")
# Fix random state in exported estimator
if hasattr(exported_pipeline, 'random_state'):
setattr(exported_pipeline, 'random_state', 42)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Nicole Thomas <nicole@saltstack.com>`
'''
# Import Salt Testing Libs
from salttesting import TestCase
from salttesting.mock import MagicMock, patch
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.modules import brew
# Global Variables
brew.__context__ = {}
brew.__salt__ = {}
TAPS_STRING = 'homebrew/dupes\nhomebrew/science\nhomebrew/x11'
TAPS_LIST = ['homebrew/dupes', 'homebrew/science', 'homebrew/x11']
HOMEBREW_BIN = '/usr/local/bin/brew'
class BrewTestCase(TestCase):
'''
TestCase for salt.modules.brew module
'''
# '_list_taps' function tests: 1
def test_list_taps(self):
'''
Tests the return of the list of taps
'''
mock_taps = MagicMock(return_value=TAPS_STRING)
with patch.dict(brew.__salt__, {'cmd.run': mock_taps}):
self.assertEqual(brew._list_taps(), TAPS_LIST)
# '_tap' function tests: 3
@patch('salt.modules.brew._list_taps', MagicMock(return_value=TAPS_LIST))
def test_tap_installed(self):
'''
Tests if tap argument is already installed or not
'''
self.assertTrue(brew._tap('homebrew/science'))
@patch('salt.modules.brew._list_taps', MagicMock(return_value={}))
def test_tap_failure(self):
'''
Tests if the tap installation failed
'''
mock_failure = MagicMock(return_value=1)
with patch.dict(brew.__salt__, {'cmd.retcode': mock_failure}):
self.assertFalse(brew._tap('homebrew/test'))
@patch('salt.modules.brew._list_taps', MagicMock(return_value=TAPS_LIST))
def test_tap(self):
'''
Tests adding unofficial Github repos to the list of brew taps
'''
mock_success = MagicMock(return_value=0)
with patch.dict(brew.__salt__, {'cmd.retcode': mock_success}):
self.assertTrue(brew._tap('homebrew/test'))
# '_homebrew_bin' function tests: 1
def test_homebrew_bin(self):
'''
Tests the path to the homebrew binary
'''
mock_path = MagicMock(return_value='/usr/local')
with patch.dict(brew.__salt__, {'cmd.run': mock_path}):
self.assertEqual(brew._homebrew_bin(), '/usr/local/bin/brew')
# 'list_pkgs' function tests: 2
# Only tested a few basics
# Full functionality should be tested in integration phase
def test_list_pkgs_removed(self):
'''
Tests removed implementation
'''
self.assertEqual(brew.list_pkgs(removed=True), {})
def test_list_pkgs_versions_true(self):
'''
Tests if pkg.list_pkgs is already in context and is a list
'''
mock_context = {'foo': ['bar']}
with patch.dict(brew.__context__, {'pkg.list_pkgs': mock_context}):
self.assertEqual(brew.list_pkgs(versions_as_list=True),
mock_context)
# 'version' function tests: 1
def test_version(self):
'''
Tests version name returned
'''
mock_version = MagicMock(return_value='0.1.5')
with patch.dict(brew.__salt__, {'pkg_resource.version': mock_version}):
self.assertEqual(brew.version('foo'), '0.1.5')
# 'latest_version' function tests: 0
# It has not been fully implemented
# 'remove' function tests: 1
# Only tested a few basics
# Full functionality should be tested in integration phase
@patch('salt.modules.brew.list_pkgs',
MagicMock(return_value={'test': '0.1.5'}))
def test_remove(self):
'''
Tests if package to be removed exists
'''
mock_params = MagicMock(return_value=({'foo': None}, 'repository'))
with patch.dict(brew.__salt__,
{'pkg_resource.parse_targets': mock_params}):
self.assertEqual(brew.remove('foo'), {})
# 'refresh_db' function tests: 2
@patch('salt.modules.brew._homebrew_bin',
MagicMock(return_value=HOMEBREW_BIN))
def test_refresh_db_failure(self):
'''
Tests an update of homebrew package repository failure
'''
mock_user = MagicMock(return_value='foo')
mock_failure = MagicMock(return_value=1)
with patch.dict(brew.__salt__, {'file.get_user': mock_user,
'cmd.retcode': mock_failure}):
self.assertFalse(brew.refresh_db())
@patch('salt.modules.brew._homebrew_bin',
MagicMock(return_value=HOMEBREW_BIN))
def test_refresh_db(self):
'''
Tests a successful update of homebrew package repository
'''
mock_user = MagicMock(return_value='foo')
mock_success = MagicMock(return_value=0)
with patch.dict(brew.__salt__, {'file.get_user': mock_user,
'cmd.retcode': mock_success}):
self.assertTrue(brew.refresh_db())
# 'install' function tests: 1
# Only tested a few basics
# Full functionality should be tested in integration phase
def test_install(self):
'''
Tests if package to be installed exists
'''
mock_params = MagicMock(return_value=[None, None])
with patch.dict(brew.__salt__,
{'pkg_resource.parse_targets': mock_params}):
self.assertEqual(brew.install('name=foo'), {})
if __name__ == '__main__':
from integration import run_tests
run_tests(BrewTestCase, needs_daemon=False)
# coding: utf-8
# (C) Copyright IBM Corp. 2019, 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
IBM Watson™ Compare and Comply analyzes governing documents to provide details about
critical aspects of the documents.
"""
import json
from ibm_cloud_sdk_core.authenticators.authenticator import Authenticator
from .common import get_sdk_headers
from datetime import date
from datetime import datetime
from enum import Enum
from ibm_cloud_sdk_core import BaseService
from ibm_cloud_sdk_core import DetailedResponse
from ibm_cloud_sdk_core import datetime_to_string, string_to_datetime
from ibm_cloud_sdk_core.get_authenticator import get_authenticator_from_environment
from typing import BinaryIO
from typing import Dict
from typing import List
##############################################################################
# Service
##############################################################################
class CompareComplyV1(BaseService):
"""The Compare Comply V1 service."""
DEFAULT_SERVICE_URL = 'https://gateway.watsonplatform.net/compare-comply/api'
DEFAULT_SERVICE_NAME = 'compare_comply'
def __init__(
self,
version: str,
authenticator: Authenticator = None,
service_name: str = DEFAULT_SERVICE_NAME,
) -> None:
"""
Construct a new client for the Compare Comply service.
:param str version: The API version date to use with the service, in
"YYYY-MM-DD" format. Whenever the API is changed in a backwards
incompatible way, a new minor version of the API is released.
The service uses the API version for the date you specify, or
the most recent version before that date. Note that you should
not programmatically specify the current date at runtime, in
case the API has been updated since your application's release.
Instead, specify a version date that is compatible with your
application, and don't change it until your application is
ready for a later version.
:param Authenticator authenticator: The authenticator specifies the authentication mechanism.
Get up to date information from https://github.com/IBM/python-sdk-core/blob/master/README.md
about initializing the authenticator of your choice.
"""
if not authenticator:
authenticator = get_authenticator_from_environment(service_name)
BaseService.__init__(self,
service_url=self.DEFAULT_SERVICE_URL,
authenticator=authenticator,
disable_ssl_verification=False)
self.version = version
self.configure_service(service_name)
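# Hedged usage sketch (not part of the SDK source): constructing a client with an
# IAM API key. The apikey value, version date, and service URL below are
# placeholders, and IAMAuthenticator is assumed from ibm-cloud-sdk-core.
#
#   from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
#
#   authenticator = IAMAuthenticator('your-apikey')
#   compare_comply = CompareComplyV1(version='2018-10-15',
#                                    authenticator=authenticator)
#   compare_comply.set_service_url(
#       'https://gateway.watsonplatform.net/compare-comply/api')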
#########################
# HTML conversion
#########################
def convert_to_html(self,
file: BinaryIO,
*,
file_content_type: str = None,
model: str = None,
**kwargs) -> 'DetailedResponse':
"""
Convert document to HTML.
Converts a document to HTML.
:param BinaryIO file: The document to convert.
:param str file_content_type: (optional) The content type of file.
:param str model: (optional) The analysis model to be used by the service.
For the **Element classification** and **Compare two documents** methods,
the default is `contracts`. For the **Extract tables** method, the default
is `tables`. These defaults apply to the standalone methods as well as to
the methods' use in batch-processing requests.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if file is None:
raise ValueError('file must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='convert_to_html')
headers.update(sdk_headers)
params = {'version': self.version, 'model': model}
form_data = []
form_data.append(('file', (None, file, file_content_type or
'application/octet-stream')))
url = '/v1/html_conversion'
request = self.prepare_request(method='POST',
url=url,
headers=headers,
params=params,
files=form_data)
response = self.send(request)
return response
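# Hedged usage sketch: converting a local PDF to HTML. The file path is a
# placeholder, `compare_comply` is assumed to be a configured CompareComplyV1
# client, and get_result() is assumed from ibm-cloud-sdk-core's DetailedResponse.
#
#   with open('contract.pdf', 'rb') as contract:
#       result = compare_comply.convert_to_html(
#           contract,
#           file_content_type=ConvertToHtmlEnums.FileContentType.APPLICATION_PDF.value
#       ).get_result()
#   print(json.dumps(result, indent=2))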
#########################
# Element classification
#########################
def classify_elements(self,
file: BinaryIO,
*,
file_content_type: str = None,
model: str = None,
**kwargs) -> 'DetailedResponse':
"""
Classify the elements of a document.
Analyzes the structural and semantic elements of a document.
:param BinaryIO file: The document to classify.
:param str file_content_type: (optional) The content type of file.
:param str model: (optional) The analysis model to be used by the service.
For the **Element classification** and **Compare two documents** methods,
the default is `contracts`. For the **Extract tables** method, the default
is `tables`. These defaults apply to the standalone methods as well as to
the methods' use in batch-processing requests.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if file is None:
raise ValueError('file must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='classify_elements')
headers.update(sdk_headers)
params = {'version': self.version, 'model': model}
form_data = []
form_data.append(('file', (None, file, file_content_type or
'application/octet-stream')))
url = '/v1/element_classification'
request = self.prepare_request(method='POST',
url=url,
headers=headers,
params=params,
files=form_data)
response = self.send(request)
return response
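# Hedged usage sketch: classifying the elements of a local PDF. The path is a
# placeholder and a configured `compare_comply` client is assumed.
#
#   with open('contract.pdf', 'rb') as contract:
#       result = compare_comply.classify_elements(
#           contract,
#           file_content_type=ClassifyElementsEnums.FileContentType.APPLICATION_PDF.value
#       ).get_result()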
#########################
# Tables
#########################
def extract_tables(self,
file: BinaryIO,
*,
file_content_type: str = None,
model: str = None,
**kwargs) -> 'DetailedResponse':
"""
Extract a document's tables.
Analyzes the tables in a document.
:param BinaryIO file: The document on which to run table extraction.
:param str file_content_type: (optional) The content type of file.
:param str model: (optional) The analysis model to be used by the service.
For the **Element classification** and **Compare two documents** methods,
the default is `contracts`. For the **Extract tables** method, the default
is `tables`. These defaults apply to the standalone methods as well as to
the methods' use in batch-processing requests.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if file is None:
raise ValueError('file must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='extract_tables')
headers.update(sdk_headers)
params = {'version': self.version, 'model': model}
form_data = []
form_data.append(('file', (None, file, file_content_type or
'application/octet-stream')))
url = '/v1/tables'
request = self.prepare_request(method='POST',
url=url,
headers=headers,
params=params,
files=form_data)
response = self.send(request)
return response
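# Hedged usage sketch: extracting tables from a local PDF. The path is a
# placeholder and a configured `compare_comply` client is assumed.
#
#   with open('report.pdf', 'rb') as report:
#       result = compare_comply.extract_tables(
#           report,
#           file_content_type=ExtractTablesEnums.FileContentType.APPLICATION_PDF.value
#       ).get_result()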
#########################
# Comparison
#########################
def compare_documents(self,
file_1: BinaryIO,
file_2: BinaryIO,
*,
file_1_content_type: str = None,
file_2_content_type: str = None,
file_1_label: str = None,
file_2_label: str = None,
model: str = None,
**kwargs) -> 'DetailedResponse':
"""
Compare two documents.
Compares two input documents. Documents must be in the same format.
:param BinaryIO file_1: The first document to compare.
:param BinaryIO file_2: The second document to compare.
:param str file_1_content_type: (optional) The content type of file_1.
:param str file_2_content_type: (optional) The content type of file_2.
:param str file_1_label: (optional) A text label for the first document.
:param str file_2_label: (optional) A text label for the second document.
:param str model: (optional) The analysis model to be used by the service.
For the **Element classification** and **Compare two documents** methods,
the default is `contracts`. For the **Extract tables** method, the default
is `tables`. These defaults apply to the standalone methods as well as to
the methods' use in batch-processing requests.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if file_1 is None:
raise ValueError('file_1 must be provided')
if file_2 is None:
raise ValueError('file_2 must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='compare_documents')
headers.update(sdk_headers)
params = {
'version': self.version,
'file_1_label': file_1_label,
'file_2_label': file_2_label,
'model': model
}
form_data = []
form_data.append(('file_1', (None, file_1, file_1_content_type or
'application/octet-stream')))
form_data.append(('file_2', (None, file_2, file_2_content_type or
'application/octet-stream')))
url = '/v1/comparison'
request = self.prepare_request(method='POST',
url=url,
headers=headers,
params=params,
files=form_data)
response = self.send(request)
return response
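# Hedged usage sketch: comparing two local PDFs. Paths and labels are
# placeholders and a configured `compare_comply` client is assumed.
#
#   with open('contract_v1.pdf', 'rb') as f1, open('contract_v2.pdf', 'rb') as f2:
#       result = compare_comply.compare_documents(
#           f1, f2,
#           file_1_content_type='application/pdf',
#           file_2_content_type='application/pdf',
#           file_1_label='original',
#           file_2_label='revised').get_result()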
#########################
# Feedback
#########################
def add_feedback(self,
feedback_data: 'FeedbackDataInput',
*,
user_id: str = None,
comment: str = None,
**kwargs) -> 'DetailedResponse':
"""
Add feedback.
Adds feedback in the form of _labels_ from a subject-matter expert (SME) to a
governing document.
**Important:** Feedback is not immediately incorporated into the training model,
nor is it guaranteed to be incorporated at a later date. Instead, submitted
feedback is used to suggest future updates to the training model.
:param FeedbackDataInput feedback_data: Feedback data for submission.
:param str user_id: (optional) An optional string identifying the user.
:param str comment: (optional) An optional comment on or description of the
feedback.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if feedback_data is None:
raise ValueError('feedback_data must be provided')
feedback_data = self._convert_model(feedback_data)
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='add_feedback')
headers.update(sdk_headers)
params = {'version': self.version}
data = {
'feedback_data': feedback_data,
'user_id': user_id,
'comment': comment
}
url = '/v1/feedback'
request = self.prepare_request(method='POST',
url=url,
headers=headers,
params=params,
data=data)
response = self.send(request)
return response
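# Hedged usage sketch: submitting SME feedback, assuming `feedback_data` has
# already been built as a FeedbackDataInput model (defined later in this module).
# The user id and comment are placeholders.
#
#   result = compare_comply.add_feedback(
#       feedback_data,
#       user_id='user-123',
#       comment='Correction to party identification').get_result()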
def list_feedback(self,
*,
feedback_type: str = None,
before: date = None,
after: date = None,
document_title: str = None,
model_id: str = None,
model_version: str = None,
category_removed: str = None,
category_added: str = None,
category_not_changed: str = None,
type_removed: str = None,
type_added: str = None,
type_not_changed: str = None,
page_limit: int = None,
cursor: str = None,
sort: str = None,
include_total: bool = None,
**kwargs) -> 'DetailedResponse':
"""
List the feedback in a document.
Lists the feedback in a document.
:param str feedback_type: (optional) An optional string that filters the
output to include only feedback with the specified feedback type. The only
permitted value is `element_classification`.
:param date before: (optional) An optional string in the format
`YYYY-MM-DD` that filters the output to include only feedback that was
added before the specified date.
:param date after: (optional) An optional string in the format `YYYY-MM-DD`
that filters the output to include only feedback that was added after the
specified date.
:param str document_title: (optional) An optional string that filters the
output to include only feedback from the document with the specified
`document_title`.
:param str model_id: (optional) An optional string that filters the output
to include only feedback with the specified `model_id`. The only permitted
value is `contracts`.
:param str model_version: (optional) An optional string that filters the
output to include only feedback with the specified `model_version`.
:param str category_removed: (optional) An optional string in the form of a
comma-separated list of categories. If it is specified, the service filters
the output to include only feedback that has at least one category from the
list removed.
:param str category_added: (optional) An optional string in the form of a
comma-separated list of categories. If this is specified, the service
filters the output to include only feedback that has at least one category
from the list added.
:param str category_not_changed: (optional) An optional string in the form
of a comma-separated list of categories. If this is specified, the service
filters the output to include only feedback that has at least one category
from the list unchanged.
:param str type_removed: (optional) An optional string of comma-separated
`nature`:`party` pairs. If this is specified, the service filters the
output to include only feedback that has at least one `nature`:`party` pair
from the list removed.
:param str type_added: (optional) An optional string of comma-separated
`nature`:`party` pairs. If this is specified, the service filters the
output to include only feedback that has at least one `nature`:`party` pair
from the list added.
:param str type_not_changed: (optional) An optional string of
comma-separated `nature`:`party` pairs. If this is specified, the service
filters the output to include only feedback that has at least one
`nature`:`party` pair from the list unchanged.
:param int page_limit: (optional) An optional integer specifying the number
of documents that you want the service to return.
:param str cursor: (optional) An optional string that returns the set of
documents after the previous set. Use this parameter with the `page_limit`
parameter.
:param str sort: (optional) An optional comma-separated list of fields in
the document to sort on. You can optionally specify the sort direction by
prefixing the value of the field with `-` for descending order or `+` for
ascending order (the default). Currently permitted sorting fields are
`created`, `user_id`, and `document_title`.
:param bool include_total: (optional) An optional boolean value. If
specified as `true`, the `pagination` object in the output includes a value
called `total` that gives the total count of feedback created.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='list_feedback')
headers.update(sdk_headers)
params = {
'version': self.version,
'feedback_type': feedback_type,
'before': before,
'after': after,
'document_title': document_title,
'model_id': model_id,
'model_version': model_version,
'category_removed': category_removed,
'category_added': category_added,
'category_not_changed': category_not_changed,
'type_removed': type_removed,
'type_added': type_added,
'type_not_changed': type_not_changed,
'page_limit': page_limit,
'cursor': cursor,
'sort': sort,
'include_total': include_total
}
url = '/v1/feedback'
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
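# Hedged usage sketch: listing recent element-classification feedback. The
# filter values are placeholders and a configured `compare_comply` client is
# assumed.
#
#   result = compare_comply.list_feedback(
#       feedback_type='element_classification',
#       page_limit=10,
#       include_total=True).get_result()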
def get_feedback(self, feedback_id: str, *, model: str = None,
**kwargs) -> 'DetailedResponse':
"""
Get a specified feedback entry.
Gets a feedback entry with a specified `feedback_id`.
:param str feedback_id: A string that specifies the feedback entry to be
included in the output.
:param str model: (optional) The analysis model to be used by the service.
For the **Element classification** and **Compare two documents** methods,
the default is `contracts`. For the **Extract tables** method, the default
is `tables`. These defaults apply to the standalone methods as well as to
the methods' use in batch-processing requests.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if feedback_id is None:
raise ValueError('feedback_id must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='get_feedback')
headers.update(sdk_headers)
params = {'version': self.version, 'model': model}
url = '/v1/feedback/{0}'.format(*self._encode_path_vars(feedback_id))
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
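# Hedged usage sketch: fetching a single feedback entry by its id (placeholder id,
# configured `compare_comply` client assumed).
#
#   result = compare_comply.get_feedback('feedback_id_123').get_result()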
def delete_feedback(self, feedback_id: str, *, model: str = None,
**kwargs) -> 'DetailedResponse':
"""
Delete a specified feedback entry.
Deletes a feedback entry with a specified `feedback_id`.
:param str feedback_id: A string that specifies the feedback entry to be
deleted from the document.
:param str model: (optional) The analysis model to be used by the service.
For the **Element classification** and **Compare two documents** methods,
the default is `contracts`. For the **Extract tables** method, the default
is `tables`. These defaults apply to the standalone methods as well as to
the methods' use in batch-processing requests.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if feedback_id is None:
raise ValueError('feedback_id must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='delete_feedback')
headers.update(sdk_headers)
params = {'version': self.version, 'model': model}
url = '/v1/feedback/{0}'.format(*self._encode_path_vars(feedback_id))
request = self.prepare_request(method='DELETE',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
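# Hedged usage sketch: deleting the same feedback entry once it is no longer
# needed (placeholder id).
#
#   result = compare_comply.delete_feedback('feedback_id_123').get_result()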
#########################
# Batches
#########################
def create_batch(self,
function: str,
input_credentials_file: BinaryIO,
input_bucket_location: str,
input_bucket_name: str,
output_credentials_file: BinaryIO,
output_bucket_location: str,
output_bucket_name: str,
*,
model: str = None,
**kwargs) -> 'DetailedResponse':
"""
Submit a batch-processing request.
Run Compare and Comply methods over a collection of input documents.
**Important:** Batch processing requires the use of the [IBM Cloud Object Storage
service](https://cloud.ibm.com/docs/services/cloud-object-storage?topic=cloud-object-storage-about#about-ibm-cloud-object-storage).
The use of IBM Cloud Object Storage with Compare and Comply is discussed at [Using
batch
processing](https://cloud.ibm.com/docs/services/compare-comply?topic=compare-comply-batching#before-you-batch).
:param str function: The Compare and Comply method to run across the
submitted input documents.
:param BinaryIO input_credentials_file: A JSON file containing the input
Cloud Object Storage credentials. At a minimum, the credentials must enable
`READ` permissions on the bucket defined by the `input_bucket_name`
parameter.
:param str input_bucket_location: The geographical location of the Cloud
Object Storage input bucket as listed on the **Endpoint** tab of your Cloud
Object Storage instance; for example, `us-geo`, `eu-geo`, or `ap-geo`.
:param str input_bucket_name: The name of the Cloud Object Storage input
bucket.
:param BinaryIO output_credentials_file: A JSON file that lists the Cloud
Object Storage output credentials. At a minimum, the credentials must
enable `READ` and `WRITE` permissions on the bucket defined by the
`output_bucket_name` parameter.
:param str output_bucket_location: The geographical location of the Cloud
Object Storage output bucket as listed on the **Endpoint** tab of your
Cloud Object Storage instance; for example, `us-geo`, `eu-geo`, or
`ap-geo`.
:param str output_bucket_name: The name of the Cloud Object Storage output
bucket.
:param str model: (optional) The analysis model to be used by the service.
For the **Element classification** and **Compare two documents** methods,
the default is `contracts`. For the **Extract tables** method, the default
is `tables`. These defaults apply to the standalone methods as well as to
the methods' use in batch-processing requests.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if function is None:
raise ValueError('function must be provided')
if input_credentials_file is None:
raise ValueError('input_credentials_file must be provided')
if input_bucket_location is None:
raise ValueError('input_bucket_location must be provided')
if input_bucket_name is None:
raise ValueError('input_bucket_name must be provided')
if output_credentials_file is None:
raise ValueError('output_credentials_file must be provided')
if output_bucket_location is None:
raise ValueError('output_bucket_location must be provided')
if output_bucket_name is None:
raise ValueError('output_bucket_name must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='create_batch')
headers.update(sdk_headers)
params = {'version': self.version, 'function': function, 'model': model}
form_data = []
form_data.append(('input_credentials_file',
(None, input_credentials_file, 'application/json')))
input_bucket_location = str(input_bucket_location)
form_data.append(('input_bucket_location', (None, input_bucket_location,
'text/plain')))
input_bucket_name = str(input_bucket_name)
form_data.append(
('input_bucket_name', (None, input_bucket_name, 'text/plain')))
form_data.append(('output_credentials_file',
(None, output_credentials_file, 'application/json')))
output_bucket_location = str(output_bucket_location)
form_data.append(('output_bucket_location',
(None, output_bucket_location, 'text/plain')))
output_bucket_name = str(output_bucket_name)
form_data.append(
('output_bucket_name', (None, output_bucket_name, 'text/plain')))
url = '/v1/batches'
request = self.prepare_request(method='POST',
url=url,
headers=headers,
params=params,
files=form_data)
response = self.send(request)
return response
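# Hedged usage sketch: starting an element-classification batch job. The bucket
# names, locations, and credential file paths are placeholders; the Cloud Object
# Storage credential files must grant the permissions described in the docstring.
#
#   with open('input_credentials.json', 'rb') as in_creds, \
#        open('output_credentials.json', 'rb') as out_creds:
#       result = compare_comply.create_batch(
#           function=CreateBatchEnums.Function.ELEMENT_CLASSIFICATION.value,
#           input_credentials_file=in_creds,
#           input_bucket_location='us-geo',
#           input_bucket_name='contracts-input',
#           output_credentials_file=out_creds,
#           output_bucket_location='us-geo',
#           output_bucket_name='contracts-output').get_result()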
def list_batches(self, **kwargs) -> 'DetailedResponse':
"""
List submitted batch-processing jobs.
Lists batch-processing jobs submitted by users.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='list_batches')
headers.update(sdk_headers)
params = {'version': self.version}
url = '/v1/batches'
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
def get_batch(self, batch_id: str, **kwargs) -> 'DetailedResponse':
"""
Get information about a specific batch-processing job.
Gets information about a batch-processing job with a specified ID.
:param str batch_id: The ID of the batch-processing job whose information
you want to retrieve.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if batch_id is None:
raise ValueError('batch_id must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='get_batch')
headers.update(sdk_headers)
params = {'version': self.version}
url = '/v1/batches/{0}'.format(*self._encode_path_vars(batch_id))
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
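# Hedged usage sketch: enumerating batch jobs and inspecting each one by id.
# The response is assumed to be the JSON dictionary returned by get_result();
# the 'batches' and 'batch_id' keys mirror the Batches/BatchStatus models below.
#
#   batches = compare_comply.list_batches().get_result()
#   for batch in batches.get('batches', []):
#       status = compare_comply.get_batch(batch['batch_id']).get_result()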
def update_batch(self,
batch_id: str,
action: str,
*,
model: str = None,
**kwargs) -> 'DetailedResponse':
"""
Update a pending or active batch-processing job.
Updates a pending or active batch-processing job. You can rescan the input bucket
to check for new documents or cancel a job.
:param str batch_id: The ID of the batch-processing job you want to update.
:param str action: The action you want to perform on the specified
batch-processing job.
:param str model: (optional) The analysis model to be used by the service.
For the **Element classification** and **Compare two documents** methods,
the default is `contracts`. For the **Extract tables** method, the default
is `tables`. These defaults apply to the standalone methods as well as to
the methods' use in batch-processing requests.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if batch_id is None:
raise ValueError('batch_id must be provided')
if action is None:
raise ValueError('action must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='update_batch')
headers.update(sdk_headers)
params = {'version': self.version, 'action': action, 'model': model}
url = '/v1/batches/{0}'.format(*self._encode_path_vars(batch_id))
request = self.prepare_request(method='PUT',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
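# Hedged usage sketch: rescanning the input bucket of a pending batch job
# (placeholder batch id, configured `compare_comply` client assumed).
#
#   result = compare_comply.update_batch(
#       'batch_id_123',
#       UpdateBatchEnums.Action.RESCAN.value).get_result()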
class ConvertToHtmlEnums(object):
class FileContentType(Enum):
"""
The content type of file.
"""
APPLICATION_PDF = 'application/pdf'
APPLICATION_MSWORD = 'application/msword'
APPLICATION_VND_OPENXMLFORMATS_OFFICEDOCUMENT_WORDPROCESSINGML_DOCUMENT = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
IMAGE_BMP = 'image/bmp'
IMAGE_GIF = 'image/gif'
IMAGE_JPEG = 'image/jpeg'
IMAGE_PNG = 'image/png'
IMAGE_TIFF = 'image/tiff'
TEXT_PLAIN = 'text/plain'
class Model(Enum):
"""
The analysis model to be used by the service. For the **Element classification**
and **Compare two documents** methods, the default is `contracts`. For the
**Extract tables** method, the default is `tables`. These defaults apply to the
standalone methods as well as to the methods' use in batch-processing requests.
"""
CONTRACTS = 'contracts'
TABLES = 'tables'
class ClassifyElementsEnums(object):
class FileContentType(Enum):
"""
The content type of file.
"""
APPLICATION_PDF = 'application/pdf'
APPLICATION_MSWORD = 'application/msword'
APPLICATION_VND_OPENXMLFORMATS_OFFICEDOCUMENT_WORDPROCESSINGML_DOCUMENT = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
IMAGE_BMP = 'image/bmp'
IMAGE_GIF = 'image/gif'
IMAGE_JPEG = 'image/jpeg'
IMAGE_PNG = 'image/png'
IMAGE_TIFF = 'image/tiff'
class Model(Enum):
"""
The analysis model to be used by the service. For the **Element classification**
and **Compare two documents** methods, the default is `contracts`. For the
**Extract tables** method, the default is `tables`. These defaults apply to the
standalone methods as well as to the methods' use in batch-processing requests.
"""
CONTRACTS = 'contracts'
TABLES = 'tables'
class ExtractTablesEnums(object):
class FileContentType(Enum):
"""
The content type of file.
"""
APPLICATION_PDF = 'application/pdf'
APPLICATION_MSWORD = 'application/msword'
APPLICATION_VND_OPENXMLFORMATS_OFFICEDOCUMENT_WORDPROCESSINGML_DOCUMENT = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
IMAGE_BMP = 'image/bmp'
IMAGE_GIF = 'image/gif'
IMAGE_JPEG = 'image/jpeg'
IMAGE_PNG = 'image/png'
IMAGE_TIFF = 'image/tiff'
TEXT_PLAIN = 'text/plain'
class Model(Enum):
"""
The analysis model to be used by the service. For the **Element classification**
and **Compare two documents** methods, the default is `contracts`. For the
**Extract tables** method, the default is `tables`. These defaults apply to the
standalone methods as well as to the methods' use in batch-processing requests.
"""
CONTRACTS = 'contracts'
TABLES = 'tables'
class CompareDocumentsEnums(object):
class File1ContentType(Enum):
"""
The content type of file_1.
"""
APPLICATION_PDF = 'application/pdf'
APPLICATION_JSON = 'application/json'
APPLICATION_MSWORD = 'application/msword'
APPLICATION_VND_OPENXMLFORMATS_OFFICEDOCUMENT_WORDPROCESSINGML_DOCUMENT = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
IMAGE_BMP = 'image/bmp'
IMAGE_GIF = 'image/gif'
IMAGE_JPEG = 'image/jpeg'
IMAGE_PNG = 'image/png'
IMAGE_TIFF = 'image/tiff'
class File2ContentType(Enum):
"""
The content type of file_2.
"""
APPLICATION_PDF = 'application/pdf'
APPLICATION_JSON = 'application/json'
APPLICATION_MSWORD = 'application/msword'
APPLICATION_VND_OPENXMLFORMATS_OFFICEDOCUMENT_WORDPROCESSINGML_DOCUMENT = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
IMAGE_BMP = 'image/bmp'
IMAGE_GIF = 'image/gif'
IMAGE_JPEG = 'image/jpeg'
IMAGE_PNG = 'image/png'
IMAGE_TIFF = 'image/tiff'
class Model(Enum):
"""
The analysis model to be used by the service. For the **Element classification**
and **Compare two documents** methods, the default is `contracts`. For the
**Extract tables** method, the default is `tables`. These defaults apply to the
standalone methods as well as to the methods' use in batch-processing requests.
"""
CONTRACTS = 'contracts'
TABLES = 'tables'
class GetFeedbackEnums(object):
class Model(Enum):
"""
The analysis model to be used by the service. For the **Element classification**
and **Compare two documents** methods, the default is `contracts`. For the
**Extract tables** method, the default is `tables`. These defaults apply to the
standalone methods as well as to the methods' use in batch-processing requests.
"""
CONTRACTS = 'contracts'
TABLES = 'tables'
class DeleteFeedbackEnums(object):
class Model(Enum):
"""
The analysis model to be used by the service. For the **Element classification**
and **Compare two documents** methods, the default is `contracts`. For the
**Extract tables** method, the default is `tables`. These defaults apply to the
standalone methods as well as to the methods' use in batch-processing requests.
"""
CONTRACTS = 'contracts'
TABLES = 'tables'
class CreateBatchEnums(object):
class Function(Enum):
"""
The Compare and Comply method to run across the submitted input documents.
"""
HTML_CONVERSION = 'html_conversion'
ELEMENT_CLASSIFICATION = 'element_classification'
TABLES = 'tables'
class Model(Enum):
"""
The analysis model to be used by the service. For the **Element classification**
and **Compare two documents** methods, the default is `contracts`. For the
**Extract tables** method, the default is `tables`. These defaults apply to the
standalone methods as well as to the methods' use in batch-processing requests.
"""
CONTRACTS = 'contracts'
TABLES = 'tables'
class UpdateBatchEnums(object):
class Action(Enum):
"""
The action you want to perform on the specified batch-processing job.
"""
RESCAN = 'rescan'
CANCEL = 'cancel'
class Model(Enum):
"""
The analysis model to be used by the service. For the **Element classification**
and **Compare two documents** methods, the default is `contracts`. For the
**Extract tables** method, the default is `tables`. These defaults apply to the
standalone methods as well as to the methods' use in batch-processing requests.
"""
CONTRACTS = 'contracts'
TABLES = 'tables'
##############################################################################
# Models
##############################################################################
class Address():
"""
A party's address.
:attr str text: (optional) A string listing the address.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
def __init__(self, *, text: str = None,
location: 'Location' = None) -> None:
"""
Initialize an Address object.
:param str text: (optional) A string listing the address.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
self.text = text
self.location = location
@classmethod
def from_dict(cls, _dict: Dict) -> 'Address':
"""Initialize a Address object from a json dictionary."""
args = {}
valid_keys = ['text', 'location']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Address: ' +
', '.join(bad_keys))
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Address object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Address object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Address') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Address') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
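# Hedged sketch of the from_dict/to_dict round trip shared by the model classes in
# this module, assuming the Location model defined later in the module accepts
# `begin` and `end` keys; the values below are placeholders.
#
#   address = Address.from_dict({'text': '123 Main Street',
#                                'location': {'begin': 0, 'end': 15}})
#   assert address.to_dict()['text'] == '123 Main Street'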
class AlignedElement():
"""
AlignedElement.
:attr List[ElementPair] element_pair: (optional) Identifies two elements that
semantically align between the compared documents.
:attr bool identical_text: (optional) Specifies whether the aligned element is
identical. Elements are considered identical despite minor differences such as
leading punctuation, end-of-sentence punctuation, whitespace, the presence or
absence of definite or indefinite articles, and others.
:attr List[str] provenance_ids: (optional) Hashed values that you can send to
IBM to provide feedback or receive support.
:attr bool significant_elements: (optional) Indicates that the elements aligned
are contractual clauses of significance.
"""
def __init__(self,
*,
element_pair: List['ElementPair'] = None,
identical_text: bool = None,
provenance_ids: List[str] = None,
significant_elements: bool = None) -> None:
"""
Initialize an AlignedElement object.
:param List[ElementPair] element_pair: (optional) Identifies two elements
that semantically align between the compared documents.
:param bool identical_text: (optional) Specifies whether the aligned
element is identical. Elements are considered identical despite minor
differences such as leading punctuation, end-of-sentence punctuation,
whitespace, the presence or absence of definite or indefinite articles, and
others.
:param List[str] provenance_ids: (optional) Hashed values that you can send
to IBM to provide feedback or receive support.
:param bool significant_elements: (optional) Indicates that the elements
aligned are contractual clauses of significance.
"""
self.element_pair = element_pair
self.identical_text = identical_text
self.provenance_ids = provenance_ids
self.significant_elements = significant_elements
@classmethod
def from_dict(cls, _dict: Dict) -> 'AlignedElement':
"""Initialize a AlignedElement object from a json dictionary."""
args = {}
valid_keys = [
'element_pair', 'identical_text', 'provenance_ids',
'significant_elements'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class AlignedElement: '
+ ', '.join(bad_keys))
if 'element_pair' in _dict:
args['element_pair'] = [
ElementPair._from_dict(x) for x in (_dict.get('element_pair'))
]
if 'identical_text' in _dict:
args['identical_text'] = _dict.get('identical_text')
if 'provenance_ids' in _dict:
args['provenance_ids'] = _dict.get('provenance_ids')
if 'significant_elements' in _dict:
args['significant_elements'] = _dict.get('significant_elements')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a AlignedElement object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'element_pair') and self.element_pair is not None:
_dict['element_pair'] = [x._to_dict() for x in self.element_pair]
if hasattr(self, 'identical_text') and self.identical_text is not None:
_dict['identical_text'] = self.identical_text
if hasattr(self, 'provenance_ids') and self.provenance_ids is not None:
_dict['provenance_ids'] = self.provenance_ids
if hasattr(self, 'significant_elements'
) and self.significant_elements is not None:
_dict['significant_elements'] = self.significant_elements
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this AlignedElement object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'AlignedElement') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'AlignedElement') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class Attribute():
"""
List of document attributes.
:attr str type: (optional) The type of attribute.
:attr str text: (optional) The text associated with the attribute.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
def __init__(self,
*,
type: str = None,
text: str = None,
location: 'Location' = None) -> None:
"""
Initialize an Attribute object.
:param str type: (optional) The type of attribute.
:param str text: (optional) The text associated with the attribute.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
self.type = type
self.text = text
self.location = location
@classmethod
def from_dict(cls, _dict: Dict) -> 'Attribute':
"""Initialize a Attribute object from a json dictionary."""
args = {}
valid_keys = ['type', 'text', 'location']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Attribute: '
+ ', '.join(bad_keys))
if 'type' in _dict:
args['type'] = _dict.get('type')
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Attribute object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'type') and self.type is not None:
_dict['type'] = self.type
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Attribute object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Attribute') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Attribute') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class TypeEnum(Enum):
"""
The type of attribute.
"""
CURRENCY = "Currency"
DATETIME = "DateTime"
DEFINEDTERM = "DefinedTerm"
DURATION = "Duration"
LOCATION = "Location"
NUMBER = "Number"
ORGANIZATION = "Organization"
PERCENTAGE = "Percentage"
PERSON = "Person"
class BatchStatus():
"""
The batch-request status.
:attr str function: (optional) The method to be run against the documents.
Possible values are `html_conversion`, `element_classification`, and `tables`.
:attr str input_bucket_location: (optional) The geographical location of the
Cloud Object Storage input bucket as listed on the **Endpoint** tab of your COS
instance; for example, `us-geo`, `eu-geo`, or `ap-geo`.
:attr str input_bucket_name: (optional) The name of the Cloud Object Storage
input bucket.
:attr str output_bucket_location: (optional) The geographical location of the
Cloud Object Storage output bucket as listed on the **Endpoint** tab of your COS
instance; for example, `us-geo`, `eu-geo`, or `ap-geo`.
:attr str output_bucket_name: (optional) The name of the Cloud Object Storage
output bucket.
:attr str batch_id: (optional) The unique identifier for the batch request.
:attr DocCounts document_counts: (optional) Document counts.
:attr str status: (optional) The status of the batch request.
:attr datetime created: (optional) The creation time of the batch request.
:attr datetime updated: (optional) The time of the most recent update to the
batch request.
"""
def __init__(self,
*,
function: str = None,
input_bucket_location: str = None,
input_bucket_name: str = None,
output_bucket_location: str = None,
output_bucket_name: str = None,
batch_id: str = None,
document_counts: 'DocCounts' = None,
status: str = None,
created: datetime = None,
updated: datetime = None) -> None:
"""
Initialize a BatchStatus object.
:param str function: (optional) The method to be run against the documents.
Possible values are `html_conversion`, `element_classification`, and
`tables`.
:param str input_bucket_location: (optional) The geographical location of
the Cloud Object Storage input bucket as listed on the **Endpoint** tab of
your COS instance; for example, `us-geo`, `eu-geo`, or `ap-geo`.
:param str input_bucket_name: (optional) The name of the Cloud Object
Storage input bucket.
:param str output_bucket_location: (optional) The geographical location of
the Cloud Object Storage output bucket as listed on the **Endpoint** tab of
your COS instance; for example, `us-geo`, `eu-geo`, or `ap-geo`.
:param str output_bucket_name: (optional) The name of the Cloud Object
Storage output bucket.
:param str batch_id: (optional) The unique identifier for the batch
request.
:param DocCounts document_counts: (optional) Document counts.
:param str status: (optional) The status of the batch request.
:param datetime created: (optional) The creation time of the batch request.
:param datetime updated: (optional) The time of the most recent update to
the batch request.
"""
self.function = function
self.input_bucket_location = input_bucket_location
self.input_bucket_name = input_bucket_name
self.output_bucket_location = output_bucket_location
self.output_bucket_name = output_bucket_name
self.batch_id = batch_id
self.document_counts = document_counts
self.status = status
self.created = created
self.updated = updated
@classmethod
def from_dict(cls, _dict: Dict) -> 'BatchStatus':
"""Initialize a BatchStatus object from a json dictionary."""
args = {}
valid_keys = [
'function', 'input_bucket_location', 'input_bucket_name',
'output_bucket_location', 'output_bucket_name', 'batch_id',
'document_counts', 'status', 'created', 'updated'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class BatchStatus: '
+ ', '.join(bad_keys))
if 'function' in _dict:
args['function'] = _dict.get('function')
if 'input_bucket_location' in _dict:
args['input_bucket_location'] = _dict.get('input_bucket_location')
if 'input_bucket_name' in _dict:
args['input_bucket_name'] = _dict.get('input_bucket_name')
if 'output_bucket_location' in _dict:
args['output_bucket_location'] = _dict.get('output_bucket_location')
if 'output_bucket_name' in _dict:
args['output_bucket_name'] = _dict.get('output_bucket_name')
if 'batch_id' in _dict:
args['batch_id'] = _dict.get('batch_id')
if 'document_counts' in _dict:
args['document_counts'] = DocCounts._from_dict(
_dict.get('document_counts'))
if 'status' in _dict:
args['status'] = _dict.get('status')
if 'created' in _dict:
args['created'] = string_to_datetime(_dict.get('created'))
if 'updated' in _dict:
args['updated'] = string_to_datetime(_dict.get('updated'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a BatchStatus object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'function') and self.function is not None:
_dict['function'] = self.function
if hasattr(self, 'input_bucket_location'
) and self.input_bucket_location is not None:
_dict['input_bucket_location'] = self.input_bucket_location
if hasattr(self,
'input_bucket_name') and self.input_bucket_name is not None:
_dict['input_bucket_name'] = self.input_bucket_name
if hasattr(self, 'output_bucket_location'
) and self.output_bucket_location is not None:
_dict['output_bucket_location'] = self.output_bucket_location
if hasattr(
self,
'output_bucket_name') and self.output_bucket_name is not None:
_dict['output_bucket_name'] = self.output_bucket_name
if hasattr(self, 'batch_id') and self.batch_id is not None:
_dict['batch_id'] = self.batch_id
if hasattr(self,
'document_counts') and self.document_counts is not None:
_dict['document_counts'] = self.document_counts._to_dict()
if hasattr(self, 'status') and self.status is not None:
_dict['status'] = self.status
if hasattr(self, 'created') and self.created is not None:
_dict['created'] = datetime_to_string(self.created)
if hasattr(self, 'updated') and self.updated is not None:
_dict['updated'] = datetime_to_string(self.updated)
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this BatchStatus object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'BatchStatus') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'BatchStatus') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class FunctionEnum(Enum):
"""
The method to be run against the documents. Possible values are `html_conversion`,
`element_classification`, and `tables`.
"""
ELEMENT_CLASSIFICATION = "element_classification"
HTML_CONVERSION = "html_conversion"
TABLES = "tables"
class Batches():
"""
The results of a successful **List Batches** request.
:attr List[BatchStatus] batches: (optional) A list of the status of all batch
requests.
"""
def __init__(self, *, batches: List['BatchStatus'] = None) -> None:
"""
Initialize a Batches object.
:param List[BatchStatus] batches: (optional) A list of the status of all
batch requests.
"""
self.batches = batches
@classmethod
def from_dict(cls, _dict: Dict) -> 'Batches':
"""Initialize a Batches object from a json dictionary."""
args = {}
valid_keys = ['batches']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Batches: ' +
', '.join(bad_keys))
if 'batches' in _dict:
args['batches'] = [
BatchStatus._from_dict(x) for x in (_dict.get('batches'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Batches object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'batches') and self.batches is not None:
_dict['batches'] = [x._to_dict() for x in self.batches]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Batches object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Batches') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Batches') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class BodyCells():
"""
Cells that are not table header, column header, or row header cells.
:attr str cell_id: (optional) The unique ID of the cell in the current table.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:attr str text: (optional) The textual contents of this cell from the input
document without associated markup content.
:attr int row_index_begin: (optional) The `begin` index of this cell's `row`
location in the current table.
:attr int row_index_end: (optional) The `end` index of this cell's `row`
location in the current table.
:attr int column_index_begin: (optional) The `begin` index of this cell's
`column` location in the current table.
:attr int column_index_end: (optional) The `end` index of this cell's `column`
location in the current table.
:attr List[str] row_header_ids: (optional) An array that contains the `id` value
of a row header that is applicable to this body cell.
:attr List[str] row_header_texts: (optional) An array that contains the `text`
value of a row header that is applicable to this body cell.
:attr List[str] row_header_texts_normalized: (optional) If you provide
customization input, the normalized version of the row header texts according to
the customization; otherwise, the same value as `row_header_texts`.
:attr List[str] column_header_ids: (optional) An array that contains the `id`
value of a column header that is applicable to the current cell.
:attr List[str] column_header_texts: (optional) An array that contains the
`text` value of a column header that is applicable to the current cell.
:attr List[str] column_header_texts_normalized: (optional) If you provide
customization input, the normalized version of the column header texts according
to the customization; otherwise, the same value as `column_header_texts`.
:attr List[Attribute] attributes: (optional)
"""
def __init__(self,
*,
cell_id: str = None,
location: 'Location' = None,
text: str = None,
row_index_begin: int = None,
row_index_end: int = None,
column_index_begin: int = None,
column_index_end: int = None,
row_header_ids: List[str] = None,
row_header_texts: List[str] = None,
row_header_texts_normalized: List[str] = None,
column_header_ids: List[str] = None,
column_header_texts: List[str] = None,
column_header_texts_normalized: List[str] = None,
attributes: List['Attribute'] = None) -> None:
"""
Initialize a BodyCells object.
:param str cell_id: (optional) The unique ID of the cell in the current
table.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:param str text: (optional) The textual contents of this cell from the
input document without associated markup content.
:param int row_index_begin: (optional) The `begin` index of this cell's
`row` location in the current table.
:param int row_index_end: (optional) The `end` index of this cell's `row`
location in the current table.
:param int column_index_begin: (optional) The `begin` index of this cell's
`column` location in the current table.
:param int column_index_end: (optional) The `end` index of this cell's
`column` location in the current table.
:param List[str] row_header_ids: (optional) An array that contains the `id`
value of a row header that is applicable to this body cell.
:param List[str] row_header_texts: (optional) An array that contains the
`text` value of a row header that is applicable to this body cell.
:param List[str] row_header_texts_normalized: (optional) If you provide
customization input, the normalized version of the row header texts
according to the customization; otherwise, the same value as
`row_header_texts`.
:param List[str] column_header_ids: (optional) An array that contains the
`id` value of a column header that is applicable to the current cell.
:param List[str] column_header_texts: (optional) An array that contains the
`text` value of a column header that is applicable to the current cell.
:param List[str] column_header_texts_normalized: (optional) If you provide
customization input, the normalized version of the column header texts
according to the customization; otherwise, the same value as
`column_header_texts`.
:param List[Attribute] attributes: (optional)
"""
self.cell_id = cell_id
self.location = location
self.text = text
self.row_index_begin = row_index_begin
self.row_index_end = row_index_end
self.column_index_begin = column_index_begin
self.column_index_end = column_index_end
self.row_header_ids = row_header_ids
self.row_header_texts = row_header_texts
self.row_header_texts_normalized = row_header_texts_normalized
self.column_header_ids = column_header_ids
self.column_header_texts = column_header_texts
self.column_header_texts_normalized = column_header_texts_normalized
self.attributes = attributes
@classmethod
def from_dict(cls, _dict: Dict) -> 'BodyCells':
"""Initialize a BodyCells object from a json dictionary."""
args = {}
valid_keys = [
'cell_id', 'location', 'text', 'row_index_begin', 'row_index_end',
'column_index_begin', 'column_index_end', 'row_header_ids',
'row_header_texts', 'row_header_texts_normalized',
'column_header_ids', 'column_header_texts',
'column_header_texts_normalized', 'attributes'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class BodyCells: '
+ ', '.join(bad_keys))
if 'cell_id' in _dict:
args['cell_id'] = _dict.get('cell_id')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'row_index_begin' in _dict:
args['row_index_begin'] = _dict.get('row_index_begin')
if 'row_index_end' in _dict:
args['row_index_end'] = _dict.get('row_index_end')
if 'column_index_begin' in _dict:
args['column_index_begin'] = _dict.get('column_index_begin')
if 'column_index_end' in _dict:
args['column_index_end'] = _dict.get('column_index_end')
if 'row_header_ids' in _dict:
args['row_header_ids'] = _dict.get('row_header_ids')
if 'row_header_texts' in _dict:
args['row_header_texts'] = _dict.get('row_header_texts')
if 'row_header_texts_normalized' in _dict:
args['row_header_texts_normalized'] = _dict.get(
'row_header_texts_normalized')
if 'column_header_ids' in _dict:
args['column_header_ids'] = _dict.get('column_header_ids')
if 'column_header_texts' in _dict:
args['column_header_texts'] = _dict.get('column_header_texts')
if 'column_header_texts_normalized' in _dict:
args['column_header_texts_normalized'] = _dict.get(
'column_header_texts_normalized')
if 'attributes' in _dict:
args['attributes'] = [
Attribute._from_dict(x) for x in (_dict.get('attributes'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a BodyCells object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'cell_id') and self.cell_id is not None:
_dict['cell_id'] = self.cell_id
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self,
'row_index_begin') and self.row_index_begin is not None:
_dict['row_index_begin'] = self.row_index_begin
if hasattr(self, 'row_index_end') and self.row_index_end is not None:
_dict['row_index_end'] = self.row_index_end
if hasattr(
self,
'column_index_begin') and self.column_index_begin is not None:
_dict['column_index_begin'] = self.column_index_begin
if hasattr(self,
'column_index_end') and self.column_index_end is not None:
_dict['column_index_end'] = self.column_index_end
if hasattr(self, 'row_header_ids') and self.row_header_ids is not None:
_dict['row_header_ids'] = self.row_header_ids
if hasattr(self,
'row_header_texts') and self.row_header_texts is not None:
_dict['row_header_texts'] = self.row_header_texts
if hasattr(self, 'row_header_texts_normalized'
) and self.row_header_texts_normalized is not None:
_dict[
'row_header_texts_normalized'] = self.row_header_texts_normalized
if hasattr(self,
'column_header_ids') and self.column_header_ids is not None:
_dict['column_header_ids'] = self.column_header_ids
if hasattr(
self,
'column_header_texts') and self.column_header_texts is not None:
_dict['column_header_texts'] = self.column_header_texts
if hasattr(self, 'column_header_texts_normalized'
) and self.column_header_texts_normalized is not None:
_dict[
'column_header_texts_normalized'] = self.column_header_texts_normalized
if hasattr(self, 'attributes') and self.attributes is not None:
_dict['attributes'] = [x._to_dict() for x in self.attributes]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this BodyCells object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'BodyCells') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'BodyCells') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
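# Illustrative usage (a sketch, not part of the service API surface): round-trip
# a body cell through `from_dict`/`to_dict`. The dictionary below is a
# hypothetical payload fragment, not a real service response.
#
#     cell = BodyCells.from_dict({
#         'cell_id': 'bodyCell-0001',
#         'text': 'Net 30 days',
#         'row_index_begin': 1, 'row_index_end': 1,
#         'column_index_begin': 2, 'column_index_end': 2,
#         'row_header_texts': ['Payment terms'],
#     })
#     assert cell.to_dict()['text'] == 'Net 30 days'
#     # Unrecognized keys raise ValueError, so misspelled fields fail fast.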
class Category():
"""
Information defining an element's subject matter.
:attr str label: (optional) The category of the associated element.
:attr List[str] provenance_ids: (optional) Hashed values that you can send to
IBM to provide feedback or receive support.
"""
def __init__(self, *, label: str = None,
provenance_ids: List[str] = None) -> None:
"""
Initialize a Category object.
:param str label: (optional) The category of the associated element.
:param List[str] provenance_ids: (optional) Hashed values that you can send
to IBM to provide feedback or receive support.
"""
self.label = label
self.provenance_ids = provenance_ids
@classmethod
def from_dict(cls, _dict: Dict) -> 'Category':
"""Initialize a Category object from a json dictionary."""
args = {}
valid_keys = ['label', 'provenance_ids']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Category: '
+ ', '.join(bad_keys))
if 'label' in _dict:
args['label'] = _dict.get('label')
if 'provenance_ids' in _dict:
args['provenance_ids'] = _dict.get('provenance_ids')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Category object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'label') and self.label is not None:
_dict['label'] = self.label
if hasattr(self, 'provenance_ids') and self.provenance_ids is not None:
_dict['provenance_ids'] = self.provenance_ids
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Category object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Category') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Category') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class LabelEnum(Enum):
"""
The category of the associated element.
"""
AMENDMENTS = "Amendments"
ASSET_USE = "Asset Use"
ASSIGNMENTS = "Assignments"
AUDITS = "Audits"
BUSINESS_CONTINUITY = "Business Continuity"
COMMUNICATION = "Communication"
CONFIDENTIALITY = "Confidentiality"
DELIVERABLES = "Deliverables"
DELIVERY = "Delivery"
DISPUTE_RESOLUTION = "Dispute Resolution"
FORCE_MAJEURE = "Force Majeure"
INDEMNIFICATION = "Indemnification"
INSURANCE = "Insurance"
INTELLECTUAL_PROPERTY = "Intellectual Property"
LIABILITY = "Liability"
ORDER_OF_PRECEDENCE = "Order of Precedence"
PAYMENT_TERMS_BILLING = "Payment Terms & Billing"
PRICING_TAXES = "Pricing & Taxes"
PRIVACY = "Privacy"
RESPONSIBILITIES = "Responsibilities"
SAFETY_AND_SECURITY = "Safety and Security"
SCOPE_OF_WORK = "Scope of Work"
SUBCONTRACTS = "Subcontracts"
TERM_TERMINATION = "Term & Termination"
WARRANTIES = "Warranties"
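# Illustrative sketch: `LabelEnum` enumerates the category labels the service can
# return, so comparisons can use the enum's `.value` rather than a raw string.
# The helper below is hypothetical, and depending on how the enum is scoped it
# may need to be referenced as `Category.LabelEnum`.
#
#     def has_label(category, wanted=LabelEnum.CONFIDENTIALITY):
#         return category.label == wanted.value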
class CategoryComparison():
"""
Information defining an element's subject matter.
:attr str label: (optional) The category of the associated element.
"""
def __init__(self, *, label: str = None) -> None:
"""
Initialize a CategoryComparison object.
:param str label: (optional) The category of the associated element.
"""
self.label = label
@classmethod
def from_dict(cls, _dict: Dict) -> 'CategoryComparison':
"""Initialize a CategoryComparison object from a json dictionary."""
args = {}
valid_keys = ['label']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class CategoryComparison: '
+ ', '.join(bad_keys))
if 'label' in _dict:
args['label'] = _dict.get('label')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a CategoryComparison object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'label') and self.label is not None:
_dict['label'] = self.label
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this CategoryComparison object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'CategoryComparison') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'CategoryComparison') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class LabelEnum(Enum):
"""
The category of the associated element.
"""
AMENDMENTS = "Amendments"
ASSET_USE = "Asset Use"
ASSIGNMENTS = "Assignments"
AUDITS = "Audits"
BUSINESS_CONTINUITY = "Business Continuity"
COMMUNICATION = "Communication"
CONFIDENTIALITY = "Confidentiality"
DELIVERABLES = "Deliverables"
DELIVERY = "Delivery"
DISPUTE_RESOLUTION = "Dispute Resolution"
FORCE_MAJEURE = "Force Majeure"
INDEMNIFICATION = "Indemnification"
INSURANCE = "Insurance"
INTELLECTUAL_PROPERTY = "Intellectual Property"
LIABILITY = "Liability"
ORDER_OF_PRECEDENCE = "Order of Precedence"
PAYMENT_TERMS_BILLING = "Payment Terms & Billing"
PRICING_TAXES = "Pricing & Taxes"
PRIVACY = "Privacy"
RESPONSIBILITIES = "Responsibilities"
SAFETY_AND_SECURITY = "Safety and Security"
SCOPE_OF_WORK = "Scope of Work"
SUBCONTRACTS = "Subcontracts"
TERM_TERMINATION = "Term & Termination"
WARRANTIES = "Warranties"
class ClassifyReturn():
"""
The analysis of objects returned by the **Element classification** method.
:attr Document document: (optional) Basic information about the input document.
:attr str model_id: (optional) The analysis model used to classify the input
document. For the **Element classification** method, the only valid value is
`contracts`.
:attr str model_version: (optional) The version of the analysis model identified
by the value of the `model_id` key.
:attr List[Element] elements: (optional) Document elements identified by the
service.
:attr List[EffectiveDates] effective_dates: (optional) The date or dates on
which the document becomes effective.
:attr List[ContractAmts] contract_amounts: (optional) The monetary amounts that
identify the total amount of the contract that needs to be paid from one party
to another.
:attr List[TerminationDates] termination_dates: (optional) The dates on which
the document is to be terminated.
:attr List[ContractTypes] contract_types: (optional) The contract type as
declared in the document.
:attr List[ContractTerms] contract_terms: (optional) The durations of the
contract.
:attr List[PaymentTerms] payment_terms: (optional) The document's payment
durations.
:attr List[ContractCurrencies] contract_currencies: (optional) The contract
currencies as declared in the document.
:attr List[Tables] tables: (optional) Definition of tables identified in the
input document.
:attr DocStructure document_structure: (optional) The structure of the input
document.
:attr List[Parties] parties: (optional) Definitions of the parties identified in
the input document.
"""
def __init__(self,
*,
document: 'Document' = None,
model_id: str = None,
model_version: str = None,
elements: List['Element'] = None,
effective_dates: List['EffectiveDates'] = None,
contract_amounts: List['ContractAmts'] = None,
termination_dates: List['TerminationDates'] = None,
contract_types: List['ContractTypes'] = None,
contract_terms: List['ContractTerms'] = None,
payment_terms: List['PaymentTerms'] = None,
contract_currencies: List['ContractCurrencies'] = None,
tables: List['Tables'] = None,
document_structure: 'DocStructure' = None,
parties: List['Parties'] = None) -> None:
"""
Initialize a ClassifyReturn object.
:param Document document: (optional) Basic information about the input
document.
:param str model_id: (optional) The analysis model used to classify the
input document. For the **Element classification** method, the only valid
value is `contracts`.
:param str model_version: (optional) The version of the analysis model
identified by the value of the `model_id` key.
:param List[Element] elements: (optional) Document elements identified by
the service.
:param List[EffectiveDates] effective_dates: (optional) The date or dates
on which the document becomes effective.
:param List[ContractAmts] contract_amounts: (optional) The monetary amounts
that identify the total amount of the contract that needs to be paid from
one party to another.
:param List[TerminationDates] termination_dates: (optional) The dates on
which the document is to be terminated.
:param List[ContractTypes] contract_types: (optional) The contract type as
declared in the document.
:param List[ContractTerms] contract_terms: (optional) The durations of the
contract.
:param List[PaymentTerms] payment_terms: (optional) The document's payment
durations.
:param List[ContractCurrencies] contract_currencies: (optional) The
contract currencies as declared in the document.
:param List[Tables] tables: (optional) Definition of tables identified in
the input document.
:param DocStructure document_structure: (optional) The structure of the
input document.
:param List[Parties] parties: (optional) Definitions of the parties
identified in the input document.
"""
self.document = document
self.model_id = model_id
self.model_version = model_version
self.elements = elements
self.effective_dates = effective_dates
self.contract_amounts = contract_amounts
self.termination_dates = termination_dates
self.contract_types = contract_types
self.contract_terms = contract_terms
self.payment_terms = payment_terms
self.contract_currencies = contract_currencies
self.tables = tables
self.document_structure = document_structure
self.parties = parties
@classmethod
def from_dict(cls, _dict: Dict) -> 'ClassifyReturn':
"""Initialize a ClassifyReturn object from a json dictionary."""
args = {}
valid_keys = [
'document', 'model_id', 'model_version', 'elements',
'effective_dates', 'contract_amounts', 'termination_dates',
'contract_types', 'contract_terms', 'payment_terms',
'contract_currencies', 'tables', 'document_structure', 'parties'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class ClassifyReturn: '
+ ', '.join(bad_keys))
if 'document' in _dict:
args['document'] = Document._from_dict(_dict.get('document'))
if 'model_id' in _dict:
args['model_id'] = _dict.get('model_id')
if 'model_version' in _dict:
args['model_version'] = _dict.get('model_version')
if 'elements' in _dict:
args['elements'] = [
Element._from_dict(x) for x in (_dict.get('elements'))
]
if 'effective_dates' in _dict:
args['effective_dates'] = [
EffectiveDates._from_dict(x)
for x in (_dict.get('effective_dates'))
]
if 'contract_amounts' in _dict:
args['contract_amounts'] = [
ContractAmts._from_dict(x)
for x in (_dict.get('contract_amounts'))
]
if 'termination_dates' in _dict:
args['termination_dates'] = [
TerminationDates._from_dict(x)
for x in (_dict.get('termination_dates'))
]
if 'contract_types' in _dict:
args['contract_types'] = [
ContractTypes._from_dict(x)
for x in (_dict.get('contract_types'))
]
if 'contract_terms' in _dict:
args['contract_terms'] = [
ContractTerms._from_dict(x)
for x in (_dict.get('contract_terms'))
]
if 'payment_terms' in _dict:
args['payment_terms'] = [
PaymentTerms._from_dict(x) for x in (_dict.get('payment_terms'))
]
if 'contract_currencies' in _dict:
args['contract_currencies'] = [
ContractCurrencies._from_dict(x)
for x in (_dict.get('contract_currencies'))
]
if 'tables' in _dict:
args['tables'] = [
Tables._from_dict(x) for x in (_dict.get('tables'))
]
if 'document_structure' in _dict:
args['document_structure'] = DocStructure._from_dict(
_dict.get('document_structure'))
if 'parties' in _dict:
args['parties'] = [
Parties._from_dict(x) for x in (_dict.get('parties'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ClassifyReturn object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'document') and self.document is not None:
_dict['document'] = self.document._to_dict()
if hasattr(self, 'model_id') and self.model_id is not None:
_dict['model_id'] = self.model_id
if hasattr(self, 'model_version') and self.model_version is not None:
_dict['model_version'] = self.model_version
if hasattr(self, 'elements') and self.elements is not None:
_dict['elements'] = [x._to_dict() for x in self.elements]
if hasattr(self,
'effective_dates') and self.effective_dates is not None:
_dict['effective_dates'] = [
x._to_dict() for x in self.effective_dates
]
if hasattr(self,
'contract_amounts') and self.contract_amounts is not None:
_dict['contract_amounts'] = [
x._to_dict() for x in self.contract_amounts
]
if hasattr(self,
'termination_dates') and self.termination_dates is not None:
_dict['termination_dates'] = [
x._to_dict() for x in self.termination_dates
]
if hasattr(self, 'contract_types') and self.contract_types is not None:
_dict['contract_types'] = [
x._to_dict() for x in self.contract_types
]
if hasattr(self, 'contract_terms') and self.contract_terms is not None:
_dict['contract_terms'] = [
x._to_dict() for x in self.contract_terms
]
if hasattr(self, 'payment_terms') and self.payment_terms is not None:
_dict['payment_terms'] = [x._to_dict() for x in self.payment_terms]
if hasattr(
self,
'contract_currencies') and self.contract_currencies is not None:
_dict['contract_currencies'] = [
x._to_dict() for x in self.contract_currencies
]
if hasattr(self, 'tables') and self.tables is not None:
_dict['tables'] = [x._to_dict() for x in self.tables]
if hasattr(
self,
'document_structure') and self.document_structure is not None:
_dict['document_structure'] = self.document_structure._to_dict()
if hasattr(self, 'parties') and self.parties is not None:
_dict['parties'] = [x._to_dict() for x in self.parties]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ClassifyReturn object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'ClassifyReturn') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ClassifyReturn') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
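# Illustrative sketch of consuming an Element classification result. The
# dictionary below stands in for the parsed JSON body of a hypothetical service
# response; field names follow the attributes documented above.
#
#     result = ClassifyReturn.from_dict({
#         'model_id': 'contracts',
#         'model_version': '1.0.0',
#         'elements': [],
#         'contract_amounts': [],
#     })
#     print(result.model_id, len(result.elements or []))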
class ColumnHeaders():
"""
Column-level cells of the current table, each of which serves as a header for
other cells in the same column.
:attr str cell_id: (optional) The unique ID of the cell in the current table.
:attr object location: (optional) The location of the column header cell in the
current table as defined by its `begin` and `end` offsets, respectively, in the
input document.
:attr str text: (optional) The textual contents of this cell from the input
document without associated markup content.
:attr str text_normalized: (optional) If you provide customization input, the
normalized version of the cell text according to the customization; otherwise,
the same value as `text`.
:attr int row_index_begin: (optional) The `begin` index of this cell's `row`
location in the current table.
:attr int row_index_end: (optional) The `end` index of this cell's `row`
location in the current table.
:attr int column_index_begin: (optional) The `begin` index of this cell's
`column` location in the current table.
:attr int column_index_end: (optional) The `end` index of this cell's `column`
location in the current table.
"""
def __init__(self,
*,
cell_id: str = None,
location: object = None,
text: str = None,
text_normalized: str = None,
row_index_begin: int = None,
row_index_end: int = None,
column_index_begin: int = None,
column_index_end: int = None) -> None:
"""
Initialize a ColumnHeaders object.
:param str cell_id: (optional) The unique ID of the cell in the current
table.
:param object location: (optional) The location of the column header cell
in the current table as defined by its `begin` and `end` offsets,
respectively, in the input document.
:param str text: (optional) The textual contents of this cell from the
input document without associated markup content.
:param str text_normalized: (optional) If you provide customization input,
the normalized version of the cell text according to the customization;
otherwise, the same value as `text`.
:param int row_index_begin: (optional) The `begin` index of this cell's
`row` location in the current table.
:param int row_index_end: (optional) The `end` index of this cell's `row`
location in the current table.
:param int column_index_begin: (optional) The `begin` index of this cell's
`column` location in the current table.
:param int column_index_end: (optional) The `end` index of this cell's
`column` location in the current table.
"""
self.cell_id = cell_id
self.location = location
self.text = text
self.text_normalized = text_normalized
self.row_index_begin = row_index_begin
self.row_index_end = row_index_end
self.column_index_begin = column_index_begin
self.column_index_end = column_index_end
@classmethod
def from_dict(cls, _dict: Dict) -> 'ColumnHeaders':
"""Initialize a ColumnHeaders object from a json dictionary."""
args = {}
valid_keys = [
'cell_id', 'location', 'text', 'text_normalized', 'row_index_begin',
'row_index_end', 'column_index_begin', 'column_index_end'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class ColumnHeaders: '
+ ', '.join(bad_keys))
if 'cell_id' in _dict:
args['cell_id'] = _dict.get('cell_id')
if 'location' in _dict:
args['location'] = _dict.get('location')
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'text_normalized' in _dict:
args['text_normalized'] = _dict.get('text_normalized')
if 'row_index_begin' in _dict:
args['row_index_begin'] = _dict.get('row_index_begin')
if 'row_index_end' in _dict:
args['row_index_end'] = _dict.get('row_index_end')
if 'column_index_begin' in _dict:
args['column_index_begin'] = _dict.get('column_index_begin')
if 'column_index_end' in _dict:
args['column_index_end'] = _dict.get('column_index_end')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ColumnHeaders object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'cell_id') and self.cell_id is not None:
_dict['cell_id'] = self.cell_id
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self,
'text_normalized') and self.text_normalized is not None:
_dict['text_normalized'] = self.text_normalized
if hasattr(self,
'row_index_begin') and self.row_index_begin is not None:
_dict['row_index_begin'] = self.row_index_begin
if hasattr(self, 'row_index_end') and self.row_index_end is not None:
_dict['row_index_end'] = self.row_index_end
if hasattr(
self,
'column_index_begin') and self.column_index_begin is not None:
_dict['column_index_begin'] = self.column_index_begin
if hasattr(self,
'column_index_end') and self.column_index_end is not None:
_dict['column_index_end'] = self.column_index_end
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ColumnHeaders object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'ColumnHeaders') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ColumnHeaders') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class CompareReturn():
"""
The comparison of the two submitted documents.
:attr str model_id: (optional) The analysis model used to compare the input
documents. For the **Compare two documents** method, the only valid value is
`contracts`.
:attr str model_version: (optional) The version of the analysis model identified
by the value of the `model_id` key.
:attr List[Document] documents: (optional) Information about the documents being
compared.
:attr List[AlignedElement] aligned_elements: (optional) A list of pairs of
elements that semantically align between the compared documents.
:attr List[UnalignedElement] unaligned_elements: (optional) A list of elements
that do not semantically align between the compared documents.
"""
def __init__(self,
*,
model_id: str = None,
model_version: str = None,
documents: List['Document'] = None,
aligned_elements: List['AlignedElement'] = None,
unaligned_elements: List['UnalignedElement'] = None) -> None:
"""
Initialize a CompareReturn object.
:param str model_id: (optional) The analysis model used to compare the
input documents. For the **Compare two documents** method, the only valid
value is `contracts`.
:param str model_version: (optional) The version of the analysis model
identified by the value of the `model_id` key.
:param List[Document] documents: (optional) Information about the documents
being compared.
:param List[AlignedElement] aligned_elements: (optional) A list of pairs of
elements that semantically align between the compared documents.
:param List[UnalignedElement] unaligned_elements: (optional) A list of
elements that do not semantically align between the compared documents.
"""
self.model_id = model_id
self.model_version = model_version
self.documents = documents
self.aligned_elements = aligned_elements
self.unaligned_elements = unaligned_elements
@classmethod
def from_dict(cls, _dict: Dict) -> 'CompareReturn':
"""Initialize a CompareReturn object from a json dictionary."""
args = {}
valid_keys = [
'model_id', 'model_version', 'documents', 'aligned_elements',
'unaligned_elements'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class CompareReturn: '
+ ', '.join(bad_keys))
if 'model_id' in _dict:
args['model_id'] = _dict.get('model_id')
if 'model_version' in _dict:
args['model_version'] = _dict.get('model_version')
if 'documents' in _dict:
args['documents'] = [
Document._from_dict(x) for x in (_dict.get('documents'))
]
if 'aligned_elements' in _dict:
args['aligned_elements'] = [
AlignedElement._from_dict(x)
for x in (_dict.get('aligned_elements'))
]
if 'unaligned_elements' in _dict:
args['unaligned_elements'] = [
UnalignedElement._from_dict(x)
for x in (_dict.get('unaligned_elements'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a CompareReturn object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'model_id') and self.model_id is not None:
_dict['model_id'] = self.model_id
if hasattr(self, 'model_version') and self.model_version is not None:
_dict['model_version'] = self.model_version
if hasattr(self, 'documents') and self.documents is not None:
_dict['documents'] = [x._to_dict() for x in self.documents]
if hasattr(self,
'aligned_elements') and self.aligned_elements is not None:
_dict['aligned_elements'] = [
x._to_dict() for x in self.aligned_elements
]
if hasattr(
self,
'unaligned_elements') and self.unaligned_elements is not None:
_dict['unaligned_elements'] = [
x._to_dict() for x in self.unaligned_elements
]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this CompareReturn object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'CompareReturn') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'CompareReturn') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
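# Illustrative sketch: summarize a document comparison. The counts assume the
# optional lists may be absent, and the function itself is a hypothetical
# convenience, not part of the model.
#
#     def summarize_comparison(comparison: 'CompareReturn') -> str:
#         aligned = len(comparison.aligned_elements or [])
#         unaligned = len(comparison.unaligned_elements or [])
#         return '{} aligned / {} unaligned elements'.format(aligned, unaligned)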
class Contact():
"""
A contact.
:attr str name: (optional) A string listing the name of the contact.
:attr str role: (optional) A string listing the role of the contact.
"""
def __init__(self, *, name: str = None, role: str = None) -> None:
"""
Initialize a Contact object.
:param str name: (optional) A string listing the name of the contact.
:param str role: (optional) A string listing the role of the contact.
"""
self.name = name
self.role = role
@classmethod
def from_dict(cls, _dict: Dict) -> 'Contact':
"""Initialize a Contact object from a json dictionary."""
args = {}
valid_keys = ['name', 'role']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Contact: ' +
', '.join(bad_keys))
if 'name' in _dict:
args['name'] = _dict.get('name')
if 'role' in _dict:
args['role'] = _dict.get('role')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Contact object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'name') and self.name is not None:
_dict['name'] = self.name
if hasattr(self, 'role') and self.role is not None:
_dict['role'] = self.role
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Contact object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Contact') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Contact') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class Contexts():
"""
Text that is related to the contents of the table and that precedes or follows the
current table.
:attr str text: (optional) The related text.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
def __init__(self, *, text: str = None,
location: 'Location' = None) -> None:
"""
Initialize a Contexts object.
:param str text: (optional) The related text.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
self.text = text
self.location = location
@classmethod
def from_dict(cls, _dict: Dict) -> 'Contexts':
"""Initialize a Contexts object from a json dictionary."""
args = {}
valid_keys = ['text', 'location']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Contexts: '
+ ', '.join(bad_keys))
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Contexts object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Contexts object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Contexts') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Contexts') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ContractAmts():
"""
A monetary amount identified in the input document.
:attr str confidence_level: (optional) The confidence level in the
identification of the contract amount.
:attr str text: (optional) The monetary amount.
:attr str text_normalized: (optional) The normalized form of the amount, which
is listed as a string. This element is optional; it is returned only if
normalized text exists.
:attr Interpretation interpretation: (optional) The details of the normalized
text, if applicable. This element is optional; it is returned only if normalized
text exists.
:attr List[str] provenance_ids: (optional) Hashed values that you can send to
IBM to provide feedback or receive support.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
def __init__(self,
*,
confidence_level: str = None,
text: str = None,
text_normalized: str = None,
interpretation: 'Interpretation' = None,
provenance_ids: List[str] = None,
location: 'Location' = None) -> None:
"""
Initialize a ContractAmts object.
:param str confidence_level: (optional) The confidence level in the
identification of the contract amount.
:param str text: (optional) The monetary amount.
:param str text_normalized: (optional) The normalized form of the amount,
which is listed as a string. This element is optional; it is returned only
if normalized text exists.
:param Interpretation interpretation: (optional) The details of the
normalized text, if applicable. This element is optional; it is returned
only if normalized text exists.
:param List[str] provenance_ids: (optional) Hashed values that you can send
to IBM to provide feedback or receive support.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
self.confidence_level = confidence_level
self.text = text
self.text_normalized = text_normalized
self.interpretation = interpretation
self.provenance_ids = provenance_ids
self.location = location
@classmethod
def from_dict(cls, _dict: Dict) -> 'ContractAmts':
"""Initialize a ContractAmts object from a json dictionary."""
args = {}
valid_keys = [
'confidence_level', 'text', 'text_normalized', 'interpretation',
'provenance_ids', 'location'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class ContractAmts: '
+ ', '.join(bad_keys))
if 'confidence_level' in _dict:
args['confidence_level'] = _dict.get('confidence_level')
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'text_normalized' in _dict:
args['text_normalized'] = _dict.get('text_normalized')
if 'interpretation' in _dict:
args['interpretation'] = Interpretation._from_dict(
_dict.get('interpretation'))
if 'provenance_ids' in _dict:
args['provenance_ids'] = _dict.get('provenance_ids')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ContractAmts object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self,
'confidence_level') and self.confidence_level is not None:
_dict['confidence_level'] = self.confidence_level
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self,
'text_normalized') and self.text_normalized is not None:
_dict['text_normalized'] = self.text_normalized
if hasattr(self, 'interpretation') and self.interpretation is not None:
_dict['interpretation'] = self.interpretation._to_dict()
if hasattr(self, 'provenance_ids') and self.provenance_ids is not None:
_dict['provenance_ids'] = self.provenance_ids
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ContractAmts object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'ContractAmts') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ContractAmts') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ConfidenceLevelEnum(Enum):
"""
The confidence level in the identification of the contract amount.
"""
HIGH = "High"
MEDIUM = "Medium"
LOW = "Low"
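# Illustrative sketch: keep only the contract amounts identified with high
# confidence. `ConfidenceLevelEnum.HIGH.value` is the string "High", matching the
# `confidence_level` attribute documented above. The helper is hypothetical, and
# if the enum is scoped to the class it should be referenced as
# `ContractAmts.ConfidenceLevelEnum`.
#
#     def high_confidence_amounts(amounts):
#         return [a for a in (amounts or [])
#                 if a.confidence_level == ConfidenceLevelEnum.HIGH.value]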
class ContractCurrencies():
"""
The contract currencies that are declared in the document.
:attr str confidence_level: (optional) The confidence level in the
identification of the contract currency.
:attr str text: (optional) The contract currency.
:attr str text_normalized: (optional) The normalized form of the contract
currency, which is listed as a string in
[ISO-4217](https://www.iso.org/iso-4217-currency-codes.html) format. This
element is optional; it is returned only if normalized text exists.
:attr List[str] provenance_ids: (optional) Hashed values that you can send to
IBM to provide feedback or receive support.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
def __init__(self,
*,
confidence_level: str = None,
text: str = None,
text_normalized: str = None,
provenance_ids: List[str] = None,
location: 'Location' = None) -> None:
"""
Initialize a ContractCurrencies object.
:param str confidence_level: (optional) The confidence level in the
identification of the contract currency.
:param str text: (optional) The contract currency.
:param str text_normalized: (optional) The normalized form of the contract
currency, which is listed as a string in
[ISO-4217](https://www.iso.org/iso-4217-currency-codes.html) format. This
element is optional; it is returned only if normalized text exists.
:param List[str] provenance_ids: (optional) Hashed values that you can send
to IBM to provide feedback or receive support.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
self.confidence_level = confidence_level
self.text = text
self.text_normalized = text_normalized
self.provenance_ids = provenance_ids
self.location = location
@classmethod
def from_dict(cls, _dict: Dict) -> 'ContractCurrencies':
"""Initialize a ContractCurrencies object from a json dictionary."""
args = {}
valid_keys = [
'confidence_level', 'text', 'text_normalized', 'provenance_ids',
'location'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class ContractCurrencies: '
+ ', '.join(bad_keys))
if 'confidence_level' in _dict:
args['confidence_level'] = _dict.get('confidence_level')
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'text_normalized' in _dict:
args['text_normalized'] = _dict.get('text_normalized')
if 'provenance_ids' in _dict:
args['provenance_ids'] = _dict.get('provenance_ids')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ContractCurrencies object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self,
'confidence_level') and self.confidence_level is not None:
_dict['confidence_level'] = self.confidence_level
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self,
'text_normalized') and self.text_normalized is not None:
_dict['text_normalized'] = self.text_normalized
if hasattr(self, 'provenance_ids') and self.provenance_ids is not None:
_dict['provenance_ids'] = self.provenance_ids
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ContractCurrencies object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'ContractCurrencies') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ContractCurrencies') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ConfidenceLevelEnum(Enum):
"""
The confidence level in the identification of the contract currency.
"""
HIGH = "High"
MEDIUM = "Medium"
LOW = "Low"
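# Illustrative sketch: `text_normalized` for a contract currency is documented as
# an ISO-4217 code, so normalized values such as "USD" can be compared directly.
# The sample object below is hypothetical.
#
#     currency = ContractCurrencies.from_dict({'text': 'U.S. dollars',
#                                              'text_normalized': 'USD'})
#     assert currency.text_normalized == 'USD'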
class ContractTerms():
"""
The duration or durations of the contract.
:attr str confidence_level: (optional) The confidence level in the
identification of the contract term.
:attr str text: (optional) The contract term (duration).
:attr str text_normalized: (optional) The normalized form of the contract term,
which is listed as a string. This element is optional; it is returned only if
normalized text exists.
:attr Interpretation interpretation: (optional) The details of the normalized
text, if applicable. This element is optional; it is returned only if normalized
text exists.
:attr List[str] provenance_ids: (optional) Hashed values that you can send to
IBM to provide feedback or receive support.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
def __init__(self,
*,
confidence_level: str = None,
text: str = None,
text_normalized: str = None,
interpretation: 'Interpretation' = None,
provenance_ids: List[str] = None,
location: 'Location' = None) -> None:
"""
Initialize a ContractTerms object.
:param str confidence_level: (optional) The confidence level in the
identification of the contract term.
:param str text: (optional) The contract term (duration).
:param str text_normalized: (optional) The normalized form of the contract
term, which is listed as a string. This element is optional; it is returned
only if normalized text exists.
:param Interpretation interpretation: (optional) The details of the
normalized text, if applicable. This element is optional; it is returned
only if normalized text exists.
:param List[str] provenance_ids: (optional) Hashed values that you can send
to IBM to provide feedback or receive support.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
self.confidence_level = confidence_level
self.text = text
self.text_normalized = text_normalized
self.interpretation = interpretation
self.provenance_ids = provenance_ids
self.location = location
@classmethod
def from_dict(cls, _dict: Dict) -> 'ContractTerms':
"""Initialize a ContractTerms object from a json dictionary."""
args = {}
valid_keys = [
'confidence_level', 'text', 'text_normalized', 'interpretation',
'provenance_ids', 'location'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class ContractTerms: '
+ ', '.join(bad_keys))
if 'confidence_level' in _dict:
args['confidence_level'] = _dict.get('confidence_level')
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'text_normalized' in _dict:
args['text_normalized'] = _dict.get('text_normalized')
if 'interpretation' in _dict:
args['interpretation'] = Interpretation._from_dict(
_dict.get('interpretation'))
if 'provenance_ids' in _dict:
args['provenance_ids'] = _dict.get('provenance_ids')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ContractTerms object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self,
'confidence_level') and self.confidence_level is not None:
_dict['confidence_level'] = self.confidence_level
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self,
'text_normalized') and self.text_normalized is not None:
_dict['text_normalized'] = self.text_normalized
if hasattr(self, 'interpretation') and self.interpretation is not None:
_dict['interpretation'] = self.interpretation._to_dict()
if hasattr(self, 'provenance_ids') and self.provenance_ids is not None:
_dict['provenance_ids'] = self.provenance_ids
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ContractTerms object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'ContractTerms') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ContractTerms') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ConfidenceLevelEnum(Enum):
"""
The confidence level in the identification of the contract term.
"""
HIGH = "High"
MEDIUM = "Medium"
LOW = "Low"
class ContractTypes():
"""
The contract type identified in the input document.
:attr str confidence_level: (optional) The confidence level in the
identification of the contract type.
:attr str text: (optional) The contract type.
:attr List[str] provenance_ids: (optional) Hashed values that you can send to
IBM to provide feedback or receive support.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
def __init__(self,
*,
confidence_level: str = None,
text: str = None,
provenance_ids: List[str] = None,
location: 'Location' = None) -> None:
"""
Initialize a ContractTypes object.
:param str confidence_level: (optional) The confidence level in the
identification of the contract type.
:param str text: (optional) The contract type.
:param List[str] provenance_ids: (optional) Hashed values that you can send
to IBM to provide feedback or receive support.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
self.confidence_level = confidence_level
self.text = text
self.provenance_ids = provenance_ids
self.location = location
@classmethod
def from_dict(cls, _dict: Dict) -> 'ContractTypes':
"""Initialize a ContractTypes object from a json dictionary."""
args = {}
valid_keys = ['confidence_level', 'text', 'provenance_ids', 'location']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class ContractTypes: '
+ ', '.join(bad_keys))
if 'confidence_level' in _dict:
args['confidence_level'] = _dict.get('confidence_level')
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'provenance_ids' in _dict:
args['provenance_ids'] = _dict.get('provenance_ids')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ContractTypes object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self,
'confidence_level') and self.confidence_level is not None:
_dict['confidence_level'] = self.confidence_level
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'provenance_ids') and self.provenance_ids is not None:
_dict['provenance_ids'] = self.provenance_ids
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ContractTypes object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'ContractTypes') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ContractTypes') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ConfidenceLevelEnum(Enum):
"""
The confidence level in the identification of the contract type.
"""
HIGH = "High"
MEDIUM = "Medium"
LOW = "Low"
class DocCounts():
"""
Document counts.
:attr int total: (optional) Total number of documents.
:attr int pending: (optional) Number of pending documents.
:attr int successful: (optional) Number of documents successfully processed.
:attr int failed: (optional) Number of documents not successfully processed.
"""
def __init__(self,
*,
total: int = None,
pending: int = None,
successful: int = None,
failed: int = None) -> None:
"""
Initialize a DocCounts object.
:param int total: (optional) Total number of documents.
:param int pending: (optional) Number of pending documents.
:param int successful: (optional) Number of documents successfully
processed.
:param int failed: (optional) Number of documents not successfully
processed.
"""
self.total = total
self.pending = pending
self.successful = successful
self.failed = failed
@classmethod
def from_dict(cls, _dict: Dict) -> 'DocCounts':
"""Initialize a DocCounts object from a json dictionary."""
args = {}
valid_keys = ['total', 'pending', 'successful', 'failed']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class DocCounts: '
+ ', '.join(bad_keys))
if 'total' in _dict:
args['total'] = _dict.get('total')
if 'pending' in _dict:
args['pending'] = _dict.get('pending')
if 'successful' in _dict:
args['successful'] = _dict.get('successful')
if 'failed' in _dict:
args['failed'] = _dict.get('failed')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a DocCounts object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'total') and self.total is not None:
_dict['total'] = self.total
if hasattr(self, 'pending') and self.pending is not None:
_dict['pending'] = self.pending
if hasattr(self, 'successful') and self.successful is not None:
_dict['successful'] = self.successful
if hasattr(self, 'failed') and self.failed is not None:
_dict['failed'] = self.failed
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this DocCounts object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'DocCounts') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'DocCounts') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
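# Illustrative sketch: derive a completion ratio from batch document counts. This
# is a hypothetical convenience calculation, not part of the model itself.
#
#     def completion_ratio(counts: 'DocCounts') -> float:
#         done = (counts.successful or 0) + (counts.failed or 0)
#         total = counts.total or 0
#         return done / total if total else 0.0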
class DocInfo():
"""
Information about the parsed input document.
:attr str html: (optional) The full text of the parsed document in HTML format.
:attr str title: (optional) The title of the parsed document. If the service did
not detect a title, the value of this element is `null`.
:attr str hash: (optional) The MD5 hash of the input document.
"""
def __init__(self, *, html: str = None, title: str = None,
hash: str = None) -> None:
"""
Initialize a DocInfo object.
:param str html: (optional) The full text of the parsed document in HTML
format.
:param str title: (optional) The title of the parsed document. If the
service did not detect a title, the value of this element is `null`.
:param str hash: (optional) The MD5 hash of the input document.
"""
self.html = html
self.title = title
self.hash = hash
@classmethod
def from_dict(cls, _dict: Dict) -> 'DocInfo':
"""Initialize a DocInfo object from a json dictionary."""
args = {}
valid_keys = ['html', 'title', 'hash']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class DocInfo: ' +
', '.join(bad_keys))
if 'html' in _dict:
args['html'] = _dict.get('html')
if 'title' in _dict:
args['title'] = _dict.get('title')
if 'hash' in _dict:
args['hash'] = _dict.get('hash')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a DocInfo object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'html') and self.html is not None:
_dict['html'] = self.html
if hasattr(self, 'title') and self.title is not None:
_dict['title'] = self.title
if hasattr(self, 'hash') and self.hash is not None:
_dict['hash'] = self.hash
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this DocInfo object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'DocInfo') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'DocInfo') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class DocStructure():
"""
The structure of the input document.
:attr List[SectionTitles] section_titles: (optional) An array containing one
object per section or subsection identified in the input document.
:attr List[LeadingSentence] leading_sentences: (optional) An array containing
one object per section or subsection, in parallel with the `section_titles`
array, that details the leading sentences in the corresponding section or
subsection.
:attr List[Paragraphs] paragraphs: (optional) An array containing one object per
paragraph, in parallel with the `section_titles` and `leading_sentences` arrays.
"""
def __init__(self,
*,
section_titles: List['SectionTitles'] = None,
leading_sentences: List['LeadingSentence'] = None,
paragraphs: List['Paragraphs'] = None) -> None:
"""
Initialize a DocStructure object.
:param List[SectionTitles] section_titles: (optional) An array containing
one object per section or subsection identified in the input document.
:param List[LeadingSentence] leading_sentences: (optional) An array
containing one object per section or subsection, in parallel with the
`section_titles` array, that details the leading sentences in the
corresponding section or subsection.
:param List[Paragraphs] paragraphs: (optional) An array containing one
object per paragraph, in parallel with the `section_titles` and
`leading_sentences` arrays.
"""
self.section_titles = section_titles
self.leading_sentences = leading_sentences
self.paragraphs = paragraphs
@classmethod
def from_dict(cls, _dict: Dict) -> 'DocStructure':
"""Initialize a DocStructure object from a json dictionary."""
args = {}
valid_keys = ['section_titles', 'leading_sentences', 'paragraphs']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class DocStructure: '
+ ', '.join(bad_keys))
if 'section_titles' in _dict:
args['section_titles'] = [
SectionTitles._from_dict(x)
for x in (_dict.get('section_titles'))
]
if 'leading_sentences' in _dict:
args['leading_sentences'] = [
LeadingSentence._from_dict(x)
for x in (_dict.get('leading_sentences'))
]
if 'paragraphs' in _dict:
args['paragraphs'] = [
Paragraphs._from_dict(x) for x in (_dict.get('paragraphs'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a DocStructure object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'section_titles') and self.section_titles is not None:
_dict['section_titles'] = [
x._to_dict() for x in self.section_titles
]
if hasattr(self,
'leading_sentences') and self.leading_sentences is not None:
_dict['leading_sentences'] = [
x._to_dict() for x in self.leading_sentences
]
if hasattr(self, 'paragraphs') and self.paragraphs is not None:
_dict['paragraphs'] = [x._to_dict() for x in self.paragraphs]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this DocStructure object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'DocStructure') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'DocStructure') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
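# Illustrative sketch: because `section_titles`, `leading_sentences`, and
# `paragraphs` are documented as parallel arrays, they can be walked together.
# The `text` attribute on the per-section objects is an assumption about the
# related model classes, and the generator itself is hypothetical.
#
#     def outline(structure: 'DocStructure'):
#         titles = structure.section_titles or []
#         leads = structure.leading_sentences or []
#         for title, lead in zip(titles, leads):
#             yield title.text, lead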
class Document():
"""
Basic information about the input document.
:attr str title: (optional) Document title, if detected.
:attr str html: (optional) The input document converted into HTML format.
:attr str hash: (optional) The MD5 hash value of the input document.
:attr str label: (optional) The label applied to the input document with the
calling method's `file_1_label` or `file_2_label` value. This field is specified
only in the output of the **Comparing two documents** method.
"""
def __init__(self,
*,
title: str = None,
html: str = None,
hash: str = None,
label: str = None) -> None:
"""
Initialize a Document object.
:param str title: (optional) Document title, if detected.
:param str html: (optional) The input document converted into HTML format.
:param str hash: (optional) The MD5 hash value of the input document.
:param str label: (optional) The label applied to the input document with
the calling method's `file_1_label` or `file_2_label` value. This field is
specified only in the output of the **Comparing two documents** method.
"""
self.title = title
self.html = html
self.hash = hash
self.label = label
@classmethod
def from_dict(cls, _dict: Dict) -> 'Document':
"""Initialize a Document object from a json dictionary."""
args = {}
valid_keys = ['title', 'html', 'hash', 'label']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Document: '
+ ', '.join(bad_keys))
if 'title' in _dict:
args['title'] = _dict.get('title')
if 'html' in _dict:
args['html'] = _dict.get('html')
if 'hash' in _dict:
args['hash'] = _dict.get('hash')
if 'label' in _dict:
args['label'] = _dict.get('label')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Document object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'title') and self.title is not None:
_dict['title'] = self.title
if hasattr(self, 'html') and self.html is not None:
_dict['html'] = self.html
if hasattr(self, 'hash') and self.hash is not None:
_dict['hash'] = self.hash
if hasattr(self, 'label') and self.label is not None:
_dict['label'] = self.label
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Document object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Document') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Document') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
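# Illustrative sketch, not part of the generated SDK: a minimal round trip through
# Document.from_dict()/to_dict(). The title, hash, and label values are invented
# placeholders; any key outside the four accepted by from_dict() raises ValueError.
def _example_document_roundtrip():
    raw = {
        'title': 'Master Services Agreement',
        'hash': 'd41d8cd98f00b204e9800998ecf8427e',
        'label': 'file_1',
    }
    doc = Document.from_dict(raw)
    # Only attributes that are set are serialized back, so the round trip is exact here.
    assert doc.to_dict() == raw
    try:
        Document.from_dict({'titel': 'typo'})
    except ValueError:
        pass  # unrecognized keys are rejected with a descriptive message
    return doc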
class EffectiveDates():
"""
An effective date.
:attr str confidence_level: (optional) The confidence level in the
identification of the effective date.
:attr str text: (optional) The effective date, listed as a string.
:attr str text_normalized: (optional) The normalized form of the effective date,
which is listed as a string. This element is optional; it is returned only if
normalized text exists.
:attr List[str] provenance_ids: (optional) Hashed values that you can send to
IBM to provide feedback or receive support.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
def __init__(self,
*,
confidence_level: str = None,
text: str = None,
text_normalized: str = None,
provenance_ids: List[str] = None,
location: 'Location' = None) -> None:
"""
Initialize an EffectiveDates object.
:param str confidence_level: (optional) The confidence level in the
identification of the effective date.
:param str text: (optional) The effective date, listed as a string.
:param str text_normalized: (optional) The normalized form of the effective
date, which is listed as a string. This element is optional; it is returned
only if normalized text exists.
:param List[str] provenance_ids: (optional) Hashed values that you can send
to IBM to provide feedback or receive support.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
self.confidence_level = confidence_level
self.text = text
self.text_normalized = text_normalized
self.provenance_ids = provenance_ids
self.location = location
@classmethod
def from_dict(cls, _dict: Dict) -> 'EffectiveDates':
"""Initialize a EffectiveDates object from a json dictionary."""
args = {}
valid_keys = [
'confidence_level', 'text', 'text_normalized', 'provenance_ids',
'location'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class EffectiveDates: '
+ ', '.join(bad_keys))
if 'confidence_level' in _dict:
args['confidence_level'] = _dict.get('confidence_level')
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'text_normalized' in _dict:
args['text_normalized'] = _dict.get('text_normalized')
if 'provenance_ids' in _dict:
args['provenance_ids'] = _dict.get('provenance_ids')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a EffectiveDates object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self,
'confidence_level') and self.confidence_level is not None:
_dict['confidence_level'] = self.confidence_level
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self,
'text_normalized') and self.text_normalized is not None:
_dict['text_normalized'] = self.text_normalized
if hasattr(self, 'provenance_ids') and self.provenance_ids is not None:
_dict['provenance_ids'] = self.provenance_ids
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this EffectiveDates object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'EffectiveDates') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'EffectiveDates') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ConfidenceLevelEnum(Enum):
"""
The confidence level in the identification of the effective date.
"""
HIGH = "High"
MEDIUM = "Medium"
LOW = "Low"
class Element():
"""
A component part of the document.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:attr str text: (optional) The text of the element.
:attr List[TypeLabel] types: (optional) Description of the action specified by
the element and whom it affects.
:attr List[Category] categories: (optional) List of functional categories into
which the element falls; in other words, the subject matter of the element.
:attr List[Attribute] attributes: (optional) List of document attributes.
"""
def __init__(self,
*,
location: 'Location' = None,
text: str = None,
types: List['TypeLabel'] = None,
categories: List['Category'] = None,
attributes: List['Attribute'] = None) -> None:
"""
Initialize an Element object.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:param str text: (optional) The text of the element.
:param List[TypeLabel] types: (optional) Description of the action
specified by the element and whom it affects.
:param List[Category] categories: (optional) List of functional categories
into which the element falls; in other words, the subject matter of the
element.
:param List[Attribute] attributes: (optional) List of document attributes.
"""
self.location = location
self.text = text
self.types = types
self.categories = categories
self.attributes = attributes
@classmethod
def from_dict(cls, _dict: Dict) -> 'Element':
"""Initialize a Element object from a json dictionary."""
args = {}
valid_keys = ['location', 'text', 'types', 'categories', 'attributes']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Element: ' +
', '.join(bad_keys))
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'types' in _dict:
args['types'] = [
TypeLabel._from_dict(x) for x in (_dict.get('types'))
]
if 'categories' in _dict:
args['categories'] = [
Category._from_dict(x) for x in (_dict.get('categories'))
]
if 'attributes' in _dict:
args['attributes'] = [
Attribute._from_dict(x) for x in (_dict.get('attributes'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Element object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'types') and self.types is not None:
_dict['types'] = [x._to_dict() for x in self.types]
if hasattr(self, 'categories') and self.categories is not None:
_dict['categories'] = [x._to_dict() for x in self.categories]
if hasattr(self, 'attributes') and self.attributes is not None:
_dict['attributes'] = [x._to_dict() for x in self.attributes]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Element object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Element') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Element') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
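# Illustrative sketch, not part of the generated SDK: parsing a minimal Element from
# a dictionary that supplies only `location` and `text` (the text is invented). The
# optional `types`, `categories`, and `attributes` keys, when present, are parsed
# element-wise by the TypeLabel, Category, and Attribute models defined elsewhere in
# this module.
def _example_element_minimal():
    raw = {
        'location': {'begin': 0, 'end': 53},
        'text': 'The Supplier will deliver the report within 30 days.',
    }
    element = Element.from_dict(raw)
    # Unset optional attributes stay None and are omitted from to_dict().
    assert element.types is None and element.to_dict() == raw
    return element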
class ElementLocations():
"""
A list of `begin` and `end` indexes that indicate the locations of the elements in the
input document.
:attr int begin: (optional) An integer that indicates the starting position of
the element in the input document.
:attr int end: (optional) An integer that indicates the ending position of the
element in the input document.
"""
def __init__(self, *, begin: int = None, end: int = None) -> None:
"""
Initialize an ElementLocations object.
:param int begin: (optional) An integer that indicates the starting
position of the element in the input document.
:param int end: (optional) An integer that indicates the ending position of
the element in the input document.
"""
self.begin = begin
self.end = end
@classmethod
def from_dict(cls, _dict: Dict) -> 'ElementLocations':
"""Initialize a ElementLocations object from a json dictionary."""
args = {}
valid_keys = ['begin', 'end']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class ElementLocations: '
+ ', '.join(bad_keys))
if 'begin' in _dict:
args['begin'] = _dict.get('begin')
if 'end' in _dict:
args['end'] = _dict.get('end')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ElementLocations object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'begin') and self.begin is not None:
_dict['begin'] = self.begin
if hasattr(self, 'end') and self.end is not None:
_dict['end'] = self.end
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ElementLocations object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'ElementLocations') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ElementLocations') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ElementPair():
"""
Details of semantically aligned elements.
:attr str document_label: (optional) The label of the document (that is, the
value of either the `file_1_label` or `file_2_label` parameters) in which the
element occurs.
:attr str text: (optional) The contents of the element.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:attr List[TypeLabelComparison] types: (optional) Description of the action
specified by the element and whom it affects.
:attr List[CategoryComparison] categories: (optional) List of functional
categories into which the element falls; in other words, the subject matter of
the element.
:attr List[Attribute] attributes: (optional) List of document attributes.
"""
def __init__(self,
*,
document_label: str = None,
text: str = None,
location: 'Location' = None,
types: List['TypeLabelComparison'] = None,
categories: List['CategoryComparison'] = None,
attributes: List['Attribute'] = None) -> None:
"""
Initialize an ElementPair object.
:param str document_label: (optional) The label of the document (that is,
the value of either the `file_1_label` or `file_2_label` parameters) in
which the element occurs.
:param str text: (optional) The contents of the element.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:param List[TypeLabelComparison] types: (optional) Description of the
action specified by the element and whom it affects.
:param List[CategoryComparison] categories: (optional) List of functional
categories into which the element falls; in other words, the subject matter
of the element.
:param List[Attribute] attributes: (optional) List of document attributes.
"""
self.document_label = document_label
self.text = text
self.location = location
self.types = types
self.categories = categories
self.attributes = attributes
@classmethod
def from_dict(cls, _dict: Dict) -> 'ElementPair':
"""Initialize a ElementPair object from a json dictionary."""
args = {}
valid_keys = [
'document_label', 'text', 'location', 'types', 'categories',
'attributes'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class ElementPair: '
+ ', '.join(bad_keys))
if 'document_label' in _dict:
args['document_label'] = _dict.get('document_label')
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
if 'types' in _dict:
args['types'] = [
TypeLabelComparison._from_dict(x) for x in (_dict.get('types'))
]
if 'categories' in _dict:
args['categories'] = [
CategoryComparison._from_dict(x)
for x in (_dict.get('categories'))
]
if 'attributes' in _dict:
args['attributes'] = [
Attribute._from_dict(x) for x in (_dict.get('attributes'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ElementPair object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'document_label') and self.document_label is not None:
_dict['document_label'] = self.document_label
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'types') and self.types is not None:
_dict['types'] = [x._to_dict() for x in self.types]
if hasattr(self, 'categories') and self.categories is not None:
_dict['categories'] = [x._to_dict() for x in self.categories]
if hasattr(self, 'attributes') and self.attributes is not None:
_dict['attributes'] = [x._to_dict() for x in self.attributes]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ElementPair object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'ElementPair') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ElementPair') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class FeedbackDataInput():
"""
Feedback data for submission.
:attr str feedback_type: The type of feedback. The only permitted value is
`element_classification`.
:attr ShortDoc document: (optional) Brief information about the input document.
:attr str model_id: (optional) An optional string identifying the model ID. The
only permitted value is `contracts`.
:attr str model_version: (optional) An optional string identifying the version
of the model used.
:attr Location location: The numeric location of the identified element in the
document, represented with two integers labeled `begin` and `end`.
:attr str text: The text on which to submit feedback.
:attr OriginalLabelsIn original_labels: The original labeling from the input
document, without the submitted feedback.
:attr UpdatedLabelsIn updated_labels: The updated labeling from the input
document, accounting for the submitted feedback.
"""
def __init__(self,
feedback_type: str,
location: 'Location',
text: str,
original_labels: 'OriginalLabelsIn',
updated_labels: 'UpdatedLabelsIn',
*,
document: 'ShortDoc' = None,
model_id: str = None,
model_version: str = None) -> None:
"""
Initialize a FeedbackDataInput object.
:param str feedback_type: The type of feedback. The only permitted value is
`element_classification`.
:param Location location: The numeric location of the identified element in
the document, represented with two integers labeled `begin` and `end`.
:param str text: The text on which to submit feedback.
:param OriginalLabelsIn original_labels: The original labeling from the
input document, without the submitted feedback.
:param UpdatedLabelsIn updated_labels: The updated labeling from the input
document, accounting for the submitted feedback.
:param ShortDoc document: (optional) Brief information about the input
document.
:param str model_id: (optional) An optional string identifying the model
ID. The only permitted value is `contracts`.
:param str model_version: (optional) An optional string identifying the
version of the model used.
"""
self.feedback_type = feedback_type
self.document = document
self.model_id = model_id
self.model_version = model_version
self.location = location
self.text = text
self.original_labels = original_labels
self.updated_labels = updated_labels
@classmethod
def from_dict(cls, _dict: Dict) -> 'FeedbackDataInput':
"""Initialize a FeedbackDataInput object from a json dictionary."""
args = {}
valid_keys = [
'feedback_type', 'document', 'model_id', 'model_version',
'location', 'text', 'original_labels', 'updated_labels'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class FeedbackDataInput: '
+ ', '.join(bad_keys))
if 'feedback_type' in _dict:
args['feedback_type'] = _dict.get('feedback_type')
else:
raise ValueError(
'Required property \'feedback_type\' not present in FeedbackDataInput JSON'
)
if 'document' in _dict:
args['document'] = ShortDoc._from_dict(_dict.get('document'))
if 'model_id' in _dict:
args['model_id'] = _dict.get('model_id')
if 'model_version' in _dict:
args['model_version'] = _dict.get('model_version')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
else:
raise ValueError(
'Required property \'location\' not present in FeedbackDataInput JSON'
)
if 'text' in _dict:
args['text'] = _dict.get('text')
else:
raise ValueError(
'Required property \'text\' not present in FeedbackDataInput JSON'
)
if 'original_labels' in _dict:
args['original_labels'] = OriginalLabelsIn._from_dict(
_dict.get('original_labels'))
else:
raise ValueError(
'Required property \'original_labels\' not present in FeedbackDataInput JSON'
)
if 'updated_labels' in _dict:
args['updated_labels'] = UpdatedLabelsIn._from_dict(
_dict.get('updated_labels'))
else:
raise ValueError(
'Required property \'updated_labels\' not present in FeedbackDataInput JSON'
)
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a FeedbackDataInput object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'feedback_type') and self.feedback_type is not None:
_dict['feedback_type'] = self.feedback_type
if hasattr(self, 'document') and self.document is not None:
_dict['document'] = self.document._to_dict()
if hasattr(self, 'model_id') and self.model_id is not None:
_dict['model_id'] = self.model_id
if hasattr(self, 'model_version') and self.model_version is not None:
_dict['model_version'] = self.model_version
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self,
'original_labels') and self.original_labels is not None:
_dict['original_labels'] = self.original_labels._to_dict()
if hasattr(self, 'updated_labels') and self.updated_labels is not None:
_dict['updated_labels'] = self.updated_labels._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this FeedbackDataInput object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'FeedbackDataInput') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'FeedbackDataInput') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class FeedbackDataOutput():
"""
Information returned from the **Add Feedback** method.
:attr str feedback_type: (optional) A string identifying the type of feedback.
The only permitted value is `element_classification`.
:attr ShortDoc document: (optional) Brief information about the input document.
:attr str model_id: (optional) An optional string identifying the model ID. The
only permitted value is `contracts`.
:attr str model_version: (optional) An optional string identifying the version
of the model used.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:attr str text: (optional) The text to which the feedback applies.
:attr OriginalLabelsOut original_labels: (optional) The original labeling from
the input document, without the submitted feedback.
:attr UpdatedLabelsOut updated_labels: (optional) The updated labeling from the
input document, accounting for the submitted feedback.
:attr Pagination pagination: (optional) Pagination details, if required by the
length of the output.
"""
def __init__(self,
*,
feedback_type: str = None,
document: 'ShortDoc' = None,
model_id: str = None,
model_version: str = None,
location: 'Location' = None,
text: str = None,
original_labels: 'OriginalLabelsOut' = None,
updated_labels: 'UpdatedLabelsOut' = None,
pagination: 'Pagination' = None) -> None:
"""
Initialize a FeedbackDataOutput object.
:param str feedback_type: (optional) A string identifying the type of
feedback. The only permitted value is `element_classification`.
:param ShortDoc document: (optional) Brief information about the input
document.
:param str model_id: (optional) An optional string identifying the model
ID. The only permitted value is `contracts`.
:param str model_version: (optional) An optional string identifying the
version of the model used.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:param str text: (optional) The text to which the feedback applies.
:param OriginalLabelsOut original_labels: (optional) The original labeling
from the input document, without the submitted feedback.
:param UpdatedLabelsOut updated_labels: (optional) The updated labeling
from the input document, accounting for the submitted feedback.
:param Pagination pagination: (optional) Pagination details, if required by
the length of the output.
"""
self.feedback_type = feedback_type
self.document = document
self.model_id = model_id
self.model_version = model_version
self.location = location
self.text = text
self.original_labels = original_labels
self.updated_labels = updated_labels
self.pagination = pagination
@classmethod
def from_dict(cls, _dict: Dict) -> 'FeedbackDataOutput':
"""Initialize a FeedbackDataOutput object from a json dictionary."""
args = {}
valid_keys = [
'feedback_type', 'document', 'model_id', 'model_version',
'location', 'text', 'original_labels', 'updated_labels',
'pagination'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class FeedbackDataOutput: '
+ ', '.join(bad_keys))
if 'feedback_type' in _dict:
args['feedback_type'] = _dict.get('feedback_type')
if 'document' in _dict:
args['document'] = ShortDoc._from_dict(_dict.get('document'))
if 'model_id' in _dict:
args['model_id'] = _dict.get('model_id')
if 'model_version' in _dict:
args['model_version'] = _dict.get('model_version')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'original_labels' in _dict:
args['original_labels'] = OriginalLabelsOut._from_dict(
_dict.get('original_labels'))
if 'updated_labels' in _dict:
args['updated_labels'] = UpdatedLabelsOut._from_dict(
_dict.get('updated_labels'))
if 'pagination' in _dict:
args['pagination'] = Pagination._from_dict(_dict.get('pagination'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a FeedbackDataOutput object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'feedback_type') and self.feedback_type is not None:
_dict['feedback_type'] = self.feedback_type
if hasattr(self, 'document') and self.document is not None:
_dict['document'] = self.document._to_dict()
if hasattr(self, 'model_id') and self.model_id is not None:
_dict['model_id'] = self.model_id
if hasattr(self, 'model_version') and self.model_version is not None:
_dict['model_version'] = self.model_version
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self,
'original_labels') and self.original_labels is not None:
_dict['original_labels'] = self.original_labels._to_dict()
if hasattr(self, 'updated_labels') and self.updated_labels is not None:
_dict['updated_labels'] = self.updated_labels._to_dict()
if hasattr(self, 'pagination') and self.pagination is not None:
_dict['pagination'] = self.pagination._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this FeedbackDataOutput object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'FeedbackDataOutput') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'FeedbackDataOutput') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class FeedbackDeleted():
"""
The status and message of the deletion request.
:attr int status: (optional) HTTP return code.
:attr str message: (optional) Status message returned from the service.
"""
def __init__(self, *, status: int = None, message: str = None) -> None:
"""
Initialize a FeedbackDeleted object.
:param int status: (optional) HTTP return code.
:param str message: (optional) Status message returned from the service.
"""
self.status = status
self.message = message
@classmethod
def from_dict(cls, _dict: Dict) -> 'FeedbackDeleted':
"""Initialize a FeedbackDeleted object from a json dictionary."""
args = {}
valid_keys = ['status', 'message']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class FeedbackDeleted: '
+ ', '.join(bad_keys))
if 'status' in _dict:
args['status'] = _dict.get('status')
if 'message' in _dict:
args['message'] = _dict.get('message')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a FeedbackDeleted object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'status') and self.status is not None:
_dict['status'] = self.status
if hasattr(self, 'message') and self.message is not None:
_dict['message'] = self.message
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this FeedbackDeleted object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'FeedbackDeleted') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'FeedbackDeleted') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
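# Illustrative sketch, not part of the generated SDK: wrapping a hypothetical
# delete-feedback response body in FeedbackDeleted. The status code and message are
# invented placeholders, not captured service output.
def _example_feedback_deleted():
    response_body = {'status': 200, 'message': 'Successfully deleted the feedback entry'}
    deleted = FeedbackDeleted.from_dict(response_body)
    return deleted.status, deleted.message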
class FeedbackList():
"""
The results of a successful **List Feedback** request for all feedback.
:attr List[GetFeedback] feedback: (optional) A list of all feedback for the
document.
"""
def __init__(self, *, feedback: List['GetFeedback'] = None) -> None:
"""
Initialize a FeedbackList object.
:param List[GetFeedback] feedback: (optional) A list of all feedback for
the document.
"""
self.feedback = feedback
@classmethod
def from_dict(cls, _dict: Dict) -> 'FeedbackList':
"""Initialize a FeedbackList object from a json dictionary."""
args = {}
valid_keys = ['feedback']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class FeedbackList: '
+ ', '.join(bad_keys))
if 'feedback' in _dict:
args['feedback'] = [
GetFeedback._from_dict(x) for x in (_dict.get('feedback'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a FeedbackList object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'feedback') and self.feedback is not None:
_dict['feedback'] = [x._to_dict() for x in self.feedback]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this FeedbackList object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'FeedbackList') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'FeedbackList') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class FeedbackReturn():
"""
Information about the document and the submitted feedback.
:attr str feedback_id: (optional) The unique ID of the feedback object.
:attr str user_id: (optional) An optional string identifying the person
submitting feedback.
:attr str comment: (optional) An optional comment from the person submitting the
feedback.
:attr datetime created: (optional) Timestamp listing the creation time of the
feedback submission.
:attr FeedbackDataOutput feedback_data: (optional) Information returned from the
**Add Feedback** method.
"""
def __init__(self,
*,
feedback_id: str = None,
user_id: str = None,
comment: str = None,
created: datetime = None,
feedback_data: 'FeedbackDataOutput' = None) -> None:
"""
Initialize a FeedbackReturn object.
:param str feedback_id: (optional) The unique ID of the feedback object.
:param str user_id: (optional) An optional string identifying the person
submitting feedback.
:param str comment: (optional) An optional comment from the person
submitting the feedback.
:param datetime created: (optional) Timestamp listing the creation time of
the feedback submission.
:param FeedbackDataOutput feedback_data: (optional) Information returned
from the **Add Feedback** method.
"""
self.feedback_id = feedback_id
self.user_id = user_id
self.comment = comment
self.created = created
self.feedback_data = feedback_data
@classmethod
def from_dict(cls, _dict: Dict) -> 'FeedbackReturn':
"""Initialize a FeedbackReturn object from a json dictionary."""
args = {}
valid_keys = [
'feedback_id', 'user_id', 'comment', 'created', 'feedback_data'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class FeedbackReturn: '
+ ', '.join(bad_keys))
if 'feedback_id' in _dict:
args['feedback_id'] = _dict.get('feedback_id')
if 'user_id' in _dict:
args['user_id'] = _dict.get('user_id')
if 'comment' in _dict:
args['comment'] = _dict.get('comment')
if 'created' in _dict:
args['created'] = string_to_datetime(_dict.get('created'))
if 'feedback_data' in _dict:
args['feedback_data'] = FeedbackDataOutput._from_dict(
_dict.get('feedback_data'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a FeedbackReturn object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'feedback_id') and self.feedback_id is not None:
_dict['feedback_id'] = self.feedback_id
if hasattr(self, 'user_id') and self.user_id is not None:
_dict['user_id'] = self.user_id
if hasattr(self, 'comment') and self.comment is not None:
_dict['comment'] = self.comment
if hasattr(self, 'created') and self.created is not None:
_dict['created'] = datetime_to_string(self.created)
if hasattr(self, 'feedback_data') and self.feedback_data is not None:
_dict['feedback_data'] = self.feedback_data._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this FeedbackReturn object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'FeedbackReturn') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'FeedbackReturn') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class GetFeedback():
"""
The results of a successful **Get Feedback** request for a single feedback entry.
:attr str feedback_id: (optional) A string uniquely identifying the feedback
entry.
:attr datetime created: (optional) A timestamp identifying the creation time of
the feedback entry.
:attr str comment: (optional) A string containing the user's comment about the
feedback entry.
:attr FeedbackDataOutput feedback_data: (optional) Information returned from the
**Add Feedback** method.
"""
def __init__(self,
*,
feedback_id: str = None,
created: datetime = None,
comment: str = None,
feedback_data: 'FeedbackDataOutput' = None) -> None:
"""
Initialize a GetFeedback object.
:param str feedback_id: (optional) A string uniquely identifying the
feedback entry.
:param datetime created: (optional) A timestamp identifying the creation
time of the feedback entry.
:param str comment: (optional) A string containing the user's comment about
the feedback entry.
:param FeedbackDataOutput feedback_data: (optional) Information returned
from the **Add Feedback** method.
"""
self.feedback_id = feedback_id
self.created = created
self.comment = comment
self.feedback_data = feedback_data
@classmethod
def from_dict(cls, _dict: Dict) -> 'GetFeedback':
"""Initialize a GetFeedback object from a json dictionary."""
args = {}
valid_keys = ['feedback_id', 'created', 'comment', 'feedback_data']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class GetFeedback: '
+ ', '.join(bad_keys))
if 'feedback_id' in _dict:
args['feedback_id'] = _dict.get('feedback_id')
if 'created' in _dict:
args['created'] = string_to_datetime(_dict.get('created'))
if 'comment' in _dict:
args['comment'] = _dict.get('comment')
if 'feedback_data' in _dict:
args['feedback_data'] = FeedbackDataOutput._from_dict(
_dict.get('feedback_data'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a GetFeedback object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'feedback_id') and self.feedback_id is not None:
_dict['feedback_id'] = self.feedback_id
if hasattr(self, 'created') and self.created is not None:
_dict['created'] = datetime_to_string(self.created)
if hasattr(self, 'comment') and self.comment is not None:
_dict['comment'] = self.comment
if hasattr(self, 'feedback_data') and self.feedback_data is not None:
_dict['feedback_data'] = self.feedback_data._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this GetFeedback object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'GetFeedback') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'GetFeedback') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
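# Illustrative sketch, not part of the generated SDK: FeedbackList.from_dict() hands
# each entry in `feedback` to GetFeedback._from_dict(). The feedback IDs and comments
# are invented; `created` is omitted so the example does not depend on the module's
# string_to_datetime() helper.
def _example_feedback_list():
    raw = {
        'feedback': [
            {'feedback_id': 'feedback-001', 'comment': 'Type label corrected'},
            {'feedback_id': 'feedback-002', 'comment': 'Category added'},
        ]
    }
    listing = FeedbackList.from_dict(raw)
    assert all(isinstance(entry, GetFeedback) for entry in listing.feedback)
    return [entry.feedback_id for entry in listing.feedback]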
class HTMLReturn():
"""
The HTML converted from an input document.
:attr str num_pages: (optional) The number of pages in the input document.
:attr str author: (optional) The author of the input document, if identified.
:attr str publication_date: (optional) The publication date of the input
document, if identified.
:attr str title: (optional) The title of the input document, if identified.
:attr str html: (optional) The HTML version of the input document.
"""
def __init__(self,
*,
num_pages: str = None,
author: str = None,
publication_date: str = None,
title: str = None,
html: str = None) -> None:
"""
Initialize an HTMLReturn object.
:param str num_pages: (optional) The number of pages in the input document.
:param str author: (optional) The author of the input document, if
identified.
:param str publication_date: (optional) The publication date of the input
document, if identified.
:param str title: (optional) The title of the input document, if
identified.
:param str html: (optional) The HTML version of the input document.
"""
self.num_pages = num_pages
self.author = author
self.publication_date = publication_date
self.title = title
self.html = html
@classmethod
def from_dict(cls, _dict: Dict) -> 'HTMLReturn':
"""Initialize a HTMLReturn object from a json dictionary."""
args = {}
valid_keys = [
'num_pages', 'author', 'publication_date', 'title', 'html'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class HTMLReturn: '
+ ', '.join(bad_keys))
if 'num_pages' in _dict:
args['num_pages'] = _dict.get('num_pages')
if 'author' in _dict:
args['author'] = _dict.get('author')
if 'publication_date' in _dict:
args['publication_date'] = _dict.get('publication_date')
if 'title' in _dict:
args['title'] = _dict.get('title')
if 'html' in _dict:
args['html'] = _dict.get('html')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a HTMLReturn object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'num_pages') and self.num_pages is not None:
_dict['num_pages'] = self.num_pages
if hasattr(self, 'author') and self.author is not None:
_dict['author'] = self.author
if hasattr(self,
'publication_date') and self.publication_date is not None:
_dict['publication_date'] = self.publication_date
if hasattr(self, 'title') and self.title is not None:
_dict['title'] = self.title
if hasattr(self, 'html') and self.html is not None:
_dict['html'] = self.html
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this HTMLReturn object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'HTMLReturn') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'HTMLReturn') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
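# Illustrative sketch, not part of the generated SDK: reading the fields of a
# convert-to-HTML response. All values are placeholders; note that `num_pages` is
# documented as a string rather than an integer.
def _example_html_return():
    raw = {'num_pages': '3', 'title': 'Sample contract', 'html': '<html><body>...</body></html>'}
    html_return = HTMLReturn.from_dict(raw)
    return html_return.title, html_return.num_pages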
class Interpretation():
"""
The details of the normalized text, if applicable. This element is optional; it is
returned only if normalized text exists.
:attr str value: (optional) The value that was located in the normalized text.
:attr float numeric_value: (optional) An integer or float expressing the numeric
value of the `value` key.
:attr str unit: (optional) A string listing the unit of the value that was found
in the normalized text.
**Note:** The value of `unit` is the [ISO-4217 currency
code](https://www.iso.org/iso-4217-currency-codes.html) identified for the
currency amount (for example, `USD` or `EUR`). If the service cannot
disambiguate a currency symbol (for example, `$` or `£`), the value of `unit`
contains the ambiguous symbol as-is.
"""
def __init__(self,
*,
value: str = None,
numeric_value: float = None,
unit: str = None) -> None:
"""
Initialize an Interpretation object.
:param str value: (optional) The value that was located in the normalized
text.
:param float numeric_value: (optional) An integer or float expressing the
numeric value of the `value` key.
:param str unit: (optional) A string listing the unit of the value that was
found in the normalized text.
**Note:** The value of `unit` is the [ISO-4217 currency
code](https://www.iso.org/iso-4217-currency-codes.html) identified for the
currency amount (for example, `USD` or `EUR`). If the service cannot
disambiguate a currency symbol (for example, `$` or `£`), the value of
`unit` contains the ambiguous symbol as-is.
"""
self.value = value
self.numeric_value = numeric_value
self.unit = unit
@classmethod
def from_dict(cls, _dict: Dict) -> 'Interpretation':
"""Initialize a Interpretation object from a json dictionary."""
args = {}
valid_keys = ['value', 'numeric_value', 'unit']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Interpretation: '
+ ', '.join(bad_keys))
if 'value' in _dict:
args['value'] = _dict.get('value')
if 'numeric_value' in _dict:
args['numeric_value'] = _dict.get('numeric_value')
if 'unit' in _dict:
args['unit'] = _dict.get('unit')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Interpretation object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'value') and self.value is not None:
_dict['value'] = self.value
if hasattr(self, 'numeric_value') and self.numeric_value is not None:
_dict['numeric_value'] = self.numeric_value
if hasattr(self, 'unit') and self.unit is not None:
_dict['unit'] = self.unit
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Interpretation object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Interpretation') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Interpretation') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
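# Illustrative sketch, not part of the generated SDK: an Interpretation as it might
# appear for a normalized currency amount. Per the docstring above, `unit` carries an
# ISO-4217 code when the currency symbol can be disambiguated; the figures are invented.
def _example_interpretation():
    raw = {'value': '12,000.00 USD', 'numeric_value': 12000.0, 'unit': 'USD'}
    interpretation = Interpretation.from_dict(raw)
    return interpretation.numeric_value, interpretation.unit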
class Key():
"""
A key in a key-value pair.
:attr str cell_id: (optional) The unique ID of the key in the table.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:attr str text: (optional) The text content of the table cell without HTML
markup.
"""
def __init__(self,
*,
cell_id: str = None,
location: 'Location' = None,
text: str = None) -> None:
"""
Initialize a Key object.
:param str cell_id: (optional) The unique ID of the key in the table.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:param str text: (optional) The text content of the table cell without HTML
markup.
"""
self.cell_id = cell_id
self.location = location
self.text = text
@classmethod
def from_dict(cls, _dict: Dict) -> 'Key':
"""Initialize a Key object from a json dictionary."""
args = {}
valid_keys = ['cell_id', 'location', 'text']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Key: ' +
', '.join(bad_keys))
if 'cell_id' in _dict:
args['cell_id'] = _dict.get('cell_id')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
if 'text' in _dict:
args['text'] = _dict.get('text')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Key object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'cell_id') and self.cell_id is not None:
_dict['cell_id'] = self.cell_id
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Key object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Key') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Key') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class KeyValuePair():
"""
Key-value pairs detected across cell boundaries.
:attr Key key: (optional) A key in a key-value pair.
:attr List[Value] value: (optional) A list of values in a key-value pair.
"""
def __init__(self, *, key: 'Key' = None,
value: List['Value'] = None) -> None:
"""
Initialize a KeyValuePair object.
:param Key key: (optional) A key in a key-value pair.
:param List[Value] value: (optional) A list of values in a key-value pair.
"""
self.key = key
self.value = value
@classmethod
def from_dict(cls, _dict: Dict) -> 'KeyValuePair':
"""Initialize a KeyValuePair object from a json dictionary."""
args = {}
valid_keys = ['key', 'value']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class KeyValuePair: '
+ ', '.join(bad_keys))
if 'key' in _dict:
args['key'] = Key._from_dict(_dict.get('key'))
if 'value' in _dict:
args['value'] = [Value._from_dict(x) for x in (_dict.get('value'))]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a KeyValuePair object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'key') and self.key is not None:
_dict['key'] = self.key._to_dict()
if hasattr(self, 'value') and self.value is not None:
_dict['value'] = [x._to_dict() for x in self.value]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this KeyValuePair object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'KeyValuePair') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'KeyValuePair') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
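# Illustrative sketch, not part of the generated SDK: a KeyValuePair parsed from a
# dictionary carrying only the `key` side (the cell ID, offsets, and text are
# invented). The `value` side is omitted here because its entries are parsed by the
# Value model defined elsewhere in this module.
def _example_key_value_pair():
    raw = {'key': {'cell_id': 'key-1', 'location': {'begin': 10, 'end': 18}, 'text': 'Due date'}}
    pair = KeyValuePair.from_dict(raw)
    assert isinstance(pair.key, Key) and pair.value is None
    return pair.to_dict()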
class Label():
"""
A pair of `nature` and `party` objects. The `nature` object identifies the effect of
the element on the identified `party`, and the `party` object identifies the affected
party.
:attr str nature: The identified `nature` of the element.
:attr str party: The identified `party` of the element.
"""
def __init__(self, nature: str, party: str) -> None:
"""
Initialize a Label object.
:param str nature: The identified `nature` of the element.
:param str party: The identified `party` of the element.
"""
self.nature = nature
self.party = party
@classmethod
def from_dict(cls, _dict: Dict) -> 'Label':
"""Initialize a Label object from a json dictionary."""
args = {}
valid_keys = ['nature', 'party']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Label: ' +
', '.join(bad_keys))
if 'nature' in _dict:
args['nature'] = _dict.get('nature')
else:
raise ValueError(
'Required property \'nature\' not present in Label JSON')
if 'party' in _dict:
args['party'] = _dict.get('party')
else:
raise ValueError(
'Required property \'party\' not present in Label JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Label object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'nature') and self.nature is not None:
_dict['nature'] = self.nature
if hasattr(self, 'party') and self.party is not None:
_dict['party'] = self.party
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Label object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Label') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Label') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
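# Illustrative sketch, not part of the generated SDK: Label requires both `nature`
# and `party`, so from_dict() raises ValueError when either is missing. The pairing
# below is an invented example.
def _example_label():
    label = Label.from_dict({'nature': 'Obligation', 'party': 'Supplier'})
    try:
        Label.from_dict({'nature': 'Obligation'})
    except ValueError:
        pass  # missing required property 'party'
    return label.to_dict()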
class LeadingSentence():
"""
The leading sentences in a section or subsection of the input document.
:attr str text: (optional) The text of the leading sentence.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:attr List[ElementLocations] element_locations: (optional) An array of
`location` objects that lists the locations of detected leading sentences.
"""
def __init__(self,
*,
text: str = None,
location: 'Location' = None,
element_locations: List['ElementLocations'] = None) -> None:
"""
Initialize a LeadingSentence object.
:param str text: (optional) The text of the leading sentence.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:param List[ElementLocations] element_locations: (optional) An array of
`location` objects that lists the locations of detected leading sentences.
"""
self.text = text
self.location = location
self.element_locations = element_locations
@classmethod
def from_dict(cls, _dict: Dict) -> 'LeadingSentence':
"""Initialize a LeadingSentence object from a json dictionary."""
args = {}
valid_keys = ['text', 'location', 'element_locations']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class LeadingSentence: '
+ ', '.join(bad_keys))
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
if 'element_locations' in _dict:
args['element_locations'] = [
ElementLocations._from_dict(x)
for x in (_dict.get('element_locations'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a LeadingSentence object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self,
'element_locations') and self.element_locations is not None:
_dict['element_locations'] = [
x._to_dict() for x in self.element_locations
]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this LeadingSentence object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'LeadingSentence') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'LeadingSentence') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
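# Illustrative sketch, not part of the generated SDK: a LeadingSentence whose
# `element_locations` entries are parsed into ElementLocations objects. The sentence
# text and offsets are invented.
def _example_leading_sentence():
    raw = {
        'text': 'This Agreement is made between the parties named below.',
        'location': {'begin': 0, 'end': 56},
        'element_locations': [{'begin': 0, 'end': 56}, {'begin': 57, 'end': 112}],
    }
    sentence = LeadingSentence.from_dict(raw)
    assert all(isinstance(loc, ElementLocations) for loc in sentence.element_locations)
    return sentence.to_dict()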
class Location():
"""
The numeric location of the identified element in the document, represented with two
integers labeled `begin` and `end`.
:attr int begin: The element's `begin` index.
:attr int end: The element's `end` index.
"""
def __init__(self, begin: int, end: int) -> None:
"""
Initialize a Location object.
:param int begin: The element's `begin` index.
:param int end: The element's `end` index.
"""
self.begin = begin
self.end = end
@classmethod
def from_dict(cls, _dict: Dict) -> 'Location':
"""Initialize a Location object from a json dictionary."""
args = {}
valid_keys = ['begin', 'end']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Location: '
+ ', '.join(bad_keys))
if 'begin' in _dict:
args['begin'] = _dict.get('begin')
else:
raise ValueError(
'Required property \'begin\' not present in Location JSON')
if 'end' in _dict:
args['end'] = _dict.get('end')
else:
raise ValueError(
'Required property \'end\' not present in Location JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Location object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'begin') and self.begin is not None:
_dict['begin'] = self.begin
if hasattr(self, 'end') and self.end is not None:
_dict['end'] = self.end
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Location object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Location') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Location') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
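# Illustrative sketch, not part of the generated SDK: Location round-trips through
# from_dict()/to_dict() and compares by value via __eq__. Both `begin` and `end` are
# required, so an incomplete dictionary raises ValueError.
def _example_location():
    first = Location.from_dict({'begin': 3, 'end': 26})
    second = Location(begin=3, end=26)
    assert first == second and first.to_dict() == {'begin': 3, 'end': 26}
    try:
        Location.from_dict({'begin': 3})
    except ValueError:
        pass  # missing required property 'end'
    return first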
class Mention():
"""
A mention of a party.
:attr str text: (optional) The name of the party.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
def __init__(self, *, text: str = None,
location: 'Location' = None) -> None:
"""
Initialize a Mention object.
:param str text: (optional) The name of the party.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
self.text = text
self.location = location
@classmethod
def from_dict(cls, _dict: Dict) -> 'Mention':
"""Initialize a Mention object from a json dictionary."""
args = {}
valid_keys = ['text', 'location']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Mention: ' +
', '.join(bad_keys))
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Mention object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Mention object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Mention') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Mention') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class OriginalLabelsIn():
"""
The original labeling from the input document, without the submitted feedback.
:attr List[TypeLabel] types: Description of the action specified by the element
and whom it affects.
:attr List[Category] categories: List of functional categories into which the
element falls; in other words, the subject matter of the element.
"""
def __init__(self, types: List['TypeLabel'],
categories: List['Category']) -> None:
"""
Initialize a OriginalLabelsIn object.
:param List[TypeLabel] types: Description of the action specified by the
element and whom it affects.
:param List[Category] categories: List of functional categories into which
the element falls; in other words, the subject matter of the element.
"""
self.types = types
self.categories = categories
@classmethod
def from_dict(cls, _dict: Dict) -> 'OriginalLabelsIn':
"""Initialize a OriginalLabelsIn object from a json dictionary."""
args = {}
valid_keys = ['types', 'categories']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class OriginalLabelsIn: '
+ ', '.join(bad_keys))
if 'types' in _dict:
args['types'] = [
TypeLabel._from_dict(x) for x in (_dict.get('types'))
]
else:
raise ValueError(
'Required property \'types\' not present in OriginalLabelsIn JSON'
)
if 'categories' in _dict:
args['categories'] = [
Category._from_dict(x) for x in (_dict.get('categories'))
]
else:
raise ValueError(
'Required property \'categories\' not present in OriginalLabelsIn JSON'
)
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a OriginalLabelsIn object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'types') and self.types is not None:
_dict['types'] = [x._to_dict() for x in self.types]
if hasattr(self, 'categories') and self.categories is not None:
_dict['categories'] = [x._to_dict() for x in self.categories]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this OriginalLabelsIn object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'OriginalLabelsIn') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'OriginalLabelsIn') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
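# Illustrative sketch: unlike most models in this module, OriginalLabelsIn
# treats both `types` and `categories` as required, so from_dict raises a
# ValueError when either key is absent. Empty lists are accepted.
def _example_original_labels_in() -> 'OriginalLabelsIn':
    labels = OriginalLabelsIn.from_dict({'types': [], 'categories': []})
    try:
        OriginalLabelsIn.from_dict({'types': []})
    except ValueError:
        pass  # 'categories' missing, as expected
    return labels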
class OriginalLabelsOut():
"""
The original labeling from the input document, without the submitted feedback.
:attr List[TypeLabel] types: (optional) Description of the action specified by
the element and whom it affects.
:attr List[Category] categories: (optional) List of functional categories into
which the element falls; in other words, the subject matter of the element.
:attr str modification: (optional) A string identifying the type of modification
made to the feedback entry in the `updated_labels` array. Possible values are
`added`, `not_changed`, and `removed`.
"""
def __init__(self,
*,
types: List['TypeLabel'] = None,
categories: List['Category'] = None,
modification: str = None) -> None:
"""
Initialize a OriginalLabelsOut object.
:param List[TypeLabel] types: (optional) Description of the action
specified by the element and whom it affects.
:param List[Category] categories: (optional) List of functional categories
into which the element falls; in other words, the subject matter of the
element.
:param str modification: (optional) A string identifying the type of
modification made to the feedback entry in the `updated_labels` array.
Possible values are `added`, `not_changed`, and `removed`.
"""
self.types = types
self.categories = categories
self.modification = modification
@classmethod
def from_dict(cls, _dict: Dict) -> 'OriginalLabelsOut':
"""Initialize a OriginalLabelsOut object from a json dictionary."""
args = {}
valid_keys = ['types', 'categories', 'modification']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class OriginalLabelsOut: '
+ ', '.join(bad_keys))
if 'types' in _dict:
args['types'] = [
TypeLabel._from_dict(x) for x in (_dict.get('types'))
]
if 'categories' in _dict:
args['categories'] = [
Category._from_dict(x) for x in (_dict.get('categories'))
]
if 'modification' in _dict:
args['modification'] = _dict.get('modification')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a OriginalLabelsOut object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'types') and self.types is not None:
_dict['types'] = [x._to_dict() for x in self.types]
if hasattr(self, 'categories') and self.categories is not None:
_dict['categories'] = [x._to_dict() for x in self.categories]
if hasattr(self, 'modification') and self.modification is not None:
_dict['modification'] = self.modification
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this OriginalLabelsOut object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'OriginalLabelsOut') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'OriginalLabelsOut') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ModificationEnum(Enum):
"""
A string identifying the type of modification made to the feedback entry in the
`updated_labels` array. Possible values are `added`, `not_changed`, and `removed`.
"""
ADDED = "added"
NOT_CHANGED = "not_changed"
REMOVED = "removed"
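# Illustrative sketch: the service returns `modification` as a plain string
# whose possible values mirror ModificationEnum above ("added", "not_changed",
# "removed"), so a simple string comparison is enough.
def _example_was_label_removed(label: 'OriginalLabelsOut') -> bool:
    return label.modification == 'removed'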
class Pagination():
"""
Pagination details, if required by the length of the output.
:attr str refresh_cursor: (optional) A token identifying the current page of
results.
:attr str next_cursor: (optional) A token identifying the next page of results.
:attr str refresh_url: (optional) The URL that returns the current page of
results.
:attr str next_url: (optional) The URL that returns the next page of results.
:attr int total: (optional) Reserved for future use.
"""
def __init__(self,
*,
refresh_cursor: str = None,
next_cursor: str = None,
refresh_url: str = None,
next_url: str = None,
total: int = None) -> None:
"""
Initialize a Pagination object.
:param str refresh_cursor: (optional) A token identifying the current page
of results.
:param str next_cursor: (optional) A token identifying the next page of
results.
:param str refresh_url: (optional) The URL that returns the current page of
results.
:param str next_url: (optional) The URL that returns the next page of
results.
:param int total: (optional) Reserved for future use.
"""
self.refresh_cursor = refresh_cursor
self.next_cursor = next_cursor
self.refresh_url = refresh_url
self.next_url = next_url
self.total = total
@classmethod
def from_dict(cls, _dict: Dict) -> 'Pagination':
"""Initialize a Pagination object from a json dictionary."""
args = {}
valid_keys = [
'refresh_cursor', 'next_cursor', 'refresh_url', 'next_url', 'total'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Pagination: '
+ ', '.join(bad_keys))
if 'refresh_cursor' in _dict:
args['refresh_cursor'] = _dict.get('refresh_cursor')
if 'next_cursor' in _dict:
args['next_cursor'] = _dict.get('next_cursor')
if 'refresh_url' in _dict:
args['refresh_url'] = _dict.get('refresh_url')
if 'next_url' in _dict:
args['next_url'] = _dict.get('next_url')
if 'total' in _dict:
args['total'] = _dict.get('total')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Pagination object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'refresh_cursor') and self.refresh_cursor is not None:
_dict['refresh_cursor'] = self.refresh_cursor
if hasattr(self, 'next_cursor') and self.next_cursor is not None:
_dict['next_cursor'] = self.next_cursor
if hasattr(self, 'refresh_url') and self.refresh_url is not None:
_dict['refresh_url'] = self.refresh_url
if hasattr(self, 'next_url') and self.next_url is not None:
_dict['next_url'] = self.next_url
if hasattr(self, 'total') and self.total is not None:
_dict['total'] = self.total
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Pagination object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Pagination') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Pagination') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
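# Illustrative sketch: every from_dict in this module rejects unexpected keys.
# Pagination is used here only as an example; any other model behaves the same.
def _example_pagination_parsing() -> 'Pagination':
    page = Pagination.from_dict({'refresh_cursor': 'abc', 'next_cursor': 'def'})
    try:
        Pagination.from_dict({'unknown_key': 1})
    except ValueError:
        pass  # unrecognized keys raise a ValueError
    return page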
class Paragraphs():
"""
The locations of each paragraph in the input document.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
def __init__(self, *, location: 'Location' = None) -> None:
"""
Initialize a Paragraphs object.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
self.location = location
@classmethod
def from_dict(cls, _dict: Dict) -> 'Paragraphs':
"""Initialize a Paragraphs object from a json dictionary."""
args = {}
valid_keys = ['location']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Paragraphs: '
+ ', '.join(bad_keys))
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Paragraphs object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Paragraphs object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Paragraphs') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Paragraphs') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class Parties():
"""
A party and its corresponding role, including address and contact information if
identified.
:attr str party: (optional) The normalized form of the party's name.
:attr str role: (optional) A string identifying the party's role.
:attr str importance: (optional) A string that identifies the importance of the
party.
:attr List[Address] addresses: (optional) A list of the party's address or
addresses.
:attr List[Contact] contacts: (optional) A list of the names and roles of
contacts identified in the input document.
:attr List[Mention] mentions: (optional) A list of the party's mentions in the
input document.
"""
def __init__(self,
*,
party: str = None,
role: str = None,
importance: str = None,
addresses: List['Address'] = None,
contacts: List['Contact'] = None,
mentions: List['Mention'] = None) -> None:
"""
Initialize a Parties object.
:param str party: (optional) The normalized form of the party's name.
:param str role: (optional) A string identifying the party's role.
:param str importance: (optional) A string that identifies the importance
of the party.
:param List[Address] addresses: (optional) A list of the party's address or
addresses.
:param List[Contact] contacts: (optional) A list of the names and roles of
contacts identified in the input document.
:param List[Mention] mentions: (optional) A list of the party's mentions in
the input document.
"""
self.party = party
self.role = role
self.importance = importance
self.addresses = addresses
self.contacts = contacts
self.mentions = mentions
@classmethod
def from_dict(cls, _dict: Dict) -> 'Parties':
"""Initialize a Parties object from a json dictionary."""
args = {}
valid_keys = [
'party', 'role', 'importance', 'addresses', 'contacts', 'mentions'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Parties: ' +
', '.join(bad_keys))
if 'party' in _dict:
args['party'] = _dict.get('party')
if 'role' in _dict:
args['role'] = _dict.get('role')
if 'importance' in _dict:
args['importance'] = _dict.get('importance')
if 'addresses' in _dict:
args['addresses'] = [
Address._from_dict(x) for x in (_dict.get('addresses'))
]
if 'contacts' in _dict:
args['contacts'] = [
Contact._from_dict(x) for x in (_dict.get('contacts'))
]
if 'mentions' in _dict:
args['mentions'] = [
Mention._from_dict(x) for x in (_dict.get('mentions'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Parties object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'party') and self.party is not None:
_dict['party'] = self.party
if hasattr(self, 'role') and self.role is not None:
_dict['role'] = self.role
if hasattr(self, 'importance') and self.importance is not None:
_dict['importance'] = self.importance
if hasattr(self, 'addresses') and self.addresses is not None:
_dict['addresses'] = [x._to_dict() for x in self.addresses]
if hasattr(self, 'contacts') and self.contacts is not None:
_dict['contacts'] = [x._to_dict() for x in self.contacts]
if hasattr(self, 'mentions') and self.mentions is not None:
_dict['mentions'] = [x._to_dict() for x in self.mentions]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Parties object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Parties') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Parties') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ImportanceEnum(Enum):
"""
A string that identifies the importance of the party.
"""
PRIMARY = "Primary"
UNKNOWN = "Unknown"
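# Illustrative sketch: building a Parties object from a service-style dict.
# The values shown ('ACME Corp', offsets, etc.) are made up; nested `mentions`
# entries are converted into Mention and Location objects by from_dict, and the
# importance string is one of the values listed in ImportanceEnum above.
def _example_parties_from_dict() -> 'Parties':
    return Parties.from_dict({
        'party': 'ACME Corp',
        'role': 'Buyer',
        'importance': 'Primary',
        'mentions': [{'text': 'ACME', 'location': {'begin': 10, 'end': 14}}],
    })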
class PaymentTerms():
"""
The document's payment duration or durations.
:attr str confidence_level: (optional) The confidence level in the
identification of the payment term.
:attr str text: (optional) The payment term (duration).
:attr str text_normalized: (optional) The normalized form of the payment term,
which is listed as a string. This element is optional; it is returned only if
normalized text exists.
:attr Interpretation interpretation: (optional) The details of the normalized
text, if applicable. This element is optional; it is returned only if normalized
text exists.
:attr List[str] provenance_ids: (optional) Hashed values that you can send to
IBM to provide feedback or receive support.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
def __init__(self,
*,
confidence_level: str = None,
text: str = None,
text_normalized: str = None,
interpretation: 'Interpretation' = None,
provenance_ids: List[str] = None,
location: 'Location' = None) -> None:
"""
Initialize a PaymentTerms object.
:param str confidence_level: (optional) The confidence level in the
identification of the payment term.
:param str text: (optional) The payment term (duration).
:param str text_normalized: (optional) The normalized form of the payment
term, which is listed as a string. This element is optional; it is returned
only if normalized text exists.
:param Interpretation interpretation: (optional) The details of the
normalized text, if applicable. This element is optional; it is returned
only if normalized text exists.
:param List[str] provenance_ids: (optional) Hashed values that you can send
to IBM to provide feedback or receive support.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
self.confidence_level = confidence_level
self.text = text
self.text_normalized = text_normalized
self.interpretation = interpretation
self.provenance_ids = provenance_ids
self.location = location
@classmethod
def from_dict(cls, _dict: Dict) -> 'PaymentTerms':
"""Initialize a PaymentTerms object from a json dictionary."""
args = {}
valid_keys = [
'confidence_level', 'text', 'text_normalized', 'interpretation',
'provenance_ids', 'location'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class PaymentTerms: '
+ ', '.join(bad_keys))
if 'confidence_level' in _dict:
args['confidence_level'] = _dict.get('confidence_level')
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'text_normalized' in _dict:
args['text_normalized'] = _dict.get('text_normalized')
if 'interpretation' in _dict:
args['interpretation'] = Interpretation._from_dict(
_dict.get('interpretation'))
if 'provenance_ids' in _dict:
args['provenance_ids'] = _dict.get('provenance_ids')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a PaymentTerms object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self,
'confidence_level') and self.confidence_level is not None:
_dict['confidence_level'] = self.confidence_level
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self,
'text_normalized') and self.text_normalized is not None:
_dict['text_normalized'] = self.text_normalized
if hasattr(self, 'interpretation') and self.interpretation is not None:
_dict['interpretation'] = self.interpretation._to_dict()
if hasattr(self, 'provenance_ids') and self.provenance_ids is not None:
_dict['provenance_ids'] = self.provenance_ids
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this PaymentTerms object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'PaymentTerms') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'PaymentTerms') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ConfidenceLevelEnum(Enum):
"""
The confidence level in the identification of the payment term.
"""
HIGH = "High"
MEDIUM = "Medium"
LOW = "Low"
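# Illustrative sketch: filter payment terms by confidence. Confidence levels
# arrive as the strings listed in ConfidenceLevelEnum above ("High", "Medium",
# "Low"); the List annotation relies on this module's existing typing imports.
def _example_high_confidence_terms(terms: List['PaymentTerms']) -> List['PaymentTerms']:
    return [t for t in terms if t.confidence_level == 'High']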
class RowHeaders():
"""
Row-level cells, each applicable as a header to other cells in the same row as itself,
of the current table.
:attr str cell_id: (optional) The unique ID of the cell in the current table.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:attr str text: (optional) The textual contents of this cell from the input
document without associated markup content.
:attr str text_normalized: (optional) If you provide customization input, the
normalized version of the cell text according to the customization; otherwise,
the same value as `text`.
:attr int row_index_begin: (optional) The `begin` index of this cell's `row`
location in the current table.
:attr int row_index_end: (optional) The `end` index of this cell's `row`
location in the current table.
:attr int column_index_begin: (optional) The `begin` index of this cell's
`column` location in the current table.
:attr int column_index_end: (optional) The `end` index of this cell's `column`
location in the current table.
"""
def __init__(self,
*,
cell_id: str = None,
location: 'Location' = None,
text: str = None,
text_normalized: str = None,
row_index_begin: int = None,
row_index_end: int = None,
column_index_begin: int = None,
column_index_end: int = None) -> None:
"""
Initialize a RowHeaders object.
:param str cell_id: (optional) The unique ID of the cell in the current
table.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:param str text: (optional) The textual contents of this cell from the
input document without associated markup content.
:param str text_normalized: (optional) If you provide customization input,
the normalized version of the cell text according to the customization;
otherwise, the same value as `text`.
:param int row_index_begin: (optional) The `begin` index of this cell's
`row` location in the current table.
:param int row_index_end: (optional) The `end` index of this cell's `row`
location in the current table.
:param int column_index_begin: (optional) The `begin` index of this cell's
`column` location in the current table.
:param int column_index_end: (optional) The `end` index of this cell's
`column` location in the current table.
"""
self.cell_id = cell_id
self.location = location
self.text = text
self.text_normalized = text_normalized
self.row_index_begin = row_index_begin
self.row_index_end = row_index_end
self.column_index_begin = column_index_begin
self.column_index_end = column_index_end
@classmethod
def from_dict(cls, _dict: Dict) -> 'RowHeaders':
"""Initialize a RowHeaders object from a json dictionary."""
args = {}
valid_keys = [
'cell_id', 'location', 'text', 'text_normalized', 'row_index_begin',
'row_index_end', 'column_index_begin', 'column_index_end'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class RowHeaders: '
+ ', '.join(bad_keys))
if 'cell_id' in _dict:
args['cell_id'] = _dict.get('cell_id')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'text_normalized' in _dict:
args['text_normalized'] = _dict.get('text_normalized')
if 'row_index_begin' in _dict:
args['row_index_begin'] = _dict.get('row_index_begin')
if 'row_index_end' in _dict:
args['row_index_end'] = _dict.get('row_index_end')
if 'column_index_begin' in _dict:
args['column_index_begin'] = _dict.get('column_index_begin')
if 'column_index_end' in _dict:
args['column_index_end'] = _dict.get('column_index_end')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a RowHeaders object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'cell_id') and self.cell_id is not None:
_dict['cell_id'] = self.cell_id
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self,
'text_normalized') and self.text_normalized is not None:
_dict['text_normalized'] = self.text_normalized
if hasattr(self,
'row_index_begin') and self.row_index_begin is not None:
_dict['row_index_begin'] = self.row_index_begin
if hasattr(self, 'row_index_end') and self.row_index_end is not None:
_dict['row_index_end'] = self.row_index_end
if hasattr(
self,
'column_index_begin') and self.column_index_begin is not None:
_dict['column_index_begin'] = self.column_index_begin
if hasattr(self,
'column_index_end') and self.column_index_end is not None:
_dict['column_index_end'] = self.column_index_end
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this RowHeaders object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'RowHeaders') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'RowHeaders') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class SectionTitle():
"""
The table's section title, if identified.
:attr str text: (optional) The text of the section title, if identified.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
def __init__(self, *, text: str = None,
location: 'Location' = None) -> None:
"""
Initialize a SectionTitle object.
:param str text: (optional) The text of the section title, if identified.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
self.text = text
self.location = location
@classmethod
def from_dict(cls, _dict: Dict) -> 'SectionTitle':
"""Initialize a SectionTitle object from a json dictionary."""
args = {}
valid_keys = ['text', 'location']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class SectionTitle: '
+ ', '.join(bad_keys))
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a SectionTitle object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this SectionTitle object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'SectionTitle') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'SectionTitle') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class SectionTitles():
"""
An array containing one object per section or subsection detected in the input
document. Sections and subsections are not nested; instead, they are flattened out and
can be placed back in order by using the `begin` and `end` values of the element and
the `level` value of the section.
:attr str text: (optional) The text of the section title, if identified.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:attr int level: (optional) An integer indicating the level at which the section
is located in the input document. For example, `1` represents a top-level
section, `2` represents a subsection within the level `1` section, and so forth.
:attr List[ElementLocations] element_locations: (optional) An array of
`location` objects that lists the locations of detected section titles.
"""
def __init__(self,
*,
text: str = None,
location: 'Location' = None,
level: int = None,
element_locations: List['ElementLocations'] = None) -> None:
"""
Initialize a SectionTitles object.
:param str text: (optional) The text of the section title, if identified.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:param int level: (optional) An integer indicating the level at which the
section is located in the input document. For example, `1` represents a
top-level section, `2` represents a subsection within the level `1`
section, and so forth.
:param List[ElementLocations] element_locations: (optional) An array of
`location` objects that lists the locations of detected section titles.
"""
self.text = text
self.location = location
self.level = level
self.element_locations = element_locations
@classmethod
def from_dict(cls, _dict: Dict) -> 'SectionTitles':
"""Initialize a SectionTitles object from a json dictionary."""
args = {}
valid_keys = ['text', 'location', 'level', 'element_locations']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class SectionTitles: '
+ ', '.join(bad_keys))
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
if 'level' in _dict:
args['level'] = _dict.get('level')
if 'element_locations' in _dict:
args['element_locations'] = [
ElementLocations._from_dict(x)
for x in (_dict.get('element_locations'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a SectionTitles object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'level') and self.level is not None:
_dict['level'] = self.level
if hasattr(self,
'element_locations') and self.element_locations is not None:
_dict['element_locations'] = [
x._to_dict() for x in self.element_locations
]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this SectionTitles object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'SectionTitles') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'SectionTitles') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
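# Illustrative sketch: as the SectionTitles docstring notes, sections arrive
# flattened; they can be put back in document order by their `begin` offset,
# with `level` indicating nesting depth. This sketch assumes every entry
# carries a location.
def _example_order_sections(sections: List['SectionTitles']) -> List['SectionTitles']:
    return sorted(sections, key=lambda s: s.location.begin)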
class ShortDoc():
"""
Brief information about the input document.
:attr str title: (optional) The title of the input document, if identified.
:attr str hash: (optional) The MD5 hash of the input document.
"""
def __init__(self, *, title: str = None, hash: str = None) -> None:
"""
Initialize a ShortDoc object.
:param str title: (optional) The title of the input document, if
identified.
:param str hash: (optional) The MD5 hash of the input document.
"""
self.title = title
self.hash = hash
@classmethod
def from_dict(cls, _dict: Dict) -> 'ShortDoc':
"""Initialize a ShortDoc object from a json dictionary."""
args = {}
valid_keys = ['title', 'hash']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class ShortDoc: '
+ ', '.join(bad_keys))
if 'title' in _dict:
args['title'] = _dict.get('title')
if 'hash' in _dict:
args['hash'] = _dict.get('hash')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ShortDoc object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'title') and self.title is not None:
_dict['title'] = self.title
if hasattr(self, 'hash') and self.hash is not None:
_dict['hash'] = self.hash
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ShortDoc object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'ShortDoc') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ShortDoc') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
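# Illustrative sketch: ShortDoc.hash is documented as the MD5 of the input
# document, so it can be recomputed locally to confirm which file a response
# refers to. Assumes the service reports the digest as lowercase hex.
def _example_verify_document_hash(doc: 'ShortDoc', file_bytes: bytes) -> bool:
    import hashlib
    return doc.hash == hashlib.md5(file_bytes).hexdigest()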
class TableHeaders():
"""
The contents of the current table's header.
:attr str cell_id: (optional) The unique ID of the cell in the current table.
:attr object location: (optional) The location of the table header cell in the
current table as defined by its `begin` and `end` offsets, respectively, in the
input document.
:attr str text: (optional) The textual contents of the cell from the input
document without associated markup content.
:attr int row_index_begin: (optional) The `begin` index of this cell's `row`
location in the current table.
:attr int row_index_end: (optional) The `end` index of this cell's `row`
location in the current table.
:attr int column_index_begin: (optional) The `begin` index of this cell's
`column` location in the current table.
:attr int column_index_end: (optional) The `end` index of this cell's `column`
location in the current table.
"""
def __init__(self,
*,
cell_id: str = None,
location: object = None,
text: str = None,
row_index_begin: int = None,
row_index_end: int = None,
column_index_begin: int = None,
column_index_end: int = None) -> None:
"""
Initialize a TableHeaders object.
:param str cell_id: (optional) The unique ID of the cell in the current
table.
:param object location: (optional) The location of the table header cell in
the current table as defined by its `begin` and `end` offsets,
respectively, in the input document.
:param str text: (optional) The textual contents of the cell from the input
document without associated markup content.
:param int row_index_begin: (optional) The `begin` index of this cell's
`row` location in the current table.
:param int row_index_end: (optional) The `end` index of this cell's `row`
location in the current table.
:param int column_index_begin: (optional) The `begin` index of this cell's
`column` location in the current table.
:param int column_index_end: (optional) The `end` index of this cell's
`column` location in the current table.
"""
self.cell_id = cell_id
self.location = location
self.text = text
self.row_index_begin = row_index_begin
self.row_index_end = row_index_end
self.column_index_begin = column_index_begin
self.column_index_end = column_index_end
@classmethod
def from_dict(cls, _dict: Dict) -> 'TableHeaders':
"""Initialize a TableHeaders object from a json dictionary."""
args = {}
valid_keys = [
'cell_id', 'location', 'text', 'row_index_begin', 'row_index_end',
'column_index_begin', 'column_index_end'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class TableHeaders: '
+ ', '.join(bad_keys))
if 'cell_id' in _dict:
args['cell_id'] = _dict.get('cell_id')
if 'location' in _dict:
args['location'] = _dict.get('location')
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'row_index_begin' in _dict:
args['row_index_begin'] = _dict.get('row_index_begin')
if 'row_index_end' in _dict:
args['row_index_end'] = _dict.get('row_index_end')
if 'column_index_begin' in _dict:
args['column_index_begin'] = _dict.get('column_index_begin')
if 'column_index_end' in _dict:
args['column_index_end'] = _dict.get('column_index_end')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a TableHeaders object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'cell_id') and self.cell_id is not None:
_dict['cell_id'] = self.cell_id
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self,
'row_index_begin') and self.row_index_begin is not None:
_dict['row_index_begin'] = self.row_index_begin
if hasattr(self, 'row_index_end') and self.row_index_end is not None:
_dict['row_index_end'] = self.row_index_end
if hasattr(
self,
'column_index_begin') and self.column_index_begin is not None:
_dict['column_index_begin'] = self.column_index_begin
if hasattr(self,
'column_index_end') and self.column_index_end is not None:
_dict['column_index_end'] = self.column_index_end
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this TableHeaders object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'TableHeaders') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'TableHeaders') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class TableReturn():
"""
The analysis of the document's tables.
:attr DocInfo document: (optional) Information about the parsed input document.
:attr str model_id: (optional) The ID of the model used to extract the table
contents. The value for table extraction is `tables`.
:attr str model_version: (optional) The version of the `tables` model ID.
:attr List[Tables] tables: (optional) Definitions of the tables identified in
the input document.
"""
def __init__(self,
*,
document: 'DocInfo' = None,
model_id: str = None,
model_version: str = None,
tables: List['Tables'] = None) -> None:
"""
Initialize a TableReturn object.
:param DocInfo document: (optional) Information about the parsed input
document.
:param str model_id: (optional) The ID of the model used to extract the
table contents. The value for table extraction is `tables`.
:param str model_version: (optional) The version of the `tables` model ID.
:param List[Tables] tables: (optional) Definitions of the tables identified
in the input document.
"""
self.document = document
self.model_id = model_id
self.model_version = model_version
self.tables = tables
@classmethod
def from_dict(cls, _dict: Dict) -> 'TableReturn':
"""Initialize a TableReturn object from a json dictionary."""
args = {}
valid_keys = ['document', 'model_id', 'model_version', 'tables']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class TableReturn: '
+ ', '.join(bad_keys))
if 'document' in _dict:
args['document'] = DocInfo._from_dict(_dict.get('document'))
if 'model_id' in _dict:
args['model_id'] = _dict.get('model_id')
if 'model_version' in _dict:
args['model_version'] = _dict.get('model_version')
if 'tables' in _dict:
args['tables'] = [
Tables._from_dict(x) for x in (_dict.get('tables'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a TableReturn object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'document') and self.document is not None:
_dict['document'] = self.document._to_dict()
if hasattr(self, 'model_id') and self.model_id is not None:
_dict['model_id'] = self.model_id
if hasattr(self, 'model_version') and self.model_version is not None:
_dict['model_version'] = self.model_version
if hasattr(self, 'tables') and self.tables is not None:
_dict['tables'] = [x._to_dict() for x in self.tables]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this TableReturn object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'TableReturn') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'TableReturn') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class TableTitle():
"""
If identified, the title or caption of the current table of the form `Table x.: ...`.
Empty when no title is identified. When exposed, the `title` is also excluded from the
`contexts` array of the same table.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:attr str text: (optional) The text of the identified table title or caption.
"""
def __init__(self, *, location: 'Location' = None,
text: str = None) -> None:
"""
Initialize a TableTitle object.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:param str text: (optional) The text of the identified table title or
caption.
"""
self.location = location
self.text = text
@classmethod
def from_dict(cls, _dict: Dict) -> 'TableTitle':
"""Initialize a TableTitle object from a json dictionary."""
args = {}
valid_keys = ['location', 'text']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class TableTitle: '
+ ', '.join(bad_keys))
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
if 'text' in _dict:
args['text'] = _dict.get('text')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a TableTitle object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this TableTitle object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'TableTitle') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'TableTitle') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class Tables():
"""
The contents of the tables extracted from a document.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:attr str text: (optional) The textual contents of the current table from the
input document without associated markup content.
:attr SectionTitle section_title: (optional) The table's section title, if
identified.
:attr TableTitle title: (optional) If identified, the title or caption of the
current table of the form `Table x.: ...`. Empty when no title is identified.
When exposed, the `title` is also excluded from the `contexts` array of the same
table.
:attr List[TableHeaders] table_headers: (optional) An array of table-level cells
that apply as headers to all the other cells in the current table.
:attr List[RowHeaders] row_headers: (optional) An array of row-level cells, each
applicable as a header to other cells in the same row as itself, of the current
table.
:attr List[ColumnHeaders] column_headers: (optional) An array of column-level
cells, each applicable as a header to other cells in the same column as itself,
of the current table.
:attr List[BodyCells] body_cells: (optional) An array of cells that are neither
table header nor column header nor row header cells, of the current table with
corresponding row and column header associations.
:attr List[Contexts] contexts: (optional) An array of objects that list text
that is related to the table contents and that precedes or follows the current
table.
:attr List[KeyValuePair] key_value_pairs: (optional) An array of key-value pairs
identified in the current table.
"""
def __init__(self,
*,
location: 'Location' = None,
text: str = None,
section_title: 'SectionTitle' = None,
title: 'TableTitle' = None,
table_headers: List['TableHeaders'] = None,
row_headers: List['RowHeaders'] = None,
column_headers: List['ColumnHeaders'] = None,
body_cells: List['BodyCells'] = None,
contexts: List['Contexts'] = None,
key_value_pairs: List['KeyValuePair'] = None) -> None:
"""
Initialize a Tables object.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:param str text: (optional) The textual contents of the current table from
the input document without associated markup content.
:param SectionTitle section_title: (optional) The table's section title, if
identified.
:param TableTitle title: (optional) If identified, the title or caption of
the current table of the form `Table x.: ...`. Empty when no title is
identified. When exposed, the `title` is also excluded from the `contexts`
array of the same table.
:param List[TableHeaders] table_headers: (optional) An array of table-level
cells that apply as headers to all the other cells in the current table.
:param List[RowHeaders] row_headers: (optional) An array of row-level
cells, each applicable as a header to other cells in the same row as
itself, of the current table.
:param List[ColumnHeaders] column_headers: (optional) An array of
column-level cells, each applicable as a header to other cells in the same
column as itself, of the current table.
:param List[BodyCells] body_cells: (optional) An array of cells that are
neither table header nor column header nor row header cells, of the current
table with corresponding row and column header associations.
:param List[Contexts] contexts: (optional) An array of objects that list
text that is related to the table contents and that precedes or follows the
current table.
:param List[KeyValuePair] key_value_pairs: (optional) An array of key-value
pairs identified in the current table.
"""
self.location = location
self.text = text
self.section_title = section_title
self.title = title
self.table_headers = table_headers
self.row_headers = row_headers
self.column_headers = column_headers
self.body_cells = body_cells
self.contexts = contexts
self.key_value_pairs = key_value_pairs
@classmethod
def from_dict(cls, _dict: Dict) -> 'Tables':
"""Initialize a Tables object from a json dictionary."""
args = {}
valid_keys = [
'location', 'text', 'section_title', 'title', 'table_headers',
'row_headers', 'column_headers', 'body_cells', 'contexts',
'key_value_pairs'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Tables: ' +
', '.join(bad_keys))
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'section_title' in _dict:
args['section_title'] = SectionTitle._from_dict(
_dict.get('section_title'))
if 'title' in _dict:
args['title'] = TableTitle._from_dict(_dict.get('title'))
if 'table_headers' in _dict:
args['table_headers'] = [
TableHeaders._from_dict(x) for x in (_dict.get('table_headers'))
]
if 'row_headers' in _dict:
args['row_headers'] = [
RowHeaders._from_dict(x) for x in (_dict.get('row_headers'))
]
if 'column_headers' in _dict:
args['column_headers'] = [
ColumnHeaders._from_dict(x)
for x in (_dict.get('column_headers'))
]
if 'body_cells' in _dict:
args['body_cells'] = [
BodyCells._from_dict(x) for x in (_dict.get('body_cells'))
]
if 'contexts' in _dict:
args['contexts'] = [
Contexts._from_dict(x) for x in (_dict.get('contexts'))
]
if 'key_value_pairs' in _dict:
args['key_value_pairs'] = [
KeyValuePair._from_dict(x)
for x in (_dict.get('key_value_pairs'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Tables object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'section_title') and self.section_title is not None:
_dict['section_title'] = self.section_title._to_dict()
if hasattr(self, 'title') and self.title is not None:
_dict['title'] = self.title._to_dict()
if hasattr(self, 'table_headers') and self.table_headers is not None:
_dict['table_headers'] = [x._to_dict() for x in self.table_headers]
if hasattr(self, 'row_headers') and self.row_headers is not None:
_dict['row_headers'] = [x._to_dict() for x in self.row_headers]
if hasattr(self, 'column_headers') and self.column_headers is not None:
_dict['column_headers'] = [
x._to_dict() for x in self.column_headers
]
if hasattr(self, 'body_cells') and self.body_cells is not None:
_dict['body_cells'] = [x._to_dict() for x in self.body_cells]
if hasattr(self, 'contexts') and self.contexts is not None:
_dict['contexts'] = [x._to_dict() for x in self.contexts]
if hasattr(self,
'key_value_pairs') and self.key_value_pairs is not None:
_dict['key_value_pairs'] = [
x._to_dict() for x in self.key_value_pairs
]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Tables object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Tables') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Tables') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
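# Illustrative sketch: pull the plain text of every body cell out of a parsed
# table. Assumes BodyCells (defined elsewhere in this module) exposes a `text`
# attribute, mirroring the other cell models here.
def _example_table_cell_texts(table: 'Tables') -> List[str]:
    return [cell.text for cell in (table.body_cells or []) if cell.text]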
class TerminationDates():
"""
Termination dates identified in the input document.
:attr str confidence_level: (optional) The confidence level in the
identification of the termination date.
:attr str text: (optional) The termination date.
:attr str text_normalized: (optional) The normalized form of the termination
date, which is listed as a string. This element is optional; it is returned only
if normalized text exists.
:attr List[str] provenance_ids: (optional) Hashed values that you can send to
IBM to provide feedback or receive support.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
def __init__(self,
*,
confidence_level: str = None,
text: str = None,
text_normalized: str = None,
provenance_ids: List[str] = None,
location: 'Location' = None) -> None:
"""
Initialize a TerminationDates object.
:param str confidence_level: (optional) The confidence level in the
identification of the termination date.
:param str text: (optional) The termination date.
:param str text_normalized: (optional) The normalized form of the
termination date, which is listed as a string. This element is optional; it
is returned only if normalized text exists.
:param List[str] provenance_ids: (optional) Hashed values that you can send
to IBM to provide feedback or receive support.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
self.confidence_level = confidence_level
self.text = text
self.text_normalized = text_normalized
self.provenance_ids = provenance_ids
self.location = location
@classmethod
def from_dict(cls, _dict: Dict) -> 'TerminationDates':
"""Initialize a TerminationDates object from a json dictionary."""
args = {}
valid_keys = [
'confidence_level', 'text', 'text_normalized', 'provenance_ids',
'location'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class TerminationDates: '
+ ', '.join(bad_keys))
if 'confidence_level' in _dict:
args['confidence_level'] = _dict.get('confidence_level')
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'text_normalized' in _dict:
args['text_normalized'] = _dict.get('text_normalized')
if 'provenance_ids' in _dict:
args['provenance_ids'] = _dict.get('provenance_ids')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a TerminationDates object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self,
'confidence_level') and self.confidence_level is not None:
_dict['confidence_level'] = self.confidence_level
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self,
'text_normalized') and self.text_normalized is not None:
_dict['text_normalized'] = self.text_normalized
if hasattr(self, 'provenance_ids') and self.provenance_ids is not None:
_dict['provenance_ids'] = self.provenance_ids
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this TerminationDates object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'TerminationDates') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'TerminationDates') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ConfidenceLevelEnum(Enum):
"""
The confidence level in the identification of the termination date.
"""
HIGH = "High"
MEDIUM = "Medium"
LOW = "Low"
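# Illustrative sketch: keep only termination dates identified with High
# confidence that also have a normalized form. The strings compared against
# are the values of ConfidenceLevelEnum above.
def _example_normalized_termination_dates(dates: List['TerminationDates']) -> List[str]:
    return [d.text_normalized for d in dates
            if d.confidence_level == 'High' and d.text_normalized]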
class TypeLabel():
"""
Identification of a specific type.
:attr Label label: (optional) A pair of `nature` and `party` objects. The
`nature` object identifies the effect of the element on the identified `party`,
and the `party` object identifies the affected party.
:attr List[str] provenance_ids: (optional) Hashed values that you can send to
IBM to provide feedback or receive support.
"""
def __init__(self,
*,
label: 'Label' = None,
provenance_ids: List[str] = None) -> None:
"""
Initialize a TypeLabel object.
:param Label label: (optional) A pair of `nature` and `party` objects. The
`nature` object identifies the effect of the element on the identified
`party`, and the `party` object identifies the affected party.
:param List[str] provenance_ids: (optional) Hashed values that you can send
to IBM to provide feedback or receive support.
"""
self.label = label
self.provenance_ids = provenance_ids
@classmethod
def from_dict(cls, _dict: Dict) -> 'TypeLabel':
"""Initialize a TypeLabel object from a json dictionary."""
args = {}
valid_keys = ['label', 'provenance_ids']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class TypeLabel: '
+ ', '.join(bad_keys))
if 'label' in _dict:
args['label'] = Label._from_dict(_dict.get('label'))
if 'provenance_ids' in _dict:
args['provenance_ids'] = _dict.get('provenance_ids')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a TypeLabel object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'label') and self.label is not None:
_dict['label'] = self.label._to_dict()
if hasattr(self, 'provenance_ids') and self.provenance_ids is not None:
_dict['provenance_ids'] = self.provenance_ids
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this TypeLabel object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'TypeLabel') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'TypeLabel') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class TypeLabelComparison():
"""
Identification of a specific type.
:attr Label label: (optional) A pair of `nature` and `party` objects. The
`nature` object identifies the effect of the element on the identified `party`,
and the `party` object identifies the affected party.
"""
def __init__(self, *, label: 'Label' = None) -> None:
"""
Initialize a TypeLabelComparison object.
:param Label label: (optional) A pair of `nature` and `party` objects. The
`nature` object identifies the effect of the element on the identified
`party`, and the `party` object identifies the affected party.
"""
self.label = label
@classmethod
def from_dict(cls, _dict: Dict) -> 'TypeLabelComparison':
"""Initialize a TypeLabelComparison object from a json dictionary."""
args = {}
valid_keys = ['label']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class TypeLabelComparison: '
+ ', '.join(bad_keys))
if 'label' in _dict:
args['label'] = Label._from_dict(_dict.get('label'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a TypeLabelComparison object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'label') and self.label is not None:
_dict['label'] = self.label._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this TypeLabelComparison object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'TypeLabelComparison') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'TypeLabelComparison') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class UnalignedElement():
"""
Element that does not align semantically between two compared documents.
:attr str document_label: (optional) The label assigned to the document by the
value of the `file_1_label` or `file_2_label` parameters on the **Compare two
documents** method.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:attr str text: (optional) The text of the element.
:attr List[TypeLabelComparison] types: (optional) Description of the action
specified by the element and whom it affects.
:attr List[CategoryComparison] categories: (optional) List of functional
categories into which the element falls; in other words, the subject matter of
the element.
:attr List[Attribute] attributes: (optional) List of document attributes.
"""
def __init__(self,
*,
document_label: str = None,
location: 'Location' = None,
text: str = None,
types: List['TypeLabelComparison'] = None,
categories: List['CategoryComparison'] = None,
attributes: List['Attribute'] = None) -> None:
"""
Initialize a UnalignedElement object.
:param str document_label: (optional) The label assigned to the document by
the value of the `file_1_label` or `file_2_label` parameters on the
**Compare two documents** method.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:param str text: (optional) The text of the element.
:param List[TypeLabelComparison] types: (optional) Description of the
action specified by the element and whom it affects.
:param List[CategoryComparison] categories: (optional) List of functional
categories into which the element falls; in other words, the subject matter
of the element.
:param List[Attribute] attributes: (optional) List of document attributes.
"""
self.document_label = document_label
self.location = location
self.text = text
self.types = types
self.categories = categories
self.attributes = attributes
@classmethod
def from_dict(cls, _dict: Dict) -> 'UnalignedElement':
"""Initialize a UnalignedElement object from a json dictionary."""
args = {}
valid_keys = [
'document_label', 'location', 'text', 'types', 'categories',
'attributes'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class UnalignedElement: '
+ ', '.join(bad_keys))
if 'document_label' in _dict:
args['document_label'] = _dict.get('document_label')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'types' in _dict:
args['types'] = [
TypeLabelComparison._from_dict(x) for x in (_dict.get('types'))
]
if 'categories' in _dict:
args['categories'] = [
CategoryComparison._from_dict(x)
for x in (_dict.get('categories'))
]
if 'attributes' in _dict:
args['attributes'] = [
Attribute._from_dict(x) for x in (_dict.get('attributes'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a UnalignedElement object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'document_label') and self.document_label is not None:
_dict['document_label'] = self.document_label
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'types') and self.types is not None:
_dict['types'] = [x._to_dict() for x in self.types]
if hasattr(self, 'categories') and self.categories is not None:
_dict['categories'] = [x._to_dict() for x in self.categories]
if hasattr(self, 'attributes') and self.attributes is not None:
_dict['attributes'] = [x._to_dict() for x in self.attributes]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this UnalignedElement object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'UnalignedElement') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'UnalignedElement') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class UpdatedLabelsIn():
"""
The updated labeling from the input document, accounting for the submitted feedback.
:attr List[TypeLabel] types: Description of the action specified by the element
and whom it affects.
:attr List[Category] categories: List of functional categories into which the
element falls; in other words, the subject matter of the element.
"""
def __init__(self, types: List['TypeLabel'],
categories: List['Category']) -> None:
"""
Initialize a UpdatedLabelsIn object.
:param List[TypeLabel] types: Description of the action specified by the
element and whom it affects.
:param List[Category] categories: List of functional categories into which
the element falls; in other words, the subject matter of the element.
"""
self.types = types
self.categories = categories
@classmethod
def from_dict(cls, _dict: Dict) -> 'UpdatedLabelsIn':
"""Initialize a UpdatedLabelsIn object from a json dictionary."""
args = {}
valid_keys = ['types', 'categories']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class UpdatedLabelsIn: '
+ ', '.join(bad_keys))
if 'types' in _dict:
args['types'] = [
TypeLabel._from_dict(x) for x in (_dict.get('types'))
]
else:
raise ValueError(
'Required property \'types\' not present in UpdatedLabelsIn JSON'
)
if 'categories' in _dict:
args['categories'] = [
Category._from_dict(x) for x in (_dict.get('categories'))
]
else:
raise ValueError(
'Required property \'categories\' not present in UpdatedLabelsIn JSON'
)
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a UpdatedLabelsIn object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'types') and self.types is not None:
_dict['types'] = [x._to_dict() for x in self.types]
if hasattr(self, 'categories') and self.categories is not None:
_dict['categories'] = [x._to_dict() for x in self.categories]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this UpdatedLabelsIn object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'UpdatedLabelsIn') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'UpdatedLabelsIn') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class UpdatedLabelsOut():
"""
The updated labeling from the input document, accounting for the submitted feedback.
:attr List[TypeLabel] types: (optional) Description of the action specified by
the element and whom it affects.
:attr List[Category] categories: (optional) List of functional categories into
which the element falls; in other words, the subject matter of the element.
:attr str modification: (optional) The type of modification of the feedback
entry in the `updated_labels` array. Possible values are `added`, `not_changed`,
and `removed`.
"""
def __init__(self,
*,
types: List['TypeLabel'] = None,
categories: List['Category'] = None,
modification: str = None) -> None:
"""
Initialize a UpdatedLabelsOut object.
:param List[TypeLabel] types: (optional) Description of the action
specified by the element and whom it affects.
:param List[Category] categories: (optional) List of functional categories
into which the element falls; in other words, the subject matter of the
element.
:param str modification: (optional) The type of modification of the feedback
entry in the `updated_labels` array. Possible values are `added`,
`not_changed`, and `removed`.
"""
self.types = types
self.categories = categories
self.modification = modification
@classmethod
def from_dict(cls, _dict: Dict) -> 'UpdatedLabelsOut':
"""Initialize a UpdatedLabelsOut object from a json dictionary."""
args = {}
valid_keys = ['types', 'categories', 'modification']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class UpdatedLabelsOut: '
+ ', '.join(bad_keys))
if 'types' in _dict:
args['types'] = [
TypeLabel._from_dict(x) for x in (_dict.get('types'))
]
if 'categories' in _dict:
args['categories'] = [
Category._from_dict(x) for x in (_dict.get('categories'))
]
if 'modification' in _dict:
args['modification'] = _dict.get('modification')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a UpdatedLabelsOut object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'types') and self.types is not None:
_dict['types'] = [x._to_dict() for x in self.types]
if hasattr(self, 'categories') and self.categories is not None:
_dict['categories'] = [x._to_dict() for x in self.categories]
if hasattr(self, 'modification') and self.modification is not None:
_dict['modification'] = self.modification
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this UpdatedLabelsOut object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'UpdatedLabelsOut') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'UpdatedLabelsOut') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ModificationEnum(Enum):
"""
The type of modification of the feedback entry in the `updated_labels` array.
Possible values are `added`, `not_changed`, and `removed`.
"""
ADDED = "added"
NOT_CHANGED = "not_changed"
REMOVED = "removed"
class Value():
"""
A value in a key-value pair.
:attr str cell_id: (optional) The unique ID of the value in the table.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:attr str text: (optional) The text content of the table cell without HTML
markup.
"""
def __init__(self,
*,
cell_id: str = None,
location: 'Location' = None,
text: str = None) -> None:
"""
Initialize a Value object.
:param str cell_id: (optional) The unique ID of the value in the table.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:param str text: (optional) The text content of the table cell without HTML
markup.
"""
self.cell_id = cell_id
self.location = location
self.text = text
@classmethod
def from_dict(cls, _dict: Dict) -> 'Value':
"""Initialize a Value object from a json dictionary."""
args = {}
valid_keys = ['cell_id', 'location', 'text']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Value: ' +
', '.join(bad_keys))
if 'cell_id' in _dict:
args['cell_id'] = _dict.get('cell_id')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
if 'text' in _dict:
args['text'] = _dict.get('text')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Value object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'cell_id') and self.cell_id is not None:
_dict['cell_id'] = self.cell_id
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Value object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Value') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Value') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
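# Minimal usage sketch (not part of the generated SDK): the model classes above
# share the same from_dict/to_dict round trip, illustrated here with Value because
# it needs no nested Location object. The cell values are placeholders.
if __name__ == '__main__':
    _value = Value.from_dict({'cell_id': 'cell_1', 'text': 'Net 30 days'})
    assert _value.to_dict() == {'cell_id': 'cell_1', 'text': 'Net 30 days'}
    print(_value)  # __str__ renders the model as indented JSON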
|
import torch
from torch.nn import functional as F
from dassl.optim import build_optimizer, build_lr_scheduler
from dassl.utils import count_num_param
from dassl.engine import TRAINER_REGISTRY, TrainerX
from dassl.engine.trainer import SimpleNet
@TRAINER_REGISTRY.register()
class CrossGrad(TrainerX):
"""Cross-gradient training.
https://arxiv.org/abs/1804.10745.
"""
def __init__(self, cfg):
super().__init__(cfg)
self.eps_f = cfg.TRAINER.CG.EPS_F
self.eps_d = cfg.TRAINER.CG.EPS_D
self.alpha_f = cfg.TRAINER.CG.ALPHA_F
self.alpha_d = cfg.TRAINER.CG.ALPHA_D
def build_model(self):
cfg = self.cfg
print('Building F')
self.F = SimpleNet(cfg, cfg.MODEL, self.num_classes)
self.F.to(self.device)
print('# params: {:,}'.format(count_num_param(self.F)))
self.optim_F = build_optimizer(self.F, cfg.OPTIM)
self.sched_F = build_lr_scheduler(self.optim_F, cfg.OPTIM)
self.register_model('F', self.F, self.optim_F, self.sched_F)
print('Building D')
self.D = SimpleNet(cfg, cfg.MODEL, self.dm.num_source_domains)
self.D.to(self.device)
print('# params: {:,}'.format(count_num_param(self.D)))
self.optim_D = build_optimizer(self.D, cfg.OPTIM)
self.sched_D = build_lr_scheduler(self.optim_D, cfg.OPTIM)
self.register_model('D', self.D, self.optim_D, self.sched_D)
def forward_backward(self, batch):
input, label, domain = self.parse_batch_train(batch)
input.requires_grad = True
# Compute domain perturbation
loss_d = F.cross_entropy(self.D(input), domain)
loss_d.backward()
grad_d = torch.clamp(input.grad.data, min=-0.1, max=0.1)
input_d = input.data + self.eps_f * grad_d  # domain-gradient perturbation, later fed to the label net F
# Compute label perturbation
input.grad.data.zero_()
loss_f = F.cross_entropy(self.F(input), label)
loss_f.backward()
grad_f = torch.clamp(input.grad.data, min=-0.1, max=0.1)
input_f = input.data + self.eps_d * grad_f  # label-gradient perturbation, later fed to the domain net D
input = input.detach()
# Update label net
loss_f1 = F.cross_entropy(self.F(input), label)
loss_f2 = F.cross_entropy(self.F(input_d), label)
loss_f = (1 - self.alpha_f) * loss_f1 + self.alpha_f * loss_f2
self.model_backward_and_update(loss_f, 'F')
# Update domain net
loss_d1 = F.cross_entropy(self.D(input), domain)
loss_d2 = F.cross_entropy(self.D(input_f), domain)
loss_d = (1 - self.alpha_d) * loss_d1 + self.alpha_d * loss_d2
self.model_backward_and_update(loss_d, 'D')
output_dict = {
'loss_f': loss_f.item(),
'loss_d': loss_d.item(),
'lr': self.optim_F.param_groups[0]['lr']
}
if (self.batch_idx + 1) == self.num_batches:
self.update_lr()
return output_dict
def model_inference(self, input):
return self.F(input)
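# Standalone sketch (not from Dassl) of the input-gradient perturbation used in
# forward_backward above: back-propagate a loss to the input, clip the gradient,
# and take an epsilon-sized step. The toy linear model and eps value are made up.
if __name__ == '__main__':
    x = torch.randn(4, 10, requires_grad=True)
    w = torch.randn(10, 3, requires_grad=True)
    y = torch.randint(0, 3, (4,))
    loss = F.cross_entropy(x @ w, y)
    loss.backward()
    grad = torch.clamp(x.grad.data, min=-0.1, max=0.1)
    x_perturbed = x.data + 1.0 * grad  # eps * clipped input gradient
    print(x_perturbed.shape)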
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvpc.endpoint import endpoint_data
class DeleteGlobalAccelerationInstanceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'DeleteGlobalAccelerationInstance','vpc')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_GlobalAccelerationInstanceId(self):
return self.get_query_params().get('GlobalAccelerationInstanceId')
def set_GlobalAccelerationInstanceId(self,GlobalAccelerationInstanceId):
self.add_query_param('GlobalAccelerationInstanceId',GlobalAccelerationInstanceId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
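# Hypothetical usage sketch (not part of the generated request class): the request
# is normally sent through an AcsClient. The credentials, region and instance ID
# below are placeholders.
if __name__ == '__main__':
    from aliyunsdkcore.client import AcsClient
    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
    request = DeleteGlobalAccelerationInstanceRequest()
    request.set_GlobalAccelerationInstanceId('ga-xxxxxxxx')
    response = client.do_action_with_exception(request)
    print(response)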
|
import time
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import itertools
import collections
import matplotlib.pyplot as plt
# Read in data
df = pd.read_csv("Chinese_Names_Corpus_Gender(120W).txt", header=2)
df = df[df.sex != "未知"]
names = df["dict"].values
# Compute character frequency
chars = [list(name) for name in names]
chars_flatten = list(itertools.chain(*chars))
freq = collections.Counter(chars_flatten)
freq = pd.DataFrame(freq.items(), columns=["char", "freq"])
freq = freq.sort_values(by="freq", ascending=False)
# Power law (?)
char_rank = np.arange(freq.shape[0])
char_freq = freq["freq"].values
plt.plot(char_rank, char_freq)
plt.plot(np.log(1.0 + char_rank), np.log(char_freq))
# Prepare data
dict_size = 500
dict = list(freq["char"].values[:dict_size])  # note: shadows the built-in `dict`
dict_set = set(dict)
filtered = list(filter(lambda item: set(item[1]).issubset(dict_set), enumerate(names)))
ind = [idx for idx, name in filtered]
dat = df.iloc[ind].copy()  # copy to avoid SettingWithCopyWarning when adding "y"
dat["y"] = np.where(dat["sex"] == "男", 0, 1)
# Split training set and test set
# train = dat.sample(frac=0.8, random_state=123)
# test = dat.drop(train.index)
train = dat.sample(n=10000, random_state=123)
test = dat.sample(n=1000, random_state=321)
# One-hot encoding
def char2index(char):
return dict.index(char)
def name2index(name):
return [char2index(char) for char in name]
def name2tensor(name):
tensor = torch.zeros(len(name), 1, dict_size)
for i, char in enumerate(name):
tensor[i, 0, char2index(char)] = 1
return tensor
char2index("李")
name2index("李兴")
name2tensor("李兴")
# Build model
class RNN(nn.Module):
def __init__(self, input_size, hidden_size):
super(RNN, self).__init__()
self.hidden_size = hidden_size
self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
self.h2o = nn.Linear(hidden_size, 1)
def forward(self, input, hidden):
combined = torch.cat((input, hidden), dim=1)
hidden = torch.tanh(self.i2h(combined))
output = torch.sigmoid(self.h2o(hidden))
return output, hidden
def init_hidden(self):
return torch.zeros(1, self.hidden_size)
# n_hidden = 128
# rnn = RNN(dict_size, n_hidden)
# input = name2tensor("李兴")
# hidden = rnn.init_hidden()
# output, next_hidden = rnn(input[0], hidden)
np.random.seed(123)
torch.random.manual_seed(123)
n = train.shape[0]
n_hidden = 64
nepoch = 5
bs = 100
rnn = RNN(dict_size, n_hidden)
opt = torch.optim.Adam(rnn.parameters(), lr=0.001)
train_ind = np.arange(n)
losses = []
t1 = time.time()
for k in range(nepoch):
np.random.shuffle(train_ind)
# Update on mini-batches
for j in range(0, n, bs):
# Create mini-batch
mb = train.iloc[train_ind[j:(j + bs)]]
mb_size = mb.shape[0]
loss = 0.0
# Loop over each name in the mini-batch
for i in range(mb_size):
name = mb["dict"].values[i]
input = name2tensor(name)
hidden = rnn.init_hidden()
y = mb["y"].values[i]
for s in range(input.shape[0]):
output, hidden = rnn(input[s], hidden)
loss = loss - y * torch.log(output) - (1.0 - y) * torch.log(1.0 - output)
loss = loss / mb_size
opt.zero_grad()
loss.backward()
opt.step()
losses.append(loss.item())
if j // bs % 10 == 0:
print(f"epoch {k}, batch {j // bs}, loss = {loss.item()}")
t2 = time.time()
print(t2 - t1)
plt.plot(losses)
# Prediction on test set
ntest = test.shape[0]
true_label = test["y"].values
pred = np.zeros(ntest)
rnn.eval()
for i in range(ntest):
input = name2tensor(test["dict"].values[i])
hidden = rnn.init_hidden()
with torch.no_grad():
for s in range(input.shape[0]):
output, hidden = rnn(input[s], hidden)
pred[i] = output.item()
if i % 100 == 0:
print(f"processed {i}")
loss = -np.mean(true_label * np.log(pred) + (1.0 - true_label) * np.log(1.0 - pred))
print(loss)
pred_label = (pred > 0.5).astype(int)
print(np.mean(pred_label == true_label))
# Random cases
np.random.seed(123)
torch.random.manual_seed(123)
ind = np.random.choice(ntest, 10)
ypred = 1 * (pred[ind] > 0.5)
print(test.iloc[ind])
print(test["y"].values[ind])
print(ypred)
names = ["李", "李雪", "李雪峰"]
for name in names:
input = name2tensor(name)
hidden = rnn.init_hidden()
with torch.no_grad():
for s in range(input.shape[0]):
output, hidden = rnn(input[s], hidden)
pred = output.item()
print(f"namae: {name}, P(female) = {pred}")
|
import sys
import numpy as np
import pennylane as qml
import pytest
import qiskit
from pennylane_qiskit import AerDevice, BasicAerDevice
from conftest import state_backends
pldevices = [("qiskit.aer", qiskit.Aer), ("qiskit.basicaer", qiskit.BasicAer)]
class TestDeviceIntegration:
"""Test the devices work correctly from the PennyLane frontend."""
@pytest.mark.parametrize("d", pldevices)
def test_load_device(self, d, backend):
"""Test that the qiskit device loads correctly"""
dev = qml.device(d[0], wires=2, backend=backend, shots=1024)
assert dev.num_wires == 2
assert dev.shots == 1024
assert dev.short_name == d[0]
assert dev.provider == d[1]
def test_incorrect_backend(self):
"""Test that exception is raised if name is incorrect"""
with pytest.raises(ValueError, match="Backend 'none' does not exist"):
qml.device("qiskit.aer", wires=2, backend="none")
def test_incorrect_backend_wires(self):
"""Test that exception is raised if number of wires is too large"""
with pytest.raises(ValueError, match=r"Backend 'statevector\_simulator' supports maximum"):
qml.device("qiskit.aer", wires=100, backend="statevector_simulator")
def test_args(self):
"""Test that the device requires correct arguments"""
with pytest.raises(TypeError, match="missing 1 required positional argument"):
qml.device("qiskit.aer")
with pytest.raises(qml.DeviceError, match="specified number of shots needs to be at least 1"):
qml.device("qiskit.aer", backend="qasm_simulator", wires=1, shots=0)
@pytest.mark.parametrize("d", pldevices)
@pytest.mark.parametrize("analytic", [True, False])
@pytest.mark.parametrize("shots", [8192])
def test_one_qubit_circuit(self, shots, analytic, d, backend, tol):
"""Test that devices provide correct result for a simple circuit"""
if backend not in state_backends and analytic:
pytest.skip("Hardware simulators do not support analytic mode")
dev = qml.device(d[0], wires=1, backend=backend, shots=shots, analytic=analytic)
a = 0.543
b = 0.123
c = 0.987
@qml.qnode(dev)
def circuit(x, y, z):
"""Reference QNode"""
qml.BasisState(np.array([1]), wires=0)
qml.Hadamard(wires=0)
qml.Rot(x, y, z, wires=0)
return qml.expval(qml.PauliZ(0))
assert np.allclose(circuit(a, b, c), np.cos(a) * np.sin(b), **tol)
@pytest.mark.parametrize("d", pldevices)
@pytest.mark.parametrize("analytic", [False])
@pytest.mark.parametrize("shots", [8192])
def test_basis_state_and_rot(self, shots, analytic, d, backend, tol):
"""Integration test for the BasisState and Rot operations when analytic
is False"""
dev = qml.device(d[0], wires=1, backend=backend, shots=shots, analytic=analytic)
a = 0
b = 0
c = np.pi
expected = 1
@qml.qnode(dev)
def circuit(x, y, z):
"""Reference QNode"""
qml.BasisState(np.array([0]), wires=0)
qml.Rot(x, y, z, wires=0)
return qml.expval(qml.PauliZ(0))
assert np.allclose(circuit(a, b, c), expected, **tol)
def test_gradient_for_tensor_product(self):
"""Test that the gradient of a circuit containing a tensor product is
computed without any errors."""
n_qubits = 2
depth = 2
def ansatz(weights):
weights = weights.reshape(depth, n_qubits)
qml.RX(weights[0][0], wires=[0])
qml.RZ(weights[0][1], wires=[0])
qml.RX(weights[1][0], wires=[0])
qml.RZ(weights[1][1], wires=[0])
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
dev_qsk = qml.device(
"qiskit.aer",
wires=n_qubits,
shots=1000,
backend="qasm_simulator",
)
weights = np.random.random((depth, n_qubits)).flatten()
# Want to get expectation value and gradient
exp_sampled = qml.QNode(ansatz, dev_qsk, diff_method="parameter-shift")
grad_shift = qml.grad(exp_sampled, argnum=0)
exp_sampled(weights)
grad_shift(weights)
class TestKeywordArguments:
"""Test keyword argument logic is correct"""
@pytest.mark.parametrize("d", pldevices)
def test_compile_backend(self, d):
"""Test that the compile backend argument is properly
extracted"""
dev = qml.device(d[0], wires=2, compile_backend="test value")
assert dev.compile_backend == "test value"
def test_noise_model(self):
"""Test that the noise model argument is properly
extracted if the backend supports it"""
dev = qml.device("qiskit.aer", wires=2, noise_model="test value")
assert dev.noise_model == "test value"
def test_invalid_noise_model(self):
"""Test that the noise model argument causes an exception to be raised
if the backend does not support it"""
with pytest.raises(ValueError, match="does not support noisy simulations"):
dev = qml.device("qiskit.basicaer", wires=2, noise_model="test value")
def test_overflow_kwargs(self):
"""Test all overflow kwargs are extracted for the AerDevice"""
dev = qml.device('qiskit.aer', wires=2, k1="v1", k2="v2")
assert dev.run_args["k1"] == "v1"
assert dev.run_args["k2"] == "v2"
class TestLoadIntegration:
"""Integration tests for the PennyLane load function. This test ensures that the PennyLane-Qiskit
specific load functions integrate properly with the PennyLane-Qiskit plugin."""
hadamard_qasm = 'OPENQASM 2.0;' \
'include "qelib1.inc";' \
'qreg q[1];' \
'h q[0];'
def test_load_qiskit_circuit(self):
"""Test that the default load function works correctly."""
theta = qiskit.circuit.Parameter('θ')
qc = qiskit.QuantumCircuit(2)
qc.rx(theta, 0)
my_template = qml.load(qc, format='qiskit')
dev = qml.device('default.qubit', wires=2)
angles = np.array([0.53896774, 0.79503606, 0.27826503, 0.])
@qml.qnode(dev)
def loaded_quantum_circuit(angle):
my_template({theta: angle})
return qml.expval(qml.PauliZ(0))
@qml.qnode(dev)
def quantum_circuit(angle):
qml.RX(angle, wires=[0])
return qml.expval(qml.PauliZ(0))
for x in angles:
assert np.allclose(loaded_quantum_circuit(x), quantum_circuit(x))
def test_load_from_qasm_string(self):
"""Test that quantum circuits can be loaded from a qasm string."""
dev = qml.device('default.qubit', wires=2)
@qml.qnode(dev)
def loaded_quantum_circuit():
qml.from_qasm(TestLoadIntegration.hadamard_qasm)(wires=[0])
return qml.expval(qml.PauliZ(0))
@qml.qnode(dev)
def quantum_circuit():
qml.Hadamard(wires=[0])
return qml.expval(qml.PauliZ(0))
assert np.allclose(loaded_quantum_circuit(), quantum_circuit())
@pytest.mark.skipif(sys.version_info < (3, 6), reason="tmpdir fixture requires Python >=3.6")
def test_load_qasm_from_file(self, tmpdir):
"""Test that quantum circuits can be loaded from a qasm file."""
apply_hadamard = tmpdir.join("hadamard.qasm")
with open(apply_hadamard, "w") as f:
f.write(TestLoadIntegration.hadamard_qasm)
hadamard = qml.from_qasm_file(apply_hadamard)
dev = qml.device('default.qubit', wires=2)
@qml.qnode(dev)
def loaded_quantum_circuit():
hadamard(wires=[0])
return qml.expval(qml.PauliZ(0))
@qml.qnode(dev)
def quantum_circuit():
qml.Hadamard(wires=[0])
return qml.expval(qml.PauliZ(0))
assert np.allclose(loaded_quantum_circuit(), quantum_circuit())
class TestPLOperations:
"""Integration tests for checking certain PennyLane specific operations."""
@pytest.mark.parametrize("shots", [1000])
@pytest.mark.parametrize("analytic", [True, False])
def test_rotation(self, init_state, state_vector_device, shots, analytic, tol):
"""Test that the QubitStateVector and Rot operations are decomposed using a
Qiskit device with statevector backend"""
dev = state_vector_device(1)
if dev.backend_name == "unitary_simulator":
pytest.skip("Test only runs for backends that are not the unitary simulator.")
state = init_state(1)
a = 0.542
b = 1.3432
c = -0.654
I = np.eye(2)
Y = np.array([[0, -1j], [1j, 0]]) #: Pauli-Y matrix
Z = np.array([[1, 0], [0, -1]]) #: Pauli-Z matrix
def ry(theta):
return np.cos(theta / 2) * I + 1j * np.sin(-theta / 2) * Y
def rz(theta):
return np.cos(theta / 2) * I + 1j * np.sin(-theta / 2) * Z
@qml.qnode(dev)
def qubitstatevector_and_rot():
qml.QubitStateVector(state, wires=[0])
qml.Rot(a, b, c, wires=[0])
return qml.expval(qml.Identity(0))
qubitstatevector_and_rot()
assert np.allclose(np.abs(dev.state) ** 2, np.abs(rz(c) @ ry(b) @ rz(a) @ state) ** 2, **tol)
@pytest.mark.parametrize("shots", [1000])
@pytest.mark.parametrize("analytic", [True, False])
def test_basisstate(self, init_state, state_vector_device, shots, analytic, tol):
"""Test that the Basisstate is decomposed using a Qiskit device with
statevector backend"""
dev = state_vector_device(2)
state = np.array([1, 0])
@qml.qnode(dev)
def basisstate():
qml.BasisState(state, wires=[0, 1])
return qml.expval(qml.Identity(0))
basisstate()
expected_state = np.zeros(2**dev.num_wires)
expected_state[2] = 1
assert np.allclose(np.abs(dev.state) ** 2, np.abs(expected_state) ** 2, **tol)
@pytest.mark.parametrize("shots", [1000])
@pytest.mark.parametrize("analytic", [True, False])
def test_basisstate_init_all_zero_states(self, init_state, state_vector_device, shots, analytic, tol):
"""Test that the Basisstate that receives the all zero state is decomposed using
a Qiskit device with statevector backend"""
dev = state_vector_device(4)
state = np.array([0, 0, 0, 0])
@qml.qnode(dev)
def basisstate():
qml.BasisState(state, wires=[0, 1, 2, 3])
return qml.expval(qml.Identity(0))
basisstate()
expected_state = np.zeros(2**dev.num_wires)
expected_state[0] = 1
assert np.allclose(np.abs(dev.state) ** 2, np.abs(expected_state) ** 2, **tol)
class TestInverses:
"""Integration tests checking that the inverse of the operations are applied."""
def test_inverse_of_operation(self):
"""Test that the inverse of operations works as expected
by comparing a simple circuit with default.qubit."""
dev = qml.device('default.qubit', wires=2)
dev2 = qml.device('qiskit.aer', backend='statevector_simulator', shots=5, wires=2, analytic=True)
angles = np.array([0.53896774, 0.79503606, 0.27826503, 0.])
@qml.qnode(dev)
def circuit_with_inverses(angle):
qml.Hadamard(0).inv()
qml.RX(angle, wires=0).inv()
return qml.expval(qml.PauliZ(0))
@qml.qnode(dev2)
def circuit_with_inverses_default_qubit(angle):
qml.Hadamard(0).inv()
qml.RX(angle, wires=0).inv()
return qml.expval(qml.PauliZ(0))
for x in angles:
assert np.allclose(circuit_with_inverses(x), circuit_with_inverses_default_qubit(x))
|
# coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Util import."""
# pylint: disable=wildcard-import
from tensorflow_datasets.core.utils.image_utils import *
from tensorflow_datasets.core.utils.py_utils import *
from tensorflow_datasets.core.utils.tf_utils import *
from tensorflow_datasets.core.utils.tqdm_utils import *
from tensorflow_datasets.core.utils.version import Experiment
from tensorflow_datasets.core.utils.version import Version
# pylint: enable=wildcard-import
|
# -*- coding: utf-8 -*-
""" rwlock.py
A class to implement read-write locks on top of the standard threading
library.
This is implemented with two mutexes (threading.Lock instances) as per this
wikipedia pseudocode:
https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock#Using_two_mutexes
Code written by Tyler Neylon at Unbox Research.
This file is public domain.
Modified to add a w_demote function to convert a writer lock to a reader lock
"""
# _______________________________________________________________________
# Imports
from contextlib import contextmanager
from threading import Lock
# _______________________________________________________________________
# Class
class RWLock(object):
""" RWLock class; this is meant to allow an object to be read from by
multiple threads, but only written to by a single thread at a time. See:
https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock
Usage:
from rwlock import RWLock
my_obj_rwlock = RWLock()
# When reading from my_obj:
with my_obj_rwlock.r_locked():
do_read_only_things_with(my_obj)
# When writing to my_obj:
with my_obj_rwlock.w_locked():
mutate(my_obj)
"""
def __init__(self):
self.w_lock = Lock()
self.num_r_lock = Lock()
self.num_r = 0
# The d_lock is needed to handle the demotion case,
# so that the writer can become a reader without releasing the w_lock.
# the d_lock is held by the writer, and prevents any other thread from taking the
# num_r_lock during that time, which means the writer thread is able to take the
# num_r_lock to update the num_r.
self.d_lock = Lock()
# ___________________________________________________________________
# Reading methods.
def r_acquire(self):
self.d_lock.acquire()
self.num_r_lock.acquire()
self.num_r += 1
if self.num_r == 1:
self.w_lock.acquire()
self.num_r_lock.release()
self.d_lock.release()
def r_release(self):
assert self.num_r > 0
self.num_r_lock.acquire()
self.num_r -= 1
if self.num_r == 0:
self.w_lock.release()
self.num_r_lock.release()
@contextmanager
def r_locked(self):
""" This method is designed to be used via the `with` statement. """
try:
self.r_acquire()
yield
finally:
self.r_release()
# ___________________________________________________________________
# Writing methods.
def w_acquire(self):
self.d_lock.acquire()
self.w_lock.acquire()
def w_acquire_non_blocking(self):
# if d_lock and w_lock can be acquired without blocking, acquire and return True,
# else immediately return False.
if self.d_lock.acquire(blocking=False):
if self.w_lock.acquire(blocking=False):
return True
else:
self.d_lock.release()
return False
def w_release(self):
self.w_lock.release()
self.d_lock.release()
def w_demote(self):
"""demote a writer lock to a reader lock"""
# the d_lock is already held from w_acquire.
# releasing the d_lock at the end of this function allows multiple readers.
# incrementing num_r makes this thread one of those readers.
self.num_r_lock.acquire()
self.num_r += 1
self.num_r_lock.release()
self.d_lock.release()
@contextmanager
def w_locked(self):
""" This method is designed to be used via the `with` statement. """
try:
self.w_acquire()
yield
finally:
self.w_release()
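# Sketch of the added demotion path (illustrative only, not from the original
# wikipedia pseudocode): a writer mutates, demotes itself to a reader without a
# release/re-acquire gap, then releases the read side like any other reader.
if __name__ == '__main__':
    rw = RWLock()
    rw.w_acquire()
    # ... mutate shared state exclusively ...
    rw.w_demote()   # now holding a read lock; other readers may proceed
    # ... read shared state concurrently with other readers ...
    rw.r_release()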
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 20 11:36:58 2020
@author: nastavirs
"""
import numpy as np
import tensorflow as tf
# Fragment of a PINN class: net_u is intended as a method (note the `self`
# parameter) that evaluates the network approximation u(x, t) via self.neural_net.
def net_u(self, x, t):
u = self.neural_net(tf.concat([x,t],1), self.weights, self.biases)
return u
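# Hedged sketch of the piece net_u assumes but which is not in this fragment: a
# `neural_net` method that runs a fully connected tanh network over the stacked
# (x, t) input using externally stored weights/biases, as is common in PINN code.
# The layer handling below is illustrative, not the author's implementation.
def neural_net(self, X, weights, biases):
    H = X
    for W, b in zip(weights[:-1], biases[:-1]):
        H = tf.tanh(tf.add(tf.matmul(H, W), b))
    return tf.add(tf.matmul(H, weights[-1]), biases[-1])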
|
# flake8: noqa
# DEV: Skip linting, we lint with Python 2, we'll get SyntaxErrors from `yield from`
# stdlib
import time
import asyncio
# 3p
import aiopg
from psycopg2 import extras
# project
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.contrib.aiopg.patch import patch, unpatch
from ddtrace import Pin
# testing
from tests.opentracer.utils import init_tracer
from tests.contrib.config import POSTGRES_CONFIG
from tests.test_tracer import get_dummy_tracer
from tests.contrib.asyncio.utils import AsyncioTestCase, mark_asyncio
TEST_PORT = str(POSTGRES_CONFIG['port'])
class AiopgTestCase(AsyncioTestCase):
# default service
TEST_SERVICE = 'postgres'
def setUp(self):
super().setUp()
self._conn = None
patch()
def tearDown(self):
super().tearDown()
if self._conn and not self._conn.closed:
self._conn.close()
unpatch()
@asyncio.coroutine
def _get_conn_and_tracer(self):
conn = self._conn = yield from aiopg.connect(**POSTGRES_CONFIG)
Pin.get_from(conn).clone(tracer=self.tracer).onto(conn)
return conn, self.tracer
@asyncio.coroutine
def assert_conn_is_traced(self, tracer, db, service):
# ensure the trace aiopg client doesn't add non-standard
# methods
try:
yield from db.execute('select \'foobar\'')
except AttributeError:
pass
writer = tracer.writer
# Ensure we can run a query and it's correctly traced
q = 'select \'foobarblah\''
start = time.time()
cursor = yield from db.cursor()
yield from cursor.execute(q)
rows = yield from cursor.fetchall()
end = time.time()
assert rows == [('foobarblah',)]
assert rows
spans = writer.pop()
assert spans
assert len(spans) == 1
span = spans[0]
assert span.name == 'postgres.query'
assert span.resource == q
assert span.service == service
assert span.meta['sql.query'] == q
assert span.error == 0
assert span.span_type == 'sql'
assert start <= span.start <= end
assert span.duration <= end - start
# Ensure OpenTracing compatibility
ot_tracer = init_tracer('aiopg_svc', tracer)
with ot_tracer.start_active_span('aiopg_op'):
cursor = yield from db.cursor()
yield from cursor.execute(q)
rows = yield from cursor.fetchall()
assert rows == [('foobarblah',)]
spans = writer.pop()
assert len(spans) == 2
ot_span, dd_span = spans
# confirm the parenting
assert ot_span.parent_id == None
assert dd_span.parent_id == ot_span.span_id
assert ot_span.name == 'aiopg_op'
assert ot_span.service == 'aiopg_svc'
assert dd_span.name == 'postgres.query'
assert dd_span.resource == q
assert dd_span.service == service
assert dd_span.meta['sql.query'] == q
assert dd_span.error == 0
assert dd_span.span_type == 'sql'
# run a query with an error and ensure all is well
q = 'select * from some_non_existant_table'
cur = yield from db.cursor()
try:
yield from cur.execute(q)
except Exception:
pass
else:
assert 0, 'should have an error'
spans = writer.pop()
assert spans, spans
assert len(spans) == 1
span = spans[0]
assert span.name == 'postgres.query'
assert span.resource == q
assert span.service == service
assert span.meta['sql.query'] == q
assert span.error == 1
# assert span.meta['out.host'] == 'localhost'
assert span.meta['out.port'] == TEST_PORT
assert span.span_type == 'sql'
@mark_asyncio
def test_disabled_execute(self):
conn, tracer = yield from self._get_conn_and_tracer()
tracer.enabled = False
# these calls were crashing with a previous version of the code.
yield from (yield from conn.cursor()).execute(query='select \'blah\'')
yield from (yield from conn.cursor()).execute('select \'blah\'')
assert not tracer.writer.pop()
@mark_asyncio
def test_manual_wrap_extension_types(self):
conn, _ = yield from self._get_conn_and_tracer()
# NOTE: this will crash if it doesn't work.
# _ext.register_type(_ext.UUID, conn_or_curs)
# TypeError: argument 2 must be a connection, cursor or None
extras.register_uuid(conn_or_curs=conn)
@mark_asyncio
def test_connect_factory(self):
tracer = get_dummy_tracer()
services = ['db', 'another']
for service in services:
conn, _ = yield from self._get_conn_and_tracer()
Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn)
yield from self.assert_conn_is_traced(tracer, conn, service)
conn.close()
# ensure we have the service types
service_meta = tracer.writer.pop_services()
expected = {}
assert service_meta == expected
@mark_asyncio
def test_patch_unpatch(self):
tracer = get_dummy_tracer()
writer = tracer.writer
# Test patch idempotence
patch()
patch()
service = 'fo'
conn = yield from aiopg.connect(**POSTGRES_CONFIG)
Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn)
yield from (yield from conn.cursor()).execute('select \'blah\'')
conn.close()
spans = writer.pop()
assert spans, spans
assert len(spans) == 1
# Test unpatch
unpatch()
conn = yield from aiopg.connect(**POSTGRES_CONFIG)
yield from (yield from conn.cursor()).execute('select \'blah\'')
conn.close()
spans = writer.pop()
assert not spans, spans
# Test patch again
patch()
conn = yield from aiopg.connect(**POSTGRES_CONFIG)
Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn)
yield from (yield from conn.cursor()).execute('select \'blah\'')
conn.close()
spans = writer.pop()
assert spans, spans
assert len(spans) == 1
class AiopgAnalyticsTestCase(AiopgTestCase):
@asyncio.coroutine
def trace_spans(self):
service = 'db'
conn, _ = yield from self._get_conn_and_tracer()
Pin.get_from(conn).clone(service='db', tracer=self.tracer).onto(conn)
cursor = yield from conn.cursor()
yield from cursor.execute('select \'foobar\'')
rows = yield from cursor.fetchall()
assert rows
return self.get_spans()
@mark_asyncio
def test_analytics_default(self):
spans = yield from self.trace_spans()
self.assertEqual(len(spans), 1)
self.assertIsNone(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY))
@mark_asyncio
def test_analytics_with_rate(self):
with self.override_config(
'aiopg',
dict(analytics_enabled=True, analytics_sample_rate=0.5)
):
spans = yield from self.trace_spans()
self.assertEqual(len(spans), 1)
self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5)
@mark_asyncio
def test_analytics_without_rate(self):
with self.override_config(
'aiopg',
dict(analytics_enabled=True)
):
spans = yield from self.trace_spans()
self.assertEqual(len(spans), 1)
self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0)
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VisionDataset that allows user to get example indices."""
import torchvision
class VisionDatasetWithIndices(torchvision.datasets.vision.VisionDataset):
"""VisionDataset that allows user to get example indices.
Dataset that returns a triple (data, targets, indices)
instead of just (data, targets). Indices of training examples can be
used to track model performance on individual examples, for instance to find
training examples that are learned faster than others.
"""
def __init__(self, dataset):
super(VisionDatasetWithIndices, self).__init__(None)
self.dataset = dataset
def __getitem__(self, index):
data, target = self.dataset.__getitem__(index)
return data, target, index
def __len__(self):
return len(self.dataset)
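# Hypothetical usage sketch (dataset choice and path are placeholders): wrapping a
# torchvision dataset so a DataLoader also yields the example indices, which can
# then be used to track per-example statistics across epochs.
if __name__ == '__main__':
    import torch
    import torchvision.transforms as transforms
    base = torchvision.datasets.CIFAR10(
        root='./data', train=True, download=True,
        transform=transforms.ToTensor())
    wrapped = VisionDatasetWithIndices(base)
    loader = torch.utils.data.DataLoader(wrapped, batch_size=32, shuffle=True)
    images, targets, indices = next(iter(loader))  # indices identify each example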
|
#!/usr/bin/python -tt
import os
import argparse
import subprocess
import shutil
parser = argparse.ArgumentParser(description="Compile the Mount&Blade Warband shaders.")
parser.add_argument("-b", "--compile-b", action="store_true", help="compile the ps_2_b profile as well")
args = parser.parse_args()
if not os.access(os.path.join("shaders", "fxc.exe"), os.R_OK|os.X_OK):
print "You must copy fxc.exe from the TaleWorlds Warband shader package to the shaders subdirectory."
exit(1)
import module_info
def compile_profile(profile, name):
command_list = ["./fxc.exe", "/nologo", "/T", "fx_2_0", "/D", "PS_2_X=%s" % profile, "/Fo", "mb.fxo", "mb.fx"]
exit_code = subprocess.call(command_list, cwd="shaders")
output_fxo = os.path.join("shaders", "mb.fxo")
if exit_code == 0:
module_fxo = module_info.export_path(name)
try:
os.remove(module_fxo)
except Exception:
pass
shutil.move(output_fxo, module_fxo)
else:
try:
os.remove(output_fxo)
except Exception:
pass
exit(exit_code)
compile_profile("ps_2_a", "mb.fx")
if args.compile_b:
compile_profile("ps_2_b", "mb_2b.fx")
|
# import riaps
from riaps.run.comp import Component
import logging
class Hello(Component):
def __init__(self):
super(Hello, self).__init__()
def on_clock(self):
now = self.clock.recv_pyobj() # Receive time.time() as float
self.logger.info('on_clock(): %s' % str(now))
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = ['DataConnector']
class DataConnector(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
data_connector_id: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[Union[str, 'DataConnectorKind']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Data connector.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] data_connector_id: Connector ID
:param pulumi.Input[str] etag: Etag of the azure resource
:param pulumi.Input[Union[str, 'DataConnectorKind']] kind: The data connector kind
:param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
:param pulumi.Input[str] workspace_name: The name of the workspace.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['data_connector_id'] = data_connector_id
__props__['etag'] = etag
if kind is None and not opts.urn:
raise TypeError("Missing required property 'kind'")
__props__['kind'] = kind
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if workspace_name is None and not opts.urn:
raise TypeError("Missing required property 'workspace_name'")
__props__['workspace_name'] = workspace_name
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:securityinsights:DataConnector"), pulumi.Alias(type_="azure-nextgen:securityinsights/latest:DataConnector")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(DataConnector, __self__).__init__(
'azure-nextgen:securityinsights/v20200101:DataConnector',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'DataConnector':
"""
Get an existing DataConnector resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return DataConnector(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
Etag of the azure resource
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
The data connector kind
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Azure resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Azure resource type
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
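# Hypothetical usage sketch, as it would appear in a Pulumi program's __main__.py;
# the connector kind, resource group and workspace names are placeholders, not
# values from this module:
#
#     connector = DataConnector(
#         'exampleDataConnector',
#         kind='AzureActiveDirectory',
#         resource_group_name='my-resource-group',
#         workspace_name='my-sentinel-workspace')
#     pulumi.export('connectorName', connector.name)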
|
from keras.models import Sequential
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation
from keras.layers.core import Flatten
from keras.layers.core import Dense
from keras import backend as K
class LeNet:
@staticmethod
def build(width, height, depth, classes):
model = Sequential()
inputShape = (height, width, depth)
if K.image_data_format() == "channels_first":
inputShape = (depth, height, width)
# CONV => RELU => POOL #1
model.add(Conv2D(20, (5, 5), padding='same', input_shape=inputShape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
# CONV => RELU => POOL #2
model.add(Conv2D(50, (5, 5), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# FC => RELU
model.add(Flatten())
model.add(Dense(500))
model.add(Activation('relu'))
# softmax classifier
model.add(Dense(classes))
model.add(Activation('softmax'))
return model
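# Usage sketch (assumes 28x28 grayscale inputs with 10 classes, e.g. MNIST; the
# optimizer choice is illustrative):
if __name__ == '__main__':
    model = LeNet.build(width=28, height=28, depth=1, classes=10)
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    model.summary()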
|
# Copyright 2021 Torsten Mehnert
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from complatecpp import Stream
class TestStream(Stream):
__test__ = False
def __init__(self, *args, **kwargs):
# Don't use super()
Stream.__init__(self, *args, **kwargs)
self.data = str()
def write(self, string, length):
self.data += string[0:length]
def writeln(self, string, length):
self.data += string[0:length]
self.data += '\n'
def flush(self):
pass
def str(self):
return self.data
|
from CyberSource import *
import os
import json
from importlib.machinery import SourceFileLoader
config_file = os.path.join(os.getcwd(), "data", "Configuration.py")
configuration = SourceFileLoader("module.name", config_file).load_module()
# To delete None values in Input Request Json body
def del_none(d):
for key, value in list(d.items()):
if value is None:
del d[key]
elif isinstance(value, dict):
del_none(value)
return d
def multiple_line_items():
clientReferenceInformationCode = "addressEg"
clientReferenceInformationComments = "dav-All fields"
clientReferenceInformation = Riskv1decisionsClientReferenceInformation(
code = clientReferenceInformationCode,
comments = clientReferenceInformationComments
)
orderInformationBillToAddress1 = "12301 research st"
orderInformationBillToAddress2 = "1"
orderInformationBillToAddress3 = "2"
orderInformationBillToAddress4 = "3"
orderInformationBillToAdministrativeArea = "TX"
orderInformationBillToCountry = "US"
orderInformationBillToLocality = "Austin"
orderInformationBillToPostalCode = "78759"
orderInformationBillTo = Riskv1addressverificationsOrderInformationBillTo(
address1 = orderInformationBillToAddress1,
address2 = orderInformationBillToAddress2,
address3 = orderInformationBillToAddress3,
address4 = orderInformationBillToAddress4,
administrative_area = orderInformationBillToAdministrativeArea,
country = orderInformationBillToCountry,
locality = orderInformationBillToLocality,
postal_code = orderInformationBillToPostalCode
)
orderInformationShipToAddress1 = "PO Box 9088"
orderInformationShipToAddress2 = ""
orderInformationShipToAddress3 = ""
orderInformationShipToAddress4 = ""
orderInformationShipToAdministrativeArea = "CA"
orderInformationShipToCountry = "US"
orderInformationShipToLocality = "San Jose"
orderInformationShipToPostalCode = "95132"
orderInformationShipTo = Riskv1addressverificationsOrderInformationShipTo(
address1 = orderInformationShipToAddress1,
address2 = orderInformationShipToAddress2,
address3 = orderInformationShipToAddress3,
address4 = orderInformationShipToAddress4,
administrative_area = orderInformationShipToAdministrativeArea,
country = orderInformationShipToCountry,
locality = orderInformationShipToLocality,
postal_code = orderInformationShipToPostalCode
)
orderInformationLineItems = []
orderInformationLineItems1 = Riskv1addressverificationsOrderInformationLineItems(
unit_price = "120.50",
quantity = 3,
product_sku = "9966223",
product_name = "headset",
product_code = "electronix"
)
orderInformationLineItems.append(orderInformationLineItems1.__dict__)
orderInformationLineItems2 = Riskv1addressverificationsOrderInformationLineItems(
unit_price = "10.50",
quantity = 2,
product_sku = "9966226",
product_name = "wwrdf",
product_code = "electronic"
)
orderInformationLineItems.append(orderInformationLineItems2.__dict__)
orderInformation = Riskv1addressverificationsOrderInformation(
bill_to = orderInformationBillTo.__dict__,
ship_to = orderInformationShipTo.__dict__,
line_items = orderInformationLineItems
)
buyerInformationMerchantCustomerId = "QWERTY"
buyerInformation = Riskv1addressverificationsBuyerInformation(
merchant_customer_id = buyerInformationMerchantCustomerId
)
requestObj = VerifyCustomerAddressRequest(
client_reference_information = clientReferenceInformation.__dict__,
order_information = orderInformation.__dict__,
buyer_information = buyerInformation.__dict__
)
requestObj = del_none(requestObj.__dict__)
requestObj = json.dumps(requestObj)
try:
config_obj = configuration.Configuration()
client_config = config_obj.get_configuration()
api_instance = VerificationApi(client_config)
return_data, status, body = api_instance.verify_customer_address(requestObj)
print("\nAPI RESPONSE CODE : ", status)
print("\nAPI RESPONSE BODY : ", body)
return return_data
except Exception as e:
print("\nException when calling VerificationApi->verify_customer_address: %s\n" % e)
if __name__ == "__main__":
multiple_line_items()
|
# Write a program that reads several numbers and stores them in a list.
# Then create two extra lists holding only the even values and the odd
# values entered, respectively.
# At the end, display the contents of the three generated lists.
principal = []
par = []
impar = []
while True:
    n = int(input('Enter a value: '))
principal.append(n)
if n % 2 == 0:
par.append(n)
else:
impar.append(n)
while True:
        opção = str(input('Do you want to continue? [S/N]: ')).upper()
        if opção == 'S':
            break
        elif opção == 'N':
            break
        else:
            print('Invalid option. Enter only S or N')
if opção == 'N':
break
print(f'Main list of numbers: {principal}')
print(f'List of even numbers: {par}')
print(f'List of odd numbers: {impar}')
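# Verification sketch (not part of the original exercise): the same partition can
# be rebuilt from the main list with comprehensions; these assertions just confirm
# the input loop produced the expected even/odd split.
assert par == [n for n in principal if n % 2 == 0]
assert impar == [n for n in principal if n % 2 != 0]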
|
# -*- coding: utf-8 -*-
import sys
import os
import re
sys.path.append('../') # noqa
from jinja2 import Template
from cli_bdd.core.steps import (
command,
environment,
file as file_steps,
)
BASE_PATH = os.path.dirname(os.path.normpath(__file__))
TEMPLATES_PATH = os.path.join(BASE_PATH, 'templates')
STEPS_MODULES = [
command,
environment,
file_steps,
]
def _prepare_docstring(value):
if not value:
return ''
    # Measure the indentation of the first non-empty line after the summary line;
    # that many leading spaces are then stripped from every line of the docstring.
    remove_spaces = 0
for line in value.split('\n')[1:]:
if line:
for char in line:
if char != ' ':
break
else:
remove_spaces += 1
break
return re.sub(
r'^ {%s}' % remove_spaces,
'',
unicode(value),
flags=re.MULTILINE
).strip()
def _render_and_save_template(path, dest, context):
template_path = os.path.join(TEMPLATES_PATH, path + '.tpl')
destination_path = os.path.join(BASE_PATH, dest + '.md')
with open(destination_path, 'wt') as dest_file:
dest_file.write(
Template(open(template_path).read()).render(context)
)
def generate_api_reference():
generate_steps_reference()
def generate_steps_reference():
steps_by_types = []
for step_module in STEPS_MODULES:
name = step_module.__name__.split('.')[-1]
steps_by_types.append({
'name': name,
'module': step_module.__name__,
'base_steps': step_module.base_steps
})
steps_dir = os.path.join(BASE_PATH, 'steps/')
if not os.path.exists(steps_dir):
os.makedirs(steps_dir)
for step_type in steps_by_types:
_render_and_save_template(
'steps',
'steps/' + step_type['name'],
{
'step_type': step_type,
'prepare_docstring': _prepare_docstring
}
)
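# Behaviour sketch (illustrative, not part of the original module; note the module
# targets Python 2 -- see the unicode() call in _prepare_docstring): the helper
# strips the common indentation that step docstrings inherit from their source, so
# the rendered Markdown comes out flush-left.
if __name__ == '__main__':
    assert _prepare_docstring('Given a file\n    with some content\n') == (
        'Given a file\nwith some content'
    )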
|
#!/usr/bin/env python3
# Copyright (c) 2016-2017 The Michaellaoliu Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Hierarchical Deterministic wallet function."""
from test_framework.test_framework import MichaellaoliuTestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi,
)
import shutil
import os
class WalletHDTest(MichaellaoliuTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[], ['-keypool=0']]
    def run_test(self):
tmpdir = self.options.tmpdir
# Make sure can't switch off usehd after wallet creation
self.stop_node(1)
self.assert_start_raises_init_error(1, ['-usehd=0'], 'already existing HD wallet')
self.start_node(1)
connect_nodes_bi(self.nodes, 0, 1)
# Make sure we use hd, keep masterkeyid
masterkeyid = self.nodes[1].getwalletinfo()['hdmasterkeyid']
assert_equal(len(masterkeyid), 40)
# create an internal key
change_addr = self.nodes[1].getrawchangeaddress()
change_addrV= self.nodes[1].validateaddress(change_addr)
assert_equal(change_addrV["hdkeypath"], "m/0'/1'/0'") #first internal child key
# Import a non-HD private key in the HD wallet
non_hd_add = self.nodes[0].getnewaddress()
self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))
# This should be enough to keep the master key and the non-HD key
self.nodes[1].backupwallet(tmpdir + "/hd.bak")
#self.nodes[1].dumpwallet(tmpdir + "/hd.dump")
# Derive some HD addresses and remember the last
# Also send funds to each add
self.nodes[0].generate(101)
hd_add = None
num_hd_adds = 300
for i in range(num_hd_adds):
hd_add = self.nodes[1].getnewaddress()
hd_info = self.nodes[1].validateaddress(hd_add)
assert_equal(hd_info["hdkeypath"], "m/0'/0'/"+str(i)+"'")
assert_equal(hd_info["hdmasterkeyid"], masterkeyid)
self.nodes[0].sendtoaddress(hd_add, 1)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(non_hd_add, 1)
self.nodes[0].generate(1)
# create an internal key (again)
change_addr = self.nodes[1].getrawchangeaddress()
change_addrV= self.nodes[1].validateaddress(change_addr)
assert_equal(change_addrV["hdkeypath"], "m/0'/1'/1'") #second internal child key
self.sync_all()
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
self.log.info("Restore backup ...")
self.stop_node(1)
# we need to delete the complete regtest directory
        # otherwise node1 would auto-recover all funds and flag the keypool keys as used
shutil.rmtree(os.path.join(tmpdir, "node1/regtest/blocks"))
shutil.rmtree(os.path.join(tmpdir, "node1/regtest/chainstate"))
shutil.copyfile(os.path.join(tmpdir, "hd.bak"), os.path.join(tmpdir, "node1/regtest/wallets/wallet.dat"))
self.start_node(1)
# Assert that derivation is deterministic
hd_add_2 = None
for _ in range(num_hd_adds):
hd_add_2 = self.nodes[1].getnewaddress()
hd_info_2 = self.nodes[1].validateaddress(hd_add_2)
assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/"+str(_)+"'")
assert_equal(hd_info_2["hdmasterkeyid"], masterkeyid)
assert_equal(hd_add, hd_add_2)
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
# Needs rescan
self.stop_node(1)
self.start_node(1, extra_args=self.extra_args[1] + ['-rescan'])
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
# Try a RPC based rescan
self.stop_node(1)
shutil.rmtree(os.path.join(tmpdir, "node1/regtest/blocks"))
shutil.rmtree(os.path.join(tmpdir, "node1/regtest/chainstate"))
shutil.copyfile(os.path.join(tmpdir, "hd.bak"), os.path.join(tmpdir, "node1/regtest/wallet.dat"))
self.start_node(1, extra_args=self.extra_args[1])
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
out = self.nodes[1].rescanblockchain(0, 1)
assert_equal(out['start_height'], 0)
assert_equal(out['stop_height'], 1)
out = self.nodes[1].rescanblockchain()
assert_equal(out['start_height'], 0)
assert_equal(out['stop_height'], self.nodes[1].getblockcount())
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
# send a tx and make sure its using the internal chain for the changeoutput
txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
outs = self.nodes[1].decoderawtransaction(self.nodes[1].gettransaction(txid)['hex'])['vout']
keypath = ""
for out in outs:
if out['value'] != 1:
keypath = self.nodes[1].validateaddress(out['scriptPubKey']['addresses'][0])['hdkeypath']
assert_equal(keypath[0:7], "m/0'/1'")
if __name__ == '__main__':
    WalletHDTest().main()
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: locale.py
""" Locale support.
The module provides low-level access to the C lib's locale APIs
and adds high level number formatting APIs as well as a locale
aliasing engine to complement these.
The aliasing engine includes support for many commonly used locale
names and maps them to values suitable for passing to the C lib's
setlocale() function. It also includes default encodings for all
supported locale names.
"""
import sys
import encodings
import encodings.aliases
import re
import operator
import functools
__all__ = [
'getlocale', 'getdefaultlocale', 'getpreferredencoding', 'Error',
'setlocale', 'resetlocale', 'localeconv', 'strcoll', 'strxfrm',
'str', 'atof', 'atoi', 'format', 'format_string', 'currency',
'normalize', 'LC_CTYPE', 'LC_COLLATE', 'LC_TIME', 'LC_MONETARY',
'LC_NUMERIC', 'LC_ALL', 'CHAR_MAX']
try:
from _locale import *
except ImportError:
CHAR_MAX = 127
LC_ALL = 6
LC_COLLATE = 3
LC_CTYPE = 0
LC_MESSAGES = 5
LC_MONETARY = 4
LC_NUMERIC = 1
LC_TIME = 2
Error = ValueError
def localeconv():
""" localeconv() -> dict.
Returns numeric and monetary locale-specific parameters.
"""
return {'grouping': [127],'currency_symbol': '',
'n_sign_posn': 127,
'p_cs_precedes': 127,
'n_cs_precedes': 127,
'mon_grouping': [],'n_sep_by_space': 127,
'decimal_point': '.',
'negative_sign': '',
'positive_sign': '',
'p_sep_by_space': 127,
'int_curr_symbol': '',
'p_sign_posn': 127,
'thousands_sep': '',
'mon_thousands_sep': '',
'frac_digits': 127,
'mon_decimal_point': '',
'int_frac_digits': 127
}
def setlocale(category, value=None):
""" setlocale(integer,string=None) -> string.
Activates/queries locale processing.
"""
if value not in (None, '', 'C'):
raise Error, '_locale emulation only supports "C" locale'
return 'C'
def strcoll(a, b):
""" strcoll(string,string) -> int.
Compares two strings according to the locale.
"""
return cmp(a, b)
def strxfrm(s):
""" strxfrm(string) -> string.
Returns a string that behaves for cmp locale-aware.
"""
return s
_localeconv = localeconv
_override_localeconv = {}
@functools.wraps(_localeconv)
def localeconv():
d = _localeconv()
if _override_localeconv:
d.update(_override_localeconv)
return d
def _grouping_intervals(grouping):
last_interval = None
for interval in grouping:
if interval == CHAR_MAX:
return
if interval == 0:
if last_interval is None:
raise ValueError('invalid grouping')
while True:
yield last_interval
yield interval
last_interval = interval
return
def _group(s, monetary=False):
conv = localeconv()
thousands_sep = conv[monetary and 'mon_thousands_sep' or 'thousands_sep']
grouping = conv[monetary and 'mon_grouping' or 'grouping']
if not grouping:
return (s, 0)
result = ''
seps = 0
if s[-1] == ' ':
stripped = s.rstrip()
right_spaces = s[len(stripped):]
s = stripped
else:
right_spaces = ''
left_spaces = ''
groups = []
for interval in _grouping_intervals(grouping):
if not s or s[-1] not in '0123456789':
left_spaces = s
s = ''
break
groups.append(s[-interval:])
s = s[:-interval]
if s:
groups.append(s)
groups.reverse()
return (
left_spaces + thousands_sep.join(groups) + right_spaces,
len(thousands_sep) * (len(groups) - 1))
def _strip_padding(s, amount):
lpos = 0
while amount and s[lpos] == ' ':
lpos += 1
amount -= 1
rpos = len(s) - 1
while amount and s[rpos] == ' ':
rpos -= 1
amount -= 1
return s[lpos:rpos + 1]
_percent_re = re.compile('%(?:\\((?P<key>.*?)\\))?(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]')
def format(percent, value, grouping=False, monetary=False, *additional):
"""Returns the locale-aware substitution of a %? specifier
(percent).
additional is for format strings which contain one or more
'*' modifiers."""
match = _percent_re.match(percent)
if not match or len(match.group()) != len(percent):
raise ValueError('format() must be given exactly one %%char format specifier, %s not valid' % repr(percent))
return _format(percent, value, grouping, monetary, *additional)
def _format(percent, value, grouping=False, monetary=False, *additional):
if additional:
formatted = percent % ((value,) + additional)
else:
formatted = percent % value
if percent[-1] in 'eEfFgG':
seps = 0
parts = formatted.split('.')
if grouping:
parts[0], seps = _group(parts[0], monetary=monetary)
decimal_point = localeconv()[monetary and 'mon_decimal_point' or 'decimal_point']
formatted = decimal_point.join(parts)
if seps:
formatted = _strip_padding(formatted, seps)
elif percent[-1] in 'diu':
seps = 0
if grouping:
formatted, seps = _group(formatted, monetary=monetary)
if seps:
formatted = _strip_padding(formatted, seps)
return formatted
def format_string(f, val, grouping=False):
"""Formats a string in the same way that the % formatting would use,
but takes the current locale into account.
Grouping is applied if the third parameter is true."""
percents = list(_percent_re.finditer(f))
new_f = _percent_re.sub('%s', f)
if operator.isMappingType(val):
new_val = []
for perc in percents:
if perc.group()[-1] == '%':
new_val.append('%')
else:
new_val.append(format(perc.group(), val, grouping))
else:
if not isinstance(val, tuple):
val = (
val,)
new_val = []
i = 0
for perc in percents:
if perc.group()[-1] == '%':
new_val.append('%')
else:
starcount = perc.group('modifiers').count('*')
new_val.append(_format(perc.group(), val[i], grouping, False, *val[i + 1:i + 1 + starcount]))
i += 1 + starcount
val = tuple(new_val)
return new_f % val
def currency(val, symbol=True, grouping=False, international=False):
"""Formats val according to the currency settings
in the current locale."""
conv = localeconv()
digits = conv[international and 'int_frac_digits' or 'frac_digits']
if digits == 127:
raise ValueError("Currency formatting is not possible using the 'C' locale.")
s = format('%%.%if' % digits, abs(val), grouping, monetary=True)
s = '<' + s + '>'
if symbol:
smb = conv[international and 'int_curr_symbol' or 'currency_symbol']
precedes = conv[val < 0 and 'n_cs_precedes' or 'p_cs_precedes']
separated = conv[val < 0 and 'n_sep_by_space' or 'p_sep_by_space']
if precedes:
s = smb + (separated and ' ' or '') + s
else:
s = s + (separated and ' ' or '') + smb
sign_pos = conv[val < 0 and 'n_sign_posn' or 'p_sign_posn']
sign = conv[val < 0 and 'negative_sign' or 'positive_sign']
if sign_pos == 0:
s = '(' + s + ')'
elif sign_pos == 1:
s = sign + s
elif sign_pos == 2:
s = s + sign
elif sign_pos == 3:
s = s.replace('<', sign)
elif sign_pos == 4:
s = s.replace('>', sign)
else:
s = sign + s
return s.replace('<', '').replace('>', '')
def str(val):
"""Convert float to integer, taking the locale into account."""
return format('%.12g', val)
def atof(string, func=float):
"""Parses a string as a float according to the locale settings."""
ts = localeconv()['thousands_sep']
if ts:
string = string.replace(ts, '')
dd = localeconv()['decimal_point']
if dd:
string = string.replace(dd, '.')
return func(string)
def atoi(str):
"""Converts a string to an integer according to the locale settings."""
return atof(str, int)
def _test():
setlocale(LC_ALL, '')
s1 = format('%d', 123456789, 1)
print s1, 'is', atoi(s1)
s1 = str(3.14)
print s1, 'is', atof(s1)
_setlocale = setlocale
def normalize(localename):
""" Returns a normalized locale code for the given locale
name.
The returned locale code is formatted for use with
setlocale().
If normalization fails, the original name is returned
unchanged.
If the given encoding is not known, the function defaults to
the default encoding for the locale code just like setlocale()
does.
"""
fullname = localename.lower()
if ':' in fullname:
fullname = fullname.replace(':', '.')
if '.' in fullname:
langname, encoding = fullname.split('.')[:2]
fullname = langname + '.' + encoding
else:
langname = fullname
encoding = ''
norm_encoding = encoding.replace('-', '')
norm_encoding = norm_encoding.replace('_', '')
lookup_name = langname + '.' + encoding
code = locale_alias.get(lookup_name, None)
if code is not None:
return code
else:
code = locale_alias.get(langname, None)
if code is not None:
if '.' in code:
langname, defenc = code.split('.')
else:
langname = code
defenc = ''
if encoding:
norm_encoding = encodings.normalize_encoding(encoding)
norm_encoding = encodings.aliases.aliases.get(norm_encoding, norm_encoding)
encoding = locale_encoding_alias.get(norm_encoding, norm_encoding)
else:
encoding = defenc
if encoding:
return langname + '.' + encoding
else:
return langname
else:
return localename
return
def _parse_localename(localename):
""" Parses the locale code for localename and returns the
result as tuple (language code, encoding).
The localename is normalized and passed through the locale
alias engine. A ValueError is raised in case the locale name
cannot be parsed.
The language code corresponds to RFC 1766. code and encoding
can be None in case the values cannot be determined or are
unknown to this implementation.
"""
code = normalize(localename)
if '@' in code:
code, modifier = code.split('@')
if modifier == 'euro' and '.' not in code:
return (
code, 'iso-8859-15')
if '.' in code:
return tuple(code.split('.')[:2])
else:
if code == 'C':
return (None, None)
raise ValueError, 'unknown locale: %s' % localename
return None
def _build_localename(localetuple):
""" Builds a locale code from the given tuple (language code,
encoding).
No aliasing or normalizing takes place.
"""
language, encoding = localetuple
if language is None:
language = 'C'
if encoding is None:
return language
else:
return language + '.' + encoding
return
def getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')):
""" Tries to determine the default locale settings and returns
them as tuple (language code, encoding).
According to POSIX, a program which has not called
setlocale(LC_ALL, "") runs using the portable 'C' locale.
Calling setlocale(LC_ALL, "") lets it use the default locale as
defined by the LANG variable. Since we don't want to interfere
with the current locale setting we thus emulate the behavior
in the way described above.
To maintain compatibility with other platforms, not only the
LANG variable is tested, but a list of variables given as
envvars parameter. The first found to be defined will be
used. envvars defaults to the search path used in GNU gettext;
it must always contain the variable name 'LANG'.
Except for the code 'C', the language code corresponds to RFC
1766. code and encoding can be None in case the values cannot
be determined.
"""
try:
import _locale
code, encoding = _locale._getdefaultlocale()
except (ImportError, AttributeError):
pass
else:
if sys.platform == 'win32' and code and code[:2] == '0x':
code = windows_locale.get(int(code, 0))
return (
code, encoding)
import os
lookup = os.environ.get
for variable in envvars:
localename = lookup(variable, None)
if localename:
if variable == 'LANGUAGE':
localename = localename.split(':')[0]
break
else:
localename = 'C'
return _parse_localename(localename)
def getlocale(category=LC_CTYPE):
""" Returns the current setting for the given locale category as
tuple (language code, encoding).
category may be one of the LC_* value except LC_ALL. It
defaults to LC_CTYPE.
Except for the code 'C', the language code corresponds to RFC
1766. code and encoding can be None in case the values cannot
be determined.
"""
localename = _setlocale(category)
if category == LC_ALL and ';' in localename:
raise TypeError, 'category LC_ALL is not supported'
return _parse_localename(localename)
def setlocale(category, locale=None):
""" Set the locale for the given category. The locale can be
a string, a locale tuple (language code, encoding), or None.
Locale tuples are converted to strings the locale aliasing
engine. Locale strings are passed directly to the C lib.
category may be given as one of the LC_* values.
"""
if locale and type(locale) is not type(''):
locale = normalize(_build_localename(locale))
return _setlocale(category, locale)
def resetlocale(category=LC_ALL):
""" Sets the locale for category to the default setting.
The default setting is determined by calling
getdefaultlocale(). category defaults to LC_ALL.
"""
_setlocale(category, _build_localename(getdefaultlocale()))
if sys.platform.startswith('win'):
def getpreferredencoding(do_setlocale=True):
"""Return the charset that the user is likely using."""
import _locale
return _locale._getdefaultlocale()[1]
else:
try:
CODESET
except NameError:
def getpreferredencoding(do_setlocale=True):
"""Return the charset that the user is likely using,
by looking at environment variables."""
return getdefaultlocale()[1]
else:
def getpreferredencoding(do_setlocale=True):
"""Return the charset that the user is likely using,
according to the system configuration."""
if do_setlocale:
oldloc = setlocale(LC_CTYPE)
try:
setlocale(LC_CTYPE, '')
except Error:
pass
result = nl_langinfo(CODESET)
setlocale(LC_CTYPE, oldloc)
return result
else:
return nl_langinfo(CODESET)
locale_encoding_alias = {'437': 'C',
'c': 'C',
'en': 'ISO8859-1',
'jis': 'JIS7',
'jis7': 'JIS7',
'ajec': 'eucJP',
'ascii': 'ISO8859-1',
'latin_1': 'ISO8859-1',
'iso8859_1': 'ISO8859-1',
'iso8859_10': 'ISO8859-10',
'iso8859_11': 'ISO8859-11',
'iso8859_13': 'ISO8859-13',
'iso8859_14': 'ISO8859-14',
'iso8859_15': 'ISO8859-15',
'iso8859_16': 'ISO8859-16',
'iso8859_2': 'ISO8859-2',
'iso8859_3': 'ISO8859-3',
'iso8859_4': 'ISO8859-4',
'iso8859_5': 'ISO8859-5',
'iso8859_6': 'ISO8859-6',
'iso8859_7': 'ISO8859-7',
'iso8859_8': 'ISO8859-8',
'iso8859_9': 'ISO8859-9',
'iso2022_jp': 'JIS7',
'shift_jis': 'SJIS',
'tactis': 'TACTIS',
'euc_jp': 'eucJP',
'euc_kr': 'eucKR',
'utf_8': 'UTF-8',
'koi8_r': 'KOI8-R',
'koi8_u': 'KOI8-U'
}
locale_alias = {'a3': 'a3_AZ.KOI8-C',
'a3_az': 'a3_AZ.KOI8-C',
'a3_az.koi8c': 'a3_AZ.KOI8-C',
'af': 'af_ZA.ISO8859-1',
'af_za': 'af_ZA.ISO8859-1',
'af_za.iso88591': 'af_ZA.ISO8859-1',
'am': 'am_ET.UTF-8',
'am_et': 'am_ET.UTF-8',
'american': 'en_US.ISO8859-1',
'american.iso88591': 'en_US.ISO8859-1',
'ar': 'ar_AA.ISO8859-6',
'ar_aa': 'ar_AA.ISO8859-6',
'ar_aa.iso88596': 'ar_AA.ISO8859-6',
'ar_ae': 'ar_AE.ISO8859-6',
'ar_ae.iso88596': 'ar_AE.ISO8859-6',
'ar_bh': 'ar_BH.ISO8859-6',
'ar_bh.iso88596': 'ar_BH.ISO8859-6',
'ar_dz': 'ar_DZ.ISO8859-6',
'ar_dz.iso88596': 'ar_DZ.ISO8859-6',
'ar_eg': 'ar_EG.ISO8859-6',
'ar_eg.iso88596': 'ar_EG.ISO8859-6',
'ar_iq': 'ar_IQ.ISO8859-6',
'ar_iq.iso88596': 'ar_IQ.ISO8859-6',
'ar_jo': 'ar_JO.ISO8859-6',
'ar_jo.iso88596': 'ar_JO.ISO8859-6',
'ar_kw': 'ar_KW.ISO8859-6',
'ar_kw.iso88596': 'ar_KW.ISO8859-6',
'ar_lb': 'ar_LB.ISO8859-6',
'ar_lb.iso88596': 'ar_LB.ISO8859-6',
'ar_ly': 'ar_LY.ISO8859-6',
'ar_ly.iso88596': 'ar_LY.ISO8859-6',
'ar_ma': 'ar_MA.ISO8859-6',
'ar_ma.iso88596': 'ar_MA.ISO8859-6',
'ar_om': 'ar_OM.ISO8859-6',
'ar_om.iso88596': 'ar_OM.ISO8859-6',
'ar_qa': 'ar_QA.ISO8859-6',
'ar_qa.iso88596': 'ar_QA.ISO8859-6',
'ar_sa': 'ar_SA.ISO8859-6',
'ar_sa.iso88596': 'ar_SA.ISO8859-6',
'ar_sd': 'ar_SD.ISO8859-6',
'ar_sd.iso88596': 'ar_SD.ISO8859-6',
'ar_sy': 'ar_SY.ISO8859-6',
'ar_sy.iso88596': 'ar_SY.ISO8859-6',
'ar_tn': 'ar_TN.ISO8859-6',
'ar_tn.iso88596': 'ar_TN.ISO8859-6',
'ar_ye': 'ar_YE.ISO8859-6',
'ar_ye.iso88596': 'ar_YE.ISO8859-6',
'arabic': 'ar_AA.ISO8859-6',
'arabic.iso88596': 'ar_AA.ISO8859-6',
'as': 'as_IN.UTF-8',
'az': 'az_AZ.ISO8859-9E',
'az_az': 'az_AZ.ISO8859-9E',
'az_az.iso88599e': 'az_AZ.ISO8859-9E',
'be': 'be_BY.CP1251',
'be@latin': 'be_BY.UTF-8@latin',
'be_by': 'be_BY.CP1251',
'be_by.cp1251': 'be_BY.CP1251',
'be_by.microsoftcp1251': 'be_BY.CP1251',
'be_by.utf8@latin': 'be_BY.UTF-8@latin',
'be_by@latin': 'be_BY.UTF-8@latin',
'bg': 'bg_BG.CP1251',
'bg_bg': 'bg_BG.CP1251',
'bg_bg.cp1251': 'bg_BG.CP1251',
'bg_bg.iso88595': 'bg_BG.ISO8859-5',
'bg_bg.koi8r': 'bg_BG.KOI8-R',
'bg_bg.microsoftcp1251': 'bg_BG.CP1251',
'bn_in': 'bn_IN.UTF-8',
'bokmal': 'nb_NO.ISO8859-1',
'bokml': 'nb_NO.ISO8859-1',
'br': 'br_FR.ISO8859-1',
'br_fr': 'br_FR.ISO8859-1',
'br_fr.iso88591': 'br_FR.ISO8859-1',
'br_fr.iso885914': 'br_FR.ISO8859-14',
'br_fr.iso885915': 'br_FR.ISO8859-15',
'br_fr.iso885915@euro': 'br_FR.ISO8859-15',
'br_fr.utf8@euro': 'br_FR.UTF-8',
'br_fr@euro': 'br_FR.ISO8859-15',
'bs': 'bs_BA.ISO8859-2',
'bs_ba': 'bs_BA.ISO8859-2',
'bs_ba.iso88592': 'bs_BA.ISO8859-2',
'bulgarian': 'bg_BG.CP1251',
'c': 'C',
'c-french': 'fr_CA.ISO8859-1',
'c-french.iso88591': 'fr_CA.ISO8859-1',
'c.en': 'C',
'c.iso88591': 'en_US.ISO8859-1',
'c_c': 'C',
'c_c.c': 'C',
'ca': 'ca_ES.ISO8859-1',
'ca_ad': 'ca_AD.ISO8859-1',
'ca_ad.iso88591': 'ca_AD.ISO8859-1',
'ca_ad.iso885915': 'ca_AD.ISO8859-15',
'ca_ad.iso885915@euro': 'ca_AD.ISO8859-15',
'ca_ad.utf8@euro': 'ca_AD.UTF-8',
'ca_ad@euro': 'ca_AD.ISO8859-15',
'ca_es': 'ca_ES.ISO8859-1',
'ca_es.iso88591': 'ca_ES.ISO8859-1',
'ca_es.iso885915': 'ca_ES.ISO8859-15',
'ca_es.iso885915@euro': 'ca_ES.ISO8859-15',
'ca_es.utf8@euro': 'ca_ES.UTF-8',
'ca_es@euro': 'ca_ES.ISO8859-15',
'ca_fr': 'ca_FR.ISO8859-1',
'ca_fr.iso88591': 'ca_FR.ISO8859-1',
'ca_fr.iso885915': 'ca_FR.ISO8859-15',
'ca_fr.iso885915@euro': 'ca_FR.ISO8859-15',
'ca_fr.utf8@euro': 'ca_FR.UTF-8',
'ca_fr@euro': 'ca_FR.ISO8859-15',
'ca_it': 'ca_IT.ISO8859-1',
'ca_it.iso88591': 'ca_IT.ISO8859-1',
'ca_it.iso885915': 'ca_IT.ISO8859-15',
'ca_it.iso885915@euro': 'ca_IT.ISO8859-15',
'ca_it.utf8@euro': 'ca_IT.UTF-8',
'ca_it@euro': 'ca_IT.ISO8859-15',
'catalan': 'ca_ES.ISO8859-1',
'cextend': 'en_US.ISO8859-1',
'cextend.en': 'en_US.ISO8859-1',
'chinese-s': 'zh_CN.eucCN',
'chinese-t': 'zh_TW.eucTW',
'croatian': 'hr_HR.ISO8859-2',
'cs': 'cs_CZ.ISO8859-2',
'cs_cs': 'cs_CZ.ISO8859-2',
'cs_cs.iso88592': 'cs_CS.ISO8859-2',
'cs_cz': 'cs_CZ.ISO8859-2',
'cs_cz.iso88592': 'cs_CZ.ISO8859-2',
'cy': 'cy_GB.ISO8859-1',
'cy_gb': 'cy_GB.ISO8859-1',
'cy_gb.iso88591': 'cy_GB.ISO8859-1',
'cy_gb.iso885914': 'cy_GB.ISO8859-14',
'cy_gb.iso885915': 'cy_GB.ISO8859-15',
'cy_gb@euro': 'cy_GB.ISO8859-15',
'cz': 'cs_CZ.ISO8859-2',
'cz_cz': 'cs_CZ.ISO8859-2',
'czech': 'cs_CZ.ISO8859-2',
'da': 'da_DK.ISO8859-1',
'da.iso885915': 'da_DK.ISO8859-15',
'da_dk': 'da_DK.ISO8859-1',
'da_dk.88591': 'da_DK.ISO8859-1',
'da_dk.885915': 'da_DK.ISO8859-15',
'da_dk.iso88591': 'da_DK.ISO8859-1',
'da_dk.iso885915': 'da_DK.ISO8859-15',
'da_dk@euro': 'da_DK.ISO8859-15',
'danish': 'da_DK.ISO8859-1',
'danish.iso88591': 'da_DK.ISO8859-1',
'dansk': 'da_DK.ISO8859-1',
'de': 'de_DE.ISO8859-1',
'de.iso885915': 'de_DE.ISO8859-15',
'de_at': 'de_AT.ISO8859-1',
'de_at.iso88591': 'de_AT.ISO8859-1',
'de_at.iso885915': 'de_AT.ISO8859-15',
'de_at.iso885915@euro': 'de_AT.ISO8859-15',
'de_at.utf8@euro': 'de_AT.UTF-8',
'de_at@euro': 'de_AT.ISO8859-15',
'de_be': 'de_BE.ISO8859-1',
'de_be.iso88591': 'de_BE.ISO8859-1',
'de_be.iso885915': 'de_BE.ISO8859-15',
'de_be.iso885915@euro': 'de_BE.ISO8859-15',
'de_be.utf8@euro': 'de_BE.UTF-8',
'de_be@euro': 'de_BE.ISO8859-15',
'de_ch': 'de_CH.ISO8859-1',
'de_ch.iso88591': 'de_CH.ISO8859-1',
'de_ch.iso885915': 'de_CH.ISO8859-15',
'de_ch@euro': 'de_CH.ISO8859-15',
'de_de': 'de_DE.ISO8859-1',
'de_de.88591': 'de_DE.ISO8859-1',
'de_de.885915': 'de_DE.ISO8859-15',
'de_de.885915@euro': 'de_DE.ISO8859-15',
'de_de.iso88591': 'de_DE.ISO8859-1',
'de_de.iso885915': 'de_DE.ISO8859-15',
'de_de.iso885915@euro': 'de_DE.ISO8859-15',
'de_de.utf8@euro': 'de_DE.UTF-8',
'de_de@euro': 'de_DE.ISO8859-15',
'de_lu': 'de_LU.ISO8859-1',
'de_lu.iso88591': 'de_LU.ISO8859-1',
'de_lu.iso885915': 'de_LU.ISO8859-15',
'de_lu.iso885915@euro': 'de_LU.ISO8859-15',
'de_lu.utf8@euro': 'de_LU.UTF-8',
'de_lu@euro': 'de_LU.ISO8859-15',
'deutsch': 'de_DE.ISO8859-1',
'dutch': 'nl_NL.ISO8859-1',
'dutch.iso88591': 'nl_BE.ISO8859-1',
'ee': 'ee_EE.ISO8859-4',
'ee_ee': 'ee_EE.ISO8859-4',
'ee_ee.iso88594': 'ee_EE.ISO8859-4',
'eesti': 'et_EE.ISO8859-1',
'el': 'el_GR.ISO8859-7',
'el_gr': 'el_GR.ISO8859-7',
'el_gr.iso88597': 'el_GR.ISO8859-7',
'el_gr@euro': 'el_GR.ISO8859-15',
'en': 'en_US.ISO8859-1',
'en.iso88591': 'en_US.ISO8859-1',
'en_au': 'en_AU.ISO8859-1',
'en_au.iso88591': 'en_AU.ISO8859-1',
'en_be': 'en_BE.ISO8859-1',
'en_be@euro': 'en_BE.ISO8859-15',
'en_bw': 'en_BW.ISO8859-1',
'en_bw.iso88591': 'en_BW.ISO8859-1',
'en_ca': 'en_CA.ISO8859-1',
'en_ca.iso88591': 'en_CA.ISO8859-1',
'en_gb': 'en_GB.ISO8859-1',
'en_gb.88591': 'en_GB.ISO8859-1',
'en_gb.iso88591': 'en_GB.ISO8859-1',
'en_gb.iso885915': 'en_GB.ISO8859-15',
'en_gb@euro': 'en_GB.ISO8859-15',
'en_hk': 'en_HK.ISO8859-1',
'en_hk.iso88591': 'en_HK.ISO8859-1',
'en_ie': 'en_IE.ISO8859-1',
'en_ie.iso88591': 'en_IE.ISO8859-1',
'en_ie.iso885915': 'en_IE.ISO8859-15',
'en_ie.iso885915@euro': 'en_IE.ISO8859-15',
'en_ie.utf8@euro': 'en_IE.UTF-8',
'en_ie@euro': 'en_IE.ISO8859-15',
'en_in': 'en_IN.ISO8859-1',
'en_nz': 'en_NZ.ISO8859-1',
'en_nz.iso88591': 'en_NZ.ISO8859-1',
'en_ph': 'en_PH.ISO8859-1',
'en_ph.iso88591': 'en_PH.ISO8859-1',
'en_sg': 'en_SG.ISO8859-1',
'en_sg.iso88591': 'en_SG.ISO8859-1',
'en_uk': 'en_GB.ISO8859-1',
'en_us': 'en_US.ISO8859-1',
'en_us.88591': 'en_US.ISO8859-1',
'en_us.885915': 'en_US.ISO8859-15',
'en_us.iso88591': 'en_US.ISO8859-1',
'en_us.iso885915': 'en_US.ISO8859-15',
'en_us.iso885915@euro': 'en_US.ISO8859-15',
'en_us@euro': 'en_US.ISO8859-15',
'en_us@euro@euro': 'en_US.ISO8859-15',
'en_za': 'en_ZA.ISO8859-1',
'en_za.88591': 'en_ZA.ISO8859-1',
'en_za.iso88591': 'en_ZA.ISO8859-1',
'en_za.iso885915': 'en_ZA.ISO8859-15',
'en_za@euro': 'en_ZA.ISO8859-15',
'en_zw': 'en_ZW.ISO8859-1',
'en_zw.iso88591': 'en_ZW.ISO8859-1',
'eng_gb': 'en_GB.ISO8859-1',
'eng_gb.8859': 'en_GB.ISO8859-1',
'english': 'en_EN.ISO8859-1',
'english.iso88591': 'en_EN.ISO8859-1',
'english_uk': 'en_GB.ISO8859-1',
'english_uk.8859': 'en_GB.ISO8859-1',
'english_united-states': 'en_US.ISO8859-1',
'english_united-states.437': 'C',
'english_us': 'en_US.ISO8859-1',
'english_us.8859': 'en_US.ISO8859-1',
'english_us.ascii': 'en_US.ISO8859-1',
'eo': 'eo_XX.ISO8859-3',
'eo_eo': 'eo_EO.ISO8859-3',
'eo_eo.iso88593': 'eo_EO.ISO8859-3',
'eo_xx': 'eo_XX.ISO8859-3',
'eo_xx.iso88593': 'eo_XX.ISO8859-3',
'es': 'es_ES.ISO8859-1',
'es_ar': 'es_AR.ISO8859-1',
'es_ar.iso88591': 'es_AR.ISO8859-1',
'es_bo': 'es_BO.ISO8859-1',
'es_bo.iso88591': 'es_BO.ISO8859-1',
'es_cl': 'es_CL.ISO8859-1',
'es_cl.iso88591': 'es_CL.ISO8859-1',
'es_co': 'es_CO.ISO8859-1',
'es_co.iso88591': 'es_CO.ISO8859-1',
'es_cr': 'es_CR.ISO8859-1',
'es_cr.iso88591': 'es_CR.ISO8859-1',
'es_do': 'es_DO.ISO8859-1',
'es_do.iso88591': 'es_DO.ISO8859-1',
'es_ec': 'es_EC.ISO8859-1',
'es_ec.iso88591': 'es_EC.ISO8859-1',
'es_es': 'es_ES.ISO8859-1',
'es_es.88591': 'es_ES.ISO8859-1',
'es_es.iso88591': 'es_ES.ISO8859-1',
'es_es.iso885915': 'es_ES.ISO8859-15',
'es_es.iso885915@euro': 'es_ES.ISO8859-15',
'es_es.utf8@euro': 'es_ES.UTF-8',
'es_es@euro': 'es_ES.ISO8859-15',
'es_gt': 'es_GT.ISO8859-1',
'es_gt.iso88591': 'es_GT.ISO8859-1',
'es_hn': 'es_HN.ISO8859-1',
'es_hn.iso88591': 'es_HN.ISO8859-1',
'es_mx': 'es_MX.ISO8859-1',
'es_mx.iso88591': 'es_MX.ISO8859-1',
'es_ni': 'es_NI.ISO8859-1',
'es_ni.iso88591': 'es_NI.ISO8859-1',
'es_pa': 'es_PA.ISO8859-1',
'es_pa.iso88591': 'es_PA.ISO8859-1',
'es_pa.iso885915': 'es_PA.ISO8859-15',
'es_pa@euro': 'es_PA.ISO8859-15',
'es_pe': 'es_PE.ISO8859-1',
'es_pe.iso88591': 'es_PE.ISO8859-1',
'es_pe.iso885915': 'es_PE.ISO8859-15',
'es_pe@euro': 'es_PE.ISO8859-15',
'es_pr': 'es_PR.ISO8859-1',
'es_pr.iso88591': 'es_PR.ISO8859-1',
'es_py': 'es_PY.ISO8859-1',
'es_py.iso88591': 'es_PY.ISO8859-1',
'es_py.iso885915': 'es_PY.ISO8859-15',
'es_py@euro': 'es_PY.ISO8859-15',
'es_sv': 'es_SV.ISO8859-1',
'es_sv.iso88591': 'es_SV.ISO8859-1',
'es_sv.iso885915': 'es_SV.ISO8859-15',
'es_sv@euro': 'es_SV.ISO8859-15',
'es_us': 'es_US.ISO8859-1',
'es_us.iso88591': 'es_US.ISO8859-1',
'es_uy': 'es_UY.ISO8859-1',
'es_uy.iso88591': 'es_UY.ISO8859-1',
'es_uy.iso885915': 'es_UY.ISO8859-15',
'es_uy@euro': 'es_UY.ISO8859-15',
'es_ve': 'es_VE.ISO8859-1',
'es_ve.iso88591': 'es_VE.ISO8859-1',
'es_ve.iso885915': 'es_VE.ISO8859-15',
'es_ve@euro': 'es_VE.ISO8859-15',
'estonian': 'et_EE.ISO8859-1',
'et': 'et_EE.ISO8859-15',
'et_ee': 'et_EE.ISO8859-15',
'et_ee.iso88591': 'et_EE.ISO8859-1',
'et_ee.iso885913': 'et_EE.ISO8859-13',
'et_ee.iso885915': 'et_EE.ISO8859-15',
'et_ee.iso88594': 'et_EE.ISO8859-4',
'et_ee@euro': 'et_EE.ISO8859-15',
'eu': 'eu_ES.ISO8859-1',
'eu_es': 'eu_ES.ISO8859-1',
'eu_es.iso88591': 'eu_ES.ISO8859-1',
'eu_es.iso885915': 'eu_ES.ISO8859-15',
'eu_es.iso885915@euro': 'eu_ES.ISO8859-15',
'eu_es.utf8@euro': 'eu_ES.UTF-8',
'eu_es@euro': 'eu_ES.ISO8859-15',
'fa': 'fa_IR.UTF-8',
'fa_ir': 'fa_IR.UTF-8',
'fa_ir.isiri3342': 'fa_IR.ISIRI-3342',
'fi': 'fi_FI.ISO8859-15',
'fi.iso885915': 'fi_FI.ISO8859-15',
'fi_fi': 'fi_FI.ISO8859-15',
'fi_fi.88591': 'fi_FI.ISO8859-1',
'fi_fi.iso88591': 'fi_FI.ISO8859-1',
'fi_fi.iso885915': 'fi_FI.ISO8859-15',
'fi_fi.iso885915@euro': 'fi_FI.ISO8859-15',
'fi_fi.utf8@euro': 'fi_FI.UTF-8',
'fi_fi@euro': 'fi_FI.ISO8859-15',
'finnish': 'fi_FI.ISO8859-1',
'finnish.iso88591': 'fi_FI.ISO8859-1',
'fo': 'fo_FO.ISO8859-1',
'fo_fo': 'fo_FO.ISO8859-1',
'fo_fo.iso88591': 'fo_FO.ISO8859-1',
'fo_fo.iso885915': 'fo_FO.ISO8859-15',
'fo_fo@euro': 'fo_FO.ISO8859-15',
'fr': 'fr_FR.ISO8859-1',
'fr.iso885915': 'fr_FR.ISO8859-15',
'fr_be': 'fr_BE.ISO8859-1',
'fr_be.88591': 'fr_BE.ISO8859-1',
'fr_be.iso88591': 'fr_BE.ISO8859-1',
'fr_be.iso885915': 'fr_BE.ISO8859-15',
'fr_be.iso885915@euro': 'fr_BE.ISO8859-15',
'fr_be.utf8@euro': 'fr_BE.UTF-8',
'fr_be@euro': 'fr_BE.ISO8859-15',
'fr_ca': 'fr_CA.ISO8859-1',
'fr_ca.88591': 'fr_CA.ISO8859-1',
'fr_ca.iso88591': 'fr_CA.ISO8859-1',
'fr_ca.iso885915': 'fr_CA.ISO8859-15',
'fr_ca@euro': 'fr_CA.ISO8859-15',
'fr_ch': 'fr_CH.ISO8859-1',
'fr_ch.88591': 'fr_CH.ISO8859-1',
'fr_ch.iso88591': 'fr_CH.ISO8859-1',
'fr_ch.iso885915': 'fr_CH.ISO8859-15',
'fr_ch@euro': 'fr_CH.ISO8859-15',
'fr_fr': 'fr_FR.ISO8859-1',
'fr_fr.88591': 'fr_FR.ISO8859-1',
'fr_fr.iso88591': 'fr_FR.ISO8859-1',
'fr_fr.iso885915': 'fr_FR.ISO8859-15',
'fr_fr.iso885915@euro': 'fr_FR.ISO8859-15',
'fr_fr.utf8@euro': 'fr_FR.UTF-8',
'fr_fr@euro': 'fr_FR.ISO8859-15',
'fr_lu': 'fr_LU.ISO8859-1',
'fr_lu.88591': 'fr_LU.ISO8859-1',
'fr_lu.iso88591': 'fr_LU.ISO8859-1',
'fr_lu.iso885915': 'fr_LU.ISO8859-15',
'fr_lu.iso885915@euro': 'fr_LU.ISO8859-15',
'fr_lu.utf8@euro': 'fr_LU.UTF-8',
'fr_lu@euro': 'fr_LU.ISO8859-15',
'franais': 'fr_FR.ISO8859-1',
'fre_fr': 'fr_FR.ISO8859-1',
'fre_fr.8859': 'fr_FR.ISO8859-1',
'french': 'fr_FR.ISO8859-1',
'french.iso88591': 'fr_CH.ISO8859-1',
'french_france': 'fr_FR.ISO8859-1',
'french_france.8859': 'fr_FR.ISO8859-1',
'ga': 'ga_IE.ISO8859-1',
'ga_ie': 'ga_IE.ISO8859-1',
'ga_ie.iso88591': 'ga_IE.ISO8859-1',
'ga_ie.iso885914': 'ga_IE.ISO8859-14',
'ga_ie.iso885915': 'ga_IE.ISO8859-15',
'ga_ie.iso885915@euro': 'ga_IE.ISO8859-15',
'ga_ie.utf8@euro': 'ga_IE.UTF-8',
'ga_ie@euro': 'ga_IE.ISO8859-15',
'galego': 'gl_ES.ISO8859-1',
'galician': 'gl_ES.ISO8859-1',
'gd': 'gd_GB.ISO8859-1',
'gd_gb': 'gd_GB.ISO8859-1',
'gd_gb.iso88591': 'gd_GB.ISO8859-1',
'gd_gb.iso885914': 'gd_GB.ISO8859-14',
'gd_gb.iso885915': 'gd_GB.ISO8859-15',
'gd_gb@euro': 'gd_GB.ISO8859-15',
'ger_de': 'de_DE.ISO8859-1',
'ger_de.8859': 'de_DE.ISO8859-1',
'german': 'de_DE.ISO8859-1',
'german.iso88591': 'de_CH.ISO8859-1',
'german_germany': 'de_DE.ISO8859-1',
'german_germany.8859': 'de_DE.ISO8859-1',
'gl': 'gl_ES.ISO8859-1',
'gl_es': 'gl_ES.ISO8859-1',
'gl_es.iso88591': 'gl_ES.ISO8859-1',
'gl_es.iso885915': 'gl_ES.ISO8859-15',
'gl_es.iso885915@euro': 'gl_ES.ISO8859-15',
'gl_es.utf8@euro': 'gl_ES.UTF-8',
'gl_es@euro': 'gl_ES.ISO8859-15',
'greek': 'el_GR.ISO8859-7',
'greek.iso88597': 'el_GR.ISO8859-7',
'gu_in': 'gu_IN.UTF-8',
'gv': 'gv_GB.ISO8859-1',
'gv_gb': 'gv_GB.ISO8859-1',
'gv_gb.iso88591': 'gv_GB.ISO8859-1',
'gv_gb.iso885914': 'gv_GB.ISO8859-14',
'gv_gb.iso885915': 'gv_GB.ISO8859-15',
'gv_gb@euro': 'gv_GB.ISO8859-15',
'he': 'he_IL.ISO8859-8',
'he_il': 'he_IL.ISO8859-8',
'he_il.cp1255': 'he_IL.CP1255',
'he_il.iso88598': 'he_IL.ISO8859-8',
'he_il.microsoftcp1255': 'he_IL.CP1255',
'hebrew': 'iw_IL.ISO8859-8',
'hebrew.iso88598': 'iw_IL.ISO8859-8',
'hi': 'hi_IN.ISCII-DEV',
'hi_in': 'hi_IN.ISCII-DEV',
'hi_in.isciidev': 'hi_IN.ISCII-DEV',
'hne': 'hne_IN.UTF-8',
'hr': 'hr_HR.ISO8859-2',
'hr_hr': 'hr_HR.ISO8859-2',
'hr_hr.iso88592': 'hr_HR.ISO8859-2',
'hrvatski': 'hr_HR.ISO8859-2',
'hu': 'hu_HU.ISO8859-2',
'hu_hu': 'hu_HU.ISO8859-2',
'hu_hu.iso88592': 'hu_HU.ISO8859-2',
'hungarian': 'hu_HU.ISO8859-2',
'icelandic': 'is_IS.ISO8859-1',
'icelandic.iso88591': 'is_IS.ISO8859-1',
'id': 'id_ID.ISO8859-1',
'id_id': 'id_ID.ISO8859-1',
'in': 'id_ID.ISO8859-1',
'in_id': 'id_ID.ISO8859-1',
'is': 'is_IS.ISO8859-1',
'is_is': 'is_IS.ISO8859-1',
'is_is.iso88591': 'is_IS.ISO8859-1',
'is_is.iso885915': 'is_IS.ISO8859-15',
'is_is@euro': 'is_IS.ISO8859-15',
'iso-8859-1': 'en_US.ISO8859-1',
'iso-8859-15': 'en_US.ISO8859-15',
'iso8859-1': 'en_US.ISO8859-1',
'iso8859-15': 'en_US.ISO8859-15',
'iso_8859_1': 'en_US.ISO8859-1',
'iso_8859_15': 'en_US.ISO8859-15',
'it': 'it_IT.ISO8859-1',
'it.iso885915': 'it_IT.ISO8859-15',
'it_ch': 'it_CH.ISO8859-1',
'it_ch.iso88591': 'it_CH.ISO8859-1',
'it_ch.iso885915': 'it_CH.ISO8859-15',
'it_ch@euro': 'it_CH.ISO8859-15',
'it_it': 'it_IT.ISO8859-1',
'it_it.88591': 'it_IT.ISO8859-1',
'it_it.iso88591': 'it_IT.ISO8859-1',
'it_it.iso885915': 'it_IT.ISO8859-15',
'it_it.iso885915@euro': 'it_IT.ISO8859-15',
'it_it.utf8@euro': 'it_IT.UTF-8',
'it_it@euro': 'it_IT.ISO8859-15',
'italian': 'it_IT.ISO8859-1',
'italian.iso88591': 'it_IT.ISO8859-1',
'iu': 'iu_CA.NUNACOM-8',
'iu_ca': 'iu_CA.NUNACOM-8',
'iu_ca.nunacom8': 'iu_CA.NUNACOM-8',
'iw': 'he_IL.ISO8859-8',
'iw_il': 'he_IL.ISO8859-8',
'iw_il.iso88598': 'he_IL.ISO8859-8',
'ja': 'ja_JP.eucJP',
'ja.jis': 'ja_JP.JIS7',
'ja.sjis': 'ja_JP.SJIS',
'ja_jp': 'ja_JP.eucJP',
'ja_jp.ajec': 'ja_JP.eucJP',
'ja_jp.euc': 'ja_JP.eucJP',
'ja_jp.eucjp': 'ja_JP.eucJP',
'ja_jp.iso-2022-jp': 'ja_JP.JIS7',
'ja_jp.iso2022jp': 'ja_JP.JIS7',
'ja_jp.jis': 'ja_JP.JIS7',
'ja_jp.jis7': 'ja_JP.JIS7',
'ja_jp.mscode': 'ja_JP.SJIS',
'ja_jp.pck': 'ja_JP.SJIS',
'ja_jp.sjis': 'ja_JP.SJIS',
'ja_jp.ujis': 'ja_JP.eucJP',
'japan': 'ja_JP.eucJP',
'japanese': 'ja_JP.eucJP',
'japanese-euc': 'ja_JP.eucJP',
'japanese.euc': 'ja_JP.eucJP',
'japanese.sjis': 'ja_JP.SJIS',
'jp_jp': 'ja_JP.eucJP',
'ka': 'ka_GE.GEORGIAN-ACADEMY',
'ka_ge': 'ka_GE.GEORGIAN-ACADEMY',
'ka_ge.georgianacademy': 'ka_GE.GEORGIAN-ACADEMY',
'ka_ge.georgianps': 'ka_GE.GEORGIAN-PS',
'ka_ge.georgianrs': 'ka_GE.GEORGIAN-ACADEMY',
'kl': 'kl_GL.ISO8859-1',
'kl_gl': 'kl_GL.ISO8859-1',
'kl_gl.iso88591': 'kl_GL.ISO8859-1',
'kl_gl.iso885915': 'kl_GL.ISO8859-15',
'kl_gl@euro': 'kl_GL.ISO8859-15',
'km_kh': 'km_KH.UTF-8',
'kn': 'kn_IN.UTF-8',
'kn_in': 'kn_IN.UTF-8',
'ko': 'ko_KR.eucKR',
'ko_kr': 'ko_KR.eucKR',
'ko_kr.euc': 'ko_KR.eucKR',
'ko_kr.euckr': 'ko_KR.eucKR',
'korean': 'ko_KR.eucKR',
'korean.euc': 'ko_KR.eucKR',
'ks': 'ks_IN.UTF-8',
'ks_in@devanagari': 'ks_IN@devanagari.UTF-8',
'kw': 'kw_GB.ISO8859-1',
'kw_gb': 'kw_GB.ISO8859-1',
'kw_gb.iso88591': 'kw_GB.ISO8859-1',
'kw_gb.iso885914': 'kw_GB.ISO8859-14',
'kw_gb.iso885915': 'kw_GB.ISO8859-15',
'kw_gb@euro': 'kw_GB.ISO8859-15',
'ky': 'ky_KG.UTF-8',
'ky_kg': 'ky_KG.UTF-8',
'lithuanian': 'lt_LT.ISO8859-13',
'lo': 'lo_LA.MULELAO-1',
'lo_la': 'lo_LA.MULELAO-1',
'lo_la.cp1133': 'lo_LA.IBM-CP1133',
'lo_la.ibmcp1133': 'lo_LA.IBM-CP1133',
'lo_la.mulelao1': 'lo_LA.MULELAO-1',
'lt': 'lt_LT.ISO8859-13',
'lt_lt': 'lt_LT.ISO8859-13',
'lt_lt.iso885913': 'lt_LT.ISO8859-13',
'lt_lt.iso88594': 'lt_LT.ISO8859-4',
'lv': 'lv_LV.ISO8859-13',
'lv_lv': 'lv_LV.ISO8859-13',
'lv_lv.iso885913': 'lv_LV.ISO8859-13',
'lv_lv.iso88594': 'lv_LV.ISO8859-4',
'mai': 'mai_IN.UTF-8',
'mi': 'mi_NZ.ISO8859-1',
'mi_nz': 'mi_NZ.ISO8859-1',
'mi_nz.iso88591': 'mi_NZ.ISO8859-1',
'mk': 'mk_MK.ISO8859-5',
'mk_mk': 'mk_MK.ISO8859-5',
'mk_mk.cp1251': 'mk_MK.CP1251',
'mk_mk.iso88595': 'mk_MK.ISO8859-5',
'mk_mk.microsoftcp1251': 'mk_MK.CP1251',
'ml': 'ml_IN.UTF-8',
'mr': 'mr_IN.UTF-8',
'mr_in': 'mr_IN.UTF-8',
'ms': 'ms_MY.ISO8859-1',
'ms_my': 'ms_MY.ISO8859-1',
'ms_my.iso88591': 'ms_MY.ISO8859-1',
'mt': 'mt_MT.ISO8859-3',
'mt_mt': 'mt_MT.ISO8859-3',
'mt_mt.iso88593': 'mt_MT.ISO8859-3',
'nb': 'nb_NO.ISO8859-1',
'nb_no': 'nb_NO.ISO8859-1',
'nb_no.88591': 'nb_NO.ISO8859-1',
'nb_no.iso88591': 'nb_NO.ISO8859-1',
'nb_no.iso885915': 'nb_NO.ISO8859-15',
'nb_no@euro': 'nb_NO.ISO8859-15',
'nl': 'nl_NL.ISO8859-1',
'nl.iso885915': 'nl_NL.ISO8859-15',
'nl_be': 'nl_BE.ISO8859-1',
'nl_be.88591': 'nl_BE.ISO8859-1',
'nl_be.iso88591': 'nl_BE.ISO8859-1',
'nl_be.iso885915': 'nl_BE.ISO8859-15',
'nl_be.iso885915@euro': 'nl_BE.ISO8859-15',
'nl_be.utf8@euro': 'nl_BE.UTF-8',
'nl_be@euro': 'nl_BE.ISO8859-15',
'nl_nl': 'nl_NL.ISO8859-1',
'nl_nl.88591': 'nl_NL.ISO8859-1',
'nl_nl.iso88591': 'nl_NL.ISO8859-1',
'nl_nl.iso885915': 'nl_NL.ISO8859-15',
'nl_nl.iso885915@euro': 'nl_NL.ISO8859-15',
'nl_nl.utf8@euro': 'nl_NL.UTF-8',
'nl_nl@euro': 'nl_NL.ISO8859-15',
'nn': 'nn_NO.ISO8859-1',
'nn_no': 'nn_NO.ISO8859-1',
'nn_no.88591': 'nn_NO.ISO8859-1',
'nn_no.iso88591': 'nn_NO.ISO8859-1',
'nn_no.iso885915': 'nn_NO.ISO8859-15',
'nn_no@euro': 'nn_NO.ISO8859-15',
'no': 'no_NO.ISO8859-1',
'no@nynorsk': 'ny_NO.ISO8859-1',
'no_no': 'no_NO.ISO8859-1',
'no_no.88591': 'no_NO.ISO8859-1',
'no_no.iso88591': 'no_NO.ISO8859-1',
'no_no.iso885915': 'no_NO.ISO8859-15',
'no_no.iso88591@bokmal': 'no_NO.ISO8859-1',
'no_no.iso88591@nynorsk': 'no_NO.ISO8859-1',
'no_no@euro': 'no_NO.ISO8859-15',
'norwegian': 'no_NO.ISO8859-1',
'norwegian.iso88591': 'no_NO.ISO8859-1',
'nr': 'nr_ZA.ISO8859-1',
'nr_za': 'nr_ZA.ISO8859-1',
'nr_za.iso88591': 'nr_ZA.ISO8859-1',
'nso': 'nso_ZA.ISO8859-15',
'nso_za': 'nso_ZA.ISO8859-15',
'nso_za.iso885915': 'nso_ZA.ISO8859-15',
'ny': 'ny_NO.ISO8859-1',
'ny_no': 'ny_NO.ISO8859-1',
'ny_no.88591': 'ny_NO.ISO8859-1',
'ny_no.iso88591': 'ny_NO.ISO8859-1',
'ny_no.iso885915': 'ny_NO.ISO8859-15',
'ny_no@euro': 'ny_NO.ISO8859-15',
'nynorsk': 'nn_NO.ISO8859-1',
'oc': 'oc_FR.ISO8859-1',
'oc_fr': 'oc_FR.ISO8859-1',
'oc_fr.iso88591': 'oc_FR.ISO8859-1',
'oc_fr.iso885915': 'oc_FR.ISO8859-15',
'oc_fr@euro': 'oc_FR.ISO8859-15',
'or': 'or_IN.UTF-8',
'pa': 'pa_IN.UTF-8',
'pa_in': 'pa_IN.UTF-8',
'pd': 'pd_US.ISO8859-1',
'pd_de': 'pd_DE.ISO8859-1',
'pd_de.iso88591': 'pd_DE.ISO8859-1',
'pd_de.iso885915': 'pd_DE.ISO8859-15',
'pd_de@euro': 'pd_DE.ISO8859-15',
'pd_us': 'pd_US.ISO8859-1',
'pd_us.iso88591': 'pd_US.ISO8859-1',
'pd_us.iso885915': 'pd_US.ISO8859-15',
'pd_us@euro': 'pd_US.ISO8859-15',
'ph': 'ph_PH.ISO8859-1',
'ph_ph': 'ph_PH.ISO8859-1',
'ph_ph.iso88591': 'ph_PH.ISO8859-1',
'pl': 'pl_PL.ISO8859-2',
'pl_pl': 'pl_PL.ISO8859-2',
'pl_pl.iso88592': 'pl_PL.ISO8859-2',
'polish': 'pl_PL.ISO8859-2',
'portuguese': 'pt_PT.ISO8859-1',
'portuguese.iso88591': 'pt_PT.ISO8859-1',
'portuguese_brazil': 'pt_BR.ISO8859-1',
'portuguese_brazil.8859': 'pt_BR.ISO8859-1',
'posix': 'C',
'posix-utf2': 'C',
'pp': 'pp_AN.ISO8859-1',
'pp_an': 'pp_AN.ISO8859-1',
'pp_an.iso88591': 'pp_AN.ISO8859-1',
'pt': 'pt_PT.ISO8859-1',
'pt.iso885915': 'pt_PT.ISO8859-15',
'pt_br': 'pt_BR.ISO8859-1',
'pt_br.88591': 'pt_BR.ISO8859-1',
'pt_br.iso88591': 'pt_BR.ISO8859-1',
'pt_br.iso885915': 'pt_BR.ISO8859-15',
'pt_br@euro': 'pt_BR.ISO8859-15',
'pt_pt': 'pt_PT.ISO8859-1',
'pt_pt.88591': 'pt_PT.ISO8859-1',
'pt_pt.iso88591': 'pt_PT.ISO8859-1',
'pt_pt.iso885915': 'pt_PT.ISO8859-15',
'pt_pt.iso885915@euro': 'pt_PT.ISO8859-15',
'pt_pt.utf8@euro': 'pt_PT.UTF-8',
'pt_pt@euro': 'pt_PT.ISO8859-15',
'ro': 'ro_RO.ISO8859-2',
'ro_ro': 'ro_RO.ISO8859-2',
'ro_ro.iso88592': 'ro_RO.ISO8859-2',
'romanian': 'ro_RO.ISO8859-2',
'ru': 'ru_RU.UTF-8',
'ru.koi8r': 'ru_RU.KOI8-R',
'ru_ru': 'ru_RU.UTF-8',
'ru_ru.cp1251': 'ru_RU.CP1251',
'ru_ru.iso88595': 'ru_RU.ISO8859-5',
'ru_ru.koi8r': 'ru_RU.KOI8-R',
'ru_ru.microsoftcp1251': 'ru_RU.CP1251',
'ru_ua': 'ru_UA.KOI8-U',
'ru_ua.cp1251': 'ru_UA.CP1251',
'ru_ua.koi8u': 'ru_UA.KOI8-U',
'ru_ua.microsoftcp1251': 'ru_UA.CP1251',
'rumanian': 'ro_RO.ISO8859-2',
'russian': 'ru_RU.ISO8859-5',
'rw': 'rw_RW.ISO8859-1',
'rw_rw': 'rw_RW.ISO8859-1',
'rw_rw.iso88591': 'rw_RW.ISO8859-1',
'sd': 'sd_IN@devanagari.UTF-8',
'se_no': 'se_NO.UTF-8',
'serbocroatian': 'sr_RS.UTF-8@latin',
'sh': 'sr_RS.UTF-8@latin',
'sh_ba.iso88592@bosnia': 'sr_CS.ISO8859-2',
'sh_hr': 'sh_HR.ISO8859-2',
'sh_hr.iso88592': 'hr_HR.ISO8859-2',
'sh_sp': 'sr_CS.ISO8859-2',
'sh_yu': 'sr_RS.UTF-8@latin',
'si': 'si_LK.UTF-8',
'si_lk': 'si_LK.UTF-8',
'sinhala': 'si_LK.UTF-8',
'sk': 'sk_SK.ISO8859-2',
'sk_sk': 'sk_SK.ISO8859-2',
'sk_sk.iso88592': 'sk_SK.ISO8859-2',
'sl': 'sl_SI.ISO8859-2',
'sl_cs': 'sl_CS.ISO8859-2',
'sl_si': 'sl_SI.ISO8859-2',
'sl_si.iso88592': 'sl_SI.ISO8859-2',
'slovak': 'sk_SK.ISO8859-2',
'slovene': 'sl_SI.ISO8859-2',
'slovenian': 'sl_SI.ISO8859-2',
'sp': 'sr_CS.ISO8859-5',
'sp_yu': 'sr_CS.ISO8859-5',
'spanish': 'es_ES.ISO8859-1',
'spanish.iso88591': 'es_ES.ISO8859-1',
'spanish_spain': 'es_ES.ISO8859-1',
'spanish_spain.8859': 'es_ES.ISO8859-1',
'sq': 'sq_AL.ISO8859-2',
'sq_al': 'sq_AL.ISO8859-2',
'sq_al.iso88592': 'sq_AL.ISO8859-2',
'sr': 'sr_RS.UTF-8',
'sr@cyrillic': 'sr_RS.UTF-8',
'sr@latin': 'sr_RS.UTF-8@latin',
'sr@latn': 'sr_RS.UTF-8@latin',
'sr_cs': 'sr_RS.UTF-8',
'sr_cs.iso88592': 'sr_CS.ISO8859-2',
'sr_cs.iso88592@latn': 'sr_CS.ISO8859-2',
'sr_cs.iso88595': 'sr_CS.ISO8859-5',
'sr_cs.utf8@latn': 'sr_RS.UTF-8@latin',
'sr_cs@latn': 'sr_RS.UTF-8@latin',
'sr_me': 'sr_ME.UTF-8',
'sr_rs': 'sr_RS.UTF-8',
'sr_rs.utf8@latn': 'sr_RS.UTF-8@latin',
'sr_rs@latin': 'sr_RS.UTF-8@latin',
'sr_rs@latn': 'sr_RS.UTF-8@latin',
'sr_sp': 'sr_CS.ISO8859-2',
'sr_yu': 'sr_RS.UTF-8@latin',
'sr_yu.cp1251@cyrillic': 'sr_CS.CP1251',
'sr_yu.iso88592': 'sr_CS.ISO8859-2',
'sr_yu.iso88595': 'sr_CS.ISO8859-5',
'sr_yu.iso88595@cyrillic': 'sr_CS.ISO8859-5',
'sr_yu.microsoftcp1251@cyrillic': 'sr_CS.CP1251',
'sr_yu.utf8@cyrillic': 'sr_RS.UTF-8',
'sr_yu@cyrillic': 'sr_RS.UTF-8',
'ss': 'ss_ZA.ISO8859-1',
'ss_za': 'ss_ZA.ISO8859-1',
'ss_za.iso88591': 'ss_ZA.ISO8859-1',
'st': 'st_ZA.ISO8859-1',
'st_za': 'st_ZA.ISO8859-1',
'st_za.iso88591': 'st_ZA.ISO8859-1',
'sv': 'sv_SE.ISO8859-1',
'sv.iso885915': 'sv_SE.ISO8859-15',
'sv_fi': 'sv_FI.ISO8859-1',
'sv_fi.iso88591': 'sv_FI.ISO8859-1',
'sv_fi.iso885915': 'sv_FI.ISO8859-15',
'sv_fi.iso885915@euro': 'sv_FI.ISO8859-15',
'sv_fi.utf8@euro': 'sv_FI.UTF-8',
'sv_fi@euro': 'sv_FI.ISO8859-15',
'sv_se': 'sv_SE.ISO8859-1',
'sv_se.88591': 'sv_SE.ISO8859-1',
'sv_se.iso88591': 'sv_SE.ISO8859-1',
'sv_se.iso885915': 'sv_SE.ISO8859-15',
'sv_se@euro': 'sv_SE.ISO8859-15',
'swedish': 'sv_SE.ISO8859-1',
'swedish.iso88591': 'sv_SE.ISO8859-1',
'ta': 'ta_IN.TSCII-0',
'ta_in': 'ta_IN.TSCII-0',
'ta_in.tscii': 'ta_IN.TSCII-0',
'ta_in.tscii0': 'ta_IN.TSCII-0',
'te': 'te_IN.UTF-8',
'tg': 'tg_TJ.KOI8-C',
'tg_tj': 'tg_TJ.KOI8-C',
'tg_tj.koi8c': 'tg_TJ.KOI8-C',
'th': 'th_TH.ISO8859-11',
'th_th': 'th_TH.ISO8859-11',
'th_th.iso885911': 'th_TH.ISO8859-11',
'th_th.tactis': 'th_TH.TIS620',
'th_th.tis620': 'th_TH.TIS620',
'thai': 'th_TH.ISO8859-11',
'tl': 'tl_PH.ISO8859-1',
'tl_ph': 'tl_PH.ISO8859-1',
'tl_ph.iso88591': 'tl_PH.ISO8859-1',
'tn': 'tn_ZA.ISO8859-15',
'tn_za': 'tn_ZA.ISO8859-15',
'tn_za.iso885915': 'tn_ZA.ISO8859-15',
'tr': 'tr_TR.ISO8859-9',
'tr_tr': 'tr_TR.ISO8859-9',
'tr_tr.iso88599': 'tr_TR.ISO8859-9',
'ts': 'ts_ZA.ISO8859-1',
'ts_za': 'ts_ZA.ISO8859-1',
'ts_za.iso88591': 'ts_ZA.ISO8859-1',
'tt': 'tt_RU.TATAR-CYR',
'tt_ru': 'tt_RU.TATAR-CYR',
'tt_ru.koi8c': 'tt_RU.KOI8-C',
'tt_ru.tatarcyr': 'tt_RU.TATAR-CYR',
'turkish': 'tr_TR.ISO8859-9',
'turkish.iso88599': 'tr_TR.ISO8859-9',
'uk': 'uk_UA.KOI8-U',
'uk_ua': 'uk_UA.KOI8-U',
'uk_ua.cp1251': 'uk_UA.CP1251',
'uk_ua.iso88595': 'uk_UA.ISO8859-5',
'uk_ua.koi8u': 'uk_UA.KOI8-U',
'uk_ua.microsoftcp1251': 'uk_UA.CP1251',
'univ': 'en_US.utf',
'universal': 'en_US.utf',
'universal.utf8@ucs4': 'en_US.UTF-8',
'ur': 'ur_PK.CP1256',
'ur_pk': 'ur_PK.CP1256',
'ur_pk.cp1256': 'ur_PK.CP1256',
'ur_pk.microsoftcp1256': 'ur_PK.CP1256',
'uz': 'uz_UZ.UTF-8',
'uz_uz': 'uz_UZ.UTF-8',
'uz_uz.iso88591': 'uz_UZ.ISO8859-1',
'uz_uz.utf8@cyrillic': 'uz_UZ.UTF-8',
'uz_uz@cyrillic': 'uz_UZ.UTF-8',
've': 've_ZA.UTF-8',
've_za': 've_ZA.UTF-8',
'vi': 'vi_VN.TCVN',
'vi_vn': 'vi_VN.TCVN',
'vi_vn.tcvn': 'vi_VN.TCVN',
'vi_vn.tcvn5712': 'vi_VN.TCVN',
'vi_vn.viscii': 'vi_VN.VISCII',
'vi_vn.viscii111': 'vi_VN.VISCII',
'wa': 'wa_BE.ISO8859-1',
'wa_be': 'wa_BE.ISO8859-1',
'wa_be.iso88591': 'wa_BE.ISO8859-1',
'wa_be.iso885915': 'wa_BE.ISO8859-15',
'wa_be.iso885915@euro': 'wa_BE.ISO8859-15',
'wa_be@euro': 'wa_BE.ISO8859-15',
'xh': 'xh_ZA.ISO8859-1',
'xh_za': 'xh_ZA.ISO8859-1',
'xh_za.iso88591': 'xh_ZA.ISO8859-1',
'yi': 'yi_US.CP1255',
'yi_us': 'yi_US.CP1255',
'yi_us.cp1255': 'yi_US.CP1255',
'yi_us.microsoftcp1255': 'yi_US.CP1255',
'zh': 'zh_CN.eucCN',
'zh_cn': 'zh_CN.gb2312',
'zh_cn.big5': 'zh_TW.big5',
'zh_cn.euc': 'zh_CN.eucCN',
'zh_cn.gb18030': 'zh_CN.gb18030',
'zh_cn.gb2312': 'zh_CN.gb2312',
'zh_cn.gbk': 'zh_CN.gbk',
'zh_hk': 'zh_HK.big5hkscs',
'zh_hk.big5': 'zh_HK.big5',
'zh_hk.big5hk': 'zh_HK.big5hkscs',
'zh_hk.big5hkscs': 'zh_HK.big5hkscs',
'zh_tw': 'zh_TW.big5',
'zh_tw.big5': 'zh_TW.big5',
'zh_tw.euc': 'zh_TW.eucTW',
'zh_tw.euctw': 'zh_TW.eucTW',
'zu': 'zu_ZA.ISO8859-1',
'zu_za': 'zu_ZA.ISO8859-1',
'zu_za.iso88591': 'zu_ZA.ISO8859-1'
}
windows_locale = {1078: 'af_ZA',
1052: 'sq_AL',
1156: 'gsw_FR',
1118: 'am_ET',
1025: 'ar_SA',
2049: 'ar_IQ',
3073: 'ar_EG',
4097: 'ar_LY',
5121: 'ar_DZ',
6145: 'ar_MA',
7169: 'ar_TN',
8193: 'ar_OM',
9217: 'ar_YE',
10241: 'ar_SY',
11265: 'ar_JO',
12289: 'ar_LB',
13313: 'ar_KW',
14337: 'ar_AE',
15361: 'ar_BH',
16385: 'ar_QA',
1067: 'hy_AM',
1101: 'as_IN',
1068: 'az_AZ',
2092: 'az_AZ',
1133: 'ba_RU',
1069: 'eu_ES',
1059: 'be_BY',
1093: 'bn_IN',
8218: 'bs_BA',
5146: 'bs_BA',
1150: 'br_FR',
1026: 'bg_BG',
1027: 'ca_ES',
4: 'zh_CHS',
1028: 'zh_TW',
2052: 'zh_CN',
3076: 'zh_HK',
4100: 'zh_SG',
5124: 'zh_MO',
31748: 'zh_CHT',
1155: 'co_FR',
1050: 'hr_HR',
4122: 'hr_BA',
1029: 'cs_CZ',
1030: 'da_DK',
1164: 'gbz_AF',
1125: 'div_MV',
1043: 'nl_NL',
2067: 'nl_BE',
1033: 'en_US',
2057: 'en_GB',
3081: 'en_AU',
4105: 'en_CA',
5129: 'en_NZ',
6153: 'en_IE',
7177: 'en_ZA',
8201: 'en_JA',
9225: 'en_CB',
10249: 'en_BZ',
11273: 'en_TT',
12297: 'en_ZW',
13321: 'en_PH',
16393: 'en_IN',
17417: 'en_MY',
18441: 'en_IN',
1061: 'et_EE',
1080: 'fo_FO',
1124: 'fil_PH',
1035: 'fi_FI',
1036: 'fr_FR',
2060: 'fr_BE',
3084: 'fr_CA',
4108: 'fr_CH',
5132: 'fr_LU',
6156: 'fr_MC',
1122: 'fy_NL',
1110: 'gl_ES',
1079: 'ka_GE',
1031: 'de_DE',
2055: 'de_CH',
3079: 'de_AT',
4103: 'de_LU',
5127: 'de_LI',
1032: 'el_GR',
1135: 'kl_GL',
1095: 'gu_IN',
1128: 'ha_NG',
1037: 'he_IL',
1081: 'hi_IN',
1038: 'hu_HU',
1039: 'is_IS',
1057: 'id_ID',
1117: 'iu_CA',
2141: 'iu_CA',
2108: 'ga_IE',
1040: 'it_IT',
2064: 'it_CH',
1041: 'ja_JP',
1099: 'kn_IN',
1087: 'kk_KZ',
1107: 'kh_KH',
1158: 'qut_GT',
1159: 'rw_RW',
1111: 'kok_IN',
1042: 'ko_KR',
1088: 'ky_KG',
1108: 'lo_LA',
1062: 'lv_LV',
1063: 'lt_LT',
2094: 'dsb_DE',
1134: 'lb_LU',
1071: 'mk_MK',
1086: 'ms_MY',
2110: 'ms_BN',
1100: 'ml_IN',
1082: 'mt_MT',
1153: 'mi_NZ',
1146: 'arn_CL',
1102: 'mr_IN',
1148: 'moh_CA',
1104: 'mn_MN',
2128: 'mn_CN',
1121: 'ne_NP',
1044: 'nb_NO',
2068: 'nn_NO',
1154: 'oc_FR',
1096: 'or_IN',
1123: 'ps_AF',
1065: 'fa_IR',
1045: 'pl_PL',
1046: 'pt_BR',
2070: 'pt_PT',
1094: 'pa_IN',
1131: 'quz_BO',
2155: 'quz_EC',
3179: 'quz_PE',
1048: 'ro_RO',
1047: 'rm_CH',
1049: 'ru_RU',
9275: 'smn_FI',
4155: 'smj_NO',
5179: 'smj_SE',
1083: 'se_NO',
2107: 'se_SE',
3131: 'se_FI',
8251: 'sms_FI',
6203: 'sma_NO',
7227: 'sma_SE',
1103: 'sa_IN',
3098: 'sr_SP',
7194: 'sr_BA',
2074: 'sr_SP',
6170: 'sr_BA',
1115: 'si_LK',
1132: 'ns_ZA',
1074: 'tn_ZA',
1051: 'sk_SK',
1060: 'sl_SI',
1034: 'es_ES',
2058: 'es_MX',
3082: 'es_ES',
4106: 'es_GT',
5130: 'es_CR',
6154: 'es_PA',
7178: 'es_DO',
8202: 'es_VE',
9226: 'es_CO',
10250: 'es_PE',
11274: 'es_AR',
12298: 'es_EC',
13322: 'es_CL',
14346: 'es_UR',
15370: 'es_PY',
16394: 'es_BO',
17418: 'es_SV',
18442: 'es_HN',
19466: 'es_NI',
20490: 'es_PR',
21514: 'es_US',
1089: 'sw_KE',
1053: 'sv_SE',
2077: 'sv_FI',
1114: 'syr_SY',
1064: 'tg_TJ',
2143: 'tmz_DZ',
1097: 'ta_IN',
1092: 'tt_RU',
1098: 'te_IN',
1054: 'th_TH',
2129: 'bo_BT',
1105: 'bo_CN',
1055: 'tr_TR',
1090: 'tk_TM',
1152: 'ug_CN',
1058: 'uk_UA',
1070: 'wen_DE',
1056: 'ur_PK',
2080: 'ur_IN',
1091: 'uz_UZ',
2115: 'uz_UZ',
1066: 'vi_VN',
1106: 'cy_GB',
1160: 'wo_SN',
1076: 'xh_ZA',
1157: 'sah_RU',
1144: 'ii_CN',
1130: 'yo_NG',
1077: 'zu_ZA'
}
def _print_locale():
""" Test function.
"""
categories = {}
def _init_categories(categories=categories):
for k, v in globals().items():
if k[:3] == 'LC_':
categories[k] = v
_init_categories()
del categories['LC_ALL']
print 'Locale defaults as determined by getdefaultlocale():'
print '-' * 72
lang, enc = getdefaultlocale()
print 'Language: ', lang or '(undefined)'
print 'Encoding: ', enc or '(undefined)'
print
print 'Locale settings on startup:'
print '-' * 72
for name, category in categories.items():
print name, '...'
lang, enc = getlocale(category)
print ' Language: ', lang or '(undefined)'
print ' Encoding: ', enc or '(undefined)'
print
print
print 'Locale settings after calling resetlocale():'
print '-' * 72
resetlocale()
for name, category in categories.items():
print name, '...'
lang, enc = getlocale(category)
print ' Language: ', lang or '(undefined)'
print ' Encoding: ', enc or '(undefined)'
print
try:
setlocale(LC_ALL, '')
except:
print 'NOTE:'
print 'setlocale(LC_ALL, "") does not support the default locale'
print 'given in the OS environment variables.'
else:
print
print 'Locale settings after calling setlocale(LC_ALL, ""):'
print '-' * 72
for name, category in categories.items():
print name, '...'
lang, enc = getlocale(category)
print ' Language: ', lang or '(undefined)'
print ' Encoding: ', enc or '(undefined)'
print
try:
LC_MESSAGES
except NameError:
pass
else:
__all__.append('LC_MESSAGES')
if __name__ == '__main__':
print 'Locale aliasing:'
print
_print_locale()
print
print 'Number formatting:'
print
_test()
|
"""Utilities for working with data.
These are intended to be used predominantly by the filing cabinet
in order to read and write datasets appropriately.
"""
from typing import Any
def _get_handlers(location:str=""):
"""Returns available file handlers.
This checks for a handlers folder at this location and will
scrape the handlers available, returning them, along with all
the handlers in this package, as a dictionary keyed by the file
type.
Parameters
----------
location: str = ""
The location where *extra* handlers should be examined for.
Note that any malformed handlers will simply be skipped.
"""
|
from __future__ import print_function
import yaml
import subprocess
import re
import argparse
from keras.models import model_from_yaml
from betago.model import KerasBot
from betago.processor import SevenPlaneProcessor
from betago.gtp.board import gtp_position_to_coords, coords_to_gtp_position
argparser = argparse.ArgumentParser()
argparser.add_argument('handicap', type=int, nargs=1)
argparser.add_argument('output_sgf', nargs='?', default='output.sgf')
args = argparser.parse_args()
processor = SevenPlaneProcessor()
bot_name = '100_epochs_cnn'
model_file = 'model_zoo/' + bot_name + '_bot.yml'
weight_file = 'model_zoo/' + bot_name + '_weights.hd5'
with open(model_file, 'r') as f:
yml = yaml.load(f)
model = model_from_yaml(yaml.dump(yml))
# Note that in Keras 1.0 we have to recompile the model explicitly
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
model.load_weights(weight_file)
bot = KerasBot(model=model, processor=processor)
pachi_cmd = ["pachi"]
p = subprocess.Popen(pachi_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def send_command(gtpStream, cmd):
gtpStream.stdin.write(cmd)
print(cmd.strip())
def get_response(gtpStream):
succeeded = False
result = ''
while succeeded == False:
line = gtpStream.stdout.readline()
if line[0] == '=':
succeeded = True
line = line.strip()
print("Response is: " + line)
result = re.sub('^= ?', '', line)
return result
letters = 'abcdefghijklmnopqrs'
def sgfCoord(coords):
row, col = coords
return letters[col] + letters[18 - row]
# deal with handicap. Parse multi-stone response to see where it was placed.
handicap = args.handicap[0]
send_command(p, "boardsize 19\n")
get_response(p)
sgf = "(;GM[1]FF[4]CA[UTF-8]SZ[19]RU[Chinese]\n"
if(handicap == 0):
send_command(p, "komi 7.5\n")
get_response(p)
sgf = sgf + "KM[7.5]\n"
else:
send_command(p, "fixed_handicap " + str(handicap) + "\n")
stones = get_response(p)
sgf_handicap = "HA[" + str(handicap) + "]AB"
for pos in stones.split(" "):
move = gtp_position_to_coords(pos)
bot.apply_move('b', move)
sgf_handicap = sgf_handicap + "[" + sgfCoord(move) + "]"
sgf = sgf + sgf_handicap + "\n"
passes = 0
our_color = 'b' # assume we are black for now
their_color = 'w' # the opponent therefore plays white
last_color = 'w'
if(handicap > 1):
last_color = 'b'
colors = {}
colors['w'] = 'white'
colors['b'] = 'black'
while passes < 2:
if(last_color != our_color):
move = bot.select_move(our_color) # applies the move too
if move is None:
send_command(p, "play " + colors[our_color] + " pass\n")
sgf = sgf + ";" + our_color.upper() + "[]\n"
passes = passes + 1
else:
pos = coords_to_gtp_position(move)
send_command(p, "play " + colors[our_color] + " " + pos + "\n")
sgf = sgf + ";" + our_color.upper() + "[" + sgfCoord(move) + "]\n"
passes = 0
resp = get_response(p)
last_color = our_color
else:
send_command(p, "genmove " + colors[their_color] + "\n")
pos = get_response(p)
if(pos == 'resign'):
passes = 2
elif(pos == 'pass'):
sgf = sgf + ";" + their_color.upper() + "[]\n"
passes = passes + 1
else:
move = gtp_position_to_coords(pos)
bot.apply_move(their_color, move)
sgf = sgf + ";" + their_color.upper() + "[" + sgfCoord(move) + "]\n"
passes = 0
last_color = their_color
sgf = sgf + ")\n"
with open(args.output_sgf, 'w') as out_h:
out_h.write(sgf)
|
import os
import sys
from time import sleep
import threading
import window
import comunication
import DisplayActions
debugMode = True
def main():
    print("starting...", end='')
    state = True
    print("[" + ("OK" if state else "ERROR") + "]")
    # The thread object must be created before it can be started.
    winThread = threading.Thread(target=window.start)
    winThread.start()
    comunication.start()
if __name__ == '__main__':
main()
|
# Write a script that generates daily edit AND view counts for Panama Papers over its first 30 days of existence, and prints them to a CSV or TSV file in reverse-chronological order. Your file should have three columns with the headers "date", "edits" and "views".
import csv
import json
import requests
import operator
from urllib.parse import quote
ENDPOINT = 'https://en.wikipedia.org/w/api.php'
parameters = { 'action' : 'query',
'prop' : 'revisions',
'titles' : 'Panama_Papers',
'format' : 'json',
'rvdir' : 'newer',
'rvstart': '2016-04-03T17:59:05Z',
'rvend' : '2016-05-03T00:00:00Z',
'rvlimit' : 500,
'continue' : '' }
days = {}
done = False
while not done:
wp_call = requests.get(ENDPOINT, params=parameters)
response = wp_call.json()
pages = response['query']['pages']
for page_id in pages:
page = pages[page_id]
revisions = page['revisions']
for rev in revisions:
revday = rev['timestamp'][:10].replace("-","")
revhour = rev['timestamp'][11:13]
if revday in days.keys():
if revhour in days[revday].keys():
days[revday][revhour] += 1
else:
days[revday][revhour] = 1
else:
days[revday] = {}
days[revday][revhour] = 1
if 'continue' in response:
parameters['continue'] = response['continue']['continue']
parameters['rvcontinue'] = response['continue']['rvcontinue']
else:
done = True
# print(days)
for dkey, dval in days.items():
daily_edits = 0
for hkey, hval in dval.items():
daily_edits += hval
days[dkey]['total'] = daily_edits
# print(days)
ENDPOINT = 'https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/'
wp_code = 'en.wikipedia'
access = 'all-access'
agents = 'all-agents'
page_title = 'Panama Papers'
period = 'daily'
start_date = '20160403'
end_date = '20160502'
wp_call = requests.get(ENDPOINT + wp_code + '/' + access + '/' + agents + '/' + quote(page_title, safe='') + '/' + period + '/' + start_date + '/' + end_date)
response = wp_call.json()
# print(json.dumps(response, indent=4))
for dv in response['items']:
# print(dv['timestamp'])
ts = dv['timestamp'][:-2]
if ts in days.keys():
days[ts]['views'] = dv['views']
# print(json.dumps(days, indent=4))
days_sorted = sorted(days.items(), key=operator.itemgetter(0), reverse=True)
print(days_sorted)
with open('pp30days_views_edits.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(('date', 'edits', 'views'))
for n in days_sorted:
        writer.writerow((n[0], n[1]['total'], n[1].get('views', 0)))  # .get() guards days with edits but no pageview data
|
from unittest.mock import MagicMock
from twisted.web.resource import NoResource
from txweb.resources import RoutingResource
from txweb import App
from txweb.http_codes import Unrenderable
from txweb.resources import ViewClassResource
from unittest.mock import sentinel
import typing as T
from .helper import RequestRetval
import pytest
def test_instantiates_without_error():
class FakeSite:
pass
fake_site = FakeSite()
resource = RoutingResource(fake_site)
def test_how_head_requests_are_handled(dummy_request:RequestRetval):
app = App(__name__)
@app.add("/foo", methods=["POST"])
def handle_foo(request):
return b"123"
dummy_request.request.site = app.site
dummy_request.channel.site = app.site
dummy_request.request.requestReceived(b"HEAD", b"/foo", b"HTTP/1.1")
assert dummy_request.request.code == 405
assert dummy_request.request.code_message == b"Method not allowed"
def test_ensure_blows_up_with_a_bad_add():
app = App(__name__)
bad_asset = sentinel
with pytest.raises(ValueError) as excinfo:
app.add("/trash")(bad_asset)
assert "expected callable|Object|twisted.web.resource.Resource" in str(excinfo.value)
def test_ensure_blowsup_with_a_class_that_has_no_way_to_render():
app = App(__name__)
with pytest.raises(Unrenderable):
@app.add("/trash")
class BaseClass(object):
pass
def test_ensure_a_classic_like_class_is_routed():
app = App(__name__)
@app.add("/trash")
class GoodClass(object):
def render(self, request):
return b"Rendered"
first_key = next(iter(app.router.iter_rules()))
endpoint = app.router._endpoints[first_key.endpoint]
assert isinstance(endpoint, ViewClassResource)
debug = 1
def test_ensure_resource_is_added():
app = App(__name__)
app.add_resource("/404", resource=NoResource())
first_key = next(iter(app.router.iter_rules()))
endpoint = app.router._endpoints[first_key.endpoint]
assert isinstance(endpoint, NoResource)
debug = 1
def test_handle_add_slashes(dummy_request:RequestRetval):
app = App(__name__)
mock = MagicMock()
app.route("/js/")(mock)
dummy_request.request.site = app.site
dummy_request.channel.site = app.site
dummy_request.request.requestReceived(b"GET", b"/js", b"HTTP/1.1")
assert dummy_request.request.code == 308
assert dummy_request.request.code_message == b"Permanent Redirect"
assert dummy_request.request.responseHeaders.getRawHeaders(b"location") == [b"http://10.0.0.1/js/"]
assert mock.call_count == 0
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Test that maximizing the likelihood correctly recovers distribution parameters for all
distributions exposed to the user.
"""
# Standard library imports
from typing import Iterable, List, Tuple
# Third-party imports
import mxnet as mx
import numpy as np
import pytest
from pydantic import PositiveFloat, PositiveInt
# First-party imports
from gluonts.model.common import NPArrayLike
from gluonts.distribution.box_cox_tranform import (
InverseBoxCoxTransform,
InverseBoxCoxTransformOutput,
)
from gluonts.distribution import (
DistributionOutput,
StudentT,
StudentTOutput,
MultivariateGaussian,
MultivariateGaussianOutput,
LowrankMultivariateGaussian,
LowrankMultivariateGaussianOutput,
NegativeBinomial,
NegativeBinomialOutput,
Laplace,
LaplaceOutput,
Gaussian,
GaussianOutput,
PiecewiseLinear,
PiecewiseLinearOutput,
Binned,
BinnedOutput,
)
from gluonts.distribution.transformed_distribution_output import (
TransformedDistributionOutput,
)
from gluonts.distribution.transformed_distribution import (
TransformedDistribution,
)
NUM_SAMPLES = 2000
BATCH_SIZE = 32
TOL = 0.3
START_TOL_MULTIPLE = 1
np.random.seed(1)
mx.random.seed(1)
def inv_softplus(y: NPArrayLike) -> np.ndarray:
# y = log(1 + exp(x)) ==> x = log(exp(y) - 1)
return np.log(np.exp(y) - 1)
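def test_inv_softplus_inverts_softplus():
    # Hedged sanity check added for illustration (not in the original file):
    # softplus(x) = log(1 + exp(x)), so inv_softplus(softplus(x)) should give x back.
    x = np.array([0.5, 2.0, 6.0])
    assert np.allclose(inv_softplus(np.log1p(np.exp(x))), x)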
def maximum_likelihood_estimate_sgd(
distr_output: DistributionOutput,
samples: mx.ndarray,
init_biases: List[mx.ndarray.NDArray] = None,
num_epochs: PositiveInt = PositiveInt(5),
learning_rate: PositiveFloat = PositiveFloat(1e-2),
hybridize: bool = True,
) -> Iterable[float]:
    """Fit the parameters of `distr_output` to `samples` with SGD by minimizing
    the distribution's loss, and return the fitted parameter values."""
    model_ctx = mx.cpu()
arg_proj = distr_output.get_args_proj()
arg_proj.initialize()
if hybridize:
arg_proj.hybridize()
if init_biases is not None:
for param, bias in zip(arg_proj.proj, init_biases):
param.params[param.prefix + "bias"].initialize(
mx.initializer.Constant(bias), force_reinit=True
)
trainer = mx.gluon.Trainer(
arg_proj.collect_params(),
"sgd",
{"learning_rate": learning_rate, "clip_gradient": 10.0},
)
# The input data to our model is one-dimensional
dummy_data = mx.nd.array(np.ones((len(samples), 1)))
train_data = mx.gluon.data.DataLoader(
mx.gluon.data.ArrayDataset(dummy_data, samples),
batch_size=BATCH_SIZE,
shuffle=True,
)
for e in range(num_epochs):
cumulative_loss = 0
num_batches = 0
# inner loop
for i, (data, sample_label) in enumerate(train_data):
data = data.as_in_context(model_ctx)
sample_label = sample_label.as_in_context(model_ctx)
with mx.autograd.record():
distr_args = arg_proj(data)
distr = distr_output.distribution(distr_args)
loss = distr.loss(sample_label)
if not hybridize:
assert loss.shape == distr.batch_shape
loss.backward()
trainer.step(BATCH_SIZE)
num_batches += 1
cumulative_loss += mx.nd.mean(loss).asscalar()
print("Epoch %s, loss: %s" % (e, cumulative_loss / num_batches))
return [
param[0].asnumpy() for param in arg_proj(mx.nd.array(np.ones((1, 1))))
]
@pytest.mark.parametrize("mu, sigma, nu", [(2.3, 0.7, 6.0)])
@pytest.mark.parametrize("hybridize", [True, False])
def test_studentT_likelihood(
mu: float, sigma: float, nu: float, hybridize: bool
) -> None:
"""
Test to check that maximizing the likelihood recovers the parameters
"""
# generate samples
mus = mx.nd.zeros((NUM_SAMPLES,)) + mu
sigmas = mx.nd.zeros((NUM_SAMPLES,)) + sigma
nus = mx.nd.zeros((NUM_SAMPLES,)) + nu
distr = StudentT(mus, sigmas, nus)
samples = distr.sample()
# nu takes very long to learn, so we initialize it at the true value.
# transform used is softplus(x) + 2
init_bias = [
mu - START_TOL_MULTIPLE * TOL * mu,
inv_softplus(sigma - START_TOL_MULTIPLE * TOL * sigma),
inv_softplus(nu - 2),
]
mu_hat, sigma_hat, nu_hat = maximum_likelihood_estimate_sgd(
StudentTOutput(),
samples,
init_biases=init_bias,
hybridize=hybridize,
num_epochs=PositiveInt(10),
learning_rate=PositiveFloat(1e-2),
)
assert (
np.abs(mu_hat - mu) < TOL * mu
), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
assert (
np.abs(sigma_hat - sigma) < TOL * sigma
), f"sigma did not match: sigma = {sigma}, sigma_hat = {sigma_hat}"
assert (
np.abs(nu_hat - nu) < TOL * nu
), "nu0 did not match: nu0 = %s, nu_hat = %s" % (nu, nu_hat)
@pytest.mark.parametrize("mu, sigma", [(1.0, 0.1)])
@pytest.mark.parametrize("hybridize", [True, False])
def test_gaussian_likelihood(mu: float, sigma: float, hybridize: bool):
"""
Test to check that maximizing the likelihood recovers the parameters
"""
# generate samples
mus = mx.nd.zeros((NUM_SAMPLES,)) + mu
sigmas = mx.nd.zeros((NUM_SAMPLES,)) + sigma
distr = Gaussian(mus, sigmas)
samples = distr.sample()
init_biases = [
mu - START_TOL_MULTIPLE * TOL * mu,
inv_softplus(sigma - START_TOL_MULTIPLE * TOL * sigma),
]
mu_hat, sigma_hat = maximum_likelihood_estimate_sgd(
GaussianOutput(),
samples,
init_biases=init_biases,
hybridize=hybridize,
learning_rate=PositiveFloat(0.001),
num_epochs=PositiveInt(5),
)
assert (
np.abs(mu_hat - mu) < TOL * mu
), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
assert (
np.abs(sigma_hat - sigma) < TOL * sigma
), f"alpha did not match: sigma = {sigma}, sigma_hat = {sigma_hat}"
@pytest.mark.timeout(10)
def test_multivariate_gaussian() -> None:
num_samples = 2000
dim = 2
mu = np.arange(0, dim) / float(dim)
L_diag = np.ones((dim,))
L_low = 0.1 * np.ones((dim, dim)) * np.tri(dim, k=-1)
L = np.diag(L_diag) + L_low
Sigma = L.dot(L.transpose())
distr = MultivariateGaussian(mu=mx.nd.array(mu), L=mx.nd.array(L))
samples = distr.sample(num_samples)
mu_hat, L_hat = maximum_likelihood_estimate_sgd(
MultivariateGaussianOutput(dim=dim),
samples,
init_biases=None, # todo we would need to rework biases a bit to use it in the multivariate case
hybridize=False,
learning_rate=PositiveFloat(0.01),
num_epochs=PositiveInt(10),
)
distr = MultivariateGaussian(
mu=mx.nd.array([mu_hat]), L=mx.nd.array([L_hat])
)
Sigma_hat = distr.variance[0].asnumpy()
assert np.allclose(
mu_hat, mu, atol=0.1, rtol=0.1
), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
assert np.allclose(
Sigma_hat, Sigma, atol=0.1, rtol=0.1
), f"Sigma did not match: sigma = {Sigma}, sigma_hat = {Sigma_hat}"
@pytest.mark.timeout(10)
def test_lowrank_multivariate_gaussian() -> None:
num_samples = 2000
dim = 2
rank = 1
mu = np.arange(0, dim) / float(dim)
D = np.eye(dim) * (np.arange(dim) / dim + 0.5)
W = np.sqrt(np.ones((dim, rank)) * 0.2)
Sigma = D + W.dot(W.transpose())
distr = LowrankMultivariateGaussian(
mu=mx.nd.array([mu]),
D=mx.nd.array([np.diag(D)]),
W=mx.nd.array([W]),
dim=dim,
rank=rank,
)
assert np.allclose(
distr.variance[0].asnumpy(), Sigma, atol=0.1, rtol=0.1
), f"did not match: sigma = {Sigma}, sigma_hat = {distr.variance[0]}"
samples = distr.sample(num_samples).squeeze().asnumpy()
mu_hat, D_hat, W_hat = maximum_likelihood_estimate_sgd(
LowrankMultivariateGaussianOutput(dim=dim, rank=rank),
samples,
learning_rate=PositiveFloat(0.01),
num_epochs=PositiveInt(10),
init_biases=None, # todo we would need to rework biases a bit to use it in the multivariate case
hybridize=False,
)
distr = LowrankMultivariateGaussian(
dim=dim,
rank=rank,
mu=mx.nd.array([mu_hat]),
D=mx.nd.array([D_hat]),
W=mx.nd.array([W_hat]),
)
Sigma_hat = distr.variance.asnumpy()
assert np.allclose(
mu_hat, mu, atol=0.2, rtol=0.1
), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
assert np.allclose(
Sigma_hat, Sigma, atol=0.1, rtol=0.1
), f"alpha did not match: sigma = {Sigma}, sigma_hat = {Sigma_hat}"
@pytest.mark.parametrize("mu", [6.0])
@pytest.mark.parametrize("hybridize", [True, False])
def test_deterministic_l2(mu: float, hybridize: bool) -> None:
"""
Test to check that maximizing the likelihood recovers the parameters.
    This test uses the Gaussian distribution with fixed variance and sample mean.
    This essentially reduces to a deterministic L2 loss.
"""
# generate samples
mu = mu
mus = mx.nd.zeros(NUM_SAMPLES) + mu
deterministic_distr = Gaussian(mu=mus, sigma=0.1 * mx.nd.ones_like(mus))
samples = deterministic_distr.sample()
class GaussianFixedVarianceOutput(GaussianOutput):
@classmethod
def domain_map(cls, F, mu, sigma):
sigma = 0.1 * F.ones_like(sigma)
return mu.squeeze(axis=-1), sigma.squeeze(axis=-1)
mu_hat, _ = maximum_likelihood_estimate_sgd(
GaussianFixedVarianceOutput(),
samples,
init_biases=[3 * mu, 0.1],
hybridize=hybridize,
num_epochs=PositiveInt(1),
)
assert (
np.abs(mu_hat - mu) < TOL * mu
), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
@pytest.mark.parametrize("mu", [1.0])
@pytest.mark.parametrize("hybridize", [True, False])
def test_deterministic_l1(mu: float, hybridize: bool) -> None:
"""
Test to check that maximizing the likelihood recovers the parameters.
    This test uses the Laplace distribution with fixed variance and sample mean.
    This essentially reduces to a deterministic L1 loss.
"""
# generate samples
mu = mu
mus = mx.nd.zeros(NUM_SAMPLES) + mu
class LaplaceFixedVarianceOutput(LaplaceOutput):
@classmethod
def domain_map(cls, F, mu, b):
b = 0.1 * F.ones_like(b)
return mu.squeeze(axis=-1), b.squeeze(axis=-1)
deterministic_distr = Laplace(mu=mus, b=0.1 * mx.nd.ones_like(mus))
samples = deterministic_distr.sample()
mu_hat, _ = maximum_likelihood_estimate_sgd(
LaplaceFixedVarianceOutput(),
samples,
init_biases=[3 * mu, 0.1],
learning_rate=PositiveFloat(1e-3),
hybridize=hybridize,
)
assert (
np.abs(mu_hat - mu) < TOL * mu
), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
@pytest.mark.parametrize("mu_alpha", [(2.5, 0.7)])
@pytest.mark.parametrize("hybridize", [True, False])
def test_neg_binomial(mu_alpha: Tuple[float, float], hybridize: bool) -> None:
"""
Test to check that maximizing the likelihood recovers the parameters
"""
# test instance
mu, alpha = mu_alpha
# generate samples
mus = mx.nd.zeros((NUM_SAMPLES,)) + mu
alphas = mx.nd.zeros((NUM_SAMPLES,)) + alpha
neg_bin_distr = NegativeBinomial(mu=mus, alpha=alphas)
samples = neg_bin_distr.sample()
init_biases = [
inv_softplus(mu - START_TOL_MULTIPLE * TOL * mu),
inv_softplus(alpha + START_TOL_MULTIPLE * TOL * alpha),
]
mu_hat, alpha_hat = maximum_likelihood_estimate_sgd(
NegativeBinomialOutput(),
samples,
hybridize=hybridize,
init_biases=init_biases,
num_epochs=PositiveInt(15),
)
assert (
np.abs(mu_hat - mu) < TOL * mu
), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
assert (
np.abs(alpha_hat - alpha) < TOL * alpha
), f"alpha did not match: alpha = {alpha}, alpha_hat = {alpha_hat}"
@pytest.mark.timeout(10)
@pytest.mark.parametrize("mu_b", [(3.3, 0.7)])
@pytest.mark.parametrize("hybridize", [True, False])
def test_laplace(mu_b: Tuple[float, float], hybridize: bool) -> None:
"""
Test to check that maximizing the likelihood recovers the parameters
"""
# test instance
mu, b = mu_b
# generate samples
mus = mx.nd.zeros((NUM_SAMPLES,)) + mu
bs = mx.nd.zeros((NUM_SAMPLES,)) + b
laplace_distr = Laplace(mu=mus, b=bs)
samples = laplace_distr.sample()
init_biases = [
mu - START_TOL_MULTIPLE * TOL * mu,
inv_softplus(b + START_TOL_MULTIPLE * TOL * b),
]
mu_hat, b_hat = maximum_likelihood_estimate_sgd(
LaplaceOutput(), samples, hybridize=hybridize, init_biases=init_biases
)
assert (
np.abs(mu_hat - mu) < TOL * mu
), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
assert (
np.abs(b_hat - b) < TOL * b
), f"b did not match: b = {b}, b_hat = {b_hat}"
@pytest.mark.parametrize(
"gamma, slopes, knot_spacings",
[(2.0, np.array([3, 1, 3, 4]), np.array([0.3, 0.2, 0.35, 0.15]))],
)
@pytest.mark.parametrize("hybridize", [True, False])
def test_piecewise_linear(
gamma: float,
slopes: np.ndarray,
knot_spacings: np.ndarray,
hybridize: bool,
) -> None:
"""
Test to check that minimizing the CRPS recovers the quantile function
"""
    num_samples = 500  # use fewer samples to avoid timeout failures
gammas = mx.nd.zeros((num_samples,)) + gamma
slopess = mx.nd.zeros((num_samples, len(slopes))) + mx.nd.array(slopes)
knot_spacingss = mx.nd.zeros(
(num_samples, len(knot_spacings))
) + mx.nd.array(knot_spacings)
pwl_sqf = PiecewiseLinear(gammas, slopess, knot_spacingss)
samples = pwl_sqf.sample()
# Parameter initialization
gamma_init = gamma - START_TOL_MULTIPLE * TOL * gamma
slopes_init = slopes - START_TOL_MULTIPLE * TOL * slopes
knot_spacings_init = knot_spacings
# We perturb knot spacings such that even after the perturbation they sum to 1.
mid = len(slopes) // 2
knot_spacings_init[:mid] = (
knot_spacings[:mid] - START_TOL_MULTIPLE * TOL * knot_spacings[:mid]
)
knot_spacings_init[mid:] = (
knot_spacings[mid:] + START_TOL_MULTIPLE * TOL * knot_spacings[mid:]
)
init_biases = [gamma_init, slopes_init, knot_spacings_init]
    # check whether the fit recovers the original parameters (up to the parameter mapping)
gamma_hat, slopes_hat, knot_spacings_hat = maximum_likelihood_estimate_sgd(
PiecewiseLinearOutput(len(slopes)),
samples,
init_biases=init_biases,
hybridize=hybridize,
learning_rate=PositiveFloat(0.01),
num_epochs=PositiveInt(20),
)
# Since the problem is highly non-convex we may not be able to recover the exact parameters
# Here we check if the estimated parameters yield similar function evaluations at different quantile levels.
quantile_levels = np.arange(0.1, 1.0, 0.1)
    # create a PiecewiseLinear instance with the estimated parameters to have access to .quantile
pwl_sqf_hat = PiecewiseLinear(
mx.nd.array(gamma_hat),
mx.nd.array(slopes_hat).expand_dims(axis=0),
mx.nd.array(knot_spacings_hat).expand_dims(axis=0),
)
# Compute quantiles with the estimated parameters
quantiles_hat = np.squeeze(
pwl_sqf_hat.quantile(
mx.nd.array(quantile_levels).expand_dims(axis=0), axis=1
).asnumpy()
)
# Compute quantiles with the original parameters
# Since params is replicated across samples we take only the first entry
quantiles = np.squeeze(
pwl_sqf.quantile(
mx.nd.array(quantile_levels)
.expand_dims(axis=0)
.repeat(axis=0, repeats=num_samples),
axis=1,
).asnumpy()[0, :]
)
for ix, (quantile, quantile_hat) in enumerate(
zip(quantiles, quantiles_hat)
):
assert np.abs(quantile_hat - quantile) < TOL * quantile, (
f"quantile level {quantile_levels[ix]} didn't match:"
f" "
f"q = {quantile}, q_hat = {quantile_hat}"
)
@pytest.mark.skip("this test fails when run locally")
@pytest.mark.parametrize("lam_1, lam_2", [(0.1, 0.01)])
@pytest.mark.parametrize("mu, sigma", [(-1.5, 0.5)])
@pytest.mark.parametrize("hybridize", [True])
def test_box_cox_tranform(
lam_1: float, lam_2: float, mu: float, sigma: float, hybridize: bool
):
"""
Test to check that maximizing the likelihood recovers the parameters
"""
# generate samples
lamdas_1 = mx.nd.zeros((NUM_SAMPLES,)) + lam_1
lamdas_2 = mx.nd.zeros((NUM_SAMPLES,)) + lam_2
transform = InverseBoxCoxTransform(lamdas_1, lamdas_2)
mus = mx.nd.zeros((NUM_SAMPLES,)) + mu
sigmas = mx.nd.zeros((NUM_SAMPLES,)) + sigma
gausian_distr = Gaussian(mus, sigmas)
    # Here the base distribution is Gaussian, which is transformed to
# non-Gaussian via the inverse Box-Cox transform.
# Sampling from `trans_distr` gives non-Gaussian samples
trans_distr = TransformedDistribution(gausian_distr, transform)
# Given the non-Gaussian samples find the true parameters
# of the Box-Cox transformation as well as the underlying Gaussian distribution.
samples = trans_distr.sample()
init_biases = [
mu - START_TOL_MULTIPLE * TOL * mu,
inv_softplus(sigma - START_TOL_MULTIPLE * TOL * sigma),
lam_1 - START_TOL_MULTIPLE * TOL * lam_1,
inv_softplus(lam_2 - START_TOL_MULTIPLE * TOL * lam_2),
]
mu_hat, sigma_hat, lam_1_hat, lam_2_hat = maximum_likelihood_estimate_sgd(
TransformedDistributionOutput(
GaussianOutput(),
InverseBoxCoxTransformOutput(lb_obs=lam_2, fix_lambda_2=True),
),
samples,
init_biases=init_biases,
hybridize=hybridize,
learning_rate=PositiveFloat(0.01),
num_epochs=PositiveInt(18),
)
assert (
np.abs(lam_1_hat - lam_1) < TOL * lam_1
), f"lam_1 did not match: lam_1 = {lam_1}, lam_1_hat = {lam_1_hat}"
# assert (
# np.abs(lam_2_hat - lam_2) < TOL * lam_2
# ), f"lam_2 did not match: lam_2 = {lam_2}, lam_2_hat = {lam_2_hat}"
assert np.abs(mu_hat - mu) < TOL * np.abs(
mu
), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
assert (
np.abs(sigma_hat - sigma) < TOL * sigma
), f"sigma did not match: sigma = {sigma}, sigma_hat = {sigma_hat}"
@pytest.mark.parametrize("num_bins", [6])
@pytest.mark.parametrize(
"bin_probabilites", [np.array([0.3, 0.1, 0.05, 0.2, 0.1, 0.25])]
)
@pytest.mark.parametrize("hybridize", [True, False])
def test_binned_likelihood(
num_bins: float, bin_probabilites: np.ndarray, hybridize: bool
):
"""
Test to check that maximizing the likelihood recovers the parameters
"""
bin_prob = mx.nd.array(bin_probabilites)
bin_center = mx.nd.array(np.logspace(-1, 1, num_bins))
# generate samples
bin_probs = mx.nd.zeros((NUM_SAMPLES, num_bins)) + bin_prob
bin_centers = mx.nd.zeros((NUM_SAMPLES, num_bins)) + bin_center
distr = Binned(bin_probs, bin_centers)
samples = distr.sample()
# add some jitter to the uniform initialization and normalize
bin_prob_init = mx.nd.random_uniform(1 - TOL, 1 + TOL, num_bins) * bin_prob
bin_prob_init = bin_prob_init / bin_prob_init.sum()
init_biases = [bin_prob_init]
bin_prob_hat, = maximum_likelihood_estimate_sgd(
BinnedOutput(list(bin_center.asnumpy())),
samples,
init_biases=init_biases,
hybridize=hybridize,
learning_rate=PositiveFloat(0.05),
num_epochs=PositiveInt(25),
)
assert all(
mx.nd.abs(mx.nd.array(bin_prob_hat) - bin_prob) < TOL * bin_prob
), f"bin_prob did not match: bin_prob = {bin_prob}, bin_prob_hat = {bin_prob_hat}"
|
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
import collections
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class Nitf(KaitaiStruct):
"""The NITF (National Image Transition Format) format is a file format developed by the U.S. Government for
storing imagery, e.g. from satellites.
According to the [foreword of the specification](https://gwg.nga.mil/ntb/baseline/docs/2500c/2500C.pdf):
> The National Imagery Transmission Format Standard (NITFS) is the suite of standards for formatting digital
> imagery and imagery-related products and exchanging them among members of the Intelligence Community (IC) as
> defined by the Executive Order 12333, and other United States Government departments and agencies."
This implementation is set to version format (`file_version`) of 02.10 and `standard_type` of `BF01`.
It was implemented by [River Loop Security](https://riverloopsecurity.com).
.. seealso::
Source - https://gwg.nga.mil/ntb/baseline/docs/2500c/2500C.pdf
"""
SEQ_FIELDS = ["header", "image_segments", "graphics_segments", "text_segments", "data_extension_segments", "reserved_extension_segments"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['header']['start'] = self._io.pos()
self.header = self._root.Header(self._io, self, self._root)
self.header._read()
self._debug['header']['end'] = self._io.pos()
self._debug['image_segments']['start'] = self._io.pos()
self.image_segments = [None] * (int(self.header.num_image_segments))
for i in range(int(self.header.num_image_segments)):
if not 'arr' in self._debug['image_segments']:
self._debug['image_segments']['arr'] = []
self._debug['image_segments']['arr'].append({'start': self._io.pos()})
_t_image_segments = self._root.ImageSegment(i, self._io, self, self._root)
_t_image_segments._read()
self.image_segments[i] = _t_image_segments
self._debug['image_segments']['arr'][i]['end'] = self._io.pos()
self._debug['image_segments']['end'] = self._io.pos()
self._debug['graphics_segments']['start'] = self._io.pos()
self.graphics_segments = [None] * (int(self.header.num_graphics_segments))
for i in range(int(self.header.num_graphics_segments)):
if not 'arr' in self._debug['graphics_segments']:
self._debug['graphics_segments']['arr'] = []
self._debug['graphics_segments']['arr'].append({'start': self._io.pos()})
_t_graphics_segments = self._root.GraphicsSegment(i, self._io, self, self._root)
_t_graphics_segments._read()
self.graphics_segments[i] = _t_graphics_segments
self._debug['graphics_segments']['arr'][i]['end'] = self._io.pos()
self._debug['graphics_segments']['end'] = self._io.pos()
self._debug['text_segments']['start'] = self._io.pos()
self.text_segments = [None] * (int(self.header.num_text_files))
for i in range(int(self.header.num_text_files)):
if not 'arr' in self._debug['text_segments']:
self._debug['text_segments']['arr'] = []
self._debug['text_segments']['arr'].append({'start': self._io.pos()})
_t_text_segments = self._root.TextSegment(i, self._io, self, self._root)
_t_text_segments._read()
self.text_segments[i] = _t_text_segments
self._debug['text_segments']['arr'][i]['end'] = self._io.pos()
self._debug['text_segments']['end'] = self._io.pos()
self._debug['data_extension_segments']['start'] = self._io.pos()
self.data_extension_segments = [None] * (int(self.header.num_data_extension))
for i in range(int(self.header.num_data_extension)):
if not 'arr' in self._debug['data_extension_segments']:
self._debug['data_extension_segments']['arr'] = []
self._debug['data_extension_segments']['arr'].append({'start': self._io.pos()})
_t_data_extension_segments = self._root.DataExtensionSegment(i, self._io, self, self._root)
_t_data_extension_segments._read()
self.data_extension_segments[i] = _t_data_extension_segments
self._debug['data_extension_segments']['arr'][i]['end'] = self._io.pos()
self._debug['data_extension_segments']['end'] = self._io.pos()
self._debug['reserved_extension_segments']['start'] = self._io.pos()
self.reserved_extension_segments = [None] * (int(self.header.num_reserved_extension))
for i in range(int(self.header.num_reserved_extension)):
if not 'arr' in self._debug['reserved_extension_segments']:
self._debug['reserved_extension_segments']['arr'] = []
self._debug['reserved_extension_segments']['arr'].append({'start': self._io.pos()})
_t_reserved_extension_segments = self._root.ReservedExtensionSegment(i, self._io, self, self._root)
_t_reserved_extension_segments._read()
self.reserved_extension_segments[i] = _t_reserved_extension_segments
self._debug['reserved_extension_segments']['arr'][i]['end'] = self._io.pos()
self._debug['reserved_extension_segments']['end'] = self._io.pos()
class ReservedExtensionSegment(KaitaiStruct):
SEQ_FIELDS = ["reserved_sub_header", "reserved_data_field"]
def __init__(self, idx, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.idx = idx
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved_sub_header']['start'] = self._io.pos()
self._raw_reserved_sub_header = self._io.read_bytes(int(self._parent.header.lrnfo[self.idx].length_reserved_extension_subheader))
_io__raw_reserved_sub_header = KaitaiStream(BytesIO(self._raw_reserved_sub_header))
self.reserved_sub_header = self._root.ReservedSubHeader(_io__raw_reserved_sub_header, self, self._root)
self.reserved_sub_header._read()
self._debug['reserved_sub_header']['end'] = self._io.pos()
self._debug['reserved_data_field']['start'] = self._io.pos()
self.reserved_data_field = self._io.read_bytes(int(self._parent.header.lrnfo[self.idx].length_reserved_extension_segment))
self._debug['reserved_data_field']['end'] = self._io.pos()
class ImageComment(KaitaiStruct):
SEQ_FIELDS = ["_unnamed0"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['_unnamed0']['start'] = self._io.pos()
self._unnamed0 = (self._io.read_bytes(80)).decode(u"UTF-8")
self._debug['_unnamed0']['end'] = self._io.pos()
class LengthReservedInfo(KaitaiStruct):
SEQ_FIELDS = ["length_reserved_extension_subheader", "length_reserved_extension_segment"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['length_reserved_extension_subheader']['start'] = self._io.pos()
self.length_reserved_extension_subheader = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['length_reserved_extension_subheader']['end'] = self._io.pos()
self._debug['length_reserved_extension_segment']['start'] = self._io.pos()
self.length_reserved_extension_segment = (self._io.read_bytes(7)).decode(u"UTF-8")
self._debug['length_reserved_extension_segment']['end'] = self._io.pos()
class Tre(KaitaiStruct):
SEQ_FIELDS = ["extension_type_id", "edata_length", "edata"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['extension_type_id']['start'] = self._io.pos()
self.extension_type_id = (self._io.read_bytes(6)).decode(u"UTF-8")
self._debug['extension_type_id']['end'] = self._io.pos()
self._debug['edata_length']['start'] = self._io.pos()
self.edata_length = (self._io.read_bytes(5)).decode(u"UTF-8")
self._debug['edata_length']['end'] = self._io.pos()
self._debug['edata']['start'] = self._io.pos()
self.edata = (self._io.read_bytes(int(self.edata_length))).decode(u"UTF-8")
self._debug['edata']['end'] = self._io.pos()
class BandInfo(KaitaiStruct):
SEQ_FIELDS = ["representation", "subcategory", "img_filter_condition", "img_filter_code", "num_luts", "num_lut_entries", "luts"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['representation']['start'] = self._io.pos()
self.representation = (self._io.read_bytes(2)).decode(u"UTF-8")
self._debug['representation']['end'] = self._io.pos()
self._debug['subcategory']['start'] = self._io.pos()
self.subcategory = (self._io.read_bytes(6)).decode(u"UTF-8")
self._debug['subcategory']['end'] = self._io.pos()
self._debug['img_filter_condition']['start'] = self._io.pos()
self.img_filter_condition = self._io.read_bytes(1)
self._debug['img_filter_condition']['end'] = self._io.pos()
if not self.img_filter_condition == b"\x4E":
raise kaitaistruct.ValidationNotEqualError(b"\x4E", self.img_filter_condition, self._io, u"/types/band_info/seq/2")
self._debug['img_filter_code']['start'] = self._io.pos()
self.img_filter_code = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['img_filter_code']['end'] = self._io.pos()
self._debug['num_luts']['start'] = self._io.pos()
self.num_luts = (self._io.read_bytes(1)).decode(u"UTF-8")
self._debug['num_luts']['end'] = self._io.pos()
if int(self.num_luts) != 0:
self._debug['num_lut_entries']['start'] = self._io.pos()
self.num_lut_entries = (self._io.read_bytes(5)).decode(u"UTF-8")
self._debug['num_lut_entries']['end'] = self._io.pos()
self._debug['luts']['start'] = self._io.pos()
self.luts = [None] * (int(self.num_luts))
for i in range(int(self.num_luts)):
if not 'arr' in self._debug['luts']:
self._debug['luts']['arr'] = []
self._debug['luts']['arr'].append({'start': self._io.pos()})
self.luts[i] = self._io.read_bytes(int(self.num_lut_entries))
self._debug['luts']['arr'][i]['end'] = self._io.pos()
self._debug['luts']['end'] = self._io.pos()
class ImageSegment(KaitaiStruct):
SEQ_FIELDS = ["image_sub_header", "image_data_mask", "image_data_field"]
def __init__(self, idx, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.idx = idx
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['image_sub_header']['start'] = self._io.pos()
self.image_sub_header = self._root.ImageSubHeader(self._io, self, self._root)
self.image_sub_header._read()
self._debug['image_sub_header']['end'] = self._io.pos()
if self.has_mask:
self._debug['image_data_mask']['start'] = self._io.pos()
self.image_data_mask = self._root.ImageDataMask(self._io, self, self._root)
self.image_data_mask._read()
self._debug['image_data_mask']['end'] = self._io.pos()
if self.has_mask:
self._debug['image_data_field']['start'] = self._io.pos()
self.image_data_field = self._io.read_bytes((int(self._parent.header.linfo[self.idx].length_image_segment) - self.image_data_mask.total_size))
self._debug['image_data_field']['end'] = self._io.pos()
@property
def has_mask(self):
if hasattr(self, '_m_has_mask'):
return self._m_has_mask if hasattr(self, '_m_has_mask') else None
self._m_has_mask = ((self.image_sub_header.img_compression[0:1] == u"M") or (self.image_sub_header.img_compression[1:2] == u"M"))
return self._m_has_mask if hasattr(self, '_m_has_mask') else None
class TextSegment(KaitaiStruct):
SEQ_FIELDS = ["text_sub_header", "text_data_field"]
def __init__(self, idx, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.idx = idx
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['text_sub_header']['start'] = self._io.pos()
self.text_sub_header = self._io.read_bytes(1)
self._debug['text_sub_header']['end'] = self._io.pos()
self._debug['text_data_field']['start'] = self._io.pos()
self.text_data_field = self._io.read_bytes(int(self._parent.header.ltnfo[self.idx].length_text_segment))
self._debug['text_data_field']['end'] = self._io.pos()
class GraphicSubHeader(KaitaiStruct):
SEQ_FIELDS = ["file_part_type_sy", "graphic_id", "graphic_name", "graphic_classification", "encryption", "graphic_type", "reserved1", "graphic_display_level", "graphic_attachment_level", "graphic_location", "first_graphic_bound_loc", "graphic_color", "second_graphic_bound_loc", "reserved2", "graphics_extended_sub_header"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['file_part_type_sy']['start'] = self._io.pos()
self.file_part_type_sy = self._io.read_bytes(2)
self._debug['file_part_type_sy']['end'] = self._io.pos()
if not self.file_part_type_sy == b"\x53\x59":
raise kaitaistruct.ValidationNotEqualError(b"\x53\x59", self.file_part_type_sy, self._io, u"/types/graphic_sub_header/seq/0")
self._debug['graphic_id']['start'] = self._io.pos()
self.graphic_id = (self._io.read_bytes(10)).decode(u"UTF-8")
self._debug['graphic_id']['end'] = self._io.pos()
self._debug['graphic_name']['start'] = self._io.pos()
self.graphic_name = (self._io.read_bytes(20)).decode(u"UTF-8")
self._debug['graphic_name']['end'] = self._io.pos()
self._debug['graphic_classification']['start'] = self._io.pos()
self.graphic_classification = self._root.Clasnfo(self._io, self, self._root)
self.graphic_classification._read()
self._debug['graphic_classification']['end'] = self._io.pos()
self._debug['encryption']['start'] = self._io.pos()
self.encryption = self._root.Encrypt(self._io, self, self._root)
self.encryption._read()
self._debug['encryption']['end'] = self._io.pos()
self._debug['graphic_type']['start'] = self._io.pos()
self.graphic_type = self._io.read_bytes(1)
self._debug['graphic_type']['end'] = self._io.pos()
if not self.graphic_type == b"\x43":
raise kaitaistruct.ValidationNotEqualError(b"\x43", self.graphic_type, self._io, u"/types/graphic_sub_header/seq/5")
self._debug['reserved1']['start'] = self._io.pos()
self.reserved1 = (self._io.read_bytes(13)).decode(u"UTF-8")
self._debug['reserved1']['end'] = self._io.pos()
self._debug['graphic_display_level']['start'] = self._io.pos()
self.graphic_display_level = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['graphic_display_level']['end'] = self._io.pos()
self._debug['graphic_attachment_level']['start'] = self._io.pos()
self.graphic_attachment_level = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['graphic_attachment_level']['end'] = self._io.pos()
self._debug['graphic_location']['start'] = self._io.pos()
self.graphic_location = (self._io.read_bytes(10)).decode(u"UTF-8")
self._debug['graphic_location']['end'] = self._io.pos()
self._debug['first_graphic_bound_loc']['start'] = self._io.pos()
self.first_graphic_bound_loc = (self._io.read_bytes(10)).decode(u"UTF-8")
self._debug['first_graphic_bound_loc']['end'] = self._io.pos()
self._debug['graphic_color']['start'] = self._io.pos()
self.graphic_color = (self._io.read_bytes(1)).decode(u"UTF-8")
self._debug['graphic_color']['end'] = self._io.pos()
self._debug['second_graphic_bound_loc']['start'] = self._io.pos()
self.second_graphic_bound_loc = (self._io.read_bytes(10)).decode(u"UTF-8")
self._debug['second_graphic_bound_loc']['end'] = self._io.pos()
self._debug['reserved2']['start'] = self._io.pos()
self.reserved2 = (self._io.read_bytes(2)).decode(u"UTF-8")
self._debug['reserved2']['end'] = self._io.pos()
self._debug['graphics_extended_sub_header']['start'] = self._io.pos()
self.graphics_extended_sub_header = self._root.TreHeader(self._io, self, self._root)
self.graphics_extended_sub_header._read()
self._debug['graphics_extended_sub_header']['end'] = self._io.pos()
class Clasnfo(KaitaiStruct):
SEQ_FIELDS = ["security_class", "security_system", "codewords", "control_and_handling", "releaseability", "declass_type", "declass_date", "declass_exemption", "downgrade", "downgrade_date", "class_text", "class_authority_type", "class_authority", "class_reason", "source_date", "control_number"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['security_class']['start'] = self._io.pos()
self.security_class = (self._io.read_bytes(1)).decode(u"UTF-8")
self._debug['security_class']['end'] = self._io.pos()
self._debug['security_system']['start'] = self._io.pos()
self.security_system = (self._io.read_bytes(2)).decode(u"UTF-8")
self._debug['security_system']['end'] = self._io.pos()
self._debug['codewords']['start'] = self._io.pos()
self.codewords = (self._io.read_bytes(11)).decode(u"UTF-8")
self._debug['codewords']['end'] = self._io.pos()
self._debug['control_and_handling']['start'] = self._io.pos()
self.control_and_handling = (self._io.read_bytes(2)).decode(u"UTF-8")
self._debug['control_and_handling']['end'] = self._io.pos()
self._debug['releaseability']['start'] = self._io.pos()
self.releaseability = (self._io.read_bytes(20)).decode(u"UTF-8")
self._debug['releaseability']['end'] = self._io.pos()
self._debug['declass_type']['start'] = self._io.pos()
self.declass_type = (self._io.read_bytes(2)).decode(u"UTF-8")
self._debug['declass_type']['end'] = self._io.pos()
self._debug['declass_date']['start'] = self._io.pos()
self.declass_date = (self._io.read_bytes(8)).decode(u"UTF-8")
self._debug['declass_date']['end'] = self._io.pos()
self._debug['declass_exemption']['start'] = self._io.pos()
self.declass_exemption = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['declass_exemption']['end'] = self._io.pos()
self._debug['downgrade']['start'] = self._io.pos()
self.downgrade = (self._io.read_bytes(1)).decode(u"UTF-8")
self._debug['downgrade']['end'] = self._io.pos()
self._debug['downgrade_date']['start'] = self._io.pos()
self.downgrade_date = (self._io.read_bytes(8)).decode(u"UTF-8")
self._debug['downgrade_date']['end'] = self._io.pos()
self._debug['class_text']['start'] = self._io.pos()
self.class_text = (self._io.read_bytes(43)).decode(u"UTF-8")
self._debug['class_text']['end'] = self._io.pos()
self._debug['class_authority_type']['start'] = self._io.pos()
self.class_authority_type = (self._io.read_bytes(1)).decode(u"UTF-8")
self._debug['class_authority_type']['end'] = self._io.pos()
self._debug['class_authority']['start'] = self._io.pos()
self.class_authority = (self._io.read_bytes(40)).decode(u"UTF-8")
self._debug['class_authority']['end'] = self._io.pos()
self._debug['class_reason']['start'] = self._io.pos()
self.class_reason = (self._io.read_bytes(1)).decode(u"UTF-8")
self._debug['class_reason']['end'] = self._io.pos()
self._debug['source_date']['start'] = self._io.pos()
self.source_date = (self._io.read_bytes(8)).decode(u"UTF-8")
self._debug['source_date']['end'] = self._io.pos()
self._debug['control_number']['start'] = self._io.pos()
self.control_number = (self._io.read_bytes(15)).decode(u"UTF-8")
self._debug['control_number']['end'] = self._io.pos()
class LengthGraphicInfo(KaitaiStruct):
SEQ_FIELDS = ["length_graphic_subheader", "length_graphic_segment"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['length_graphic_subheader']['start'] = self._io.pos()
self.length_graphic_subheader = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['length_graphic_subheader']['end'] = self._io.pos()
self._debug['length_graphic_segment']['start'] = self._io.pos()
self.length_graphic_segment = (self._io.read_bytes(6)).decode(u"UTF-8")
self._debug['length_graphic_segment']['end'] = self._io.pos()
class Encrypt(KaitaiStruct):
SEQ_FIELDS = ["_unnamed0"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['_unnamed0']['start'] = self._io.pos()
self._unnamed0 = (self._io.read_bytes(1)).decode(u"UTF-8")
self._debug['_unnamed0']['end'] = self._io.pos()
class ImageDataMask(KaitaiStruct):
SEQ_FIELDS = ["blocked_img_data_offset", "bmrlnth", "tmrlnth", "tpxcdlnth", "tpxcd", "bmrbnd", "tmrbnd"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['blocked_img_data_offset']['start'] = self._io.pos()
self.blocked_img_data_offset = self._io.read_u4be()
self._debug['blocked_img_data_offset']['end'] = self._io.pos()
self._debug['bmrlnth']['start'] = self._io.pos()
self.bmrlnth = self._io.read_u2be()
self._debug['bmrlnth']['end'] = self._io.pos()
self._debug['tmrlnth']['start'] = self._io.pos()
self.tmrlnth = self._io.read_u2be()
self._debug['tmrlnth']['end'] = self._io.pos()
self._debug['tpxcdlnth']['start'] = self._io.pos()
self.tpxcdlnth = self._io.read_u2be()
self._debug['tpxcdlnth']['end'] = self._io.pos()
self._debug['tpxcd']['start'] = self._io.pos()
self.tpxcd = self._io.read_bytes(self.tpxcd_size)
self._debug['tpxcd']['end'] = self._io.pos()
if self.has_bmr:
self._debug['bmrbnd']['start'] = self._io.pos()
self.bmrbnd = [None] * (self.bmrtmr_count)
for i in range(self.bmrtmr_count):
if not 'arr' in self._debug['bmrbnd']:
self._debug['bmrbnd']['arr'] = []
self._debug['bmrbnd']['arr'].append({'start': self._io.pos()})
self.bmrbnd[i] = self._io.read_u4be()
self._debug['bmrbnd']['arr'][i]['end'] = self._io.pos()
self._debug['bmrbnd']['end'] = self._io.pos()
if self.has_tmr:
self._debug['tmrbnd']['start'] = self._io.pos()
self.tmrbnd = [None] * (self.bmrtmr_count)
for i in range(self.bmrtmr_count):
if not 'arr' in self._debug['tmrbnd']:
self._debug['tmrbnd']['arr'] = []
self._debug['tmrbnd']['arr'].append({'start': self._io.pos()})
self.tmrbnd[i] = self._io.read_u4be()
self._debug['tmrbnd']['arr'][i]['end'] = self._io.pos()
self._debug['tmrbnd']['end'] = self._io.pos()
@property
def has_bmr(self):
if hasattr(self, '_m_has_bmr'):
return self._m_has_bmr if hasattr(self, '_m_has_bmr') else None
self._m_has_bmr = self.bmrlnth != 0
return self._m_has_bmr if hasattr(self, '_m_has_bmr') else None
@property
def has_tmr(self):
if hasattr(self, '_m_has_tmr'):
return self._m_has_tmr if hasattr(self, '_m_has_tmr') else None
self._m_has_tmr = self.tmrlnth != 0
return self._m_has_tmr if hasattr(self, '_m_has_tmr') else None
@property
def tmrbnd_size(self):
if hasattr(self, '_m_tmrbnd_size'):
return self._m_tmrbnd_size if hasattr(self, '_m_tmrbnd_size') else None
self._m_tmrbnd_size = ((self.bmrtmr_count * 4) if self.has_tmr else 0)
return self._m_tmrbnd_size if hasattr(self, '_m_tmrbnd_size') else None
@property
def tpxcd_size(self):
if hasattr(self, '_m_tpxcd_size'):
return self._m_tpxcd_size if hasattr(self, '_m_tpxcd_size') else None
self._m_tpxcd_size = (self.tpxcdlnth if (self.tpxcdlnth % 8) == 0 else (self.tpxcdlnth + (8 - (self.tpxcdlnth % 8)))) // 8
return self._m_tpxcd_size if hasattr(self, '_m_tpxcd_size') else None
@property
def total_size(self):
if hasattr(self, '_m_total_size'):
return self._m_total_size if hasattr(self, '_m_total_size') else None
self._m_total_size = ((((((4 + 2) + 2) + 2) + self.tpxcd_size) + self.bmrbnd_size) + self.tmrbnd_size)
return self._m_total_size if hasattr(self, '_m_total_size') else None
@property
def bmrbnd_size(self):
if hasattr(self, '_m_bmrbnd_size'):
return self._m_bmrbnd_size if hasattr(self, '_m_bmrbnd_size') else None
self._m_bmrbnd_size = ((self.bmrtmr_count * 4) if self.has_bmr else 0)
return self._m_bmrbnd_size if hasattr(self, '_m_bmrbnd_size') else None
@property
def bmrtmr_count(self):
if hasattr(self, '_m_bmrtmr_count'):
return self._m_bmrtmr_count if hasattr(self, '_m_bmrtmr_count') else None
self._m_bmrtmr_count = ((int(self._parent.image_sub_header.num_blocks_per_row) * int(self._parent.image_sub_header.num_blocks_per_col)) * (1 if self._parent.image_sub_header.img_mode != u"S" else (int(self._parent.image_sub_header.num_bands) if int(self._parent.image_sub_header.num_bands) != 0 else int(self._parent.image_sub_header.num_multispectral_bands))))
return self._m_bmrtmr_count if hasattr(self, '_m_bmrtmr_count') else None
class GraphicsSegment(KaitaiStruct):
SEQ_FIELDS = ["graphic_sub_header", "graphic_data_field"]
def __init__(self, idx, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.idx = idx
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['graphic_sub_header']['start'] = self._io.pos()
self.graphic_sub_header = self._root.GraphicSubHeader(self._io, self, self._root)
self.graphic_sub_header._read()
self._debug['graphic_sub_header']['end'] = self._io.pos()
self._debug['graphic_data_field']['start'] = self._io.pos()
self.graphic_data_field = self._io.read_bytes(int(self._parent.header.lnnfo[self.idx].length_graphic_segment))
self._debug['graphic_data_field']['end'] = self._io.pos()
class DataSubHeader(KaitaiStruct):
SEQ_FIELDS = ["des_base", "overflowed_header_type", "data_item_overflowed", "des_defined_subheader_fields_len", "desshf", "des_defined_data_field"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['des_base']['start'] = self._io.pos()
self.des_base = self._root.DataSubHeaderBase(self._io, self, self._root)
self.des_base._read()
self._debug['des_base']['end'] = self._io.pos()
if self.tre_ofl:
self._debug['overflowed_header_type']['start'] = self._io.pos()
self.overflowed_header_type = (self._io.read_bytes(6)).decode(u"UTF-8")
self._debug['overflowed_header_type']['end'] = self._io.pos()
if self.tre_ofl:
self._debug['data_item_overflowed']['start'] = self._io.pos()
self.data_item_overflowed = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['data_item_overflowed']['end'] = self._io.pos()
self._debug['des_defined_subheader_fields_len']['start'] = self._io.pos()
self.des_defined_subheader_fields_len = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['des_defined_subheader_fields_len']['end'] = self._io.pos()
self._debug['desshf']['start'] = self._io.pos()
self.desshf = (self._io.read_bytes(int(self.des_defined_subheader_fields_len))).decode(u"UTF-8")
self._debug['desshf']['end'] = self._io.pos()
self._debug['des_defined_data_field']['start'] = self._io.pos()
self.des_defined_data_field = (self._io.read_bytes_full()).decode(u"UTF-8")
self._debug['des_defined_data_field']['end'] = self._io.pos()
@property
def tre_ofl(self):
if hasattr(self, '_m_tre_ofl'):
return self._m_tre_ofl if hasattr(self, '_m_tre_ofl') else None
self._m_tre_ofl = self.des_base.desid == u"TRE_OVERFLOW"
return self._m_tre_ofl if hasattr(self, '_m_tre_ofl') else None
class DataExtensionSegment(KaitaiStruct):
SEQ_FIELDS = ["data_sub_header", "data_data_field"]
def __init__(self, idx, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.idx = idx
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['data_sub_header']['start'] = self._io.pos()
self._raw_data_sub_header = self._io.read_bytes(int(self._parent.header.ldnfo[self.idx].length_data_extension_subheader))
_io__raw_data_sub_header = KaitaiStream(BytesIO(self._raw_data_sub_header))
self.data_sub_header = self._root.DataSubHeader(_io__raw_data_sub_header, self, self._root)
self.data_sub_header._read()
self._debug['data_sub_header']['end'] = self._io.pos()
self._debug['data_data_field']['start'] = self._io.pos()
self.data_data_field = self._io.read_bytes(int(self._parent.header.ldnfo[self.idx].length_data_extension_segment))
self._debug['data_data_field']['end'] = self._io.pos()
class DataSubHeaderTre(KaitaiStruct):
SEQ_FIELDS = ["des_base", "overflowed_header_type", "data_item_overflowed", "des_defined_subheader_fields_len", "des_defined_data_field"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['des_base']['start'] = self._io.pos()
self.des_base = self._root.DataSubHeaderBase(self._io, self, self._root)
self.des_base._read()
self._debug['des_base']['end'] = self._io.pos()
if self.des_base.desid == u"TRE_OVERFLOW":
self._debug['overflowed_header_type']['start'] = self._io.pos()
self.overflowed_header_type = (self._io.read_bytes(6)).decode(u"UTF-8")
self._debug['overflowed_header_type']['end'] = self._io.pos()
if self.des_base.desid == u"TRE_OVERFLOW":
self._debug['data_item_overflowed']['start'] = self._io.pos()
self.data_item_overflowed = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['data_item_overflowed']['end'] = self._io.pos()
self._debug['des_defined_subheader_fields_len']['start'] = self._io.pos()
self.des_defined_subheader_fields_len = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['des_defined_subheader_fields_len']['end'] = self._io.pos()
self._debug['des_defined_data_field']['start'] = self._io.pos()
self.des_defined_data_field = (self._io.read_bytes(int(self.des_defined_subheader_fields_len))).decode(u"UTF-8")
self._debug['des_defined_data_field']['end'] = self._io.pos()
class ImageSubHeader(KaitaiStruct):
SEQ_FIELDS = ["file_part_type", "image_id_1", "image_date_time", "target_id", "image_id_2", "image_security_classification", "encryption", "image_source", "num_sig_rows", "num_sig_cols", "pixel_value_type", "image_representation", "image_category", "actual_bits_per_pixel_per_band", "pixel_justification", "image_coordinate_rep", "image_geo_loc", "num_img_comments", "img_comments", "img_compression", "compression_rate_code", "num_bands", "num_multispectral_bands", "bands", "img_sync_code", "img_mode", "num_blocks_per_row", "num_blocks_per_col", "num_pixels_per_block_horz", "num_pixels_per_block_vert", "num_pixels_per_band", "img_display_level", "attachment_level", "img_location", "img_magnification", "user_def_img_data_len", "user_def_overflow", "user_def_img_data", "image_extended_sub_header"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['file_part_type']['start'] = self._io.pos()
self.file_part_type = self._io.read_bytes(2)
self._debug['file_part_type']['end'] = self._io.pos()
if not self.file_part_type == b"\x49\x4D":
raise kaitaistruct.ValidationNotEqualError(b"\x49\x4D", self.file_part_type, self._io, u"/types/image_sub_header/seq/0")
self._debug['image_id_1']['start'] = self._io.pos()
self.image_id_1 = (self._io.read_bytes(10)).decode(u"UTF-8")
self._debug['image_id_1']['end'] = self._io.pos()
self._debug['image_date_time']['start'] = self._io.pos()
self.image_date_time = self._root.DateTime(self._io, self, self._root)
self.image_date_time._read()
self._debug['image_date_time']['end'] = self._io.pos()
self._debug['target_id']['start'] = self._io.pos()
self.target_id = (self._io.read_bytes(17)).decode(u"UTF-8")
self._debug['target_id']['end'] = self._io.pos()
self._debug['image_id_2']['start'] = self._io.pos()
self.image_id_2 = (self._io.read_bytes(80)).decode(u"UTF-8")
self._debug['image_id_2']['end'] = self._io.pos()
self._debug['image_security_classification']['start'] = self._io.pos()
self.image_security_classification = self._root.Clasnfo(self._io, self, self._root)
self.image_security_classification._read()
self._debug['image_security_classification']['end'] = self._io.pos()
self._debug['encryption']['start'] = self._io.pos()
self.encryption = self._root.Encrypt(self._io, self, self._root)
self.encryption._read()
self._debug['encryption']['end'] = self._io.pos()
self._debug['image_source']['start'] = self._io.pos()
self.image_source = (self._io.read_bytes(42)).decode(u"UTF-8")
self._debug['image_source']['end'] = self._io.pos()
self._debug['num_sig_rows']['start'] = self._io.pos()
self.num_sig_rows = (self._io.read_bytes(8)).decode(u"UTF-8")
self._debug['num_sig_rows']['end'] = self._io.pos()
self._debug['num_sig_cols']['start'] = self._io.pos()
self.num_sig_cols = (self._io.read_bytes(8)).decode(u"UTF-8")
self._debug['num_sig_cols']['end'] = self._io.pos()
self._debug['pixel_value_type']['start'] = self._io.pos()
self.pixel_value_type = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['pixel_value_type']['end'] = self._io.pos()
self._debug['image_representation']['start'] = self._io.pos()
self.image_representation = (self._io.read_bytes(8)).decode(u"UTF-8")
self._debug['image_representation']['end'] = self._io.pos()
self._debug['image_category']['start'] = self._io.pos()
self.image_category = (self._io.read_bytes(8)).decode(u"UTF-8")
self._debug['image_category']['end'] = self._io.pos()
self._debug['actual_bits_per_pixel_per_band']['start'] = self._io.pos()
self.actual_bits_per_pixel_per_band = (self._io.read_bytes(2)).decode(u"UTF-8")
self._debug['actual_bits_per_pixel_per_band']['end'] = self._io.pos()
self._debug['pixel_justification']['start'] = self._io.pos()
self.pixel_justification = (self._io.read_bytes(1)).decode(u"UTF-8")
self._debug['pixel_justification']['end'] = self._io.pos()
self._debug['image_coordinate_rep']['start'] = self._io.pos()
self.image_coordinate_rep = (self._io.read_bytes(1)).decode(u"UTF-8")
self._debug['image_coordinate_rep']['end'] = self._io.pos()
self._debug['image_geo_loc']['start'] = self._io.pos()
self.image_geo_loc = (self._io.read_bytes(60)).decode(u"UTF-8")
self._debug['image_geo_loc']['end'] = self._io.pos()
self._debug['num_img_comments']['start'] = self._io.pos()
self.num_img_comments = (self._io.read_bytes(1)).decode(u"UTF-8")
self._debug['num_img_comments']['end'] = self._io.pos()
self._debug['img_comments']['start'] = self._io.pos()
self.img_comments = [None] * (int(self.num_img_comments))
for i in range(int(self.num_img_comments)):
if not 'arr' in self._debug['img_comments']:
self._debug['img_comments']['arr'] = []
self._debug['img_comments']['arr'].append({'start': self._io.pos()})
_t_img_comments = self._root.ImageComment(self._io, self, self._root)
_t_img_comments._read()
self.img_comments[i] = _t_img_comments
self._debug['img_comments']['arr'][i]['end'] = self._io.pos()
self._debug['img_comments']['end'] = self._io.pos()
self._debug['img_compression']['start'] = self._io.pos()
self.img_compression = (self._io.read_bytes(2)).decode(u"UTF-8")
self._debug['img_compression']['end'] = self._io.pos()
self._debug['compression_rate_code']['start'] = self._io.pos()
self.compression_rate_code = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['compression_rate_code']['end'] = self._io.pos()
self._debug['num_bands']['start'] = self._io.pos()
self.num_bands = (self._io.read_bytes(1)).decode(u"UTF-8")
self._debug['num_bands']['end'] = self._io.pos()
if int(self.num_bands) == 0:
self._debug['num_multispectral_bands']['start'] = self._io.pos()
self.num_multispectral_bands = (self._io.read_bytes(5)).decode(u"UTF-8")
self._debug['num_multispectral_bands']['end'] = self._io.pos()
self._debug['bands']['start'] = self._io.pos()
self.bands = [None] * ((int(self.num_bands) if int(self.num_bands) != 0 else int(self.num_multispectral_bands)))
for i in range((int(self.num_bands) if int(self.num_bands) != 0 else int(self.num_multispectral_bands))):
if not 'arr' in self._debug['bands']:
self._debug['bands']['arr'] = []
self._debug['bands']['arr'].append({'start': self._io.pos()})
_t_bands = self._root.BandInfo(self._io, self, self._root)
_t_bands._read()
self.bands[i] = _t_bands
self._debug['bands']['arr'][i]['end'] = self._io.pos()
self._debug['bands']['end'] = self._io.pos()
self._debug['img_sync_code']['start'] = self._io.pos()
self.img_sync_code = (self._io.read_bytes(1)).decode(u"UTF-8")
self._debug['img_sync_code']['end'] = self._io.pos()
self._debug['img_mode']['start'] = self._io.pos()
self.img_mode = (self._io.read_bytes(1)).decode(u"UTF-8")
self._debug['img_mode']['end'] = self._io.pos()
self._debug['num_blocks_per_row']['start'] = self._io.pos()
self.num_blocks_per_row = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['num_blocks_per_row']['end'] = self._io.pos()
self._debug['num_blocks_per_col']['start'] = self._io.pos()
self.num_blocks_per_col = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['num_blocks_per_col']['end'] = self._io.pos()
self._debug['num_pixels_per_block_horz']['start'] = self._io.pos()
self.num_pixels_per_block_horz = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['num_pixels_per_block_horz']['end'] = self._io.pos()
self._debug['num_pixels_per_block_vert']['start'] = self._io.pos()
self.num_pixels_per_block_vert = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['num_pixels_per_block_vert']['end'] = self._io.pos()
self._debug['num_pixels_per_band']['start'] = self._io.pos()
self.num_pixels_per_band = (self._io.read_bytes(2)).decode(u"UTF-8")
self._debug['num_pixels_per_band']['end'] = self._io.pos()
self._debug['img_display_level']['start'] = self._io.pos()
self.img_display_level = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['img_display_level']['end'] = self._io.pos()
self._debug['attachment_level']['start'] = self._io.pos()
self.attachment_level = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['attachment_level']['end'] = self._io.pos()
self._debug['img_location']['start'] = self._io.pos()
self.img_location = (self._io.read_bytes(10)).decode(u"UTF-8")
self._debug['img_location']['end'] = self._io.pos()
self._debug['img_magnification']['start'] = self._io.pos()
self.img_magnification = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['img_magnification']['end'] = self._io.pos()
self._debug['user_def_img_data_len']['start'] = self._io.pos()
self.user_def_img_data_len = (self._io.read_bytes(5)).decode(u"UTF-8")
self._debug['user_def_img_data_len']['end'] = self._io.pos()
if int(self.user_def_img_data_len) != 0:
self._debug['user_def_overflow']['start'] = self._io.pos()
self.user_def_overflow = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['user_def_overflow']['end'] = self._io.pos()
if int(self.user_def_img_data_len) > 2:
self._debug['user_def_img_data']['start'] = self._io.pos()
self.user_def_img_data = [None] * ((int(self.user_def_img_data_len) - 3))
for i in range((int(self.user_def_img_data_len) - 3)):
if not 'arr' in self._debug['user_def_img_data']:
self._debug['user_def_img_data']['arr'] = []
self._debug['user_def_img_data']['arr'].append({'start': self._io.pos()})
self.user_def_img_data[i] = self._io.read_u1()
self._debug['user_def_img_data']['arr'][i]['end'] = self._io.pos()
self._debug['user_def_img_data']['end'] = self._io.pos()
self._debug['image_extended_sub_header']['start'] = self._io.pos()
self.image_extended_sub_header = self._root.TreHeader(self._io, self, self._root)
self.image_extended_sub_header._read()
self._debug['image_extended_sub_header']['end'] = self._io.pos()
class ReservedSubHeader(KaitaiStruct):
SEQ_FIELDS = ["file_part_type_re", "res_type_id", "res_version", "reclasnfo", "res_user_defined_subheader_length", "res_user_defined_subheader_fields", "res_user_defined_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['file_part_type_re']['start'] = self._io.pos()
self.file_part_type_re = self._io.read_bytes(2)
self._debug['file_part_type_re']['end'] = self._io.pos()
if not self.file_part_type_re == b"\x52\x45":
raise kaitaistruct.ValidationNotEqualError(b"\x52\x45", self.file_part_type_re, self._io, u"/types/reserved_sub_header/seq/0")
self._debug['res_type_id']['start'] = self._io.pos()
self.res_type_id = (self._io.read_bytes(25)).decode(u"UTF-8")
self._debug['res_type_id']['end'] = self._io.pos()
self._debug['res_version']['start'] = self._io.pos()
self.res_version = (self._io.read_bytes(2)).decode(u"UTF-8")
self._debug['res_version']['end'] = self._io.pos()
self._debug['reclasnfo']['start'] = self._io.pos()
self.reclasnfo = self._root.Clasnfo(self._io, self, self._root)
self.reclasnfo._read()
self._debug['reclasnfo']['end'] = self._io.pos()
self._debug['res_user_defined_subheader_length']['start'] = self._io.pos()
self.res_user_defined_subheader_length = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['res_user_defined_subheader_length']['end'] = self._io.pos()
self._debug['res_user_defined_subheader_fields']['start'] = self._io.pos()
self.res_user_defined_subheader_fields = (self._io.read_bytes(int(self.res_user_defined_subheader_length))).decode(u"UTF-8")
self._debug['res_user_defined_subheader_fields']['end'] = self._io.pos()
self._debug['res_user_defined_data']['start'] = self._io.pos()
self.res_user_defined_data = (self._io.read_bytes_full()).decode(u"UTF-8")
self._debug['res_user_defined_data']['end'] = self._io.pos()
class DataSubHeaderBase(KaitaiStruct):
SEQ_FIELDS = ["file_part_type_de", "desid", "data_definition_version", "declasnfo"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['file_part_type_de']['start'] = self._io.pos()
self.file_part_type_de = self._io.read_bytes(2)
self._debug['file_part_type_de']['end'] = self._io.pos()
if not self.file_part_type_de == b"\x44\x45":
raise kaitaistruct.ValidationNotEqualError(b"\x44\x45", self.file_part_type_de, self._io, u"/types/data_sub_header_base/seq/0")
self._debug['desid']['start'] = self._io.pos()
self.desid = (self._io.read_bytes(25)).decode(u"UTF-8")
self._debug['desid']['end'] = self._io.pos()
self._debug['data_definition_version']['start'] = self._io.pos()
self.data_definition_version = (self._io.read_bytes(2)).decode(u"UTF-8")
self._debug['data_definition_version']['end'] = self._io.pos()
self._debug['declasnfo']['start'] = self._io.pos()
self.declasnfo = self._root.Clasnfo(self._io, self, self._root)
self.declasnfo._read()
self._debug['declasnfo']['end'] = self._io.pos()
class TextSubHeader(KaitaiStruct):
SEQ_FIELDS = ["text_date_time", "text_title", "text_security_class", "encryp", "text_format", "text_extended_sub_header"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['text_date_time']['start'] = self._io.pos()
self.text_date_time = (self._io.read_bytes(14)).decode(u"UTF-8")
self._debug['text_date_time']['end'] = self._io.pos()
self._debug['text_title']['start'] = self._io.pos()
self.text_title = (self._io.read_bytes(80)).decode(u"UTF-8")
self._debug['text_title']['end'] = self._io.pos()
self._debug['text_security_class']['start'] = self._io.pos()
self.text_security_class = self._root.Clasnfo(self._io, self, self._root)
self.text_security_class._read()
self._debug['text_security_class']['end'] = self._io.pos()
self._debug['encryp']['start'] = self._io.pos()
self.encryp = self._root.Encrypt(self._io, self, self._root)
self.encryp._read()
self._debug['encryp']['end'] = self._io.pos()
self._debug['text_format']['start'] = self._io.pos()
self.text_format = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['text_format']['end'] = self._io.pos()
self._debug['text_extended_sub_header']['start'] = self._io.pos()
self.text_extended_sub_header = self._root.TreHeader(self._io, self, self._root)
self.text_extended_sub_header._read()
self._debug['text_extended_sub_header']['end'] = self._io.pos()
class DateTime(KaitaiStruct):
SEQ_FIELDS = ["_unnamed0"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['_unnamed0']['start'] = self._io.pos()
self._unnamed0 = (self._io.read_bytes(14)).decode(u"UTF-8")
self._debug['_unnamed0']['end'] = self._io.pos()
class Header(KaitaiStruct):
SEQ_FIELDS = ["file_profile_name", "file_version", "complexity_level", "standard_type", "originating_station_id", "file_date_time", "file_title", "file_security", "file_copy_number", "file_num_of_copys", "encryption", "file_bg_color", "originator_name", "originator_phone", "file_length", "file_header_length", "num_image_segments", "linfo", "num_graphics_segments", "lnnfo", "reserved_numx", "num_text_files", "ltnfo", "num_data_extension", "ldnfo", "num_reserved_extension", "lrnfo", "user_defined_header", "extended_header"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['file_profile_name']['start'] = self._io.pos()
self.file_profile_name = self._io.read_bytes(4)
self._debug['file_profile_name']['end'] = self._io.pos()
if not self.file_profile_name == b"\x4E\x49\x54\x46":
raise kaitaistruct.ValidationNotEqualError(b"\x4E\x49\x54\x46", self.file_profile_name, self._io, u"/types/header/seq/0")
self._debug['file_version']['start'] = self._io.pos()
self.file_version = self._io.read_bytes(5)
self._debug['file_version']['end'] = self._io.pos()
if not self.file_version == b"\x30\x32\x2E\x31\x30":
raise kaitaistruct.ValidationNotEqualError(b"\x30\x32\x2E\x31\x30", self.file_version, self._io, u"/types/header/seq/1")
self._debug['complexity_level']['start'] = self._io.pos()
self.complexity_level = self._io.read_bytes(2)
self._debug['complexity_level']['end'] = self._io.pos()
self._debug['standard_type']['start'] = self._io.pos()
self.standard_type = self._io.read_bytes(4)
self._debug['standard_type']['end'] = self._io.pos()
if not self.standard_type == b"\x42\x46\x30\x31":
raise kaitaistruct.ValidationNotEqualError(b"\x42\x46\x30\x31", self.standard_type, self._io, u"/types/header/seq/3")
self._debug['originating_station_id']['start'] = self._io.pos()
self.originating_station_id = (self._io.read_bytes(10)).decode(u"UTF-8")
self._debug['originating_station_id']['end'] = self._io.pos()
self._debug['file_date_time']['start'] = self._io.pos()
self.file_date_time = self._root.DateTime(self._io, self, self._root)
self.file_date_time._read()
self._debug['file_date_time']['end'] = self._io.pos()
self._debug['file_title']['start'] = self._io.pos()
self.file_title = (self._io.read_bytes(80)).decode(u"UTF-8")
self._debug['file_title']['end'] = self._io.pos()
self._debug['file_security']['start'] = self._io.pos()
self.file_security = self._root.Clasnfo(self._io, self, self._root)
self.file_security._read()
self._debug['file_security']['end'] = self._io.pos()
self._debug['file_copy_number']['start'] = self._io.pos()
self.file_copy_number = (self._io.read_bytes(5)).decode(u"UTF-8")
self._debug['file_copy_number']['end'] = self._io.pos()
self._debug['file_num_of_copys']['start'] = self._io.pos()
self.file_num_of_copys = (self._io.read_bytes(5)).decode(u"UTF-8")
self._debug['file_num_of_copys']['end'] = self._io.pos()
self._debug['encryption']['start'] = self._io.pos()
self.encryption = self._root.Encrypt(self._io, self, self._root)
self.encryption._read()
self._debug['encryption']['end'] = self._io.pos()
self._debug['file_bg_color']['start'] = self._io.pos()
self.file_bg_color = self._io.read_bytes(3)
self._debug['file_bg_color']['end'] = self._io.pos()
self._debug['originator_name']['start'] = self._io.pos()
self.originator_name = (self._io.read_bytes(24)).decode(u"UTF-8")
self._debug['originator_name']['end'] = self._io.pos()
self._debug['originator_phone']['start'] = self._io.pos()
self.originator_phone = (self._io.read_bytes(18)).decode(u"UTF-8")
self._debug['originator_phone']['end'] = self._io.pos()
self._debug['file_length']['start'] = self._io.pos()
self.file_length = (self._io.read_bytes(12)).decode(u"UTF-8")
self._debug['file_length']['end'] = self._io.pos()
self._debug['file_header_length']['start'] = self._io.pos()
self.file_header_length = (self._io.read_bytes(6)).decode(u"UTF-8")
self._debug['file_header_length']['end'] = self._io.pos()
self._debug['num_image_segments']['start'] = self._io.pos()
self.num_image_segments = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['num_image_segments']['end'] = self._io.pos()
self._debug['linfo']['start'] = self._io.pos()
self.linfo = [None] * (int(self.num_image_segments))
for i in range(int(self.num_image_segments)):
if not 'arr' in self._debug['linfo']:
self._debug['linfo']['arr'] = []
self._debug['linfo']['arr'].append({'start': self._io.pos()})
_t_linfo = self._root.LengthImageInfo(self._io, self, self._root)
_t_linfo._read()
self.linfo[i] = _t_linfo
self._debug['linfo']['arr'][i]['end'] = self._io.pos()
self._debug['linfo']['end'] = self._io.pos()
self._debug['num_graphics_segments']['start'] = self._io.pos()
self.num_graphics_segments = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['num_graphics_segments']['end'] = self._io.pos()
self._debug['lnnfo']['start'] = self._io.pos()
self.lnnfo = [None] * (int(self.num_graphics_segments))
for i in range(int(self.num_graphics_segments)):
if not 'arr' in self._debug['lnnfo']:
self._debug['lnnfo']['arr'] = []
self._debug['lnnfo']['arr'].append({'start': self._io.pos()})
_t_lnnfo = self._root.LengthGraphicInfo(self._io, self, self._root)
_t_lnnfo._read()
self.lnnfo[i] = _t_lnnfo
self._debug['lnnfo']['arr'][i]['end'] = self._io.pos()
self._debug['lnnfo']['end'] = self._io.pos()
self._debug['reserved_numx']['start'] = self._io.pos()
self.reserved_numx = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['reserved_numx']['end'] = self._io.pos()
self._debug['num_text_files']['start'] = self._io.pos()
self.num_text_files = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['num_text_files']['end'] = self._io.pos()
self._debug['ltnfo']['start'] = self._io.pos()
self.ltnfo = [None] * (int(self.num_text_files))
for i in range(int(self.num_text_files)):
if not 'arr' in self._debug['ltnfo']:
self._debug['ltnfo']['arr'] = []
self._debug['ltnfo']['arr'].append({'start': self._io.pos()})
_t_ltnfo = self._root.LengthTextInfo(self._io, self, self._root)
_t_ltnfo._read()
self.ltnfo[i] = _t_ltnfo
self._debug['ltnfo']['arr'][i]['end'] = self._io.pos()
self._debug['ltnfo']['end'] = self._io.pos()
self._debug['num_data_extension']['start'] = self._io.pos()
self.num_data_extension = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['num_data_extension']['end'] = self._io.pos()
self._debug['ldnfo']['start'] = self._io.pos()
self.ldnfo = [None] * (int(self.num_data_extension))
for i in range(int(self.num_data_extension)):
if not 'arr' in self._debug['ldnfo']:
self._debug['ldnfo']['arr'] = []
self._debug['ldnfo']['arr'].append({'start': self._io.pos()})
_t_ldnfo = self._root.LengthDataInfo(self._io, self, self._root)
_t_ldnfo._read()
self.ldnfo[i] = _t_ldnfo
self._debug['ldnfo']['arr'][i]['end'] = self._io.pos()
self._debug['ldnfo']['end'] = self._io.pos()
self._debug['num_reserved_extension']['start'] = self._io.pos()
self.num_reserved_extension = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['num_reserved_extension']['end'] = self._io.pos()
self._debug['lrnfo']['start'] = self._io.pos()
self.lrnfo = [None] * (int(self.num_reserved_extension))
for i in range(int(self.num_reserved_extension)):
if not 'arr' in self._debug['lrnfo']:
self._debug['lrnfo']['arr'] = []
self._debug['lrnfo']['arr'].append({'start': self._io.pos()})
_t_lrnfo = self._root.LengthReservedInfo(self._io, self, self._root)
_t_lrnfo._read()
self.lrnfo[i] = _t_lrnfo
self._debug['lrnfo']['arr'][i]['end'] = self._io.pos()
self._debug['lrnfo']['end'] = self._io.pos()
self._debug['user_defined_header']['start'] = self._io.pos()
self.user_defined_header = self._root.TreHeader(self._io, self, self._root)
self.user_defined_header._read()
self._debug['user_defined_header']['end'] = self._io.pos()
self._debug['extended_header']['start'] = self._io.pos()
self.extended_header = self._root.TreHeader(self._io, self, self._root)
self.extended_header._read()
self._debug['extended_header']['end'] = self._io.pos()
class DataSubHeaderStreaming(KaitaiStruct):
"""Streaming file Header Data Extension Segment Subheader."""
SEQ_FIELDS = ["des_base", "des_defined_subheader_fields_len", "sfh_l1", "sfh_delim1", "sfh_dr", "sfh_delim2", "sfh_l2"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['des_base']['start'] = self._io.pos()
self.des_base = self._root.DataSubHeaderBase(self._io, self, self._root)
self.des_base._read()
self._debug['des_base']['end'] = self._io.pos()
self._debug['des_defined_subheader_fields_len']['start'] = self._io.pos()
self.des_defined_subheader_fields_len = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['des_defined_subheader_fields_len']['end'] = self._io.pos()
self._debug['sfh_l1']['start'] = self._io.pos()
self.sfh_l1 = (self._io.read_bytes(7)).decode(u"UTF-8")
self._debug['sfh_l1']['end'] = self._io.pos()
self._debug['sfh_delim1']['start'] = self._io.pos()
self.sfh_delim1 = self._io.read_u4be()
self._debug['sfh_delim1']['end'] = self._io.pos()
self._debug['sfh_dr']['start'] = self._io.pos()
self.sfh_dr = [None] * (int(self.sfh_l1))
for i in range(int(self.sfh_l1)):
if not 'arr' in self._debug['sfh_dr']:
self._debug['sfh_dr']['arr'] = []
self._debug['sfh_dr']['arr'].append({'start': self._io.pos()})
self.sfh_dr[i] = self._io.read_u1()
self._debug['sfh_dr']['arr'][i]['end'] = self._io.pos()
self._debug['sfh_dr']['end'] = self._io.pos()
self._debug['sfh_delim2']['start'] = self._io.pos()
self.sfh_delim2 = self._io.read_u4be()
self._debug['sfh_delim2']['end'] = self._io.pos()
self._debug['sfh_l2']['start'] = self._io.pos()
self.sfh_l2 = (self._io.read_bytes(7)).decode(u"UTF-8")
self._debug['sfh_l2']['end'] = self._io.pos()
class TreHeader(KaitaiStruct):
SEQ_FIELDS = ["header_data_length", "header_overflow", "header_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['header_data_length']['start'] = self._io.pos()
self.header_data_length = (self._io.read_bytes(5)).decode(u"UTF-8")
self._debug['header_data_length']['end'] = self._io.pos()
if int(self.header_data_length) != 0:
self._debug['header_overflow']['start'] = self._io.pos()
self.header_overflow = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['header_overflow']['end'] = self._io.pos()
if int(self.header_data_length) > 2:
self._debug['header_data']['start'] = self._io.pos()
self.header_data = [None] * ((int(self.header_data_length) - 3))
for i in range((int(self.header_data_length) - 3)):
if not 'arr' in self._debug['header_data']:
self._debug['header_data']['arr'] = []
self._debug['header_data']['arr'].append({'start': self._io.pos()})
self.header_data[i] = self._io.read_u1()
self._debug['header_data']['arr'][i]['end'] = self._io.pos()
self._debug['header_data']['end'] = self._io.pos()
class LengthImageInfo(KaitaiStruct):
SEQ_FIELDS = ["length_image_subheader", "length_image_segment"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['length_image_subheader']['start'] = self._io.pos()
self.length_image_subheader = (self._io.read_bytes(6)).decode(u"UTF-8")
self._debug['length_image_subheader']['end'] = self._io.pos()
self._debug['length_image_segment']['start'] = self._io.pos()
self.length_image_segment = (self._io.read_bytes(10)).decode(u"UTF-8")
self._debug['length_image_segment']['end'] = self._io.pos()
class LengthDataInfo(KaitaiStruct):
SEQ_FIELDS = ["length_data_extension_subheader", "length_data_extension_segment"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['length_data_extension_subheader']['start'] = self._io.pos()
self.length_data_extension_subheader = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['length_data_extension_subheader']['end'] = self._io.pos()
self._debug['length_data_extension_segment']['start'] = self._io.pos()
self.length_data_extension_segment = (self._io.read_bytes(9)).decode(u"UTF-8")
self._debug['length_data_extension_segment']['end'] = self._io.pos()
class LengthTextInfo(KaitaiStruct):
SEQ_FIELDS = ["length_text_subheader", "length_text_segment"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['length_text_subheader']['start'] = self._io.pos()
self.length_text_subheader = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['length_text_subheader']['end'] = self._io.pos()
self._debug['length_text_segment']['start'] = self._io.pos()
self.length_text_segment = (self._io.read_bytes(5)).decode(u"UTF-8")
self._debug['length_text_segment']['end'] = self._io.pos()
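# Usage sketch for this generated parser (assumptions: the kaitaistruct runtime is
# installed, the top-level class produced from the same .ksy spec is named `Nitf`,
# and it exposes the file header as `header`; the file path is illustrative):
#   nitf = Nitf.from_file("example.ntf")
#   nitf._read()  # this module was generated in debug mode, so _read() is not called automatically
#   print(nitf.header.file_title)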
|
import json
from typing import Optional
import zipcodes
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.exceptions import InvalidExpectationConfigurationError
from great_expectations.execution_engine import (
PandasExecutionEngine,
SparkDFExecutionEngine,
SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.expectation import ColumnMapExpectation
from great_expectations.expectations.metrics import (
ColumnMapMetricProvider,
column_condition_partial,
)
def is_valid_minnesota_zip(zip: str):
list_of_dicts_of_minnesota_zips = zipcodes.filter_by(state="MN")
list_of_minnesota_zips = [d["zip_code"] for d in list_of_dicts_of_minnesota_zips]
    # Check the type first so that non-string values (e.g. ints/NaN) return
    # False instead of raising a TypeError on len().
    if type(zip) != str:
        return False
    elif len(zip) > 10:
        return False
    elif zip in list_of_minnesota_zips:
        return True
    else:
        return False
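# Illustrative values (taken from the examples further down): "55040" is a valid
# Minnesota zip code and "99999" is not, so
#   is_valid_minnesota_zip("55040") -> True
#   is_valid_minnesota_zip("99999") -> False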
# This class defines a Metric to support your Expectation.
# For most ColumnMapExpectations, the main business logic for calculation will live in this class.
class ColumnValuesToBeValidMinnesotaZip(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_minnesota_zip"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_minnesota_zip(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
class ExpectColumnValuesToBeValidMinnesotaZip(ColumnMapExpectation):
"""Expect values in this column to be valid Minnesota zipcodes.
See https://pypi.org/project/zipcodes/ for more information.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"valid_minnesota_zip": ["55040", "55330", "55781", "55968"],
"invalid_minnesota_zip": ["-10000", "1234", "99999", "25487"],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_minnesota_zip"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_minnesota_zip"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_minnesota_zip"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration]
) -> None:
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
"""
super().validate_configuration(configuration)
if configuration is None:
configuration = self.configuration
# # Check other things in configuration.kwargs and raise Exceptions if needed
# try:
# assert (
# ...
# ), "message"
# assert (
# ...
# ), "message"
# except AssertionError as e:
# raise InvalidExpectationConfigurationError(str(e))
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"hackathon",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73", # Don't forget to add your github handle here!
],
"requirements": ["zipcodes"],
}
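# Usage sketch (assumptions: this Expectation is registered in a Great Expectations
# project and `validator` is a Validator over data with a "zip" column; both names
# are illustrative):
#   validator.expect_column_values_to_be_valid_minnesota_zip(column="zip", mostly=0.95)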
if __name__ == "__main__":
ExpectColumnValuesToBeValidMinnesotaZip().print_diagnostic_checklist()
|
#!/usr/bin/python
from setuptools import setup, find_packages
import sys
install_requires = []
pyversion = sys.version_info[:2]
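# argparse ships with the standard library from Python 2.7 and 3.2 onward;
# older interpreters need the separate backport declared below.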
if pyversion < (2, 7) or (3, 0) <= pyversion <= (3, 1):
install_requires.append('argparse')
setup(
name='radosgw-agent',
version='1.0',
packages=find_packages(),
author='Josh Durgin',
author_email='josh.durgin@inktank.com',
description='Synchronize users and data between radosgw clusters',
license='MIT',
keywords='radosgw ceph radosgw-agent',
url="https://github.com/ceph/radosgw-agent",
install_requires=[
'setuptools',
'boto ==2.2.2',
'requests',
] + install_requires,
entry_points={
'console_scripts': [
'radosgw-agent = radosgw_agent.cli:main',
],
},
)
|
from flask import Blueprint, request
from app.spiders.core import *
from app.utils import build_result
from app.constants import code
core = Blueprint('core', __name__)
@core.route('/login', methods=['POST'])
def login():
data = request.form
username = data.get('username')
password = data.get('password')
return core_login(username, password)
@core.route('/book_borrow_info', methods=['GET'])
def book_borrow_info():
token = request.args.get('token')
return get_book_borrow_info(token)
@core.route('/trans_list', methods=['GET'])
def trans_list():
token = request.args.get('token')
return get_trans_list(token)
@core.route('/tel_book', methods=['GET'])
def tel_book():
department_id = request.args.get('department_id')
return get_tel_book(department_id)
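# Example requests (illustrative host/port; assumes the blueprint is registered
# on the app without a URL prefix):
#   curl -X POST -d "username=alice&password=secret" http://localhost:5000/login
#   curl "http://localhost:5000/book_borrow_info?token=<token>"
#   curl "http://localhost:5000/tel_book?department_id=<id>"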
|
#!C:\Users\Aditya\PycharmProjects\python-docs-azure-app\venv\Scripts\python.exe
# -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2016 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
import argparse
import pygal
parser = argparse.ArgumentParser(
description='Generate pygal chart in command line',
prog='pygal_gen')
parser.add_argument('-t', '--type', dest='type', default='Line',
                    choices=[chart.__name__ for chart in pygal.CHARTS],
help='Kind of chart to generate')
parser.add_argument('-o', '--output', dest='filename', default='pygal_out.svg',
help='Filename to write the svg to')
parser.add_argument('-s', '--serie', dest='series', nargs='+', action='append',
help='Add a serie in the form (title val1 val2...)')
parser.add_argument('--version', action='version',
version='pygal %s' % pygal.__version__)
for key in pygal.config.CONFIG_ITEMS:
opt_name = key.name
val = key.value
opts = {}
if key.type == list:
opts['type'] = key.subtype
opts['nargs'] = '+'
else:
opts['type'] = key.type
if opts['type'] == bool:
del opts['type']
opts['action'] = 'store_true' if not val else 'store_false'
if val:
opt_name = 'no-' + opt_name
if key.name == 'interpolate':
opts['choices'] = list(pygal.interpolate.INTERPOLATIONS.keys())
parser.add_argument(
'--%s' % opt_name, dest=key.name, default=val, **opts)
config = parser.parse_args()
chart = getattr(pygal, config.type)(**vars(config))
for serie in config.series or []:
    chart.add(serie[0], [float(value) for value in serie[1:]])
chart.render_to_file(config.filename)
|
"""
Python wrapper for libui.
"""
import ctypes
from . import clibui
class uiCombobox(ctypes.Structure):
"""Wrapper for the uiCombobox C struct."""
pass
def uiComboboxPointer(obj):
"""
Casts an object to uiCombobox pointer type.
:param obj: a generic object
:return: uiCombobox
"""
return ctypes.cast(obj, ctypes.POINTER(uiCombobox))
# - void uiComboboxAppend(uiCombobox *c, const char *text);
def uiComboboxAppend(combobox, text):
"""
Appends a new item to the combobox.
:param combobox: uiCombobox
:param text: string
:return: None
"""
clibui.uiComboboxAppend(combobox, bytes(text, 'utf-8'))
# - int uiComboboxSelected(uiCombobox *c);
def uiComboboxSelected(combobox):
"""
Returns selected items index.
:param combobox: uiCombobox
:return: int
"""
return clibui.uiComboboxSelected(combobox)
# - void uiComboboxSetSelected(uiCombobox *c, int n);
def uiComboboxSetSelected(combobox, n):
"""
Sets selected item.
:param combobox: uiCombobox
:param n: integer
:return: None
"""
clibui.uiComboboxSetSelected(combobox, n)
# - void uiComboboxOnSelected(uiCombobox *c, void (*f)(uiCombobox *c, void *data), void *data);
def uiComboboxOnSelected(combobox, callback, data):
"""
Executes a callback function when an item selected.
:param combobox: uiCombobox
:param callback: function
:param data: data
:return: reference to C callback function
"""
c_type = ctypes.CFUNCTYPE(
ctypes.c_int, ctypes.POINTER(uiCombobox), ctypes.c_void_p)
c_callback = c_type(callback)
clibui.uiComboboxOnSelected(combobox, c_callback, data)
return c_callback
def uiNewCombobox():
"""
Creates a new combobox.
:return: uiCombobox
"""
clibui.uiNewCombobox.restype = ctypes.POINTER(uiCombobox)
return clibui.uiNewCombobox()
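# Minimal usage sketch (assumptions: the shared libui library has been loaded via
# `clibui`, and window/control setup plus the main loop are handled elsewhere; the
# callback name is illustrative):
#
#   combobox = uiNewCombobox()
#   uiComboboxAppend(combobox, "First item")
#   uiComboboxAppend(combobox, "Second item")
#   uiComboboxSetSelected(combobox, 0)
#
#   def on_selected(cb, data):
#       print("Selected index:", uiComboboxSelected(combobox))
#       return 0
#
#   # Keep a reference to the returned C callback so it is not garbage collected.
#   handler = uiComboboxOnSelected(combobox, on_selected, None)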
|
import click
import pandas as pd
# Work around textacy import problems
try:
from textacy.preprocess import preprocess_text
except Exception:
from textacy.preprocess import preprocess_text
def preprocess_f(text, fix_unicode=True, lowercase=True,
no_urls=True, no_emails=True,
no_phone_numbers=True,
no_numbers=True, no_currency_symbols=True,
no_punct=True, no_accents=True):
"""Preprocess text."""
clean_text = preprocess_text(text, fix_unicode=fix_unicode,
lowercase=lowercase,
no_urls=no_urls, no_emails=no_emails,
no_phone_numbers=no_phone_numbers,
no_numbers=no_numbers,
no_currency_symbols=no_currency_symbols,
no_punct=no_punct,
no_accents=no_accents)
return clean_text
@click.command()
@click.option('--input_path', type=click.STRING, help='Path to input file')
@click.option('--output_path', type=click.STRING, help='Path to input file')
@click.option('--set_', type=click.Choice(['train', 'test']), help="set")
def preprocess(input_path, output_path, set_):
"""pre-process script
:param input_path: path to input file
:type input_path: str
:param output_path: path to output file
:type output_path: str
:param set_: kind of data
:type set_: str
"""
if set_ == "train":
df = pd.read_csv(input_path, sep='|')
else:
df = pd.read_csv(input_path)
df["clean_txt"] = df["Pregunta"].apply(lambda x: preprocess_f(x))
df.to_csv(output_path, index=False)
if __name__ == "__main__":
preprocess()
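# Example invocation (the script filename and paths are illustrative):
#   python preprocess.py --input_path data/train.csv --output_path data/train_clean.csv --set_ train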
|
#coding=utf-8
import sys
import getopt
from core.interface.action import server_action
from core.helper.usage import usage_helper
from prettytable import PrettyTable
from core.helper.parser import config_parser
import re
class action_list(server_action):
    # Example parameter list; descriptions can be added here, and the code below
    # generates one attribute per argname.
    _parameters = [
        {"name":"h", "needarg":False, "desc":"show this help message", "argname":"help"},
        {"name":"n", "needarg":True, "desc":"fuzzy search by server name", "argname":"name"},
        {"name":"t", "needarg":True, "desc":"search by tag", "argname":"tag"}
    ]
    def __init__(self):
        # Build the usage information
        self._usage_helper = usage_helper(sys.argv[0], "list", self._parameters)
        self._config = config_parser()
    def _usage(self):
        # Print the help information for this action
        self._usage_helper.output()
    # Pre-process the parameters:
    # reorganize the parameter descriptions into a dict keyed by option string for
    # later parsing, and initialize one attribute per argname (options that take a
    # value start as None, plain flags start as False).
    def _prepare_parameters(self):
        recognized_parameter={}
        for obj in self._parameters:
            obj_key = '-' + obj['name'] # option strings such as -h, -n, -a are the dict keys
            recognized_parameter[obj_key] = obj # store the original parameter description unchanged
            parameter_name = "_%s"%(obj['argname'])
            if obj['needarg'] == True:
                setattr(self, parameter_name, None)
            else:
                setattr(self, parameter_name, False)
        return recognized_parameter
    # Short description of the action; printed when running "smng help"
    def description(self):
        return "List server information"
    # Generic parameter parsing; add any extra parameter handling inside this method
    def parse_parameters(self):
        try:
            opts, argv = getopt.getopt(sys.argv[2:], self._usage_helper.get_opt_string())
        except Exception as e:
            self._usage()
            exit()
        parameters = self._prepare_parameters()
        for opt,arg in opts:
            if parameters[opt]['needarg'] == True:
                setattr(self,"_%s"%(parameters[opt]['argname']), arg) # options that take a value get the supplied value
            else:
                setattr(self,"_%s"%(parameters[opt]['argname']), True) # plain flags are set to True
        # Handle the help flag separately: print the usage information and exit
        if self._help == True:
            self._usage()
            exit()
        # Custom parsing logic: collect the tag filters passed via -t
        self._tag=[]
        prog_with_value = re.compile(r'^[\w]+=[0-9a-zA-Z-_]+$')
        prog_without_value = re.compile(r'^[\w]+$')
        for opt, arg in opts:
            if opt == '-t':
                if prog_with_value.match(arg) is not None:
                    # tag with a value, e.g. tag=hello
                    name,value = arg.split('=')
                    self._tag.append({name:value})
                elif prog_without_value.match(arg) is not None:
                    # tag without a value, e.g. tag
                    self._tag.append({arg:''})
                else:
                    print("%s is bad value"%(arg))
    # Fuzzy search by server name; returns the full set when no name filter is given
    def _search_by_name(self):
        ret_array = []
        # No name filter requested, return the full set
        if self._name == None:
            for i in self._config:
                ret_array.append(i['ip'])
            return set(ret_array)
        # Otherwise do a fuzzy search and collect the matching IPs into a set
        prog = re.compile('^.*%s.*$'%(self._name))
        for i in self._config:
            if 'name' not in i:
                continue
            if prog.match(i['name']) != None:
                ret_array.append(i['ip'])
        return set(ret_array)
    # Search by server tag; returns the full set when no tag filter is given
    def _search_by_tag(self):
        ret_array = []
        # No tag filter requested, return the full set
        if len(self._tag) == 0:
            for i in self._config:
                ret_array.append(i['ip'])
            return set(ret_array)
        # Filter by tag (assumed matching rule, inferred from parse_parameters):
        # a bare tag such as "-t env" only requires the tag name to exist, while
        # "-t env=prod" also requires the value to match.
        for i in self._config:
            if 'tags' not in i:
                continue
            matched = True
            for requested in self._tag:
                for name, value in requested.items():
                    if name not in i['tags'] or (value != '' and str(i['tags'][name]) != value):
                        matched = False
            if matched:
                ret_array.append(i['ip'])
        return set(ret_array)
    # The action's actual behavior; add the action logic to this method
    def run(self):
        # Filter by the given parameters first
        name_set = self._search_by_name()
        tag_set = self._search_by_tag()
        final_set = name_set & tag_set
        disp = PrettyTable(["IP", "Server Name", "Tags"])
        for i in self._config:
            # Check whether this record is in the search result
            if i['ip'] in final_set:
                name = i['name'] if 'name' in i else ''
                tag = []
                if 'tags' in i:
                    for t in i['tags']:
                        tag.append("%s:%s"%(t, i['tags'][t]))
                disp.add_row([i['ip'], name, ','.join(tag)])
        print(disp)
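# Example invocation (illustrative; the entry-point name comes from sys.argv[0]
# and the name/tag values are made up):
#   smng list -n web -t env=prod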
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import traceback
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import fileutils
from oslo_utils import timeutils
import taskflow.engines
from taskflow.patterns import linear_flow
from taskflow.types import failure as ft
from cinder import backup as backup_api
from cinder.backup import rpcapi as backup_rpcapi
from cinder import context as cinder_context
from cinder import coordination
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _
from cinder.image import glance
from cinder.image import image_utils
from cinder.message import api as message_api
from cinder.message import message_field
from cinder import objects
from cinder.objects import consistencygroup
from cinder.objects import fields
from cinder import utils
from cinder.volume.flows import common
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
ACTION = 'volume:create'
CONF = cfg.CONF
# These attributes we will attempt to save for the volume if they exist
# in the source image metadata.
IMAGE_ATTRIBUTES = (
'checksum',
'container_format',
'disk_format',
'min_disk',
'min_ram',
'size',
)
class OnFailureRescheduleTask(flow_utils.CinderTask):
"""Triggers a rescheduling request to be sent when reverting occurs.
If rescheduling doesn't occur this task errors out the volume.
Reversion strategy: Triggers the rescheduling mechanism whereby a cast gets
sent to the scheduler rpc api to allow for an attempt X of Y for scheduling
this volume elsewhere.
"""
def __init__(self, reschedule_context, db, driver, scheduler_rpcapi,
do_reschedule):
requires = ['filter_properties', 'request_spec', 'volume',
'context']
super(OnFailureRescheduleTask, self).__init__(addons=[ACTION],
requires=requires)
self.do_reschedule = do_reschedule
self.scheduler_rpcapi = scheduler_rpcapi
self.db = db
self.driver = driver
self.reschedule_context = reschedule_context
# These exception types will trigger the volume to be set into error
# status rather than being rescheduled.
self.no_reschedule_types = [
# Image copying happens after volume creation so rescheduling due
# to copy failure will mean the same volume will be created at
# another place when it still exists locally.
exception.ImageCopyFailure,
# Metadata updates happen after the volume has been created so if
# they fail, rescheduling will likely attempt to create the volume
# on another machine when it still exists locally.
exception.MetadataCopyFailure,
exception.MetadataCreateFailure,
exception.MetadataUpdateFailure,
# The volume/snapshot has been removed from the database, that
# can not be fixed by rescheduling.
exception.VolumeNotFound,
exception.SnapshotNotFound,
exception.VolumeTypeNotFound,
exception.ImageUnacceptable,
exception.ImageTooBig,
exception.InvalidSignatureImage,
exception.ImageSignatureVerificationException
]
def execute(self, **kwargs):
pass
def _pre_reschedule(self, volume):
"""Actions that happen before the rescheduling attempt occur here."""
try:
# Update volume's timestamp and host.
#
# NOTE(harlowja): this is awkward to be done here, shouldn't
# this happen at the scheduler itself and not before it gets
# sent to the scheduler? (since what happens if it never gets
# there??). It's almost like we need a status of 'on-the-way-to
# scheduler' in the future.
# We don't need to update the volume's status to creating, since
# we haven't changed it to error.
update = {
'scheduled_at': timeutils.utcnow(),
'host': None,
}
LOG.debug("Updating volume %(volume_id)s with %(update)s.",
{'update': update, 'volume_id': volume.id})
volume.update(update)
volume.save()
except exception.CinderException:
# Don't let updating the state cause the rescheduling to fail.
LOG.exception("Volume %s: update volume state failed.",
volume.id)
def _reschedule(self, context, cause, request_spec, filter_properties,
volume):
"""Actions that happen during the rescheduling attempt occur here."""
create_volume = self.scheduler_rpcapi.create_volume
if not filter_properties:
filter_properties = {}
if 'retry' not in filter_properties:
filter_properties['retry'] = {}
retry_info = filter_properties['retry']
num_attempts = retry_info.get('num_attempts', 0)
request_spec['volume_id'] = volume.id
LOG.debug("Volume %(volume_id)s: re-scheduling %(method)s "
"attempt %(num)d due to %(reason)s",
{'volume_id': volume.id,
'method': common.make_pretty_name(create_volume),
'num': num_attempts,
'reason': cause.exception_str})
if all(cause.exc_info):
# Stringify to avoid circular ref problem in json serialization
retry_info['exc'] = traceback.format_exception(*cause.exc_info)
return create_volume(context, volume, request_spec=request_spec,
filter_properties=filter_properties)
def _post_reschedule(self, volume):
"""Actions that happen after the rescheduling attempt occur here."""
LOG.debug("Volume %s: re-scheduled", volume.id)
# NOTE(dulek): Here we should be sure that rescheduling occurred and
# host field will be erased. Just in case volume was already created at
# the backend, we attempt to delete it.
try:
self.driver.delete_volume(volume)
except Exception:
            # Most likely the volume wasn't created at the backend. We can
# safely ignore this.
pass
def revert(self, context, result, flow_failures, volume, **kwargs):
        # NOTE(dulek): Revert is occurring and the manager needs to know if
        # rescheduling happened. We're returning a boolean flag that will
        # indicate that; it will be available in the flow engine store
        # through the get_revert_result method.
# If do not want to be rescheduled, just set the volume's status to
# error and return.
if not self.do_reschedule:
common.error_out(volume)
LOG.error("Volume %s: create failed", volume.id)
return False
# Check if we have a cause which can tell us not to reschedule and
# set the volume's status to error.
for failure in flow_failures.values():
if failure.check(*self.no_reschedule_types):
common.error_out(volume)
LOG.error("Volume %s: create failed", volume.id)
return False
# Use a different context when rescheduling.
if self.reschedule_context:
cause = list(flow_failures.values())[0]
context = self.reschedule_context
try:
self._pre_reschedule(volume)
self._reschedule(context, cause, volume=volume, **kwargs)
self._post_reschedule(volume)
return True
except exception.CinderException:
LOG.exception("Volume %s: rescheduling failed", volume.id)
return False
class ExtractVolumeRefTask(flow_utils.CinderTask):
"""Extracts volume reference for given volume id."""
default_provides = 'refreshed'
def __init__(self, db, host, set_error=True):
super(ExtractVolumeRefTask, self).__init__(addons=[ACTION])
self.db = db
self.host = host
self.set_error = set_error
def execute(self, context, volume):
# NOTE(harlowja): this will fetch the volume from the database, if
# the volume has been deleted before we got here then this should fail.
#
# In the future we might want to have a lock on the volume_id so that
# the volume can not be deleted while its still being created?
volume.refresh()
return volume
def revert(self, context, volume, result, **kwargs):
if isinstance(result, ft.Failure) or not self.set_error:
return
reason = _('Volume create failed while extracting volume ref.')
common.error_out(volume, reason)
LOG.error("Volume %s: create failed", volume.id)
class ExtractVolumeSpecTask(flow_utils.CinderTask):
"""Extracts a spec of a volume to be created into a common structure.
This task extracts and organizes the input requirements into a common
and easier to analyze structure for later tasks to use. It will also
attach the underlying database volume reference which can be used by
other tasks to reference for further details about the volume to be.
Reversion strategy: N/A
"""
default_provides = 'volume_spec'
def __init__(self, db):
requires = ['volume', 'request_spec']
super(ExtractVolumeSpecTask, self).__init__(addons=[ACTION],
requires=requires)
self.db = db
def execute(self, context, volume, request_spec):
get_remote_image_service = glance.get_remote_image_service
volume_name = volume.name
volume_size = utils.as_int(volume.size, quiet=False)
# Create a dictionary that will represent the volume to be so that
# later tasks can easily switch between the different types and create
# the volume according to the volume types specifications (which are
# represented in this dictionary).
specs = {
'status': volume.status,
'type': 'raw', # This will have the type of the volume to be
# created, which should be one of [raw, snap,
# source_vol, image, backup]
'volume_id': volume.id,
'volume_name': volume_name,
'volume_size': volume_size,
}
if volume.snapshot_id:
# We are making a snapshot based volume instead of a raw volume.
specs.update({
'type': 'snap',
'snapshot_id': volume.snapshot_id,
})
elif volume.source_volid:
# We are making a source based volume instead of a raw volume.
#
# NOTE(harlowja): This will likely fail if the source volume
# disappeared by the time this call occurred.
source_volid = volume.source_volid
source_volume_ref = objects.Volume.get_by_id(context,
source_volid)
specs.update({
'source_volid': source_volid,
# This is captured incase we have to revert and we want to set
# back the source volume status to its original status. This
# may or may not be sketchy to do??
'source_volstatus': source_volume_ref.status,
'type': 'source_vol',
})
elif request_spec.get('image_id'):
# We are making an image based volume instead of a raw volume.
image_href = request_spec['image_id']
image_service, image_id = get_remote_image_service(context,
image_href)
specs.update({
'type': 'image',
'image_id': image_id,
'image_location': image_service.get_location(context,
image_id),
'image_meta': image_service.show(context, image_id),
# Instead of refetching the image service later just save it.
#
                # NOTE(harlowja): if we have to later recover this task's
                # output on another 'node', this object won't be able to be
                # serialized, so we will have to recreate this object on
                # demand in the future.
'image_service': image_service,
})
elif request_spec.get('backup_id'):
# We are making a backup based volume instead of a raw volume.
specs.update({
'type': 'backup',
'backup_id': request_spec['backup_id'],
# NOTE(luqitao): if the driver does not implement the method
# `create_volume_from_backup`, cinder-backup will update the
# volume's status, otherwise we need update it in the method
# `CreateVolumeOnFinishTask`.
'need_update_volume': True,
})
return specs
def revert(self, context, result, **kwargs):
if isinstance(result, ft.Failure):
return
volume_spec = result.get('volume_spec')
# Restore the source volume status and set the volume to error status.
common.restore_source_status(context, self.db, volume_spec)
class NotifyVolumeActionTask(flow_utils.CinderTask):
"""Performs a notification about the given volume when called.
Reversion strategy: N/A
"""
def __init__(self, db, event_suffix):
super(NotifyVolumeActionTask, self).__init__(addons=[ACTION,
event_suffix])
self.db = db
self.event_suffix = event_suffix
def execute(self, context, volume):
try:
volume_utils.notify_about_volume_usage(context, volume,
self.event_suffix,
host=volume.host)
except exception.CinderException:
# If notification sending of volume database entry reading fails
# then we shouldn't error out the whole workflow since this is
# not always information that must be sent for volumes to operate
LOG.exception("Failed notifying about the volume"
" action %(event)s for volume %(volume_id)s",
{'event': self.event_suffix, 'volume_id': volume.id})
class CreateVolumeFromSpecTask(flow_utils.CinderTask):
"""Creates a volume from a provided specification.
Reversion strategy: N/A
"""
default_provides = 'volume_spec'
def __init__(self, manager, db, driver, image_volume_cache=None):
super(CreateVolumeFromSpecTask, self).__init__(addons=[ACTION])
self.manager = manager
self.db = db
self.driver = driver
self.image_volume_cache = image_volume_cache
self.message = message_api.API()
self.backup_api = backup_api.API()
self.backup_rpcapi = backup_rpcapi.BackupAPI()
def _handle_bootable_volume_glance_meta(self, context, volume,
**kwargs):
"""Enable bootable flag and properly handle glance metadata.
Caller should provide one and only one of snapshot_id,source_volid
and image_id. If an image_id specified, an image_meta should also be
provided, otherwise will be treated as an empty dictionary.
"""
log_template = _("Copying metadata from %(src_type)s %(src_id)s to "
"%(vol_id)s.")
exception_template = _("Failed updating volume %(vol_id)s metadata"
" using the provided %(src_type)s"
" %(src_id)s metadata")
src_type = None
src_id = None
volume_utils.enable_bootable_flag(volume)
try:
if kwargs.get('snapshot_id'):
src_type = 'snapshot'
src_id = kwargs['snapshot_id']
snapshot_id = src_id
LOG.debug(log_template, {'src_type': src_type,
'src_id': src_id,
'vol_id': volume.id})
self.db.volume_glance_metadata_copy_to_volume(
context, volume.id, snapshot_id)
elif kwargs.get('source_volid'):
src_type = 'source volume'
src_id = kwargs['source_volid']
source_volid = src_id
LOG.debug(log_template, {'src_type': src_type,
'src_id': src_id,
'vol_id': volume.id})
self.db.volume_glance_metadata_copy_from_volume_to_volume(
context,
source_volid,
volume.id)
elif kwargs.get('image_id'):
src_type = 'image'
src_id = kwargs['image_id']
image_id = src_id
image_meta = kwargs.get('image_meta', {})
LOG.debug(log_template, {'src_type': src_type,
'src_id': src_id,
'vol_id': volume.id})
self._capture_volume_image_metadata(context, volume.id,
image_id, image_meta)
except exception.GlanceMetadataNotFound:
# If volume is not created from image, No glance metadata
# would be available for that volume in
# volume glance metadata table
pass
except exception.CinderException as ex:
LOG.exception(exception_template, {'src_type': src_type,
'src_id': src_id,
'vol_id': volume.id})
raise exception.MetadataCopyFailure(reason=ex)
def _create_from_snapshot(self, context, volume, snapshot_id,
**kwargs):
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
try:
model_update = self.driver.create_volume_from_snapshot(volume,
snapshot)
finally:
self._cleanup_cg_in_volume(volume)
# NOTE(harlowja): Subtasks would be useful here since after this
# point the volume has already been created and further failures
# will not destroy the volume (although they could in the future).
make_bootable = False
try:
originating_vref = objects.Volume.get_by_id(context,
snapshot.volume_id)
make_bootable = originating_vref.bootable
except exception.CinderException as ex:
LOG.exception("Failed fetching snapshot %(snapshot_id)s bootable"
" flag using the provided glance snapshot "
"%(snapshot_ref_id)s volume reference",
{'snapshot_id': snapshot_id,
'snapshot_ref_id': snapshot.volume_id})
raise exception.MetadataUpdateFailure(reason=ex)
if make_bootable:
self._handle_bootable_volume_glance_meta(context, volume,
snapshot_id=snapshot_id)
return model_update
def _create_from_source_volume(self, context, volume, source_volid,
**kwargs):
# NOTE(harlowja): if the source volume has disappeared this will be our
# detection of that since this database call should fail.
#
# NOTE(harlowja): likely this is not the best place for this to happen
# and we should have proper locks on the source volume while actions
# that use the source volume are underway.
srcvol_ref = objects.Volume.get_by_id(context, source_volid)
try:
model_update = self.driver.create_cloned_volume(volume, srcvol_ref)
finally:
self._cleanup_cg_in_volume(volume)
# NOTE(harlowja): Subtasks would be useful here since after this
# point the volume has already been created and further failures
# will not destroy the volume (although they could in the future).
if srcvol_ref.bootable:
self._handle_bootable_volume_glance_meta(
context, volume, source_volid=srcvol_ref.id)
return model_update
def _capture_volume_image_metadata(self, context, volume_id,
image_id, image_meta):
volume_metadata = volume_utils.get_volume_image_metadata(
image_id, image_meta)
LOG.debug("Creating volume glance metadata for volume %(volume_id)s"
" backed by image %(image_id)s with: %(vol_metadata)s.",
{'volume_id': volume_id, 'image_id': image_id,
'vol_metadata': volume_metadata})
self.db.volume_glance_metadata_bulk_create(context, volume_id,
volume_metadata)
def _clone_image_volume(self, context, volume, image_location, image_meta):
"""Create a volume efficiently from an existing image.
        Returns a tuple: a dict of volume properties (e.g. provider_location)
        and a boolean indicating whether cloning occurred.
"""
# NOTE (lixiaoy1): currently can't create volume from source vol with
# different encryptions, so just return.
if not image_location or volume.encryption_key_id:
return None, False
if (image_meta.get('container_format') != 'bare' or
image_meta.get('disk_format') != 'raw'):
LOG.info("Requested image %(id)s is not in raw format.",
{'id': image_meta.get('id')})
return None, False
image_volume = None
direct_url, locations = image_location
urls = set([direct_url] + [loc.get('url') for loc in locations or []])
image_volume_ids = [url[9:] for url in urls
if url and url.startswith('cinder://')]
image_volumes = self.db.volume_get_all_by_host(
context, volume['host'], filters={'id': image_volume_ids})
for image_volume in image_volumes:
            # For the case where the image volume is stored in the service
            # tenant, the image_owner volume metadata should also be checked.
image_owner = None
volume_metadata = image_volume.get('volume_metadata') or {}
for m in volume_metadata:
if m['key'] == 'image_owner':
image_owner = m['value']
if (image_meta['owner'] != volume['project_id'] and
image_meta['owner'] != image_owner):
LOG.info("Skipping image volume %(id)s because "
"it is not accessible by current Tenant.",
{'id': image_volume.id})
continue
LOG.info("Will clone a volume from the image volume "
"%(id)s.", {'id': image_volume.id})
break
else:
LOG.debug("No accessible image volume for image %(id)s found.",
{'id': image_meta['id']})
return None, False
try:
ret = self.driver.create_cloned_volume(volume, image_volume)
self._cleanup_cg_in_volume(volume)
return ret, True
except (NotImplementedError, exception.CinderException):
LOG.exception('Failed to clone image volume %(id)s.',
{'id': image_volume['id']})
return None, False
def _create_from_image_download(self, context, volume, image_location,
image_meta, image_service):
# TODO(harlowja): what needs to be rolled back in the clone if this
# volume create fails?? Likely this should be a subflow or broken
# out task in the future. That will bring up the question of how
# do we make said subflow/task which is only triggered in the
# clone image 'path' resumable and revertable in the correct
# manner.
model_update = self.driver.create_volume(volume) or {}
self._cleanup_cg_in_volume(volume)
model_update['status'] = 'downloading'
try:
volume.update(model_update)
volume.save()
except exception.CinderException:
LOG.exception("Failed updating volume %(volume_id)s with "
"%(updates)s",
{'volume_id': volume.id,
'updates': model_update})
try:
volume_utils.copy_image_to_volume(self.driver, context, volume,
image_meta, image_location,
image_service)
except exception.ImageTooBig:
with excutils.save_and_reraise_exception():
LOG.exception("Failed to copy image to volume "
"%(volume_id)s due to insufficient space",
{'volume_id': volume.id})
return model_update
def _create_from_image_cache(self, context, internal_context, volume,
image_id, image_meta):
"""Attempt to create the volume using the image cache.
Best case this will simply clone the existing volume in the cache.
Worst case the image is out of date and will be evicted. In that case
a clone will not be created and the image must be downloaded again.
"""
LOG.debug('Attempting to retrieve cache entry for image = '
'%(image_id)s on host %(host)s.',
{'image_id': image_id, 'host': volume.host})
# Currently can't create volume from source vol with different
# encryptions, so just return
if volume.encryption_key_id:
return None, False
try:
cache_entry = self.image_volume_cache.get_entry(internal_context,
volume,
image_id,
image_meta)
if cache_entry:
LOG.debug('Creating from source image-volume %(volume_id)s',
{'volume_id': cache_entry['volume_id']})
model_update = self._create_from_source_volume(
context,
volume,
cache_entry['volume_id']
)
return model_update, True
except NotImplementedError:
LOG.warning('Backend does not support creating image-volume '
'clone. Image will be downloaded from Glance.')
return None, False
@coordination.synchronized('{image_id}')
def _prepare_image_cache_entry(self, context, volume,
image_location, image_id,
image_meta, image_service):
internal_context = cinder_context.get_internal_tenant_context()
if not internal_context:
return None, False
cache_entry = self.image_volume_cache.get_entry(internal_context,
volume,
image_id,
image_meta)
# If the entry is in the cache then return ASAP in order to minimize
# the scope of the lock. If it isn't in the cache then do the work
# that adds it. The work is done inside the locked region to ensure
# only one cache entry is created.
if cache_entry:
LOG.debug('Found cache entry for image = '
'%(image_id)s on host %(host)s.',
{'image_id': image_id, 'host': volume.host})
return None, False
else:
LOG.debug('Preparing cache entry for image = '
'%(image_id)s on host %(host)s.',
{'image_id': image_id, 'host': volume.host})
model_update = self._create_from_image_cache_or_download(
context,
volume,
image_location,
image_id,
image_meta,
image_service,
update_cache=True)
return model_update, True
def _create_from_image_cache_or_download(self, context, volume,
image_location, image_id,
image_meta, image_service,
update_cache=False):
# NOTE(e0ne): check for free space in image_conversion_dir before
# image downloading.
# NOTE(mnaser): This check *only* happens if the backend is not able
# to clone volumes and we have to resort to downloading
# the image from Glance and uploading it.
if CONF.image_conversion_dir:
fileutils.ensure_tree(CONF.image_conversion_dir)
try:
image_utils.check_available_space(
CONF.image_conversion_dir,
image_meta['size'], image_id)
except exception.ImageTooBig as err:
with excutils.save_and_reraise_exception():
self.message.create(
context,
message_field.Action.COPY_IMAGE_TO_VOLUME,
resource_uuid=volume.id,
detail=message_field.Detail.NOT_ENOUGH_SPACE_FOR_IMAGE,
exception=err)
# Try and use the image cache.
should_create_cache_entry = False
cloned = False
model_update = None
if self.image_volume_cache:
internal_context = cinder_context.get_internal_tenant_context()
if not internal_context:
LOG.info('Unable to get Cinder internal context, will '
'not use image-volume cache.')
else:
try:
model_update, cloned = self._create_from_image_cache(
context,
internal_context,
volume,
image_id,
image_meta
)
except exception.CinderException as e:
LOG.warning('Failed to create volume from image-volume '
'cache, image will be downloaded from Glance. '
'Error: %(exception)s',
{'exception': e})
# If an exception occurred when cloning the image-volume,
# it may be the image-volume reached its snapshot limit.
# Create another "fresh" cache entry.
update_cache = True
# Don't cache unless directed.
if not cloned and update_cache:
should_create_cache_entry = True
# cleanup consistencygroup field in the volume,
# because when creating cache entry, it will need
# to update volume object.
self._cleanup_cg_in_volume(volume)
# Fall back to default behavior of creating volume,
# download the image data and copy it into the volume.
original_size = volume.size
backend_name = volume_utils.extract_host(volume.service_topic_queue)
try:
if not cloned:
try:
with image_utils.TemporaryImages.fetch(
image_service, context, image_id,
backend_name) as tmp_image:
if CONF.verify_glance_signatures != 'disabled':
# Verify image signature via reading content from
# temp image, and store the verification flag if
# required.
verified = \
image_utils.verify_glance_image_signature(
context, image_service,
image_id, tmp_image)
self.db.volume_glance_metadata_bulk_create(
context, volume.id,
{'signature_verified': verified})
# Try to create the volume as the minimal size,
# then we can extend once the image has been
# downloaded.
data = image_utils.qemu_img_info(tmp_image)
virtual_size = image_utils.check_virtual_size(
data.virtual_size, volume.size, image_id)
if should_create_cache_entry:
if virtual_size and virtual_size != original_size:
volume.size = virtual_size
volume.save()
model_update = self._create_from_image_download(
context,
volume,
image_location,
image_meta,
image_service
)
except exception.ImageTooBig as e:
with excutils.save_and_reraise_exception():
self.message.create(
context,
message_field.Action.COPY_IMAGE_TO_VOLUME,
resource_uuid=volume.id,
detail=
message_field.Detail.NOT_ENOUGH_SPACE_FOR_IMAGE,
exception=e)
except exception.ImageSignatureVerificationException as err:
with excutils.save_and_reraise_exception():
self.message.create(
context,
message_field.Action.COPY_IMAGE_TO_VOLUME,
resource_uuid=volume.id,
detail=
message_field.Detail.SIGNATURE_VERIFICATION_FAILED,
exception=err)
if should_create_cache_entry:
# Update the newly created volume db entry before we clone it
# for the image-volume creation.
if model_update:
volume.update(model_update)
volume.save()
self.manager._create_image_cache_volume_entry(internal_context,
volume,
image_id,
image_meta)
finally:
# If we created the volume as the minimal size, extend it back to
# what was originally requested. If an exception has occurred or
# extending it back failed, we still need to put this back before
# letting it be raised further up the stack.
if volume.size != original_size:
try:
self.driver.extend_volume(volume, original_size)
finally:
volume.size = original_size
volume.save()
return model_update
def _create_from_image(self, context, volume,
image_location, image_id, image_meta,
image_service, **kwargs):
LOG.debug("Cloning %(volume_id)s from image %(image_id)s "
" at location %(image_location)s.",
{'volume_id': volume.id,
'image_location': image_location, 'image_id': image_id})
virtual_size = image_meta.get('virtual_size')
if virtual_size:
virtual_size = image_utils.check_virtual_size(virtual_size,
volume.size,
image_id)
# Create the volume from an image.
#
# First see if the driver can clone the image directly.
#
        # NOTE (singn): two values need to be returned: a dict containing
        # provider_location for the cloned volume, and the clone status.
# NOTE (lixiaoy1): Currently all images are raw data, we can't
# use clone_image to copy data if new volume is encrypted.
volume_is_encrypted = volume.encryption_key_id is not None
cloned = False
model_update = None
if not volume_is_encrypted:
model_update, cloned = self.driver.clone_image(context,
volume,
image_location,
image_meta,
image_service)
# Try and clone the image if we have it set as a glance location.
if not cloned and 'cinder' in CONF.allowed_direct_url_schemes:
model_update, cloned = self._clone_image_volume(context,
volume,
image_location,
image_meta)
# If we're going to try using the image cache then prepare the cache
# entry. Note: encrypted volume images are not cached.
if not cloned and self.image_volume_cache and not volume_is_encrypted:
# If _prepare_image_cache_entry() has to create the cache entry
# then it will also create the volume. But if the volume image
# is already in the cache then it returns (None, False), and
# _create_from_image_cache_or_download() will use the cache.
model_update, cloned = self._prepare_image_cache_entry(
context,
volume,
image_location,
image_id,
image_meta,
image_service)
# Try and use the image cache, and download if not cached.
if not cloned:
model_update = self._create_from_image_cache_or_download(
context,
volume,
image_location,
image_id,
image_meta,
image_service)
self._handle_bootable_volume_glance_meta(context, volume,
image_id=image_id,
image_meta=image_meta)
return model_update
def _create_from_backup(self, context, volume, backup_id, **kwargs):
LOG.info("Creating volume %(volume_id)s from backup %(backup_id)s.",
{'volume_id': volume.id,
'backup_id': backup_id})
ret = {}
backup = objects.Backup.get_by_id(context, backup_id)
try:
ret = self.driver.create_volume_from_backup(volume, backup)
need_update_volume = True
except NotImplementedError:
LOG.info("Backend does not support creating volume from "
"backup %(id)s. It will directly create the raw volume "
"at the backend and then schedule the request to the "
"backup service to restore the volume with backup.",
{'id': backup_id})
model_update = self._create_raw_volume(
context, volume, **kwargs) or {}
volume.update(model_update)
volume.save()
backup_host = self.backup_api.get_available_backup_service_host(
backup.host, backup.availability_zone)
updates = {'status': fields.BackupStatus.RESTORING,
'restore_volume_id': volume.id,
'host': backup_host}
backup.update(updates)
backup.save()
self.backup_rpcapi.restore_backup(context, backup.host, backup,
volume.id)
need_update_volume = False
LOG.info("Created volume %(volume_id)s from backup %(backup_id)s "
"successfully.",
{'volume_id': volume.id,
'backup_id': backup_id})
return ret, need_update_volume
def _create_raw_volume(self, context, volume, **kwargs):
try:
ret = self.driver.create_volume(volume)
except Exception as ex:
with excutils.save_and_reraise_exception():
self.message.create(
context,
message_field.Action.CREATE_VOLUME_FROM_BACKEND,
resource_uuid=volume.id,
detail=message_field.Detail.DRIVER_FAILED_CREATE,
exception=ex)
finally:
self._cleanup_cg_in_volume(volume)
return ret
def execute(self, context, volume, volume_spec):
volume_spec = dict(volume_spec)
volume_id = volume_spec.pop('volume_id', None)
if not volume_id:
volume_id = volume.id
# we can't do anything if the driver didn't init
if not self.driver.initialized:
driver_name = self.driver.__class__.__name__
LOG.error("Unable to create volume. "
"Volume driver %s not initialized", driver_name)
raise exception.DriverNotInitialized()
# NOTE(xyang): Populate consistencygroup_id and consistencygroup
# fields before passing to the driver. This is to support backward
# compatibility of consistencygroup.
if volume.group_id:
volume.consistencygroup_id = volume.group_id
cg = consistencygroup.ConsistencyGroup()
cg.from_group(volume.group)
volume.consistencygroup = cg
create_type = volume_spec.pop('type', None)
LOG.info("Volume %(volume_id)s: being created as %(create_type)s "
"with specification: %(volume_spec)s",
{'volume_spec': volume_spec, 'volume_id': volume_id,
'create_type': create_type})
if create_type == 'raw':
model_update = self._create_raw_volume(
context, volume, **volume_spec)
elif create_type == 'snap':
model_update = self._create_from_snapshot(context, volume,
**volume_spec)
elif create_type == 'source_vol':
model_update = self._create_from_source_volume(
context, volume, **volume_spec)
elif create_type == 'image':
model_update = self._create_from_image(context,
volume,
**volume_spec)
elif create_type == 'backup':
model_update, need_update_volume = self._create_from_backup(
context, volume, **volume_spec)
volume_spec.update({'need_update_volume': need_update_volume})
else:
raise exception.VolumeTypeNotFound(volume_type_id=create_type)
# Persist any model information provided on creation.
try:
if model_update:
with volume.obj_as_admin():
volume.update(model_update)
volume.save()
except exception.CinderException:
# If somehow the update failed we want to ensure that the
# failure is logged (but not try rescheduling since the volume at
# this point has been created).
LOG.exception("Failed updating model of volume %(volume_id)s "
"with creation provided model %(model)s",
{'volume_id': volume_id, 'model': model_update})
raise
return volume_spec
def _cleanup_cg_in_volume(self, volume):
# NOTE(xyang): Cannot have both group_id and consistencygroup_id.
# consistencygroup_id needs to be removed to avoid DB reference
# error because there isn't an entry in the consistencygroups table.
if (('group_id' in volume and volume.group_id) and
('consistencygroup_id' in volume and
volume.consistencygroup_id)):
volume.consistencygroup_id = None
if 'consistencygroup' in volume:
volume.consistencygroup = None
class CreateVolumeOnFinishTask(NotifyVolumeActionTask):
"""On successful volume creation this will perform final volume actions.
When a volume is created successfully it is expected that MQ notifications
and database updates will occur to 'signal' to others that the volume is
now ready for usage. This task does those notifications and updates in a
reliable manner (not re-raising exceptions if said actions can not be
triggered).
Reversion strategy: N/A
"""
def __init__(self, db, event_suffix):
super(CreateVolumeOnFinishTask, self).__init__(db, event_suffix)
self.status_translation = {
'migration_target_creating': 'migration_target',
}
def execute(self, context, volume, volume_spec):
need_update_volume = volume_spec.pop('need_update_volume', True)
if not need_update_volume:
super(CreateVolumeOnFinishTask, self).execute(context, volume)
return
new_status = self.status_translation.get(volume_spec.get('status'),
'available')
update = {
'status': new_status,
'launched_at': timeutils.utcnow(),
}
try:
# TODO(harlowja): is it acceptable to only log if this fails??
# or are there other side-effects that this will cause if the
# status isn't updated correctly (aka it will likely be stuck in
# 'creating' if this fails)??
volume.update(update)
volume.save()
# Now use the parent to notify.
super(CreateVolumeOnFinishTask, self).execute(context, volume)
except exception.CinderException:
LOG.exception("Failed updating volume %(volume_id)s with "
"%(update)s", {'volume_id': volume.id,
'update': update})
# Even if the update fails, the volume is ready.
LOG.info("Volume %(volume_name)s (%(volume_id)s): "
"created successfully",
{'volume_name': volume_spec['volume_name'],
'volume_id': volume.id})
def get_flow(context, manager, db, driver, scheduler_rpcapi, host, volume,
allow_reschedule, reschedule_context, request_spec,
filter_properties, image_volume_cache=None):
"""Constructs and returns the manager entrypoint flow.
This flow will do the following:
1. Determines if rescheduling is enabled (ahead of time).
2. Inject keys & values for dependent tasks.
3. Selects 1 of 2 activated only on *failure* tasks (one to update the db
status & notify or one to update the db status & notify & *reschedule*).
4. Extracts a volume specification from the provided inputs.
5. Notifies that the volume has started to be created.
6. Creates a volume from the extracted volume specification.
7. Attaches an on-success *only* task that notifies that the volume
creation has ended and performs further database status updates.
"""
flow_name = ACTION.replace(":", "_") + "_manager"
volume_flow = linear_flow.Flow(flow_name)
# This injects the initial starting flow values into the workflow so that
# the dependency order of the tasks provides/requires can be correctly
# determined.
create_what = {
'context': context,
'filter_properties': filter_properties,
'request_spec': request_spec,
'volume': volume,
}
volume_flow.add(ExtractVolumeRefTask(db, host, set_error=False))
retry = filter_properties.get('retry', None)
# Always add OnFailureRescheduleTask and we handle the change of volume's
# status when reverting the flow. Meanwhile, no need to revert process of
# ExtractVolumeRefTask.
do_reschedule = allow_reschedule and request_spec and retry
volume_flow.add(OnFailureRescheduleTask(reschedule_context, db, driver,
scheduler_rpcapi, do_reschedule))
LOG.debug("Volume reschedule parameters: %(allow)s "
"retry: %(retry)s", {'allow': allow_reschedule, 'retry': retry})
volume_flow.add(ExtractVolumeSpecTask(db),
NotifyVolumeActionTask(db, "create.start"),
CreateVolumeFromSpecTask(manager,
db,
driver,
image_volume_cache),
CreateVolumeOnFinishTask(db, "create.end"))
# Now load (but do not run) the flow using the provided initial data.
return taskflow.engines.load(volume_flow, store=create_what)
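# --- Hedged usage sketch (not part of Cinder) --------------------------------
# The get_flow() docstring above describes a linear TaskFlow pipeline built with
# linear_flow.Flow().add(...) and loaded via taskflow.engines. The guarded block
# below shows that pattern with a toy task; the task class and store key are
# illustrative assumptions, not Cinder's real tasks.
if __name__ == '__main__':
    from taskflow import task as tf_task

    class EchoVolumeTask(tf_task.Task):
        def execute(self, volume_name):
            # TaskFlow injects 'volume_name' from the engine's store by name.
            print('creating %s' % volume_name)

    demo_flow = linear_flow.Flow('demo_create_volume').add(EchoVolumeTask())
    taskflow.engines.run(demo_flow, store={'volume_name': 'vol-demo'})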
|
#make print in python 2, 3 compatible
from __future__ import print_function
import numpy as np
import pyedda as edda
#Univariate Gaussian
print("//////////Univariate Gaussian///////")
dummy_data = np.random.rand(100)
gaussian = edda.Gaussian(100, 20)
print("gaussian.getMean():", gaussian.getMean())
print("gaussian.getVar():", gaussian.getVar())
print("gaussian.getPdf(105):", gaussian.getPdf(105))
print("gaussian.getSample():", gaussian.getSample())
print("gaussian.getCdf(105):", gaussian.getCdf(105))
print("gaussian.getCdfPrecise():", gaussian.getCdfPrecise(105))
print("Output gaussian:")
gaussian.output()
print()
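# Hedged cross-check (illustrative, not part of pyedda): assuming the second
# constructor argument is the variance (as the getVar() accessor suggests), the
# pdf at x should match the closed-form normal density N(x; mu, var).
mu, var, x = 100.0, 20.0, 105.0
ref_pdf = np.exp(-(x - mu) ** 2 / (2.0 * var)) / np.sqrt(2.0 * np.pi * var)
print("closed-form pdf at 105:", ref_pdf)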
|
import graphene
from graphene import Node
from graphene_django.filter import DjangoFilterConnectionField
from graphene_django.rest_framework.mutation import SerializerMutation
from graphene_django.types import DjangoObjectType
from rest_framework.generics import get_object_or_404
from contact.models import Contact
from contact.serializers import ContactSerializer
class ContactModelMutation(SerializerMutation):
class Meta:
serializer_class = ContactSerializer
convert_choices_to_enum = False
class ContactNode(DjangoObjectType):
class Meta:
model = Contact
interfaces = (Node,)
fields = "__all__"
filter_fields = ["first_name"]
class ContactType(DjangoObjectType):
class Meta:
model = Contact
fields = "__all__"
class Query(graphene.ObjectType):
contact_node = Node.Field(ContactNode)
contacts_node = DjangoFilterConnectionField(ContactNode)
contact = graphene.Field(ContactType, id=graphene.Int())
contacts = graphene.List(ContactType)
def resolve_contacts(self, info, **kwargs):
return Contact.objects.all()
def resolve_contact(self, info, id):
return get_object_or_404(Contact, pk=id)
class DeleteMutation(graphene.Mutation):
class Arguments:
# The input arguments for this mutation
id = graphene.Int(required=True)
# The class attributes define the response of the mutation
id = graphene.ID()
message = graphene.String()
@classmethod
def mutate(cls, root, info, id):
contact = get_object_or_404(Contact, pk=id)
contact.delete()
return cls(id=id, message='deleted')
class Mutation(graphene.ObjectType):
create_contact = ContactModelMutation.Field()
update_contact = ContactModelMutation.Field()
delete_contact = DeleteMutation.Field()
schema = graphene.Schema(query=Query, mutation=Mutation)
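# Hedged usage sketch (assumes Django settings and the contact app are set up;
# the query text is illustrative). graphene.Schema.execute() runs a query
# in-process and returns an ExecutionResult with .data and .errors; field names
# are camelCased by default, so first_name is queried as firstName.
if __name__ == "__main__":
    result = schema.execute("{ contacts { id firstName } }")
    print(result.errors or result.data)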
|
# µPing (MicroPing) for MicroPython
# copyright (c) 2018 Shawwwn <shawwwn1@gmail.com>
# License: MIT
# Internet Checksum Algorithm
# Author: Olav Morken
# https://github.com/olavmrk/python-ping/blob/master/ping.py
# @data: bytes
# ping statistics, loop mode and KeyboardInterrupt handler, + esp8266 compatible
# copyright (c) 2020 Carglglz
# License: MIT
def checksum(data):
if len(data) & 0x1: # Odd number of bytes
data += b'\0'
cs = 0
for pos in range(0, len(data), 2):
b1 = data[pos]
b2 = data[pos + 1]
cs += (b1 << 8) + b2
while cs >= 0x10000:
cs = (cs & 0xffff) + (cs >> 16)
cs = ~cs & 0xffff
return cs
def stddev(data):
N = len(data)
avg = sum(data)/N
num = sum([(x-avg)**2 for x in data])
den = N - 1
stddev = (num/den)**0.5
return stddev
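# Hedged self-checks (not part of the original module): checksum() implements
# the Internet checksum -- sum big-endian 16-bit words, fold the carries into
# the low 16 bits, then take the one's complement -- and stddev() is the sample
# (N-1) standard deviation.
assert checksum(b'\x00\x01\x00\x02') == 0xFFFC  # ~(0x0001 + 0x0002) & 0xffff
assert abs(stddev([1.0, 2.0, 3.0, 4.0]) - (5.0 / 3.0) ** 0.5) < 1e-9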
def ping(host, count=4, timeout=5000, interval=10, quiet=False, size=64,
rtn=True, loop=False, int_loop=800):
import utime
import uselect
import uctypes
import usocket
import ustruct
import urandom
from sys import platform
import gc
from array import array
# prepare packet
assert size >= 16, "pkt size too small"
pkt = b'Q'*size
pkt_desc = {
"type": uctypes.UINT8 | 0,
"code": uctypes.UINT8 | 1,
"checksum": uctypes.UINT16 | 2,
"id": uctypes.UINT16 | 4,
"seq": uctypes.INT16 | 6,
"timestamp": uctypes.UINT64 | 8,
} # packet header descriptor
h = uctypes.struct(uctypes.addressof(pkt), pkt_desc, uctypes.BIG_ENDIAN)
h.type = 8 # ICMP_ECHO_REQUEST
h.code = 0
h.checksum = 0
if platform == 'esp8266':
h.id = urandom.getrandbits(16)
else:
h.id = urandom.randint(0, 65535)
h.seq = 1
    time_data = array("f")  # collected round-trip times in ms
# init socket
sock = usocket.socket(usocket.AF_INET, usocket.SOCK_RAW, 1)
sock.setblocking(0)
sock.settimeout(timeout/1000)
addr = usocket.getaddrinfo(host, 1)[0][-1][0] # ip address
sock.connect((addr, 1))
not quiet and print("PING %s (%s): %u data bytes" % (host, addr, len(pkt)))
seq_loop = -1
try:
if loop:
n_trans = 0
n_recv = 0
while True:
gc.collect()
utime.sleep_ms(int_loop)
count = 1
seq_loop += 1
seqs = list(range(1, count+1)) # [1,2,...,count]
c = 1
t = 0
finish = False
while t < timeout:
if t == interval and c <= count:
# send packet
h.checksum = 0
h.seq = c
h.timestamp = utime.ticks_us()
h.checksum = checksum(pkt)
if sock.send(pkt) == size:
n_trans += 1
t = 0 # reset timeout
else:
seqs.remove(c)
if loop:
count += 1
seqs.append(count)
c += 1
# recv packet
while 1:
socks, _, _ = uselect.select([sock], [], [], 0)
if socks:
resp = socks[0].recv(4096)
resp_mv = memoryview(resp)
h2 = uctypes.struct(uctypes.addressof(
resp_mv[20:]), pkt_desc, uctypes.BIG_ENDIAN)
# TODO: validate checksum (optional)
seq = h2.seq
# 0: ICMP_ECHO_REPLY
if h2.type == 0 and h2.id == h.id and (seq in seqs):
t_elapsed = (utime.ticks_us()-h2.timestamp) / 1000
ttl = ustruct.unpack('!B', resp_mv[8:9])[0] # time-to-live
n_recv += 1
not quiet and print("{} bytes from {}: icmp_seq={} ttl={} time={:.3f} ms".format(
len(resp), addr, seq_loop, ttl, t_elapsed))
time_data.append(t_elapsed)
seqs.remove(seq)
if len(seqs) == 0:
finish = True
break
else:
break
if finish:
break
utime.sleep_ms(1)
t += 1
else:
seqs = list(range(1, count+1)) # [1,2,...,count]
c = 1
t = 0
n_trans = 0
n_recv = 0
finish = False
while t < timeout:
if t == interval and c <= count:
# send packet
h.checksum = 0
h.seq = c
h.timestamp = utime.ticks_us()
h.checksum = checksum(pkt)
if sock.send(pkt) == size:
n_trans += 1
t = 0 # reset timeout
else:
seqs.remove(c)
if loop:
count += 1
seqs.append(count)
c += 1
# recv packet
while 1:
socks, _, _ = uselect.select([sock], [], [], 0)
if socks:
resp = socks[0].recv(4096)
resp_mv = memoryview(resp)
h2 = uctypes.struct(uctypes.addressof(
resp_mv[20:]), pkt_desc, uctypes.BIG_ENDIAN)
# TODO: validate checksum (optional)
seq = h2.seq
# 0: ICMP_ECHO_REPLY
if h2.type == 0 and h2.id == h.id and (seq in seqs):
t_elapsed = (utime.ticks_us()-h2.timestamp) / 1000
ttl = ustruct.unpack('!B', resp_mv[8:9])[0] # time-to-live
n_recv += 1
not quiet and print("{} bytes from {}: icmp_seq={} ttl={} time={:.3f} ms".format(
len(resp), addr, seq, ttl, t_elapsed))
time_data.append(t_elapsed)
seqs.remove(seq)
if loop:
count += 1
seqs.append(count)
utime.sleep_ms(int_loop)
if len(seqs) == 0:
finish = True
break
else:
break
if finish:
if not loop:
break
utime.sleep_ms(1)
t += 1
sock.close()
        if not quiet:
            print('--- {} ping statistics ---'.format(host))
            print("{} packets transmitted, {} packets received, {:.1f}% packet loss".format(
                n_trans, n_recv, (1-(n_recv/n_trans))*100))
            # min()/stddev() fail if no reply (or only one) was ever received.
            if len(time_data) > 1:
                print("round-trip min/avg/max/stddev = {:.2f}/{:.2f}/{:.2f}/{:.2f} ms".format(
                    min(time_data), sum(time_data)/len(time_data), max(time_data), stddev(time_data)))
gc.collect()
if rtn:
return (n_trans, n_recv)
except KeyboardInterrupt:
# close
sock.close()
gc.collect()
        if not quiet:
            print('^C')
            print('--- {} ping statistics ---'.format(host))
            if n_trans:
                print("{} packets transmitted, {} packets received, {:.1f}% packet loss".format(
                    n_trans, n_recv, (1-(n_recv/n_trans))*100))
            # min()/stddev() fail if no reply (or only one) arrived before ^C.
            if len(time_data) > 1:
                print("round-trip min/avg/max/stddev = {:.2f}/{:.2f}/{:.2f}/{:.2f} ms".format(
                    min(time_data), sum(time_data)/len(time_data), max(time_data), stddev(time_data)))
if rtn:
return (n_trans, n_recv)
except Exception as e:
print(e)
|
from __future__ import absolute_import
import operator
from celery.concurrency import solo
from celery.utils.functional import noop
from celery.tests.case import AppCase
class test_solo_TaskPool(AppCase):
def test_on_start(self):
x = solo.TaskPool()
x.on_start()
def test_on_apply(self):
x = solo.TaskPool()
x.on_start()
x.on_apply(operator.add, (2, 2), {}, noop, noop)
def test_info(self):
x = solo.TaskPool()
x.on_start()
self.assertTrue(x.info)
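    def test_on_apply_invokes_callback(self):
        # Hedged addition (assumption): the solo pool runs the target inline,
        # so the result callback should fire synchronously with the return
        # value. This mirrors test_on_apply above with the callback captured.
        x = solo.TaskPool()
        x.on_start()
        results = []
        x.on_apply(operator.add, (2, 2), {}, results.append, noop)
        self.assertEqual(results, [4])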
|
#-*- coding: utf-8 -*-
import sys
import os
import random
import re
import time
import torch
from torch.autograd import Variable
from torch import optim
import torch.nn as nn
#sys.path.append('../')
from hybrid_bid_t1_model import Seq2Seq
from hybrid_data_utils import *
sub = '-'*20
def init_command_line(argv):
from argparse import ArgumentParser
usage = "seq2seq"
description = ArgumentParser(usage)
description.add_argument("--w2v_path", type=str, default="/users3/yfwang/data/w2v/opensubtitle/")
description.add_argument("--corpus_path", type=str, default="/users3/yfwang/data/corpus/opensubtitle/")
description.add_argument("--w2v", type=str, default="train_all_200e.w2v")
description.add_argument("--test_file", type=str, default="test_sessions.txt")
description.add_argument("--max_context_size", type=int, default=2)
description.add_argument("--batch_size", type=int, default=64)
description.add_argument("--enc_hidden_size", type=int, default=512)
description.add_argument("--max_senten_len", type=int, default=15)
description.add_argument("--dropout", type=float, default=0.5)
description.add_argument("--teach_forcing", type=int, default=1)
description.add_argument("--print_every", type=int, default=100, help="print every batches when training")
description.add_argument("--weights", type=str, default=None)
return description.parse_args(argv)
opts = init_command_line(sys.argv[1:])
print ("Configure:")
print (" w2v:",os.path.join(opts.w2v_path,opts.w2v))
print (" test_file:",os.path.join(opts.corpus_path,opts.test_file))
print (" max_context_size:",opts.max_context_size)
print (" batch_size:",opts.batch_size)
print (" enc_hidden_size:",opts.enc_hidden_size)
print (" max_senten_len:",opts.max_senten_len)
print (" dropout:",opts.dropout)
print (" teach_forcing:",opts.teach_forcing)
print (" print_every:",opts.print_every)
print (" weights:",opts.weights)
print ("")
def readingTestCorpus(test_file_path):
print ("reading...")
test_file = open(test_file_path,'r')
list_pairs = []
tmp_pair = []
for line in test_file:
line = line.strip('\n')
if line == sub:
list_pairs.append(tmp_pair)
tmp_pair = []
else:
tmp_pair.append(line)
test_file.close()
test_contexts = []
test_replys = []
max_con_size = 0
min_con_size = 10000
for pair in list_pairs:
if len(pair) >= 3:
test_contexts.append(pair[0:-1])
test_replys.append(pair[-1])
max_con_size = max(len(pair[0:-1]),max_con_size)
min_con_size = min(len(pair[0:-1]),min_con_size)
else:
pass
	print ("max context size:", max_con_size)
	print ("min context size:", min_con_size)
return test_contexts,test_replys
def preProcess(word2index,test_contexts,unk_char,ini_char,max_senten_len,max_context_size):
print ("preprocessing...")
filter_test_contexts = []
for context in test_contexts:
filter_context = [filteringSenten(word2index,senten,unk_char,ini_char) for senten in context]
filter_test_contexts.append(filter_context)
padded_test_pairs = []
for context in filter_test_contexts:
pad_list = [0]*len(context)
if len(context) <= max_context_size:
pad_list = [1]*(max_context_size-len(context)) + pad_list
context = ['<unk>']*(max_context_size-len(context)) + context
else:
pad_list = pad_list[-max_context_size:]
context = context[-max_context_size:]
padded_context = [paddingSenten(senten,max_senten_len) for senten in context]
padded_test_pairs.append([padded_context,pad_list])
return padded_test_pairs
# Read in a list of sentences, build batches, then run prediction
def predictSentences(index2word,unk_char,ini_char,ini_idx,model,test_pairs,
print_every,batch_size,max_senten_len,max_context_size):
model.eval()
	# Build the list of batches
pairs_batches,num_batches = buildingPairsBatch(test_pairs,batch_size,shuffle=False)
print ("")
print ("num of batch:",num_batches)
predict_sentences = []
idx_batch = 0
for contexts_tensor_batch, pad_matrix_batch in getTensorsContextPairsBatch(word2index,pairs_batches,max_context_size):
predict_batch = model.predict(contexts_tensor_batch,index2word,pad_matrix_batch,ini_idx,sep_char='\t')
predict_sentences.extend(predict_batch)
if (idx_batch+1)%print_every == 0:
print ("{} batches finished".format(idx_batch+1))
idx_batch += 1
predict_sentences = predict_sentences[0:len(test_pairs)]
return predict_sentences
if __name__ == '__main__':
ini_char = '</i>'
unk_char = '<unk>'
t0 = time.time()
print ("loading word2vec...")
ctable = W2vCharacterTable(os.path.join(opts.w2v_path,opts.w2v),ini_char,unk_char)
print(" dict size:",ctable.getDictSize())
print (" emb size:",ctable.getEmbSize())
print (time.time()-t0)
print ("")
seq2seq = Seq2Seq(ctable.getDictSize(),ctable.getEmbSize(),opts.enc_hidden_size,opts.batch_size,opts.dropout,
opts.max_senten_len,opts.teach_forcing).cuda()
	if opts.weights is not None:
print ("load model parameters...")
seq2seq.load_state_dict(torch.load(opts.weights))
else:
print ("No model parameters!")
exit()
test_contexts,test_replys = readingTestCorpus(os.path.join(opts.corpus_path,opts.test_file))
print ("len(test_contexts):",len(test_contexts))
print ("len(test_replys):",len(test_replys))
word2index = ctable.getWord2Index()
test_pairs = preProcess(word2index,test_contexts,unk_char,ini_char,opts.max_senten_len,opts.max_context_size)
print ("len(test_pairs):",len(test_pairs))
'''test_pair = test_pairs[100]
test_context = test_pair[0]
pad_list = test_pair[1]
for senten in test_context:
print senten
print pad_list'''
print ("start predicting...")
ini_idx = word2index[ini_char]
predict_sentences = predictSentences(ctable.getIndex2Word(),unk_char,ini_char,ini_idx,seq2seq,test_pairs,
opts.print_every,opts.batch_size,opts.max_senten_len,opts.max_context_size)
print ("writing...")
if not os.path.exists('./result/'):
os.mkdir('./result/')
pred_res_file = open("./result/open_pred_res_hyb_t1_len2",'w')
pred_ans_file = open("./result/open_pred_ans_hyb_t1_len2",'w')
for idx,senten in enumerate(predict_sentences):
test_context = test_contexts[idx]
for test_post in test_context:
pred_res_file.write(test_post+'\n')
pred_res_file.write(senten+'\n')
pred_res_file.write(sub+'\n')
senten_l = [c for c in senten.split('\t') if c != '</s>']
pred_ans_file.write(' '.join(senten_l)+' __eou__'+'\n')
pred_res_file.close()
pred_ans_file.close()
print ("end")
|
import json
import logging
import os
import re
import sys
import time
def setup_logger():
console = logging.StreamHandler(sys.stdout)
handlers = [console]
logging.basicConfig(handlers=handlers)
root = logging.getLogger()
root.setLevel(logging.INFO)
setup_logger()
log = logging.getLogger(__name__)
class NS:
@staticmethod
def dict(ns, deep=True):
dic = ns.__dict__
if not deep:
return dic
for k, v in dic.items():
if isinstance(v, NS):
dic[k] = NS.dict(v)
return dic
@staticmethod
def from_dict(dic, deep=True):
ns = NS(dic)
if not deep:
return ns
for k, v in ns.__dict__.items():
if isinstance(v, dict):
ns.__dict__[k] = NS.from_dict(v)
return ns
@staticmethod
def walk(ns, fn, inplace=False):
nns = ns if inplace else NS()
for k, v in ns.__dict__.items():
nk, nv = fn(k, v)
if nk is not None:
if v is nv and isinstance(v, NS):
nv = NS.walk(nv, fn, inplace)
nns.__dict__[nk] = nv
return nns
def __init__(self, *args, **kwargs):
self.__dict__.update(dict(*args, **kwargs))
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
class Timer:
@staticmethod
def _zero():
return 0
def __init__(self, clock=time.time, enabled=True):
self.start = 0
self.stop = 0
self._time = clock if enabled else Timer._zero
def __enter__(self):
self.start = self._time()
return self
def __exit__(self, *args):
self.stop = self._time()
@property
def duration(self):
if self.stop > 0:
return self.stop - self.start
return self._time() - self.start
def result(output_file=None,
predictions=None, truth=None,
probabilities=None, probabilities_labels=None,
target_is_encoded=False,
error_message=None,
models_count=None,
training_duration=None):
return locals()
data_keys = re.compile("^(X|y|data)(_.+)?$")
def call_run(run_fn):
import numpy as np
params = NS.from_dict(json.loads(sys.stdin.read()))
def load_data(name, path):
if isinstance(path, str) and data_keys.match(name):
return name, np.load(path, allow_pickle=True)
return name, path
print(params.dataset)
ds = NS.walk(params.dataset, load_data)
config = params.config
config.framework_params = NS.dict(config.framework_params)
try:
result = run_fn(ds, config)
res = dict(result)
for name in ['predictions', 'truth', 'probabilities']:
arr = result[name]
if arr is not None:
res[name] = os.path.join(config.result_dir, '.'.join([name, 'npy']))
np.save(res[name], arr, allow_pickle=True)
except Exception as e:
log.exception(e)
res = dict(
error_message=str(e),
models_count=0
)
print(config.result_token)
print(json.dumps(res, separators=(',', ':')))
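# Hedged usage sketch (illustrative, not part of the runner): NS.from_dict turns
# nested dicts into attribute-style namespaces and Timer measures a with-block.
if __name__ == '__main__':
    cfg = NS.from_dict({'framework': {'name': 'demo', 'params': {'n_jobs': 2}}})
    print(cfg.framework.name, cfg.framework.params.n_jobs)
    with Timer() as t:
        time.sleep(0.01)
    print('took %.3fs' % t.duration)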
|
# -*- coding: utf-8 -*-
"""
Goes through the clean dataset and determines when audio is occurring by
measuring against a simple threshold.
We use these labels for the 15, 10, 5, and 0 dB SNR samples.
Created on Sun Dec 4 15:37:11 2016
@author: brady
"""
import os
import wavio
from fe_utils import *
from config import TRAIN_CLEAN
os.chdir(TRAIN_CLEAN)
for file in os.listdir():
if not file.endswith('.wav'):
continue
mWav = wavio.read(file)
frame_len = int(getFrameSize(mWav.rate))
mWav.data = normalizeAudio(mWav.data, mWav.sampwidth)
frame_cnt = int(len(mWav.data)/frame_len )
if (len(mWav.data)%frame_len):
frame_cnt += 1
class_list = []
for idx in range(frame_cnt):
if (idx == frame_cnt-1):
# last chunk may be truncated
chunk = mWav.data[idx*frame_len :]
else:
chunk = mWav.data[idx*frame_len : (idx+1)*frame_len]
if aboveFrameThreshold(chunk):
class_list.append(1)
else:
class_list.append(0)
filename = os.path.splitext(file)[0]
with open(filename + '.csv', 'w') as f:
f.write(','.join([str(c) for c in class_list]))
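# Hedged illustration (not part of the original script): aboveFrameThreshold()
# comes from fe_utils and is not shown here; a simple mean-absolute-amplitude
# check along these lines is one plausible shape for that kind of threshold.
def _above_threshold_sketch(frame, threshold=0.05):
    """Return True when the frame's mean absolute amplitude exceeds threshold."""
    total = sum(abs(float(s)) for s in frame)
    return (total / len(frame)) > threshold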
|