"""
API endpoints for nunaserver.
"""
import tempfile
import os
import uuid
from pathlib import Path
import flask
from minio.commonconfig import Filter, ENABLED
from minio.lifecycleconfig import LifecycleConfig, Expiration, Rule
from nunaserver import settings
from nunaserver.minio_connection import storage
from nunaserver.limiter import limiter
from nunaserver.utils.archive_utils import fetch_remote_namespace, unzip_to_directory
from nunaserver.generator import generate_dsdl
from nunaserver.forms import UploadForm, ValidationError
api = flask.Blueprint("api", __name__)
@api.route("/", methods=["GET"])
def root():
"""
Return 200 OK status with some server information.
"""
return f"Nunaserver {settings.SERVER_VERSION}"
# pylint: disable=invalid-name,too-many-locals
@api.route("/upload", methods=["POST"])
@limiter.limit(settings.UPLOAD_LIMITS)
def upload():
"""
Handle uploaded DSDL namespace repository archives.
This expects either an already made zip archive uploaded
as a file or a URL link to a zip archive.
Frontend converts GitHub links into zip archives.
Takes multipart/form-data (obviously, because we have a file upload).
"""
build_uuid = str(uuid.uuid4())
storage.make_bucket(build_uuid)
config = LifecycleConfig(
[
Rule(
ENABLED,
rule_filter=Filter(prefix="uploads/"),
rule_id=f"delete_rule_{build_uuid}",
expiration=Expiration(days=30),
),
],
)
storage.set_bucket_lifecycle(build_uuid, config)
try:
form = UploadForm(flask.request.form, flask.request.files)
except ValidationError as error:
return flask.jsonify(error.errors)
for file in form.archive_files:
size = os.fstat(file.fileno()).st_size
storage.put_object(build_uuid, f"uploads/{file.filename}", file, size)
# Kick off Celery task to generate DSDL
task = generate_dsdl.delay(
build_uuid,
form.archive_urls,
form.target_lang,
form.target_endian,
form.flags,
form.doc_url
)
return (
flask.jsonify(
{
"task_url": flask.url_for("api.taskstatus", task_id=task.id),
}
),
202,
)
@api.route("/status/<task_id>")
def taskstatus(task_id):
"""
Fetch the status of a running generation task.
"""
try:
task = generate_dsdl.AsyncResult(task_id)
if task.state == "PENDING":
# job did not start yet
response = {
"state": task.state,
"current": 0,
"total": 1,
"status": "Pending...",
}
elif task.state != "FAILURE":
response = {
"state": task.state,
"current": task.info.get("current", 0),
"total": task.info.get("total", 1),
"status": task.info.get("status", ""),
"command": task.info.get("command", ""),
"type": task.info.get("type", ""),
}
if "result" in task.info:
response["result"] = task.info["result"]
else:
# something went wrong in the background job
response = {
"state": task.state,
"current": 1,
"total": 1,
"status": str(task.info), # this is the exception raised
}
return flask.jsonify(response)
except AttributeError:
return flask.jsonify(
{
"state": "CANCELED",
"current": "0",
"total": "1",
"status": "Task was canceled.",
}
)
@api.route("/status/<task_id>/cancel")
def taskcancel(task_id):
"""
Cancel a running generation task.
"""
task = generate_dsdl.AsyncResult(task_id)
task.revoke(terminate=True)
return flask.jsonify({"response": "OK"}), 200
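# --- Illustrative client sketch (not part of the original module) ---
# Shows how the /upload and /status endpoints above might be exercised from a
# client. The server URL, multipart field names, and form values are
# assumptions; the real field names are defined by UploadForm, which is not
# shown here.
def _example_upload_client(archive_path="dsdl.zip", server="http://localhost:5000"):
    import time
    import requests

    with open(archive_path, "rb") as archive:
        resp = requests.post(
            f"{server}/upload",
            files={"archive_files": archive},          # field name is an assumption
            data={"target_lang": "c", "target_endian": "little"},
        )
    resp.raise_for_status()
    task_url = server + resp.json()["task_url"]
    while True:
        status = requests.get(task_url).json()
        if status["state"] in ("SUCCESS", "FAILURE", "CANCELED"):
            return status
        time.sleep(1)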
|
"""
Forces method classes.
The dynamic model should inherit from these classes in order to get the proper forces.
"""
import opensim as osim
class ResidualForces:
def __init__(self, residual_actuator_xml=None):
if residual_actuator_xml:
res_force_set = osim.ForceSet(residual_actuator_xml, True)
n_forces = res_force_set.getSize()
model_force_set = self.model.getForceSet()
for i in range(n_forces):
force = res_force_set.get(i)
model_force_set.cloneAndAppend(force)
class ExternalForces:
def __init__(self, external_load_xml=None):
if external_load_xml:
force_set = osim.ForceSet(external_load_xml)
n_forces = force_set.getSize()
for i in range(n_forces):
self.model.getForceSet().append(force_set.get(i))
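# --- Illustrative usage sketch (not part of the original module) ---
# Both mixins above assume the inheriting class already provides ``self.model``
# (an ``osim.Model``) before their __init__ runs. The base model file and XML
# paths below are assumptions.
class _ExampleDynamicModel(ResidualForces, ExternalForces):
    def __init__(self, model_file, residual_actuator_xml=None, external_load_xml=None):
        # The model must exist before the force mixins try to append to it.
        self.model = osim.Model(model_file)
        ResidualForces.__init__(self, residual_actuator_xml)
        ExternalForces.__init__(self, external_load_xml)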
|
import sys
import os
import re
import importlib
import warnings
is_pypy = '__pypy__' in sys.builtin_module_names
warnings.filterwarnings('ignore',
r'.+ distutils\b.+ deprecated',
DeprecationWarning)
def warn_distutils_present():
if 'distutils' not in sys.modules:
return
if is_pypy and sys.version_info < (3, 7):
# PyPy for 3.6 unconditionally imports distutils, so bypass the warning
# https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
return
warnings.warn(
"Distutils was imported before Setuptools, but importing Setuptools "
"also replaces the `distutils` module in `sys.modules`. This may lead "
"to undesirable behaviors or errors. To avoid these issues, avoid "
"using distutils directly, ensure that setuptools is installed in the "
"traditional way (e.g. not an editable install), and/or make sure "
"that setuptools is always imported before distutils.")
def clear_distutils():
if 'distutils' not in sys.modules:
return
warnings.warn("Setuptools is replacing distutils.")
mods = [name for name in sys.modules if re.match(r'distutils\b', name)]
for name in mods:
del sys.modules[name]
def enabled():
"""
Allow selection of distutils by environment variable.
"""
which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')
return which == 'local'
def ensure_local_distutils():
clear_distutils()
# With the DistutilsMetaFinder in place,
# perform an import to cause distutils to be
# loaded from setuptools._distutils. Ref #2906.
add_shim()
importlib.import_module('distutils')
remove_shim()
# check that submodules load as expected
core = importlib.import_module('distutils.core')
assert '_distutils' in core.__file__, core.__file__
def do_override():
"""
Ensure that the local copy of distutils is preferred over stdlib.
See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
for more motivation.
"""
if enabled():
warn_distutils_present()
ensure_local_distutils()
class DistutilsMetaFinder:
def find_spec(self, fullname, path, target=None):
if path is not None:
return
method_name = 'spec_for_{fullname}'.format(**locals())
method = getattr(self, method_name, lambda: None)
return method()
def spec_for_distutils(self):
import importlib.abc
import importlib.util
class DistutilsLoader(importlib.abc.Loader):
def create_module(self, spec):
return importlib.import_module('setuptools._distutils')
def exec_module(self, module):
pass
return importlib.util.spec_from_loader('distutils', DistutilsLoader())
def spec_for_pip(self):
"""
Ensure stdlib distutils when running under pip.
See pypa/pip#8761 for rationale.
"""
if self.pip_imported_during_build():
return
clear_distutils()
self.spec_for_distutils = lambda: None
@staticmethod
def pip_imported_during_build():
"""
Detect if pip is being imported in a build script. Ref #2355.
"""
import traceback
return any(
frame.f_globals['__file__'].endswith('setup.py')
for frame, line in traceback.walk_stack(None)
)
DISTUTILS_FINDER = DistutilsMetaFinder()
def add_shim():
sys.meta_path.insert(0, DISTUTILS_FINDER)
def remove_shim():
try:
sys.meta_path.remove(DISTUTILS_FINDER)
except ValueError:
pass
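# --- Illustrative usage sketch (not part of the original module) ---
# ``do_override()`` is a no-op unless ``enabled()`` returns True, i.e. unless
# SETUPTOOLS_USE_DISTUTILS=local is set in the environment. In setuptools this
# is typically triggered automatically at interpreter startup; a minimal manual
# invocation might look like this:
def _example_manual_override():
    import os
    os.environ['SETUPTOOLS_USE_DISTUTILS'] = 'local'
    do_override()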
|
# -*- coding: utf8 -*-
import json
from console.models import Resource, resource_repo
from console.exceptions import NotFound, AlreadyExist, PermissionDenied
from console.factory import logger
class ResourceService:
resource_repo = resource_repo
    def __init__(self, rid: str = None, task_intra_id: str = None, name: str = None):
        # Default to None so later checks like ``self.resource is None`` cannot raise AttributeError.
        self.resource = None
        if rid:
            self.resource = self.resource_repo.get(rid)
        elif task_intra_id and name:
            self.resource = self.resource_repo.filter(task_intra_id=task_intra_id, name=name)
def create_resource(self, owner_id: str, task_intra_id: str, name: str, uri: str, comment: str, config: str):
if self.resource_repo.filter(task_intra_id=task_intra_id, name=name) is not None:
            raise AlreadyExist(message='resource already exists')
self.resource = Resource(owner_id=owner_id, task_intra_id=task_intra_id, name=name, uri=uri, comment=comment,
config=config)
self.resource_repo.insert_or_update(self.resource)
return self.resource
def update_resource(self, request_data: dict) -> Resource:
if self.resource is None:
raise NotFound(message='resource object init failed')
need_update = False
if 'comment' in request_data:
self.resource.comment = request_data['comment']
need_update = True
if 'config' in request_data:
config = json.loads(self.resource.config) if self.resource.config else {}
config.update(request_data['config'])
self.resource.config = json.dumps(config)
need_update = True
if need_update:
self.resource_repo.insert_or_update(self.resource)
return self.resource
def delete_resource(self):
if self.resource is None:
return
self.resource_repo.delete(self.resource)
def get_resource_list(self, task_intra_id: str):
return self.resource_repo.get_all(task_intra_id=task_intra_id)
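# --- Illustrative usage sketch (not part of the original module) ---
# All field values below are made up; the snippet only shows the intended call
# order of the service methods defined above.
def _example_resource_flow():
    service = ResourceService()
    service.create_resource(owner_id='u1', task_intra_id='t1', name='dataset',
                            uri='s3://bucket/key', comment='demo', config='{}')
    service.update_resource({'comment': 'updated', 'config': {'retries': 3}})
    return service.get_resource_list(task_intra_id='t1')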
|
########################################################################
import sys
import numpy
import vtk
from vtk_py import *
########################################################################
def transform_mesh_w_4x4mat(infilename, outfilename, matrix):
ugrid = vtk.vtkUnstructuredGrid()
    if infilename.endswith(".vtu"):
        ugrid = readXMLUGrid(infilename)
    elif infilename.endswith(".vtk"):
        ugrid = readUGrid(infilename)
rot_mat = vtk.vtkMatrix4x4()
rot_mat.DeepCopy(matrix)
transform = vtk.vtkTransform()
transform.SetMatrix(rot_mat)
transform.Update()
transformfilter = vtk.vtkTransformFilter()
if (vtk.vtkVersion.GetVTKMajorVersion() >= 6):
transformfilter.SetInputData(ugrid)
else:
transformfilter.SetInput(ugrid)
transformfilter.SetTransform(transform)
transformfilter.Update()
    if outfilename.endswith(".vtu"):
        writeXMLUGrid(transformfilter.GetOutput(), outfilename)
    elif outfilename.endswith(".vtk"):
        writeUGrid(transformfilter.GetOutput(), outfilename)
return transformfilter.GetOutput()
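# --- Illustrative usage sketch (not part of the original module) ---
# File names are assumptions; the matrix is a flat, row-major list of 16 values,
# here the identity transform.
def _example_identity_transform(infile="mesh_in.vtu", outfile="mesh_out.vtu"):
    identity = [1.0, 0.0, 0.0, 0.0,
                0.0, 1.0, 0.0, 0.0,
                0.0, 0.0, 1.0, 0.0,
                0.0, 0.0, 0.0, 1.0]
    return transform_mesh_w_4x4mat(infile, outfile, identity)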
if __name__ == "__main__":
    print(len(sys.argv))
    assert len(sys.argv) == 19, 'Usage: infilename outfilename followed by 16 matrix values.'
    infilename = sys.argv[1]
    outfilename = sys.argv[2]
    matrix = list(map(float, sys.argv[3:19]))
    print(matrix)
    transform_mesh_w_4x4mat(infilename, outfilename, matrix)
|
import pygame, sys
import numpy as np
import subprocess
import time
import json
from timeit import default_timer as timer
# text box initialization
def update_message (message):
font = pygame.font.Font(None, 24)
text = font.render(message, 1, (0, 0, 0))
text_rect = text.get_rect(center =(WIDTH / 2, HEIGHT-50))
screen.fill ((255,255,255), (0, HEIGHT-100, WIDTH, 100))
screen.blit(text, text_rect)
pygame.display.update()
# utility function to execute shell command and parse result
def subprocess_run (cmd):
result = subprocess.run(cmd, stdout=subprocess.PIPE)
result = result.stdout.decode('utf-8')[:-1] # remove trailing newline
return result
# redraw the screen and the mass at new x location
def update_figures(ball_x):
screen.fill (BG_COLOR, (0, 0, WIDTH, HEIGHT-100)) # reset screen first
# draw the point mass
pygame.draw.circle(
screen,
BALL_COLOR,
(ball_x, BALL_Y), # center coordinate
BALL_RADIUS,
0 # fill the circle
)
# draw the spring for fun
spring_circle_first_x = SPRING_CIRCLE_RADIUS
spring_circle_last_x = ball_x - BALL_RADIUS - SPRING_CIRCLE_RADIUS
spring_circle_distance = (spring_circle_last_x - spring_circle_first_x)/(N_SPRING_CIRCLE-1)
spring_circle_x = spring_circle_first_x
for i in range(N_SPRING_CIRCLE):
pygame.draw.circle(
screen,
SPRING_COLOR,
(spring_circle_x, BALL_Y), # center coordinate
SPRING_CIRCLE_RADIUS,
1
)
spring_circle_x += spring_circle_distance
pygame.display.update()
# scene setup
WIDTH = 600
HEIGHT = 600+100
BALL_RADIUS = 30
SPRING_CIRCLE_RADIUS = 10
N_SPRING_CIRCLE = 20
MESSAGE = "Simple Harmonic Oscillator on StarkNet LFG"
BG_COLOR = (25, 25, 112)
BALL_COLOR = (239, 231, 200)
SPRING_COLOR = (239, 231, 200)
BALL_X_OFFSET = 300
BALL_Y = 300
# contract setup
CONTRACT_ADDRESS = '0x3280705f884bb08c0fd6c53f67e51d1b06c8118397f68234072a78a63b13c9c'
SCALE_FP = 10000 # for fixed-point arithmetic
PRIME = 3618502788666131213697322783095070105623107215331596699973092056135872020481
PRIME_HALF = PRIME//2
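# --- Illustrative helpers (not part of the original script) ---
# The loop below converts signed fixed-point values to positive field elements
# inline before calling the contract. These helpers just give that conversion a
# name; treating values above PRIME_HALF as negative on the way back is an
# assumption about what PRIME_HALF is intended for.
def signed_to_felt(value_fp):
    return value_fp if value_fp >= 0 else value_fp + PRIME

def felt_to_signed(felt):
    return felt if felt <= PRIME_HALF else felt - PRIME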
pygame.init()
screen = pygame.display.set_mode( (WIDTH, HEIGHT) )
pygame.display.set_caption( 'SHO' )
screen.fill( BG_COLOR )
AMPLITUDE = 100
SCALE_X = (WIDTH/2.-BALL_RADIUS*2)/100
x = AMPLITUDE
xd = 0 # stationary at start
t_fp = int( 0 * SCALE_FP )
dt_fp = int( 0.02 * SCALE_FP )
x_fp = int( x * SCALE_FP )
xd_fp = int( xd * SCALE_FP )
update_figures(ball_x = BALL_X_OFFSET + x*SCALE_X)
update_message(MESSAGE)
while True:
    # retrieve N samples from the contract
N = 100
print(f'> Begin retrieval of {N} coordinates from StarkNet rk4 integrator.')
x_fp_s = [x_fp]
xd_fp_s = [xd_fp]
for i in range(N):
# sending side must mod P to send only positive felt values; receiving side could receive negative value
x_fp = x_fp if x_fp>=0 else x_fp+PRIME
xd_fp = xd_fp if xd_fp>=0 else xd_fp+PRIME
cmd = f"starknet call --network=alpha --address {CONTRACT_ADDRESS} --abi sho_contract_abi.json " + \
f"--function query_next_given_coordinates --inputs {t_fp} {dt_fp} {x_fp} {xd_fp}"
cmd = cmd.split(' ')
result = subprocess_run(cmd)
result = result.split(' ')
x_fp = int(result[0])
xd_fp = int(result[1])
t_fp += dt_fp
x_fp_s.append( x_fp )
xd_fp_s.append( xd_fp )
print(f'> {i+1}th/{N} coordinate retrieved from StarkNet rk4 integrator.')
print()
x_s = [x_fp/SCALE_FP for x_fp in x_fp_s]
    print(f'> printing all retrieved coordinates: {x_s}\n')
# saving the last coordinate for next loop
x_fp = x_fp_s[-1]
xd_fp = xd_fp_s[-1]
# render animation from retrieved coordinates
print('>>> Begin animation rendering.')
for i in range(N):
update_figures(ball_x = BALL_X_OFFSET + x_s[i]*SCALE_X)
update_message(MESSAGE)
time.sleep(0.05)
# check for quit() event
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
|
from ..datasets import CleanData,DownloadData
# from .CleanData import cleanEngXml,cleanMathXml,CleanDataWiki,CleanDataSO
|
from apistar.backends import sqlalchemy_backend
from apistar.frameworks.wsgi import WSGIApp as App
from project.routes import routes
from project.settings import settings
app = App(
routes=routes,
settings=settings,
commands=sqlalchemy_backend.commands,
components=sqlalchemy_backend.components
)
if __name__ == '__main__':
app.main()
|
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing flags applicable across benchmark run on AWS."""
from absl import flags
from perfkitbenchmarker.providers.aws import util
flags.DEFINE_string(
'aws_user_name', '', 'This determines the user name that Perfkit will '
'attempt to use. Defaults are OS specific.')
flags.DEFINE_integer('aws_provisioned_iops', None,
'IOPS for Provisioned IOPS (SSD) volumes in AWS.')
flags.DEFINE_integer('aws_provisioned_throughput', None,
'Provisioned throughput (MB/s) for (SSD) volumes in AWS.')
flags.DEFINE_string('aws_dax_node_type', 'dax.r4.large',
'The node type used for creating AWS DAX cluster.')
flags.DEFINE_integer('aws_dax_replication_factor', 3,
'The replication factor of AWS DAX cluster.')
flags.DEFINE_string('aws_emr_loguri', None,
'The log-uri parameter to pass to AWS when creating a '
'cluster. If not set, a bucket will be created.')
flags.DEFINE_integer('aws_emr_job_wait_time', 18000,
'The time to wait for an EMR job to finish, in seconds')
flags.DEFINE_boolean('aws_spot_instances', False,
'Whether to use AWS spot instances for any AWS VMs.')
flags.DEFINE_float('aws_spot_price', None,
'The spot price to bid for AWS spot instances. Defaults '
'to on-demand price when left as None.')
flags.DEFINE_enum('aws_spot_block_duration_minutes', None,
['60', '120', '180', '240', '300', '360'], 'The required '
'duration for the Spot Instances (also known as Spot blocks),'
' in minutes. This value must be a multiple of 60.')
flags.DEFINE_integer('aws_boot_disk_size', None,
'The boot disk size in GiB for AWS VMs.')
flags.DEFINE_string('kops', 'kops',
'The path to the kops binary.')
flags.DEFINE_string('aws_image_name_filter', None,
'The filter to use when searching for an image for a VM. '
'See usage details in aws_virtual_machine.py around '
'IMAGE_NAME_FILTER.')
flags.DEFINE_string('aws_image_name_regex', None,
'The Python regex to use to further filter images for a '
'VM. This applies after the aws_image_name_filter. See '
'usage details in aws_virtual_machine.py around '
'IMAGE_NAME_REGEX.')
flags.DEFINE_string('aws_preprovisioned_data_bucket', None,
'AWS bucket where pre-provisioned data has been copied.')
flags.DEFINE_string('cache_node_type',
'cache.m4.large',
'The AWS cache node type to use for elasticache clusters.')
flags.DEFINE_string('aws_elasticache_failover_zone',
None,
'AWS elasticache failover zone')
flags.DEFINE_string('aws_efs_token', None,
'The creation token used to create the EFS resource. '
'If the file system already exists, it will use that '
'instead of creating a new one.')
flags.DEFINE_boolean('aws_delete_file_system', True,
'Whether to delete the EFS file system.')
flags.DEFINE_list('eks_zones', [],
                  'DEPRECATED: Set container_cluster.vm_spec.AWS.zone instead. '
'The single region or multiple zones into which the EKS '
'cluster will be deployed. If a region is passed zones will '
'be decided by EKS. All zones must be from the same region.')
flags.register_validator('eks_zones',
util.EksZonesValidator)
flags.DEFINE_enum('efs_throughput_mode', 'provisioned',
['provisioned', 'bursting'],
'The throughput mode to use for EFS.')
flags.DEFINE_float('efs_provisioned_throughput', 1024.0,
'The throughput limit of EFS (in MiB/s) when run in '
'provisioned mode.')
flags.DEFINE_boolean('provision_athena', False,
'Whether to provision the Athena database.')
flags.DEFINE_boolean('teardown_athena', True,
'Whether to teardown the Athena database.')
flags.DEFINE_string(
'athena_output_location_prefix', 'athena-cli-results',
'Prefix of the S3 bucket name for Athena Query Output. Suffix will be the '
'region and the run URI, and the bucket will be dynamically created and '
'deleted during the test.')
flags.DEFINE_string('eksctl', 'eksctl', 'Path to eksctl.')
flags.DEFINE_enum('redshift_client_interface', 'JDBC', ['JDBC'],
'The Runtime Interface used when interacting with Redshift.')
flags.DEFINE_enum('athena_client_interface', 'JAVA', ['JAVA'],
'The Runtime Interface used when interacting with Athena.')
flags.DEFINE_string('athena_query_timeout', '600', 'Query timeout in seconds.')
flags.DEFINE_string('athena_workgroup', '',
'Use athena workgroup to separate applications and choose '
'execution configuration like the engine version.')
flags.DEFINE_boolean(
'athena_metrics_collection', False,
'Should the cloud watch metrics be collected for Athena query executions.')
flags.DEFINE_boolean(
'athena_workgroup_delete', True,
'Should the dedicated athena workgroups be deleted or kept alive for investigations.'
)
flags.DEFINE_enum('aws_credit_specification', None,
['CpuCredits=unlimited', 'CpuCredits=standard'],
'Credit specification for burstable vms.')
flags.DEFINE_boolean('aws_vm_hibernate', False,
'Whether to hibernate(suspend) an aws vm'
'instance.')
flags.DEFINE_string(
'aws_glue_crawler_role', None,
"Role's ARN to be used by the crawler. Must have policies that grant "
'permission for using AWS Glue and read access to S3.')
flags.DEFINE_integer(
'aws_glue_crawler_sample_size', None,
'Sets how many files will be crawled in each leaf directory. If left '
'unset, all the files will be crawled. May range from 1 to 249.',
1, 249
)
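# --- Illustrative usage sketch (not part of the original module) ---
# The flags defined above are parsed from the command line by absl; a minimal
# standalone parse with made-up values might look like this.
def _example_parse_aws_flags():
    from absl import flags as _flags
    _flags.FLAGS(['pkb', '--aws_boot_disk_size=100', '--aws_spot_instances=true'])
    return _flags.FLAGS.aws_boot_disk_size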
|
"""
Runs pedal as a toplevel module
"""
import sys
from pedal.command_line.command_line import parse_args, main
args = parse_args()
main(args)
|
# Generated by Django 2.1.15 on 2020-04-21 12:08
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
|
#!/usr/bin/python
################################################################################
# 2301f556-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
def __init__(self):
self.output = []
self.is_compliant = False
self.uuid = "2301f556-5cc5-11e4-af55-00155d01fe08"
def check(self, cli):
# Initialize Compliance
self.is_compliant = False
# Get Registry DWORD
dword = cli.get_reg_dword(r'HKLM:\Software\Policies\Microsoft\Internet Explorer\Main', 'EnableAutoUpgrade')
# Output Lines
self.output = [r'HKLM:\Software\Policies\Microsoft\Internet Explorer\Main', ('EnableAutoUpgrade=' + str(dword))]
if dword == 0:
self.is_compliant = True
return self.is_compliant
def fix(self, cli):
cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft'")
cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft\Internet Explorer'")
cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft\Internet Explorer\Main'")
cli.powershell(r"Set-ItemProperty -path 'HKLM:\Software\Policies\Microsoft\Internet Explorer\Main' -name 'EnableAutoUpgrade' -value 0 -Type DWord")
|
"""Logic for dealing with cwltool as an optional dependency.
Use this as the import interface for cwltool and just call
:func:`ensure_cwltool_available` before using any of the imported
functionality at runtime.
"""
import re
import warnings
warnings.filterwarnings("ignore", message=r"[\n.]DEPRECATION: Python 2", module="cwltool")
import requests
try:
from cwltool import (
job,
main,
pathmapper,
process,
workflow,
)
except ImportError:
main = None # type: ignore[assignment]
workflow = None # type: ignore[assignment]
job = None # type: ignore[assignment]
process = None # type: ignore[assignment]
pathmapper = None # type: ignore[assignment]
try:
from cwltool.context import (
getdefault,
LoadingContext,
RuntimeContext,
)
from cwltool.job import relink_initialworkdir
from cwltool.stdfsaccess import StdFsAccess
except ImportError:
getdefault = None # type: ignore[assignment]
LoadingContext = None # type: ignore[assignment,misc]
relink_initialworkdir = None # type: ignore[assignment]
RuntimeContext = None # type: ignore[assignment,misc]
StdFsAccess = None # type: ignore[assignment,misc]
try:
from cwltool import load_tool
from cwltool.load_tool import (
default_loader,
resolve_and_validate_document,
)
except ImportError:
default_loader = None # type: ignore[assignment]
load_tool = None # type: ignore[assignment]
resolve_and_validate_document = None # type: ignore[assignment]
try:
from cwltool import command_line_tool
except ImportError:
command_line_tool = None # type: ignore[assignment]
try:
from cwltool.load_tool import resolve_and_validate_document
except ImportError:
resolve_and_validate_document = None # type: ignore[assignment]
try:
import shellescape
except ImportError:
shellescape = None
try:
import schema_salad
from schema_salad import (
ref_resolver,
sourceline,
)
except ImportError:
schema_salad = None # type: ignore[assignment]
ref_resolver = None # type: ignore[assignment]
sourceline = None # type: ignore[assignment]
needs_shell_quoting = re.compile(r"""(^$|[\s|&;()<>\'"$@])""").search
# if set to True, file format checking is not performed.
beta_relaxed_fmt_check = True
def ensure_cwltool_available():
"""Assert optional dependencies proxied via this module are available at runtime.
Throw an ImportError with a description of the problem if they do not exist.
"""
if main is None or workflow is None or shellescape is None:
message = "This feature requires cwltool and dependencies to be available, they are not."
if main is None:
message += " cwltool is not unavailable."
elif resolve_and_validate_document is None:
message += " cwltool.load_tool.resolve_and_validate_document is unavailable - cwltool version is too old."
if requests is None:
message += " Library 'requests' unavailable."
if shellescape is None:
message += " Library 'shellescape' unavailable."
if schema_salad is None:
message += " Library 'schema_salad' unavailable."
raise ImportError(message)
__all__ = (
"default_loader",
"ensure_cwltool_available",
"getdefault",
"load_tool",
"LoadingContext",
"main",
"needs_shell_quoting",
"pathmapper",
"process",
"ref_resolver",
"relink_initialworkdir",
"resolve_and_validate_document",
"RuntimeContext",
"schema_salad",
"shellescape",
"sourceline",
"StdFsAccess",
"workflow",
)
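# --- Illustrative usage sketch (not part of the original module) ---
# Call the guard before touching any of the proxied names; everything below
# except ensure_cwltool_available() and main is an assumption about how a
# caller might use them.
def _example_run_cwltool(argv):
    ensure_cwltool_available()
    return main.main(argv)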
|
import datetime
from django.db import models
from django.utils import timezone
# Create your models here.
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_text
def was_published_recently(self):
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.pub_date <= now
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
|
from streamlink.plugins.ard_live import ARDLive
from tests.plugins import PluginCanHandleUrl
class TestPluginCanHandleUrlARDLive(PluginCanHandleUrl):
__plugin__ = ARDLive
should_match = [
'https://daserste.de/live/index.html',
'https://www.daserste.de/live/index.html',
]
should_not_match = [
'http://mediathek.daserste.de/live',
]
|
#!/usr/bin/python3
import sys
import json
import os
import getopt
import ipaddress
import uuid
ip = ''
function = ''
gdc = None
description = ''
listofips = None
if len(sys.argv) <= 4:
print("Error - Format should be - gdc.py -g <Generic_Data_Center_Name> -j <Json_File> -f <AddGDC/DelGDC/AddIP/DelIP> -i <ip> -d <GDC_Description>")
print("")
print("This simple tool will update a JSON file with ip addresses (v4 and v6) used with the Generic Data Center Objects as described in SK167210 for R81.")
print("Examples:")
print("Add a new IP address to a Generic Data Center to an existing JSON file")
print("gdc.py -g GDC_LIST1 -j gdc.json -f AddIP -i 10.2.0.1")
print("")
print("Add a new IP addresses to a Generic Data Center to an existing JSON file from a list of ip's")
print("gdc.py -g GDC_LIST1 -j gdc.json -f AddIP -l listofip_address.txt")
print("")
print("Delete an IP address to a Generic Data Center to an existing JSON file")
print("gdc.py -g GDC_LIST1 -j gdc.json -f DelIP -i 10.2.0.1")
print("")
print("Add a new Generic Data Center to an existing JSON file. IP address must be included.")
print("gdc.py -g GDC_LIST_New -j gdc.json -f AddGDC -d GDC_LIST_NEW_Description -i 10.2.0.1")
print("")
print("Delete a Generic Data Center in an existing JSON file. ")
print("gdc.py -g GDC_LIST_New -j gdc.json -f DelGDC")
print("")
exit(1)
try:
    opts, args = getopt.getopt(sys.argv[1:], "g:j:f:i:d:l:h", ['gdc=', 'json=', 'function=', 'ip=', 'desc=', 'listofips=', 'help'])
except getopt.GetoptError:
print('Error - Format should be - gdc.py -g <Generic_Data_Center_Name> -j <Json_File> -f <AddGDC/DelGDC/AddIP/DelIP> -i <ip> -l <list_of_ip_in_File> -d <GDC_Description>')
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
print('Format should be - gdc.py -g <Generic_Data_Center_Name> -j <Json_File> -f <AddGDC/DelGDC/AddIP/DelIP> -i <ip> -l <list_of_ip_in_File> -d <GDC_Description>')
sys.exit()
elif opt in ("-g", "--gdc"):
gdc = arg
elif opt in ("-f", "--function"):
function = arg
elif opt in ("-j", "--json"):
jsonfile = arg
elif opt in ('-i', '--ip'):
ip = arg
elif opt in ('-d', '--desc'):
desc = arg
elif opt in ('-l', '--listofips'):
listofips = arg
### Functions
# Function to remove duplicates - used to make sure IPs are unique
def remove_dupe_dicts(l):
list_of_strings = [
json.dumps(d, sort_keys=True)
for d in l
]
list_of_strings = set(list_of_strings)
return [
json.loads(s)
for s in list_of_strings
]
# Function to Check for name in json
def gdc_exist(gdc,jsondata):
match = False
for dc in jsondata:
if dc["name"] == gdc:
match = True
return match
# Function to check if JSON file exists
def fileexists(fn):
try:
open(fn,"r")
except IOError:
print('File: %s - specified does not appear to exist' % fn)
sys.exit()
# Function to check for valid ip address
def check_ip(checkip):
# Check if range is provided by a dash in the ip address
isrange = ("-" in checkip)
if isrange == True:
range = checkip.split("-")
# Check if range ip 1 is less than 2
ip = (ipaddress.ip_address(range[0]) < ipaddress.ip_address(range[1]))
if ip == True:
return
else:
print('address/netmask is invalid: %s' % checkip)
print('If adding a new Generic Data Center Object an IP has to be defined!')
sys.exit()
try:
ip = ipaddress.ip_address(checkip)
except ValueError:
try:
ip = ipaddress.ip_network(checkip)
except ValueError:
print('address/netmask is invalid: %s' % checkip)
print('If adding a new Generic Data Center Object an IP has to be defined!')
sys.exit()
#### Verify that GDC was passed from CLI ####
if not gdc:
print("Generic Data Center was not passed as a flag to the command. Include -g <Data_Center_Name>")
sys.exit()
#### Add IP to Data Center ####
if function == "AddIP":
filecheck = fileexists(jsonfile)
obj = json.load(open(jsonfile))
# Check and see if the name of the Data Center exists
match = gdc_exist(gdc,obj['objects'])
if match == False:
print('Data Center Object : %s was not found in file : %s' % (gdc,jsonfile))
print('No updates were made')
sys.exit()
# Check to see if this is a list of ips from a file
if not listofips:
# Add an IP to the list
check_ip(ip)
for item in obj['objects']:
if item["name"] == gdc:
item['ranges'].append(ip)
item['ranges'] = remove_dupe_dicts(item['ranges'])
else:
# Read list of ip addresses from file and extend
filecheck = fileexists(listofips)
iplist = {}
with open(listofips) as f:
iplist = f.read().splitlines()
for checkip in iplist:
check_ip(checkip)
for item in obj['objects']:
if item["name"] == gdc:
item['ranges'].extend(iplist)
item['ranges'] = remove_dupe_dicts(item['ranges'])
# Output the updated file with pretty JSON
open(jsonfile, "w").write(
json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
)
#### Remove IP from Data Center ####
if function == "DelIP":
filecheck = fileexists(jsonfile)
obj = json.load(open(jsonfile))
# Check and see if the name of the Data Center exists
match = gdc_exist(gdc,obj['objects'])
if match == False:
print('Data Center Object : %s was not found in file : %s' % (gdc,jsonfile))
print('No updates were made')
sys.exit()
item = obj['objects']
if not listofips:
check_ip(ip)
for item in obj['objects']:
if item["name"] == gdc:
for a in item['ranges'][:]:
if (a == ip):
item['ranges'].remove(a)
else:
# Read list of ip addresses from file and extend
filecheck = fileexists(listofips)
iplist = {}
with open(listofips) as f:
iplist = f.read().splitlines()
for checkip in iplist:
check_ip(checkip)
for item in obj['objects']:
if item["name"] == gdc:
for t in iplist:
try:
item['ranges'].remove(t)
                    except ValueError:
                        print('IP address %s from file %s is not in the Data Center ranges.' % (t, listofips))
item['ranges'] = remove_dupe_dicts(item['ranges'])
# Output the updated file with pretty JSON
open(jsonfile, "w").write(
json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
)
#### Add Data Center ####
if function == "AddGDC":
filecheck = fileexists(jsonfile)
obj = json.load(open(jsonfile))
item = obj['objects']
    gdc_uuid = uuid.uuid4()
# Make sure Description is set
try:
desc
except NameError:
print("Description was not provided as a paramater, please use -d to add the description while adding a new Data Center")
sys.exit()
# Check and see if the name of the Data Center already exists
match = gdc_exist(gdc,obj['objects'])
if match == True:
print('Data Center Object : %s already exists in file : %s' % (gdc,jsonfile))
print('No updates were made')
sys.exit()
# Add GDC data to JSON
item = obj['objects']
add = {"description": desc,
"id": str(uuid),
"name": gdc,
"ranges": []}
item.append(add)
# Check to see if this is a list of ips from a file
if not listofips:
# Add an IP to the list
check_ip(ip)
for item in obj['objects']:
if item["name"] == gdc:
item['ranges'].append(ip)
item['ranges'] = remove_dupe_dicts(item['ranges'])
else:
# Read list of ip addresses from file and extend
filecheck = fileexists(listofips)
iplist = {}
with open(listofips) as f:
iplist = f.read().splitlines()
for checkip in iplist:
check_ip(checkip)
for item in obj['objects']:
if item["name"] == gdc:
item['ranges'].extend(iplist)
item['ranges'] = remove_dupe_dicts(item['ranges'])
# Output the updated file with pretty JSON
open(jsonfile, "w").write(
json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
)
#### Delete Data Center ####
if function == "DelGDC":
filecheck = fileexists(jsonfile)
obj = json.load(open(jsonfile))
# Check if Data Center exists before deletion
match = gdc_exist(gdc,obj['objects'])
if match == False:
print('Data Center Object : %s does not exist in file : %s' % (gdc,jsonfile))
print('No updates were made')
sys.exit()
for i in range(len(obj['objects'])):
if obj['objects'][i]['name'] == gdc:
obj['objects'].pop(i)
break
open(jsonfile, "w").write(
json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
)
|
import pandas as pd
import torch
from transformers import BertJapaneseTokenizer
from wtfml.data_loaders.nlp.utils import clean_sentence
import transformers
class BERTSimpleDataset:
"""
    Dataset for BERT which can accept a cleaning function
"""
def __init__(self, input_texts, target, clearning_function=clean_sentence):
if isinstance(input_texts, pd.Series):
input_texts = list(input_texts)
self.input_texts = input_texts
self.target = target
self.tokenizer = BertJapaneseTokenizer.from_pretrained(
"cl-tohoku/bert-base-japanese-whole-word-masking"
)
self.max_len = 144 # twitter
self.clearning_function = clearning_function
def __len__(self):
return len(self.input_texts)
def __getitem__(self, item):
input_text = str(self.input_texts[item])
if self.clearning_function:
input_text = self.clearning_function(input_text)
inputs = self.tokenizer.encode_plus(
input_text,
None,
add_special_tokens=True,
max_length=self.max_len,
padding="max_length",
truncation=True,
# return_tensors="pt"
)
ids = inputs["input_ids"]
mask = inputs["attention_mask"]
token_type_ids = inputs["token_type_ids"]
target = self.target[item]
return {
"ids": torch.tensor(ids, dtype=torch.long),
"mask": torch.tensor(mask, dtype=torch.long),
"token_type_ids": torch.tensor(token_type_ids, dtype=torch.long),
"targets": torch.tensor(target, dtype=torch.long), # floatからlongに変更
}
class DistilBERTDataset:
"""
    Dataset for BERT which can accept a cleaning function
"""
def __init__(self, input_texts, target, clearning_function=clean_sentence):
if isinstance(input_texts, pd.Series):
input_texts = list(input_texts)
self.input_texts = input_texts
self.target = target
self.tokenizer = transformers.DistilBertTokenizer.from_pretrained(
"cl-tohoku/bert-base-japanese-whole-word-masking"
)
self.max_len = 144 # twitter
self.clearning_function = clearning_function
def __len__(self):
return len(self.input_texts)
def __getitem__(self, item):
input_text = str(self.input_texts[item])
if self.clearning_function:
input_text = self.clearning_function(input_text)
inputs = self.tokenizer.encode_plus(
input_text,
None,
add_special_tokens=True,
max_length=self.max_len,
padding="max_length",
truncation=True,
# return_tensors="pt"
)
ids = inputs["input_ids"]
mask = inputs["attention_mask"]
# token_type_ids = inputs["token_type_ids"]
target = self.target[item]
return {
"ids": torch.tensor(ids, dtype=torch.long),
"mask": torch.tensor(mask, dtype=torch.long),
# "token_type_ids": torch.tensor(token_type_ids, dtype=torch.long),
"targets": torch.tensor(target, dtype=torch.long), # floatからlongに変更
}
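# --- Illustrative usage sketch (not part of the original module) ---
# Wires one of the datasets above into a DataLoader; the texts and labels are
# made up.
def _example_loader():
    from torch.utils.data import DataLoader
    texts = ["今日はいい天気ですね", "この映画は退屈だった"]
    labels = [1, 0]
    dataset = BERTSimpleDataset(texts, labels)
    return DataLoader(dataset, batch_size=2, shuffle=True)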
|
# cec2017.utils
# Author: Duncan Tilley
# Additional functions for graphing and benchmarking
def surface_plot(function, domain=(-100, 100), points=30, dimension=2, ax=None):
"""
Creates a surface plot of a function.
Args:
function (function): The objective function to be called at each point.
domain (num, num): The inclusive (min, max) domain for each dimension.
points (int): The number of points to collect on each dimension. A total
of points^2 function evaluations will be performed.
dimension (int): The dimension to pass to the function. If this is more
than 2, the elements after the first 2 will simply be zero,
providing a slice at x_3 = 0, ..., x_n = 0.
ax (matplotlib axes): Optional axes to use (must have projection='3d').
Note, if specified plt.show() will not be called.
"""
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits import mplot3d
# create points^2 tuples of (x,y) and populate z
xys = np.linspace(domain[0], domain[1], points)
xys = np.transpose([np.tile(xys, len(xys)), np.repeat(xys, len(xys))])
zs = np.zeros(points * points)
if dimension > 2:
# concatenate remaining zeros
tail = np.zeros(dimension - 2)
for i in range(0, xys.shape[0]):
zs[i] = function(np.concatenate([xys[i], tail]))
else:
for i in range(0, xys.shape[0]):
zs[i] = function(xys[i])
# create the plot
ax_in = ax
if ax is None:
ax = plt.axes(projection="3d")
X = xys[:, 0].reshape((points, points))
Y = xys[:, 1].reshape((points, points))
Z = zs.reshape((points, points))
ax.plot_surface(X, Y, Z, cmap="gist_ncar", edgecolor="none")
ax.set_title(function.__name__)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
if ax_in is None:
plt.show()
def time(function, domain=(-100, 100), points=30):
"""
Returns the time in seconds to calculate points^2 evaluations of the
given function.
function
The objective function to be called at each point.
domain
The inclusive (min, max) domain for each dimension.
points
The number of points to collect on each dimension. A total of points^2
function evaluations will be performed.
"""
from time import time
import numpy as np
# create points^2 tuples of (x,y) and populate z
xys = np.linspace(domain[0], domain[1], points)
xys = np.transpose([np.tile(xys, len(xys)), np.repeat(xys, len(xys))])
zs = np.zeros(points * points)
before = time()
for i in range(0, xys.shape[0]):
zs[i] = function(xys[i])
return time() - before
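# --- Illustrative usage sketch (not part of the original module) ---
# The objective below is a stand-in; any callable that accepts a 1-D numpy
# array works with surface_plot and time above.
def _example_surface_plot():
    import numpy as np
    def sphere(x):
        return float(np.sum(x ** 2))
    surface_plot(sphere, domain=(-5, 5), points=40, dimension=2)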
|
#!/usr/bin/env python3
""" Create a new Table of Contents file (toc.xhtml) for The Big Book of Key. """
import re
from argparse import ArgumentParser
from datetime import datetime
from pathlib import Path
from typing import Dict, List
from lxml.html import parse, fromstring, tostring, HtmlElement
from lxml.html.builder import P
from copy import deepcopy
class Settings:
dummy_run: bool
verbose: bool
bigbook: Path
settings = Settings()
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('-d', '--dummy-run', action='store_true', help="don't write 'toc.xhtml'")
parser.add_argument('-v', '--verbose', action='store_true', help='print file names as they are processed')
parser.add_argument('bigbook', type=Path, help='path to bigbook directory')
parser.parse_args(namespace=settings)
text_dir = settings.bigbook / 'Text'
if not text_dir.is_dir():
parser.print_usage()
exit(1)
else:
run(text_dir, settings.dummy_run, settings.verbose)
def extract_iso_date(text: str) -> datetime.date:
match = re.search(r'\d\d\d\d-\d\d-\d\d', text)
if match:
return datetime.strptime(match.group(), '%Y-%m-%d')
else:
raise ValueError(f'No YYYY-MM-DD date found: {text}')
def get_contents(toc: Path) -> List[HtmlElement]:
    # Return the existing contents paragraphs from a toc.xhtml file.
    html = parse(str(toc)).getroot()
    return list(html.xpath('.//div[@contents]/p'))
def get_title_link(page: Path) -> HtmlElement:
html = parse(str(page)).getroot()
elements = html.xpath('.//h1')
if len(elements) == 0 or elements[0].text_content().strip() == 'Quote of the Day':
elements = html.xpath('.//title')
link = deepcopy(elements[0])
link.tag = 'a'
link.attrib.clear()
link.attrib['href'] = page.name
return link
def merge_in_sorted(item_list: List[HtmlElement], sorted_list: List[HtmlElement]) -> List[HtmlElement]:
    # Merge two lists of contents paragraphs, ordering them by their leading
    # "YYYY-MM-DD — " text so dated entries stay in sequence.
    if not item_list:
        return sorted_list
    if not sorted_list:
        return item_list
    return sorted(item_list + sorted_list, key=lambda p: p.text or '')
def run(text_dir: Path, dummy_run: bool = False, verbose: bool = False) -> None:
toc = text_dir / 'toc.xhtml'
if toc.exists():
html = parse(str(toc)).getroot()
contents_div = html.xpath('.//div[@contents]')[0]
old_contents = list(html.xpath('.//div[@contents]/p'))
else:
old_contents = []
new_contents = []
for file in text_dir.glob('[12][90][0-9][0-9]*.xhtml'): # files stating with dates
link = get_title_link(file)
date = extract_iso_date(file.name)
item = P(f"{date} — ", link)
item.tail = '\n'
new_contents.append(item)
if verbose:
print(file.name, "'" + link.text_content() + "'")
    new_contents.sort(key=lambda p: p.text or '')
    contents = merge_in_sorted(new_contents, old_contents)
    # Rebuild the contents <div> and drop it into the page template.
    new_div = fromstring('<div contents="">\n</div>')
    for item in contents:
        new_div.append(item)
    list_xhtml = tostring(new_div, pretty_print=True, method='xml', encoding='unicode')
    xhtml = Template.format(LIST=list_xhtml, DATE=datetime.now().isoformat())
    if not dummy_run:
        toc.write_text(xhtml)
    if verbose:
        print('Done.')
Template = r'''<?xml version="1.0" encoding="utf-8" standalone="no"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"
"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta name="generator" content="Scrawled in GreasePencil" />
<title>Contents</title>
<meta name="author" content="Frank Key" />
<meta name="description" content="Indiscriminately collected prose works by Frank Key." />
<meta name="language" content="en-GB" />
<meta name="generator" content="Scrawled in GreasePencil" />
<meta name="date" content="{DATE}" />
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<link href="../Styles/style.css" rel="stylesheet" type="text/css" />
</head>
<body class="toc">
<h1>Contents</h1>
{LIST}
</body>
</html>'''
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
# A Survey on Negative Transfer
# https://github.com/chamwen/NT-Benchmark
import numpy as np
import argparse
import os
import torch as tr
import torch.nn as nn
import torch.optim as optim
from utils import network, loss, utils
from utils.network import calc_coeff
from utils.dataloader import read_syn_src_tar
from utils.utils import lr_scheduler_full, fix_random_seed, add_label_noise_noimg
from utils.loss import CELabelSmooth, CDANE, Entropy, RandomLayer
import torch.utils.data as Data
def data_load(Xs, Ys, Xt, Yt, args):
dset_loaders = {}
train_bs = args.batch_size
if args.noise_rate > 0:
Ys = add_label_noise_noimg(Ys, args.seed, args.class_num, args.noise_rate)
sample_idx_tar = tr.from_numpy(np.arange(len(Yt))).long()
data_src = Data.TensorDataset(Xs, Ys)
data_tar = Data.TensorDataset(Xt, Yt)
data_tar_idx = Data.TensorDataset(Xt, Yt, sample_idx_tar)
# for DAN/DANN/CDAN/MCC
dset_loaders["source"] = Data.DataLoader(data_src, batch_size=train_bs, shuffle=True, drop_last=True)
dset_loaders["target"] = Data.DataLoader(data_tar_idx, batch_size=train_bs, shuffle=True, drop_last=True)
dset_loaders["Target"] = Data.DataLoader(data_tar, batch_size=train_bs * 3, shuffle=False, drop_last=False)
return dset_loaders
def train_target(args):
X_src, y_src, X_tar, y_tar = read_syn_src_tar(args)
dset_loaders = data_load(X_src, y_src, X_tar, y_tar, args)
netF, netC = network.backbone_net(args, args.bottleneck)
netF.load_state_dict(tr.load(args.mdl_init_dir + 'netF.pt'))
netC.load_state_dict(tr.load(args.mdl_init_dir + 'netC.pt'))
base_network = nn.Sequential(netF, netC)
max_len = max(len(dset_loaders["source"]), len(dset_loaders["target"]))
args.max_iter = args.max_epoch * max_len
ad_net = network.AdversarialNetwork(args.bottleneck, 20).cuda()
ad_net.load_state_dict(tr.load(args.mdl_init_dir + 'netD_full.pt'))
random_layer = RandomLayer([args.bottleneck, args.class_num], args.bottleneck)
random_layer.cuda()
optimizer_f = optim.SGD(netF.parameters(), lr=args.lr * 0.1)
optimizer_c = optim.SGD(netC.parameters(), lr=args.lr)
optimizer_d = optim.SGD(ad_net.parameters(), lr=args.lr)
max_len = max(len(dset_loaders["source"]), len(dset_loaders["target"]))
max_iter = args.max_epoch * max_len
interval_iter = max_iter // 10
iter_num = 0
base_network.train()
class_num = args.class_num
mem_fea = tr.rand(len(dset_loaders["target"].dataset), args.bottleneck).cuda()
mem_fea = mem_fea / tr.norm(mem_fea, p=2, dim=1, keepdim=True)
mem_cls = tr.ones(len(dset_loaders["target"].dataset), class_num).cuda() / class_num
while iter_num < max_iter:
        try:
            inputs_source, labels_source = next(iter_source)
        except (NameError, StopIteration):
            # first pass (iterator not yet created) or source loader exhausted
            iter_source = iter(dset_loaders["source"])
            inputs_source, labels_source = next(iter_source)
        try:
            inputs_target, _, idx = next(iter_target)
        except (NameError, StopIteration):
            iter_target = iter(dset_loaders["target"])
            inputs_target, _, idx = next(iter_target)
if inputs_source.size(0) == 1:
continue
iter_num += 1
lr_scheduler_full(optimizer_f, init_lr=args.lr * 0.1, iter_num=iter_num, max_iter=args.max_iter)
lr_scheduler_full(optimizer_c, init_lr=args.lr, iter_num=iter_num, max_iter=args.max_iter)
lr_scheduler_full(optimizer_d, init_lr=args.lr, iter_num=iter_num, max_iter=args.max_iter)
inputs_source, inputs_target, labels_source = inputs_source.cuda(), inputs_target.cuda(), labels_source.cuda()
features_source, outputs_source = base_network(inputs_source)
features_target, outputs_target = base_network(inputs_target)
features = tr.cat((features_source, features_target), dim=0)
# new version img loss
args.loss_trade_off = 1.0
outputs = tr.cat((outputs_source, outputs_target), dim=0)
softmax_out = nn.Softmax(dim=1)(outputs)
entropy = Entropy(softmax_out)
transfer_loss = CDANE([features, softmax_out], ad_net, entropy, calc_coeff(iter_num), random_layer=random_layer)
classifier_loss = CELabelSmooth(num_classes=args.class_num, epsilon=args.smooth)(outputs_source, labels_source)
# ATDOC
dis = -tr.mm(features_target.detach(), mem_fea.t())
for di in range(dis.size(0)):
dis[di, idx[di]] = tr.max(dis)
_, p1 = tr.sort(dis, dim=1)
w = tr.zeros(features_target.size(0), mem_fea.size(0)).cuda()
for wi in range(w.size(0)):
for wj in range(args.K):
w[wi][p1[wi, wj]] = 1 / args.K
weight_, pred = tr.max(w.mm(mem_cls), 1)
loss_ = nn.CrossEntropyLoss(reduction='none')(outputs_target, pred)
classifier_loss_atdoc = tr.sum(weight_ * loss_) / (tr.sum(weight_).item())
eff = iter_num / args.max_iter
total_loss = args.loss_trade_off * transfer_loss + classifier_loss + args.tar_par * eff * classifier_loss_atdoc
optimizer_f.zero_grad()
optimizer_c.zero_grad()
optimizer_d.zero_grad()
total_loss.backward()
optimizer_f.step()
optimizer_c.step()
optimizer_d.step()
# label memory
netF.eval()
netC.eval()
with tr.no_grad():
features_target, outputs_target = netC(netF(inputs_target))
features_target = features_target / tr.norm(features_target, p=2, dim=1, keepdim=True)
softmax_out = nn.Softmax(dim=1)(outputs_target)
outputs_target = softmax_out ** 2 / ((softmax_out ** 2).sum(dim=0))
mem_fea[idx] = (1.0 - args.momentum) * mem_fea[idx] + args.momentum * features_target.clone()
mem_cls[idx] = (1.0 - args.momentum) * mem_cls[idx] + args.momentum * outputs_target.clone()
if iter_num % interval_iter == 0 or iter_num == max_iter:
base_network.eval()
acc_t_te = utils.cal_acc_base(dset_loaders["Target"], base_network)
log_str = 'Task: {}, Iter:{}/{}; Acc = {:.2f}%'.format(args.task_str, iter_num, max_iter, acc_t_te)
print(log_str)
base_network.train()
return acc_t_te
if __name__ == '__main__':
data_name = 'moon'
if data_name == 'moon': num_class = 2
base_name_list = ['0', '1', '2', '3_45', '4_15', '6', '7', '8', '9']
domain_list = ['Raw', 'Tl', 'Sl', 'Rt', 'Sh', 'Sk', 'Ns', 'Ol', 'Sc']
file_list = [data_name + i for i in base_name_list]
num_domain = len(domain_list)
args = argparse.Namespace(bottleneck=64, lr=0.01, lr_decay1=0.1, lr_decay2=1.0,
epsilon=1e-05, layer='wn', class_num=num_class, smooth=0)
args.K = 5
args.momentum = 1.0
args.tar_par = 0.2
args.method = 'CDANE-ATDOC'
args.dset = data_name
args.backbone = 'ShallowNet'
args.batch_size = 32
args.max_epoch = 50
args.input_dim = 2
args.mdl_init_dir = 'outputs/mdl_init/' + args.dset + '/'
args.noise_rate = 0
dset_n = args.dset + '_' + str(args.noise_rate)
os.environ["CUDA_VISIBLE_DEVICES"] = '5'
args.data_env = 'gpu' # 'local'
args.seed = 2022
fix_random_seed(args.seed)
tr.backends.cudnn.deterministic = True
print(dset_n, args.method)
args.root_path = './data_synth/'
args.local_dir = r'/mnt/ssd2/wenz/NT-Benchmark/NT_UDA/'
args.result_dir = 'results/target/'
acc_all = np.zeros((len(domain_list) - 1))
for s in range(1, num_domain): # source
for t in [0]: # target
itr_idx = s - 1
info_str = '\n%s: %s --> %s' % (itr_idx, domain_list[s], domain_list[t])
print(info_str)
args.src, args.tar = file_list[s], file_list[t]
args.task_str = domain_list[s] + '_' + domain_list[t]
print(args)
acc_all[itr_idx] = train_target(args)
print('All acc: ', np.round(acc_all, 2))
print('Avg acc: ', np.round(np.mean(acc_all), 2))
|
#!/bin/python3
'''
This file plots a graph with the following setup.
1. We first select an image x_0
2. We then add some perturbation to the image to get x_1 (its type should be
configurable in the future, but currently it is either random or loaded from
a file)
3. Next, we plot f(x) for all x on the segment from x_0 to x_1
4. Finally, we optionally save the perturbation for future work
Example:
python plot1.py --train '' --network lfc --ranking True --fidelity True --std_modeling True --std_loss '' --margin 0.025 --batch_size 128 --batch_size2 32 --image_size 384 --max_epochs 3 --lr 1e-4 --decay_interval 3 --decay_ratio 0.1 --fixvar --max_epochs2 12 --batch_size=16 --batch_size2=16 --ckpt_path=checkpoints_many/lfc -x /data_partition/yang/fyp/adv_1/IQA_database_syn/databaserelease2/jp2k/img4.bmp --pertubation_length 0.01
python plot1.py --train '' --network lfc --ranking True --fidelity True --std_modeling True --std_loss '' --margin 0.025 --batch_size 128 --batch_size2 32 --image_size 384 --max_epochs 3 --lr 1e-4 --decay_interval 3 --decay_ratio 0.1 --fixvar --max_epochs2 12 --batch_size=16 --batch_size2=16 --ckpt_path=checkpoints_many/lfc_lip -x /data_partition/yang/fyp/adv_1/IQA_database_syn/databaserelease2/jp2k/img4.bmp --pertubation_length 0.01
python plot1.py --train '' --network lfc --ranking True --fidelity True --std_modeling True --std_loss '' --margin 0.025 --batch_size 128 --batch_size2 32 --image_size 384 --max_epochs 3 --lr 1e-4 --decay_interval 3 --decay_ratio 0.1 --fixvar --max_epochs2 12 --batch_size=16 --batch_size2=16 --ckpt_path=checkpoints_many/lfc_nom -x /data_partition/yang/fyp/adv_1/IQA_database_syn/databaserelease2/jp2k/img4.bmp --pertubation_length 0.01 --force_normalization
python plot1.py --train '' --network lfc_relu --ranking True --fidelity True --std_modeling True --std_loss '' --margin 0.025 --batch_size 128 --batch_size2 32 --image_size 384 --max_epochs 3 --lr 1e-4 --decay_interval 3 --decay_ratio 0.1 --fixvar --max_epochs2 12 --batch_size=16 --batch_size2=16 --ckpt_path=checkpoints_many/lfc_relu_nom -x /data_partition/yang/fyp/adv_1/IQA_database_syn/databaserelease2/jp2k/img4.bmp --pertubation_length 0.01 --force_normalization
python plot1.py --train '' --network lfc_relu --ranking True --fidelity True --std_modeling True --std_loss '' --margin 0.025 --batch_size 128 --batch_size2 32 --image_size 384 --max_epochs 3 --lr 1e-4 --decay_interval 3 --decay_ratio 0.1 --fixvar --max_epochs2 12 --batch_size=16 --batch_size2=16 --ckpt_path=checkpoints_many/lfc_relu_nom_lip -x /data_partition/yang/fyp/adv_1/IQA_database_syn/databaserelease2/jp2k/img4.bmp --pertubation_length 0.01 --force_normalization
'''
import argparse
import TrainModel
import scipy.io as sio
import os
import torch
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from torchvision import transforms
def parse_config():
parser = argparse.ArgumentParser()
parser.add_argument('-x', '--img', type=str, help='the base image')
    parser.add_argument('-p', '--pertubation', type=str, default='',
                        help='the perturbation of the image; randomly generated if not provided')
    parser.add_argument('--pertubation_length', type=float, default=0.01,
                        help='the length of the perturbation, if random generation is necessary')
    parser.add_argument('-s', '--save_pertubation', type=str, default='',
                        help='path where the perturbation should be saved')
parser.add_argument("--train", type=bool, default=True)
parser.add_argument('--get_scores', type=bool, default=False)
parser.add_argument("--use_cuda", type=bool, default=True)
# parser.add_argument("--device", type=str, default="cuda")
parser.add_argument("--resume", action='store_true')
parser.add_argument("--seed", type=int, default=19901116)
parser.add_argument("--backbone", type=str, default='resnet34')
parser.add_argument("--fc", type=bool, default=True)
parser.add_argument('--scnn_root', type=str, default='saved_weights/scnn.pkl')
parser.add_argument("--network", type=str, default="basecnn",
help='basecnn or dbcnn or lfc')
parser.add_argument("--representation", type=str, default="BCNN")
parser.add_argument("--ranking", type=bool, default=True,
help='True for learning-to-rank False for regular regression')
parser.add_argument("--fidelity", type=bool, default=True,
help='True for fidelity loss False for regular ranknet with CE loss')
parser.add_argument("--std_modeling", type=bool,
default=True) # True for modeling std False for not
parser.add_argument("--std_loss", type=bool, default=True)
parser.add_argument("--fixvar", action='store_true') #+
parser.add_argument("--force_normalization", action='store_true')
parser.add_argument("--lipschitz", action='store_true')
parser.add_argument("--margin", type=float, default=0.025)
parser.add_argument("--split", type=int, default=1)
parser.add_argument("--trainset", type=str, default="./IQA_database/")
parser.add_argument("--live_set", type=str, default="./IQA_database/databaserelease2/")
parser.add_argument("--csiq_set", type=str, default="./IQA_database/CSIQ/")
parser.add_argument("--tid2013_set", type=str, default="./IQA_database/TID2013/")
parser.add_argument("--bid_set", type=str, default="./IQA_database/BID/")
#parser.add_argument("--cid_set", type=str, default="./IQA_database/CID2013_camera/")
parser.add_argument("--clive_set", type=str, default="./IQA_database/ChallengeDB_release/")
parser.add_argument("--koniq10k_set", type=str, default="./IQA_database/koniq-10k/")
parser.add_argument("--kadid10k_set", type=str, default="./IQA_database/kadid10k/")
parser.add_argument("--eval_live", type=bool, default=True)
parser.add_argument("--eval_csiq", type=bool, default=True)
parser.add_argument("--eval_tid2013", type=bool, default=False)
parser.add_argument("--eval_kadid10k", type=bool, default=True)
parser.add_argument("--eval_bid", type=bool, default=True)
parser.add_argument("--eval_clive", type=bool, default=True)
parser.add_argument("--eval_koniq10k", type=bool, default=True)
parser.add_argument("--split_modeling", type=bool, default=False)
parser.add_argument('--ckpt_path', default='./checkpoint', type=str,
metavar='PATH', help='path to checkpoints')
parser.add_argument('--ckpt', default=None, type=str, help='name of the checkpoint to load')
parser.add_argument("--train_txt", type=str, default='train.txt') # train.txt | train_synthetic.txt | train_authentic.txt | train_sub2.txt | train_score.txt
parser.add_argument("--batch_size", type=int, default=128)
parser.add_argument("--batch_size2", type=int, default=32)
parser.add_argument("--image_size", type=int, default=384, help='None means random resolution')
parser.add_argument("--max_epochs", type=int, default=3)
parser.add_argument("--max_epochs2", type=int, default=12)
parser.add_argument("--lr", type=float, default=1e-4)
parser.add_argument("--decay_interval", type=int, default=3)
parser.add_argument("--decay_ratio", type=float, default=0.1)
parser.add_argument("--epochs_per_eval", type=int, default=1)
parser.add_argument("--epochs_per_save", type=int, default=1)
parser.add_argument("--verbose", action='store_true')
config = parser.parse_args()
config.to_test = []
return config
def main(config):
t = TrainModel.Trainer(config)
    # check option compatibility
if config.fixvar and not config.network.startswith('lfc'):
raise NotImplementedError()
if str(config.backbone).startswith('lfc') and not config.std_modeling:
raise NotImplementedError()
model = t.model
pil_img = Image.open(config.img)
# pil_img = pil_img.reshape((1,) + tuple(pil_img.shape))
img = t.test_transform(pil_img).to(t.device)
if config.pertubation:
with open(config.pertubation, 'rb') as f:
pertubation = torch.load(f)
else:
pertubation = torch.rand(img.shape) * config.pertubation_length
pertubation = pertubation.to(t.device)
img = img.unsqueeze(0)
print(img.shape)
if config.save_pertubation:
with open(config.save_pertubation, 'wb') as f:
torch.save(pertubation, f)
should_normalize = not config.network.startswith('lfc') or config.force_normalization
if should_normalize:
normalization_transform = \
transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225))
pertubation = normalization_transform(pertubation)
x = list(np.linspace(0, 1, 100))
y = [t.predict_single_image(img + p * pertubation).detach().cpu().numpy() for p in x]
plt.plot(x, y)
plt.show()
if __name__ == "__main__":
config = parse_config()
main(config)
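# A hedged usage sketch (the script/file names and paths below are assumptions,
# not taken from the repository):
#
#   python perturb_sweep.py --img ./samples/example.bmp \
#       --pertubation_length 0.01 --save_pertubation ./pert.pt --verbose
#
# The script loads the model via TrainModel.Trainer, adds the (optionally
# loaded or saved) perturbation to the image at 100 scales in [0, 1], and
# plots the predicted quality score against the perturbation scale.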
|
# -*-coding:utf-8-*-
import os
from flask import request, render_template, g
from flask_babel import gettext
from flask_wtf.csrf import CSRFError
from werkzeug.utils import redirect
from apps.configs.sys_config import DEFAULT_ADMIN_LOGIN_PAGE
from apps.core.auth.rest_token_auth import OsrTokenError, SecretTokenError, AccessTokenError
from apps.core.blueprint import api, theme_view, admin_view
from apps.core.flask.login_manager import LoginReqError
from apps.core.flask.response import response_format
from apps.core.template.template import render_absolute_path_template
from apps.core.utils.get_config import get_config
from apps.modules.global_data.process.global_data import get_global_site_data
__author__ = "Allen Woo"
class ErrorHandler:
"""
配置各种异常状态返回数据 http status
"""
def __init__(self, app=None):
if app:
self.init_app(app)
def init_app(self, app):
@app.errorhandler(401)
def internal_server_error_401(e):
return internal_server_error(e)
@app.errorhandler(404)
def internal_server_error_404(e):
return internal_server_error(e)
@app.errorhandler(500)
def internal_server_error_500(e):
return internal_server_error(e)
@app.errorhandler(SecretTokenError)
def handle_rest_token_error(e):
data = {
"custom_status": e.code,
"msg": e.description,
"msg_type": "e",
"error_id": 40101}
return response_format(data)
@app.errorhandler(AccessTokenError)
        def handle_access_token_error(e):
data = {
"custom_status": e.code,
"msg": e.description,
"msg_type": "e",
"error_id": 40102}
return response_format(data)
@app.errorhandler(CSRFError)
def handle_csrf_error(e):
data = {
"custom_status": e.code,
"msg": e.description,
"msg_type": "e",
"error_id": 40103}
return response_format(data)
@app.errorhandler(OsrTokenError)
def handle_osr_token_error(e):
data = {
"custom_status": e.code,
"msg": e.description,
"msg_type": "e",
"error_id": 40104,
"help": gettext(
"Please add the 'OSR-RestToken' or 'X-CSRFToken' request header,"
" the specific use please refer to the osroom system documentation:"
" http://osroom.com")}
return response_format(data)
@app.errorhandler(LoginReqError)
def handle_login_error(e):
data = {
"custom_status": e.code,
"msg": gettext("Not logged in"),
"error_msg": e.description,
"msg_type": "e",
"to_url": get_config(
"login_manager",
"LOGIN_VIEW"),
"error_id": 40105}
if request.headers.get('OSR-RestToken'):
data["to_url"] = get_config("login_manager", "LOGIN_VIEW")
if request.path.startswith(api.url_prefix):
                # API request: respond with JSON data
return response_format(data)
            # page request: redirect to the login page
if request.path.startswith("/osr-admin"):
return redirect(DEFAULT_ADMIN_LOGIN_PAGE)
else:
return redirect(data["to_url"])
def internal_server_error(e):
"""
处理服务器错误
:param e:
:return:
"""
try:
code = e.code
except BaseException:
code = 500
msg_type = "w"
msg = gettext("An error occurred. Please contact the administrator")
if code == 401:
msg = gettext("Permission denied")
elif code == 404:
msg = gettext("The api does not exist or has been deprecated")
elif code == 500:
msg = gettext("Server error")
msg_type = "e"
elif isinstance(code, int) and code // 500 == 1:
msg = gettext(
"Server error, please check whether the third-party plug-in is normal")
msg_type = "e"
data = {
"http_status": code,
"custom_status": None,
"request_id": g.weblog_id,
"msg": msg,
"msg_type": msg_type}
if request.path.startswith(api.url_prefix):
return response_format(data)
else:
g.site_global = dict(g.site_global,
**get_global_site_data(req_type="view"))
path = "{}/pages/{}.html".format(get_config("theme",
"CURRENT_THEME_NAME"), code)
absolute_path = os.path.abspath(
"{}/{}".format(theme_view.template_folder, path))
if not os.path.isfile(absolute_path):
            # The theme does not provide an <e.code> error page (e.g. 404), so fall back to the system's built-in page
path = "{}/module/exception/{}.html".format(
admin_view.template_folder, code)
return render_absolute_path_template(path, data=data), 404
return render_template(path, data=data), code
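# A minimal wiring sketch (assumed usage; the real app factory lives elsewhere
# in the osroom project):
#
#   error_handler = ErrorHandler()
#   error_handler.init_app(app)      # or ErrorHandler(app) at construction time
#
# init_app() registers JSON responses for API paths and HTML error pages
# (theme page first, system fallback) for regular page requests.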
|
#!/usr/bin/env python
import glob
import os
import sys
verbose = '--verbose' in sys.argv
dump = '--dump' in sys.argv
internal = '--internal' in sys.argv
plain_output = '--plain-output' in sys.argv
if plain_output:
plain_file = open('plain_text_out.txt', 'w+')
in_code = None
paths = ['include/libtorrent/*.hpp', 'include/libtorrent/kademlia/*.hpp', 'include/libtorrent/extensions/*.hpp']
if internal:
paths.append('include/libtorrent/aux_/*.hpp')
files = []
for p in paths:
files.extend(glob.glob(os.path.join('..', p)))
functions = []
classes = []
enums = []
# maps filename to overview description
overviews = {}
# maps names -> URL
symbols = {}
# some files that need pre-processing to turn symbols into
# links into the reference documentation
preprocess_rst = \
{
'manual.rst':'manual-ref.rst',
'settings.rst':'settings-ref.rst'
}
# some pre-defined sections from the main manual
symbols = \
{
"queuing_": "manual-ref.html#queuing",
"fast-resume_": "manual-ref.html#fast-resume",
"storage-allocation_": "manual-ref.html#storage-allocation",
"alerts_": "manual-ref.html#alerts",
"upnp-and-nat-pmp_": "manual-ref.html#upnp-and-nat-pmp",
"http-seeding_": "manual-ref.html#http-seeding",
"metadata-from-peers_": "manual-ref.html#metadata-from-peers",
"magnet-links_": "manual-ref.html#magnet-links",
"ssl-torrents_": "manual-ref.html#ssl-torrents",
"dynamic-loading-of-torrent-files_": "manual-ref.html#dynamic-loading-of-torrent-files",
"session-statistics_": "manual-ref.html#session-statistics",
"peer-classes_": "manual-ref.html#peer-classes"
}
static_links = \
{
".. _`BEP 3`: http://bittorrent.org/beps/bep_0003.html",
".. _`BEP 17`: http://bittorrent.org/beps/bep_0017.html",
".. _`BEP 19`: http://bittorrent.org/beps/bep_0019.html"
}
anon_index = 0
category_mapping = {
'ed25519.hpp': 'ed25519',
'session.hpp': 'Core',
'add_torrent_params.hpp': 'Core',
'session_status.hpp': 'Core',
'error_code.hpp': 'Error Codes',
'file.hpp': 'File',
'storage.hpp': 'Custom Storage',
'storage_defs.hpp': 'Storage',
'file_storage.hpp': 'Storage',
'file_pool.hpp': 'Custom Storage',
'extensions.hpp': 'Plugins',
'ut_metadata.hpp': 'Plugins',
'ut_pex.hpp': 'Plugins',
'ut_trackers.hpp': 'Plugins',
'metadata_transfer.hpp': 'Plugins',
'smart_ban.hpp': 'Plugins',
'lt_trackers.hpp': 'Plugins',
'create_torrent.hpp': 'Create Torrents',
'alert.hpp': 'Alerts',
'alert_types.hpp': 'Alerts',
'bencode.hpp': 'Bencoding',
'lazy_entry.hpp': 'Bencoding',
'bdecode.hpp': 'Bdecoding',
'entry.hpp': 'Bencoding',
'time.hpp': 'Time',
'escape_string.hpp': 'Utility',
'enum_net.hpp': 'Network',
'broadcast_socket.hpp': 'Network',
'socket.hpp': 'Network',
'socket_io.hpp': 'Network',
'bitfield.hpp': 'Utility',
'sha1_hash.hpp': 'Utility',
'hasher.hpp': 'Utility',
'identify_client.hpp': 'Utility',
'thread.hpp': 'Utility',
'ip_filter.hpp': 'Filter',
'session_settings.hpp': 'Settings',
'settings_pack.hpp': 'Settings',
'operations.hpp': 'Alerts',
'disk_buffer_holder.hpp': 'Custom Storage',
'alert_dispatcher.hpp': 'Alerts',
}
category_fun_mapping = {
'min_memory_usage()': 'Settings',
'high_performance_seed()': 'Settings',
'cache_status': 'Core',
}
def categorize_symbol(name, filename):
f = os.path.split(filename)[1]
if name.endswith('_category()') \
or name.endswith('_error_code') \
or name.endswith('error_code_enum'):
return 'Error Codes'
if name in category_fun_mapping:
return category_fun_mapping[name]
if f in category_mapping:
return category_mapping[f]
return 'Core'
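# For example (based on the tables above):
#   categorize_symbol('add_torrent_params', '../include/libtorrent/add_torrent_params.hpp') -> 'Core'
#   categorize_symbol('storage_error_code', 'x.hpp')  -> 'Error Codes'
#   categorize_symbol('unknown_symbol', 'unknown.hpp') -> 'Core'  (the default bucket)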
def suppress_warning(filename, name):
f = os.path.split(filename)[1]
if f != 'alert_types.hpp': return False
# if name.endswith('_alert') or name == 'message()':
return True
# return False
def first_item(itr):
for i in itr:
return i
return None
def is_visible(desc):
if desc.strip().startswith('hidden'): return False
if internal: return True
if desc.strip().startswith('internal'): return False
return True
def highlight_signature(s):
s = s.replace('TORRENT_OVERRIDE', 'override').replace('TORRENT_FINAL', 'final')
name = s.split('(', 1)
name2 = name[0].split(' ')
if len(name2[-1]) == 0: return s
# make the name of the function bold
name2[-1] = '**' + name2[-1] + '** '
# if there is a return value, make sure we preserve pointer types
if len(name2) > 1:
name2[0] = name2[0].replace('*', '\\*')
name[0] = ' '.join(name2)
# we have to escape asterisks, since this is rendered into
# a parsed literal in rst
name[1] = name[1].replace('*', '\\*')
# we also have to escape colons
name[1] = name[1].replace(':', '\\:')
# escape trailing underscores
name[1] = name[1].replace('_', '\\_')
# comments in signatures are italic
name[1] = name[1].replace('/\\*', '*/\\*')
name[1] = name[1].replace('\\*/', '\\*/*')
return '('.join(name)
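# A worked example of the transformation above:
#   highlight_signature('int foo(char const* s);')
#     -> 'int **foo** (char const\* s);'
# i.e. the function name is bolded and '*', ':' and '_' in the argument list
# are escaped for the RST parsed-literal block it is rendered into.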
def html_sanitize(s):
ret = ''
for i in s:
if i == '<': ret += '<'
elif i == '>': ret += '>'
elif i == '&': ret += '&'
else: ret += i
return ret
def looks_like_namespace(line):
line = line.strip()
if line.startswith('namespace'): return True
return False
def looks_like_blank(line):
line = line.split('//')[0]
line = line.replace('{', '')
line = line.replace('}', '')
line = line.replace('[', '')
line = line.replace(']', '')
line = line.replace(';', '')
line = line.strip()
return len(line) == 0
def looks_like_variable(line):
line = line.split('//')[0]
line = line.strip()
if not ' ' in line and not '\t' in line: return False
if line.startswith('friend '): return False
if line.startswith('enum '): return False
if line.startswith(','): return False
if line.startswith(':'): return False
if line.startswith('typedef'): return False
if ' = ' in line: return True
if line.endswith(';'): return True
return False
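# For example:
#   looks_like_variable('int num_pieces = 0;')        -> True
#   looks_like_variable('enum flags_t')               -> False  (enums are handled separately)
#   looks_like_variable('typedef sha1_hash peer_id;') -> False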
def looks_like_forward_decl(line):
line = line.split('//')[0]
line = line.strip()
if not line.endswith(';'): return False
if '{' in line: return False
if '}' in line: return False
if line.startswith('friend '): return True
if line.startswith('struct '): return True
if line.startswith('class '): return True
return False
def looks_like_function(line):
if line.startswith('friend'): return False
if '::' in line.split('(')[0].split(' ')[-1]: return False
if line.startswith(','): return False
if line.startswith(':'): return False
return '(' in line;
def parse_function(lno, lines, filename):
current_fun = {}
start_paren = 0
end_paren = 0
signature = ''
while lno < len(lines):
l = lines[lno].strip()
lno += 1
if l.startswith('//'): continue
start_paren += l.count('(')
end_paren += l.count(')')
sig_line = l.replace('TORRENT_EXPORT ', '').replace('TORRENT_EXTRA_EXPORT','').strip()
if signature != '': sig_line = '\n ' + sig_line
signature += sig_line
if verbose: print 'fun %s' % l
if start_paren > 0 and start_paren == end_paren:
if signature[-1] != ';':
# we also need to consume the function body
start_paren = 0
end_paren = 0
for i in range(len(signature)):
if signature[i] == '(': start_paren += 1
elif signature[i] == ')': end_paren += 1
if start_paren > 0 and start_paren == end_paren:
for k in range(i, len(signature)):
if signature[k] == ':' or signature[k] == '{':
signature = signature[0:k].strip()
break
break
lno = consume_block(lno - 1, lines)
signature += ';'
ret = [{ 'file': filename[11:], 'signatures': set([ signature ]), 'names': set([ signature.split('(')[0].split(' ')[-1].strip() + '()'])}, lno]
if first_item(ret[0]['names']) == '()': return [None, lno]
return ret
if len(signature) > 0:
print '\x1b[31mFAILED TO PARSE FUNCTION\x1b[0m %s\nline: %d\nfile: %s' % (signature, lno, filename)
return [None, lno]
def parse_class(lno, lines, filename):
start_brace = 0
end_brace = 0
name = ''
funs = []
fields = []
enums = []
state = 'public'
context = ''
class_type = 'struct'
blanks = 0
decl = ''
while lno < len(lines):
l = lines[lno].strip()
decl += lines[lno].replace('TORRENT_EXPORT ', '').replace('TORRENT_EXTRA_EXPORT', '').split('{')[0].strip()
if '{' in l: break
if verbose: print 'class %s' % l
lno += 1
if decl.startswith('class'):
state = 'private'
class_type = 'class'
decl = decl.replace('TORRENT_FINAL', 'final')
name = decl.split(':')[0].replace('class ', '').replace('struct ', '').replace('final', '').strip()
while lno < len(lines):
l = lines[lno].strip()
lno += 1
if l == '':
blanks += 1
context = ''
continue
if l.startswith('/*'):
lno = consume_comment(lno - 1, lines)
continue
if l.startswith('#'):
lno = consume_ifdef(lno - 1, lines, True)
continue
if 'TORRENT_DEFINE_ALERT' in l:
if verbose: print 'xx %s' % l
blanks += 1
continue
if 'TORRENT_DEPRECATED' in l:
if verbose: print 'xx %s' % l
blanks += 1
continue
if l.startswith('//'):
if verbose: print 'desc %s' % l
			# plain output prints just the descriptions and filters out C++ code;
			# it's used for running a spell checker over the documentation text
if plain_output:
line = l.split('//')[1]
# if the first character is a space, strip it
if len(line) > 0 and line[0] == ' ': line = line[1:]
global in_code
if in_code != None and not line.startswith(in_code) and len(line) > 1:
in_code = None
if line.strip().startswith('.. code::'):
in_code = line.split('.. code::')[0] + '\t'
# strip out C++ code from the plain text output since it's meant for
# running spell checking over
if not line.strip().startswith('.. ') and in_code == None:
plain_file.write(line + '\n')
l = l[2:]
if len(l) and l[0] == ' ': l = l[1:]
context += l + '\n'
continue
start_brace += l.count('{')
end_brace += l.count('}')
if l == 'private:': state = 'private'
elif l == 'protected:': state = 'protected'
elif l == 'public:': state = 'public'
if start_brace > 0 and start_brace == end_brace:
return [{ 'file': filename[11:], 'enums': enums, 'fields':fields, 'type': class_type, 'name': name, 'decl': decl, 'fun': funs}, lno]
if state != 'public' and not internal:
if verbose: print 'private %s' % l
blanks += 1
continue
if start_brace - end_brace > 1:
if verbose: print 'scope %s' % l
blanks += 1
continue;
if looks_like_function(l):
current_fun, lno = parse_function(lno - 1, lines, filename)
if current_fun != None and is_visible(context):
if context == '' and blanks == 0 and len(funs):
funs[-1]['signatures'].update(current_fun['signatures'])
funs[-1]['names'].update(current_fun['names'])
else:
current_fun['desc'] = context
if context == '' and not suppress_warning(filename, first_item(current_fun['names'])):
print 'WARNING: member function "%s" is not documented: \x1b[34m%s:%d\x1b[0m' \
% (name + '::' + first_item(current_fun['names']), filename, lno)
funs.append(current_fun)
context = ''
blanks = 0
continue
if looks_like_variable(l):
if verbose: print 'var %s' % l
if not is_visible(context):
continue
l = l.split('//')[0].strip()
n = l.split(' ')[-1].split(':')[0].split(';')[0]
if context == '' and blanks == 0 and len(fields):
fields[-1]['names'].append(n)
fields[-1]['signatures'].append(l)
else:
if context == '' and not suppress_warning(filename, n):
print 'WARNING: field "%s" is not documented: \x1b[34m%s:%d\x1b[0m' \
% (name + '::' + n, filename, lno)
fields.append({'signatures': [l], 'names': [n], 'desc': context})
context = ''
blanks = 0
continue
if l.startswith('enum '):
if verbose: print 'enum %s' % l
if not is_visible(context):
consume_block(lno - 1, lines)
else:
enum, lno = parse_enum(lno - 1, lines, filename)
if enum != None:
enum['desc'] = context
if context == '' and not suppress_warning(filename, enum['name']):
print 'WARNING: enum "%s" is not documented: \x1b[34m%s:%d\x1b[0m' \
% (name + '::' + enum['name'], filename, lno)
enums.append(enum)
context = ''
continue
context = ''
if verbose:
if looks_like_forward_decl(l) \
or looks_like_blank(l) \
or looks_like_namespace(l):
print '-- %s' % l
else:
print '?? %s' % l
if len(name) > 0:
print '\x1b[31mFAILED TO PARSE CLASS\x1b[0m %s\nfile: %s:%d' % (name, filename, lno)
return [None, lno]
def parse_enum(lno, lines, filename):
start_brace = 0
end_brace = 0
global anon_index
l = lines[lno].strip()
name = l.replace('enum ', '').split('{')[0].strip()
if len(name) == 0:
if not internal:
print 'WARNING: anonymous enum at: \x1b[34m%s:%d\x1b[0m' % (filename, lno)
lno = consume_block(lno - 1, lines)
return [None, lno]
name = 'anonymous_enum_%d' % anon_index
anon_index += 1
values = []
context = ''
if not '{' in l:
if verbose: print 'enum %s' % lines[lno]
lno += 1
val = 0
while lno < len(lines):
l = lines[lno].strip()
lno += 1
if l.startswith('//'):
if verbose: print 'desc %s' % l
l = l[2:]
if len(l) and l[0] == ' ': l = l[1:]
context += l + '\n'
continue
if l.startswith('#'):
lno = consume_ifdef(lno - 1, lines)
continue
start_brace += l.count('{')
end_brace += l.count('}')
if '{' in l:
l = l.split('{')[1]
l = l.split('}')[0]
if len(l):
if verbose: print 'enumv %s' % lines[lno-1]
for v in l.split(','):
v = v.strip();
if v.startswith('//'): break
if v == '': continue
valstr = ''
try:
if '=' in v: val = int(v.split('=')[1].strip(), 0)
valstr = str(val)
except: pass
if '=' in v: v = v.split('=')[0].strip()
if is_visible(context):
values.append({'name': v.strip(), 'desc': context, 'val': valstr})
if verbose: print 'enumv %s' % valstr
context = ''
val += 1
else:
if verbose: print '?? %s' % lines[lno-1]
if start_brace > 0 and start_brace == end_brace:
return [{'file': filename[11:], 'name': name, 'values': values}, lno]
if len(name) > 0:
print '\x1b[31mFAILED TO PARSE ENUM\x1b[0m %s\nline: %d\nfile: %s' % (name, lno, filename)
return [None, lno]
def consume_block(lno, lines):
start_brace = 0
end_brace = 0
while lno < len(lines):
l = lines[lno].strip()
if verbose: print 'xx %s' % l
lno += 1
start_brace += l.count('{')
end_brace += l.count('}')
if start_brace > 0 and start_brace == end_brace:
break
return lno
def consume_comment(lno, lines):
while lno < len(lines):
l = lines[lno].strip()
if verbose: print 'xx %s' % l
lno += 1
if '*/' in l: break
return lno
def trim_define(l):
return l.replace('#ifndef', '').replace('#ifdef', '') \
.replace('#if', '').replace('defined', '') \
.replace('TORRENT_USE_IPV6', '').replace('TORRENT_NO_DEPRECATE', '') \
.replace('||', '').replace('&&', '').replace('(', '').replace(')','') \
.replace('!', '').replace('\\', '').strip()
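# For example:
#   trim_define('#if TORRENT_USE_ASSERTS')      -> 'TORRENT_USE_ASSERTS'
#   trim_define('#ifndef TORRENT_NO_DEPRECATE') -> ''   (known-benign defines are stripped)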
def consume_ifdef(lno, lines, warn_on_ifdefs = False):
l = lines[lno].strip()
lno += 1
start_if = 1
end_if = 0
if verbose: print 'prep %s' % l
if warn_on_ifdefs and ('TORRENT_DEBUG' in l):
while l.endswith('\\'):
lno += 1
l += lines[lno].strip()
			if verbose: print 'prep %s' % lines[lno].strip()
define = trim_define(l)
print '\x1b[31mWARNING: possible ABI breakage in public struct! "%s" \x1b[34m %s:%d\x1b[0m' % \
(define, filename, lno)
# we've already warned once, no need to do it twice
warn_on_ifdefs = False
if warn_on_ifdefs and '#if' in l:
while l.endswith('\\'):
lno += 1
l += lines[lno].strip()
			if verbose: print 'prep %s' % lines[lno].strip()
define = trim_define(l)
if define != '':
print '\x1b[33msensitive define in public struct: "%s"\x1b[34m %s:%d\x1b[0m' % (define, filename, lno)
if l == '#ifndef TORRENT_NO_DEPRECATE' or \
l == '#ifdef TORRENT_DEBUG' or \
(l.startswith('#if ') and ' TORRENT_USE_ASSERTS' in l) or \
(l.startswith('#if ') and ' TORRENT_USE_INVARIANT_CHECKS' in l) or \
l == '#ifdef TORRENT_ASIO_DEBUGGING' or \
(l.startswith('#if') and 'defined TORRENT_DEBUG' in l) or \
(l.startswith('#if') and 'defined TORRENT_ASIO_DEBUGGING' in l):
while lno < len(lines):
l = lines[lno].strip()
lno += 1
if verbose: print 'prep %s' % l
if l.startswith('#endif'): end_if += 1
if l.startswith('#if'): start_if += 1
if l == '#else' and start_if - end_if == 1: break
if start_if - end_if == 0: break
return lno
else:
while l.endswith('\\') and lno < len(lines):
l = lines[lno].strip()
lno += 1
if verbose: print 'prep %s' % l
return lno
for filename in files:
h = open(filename)
lines = h.read().split('\n')
if verbose: print '\n=== %s ===\n' % filename
blanks = 0
lno = 0
while lno < len(lines):
l = lines[lno].strip()
lno += 1
if l == '':
blanks += 1
context = ''
continue
if l.startswith('//') and l[2:].strip() == 'OVERVIEW':
# this is a section overview
current_overview = ''
while lno < len(lines):
l = lines[lno].strip()
lno += 1
if not l.startswith('//'):
# end of overview
overviews[filename[11:]] = current_overview
current_overview = ''
break
l = l[2:]
if l.startswith(' '): l = l[1:]
current_overview += l + '\n'
if l.startswith('//'):
if verbose: print 'desc %s' % l
l = l[2:]
if len(l) and l[0] == ' ': l = l[1:]
context += l + '\n'
continue
if l.startswith('/*'):
lno = consume_comment(lno - 1, lines)
continue
if l.startswith('#'):
lno = consume_ifdef(lno - 1, lines)
continue
if (l == 'namespace detail' or \
l == 'namespace dht_detail' or \
l == 'namespace impl' or \
l == 'namespace aux') \
and not internal:
lno = consume_block(lno, lines)
continue
if 'TORRENT_CFG' in l:
blanks += 1
if verbose: print 'xx %s' % l
continue
if 'TORRENT_DEPRECATED' in l:
if ('class ' in l or 'struct ' in l) and not ';' in l:
lno = consume_block(lno - 1, lines)
context = ''
blanks += 1
if verbose: print 'xx %s' % l
continue
if 'TORRENT_EXPORT ' in l or l.startswith('inline ') or l.startswith('template') or internal:
if l.startswith('class ') or l.startswith('struct '):
if not l.endswith(';'):
current_class, lno = parse_class(lno -1, lines, filename)
if current_class != None and is_visible(context):
current_class['desc'] = context
if context == '':
print 'WARNING: class "%s" is not documented: \x1b[34m%s:%d\x1b[0m' \
% (current_class['name'], filename, lno)
classes.append(current_class)
context = ''
blanks += 1
continue
if looks_like_function(l):
current_fun, lno = parse_function(lno - 1, lines, filename)
if current_fun != None and is_visible(context):
if context == '' and blanks == 0 and len(functions):
functions[-1]['signatures'].update(current_fun['signatures'])
functions[-1]['names'].update(current_fun['names'])
else:
current_fun['desc'] = context
if context == '':
print 'WARNING: function "%s" is not documented: \x1b[34m%s:%d\x1b[0m' \
% (first_item(current_fun['names']), filename, lno)
functions.append(current_fun)
context = ''
blanks = 0
continue
if ('class ' in l or 'struct ' in l) and not ';' in l:
lno = consume_block(lno - 1, lines)
context = ''
blanks += 1
continue
if l.startswith('enum '):
if not is_visible(context):
consume_block(lno - 1, lines)
else:
current_enum, lno = parse_enum(lno - 1, lines, filename)
if current_enum != None and is_visible(context):
current_enum['desc'] = context
if context == '':
print 'WARNING: enum "%s" is not documented: \x1b[34m%s:%d\x1b[0m' \
% (current_enum['name'], filename, lno)
enums.append(current_enum)
context = ''
blanks += 1
continue
blanks += 1
if verbose:
if looks_like_forward_decl(l) \
or looks_like_blank(l) \
or looks_like_namespace(l):
print '-- %s' % l
else:
print '?? %s' % l
context = ''
h.close()
# ====================================================================
#
# RENDER PART
#
# ====================================================================
if dump:
if verbose: print '\n===============================\n'
for c in classes:
print '\x1b[4m%s\x1b[0m %s\n{' % (c['type'], c['name'])
for f in c['fun']:
for s in f['signatures']:
print ' %s' % s.replace('\n', '\n ')
if len(c['fun']) > 0 and len(c['fields']) > 0: print ''
for f in c['fields']:
for s in f['signatures']:
print ' %s' % s
if len(c['fields']) > 0 and len(c['enums']) > 0: print ''
for e in c['enums']:
print ' \x1b[4menum\x1b[0m %s\n {' % e['name']
for v in e['values']:
print ' %s' % v['name']
print ' };'
print '};\n'
for f in functions:
		for s in f['signatures']: print '%s' % s
for e in enums:
print '\x1b[4menum\x1b[0m %s\n{' % e['name']
for v in e['values']:
print ' %s' % v['name']
print '};'
categories = {}
for c in classes:
cat = categorize_symbol(c['name'], c['file'])
if not cat in categories:
categories[cat] = { 'classes': [], 'functions': [], 'enums': [], 'filename': 'reference-%s.rst' % cat.replace(' ', '_')}
if c['file'] in overviews:
categories[cat]['overview'] = overviews[c['file']]
filename = categories[cat]['filename'].replace('.rst', '.html') + '#'
categories[cat]['classes'].append(c)
symbols[c['name']] = filename + c['name']
for f in c['fun']:
for n in f['names']:
symbols[n] = filename + n
symbols[c['name'] + '::' + n] = filename + n
for f in c['fields']:
for n in f['names']:
symbols[c['name'] + '::' + n] = filename + n
for e in c['enums']:
symbols[e['name']] = filename + e['name']
symbols[c['name'] + '::' + e['name']] = filename + e['name']
for v in e['values']:
# symbols[v['name']] = filename + v['name']
symbols[e['name'] + '::' + v['name']] = filename + v['name']
symbols[c['name'] + '::' + v['name']] = filename + v['name']
for f in functions:
cat = categorize_symbol(first_item(f['names']), f['file'])
if not cat in categories:
categories[cat] = { 'classes': [], 'functions': [], 'enums': [], 'filename': 'reference-%s.rst' % cat.replace(' ', '_')}
if f['file'] in overviews:
categories[cat]['overview'] = overviews[f['file']]
for n in f['names']:
symbols[n] = categories[cat]['filename'].replace('.rst', '.html') + '#' + n
categories[cat]['functions'].append(f)
for e in enums:
cat = categorize_symbol(e['name'], e['file'])
if not cat in categories:
categories[cat] = { 'classes': [], 'functions': [], 'enums': [], 'filename': 'reference-%s.rst' % cat.replace(' ', '_')}
categories[cat]['enums'].append(e)
filename = categories[cat]['filename'].replace('.rst', '.html') + '#'
symbols[e['name']] = filename + e['name']
for v in e['values']:
symbols[e['name'] + '::' + v['name']] = filename + v['name']
def print_declared_in(out, o):
out.write('Declared in "%s"\n\n' % print_link(o['file'], '../include/%s' % o['file']))
print >>out, dump_link_targets()
# returns RST marked up string
def linkify_symbols(string):
lines = string.split('\n')
ret = []
in_literal = False
lno = 0
for l in lines:
lno += 1
# don't touch headlines, i.e. lines whose
# next line entirely contains one of =, - or .
if (lno < len(lines)-1): next_line = lines[lno]
else: next_line = ''
if len(next_line) > 0 and lines[lno].replace('=',''). \
replace('-','').replace('.', '') == '':
ret.append(l)
continue
if l.startswith('|'):
ret.append(l)
continue
if in_literal and not l.startswith('\t') and not l == '':
# print ' end literal: "%s"' % l
in_literal = False
if in_literal:
# print ' literal: "%s"' % l
ret.append(l)
continue
if l.strip() == '.. parsed-literal::' or \
l.strip().startswith('.. code::') or \
(not l.strip().startswith('..') and l.endswith('::')):
# print ' start literal: "%s"' % l
in_literal = True
words = l.split(' ')
for i in range(len(words)):
# it's important to preserve leading
# tabs, since that's relevant for
# rst markup
leading = ''
w = words[i]
if len(w) == 0: continue
while len(w) > 0 and \
w[0] in ['\t', ' ', '(', '[', '{']:
leading += w[0]
w = w[1:]
# preserve commas and dots at the end
w = w.strip()
trailing = ''
if len(w) == 0: continue
while len(w) > 1 and w[-1] in ['.', ',', ')'] and w[-2:] != '()':
trailing = w[-1] + trailing
w = w[:-1]
link_name = w;
# print w
if len(w) == 0: continue
if link_name[-1] == '_': link_name = link_name[:-1]
if w in symbols:
link_name = link_name.replace('-', ' ')
# print ' found %s -> %s' % (w, link_name)
words[i] = leading + print_link(link_name, symbols[w]) + trailing
ret.append(' '.join(words))
return '\n'.join(ret)
link_targets = []
def print_link(name, target):
global link_targets
link_targets.append(target)
return "`%s`__" % name
def dump_link_targets(indent = ''):
global link_targets
ret = '\n'
for l in link_targets:
ret += '%s__ %s\n' % (indent, l)
link_targets = []
return ret
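# For example:
#   print_link('entry', 'reference-Bencoding.html#entry') -> '`entry`__'
# and the next dump_link_targets() call then emits the matching anonymous
# hyperlink target line:  '__ reference-Bencoding.html#entry'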
def heading(string, c, indent = ''):
string = string.strip()
return '\n' + indent + string + '\n' + indent + (c * len(string)) + '\n'
def render_enums(out, enums, print_declared_reference, header_level):
for e in enums:
print >>out, '.. raw:: html\n'
print >>out, '\t<a name="%s"></a>' % e['name']
print >>out, ''
print >>out, heading('enum %s' % e['name'], header_level)
print_declared_in(out, e)
width = [len('name'), len('value'), len('description')]
for i in range(len(e['values'])):
e['values'][i]['desc'] = linkify_symbols(e['values'][i]['desc'])
for v in e['values']:
width[0] = max(width[0], len(v['name']))
width[1] = max(width[1], len(v['val']))
for d in v['desc'].split('\n'):
width[2] = max(width[2], len(d))
print >>out, '+-' + ('-' * width[0]) + '-+-' + ('-' * width[1]) + '-+-' + ('-' * width[2]) + '-+'
print >>out, '| ' + 'name'.ljust(width[0]) + ' | ' + 'value'.ljust(width[1]) + ' | ' + 'description'.ljust(width[2]) + ' |'
print >>out, '+=' + ('=' * width[0]) + '=+=' + ('=' * width[1]) + '=+=' + ('=' * width[2]) + '=+'
for v in e['values']:
d = v['desc'].split('\n')
if len(d) == 0: d = ['']
print >>out, '| ' + v['name'].ljust(width[0]) + ' | ' + v['val'].ljust(width[1]) + ' | ' + d[0].ljust(width[2]) + ' |'
for s in d[1:]:
print >>out, '| ' + (' ' * width[0]) + ' | ' + (' ' * width[1]) + ' | ' + s.ljust(width[2]) + ' |'
print >>out, '+-' + ('-' * width[0]) + '-+-' + ('-' * width[1]) + '-+-' + ('-' * width[2]) + '-+'
print >>out, ''
print >>out, dump_link_targets()
sections = \
{
'Core': 0,
'Session': 0,
'Settings': 0,
'Bencoding': 1,
'Bdecoding': 1,
'Filter': 1,
'Error Codes': 1,
'Create Torrents': 1,
'ed25519': 2,
'Utility': 2,
'Storage': 2,
'Custom Storage': 2,
'Plugins': 2,
'Alerts': 3
}
def print_toc(out, categories, s):
for cat in categories:
if (s != 2 and cat not in sections) or \
(cat in sections and sections[cat] != s): continue
print >>out, '\t.. rubric:: %s\n' % cat
if 'overview' in categories[cat]:
print >>out, '\t| overview__'
category_filename = categories[cat]['filename'].replace('.rst', '.html')
for c in categories[cat]['classes']:
print >>out, '\t| ' + print_link(c['name'], symbols[c['name']])
for f in categories[cat]['functions']:
for n in f['names']:
print >>out, '\t| ' + print_link(n, symbols[n])
for e in categories[cat]['enums']:
print >>out, '\t| ' + print_link(e['name'], symbols[e['name']])
print >>out, ''
if 'overview' in categories[cat]:
print >>out, '\t__ %s#overview' % categories[cat]['filename'].replace('.rst', '.html')
print >>out, dump_link_targets('\t')
out = open('reference.rst', 'w+')
out.write('''=======================
reference documentation
=======================
''')
out.write('`single-page version`__\n\n__ single-page-ref.html\n\n')
for i in range(4):
out.write('.. container:: main-toc\n\n')
print_toc(out, categories, i)
out.close()
for cat in categories:
out = open(categories[cat]['filename'], 'w+')
classes = categories[cat]['classes']
functions = categories[cat]['functions']
enums = categories[cat]['enums']
out.write('''
:Author: Arvid Norberg, arvid@libtorrent.org
:Version: 1.1.4
`home`__
__ reference.html
%s
.. contents:: Table of contents
:depth: 2
:backlinks: none
''' % heading(cat, '='))
if 'overview' in categories[cat]:
out.write('%s\n' % linkify_symbols(categories[cat]['overview']))
for c in classes:
print >>out, '.. raw:: html\n'
print >>out, '\t<a name="%s"></a>' % c['name']
print >>out, ''
out.write('%s\n' % heading(c['name'], '-'))
print_declared_in(out, c)
c['desc'] = linkify_symbols(c['desc'])
out.write('%s\n' % c['desc'])
print >>out, dump_link_targets()
print >>out,'\n.. parsed-literal::\n\t'
block = '\n%s\n{\n' % c['decl']
for f in c['fun']:
for s in f['signatures']:
block += ' %s\n' % highlight_signature(s.replace('\n', '\n '))
if len(c['fun']) > 0 and len(c['enums']) > 0: block += '\n'
first = True
for e in c['enums']:
if not first:
block += '\n'
first = False
block += ' enum %s\n {\n' % e['name']
for v in e['values']:
block += ' %s,\n' % v['name']
block += ' };\n'
if len(c['fun']) + len(c['enums']) > 0 and len(c['fields']): block += '\n'
for f in c['fields']:
for s in f['signatures']:
block += ' %s\n' % s
block += '};'
print >>out, block.replace('\n', '\n\t') + '\n'
for f in c['fun']:
if f['desc'] == '': continue
title = ''
print >>out, '.. raw:: html\n'
for n in f['names']:
print >>out, '\t<a name="%s"></a>' % n
print >>out, ''
for n in f['names']:
title += '%s ' % n
print >>out, heading(title.strip(), '.')
block = '.. parsed-literal::\n\n'
for s in f['signatures']:
block += highlight_signature(s.replace('\n', '\n ')) + '\n'
print >>out, '%s\n' % block.replace('\n', '\n\t')
f['desc'] = linkify_symbols(f['desc'])
print >>out, '%s' % f['desc']
print >>out, dump_link_targets()
render_enums(out, c['enums'], False, '.')
for f in c['fields']:
if f['desc'] == '': continue
print >>out, '.. raw:: html\n'
for n in f['names']:
print >>out, '\t<a name="%s"></a>' % n
print >>out, ''
for n in f['names']:
print >>out, '%s ' % n,
print >>out, ''
f['desc'] = linkify_symbols(f['desc'])
print >>out, '\t%s' % f['desc'].replace('\n', '\n\t')
print >>out, dump_link_targets()
for f in functions:
h = ''
print >>out, '.. raw:: html\n'
for n in f['names']:
print >>out, '\t<a name="%s"></a>' % n
print >>out, ''
for n in f['names']:
h += '%s ' % n
print >>out, heading(h, '-')
print_declared_in(out, f)
block = '.. parsed-literal::\n\n'
for s in f['signatures']:
block += highlight_signature(s) + '\n'
print >>out, '%s\n' % block.replace('\n', '\n\t')
print >>out, linkify_symbols(f['desc'])
print >>out, dump_link_targets()
render_enums(out, enums, True, '-')
print >>out, dump_link_targets()
for i in static_links:
print >>out, i
out.close()
#for s in symbols:
# print s
for i,o in preprocess_rst.items():
f = open(i, 'r')
out = open(o, 'w+')
print 'processing %s -> %s' % (i, o)
l = linkify_symbols(f.read())
print >>out, l,
print >>out, dump_link_targets()
out.close()
f.close()
|
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.core.urlresolvers import reverse_lazy
from quotes.models import Quote
class QuoteCreate(CreateView):
model = Quote
class QuoteUpdate(UpdateView):
model = Quote
class QuoteDelete(DeleteView):
model = Quote
success_url = reverse_lazy('quotes:index')
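# A hypothetical URLconf sketch for wiring these views (URL patterns and names
# are assumptions; only the 'quotes:index' namespace above comes from the code):
#
#   url(r'^add/$', QuoteCreate.as_view(), name='quote_add'),
#   url(r'^(?P<pk>\d+)/edit/$', QuoteUpdate.as_view(), name='quote_edit'),
#   url(r'^(?P<pk>\d+)/delete/$', QuoteDelete.as_view(), name='quote_delete'),
#
# QuoteCreate and QuoteUpdate fall back to the model's get_absolute_url() for
# their success URL, while QuoteDelete redirects to the quotes index.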
|
import datetime
from django.conf import settings
from django.core.exceptions import FieldError
from django.db.backends.util import truncate_name
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import select_related_descend, QueryWrapper
from django.db.models.sql.constants import (SINGLE, MULTI, ORDER_DIR,
GET_ITERATOR_CHUNK_SIZE, SelectInfo)
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.query import get_order_dir, Query
from django.db.utils import DatabaseError
from django.utils import six
from django.utils.six.moves import zip
from django.utils import timezone
class SQLCompiler(object):
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {}
# When ordering a queryset with distinct on a column not part of the
# select set, the ordering column needs to be added to the select
# clause. This information is needed both in SQL construction and
# masking away the ordering selects from the returned row.
self.ordering_aliases = []
self.ordering_params = []
def pre_sql_setup(self):
"""
Does any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
# TODO: after the query has been executed, the altered state should be
# cleaned. We are not using a clone() of the query here.
"""
if not self.query.tables:
self.query.join((None, self.query.get_meta().db_table, None))
if (not self.query.select and self.query.default_cols and not
self.query.included_inherited_models):
self.query.setup_inherited_models()
if self.query.select_related and not self.query.related_select_cols:
self.fill_related_selections()
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
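    # Illustrative behaviour (a sketch; exact quoting depends on the backend):
    #   quote_name_unless_alias('auth_user') -> '"auth_user"' on SQLite/PostgreSQL
    #   quote_name_unless_alias('T3')        -> 'T3' when T3 is a join alias
    # and the result is memoised in self.quote_cache either way.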
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
self.pre_sql_setup()
# After executing the query, we must get rid of any joins the query
# setup created. So, take note of alias counts before the query ran.
# However we do not want to get rid of stuff done in pre_sql_setup(),
# as the pre_sql_setup will modify query state in a way that forbids
# another run of it.
self.refcounts_before = self.query.alias_refcount.copy()
out_cols, s_params = self.get_columns(with_col_aliases)
ordering, o_params, ordering_group_by = self.get_ordering()
distinct_fields = self.get_distinct()
# This must come after 'select', 'ordering' and 'distinct' -- see
# docstring of get_from_clause() for details.
from_, f_params = self.get_from_clause()
qn = self.quote_name_unless_alias
where, w_params = self.query.where.as_sql(qn=qn, connection=self.connection)
having, h_params = self.query.having.as_sql(qn=qn, connection=self.connection)
having_group_by = self.query.having.get_cols()
params = []
for val in six.itervalues(self.query.extra_select):
params.extend(val[1])
result = ['SELECT']
if self.query.distinct:
result.append(self.connection.ops.distinct_sql(distinct_fields))
params.extend(o_params)
result.append(', '.join(out_cols + self.ordering_aliases))
params.extend(s_params)
params.extend(self.ordering_params)
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping, gb_params = self.get_grouping(having_group_by, ordering_group_by)
if grouping:
if distinct_fields:
raise NotImplementedError(
"annotate() + distinct(fields) not implemented.")
if not ordering:
ordering = self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
params.extend(gb_params)
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if ordering:
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limits:
if self.query.high_mark is not None:
result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
if self.query.low_mark:
if self.query.high_mark is None:
val = self.connection.ops.no_limit_value()
if val:
result.append('LIMIT %d' % val)
result.append('OFFSET %d' % self.query.low_mark)
if self.query.select_for_update and self.connection.features.has_select_for_update:
# If we've been asked for a NOWAIT query but the backend does not support it,
# raise a DatabaseError otherwise we could get an unexpected deadlock.
nowait = self.query.select_for_update_nowait
if nowait and not self.connection.features.has_select_for_update_nowait:
raise DatabaseError('NOWAIT is not supported on this database backend.')
result.append(self.connection.ops.for_update_sql(nowait=nowait))
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(self.refcounts_before)
return ' '.join(result), tuple(params)
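    # Reading the construction above, the finished statement has the shape
    # (each piece appears only when present):
    #   SELECT [DISTINCT] <cols> FROM <tables> [WHERE ...] [GROUP BY ...]
    #   [HAVING ...] [ORDER BY ...] [LIMIT ...] [OFFSET ...] [FOR UPDATE]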
def as_nested_sql(self):
"""
Perform the same functionality as the as_sql() method, returning an
SQL string and parameters. However, the alias prefixes are bumped
beforehand (in a copy -- the current query isn't changed), and any
ordering is removed if the query is unsliced.
Used when nesting this query inside another.
"""
obj = self.query.clone()
if obj.low_mark == 0 and obj.high_mark is None:
# If there is no slicing in use, then we can safely drop all ordering
obj.clear_ordering(True)
obj.bump_prefix()
return obj.get_compiler(connection=self.connection).as_sql()
def get_columns(self, with_aliases=False):
"""
Returns the list of columns to use in the select statement, as well as
        a list of any extra parameters that need to be included. If no columns
have been specified, returns all columns relating to fields in the
model.
If 'with_aliases' is true, any column names that are duplicated
(without the table names) are given unique aliases. This is needed in
some cases to avoid ambiguity with nested queries.
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in six.iteritems(self.query.extra_select)]
params = []
aliases = set(self.query.extra_select.keys())
if with_aliases:
col_aliases = aliases.copy()
else:
col_aliases = set()
if self.query.select:
only_load = self.deferred_to_columns()
for col, _ in self.query.select:
if isinstance(col, (list, tuple)):
alias, column = col
table = self.query.alias_map[alias].table_name
if table in only_load and column not in only_load[table]:
continue
r = '%s.%s' % (qn(alias), qn(column))
if with_aliases:
if col[1] in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append('%s AS %s' % (r, qn2(col[1])))
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(r)
aliases.add(r)
col_aliases.add(col[1])
else:
col_sql, col_params = col.as_sql(qn, self.connection)
result.append(col_sql)
params.extend(col_params)
if hasattr(col, 'alias'):
aliases.add(col.alias)
col_aliases.add(col.alias)
elif self.query.default_cols:
cols, new_aliases = self.get_default_columns(with_aliases,
col_aliases)
result.extend(cols)
aliases.update(new_aliases)
max_name_length = self.connection.ops.max_name_length()
for alias, aggregate in self.query.aggregate_select.items():
agg_sql, agg_params = aggregate.as_sql(qn, self.connection)
if alias is None:
result.append(agg_sql)
else:
result.append('%s AS %s' % (agg_sql, qn(truncate_name(alias, max_name_length))))
params.extend(agg_params)
for (table, col), _ in self.query.related_select_cols:
r = '%s.%s' % (qn(table), qn(col))
if with_aliases and col in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append(r)
aliases.add(r)
col_aliases.add(col)
self._select_aliases = aliases
return result, params
def get_default_columns(self, with_aliases=False, col_aliases=None,
start_alias=None, opts=None, as_pairs=False, from_parent=None):
"""
Computes the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Returns a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, returns a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
"""
result = []
if opts is None:
opts = self.query.get_meta()
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
aliases = set()
only_load = self.deferred_to_columns()
if not start_alias:
start_alias = self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field, model in opts.get_concrete_fields_with_model():
if from_parent and model is not None and issubclass(from_parent, model):
# Avoid loading data for already loaded parents.
continue
alias = self.query.join_parent_model(opts, model, start_alias,
seen_models)
table = self.query.alias_map[alias].table_name
if table in only_load and field.column not in only_load[table]:
continue
if as_pairs:
result.append((alias, field.column))
aliases.add(alias)
continue
if with_aliases and field.column in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s.%s AS %s' % (qn(alias),
qn2(field.column), c_alias))
col_aliases.add(c_alias)
aliases.add(c_alias)
else:
r = '%s.%s' % (qn(alias), qn2(field.column))
result.append(r)
aliases.add(r)
if with_aliases:
col_aliases.add(field.column)
return result, aliases
def get_distinct(self):
"""
Returns a quoted list of fields to use in DISTINCT ON part of the query.
Note that this method can alter the tables in the query, and thus it
must be called before get_from_clause().
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
field, cols, alias, _, _ = self._setup_joins(parts, opts, None)
cols, alias = self._final_join_removal(cols, alias)
for col in cols:
result.append("%s.%s" % (qn(alias), qn2(col)))
return result
def get_ordering(self):
"""
Returns a tuple containing a list representing the SQL elements in the
"order by" clause, and the list of SQL elements that need to be added
to the GROUP BY clause as a result of the ordering.
Also sets the ordering_aliases attribute on this instance to a list of
extra aliases needed in the select.
Determining the ordering SQL can change the tables we need to include,
so this should be run *before* get_from_clause().
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
else:
ordering = (self.query.order_by
or self.query.get_meta().ordering
or [])
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
distinct = self.query.distinct
select_aliases = self._select_aliases
result = []
group_by = []
ordering_aliases = []
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
# It's possible, due to model inheritance, that normal usage might try
# to include the same field more than once in the ordering. We track
# the table/column pairs we use and discard any after the first use.
processed_pairs = set()
params = []
ordering_params = []
for pos, field in enumerate(ordering):
if field == '?':
result.append(self.connection.ops.random_function_sql())
continue
if isinstance(field, int):
if field < 0:
order = desc
field = -field
else:
order = asc
result.append('%s %s' % (field, order))
group_by.append((str(field), []))
continue
col, order = get_order_dir(field, asc)
if col in self.query.aggregate_select:
result.append('%s %s' % (qn(col), order))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), col)
processed_pairs.add((table, col))
if not distinct or elt in select_aliases:
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
elif get_order_dir(field)[0] not in self.query.extra:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
for table, cols, order in self.find_ordering_name(field,
self.query.get_meta(), default_order=asc):
for col in cols:
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), qn2(col))
processed_pairs.add((table, col))
if distinct and elt not in select_aliases:
ordering_aliases.append(elt)
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
else:
elt = qn2(col)
if col not in self.query.extra_select:
sql = "(%s) AS %s" % (self.query.extra[col][0], elt)
ordering_aliases.append(sql)
ordering_params.extend(self.query.extra[col][1])
else:
if distinct and col not in select_aliases:
ordering_aliases.append(elt)
ordering_params.extend(params)
result.append('%s %s' % (elt, order))
group_by.append(self.query.extra[col])
self.ordering_aliases = ordering_aliases
self.ordering_params = ordering_params
return result, params, group_by
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Returns the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
pieces = name.split(LOOKUP_SEP)
field, cols, alias, joins, opts = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model.
if field.rel and len(joins) > 1 and opts.ordering:
# Firstly, avoid infinite loops.
if not already_seen:
already_seen = set()
join_tuple = tuple([self.query.alias_map[j].table_name for j in joins])
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
cols, alias = self._final_join_removal(cols, alias)
return [(alias, cols, order)]
def _setup_joins(self, pieces, opts, alias):
"""
A helper method for get_ordering and get_distinct. This method will
call query.setup_joins, handle refcounts and then promote the joins.
Note that get_ordering and get_distinct must produce same target
columns on same input, as the prefixes of get_ordering and get_distinct
must match. Executing SQL where this is not true is an error.
"""
if not alias:
alias = self.query.get_initial_alias()
field, targets, opts, joins, _ = self.query.setup_joins(
pieces, opts, alias)
# We will later on need to promote those joins that were added to the
# query afresh above.
joins_to_promote = [j for j in joins if self.query.alias_refcount[j] < 2]
alias = joins[-1]
cols = [target.column for target in targets]
if not field.rel:
# To avoid inadvertent trimming of a necessary alias, use the
# refcount to show that we are referencing a non-relation field on
# the model.
self.query.ref_alias(alias)
# Must use left outer joins for nullable fields and their relations.
# Ordering or distinct must not affect the returned set, and INNER
# JOINS for nullable fields could do this.
self.query.promote_joins(joins_to_promote)
return field, cols, alias, joins, opts
def _final_join_removal(self, cols, alias):
"""
A helper method for get_distinct and get_ordering. This method will
trim extra not-needed joins from the tail of the join chain.
This is very similar to what is done in trim_joins, but we will
trim LEFT JOINS here. It would be a good idea to consolidate this
method and query.trim_joins().
"""
if alias:
while 1:
join = self.query.alias_map[alias]
lhs_cols, rhs_cols = zip(*[(lhs_col, rhs_col) for lhs_col, rhs_col in join.join_cols])
if set(cols) != set(rhs_cols):
break
cols = [lhs_cols[rhs_cols.index(col)] for col in cols]
self.query.unref_alias(alias)
alias = join.lhs_alias
return cols, alias
def get_from_clause(self):
"""
Returns a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Sub-classes, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables we need. This means the select columns,
ordering and distinct must be done first.
"""
result = []
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
first = True
from_params = []
for alias in self.query.tables:
if not self.query.alias_refcount[alias]:
continue
try:
name, alias, join_type, lhs, join_cols, _, join_field = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
alias_str = '' if alias == name else (' %s' % alias)
if join_type and not first:
extra_cond = join_field.get_extra_restriction(
self.query.where_class, alias, lhs)
if extra_cond:
extra_sql, extra_params = extra_cond.as_sql(
qn, self.connection)
extra_sql = 'AND (%s)' % extra_sql
from_params.extend(extra_params)
else:
extra_sql = ""
result.append('%s %s%s ON ('
% (join_type, qn(name), alias_str))
for index, (lhs_col, rhs_col) in enumerate(join_cols):
if index != 0:
result.append(' AND ')
result.append('%s.%s = %s.%s' %
(qn(lhs), qn2(lhs_col), qn(alias), qn2(rhs_col)))
result.append('%s)' % extra_sql)
else:
connector = '' if first else ', '
result.append('%s%s%s' % (connector, qn(name), alias_str))
first = False
for t in self.query.extra_tables:
alias, unused = self.query.table_alias(t)
            # Only add the alias if it's not already present (the table_alias()
            # call increments the refcount, so an alias refcount of one means
            # this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
connector = '' if first else ', '
result.append('%s%s' % (connector, qn(alias)))
first = False
return result, from_params
def get_grouping(self, having_group_by, ordering_group_by):
"""
Returns a tuple representing the SQL elements in the "group by" clause.
"""
qn = self.quote_name_unless_alias
result, params = [], []
if self.query.group_by is not None:
select_cols = self.query.select + self.query.related_select_cols
# Just the column, not the fields.
select_cols = [s[0] for s in select_cols]
if (len(self.query.get_meta().concrete_fields) == len(self.query.select)
and self.connection.features.allows_group_by_pk):
self.query.group_by = [
(self.query.get_meta().db_table, self.query.get_meta().pk.column)
]
select_cols = []
seen = set()
cols = self.query.group_by + having_group_by + select_cols
for col in cols:
col_params = ()
if isinstance(col, (list, tuple)):
sql = '%s.%s' % (qn(col[0]), qn(col[1]))
elif hasattr(col, 'as_sql'):
sql, col_params = col.as_sql(qn, self.connection)
else:
sql = '(%s)' % str(col)
if sql not in seen:
result.append(sql)
params.extend(col_params)
seen.add(sql)
# Still, we need to add all stuff in ordering (except if the backend can
# group by just by PK).
if ordering_group_by and not self.connection.features.allows_group_by_pk:
for order, order_params in ordering_group_by:
# Even if we have seen the same SQL string, it might have
# different params, so, we add same SQL in "has params" case.
if order not in seen or params:
result.append(order)
params.extend(order_params)
seen.add(order)
# Unconditionally add the extra_select items.
for extra_select, extra_params in self.query.extra_select.values():
sql = '(%s)' % str(extra_select)
result.append(sql)
params.extend(extra_params)
return result, params
def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
requested=None, restricted=None, nullable=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
self.query.related_select_cols = []
only_load = self.query.get_loaded_field_names()
# Setup for the case when only particular related fields should be
# included in the related selection.
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
for f, model in opts.get_fields_with_model():
            # get_fields_with_model() returns None as the model for fields that
            # are local to this model. For those fields we use f.model instead,
            # i.e. the model on which the field is defined.
field_model = model or f.model
if not select_related_descend(f, restricted, requested,
only_load.get(field_model)):
continue
promote = nullable or f.null
_, _, _, joins, _ = self.query.setup_joins(
[f.name], opts, root_alias, outer_if_first=promote)
alias = joins[-1]
columns, aliases = self.get_default_columns(start_alias=alias,
opts=f.rel.to._meta, as_pairs=True)
self.query.related_select_cols.extend(
SelectInfo(col, field) for col, field in zip(columns, f.rel.to._meta.concrete_fields))
if restricted:
next = requested.get(f.name, {})
else:
next = False
new_nullable = f.null or promote
self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
next, restricted, new_nullable)
if restricted:
related_fields = [
(o.field, o.model)
for o in opts.get_all_related_objects()
if o.field.unique
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested,
only_load.get(model), reverse=True):
continue
_, _, _, joins, _ = self.query.setup_joins(
[f.related_query_name()], opts, root_alias, outer_if_first=True)
alias = joins[-1]
from_parent = (opts.model if issubclass(model, opts.model)
else None)
columns, aliases = self.get_default_columns(start_alias=alias,
opts=model._meta, as_pairs=True, from_parent=from_parent)
self.query.related_select_cols.extend(
SelectInfo(col, field) for col, field
in zip(columns, model._meta.concrete_fields))
next = requested.get(f.related_query_name(), {})
# Use True here because we are looking at the _reverse_ side of
# the relation, which is always nullable.
new_nullable = True
table = model._meta.db_table
self.fill_related_selections(model._meta, table, cur_depth+1,
next, restricted, new_nullable)
def deferred_to_columns(self):
"""
        Converts the self.deferred_loading data structure to a mapping of table
        names to sets of column names which are to be loaded. Returns the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.deferred_to_columns_cb)
return columns
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
fields = None
has_aggregate_select = bool(self.query.aggregate_select)
for rows in self.execute_sql(MULTI):
for row in rows:
if resolve_columns:
if fields is None:
# We only set this up here because
# related_select_cols isn't populated until
# execute_sql() has been called.
# We also include types of fields of related models that
# will be included via select_related() for the benefit
# of MySQL/MySQLdb when boolean fields are involved
# (#15040).
# This code duplicates the logic for the order of fields
# found in get_columns(). It would be nice to clean this up.
if self.query.select:
fields = [f.field for f in self.query.select]
else:
fields = self.query.get_meta().concrete_fields
fields = fields + [f.field for f in self.query.related_select_cols]
# If the field was deferred, exclude it from being passed
# into `resolve_columns` because it wasn't selected.
only_load = self.deferred_to_columns()
if only_load:
db_table = self.query.get_meta().db_table
fields = [f for f in fields if db_table in only_load and
f.column in only_load[db_table]]
row = self.resolve_columns(row, fields)
if has_aggregate_select:
loaded_fields = self.query.get_loaded_field_names().get(self.query.model, set()) or self.query.select
aggregate_start = len(self.query.extra_select) + len(loaded_fields)
aggregate_end = aggregate_start + len(self.query.aggregate_select)
row = tuple(row[:aggregate_start]) + tuple([
self.query.resolve_aggregate(value, aggregate, self.connection)
for (alias, aggregate), value
in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end])
]) + tuple(row[aggregate_end:])
yield row
def execute_sql(self, result_type=MULTI):
"""
        Run the query against the database and return the result(s). The
        return value is a single data item if result_type is SINGLE, or an
        iterator over the results if the result_type is MULTI.
        result_type is either MULTI (use fetchmany() to retrieve all rows),
        SINGLE (only retrieve a single row), or None. In this last case, the
        cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
cursor = self.connection.cursor()
cursor.execute(sql, params)
if not result_type:
return cursor
if result_type == SINGLE:
if self.ordering_aliases:
return cursor.fetchone()[:-len(self.ordering_aliases)]
return cursor.fetchone()
# The MULTI case.
if self.ordering_aliases:
result = order_modified_iter(cursor, len(self.ordering_aliases),
self.connection.features.empty_fetchmany_value)
else:
result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
self.connection.features.empty_fetchmany_value)
if not self.connection.features.can_use_chunked_reads:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further.
return list(result)
return result
def as_subquery_condition(self, alias, columns, qn):
qn2 = self.connection.ops.quote_name
if len(columns) == 1:
sql, params = self.as_sql()
return '%s.%s IN (%s)' % (qn(alias), qn2(columns[0]), sql), params
for index, select_col in enumerate(self.query.select):
lhs = '%s.%s' % (qn(select_col.col[0]), qn2(select_col.col[1]))
rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
self.query.where.add(
QueryWrapper('%s = %s' % (lhs, rhs), []), 'AND')
sql, params = self.as_sql()
return 'EXISTS (%s)' % sql, params
class SQLInsertCompiler(SQLCompiler):
def __init__(self, *args, **kwargs):
self.return_id = False
super(SQLInsertCompiler, self).__init__(*args, **kwargs)
def placeholder(self, field, val):
if field is None:
# A field value of None means the value is raw.
return val
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
return field.get_placeholder(val, self.connection)
else:
# Return the common case for the placeholder
return '%s'
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
result = ['INSERT INTO %s' % qn(opts.db_table)]
has_fields = bool(self.query.fields)
fields = self.query.fields if has_fields else [opts.pk]
result.append('(%s)' % ', '.join([qn(f.column) for f in fields]))
if has_fields:
params = values = [
[
f.get_db_prep_save(getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True), connection=self.connection)
for f in fields
]
for obj in self.query.objs
]
else:
values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
params = [[]]
fields = [None]
can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
not self.return_id and self.connection.features.has_bulk_insert)
if can_bulk:
placeholders = [["%s"] * len(fields)]
else:
placeholders = [
[self.placeholder(field, v) for field, v in zip(fields, val)]
for val in values
]
# Oracle Spatial needs to remove some values due to #10888
params = self.connection.ops.modify_insert_params(placeholders, params)
if self.return_id and self.connection.features.can_return_id_from_insert:
params = params[0]
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
result.append("VALUES (%s)" % ", ".join(placeholders[0]))
r_fmt, r_params = self.connection.ops.return_insert_id()
# Skip empty r_fmt to allow subclasses to customize behaviour for
# 3rd party backends. Refs #19096.
if r_fmt:
result.append(r_fmt % col)
params += r_params
return [(" ".join(result), tuple(params))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
return [(" ".join(result), tuple([v for val in values for v in val]))]
else:
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholders, params)
]
def execute_sql(self, return_id=False):
assert not (return_id and len(self.query.objs) != 1)
self.return_id = return_id
cursor = self.connection.cursor()
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not (return_id and cursor):
return
if self.connection.features.can_return_id_from_insert:
return self.connection.ops.fetch_returned_insert_id(cursor)
return self.connection.ops.last_insert_id(cursor,
self.query.get_meta().db_table, self.query.get_meta().pk.column)
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
assert len(self.query.tables) == 1, \
"Can only delete from one table at a time."
qn = self.quote_name_unless_alias
result = ['DELETE FROM %s' % qn(self.query.tables[0])]
where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return '', ()
table = self.query.tables[0]
qn = self.quote_name_unless_alias
result = ['UPDATE %s' % qn(table)]
result.append('SET')
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'prepare_database_save'):
val = val.prepare_database_save(field)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self.connection)
else:
placeholder = '%s'
if hasattr(val, 'evaluate'):
val = SQLEvaluator(val, self.query, allow_joins=False)
name = field.column
if hasattr(val, 'as_sql'):
sql, params = val.as_sql(qn, self.connection)
values.append('%s = %s' % (qn(name), sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
if not values:
return '', ()
result.append(', '.join(values))
where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Returns the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
del cursor
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, we need to do some
munging of the "where" conditions to match the format required for
(portable) SQL updates. That is done here.
Further, if we are going to be running multiple updates, we pull out
the id values to update at this point so that they don't change as a
result of the progressive updates.
"""
self.query.select_related = False
self.query.clear_ordering(True)
super(SQLUpdateCompiler, self).pre_sql_setup()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
# We need to use a sub-select in the where clause to filter on things
# from other tables.
query = self.query.clone(klass=Query)
query.bump_prefix()
query.extra = {}
query.select = []
query.add_fields([query.get_meta().pk.name])
# Recheck the count - it is possible that fiddling with the select
# fields above removes tables from the query. Refs #18304.
count = query.count_active_tables()
if not self.query.related_updates and count == 1:
return
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend([r[0] for r in rows])
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
for alias in self.query.tables[1:]:
self.query.alias_refcount[alias] = 0
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self, qn=None):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
if qn is None:
qn = self.quote_name_unless_alias
sql, params = [], []
for aggregate in self.query.aggregate_select.values():
agg_sql, agg_params = aggregate.as_sql(qn, self.connection)
sql.append(agg_sql)
params.extend(agg_params)
sql = ', '.join(sql)
params = tuple(params)
sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery)
params = params + self.query.sub_params
return sql, params
class SQLDateCompiler(SQLCompiler):
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
if resolve_columns:
from django.db.models.fields import DateField
fields = [DateField()]
else:
from django.db.backends.util import typecast_date
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
date = row[offset]
if resolve_columns:
date = self.resolve_columns(row, fields)[offset]
elif needs_string_cast:
date = typecast_date(str(date))
if isinstance(date, datetime.datetime):
date = date.date()
yield date
class SQLDateTimeCompiler(SQLCompiler):
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
if resolve_columns:
from django.db.models.fields import DateTimeField
fields = [DateTimeField()]
else:
from django.db.backends.util import typecast_timestamp
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
datetime = row[offset]
if resolve_columns:
datetime = self.resolve_columns(row, fields)[offset]
elif needs_string_cast:
datetime = typecast_timestamp(str(datetime))
                # Datetimes are artificially returned in UTC on databases that
# don't support time zone. Restore the zone used in the query.
if settings.USE_TZ:
if datetime is None:
raise ValueError("Database returned an invalid value "
"in QuerySet.dates(). Are time zone "
"definitions and pytz installed?")
datetime = datetime.replace(tzinfo=None)
datetime = timezone.make_aware(datetime, self.query.tzinfo)
yield datetime
def order_modified_iter(cursor, trim, sentinel):
"""
Yields blocks of rows from a cursor. We use this iterator in the special
case when extra output columns have been added to support ordering
requirements. We must trim those extra columns before anything else can use
the results, since they're only needed to make the SQL valid.
"""
for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
sentinel):
yield [r[:-trim] for r in rows]
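# Illustrative note (not part of the original module): with trim=2, a fetched
# row such as (1, 'alice', 'x', 'y') is yielded as (1, 'alice'); the trailing
# columns existed only to make the ORDER BY clause valid SQL and are stripped
# before anything downstream sees the rows.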
|
from rest_framework.permissions import BasePermission
class IsDeviceOwnerOnly(BasePermission):
def has_permission(self, request, view):
return request.user.is_superuser
def has_object_permission(self, request, view, obj):
return request.user.is_superuser
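# Hedged usage sketch (assumed view name; not part of the original file). A DRF
# view attaches this permission via `permission_classes`; note that as written
# both checks grant access to superusers only.
#
# from rest_framework.viewsets import ModelViewSet
#
# class DeviceViewSet(ModelViewSet):
#     permission_classes = [IsDeviceOwnerOnly]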
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Peng Zhou
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from lib.datasets.imdb import imdb
import lib.datasets.ds_utils as ds_utils
import numpy as np
import scipy.sparse
import scipy.io as sio
import lib.utils.cython_bbox
import pickle
import subprocess
import uuid
import pdb
import xml.etree.ElementTree as ET  # used by _load_pascal_annotation below
from .voc_eval import voc_eval
from lib.config import config as cfg
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class dist_fake(imdb):
def __init__(self, image_set, year, dist_path=None):
imdb.__init__(self, image_set)
self._year = year
self._image_set = image_set.split('dist_')[1]
self._dist_path = self._get_default_path() if dist_path is None \
else dist_path
self._data_path=self._dist_path
        # The three-class tuple below is immediately overridden; only the
        # two-class ('authentic', 'tamper') assignment takes effect.
        # self._classes = ('__background__',  # always index 0
        #                  'tamper', 'authentic')
        self._classes = ('authentic', # always index 0
                         'tamper')
#self.classes =('authentic', # always index 0
#'splicing','removal')
self._class_to_ind = dict(list(zip(self.classes, list(range(self.num_classes)))))
self._image_ext = {'.png','.jpg','.tif','.bmp','.JPG'}
self._image_index = self._load_image_set_index()
# Default to roidb handler
self._roidb_handler = self.gt_roidb
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(os.path.splitext(self._image_index[i].split(' ')[0])[0])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
for ext in self._image_ext:
#image_path = os.path.join('/home-3/pengzhou@umd.edu/work/xintong/medifor/portrait/test_data',
#index + ext)
image_path = os.path.join(self._data_path,
index + ext)
image_path1=os.path.join('/home-3/pengzhou@umd.edu/work/pengzhou/dataset/NC2016_Test0613',
index + ext)
if os.path.isfile(image_path):
return image_path
elif os.path.isfile(image_path1):
return image_path1
else:
continue
assert os.path.isfile(image_path) and os.path.isfile(image_path1), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
# self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt
image_set_file = os.path.join(self._data_path,
self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
#print(image_index)
return image_index
def _get_default_path(self):
"""
Return the default path where PASCAL VOC is expected to be installed.
"""
return os.path.join(cfg.DATA_DIR, 'NC2016_Test0613')
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
try:
roidb = pickle.load(fid)
except:
roidb = pickle.load(fid, encoding='bytes')
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = [self.roidb_gt(index)
for index in self.image_index]
with open(cache_file, 'wb') as fid:
pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
return gt_roidb
def rpn_roidb(self):
if int(self._year) == 2007 or self._image_set != 'test':
gt_roidb = self.gt_roidb()
rpn_roidb = self._load_rpn_roidb(gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)
else:
roidb = self._load_rpn_roidb(None)
return roidb
def roidb_gt(self,image_id):
num_objs = int(len(image_id.split(' ')[1:])/5)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# "Seg" area for pascal is just the box area
seg_areas = np.zeros((num_objs), dtype=np.float32)
# Load object bounding boxes into a data frame.
for ix in range(num_objs):
bbox = image_id.split(' ')[ix*5+1:ix*5+5]
# Make pixel indexes 0-based
x1 = float(bbox[0])
y1 = float(bbox[1])
x2 = float(bbox[2])
y2 = float(bbox[3])
if x1<0:
x1=0
if y1<0:
y1=0
try:
cls=self._class_to_ind[image_id.split(' ')[ix*5+5]]
except:
if int(image_id.split(' ')[ix*5+5])==0:
print('authentic')
cls=2
else:
cls = int(image_id.split(' ')[ix*5+5])
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
seg_areas[ix] = (x2 - x1 ) * (y2 - y1)
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'flipped': False,
'JPGed':False,
'noised':False,
'seg_areas': seg_areas}
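    # Annotation-line format inferred from the parsing in roidb_gt above
    # (illustrative example; the values are made up, not taken from the dataset):
    #   "<image_name> x1 y1 x2 y2 <class> [x1 y1 x2 y2 <class> ...]"
    # e.g. "splice_0001.png 34 50 120 200 tamper" produces one box
    # [34, 50, 120, 200] whose label comes from self._class_to_ind['tamper'].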
def _load_rpn_roidb(self, gt_roidb):
filename = self.config['rpn_file']
print('loading {}'.format(filename))
assert os.path.exists(filename), \
'rpn data not found at: {}'.format(filename)
with open(filename, 'rb') as f:
box_list = pickle.load(f)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _load_pascal_annotation(self, index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
if not self.config['use_diff']:
# Exclude the samples labeled as difficult
non_diff_objs = [
obj for obj in objs if int(obj.find('difficult').text) == 0]
# if len(non_diff_objs) != len(objs):
# print 'Removed {} difficult objects'.format(
# len(objs) - len(non_diff_objs))
objs = non_diff_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# "Seg" area for pascal is just the box area
seg_areas = np.zeros((num_objs), dtype=np.float32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
x1 = float(bbox.find('xmin').text) - 1
y1 = float(bbox.find('ymin').text) - 1
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
cls = self._class_to_ind[obj.find('name').text.lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'flipped': False,
'seg_areas': seg_areas}
def _get_comp_id(self):
comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']
else self._comp_id)
return comp_id
def _get_voc_results_file_template(self):
# VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
filename = 'nist_' + self._image_set + '_{:s}.txt'
path = os.path.join(
'.',
filename)
return path
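    # Illustrative result (assumed values): with self._image_set == 'test' the
    # template is './nist_test_{:s}.txt', so callers formatting it with the
    # class 'tamper' write to './nist_test_tamper.txt'.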
def _get_voc_noise_results_file_template(self):
# VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
filename = 'nist_' + self._image_set + '_{:s}_noise.txt'
path = os.path.join(
'.',
filename)
return path
def _write_voc_results_file(self, all_boxes):
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
print('Writing {} VOC results file'.format(cls))
filename = self._get_voc_results_file_template().format(cls)
print(filename)
with open(filename, 'w') as f:
for im_ind, index in enumerate(self.image_index):
dets = all_boxes[cls_ind][im_ind]
                    if len(dets) == 0:
continue
# the VOCdevkit expects 1-based indices
for k in range(dets.shape[0]):
#pdb.set_trace()
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.format(index.split(' ')[0], dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
#pdb.set_trace()
def _do_python_eval(self, output_dir='output'):
annopath = os.path.join(
self._dist_path,
'coco_multi' ,
'Annotations',
'{:s}.xml')
imagesetfile = os.path.join(
self._dist_path,
self._image_set + '.txt')
cachedir = os.path.join(self._dist_path, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
#use_07_metric = True if int(self._year) < 2010 else False
use_07_metric = False
print('dist metric? ' + ('Yes' if use_07_metric else 'No'))
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(self._classes):
if cls == '__background__' or cls == self.classes[0]:
cls_ind=0
continue
else:
cls_ind=self._class_to_ind[cls]
#elif cls=='median_filtering':
#cls_ind=3
#continue
filename = self._get_voc_results_file_template().format(cls)
filename2 = self._get_voc_noise_results_file_template().format(cls)
print(cls_ind)
rec, prec, ap = voc_eval(
filename,filename2, annopath, imagesetfile, cls_ind, cachedir, ovthresh=0.5,
use_07_metric=use_07_metric,fuse=False)
aps += [ap]
print(('AP for {} = {:.4f},recall = {:.4f}, precision = {:.4f}'.format(cls, ap,rec[-1],prec[-1])))
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
fig=plt.figure()
plt.plot(rec,prec)
fig.suptitle('PR curve for {} detection'.format(cls),fontsize=20)
plt.xlabel('recall',fontsize=15)
plt.xlim((0,1.0))
plt.ylim((0,1.0))
plt.ylabel('precision',fontsize=15)
fig.savefig('{}.jpg'.format(cls))
print(('Mean AP = {:.4f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print(('{:.3f}'.format(ap)))
print(('{:.3f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
print('-- Thanks, The Management')
print('--------------------------------------------------------------')
def _do_matlab_eval(self, output_dir='output'):
print('-----------------------------------------------------')
print('Computing results with the official MATLAB eval code.')
print('-----------------------------------------------------')
path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',
'VOCdevkit-matlab-wrapper')
cmd = 'cd {} && '.format(path)
cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
cmd += '-r "dbstop if error; '
cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\'); quit;"' \
.format(self._devkit_path, self._get_comp_id(),
self._image_set, output_dir)
print(('Running:\n{}'.format(cmd)))
status = subprocess.call(cmd, shell=True)
def evaluate_detections(self, all_boxes, output_dir):
self._write_voc_results_file(all_boxes)
self._do_python_eval(output_dir)
#if self.config['matlab_eval']:
#self._do_matlab_eval(output_dir)
if self.config['cleanup']:
for cls in self._classes:
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
#os.remove(filename)
def competition_mode(self, on):
if on:
self.config['use_salt'] = False
self.config['cleanup'] = False
else:
self.config['use_salt'] = True
self.config['cleanup'] = True
if __name__ == '__main__':
from datasets.dist_fake import dist_fake
d = dist_fake('trainval', '2007')
res = d.roidb
from IPython import embed;
embed()
|
# Copyright (c) OpenMMLab. All rights reserved.
# This scripts is copied from
# https://github.com/activitynet/ActivityNet/blob/master/Crawler/Kinetics/download.py # noqa: E501
# The code is licensed under the MIT licence.
import argparse
import os
import ssl
import subprocess
import mmcv
from joblib import Parallel, delayed
ssl._create_default_https_context = ssl._create_unverified_context
data_file = './data'
output_dir = f'{data_file}/videos'
def download_clip(video_identifier,
output_filename,
num_attempts=5,
url_base='https://www.youtube.com/watch?v='):
"""Download a video from youtube if exists and is not blocked.
arguments:
---------
video_identifier: str
Unique YouTube video identifier (11 characters)
output_filename: str
File path where the video will be stored.
"""
# Defensive argument checking.
assert isinstance(video_identifier, str), 'video_identifier must be string'
assert isinstance(output_filename, str), 'output_filename must be string'
assert len(video_identifier) == 11, 'video_identifier must have length 11'
status = False
if not os.path.exists(output_filename):
command = [
'youtube-dl', '--quiet', '--no-warnings', '--no-check-certificate',
'-f', 'mp4', '-o',
'"%s"' % output_filename,
'"%s"' % (url_base + video_identifier)
]
command = ' '.join(command)
print(command)
attempts = 0
while True:
try:
subprocess.check_output(
command, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
attempts += 1
if attempts == num_attempts:
return status, 'Fail'
else:
break
# Check if the video was successfully saved.
status = os.path.exists(output_filename)
return status, 'Downloaded'
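# Minimal usage sketch (hypothetical 11-character id and output path, not part
# of the original script):
#
#   ok, log = download_clip('abcdefghijk', '/tmp/abcdefghijk.mp4')
#   print(ok, log)  # e.g. (True, 'Downloaded') or (False, 'Fail')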
def download_clip_wrapper(youtube_id, output_dir):
"""Wrapper for parallel processing purposes."""
# we do this to align with names in annotations
output_filename = os.path.join(output_dir, youtube_id + '.mp4')
if os.path.exists(output_filename):
status = tuple([youtube_id, True, 'Exists'])
return status
downloaded, log = download_clip(youtube_id, output_filename)
status = tuple([youtube_id, downloaded, log])
return status
def main(youtube_ids, output_dir, num_jobs=24):
# Creates folders where videos will be saved later.
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Download all clips.
if num_jobs == 1:
status_list = []
for index in youtube_ids:
status_list.append(download_clip_wrapper(index, output_dir))
else:
status_list = Parallel(n_jobs=num_jobs)(
delayed(download_clip_wrapper)(index, output_dir)
for index in youtube_ids)
# Save download report.
mmcv.dump(status_list, 'download_report.json')
if __name__ == '__main__':
    with open(f'{data_file}/tools/var_videos.txt', 'r') as f:
        youtube_ids = [s.strip() for s in f.readlines()]
main(youtube_ids, output_dir, 24)
|
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_container_engine_cluster_migrate_to_native_vcn_status_facts
short_description: Fetches details about a ClusterMigrateToNativeVcnStatus resource in Oracle Cloud Infrastructure
description:
- Fetches details about a ClusterMigrateToNativeVcnStatus resource in Oracle Cloud Infrastructure
- Get details on a cluster's migration to native VCN.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
cluster_id:
description:
- The OCID of the cluster.
type: str
aliases: ["id"]
required: true
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific cluster_migrate_to_native_vcn_status
oci_container_engine_cluster_migrate_to_native_vcn_status_facts:
# required
cluster_id: "ocid1.cluster.oc1..xxxxxxEXAMPLExxxxxx"
"""
RETURN = """
cluster_migrate_to_native_vcn_status:
description:
- ClusterMigrateToNativeVcnStatus resource
returned: on success
type: complex
contains:
time_decommission_scheduled:
description:
- The date and time the non-native VCN is due to be decommissioned.
returned: on success
type: str
sample: "2017-07-21T16:11:29Z"
state:
description:
- The current migration status of the cluster.
returned: on success
type: str
sample: IN_PROGRESS
sample: {
"time_decommission_scheduled": "2017-07-21T16:11:29Z",
"state": "IN_PROGRESS"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.container_engine import ContainerEngineClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class ClusterMigrateToNativeVcnStatusFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get"""
def get_required_params_for_get(self):
return [
"cluster_id",
]
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_cluster_migrate_to_native_vcn_status,
cluster_id=self.module.params.get("cluster_id"),
)
ClusterMigrateToNativeVcnStatusFactsHelperCustom = get_custom_class(
"ClusterMigrateToNativeVcnStatusFactsHelperCustom"
)
class ResourceFactsHelper(
ClusterMigrateToNativeVcnStatusFactsHelperCustom,
ClusterMigrateToNativeVcnStatusFactsHelperGen,
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(cluster_id=dict(aliases=["id"], type="str", required=True),)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="cluster_migrate_to_native_vcn_status",
service_client_class=ContainerEngineClient,
namespace="container_engine",
)
result = []
if resource_facts_helper.is_get():
result = resource_facts_helper.get()
else:
resource_facts_helper.fail()
module.exit_json(cluster_migrate_to_native_vcn_status=result)
if __name__ == "__main__":
main()
|
def average(array):
array = list(set(array))
return sum(array)/len(array)
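# Worked example (illustrative): average([2, 3, 3, 5]) first reduces the input
# to the distinct values {2, 3, 5}, so it returns 10/3 ≈ 3.3333 rather than the
# plain mean of all four entries (3.25).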
if __name__ == '__main__':
n = int(input())
arr = list(map(int, input().split()))
result = average(arr)
print(result)
|
#!env python3
from flask import Flask, request, redirect
from hashlib import sha256
import hmac
import base64
import time
import urllib
# allow for relative importing if run directly
if __name__ == "__main__":
from config import secrets, reports, listen_port
else:
from .config import secrets, reports, listen_port
app = Flask(__name__)
@app.route('/report/<report>')
def sign_report_url(report):
# check for a valid token
provided_token = request.args.get('token') or 'missing'
if provided_token != secrets.get('access_token'):
return "Missing or incorrect token provided"
# lookup report and generate URL from values
if report in reports:
this_report = reports.get(report)
# Generating the embed URL
mode_report_id = this_report.get('mode_report')
param_name = this_report.get('param_name')
param_value = request.args.get(
'account_id') or this_report.get('param_default_value')
do_iframe = request.args.get('iframe') or False
timestamp = str(int(time.time())) # current time in unix time
url = make_url('https://app.mode.com', secrets.get('mode_team'), 'reports',
mode_report_id, 'embed', access_key=secrets.get('mode_access_key'),
max_age=3600, **{param_name: param_value}, run='now', timestamp=timestamp)
else:
return f"Missing report {report}"
request_type = 'GET'
content_type = ''
# the MD5 digest of an empty content body, always the same, :shrug:
content_digest = '1B2M2Y8AsgTpgAmY7PhCfg=='
# signature fodder
request_string = ','.join(
[request_type, content_type, str(content_digest), url, timestamp])
signature = hmac.new(bytes(secrets.get('mode_access_secret'), 'utf-8'),
bytes(request_string, 'utf-8'), digestmod=sha256).hexdigest()
signed_url = '%s&signature=%s' % (url, signature)
if do_iframe is not False:
# return the signed URL as an iframe
return f"""
        <iframe src='{signed_url}' width='100%' height='100%' frameborder='0'></iframe>
"""
else:
# return the signed URL as a redirect
return redirect(signed_url, code=302)
def make_url(base_url, *res, **params):
url = base_url
for r in res:
url = '{}/{}'.format(url, r)
if params:
url = '{}?{}'.format(url, urllib.parse.urlencode(params))
return url
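# Illustrative call (placeholder team, report id and keys; not real values):
#
#   make_url('https://app.mode.com', 'myteam', 'reports', 'abc123', 'embed',
#            access_key='KEY', max_age=3600, run='now', timestamp='1600000000')
#   returns 'https://app.mode.com/myteam/reports/abc123/embed'
#           '?access_key=KEY&max_age=3600&run=now&timestamp=1600000000'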
@app.route('/status')
def status():
return 'Success'
if __name__ == "__main__":
app.run(host='0.0.0.0', port=listen_port)
|
#
# PySNMP MIB module CISCO-DYNAMIC-TEMPLATE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-DYNAMIC-TEMPLATE-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:39:09 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint")
CbpElementName, = mibBuilder.importSymbols("CISCO-CBP-TC-MIB", "CbpElementName")
DynamicTemplateName, DynamicTemplateType, DynamicTemplateTargetId, DynamicTemplateTargetType = mibBuilder.importSymbols("CISCO-DYNAMIC-TEMPLATE-TC-MIB", "DynamicTemplateName", "DynamicTemplateType", "DynamicTemplateTargetId", "DynamicTemplateTargetType")
UnicastRpfOptions, UnicastRpfType = mibBuilder.importSymbols("CISCO-IP-URPF-MIB", "UnicastRpfOptions", "UnicastRpfType")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
CiscoVrfName, = mibBuilder.importSymbols("CISCO-TC", "CiscoVrfName")
InterfaceIndexOrZero, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndexOrZero")
InetAddressIPv4, InetAddressIPv6, InetAddressPrefixLength = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressIPv4", "InetAddressIPv6", "InetAddressPrefixLength")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
MibIdentifier, Unsigned32, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, ModuleIdentity, Counter64, ObjectIdentity, TimeTicks, Bits, Counter32, Integer32, IpAddress, iso = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Unsigned32", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "ModuleIdentity", "Counter64", "ObjectIdentity", "TimeTicks", "Bits", "Counter32", "Integer32", "IpAddress", "iso")
RowStatus, TruthValue, TextualConvention, StorageType, MacAddress, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "TruthValue", "TextualConvention", "StorageType", "MacAddress", "DisplayString")
ciscoDynamicTemplateMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 784))
ciscoDynamicTemplateMIB.setRevisions(('2007-09-06 00:00',))
if mibBuilder.loadTexts: ciscoDynamicTemplateMIB.setLastUpdated('200709060000Z')
if mibBuilder.loadTexts: ciscoDynamicTemplateMIB.setOrganization('Cisco Systems, Inc.')
ciscoDynamicTemplateMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 0))
ciscoDynamicTemplateMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 1))
ciscoDynamicTemplateMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 2))
cdtBase = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1))
cdtCommonIf = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2))
cdtPpp = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3))
cdtEthernet = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 4))
cdtIpSubscriber = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 5))
cdtService = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6))
cdtSubscriberGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 7))
cdtTemplateTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 1), )
if mibBuilder.loadTexts: cdtTemplateTable.setStatus('current')
cdtTemplateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 1, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateName"))
if mibBuilder.loadTexts: cdtTemplateEntry.setStatus('current')
cdtTemplateName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 1, 1, 1), DynamicTemplateName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdtTemplateName.setStatus('current')
cdtTemplateStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 1, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtTemplateStatus.setStatus('current')
cdtTemplateStorage = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 1, 1, 3), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtTemplateStorage.setStatus('current')
cdtTemplateType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 1, 1, 4), DynamicTemplateType().clone('other')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtTemplateType.setStatus('current')
cdtTemplateSrc = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("derived", 2), ("local", 3), ("aaaUserProfile", 4), ("aaaServiceProfile", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdtTemplateSrc.setStatus('current')
cdtTemplateUsageCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 1, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdtTemplateUsageCount.setStatus('current')
cdtTemplateTargetTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 2), )
if mibBuilder.loadTexts: cdtTemplateTargetTable.setStatus('current')
cdtTemplateTargetEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 2, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateTargetType"), (0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateTargetId"))
if mibBuilder.loadTexts: cdtTemplateTargetEntry.setStatus('current')
cdtTemplateTargetType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 2, 1, 1), DynamicTemplateTargetType())
if mibBuilder.loadTexts: cdtTemplateTargetType.setStatus('current')
cdtTemplateTargetId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 2, 1, 2), DynamicTemplateTargetId())
if mibBuilder.loadTexts: cdtTemplateTargetId.setStatus('current')
cdtTemplateTargetStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 2, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtTemplateTargetStatus.setStatus('current')
cdtTemplateTargetStorage = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 2, 1, 4), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtTemplateTargetStorage.setStatus('current')
cdtTemplateAssociationTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 3), )
if mibBuilder.loadTexts: cdtTemplateAssociationTable.setStatus('current')
cdtTemplateAssociationEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 3, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateTargetType"), (0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateTargetId"), (0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateAssociationName"))
if mibBuilder.loadTexts: cdtTemplateAssociationEntry.setStatus('current')
cdtTemplateAssociationName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 3, 1, 1), DynamicTemplateName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdtTemplateAssociationName.setStatus('current')
cdtTemplateAssociationPrecedence = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 3, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdtTemplateAssociationPrecedence.setStatus('current')
cdtTemplateUsageTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 4), )
if mibBuilder.loadTexts: cdtTemplateUsageTable.setStatus('current')
cdtTemplateUsageEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 4, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateName"), (0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateUsageTargetType"), (0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateUsageTargetId"))
if mibBuilder.loadTexts: cdtTemplateUsageEntry.setStatus('current')
cdtTemplateUsageTargetType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 4, 1, 1), DynamicTemplateTargetType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdtTemplateUsageTargetType.setStatus('current')
cdtTemplateUsageTargetId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 4, 1, 2), DynamicTemplateTargetId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdtTemplateUsageTargetId.setStatus('current')
cdtTemplateCommonTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5), )
if mibBuilder.loadTexts: cdtTemplateCommonTable.setStatus('current')
cdtTemplateCommonEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateName"))
if mibBuilder.loadTexts: cdtTemplateCommonEntry.setStatus('current')
cdtCommonValid = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 1), Bits().clone(namedValues=NamedValues(("descr", 0), ("keepalive", 1), ("vrf", 2), ("addrPool", 3), ("ipv4AccessGroup", 4), ("ipv4Unreachables", 5), ("ipv6AccessGroup", 6), ("ipv6Unreachables", 7), ("srvSubControl", 8), ("srvRedirect", 9), ("srvAcct", 10), ("srvQos", 11), ("srvNetflow", 12)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonValid.setStatus('current')
cdtCommonDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonDescr.setStatus('current')
cdtCommonKeepaliveInt = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)).clone(10)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonKeepaliveInt.setStatus('current')
cdtCommonKeepaliveRetries = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)).clone(5)).setUnits('retries').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonKeepaliveRetries.setStatus('current')
cdtCommonVrf = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 5), CiscoVrfName()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonVrf.setStatus('current')
cdtCommonAddrPool = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 6), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonAddrPool.setStatus('current')
cdtCommonIpv4AccessGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 7), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonIpv4AccessGroup.setStatus('current')
cdtCommonIpv4Unreachables = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 8), TruthValue().clone('true')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonIpv4Unreachables.setStatus('current')
cdtCommonIpv6AccessGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 9), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonIpv6AccessGroup.setStatus('current')
cdtCommonIpv6Unreachables = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 10), TruthValue().clone('true')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonIpv6Unreachables.setStatus('current')
cdtCommonSrvSubControl = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 11), CbpElementName()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonSrvSubControl.setStatus('current')
cdtCommonSrvRedirect = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 12), CbpElementName()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonSrvRedirect.setStatus('current')
cdtCommonSrvAcct = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 13), CbpElementName()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonSrvAcct.setStatus('current')
cdtCommonSrvQos = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 14), CbpElementName()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonSrvQos.setStatus('current')
cdtCommonSrvNetflow = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 15), CbpElementName()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonSrvNetflow.setStatus('current')
cdtIfTemplateTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1), )
if mibBuilder.loadTexts: cdtIfTemplateTable.setStatus('current')
cdtIfTemplateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateName"))
if mibBuilder.loadTexts: cdtIfTemplateEntry.setStatus('current')
cdtIfValid = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 1), Bits().clone(namedValues=NamedValues(("mtu", 0), ("cdpEnable", 1), ("flowMonitor", 2), ("ipv4Unnumbered", 3), ("ipv4SubEnable", 4), ("ipv4Mtu", 5), ("ipv4TcpMssAdjust", 6), ("ipv4VerifyUniRpf", 7), ("ipv4VerifyUniRpfAcl", 8), ("ipv4VerifyUniRpfOpts", 9), ("ipv6Enable", 10), ("ipv6SubEnable", 11), ("ipv6TcpMssAdjust", 12), ("ipv6VerifyUniRpf", 13), ("ipv6VerifyUniRpfAcl", 14), ("ipv6VerifyUniRpfOpts", 15), ("ipv6NdPrefix", 16), ("ipv6NdValidLife", 17), ("ipv6NdPreferredLife", 18), ("ipv6NdOpts", 19), ("ipv6NdDadAttempts", 20), ("ipv6NdNsInterval", 21), ("ipv6NdReachableTime", 22), ("ipv6NdRaIntervalMax", 23), ("ipv6NdRaIntervalMin", 24), ("ipv6NdRaLife", 25), ("ipv6NdRaRouterPreference", 26)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfValid.setStatus('current')
cdtIfMtu = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 2), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(64, 65535), ))).setUnits('octets').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfMtu.setStatus('current')
cdtIfCdpEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 3), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfCdpEnable.setStatus('current')
cdtIfFlowMonitor = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 4), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfFlowMonitor.setStatus('current')
cdtIfIpv4Unnumbered = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 5), InterfaceIndexOrZero()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv4Unnumbered.setStatus('current')
cdtIfIpv4SubEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 6), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv4SubEnable.setStatus('current')
cdtIfIpv4Mtu = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 7), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(128, 65535), ))).setUnits('octets').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv4Mtu.setStatus('current')
cdtIfIpv4TcpMssAdjust = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 8), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(500, 1460), ))).setUnits('octets').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv4TcpMssAdjust.setStatus('current')
cdtIfIpv4VerifyUniRpf = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 9), UnicastRpfType().clone('disabled')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv4VerifyUniRpf.setStatus('current')
cdtIfIpv4VerifyUniRpfAcl = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 10), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv4VerifyUniRpfAcl.setStatus('current')
cdtIfIpv4VerifyUniRpfOpts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 11), UnicastRpfOptions()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv4VerifyUniRpfOpts.setStatus('current')
cdtIfIpv6Enable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 12), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6Enable.setStatus('current')
cdtIfIpv6SubEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 13), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6SubEnable.setStatus('current')
cdtIfIpv6TcpMssAdjust = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 14), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(500, 1460), ))).setUnits('octets').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6TcpMssAdjust.setStatus('current')
cdtIfIpv6VerifyUniRpf = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 15), UnicastRpfType().clone('disabled')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6VerifyUniRpf.setStatus('current')
cdtIfIpv6VerifyUniRpfAcl = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 16), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6VerifyUniRpfAcl.setStatus('current')
cdtIfIpv6VerifyUniRpfOpts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 17), UnicastRpfOptions()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6VerifyUniRpfOpts.setStatus('current')
cdtIfIpv6NdPrefix = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 18), InetAddressIPv6().clone(hexValue="00000000000000000000000000000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdPrefix.setStatus('current')
cdtIfIpv6NdPrefixLength = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 19), InetAddressPrefixLength()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdPrefixLength.setStatus('current')
cdtIfIpv6NdValidLife = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 20), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)).clone(2592000)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdValidLife.setStatus('current')
cdtIfIpv6NdPreferredLife = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 21), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)).clone(604800)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdPreferredLife.setStatus('current')
cdtIfIpv6NdOpts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 22), Bits().clone(namedValues=NamedValues(("advertise", 0), ("onlink", 1), ("router", 2), ("autoConfig", 3), ("advertisementInterval", 4), ("managedConfigFlag", 5), ("otherConfigFlag", 6), ("framedIpv6Prefix", 7), ("raSuppress", 8))).clone(namedValues=NamedValues(("advertise", 0), ("onlink", 1), ("router", 2), ("autoConfig", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdOpts.setStatus('current')
cdtIfIpv6NdDadAttempts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 23), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 600)).clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdDadAttempts.setStatus('current')
cdtIfIpv6NdNsInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 24), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1000, 3600000)).clone(1000)).setUnits('milliseconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdNsInterval.setStatus('current')
cdtIfIpv6NdReachableTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 25), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setUnits('milliseconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdReachableTime.setStatus('current')
cdtIfIpv6NdRaIntervalUnits = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 26), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("seconds", 1), ("milliseconds", 2))).clone('seconds')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdRaIntervalUnits.setStatus('current')
cdtIfIpv6NdRaIntervalMax = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 27), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdRaIntervalMax.setStatus('current')
cdtIfIpv6NdRaIntervalMin = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 28), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdRaIntervalMin.setStatus('current')
cdtIfIpv6NdRaLife = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 29), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295)).clone(1800)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdRaLife.setStatus('current')
cdtIfIpv6NdRouterPreference = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 30), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("high", 1), ("medium", 2), ("low", 3))).clone('medium')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdRouterPreference.setStatus('current')
cdtPppTemplateTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1), )
if mibBuilder.loadTexts: cdtPppTemplateTable.setStatus('current')
cdtPppTemplateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateName"))
if mibBuilder.loadTexts: cdtPppTemplateEntry.setStatus('current')
cdtPppValid = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 1), Bits().clone(namedValues=NamedValues(("valid", 0), ("accounting", 1), ("authentication", 2), ("autthenticationMethods", 3), ("authorization", 4), ("loopbackIgnore", 5), ("maxBadAuth", 6), ("maxConfigure", 7), ("maxFailure", 8), ("maxTerminate", 9), ("timeoutAuthentication", 10), ("timeoutRetry", 11), ("chapOpts", 12), ("chapHostname", 13), ("chapPassword", 14), ("msChapV1Opts", 15), ("msChapV1Hostname", 16), ("msChapV1Password", 17), ("msChapV2Opts", 18), ("msChapV2Hostname", 19), ("msChapV2Password", 20), ("papOpts", 21), ("papUsername", 22), ("papPassword", 23), ("eapOpts", 24), ("eapIdentity", 25), ("eapPassword", 26), ("ipcpAddrOption", 27), ("ipcpDnsOption", 28), ("ipcpDnsPrimary", 29), ("ipcpDnsSecondary", 30), ("ipcpWinsOption", 31), ("ipcpWinsPrimary", 32), ("ipcpWinsSecondary", 33), ("ipcpMaskOption", 34), ("ipcpMask", 35), ("peerDefIpAddrOpts", 36), ("peerDefIpAddrSrc", 37), ("peerDefIpAddr", 38)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppValid.setStatus('current')
cdtPppAccounting = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 2), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppAccounting.setStatus('current')
cdtPppAuthentication = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 3), Bits().clone(namedValues=NamedValues(("chap", 0), ("msChap", 1), ("msChapV2", 2), ("pap", 3), ("eap", 4), ("optional", 5), ("callin", 6), ("oneTime", 7)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppAuthentication.setStatus('current')
cdtPppAuthenticationMethods = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 4), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppAuthenticationMethods.setStatus('current')
cdtPppAuthorization = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 5), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppAuthorization.setStatus('current')
cdtPppLoopbackIgnore = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 6), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppLoopbackIgnore.setStatus('current')
cdtPppMaxBadAuth = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMaxBadAuth.setStatus('current')
cdtPppMaxConfigure = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 8), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)).clone(10)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMaxConfigure.setStatus('current')
cdtPppMaxFailure = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 9), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)).clone(5)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMaxFailure.setStatus('current')
cdtPppMaxTerminate = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 10), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)).clone(2)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMaxTerminate.setStatus('current')
cdtPppTimeoutAuthentication = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 11), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)).clone(10)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppTimeoutAuthentication.setStatus('current')
cdtPppTimeoutRetry = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 12), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)).clone(3)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppTimeoutRetry.setStatus('current')
cdtPppChapOpts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 13), Bits().clone(namedValues=NamedValues(("refuse", 0), ("callin", 1), ("wait", 2), ("encrypted", 3))).clone(namedValues=NamedValues(("wait", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppChapOpts.setStatus('current')
cdtPppChapHostname = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 14), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppChapHostname.setStatus('current')
cdtPppChapPassword = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 15), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppChapPassword.setStatus('current')
cdtPppMsChapV1Opts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 16), Bits().clone(namedValues=NamedValues(("refuse", 0), ("callin", 1), ("wait", 2), ("encrypted", 3))).clone(namedValues=NamedValues(("wait", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMsChapV1Opts.setStatus('current')
cdtPppMsChapV1Hostname = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 17), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMsChapV1Hostname.setStatus('current')
cdtPppMsChapV1Password = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 18), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMsChapV1Password.setStatus('current')
cdtPppMsChapV2Opts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 19), Bits().clone(namedValues=NamedValues(("refuse", 0), ("callin", 1), ("wait", 2), ("encrypted", 3))).clone(namedValues=NamedValues(("wait", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMsChapV2Opts.setStatus('current')
cdtPppMsChapV2Hostname = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 20), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMsChapV2Hostname.setStatus('current')
cdtPppMsChapV2Password = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 21), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMsChapV2Password.setStatus('current')
cdtPppPapOpts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 22), Bits().clone(namedValues=NamedValues(("refuse", 0), ("encrypted", 1)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPapOpts.setStatus('current')
cdtPppPapUsername = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 23), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPapUsername.setStatus('current')
cdtPppPapPassword = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 24), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPapPassword.setStatus('current')
cdtPppEapOpts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 25), Bits().clone(namedValues=NamedValues(("refuse", 0), ("callin", 1), ("wait", 2), ("local", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppEapOpts.setStatus('current')
cdtPppEapIdentity = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 26), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppEapIdentity.setStatus('current')
cdtPppEapPassword = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 27), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppEapPassword.setStatus('current')
cdtPppIpcpAddrOption = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 28), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("accept", 2), ("required", 3), ("unique", 4))).clone('other')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpAddrOption.setStatus('current')
cdtPppIpcpDnsOption = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 29), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("accept", 2), ("request", 3), ("reject", 4))).clone('other')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpDnsOption.setStatus('current')
cdtPppIpcpDnsPrimary = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 30), InetAddressIPv4().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpDnsPrimary.setStatus('current')
cdtPppIpcpDnsSecondary = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 31), InetAddressIPv4().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpDnsSecondary.setStatus('current')
cdtPppIpcpWinsOption = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 32), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("accept", 2), ("request", 3), ("reject", 4))).clone('other')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpWinsOption.setStatus('current')
cdtPppIpcpWinsPrimary = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 33), InetAddressIPv4().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpWinsPrimary.setStatus('current')
cdtPppIpcpWinsSecondary = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 34), InetAddressIPv4().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpWinsSecondary.setStatus('current')
cdtPppIpcpMaskOption = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 35), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("request", 2), ("reject", 3))).clone('other')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpMaskOption.setStatus('current')
cdtPppIpcpMask = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 36), InetAddressIPv4().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpMask.setStatus('current')
cdtPppPeerDefIpAddrOpts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 37), Bits().clone(namedValues=NamedValues(("ipAddrForced", 0), ("matchAaaPools", 1), ("staticPool", 2))).clone(namedValues=NamedValues(("ipAddrForced", 0)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPeerDefIpAddrOpts.setStatus('current')
cdtPppPeerDefIpAddrSrc = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 38), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("static", 1), ("pool", 2), ("dhcp", 3))).clone('pool')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPeerDefIpAddrSrc.setStatus('current')
cdtPppPeerDefIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 39), InetAddressIPv4().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPeerDefIpAddr.setStatus('current')
cdtPppPeerIpAddrPoolTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 2), )
if mibBuilder.loadTexts: cdtPppPeerIpAddrPoolTable.setStatus('current')
cdtPppPeerIpAddrPoolEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 2, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateName"), (0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPeerIpAddrPoolPriority"))
if mibBuilder.loadTexts: cdtPppPeerIpAddrPoolEntry.setStatus('current')
cdtPppPeerIpAddrPoolPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 2, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: cdtPppPeerIpAddrPoolPriority.setStatus('current')
cdtPppPeerIpAddrPoolStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 2, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPeerIpAddrPoolStatus.setStatus('current')
cdtPppPeerIpAddrPoolStorage = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 2, 1, 3), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPeerIpAddrPoolStorage.setStatus('current')
cdtPppPeerIpAddrPoolName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 2, 1, 4), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPeerIpAddrPoolName.setStatus('current')
cdtEthernetTemplateTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 4, 1), )
if mibBuilder.loadTexts: cdtEthernetTemplateTable.setStatus('current')
cdtEthernetTemplateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 4, 1, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateName"))
if mibBuilder.loadTexts: cdtEthernetTemplateEntry.setStatus('current')
cdtEthernetValid = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 4, 1, 1, 1), Bits().clone(namedValues=NamedValues(("bridgeDomain", 0), ("pppoeEnable", 1), ("ipv4PointToPoint", 2), ("macAddr", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtEthernetValid.setStatus('current')
cdtEthernetBridgeDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 4, 1, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtEthernetBridgeDomain.setStatus('current')
cdtEthernetPppoeEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 4, 1, 1, 3), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtEthernetPppoeEnable.setStatus('current')
cdtEthernetIpv4PointToPoint = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 4, 1, 1, 4), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtEthernetIpv4PointToPoint.setStatus('current')
cdtEthernetMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 4, 1, 1, 5), MacAddress().clone(hexValue="000000000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtEthernetMacAddr.setStatus('current')
cdtSrvTemplateTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6, 1), )
if mibBuilder.loadTexts: cdtSrvTemplateTable.setStatus('current')
cdtSrvTemplateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6, 1, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateName"))
if mibBuilder.loadTexts: cdtSrvTemplateEntry.setStatus('current')
cdtSrvValid = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6, 1, 1, 1), Bits().clone(namedValues=NamedValues(("networkSrv", 0), ("vpdnGroup", 1), ("sgSrvGroup", 2), ("sgSrvType", 3), ("multicast", 4)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtSrvValid.setStatus('current')
cdtSrvNetworkSrv = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("none", 2), ("local", 3), ("vpdn", 4))).clone('none')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtSrvNetworkSrv.setStatus('current')
cdtSrvVpdnGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6, 1, 1, 3), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtSrvVpdnGroup.setStatus('current')
cdtSrvSgSrvGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6, 1, 1, 4), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtSrvSgSrvGroup.setStatus('current')
cdtSrvSgSrvType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("primary", 1), ("secondary", 2))).clone('secondary')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtSrvSgSrvType.setStatus('current')
cdtSrvMulticast = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6, 1, 1, 6), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtSrvMulticast.setStatus('current')
ciscoDynamicTemplateMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 1))
ciscoDynamicTemplateMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 2))
ciscoDynamicTemplateR1Compliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 1, 1)).setObjects(("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtBaseGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtEthernetGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtSrvGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoDynamicTemplateR1Compliance = ciscoDynamicTemplateR1Compliance.setStatus('current')
cdtBaseGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 2, 1)).setObjects(("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateStatus"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateStorage"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateType"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateSrc"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateUsageCount"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateTargetStatus"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateTargetStorage"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateAssociationPrecedence"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateUsageTargetType"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateUsageTargetId"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cdtBaseGroup = cdtBaseGroup.setStatus('current')
cdtCommonGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 2, 2)).setObjects(("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonValid"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonDescr"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonKeepaliveInt"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonKeepaliveRetries"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonVrf"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonAddrPool"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonIpv4AccessGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonIpv4Unreachables"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonIpv6AccessGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonIpv6Unreachables"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonSrvSubControl"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonSrvRedirect"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonSrvAcct"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonSrvQos"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonSrvNetflow"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cdtCommonGroup = cdtCommonGroup.setStatus('current')
cdtIfGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 2, 3)).setObjects(("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfValid"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfMtu"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfCdpEnable"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfFlowMonitor"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv4Unnumbered"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv4SubEnable"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv4Mtu"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv4TcpMssAdjust"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv4VerifyUniRpf"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv4VerifyUniRpfAcl"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv4VerifyUniRpfOpts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6Enable"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6SubEnable"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6TcpMssAdjust"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6VerifyUniRpf"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6VerifyUniRpfAcl"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6VerifyUniRpfOpts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdPrefix"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdPrefixLength"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdValidLife"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdPreferredLife"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdOpts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdDadAttempts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdNsInterval"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdReachableTime"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdRaIntervalUnits"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdRaIntervalMax"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdRaIntervalMin"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdRaLife"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdRouterPreference"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cdtIfGroup = cdtIfGroup.setStatus('current')
cdtPppGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 2, 4)).setObjects(("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppValid"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppAccounting"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppAuthentication"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppAuthenticationMethods"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppAuthorization"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppLoopbackIgnore"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMaxBadAuth"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMaxConfigure"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMaxFailure"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMaxTerminate"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppTimeoutAuthentication"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppTimeoutRetry"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppChapOpts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppChapHostname"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppChapPassword"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMsChapV1Opts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMsChapV1Hostname"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMsChapV1Password"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMsChapV2Opts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMsChapV2Hostname"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMsChapV2Password"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPapOpts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPapUsername"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPapPassword"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppEapOpts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppEapIdentity"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppEapPassword"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpAddrOption"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpDnsOption"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpDnsPrimary"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpDnsSecondary"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpWinsOption"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpWinsPrimary"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpWinsSecondary"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpMaskOption"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpMask"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPeerDefIpAddrOpts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPeerDefIpAddrSrc"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPeerDefIpAddr"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPeerIpAddrPoolStatus"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPeerIpAddrPoolStorage"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPeerIpAddrPoolName"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cdtPppGroup = cdtPppGroup.setStatus('current')
cdtEthernetGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 2, 5)).setObjects(("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtEthernetValid"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtEthernetBridgeDomain"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtEthernetPppoeEnable"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtEthernetIpv4PointToPoint"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtEthernetMacAddr"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cdtEthernetGroup = cdtEthernetGroup.setStatus('current')
cdtSrvGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 2, 6)).setObjects(("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtSrvValid"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtSrvNetworkSrv"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtSrvVpdnGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtSrvSgSrvGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtSrvSgSrvType"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtSrvMulticast"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cdtSrvGroup = cdtSrvGroup.setStatus('current')
mibBuilder.exportSymbols("CISCO-DYNAMIC-TEMPLATE-MIB", cdtTemplateUsageEntry=cdtTemplateUsageEntry, cdtPppMaxTerminate=cdtPppMaxTerminate, cdtTemplateTargetId=cdtTemplateTargetId, cdtPppMsChapV1Hostname=cdtPppMsChapV1Hostname, cdtService=cdtService, cdtPppGroup=cdtPppGroup, cdtIfIpv6NdRouterPreference=cdtIfIpv6NdRouterPreference, cdtSrvSgSrvGroup=cdtSrvSgSrvGroup, cdtTemplateTargetEntry=cdtTemplateTargetEntry, cdtTemplateUsageTargetId=cdtTemplateUsageTargetId, ciscoDynamicTemplateMIBObjects=ciscoDynamicTemplateMIBObjects, cdtPppMaxBadAuth=cdtPppMaxBadAuth, cdtPppIpcpDnsPrimary=cdtPppIpcpDnsPrimary, cdtIfIpv6NdValidLife=cdtIfIpv6NdValidLife, cdtPppPeerIpAddrPoolStorage=cdtPppPeerIpAddrPoolStorage, cdtBaseGroup=cdtBaseGroup, cdtSrvTemplateTable=cdtSrvTemplateTable, cdtTemplateStatus=cdtTemplateStatus, cdtIfIpv4VerifyUniRpfAcl=cdtIfIpv4VerifyUniRpfAcl, cdtPppEapOpts=cdtPppEapOpts, cdtEthernetTemplateEntry=cdtEthernetTemplateEntry, cdtEthernetMacAddr=cdtEthernetMacAddr, cdtCommonKeepaliveInt=cdtCommonKeepaliveInt, cdtPppChapHostname=cdtPppChapHostname, cdtPppMsChapV2Password=cdtPppMsChapV2Password, cdtPppEapPassword=cdtPppEapPassword, cdtCommonIpv4Unreachables=cdtCommonIpv4Unreachables, cdtCommonIf=cdtCommonIf, cdtTemplateStorage=cdtTemplateStorage, cdtCommonKeepaliveRetries=cdtCommonKeepaliveRetries, cdtIfIpv6NdRaLife=cdtIfIpv6NdRaLife, cdtCommonSrvRedirect=cdtCommonSrvRedirect, cdtEthernetTemplateTable=cdtEthernetTemplateTable, cdtTemplateSrc=cdtTemplateSrc, cdtPppIpcpDnsSecondary=cdtPppIpcpDnsSecondary, cdtPppPeerDefIpAddr=cdtPppPeerDefIpAddr, cdtIfIpv6VerifyUniRpfAcl=cdtIfIpv6VerifyUniRpfAcl, cdtIfIpv6NdOpts=cdtIfIpv6NdOpts, cdtSrvMulticast=cdtSrvMulticast, cdtPppPeerIpAddrPoolTable=cdtPppPeerIpAddrPoolTable, ciscoDynamicTemplateR1Compliance=ciscoDynamicTemplateR1Compliance, cdtPppIpcpDnsOption=cdtPppIpcpDnsOption, cdtPppLoopbackIgnore=cdtPppLoopbackIgnore, cdtPppTimeoutAuthentication=cdtPppTimeoutAuthentication, cdtTemplateCommonTable=cdtTemplateCommonTable, cdtIfIpv6Enable=cdtIfIpv6Enable, PYSNMP_MODULE_ID=ciscoDynamicTemplateMIB, cdtTemplateEntry=cdtTemplateEntry, cdtPppMsChapV2Opts=cdtPppMsChapV2Opts, cdtIfIpv6TcpMssAdjust=cdtIfIpv6TcpMssAdjust, cdtEthernetIpv4PointToPoint=cdtEthernetIpv4PointToPoint, cdtCommonSrvNetflow=cdtCommonSrvNetflow, cdtTemplateAssociationPrecedence=cdtTemplateAssociationPrecedence, cdtIfIpv6NdDadAttempts=cdtIfIpv6NdDadAttempts, cdtIfTemplateEntry=cdtIfTemplateEntry, cdtIfIpv6VerifyUniRpf=cdtIfIpv6VerifyUniRpf, cdtIpSubscriber=cdtIpSubscriber, ciscoDynamicTemplateMIBGroups=ciscoDynamicTemplateMIBGroups, cdtPppPeerIpAddrPoolName=cdtPppPeerIpAddrPoolName, cdtIfFlowMonitor=cdtIfFlowMonitor, cdtTemplateUsageCount=cdtTemplateUsageCount, cdtPppEapIdentity=cdtPppEapIdentity, ciscoDynamicTemplateMIBNotifs=ciscoDynamicTemplateMIBNotifs, cdtCommonIpv4AccessGroup=cdtCommonIpv4AccessGroup, cdtSrvTemplateEntry=cdtSrvTemplateEntry, cdtPppTimeoutRetry=cdtPppTimeoutRetry, cdtCommonSrvAcct=cdtCommonSrvAcct, cdtIfIpv6NdPrefixLength=cdtIfIpv6NdPrefixLength, cdtPppTemplateTable=cdtPppTemplateTable, cdtPppAuthorization=cdtPppAuthorization, cdtPppIpcpAddrOption=cdtPppIpcpAddrOption, cdtPppMaxFailure=cdtPppMaxFailure, cdtPppValid=cdtPppValid, cdtTemplateTargetStorage=cdtTemplateTargetStorage, ciscoDynamicTemplateMIBConform=ciscoDynamicTemplateMIBConform, cdtTemplateAssociationTable=cdtTemplateAssociationTable, cdtIfIpv6NdReachableTime=cdtIfIpv6NdReachableTime, cdtIfGroup=cdtIfGroup, cdtSrvValid=cdtSrvValid, cdtPpp=cdtPpp, cdtPppTemplateEntry=cdtPppTemplateEntry, 
cdtSrvGroup=cdtSrvGroup, cdtIfIpv4Mtu=cdtIfIpv4Mtu, cdtCommonDescr=cdtCommonDescr, cdtTemplateUsageTable=cdtTemplateUsageTable, cdtIfIpv4TcpMssAdjust=cdtIfIpv4TcpMssAdjust, cdtIfIpv6VerifyUniRpfOpts=cdtIfIpv6VerifyUniRpfOpts, cdtSrvNetworkSrv=cdtSrvNetworkSrv, cdtPppAuthenticationMethods=cdtPppAuthenticationMethods, cdtPppChapOpts=cdtPppChapOpts, cdtCommonValid=cdtCommonValid, cdtCommonSrvQos=cdtCommonSrvQos, cdtIfIpv6NdRaIntervalMin=cdtIfIpv6NdRaIntervalMin, cdtEthernetGroup=cdtEthernetGroup, cdtTemplateTargetType=cdtTemplateTargetType, cdtTemplateName=cdtTemplateName, cdtCommonIpv6AccessGroup=cdtCommonIpv6AccessGroup, cdtPppMaxConfigure=cdtPppMaxConfigure, cdtIfIpv4VerifyUniRpf=cdtIfIpv4VerifyUniRpf, cdtPppIpcpMask=cdtPppIpcpMask, cdtIfIpv6SubEnable=cdtIfIpv6SubEnable, cdtIfIpv6NdPrefix=cdtIfIpv6NdPrefix, cdtIfValid=cdtIfValid, ciscoDynamicTemplateMIB=ciscoDynamicTemplateMIB, cdtEthernetBridgeDomain=cdtEthernetBridgeDomain, cdtPppIpcpWinsSecondary=cdtPppIpcpWinsSecondary, cdtCommonAddrPool=cdtCommonAddrPool, cdtPppMsChapV2Hostname=cdtPppMsChapV2Hostname, cdtIfIpv4SubEnable=cdtIfIpv4SubEnable, cdtPppMsChapV1Password=cdtPppMsChapV1Password, cdtTemplateAssociationName=cdtTemplateAssociationName, ciscoDynamicTemplateMIBCompliances=ciscoDynamicTemplateMIBCompliances, cdtTemplateCommonEntry=cdtTemplateCommonEntry, cdtPppPapOpts=cdtPppPapOpts, cdtPppMsChapV1Opts=cdtPppMsChapV1Opts, cdtTemplateType=cdtTemplateType, cdtIfTemplateTable=cdtIfTemplateTable, cdtPppIpcpMaskOption=cdtPppIpcpMaskOption, cdtSrvSgSrvType=cdtSrvSgSrvType, cdtPppPapUsername=cdtPppPapUsername, cdtBase=cdtBase, cdtIfIpv6NdRaIntervalUnits=cdtIfIpv6NdRaIntervalUnits, cdtTemplateTargetTable=cdtTemplateTargetTable, cdtTemplateTargetStatus=cdtTemplateTargetStatus, cdtPppPapPassword=cdtPppPapPassword, cdtPppAccounting=cdtPppAccounting, cdtIfIpv4Unnumbered=cdtIfIpv4Unnumbered, cdtCommonIpv6Unreachables=cdtCommonIpv6Unreachables, cdtPppChapPassword=cdtPppChapPassword, cdtSrvVpdnGroup=cdtSrvVpdnGroup, cdtSubscriberGroup=cdtSubscriberGroup, cdtTemplateAssociationEntry=cdtTemplateAssociationEntry, cdtPppPeerDefIpAddrOpts=cdtPppPeerDefIpAddrOpts, cdtEthernetValid=cdtEthernetValid, cdtIfCdpEnable=cdtIfCdpEnable, cdtIfIpv6NdRaIntervalMax=cdtIfIpv6NdRaIntervalMax, cdtPppIpcpWinsOption=cdtPppIpcpWinsOption, cdtPppPeerIpAddrPoolEntry=cdtPppPeerIpAddrPoolEntry, cdtEthernet=cdtEthernet, cdtPppPeerIpAddrPoolStatus=cdtPppPeerIpAddrPoolStatus, cdtCommonSrvSubControl=cdtCommonSrvSubControl, cdtIfMtu=cdtIfMtu, cdtPppPeerIpAddrPoolPriority=cdtPppPeerIpAddrPoolPriority, cdtPppAuthentication=cdtPppAuthentication, cdtCommonGroup=cdtCommonGroup, cdtTemplateUsageTargetType=cdtTemplateUsageTargetType, cdtEthernetPppoeEnable=cdtEthernetPppoeEnable, cdtIfIpv6NdPreferredLife=cdtIfIpv6NdPreferredLife, cdtPppPeerDefIpAddrSrc=cdtPppPeerDefIpAddrSrc, cdtCommonVrf=cdtCommonVrf, cdtTemplateTable=cdtTemplateTable, cdtIfIpv6NdNsInterval=cdtIfIpv6NdNsInterval, cdtIfIpv4VerifyUniRpfOpts=cdtIfIpv4VerifyUniRpfOpts, cdtPppIpcpWinsPrimary=cdtPppIpcpWinsPrimary)
|
# %% md
#### This notebook demonstrates the use of an optimized data pre-processing algorithm for bias mitigation
# - The debiasing function used is implemented in the `OptimPreproc` class.
# - Define parameters for optimized pre-processing specific to the dataset.
# - Divide the dataset into training, validation, and testing partitions.
# - Learn the optimized pre-processing transformation from the training data.
# - Train a classifier on the original training data.
# - Estimate the optimal classification threshold that maximizes balanced accuracy without fairness constraints (from the original validation set).
# - Determine the prediction scores for the original testing data. Using the estimated optimal classification threshold, compute accuracy and fairness metrics.
# - Transform the testing set using the learned probabilistic transformation.
# - Determine the prediction scores for the transformed testing data. Using the estimated optimal classification threshold, compute accuracy and fairness metrics.
#
# %%
# Load all necessary packages
import sys
sys.path.append("../")
import numpy as np
from tqdm import tqdm
from aif360.datasets import BinaryLabelDataset
from aif360.datasets import AdultDataset, GermanDataset, CompasDataset
from aif360.metrics import BinaryLabelDatasetMetric
from aif360.metrics import ClassificationMetric
from aif360.metrics.utils import compute_boolean_conditioning_vector
from aif360.algorithms.preprocessing.optim_preproc import OptimPreproc
from aif360.algorithms.preprocessing.optim_preproc_helpers.data_preproc_functions \
import load_preproc_data_adult, load_preproc_data_german, load_preproc_data_compas
from aif360.algorithms.preprocessing.optim_preproc_helpers.distortion_functions \
import get_distortion_adult, get_distortion_german, get_distortion_compas
from aif360.algorithms.preprocessing.optim_preproc_helpers.opt_tools import OptTools
from common_utils import compute_metrics
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from IPython.display import Markdown, display
import matplotlib.pyplot as plt
# %% md
#### Load dataset and specify options
# %%
# import dataset
dataset_used = "adult" # "adult", "german", "compas"
protected_attribute_used = 1 # 1, 2
if dataset_used == "adult":
if protected_attribute_used == 1:
privileged_groups = [{'sex': 1}]
unprivileged_groups = [{'sex': 0}]
dataset_orig = load_preproc_data_adult(['sex'])
else:
privileged_groups = [{'race': 1}]
unprivileged_groups = [{'race': 0}]
dataset_orig = load_preproc_data_adult(['race'])
optim_options = {
"distortion_fun": get_distortion_adult,
"epsilon": 0.05,
"clist": [0.99, 1.99, 2.99],
"dlist": [.1, 0.05, 0]
}
elif dataset_used == "german":
if protected_attribute_used == 1:
privileged_groups = [{'sex': 1}]
unprivileged_groups = [{'sex': 0}]
dataset_orig = load_preproc_data_german(['sex'])
optim_options = {
"distortion_fun": get_distortion_german,
"epsilon": 0.05,
"clist": [0.99, 1.99, 2.99],
"dlist": [.1, 0.05, 0]
}
else:
privileged_groups = [{'age': 1}]
unprivileged_groups = [{'age': 0}]
dataset_orig = load_preproc_data_german(['age'])
optim_options = {
"distortion_fun": get_distortion_german,
"epsilon": 0.1,
"clist": [0.99, 1.99, 2.99],
"dlist": [.1, 0.05, 0]
}
elif dataset_used == "compas":
if protected_attribute_used == 1:
privileged_groups = [{'sex': 1}]
unprivileged_groups = [{'sex': 0}]
dataset_orig = load_preproc_data_compas(['sex'])
else:
privileged_groups = [{'race': 1}]
unprivileged_groups = [{'race': 0}]
dataset_orig = load_preproc_data_compas(['race'])
optim_options = {
"distortion_fun": get_distortion_compas,
"epsilon": 0.05,
"clist": [0.99, 1.99, 2.99],
"dlist": [.1, 0.05, 0]
}
# random seed
np.random.seed(1)
# Split into train, validation, and test
dataset_orig_train, dataset_orig_vt = dataset_orig.split([0.7], shuffle=True)
dataset_orig_valid, dataset_orig_test = dataset_orig_vt.split([0.5], shuffle=True)
# %% md
#### Display dataset attributes
# %%
# print out some labels, names, etc.
display(Markdown("#### Training Dataset shape"))
print(dataset_orig_train.features.shape)
display(Markdown("#### Favorable and unfavorable labels"))
print(dataset_orig_train.favorable_label, dataset_orig_train.unfavorable_label)
display(Markdown("#### Protected attribute names"))
print(dataset_orig_train.protected_attribute_names)
display(Markdown("#### Privileged and unprivileged protected attribute values"))
print(dataset_orig_train.privileged_protected_attributes,
dataset_orig_train.unprivileged_protected_attributes)
display(Markdown("#### Dataset feature names"))
print(dataset_orig_train.feature_names)
# %% md
#### Metric for original training data
# %%
# Metric for the original dataset
metric_orig_train = BinaryLabelDatasetMetric(dataset_orig_train,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
display(Markdown("#### Original training dataset"))
print(
"Difference in mean outcomes between unprivileged and privileged groups = %f" % metric_orig_train.mean_difference())
# %% md
#### Fit the optimized pre-processor and transform the original training data
# %%
OP = OptimPreproc(OptTools, optim_options,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
OP = OP.fit(dataset_orig_train)
# Transform training data and align features
dataset_transf_train = OP.transform(dataset_orig_train, transform_Y=True)
dataset_transf_train = dataset_orig_train.align_datasets(dataset_transf_train)
# %% md
#### Metric with the transformed training data
# %%
metric_transf_train = BinaryLabelDatasetMetric(dataset_transf_train,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
display(Markdown("#### Transformed training dataset"))
print(
"Difference in mean outcomes between unprivileged and privileged groups = %f" % metric_transf_train.mean_difference())
# %% md
# Optimized preprocessing has reduced the disparity in favorable outcomes between the privileged and unprivileged groups (training data).
# %%
### Testing
assert np.abs(metric_transf_train.mean_difference()) < np.abs(metric_orig_train.mean_difference())
# %% md
#### Load, clean up original test data and compute metric
# %%
dataset_orig_test = dataset_transf_train.align_datasets(dataset_orig_test)
display(Markdown("#### Testing Dataset shape"))
print(dataset_orig_test.features.shape)
metric_orig_test = BinaryLabelDatasetMetric(dataset_orig_test,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
display(Markdown("#### Original test dataset"))
print(
"Difference in mean outcomes between unprivileged and privileged groups = %f" % metric_orig_test.mean_difference())
# %% md
#### Transform test data and compute metric
# %%
dataset_transf_test = OP.transform(dataset_orig_test, transform_Y=True)
dataset_transf_test = dataset_orig_test.align_datasets(dataset_transf_test)
metric_transf_test = BinaryLabelDatasetMetric(dataset_transf_test,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
display(Markdown("#### Transformed test dataset"))
print(
"Difference in mean outcomes between unprivileged and privileged groups = %f" % metric_transf_test.mean_difference())
# %% md
# Optimized preprocessing has reduced the disparity in favorable outcomes between the privileged and unprivileged groups (test data).
# %%
### Testing
assert np.abs(metric_transf_test.mean_difference()) < np.abs(metric_orig_test.mean_difference())
# %% md
### Train classifier on original data
# %%
# Logistic regression classifier and predictions
scale_orig = StandardScaler()
X_train = scale_orig.fit_transform(dataset_orig_train.features)
y_train = dataset_orig_train.labels.ravel()
lmod = LogisticRegression()
lmod.fit(X_train, y_train)
y_train_pred = lmod.predict(X_train)
# positive class index
pos_ind = np.where(lmod.classes_ == dataset_orig_train.favorable_label)[0][0]
dataset_orig_train_pred = dataset_orig_train.copy()
dataset_orig_train_pred.labels = y_train_pred
# %% md
#### Obtain scores for the original validation and test sets
# %%
dataset_orig_valid_pred = dataset_orig_valid.copy(deepcopy=True)
X_valid = scale_orig.transform(dataset_orig_valid_pred.features)
y_valid = dataset_orig_valid_pred.labels
dataset_orig_valid_pred.scores = lmod.predict_proba(X_valid)[:, pos_ind].reshape(-1, 1)
dataset_orig_test_pred = dataset_orig_test.copy(deepcopy=True)
X_test = scale_orig.transform(dataset_orig_test_pred.features)
y_test = dataset_orig_test_pred.labels
dataset_orig_test_pred.scores = lmod.predict_proba(X_test)[:, pos_ind].reshape(-1, 1)
# %% md
### Find the optimal classification threshold from the validation set
# %%
num_thresh = 100
ba_arr = np.zeros(num_thresh)
class_thresh_arr = np.linspace(0.01, 0.99, num_thresh)
for idx, class_thresh in enumerate(class_thresh_arr):
fav_inds = dataset_orig_valid_pred.scores > class_thresh
dataset_orig_valid_pred.labels[fav_inds] = dataset_orig_valid_pred.favorable_label
dataset_orig_valid_pred.labels[~fav_inds] = dataset_orig_valid_pred.unfavorable_label
classified_metric_orig_valid = ClassificationMetric(dataset_orig_valid,
dataset_orig_valid_pred,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
ba_arr[idx] = 0.5 * (classified_metric_orig_valid.true_positive_rate() \
+ classified_metric_orig_valid.true_negative_rate())
best_ind = np.where(ba_arr == np.max(ba_arr))[0][0]
best_class_thresh = class_thresh_arr[best_ind]
print("Best balanced accuracy (no fairness constraints) = %.4f" % np.max(ba_arr))
print("Optimal classification threshold (no fairness constraints) = %.4f" % best_class_thresh)
# %% md
### Predictions and fairness metrics from original test set
# %%
display(Markdown("#### Predictions from original testing data"))
bal_acc_arr_orig = []
disp_imp_arr_orig = []
avg_odds_diff_arr_orig = []
display(Markdown("#### Testing set"))
display(Markdown("##### Raw predictions - No fairness constraints"))
for thresh in tqdm(class_thresh_arr):
fav_inds = dataset_orig_test_pred.scores > thresh
dataset_orig_test_pred.labels[fav_inds] = dataset_orig_test_pred.favorable_label
dataset_orig_test_pred.labels[~fav_inds] = dataset_orig_test_pred.unfavorable_label
if (thresh == best_class_thresh):
disp = True
else:
disp = False
metric_test_bef = compute_metrics(dataset_orig_test, dataset_orig_test_pred,
unprivileged_groups, privileged_groups, disp=disp)
bal_acc_arr_orig.append(metric_test_bef["Balanced accuracy"])
avg_odds_diff_arr_orig.append(metric_test_bef["Average odds difference"])
disp_imp_arr_orig.append(metric_test_bef["Disparate impact"])
# %%
fig, ax1 = plt.subplots(figsize=(10, 7))
ax1.plot(class_thresh_arr, bal_acc_arr_orig)
ax1.set_xlabel('Classification Thresholds', fontsize=16, fontweight='bold')
ax1.set_ylabel('Balanced Accuracy', color='b', fontsize=16, fontweight='bold')
ax1.xaxis.set_tick_params(labelsize=14)
ax1.yaxis.set_tick_params(labelsize=14)
ax2 = ax1.twinx()
ax2.plot(class_thresh_arr, np.abs(1.0 - np.array(disp_imp_arr_orig)), color='r')
ax2.set_ylabel('abs(1-disparate impact)', color='r', fontsize=16, fontweight='bold')
ax2.axvline(np.array(class_thresh_arr)[best_ind],
color='k', linestyle=':')
ax2.yaxis.set_tick_params(labelsize=14)
ax2.grid(True)
disp_imp_at_best_bal_acc_orig = np.abs(1.0 - np.array(disp_imp_arr_orig))[best_ind]
# %% md
# ```abs(1 - disparate impact)``` must be close to zero for classifier predictions to be fair.
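# %%
# Illustrative check (added sketch, not part of the original notebook): disparate
# impact is the ratio P(pred = favorable | unprivileged) / P(pred = favorable | privileged),
# so abs(1 - disparate impact) measures the deviation from parity at a given threshold.
# This reuses disp_imp_at_best_bal_acc_orig computed in the cell above.
print("abs(1 - disparate impact) at the best threshold (original data) = %.4f"
      % disp_imp_at_best_bal_acc_orig)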
# %% md
### Train classifier on transformed data and obtain predictions with its fairness metrics
# %%
scale_transf = StandardScaler()
X_train = scale_transf.fit_transform(dataset_transf_train.features)
y_train = dataset_transf_train.labels.ravel()
lmod = LogisticRegression()
lmod.fit(X_train, y_train)
y_train_pred = lmod.predict(X_train)
dataset_transf_train_pred = dataset_transf_train.copy()
dataset_transf_train_pred.labels = y_train_pred
# %% md
### Predictions and fairness metrics from transformed test set
# %%
dataset_transf_test_pred = dataset_transf_test.copy(deepcopy=True)
X_test = scale_transf.transform(dataset_transf_test_pred.features)
y_test = dataset_transf_test_pred.labels
dataset_transf_test_pred.scores = lmod.predict_proba(X_test)[:, pos_ind].reshape(-1, 1)
# %%
display(Markdown("#### Predictions from transformed testing data"))
bal_acc_arr_transf = []
disp_imp_arr_transf = []
avg_odds_diff_arr_transf = []
display(Markdown("#### Testing set"))
display(Markdown("##### Transformed predictions - No fairness constraints"))
for thresh in tqdm(class_thresh_arr):
fav_inds = dataset_transf_test_pred.scores > thresh
dataset_transf_test_pred.labels[fav_inds] = dataset_transf_test_pred.favorable_label
dataset_transf_test_pred.labels[~fav_inds] = dataset_transf_test_pred.unfavorable_label
if (thresh == best_class_thresh):
disp = True
else:
disp = False
metric_test_bef = compute_metrics(dataset_transf_test, dataset_transf_test_pred,
unprivileged_groups, privileged_groups, disp=disp)
bal_acc_arr_transf.append(metric_test_bef["Balanced accuracy"])
avg_odds_diff_arr_transf.append(metric_test_bef["Average odds difference"])
disp_imp_arr_transf.append(metric_test_bef["Disparate impact"])
# %%
fig, ax1 = plt.subplots(figsize=(10, 7))
ax1.plot(class_thresh_arr, bal_acc_arr_transf)
ax1.set_xlabel('Classification Thresholds', fontsize=16, fontweight='bold')
ax1.set_ylabel('Balanced Accuracy', color='b', fontsize=16, fontweight='bold')
ax1.xaxis.set_tick_params(labelsize=14)
ax1.yaxis.set_tick_params(labelsize=14)
ax2 = ax1.twinx()
ax2.plot(class_thresh_arr, np.abs(1.0 - np.array(disp_imp_arr_transf)), color='r')
ax2.set_ylabel('abs(1-disparate impact)', color='r', fontsize=16, fontweight='bold')
ax2.axvline(np.array(class_thresh_arr)[best_ind],
color='k', linestyle=':')
ax2.yaxis.set_tick_params(labelsize=14)
ax2.grid(True)
disp_imp_at_best_bal_acc_transf = np.abs(1.0 - np.array(disp_imp_arr_transf))[best_ind]
# %% md
# ```abs(1 - disparate impact)``` must be close to zero for classifier predictions to be fair. This measure has improved for the classifier trained on the transformed data compared to the one trained on the original data.
# %%
### testing
assert disp_imp_at_best_bal_acc_transf < disp_imp_at_best_bal_acc_orig
# %% md
# Summary of Results
# We show the optimal classification thresholds, and the fairness and accuracy metrics.
# %% md
### Classification Thresholds
# | Dataset | Classification threshold |
# | - | - |
# | Adult | 0.2674 |
# | German | 0.6732 |
# | Compas | 0.5148 |
# %% md
### Fairness Metric: Disparate impact, Accuracy Metric: Balanced accuracy
#### Performance
# | Dataset | Sex (Acc-Bef) | Sex (Acc-Aft) | Sex (Fair-Bef) | Sex (Fair-Aft) | Race/Age (Acc-Bef) | Race/Age (Acc-Aft) | Race/Age (Fair-Bef) | Race/Age (Fair-Aft) |
# | - | - | - | - | - | - | - | - | - |
# | Adult (Test) | 0.7417 | 0.7021 | 0.2774 | 0.7729 | 0.7417 | 0.7408 | 0.4423 | 0.7645 |
# | German (Test) | 0.6524 | 0.5698 | 0.9948 | 1.0664 | 0.6524 | 0.6067 | 0.3824 | 0.8228 |
# | Compas (Test) | 0.6774 | 0.6606 | 0.6631 | 0.8085 | 0.6774 | 0.6790 | 0.6600 | 0.8430 |
# %%
|
from web3.contract import Contract
class PaymentLibFactory(Contract):
pass
def get_payment_lib(web3, address, abi):
return web3.eth.contract(
abi=abi,
address=address,
base_contract_factory_class=PaymentLibFactory,
)
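# Hypothetical usage sketch (the provider URL, contract address, and ABI below are
# placeholders, not values taken from this project):
#
#   from web3 import Web3, HTTPProvider
#   w3 = Web3(HTTPProvider('http://localhost:8545'))
#   payment_lib = get_payment_lib(w3, '0x' + '00' * 20, abi=[])
#   # payment_lib is a contract instance backed by PaymentLibFactory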
|
#!/usr/bin/env python
# Contributors:
# Christopher P. Barnes <senrabc@gmail.com>
# Andrei Sura: github.com/indera
# Mohan Das Katragadda <mohan.das142@gmail.com>
# Philip Chase <philipbchase@gmail.com>
# Ruchi Vivek Desai <ruchivdesai@gmail.com>
# Taeber Rapczak <taeber@ufl.edu>
# Nicholas Rejack <nrejack@ufl.edu>
# Josh Hanna <josh@hanna.io>
# Copyright (c) 2015, University of Florida
# All rights reserved.
#
# Distributed under the BSD 3-Clause License
# For full text of the BSD 3-Clause License see http://opensource.org/licenses/BSD-3-Clause
import sys
import argparse
import json
from redcap import Project, RedcapError
def main():
parser = argparse.ArgumentParser(
description='Read some data from a REDCap Project')
parser.add_argument(
'--token',
dest='token',
default='',
required=True,
help='Specify the authentication/authorization token that will provide access to the REDCap project')
parser.add_argument(
'--url',
dest='url',
default='',
required=True,
help='Specify the url of the REDCap server to connect with')
parser.add_argument(
'--verify_ssl',
dest='verify_ssl',
default=True,
help='Specify whether the SSL cert of the REDCap server should be checked')
parser.add_argument('-i', '--import_data', dest='import_data', default='',
help='Specify the input data file to load into REDCap')
parser.add_argument(
'-f',
'--forms',
dest='forms',
default='',
help='Specify a list of forms, separated by spaces, for which data should be returned.')
parser.add_argument(
'-t',
'--type',
choices=['json', 'csv', 'xml'],
dest='data_type',
default='csv',
help='Specify the file type used as input or output. Valid types: json, csv, xml')
parser.add_argument(
'--fields',
dest='fields',
default='',
help='Specify a list of fields, separated by spaces, for which data should be returned.')
parser.add_argument(
'-e',
'--events',
dest='events',
default='',
help='Specify a list of events, separated by spaces, for which data should be returned.')
parser.add_argument(
'-r',
'--records',
dest='records',
default='',
help='Specify a list of records, separated by spaces, for which data should be returned.')
# prepare the arguments we were given
args = vars(parser.parse_args())
# According to http://pycap.readthedocs.org/en/latest/api.html
# allowed data_types are: csv, json, xml
data_type = args['data_type']
    # Turn the 'verify_ssl' parameter into the truth value we need to make a
    # REDCap connection. The argparse default is the boolean True, so accept it
    # as well as the string 'y'; anything else disables certificate verification.
    if args['verify_ssl'] is True or args['verify_ssl'] == 'y':
        args['verify_ssl'] = True
    else:
        args['verify_ssl'] = False
# Attempt to connect to the REDCap project
try:
project = Project(args['url'], args['token'], "", args['verify_ssl'])
    except Exception:
print "Cannot connect to project at " + args['url'] + ' with token ' + args['token']
quit()
# either we export data...
if args['import_data'] == '':
my_forms = args['forms'].split()
my_fields = args['fields'].split()
my_events = args['events'].split()
my_records = args['records'].split()
data = project.export_records(
forms=my_forms,
format = data_type,
fields=my_fields,
events=my_events,
records=my_records,
event_name='unique')
if 'json' == data_type:
print json.dumps(data, ensure_ascii=False)
else:
print str(data)
else:
# ...or we import data
file = args['import_data']
try:
input = open(file, 'r')
except IOError:
print "Cannot open file " + file
quit()
if 'json' == data_type:
json_data = json.load(input)
response = project.import_records(json_data)
else:
response = project.import_records(input.read(), format = data_type)
print response
if __name__ == '__main__':
main()
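# Example invocation (hypothetical script name, token, and URL, shown only for
# illustration):
#   python read_redcap_data.py --token 0123456789ABCDEF \
#       --url https://redcap.example.org/api/ --verify_ssl y -t json -f demographics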
|
# -*- coding: utf-8 -*-
"""Various helper methods for PDF extraction.
"""
# This file contains mostly unused leftovers from pdf.py.
class Stream (object):
"""Wrapper around PdfMiner's stream class"""
def __init__(self, stream):
self.stream = stream
def get(self, attribute):
"""Returns a cleaned up PDF stream attribute value
"""
try:
value = self.stream[attribute]
return str(value).strip("/_").lower()
except Exception:
return None
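# Illustrative usage sketch (assumes `pdf_stream` is a PDFMiner stream object; the
# name is a placeholder and is not defined in this module):
#   wrapper = Stream(pdf_stream)
#   colorspace = wrapper.get("ColorSpace")  # e.g. "devicergb", or None on failure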
"""
from pdfminer.pdftypes import resolve1, PDFObjRef
from binascii import b2a_hex
import zlib
from pdfminer.ccitt import ccittfaxdecode
hexadecimal = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6,
'7': 7, '8': 8, '9': 9, 'a': 10, 'b': 11, 'c': 12,
'd': 13, 'e': 14, 'f': 15}
base85m4 = long(pow(85, 4))
base85m3 = long(pow(85, 3))
base85m2 = long(pow(85, 2))
def get_colormode(color_space, bits=None):
color_mode = None
if isinstance(color_space, list):
color_space_family = _clean_up_stream_attribute(color_space[0])
else:
color_space_family = _clean_up_stream_attribute(color_space)
if color_space_family == "indexed":
color_schema = color_space[1]
if isinstance(color_schema, PDFObjRef):
color_schema = color_schema.resolve()
if isinstance(color_schema, list):
color_schema = color_schema[0]
color_schema = _clean_up_stream_attribute(color_schema)
bits = color_space[2] or bits
if isinstance(bits, PDFObjRef):
bits = bits.resolve()
if color_schema == "devicegray" and bits == 1:
color_mode = "1"
elif color_schema == "devicegray" and bits == 8:
color_mode = "L"
elif color_schema == "iccbased":
# FIXME This just happens to work often enough. We should
# let PDFMiner take care of all this work, though, rather
# than implementníng all the logic (this is complex!) ourselves
color_mode = "L"
elif color_space_family == "pattern":
pass
elif color_space_family == "separation":
pass
elif color_space_family == "devicen":
pass
elif color_space_family == "calgray":
pass
elif color_space_family == "calrgb":
pass
elif color_space_family == "lab":
pass
elif color_space_family == "iccbased":
color_mode = "L"
elif color_space_family == "devicegray":
if bits == 8:
color_mode = "L"
else:
color_mode = "1"
elif color_space_family == "devicergb":
color_mode = "RGB"
elif color_space_family == "devicecmyk":
pass
return color_mode
def _clean_up_stream_attribute(self, attribute):
try:
return str(attribute).strip("/_").lower()
except Exception:
return None
def _decompress(self):
Decompress the image raw data in this image
if self._filter == 'asciihexdecode':
self._raw_data = self._asciihexdecode(self._raw_data)
elif self._filter == 'ascii85decode':
self._raw_data = self._ascii85decode(self._raw_data)
elif self._filter == 'flatedecode':
self._raw_data = zlib.decompress(self._raw_data)
elif self._filter == "ccittfaxdecode":
self._raw_data = ccittfaxdecode(self._raw_data, self._filter_params)
return None
def _determine_image_type(self, stream_first_4_bytes):
Find out the image file type based on the magic number
file_type = None
bytes_as_hex = b2a_hex(stream_first_4_bytes)
if bytes_as_hex.startswith('ffd8'):
file_type = 'jpeg'
elif bytes_as_hex == '89504e47':
file_type = 'png'
elif bytes_as_hex == '47494638':
file_type = 'gif'
elif bytes_as_hex.startswith('424d'):
file_type = 'bmp'
return file_type
def _clean_hexadecimal(self, a):
Read the string, converting the pairs of digits to
characters
b = ''
shift = 4
value = 0
try:
for i in a:
value = value | (hexadecimal[i] << shift)
shift = 4 - shift
if shift == 4:
b = b + chr(value)
value = 0
except ValueError:
raise PDFError("Problem with hexadecimal string %s" % a)
return b
def _asciihexdecode(self, text):
at = text.find('>')
return self._clean_hexadecimal(text[:at].lower())
def _ascii85decode(self, text):
end = text.find('~>')
new = []
i = 0
ch = 0
value = 0
while i < end:
if text[i] == 'z':
if ch != 0:
raise PDFError('Badly encoded ASCII85 format.')
new.append('\000\000\000\000')
ch = 0
value = 0
else:
v = ord(text[i])
if v >= 33 and v <= 117:
if ch == 0:
value = ((v - 33) * base85m4)
elif ch == 1:
value = value + ((v - 33) * base85m3)
elif ch == 2:
value = value + ((v - 33) * base85m2)
elif ch == 3:
value = value + ((v - 33) * 85)
elif ch == 4:
value = value + (v - 33)
c1 = int(value >> 24)
c2 = int((value >> 16) & 255)
c3 = int((value >> 8) & 255)
c4 = int(value & 255)
new.append(chr(c1) + chr(c2) + chr(c3) + chr(c4))
ch = (ch + 1) % 5
i = i + 1
if ch != 0:
c = chr(value >> 24) + chr((value >> 16) & 255) + \
chr((value >> 8) & 255) + chr(value & 255)
new.append(c[:ch - 1])
return "".join(new)
def _get_image(self):
Return an image from this image data.
temp_image = None
image_data = self._stream.get_data()
print "len(image_data)",
print len(image_data)
try:
        # Assume raw image data
# temp_image = Image.frombuffer(self.color_mode,
# (self.width, self.height),
# self._raw_data, "raw",
# self.color_mode, 0, 1)
temp_image = Image.frombuffer(self.color_mode,
(self.width, self.height),
image_data, "raw",
self.color_mode, 0, 1)
except Exception:
# Not raw image data.
# Can we make sense of this stream some other way?
try:
import StringIO
# temp_image = Image.open(StringIO.StringIO(self._raw_data))
temp_image = Image.open(StringIO.StringIO(image_data))
except Exception:
# PIL failed us. Try to print data to a file, and open it
# file_ext = self._determine_image_type(self._raw_data[0:4])
file_ext = self._determine_image_type(image_data[0:4])
if file_ext:
# TODO use tempfile
file_name = os_sep.join(["header", file_ext])
with open("temp/" + file_name, "w") as image_file:
# image_file.write(self._raw_data)
image_file.write(image_data)
temp_image = Image.open(image_file)
return temp_image or None
"""
"""
if "F" in image_obj.stream:
self._filter = self._clean_up_stream_attribute(image_obj.stream["F"])
else:
self._filter = self._clean_up_stream_attribute(image_obj.stream["Filter"])
if "DP" in image_obj.stream:
self._filter_params = image_obj.stream["DP"]
elif "DecodeParms" in image_obj.stream:
self._filter_params = image_obj.stream["DecodeParms"]
elif "FDecodeParms" in image_obj.stream:
self._filter_params = image_obj.stream["FDecodeParms"]
self._bits = image_obj.stream["BitsPerComponent"]
self._raw_data = image_obj.stream.get_rawdata()
if self._filter is not None:
self._decompress()
if "CS" in image_obj.stream:
self.colorspace = image_obj.stream["CS"]
elif "ColorSpace" in image_obj.stream:
self.colorspace = image_obj.stream["ColorSpace"]
else:
self.colorspace = "DeviceGray"
if isinstance(self.colorspace, PDFObjRef):
self.colorspace = self.colorspace.resolve()
self.color_mode = self.get_colormode(self.colorspace,
bits=self._bits)
if self.color_mode is None:
print self.colorspace
raise Exception("No method for handling colorspace")
"""
|
while True:
print('Please type your name')
name = input()
if name=='your name':
break
print('Thank you!')
|
#!/usr/bin/env python
"""
Populates blank uuid fields in datasets with randomly generated values
Going forward, these ids will be generated for all new datasets. This
script fixes datasets that were generated before the change.
"""
import sys, os, ConfigParser
import galaxy.app
from galaxy.util.bunch import Bunch
import galaxy.datatypes.tabular
from galaxy.model.orm.scripts import get_config
from galaxy import eggs
from galaxy.model import mapping
import uuid
eggs.require( "SQLAlchemy" )
from sqlalchemy import *
assert sys.version_info[:2] >= ( 2, 4 )
def main():
ini_file = sys.argv.pop(1)
config = get_config(ini_file)
model = mapping.init( ini_file, config['db_url'], create_tables = False )
for row in model.context.query( model.Dataset ):
if row.uuid is None:
row.uuid = uuid.uuid4()
print "Setting dataset:", row.id, " UUID to ", row.uuid
model.context.flush()
for row in model.context.query( model.Workflow ):
if row.uuid is None:
row.uuid = uuid.uuid4()
print "Setting Workflow:", row.id, " UUID to ", row.uuid
model.context.flush()
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import overload_bit
from . import attached_bit
class lsp_bit(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/global/lsp-bit. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines ISIS LSP Operational Bits.
"""
__slots__ = ("_path_helper", "_extmethods", "__overload_bit", "__attached_bit")
_yang_name = "lsp-bit"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__overload_bit = YANGDynClass(
base=overload_bit.overload_bit,
is_container="container",
yang_name="overload-bit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__attached_bit = YANGDynClass(
base=attached_bit.attached_bit,
is_container="container",
yang_name="attached-bit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"global",
"lsp-bit",
]
def _get_overload_bit(self):
"""
Getter method for overload_bit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit (container)
YANG Description: This container defines Overload Bit configuration.
"""
return self.__overload_bit
def _set_overload_bit(self, v, load=False):
"""
Setter method for overload_bit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_overload_bit is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_overload_bit() directly.
YANG Description: This container defines Overload Bit configuration.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=overload_bit.overload_bit,
is_container="container",
yang_name="overload-bit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """overload_bit must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=overload_bit.overload_bit, is_container='container', yang_name="overload-bit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__overload_bit = t
if hasattr(self, "_set"):
self._set()
def _unset_overload_bit(self):
self.__overload_bit = YANGDynClass(
base=overload_bit.overload_bit,
is_container="container",
yang_name="overload-bit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_attached_bit(self):
"""
Getter method for attached_bit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/attached_bit (container)
YANG Description: This container defines Attached Bit.
"""
return self.__attached_bit
def _set_attached_bit(self, v, load=False):
"""
Setter method for attached_bit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/attached_bit (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_attached_bit is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_attached_bit() directly.
YANG Description: This container defines Attached Bit.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=attached_bit.attached_bit,
is_container="container",
yang_name="attached-bit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """attached_bit must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=attached_bit.attached_bit, is_container='container', yang_name="attached-bit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__attached_bit = t
if hasattr(self, "_set"):
self._set()
def _unset_attached_bit(self):
self.__attached_bit = YANGDynClass(
base=attached_bit.attached_bit,
is_container="container",
yang_name="attached-bit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
overload_bit = __builtin__.property(_get_overload_bit, _set_overload_bit)
attached_bit = __builtin__.property(_get_attached_bit, _set_attached_bit)
_pyangbind_elements = OrderedDict(
[("overload_bit", overload_bit), ("attached_bit", attached_bit)]
)
from . import overload_bit
from . import attached_bit
class lsp_bit(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/global/lsp-bit. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines ISIS LSP Operational Bits.
"""
__slots__ = ("_path_helper", "_extmethods", "__overload_bit", "__attached_bit")
_yang_name = "lsp-bit"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__overload_bit = YANGDynClass(
base=overload_bit.overload_bit,
is_container="container",
yang_name="overload-bit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__attached_bit = YANGDynClass(
base=attached_bit.attached_bit,
is_container="container",
yang_name="attached-bit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"global",
"lsp-bit",
]
def _get_overload_bit(self):
"""
Getter method for overload_bit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit (container)
YANG Description: This container defines Overload Bit configuration.
"""
return self.__overload_bit
def _set_overload_bit(self, v, load=False):
"""
Setter method for overload_bit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_overload_bit is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_overload_bit() directly.
YANG Description: This container defines Overload Bit configuration.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=overload_bit.overload_bit,
is_container="container",
yang_name="overload-bit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """overload_bit must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=overload_bit.overload_bit, is_container='container', yang_name="overload-bit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__overload_bit = t
if hasattr(self, "_set"):
self._set()
def _unset_overload_bit(self):
self.__overload_bit = YANGDynClass(
base=overload_bit.overload_bit,
is_container="container",
yang_name="overload-bit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_attached_bit(self):
"""
Getter method for attached_bit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/attached_bit (container)
YANG Description: This container defines Attached Bit.
"""
return self.__attached_bit
def _set_attached_bit(self, v, load=False):
"""
Setter method for attached_bit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/attached_bit (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_attached_bit is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_attached_bit() directly.
YANG Description: This container defines Attached Bit.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=attached_bit.attached_bit,
is_container="container",
yang_name="attached-bit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """attached_bit must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=attached_bit.attached_bit, is_container='container', yang_name="attached-bit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__attached_bit = t
if hasattr(self, "_set"):
self._set()
def _unset_attached_bit(self):
self.__attached_bit = YANGDynClass(
base=attached_bit.attached_bit,
is_container="container",
yang_name="attached-bit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
overload_bit = __builtin__.property(_get_overload_bit, _set_overload_bit)
attached_bit = __builtin__.property(_get_attached_bit, _set_attached_bit)
_pyangbind_elements = OrderedDict(
[("overload_bit", overload_bit), ("attached_bit", attached_bit)]
)
|
import pickle
import sys
sys.path.append("..")
from model import ECO
import paddle.fluid as fluid
# Load the pickles: the pretrained model is larger than the 150 MB threshold, so it was split into 2 parts that are merged back together here
f0 = open('seg0.pkl', 'rb')
f1 = open('seg1.pkl', 'rb')
model_out = dict()
model_0 = pickle.load(f0)
model_1 = pickle.load(f1)
for key in model_0:
    model_out[key] = model_0[key]
for key in model_1:
    model_out[key] = model_1[key]
with fluid.dygraph.guard():
paddle_model = ECO.ECO(num_classes=101, num_segments=24)
paddle_model.load_dict(model_out)
fluid.dygraph.save_dygraph(paddle_model.state_dict(), 'ECO_FULL_RGB__seg16')
print('finished')
|
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for patron.cert.rpcapi
"""
import contextlib
import mock
from oslo_config import cfg
from patron.cert import rpcapi as cert_rpcapi
from patron import context
from patron import test
CONF = cfg.CONF
class CertRpcAPITestCase(test.NoDBTestCase):
def _test_cert_api(self, method, **kwargs):
ctxt = context.RequestContext('fake_user', 'fake_project')
rpcapi = cert_rpcapi.CertAPI()
self.assertIsNotNone(rpcapi.client)
self.assertEqual(rpcapi.client.target.topic, CONF.cert_topic)
orig_prepare = rpcapi.client.prepare
with contextlib.nested(
mock.patch.object(rpcapi.client, 'call'),
mock.patch.object(rpcapi.client, 'prepare'),
mock.patch.object(rpcapi.client, 'can_send_version'),
) as (
rpc_mock, prepare_mock, csv_mock
):
prepare_mock.return_value = rpcapi.client
rpc_mock.return_value = 'foo'
csv_mock.side_effect = (
lambda v: orig_prepare().can_send_version())
retval = getattr(rpcapi, method)(ctxt, **kwargs)
self.assertEqual(retval, rpc_mock.return_value)
prepare_mock.assert_called_once_with()
rpc_mock.assert_called_once_with(ctxt, method, **kwargs)
def test_revoke_certs_by_user(self):
self._test_cert_api('revoke_certs_by_user', user_id='fake_user_id')
def test_revoke_certs_by_project(self):
self._test_cert_api('revoke_certs_by_project',
project_id='fake_project_id')
def test_revoke_certs_by_user_and_project(self):
self._test_cert_api('revoke_certs_by_user_and_project',
user_id='fake_user_id',
project_id='fake_project_id')
def test_generate_x509_cert(self):
self._test_cert_api('generate_x509_cert',
user_id='fake_user_id',
project_id='fake_project_id')
def test_fetch_ca(self):
self._test_cert_api('fetch_ca', project_id='fake_project_id')
def test_fetch_crl(self):
self._test_cert_api('fetch_crl', project_id='fake_project_id')
def test_decrypt_text(self):
self._test_cert_api('decrypt_text',
project_id='fake_project_id', text='blah')
|
from toontown.toonbase.ToonPythonUtil import Functor
from otp.level import LevelMgrBase
class LevelMgr(LevelMgrBase.LevelMgrBase):
def __init__(self, level, entId):
LevelMgrBase.LevelMgrBase.__init__(self, level, entId)
self.geom = loader.loadModel(self.modelFilename)
if not self.geom:
import pdb
pdb.set_trace()
self.zoneNums = []
self.level.zoneNum2zoneId = {}
self.level.zoneId2zoneNum = {}
self.accept(self.level.getEntityOfTypeCreateEvent('zone'), self.handleZoneCreated)
def destroy(self):
del self.level.zoneIds
del self.level.zoneId2zoneNum
del self.level.zoneNum2zoneId
self.geom.removeNode()
del self.geom
LevelMgrBase.LevelMgrBase.destroy(self)
def handleZoneCreated(self, entId):
zoneEnt = self.level.getEntity(entId)
self.zoneNums.append(zoneEnt.entId)
self.privAssignZoneIds()
self.accept(self.level.getEntityDestroyEvent(entId), Functor(self.handleZoneDestroy, entId))
def handleZoneDestroy(self, entId):
zoneEnt = self.level.getEntity(entId)
del self.level.zoneId2zoneNum[self.level.zoneNum2zoneId[zoneEnt.entId]]
del self.level.zoneNum2zoneId[zoneEnt.entId]
self.zoneNums.remove(zoneEnt.entId)
self.privAssignZoneIds()
def privAssignZoneIds(self):
self.zoneNums.sort()
for i in xrange(len(self.zoneNums)):
zoneNum = self.zoneNums[i]
zoneEnt = self.level.getEntity(zoneNum)
zoneId = self.level.zoneIds[i]
zoneEnt.setZoneId(zoneId)
self.level.zoneNum2zoneId[zoneNum] = zoneId
self.level.zoneId2zoneNum[zoneId] = zoneNum
|
"""
`pwmio` - Support for PWM based protocols
===========================================================
See `CircuitPython:pwmio` in CircuitPython for more details.
Not supported by all boards.
* Author(s): Melissa LeBlanc-Williams
"""
import sys
from adafruit_blinka.agnostic import detector
# pylint: disable=unused-import
if detector.board.any_raspberry_pi:
from adafruit_blinka.microcontroller.bcm283x.pulseio.PWMOut import PWMOut
elif detector.board.any_coral_board:
from adafruit_blinka.microcontroller.generic_linux.sysfs_pwmout import PWMOut
elif detector.board.any_giant_board:
from adafruit_blinka.microcontroller.generic_linux.sysfs_pwmout import PWMOut
elif detector.board.any_beaglebone:
from adafruit_blinka.microcontroller.am335x.sysfs_pwmout import PWMOut
elif detector.board.any_rock_pi_board:
from adafruit_blinka.microcontroller.rockchip.PWMOut import PWMOut
elif detector.board.binho_nova:
from adafruit_blinka.microcontroller.nova.pwmout import PWMOut
elif detector.board.greatfet_one:
from adafruit_blinka.microcontroller.nxp_lpc4330.pwmout import PWMOut
elif detector.board.any_lubancat:
from adafruit_blinka.microcontroller.generic_linux.sysfs_pwmout import PWMOut
elif detector.board.pico_u2if:
from adafruit_blinka.microcontroller.rp2040_u2if.pwmio import PWMOut
elif (
detector.board.feather_u2if
or detector.board.qtpy_u2if
or detector.board.itsybitsy_u2if
or detector.board.macropad_u2if
or detector.board.qt2040_trinkey_u2if
):
from adafruit_blinka.microcontroller.rp2040_u2if.pwmio import PWMOut
elif "sphinx" in sys.modules:
pass
else:
raise NotImplementedError("pwmio not supported for this board.")
|
# Generated by Django 2.1.1 on 2018-09-03 13:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('CV', '0004_auto_20180903_1229'),
]
operations = [
migrations.RemoveField(
model_name='cv',
name='availability_string',
),
migrations.AddField(
model_name='cv',
name='availability_offset_number',
field=models.IntegerField(blank=True, help_text="if 'Availability type' is 'offset'", null=True),
),
migrations.AddField(
model_name='cv',
name='availability_offset_quantity',
field=models.CharField(choices=[('DAY', 'day'), ('MONTH', 'month')], default='MONTH', help_text="if 'Availability type' is 'offset'", max_length=64),
),
migrations.AlterField(
model_name='cv',
name='availability_date',
field=models.DateField(blank=True, help_text="if 'Availability type' is 'at'", null=True),
),
migrations.AlterField(
model_name='cv',
name='availability_type',
field=models.CharField(choices=[('NOW', 'now'), ('OFFSET', 'offset'), ('AT', 'at')], default='NOW', max_length=64),
),
]
|
#!/usr/bin/env python
"""
Example code for properties
NOTE: if your getters and setters are this simple: don't do this!
"""
class C:
def __init__(self):
self._x = None
@property
def x(self):
print("in getter")
return self._x
@x.setter
def x(self, value):
print("in setter", value)
self._x = value
@x.deleter
def x(self):
del self._x
if __name__ == "__main__":
c = C()
c.x = 5
print(c.x)
|
from chocs.http_error import HttpError
from chocs.http_request import HttpRequest
from chocs.http_response import HttpResponse
from chocs.routing import Router
from chocs.serverless.serverless import ServerlessFunction
from .middleware import Middleware, MiddlewareHandler
class ApplicationMiddleware(Middleware):
def __init__(self, router: Router):
self.router = router
def handle(self, request: HttpRequest, next: MiddlewareHandler) -> HttpResponse:
try:
handler = request.attributes["__handler__"]
response: HttpResponse
if isinstance(handler, ServerlessFunction):
response = handler.function(request)
else:
response = handler(request)
return response
except HttpError as error:
return HttpResponse(status=error.status_code, body=error.http_message)
__all__ = ["ApplicationMiddleware"]
|
# Source: https://github.com/tracy-talent/curriculum/blob/ecaf850cb7932f23b5d7c0323e80a9f9a408bef6/Machine%20Learning/Dimension%20Reduction/src/ISOMAP.py
from numpy import *
from hw4.libs.metrics import _1NN
from queue import PriorityQueue
from os import path
import time
def loadData(filename):
content = open(filename).readlines()
# Split data and labels
data = [list(map(float32, line.strip().split(",")[:-1])) for line in content]
tag = [list(map(int, line.strip().split(",")[-1:])) for line in content]
return mat(data), mat(tag)
def calc_distance(dataMat):
dataSize = len(dataMat)
Euc_distanceMat = zeros([dataSize, dataSize], float32)
for i in range(dataSize):
for j in range(dataSize):
Euc_distanceMat[i][j] = linalg.norm(dataMat[i] - dataMat[j])
return Euc_distanceMat
# Adjacency table edge
class edge(object):
def __init__(self, cost, to):
self.cost = cost  # edge weight
self.to = to  # destination vertex
def __lt__(self, other):
return self.cost < other.cost
# dijkstra (Shortest path algorithm)
# @param{dist: Distance matrix, graph: Adjacency, src: Source}
def dijkstra(dist, graph, src):
que = PriorityQueue()
que.put(edge(0, src))
while not que.empty():
p = que.get()
v = p.to
if dist[src][v] < p.cost:
continue
for i in range(len(graph[v])):
if dist[src][graph[v][i].to] > dist[src][v] + graph[v][i].cost:
dist[src][graph[v][i].to] = dist[src][v] + graph[v][i].cost
que.put(edge(dist[src][graph[v][i].to], graph[v][i].to))
# @param{dist:Distance matrix,dims: Dimensionality reduction / Number of Components}
# return: the dimensionality-reduced matrix
def mds(dist, dims):
dataSize = len(dist)
if dims > dataSize:
print('Target dimensionality %d is greater than the size of the distance matrix %d' % (dims, dataSize))
return
dist_i_dot_2 = zeros([dataSize], float32)
dist_dot_j_2 = zeros([dataSize], float32)
dist_dot_dot_2 = 0.0
bMat = zeros([dataSize, dataSize], float32)
for i in range(dataSize):
for j in range(dataSize):
dist_i_j_2 = square(dist[i][j])
dist_i_dot_2[i] += dist_i_j_2
dist_dot_j_2[j] += dist_i_j_2 / dataSize
dist_dot_dot_2 += dist_i_j_2
dist_i_dot_2[i] /= dataSize
dist_dot_dot_2 /= square(dataSize)
for i in range(dataSize):
for j in range(dataSize):
dist_i_j_2 = square(dist[i][j])
bMat[i][j] = -0.5 * (dist_i_j_2 - dist_i_dot_2[i] - dist_dot_j_2[j] + dist_dot_dot_2)
# Eigenvalues and eigenvectors
eigVals, eigVecs = linalg.eig(bMat)
# Index for large eigenvalues
eigVals_Idx = argpartition(eigVals, -dims)[:-(dims + 1):-1]
# Constructing a Diagonal Matrix of Eigenvalues
eigVals_Diag = diag(maximum(eigVals[eigVals_Idx], 0.0))
return matmul(eigVecs[:, eigVals_Idx], sqrt(eigVals_Diag))
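# Note: bMat computed above is the double-centered Gram matrix of classical MDS,
# B = -1/2 * (D^2 - row_mean(D^2) - col_mean(D^2) + grand_mean(D^2));
# its top `dims` eigenvectors, scaled by the square roots of the corresponding
# eigenvalues, give the low-dimensional embedding returned above.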
# param{dataMat: Image Dataset (n_data, n_dimension),dims: Number of Components,KNN_K: Number of Neighbours}
# return:Dimensionality-reduced matrix
def isomap(dataMat, dims, KNN_K):
set_printoptions(threshold=None)
inf = float('inf')
dataSize = len(dataMat)
if KNN_K >= dataSize:
raise ValueError("KNN_K can be at most the number of data points - 1: %d" % (dataSize - 1))
Euc_distanceMat = calc_distance(dataMat)
# Setup KNN Connection diagram
knn_distanceMat = ones([dataSize, dataSize], float32) * inf
for i in range(dataSize):
knn_disIdx = argpartition(Euc_distanceMat[i], KNN_K)[:KNN_K + 1]
knn_distanceMat[i][knn_disIdx] = Euc_distanceMat[i][knn_disIdx]
for j in knn_disIdx:
knn_distanceMat[j][i] = knn_distanceMat[i][j]
# Build adjacency list
adjacencyTable = []
for i in range(dataSize):
edgelist = []
for j in range(dataSize):
if knn_distanceMat[i][j] != inf:
edgelist.append(edge(knn_distanceMat[i][j], j))
adjacencyTable.append(edgelist)
# dijkstra: Find the shortest
# dist: Store the shortest distance between any two points
dist = ones([dataSize, dataSize], float32) * inf
for i in range(dataSize):
dist[i][i] = 0.0
dijkstra(dist, adjacencyTable, i)
return mds(dist, dims)
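# Minimal usage sketch (the file name below is a hypothetical placeholder; the
# expected format is one sample per line with comma-separated features followed
# by an integer label, as parsed by loadData above):
#     data, labels = loadData("dataset.csv")
#     embedding = isomap(data, dims=2, KNN_K=10)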
|
{
"targets": [
{
"target_name": "node-simconnect",
"sources": [ "src/addon.cc" ],
"include_dirs": [
"SimConnect/Inc",
"<!(node -e \"require('nan')\")"
],
"link_settings": {
"libraries": [
"../SimConnect/lib/SimConnect"
]
},
'configurations': {
'Debug': {
'msvs_settings': {
'VCCLCompilerTool': {
'RuntimeLibrary': '3' # /MDd
}
}
},
'Release': {
'msvs_settings': {
'VCCLCompilerTool': {
'RuntimeLibrary': '2' # /MD
}
}
}
}
}
]
}
|
import unittest
import flavio
from wilson import Wilson
from .Vllgamma import *
### implement test
class TestVllgamma(unittest.TestCase):
def test_np(self):
wc,br=Wilson({'CVRR_muecc' : 1e-2},scale=2.,eft='WET',basis='flavio'),8.3949e-6
self.assertAlmostEqual(flavio.np_prediction('BR(J/psi->muegamma)',wc), br,delta=0.01*br)
self.assertAlmostEqual(flavio.np_prediction('R(J/psi->muegamma)',wc),flavio.np_prediction('BR(J/psi->muegamma)',wc)/flavio.np_prediction('BR(J/psi->ee)',wc),delta=0.001*br)
wc,br=Wilson({'CSRR_muecc' : 1e-2},scale=2.,eft='WET',basis='flavio'),6.2935e-6
self.assertAlmostEqual(flavio.np_prediction('BR(J/psi->muegamma)',wc), br,delta=0.01*br)
self.assertAlmostEqual(flavio.np_prediction('R(J/psi->muegamma)',wc),flavio.np_prediction('BR(J/psi->muegamma)',wc)/flavio.np_prediction('BR(J/psi->ee)',wc),delta=0.001*br)
wc,br=Wilson({'CVRR_tauecc' : 1e-2},scale=2.,eft='WET',basis='flavio'),1.2887e-6
self.assertAlmostEqual(flavio.np_prediction('BR(J/psi->tauegamma)',wc), br,delta=0.01*br)
self.assertAlmostEqual(flavio.np_prediction('R(J/psi->tauegamma)',wc),flavio.np_prediction('BR(J/psi->tauegamma)',wc)/flavio.np_prediction('BR(J/psi->ee)',wc),delta=0.001*br)
wc,br=Wilson({'CSRR_tauecc' : 1e-2},scale=2.,eft='WET',basis='flavio'),9.1097e-7
self.assertAlmostEqual(flavio.np_prediction('BR(J/psi->tauegamma)',wc), br,delta=0.01*br)
self.assertAlmostEqual(flavio.np_prediction('R(J/psi->tauegamma)',wc),flavio.np_prediction('BR(J/psi->tauegamma)',wc)/flavio.np_prediction('BR(J/psi->ee)',wc),delta=0.001*br)
|
#!/usr/bin/env python
import zmq
import numpy as np
import numpy.matlib
import importlib
from collections import defaultdict
from fastcluster import linkage_vector
import selfdrive.messaging as messaging
from selfdrive.services import service_list
from selfdrive.controls.lib.latcontrol_helpers import calc_lookahead_offset
from selfdrive.controls.lib.pathplanner import PathPlanner
from selfdrive.controls.lib.radar_helpers import Track, Cluster, fcluster, \
RDR_TO_LDR, NO_FUSION_SCORE
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.swaglog import cloudlog
from cereal import car
from common.params import Params
from common.realtime import set_realtime_priority, Ratekeeper
from common.kalman.ekf import EKF, SimpleSensor
DEBUG = False
#vision point
DIMSV = 2
XV, SPEEDV = 0, 1
VISION_POINT = -1
class EKFV1D(EKF):
def __init__(self):
super(EKFV1D, self).__init__(False)
self.identity = numpy.matlib.identity(DIMSV)
self.state = np.matlib.zeros((DIMSV, 1))
self.var_init = 1e2 # ~ model variance when probability is 70%, so good starting point
self.covar = self.identity * self.var_init
self.process_noise = np.matlib.diag([0.5, 1])
def calc_transfer_fun(self, dt):
tf = np.matlib.identity(DIMSV)
tf[XV, SPEEDV] = dt
tfj = tf
return tf, tfj
# fuses camera and radar data for best lead detection
def radard_thread(gctx=None):
set_realtime_priority(2)
# wait for stats about the car to come in from controls
cloudlog.info("radard is waiting for CarParams")
CP = car.CarParams.from_bytes(Params().get("CarParams", block=True))
mocked = True #CP.carName == "mock"
VM = VehicleModel(CP)
cloudlog.info("radard got CarParams")
# import the radar from the fingerprint
cloudlog.info("radard is importing %s", CP.carName)
RadarInterface = importlib.import_module('selfdrive.car.%s.radar_interface' % CP.carName).RadarInterface
context = zmq.Context()
# *** subscribe to features and model from visiond
poller = zmq.Poller()
model = messaging.sub_sock(context, service_list['model'].port, conflate=True, poller=poller)
live100 = messaging.sub_sock(context, service_list['live100'].port, conflate=True, poller=poller)
PP = PathPlanner()
RI = RadarInterface(CP)
last_md_ts = 0
last_l100_ts = 0
# *** publish live20 and liveTracks
live20 = messaging.pub_sock(context, service_list['live20'].port)
liveTracks = messaging.pub_sock(context, service_list['liveTracks'].port)
path_x = np.arange(0.0, 140.0, 0.1) # 140 meters is max
# Time-alignment
rate = 20. # model and radar are both at 20Hz
tsv = 1./rate
v_len = 20 # how many speed data points to remember for t alignment with rdr data
active = 0
steer_angle = 0.
steer_override = False
tracks = defaultdict(dict)
# Kalman filter stuff:
ekfv = EKFV1D()
speedSensorV = SimpleSensor(XV, 1, 2)
# v_ego
v_ego = None
v_ego_array = np.zeros([2, v_len])
v_ego_t_aligned = 0.
rk = Ratekeeper(rate, print_delay_threshold=np.inf)
while 1:
rr = RI.update()
ar_pts = {}
for pt in rr.points:
ar_pts[pt.trackId] = [pt.dRel + RDR_TO_LDR, pt.yRel, pt.vRel, pt.measured]
# receive the live100s
l100 = None
md = None
for socket, event in poller.poll(0):
if socket is live100:
l100 = messaging.recv_one(socket)
elif socket is model:
md = messaging.recv_one(socket)
if l100 is not None:
active = l100.live100.active
v_ego = l100.live100.vEgo
steer_angle = l100.live100.angleSteers
steer_override = l100.live100.steerOverride
v_ego_array = np.append(v_ego_array, [[v_ego], [float(rk.frame)/rate]], 1)
v_ego_array = v_ego_array[:, 1:]
last_l100_ts = l100.logMonoTime
if v_ego is None:
continue
if md is not None:
last_md_ts = md.logMonoTime
# *** get path prediction from the model ***
PP.update(v_ego, md)
# run kalman filter only if prob is high enough
if PP.lead_prob > 0.7:
ekfv.update(speedSensorV.read(PP.lead_dist, covar=PP.lead_var))
ekfv.predict(tsv)
ar_pts[VISION_POINT] = (float(ekfv.state[XV]), np.polyval(PP.d_poly, float(ekfv.state[XV])),
float(ekfv.state[SPEEDV]), False)
else:
ekfv.state[XV] = PP.lead_dist
ekfv.covar = (np.diag([PP.lead_var, ekfv.var_init]))
ekfv.state[SPEEDV] = 0.
if VISION_POINT in ar_pts:
del ar_pts[VISION_POINT]
# *** compute the likely path_y ***
if (active and not steer_override) or mocked:
# use path from model (always when mocking as steering is too noisy)
path_y = np.polyval(PP.d_poly, path_x)
else:
# use path from steer, set angle_offset to 0 it does not only report the physical offset
path_y = calc_lookahead_offset(v_ego, steer_angle, path_x, VM, angle_offset=0)[0]
# *** remove missing points from meta data ***
for ids in tracks.keys():
if ids not in ar_pts:
tracks.pop(ids, None)
# *** compute the tracks ***
for ids in ar_pts:
# ignore standalone vision point, unless we are mocking the radar
if ids == VISION_POINT and not mocked:
continue
rpt = ar_pts[ids]
# align v_ego by a fixed time to align it with the radar measurement
cur_time = float(rk.frame)/rate
v_ego_t_aligned = np.interp(cur_time - RI.delay, v_ego_array[1], v_ego_array[0])
d_path = np.sqrt(np.amin((path_x - rpt[0]) ** 2 + (path_y - rpt[1]) ** 2))
# add sign
d_path *= np.sign(rpt[1] - np.interp(rpt[0], path_x, path_y))
# create the track if it doesn't exist or it's a new track
if ids not in tracks:
tracks[ids] = Track()
tracks[ids].update(rpt[0], rpt[1], rpt[2], d_path, v_ego_t_aligned, rpt[3], steer_override)
# allow the vision model to remove the stationary flag if distance and rel speed roughly match
if VISION_POINT in ar_pts:
fused_id = None
best_score = NO_FUSION_SCORE
for ids in tracks:
dist_to_vision = np.sqrt((0.5*(ar_pts[VISION_POINT][0] - tracks[ids].dRel)) ** 2 + (2*(ar_pts[VISION_POINT][1] - tracks[ids].yRel)) ** 2)
rel_speed_diff = abs(ar_pts[VISION_POINT][2] - tracks[ids].vRel)
tracks[ids].update_vision_score(dist_to_vision, rel_speed_diff)
if best_score > tracks[ids].vision_score:
fused_id = ids
best_score = tracks[ids].vision_score
if fused_id is not None:
tracks[fused_id].vision_cnt += 1
tracks[fused_id].update_vision_fusion()
if DEBUG:
print "NEW CYCLE"
if VISION_POINT in ar_pts:
print "vision", ar_pts[VISION_POINT]
idens = tracks.keys()
track_pts = np.array([tracks[iden].get_key_for_cluster() for iden in idens])
# If we have multiple points, cluster them
if len(track_pts) > 1:
link = linkage_vector(track_pts, method='centroid')
cluster_idxs = fcluster(link, 2.5, criterion='distance')
clusters = [None]*max(cluster_idxs)
for idx in xrange(len(track_pts)):
cluster_i = cluster_idxs[idx]-1
if clusters[cluster_i] is None:
clusters[cluster_i] = Cluster()
clusters[cluster_i].add(tracks[idens[idx]])
elif len(track_pts) == 1:
# TODO: why do we need this?
clusters = [Cluster()]
clusters[0].add(tracks[idens[0]])
else:
clusters = []
if DEBUG:
for i in clusters:
print i
# *** extract the lead car ***
lead_clusters = [c for c in clusters
if c.is_potential_lead(v_ego)]
lead_clusters.sort(key=lambda x: x.dRel)
lead_len = len(lead_clusters)
# *** extract the second lead from the whole set of leads ***
lead2_clusters = [c for c in lead_clusters
if c.is_potential_lead2(lead_clusters)]
lead2_clusters.sort(key=lambda x: x.dRel)
lead2_len = len(lead2_clusters)
# *** publish live20 ***
dat = messaging.new_message()
dat.init('live20')
dat.live20.mdMonoTime = last_md_ts
dat.live20.canMonoTimes = list(rr.canMonoTimes)
dat.live20.radarErrors = list(rr.errors)
dat.live20.l100MonoTime = last_l100_ts
if lead_len > 0:
lead_clusters[0].toLive20(dat.live20.leadOne)
if lead2_len > 0:
lead2_clusters[0].toLive20(dat.live20.leadTwo)
else:
dat.live20.leadTwo.status = False
else:
dat.live20.leadOne.status = False
dat.live20.cumLagMs = -rk.remaining*1000.
live20.send(dat.to_bytes())
# *** publish tracks for UI debugging (keep last) ***
dat = messaging.new_message()
dat.init('liveTracks', len(tracks))
for cnt, ids in enumerate(tracks.keys()):
if DEBUG:
print "id: %4.0f x: %4.1f y: %4.1f vr: %4.1f d: %4.1f va: %4.1f vl: %4.1f vlk: %4.1f alk: %4.1f s: %1.0f v: %1.0f" % \
(ids, tracks[ids].dRel, tracks[ids].yRel, tracks[ids].vRel,
tracks[ids].dPath, tracks[ids].vLat,
tracks[ids].vLead, tracks[ids].vLeadK,
tracks[ids].aLeadK,
tracks[ids].stationary,
tracks[ids].measured)
dat.liveTracks[cnt].trackId = ids
dat.liveTracks[cnt].dRel = float(tracks[ids].dRel)
dat.liveTracks[cnt].yRel = float(tracks[ids].yRel)
dat.liveTracks[cnt].vRel = float(tracks[ids].vRel)
dat.liveTracks[cnt].aRel = float(tracks[ids].aRel)
dat.liveTracks[cnt].stationary = tracks[ids].stationary
dat.liveTracks[cnt].oncoming = tracks[ids].oncoming
liveTracks.send(dat.to_bytes())
rk.monitor_time()
def main(gctx=None):
radard_thread(gctx)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
"""MIT - CSAIL - Gifford Lab - seqgra
seqgra complete pipeline:
1. generate data based on data definition (once), see run_simulator.py
2. train model on data (once), see run_learner.py
3. evaluate model performance with SIS, see run_sis.py
@author: Konstantin Krismer
"""
import argparse
import logging
import os
from typing import List, Optional
import seqgra
import seqgra.constants as c
from seqgra import MiscHelper
from seqgra.comparator import Comparator
from seqgra.idresolver import IdResolver
def get_all_grammar_ids(output_dir: str) -> List[str]:
folder = output_dir + "evaluation/"
return [o for o in os.listdir(folder)
if os.path.isdir(os.path.join(folder, o))]
def get_all_model_ids(output_dir: str, grammar_ids: List[str]) -> List[str]:
model_ids: List[str] = []
for grammar_id in grammar_ids:
folder = output_dir + "evaluation/" + grammar_id + "/"
model_ids += [o for o in os.listdir(folder)
if os.path.isdir(os.path.join(folder, o))]
return list(set(model_ids))
def run_seqgra_summary(analysis_id: str,
comparator_ids: List[str],
output_dir: str,
grammar_ids: Optional[List[str]] = None,
model_ids: Optional[List[str]] = None,
set_names: Optional[List[str]] = None,
model_labels: Optional[List[str]] = None) -> None:
analysis_id = MiscHelper.sanitize_id(analysis_id)
output_dir = MiscHelper.format_output_dir(output_dir.strip())
if comparator_ids:
for comparator_id in comparator_ids:
comparator: Comparator = IdResolver.get_comparator(analysis_id,
comparator_id,
output_dir,
model_labels)
if not grammar_ids:
grammar_ids = get_all_grammar_ids(output_dir)
if not model_ids:
model_ids = get_all_model_ids(output_dir, grammar_ids)
comparator.compare_models(grammar_ids, model_ids, set_names)
def create_parser():
parser = argparse.ArgumentParser(
prog="seqgras",
description="seqgra summary: Gather metrics across grammars, models, "
"evaluators")
parser.add_argument(
"-v",
"--version",
action="version",
version="%(prog)s " + seqgra.__version__)
parser.add_argument(
"-a",
"--analysis-id",
type=str,
required=True,
help="analysis id (folder name for output)"
)
parser.add_argument(
"-c",
"--comparators",
type=str,
required=True,
nargs="+",
help="comparator ID or IDs: IDs of "
"comparators include " +
", ".join(sorted(c.ComparatorID.ALL_COMPARATOR_IDS))
)
parser.add_argument(
"-o",
"--output-dir",
type=str,
required=True,
help="output directory, subdirectories are created for generated "
"data, trained model, and model evaluation"
)
parser.add_argument(
"-g",
"--grammar-ids",
type=str,
default=None,
nargs="+",
help="one or more grammar IDs; defaults to all grammar IDs in "
"output dir"
)
parser.add_argument(
"-m",
"--model-ids",
type=str,
default=None,
nargs="+",
help="one or more model IDs; defaults to all model IDs for specified "
"grammars in output dir"
)
parser.add_argument(
"-s",
"--sets",
type=str,
default=["test"],
nargs="+",
help="one or more of the following: training, validation, or test"
)
parser.add_argument(
"-l",
"--model-labels",
type=str,
default=None,
nargs="+",
help="labels for models, must be same length as model_ids"
)
return parser
def main():
logging.basicConfig(level=logging.INFO)
parser = create_parser()
args = parser.parse_args()
for comparator in args.comparators:
if comparator not in c.ComparatorID.ALL_COMPARATOR_IDS:
raise ValueError(
"invalid comparator ID {s!r}".format(s=comparator))
run_seqgra_summary(args.analysis_id,
args.comparators,
args.output_dir,
args.grammar_ids,
args.model_ids,
args.sets,
args.model_labels)
if __name__ == "__main__":
main()
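# Example invocation (sketch; the analysis ID, comparator ID, and output
# directory are hypothetical placeholders; valid comparator IDs are listed in
# seqgra.constants.ComparatorID.ALL_COMPARATOR_IDS):
#     seqgras -a summary-1 -c <comparator-id> -o results/ -s test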
|
"""Functions that work on collections of shapes
"""
from __future__ import division, print_function
import numpy as np
from .convex import convex_area, convex_centroid
__all__ = ['recenter_polygon', 'centroid_for_shapes',
'centroid_for_uncomputed_shapes', 'recenter_system',
'rescale_and_recenter_system', 'rotate_polygon',
'rotate_system', 'mirror_polygon', 'mirror_system',
'find_concave_outline']
def recenter_polygon(vertices):
"""Returns a new convex polygon with centroid at (0,0)
Args:
vertices (list): list of (x,y) vertices of convex polygon
Returns:
A list just like the input with the recentered vertices (but possibly
transformed into numpy arrays)
"""
centroid = convex_centroid(vertices)
new_verts = []
for v in vertices:
v = np.array(v)
new_verts.append(v - centroid)
return new_verts
def centroid_for_shapes(centroids, areas = None):
"""Calculates the centroid for a set of shapes
Requires pre-computed centroids and areas
Args:
centroids (list): list of (x,y) centroids for each shape
areas (list): list of areas (floats) for each shape (if not given,
assumes they are all equal)
Returns:
The (x,y) position of the weighted centroid (as np.array)
"""
gc = np.zeros(2)
area = 0
if areas is None:
areas = np.ones(len(centroids))
for pc, a in zip(centroids, areas):
gc += np.array(pc)*a
area += a
gc /= area
return np.array(gc)
def centroid_for_uncomputed_shapes(shape_list):
"""Like centroid_for_shapes but calculates centroids & areas
Args:
shape_list (list): a list of list of vertices (one for each shape)
Returns:
The (x,y) position of the weighted centroid (as np.array)
"""
centroids = []
areas = []
for s in shape_list:
centroids.append(convex_centroid(s))
areas.append(convex_area(s))
return centroid_for_shapes(centroids, areas)
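# Worked example: two unit squares sharing an edge, one spanning x in [0, 1]
# and one spanning x in [1, 2]. Their individual centroids are (0.5, 0.5) and
# (1.5, 0.5) with equal areas, so the combined centroid is (1.0, 0.5):
#     sq1 = [(0, 0), (1, 0), (1, 1), (0, 1)]
#     sq2 = [(1, 0), (2, 0), (2, 1), (1, 1)]
#     centroid_for_uncomputed_shapes([sq1, sq2])  # -> array([1. , 0.5])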
def recenter_system(shape_list):
"""Recenters a set of shapes around the centroid of all of them
Args:
shape_list (list): a list of list of vertices (one for each shape)
Returns:
List of two items:
* Similar format as input, but transformed so that calculating the
centroid_for_uncomputed_shapes() on that list returns (0,0)
* The grand centroid for the system in original coordinates
"""
centroids = []
areas = []
new_shapes = []
# Decompose each of the individual shapes
for s in shape_list:
c = convex_centroid(s)
a = convex_area(s)
new_s = []
for v in s:
new_s.append(np.array(v) - c)
centroids.append(c)
areas.append(a)
new_shapes.append(new_s)
# Find the grand centroid & new centers of each shape
center = centroid_for_shapes(centroids, areas)
re_centroids = [c - center for c in centroids]
# Go back and change the vertices of each shape
final_shapes = []
for ns,c in zip(new_shapes, re_centroids):
final_shapes.append([s+c for s in ns])
return final_shapes, center
def rescale_and_recenter_system(shape_list, total_area):
"""Recenters a set of shapes and resizes them to have a total fixed area
Args:
shape_list (list): a list of list of vertices (one for each shape)
total_area (float): the area to fix the shapes to
Returns:
List of two items:
* Similar format as input, but transformed so that calculating the
`centroid_for_uncomputed_shapes()` on that list returns (0,0) and summing
the areas gets to `total_area`
* The grand centroid for the system in original coordinates
"""
centroids = []
areas = []
new_shapes = []
# Decompose each of the individual shapes
for s in shape_list:
c = convex_centroid(s)
a = convex_area(s)
new_s = []
for v in s:
new_s.append(np.array(v) - c)
centroids.append(c)
areas.append(a)
new_shapes.append(new_s)
# Find the grand centroid & new centers of each shape
center = centroid_for_shapes(centroids, areas)
re_centroids = [c - center for c in centroids]
# Find rescaling factor
tot_a = sum(areas)
dim_scale = np.sqrt(total_area / tot_a)
# Go back and change the vertices of each shape
final_shapes = []
for ns,c in zip(new_shapes, re_centroids):
final_shapes.append([(s+c)*dim_scale for s in ns])
return final_shapes, center
def rotate_polygon(vertices, angle, center_point = [0., 0.]):
"""Rotates a shape around a given point (the origin)
Args:
vertices (list): A list of (x,y) vertices
angle (float): Angle in radians to rotate counterclockwise
center_point ([float, float]): (x,y) point to rotate around
Returns:
A list of vertices rotated around the center point
"""
np_o = np.array(center_point)
np_vs = [np.array(v) - np_o for v in vertices]
rot_mat = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
return [np.dot(rot_mat, v)+np_o for v in np_vs]
def rotate_system(shape_list, angle, center_point = None):
"""Rotates a set of shapes around a given point
If no center point is given, assume the center of mass of the shape
Args:
shape_list (list): A list of list of (x,y) vertices
angle (float): Angle in radians to rotate counterclockwise
center_point ([float, float]): (x,y) point to rotate around
Returns:
A new shape list with rotated vertices
"""
if center_point is None:
center_point = centroid_for_uncomputed_shapes(shape_list)
return [rotate_polygon(s, angle, center_point) for s in shape_list]
def mirror_polygon(vertices, axes=(False, True), center_point=None):
"""Mirrors a polygon around an x or y line
If center_point is None, mirror around the center of the shape
Args:
vertices (list): A list of (x,y) vertices
axes ([bool, bool]): Whether to mirror around the (x,y) axes
center_point ([float, float]): (x,y) point to mirror around
Returns:
A new polygon with rotated vertices
"""
if center_point is None:
center_point = convex_centroid(vertices)
xm = -1 if axes[0] else 1
ym = -1 if axes[1] else 1
return [np.array([xm*(v[0]-center_point[0])+center_point[0],
ym*(v[1]-center_point[1])+center_point[1]]) for v
in vertices]
def mirror_system(shape_list, axes=(False, True), center_point=None):
"""Mirrors a polygon around an x or y line
Mirrors around the center of the system if center_point is None
Args:
shape_list (list): A list of list of (x,y) vertices
axes ([bool, bool]): Whether to mirror around the (x,y) axes
center_point ([float, float]): (x,y) point to mirror around
Returns:
A new shape list with rotated vertices
"""
if center_point is None:
center_point = centroid_for_uncomputed_shapes(shape_list)
return [mirror_polygon(s, axes, center_point) for s in shape_list]
def _point_equal(p1, p2):
return p1[0]==p2[0] and p1[1] == p2[1]
def _arr_eq(a1, a2):
return all(_point_equal(p1,p2) for p1, p2 in zip(a1, a2))
def find_concave_outline(shape_list):
"""Find the outline of a set of shapes
Assuming all shapes have edges in common with other shapes where they touch,
provides a set of vertices for drawing the outline
Args:
shape_list (list): A list of list of (x,y) vertices
Returns:
A list of ordered (x,y) vertices for drawing an outline
"""
# Find the starting point: the left-most point, breaking ties by the lowest y
current_shape = shape_list[0]
current_pt = current_shape[0]
test_idx = 1
next_test_dir = 1
for s in shape_list:
for i in range(len(s)):
p = s[i]
if ((p[0] < current_pt[0]) or
(p[0] == current_pt[0] and p[1] < current_pt[1])):
# Replace
current_pt = p
current_shape = s
test_idx = (i+1) % len(s)
next_test_dir = 1
vertex_list = [current_pt]
# Keep going until you reach back to the first point
while not _point_equal(current_shape[test_idx], vertex_list[0]):
# Iterate through all the shapes to try to find a matching edge
checking = True
for s in (s for s in shape_list if not _arr_eq(s, current_shape)):
if checking: # Way to break out if match found
for i in range(len(s)):
spt = s[i]
if _point_equal(current_pt, spt):
spt_after = s[(i+1) % len(s)]
spt_before = s[(i-1) % len(s)]
test_pt = current_shape[test_idx]
if _point_equal(test_pt, spt_after):
test_idx = (i-1) % len(s)
next_test_dir = -1
current_shape = s
checking = False
elif _point_equal(test_pt, spt_before):
test_idx = (i+1) % len(s)
next_test_dir = 1
current_shape = s
checking = False
# Have you exhausted all shapes?
if checking:
current_pt = current_shape[test_idx]
vertex_list.append(current_pt)
test_idx += next_test_dir
test_idx %= len(current_shape)
return vertex_list
|
import six
if six.PY3:
import unittest
else:
import unittest2 as unittest
from datetime import date
from mock import Mock
from six import u
from twilio.rest.resources import Messages
DEFAULT = {
'From': None,
'DateSent<': None,
'DateSent>': None,
'DateSent': None,
}
class MessageTest(unittest.TestCase):
def setUp(self):
self.resource = Messages("foo", ("sid", "token"))
self.params = DEFAULT.copy()
def test_list_on(self):
self.resource.get_instances = Mock()
self.resource.list(date_sent=date(2011, 1, 1))
self.params['DateSent'] = "2011-01-01"
self.resource.get_instances.assert_called_with(self.params)
def test_list_after(self):
self.resource.get_instances = Mock()
self.resource.list(after=date(2011, 1, 1))
self.params['DateSent>'] = "2011-01-01"
self.resource.get_instances.assert_called_with(self.params)
def test_list_before(self):
self.resource.get_instances = Mock()
self.resource.list(before=date(2011, 1, 1))
self.params['DateSent<'] = "2011-01-01"
self.resource.get_instances.assert_called_with(self.params)
def test_create(self):
self.resource.create_instance = Mock()
self.resource.create(
from_='+14155551234',
to='+14155556789',
body=u('ahoy hoy'),
)
self.resource.create_instance.assert_called_with(
{
'from': '+14155551234',
'to': '+14155556789',
'body': u('ahoy hoy'),
},
)
|
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email='admin@gmail.com',
password='password123'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='test@gmail.com',
password='password123',
name='Test user full name'
)
def test_users_listed(self):
"""Test that users are listed on user page"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
"""User edit page works"""
url = reverse('admin:core_user_change', args=[self.user.id])
# admin/user/change/<id>
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
"""Test that the create userpage works"""
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Relax transformation passes."""
import functools
import inspect
import types
from typing import Callable, Dict, Union
import tvm.ir
from tvm.target import Target
from tvm.meta_schedule.database import PyDatabase
from . import _ffi_api
@tvm._ffi.register_object("relax.FunctionPass")
class FunctionPass(tvm.ir.transform.Pass):
"""A pass that works on each tvm.relax.Function in a module. A function
pass class should be created through `function_pass`.
"""
@tvm._ffi.register_object("relax.DataflowBlockPass")
class DataflowBlockPass(tvm.ir.transform.Pass):
"""A pass that works on each tvm.relax.DataflowBlock in a module."""
def FailTestRewrite() -> tvm.ir.transform.Pass:
"""Incorrectly transform the dataflow structure as fail testcases.
Returns
-------
ret: tvm.ir.transform.Pass
"""
return _ffi_api.FailTestRewrite()
def RewriteFMA() -> tvm.ir.transform.Pass:
"""Perform fused multiply add rewriting in dataflow blocks.
Returns
-------
ret: tvm.ir.transform.Pass
"""
return _ffi_api.RewriteFMA()
def FuseFMA() -> tvm.ir.transform.Pass:
"""Perform fused multiply add rewriting, generate a subgraph(sub function),
and call into the sub function in the main function.
Returns
-------
ret: tvm.ir.transform.Pass
"""
return _ffi_api.FuseFMA()
def ToNonDataflow() -> tvm.ir.transform.Pass:
"""Transform all dataflow structure to non-dataflow version.
Returns
-------
ret: tvm.ir.transform.Pass
"""
return _ffi_api.ToNonDataflow()
def CallTIRRewrite() -> tvm.ir.transform.Pass:
"""Perform explicit tensor allocation for call_tir.
Returns
-------
ret: tvm.ir.transform.Pass
"""
return _ffi_api.CallTIRRewrite()
def VMMemoryLower() -> tvm.ir.transform.Pass:
"""Perform memory lowering. Lowers the relax.builtin.alloc_tensor intrinsic to VM intrinsics.
Returns
-------
ret: tvm.ir.transform.Pass
"""
return _ffi_api.VMMemoryLower()
def VMShapeLower() -> tvm.ir.transform.Pass:
"""Lower the shape expressions in relax to VM shape heap manipulations and generate related
TIR functions to do shape calculations.
Returns
-------
ret: tvm.ir.transform.Pass
"""
return _ffi_api.VMShapeLower()
def Normalize() -> tvm.ir.transform.Pass:
"""Transforming Relax IR to normal form, i.e., the expressions are normalized(no nesting
and hence the AST is in ANF), and all checked_type_ and shape_ of expressions are available.
Returns
-------
ret: tvm.ir.transform.Pass
"""
return _ffi_api.Normalize()
def ResolveGlobals() -> tvm.ir.transform.Pass:
"""Resolve global variables using string equality. This ensures all GlobalVars in the IR refer
to the correct GlobalVar of the input IRModule. An error is reported if any GlobalVar cannot be
resolved.
Returns
-------
ret: tvm.ir.transform.Pass
"""
return _ffi_api.ResolveGlobals()
def MetaScheduleApplyHistoryBest(
database: PyDatabase,
target: Target,
) -> tvm.ir.transform.Pass:
"""Apply the best schedule from tuning database.
Parameters
----------
database : metaschedule tuning database
target: target info
Returns
-------
ret: tvm.ir.transform.Pass
"""
return _ffi_api.MetaScheduleApplyHistoryBest(database, target)
def BindParams(func_name: str, params: Dict[str, tvm.runtime.NDArray]) -> tvm.ir.transform.Pass:
"""Bind params of function of the module to constant tensors.
Parameters
----------
func_name: str
The function name to be bound
params : dict from str to ndarray
The map from param name to constant tensors.
Returns
-------
ret: tvm.ir.transform.Pass
"""
return _ffi_api.BindParams(func_name, params)
def FoldConstant() -> tvm.ir.transform.Pass:
"""Fold constant expressions.
Returns
-------
ret: tvm.ir.transform.Pass
"""
return _ffi_api.FoldConstant()
def AnnotateTIROpPattern() -> tvm.ir.transform.Pass:
"""Annotate Op Pattern Kind for TIR functions
Returns
-------
ret: tvm.ir.transform.Pass
"""
return _ffi_api.AnnotateTIROpPattern()
def FuseOps(fuse_opt_level=-1) -> tvm.ir.transform.Pass:
"""This pass groups bindings in a dataflow block of Relax functions and generate a new grouped
Relax function for each group, according to the fusion algorithm described in the pass
implementation. By grouping bindings into new Relax functions, we substitute the bindings in
the function being manipulated into function calls to the new grouped function.
A follow-up pass named "FuseTIR" will generate a TIR PrimFunc for each grouped function.
Parameters
----------
fuse_opt_level : int
The level of fuse optimization. -1 indicates that the level will be
inferred from pass context.
Returns
-------
ret : tvm.transform.Pass
The registered pass for operator fusion.
"""
return _ffi_api.FuseOps(fuse_opt_level)
def FuseTIR() -> tvm.ir.transform.Pass:
"""Fuse primitive relax function into a larger TIR function if possible
Returns
-------
ret : tvm.transform.Pass
The registered pass for tir fusion.
"""
return _ffi_api.FuseTIR()
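# Usage note (a minimal sketch, not part of this module's API surface): the functions
# above return tvm.ir.transform.Pass objects that can be applied to an IRModule
# directly or composed, e.g.
#
#   seq = tvm.transform.Sequential([Normalize(), FoldConstant(), FuseOps()])
#   mod = seq(mod)  # `mod` is assumed to be an existing relax IRModule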
def _wrap_class_function_pass(pass_cls, pass_info):
"""Wrap a python class as function pass."""
class PyFunctionPass(FunctionPass):
"""Internal wrapper class to create a class instance."""
def __init__(self, *args, **kwargs):
# initialize handle in case pass_cls creation failed.
self.handle = None
inst = pass_cls(*args, **kwargs)
# it is important not to capture self to
# avoid a cyclic dependency
def _pass_func(func, mod, ctx):
return inst.transform_function(func, mod, ctx)
self.__init_handle_by_constructor__(_ffi_api.MakeFunctionPass, _pass_func, pass_info)
self._inst = inst
def __getattr__(self, name):
# fall back to instance attribute if there is not any
return self._inst.__getattribute__(name)
functools.update_wrapper(PyFunctionPass.__init__, pass_cls.__init__)
PyFunctionPass.__name__ = pass_cls.__name__
PyFunctionPass.__doc__ = pass_cls.__doc__
PyFunctionPass.__module__ = pass_cls.__module__
return PyFunctionPass
def function_pass(
pass_func=None,
opt_level=None,
name=None,
required=None,
traceable=False,
) -> Union[Callable, FunctionPass]:
"""Decorate a function pass.
This function returns a callback when pass_func
is provided. Otherwise, it returns the created function pass using the
given optimization function.
Parameters
----------
pass_func : Optional[Callable[(Function, Module, PassContext) -> Function]]
The transformation function or class.
opt_level : int
The optimization level of this function pass.
name : Optional[str]
The name of the function pass. The name could be empty. In this case, the
name of the optimization function will be used as the pass name.
required : Optional[List[str]]
The list of passes that the function pass is dependent on.
    traceable : bool
        Whether the function pass is traceable.
Returns
-------
create_function_pass : Union[Callable, FunctionPass]
A decorator will be returned if pass_func is not provided,
otherwise return the decorated result.
The returned decorator has two behaviors depending on the input:
A new FunctionPass will be returned when we decorate a pass function.
A new FunctionPass class will be returned when we decorate a class type.
Examples
--------
The following code block decorates a function pass class.
.. code-block:: python
@relax.transform.function_pass(opt_level=1)
class TestReplaceFunc:
def __init__(self, new_func):
self.new_func = new_func
def transform_function(self, func, mod, ctx):
# just for demo purposes
# transform func to new_func
return self.new_func
@R.function
def f1(x: Tensor[(m, n), "float32"]):
return x
@tvm.script.ir_module
class InputMod:
@R.function
def f2(x: Tensor[(m, n), "float32"]):
gv0 = relax.add(x, x)
return gv0
# fpass is now a special pass that replaces every
# function to f1
fpass = TestReplaceFunc(f1)
# now every function in InputMod is replaced by f1
updated_mod = fpass(InputMod)
The following code creates a function pass by decorating
a user defined transform function.
.. code-block:: python
@relax.transform.function_pass(opt_level=2)
def transform(func, mod, ctx):
# my transformations here.
return func
function_pass = transform
assert isinstance(function_pass, relax.transform.FunctionPass)
assert function_pass.info.opt_level == 2
        # Given a module m, the optimization could be invoked as follows:
updated_mod = function_pass(m)
# Now transform should have been applied to every function in
# the provided module m. And the updated module will be returned.
"""
if opt_level is None:
raise ValueError("Please provide opt_level for the function pass.")
required = required if required else []
if not isinstance(required, (list, tuple)):
raise TypeError("Required is expected to be the type of " + "list/tuple.")
def create_function_pass(pass_arg):
"""Internal function that creates a function pass"""
fname = name if name else pass_arg.__name__
info = tvm.transform.PassInfo(opt_level, fname, required, traceable)
if inspect.isclass(pass_arg):
return _wrap_class_function_pass(pass_arg, info)
if not isinstance(pass_arg, (types.FunctionType, types.LambdaType)):
raise TypeError("pass_func must be a callable for Function pass")
return _ffi_api.MakeFunctionPass(pass_arg, info)
if pass_func:
return create_function_pass(pass_func)
return create_function_pass
def _wrap_class_dataflowblock_pass(pass_cls, pass_info):
"""Wrap a python class as dataflowblock pass"""
class PyDataflowBlockPass(DataflowBlockPass):
"""Internal wrapper class to create a class instance."""
def __init__(self, *args, **kwargs):
# initialize handle in case pass_cls creation failed.
self.handle = None
inst = pass_cls(*args, **kwargs)
# it is important not to capture self to
# avoid a cyclic dependency
def _pass_func(func, mod, ctx):
return inst.transform_dataflowblock(func, mod, ctx)
self.__init_handle_by_constructor__(
_ffi_api.MakeDataflowBlockPass, _pass_func, pass_info
)
self._inst = inst
def __getattr__(self, name):
# fall back to instance attribute if there is not any
return self._inst.__getattribute__(name)
functools.update_wrapper(PyDataflowBlockPass.__init__, pass_cls.__init__)
PyDataflowBlockPass.__name__ = pass_cls.__name__
PyDataflowBlockPass.__doc__ = pass_cls.__doc__
PyDataflowBlockPass.__module__ = pass_cls.__module__
return PyDataflowBlockPass
def dataflowblock_pass(
pass_func=None, opt_level=None, name=None, required=None, traceable=False
) -> Union[Callable, DataflowBlockPass]:
"""Decorate a dataflowblock pass.
This function returns a callback when pass_func
is provided. Otherwise, it returns the created dataflowblock pass using the
given optimization function.
Parameters
----------
pass_func : Optional[Callable[(DataflowBlock, Module, PassContext) -> DataflowBlock]]
The transformation function or class.
opt_level : int
The optimization level of this dataflowblock pass.
name : Optional[str]
The name of the dataflowblock pass. The name could be empty. In this case, the
name of the optimization function will be used as the pass name.
required : Optional[List[str]]
The list of passes that the dataflowblock pass is dependent on.
    traceable : bool
        Whether the dataflowblock pass is traceable.
Returns
-------
create_dataflowblock_pass : Union[Callable, DataflowBlockPass]
A decorator will be returned if pass_func is not provided,
otherwise return the decorated result.
The returned decorator has two behaviors depending on the input:
A new DataflowBlockPass will be returned when we decorate a pass function.
A new DataflowBlockPass class will be returned when we decorate a class type.
Examples
--------
The following code block decorates a dataflowblock pass class.
.. code-block:: python
@relax.transform.dataflowblock_pass(opt_level=1)
class TestReplaceBinding:
# Simple test function to replace the first VarBinding to another.
def __init__(self):
# create a new VarBinding
m, n = tir.Var("m", "int64"), tir.Var("n", "int64")
type_anno = relax.DynTensorType(2, "float32")
lv0 = relax.Var("lv1", [m, n], type_anno)
val = relax.const(np.random.rand(24, 56))
self.new_binding = relax.VarBinding(lv0, val)
def transform_dataflowblock(self, block, mod, ctx):
# just for demo purposes
# Replace the first binding in the DataflowBlock
new_bindings = [self.new_binding, block.bindings[1]]
new_block = relax.expr.DataflowBlock(new_bindings, block.span)
return new_block
@tvm.script.ir_module
class InputMod:
@R.function
def f1(x: Tensor[(m, n), "float32"]):
with relax.dataflow():
lv0 = relax.multiply(x, x)
gv0 = relax.add(x, x)
relax.output(gv0)
return gv0
# block_pass is now a special pass that replaces every
# first binding to the constant value binding
block_pass = TestReplaceBinding()
# now every first binding in DataflowBlock of InputMod
# is replaced by new_binding
updated_mod = block_pass(InputMod)
The following code creates a dataflowblock pass by decorating
a user defined transform function.
.. code-block:: python
@relax.transform.dataflowblock_pass(opt_level=2)
def transform(block, mod, ctx):
# my transformations here.
return block
block_pass = transform
assert isinstance(block_pass, relax.transform.DataflowBlockPass)
assert block_pass.info.opt_level == 2
        # Given a module m, the optimization could be invoked as follows:
updated_mod = block_pass(m)
# Now transform should have been applied to every DataflowBlock in
# the provided module m. And the updated module will be returned.
"""
if opt_level is None:
raise ValueError("Please provide opt_level for the dataflowblock pass.")
required = required if required else []
if not isinstance(required, (list, tuple)):
raise TypeError("Required is expected to be the type of " + "list/tuple.")
def create_dataflowblock_pass(pass_arg):
"""Internal function that creates a dataflowblock pass"""
fname = name if name else pass_arg.__name__
info = tvm.transform.PassInfo(opt_level, fname, required, traceable)
if inspect.isclass(pass_arg):
return _wrap_class_dataflowblock_pass(pass_arg, info)
if not isinstance(pass_arg, (types.FunctionType, types.LambdaType)):
raise TypeError("pass_func must be a callable for DataflowBlock pass")
return _ffi_api.MakeDataflowBlockPass(pass_arg, info)
if pass_func:
return create_dataflowblock_pass(pass_func)
return create_dataflowblock_pass
|
#
# The Python Imaging Library
# $Id$
#
# WMF stub codec
#
# history:
# 1996-12-14 fl Created
# 2004-02-22 fl Turned into a stub driver
# 2004-02-23 fl Added EMF support
#
# Copyright (c) Secret Labs AB 1997-2004. All rights reserved.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
# WMF/EMF reference documentation:
# https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-WMF/[MS-WMF].pdf
# http://wvware.sourceforge.net/caolan/index.html
# http://wvware.sourceforge.net/caolan/ora-wmf.html
from __future__ import print_function
from . import Image, ImageFile
from ._binary import i16le as word, si16le as short, i32le as dword, si32le as _long
__version__ = "0.2"
_handler = None
if str != bytes:
long = int
def register_handler(handler):
"""
Install application-specific WMF image handler.
:param handler: Handler object.
"""
global _handler
_handler = handler
if hasattr(Image.core, "drawwmf"):
# install default handler (windows only)
class WmfHandler(object):
def open(self, im):
im.mode = "RGB"
self.bbox = im.info["wmf_bbox"]
def load(self, im):
im.fp.seek(0) # rewind
return Image.frombytes(
"RGB", im.size,
Image.core.drawwmf(im.fp.read(), im.size, self.bbox),
"raw", "BGR", (im.size[0]*3 + 3) & -4, -1
)
register_handler(WmfHandler())
#
# --------------------------------------------------------------------
# Read WMF file
def _accept(prefix):
return (
prefix[:6] == b"\xd7\xcd\xc6\x9a\x00\x00" or
prefix[:4] == b"\x01\x00\x00\x00"
)
##
# Image plugin for Windows metafiles.
class WmfStubImageFile(ImageFile.StubImageFile):
format = "WMF"
format_description = "Windows Metafile"
def _open(self):
# check placable header
s = self.fp.read(80)
if s[:6] == b"\xd7\xcd\xc6\x9a\x00\x00":
# placeable windows metafile
# get units per inch
inch = word(s, 14)
# get bounding box
x0 = short(s, 6)
y0 = short(s, 8)
x1 = short(s, 10)
y1 = short(s, 12)
# normalize size to 72 dots per inch
size = (x1 - x0) * 72 // inch, (y1 - y0) * 72 // inch
self.info["wmf_bbox"] = x0, y0, x1, y1
self.info["dpi"] = 72
# print(self.mode, self.size, self.info)
# sanity check (standard metafile header)
if s[22:26] != b"\x01\x00\t\x00":
raise SyntaxError("Unsupported WMF file format")
elif dword(s) == 1 and s[40:44] == b" EMF":
# enhanced metafile
# get bounding box
x0 = _long(s, 8)
y0 = _long(s, 12)
x1 = _long(s, 16)
y1 = _long(s, 20)
# get frame (in 0.01 millimeter units)
frame = _long(s, 24), _long(s, 28), _long(s, 32), _long(s, 36)
# normalize size to 72 dots per inch
size = x1 - x0, y1 - y0
# calculate dots per inch from bbox and frame
            xdpi = 2540 * (x1 - x0) // (frame[2] - frame[0])
ydpi = 2540 * (y1 - y0) // (frame[3] - frame[1])
self.info["wmf_bbox"] = x0, y0, x1, y1
if xdpi == ydpi:
self.info["dpi"] = xdpi
else:
self.info["dpi"] = xdpi, ydpi
else:
raise SyntaxError("Unsupported file format")
self.mode = "RGB"
self.size = size
loader = self._load()
if loader:
loader.open(self)
def _load(self):
return _handler
def _save(im, fp, filename):
    if _handler is None or not hasattr(_handler, "save"):
raise IOError("WMF save handler not installed")
_handler.save(im, fp, filename)
#
# --------------------------------------------------------------------
# Registry stuff
Image.register_open(WmfStubImageFile.format, WmfStubImageFile, _accept)
Image.register_save(WmfStubImageFile.format, _save)
Image.register_extension(WmfStubImageFile.format, ".wmf")
Image.register_extension(WmfStubImageFile.format, ".emf")
|
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
# pylint: disable=wrong-import-position
import time
import struct
# CircuitPython / Blinka
import board
uart = board.UART()
uart.baudrate = 19200
# via USB cable
# import serial
# uart = serial.Serial("/dev/ttyUSB0", 19200)
from adafruit_rockblock import RockBlock
rb = RockBlock(uart)
# create some data
some_int = 2112
some_float = 42.123456789
some_text = "hello world"
text_len = len(some_text)
# create binary data
data = struct.pack("i", some_int)
data += struct.pack("f", some_float)
data += struct.pack("i", len(some_text))
data += struct.pack("{}s".format(text_len), some_text.encode())
# put data in outbound buffer
rb.data_out = data
# try a satellite Short Burst Data transfer
print("Talking to satellite...")
status = rb.satellite_transfer()
# loop as needed
retry = 0
while status[0] > 8:
time.sleep(10)
status = rb.satellite_transfer()
print(retry, status)
retry += 1
print("\nDONE.")
|
import logging
from keras import Sequential
from keras.layers import Convolution2D, MaxPooling2D, Flatten, Dropout, Dense, SpatialDropout2D, K
from keras.optimizers import SGD
from keras.regularizers import l2
from utils_train_self_driving_car import INPUT_SHAPE, rmse
from models.abstract_model_provider import AbstractModelProvider
logger = logging.Logger("Chauffeur")
NAME = "chauffeur"
# Note: For chauffeur you still have to change the following in the drive method
# (not yet done automatically, and I'm not working on it as it does not look like we're going to use chauffeur)
# def rmse(y_true, y_pred):
# '''Calculates RMSE
# '''
# return K.sqrt(K.mean(K.square(y_pred - y_true)))
#
#
# model = load_model(filepath=args.model, custom_objects={"rmse": rmse}, compile=True)
class Chauffeur(AbstractModelProvider):
def get_name(self) -> str:
return NAME
def get_model(self, args):
logger.warning("We are currently still ignoring the args settings (e.g. args.learning_rate) in chauffeur")
# Taken from https://github.com/udacity/self-driving-car/blob/master/steering-models/community-models/chauffeur/models.py
use_adadelta = True
learning_rate=0.01
W_l2=0.0001
input_shape = INPUT_SHAPE # Original Chauffeur uses input_shape=(120, 320, 3)
model = Sequential()
model.add(Convolution2D(16, 5, 5,
input_shape=input_shape,
init= "he_normal",
activation='relu',
border_mode='same'))
model.add(SpatialDropout2D(0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(20, 5, 5,
init= "he_normal",
activation='relu',
border_mode='same'))
model.add(SpatialDropout2D(0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(40, 3, 3,
init= "he_normal",
activation='relu',
border_mode='same'))
model.add(SpatialDropout2D(0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(60, 3, 3,
init= "he_normal",
activation='relu',
border_mode='same'))
model.add(SpatialDropout2D(0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(80, 2, 2,
init= "he_normal",
activation='relu',
border_mode='same'))
model.add(SpatialDropout2D(0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(128, 2, 2,
init= "he_normal",
activation='relu',
border_mode='same'))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(
output_dim=1,
init='he_normal',
W_regularizer=l2(W_l2)))
optimizer = ('adadelta' if use_adadelta
else SGD(lr=learning_rate, momentum=0.9))
model.compile(
loss='mean_squared_error',
optimizer=optimizer,
metrics=[rmse])
return model
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dogAccountantProject.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
# Generated by Django 2.1.15 on 2021-08-12 11:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0002_tag'),
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import time
import logging
def create_eventhub_producer_client():
# [START create_eventhub_producer_client_from_conn_str_sync]
import os
from azure.eventhub import EventHubProducerClient
event_hub_connection_str = os.environ['EVENT_HUB_CONN_STR']
eventhub_name = os.environ['EVENT_HUB_NAME']
producer = EventHubProducerClient.from_connection_string(
conn_str=event_hub_connection_str,
eventhub_name=eventhub_name
)
# [END create_eventhub_producer_client_from_conn_str_sync]
# [START create_eventhub_producer_client_sync]
import os
from azure.eventhub import EventHubProducerClient, EventHubSharedKeyCredential
fully_qualified_namespace = os.environ['EVENT_HUB_HOSTNAME']
eventhub_name = os.environ['EVENT_HUB_NAME']
shared_access_policy = os.environ['EVENT_HUB_SAS_POLICY']
shared_access_key = os.environ['EVENT_HUB_SAS_KEY']
credential = EventHubSharedKeyCredential(shared_access_policy, shared_access_key)
producer = EventHubProducerClient(
fully_qualified_namespace=fully_qualified_namespace,
eventhub_name=eventhub_name,
credential=credential
)
# [END create_eventhub_producer_client_sync]
return producer
def create_eventhub_consumer_client():
# [START create_eventhub_consumer_client_from_conn_str_sync]
import os
from azure.eventhub import EventHubConsumerClient
event_hub_connection_str = os.environ['EVENT_HUB_CONN_STR']
eventhub_name = os.environ['EVENT_HUB_NAME']
consumer = EventHubConsumerClient.from_connection_string(
conn_str=event_hub_connection_str,
eventhub_name=eventhub_name
)
# [END create_eventhub_consumer_client_from_conn_str_sync]
# [START create_eventhub_consumer_client_sync]
import os
from azure.eventhub import EventHubConsumerClient, EventHubSharedKeyCredential
fully_qualified_namespace = os.environ['EVENT_HUB_HOSTNAME']
eventhub_name = os.environ['EVENT_HUB_NAME']
shared_access_policy = os.environ['EVENT_HUB_SAS_POLICY']
shared_access_key = os.environ['EVENT_HUB_SAS_KEY']
credential = EventHubSharedKeyCredential(shared_access_policy, shared_access_key)
consumer = EventHubConsumerClient(
fully_qualified_namespace=fully_qualified_namespace,
eventhub_name=eventhub_name,
credential=credential)
# [END create_eventhub_consumer_client_sync]
return consumer
def example_eventhub_sync_send_and_receive():
producer = create_eventhub_producer_client()
consumer = create_eventhub_consumer_client()
try:
logger = logging.getLogger("azure.eventhub")
# [START create_event_data]
from azure.eventhub import EventData
event_data = EventData("String data")
event_data = EventData(b"Bytes data")
# [END create_event_data]
# [START eventhub_producer_client_create_batch_sync]
event_data_batch = producer.create_batch(max_size=10000)
while True:
try:
event_data_batch.try_add(EventData('Message inside EventBatchData'))
except ValueError:
# The EventDataBatch object reaches its max_size.
# You can send the full EventDataBatch object and create a new one here.
break
# [END eventhub_producer_client_create_batch_sync]
# [START eventhub_producer_client_send_sync]
with producer:
event_data = EventData(b"A single event")
producer.send(event_data)
# [END eventhub_producer_client_send_sync]
time.sleep(1)
# [START eventhub_consumer_client_receive_sync]
logger = logging.getLogger("azure.eventhub")
def on_event(partition_context, event):
logger.info("Received event from partition: {}".format(partition_context.partition_id))
# Do ops on received events
with consumer:
consumer.receive(on_event=on_event, consumer_group='$Default')
# [END eventhub_consumer_client_receive_sync]
finally:
pass
def example_eventhub_producer_ops():
# [START eventhub_producer_client_close_sync]
import os
from azure.eventhub import EventHubProducerClient, EventData
event_hub_connection_str = os.environ['EVENT_HUB_CONN_STR']
eventhub_name = os.environ['EVENT_HUB_NAME']
producer = EventHubProducerClient.from_connection_string(
conn_str=event_hub_connection_str,
eventhub_name=eventhub_name
)
try:
producer.send(EventData(b"A single event"))
finally:
# Close down the producer handler.
producer.close()
# [END eventhub_producer_client_close_sync]
def example_eventhub_consumer_ops():
# [START eventhub_consumer_client_close_sync]
import os
import threading
event_hub_connection_str = os.environ['EVENT_HUB_CONN_STR']
eventhub_name = os.environ['EVENT_HUB_NAME']
from azure.eventhub import EventHubConsumerClient
consumer = EventHubConsumerClient.from_connection_string(
conn_str=event_hub_connection_str,
eventhub_name=eventhub_name
)
logger = logging.getLogger("azure.eventhub")
def on_event(partition_context, event):
logger.info("Received event from partition: {}".format(partition_context.partition_id))
# Do ops on received events
    # The receive method is a blocking call, so execute it in a thread to
    # better demonstrate how to stop receiving by calling the close method.
worker = threading.Thread(
target=consumer.receive,
kwargs={"on_event": on_event, "consumer_group": "$Default"}
)
worker.start()
time.sleep(10) # Keep receiving for 10s then close.
# Close down the consumer handler explicitly.
consumer.close()
# [END eventhub_consumer_client_close_sync]
if __name__ == '__main__':
example_eventhub_producer_ops()
example_eventhub_consumer_ops()
# example_eventhub_sync_send_and_receive()
|
# -*- coding: utf-8 -*-
"""
Module to define CONSTANTS used across the project
"""
from os import path
# identify basedir for the package
BASE_DIR = path.dirname(path.normpath(path.dirname(__file__)))
# Training and predict(deepaas>=0.5.0) arguments as a dict of dicts
# with the following structure to feed the deepaas API parser:
# (see also get_train_args() )
# { 'arg1' : {'default': 1, # default value
# 'help': '', # can be an empty string
# 'required': False # bool
# },
# 'arg2' : {'default': 'value1',
# 'choices': ['value1', 'value2', 'value3'],
# 'help': 'multi-choice argument',
# 'required': False
# },
# 'arg3' : {...
# },
# ...
# }
train_args = { 'arg1': {'default': 1,
'help': '',
'required': False
},
}
# !!! deepaas>=0.5.0 calls get_test_args() to get args for 'predict'
predict_args = { 'arg2': {'default': 1,
'help': '',
'required': False
},
}
|
_base_ = [
'../_base_/datasets/cityscapes_768x768.py', '../_base_/default_runtime.py',
'../_base_/schedules/schedule_160k.py'
]
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='open-mmlab://resnet50_v1c',
backbone=dict(
type='ResNetV1c',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 1, 1),
strides=(1, 2, 2, 2),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
neck=[dict(
type='VitFpn',
img_size=[192, 96, 48, 24],
patch_size=[4, 4, 4, 4],
in_chans=[256, 512, 1024, 2048],
embed_dim=[256, 512, 1024, 2048],
depth=3,
num_heads=8,
num_classes=19,
drop_rate=0.1,
norm_cfg=norm_cfg,
pos_embed_interp=True,
align_corners=False),
dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=4,
)],
decode_head=dict(
type='TransFpnHead',
in_channels=256,
channels=128,
in_index=23,
img_size=768,
embed_dim=256,
num_classes=19,
norm_cfg=norm_cfg,
num_conv=4,
upsampling_method='bilinear',
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)))
# model training and testing settings
optimizer = dict(lr=0.01, weight_decay=0.0,
paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10.)})
)
crop_size = (768, 768)
train_cfg = dict()
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(512, 512))
find_unused_parameters = True
data = dict(samples_per_gpu=2)
# model notes
# 1. ResNet stage outputs: [H/4, W/4, 256], [H/8, W/8, 512], [H/16, W/16, 1024], [H/32, W/32, 2048]; spatial sizes [192, 96, 48, 24]
# 2. patch sizes [4, 4, 4, 4]; pyramid token counts L = [48*48, 24*24, 12*12, 6*6]
# 3. transformer embedding dims [256, 512, 1024, 2048]
# 4. img_size 768*768, batch size 2 per GPU, roughly 8682 MB of GPU memory per GPU
# 5. the official Cityscapes labels must be converted with tools/convert_datasets/cityscapes.py, otherwise a RuntimeError (CUDA error) is raised
# model result
# 2021-10-04 08:35:52,622 - mmseg - INFO - Iter(val) [80000]
# Class IoU Acc
# road 96.98 98.51
# sidewalk 77.47 86.84
# building 88.15 94.70
# wall 45.00 53.99
# fence 43.74 58.45
# pole 39.98 49.01
# traffic light 46.02 58.42
# traffic sign 56.44 66.34
# vegetation 88.85 94.80
# terrain 52.96 67.15
# sky 90.97 95.54
# person 66.23 81.86
# rider 33.90 43.57
# car 90.36 95.72
# truck 56.61 69.87
# bus 58.94 79.92
# train 44.69 55.91
# motorcycle 20.97 25.95
# bicycle 62.36 79.14
# Summary:
# Scope mIoU mAcc aAcc
# global 61.09 71.35 93.44
|
#coding: UTF-8
import serial
import sys
import time
import binascii
import json
import math
from collections import deque
import paho.mqtt.client as mqtt
# Configuration variables (initialized with default values)
MQTT_BROKER_IP = "localhost"
MQTT_BROKER_PORT = 1883
SERIAL_PORT = "COM3"
MESSAGE_LEN = 240
NO_MESSAGE_TIME_OUT = 5.0
NO_MESSAGE_MAX_COUNT = 30000
# Global variables
cmd_gamepad = [99,109,100,254,253,252,0,0,0,128,128,128,128,128,128,252]
cmd_queue = deque([])
key_dict = {
"cross_x":0,
"cross_y":0,
"L3D_x":0,
"L3D_y":0,
"R3D_x":0,
"R3D_y":0,
"RT":0,
"LT":0,
"A":0,
"B":0,
"X":0,
"Y":0,
"RB":0,
"LB":0,
"BACK":0,
"START":0
}
def load_config():
    # declare the settings as global, otherwise the values read from
    # config.json would only be assigned to local variables and discarded
    global MQTT_BROKER_IP, MQTT_BROKER_PORT, MESSAGE_LEN, NO_MESSAGE_TIME_OUT, NO_MESSAGE_MAX_COUNT
    with open('config.json', 'r') as f:
        config = json.load(f)
    MQTT_BROKER_IP = config["MQTT_BROKER_IP"]
    MQTT_BROKER_PORT = config["MQTT_BROKER_PORT"]
    MESSAGE_LEN = config["MESSAGE_LEN"]
    NO_MESSAGE_TIME_OUT = config["NO_MESSAGE_TIME_OUT"]
    NO_MESSAGE_MAX_COUNT = config["NO_MESSAGE_MAX_COUNT"]
def on_message(client, userdata, msg):
if msg.topic == "gamepad":
cmd_byte = [0 for x in range(16)]
key_dict = json.loads(msg.payload.decode('utf-8') )
        cmd_byte[0] = 99  # header
        cmd_byte[1] = 109
        cmd_byte[2] = 100
        cmd_byte[3] = 254  # ID0
        cmd_byte[4] = 253  # ID1
cmd_byte[9] = 128
cmd_byte[10] = 128
cmd_byte[11] = 128
cmd_byte[12] = 128
cmd_byte[13] = 128
cmd_byte[14] = 128
cmd_byte[6] = key_dict["A"] + \
(key_dict["B"] << 1) + \
(key_dict["X"] << 2) +\
(key_dict["Y"] << 3) +\
(key_dict["RB"] << 4) +\
(key_dict["LB"] << 5) +\
(key_dict["BACK"] << 6) +\
(key_dict["START"] << 7)
cmd_byte[7] = key_dict["RT"]
cmd_byte[8] = key_dict["LT"]
cmd_byte[9] = key_dict["cross_x"]
cmd_byte[10] = key_dict["cross_y"]
cmd_byte[11] = key_dict["R3D_x"]
cmd_byte[12] = key_dict["R3D_y"]
cmd_byte[13] = key_dict["L3D_x"]
cmd_byte[14] = key_dict["L3D_y"]
cmd_byte[15] = 252
chk_sum = 0
for i in range(6,16):
chk_sum += cmd_byte[i]
        cmd_byte[5] = chk_sum % 256  # checksum
global cmd_gamepad
cmd_gamepad = [cmd_byte[i] for i in range(16)]
if msg.topic == "cmd":
global cmd_queue
print(msg.payload)
cmd_byte = [ msg.payload[i] for i in range(16) ]
print(cmd_byte)
cmd_queue.append(cmd_byte)
def create_mqtt_client():
host = MQTT_BROKER_IP
port = MQTT_BROKER_PORT
    # specify MQTT protocol v3.1.1 when creating the client instance
client = mqtt.Client(protocol=mqtt.MQTTv311)
client.connect(host, port=port, keepalive=60)
client.publish("presence","this is " + __file__)
client.on_message = on_message
client.subscribe("gamepad")
client.subscribe("cmd")
return client
def publish_data_loop(client,ser):
buff = []
s = []
st = []
st_bytes=b""
length = 0
i = 0
start_time = time.time()
timestamp = 0
timestamp_pre = 0
elapsed_time = 0
no_message_count = 0
ser.write(cmd_gamepad)
print (len(buff))
while True:
#client.publish("TEST","While Loop")
s = [ele for ele in ser.read(ser.in_waiting)]
buff.extend(s)
if len(s) == 0:
no_message_count = no_message_count + 1
if no_message_count > NO_MESSAGE_MAX_COUNT:
client.loop_stop(force=False)
ser.close()
print("COM" + " is busy.")
client.publish("TEST", "Serial no message" +" error!")
client.disconnect()
print("exit!")
sys.exit(0)
return
else:
no_message_count = 0
length = len(buff)
if length < MESSAGE_LEN + 5:
continue
for i in range(length-MESSAGE_LEN-2):
if (buff[i] == 0xff) and (buff[i+1] == 0xff) and \
(buff[i+2]==0x48) and (buff[i+3]==0x45) and \
(buff[i+4] == 0x41) and (buff[i+5]==0x44): #and \
#(buff[i+message_len] == 0xff) and (buff[i+1+message_len] == 0xff):
polution_check = False
for j in range(i+6, i+MESSAGE_LEN - 3):
if (buff[j] == 0xff) and (buff[j+1] == 0xff) and (buff[j+2]==0x48):
buff = buff[j:]
polution_check = True
break
if polution_check == True:
break
start_time = time.time()
timestamp_pre = timestamp
timestamp = buff[11]
st = buff[i:i+MESSAGE_LEN]
st_bytes = binascii.hexlify(bytes(list(st)))
chk_sum = 0
for k in range(7,MESSAGE_LEN):
chk_sum = chk_sum + st[k]
if chk_sum%256 != st[6] or (timestamp-timestamp_pre+256)%256 != 10:
client.publish("error", str(timestamp)+" "+str(timestamp_pre) + " " + str(chk_sum%256) + " " + str(st[6])+" "+ str(len(buff)))
client.publish("mouse", bytes(list(st)))
try:
ser.write(cmd_gamepad)
client.publish("TEST", "in msg loop")
if len(cmd_queue) != 0:
ser.write(cmd_queue.popleft())
except:
ser.close()
client.publish("TEST", "serial write" +" error!")
return
buff = buff[i+MESSAGE_LEN:]
break
end_time = time.time()
elapsed_time = end_time - start_time
client.publish("TEST","elapsed:"+str(elapsed_time))
#print (elapsed_time,len(buff),timestamp, (timestamp-timestamp_pre+256)%256 )
#print(cmd_list)
if(elapsed_time > NO_MESSAGE_TIME_OUT):
client.loop_stop(force=False)
ser.close()
print("serial" + " is busy.")
client.publish("TEST", "serial" +" is busy!")
client.disconnect()
print("exit!")
sys.exit(0)
return
def main():
load_config()
client = create_mqtt_client()
while True:
try:
ser = serial.Serial(SERIAL_PORT,timeout = 0.05, write_timeout=0.05)
client.publish("TEST", "Serial connected")
print("Serial connected")
break
except:
client.publish("TEST", "Cannot connect serial!")
print("Cannot connect serial")
client.publish("TEST", "data send start!")
client.loop_start()
publish_data_loop(client,ser)
if __name__ == "__main__":
main()
|
from spark_auto_mapper_fhir.extensions.extension_base import ExtensionBase
from spark_auto_mapper_fhir.classproperty import genericclassproperty
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
class ProviderSearchSystemExtensionItem(ExtensionBase):
# noinspection PyPep8Naming
def __init__(self, valueUri: FhirUri):
"""
        :param valueUri: the URI value carried by this extension item
"""
super().__init__(
url=ProviderSearchSystemExtensionItem.codeset,
valueUri=valueUri,
)
# noinspection PyMethodParameters
@genericclassproperty
def codeset(cls) -> FhirUri:
"""
forSystem
:return:
:rtype:
"""
return "forSystem"
|
import logging
import re
from collections import Counter
from textblob import TextBlob
from textblob.exceptions import MissingCorpusError
from . import Bigrams, english_bigrams
class SummaryEvaluator(object):
"""Evaluate summaries of a book to find a usable summary.
A usable summary will have good coverage of the popular noun
phrases found across all summaries of the book, will have an
approximate length of four sentences (this is customizable), and
will not mention words that indicate it's a summary of a specific
edition of the book.
All else being equal, a shorter summary is better.
A summary is penalized for apparently not being in English.
"""
# These phrases are indicative of a description we can't use for
# whatever reason.
default_bad_phrases = set(
[
"version of",
"retelling of",
"abridged",
"retelling",
"condensed",
"adaptation of",
"look for",
"new edition",
"excerpts",
"version",
"edition",
"selections",
"complete texts",
"in one volume",
"contains",
"--container",
"--original container",
"playaway",
"complete novels",
"all rights reserved",
]
)
bad_res = set(
[
re.compile("the [^ ]+ Collection"),
re.compile("Includes"),
re.compile("This is"),
]
)
_nltk_installed = True
log = logging.getLogger("Summary Evaluator")
def __init__(
self,
optimal_number_of_sentences=4,
noun_phrases_to_consider=10,
bad_phrases=None,
):
self.optimal_number_of_sentences = optimal_number_of_sentences
self.summaries = []
self.noun_phrases = Counter()
self.blobs = dict()
self.scores = dict()
self.noun_phrases_to_consider = float(noun_phrases_to_consider)
self.top_noun_phrases = None
if bad_phrases is None:
self.bad_phrases = self.default_bad_phrases
else:
self.bad_phrases = bad_phrases
def add(self, summary, parser=None):
parser_class = parser or TextBlob
if isinstance(summary, bytes):
summary = summary.decode("utf8")
if summary in self.blobs:
# We already evaluated this summary. Don't count it more than once
return
blob = parser_class(summary)
self.blobs[summary] = blob
self.summaries.append(summary)
if self._nltk_installed:
try:
for phrase in blob.noun_phrases:
self.noun_phrases[phrase] = self.noun_phrases[phrase] + 1
except MissingCorpusError as e:
self._nltk_installed = False
self.log.error("Summary cannot be evaluated: NLTK not installed %r" % e)
def ready(self):
"""We are done adding to the corpus and ready to start evaluating."""
self.top_noun_phrases = set(
[
k
for k, v in self.noun_phrases.most_common(
int(self.noun_phrases_to_consider)
)
]
)
def best_choice(self):
c = self.best_choices(1)
if c:
return c[0]
else:
return None, None
def best_choices(self, n=3):
"""Choose the best `n` choices among the current summaries."""
scores = Counter()
for summary in self.summaries:
scores[summary] = self.score(summary)
return scores.most_common(n)
def score(self, summary, apply_language_penalty=True):
"""Score a summary relative to our current view of the dataset."""
if not self._nltk_installed:
# Without NLTK, there's no need to evaluate the score.
return 1
if isinstance(summary, bytes):
summary = summary.decode("utf8")
if summary in self.scores:
return self.scores[summary]
score = 1
blob = self.blobs[summary]
top_noun_phrases_used = len(
[p for p in self.top_noun_phrases if p in blob.noun_phrases]
)
score = 1 * (top_noun_phrases_used / self.noun_phrases_to_consider)
try:
sentences = len(blob.sentences)
except Exception as e:
# Can't parse into sentences for whatever reason.
# Make a really bad guess.
sentences = summary.count(". ") + 1
off_from_optimal = abs(sentences - self.optimal_number_of_sentences)
if off_from_optimal == 1:
off_from_optimal = 1.5
if off_from_optimal:
# This summary is too long or too short.
score /= off_from_optimal ** 1.5
bad_phrases = 0
l = summary.lower()
for i in self.bad_phrases:
if i in l:
bad_phrases += 1
for i in self.bad_res:
if i.search(summary):
bad_phrases += 1
if l.count(" -- ") > 3:
bad_phrases += l.count(" -- ") - 3
score *= 0.5 ** bad_phrases
if apply_language_penalty:
language_difference = english_bigrams.difference_from(
Bigrams.from_string(summary)
)
if language_difference > 1:
score *= 0.5 ** (language_difference - 1)
return score
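if __name__ == "__main__":
    # Minimal usage sketch with made-up summaries: add candidates, call ready(),
    # then take the best choice. Full scoring needs the TextBlob/NLTK corpora;
    # without them score() simply returns 1 for every summary.
    evaluator = SummaryEvaluator(optimal_number_of_sentences=2)
    evaluator.add("A young detective investigates a disappearance. Her search uncovers an old family secret.")
    evaluator.add("Abridged edition. Contains excerpts and selections from the complete texts.")
    evaluator.ready()
    best_summary, best_score = evaluator.best_choice()
    print(best_score, best_summary)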
|
import random
def guess_random_number(tries, start, stop):
number = random.randint(start, stop)
while tries != 0:
print('Number of tries left: ', tries)
        guess = int(input("Guess a number between {} and {}: ".format(start, stop)))
tries -= 1
if guess < number:
print('Guess higher!')
if guess > number:
print('Guess lower!')
if guess == number:
break
if guess == number:
print('You guessed the correct number', str(number))
else:
print('You did not guess the number: ', str(number))
#guess_random_number(5, 0, 10)
def guess_random_num_linear(tries, start, stop):
number = random.randint(start, stop)
print('The number for the program to guess is:', number)
    for x in range(start, stop + 1):
if tries != 0:
tries -= 1
print('The number for the program to guess is... ', x)
print('Number of tries left: ', tries)
if x == number:
print('You guessed the correct number', str(number))
return x
else:
print('You have failed to guess the correct number.')
break
#guess_random_num_linear(5, 0, 10)
def binary_search(tries, start, stop):
number = random.randint(start, stop)
lower_bound = int(start)
upper_bound = int(stop)
print("Random number to find:", number)
while tries != 0:
pivot = (lower_bound + upper_bound) // 2
pivot_value = pivot
tries -= 1
if pivot_value == number:
print('You guessed the correct number', str(pivot))
return pivot
elif pivot_value > number:
upper_bound = pivot - 1
print('Guessing Lower!')
else:
lower_bound = pivot + 1
print('Guessing Higher!')
else:
print('Your program has failed to find the number')
binary_search(5, 0, 10)
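# Note on the try budget: a binary search over the inclusive range [start, stop]
# needs at most ceil(log2(stop - start + 1)) guesses, i.e. ceil(log2(11)) = 4 for
# the call above, so 5 tries are always enough.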
|
import threading
import ipaddress
import socket
import time
from typing import Optional, Union
import requests
requests.packages.urllib3.disable_warnings() # type: ignore
from concurrent.futures import ThreadPoolExecutor
import colorama
colorama.init(autoreset=True)
import os
import bs4
import argparse
folder = os.path.dirname(__file__)
output_strings: list[str] = []
ports = [80, 443, 8080, 8081, 8443, 4434]
keywords = ["cam", "rasp", " hp ", "system", "index of", "dashboard"]
output_tmp = ""
last_write = time.time()
global_lock = threading.Lock()
banner_targets: list[dict[str, Union[str, int]]] = []
def main():
print("----------------------------")
print(" Deep Web Scanner! ")
print("----------------------------\n")
print("Every active webserver url will be logged in the output file.")
print("This terminal will only show urls/metadata with the following keywords: " + ", ".join(keywords))
if indexof.lower() == "true":
print ("'Index of /' filenames will be logged!")
print("Scan will start...")
with open(input_file, "r") as myfile:
content = myfile.readlines()
for line in content:
# split ip range 2.56.20.0-2.56.23.255
if "-" in line:
ip_range_array = line.split("-")
ip_range_start = ip_range_array[0].strip()
ip_range_end = ip_range_array[1].strip()
print(f"Start scan from range: {ip_range_start} - {ip_range_end}")
current_ip = ipaddress.IPv4Address(ip_range_start)
end_ip = ipaddress.IPv4Address(ip_range_end)
with ThreadPoolExecutor(max_workers=100) as executor_portcheck:
while current_ip < end_ip:
executor_portcheck.submit(start_portcheck, current_ip.exploded)
current_ip += 1
elif "/" in line:
ip_range = ipaddress.ip_network(line.strip())
with ThreadPoolExecutor(max_workers=100) as executor_portcheck:
for ip in ip_range.hosts():
executor_portcheck.submit(start_portcheck, ip.exploded)
else:
print("No valid input file! Should be something like 2.56.20.0-2.56.23.255 per line!")
global banner_targets
print(f"{len(banner_targets)} responses")
for target in banner_targets:
start_request(target["ip"], target["port"]) # type: ignore
banner_targets.clear()
write_line("", True)
def start_portcheck(ip: str) -> None:
global banner_targets
# fast webserver port checking
for port in ports:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.settimeout(3)
result = sock.connect_ex((ip, port))
if result == 0:
# queue normal browser request
banner_targets.append({"ip": ip, "port": port})
def start_request(ip: str, port: int) -> None:
# check for running websites
try:
url = "https://" + ip + ":" + str(port)
if port == 80:
url = "http://" + ip
elif port == 8080:
url = "http://" + ip + ":8080"
elif port == 8081:
url = "http://" + ip + ":8081"
site_result = request_url(url)
if not isinstance(site_result, bool) and site_result is not False:
# if the site is reachable get some information
get_banner(site_result[0], site_result[1])
except Exception as e:
print(e)
def request_url(url: str) -> Union[tuple[requests.Response, bs4.BeautifulSoup], bool]:
# request url and return the response
try:
session = requests.session()
session.headers[
"User-Agent"
] = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.152 Safari/537.36"
header = session.head(url=url, timeout=20, verify=False)
# check content type
one_allowed_content_type = False
content_type_header = header.headers.get("content-type")
if content_type_header is not None:
for allowed_content_type in ["html", "plain", "xml", "text", "json"]:
if allowed_content_type in content_type_header.lower():
one_allowed_content_type = True
if not one_allowed_content_type:
return False
else:
return False
response = session.get(url=url, timeout=30, verify=False)
session.close()
soup = bs4.BeautifulSoup(response.text, "html.parser")
return (response, soup)
except Exception:
return False
def get_banner(request: requests.Response, soup: bs4.BeautifulSoup):
# get banner information, show console output and save them to file
banner_array: list[str] = []
banner_array.append(request.url)
server_header = request.headers.get("Server")
if isinstance(server_header, str):
banner_array.append(server_header)
title = soup.find("title")
if isinstance(title, bs4.Tag):
title = title.get_text().strip().replace("\n", "")
banner_array.append(title)
meta_tags: bs4.element.ResultSet[bs4.Tag] = soup.find_all("meta", attrs={"name": "generator"})
if len(meta_tags) > 0:
for meta_tag in meta_tags:
            # Tag.attrs is the attribute dict; the previous .attr lookup always
            # returned None, so the generator meta tag was never recorded
            generator = meta_tag.attrs.get("content")
            if isinstance(generator, str):
                banner_array.append(generator)
# has this site a password field?
password_fields = soup.find_all(attrs={"type": "password"})
if len(password_fields) > 0:
banner_array.append("login required")
# check for "index of" websites and show root files/folders
global indexof
if indexof.lower() == "true" and "index of" in request.text.lower():
a_array: list[bs4.Tag] = soup.find_all("a")
for a in a_array:
href = a.attrs.get("href")
if isinstance(href, str):
if href.find("?") != 0:
banner_array.append(href)
banner_array.append(f"{str(len(request.content))} content size")
fullstring = ", ".join(banner_array)
if fullstring not in output_strings:
output_strings.append(fullstring)
for keyword in keywords:
if keyword in fullstring.lower():
if "login required" in fullstring:
print(colorama.Fore.RED + fullstring)
elif "Index of /" in fullstring:
print(colorama.Fore.YELLOW + fullstring)
else:
print(colorama.Fore.GREEN + fullstring)
write_line(fullstring)
def write_line(line: str, force: Optional[bool] = False):
# buffers and writes output to file
global output_tmp, last_write
output_tmp += line + "\n"
if last_write + 30 < time.time() or force:
last_write = time.time()
while global_lock.locked():
continue
global_lock.acquire()
lines_to_write = output_tmp.count("\n")
with open(output_file, "a") as output_1:
output_1.write(output_tmp)
output_tmp = ""
if lines_to_write > 1:
print(f"{lines_to_write} webservers found and written to file")
else:
print(f"{lines_to_write} webserver found and written to file")
global_lock.release()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Check if domain has an active website and grab banner."
)
parser.add_argument(
"-i", type=str, default="./asn-country-ipv4.csv", help="Path to input file"
)
parser.add_argument(
"-o", type=str, default="./deep-web.txt", help="Path to output file"
)
parser.add_argument(
"-indexof", type=str, default="no", help="Show files from index of sites"
)
args = parser.parse_args()
input_file = args.i
output_file = args.o
indexof = args.indexof
main()
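# Example invocation (the script and file names here are illustrative; pass
# "-indexof true" to also log "Index of /" file listings):
#   python deep-web-scanner.py -i ./asn-country-ipv4.csv -o ./deep-web.txt -indexof true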
|
from gemstone import MicroService, event_handler, exposed_method
from gemstone.event.transport import rabbitmq, redis_transport
class EventTestService2(MicroService):
name = "event.test2"
host = "127.0.0.1"
port = 8000
event_transports = [
redis_transport.RedisEventTransport("redis://127.0.0.1:6379/0")
]
@exposed_method()
def say_hello(self, name):
self.emit_event("said_hello", {"name": name})
return "Hello {}".format(name)
if __name__ == '__main__':
service = EventTestService2()
service.start()
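# A companion service could react to the event emitted above using the
# `event_handler` decorator imported at the top of this file (sketch only,
# names are illustrative):
#
#   class EventListenerService(MicroService):
#       name = "event.listener"
#       host = "127.0.0.1"
#       port = 8001
#       event_transports = [
#           redis_transport.RedisEventTransport("redis://127.0.0.1:6379/0")
#       ]
#
#       @event_handler("said_hello")
#       def on_said_hello(self, body):
#           print("somebody said hello to", body["name"])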
|
from datetime import datetime, timedelta
class Pubg(object):
def __init__(self):
self.top_history = []
def _last_top(self, players=None):
if not players:
if self.top_history:
return self.top_history[-1]
else:
return None
for top in self.top_history:
print(top['players'] == players)
if top['players'] == players:
return top
def get_last(self, players=None):
now = datetime.now()
if players:
top = self._last_top(players)
else:
top = self._last_top()
if top:
msg = 'Last top for team ' + ' '.join(top['players'].split(',')) + ' was ' + str(now - top['time']) + ' ago'
else:
msg = 'No last top'
if players:
msg += ' for this team'
self._remove_old()
return msg
def new_top(self, players):
top = {
'time': datetime.now(),
'players': players
}
self.top_history.append(top)
print(self.top_history)
self._remove_old()
    def _remove_old(self):
        # rebuild the list instead of calling remove() while iterating,
        # which would skip the entry right after each removed one
        now = datetime.now()
        self.top_history = [
            top for top in self.top_history
            if top['time'] >= now - timedelta(days=30)
        ]
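if __name__ == "__main__":
    # Minimal usage sketch: record a top for a team, then query it.
    pubg = Pubg()
    pubg.new_top("alice,bob")
    print(pubg.get_last("alice,bob"))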
|
# -*- coding: utf-8 -*-
"""Tornado request argument parsing module.
Example: ::
import tornado.web
from marshmallow import fields
from webargs.tornadoparser import use_args
class HelloHandler(tornado.web.RequestHandler):
@use_args({'name': fields.Str(missing='World')})
def get(self, args):
response = {'message': 'Hello {}'.format(args['name'])}
self.write(response)
"""
import tornado.web
import tornado.concurrent
from tornado.escape import _unicode
from webargs import core
from webargs.compat import basestring
from webargs.multidictproxy import MultiDictProxy
class HTTPError(tornado.web.HTTPError):
"""`tornado.web.HTTPError` that stores validation errors."""
def __init__(self, *args, **kwargs):
self.messages = kwargs.pop("messages", {})
self.headers = kwargs.pop("headers", None)
super(HTTPError, self).__init__(*args, **kwargs)
def is_json_request(req):
content_type = req.headers.get("Content-Type")
return content_type is not None and core.is_json(content_type)
class WebArgsTornadoMultiDictProxy(MultiDictProxy):
"""
Override class for Tornado multidicts, handles argument decoding
requirements.
"""
def __getitem__(self, key):
try:
value = self.data.get(key, core.missing)
if value is core.missing:
return core.missing
elif key in self.multiple_keys:
return [_unicode(v) if isinstance(v, basestring) else v for v in value]
elif value and isinstance(value, (list, tuple)):
value = value[0]
if isinstance(value, basestring):
return _unicode(value)
else:
return value
# based on tornado.web.RequestHandler.decode_argument
except UnicodeDecodeError:
raise HTTPError(400, "Invalid unicode in %s: %r" % (key, value[:40]))
class WebArgsTornadoCookiesMultiDictProxy(MultiDictProxy):
"""
And a special override for cookies because they come back as objects with a
`value` attribute we need to extract.
Also, does not use the `_unicode` decoding step
"""
def __getitem__(self, key):
cookie = self.data.get(key, core.missing)
if cookie is core.missing:
return core.missing
elif key in self.multiple_keys:
return [cookie.value]
else:
return cookie.value
class TornadoParser(core.Parser):
"""Tornado request argument parser."""
def _raw_load_json(self, req):
"""Return a json payload from the request for the core parser's load_json
Checks the input mimetype and may return 'missing' if the mimetype is
non-json, even if the request body is parseable as json."""
if not is_json_request(req):
return core.missing
# request.body may be a concurrent.Future on streaming requests
# this would cause a TypeError if we try to parse it
if isinstance(req.body, tornado.concurrent.Future):
return core.missing
return core.parse_json(req.body)
def load_querystring(self, req, schema):
"""Return query params from the request as a MultiDictProxy."""
return WebArgsTornadoMultiDictProxy(req.query_arguments, schema)
def load_form(self, req, schema):
"""Return form values from the request as a MultiDictProxy."""
return WebArgsTornadoMultiDictProxy(req.body_arguments, schema)
def load_headers(self, req, schema):
"""Return headers from the request as a MultiDictProxy."""
return WebArgsTornadoMultiDictProxy(req.headers, schema)
def load_cookies(self, req, schema):
"""Return cookies from the request as a MultiDictProxy."""
# use the specialized subclass specifically for handling Tornado
# cookies
return WebArgsTornadoCookiesMultiDictProxy(req.cookies, schema)
def load_files(self, req, schema):
"""Return files from the request as a MultiDictProxy."""
return WebArgsTornadoMultiDictProxy(req.files, schema)
def handle_error(self, error, req, schema, error_status_code, error_headers):
"""Handles errors during parsing. Raises a `tornado.web.HTTPError`
with a 400 error.
"""
status_code = error_status_code or self.DEFAULT_VALIDATION_STATUS
if status_code == 422:
reason = "Unprocessable Entity"
else:
reason = None
raise HTTPError(
status_code,
log_message=str(error.messages),
reason=reason,
messages=error.messages,
headers=error_headers,
)
def _handle_invalid_json_error(self, error, req, *args, **kwargs):
raise HTTPError(
400,
log_message="Invalid JSON body.",
reason="Bad Request",
messages={"json": ["Invalid JSON body."]},
)
def get_request_from_view_args(self, view, args, kwargs):
return args[0].request
parser = TornadoParser()
use_args = parser.use_args
use_kwargs = parser.use_kwargs
|
#!/usr/bin/env python3
import argparse
import kotlin_plugin_versions
import glob
import platform
import re
import subprocess
import shutil
import os
import os.path
import sys
import shlex
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--dependencies', default='../../../resources/kotlin-dependencies',
help='Folder containing the dependencies')
parser.add_argument('--many', action='store_true',
help='Build for all versions/kinds')
parser.add_argument('--single', action='store_false',
dest='many', help='Build for a single version/kind')
return parser.parse_args()
args = parse_args()
def is_windows():
'''Whether we appear to be running on Windows'''
if platform.system() == 'Windows':
return True
if platform.system().startswith('CYGWIN'):
return True
return False
kotlinc = 'kotlinc.bat' if is_windows() else 'kotlinc'
javac = 'javac'
kotlin_dependency_folder = args.dependencies
def quote_for_batch(arg):
if ';' in arg or '=' in arg:
if '"' in arg:
raise Exception('Need to quote something containing a quote')
return '"' + arg + '"'
else:
return arg
def run_process(cmd, capture_output=False):
print("Running command: " + shlex.join(cmd))
if is_windows():
cmd = ' '.join(map(quote_for_batch, cmd))
print("Converted to Windows command: " + cmd)
try:
return subprocess.run(cmd, check=True, capture_output=capture_output)
except subprocess.CalledProcessError as e:
print("In: " + os.getcwd(), file=sys.stderr)
shell_cmd = cmd if is_windows() else shlex.join(cmd)
print("Command failed: " + shell_cmd, file=sys.stderr)
if capture_output:
print("stdout output:\n" + e.stdout.decode(encoding='UTF-8',
errors='replace'), file=sys.stderr)
print("stderr output:\n" + e.stderr.decode(encoding='UTF-8',
errors='replace'), file=sys.stderr)
raise e
def compile_to_dir(srcs, classpath, java_classpath, output):
# Use kotlinc to compile .kt files:
run_process([kotlinc,
# kotlinc can default to 256M, which isn't enough when we are extracting the build
'-J-Xmx2G',
'-Xopt-in=kotlin.RequiresOptIn',
'-d', output,
'-module-name', 'codeql-kotlin-extractor',
'-no-reflect', '-no-stdlib',
'-jvm-target', '1.8',
'-classpath', classpath] + srcs)
# Use javac to compile .java files, referencing the Kotlin class files:
run_process([javac,
'-d', output,
'-source', '8', '-target', '8',
'-classpath', os.path.pathsep.join([output, classpath, java_classpath])] + [s for s in srcs if s.endswith(".java")])
def compile_to_jar(srcs, classpath, java_classpath, output):
builddir = 'build/classes'
if os.path.exists(builddir):
shutil.rmtree(builddir)
os.makedirs(builddir)
compile_to_dir(srcs, classpath, java_classpath, builddir)
run_process(['jar', 'cf', output,
'-C', builddir, '.',
'-C', 'src/main/resources', 'META-INF'])
shutil.rmtree(builddir)
def find_sources(path):
return glob.glob(path + '/**/*.kt', recursive=True) + glob.glob(path + '/**/*.java', recursive=True)
def get_kotlin_lib_folder():
x = run_process([kotlinc, '-version', '-verbose'], capture_output=True)
output = x.stderr.decode(encoding='UTF-8', errors='strict')
m = re.match(
r'.*\nlogging: using Kotlin home directory ([^\n]+)\n.*', output)
if m is None:
raise Exception('Cannot determine kotlinc home directory')
kotlin_home = m.group(1)
print("Kotlin home directory: " + kotlin_home)
return kotlin_home + '/lib'
def get_gradle_lib_folder():
x = run_process(['gradle', 'getHomeDir'], capture_output=True)
output = x.stdout.decode(encoding='UTF-8', errors='strict')
m = re.search(r'(?m)^> Task :getHomeDir\n([^\n]+)$', output)
if m is None:
print("gradle getHomeDir output:\n" + output, file=sys.stderr)
raise Exception('Cannot determine gradle home directory')
gradle_home = m.group(1)
print("Gradle home directory: " + gradle_home)
return gradle_home + '/lib'
def find_jar(path, pattern):
result = glob.glob(path + '/' + pattern + '*.jar')
if len(result) == 0:
raise Exception('Cannot find jar file %s under path %s' %
(pattern, path))
return result
def patterns_to_classpath(path, patterns):
result = []
for pattern in patterns:
result += find_jar(path, pattern)
return os.path.pathsep.join(result)
def transform_to_embeddable(srcs):
# replace imports in files:
for src in srcs:
with open(src, 'r') as f:
content = f.read()
content = content.replace('import com.intellij',
'import org.jetbrains.kotlin.com.intellij')
with open(src, 'w') as f:
f.write(content)
def compile(jars, java_jars, dependency_folder, transform_to_embeddable, output, tmp_dir, version):
classpath = patterns_to_classpath(dependency_folder, jars)
java_classpath = patterns_to_classpath(dependency_folder, java_jars)
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
shutil.copytree('src', tmp_dir)
for v in kotlin_plugin_versions.many_versions:
if v != version:
shutil.rmtree(
tmp_dir + '/main/kotlin/utils/versions/v_' + v.replace('.', '_'))
srcs = find_sources(tmp_dir)
transform_to_embeddable(srcs)
compile_to_jar(srcs, classpath, java_classpath, output)
shutil.rmtree(tmp_dir)
def compile_embeddable(version):
compile(['kotlin-stdlib-' + version, 'kotlin-compiler-embeddable-' + version],
['kotlin-stdlib-' + version],
kotlin_dependency_folder,
transform_to_embeddable,
'codeql-extractor-kotlin-embeddable-%s.jar' % (version),
'build/temp_src',
version)
def compile_standalone(version):
compile(['kotlin-stdlib-' + version, 'kotlin-compiler-' + version],
['kotlin-stdlib-' + version],
kotlin_dependency_folder,
lambda srcs: None,
'codeql-extractor-kotlin-standalone-%s.jar' % (version),
'build/temp_src',
version)
if args.many:
for version in kotlin_plugin_versions.many_versions:
compile_standalone(version)
compile_embeddable(version)
else:
compile_standalone(kotlin_plugin_versions.get_single_version())
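# Example invocations (the script name is illustrative; --dependencies defaults
# to ../../../resources/kotlin-dependencies as declared in parse_args above):
#   python build.py --single
#   python build.py --many --dependencies ../../../resources/kotlin-dependencies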
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import pytest
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.strategies import DDPSpawnStrategy, DDPStrategy
from pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_10
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf
if torch.distributed.is_available():
from torch.distributed.algorithms.ddp_comm_hooks import default_hooks as default
from torch.distributed.algorithms.ddp_comm_hooks import powerSGD_hook as powerSGD
if _TORCH_GREATER_EQUAL_1_10:
import torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook as post_localSGD
class TestDDPStrategy(DDPStrategy):
def __init__(self, expected_ddp_comm_hook_name, *args, **kwargs):
self.expected_ddp_comm_hook_name = expected_ddp_comm_hook_name
super().__init__(*args, **kwargs)
def teardown(self):
# check here before unwrapping DistributedDataParallel in self.teardown
attached_ddp_comm_hook_name = self.model._get_ddp_logging_data()["comm_hook"]
assert attached_ddp_comm_hook_name == self.expected_ddp_comm_hook_name
return super().teardown()
@RunIf(min_cuda_gpus=2, min_torch="1.9.0", skip_windows=True, standalone=True)
def test_ddp_fp16_compress_comm_hook(tmpdir):
"""Test for DDP FP16 compress hook."""
model = BoringModel()
strategy = TestDDPStrategy(
expected_ddp_comm_hook_name=default.fp16_compress_hook.__qualname__,
ddp_comm_hook=default.fp16_compress_hook,
)
trainer = Trainer(
max_epochs=1,
accelerator="gpu",
devices=2,
strategy=strategy,
default_root_dir=tmpdir,
sync_batchnorm=True,
fast_dev_run=True,
enable_progress_bar=False,
enable_model_summary=False,
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
@RunIf(min_cuda_gpus=2, min_torch="1.9.0", skip_windows=True, standalone=True)
def test_ddp_sgd_comm_hook(tmpdir):
"""Test for DDP FP16 compress hook."""
model = BoringModel()
strategy = TestDDPStrategy(
expected_ddp_comm_hook_name=powerSGD.powerSGD_hook.__qualname__,
ddp_comm_state=powerSGD.PowerSGDState(process_group=None),
ddp_comm_hook=powerSGD.powerSGD_hook,
)
trainer = Trainer(
max_epochs=1,
accelerator="gpu",
devices=2,
strategy=strategy,
default_root_dir=tmpdir,
sync_batchnorm=True,
fast_dev_run=True,
enable_progress_bar=False,
enable_model_summary=False,
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
@RunIf(min_cuda_gpus=2, min_torch="1.9.0", skip_windows=True, standalone=True)
def test_ddp_fp16_compress_wrap_sgd_comm_hook(tmpdir):
"""Test for DDP FP16 compress wrapper for SGD hook."""
model = BoringModel()
strategy = TestDDPStrategy(
expected_ddp_comm_hook_name=default.fp16_compress_wrapper(powerSGD.powerSGD_hook).__qualname__,
ddp_comm_state=powerSGD.PowerSGDState(process_group=None),
ddp_comm_hook=powerSGD.powerSGD_hook,
ddp_comm_wrapper=default.fp16_compress_wrapper,
)
trainer = Trainer(
max_epochs=1,
accelerator="gpu",
devices=2,
strategy=strategy,
default_root_dir=tmpdir,
sync_batchnorm=True,
fast_dev_run=True,
enable_progress_bar=False,
enable_model_summary=False,
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
@RunIf(min_cuda_gpus=2, min_torch="1.9.0", skip_windows=True, standalone=True)
def test_ddp_spawn_fp16_compress_comm_hook(tmpdir):
"""Test for DDP Spawn FP16 compress hook."""
model = BoringModel()
strategy = DDPSpawnStrategy(ddp_comm_hook=default.fp16_compress_hook)
trainer = Trainer(
max_epochs=1,
accelerator="gpu",
devices=2,
strategy=strategy,
default_root_dir=tmpdir,
sync_batchnorm=True,
fast_dev_run=True,
enable_progress_bar=False,
enable_model_summary=False,
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
@RunIf(min_cuda_gpus=2, min_torch="1.10.0", skip_windows=True, standalone=True)
def test_ddp_post_local_sgd_comm_hook(tmpdir):
"""Test for DDP post-localSGD hook."""
model = BoringModel()
strategy = TestDDPStrategy(
expected_ddp_comm_hook_name=post_localSGD.post_localSGD_hook.__qualname__,
ddp_comm_state=post_localSGD.PostLocalSGDState(
process_group=None,
subgroup=None,
start_localSGD_iter=8,
),
ddp_comm_hook=post_localSGD.post_localSGD_hook,
model_averaging_period=4,
)
trainer = Trainer(
fast_dev_run=True,
accelerator="gpu",
devices=2,
strategy=strategy,
default_root_dir=tmpdir,
sync_batchnorm=True,
enable_progress_bar=False,
enable_model_summary=False,
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
@RunIf(skip_windows=True, min_torch="1.10.0", min_cuda_gpus=2, standalone=True)
@mock.patch("torch.distributed.algorithms.model_averaging.averagers.PeriodicModelAverager.average_parameters")
def test_post_local_sgd_model_averaging(average_parameters_mock, tmpdir):
"""Test that when using DDP with post-localSGD, model averaging is called."""
model = BoringModel()
# test regular ddp does not call model averaging
trainer = Trainer(
fast_dev_run=True,
accelerator="gpu",
devices=2,
strategy="ddp",
default_root_dir=tmpdir,
sync_batchnorm=True,
enable_progress_bar=False,
enable_model_summary=False,
)
trainer.fit(model)
average_parameters_mock.assert_not_called()
# test ddp with post-localSGD does call model averaging
ddp_strategy = DDPStrategy(
ddp_comm_state=post_localSGD.PostLocalSGDState(
process_group=None,
subgroup=None,
start_localSGD_iter=8,
),
ddp_comm_hook=post_localSGD.post_localSGD_hook,
model_averaging_period=4,
)
trainer = Trainer(
fast_dev_run=True,
accelerator="gpu",
devices=2,
strategy=ddp_strategy,
default_root_dir=tmpdir,
sync_batchnorm=True,
)
trainer.fit(model)
average_parameters_mock.assert_called()
@RunIf(skip_windows=True, min_torch="1.10.0", min_cuda_gpus=2, standalone=True)
@mock.patch("torch.distributed.algorithms.model_averaging.averagers.PeriodicModelAverager.average_parameters")
def test_post_local_sgd_model_averaging_value_error(average_parameters_mock, tmpdir):
"""Test that when using DDP with post-localSGD a ValueError is thrown when the optmizer is
ZeroRedundancyOptimizer."""
from torch.distributed.optim import ZeroRedundancyOptimizer
class OptimizerModel(BoringModel):
def configure_optimizers(self):
return ZeroRedundancyOptimizer(params=self.parameters(), optimizer_class=torch.optim.Adam, lr=0.01)
model = OptimizerModel()
strategy = DDPStrategy(
ddp_comm_state=post_localSGD.PostLocalSGDState(
process_group=None,
subgroup=None,
start_localSGD_iter=8,
),
ddp_comm_hook=post_localSGD.post_localSGD_hook,
model_averaging_period=4,
)
trainer = Trainer(
fast_dev_run=True,
accelerator="gpu",
devices=2,
strategy=strategy,
default_root_dir=tmpdir,
sync_batchnorm=True,
enable_progress_bar=False,
enable_model_summary=False,
)
with pytest.raises(ValueError, match="Currently model averaging cannot work with a distributed optimizer"):
trainer.fit(model)
average_parameters_mock.assert_not_called()
|
# -*- coding: utf-8 -*-
"""Standard utility functions used throughout AlphaGradient"""
# Standard Imports
from __future__ import annotations
from abc import ABC, abstractmethod
import builtins
from datetime import (
date,
datetime,
time,
timedelta,
)
import math
from pathlib import Path
# Third Party Imports
import numpy as np
import pandas as pd
# Typing
from typing import (
TYPE_CHECKING,
Any,
Literal,
Generator,
Generic,
Iterable,
Optional,
TypeVar,
Union,
)
T = TypeVar("T")
class PropertyType(Generic[T]):
"""A Type class for property objects themselves, before being bound to a class instance"""
def fget(self, *args: Any) -> T:
...
Property = builtins.property
"""A Type for builtin properties that have been bound to a class instance"""
PyNumber = Union[int, float]
"""Numeric type that does not include complex numbers (only native python types)"""
Number = Union[PyNumber, np.number, pd.core.arrays.numeric.NumericDtype]
"""Numeric type that does not include complex numbers"""
DatetimeLike = Union[pd.Timestamp, np.datetime64, date, datetime, str]
"""Objects convertable to python datetimes"""
TimeLike = Union[time, str]
"""Objects convertable to python time objects"""
DateOrTime = Union[DatetimeLike, time]
"""Objects that are either DatetimeLike or TimeLike in nature"""
if TYPE_CHECKING:
from typeshed import SupportsLessThanT as SLTT
_global_persistent_path: PropertyType[Path]
def auto_batch(iterable: Iterable) -> Generator:
"""
Returns a generator which yields automatically sized batches
Given a sized iterable, determines an optimal batch size to be used for
multiprocessing purposes. Using this batch size, returns a generator which
yields batches of the iterable with the optimal size
Parameters:
iterable: An iterable from which to create a batch generator
Returns:
The batch generator of the iterable input
"""
return get_batches(iterable, auto_batch_size(iterable))
def auto_batch_size(iterable: Iterable) -> int:
"""
Returns a multiprocessing-optimal batch size for an iterable
Given an iterable, returns an integer value representing an optimal batch
size for use in python's multiprocessing library
Parameters:
iterable (Iterable): Sized iterable to determine optimal batch size for
Returns:
The optimal batch size for multiprocessing
"""
# Converting to a sized iterable to guarantee __len__ functionality
iterable = list(iterable)
# Output Parameters
horizontal_offset = 10000
horizontal_stretch = 70 / 100_000_000
vertical_offset = 100
# Building the quadratic
output: Number
output = len(iterable) - horizontal_offset
output = output**2
output *= -1
output *= horizontal_stretch
output += vertical_offset
# Output bounded between 30 and 100
return bounded(int(output), lower=30, upper=100)
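def _auto_batch_size_example() -> None:
    # Illustrative sketch added for clarity; not part of the original module and
    # never called. It checks the two extremes of the quadratic above: an empty
    # iterable falls to the lower bound (30), while an iterable whose length equals
    # the horizontal offset (10,000 items) sits at the vertical offset, capped at 100.
    assert auto_batch_size([]) == 30
    assert auto_batch_size(range(10_000)) == 100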
def bounded(
to_bound: SLTT, lower: Optional[SLTT] = None, upper: Optional[SLTT] = None
) -> SLTT:
"""
Bounds an object between a lower and upper bound
Given an object that defines behavior for comparison (__lt__, __gt__),
returns the object bounded between the lower and upper bounds. Boundaries
    will be omitted if they are not provided (None). If lower and upper are not
None, they must be of the same type as to_bound.
Type Explanation:
SLTT (SupportsLessThanT): A TypeVar which implements the __lt__ method.
Parameters:
to_bound (SLTT): the object to be bounded
lower (Optional[SLTT]): the lower boundary of the operation
upper (Optional[SLTT]): the upper boundary of the operation
Returns:
The bounded object
"""
if lower is None and upper is None:
raise ValueError(
"Of the parameters 'lower' and 'upper', at least one must be" "specified"
)
if lower:
to_bound = max(to_bound, lower)
if upper:
to_bound = min(to_bound, upper)
return to_bound
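def _bounded_example() -> None:
    # Illustrative sketch added for clarity; not part of the original module and
    # never called. Values outside the [30, 100] window are clamped to the nearest
    # bound, while values inside it pass through unchanged.
    assert bounded(150, lower=30, upper=100) == 100
    assert bounded(-5, lower=30, upper=100) == 30
    assert bounded(42, lower=30, upper=100) == 42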
def deconstruct_dt(dt: DateOrTime) -> dict[str, float]:
"""
Returns a dictionary of datetime attribute values on object 'dt'
Given a DatetimeLike object, returns a dictionary where keys are the
object's date and time related attribute names, and values are the object's
associated attribute values.
Parameters:
dt (DateOrTime): the dt to deconstruct
Returns:
A dictionary of attributes and their associated values on dt
Raises:
        TypeError: Raised if dt is not a datetime-like object, as it won't have
the proper attributes.
"""
# The potential attributes to be accessed
d = ["year", "month", "day"]
t = ["hour", "minute", "second", "microsecond"]
attrs = []
# Accept string arguments to convert to datetime
if isinstance(dt, str):
dt = read_timestring(dt)
# Determine which elements should be accessed on the dt
if isinstance(dt, datetime):
attrs = d + t
elif isinstance(dt, time):
attrs = t
elif isinstance(dt, date):
attrs = d
else:
raise TypeError(f"{dt=} is not a valid datetime object")
# Collecting the attributes
dtdict = {}
for attr in attrs:
dtdict[attr] = getattr(dt, attr)
return dtdict
def get_batches(iterable: Iterable, size: int = 100) -> Generator:
"""
Returns a generator of the iterable which yields batches of the given size
Given an iterable, uses the size parameter to create a generator which
yields batches of the iterable of the given size.
Parameter:
iterable: The iterable to yield batches of
size: The batch size of the returned generator
Returns:
A generator which yields batches of size 'size' of the iterable
"""
# Because we will be indexing the iterable, we must instantiate the entire
    # thing in memory in case it isn't already (e.g. generators)
iterable = list(iterable)
last = len(iterable)
for i in range(math.ceil(last / size)):
start = i * size
end = start + size
end = end if end < last else last
yield iterable[start:end]
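def _get_batches_example() -> None:
    # Illustrative sketch added for clarity; not part of the original module and
    # never called. A five-element iterable batched with size=2 yields two full
    # batches followed by one partial batch.
    assert list(get_batches(range(5), size=2)) == [[0, 1], [2, 3], [4]]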
def get_time(t: DateOrTime) -> time:
"""
Given a timestring or datetime-like object, returns a datetime.time object
Given an object t which represents a time or a datetime, returns a native
python datetime.time object of the appropriate time. t can be an isoformat
time string or datetime string, or a datetime-like object
Parameters:
dt (DateOrTime): The time object to convert
Returns:
The converted datetime.time object
"""
if isinstance(t, (time, str)):
return to_time(t)
return to_datetime(t).time()
def get_weekday(dt: DatetimeLike) -> str:
"""
Returns the day of the week on which a DatetimeLike object falls
Parameters:
dt (DatetimeLike): The object whose weekday is determined
Returns:
String of the day of the week on which the DatetimeLike object falls
"""
weekdays = {
0: "Monday",
1: "Tuesday",
2: "Wednesday",
3: "Thursday",
4: "Friday",
5: "Saturday",
6: "Sunday",
}
return weekdays[to_datetime(dt).weekday()]
def is_func(f: Any) -> bool:
"""
Returns a boolean value indicating whether or not f is a kind of function
Given an object f, returns a boolean value indicating whether or not the
    object is a function. Identifies all python objects whose sole or primary
purpose is to be called directly, rather than objects that simply support
an implementation of __call__.
Behavior is slightly different than the inspect module's isfunction(), as it
includes methods (bound and unbound), as well as abstract, static, and class
methods.
A 'function' is an instance of any of the following:
* function
* method (bound or unbound)
* staticmethod
* classmethod
* abstractmethod
* lambda
* built-in-function
Parameters:
        f: The object whose status as a function is being determined
Returns:
True if f is a method, function, builtin-method-or-function, or lambda,
else False
"""
# Fake class to access type 'method' and 'classmethod'
class C:
def method(self):
pass
# Getting abstract base methods
class ABCC(ABC):
@abstractmethod
def amethod(self):
pass
# Fake function to access type 'function'
def func():
pass
# Getting classic and static methods
cmethod = classmethod(func)
smethod = staticmethod(func)
# Fake lambda to access type 'lambda'
lamb = lambda: None
# Fake instance to access type 'bound method'
c = C()
# Gathering all callable types
functype = type(func)
methodtype = type(C.method)
classmethodtype = type(cmethod)
staticmethodtype = type(smethod)
abstractmethodtype = type(ABCC.amethod)
boundmethodtype = type(c.method)
lambdatype = type(lamb)
builtintype = type(print)
return isinstance(
f,
(
functype,
methodtype,
boundmethodtype,
lambdatype,
builtintype,
abstractmethodtype,
classmethodtype,
staticmethodtype,
),
)
def nearest_expiry(
expiry: DatetimeLike, method: Literal["after", "before", "both"] = "after"
) -> datetime:
"""
Returns the nearest valid expiry to the input datetime object
Determining expiries for options contracts can be difficult, because they
must fall on a business day, and their expiry time must be the market close.
Given an expiry whose validity is unknown, this function returns the
nearest expiry that is guaranteed to be valid. If the given expiry is
valid, it will be unchanged when it is returned.
The method argument is used to determine how the 'nearest' is defined. It
has three options: "after", "before", and "both"
Method must be one of the following string literals:
* "after": returns the nearest expiry that is AFTER the input expiry
* "before": returns the nearest expiry that is BEFORE the input expiry.
* | "both": compares the distances of the nearest before and after, and
| return the smaller of the two. In the case that they are equal, the
| date determined by "after" will be used.
The default argument is "after" because using "before" or "both" can
potentially lead to dangerous behavior for algorithms, as it can return an
expiry which is before the current date of the algorithm. This can cause
options contracts to initialize as expired. Only change the method
argument if you are positive that the returned expiry will be greater
than the algorithm's current date.
Parameters:
expiry (DatetimeLike):
            The expiry whose closest valid expiry will be determined
method:
One of "after", "before", or "both"
Returns:
The nearest valid expiry
"""
# Ensuring expiry is a pydatetime
expiry = to_datetime(expiry)
# All expiries must expire at market close (4PM)
expiry = set_time(expiry, "4:00 PM")
# Change the expiry day if it is not a weekday
if expiry.weekday() > 4:
# Closest AFTER
if method == "after":
dist = 7 - expiry.weekday()
expiry += timedelta(days=dist)
# Closest BEFORE
elif method == "before":
dist = expiry.weekday() - 4
expiry -= timedelta(days=dist)
# Comparing both
elif method == "both":
bdist = expiry.weekday() - 4
adist = 7 - expiry.weekday()
if bdist < adist:
expiry -= timedelta(days=bdist)
else:
expiry += timedelta(days=adist)
return expiry
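def _nearest_expiry_example() -> None:
    # Illustrative sketch added for clarity; not part of the original module and
    # never called. 2022-01-01 fell on a Saturday, so the default method="after"
    # rolls forward to Monday at market close, while method="before" rolls back
    # to the preceding Friday.
    assert nearest_expiry("2022-01-01") == datetime(2022, 1, 3, 16, 0)
    assert nearest_expiry("2022-01-01", method="before") == datetime(2021, 12, 31, 16, 0)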
def optimal_start(
start: datetime,
max_start: datetime,
min_end: datetime,
end: Optional[DatetimeLike] = None,
t: Optional[TimeLike] = None,
) -> datetime:
"""
    Based on an Environment's instantiated/tracked assets, returns an optimal datetime
for starting a backtest
Returns a backtest starting datetime that:
    * Is guaranteed to be within the date range of all instantiated assets
* | Is guaranteed to have ample time for calculations of historical
| volatility, beta, percent change etc. BEFORE the start date
    * Automatically adjusts to accommodate shorter ending periods
Parameters:
start:
            A datetime object indicating the actual starting datetime
max_start:
A datetime object indicating the maximum possible starting datetime
min_end:
A datetime object indicating the minimum possible ending datetime
end (Optional[DatetimeLike]):
The desired endpoint on which to base the optimal start point
t (Optional[TimeLike]):
The returned optimal start's time
Returns:
The optimal starting datetime
"""
end = min_end if end is None else to_datetime(end)
# If the maximum start date is before the minimum end date, there is
# no valid 'optimal start', because there is no date range that allows
# backtesting of all available data.
if max_start >= end:
return start
# Determining the optimal start period. To avoid errors, we will not sync to the beginning
optimal_delta = (end - max_start) / 2
optimal_date = max_start + optimal_delta
    # Setting the optimal date's time to midnight (00:00:00) unless specified otherwise
    t = "00:00:00" if t is None else to_time(t)
    optimal_date = set_time(optimal_date, t)
# Bounding the date to acceptable minimums and maximums
lower_bound = set_time(max_start + timedelta(days=1), t)
upper_bound = set_time(max_start + timedelta(days=365), t)
optimal_start = bounded(optimal_date, lower=lower_bound, upper=upper_bound)
return optimal_start
def progress_print(to_print: Any, last: list[int] = [0]) -> None:
"""Prints, but returns the carriage to the front of the last print"""
print("\r" + (" " * last[0]), end="\r", flush=True) # type: ignore[operator]
print(to_print, end="", flush=True)
last[0] = len(str(to_print))
def read_timestring(timestring: str) -> time:
"""
Given a timestring, returns a datetime.time object representative of the time
This function reads in 'timestrings', which are one of two things:
#. | Isoformat times as strings, using 24 hours
| (eg 04:00:00, 18:30, 02:59:59.99, etc)
#. | Strings based on 12 hour clocks
| (see ag.utils.read_twelve_hour_timestring docs)
    Using this timestring, returns a python datetime.time object corresponding
    to the time in the timestring.
    Parameters:
        timestring:
            string representing the time
    Returns:
        The time object corresponding to the time in the timestring
"""
try:
return read_twelve_hour_timestring(timestring)
    except (TypeError, ValueError):
return time.fromisoformat(timestring)
def read_twelve_hour_timestring(timestring: str) -> time:
"""Reads a timestring based on a 12 hour clock and returns a time
Given a timestring representing a time on a 12 hour clock, returns the
appropriate time object
Must be formatted as follows:
* hour | This is the only required value, integer
* minute | separated from hour by a colon, optional, integer
* second | separated from minute by a colon, optional, float
* AM/PM | string 'AM' or 'PM', separated from second by a space
When AM or PM is not provided in the timestring, AM will be assumed.
Valid Examples:
* '4:30 PM'
* '4:30 AM'
* '1 PM'
* '1'
* '11:59:59.999 PM'
* '12:00:00 AM'
Invalid Examples:
* '0:00'
* '13:30'
* '103 PM'
* '0'
* '22'
* '4:30:99 PM'
* '3:99 PM'
Parameters:
timestring: The string containing the time to convert to a time object
Returns:
The corresponding time object
Raises:
TypeError:
When timestring is not a string. Only str objects can be parsed
ValueError:
When the timetring is invalid / improperly formatted.
"""
# Timestrings must be strs
if not isinstance(timestring, str):
raise TypeError(f"timestring must be a string, got {type(timestring)}")
# Variable Initialization
ampm = "AM"
info = []
timestring = timestring.split(" ") # type: ignore[assignment]
# Getting AM/PM component
if len(timestring) > 1:
ampm = timestring[1]
# Getting individual time components
info = timestring[0].split(":")
# isoformat is 00:00:00.00, max 3 colons
if len(info) > 4:
raise ValueError(f"Failed to parse timestring {timestring}")
# collecting the attributes necessary to create a time object
tdict = {}
attrs = ["hour", "minute", "second", "microsecond"]
for attr, value in zip(attrs, info):
tdict[attr] = int(value)
# Setting missing components to 0
for attr in attrs:
if not tdict.get(attr):
tdict[attr] = 0
    # hours less than 1 or greater than 12 are not allowed on a 12 hour clock
if not 1 <= tdict["hour"] <= 12:
raise ValueError(f"Failed to parse timestring {timestring}")
# 12:30 AM is 00:30 isoformat
if ampm == "AM" and tdict["hour"] == 12:
tdict["hour"] == 0
# 12:30 PM is 12:30 isoformat, 1:30 PM is 13:30 isoformat
elif ampm == "PM" and tdict["hour"] < 12:
tdict["hour"] += 12
# Building and returning a time object
return time(**tdict) # type: ignore[arg-type]
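def _read_twelve_hour_timestring_example() -> None:
    # Illustrative sketch added for clarity; not part of the original module and
    # never called. A bare hour defaults to AM, and PM hours are shifted onto the
    # 24-hour clock.
    assert read_twelve_hour_timestring("4:30 PM") == time(16, 30)
    assert read_twelve_hour_timestring("1") == time(1, 0)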
def set_time(dt: DatetimeLike, t: DateOrTime) -> datetime:
"""Sets the given datetime-like object to the given time
Given a DatetimeLike object 'dt' and a time-like object 't', returns a
datetime like object that shares the date of dt and the time of t.
Very similar to datetime.combine, but accepts datetime objects for both
inputs.
Parameters:
dt (DatetimeLike): Datetime to convert
t (DateOrTime): Time to convert to
Returns:
python datetime.datetime object with converted time
"""
# Initializing the new time that will be set
newtime: dict[str, float] = {}
# Reading the necessary time attributes
if isinstance(t, str):
t = read_timestring(t)
newtime = deconstruct_dt(t)
elif isinstance(t, time):
newtime = deconstruct_dt(t)
else:
newtime = deconstruct_dt(to_datetime(t).time())
# Creating the new datetime with t=t
return to_datetime(dt).replace(**newtime) # type: ignore [arg-type]
def timestring(t: DateOrTime) -> str:
"""Converts a time-like object to a 12-hour-clock timestring
Given a time-like object t, returns a timestring represented by the
12-hour-clock (eg. 4:30 PM).
Parameters:
t (DateOrTime):
date or time object to read into a 12-hour-clock-based timestring
Returns:
A string representing the time on a 12-hour-clock
"""
# Ensuring that t is a time object
if not isinstance(t, time):
t = to_datetime(t).time()
# Deconstructing components to create a time string
ampm = "AM"
hour = t.hour
minute = t.minute if t.minute > 9 else f"0{t.minute}"
    if hour >= 12:
        ampm = "PM"
        if hour > 12:
            hour -= 12
    elif hour == 0:
        # The midnight hour reads as 12 AM on a 12-hour clock
        hour = 12
    return f"{hour}:{minute} {ampm}"
def to_datetime(dtlike: DatetimeLike) -> datetime:
"""
Given a datetime-like object, converts it to a python standard datetime
Parameters:
dtlike (DatetimeLike):
The Datetime-convertable object
Returns:
The converted python datetime
Raises:
TypeError: Only accepts python-datetime-convertable objects
"""
if isinstance(dtlike, datetime):
return dtlike
elif isinstance(dtlike, pd.Timestamp):
return dtlike.to_pydatetime()
elif isinstance(dtlike, np.datetime64):
return pd.Timestamp(dtlike).to_pydatetime()
elif isinstance(dtlike, date):
return datetime.combine(dtlike, datetime.min.time())
elif isinstance(dtlike, str):
return datetime.fromisoformat(dtlike)
raise TypeError(f"Can not convert passed object {dtlike} to python datetime")
def to_step(current: datetime, delta: Union[DateOrTime, timedelta, float]) -> timedelta:
"""
Converts an ambiguous delta object to a python timedelta
    Given an ambiguous object which can in some way be interpreted as a timedelta
relative to some 'current' time, converts that object to an appropriate
timedelta object, or 'step' in time.
Parameters:
current:
The 'current' time, which determines how to interpret the delta
        delta (Union[DateOrTime, timedelta, float]):
The object being passed that may represent a 'step' in time
Returns:
the appropriate timedelta 'step'
Raises:
TypeError:
When passed a type that can not be coerced/interpreted
ValueError:
When a type-appropriate object can not be coerced, or is in some way
invalid (eg. the step in time is BEFORE the current time)
"""
# Multiple parses must be made on strings to successfully coerce all of them
if isinstance(delta, str):
try:
delta = set_time(current, read_timestring(delta))
except ValueError:
delta = datetime.fromisoformat(delta) # type: ignore[arg-type]
elif isinstance(delta, time):
delta = set_time(current, delta)
elif isinstance(delta, (float, int)):
delta = current + timedelta(days=delta)
elif isinstance(delta, timedelta):
delta = current + delta
# if isinstance(delta, DatetimeLike):
else:
delta = to_datetime(delta)
if delta > current:
return delta - current
raise ValueError(
f"Passed delta {delta} is prior to current time {current}. Please "
"choose a time AFTER the current date."
)
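def _to_step_example() -> None:
    # Illustrative sketch added for clarity; not part of the original module and
    # never called. A plain number is read as a number of days, while a timestring
    # is read as a clock time later on the same ('current') day.
    current = datetime(2022, 1, 1, 9, 0)
    assert to_step(current, 2) == timedelta(days=2)
    assert to_step(current, "10:00 AM") == timedelta(hours=1)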
def to_time(tlike: TimeLike) -> time:
"""
Given a TimeLike object, converts it to a python standard time object
Parameters:
tlike:
The time-convertable object
Returns:
The converted python time object
Raises:
TypeError: Only accepts python-time-convertable objects
"""
if isinstance(tlike, str):
return read_timestring(tlike)
elif isinstance(tlike, time):
return tlike
raise TypeError(f"Can not convert passed object {tlike} to python time")
class NullClass:
"""
A class designed to take the place of other functions, modules, or classes
This class stands in place of a function, class, or module attached to
another class as an attribute. When an attribute is initialized as a
NullClass, one can safely access it as an attribute, call it, and access
attributes on it. These actions can also be performed recursively; any of
these operations performed on the nullclass will simply return itself,
allowing them to be chained infinitely.
Use this class in place of another function or class in order to safely
use an attribute without making constant checks.
This is most useful in place of functions/classes that perform
logging/printing, but also makes sense in place of functions that modify
things in place or always return None.
Examples:
.. highlight:: python
.. code-block:: python
class MyClass:
def __init__(self, data, verbose=False):
# This is cleaner and more pythonic than...
self.print = print if verbose else NullClass()
self.print("Initialized as Verbose!")
# Alternative 1
self.print = print if verbose else lambda *args, **kwargs: None
self.print("Initialized as Verbose!")
# Alternative 2
                self.print = print if verbose else None
if self.print is not None:
self.print("Initialized as Verbose!")
# Alternative 3
self.verbose = verbose
if self.verbose:
print("Initialized as Verbose!")
# etc etc etc...
# This is cleaner and more pythonic than...
self.tqdm = tqdm.progress_bar if verbose else NullClass()
with self.tqdm(total=1000) as pbar:
while condition:
self.do_something()
pbar.update(1) # Safe!
# Alternative
self.verbose = verbose
if verbose:
with tqdm.progress_bar(total=1000) as pbar:
while condition:
self.do_something()
pbar.update(1)
else:
while condition:
self.do_something() # gross.
"""
def __call__(self, *args: Any, **kwargs: Any) -> NullClass:
return self
def __getattr__(self, attr: str) -> NullClass:
return self
def __enter__(self, *args, **kwargs) -> NullClass:
return self
def __exit__(self, *args, **kwargs) -> None:
pass
def __bool__(self) -> bool:
return False
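def _null_class_example() -> None:
    # Illustrative sketch added for clarity; not part of the original module and
    # never called. Calls, attribute access, and context management all fall
    # through to the same NullClass instance, which is always falsy.
    null = NullClass()
    assert null("ignored").anything.more() is null
    assert not null
    with null as ctx:
        assert ctx is null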
|
"""
WSGI config for core project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings')
application = Cling(get_wsgi_application())
|
'''
Parsing - Exercise 16
The script reads a multiple sequence file in FASTA format and
only write to a new file the records the Uniprot ACs of which
are present in the list created in Exercise 14).
This version of the script collects the header and the sequence
separately, in case you wanted to manipulate them.
'''
# We need two input files
cancer_file = open('cancer-expressed.txt')
human_fasta = open('SwissProt-Human.fasta')
Outfile = open('cancer-expressed_records.fasta','w')
# Create the list from cancer-expressed.txt
cancer_list = []
for line in cancer_file:
AC = line.strip()
cancer_list.append(AC)
# Read the FASTA input and check if ACs are in cancer_list
seq = ""
for line in human_fasta:
# Take the first record into account
if line[0] == '>' and seq == '':
header = line
AC = line.split('|')[1]
elif line[0] != '>':
seq = seq + line
elif line[0] == '>' and seq != '':
if AC in cancer_list:
Outfile.write(header + seq)
# Re-initialise variables for the next record
header = line
AC = line.split('|')[1]
seq = ''
# Take the last record into account
if AC in cancer_list:
Outfile.write(header + seq)
Outfile.close()
|
import numpy as np
from gym_wmgds import utils
from gym_wmgds.envs.mujoco import mujoco_env
class AntEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
mujoco_env.MujocoEnv.__init__(self, 'ant.xml', 5)
utils.EzPickle.__init__(self)
def step(self, a):
xposbefore = self.get_body_com("torso")[0]
self.do_simulation(a, self.frame_skip)
xposafter = self.get_body_com("torso")[0]
forward_reward = (xposafter - xposbefore)/self.dt
ctrl_cost = .5 * np.square(a).sum()
contact_cost = 0.5 * 1e-3 * np.sum(
np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
survive_reward = 1.0
reward = forward_reward - ctrl_cost - contact_cost + survive_reward
state = self.state_vector()
notdone = np.isfinite(state).all() \
and state[2] >= 0.2 and state[2] <= 1.0
done = not notdone
ob = self._get_obs()
return ob, reward, done, dict(
reward_forward=forward_reward,
reward_ctrl=-ctrl_cost,
reward_contact=-contact_cost,
reward_survive=survive_reward)
def _get_obs(self):
return np.concatenate([
self.sim.data.qpos.flat[2:],
self.sim.data.qvel.flat,
np.clip(self.sim.data.cfrc_ext, -1, 1).flat,
])
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1)
qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1
self.set_state(qpos, qvel)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.distance = self.model.stat.extent * 0.5
|
from django.db import models
class SFWManager(models.Manager):
def get_queryset(self):
return super(SFWManager, self).get_queryset().filter(sfwness=gifsong.SFW)
class gifsong(models.Model):
SFW = 1
NSFW = 2
UNKNOWN = 3
STATUS_CHOICES = (
(SFW, 'SFW'),
(NSFW, 'NSFW'),
(UNKNOWN, 'Unknown'),
)
image_url = models.CharField(max_length=255)
audio_url = models.CharField(max_length=255)
sfwness = models.PositiveIntegerField(choices=STATUS_CHOICES, default=UNKNOWN)
objects = models.Manager()
sfw = SFWManager()
    @classmethod
    def create(cls, image_url, audio_url):
        gifsong = cls(image_url=image_url, audio_url=audio_url)
        return gifsong
|
"""
Implementation from: https://raw.githubusercontent.com/Zenglinxiao/OpenNMT-py/bert/onmt/encoders/bert.py
@Author: Zenglinxiao
"""
import torch.nn as nn
from onmt.encoders.transformer import TransformerEncoderLayer
from onmt.utils.misc import sequence_mask
class BertEncoder(nn.Module):
"""BERT Encoder: A Transformer Encoder with LayerNorm and BertPooler.
:cite:`DBLP:journals/corr/abs-1810-04805`
Args:
embeddings (onmt.modules.BertEmbeddings): embeddings to use
num_layers (int): number of encoder layers.
d_model (int): size of the model
heads (int): number of heads
d_ff (int): size of the inner FF layer
dropout (float): dropout parameters
"""
def __init__(self, embeddings, num_layers=12, d_model=768, heads=12,
d_ff=3072, dropout=0.1, attention_dropout=0.1,
max_relative_positions=0):
super(BertEncoder, self).__init__()
self.num_layers = num_layers
self.d_model = d_model
self.heads = heads
self.dropout = dropout
# Feed-Forward size should be 4*d_model as in paper
self.d_ff = d_ff
self.embeddings = embeddings
# Transformer Encoder Block
self.encoder = nn.ModuleList(
[TransformerEncoderLayer(d_model, heads, d_ff,
dropout, attention_dropout,
max_relative_positions=max_relative_positions,
activation='gelu') for _ in range(num_layers)])
self.layer_norm = nn.LayerNorm(d_model, eps=1e-12)
self.pooler = BertPooler(d_model)
@classmethod
def from_opt(cls, opt, embeddings):
"""Alternate constructor."""
return cls(
embeddings,
opt.enc_layers,
opt.word_vec_size,
opt.heads,
opt.transformer_ff,
opt.dropout[0] if type(opt.dropout) is list else opt.dropout,
opt.attention_dropout[0] if type(opt.attention_dropout)
is list else opt.attention_dropout,
opt.max_relative_positions
)
def forward(self, input_ids, lengths, token_type_ids=None):
"""
Args:
input_ids (Tensor): ``(seq_len, batch_size, feature_dim)``, padding ids=0
lengths (Tensor): ``(batch_size)``, record length of sequence
token_type_ids (seq_len, batch_size): ``(B, S)``, A(0), B(1), pad(0)
Returns:
            emb (Tensor): token embeddings, ``(S, B, H)``
            out (Tensor): output of the last encoder layer, ``(S, B, H)``
            lengths (Tensor): ``(batch_size)``, the unchanged sequence lengths
"""
# remove the feature dimension
# seq_len x batch_size
emb = self.embeddings(input_ids, token_type_ids)
out = emb.transpose(0, 1).contiguous()
# [batch, seq] -> [batch, 1, seq]
mask = ~sequence_mask(lengths).unsqueeze(1)
for layer in self.encoder:
out = layer(out, mask)
out = self.layer_norm(out)
return emb, out.transpose(0, 1).contiguous(), lengths
def update_dropout(self, dropout):
self.dropout = dropout
self.embeddings.update_dropout(dropout)
for layer in self.encoder:
layer.update_dropout(dropout)
class BertPooler(nn.Module):
def __init__(self, hidden_size):
"""A pooling block (Linear layer followed by Tanh activation).
Args:
hidden_size (int): size of hidden layer.
"""
super(BertPooler, self).__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.activation_fn = nn.Tanh()
def forward(self, hidden_states):
"""hidden_states[:, 0, :] --> {Linear, Tanh} --> Returns.
Args:
hidden_states (Tensor): last layer's hidden_states, ``(B, S, H)``
Returns:
pooled_output (Tensor): transformed output of last layer's hidden
"""
first_token_tensor = hidden_states[:, 0, :] # [batch, d_model]
pooled_output = self.activation_fn(self.dense(first_token_tensor))
return pooled_output
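def _bert_pooler_shape_check() -> None:
    # Illustrative sketch added for clarity; not part of the original file and
    # never called. It feeds a random batch of last-layer hidden states through
    # BertPooler and checks that only the first token's vector is pooled, so the
    # sequence dimension disappears. The sizes used here are arbitrary.
    import torch
    pooler = BertPooler(hidden_size=8)
    hidden_states = torch.randn(2, 5, 8)  # (batch, seq_len, hidden)
    assert pooler(hidden_states).shape == (2, 8)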
|
import uuid
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
class Profile(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
user = models.OneToOneField(User, on_delete=models.CASCADE)
profile_photo = models.ImageField(upload_to='profile_pictures/', blank=True, null=True)
bio = models.TextField(max_length=200, blank=True)
def __str__(self):
return str(self.id)
class Image(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
image = models.ImageField(upload_to='images/')
image_name = models.CharField(max_length=50, blank=True)
image_caption = models.TextField(blank=True)
poster = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return str(self.id)
def save_image(self):
self.save()
def delete_image(self):
self.delete()
    def update_caption(self, caption):
        self.image_caption = caption
        self.save()
@classmethod
def get_image_by_id(cls, n):
        return Image.objects.get(id=n)
def create_user_profile(sender, **kwargs):
if kwargs['created']:
user_profile = Profile.objects.create(user=kwargs['instance'])
post_save.connect(create_user_profile, sender=User)
class Comment(models.Model):
text = models.TextField()
image = models.ForeignKey(Image, on_delete=models.CASCADE)
commenter = models.ForeignKey(User, verbose_name="Comment", on_delete=models.CASCADE)
|
#!/usr/bin/env python
# _*_ coding:UTF-8 _*_
"""
__author__ = 'shede333'
"""
import plistlib
import shutil
from pathlib import Path
from mobileprovision import util
from mobileprovision import MobileProvisionModel
RESOURCE_PATH = Path(__file__).resolve().parent.joinpath("resource")
SRC_MP_PATH = RESOURCE_PATH.joinpath("sw-src.mobileprovision")
def test_cli_import():
origin_path = util.MP_ROOT_PATH
util.MP_ROOT_PATH = RESOURCE_PATH.joinpath("Provisioning Profiles")
if util.MP_ROOT_PATH.is_dir():
shutil.rmtree(util.MP_ROOT_PATH)
util.MP_ROOT_PATH.mkdir()
mp_model = MobileProvisionModel(SRC_MP_PATH)
file_name = "{}.mobileprovision".format(mp_model.uuid)
dst_path = util.MP_ROOT_PATH.joinpath(file_name)
assert not dst_path.is_file()
util.import_mobileprovision(SRC_MP_PATH)
assert dst_path.is_file()
assert len(list(util.MP_ROOT_PATH.iterdir())) == 1
cp_mp_path = dst_path.with_name("123.mobileprovision")
assert not cp_mp_path.is_file()
shutil.copy(SRC_MP_PATH, cp_mp_path)
assert cp_mp_path.is_file()
assert len(list(util.MP_ROOT_PATH.iterdir())) == 2
util.import_mobileprovision(SRC_MP_PATH, replace_at_attrs=None)
assert len(list(util.MP_ROOT_PATH.iterdir())) == 2
util.import_mobileprovision(SRC_MP_PATH, replace_at_attrs='name')
assert len(list(util.MP_ROOT_PATH.iterdir())) == 1
    # Delete the test directory
shutil.rmtree(util.MP_ROOT_PATH)
    # Restore the original path
util.MP_ROOT_PATH = origin_path
def test_cli_convert():
dst_path = SRC_MP_PATH.with_name("dst.plist")
if dst_path.is_file():
dst_path.unlink()
MobileProvisionModel(SRC_MP_PATH).convert_to_plist_file(dst_path)
assert dst_path.is_file()
p_obj = plistlib.loads(dst_path.read_bytes())
assert p_obj["AppIDName"] == "XC xyz shede333 testFirst"
dst_path.unlink()
def test_mp_property():
from datetime import datetime
from datetime import timezone
from datetime import timedelta
mp_model = MobileProvisionModel(SRC_MP_PATH)
assert mp_model["name"] == "iOS Team Provisioning Profile: xyz.shede333.testFirst"
assert mp_model.app_id_name == "XC xyz shede333 testFirst"
assert mp_model.name == "iOS Team Provisioning Profile: xyz.shede333.testFirst"
assert len(mp_model.provisioned_devices) == 1
assert mp_model.team_name == "ShaoWei Wang"
assert mp_model.team_identifier == "RR23U62KET"
assert mp_model.uuid == "5e3f9cc7-59d2-4cef-902b-97ba409e5874"
assert isinstance(mp_model.entitlements, dict)
assert mp_model.app_id_prefix == "RR23U62KET"
assert mp_model.app_id() == "xyz.shede333.testFirst"
assert mp_model.app_id(is_need_prefix=True) == "RR23U62KET.xyz.shede333.testFirst"
assert mp_model.contain_device_id("00008020-0009306C1429002E")
assert mp_model.creation_timestamp == datetime(2019, 11, 19, 9, 27, 50).timestamp()
assert mp_model.expiration_timestamp == datetime(2019, 11, 26, 9, 27, 50).timestamp()
assert mp_model.date_is_valid() == (datetime.utcnow().timestamp() < mp_model.expiration_timestamp)
# utc_dt = datetime.fromtimestamp(mp_model.creation_timestamp, tz=timezone.utc)
utc_dt = datetime(2019, 11, 19, 9, 27, 50).replace(tzinfo=timezone.utc)
assert utc_dt.strftime("%Y-%m-%d %H:%M:%S") == "2019-11-19 09:27:50"
    tz_8h = timezone(timedelta(hours=8))  # UTC+8 (China Standard Time)
local_dt = utc_dt.astimezone(tz_8h)
assert local_dt.strftime("%Y-%m-%d %H:%M:%S") == "2019-11-19 17:27:50"
import tempfile
with tempfile.TemporaryDirectory() as dir_path:
ent_dst_path = Path(dir_path).joinpath("entitlements.plist")
if ent_dst_path.is_file():
ent_dst_path.unlink()
mp_model.export_entitlements_file(ent_dst_path)
assert ent_dst_path.is_file()
p_obj = plistlib.loads(ent_dst_path.read_bytes())
assert p_obj["application-identifier"] == "RR23U62KET.xyz.shede333.testFirst"
assert len(mp_model.developer_certificates) == 2
cer_model = mp_model.developer_certificates[0]
assert cer_model.common_name == "iPhone Developer: 333wshw@163.com (6EWWJK58A9)"
assert cer_model.sha256 == "122F041D0C659348CC9CB1C1CBC6A60BBB3C8184D9261C73F117DBE785F9AA20"
assert cer_model.sha1 == "38C56BC325AF693E16E8B4C17CAAB50982868C32"
assert cer_model.not_valid_before == datetime(2019, 5, 21, 4, 28, 15).replace(
tzinfo=timezone.utc).timestamp()
assert cer_model.not_valid_after == datetime(2020, 5, 20, 4, 28, 15).replace(
tzinfo=timezone.utc).timestamp()
|
import distutils.ccompiler
import os
import os.path
import platform
import shutil
import sys
import subprocess
from typing import Optional, List
import setuptools
import setuptools.msvc
from setuptools import Extension
from cupy_builder._context import Context
import cupy_builder.install_build as build
def _nvcc_gencode_options(cuda_version: int) -> List[str]:
"""Returns NVCC GPU code generation options."""
if sys.argv == ['setup.py', 'develop']:
return []
envcfg = os.getenv('CUPY_NVCC_GENERATE_CODE', None)
if envcfg is not None and envcfg != 'current':
return ['--generate-code={}'.format(arch)
for arch in envcfg.split(';') if len(arch) > 0]
if envcfg == 'current' and build.get_compute_capabilities() is not None:
ccs = build.get_compute_capabilities()
arch_list = [
f'compute_{cc}' if cc < 60 else (f'compute_{cc}', f'sm_{cc}')
for cc in ccs]
else:
# The arch_list specifies virtual architectures, such as 'compute_61',
# and real architectures, such as 'sm_61', for which the CUDA
# input files are to be compiled.
#
# The syntax of an entry of the list is
#
# entry ::= virtual_arch | (virtual_arch, real_arch)
#
# where virtual_arch is a string which means a virtual architecture and
# real_arch is a string which means a real architecture.
#
# If a virtual architecture is supplied, NVCC generates a PTX code
        # for the virtual architecture. If a pair of a virtual architecture and a
# real architecture is supplied, NVCC generates a PTX code for the
# virtual architecture as well as a cubin code for the real one.
#
# For example, making NVCC generate a PTX code for 'compute_60' virtual
# architecture, the arch_list has an entry of 'compute_60'.
#
# arch_list = ['compute_60']
#
# For another, making NVCC generate a PTX code for 'compute_61' virtual
# architecture and a cubin code for 'sm_61' real architecture, the
# arch_list has an entry of ('compute_61', 'sm_61').
#
# arch_list = [('compute_61', 'sm_61')]
#
# See the documentation of each CUDA version for the list of supported
# architectures:
#
# https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#options-for-steering-gpu-code-generation
if cuda_version >= 11040:
arch_list = ['compute_35',
'compute_50',
('compute_60', 'sm_60'),
('compute_61', 'sm_61'),
('compute_70', 'sm_70'),
('compute_75', 'sm_75'),
('compute_80', 'sm_80'),
('compute_86', 'sm_86'),
('compute_87', 'sm_87'),
'compute_87']
elif cuda_version >= 11010:
arch_list = ['compute_35',
'compute_50',
('compute_60', 'sm_60'),
('compute_61', 'sm_61'),
('compute_70', 'sm_70'),
('compute_75', 'sm_75'),
('compute_80', 'sm_80'),
('compute_86', 'sm_86'),
'compute_86']
elif cuda_version >= 11000:
arch_list = ['compute_35',
'compute_50',
('compute_60', 'sm_60'),
('compute_61', 'sm_61'),
('compute_70', 'sm_70'),
('compute_75', 'sm_75'),
('compute_80', 'sm_80'),
'compute_80']
elif cuda_version >= 10000:
arch_list = ['compute_30',
'compute_50',
('compute_60', 'sm_60'),
('compute_61', 'sm_61'),
('compute_70', 'sm_70'),
('compute_75', 'sm_75'),
'compute_70']
else:
# This should not happen.
assert False
options = []
for arch in arch_list:
if type(arch) is tuple:
virtual_arch, real_arch = arch
options.append('--generate-code=arch={},code={}'.format(
virtual_arch, real_arch))
else:
options.append('--generate-code=arch={},code={}'.format(
arch, arch))
return options
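def _example_gencode_options() -> None:
    # Illustrative sketch added for clarity; not part of the original file and
    # never called. Assuming CUPY_NVCC_GENERATE_CODE is unset, a CUDA 11.0 build
    # emits one --generate-code flag per arch_list entry: virtual-only entries
    # compile to PTX (code=compute_XX) and (virtual, real) pairs also produce a
    # cubin (code=sm_XX).
    opts = _nvcc_gencode_options(11000)
    assert opts[0] == '--generate-code=arch=compute_35,code=compute_35'
    assert '--generate-code=arch=compute_60,code=sm_60' in opts
    assert opts[-1] == '--generate-code=arch=compute_80,code=compute_80'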
class DeviceCompilerBase:
"""A class that invokes NVCC or HIPCC."""
def __init__(self, ctx: Context):
self._context = ctx
def _get_preprocess_options(self, ext: Extension) -> List[str]:
# https://setuptools.pypa.io/en/latest/deprecated/distutils/apiref.html#distutils.core.Extension
# https://github.com/pypa/setuptools/blob/v60.0.0/setuptools/_distutils/command/build_ext.py#L524-L526
incdirs = ext.include_dirs[:] # type: ignore
macros = ext.define_macros[:] # type: ignore
for undef in ext.undef_macros: # type: ignore
macros.append((undef,))
return distutils.ccompiler.gen_preprocess_options(macros, incdirs)
def spawn(self, commands: List[str]) -> None:
print('Command:', commands)
subprocess.check_call(commands)
class DeviceCompilerUnix(DeviceCompilerBase):
def compile(self, obj: str, src: str, ext: Extension) -> None:
if self._context.use_hip:
self._compile_unix_hipcc(obj, src, ext)
else:
self._compile_unix_nvcc(obj, src, ext)
def _compile_unix_nvcc(self, obj: str, src: str, ext: Extension) -> None:
cc_args = self._get_preprocess_options(ext) + ['-c']
# For CUDA C source files, compile them with NVCC.
nvcc_path = build.get_nvcc_path()
base_opts = build.get_compiler_base_options(nvcc_path)
compiler_so = nvcc_path
cuda_version = build.get_cuda_version()
postargs = _nvcc_gencode_options(cuda_version) + [
'-O2', '--compiler-options="-fPIC"']
if cuda_version >= 11020:
postargs += ['--std=c++14']
num_threads = int(os.environ.get('CUPY_NUM_NVCC_THREADS', '2'))
postargs += [f'-t{num_threads}']
else:
postargs += ['--std=c++11']
postargs += ['-Xcompiler=-fno-gnu-unique']
print('NVCC options:', postargs)
self.spawn(compiler_so + base_opts + cc_args + [src, '-o', obj] +
postargs)
def _compile_unix_hipcc(self, obj: str, src: str, ext: Extension) -> None:
cc_args = self._get_preprocess_options(ext) + ['-c']
# For CUDA C source files, compile them with HIPCC.
rocm_path = build.get_hipcc_path()
base_opts = build.get_compiler_base_options(rocm_path)
compiler_so = rocm_path
hip_version = build.get_hip_version()
postargs = ['-O2', '-fPIC', '--include', 'hip_runtime.h']
if hip_version >= 402:
postargs += ['--std=c++14']
else:
postargs += ['--std=c++11']
print('HIPCC options:', postargs)
self.spawn(compiler_so + base_opts + cc_args + [src, '-o', obj] +
postargs)
class DeviceCompilerWin32(DeviceCompilerBase):
def compile(self, obj: str, src: str, ext: Extension) -> None:
if self._context.use_hip:
raise RuntimeError('ROCm is not supported on Windows')
compiler_so = build.get_nvcc_path()
cc_args = self._get_preprocess_options(ext) + ['-c']
cuda_version = build.get_cuda_version()
postargs = _nvcc_gencode_options(cuda_version) + ['-O2']
if cuda_version >= 11020:
# MSVC 14.0 (2015) is deprecated for CUDA 11.2 but we need it
# to build CuPy because some Python versions were built using it.
# REF: https://wiki.python.org/moin/WindowsCompilers
postargs += ['-allow-unsupported-compiler']
postargs += ['-Xcompiler', '/MD', '-D_USE_MATH_DEFINES']
# This is to compile thrust with MSVC2015
if cuda_version >= 11020:
postargs += ['--std=c++14']
num_threads = int(os.environ.get('CUPY_NUM_NVCC_THREADS', '2'))
postargs += [f'-t{num_threads}']
cl_exe_path = self._find_host_compiler_path()
if cl_exe_path is not None:
print(f'Using host compiler at {cl_exe_path}')
postargs += ['--compiler-bindir', cl_exe_path]
print('NVCC options:', postargs)
self.spawn(compiler_so + cc_args + [src, '-o', obj] + postargs)
def _find_host_compiler_path(self) -> Optional[str]:
# c.f. cupy.cuda.compiler._get_extra_path_for_msvc
cl_exe = shutil.which('cl.exe')
if cl_exe:
# The compiler is already on PATH, no extra path needed.
return None
vctools: List[str] = setuptools.msvc.EnvironmentInfo(
platform.machine()).VCTools
for path in vctools:
cl_exe = os.path.join(path, 'cl.exe')
if os.path.exists(cl_exe):
return path
print(f'Warning: cl.exe could not be found in {vctools}')
return None
|
import keyboard
import time
while True:
if keyboard.is_pressed('esc'):
print('Pressed ESC')
break
else:
time.sleep(1e-3)
|
# Based on the astropy test suite (v4.2.1)
# (https://github.com/astropy/astropy/blob/v4.2.1/astropy/cosmology/tests/test_cosmology.py)
from io import StringIO
from typing import Type
import numpy as np
import pytest
import torch
from pytest import mark
from torch import tensor
import phytorch.cosmology.drivers.analytic
import phytorch.cosmology.drivers.analytic_diff
import phytorch.cosmology.special
from phytorch.constants import codata2014, G as Newton_G
from phytorch.cosmology.special import AbstractFlatLambdaCDMR, AbstractLambdaCDMR
from phytorch.units.astro import Gpc, Gyr, Mpc
from phytorch.units.si import cm, gram, kelvin, km, s
from phytorch.units.unit import Unit
from tests.common.closeness import close
from tests.common.dtypes import with_default_double
ZERO = torch.zeros(())
ONE = torch.ones(())
SMALL = 1e-16
Z = tensor([0, 0.5, 1, 2])
H70 = 70 * km/s/Mpc
H704 = 70.4 * km/s/Mpc
def test_critical_density():
fac = (Newton_G / codata2014.G).to(Unit())
cosmo = AbstractFlatLambdaCDMR()
cosmo.H0 = H704
cosmo.Om0 = 0.272
# constants defined only so accurately
assert ((cosmo.critical_density0 * fac).to(gram / cm**3) - 9.309668456020899e-30) < 1e-9
assert cosmo.critical_density0 == cosmo.critical_density(0)
assert close((cosmo.critical_density(tensor([1, 5])) * fac).to(gram / cm**3).value,
[2.70352772e-29, 5.53739080e-28])
def test_xtfuncs():
cosmo = AbstractLambdaCDMR()
cosmo.H0, cosmo.Om0, cosmo.Ode0, cosmo.Neff, cosmo.Tcmb0 = H70, 0.3, 0.5, 3.04, 2.725 * kelvin
z = tensor([2, 3.2])
assert close(cosmo.lookback_time_integrand(tensor(3)), 0.052218976654969378)
assert close(cosmo.lookback_time_integrand(z), [0.10333179, 0.04644541])
assert close(cosmo.abs_distance_integrand(tensor(3)), 3.3420145059180402)
assert close(cosmo.abs_distance_integrand(z), [2.7899584, 3.44104758])
def test_zeroing():
cosmo = AbstractLambdaCDMR()
cosmo.Om0 = 0.27
cosmo.Ode0 = 0
cosmo.Or0 = 0
assert cosmo.Ode(1.5) == 0
assert (cosmo.Ode(Z) == ZERO).all()
assert cosmo.Or(1.5) == 0
assert (cosmo.Or(Z) == ZERO).all()
# TODO: add neutrinos
# assert allclose(cosmo.Onu(1.5), [0, 0, 0, 0])
# assert allclose(cosmo.Onu(z), [0, 0, 0, 0])
assert (cosmo.Ob(Z) == ZERO).all()
def test_matter():
cosmo = AbstractFlatLambdaCDMR()
cosmo.Om0 = 0.3
cosmo.Ob0 = 0.045
assert cosmo.Om(0) == 0.3
assert cosmo.Ob(0) == 0.045
assert close(cosmo.Om(Z), [0.3, 0.59124088, 0.77419355, 0.92045455])
assert close(cosmo.Ob(Z), [0.045, 0.08868613, 0.11612903, 0.13806818])
assert close(cosmo.Odm(Z), [0.255, 0.50255474, 0.65806452, 0.78238636])
assert close(cosmo.Ob(Z) + cosmo.Odm(Z), cosmo.Om(Z))
def test_ocurv():
cosmo = AbstractFlatLambdaCDMR()
cosmo.Om0 = 0.3
assert cosmo.Ok0 == 0
assert cosmo.Ok(0) == 0
assert (cosmo.Ok(Z) == ZERO).all()
cosmo = AbstractLambdaCDMR()
cosmo.Om0 = 0.3
cosmo.Ode0 = 0.5
assert abs(cosmo.Ok0 - 0.2) < SMALL
assert abs(cosmo.Ok(0) - 0.2) < SMALL
assert close(cosmo.Ok(Z), [0.2, 0.22929936, 0.21621622, 0.17307692])
assert (cosmo.Ok(Z) + cosmo.Om(Z) + cosmo.Ode(Z) == ONE).all()
def test_ode():
cosmo = AbstractFlatLambdaCDMR()
cosmo.Om0 = 0.3
assert cosmo.Ode(0) == cosmo.Ode0
assert close(cosmo.Ode(Z), [0.7, 0.408759, 0.2258065, 0.07954545])
def test_tcmb():
cosmo = AbstractFlatLambdaCDMR()
cosmo.H0 = H704
cosmo.Om0 = 0.272
cosmo.Tcmb0 = 2.5 * kelvin
assert cosmo.Tcmb(2) == 7.5 * kelvin
assert (cosmo.Tcmb(tensor([0, 1, 2, 3, 9.])).to(kelvin).value == tensor([2.5, 5, 7.5, 10, 25])).all()
def test_efunc_vs_invefunc():
cosmo = AbstractLambdaCDMR()
cosmo.Om0 = 0.3
cosmo.Ode0 = 0.7
assert cosmo.efunc(0.5) * cosmo.inv_efunc(0.5) == 1
assert (cosmo.efunc(Z) * cosmo.inv_efunc(Z) == ONE).all()
# TODO: test this for subclasses?
class BaseLambdaCDMDriverTest:
flat_cosmo_cls: Type[phytorch.cosmology.special.BaseFlatLambdaCDM]
cosmo_cls: Type[phytorch.cosmology.special.BaseLambdaCDM]
class BaseLambdaCDMTest(BaseLambdaCDMDriverTest):
flat_cosmo_cls: Type[phytorch.cosmology.special.FlatLambdaCDM]
cosmo_cls: Type[phytorch.cosmology.special.LambdaCDM]
@with_default_double
@mark.parametrize(('func', 'vals', 'unit', 'rtol'), (
# From the astropy test suite:
# Test values were taken from the following web cosmology
# calculators on 27th Feb 2012:
# Wright: http://www.astro.ucla.edu/~wright/CosmoCalc.html
# (https://ui.adsabs.harvard.edu/abs/2006PASP..118.1711W)
# Kempner: http://www.kempner.net/cosmic.php
# iCosmos: http://www.icosmos.co.uk/index.html
(phytorch.cosmology.special.FlatLambdaCDM.comoving_distance,
(3364.5, 3364.8, 3364.7988), Mpc, 1e-4),
(phytorch.cosmology.special.FlatLambdaCDM.angular_diameter_distance,
(1682.3, 1682.4, 1682.3994), Mpc, 1e-4),
(phytorch.cosmology.special.FlatLambdaCDM.luminosity_distance,
(6729.2, 6729.6, 6729.5976), Mpc, 1e-4),
(phytorch.cosmology.special.FlatLambdaCDM.lookback_time,
(7.841, 7.84178, 7.843), Gyr, 1e-3),
(phytorch.cosmology.special.FlatLambdaCDM.lookback_distance,
(2404.0, 2404.24, 2404.4), Mpc, 1e-3),
))
def test_flat_z1(self, func, vals, unit, rtol):
cosmo = self.flat_cosmo_cls()
cosmo.H0 = H70
cosmo.Om0 = 0.27
assert close(getattr(cosmo, func.__name__)(1).to(unit).value, vals, rtol=rtol)
@mark.parametrize('Om0, Ode0, vals', (
(0.27, 0.73, (29.123, 159.529, 630.427, 1178.531, 2181.485, 3654.802)),
(0.27, 0, (20.501, 99.019, 380.278, 747.049, 1558.363, 3123.814)),
(2, 0, (12.619, 44.708, 114.904, 173.709, 258.82, 358.992))
))
def test_comoving_volume(self, Om0, Ode0, vals):
z = tensor([0.5, 1, 2, 3, 5, 9])
# for (Om0, Ode0), vals in zip(
# ((0.27, 0.73), (0.27, 0), (2, 0)),
# # Form Ned Wright's calculator: not very *accurate* (sic), so
# # like astropy, test to very low precision
# ((29.123, 159.529, 630.427, 1178.531, 2181.485, 3654.802),
# (20.501, 99.019, 380.278, 747.049, 1558.363, 3123.814),
# (12.619, 44.708, 114.904, 173.709, 258.82, 358.992))
# ):
c = self.cosmo_cls()
c.H0, c.Om0, c.Ode0 = H70, Om0, Ode0
assert close(c.comoving_volume(z).to(Gpc**3).value, vals, rtol=1e-2)
# TODO: (requires integration) test_differential_comoving_volume
icosmo_flat = """\
# from icosmo (icosmo.org)
# Om 0.3 w -1 h 0.7 Ol 0.7
# z comoving_transvers_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 669.77536 576.15085 778.61386
0.32500000 1285.5964 970.26143 1703.4152
0.50000000 1888.6254 1259.0836 2832.9381
0.66250000 2395.5489 1440.9317 3982.6000
0.82500000 2855.5732 1564.6976 5211.4210
1.0000000 3303.8288 1651.9144 6607.6577
1.1625000 3681.1867 1702.2829 7960.5663
1.3250000 4025.5229 1731.4077 9359.3408
1.5000000 4363.8558 1745.5423 10909.640
1.6625000 4651.4830 1747.0359 12384.573
1.8250000 4916.5970 1740.3883 13889.387
2.0000000 5179.8621 1726.6207 15539.586
2.1625000 5406.0204 1709.4136 17096.540
2.3250000 5616.5075 1689.1752 18674.888
2.5000000 5827.5418 1665.0120 20396.396
2.6625000 6010.4886 1641.0890 22013.414
2.8250000 6182.1688 1616.2533 23646.796
3.0000000 6355.6855 1588.9214 25422.742
3.1625000 6507.2491 1563.3031 27086.425
3.3250000 6650.4520 1537.6768 28763.205
3.5000000 6796.1499 1510.2555 30582.674
3.6625000 6924.2096 1485.0852 32284.127
3.8250000 7045.8876 1460.2876 33996.408
4.0000000 7170.3664 1434.0733 35851.832
4.1625000 7280.3423 1410.2358 37584.767
4.3250000 7385.3277 1386.9160 39326.870
4.5000000 7493.2222 1362.4040 41212.722
4.6625000 7588.9589 1340.2135 42972.480
"""
icosmo_open = """\
# from icosmo (icosmo.org)
# Om 0.3 w -1 h 0.7 Ol 0.1
# z comoving_transvers_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 643.08185 553.18868 747.58265
0.32500000 1200.9858 906.40441 1591.3062
0.50000000 1731.6262 1154.4175 2597.4393
0.66250000 2174.3252 1307.8648 3614.8157
0.82500000 2578.7616 1413.0201 4706.2399
1.0000000 2979.3460 1489.6730 5958.6920
1.1625000 3324.2002 1537.2024 7188.5829
1.3250000 3646.8432 1568.5347 8478.9104
1.5000000 3972.8407 1589.1363 9932.1017
1.6625000 4258.1131 1599.2913 11337.226
1.8250000 4528.5346 1603.0211 12793.110
2.0000000 4804.9314 1601.6438 14414.794
2.1625000 5049.2007 1596.5852 15968.097
2.3250000 5282.6693 1588.7727 17564.875
2.5000000 5523.0914 1578.0261 19330.820
2.6625000 5736.9813 1566.4113 21011.694
2.8250000 5942.5803 1553.6158 22730.370
3.0000000 6155.4289 1538.8572 24621.716
3.1625000 6345.6997 1524.4924 26413.975
3.3250000 6529.3655 1509.6799 28239.506
3.5000000 6720.2676 1493.3928 30241.204
3.6625000 6891.5474 1478.0799 32131.840
3.8250000 7057.4213 1462.6780 34052.058
4.0000000 7230.3723 1446.0745 36151.862
4.1625000 7385.9998 1430.7021 38130.224
4.3250000 7537.1112 1415.4199 40135.117
4.5000000 7695.0718 1399.1040 42322.895
4.6625000 7837.5510 1384.1150 44380.133
"""
icosmo_closed = """\
# from icosmo (icosmo.org)
# Om 2 w -1 h 0.7 Ol 0.1
# z comoving_transvers_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 601.80160 517.67879 699.59436
0.32500000 1057.9502 798.45297 1401.7840
0.50000000 1438.2161 958.81076 2157.3242
0.66250000 1718.6778 1033.7912 2857.3019
0.82500000 1948.2400 1067.5288 3555.5381
1.0000000 2152.7954 1076.3977 4305.5908
1.1625000 2312.3427 1069.2914 5000.4410
1.3250000 2448.9755 1053.3228 5693.8681
1.5000000 2575.6795 1030.2718 6439.1988
1.6625000 2677.9671 1005.8092 7130.0873
1.8250000 2768.1157 979.86398 7819.9270
2.0000000 2853.9222 951.30739 8561.7665
2.1625000 2924.8116 924.84161 9249.7167
2.3250000 2988.5333 898.80701 9936.8732
2.5000000 3050.3065 871.51614 10676.073
2.6625000 3102.1909 847.01459 11361.774
2.8250000 3149.5043 823.39982 12046.854
3.0000000 3195.9966 798.99915 12783.986
3.1625000 3235.5334 777.30533 13467.908
3.3250000 3271.9832 756.52790 14151.327
3.5000000 3308.1758 735.15017 14886.791
3.6625000 3339.2521 716.19347 15569.263
3.8250000 3368.1489 698.06195 16251.319
4.0000000 3397.0803 679.41605 16985.401
4.1625000 3422.1142 662.87926 17666.664
4.3250000 3445.5542 647.05243 18347.576
4.5000000 3469.1805 630.76008 19080.493
4.6625000 3489.7534 616.29199 19760.729
"""
@mark.parametrize('Om0, Ode0, data', (
(0.3, 0.7, icosmo_flat), (0.3, 0.1, icosmo_open), (2, 0.1, icosmo_closed)
))
def test_flat_open_closed_icosmo(self, Om0, Ode0, data):
cosmo = self.cosmo_cls()
cosmo.H0, cosmo.Om0, cosmo.Ode0 = H70, Om0, Ode0
z, dm, da, dl = (tensor(_, dtype=torch.get_default_dtype())
for _ in np.loadtxt(StringIO(data), unpack=True))
assert close(cosmo.comoving_transverse_distance(z).to(Mpc).value, dm)
assert close(cosmo.angular_diameter_distance(z).to(Mpc).value, da)
assert close(cosmo.luminosity_distance(z).to(Mpc).value, dl)
def test_distmod(self):
cosmo = self.flat_cosmo_cls()
cosmo.H0, cosmo.Om0 = H704, 0.272
assert cosmo.hubble_distance.to(Mpc) == 4258.415596590909
assert close(cosmo.distmod(tensor([1, 5])), [44.124857, 48.40167258])
@with_default_double
def test_negdistmod(self):
cosmo = self.cosmo_cls()
cosmo.H0, cosmo.Om0, cosmo.Ode0 = H70, 0.2, 1.3
z = tensor([50, 100])
assert close(cosmo.luminosity_distance(z).to(Mpc).value, [16612.44047622, -46890.79092244])
assert close(cosmo.distmod(z), [46.102167189, 48.355437790944])
def test_comoving_distance_z1z2(self):
cosmo = self.cosmo_cls()
cosmo.Om0, cosmo.Ode0 = 0.3, 0.8
with pytest.raises(RuntimeError):
cosmo.comoving_distance_z1z2(tensor((1, 2)), tensor((3, 4, 5)))
assert cosmo.comoving_distance_z1z2(1, 2) == - cosmo.comoving_distance_z1z2(2, 1)
assert close(
cosmo.comoving_distance_z1z2(tensor([0, 0, 2, 0.5, 1]), tensor([2, 1, 1, 2.5, 1.1])).to(Mpc).value,
[3767.90579253, 2386.25591391, -1381.64987862, 2893.11776663, 174.1524683]
)
@with_default_double
@mark.parametrize('Om0, val', (
# (0, 2997.92458), # TODO: cannot do Om0=0 with LambdaCDM, need special cosmology
(1, 1756.1435599923348),
))
def test_distance_in_special_cosmologies(self, Om0, val):
cosmo = self.flat_cosmo_cls()
cosmo.Om0 = Om0
assert close(cosmo.comoving_distance(0).to(Mpc).value, 0)
assert close(cosmo.comoving_distance(1).to(Mpc).value, val)
@with_default_double
def test_comoving_transverse_distance_z1z2(self):
z1, z2 = tensor([0, 0, 2, 0.5, 1]), tensor([2, 1, 1, 2.5, 1.1])
cosmo = self.flat_cosmo_cls()
cosmo.Om0 = 0.3
with pytest.raises(RuntimeError):
cosmo.comoving_transverse_distance_z1z2(tensor((1, 2)), tensor((3, 4, 5)))
assert close(cosmo.comoving_transverse_distance_z1z2(1, 2).to(Mpc).value, 1313.2232194828466)
assert close(cosmo.comoving_distance_z1z2(z1, z2).to(Mpc).value,
cosmo.comoving_transverse_distance_z1z2(z1, z2).to(Mpc).value)
cosmo = self.flat_cosmo_cls()
cosmo.Om0 = 1.5
assert close(
cosmo.comoving_transverse_distance_z1z2(z1, z2).to(Mpc).value,
[2202.72682564, 1559.51679971, -643.21002593, 1408.36365679, 85.09286258]
)
assert close(cosmo.comoving_distance_z1z2(z1, z2).to(Mpc).value,
cosmo.comoving_transverse_distance_z1z2(z1, z2).to(Mpc).value)
cosmo = self.cosmo_cls()
cosmo.Om0, cosmo.Ode0 = 0.3, 0.5
assert close(
cosmo.comoving_transverse_distance_z1z2(z1, z2).to(Mpc).value,
[3535.931375645655, 2226.430046551708, -1208.6817970036532, 2595.567367601969, 151.36592003406884]
)
cosmo = self.cosmo_cls()
cosmo.Om0, cosmo.Ode0 = 1, 0.2
assert close(
cosmo.comoving_transverse_distance_z1z2(0.1, tensor([0, 0.1, 0.2, 0.5, 1.1, 2])).to(Mpc).value,
[-281.31602666724865, 0, 248.58093707820436, 843.9331377460543, 1618.6104987686672, 2287.5626543279927]
)
def test_angular_diameter_distance_z1z2(self):
cosmo = self.flat_cosmo_cls()
cosmo.H0, cosmo.Om0 = H704, 0.272
with pytest.raises(RuntimeError):
cosmo.angular_diameter_distance_z1z2(tensor((1, 2)), tensor((3, 4, 5)))
assert close(cosmo.angular_diameter_distance_z1z2(1, 2).to(Mpc).value, 646.22968662822018)
assert close(
cosmo.angular_diameter_distance_z1z2(tensor([0, 0, 2, 0.5, 1]), tensor([2, 1, 1, 2.5, 1.1])).to(Mpc).value,
[1760.0628637762106, 1670.7497657219858, -969.34452994, 1159.0970895962193, 115.72768186186921]
)
assert close(
cosmo.angular_diameter_distance_z1z2(0.1, tensor([0.1, 0.2, 0.5, 1.1, 2])).to(Mpc).value,
[0, 332.09893173, 986.35635069, 1508.37010062, 1621.07937976]
)
# Non-flat (positive Ok0) test
cosmo = self.cosmo_cls()
cosmo.H0, cosmo.Om0, cosmo.Ode0 = H704, 0.2, 0.5
assert close(cosmo.angular_diameter_distance_z1z2(1, 2).to(Mpc).value, 620.1175337852428)
# Non-flat (negative Ok0) test
cosmo = self.cosmo_cls()
cosmo.Om0, cosmo.Ode0 = 2, 1
assert close(cosmo.angular_diameter_distance_z1z2(1, 2).to(Mpc).value, 228.42914659246014)
def test_absorption_distance(self):
cosmo = self.flat_cosmo_cls()
cosmo.H0, cosmo.Om0 = H704, 0.272
assert close(cosmo.absorption_distance(3), 7.98685853)
assert close(cosmo.absorption_distance(tensor([1, 3])), [1.72576635, 7.98685853])
class BaseLambdaCDMRTest(BaseLambdaCDMDriverTest):
flat_cosmo_cls: Type[phytorch.cosmology.special.FlatLambdaCDMR]
cosmo_cls: Type[phytorch.cosmology.special.LambdaCDMR]
@with_default_double
def test_ogamma(self):
z = tensor([1, 10, 500, 1000])
for Neff, Tcmb0, vals in (
# (3, 0, [1651.9, 858.2, 26.855, 13.642]), # cannot have Or0=0
(3, 2.725, [1651.8, 857.9, 26.767, 13.582]),
(3, 4, [1651.4, 856.6, 26.489, 13.405]),
# (3.04, 0, [1651.91, 858.205, 26.8586, 13.6469]), # cannot have Or0=0
(3.04, 2.725, [1651.76, 857.817, 26.7688, 13.5841]),
(3.04, 4, [1651.21, 856.411, 26.4845, 13.4028]),
):
cosmo = self.flat_cosmo_cls()
cosmo.H0, cosmo.Om0, cosmo.Neff, cosmo.Tcmb0 = H70, 0.3, Neff, Tcmb0*kelvin
assert close(cosmo.angular_diameter_distance(z).to(Mpc).value, vals, rtol=5e-4)
# from astropy: Just to be really sure, we also do a version where the
# integral is analytic, which is a Ode = 0 flat universe. In this case
# Integrate(1/E(x),{x,0,z}) = 2 ( sqrt((1+Or z)/(1+z)) - 1 )/(Or - 1)
# Recall that c/H0 * Integrate(1/E) is FLRW.comoving_distance.
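        # Note on the constants below (an interpretive comment, not from the original source):
        # 5.670373e-8 is the Stefan-Boltzmann constant [W m^-2 K^-4], 299792458 is c [m/s], and
        # 1.87837e-26 is the critical density per h^2 [kg m^-3], so Ogamma0h2 = 4*sigma*T^4 / (c^3 * rho_crit/h^2),
        # and Onu0h2 adds the standard neutrino factor (7/8) * (4/11)**(4/3) * Neff.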
hubdis = (299792.458 / 70.0)
Neff = 3.04
for Tcmb0 in (2.725, 5):
Ogamma0h2 = 4 * 5.670373e-8 / 299792458**3 * Tcmb0**4 / 1.87837e-26
Onu0h2 = Ogamma0h2 * 7/8 * (4 / 11)**(4/3) * Neff
Or0 = (Ogamma0h2 + Onu0h2) / 0.7**2
vals = 2 * hubdis * (((1 + Or0*z) / (1+z))**0.5 - 1) / (Or0 - 1)
cosmo = self.flat_cosmo_cls()
cosmo.H0, cosmo.Neff, cosmo.Tcmb0, cosmo.Ode0 = H70, Neff, Tcmb0 * kelvin, 0
assert close(cosmo.comoving_distance(z).to(Mpc).value, vals)
class TestAnalyticLambdaCDM(BaseLambdaCDMTest):
flat_cosmo_cls = phytorch.cosmology.drivers.analytic.FlatLambdaCDM
cosmo_cls = phytorch.cosmology.drivers.analytic.LambdaCDM
class TestAnalyticCDMR(BaseLambdaCDMRTest):
flat_cosmo_cls = phytorch.cosmology.drivers.analytic.FlatLambdaCDMR
cosmo_cls = phytorch.cosmology.drivers.analytic.LambdaCDMR
class TestAnalyticDiffLambdaCDM(BaseLambdaCDMTest):
flat_cosmo_cls = phytorch.cosmology.drivers.analytic_diff.FlatLambdaCDM
cosmo_cls = phytorch.cosmology.drivers.analytic_diff.LambdaCDM
class TestAnalyticDiffCDMR(BaseLambdaCDMRTest):
flat_cosmo_cls = phytorch.cosmology.drivers.analytic_diff.FlatLambdaCDMR
cosmo_cls = phytorch.cosmology.drivers.analytic_diff.LambdaCDMR
# TODO: (age...) test_age
# TODO: (age...) test_age_in_special_cosmologies
# TODO: (neutrinos, weird models...) test_distances
|
"""Cli entry point to setup db indexes."""
import click
import structlog
from click import Context
from miscutils.logging_config import Verbosity
from pymongo import ASCENDING, IndexModel
from pymongo.collection import Collection
from selectedtests.config.logging_config import config_logging
from selectedtests.datasource.mongo_wrapper import MongoWrapper
LOGGER = structlog.get_logger()
def setup_queue_indexes(collection: Collection) -> None:
"""
    Create appropriate indexes for the test and task mapping work item queues (e.g. ProjectTestMappingWorkItems).
:param collection: Collection to add indexes to.
"""
index = IndexModel([("project", ASCENDING)], unique=True)
collection.create_indexes([index])
LOGGER.info("Adding indexes for collection", collection=collection.name)
def setup_mappings_indexes(collection: Collection) -> None:
"""
Create appropriate indexes for the test and task mappings collections.
:param collection: Collection to add indexes to.
"""
    # project, source_file on its own could be unique, but the repo and branch are needed when
    # there is a module.
index = IndexModel(
[
("project", ASCENDING),
("repo", ASCENDING),
("branch", ASCENDING),
("source_file", ASCENDING),
],
unique=True,
)
collection.create_indexes([index])
LOGGER.info("Adding indexes for collection", collection=collection.name)
def setup_mappings_tasks_indexes(collection: Collection) -> None:
"""
Create appropriate indexes for the mapping tasks collection.
The indexes must support both the $lookup operation and uniqueness constraints.
:param collection: Collection to add indexes to.
"""
index = IndexModel(
[("task_mapping_id", ASCENDING), ("name", ASCENDING), ("variant", ASCENDING)], unique=True
)
collection.create_indexes([index])
LOGGER.info("Adding indexes for collection", collection=collection.name)
def setup_mappings_test_files_indexes(collection: Collection) -> None:
"""
Create appropriate indexes for the mapping test files collection.
The indexes must support both the $lookup operation and uniqueness constraints.
:param collection: Collection to add indexes to.
"""
index = IndexModel([("test_mapping_id", ASCENDING), ("name", ASCENDING)], unique=True)
collection.create_indexes([index])
LOGGER.info("Adding indexes for collection", collection=collection.name)
@click.group()
@click.option("--verbose", is_flag=True, default=False, help="Enable verbose logging.")
@click.option("--mongo-uri", required=True, type=str, help="Mongo URI to connect to.")
@click.pass_context
def cli(ctx: Context, verbose: bool, mongo_uri: str) -> None:
"""Suite of MongoDB related commands, see the commands help for more details."""
ctx.ensure_object(dict)
ctx.obj["mongo"] = MongoWrapper.connect(mongo_uri)
verbosity = Verbosity.DEBUG if verbose else Verbosity.INFO
config_logging(verbosity, human_readable=False)
@cli.command()
@click.pass_context
def create_indexes(ctx: Context) -> None:
"""Initialize the mongo database with proper indexes."""
# Creating index no-ops if index already exists
setup_queue_indexes(ctx.obj["mongo"].test_mappings_queue())
setup_queue_indexes(ctx.obj["mongo"].task_mappings_queue())
setup_mappings_indexes(ctx.obj["mongo"].test_mappings())
setup_mappings_indexes(ctx.obj["mongo"].task_mappings())
setup_mappings_test_files_indexes(ctx.obj["mongo"].test_mappings_test_files())
setup_mappings_tasks_indexes(ctx.obj["mongo"].task_mappings_tasks())
def main() -> None:
"""Entry point for setting up selected-tests db indexes."""
return cli(obj={}, auto_envvar_prefix="SELECTED_TESTS")
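# Hedged usage sketch; the console-script name below is an assumption, not taken from this file:
#   selected-tests-db --mongo-uri mongodb://localhost:27017 create-indexes
# (older click versions keep the underscore: create_indexes). With auto_envvar_prefix set,
# the URI can usually also be supplied via the SELECTED_TESTS_MONGO_URI environment variable.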
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = ['Route']
class Route(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
address_prefix: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
next_hop_ip_address: Optional[pulumi.Input[str]] = None,
next_hop_type: Optional[pulumi.Input[Union[str, 'RouteNextHopType']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_name: Optional[pulumi.Input[str]] = None,
route_table_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Route resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] address_prefix: The destination CIDR to which the route applies.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[str] next_hop_ip_address: The IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop type is VirtualAppliance.
:param pulumi.Input[Union[str, 'RouteNextHopType']] next_hop_type: The type of Azure hop the packet should be sent to.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] route_name: The name of the route.
:param pulumi.Input[str] route_table_name: The name of the route table.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['address_prefix'] = address_prefix
__props__['id'] = id
__props__['name'] = name
__props__['next_hop_ip_address'] = next_hop_ip_address
if next_hop_type is None and not opts.urn:
raise TypeError("Missing required property 'next_hop_type'")
__props__['next_hop_type'] = next_hop_type
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['route_name'] = route_name
if route_table_name is None and not opts.urn:
raise TypeError("Missing required property 'route_table_name'")
__props__['route_table_name'] = route_table_name
__props__['etag'] = None
__props__['provisioning_state'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20200701:Route"), pulumi.Alias(type_="azure-native:network:Route"), pulumi.Alias(type_="azure-nextgen:network:Route"), pulumi.Alias(type_="azure-native:network/latest:Route"), pulumi.Alias(type_="azure-nextgen:network/latest:Route"), pulumi.Alias(type_="azure-native:network/v20150501preview:Route"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:Route"), pulumi.Alias(type_="azure-native:network/v20150615:Route"), pulumi.Alias(type_="azure-nextgen:network/v20150615:Route"), pulumi.Alias(type_="azure-native:network/v20160330:Route"), pulumi.Alias(type_="azure-nextgen:network/v20160330:Route"), pulumi.Alias(type_="azure-native:network/v20160601:Route"), pulumi.Alias(type_="azure-nextgen:network/v20160601:Route"), pulumi.Alias(type_="azure-native:network/v20160901:Route"), pulumi.Alias(type_="azure-nextgen:network/v20160901:Route"), pulumi.Alias(type_="azure-native:network/v20161201:Route"), pulumi.Alias(type_="azure-nextgen:network/v20161201:Route"), pulumi.Alias(type_="azure-native:network/v20170301:Route"), pulumi.Alias(type_="azure-nextgen:network/v20170301:Route"), pulumi.Alias(type_="azure-native:network/v20170601:Route"), pulumi.Alias(type_="azure-nextgen:network/v20170601:Route"), pulumi.Alias(type_="azure-native:network/v20170801:Route"), pulumi.Alias(type_="azure-nextgen:network/v20170801:Route"), pulumi.Alias(type_="azure-native:network/v20170901:Route"), pulumi.Alias(type_="azure-nextgen:network/v20170901:Route"), pulumi.Alias(type_="azure-native:network/v20171001:Route"), pulumi.Alias(type_="azure-nextgen:network/v20171001:Route"), pulumi.Alias(type_="azure-native:network/v20171101:Route"), pulumi.Alias(type_="azure-nextgen:network/v20171101:Route"), pulumi.Alias(type_="azure-native:network/v20180101:Route"), pulumi.Alias(type_="azure-nextgen:network/v20180101:Route"), pulumi.Alias(type_="azure-native:network/v20180201:Route"), pulumi.Alias(type_="azure-nextgen:network/v20180201:Route"), pulumi.Alias(type_="azure-native:network/v20180401:Route"), pulumi.Alias(type_="azure-nextgen:network/v20180401:Route"), pulumi.Alias(type_="azure-native:network/v20180601:Route"), pulumi.Alias(type_="azure-nextgen:network/v20180601:Route"), pulumi.Alias(type_="azure-native:network/v20180701:Route"), pulumi.Alias(type_="azure-nextgen:network/v20180701:Route"), pulumi.Alias(type_="azure-native:network/v20180801:Route"), pulumi.Alias(type_="azure-nextgen:network/v20180801:Route"), pulumi.Alias(type_="azure-native:network/v20181001:Route"), pulumi.Alias(type_="azure-nextgen:network/v20181001:Route"), pulumi.Alias(type_="azure-native:network/v20181101:Route"), pulumi.Alias(type_="azure-nextgen:network/v20181101:Route"), pulumi.Alias(type_="azure-native:network/v20181201:Route"), pulumi.Alias(type_="azure-nextgen:network/v20181201:Route"), pulumi.Alias(type_="azure-native:network/v20190201:Route"), pulumi.Alias(type_="azure-nextgen:network/v20190201:Route"), pulumi.Alias(type_="azure-native:network/v20190401:Route"), pulumi.Alias(type_="azure-nextgen:network/v20190401:Route"), pulumi.Alias(type_="azure-native:network/v20190601:Route"), pulumi.Alias(type_="azure-nextgen:network/v20190601:Route"), pulumi.Alias(type_="azure-native:network/v20190701:Route"), pulumi.Alias(type_="azure-nextgen:network/v20190701:Route"), pulumi.Alias(type_="azure-native:network/v20190801:Route"), pulumi.Alias(type_="azure-nextgen:network/v20190801:Route"), pulumi.Alias(type_="azure-native:network/v20190901:Route"), 
pulumi.Alias(type_="azure-nextgen:network/v20190901:Route"), pulumi.Alias(type_="azure-native:network/v20191101:Route"), pulumi.Alias(type_="azure-nextgen:network/v20191101:Route"), pulumi.Alias(type_="azure-native:network/v20191201:Route"), pulumi.Alias(type_="azure-nextgen:network/v20191201:Route"), pulumi.Alias(type_="azure-native:network/v20200301:Route"), pulumi.Alias(type_="azure-nextgen:network/v20200301:Route"), pulumi.Alias(type_="azure-native:network/v20200401:Route"), pulumi.Alias(type_="azure-nextgen:network/v20200401:Route"), pulumi.Alias(type_="azure-native:network/v20200501:Route"), pulumi.Alias(type_="azure-nextgen:network/v20200501:Route"), pulumi.Alias(type_="azure-native:network/v20200601:Route"), pulumi.Alias(type_="azure-nextgen:network/v20200601:Route"), pulumi.Alias(type_="azure-native:network/v20200801:Route"), pulumi.Alias(type_="azure-nextgen:network/v20200801:Route")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Route, __self__).__init__(
'azure-native:network/v20200701:Route',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Route':
"""
Get an existing Route resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["address_prefix"] = None
__props__["etag"] = None
__props__["name"] = None
__props__["next_hop_ip_address"] = None
__props__["next_hop_type"] = None
__props__["provisioning_state"] = None
return Route(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="addressPrefix")
def address_prefix(self) -> pulumi.Output[Optional[str]]:
"""
The destination CIDR to which the route applies.
"""
return pulumi.get(self, "address_prefix")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="nextHopIpAddress")
def next_hop_ip_address(self) -> pulumi.Output[Optional[str]]:
"""
The IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop type is VirtualAppliance.
"""
return pulumi.get(self, "next_hop_ip_address")
@property
@pulumi.getter(name="nextHopType")
def next_hop_type(self) -> pulumi.Output[str]:
"""
The type of Azure hop the packet should be sent to.
"""
return pulumi.get(self, "next_hop_type")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the route resource.
"""
return pulumi.get(self, "provisioning_state")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
"""
Translates DMU table in NCGMP09-style geodatabase into a fully formatted
Microsoft Word .docx file.
Assumes formatting and style names in USGS Pubs template MapManuscript_v1-0_04-11.dotx
Arguments
Input geodatabase
Output workspace
Output filename (if it doesn't end in .docx, .docx will be appended)
    UseMapUnitForUnitLabl (Boolean, either 'true' or 'false')
    LMUonly (Boolean, either 'true' or 'false'; 'true' writes a List of Map Units, omitting unit descriptions)
"""
import sys, os.path, arcpy
from GeMS_utilityFunctions import *
from docxModified import *
versionString = 'GeMS_DMUtoDocx_Arc10.py, version of 2 September 2017'
addMsgAndPrint( versionString )
debug = False
debug2 = False
tab = '<tab></tab>'
emDash = u"\u2014"
startTags = []
endTags = []
tags = ['b','i','g','ul','sup','sub','tab']
for tag in tags:
startTags.append('<'+tag+'>')
endTags.append('</'+tag+'>')
def isNotBlank(thing):
if thing <> '' and thing <> None:
return True
else:
return False
def isKnownStyle(pStyle):
if pStyle == 'DMUHeadnote' or pStyle.find('DMU-Heading') > -1 or pStyle.find('DMUUnit') > -1:
return True
else:
return False
def notNullText(txt):
if txt == '#null' or txt == None or txt == '#Null' or txt == '#' or txt == '' or len(txt.split()) == 0:
return False
else:
return True
gdb = sys.argv[1]
outfl = sys.argv[3]
if outfl.lower()[-5:] <> '.docx':
outfl = outfl+'.docx'
outDMUdocx = os.path.join(sys.argv[2],outfl)
if sys.argv[4] == 'true':
useMapUnitForUnitLabl = True
else:
useMapUnitForUnitLabl = False
if sys.argv[5] == 'true': # LMU only
notLMU = False
else:
notLMU = True
arcpy.env.workspace = gdb
relationships = relationshiplist()
document = newdocument()
docbody = document.xpath('/w:document/w:body', namespaces=nsprefixes)[0]
if notLMU:
docbody.append(paragraph('Description of Map Units','DMU-Heading1'))
else:
docbody.append(paragraph('List of Map Units','DMU-Heading1'))
lastParaWasHeading = True
"""
DMU has many rows
Each row has content for 1 or more paragraphs. 1st paragraph has style 'row.ParagraphStyle'
2nd and subsequent paragraphs have style 'DMUParagraph'
Each paragraph is composed of one or more runs, each of which _may_ include
markup tags
We sort DMU on HierarchyKey and then step through the rows, constructing rowtext w/ markup
according to row.paragraphStyle.
We then divide the newly-built rowtext into paragraphs.
For each paragraph, we split the marked-up text into runs of (text, format tags)
and append a styled paragraph to the document body.
"""
addMsgAndPrint('Getting DMU rows and creating output paragraphs')
dmuRows = arcpy.SearchCursor('DescriptionOfMapUnits',"","","",'HierarchyKey')
for row in dmuRows:
rowText = ''
if isNotBlank(row.HierarchyKey) and isKnownStyle(row.ParagraphStyle):
addMsgAndPrint(' '+ str(row.HierarchyKey)+' '+str(row.ParagraphStyle))
#if row.ParagraphStyle == 'DMUHeadnote':
# rowText = '['+row.Description+']'
if row.ParagraphStyle.find('DMU-Heading') > -1: # is a heading
rowText = row.Name
paraStyle = row.ParagraphStyle
if notNullText(row.Description): # heading has headnote. Append heading to docbody and make new row
addMsgAndPrint('Description='+row.Description)
docbody.append(paragraph(rowText,row.ParagraphStyle))
rowText = row.Description
paraStyle = 'DMUHeadnote'
elif row.ParagraphStyle.find('DMUUnit') > -1: # is a unit
if not useMapUnitForUnitLabl and notNullText(row.Label):
rowText = '<ul>'+row.Label+'</ul>'
elif useMapUnitForUnitLabl and notNullText(row.MapUnit):
rowText = '<ul>'+row.MapUnit+'</ul>'
rowText = rowText + tab
if row.ParagraphStyle[-1:] in ('4','5'):
                rowText = rowText + tab # add second tab for DMUUnit4 and DMUUnit5
if isNotBlank(row.Name):
rowText = rowText + '<b>'+row.Name+'</b>'
if isNotBlank(row.Age):
rowText = rowText + '<b> ('+row.Age+')</b>'
if isNotBlank(row.Description) and notLMU:
rowText = rowText + emDash + row.Description
paraStyle = row.ParagraphStyle
else: # Unrecognized paragraph style
addMsgAndPrint('Do not recognize paragraph style '+row.ParagraphStyle)
## divide into paragraphs and build list of [paraText, paraStyle]
if debug: addMsgAndPrint(' dividing into paragraphs')
paras = []
if paraStyle == 'DMUUnit1' and lastParaWasHeading:
paraStyle = 'DMUUnit11stafterheading'
if rowText.find('<br>') > 0:
print ' got multiple paragraphs!'
while rowText.find('<br>') > 0:
paras.append([rowText.partition('<br>')[0],paraStyle])
rowText = rowText.partition('<br>')[2]
paraStyle = 'DMUParagraph'
paras.append([rowText,paraStyle])
if paraStyle.find('Head') > -1:
lastParaWasHeading = True
else:
lastParaWasHeading = False
if debug: addMsgAndPrint(' finding formatting')
# for each paragraph:
for pgraph in paras:
para = pgraph[0]; paraStyle = pgraph[1]
runs = []
while len(para) > 0:
## Look for initial unformatted text chunk
firstPos = len(para)
for tag in startTags:
pos = para.find(tag)
if pos > -1 and pos < firstPos:
firstPos = pos
if firstPos > 0:
runs.append([[],para[0:firstPos]])
newPara = para[firstPos:]
para = newPara
elif firstPos == len(para):
runs.append([[],para])
para = ''
## or may be no initial unformatted chunk (firstpos = 0), in which case do nothing
## Then pull succeeding chunks
runTags = []
isTag = True
# trim starting tags (and append them to runTags)
while len(para) > 0 and isTag == True:
isTag = False
for tag in startTags:
if para.find(tag) == 0:
runTags.append(tag.replace('<','').replace('>',''))
newPara = para[len(tag):]
para = newPara
isTag = True
# find first endTag
endPos = len(para)
for tag in endTags:
tagPos = para.find(tag)
if tagPos > -1 and tagPos < endPos:
endPos = tagPos
runs.append([runTags,para[0:endPos]])
newPara = para[endPos:]
para = newPara
# strip end tags
isTag = True
while len(para) > 0 and isTag:
isTag = False
for tag in endTags:
if para.find(tag) == 0:
isTag = True
newPara = para[len(tag):]
para = newPara
pText = []
for run in runs:
#if debug: addMsgAndPrint(str(run))
text = run[1]
if text <> '':
tags = ''
if 'b' in run[0]:
tags = tags+'b'
if 'i' in run[0]:
tags = tags+'i'
if 'g' in run[0]:
tags = tags+'g'
if 'ul' in run[0]:
tags = tags+'l'
if 'sup' in run[0]:
tags = tags+'p'
if 'sub' in run[0]:
tags = tags+'d'
pText.append([text,tags])
elif 'tab' in run[0]:
# if this run is a tab, ignore any other tags and set tags to 'tab'
tags = 'tab'
pText.append(['',tags])
docbody.append(paragraph(pText,paraStyle))
addMsgAndPrint(' finished appending paragraphs')
addMsgAndPrint('Setting core properties')
coreprops = coreproperties(title='DMU for '+gdb,subject='',creator=versionString,keywords=['python','NCGMP09','Word'])
appprops = appproperties()
contenttypes = contenttypes()
websettings = websettings()
wordrelationships = wordrelationships(relationships)
# Save our document
addMsgAndPrint('Saving to file '+outDMUdocx)
savedocx(document,coreprops,appprops,contenttypes,websettings,wordrelationships,outDMUdocx)
|
# coding=utf-8
# Copyright 2020 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python2 python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow_gan.examples.progressive_gan import layers
from tensorflow_gan.examples.progressive_gan import networks
def _get_grad_norm(ys, xs):
"""Compute 2-norm of dys / dxs."""
return tf.sqrt(
tf.add_n([
tf.reduce_sum(input_tensor=tf.square(g))
for g in tf.gradients(ys=ys, xs=xs)
]))
def _num_filters_stub(block_id):
return networks.num_filters(block_id, 8, 1, 8)
class NetworksTest(tf.test.TestCase):
def test_resolution_schedule_correct(self):
rs = networks.ResolutionSchedule(
start_resolutions=[5, 3], scale_base=2, num_resolutions=3)
self.assertEqual(rs.start_resolutions, (5, 3))
self.assertEqual(rs.scale_base, 2)
self.assertEqual(rs.num_resolutions, 3)
self.assertEqual(rs.final_resolutions, (20, 12))
self.assertEqual(rs.scale_factor(1), 4)
self.assertEqual(rs.scale_factor(2), 2)
self.assertEqual(rs.scale_factor(3), 1)
with self.assertRaises(ValueError):
rs.scale_factor(0)
with self.assertRaises(ValueError):
rs.scale_factor(4)
def test_block_name(self):
self.assertEqual(networks.block_name(10), 'progressive_gan_block_10')
def test_min_total_num_images(self):
self.assertEqual(networks.min_total_num_images(7, 8, 4), 52)
def test_compute_progress(self):
if tf.executing_eagerly():
progress_output = []
for current_image_id in [0, 3, 6, 7, 8, 10, 15, 29, 100]:
progress = networks.compute_progress(
current_image_id,
stable_stage_num_images=7,
transition_stage_num_images=8,
num_blocks=2)
with self.cached_session(use_gpu=True) as sess:
progress_output.append(sess.run(progress))
else:
current_image_id_ph = tf.compat.v1.placeholder(tf.int32, [])
progress = networks.compute_progress(
current_image_id_ph,
stable_stage_num_images=7,
transition_stage_num_images=8,
num_blocks=2)
with self.cached_session(use_gpu=True) as sess:
progress_output = [
sess.run(progress, feed_dict={current_image_id_ph: cur_image_id})
for cur_image_id in [0, 3, 6, 7, 8, 10, 15, 29, 100]
]
self.assertArrayNear(progress_output,
[0.0, 0.0, 0.0, 0.0, 0.125, 0.375, 1.0, 1.0, 1.0],
1.0e-6)
def test_generator_alpha(self):
with self.cached_session(use_gpu=True) as sess:
alpha_fixed_block_id = [
sess.run(
networks._generator_alpha(2, tf.constant(progress, tf.float32)))
for progress in [0, 0.2, 1, 1.2, 2, 2.2, 3]
]
alpha_fixed_progress = [
sess.run(
networks._generator_alpha(block_id, tf.constant(1.2, tf.float32)))
for block_id in range(1, 5)
]
self.assertArrayNear(alpha_fixed_block_id, [0, 0.2, 1, 0.8, 0, 0, 0],
1.0e-6)
self.assertArrayNear(alpha_fixed_progress, [0, 0.8, 0.2, 0], 1.0e-6)
def test_discriminator_alpha(self):
with self.cached_session(use_gpu=True) as sess:
alpha_fixed_block_id = [sess.run(networks._discriminator_alpha(
2, tf.constant(progress, tf.float32))) for progress in
[0, 0.2, 1, 1.2, 2, 2.2, 3]]
alpha_fixed_progress = [sess.run(networks._discriminator_alpha(
block_id, tf.constant(1.2, tf.float32))) for block_id in range(1, 5)]
self.assertArrayNear(alpha_fixed_block_id, [1, 1, 1, 0.8, 0, 0, 0], 1.0e-6)
self.assertArrayNear(alpha_fixed_progress, [0, 0.8, 1, 1], 1.0e-6)
def test_blend_images_in_stable_stage(self):
x_np = np.random.normal(size=[2, 8, 8, 3])
x = tf.constant(x_np, tf.float32)
x_blend = networks.blend_images(
x,
progress=tf.constant(0.0),
resolution_schedule=networks.ResolutionSchedule(
scale_base=2, num_resolutions=2),
num_blocks=2)
with self.cached_session(use_gpu=True) as sess:
x_blend_np = sess.run(x_blend)
x_blend_expected_np = sess.run(layers.upscale(layers.downscale(x, 2), 2))
self.assertNDArrayNear(x_blend_np, x_blend_expected_np, 1.0e-6)
def test_blend_images_in_transition_stage(self):
x_np = np.random.normal(size=[2, 8, 8, 3])
x = tf.constant(x_np, tf.float32)
x_blend = networks.blend_images(
x,
tf.constant(0.2),
resolution_schedule=networks.ResolutionSchedule(
scale_base=2, num_resolutions=2),
num_blocks=2)
with self.cached_session(use_gpu=True) as sess:
x_blend_np = sess.run(x_blend)
x_blend_expected_np = 0.8 * sess.run(
layers.upscale(layers.downscale(x, 2), 2)) + 0.2 * x_np
self.assertNDArrayNear(x_blend_np, x_blend_expected_np, 1.0e-6)
def test_num_filters(self):
self.assertEqual(networks.num_filters(1, 4096, 1, 256), 256)
self.assertEqual(networks.num_filters(5, 4096, 1, 256), 128)
def test_generator_grad_norm_progress(self):
if tf.executing_eagerly():
# tf.placeholder() is not compatible with eager execution.
return
stable_stage_num_images = 2
transition_stage_num_images = 3
current_image_id_ph = tf.compat.v1.placeholder(tf.int32, [])
progress = networks.compute_progress(
current_image_id_ph,
stable_stage_num_images,
transition_stage_num_images,
num_blocks=3)
z = tf.random.normal([2, 10], dtype=tf.float32)
x, _ = networks.generator(
z, progress, _num_filters_stub,
networks.ResolutionSchedule(
start_resolutions=(4, 4), scale_base=2, num_resolutions=3))
fake_loss = tf.reduce_sum(input_tensor=tf.square(x))
grad_norms = [
_get_grad_norm(
fake_loss,
tf.compat.v1.trainable_variables('.*/progressive_gan_block_1/.*')),
_get_grad_norm(
fake_loss,
tf.compat.v1.trainable_variables('.*/progressive_gan_block_2/.*')),
_get_grad_norm(
fake_loss,
tf.compat.v1.trainable_variables('.*/progressive_gan_block_3/.*'))
]
grad_norms_output = None
with self.cached_session(use_gpu=True) as sess:
sess.run(tf.compat.v1.global_variables_initializer())
x1_np = sess.run(x, feed_dict={current_image_id_ph: 0.12})
x2_np = sess.run(x, feed_dict={current_image_id_ph: 1.8})
grad_norms_output = np.array([
sess.run(grad_norms, feed_dict={current_image_id_ph: i})
for i in range(15) # total num of images
])
self.assertEqual((2, 16, 16, 3), x1_np.shape)
self.assertEqual((2, 16, 16, 3), x2_np.shape)
# The gradient of block_1 is always on.
self.assertEqual(
np.argmax(grad_norms_output[:, 0] > 0), 0,
'gradient norms {} for block 1 is not always on'.format(
grad_norms_output[:, 0]))
# The gradient of block_2 is on after 1 stable stage.
self.assertEqual(
np.argmax(grad_norms_output[:, 1] > 0), 3,
'gradient norms {} for block 2 is not on at step 3'.format(
grad_norms_output[:, 1]))
# The gradient of block_3 is on after 2 stable stage + 1 transition stage.
self.assertEqual(
np.argmax(grad_norms_output[:, 2] > 0), 8,
'gradient norms {} for block 3 is not on at step 8'.format(
grad_norms_output[:, 2]))
def test_discriminator_grad_norm_progress(self):
if tf.executing_eagerly():
# tf.placeholder() is not compatible with eager execution.
return
stable_stage_num_images = 2
transition_stage_num_images = 3
current_image_id_ph = tf.compat.v1.placeholder(tf.int32, [])
progress = networks.compute_progress(
current_image_id_ph,
stable_stage_num_images,
transition_stage_num_images,
num_blocks=3)
x = tf.random.normal([2, 16, 16, 3])
logits, _ = networks.discriminator(
x, progress, _num_filters_stub,
networks.ResolutionSchedule(
start_resolutions=(4, 4), scale_base=2, num_resolutions=3))
fake_loss = tf.reduce_sum(input_tensor=tf.square(logits))
grad_norms = [
_get_grad_norm(
fake_loss,
tf.compat.v1.trainable_variables('.*/progressive_gan_block_1/.*')),
_get_grad_norm(
fake_loss,
tf.compat.v1.trainable_variables('.*/progressive_gan_block_2/.*')),
_get_grad_norm(
fake_loss,
tf.compat.v1.trainable_variables('.*/progressive_gan_block_3/.*'))
]
grad_norms_output = None
with self.cached_session(use_gpu=True) as sess:
sess.run(tf.compat.v1.global_variables_initializer())
grad_norms_output = np.array([
sess.run(grad_norms, feed_dict={current_image_id_ph: i})
for i in range(15) # total num of images
])
# The gradient of block_1 is always on.
self.assertEqual(
np.argmax(grad_norms_output[:, 0] > 0), 0,
'gradient norms {} for block 1 is not always on'.format(
grad_norms_output[:, 0]))
# The gradient of block_2 is on after 1 stable stage.
self.assertEqual(
np.argmax(grad_norms_output[:, 1] > 0), 3,
'gradient norms {} for block 2 is not on at step 3'.format(
grad_norms_output[:, 1]))
# The gradient of block_3 is on after 2 stable stage + 1 transition stage.
self.assertEqual(
np.argmax(grad_norms_output[:, 2] > 0), 8,
'gradient norms {} for block 3 is not on at step 8'.format(
grad_norms_output[:, 2]))
if __name__ == '__main__':
tf.test.main()
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ExtensionsV1beta1DeploymentSpec(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'min_ready_seconds': 'int',
'paused': 'bool',
'progress_deadline_seconds': 'int',
'replicas': 'int',
'revision_history_limit': 'int',
'rollback_to': 'ExtensionsV1beta1RollbackConfig',
'selector': 'V1LabelSelector',
'strategy': 'ExtensionsV1beta1DeploymentStrategy',
'template': 'V1PodTemplateSpec'
}
attribute_map = {
'min_ready_seconds': 'minReadySeconds',
'paused': 'paused',
'progress_deadline_seconds': 'progressDeadlineSeconds',
'replicas': 'replicas',
'revision_history_limit': 'revisionHistoryLimit',
'rollback_to': 'rollbackTo',
'selector': 'selector',
'strategy': 'strategy',
'template': 'template'
}
def __init__(self, min_ready_seconds=None, paused=None, progress_deadline_seconds=None, replicas=None, revision_history_limit=None, rollback_to=None, selector=None, strategy=None, template=None):
"""
ExtensionsV1beta1DeploymentSpec - a model defined in Swagger
"""
self._min_ready_seconds = None
self._paused = None
self._progress_deadline_seconds = None
self._replicas = None
self._revision_history_limit = None
self._rollback_to = None
self._selector = None
self._strategy = None
self._template = None
self.discriminator = None
if min_ready_seconds is not None:
self.min_ready_seconds = min_ready_seconds
if paused is not None:
self.paused = paused
if progress_deadline_seconds is not None:
self.progress_deadline_seconds = progress_deadline_seconds
if replicas is not None:
self.replicas = replicas
if revision_history_limit is not None:
self.revision_history_limit = revision_history_limit
if rollback_to is not None:
self.rollback_to = rollback_to
if selector is not None:
self.selector = selector
if strategy is not None:
self.strategy = strategy
self.template = template
@property
def min_ready_seconds(self):
"""
Gets the min_ready_seconds of this ExtensionsV1beta1DeploymentSpec.
Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)
:return: The min_ready_seconds of this ExtensionsV1beta1DeploymentSpec.
:rtype: int
"""
return self._min_ready_seconds
@min_ready_seconds.setter
def min_ready_seconds(self, min_ready_seconds):
"""
Sets the min_ready_seconds of this ExtensionsV1beta1DeploymentSpec.
Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)
:param min_ready_seconds: The min_ready_seconds of this ExtensionsV1beta1DeploymentSpec.
:type: int
"""
self._min_ready_seconds = min_ready_seconds
@property
def paused(self):
"""
Gets the paused of this ExtensionsV1beta1DeploymentSpec.
Indicates that the deployment is paused and will not be processed by the deployment controller.
:return: The paused of this ExtensionsV1beta1DeploymentSpec.
:rtype: bool
"""
return self._paused
@paused.setter
def paused(self, paused):
"""
Sets the paused of this ExtensionsV1beta1DeploymentSpec.
Indicates that the deployment is paused and will not be processed by the deployment controller.
:param paused: The paused of this ExtensionsV1beta1DeploymentSpec.
:type: bool
"""
self._paused = paused
@property
def progress_deadline_seconds(self):
"""
Gets the progress_deadline_seconds of this ExtensionsV1beta1DeploymentSpec.
The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. This is set to the max value of int32 (i.e. 2147483647) by default, which means \"no deadline\".
:return: The progress_deadline_seconds of this ExtensionsV1beta1DeploymentSpec.
:rtype: int
"""
return self._progress_deadline_seconds
@progress_deadline_seconds.setter
def progress_deadline_seconds(self, progress_deadline_seconds):
"""
Sets the progress_deadline_seconds of this ExtensionsV1beta1DeploymentSpec.
The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. This is set to the max value of int32 (i.e. 2147483647) by default, which means \"no deadline\".
:param progress_deadline_seconds: The progress_deadline_seconds of this ExtensionsV1beta1DeploymentSpec.
:type: int
"""
self._progress_deadline_seconds = progress_deadline_seconds
@property
def replicas(self):
"""
Gets the replicas of this ExtensionsV1beta1DeploymentSpec.
Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
:return: The replicas of this ExtensionsV1beta1DeploymentSpec.
:rtype: int
"""
return self._replicas
@replicas.setter
def replicas(self, replicas):
"""
Sets the replicas of this ExtensionsV1beta1DeploymentSpec.
Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
:param replicas: The replicas of this ExtensionsV1beta1DeploymentSpec.
:type: int
"""
self._replicas = replicas
@property
def revision_history_limit(self):
"""
Gets the revision_history_limit of this ExtensionsV1beta1DeploymentSpec.
        The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. This is set to the max value of int32 (i.e. 2147483647) by default, which means \"retaining all old ReplicaSets\".
:return: The revision_history_limit of this ExtensionsV1beta1DeploymentSpec.
:rtype: int
"""
return self._revision_history_limit
@revision_history_limit.setter
def revision_history_limit(self, revision_history_limit):
"""
Sets the revision_history_limit of this ExtensionsV1beta1DeploymentSpec.
        The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. This is set to the max value of int32 (i.e. 2147483647) by default, which means \"retaining all old ReplicaSets\".
:param revision_history_limit: The revision_history_limit of this ExtensionsV1beta1DeploymentSpec.
:type: int
"""
self._revision_history_limit = revision_history_limit
@property
def rollback_to(self):
"""
Gets the rollback_to of this ExtensionsV1beta1DeploymentSpec.
DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.
:return: The rollback_to of this ExtensionsV1beta1DeploymentSpec.
:rtype: ExtensionsV1beta1RollbackConfig
"""
return self._rollback_to
@rollback_to.setter
def rollback_to(self, rollback_to):
"""
Sets the rollback_to of this ExtensionsV1beta1DeploymentSpec.
DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.
:param rollback_to: The rollback_to of this ExtensionsV1beta1DeploymentSpec.
:type: ExtensionsV1beta1RollbackConfig
"""
self._rollback_to = rollback_to
@property
def selector(self):
"""
Gets the selector of this ExtensionsV1beta1DeploymentSpec.
Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.
:return: The selector of this ExtensionsV1beta1DeploymentSpec.
:rtype: V1LabelSelector
"""
return self._selector
@selector.setter
def selector(self, selector):
"""
Sets the selector of this ExtensionsV1beta1DeploymentSpec.
Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.
:param selector: The selector of this ExtensionsV1beta1DeploymentSpec.
:type: V1LabelSelector
"""
self._selector = selector
@property
def strategy(self):
"""
Gets the strategy of this ExtensionsV1beta1DeploymentSpec.
The deployment strategy to use to replace existing pods with new ones.
:return: The strategy of this ExtensionsV1beta1DeploymentSpec.
:rtype: ExtensionsV1beta1DeploymentStrategy
"""
return self._strategy
@strategy.setter
def strategy(self, strategy):
"""
Sets the strategy of this ExtensionsV1beta1DeploymentSpec.
The deployment strategy to use to replace existing pods with new ones.
:param strategy: The strategy of this ExtensionsV1beta1DeploymentSpec.
:type: ExtensionsV1beta1DeploymentStrategy
"""
self._strategy = strategy
@property
def template(self):
"""
Gets the template of this ExtensionsV1beta1DeploymentSpec.
Template describes the pods that will be created.
:return: The template of this ExtensionsV1beta1DeploymentSpec.
:rtype: V1PodTemplateSpec
"""
return self._template
@template.setter
def template(self, template):
"""
Sets the template of this ExtensionsV1beta1DeploymentSpec.
Template describes the pods that will be created.
:param template: The template of this ExtensionsV1beta1DeploymentSpec.
:type: V1PodTemplateSpec
"""
if template is None:
raise ValueError("Invalid value for `template`, must not be `None`")
self._template = template
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ExtensionsV1beta1DeploymentSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-12-13 14:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='bio',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='user',
name='gender',
field=models.CharField(choices=[('male', 'Male'), ('female', 'Female'), ('not-specified', 'Not specified')], max_length=80, null=True),
),
migrations.AddField(
model_name='user',
name='phone',
field=models.CharField(max_length=140, null=True),
),
migrations.AddField(
model_name='user',
name='website',
field=models.URLField(null=True),
),
]
|
"""
WSGI config for polityper project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from dotenv import load_dotenv
from django.core.wsgi import get_wsgi_application
load_dotenv(dotenv_path="prod.env", verbose=True)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'polityper.settings')
application = get_wsgi_application()
|
from django.urls import path
from .views import (
product_orders_modal,
make_product_purchase,
allocate_product_to_order,
receive_product_stock,
ProductListView,
product_add_to_purchase,
product_search,
)
app_name = "products"
urlpatterns = [
path('', ProductListView.as_view(), name="product-list"),
path('search/', product_search, name="product-search"),
path(
'add-to-purchase/<uuid:guid>/',
product_add_to_purchase,
name="product-add-to-purchase",
),
path(
"product-orders-modal/<uuid:guid>/",
product_orders_modal,
name="product_orders_modal_workflow",
),
path(
"products/make-purchase/<uuid:guid>",
make_product_purchase,
name="make_product_purchase",
),
path(
"products/allocation/<uuid:guid>/",
allocate_product_to_order,
name="allocate_product_to_order",
),
path(
"products/receive/<uuid:guid>/",
receive_product_stock,
name="receive_product_stock",
),
]
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PerlFileSharedirInstall(PerlPackage):
"""Install shared files"""
homepage = "http://search.cpan.org/~ether/File-ShareDir-Install-0.11/lib/File/ShareDir/Install.pm"
url = "http://search.cpan.org/CPAN/authors/id/E/ET/ETHER/File-ShareDir-Install-0.11.tar.gz"
version('0.11', '61107e6ce6eee42bf29525b1a4d029e0')
depends_on('perl-module-build', type='build')
|
class BooleanData:
def __init__(self, objects):
self.objects = list(objects)
@ property
def head(self):
return(self.objects[0])
@ property
def tail(self):
if len(self.objects) > 1:
return self.objects[1:]
else:
return []
class BooleanUnion(BooleanData):
def get_distance(self, pt):
loc_diss = []
for loc_obj in self.objects:
loc_diss.append(loc_obj.get_distance(pt))
return min(loc_diss)
class BooleanDifference(BooleanData):
def get_distance(self, pt):
loc_diss = [self.head.get_distance(pt)]
for loc_obj in self.tail:
loc_diss.append(- loc_obj.get_distance(pt))
return max(loc_diss)
class BooleanIntersection(BooleanData):
def get_distance(self, pt):
loc_diss = []
for loc_obj in self.objects:
loc_diss.append(loc_obj.get_distance(pt))
return max(loc_diss)
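# Hedged usage sketch: members only need a signed get_distance(pt) method
# (negative inside, positive outside). The Sphere class here is hypothetical, not part of this module.
#
#   class Sphere:
#       def __init__(self, center, radius):
#           self.center, self.radius = center, radius
#       def get_distance(self, pt):
#           return sum((p - c) ** 2 for p, c in zip(pt, self.center)) ** 0.5 - self.radius
#
#   union = BooleanUnion([Sphere((0, 0, 0), 1.0), Sphere((1.5, 0, 0), 1.0)])
#   union.get_distance((0.75, 0, 0))   # negative: the point lies inside the union
#   cut = BooleanDifference([Sphere((0, 0, 0), 1.0), Sphere((1.5, 0, 0), 1.0)])
#   cut.get_distance((0.75, 0, 0))     # positive: the overlapping region is carved away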
|
#!/usr/bin/env python
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
import pexpect
os.environ['ANSIBLE_NOCOLOR'] = '1'
out = pexpect.run(
'ansible -c ssh -i localhost, -u cliuser1 -e ansible_python_interpreter={0} '
'-m command -a whoami -Kkb --become-user cliuser2 localhost'.format(sys.argv[1]),
events={
'SSH password:': 'secretpassword\n',
'BECOME password': 'secretpassword\n',
},
timeout=10
)
print(out)
assert b'cliuser2' in out
|
import datetime
import sys
from collections import OrderedDict
class DiskEntry:
"""A disk entry."""
def __init__(self, read_in_kb, write_in_kb, timestamp):
"""Initialize a DiskEntry."""
self._read_in_kb = read_in_kb
self._write_in_kb = write_in_kb
self._timestamp = timestamp
def read_in_kb(self):
return self._read_in_kb
def write_in_kb(self):
return self._write_in_kb
def timestamp(self):
return self._timestamp
def main(iterator):
# List of timestamps and DiskEntry.
timestamps = []
disk_entries = OrderedDict()
# Process disk raw file.
for disk_line in iterator:
# Check if it is a comment.
if disk_line[0] == '#':
continue
disk_entry_data = disk_line.split()
timestamp = datetime.datetime.strptime(disk_entry_data[1], "%H:%M:%S.%f")
total_read_in_kb = 0
total_write_in_kb = 0
for disk_no in range((len(disk_entry_data) - 2) // 14):
total_read_in_kb += int(disk_entry_data[disk_no * 14 + 5])
total_write_in_kb += int(disk_entry_data[disk_no * 14 + 9])
# if len(disk_entries) < disk_no + 1:
# disk_entries.append(OrderedDict())
disk_entries[timestamp] = DiskEntry(total_read_in_kb, total_write_in_kb, timestamp)
return disk_entries
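# Hedged usage sketch (the file name is hypothetical; main() only needs an iterable of raw lines):
#   with open("disk.raw") as fh:
#       entries = main(fh)
#   for ts, entry in entries.items():
#       print(ts, entry.read_in_kb(), entry.write_in_kb())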
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from pathlib import Path
import shutil
from tempfile import NamedTemporaryFile
from typing import Optional, Tuple
import string
import pandas as pd
import torchaudio
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
filter_manifest_df,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
load_df_from_tsv,
save_df_to_tsv,
)
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import download_url, extract_archive
from tqdm import tqdm
log = logging.getLogger(__name__)
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"]
class CoVoST(Dataset):
"""Create a Dataset for CoVoST (https://github.com/facebookresearch/covost).
Args:
root (str): root path to the dataset and generated manifests/features
source_language (str): source (audio) language
target_language (str, optional): target (text) language,
None for no translation (default: None)
version (int, optional): CoVoST version. (default: 2)
        split (str): dataset split to load; only values listed in ``SPLITS`` are accepted
"""
COVOST_URL_TEMPLATE = (
"https://dl.fbaipublicfiles.com/covost/"
"covost_v2.{src_lang}_{tgt_lang}.tsv.tar.gz"
)
VERSIONS = {2}
# SPLITS = ["train", "dev", "test"]
SPLITS = ["train"]
XX_EN_LANGUAGES = {
1: ["fr", "de", "nl", "ru", "es", "it", "tr", "fa", "sv-SE", "mn", "zh-CN"],
2: [
"fr",
"de",
"es",
"ca",
"it",
"ru",
"zh-CN",
"pt",
"fa",
"et",
"mn",
"nl",
"tr",
"ar",
"sv-SE",
"lv",
"sl",
"ta",
"ja",
"id",
"cy",
],
}
EN_XX_LANGUAGES = {
1: [],
2: [
"de",
"tr",
"fa",
"sv-SE",
"mn",
"zh-CN",
"cy",
"ca",
"sl",
"et",
"id",
"ar",
"ta",
"lv",
"ja",
],
}
def __init__(
self,
root: str,
split: str,
source_language: str,
target_language: Optional[str] = None,
version: int = 2,
) -> None:
assert version in self.VERSIONS and split in self.SPLITS
assert source_language is not None
self.no_translation = target_language is None
if not self.no_translation:
assert "en" in {source_language, target_language}
if source_language == "en":
assert target_language in self.EN_XX_LANGUAGES[version]
else:
assert source_language in self.XX_EN_LANGUAGES[version]
else:
# Hack here so that we can get "split" column from CoVoST TSV.
# Note that we use CoVoST train split for ASR which is an extension
# to Common Voice train split.
target_language = "de" if source_language == "en" else "en"
self.root: Path = Path(root)
cv_tsv_path = self.root / "validated.tsv"
assert cv_tsv_path.is_file()
cv_tsv = load_df_from_tsv(cv_tsv_path)
if self.no_translation:
print("No target translation.")
df = cv_tsv[["path", "sentence", "client_id"]]
df = df.set_index(["path"], drop=False)
else:
covost_url = self.COVOST_URL_TEMPLATE.format(
src_lang=source_language, tgt_lang=target_language
)
covost_archive = self.root / Path(covost_url).name
if not covost_archive.is_file():
download_url(covost_url, self.root.as_posix(), hash_value=None)
extract_archive(covost_archive.as_posix())
covost_tsv = load_df_from_tsv(
self.root / Path(covost_url).name.replace(".tar.gz", "")
)
df = pd.merge(
left=cv_tsv[["path", "sentence", "client_id"]],
right=covost_tsv[["path", "translation", "split"]],
how="inner",
on="path",
)
if split == "train":
df = df[(df["split"] == split) | (df["split"] == f"{split}_covost")]
else:
df = df[df["split"] == split]
data = df.to_dict(orient="index").items()
data = [v for k, v in sorted(data, key=lambda x: x[0])]
self.data = []
for e in data:
try:
path = self.root / "wav" / e["path"]
_ = torchaudio.info(path.as_posix())
self.data.append(e)
except RuntimeError:
pass
def __getitem__(
self, n: int
) -> Tuple[Path, int, int, str, str, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
tuple: ``(wav_path, sample_rate, n_frames, sentence, translation, speaker_id,
sample_id)``
"""
data = self.data[n]
path = self.root / "wav" / data["path"]
info = torchaudio.info(path)
sample_rate = info.sample_rate
n_frames = info.num_frames
sentence = data["sentence"]
translation = None if self.no_translation else data["translation"]
speaker_id = data["client_id"]
_id = data["path"].replace(".mp3", "")
return path, sample_rate, n_frames, sentence, translation, speaker_id, _id
def __len__(self) -> int:
return len(self.data)
def process(args):
root = Path(args.data_root).absolute() / args.src_lang
output_root = Path(args.output_root).absolute()
if args.tgt_lang is not None:
output_root = output_root / f"{args.src_lang}-{args.tgt_lang}"
else:
output_root = output_root / f"{args.src_lang}"
if not root.is_dir():
raise NotADirectoryError(f"{root} does not exist")
zip_path = output_root / "fbank80.zip"
if not zip_path.exists():
# Extract features
feature_root = output_root / "fbank80"
        feature_root.mkdir(parents=True, exist_ok=True)
for split in CoVoST.SPLITS:
print(f"Fetching split {split}...")
dataset = CoVoST(root, split, args.src_lang, args.tgt_lang)
print("Extracting log mel filter bank features...")
for wav_path, sample_rate, _, _, _, _, utt_id in tqdm(dataset):
waveform, sample_rate = torchaudio.load(wav_path)
extract_fbank_features(
waveform, sample_rate, feature_root / f"{utt_id}.npy"
)
# Pack features into ZIP
print("ZIPing features...")
create_zip(feature_root, zip_path)
# # Clean up
# shutil.rmtree(feature_root)
print("Fetching ZIP manifest...")
zip_manifest = get_zip_manifest(zip_path)
# Generate TSV manifest
print("Generating manifest...")
train_text = []
task = args.task
# if args.tgt_lang is not None:
# task = f"st_{args.src_lang}_{args.tgt_lang}"
for split in CoVoST.SPLITS:
manifest = {c: [] for c in MANIFEST_COLUMNS}
if args.task == "st" and args.add_src:
manifest["src_text"] = []
dataset = CoVoST(root, split, args.src_lang, args.tgt_lang)
for _, sr, n_frames, src_utt, tgt_utt, speaker_id, utt_id in tqdm(dataset):
manifest["id"].append(utt_id)
manifest["audio"].append(zip_manifest[utt_id])
duration_ms = int(n_frames / sr * 1000)
manifest["n_frames"].append(int(1 + (duration_ms - 25) / 10))
if args.lowercase_src:
src_utt = src_utt.lower()
if args.rm_punc_src:
for w in string.punctuation:
src_utt = src_utt.replace(w, "")
src_utt = src_utt.replace(" ", "")
manifest["tgt_text"].append(src_utt if args.tgt_lang is None else tgt_utt)
if args.task == "st" and args.add_src:
manifest["src_text"].append(src_utt)
manifest["speaker"].append(speaker_id)
is_train_split = split.startswith("train")
if is_train_split:
if args.task == "st" and args.add_src and args.share:
train_text.extend(manifest["src_text"])
train_text.extend(manifest["tgt_text"])
df = pd.DataFrame.from_dict(manifest)
df = filter_manifest_df(df, is_train_split=is_train_split)
save_df_to_tsv(df, output_root / f"{split}_{task}.tsv")
# Generate vocab
v_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
spm_filename_prefix = f"spm_{args.vocab_type}{v_size_str}_{task}"
asr_spm_filename = None
gen_vocab_flag = True
if args.task == "st" and args.add_src:
if args.share:
if args.st_spm_prefix is not None:
gen_vocab_flag = False
spm_filename_prefix = args.st_spm_prefix
else:
spm_filename_prefix = f"spm_{args.vocab_type}{v_size_str}_{args.task}_share"
asr_spm_filename = spm_filename_prefix + ".model"
else:
if args.st_spm_prefix is not None:
gen_vocab_flag = False
spm_filename_prefix = args.st_spm_prefix
assert args.asr_prefix is not None
asr_spm_filename = args.asr_prefix + ".model"
elif args.task == "asr":
if args.asr_prefix is not None:
gen_vocab_flag = False
spm_filename_prefix = args.asr_prefix
if gen_vocab_flag:
with NamedTemporaryFile(mode="w") as f:
for t in train_text:
f.write(t + "\n")
gen_vocab(
Path(f.name),
output_root / spm_filename_prefix,
args.vocab_type,
args.vocab_size
)
# Generate config YAML
gen_config_yaml(
output_root,
spm_filename_prefix + ".model",
yaml_filename=f"config_{task}.yaml",
specaugment_policy="lb",
cmvn_type=args.cmvn_type,
asr_spm_filename=asr_spm_filename,
        share_src_and_tgt=(args.task == "asr")
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--data-root", "-d", required=True, type=str,
help="data root with sub-folders for each language <root>/<src_lang>"
)
parser.add_argument(
"--output-root", "-o", required=True, type=str,
help="output root to save the results"
)
parser.add_argument(
"--vocab-type",
default="unigram",
required=True,
type=str,
choices=["bpe", "unigram", "char"],
),
parser.add_argument("--vocab-size", default=1000, type=int)
parser.add_argument("--src-lang", "-s", required=True, type=str)
parser.add_argument("--task", type=str, default="asr", choices=["asr", "st"])
parser.add_argument("--tgt-lang", "-t", type=str)
parser.add_argument("--share", action="store_true",
help="share the tokenizer and dictionary of the transcription and translation")
parser.add_argument("--add-src", action="store_true", help="add the src text for st task")
parser.add_argument("--asr-prefix", type=str, help="prefix of the asr dict")
parser.add_argument("--st-spm-prefix", type=str, default=None, help="prefix of the existing st dict")
parser.add_argument("--lowercase-src", action="store_true", help="lowercase the source text")
parser.add_argument("--rm-punc-src", action="store_true", help="remove the punctuation of the source text")
parser.add_argument("--cmvn-type", default="utterance",
choices=["global", "utterance"],
help="The type of cepstral mean and variance normalization")
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
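# Typical invocation (a sketch; the script name, paths and language pair are
# illustrative assumptions, not part of this file):
#
#   python prep_covost_data.py -d /data/covost -o /data/covost/prep \
#       -s fr -t en --task st --vocab-type unigram --vocab-size 1000 --add-src --share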
|
#! /usr/bin/env python
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
import sys
import os
import subprocess
import atexit
from vmrunner import vmrunner
vm = vmrunner.vms[0]
import socket
# Set up a temporary interface
import platform
if platform.system() == 'Darwin':
subprocess.call(["sudo", "ifconfig", "bridge43", "alias", "10.0.0.3/24"])
else:
subprocess.call(["sudo", "ip", "addr", "add", "10.0.0.3/24", "dev", "bridge43", "label", "bridge43:1"])
# Tear down interface on exit
@atexit.register
def tear_down():
if platform.system() == 'Darwin':
subprocess.call(["sudo", "ifconfig", "bridge43", "-alias", "10.0.0.3"])
else:
subprocess.call(["sudo", "ip", "addr", "del", "10.0.0.3/24", "dev", "bridge43", "label", "bridge43:1"])
S_HOST, S_PORT = '10.0.0.3', 4242
S_MESSAGE = "Only hipsters uses POSIX"
server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server.bind((S_HOST, S_PORT))
HOST, PORT = '10.0.0.58', 1042
RECEIVED = ''
def UDP_send(trigger_line):
MESSAGE = str.encode("POSIX is for hipsters")
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((S_HOST, S_PORT + 1))
sock.sendto(MESSAGE, (HOST, PORT))
def UDP_recv():
    # Store the received datagram globally so verify_recv() can check it;
    # decode since recv() returns bytes on Python 3.
    global RECEIVED
    RECEIVED = server.recv(1024).decode()
def verify_recv(trigger_line):
    global RECEIVED
    ok = RECEIVED == S_MESSAGE
    RECEIVED = ''
    return ok
def UDP_send_much(trigger_line):
MESSAGE = "Message #"
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect((HOST, PORT))
for i in range(0, 5):
msg = str.encode(MESSAGE + repr(i))
sock.send(msg)
print("Sending {}".format(msg))
import _thread
_thread.start_new_thread(UDP_recv, ())
# Add custom event-handler
vm.on_output("recvfrom()", UDP_send)
vm.on_output("sendto() called", verify_recv)
vm.on_output("sendto() called", verify_recv)
vm.on_output("reading from buffer", UDP_send_much)
# Boot the VM, taking a timeout as parameter
if len(sys.argv) > 1:
vm.boot(image_name=str(sys.argv[1]))
else:
vm.cmake().boot(10,image_name='posix_udp').clean()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutStringManipulation(Koan):
def test_use_format_to_interpolate_variables(self):
value1 = 'one'
value2 = 2
string = "The values are {0} and {1}".format(value1, value2)
self.assertEqual("The values are one and 2", string)
def test_formatted_values_can_be_shown_in_any_order_or_be_repeated(self):
value1 = 'doh'
value2 = 'DOH'
string = "The values are {1}, {0}, {0} and {1}!".format(value1, value2)
self.assertEqual("The values are DOH, doh, doh and DOH!", string)
def test_any_python_expression_may_be_interpolated(self):
import math # import a standard python module with math functions
decimal_places = 4
string = "The square root of 5 is {0:.{1}f}".format(math.sqrt(5),
decimal_places)
self.assertEqual("The square root of 5 is 2.2361", string)
def test_you_can_get_a_substring_from_a_string(self):
string = "Bacon, lettuce and tomato"
self.assertEqual("let", string[7:10])
def test_you_can_get_a_single_character_from_a_string(self):
string = "Bacon, lettuce and tomato"
self.assertEqual('a', string[1])
def test_single_characters_can_be_represented_by_integers(self):
self.assertEqual(97, ord('a'))
self.assertEqual(True, ord('b') == (ord('a') + 1))
def test_strings_can_be_split(self):
string = "Sausage Egg Cheese"
words = string.split()
self.assertListEqual(["Sausage", "Egg", "Cheese"], words)
def test_strings_can_be_split_with_different_patterns(self):
import re #import python regular expression library
string = "the,rain;in,spain"
pattern = re.compile(',|;')
words = pattern.split(string)
self.assertListEqual(["the", "rain", "in", "spain"], words)
# Pattern is a Python regular expression pattern which matches ',' or ';'
def test_raw_strings_do_not_interpret_escape_characters(self):
string = r'\n'
self.assertNotEqual('\n', string)
self.assertEqual(r'\n', string)
self.assertEqual(2, len(string))
# Useful in regular expressions, file paths, URLs, etc.
def test_strings_can_be_joined(self):
words = ["Now", "is", "the", "time"]
self.assertEqual("Now is the time", ' '.join(words))
def test_strings_can_change_case(self):
self.assertEqual('Guido', 'guido'.capitalize())
self.assertEqual('GUIDO', 'guido'.upper())
self.assertEqual('timbot', 'TimBot'.lower())
self.assertEqual('Guido Van Rossum', 'guido van rossum'.title())
self.assertEqual('tOtAlLy AwEsOmE', 'ToTaLlY aWeSoMe'.swapcase())
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import frappe
def execute():
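    # Rename the Simplified/Traditional Chinese language labels to their new
    # display names, both in System Settings and on every User record.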
language_map = {
"中国(简体)": "簡體中文",
"中國(繁體)": "正體中文"
}
language_in_system_settings = frappe.db.get_single_value("System Settings", "language")
if language_in_system_settings in language_map:
new_language_name = language_map[language_in_system_settings]
frappe.db.set_value("System Settings", "System Settings", "language", new_language_name)
for old_name, new_name in language_map.items():
frappe.db.sql("""update `tabUser` set language=%(new_name)s where language=%(old_name)s""",
{ "old_name": old_name, "new_name": new_name })
|